author      antirez <antirez@gmail.com>    2018-05-24 17:17:37 +0200
committer   antirez <antirez@gmail.com>    2018-05-24 17:17:37 +0200
commit      08e1c8e820031751bd58736cc334dda1bc55cb5c (patch)
tree        2ee6ccfc7a5d7986b7a55bca5cf80ecbb6c08fc6 /deps/jemalloc/src
parent      8f4e2075a703acce08056321f49defd44dae19ed (diff)
download    redis-08e1c8e820031751bd58736cc334dda1bc55cb5c.tar.gz
Jemalloc upgraded to version 5.0.1.
Diffstat (limited to 'deps/jemalloc/src')
40 files changed, 14586 insertions, 9135 deletions
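The arena.c changes below are representative of the wider 4.x-to-5.x redesign: chunks and runs become extents and slabs, and ratio-based purging (`opt_lg_dirty_mult`) is replaced by time-based decay of dirty and muzzy pages (`dirty_decay_ms` / `muzzy_decay_ms`, smoothed via the `h_steps` smoothstep table). A minimal sketch of how an embedding application could inspect and tune the new decay knobs through the mallctl interface follows; it assumes jemalloc is built with the `je_` prefix (as Redis builds its bundled copy) and the usual `<jemalloc/jemalloc.h>` header, and the values used are purely illustrative.

```c
#include <stdio.h>
#include <sys/types.h>          /* ssize_t */
#include <jemalloc/jemalloc.h>

int main(void) {
	/* Read the build-time default dirty decay time: -1 disables
	 * decay-based purging, 0 purges immediately, >0 is milliseconds. */
	ssize_t dirty_decay_ms;
	size_t sz = sizeof(dirty_decay_ms);
	if (je_mallctl("opt.dirty_decay_ms", &dirty_decay_ms, &sz, NULL, 0) != 0) {
		return 1;
	}
	printf("opt.dirty_decay_ms = %zd\n", dirty_decay_ms);

	/* Lower the default decay time used for newly created arenas, so
	 * unused dirty pages are handed back to the OS sooner. */
	ssize_t new_decay_ms = 1000;
	if (je_mallctl("arenas.dirty_decay_ms", NULL, NULL, &new_decay_ms,
	    sizeof(new_decay_ms)) != 0) {
		return 1;
	}
	return 0;
}
```

Note that `arenas.dirty_decay_ms` only changes the default applied to arenas created afterwards; already-existing arenas can be adjusted individually through `arena.<i>.dirty_decay_ms`.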
diff --git a/deps/jemalloc/src/arena.c b/deps/jemalloc/src/arena.c
index 3081519cc..5d55bf1a0 100644
--- a/deps/jemalloc/src/arena.c
+++ b/deps/jemalloc/src/arena.c
@@ -1,21 +1,46 @@
-#define JEMALLOC_ARENA_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_ARENA_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/div.h"
+#include "jemalloc/internal/extent_dss.h"
+#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/util.h"
 
 /******************************************************************************/
 /* Data. */
 
-ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
-static ssize_t lg_dirty_mult_default;
-arena_bin_info_t arena_bin_info[NBINS];
-
-size_t map_bias;
-size_t map_misc_offset;
-size_t arena_maxrun; /* Max run size for arenas. */
-size_t large_maxclass; /* Max large size class. */
-static size_t small_maxrun; /* Max run size used for small size classes. */
-static bool *small_run_tab; /* Valid small run page multiples. */
-unsigned nlclasses; /* Number of large size classes. */
-unsigned nhclasses; /* Number of huge size classes. */
+/*
+ * Define names for both unininitialized and initialized phases, so that
+ * options and mallctl processing are straightforward.
+ */
+const char *percpu_arena_mode_names[] = {
+	"percpu",
+	"phycpu",
+	"disabled",
+	"percpu",
+	"phycpu"
+};
+percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT;
+
+ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT;
+ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;
+
+static atomic_zd_t dirty_decay_ms_default;
+static atomic_zd_t muzzy_decay_ms_default;
+
+const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
+#define STEP(step, h, x, y) \
+	h,
+	SMOOTHSTEP
+#undef STEP
+};
+
+static div_info_t arena_binind_div_info[NBINS];
 
 /******************************************************************************/
 /*
@@ -23,2008 +48,1244 @@ unsigned nhclasses; /* Number of huge size classes. */
  * definition.
*/ -static void arena_purge(arena_t *arena, bool all); -static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, - bool cleaned, bool decommitted); -static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, - arena_run_t *run, arena_bin_t *bin); -static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, - arena_run_t *run, arena_bin_t *bin); +static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, + arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit, + size_t npages_decay_max, bool is_background_thread); +static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, + bool is_background_thread, bool all); +static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, + bin_t *bin); +static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, + bin_t *bin); /******************************************************************************/ -#define CHUNK_MAP_KEY ((uintptr_t)0x1U) - -JEMALLOC_INLINE_C arena_chunk_map_misc_t * -arena_miscelm_key_create(size_t size) -{ - - return ((arena_chunk_map_misc_t *)(arena_mapbits_size_encode(size) | - CHUNK_MAP_KEY)); +void +arena_basic_stats_merge(UNUSED tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, + const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, + size_t *nactive, size_t *ndirty, size_t *nmuzzy) { + *nthreads += arena_nthreads_get(arena, false); + *dss = dss_prec_names[arena_dss_prec_get(arena)]; + *dirty_decay_ms = arena_dirty_decay_ms_get(arena); + *muzzy_decay_ms = arena_muzzy_decay_ms_get(arena); + *nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED); + *ndirty += extents_npages_get(&arena->extents_dirty); + *nmuzzy += extents_npages_get(&arena->extents_muzzy); } -JEMALLOC_INLINE_C bool -arena_miscelm_is_key(const arena_chunk_map_misc_t *miscelm) -{ +void +arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, + const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, + size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats, + bin_stats_t *bstats, arena_stats_large_t *lstats) { + cassert(config_stats); - return (((uintptr_t)miscelm & CHUNK_MAP_KEY) != 0); + arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms, + muzzy_decay_ms, nactive, ndirty, nmuzzy); + + size_t base_allocated, base_resident, base_mapped, metadata_thp; + base_stats_get(tsdn, arena->base, &base_allocated, &base_resident, + &base_mapped, &metadata_thp); + + arena_stats_lock(tsdn, &arena->stats); + + arena_stats_accum_zu(&astats->mapped, base_mapped + + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped)); + arena_stats_accum_zu(&astats->retained, + extents_npages_get(&arena->extents_retained) << LG_PAGE); + + arena_stats_accum_u64(&astats->decay_dirty.npurge, + arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.decay_dirty.npurge)); + arena_stats_accum_u64(&astats->decay_dirty.nmadvise, + arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.decay_dirty.nmadvise)); + arena_stats_accum_u64(&astats->decay_dirty.purged, + arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.decay_dirty.purged)); + + arena_stats_accum_u64(&astats->decay_muzzy.npurge, + arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.decay_muzzy.npurge)); + arena_stats_accum_u64(&astats->decay_muzzy.nmadvise, + arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.decay_muzzy.nmadvise)); + arena_stats_accum_u64(&astats->decay_muzzy.purged, + arena_stats_read_u64(tsdn, &arena->stats, + 
&arena->stats.decay_muzzy.purged)); + + arena_stats_accum_zu(&astats->base, base_allocated); + arena_stats_accum_zu(&astats->internal, arena_internal_get(arena)); + arena_stats_accum_zu(&astats->metadata_thp, metadata_thp); + arena_stats_accum_zu(&astats->resident, base_resident + + (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) + + extents_npages_get(&arena->extents_dirty) + + extents_npages_get(&arena->extents_muzzy)) << LG_PAGE))); + + for (szind_t i = 0; i < NSIZES - NBINS; i++) { + uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.lstats[i].nmalloc); + arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc); + arena_stats_accum_u64(&astats->nmalloc_large, nmalloc); + + uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.lstats[i].ndalloc); + arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc); + arena_stats_accum_u64(&astats->ndalloc_large, ndalloc); + + uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.lstats[i].nrequests); + arena_stats_accum_u64(&lstats[i].nrequests, + nmalloc + nrequests); + arena_stats_accum_u64(&astats->nrequests_large, + nmalloc + nrequests); + + assert(nmalloc >= ndalloc); + assert(nmalloc - ndalloc <= SIZE_T_MAX); + size_t curlextents = (size_t)(nmalloc - ndalloc); + lstats[i].curlextents += curlextents; + arena_stats_accum_zu(&astats->allocated_large, + curlextents * sz_index2size(NBINS + i)); + } + + arena_stats_unlock(tsdn, &arena->stats); + + /* tcache_bytes counts currently cached bytes. */ + atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED); + malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); + cache_bin_array_descriptor_t *descriptor; + ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) { + szind_t i = 0; + for (; i < NBINS; i++) { + cache_bin_t *tbin = &descriptor->bins_small[i]; + arena_stats_accum_zu(&astats->tcache_bytes, + tbin->ncached * sz_index2size(i)); + } + for (; i < nhbins; i++) { + cache_bin_t *tbin = &descriptor->bins_large[i]; + arena_stats_accum_zu(&astats->tcache_bytes, + tbin->ncached * sz_index2size(i)); + } + } + malloc_mutex_prof_read(tsdn, + &astats->mutex_prof_data[arena_prof_mutex_tcache_list], + &arena->tcache_ql_mtx); + malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx); + +#define READ_ARENA_MUTEX_PROF_DATA(mtx, ind) \ + malloc_mutex_lock(tsdn, &arena->mtx); \ + malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind], \ + &arena->mtx); \ + malloc_mutex_unlock(tsdn, &arena->mtx); + + /* Gather per arena mutex profiling data. 
*/ + READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large); + READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx, + arena_prof_mutex_extent_avail) + READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx, + arena_prof_mutex_extents_dirty) + READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx, + arena_prof_mutex_extents_muzzy) + READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx, + arena_prof_mutex_extents_retained) + READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx, + arena_prof_mutex_decay_dirty) + READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx, + arena_prof_mutex_decay_muzzy) + READ_ARENA_MUTEX_PROF_DATA(base->mtx, + arena_prof_mutex_base) +#undef READ_ARENA_MUTEX_PROF_DATA + + nstime_copy(&astats->uptime, &arena->create_time); + nstime_update(&astats->uptime); + nstime_subtract(&astats->uptime, &arena->create_time); + + for (szind_t i = 0; i < NBINS; i++) { + bin_stats_merge(tsdn, &bstats[i], &arena->bins[i]); + } } -#undef CHUNK_MAP_KEY - -JEMALLOC_INLINE_C size_t -arena_miscelm_key_size_get(const arena_chunk_map_misc_t *miscelm) -{ - - assert(arena_miscelm_is_key(miscelm)); - - return (arena_mapbits_size_decode((uintptr_t)miscelm)); +void +arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty, + extent); + if (arena_dirty_decay_ms_get(arena) == 0) { + arena_decay_dirty(tsdn, arena, false, true); + } else { + arena_background_thread_inactivity_check(tsdn, arena, false); + } } -JEMALLOC_INLINE_C size_t -arena_miscelm_size_get(arena_chunk_map_misc_t *miscelm) -{ - arena_chunk_t *chunk; - size_t pageind, mapbits; +static void * +arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) { + void *ret; + arena_slab_data_t *slab_data = extent_slab_data_get(slab); + size_t regind; - assert(!arena_miscelm_is_key(miscelm)); + assert(extent_nfree_get(slab) > 0); + assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info)); - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); - pageind = arena_miscelm_to_pageind(miscelm); - mapbits = arena_mapbits_get(chunk, pageind); - return (arena_mapbits_size_decode(mapbits)); + regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info); + ret = (void *)((uintptr_t)extent_addr_get(slab) + + (uintptr_t)(bin_info->reg_size * regind)); + extent_nfree_dec(slab); + return ret; } -JEMALLOC_INLINE_C int -arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b) -{ - uintptr_t a_miscelm = (uintptr_t)a; - uintptr_t b_miscelm = (uintptr_t)b; - - assert(a != NULL); - assert(b != NULL); - - return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm)); -} +#ifndef JEMALLOC_JET +static +#endif +size_t +arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) { + size_t diff, regind; -/* Generate red-black tree functions. */ -rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t, - rb_link, arena_run_comp) + /* Freeing a pointer outside the slab can cause assertion failure. */ + assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab)); + assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab)); + /* Freeing an interior pointer can cause assertion failure. 
*/ + assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) % + (uintptr_t)bin_infos[binind].reg_size == 0); -static size_t -run_quantize(size_t size) -{ - size_t qsize; + diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)); - assert(size != 0); - assert(size == PAGE_CEILING(size)); + /* Avoid doing division with a variable divisor. */ + regind = div_compute(&arena_binind_div_info[binind], diff); - /* Don't change sizes that are valid small run sizes. */ - if (size <= small_maxrun && small_run_tab[size >> LG_PAGE]) - return (size); + assert(regind < bin_infos[binind].nregs); - /* - * Round down to the nearest run size that can actually be requested - * during normal large allocation. Add large_pad so that cache index - * randomization can offset the allocation from the page boundary. - */ - qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad; - if (qsize <= SMALL_MAXCLASS + large_pad) - return (run_quantize(size - large_pad)); - assert(qsize <= size); - return (qsize); + return regind; } -static size_t -run_quantize_next(size_t size) -{ - size_t large_run_size_next; - - assert(size != 0); - assert(size == PAGE_CEILING(size)); +static void +arena_slab_reg_dalloc(extent_t *slab, arena_slab_data_t *slab_data, void *ptr) { + szind_t binind = extent_szind_get(slab); + const bin_info_t *bin_info = &bin_infos[binind]; + size_t regind = arena_slab_regind(slab, binind, ptr); - /* - * Return the next quantized size greater than the input size. - * Quantized sizes comprise the union of run sizes that back small - * region runs, and run sizes that back large regions with no explicit - * alignment constraints. - */ + assert(extent_nfree_get(slab) < bin_info->nregs); + /* Freeing an unallocated pointer can cause assertion failure. */ + assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind)); - if (size > SMALL_MAXCLASS) { - large_run_size_next = PAGE_CEILING(index2size(size2index(size - - large_pad) + 1) + large_pad); - } else - large_run_size_next = SIZE_T_MAX; - if (size >= small_maxrun) - return (large_run_size_next); - - while (true) { - size += PAGE; - assert(size <= small_maxrun); - if (small_run_tab[size >> LG_PAGE]) { - if (large_run_size_next < size) - return (large_run_size_next); - return (size); - } - } + bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind); + extent_nfree_inc(slab); } -static size_t -run_quantize_first(size_t size) -{ - size_t qsize = run_quantize(size); +static void +arena_nactive_add(arena_t *arena, size_t add_pages) { + atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED); +} - if (qsize < size) { - /* - * Skip a quantization that may have an adequately large run, - * because under-sized runs may be mixed in. This only happens - * when an unusual size is requested, i.e. for aligned - * allocation, and is just one of several places where linear - * search would potentially find sufficiently aligned available - * memory somewhere lower. - */ - qsize = run_quantize_next(size); - } - return (qsize); +static void +arena_nactive_sub(arena_t *arena, size_t sub_pages) { + assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages); + atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED); } -JEMALLOC_INLINE_C int -arena_avail_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b) -{ - int ret; - uintptr_t a_miscelm = (uintptr_t)a; - size_t a_qsize = run_quantize(arena_miscelm_is_key(a) ? 
- arena_miscelm_key_size_get(a) : arena_miscelm_size_get(a)); - size_t b_qsize = run_quantize(arena_miscelm_size_get(b)); +static void +arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) { + szind_t index, hindex; - /* - * Compare based on quantized size rather than size, in order to sort - * equally useful runs only by address. - */ - ret = (a_qsize > b_qsize) - (a_qsize < b_qsize); - if (ret == 0) { - if (!arena_miscelm_is_key(a)) { - uintptr_t b_miscelm = (uintptr_t)b; + cassert(config_stats); - ret = (a_miscelm > b_miscelm) - (a_miscelm < b_miscelm); - } else { - /* - * Treat keys as if they are lower than anything else. - */ - ret = -1; - } + if (usize < LARGE_MINCLASS) { + usize = LARGE_MINCLASS; } + index = sz_size2index(usize); + hindex = (index >= NBINS) ? index - NBINS : 0; - return (ret); + arena_stats_add_u64(tsdn, &arena->stats, + &arena->stats.lstats[hindex].nmalloc, 1); } -/* Generate red-black tree functions. */ -rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, - arena_chunk_map_misc_t, rb_link, arena_avail_comp) - static void -arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, - size_t npages) -{ +arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) { + szind_t index, hindex; - assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> - LG_PAGE)); - arena_avail_tree_insert(&arena->runs_avail, arena_miscelm_get(chunk, - pageind)); -} + cassert(config_stats); -static void -arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, - size_t npages) -{ + if (usize < LARGE_MINCLASS) { + usize = LARGE_MINCLASS; + } + index = sz_size2index(usize); + hindex = (index >= NBINS) ? index - NBINS : 0; - assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> - LG_PAGE)); - arena_avail_tree_remove(&arena->runs_avail, arena_miscelm_get(chunk, - pageind)); + arena_stats_add_u64(tsdn, &arena->stats, + &arena->stats.lstats[hindex].ndalloc, 1); } static void -arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, - size_t npages) -{ - arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); - - assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> - LG_PAGE)); - assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY); - assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) == - CHUNK_MAP_DIRTY); - - qr_new(&miscelm->rd, rd_link); - qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link); - arena->ndirty += npages; +arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize, + size_t usize) { + arena_large_dalloc_stats_update(tsdn, arena, oldusize); + arena_large_malloc_stats_update(tsdn, arena, usize); } -static void -arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, - size_t npages) -{ - arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); +extent_t * +arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, bool *zero) { + extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; - assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> - LG_PAGE)); - assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY); - assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) == - CHUNK_MAP_DIRTY); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); - qr_remove(&miscelm->rd, rd_link); - assert(arena->ndirty >= npages); - arena->ndirty -= npages; -} + szind_t szind = 
sz_size2index(usize); + size_t mapped_add; + bool commit = true; + extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks, + &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false, + szind, zero, &commit); + if (extent == NULL) { + extent = extents_alloc(tsdn, arena, &extent_hooks, + &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment, + false, szind, zero, &commit); + } + size_t size = usize + sz_large_pad; + if (extent == NULL) { + extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL, + usize, sz_large_pad, alignment, false, szind, zero, + &commit); + if (config_stats) { + /* + * extent may be NULL on OOM, but in that case + * mapped_add isn't used below, so there's no need to + * conditionlly set it to 0 here. + */ + mapped_add = size; + } + } else if (config_stats) { + mapped_add = 0; + } -static size_t -arena_chunk_dirty_npages(const extent_node_t *node) -{ + if (extent != NULL) { + if (config_stats) { + arena_stats_lock(tsdn, &arena->stats); + arena_large_malloc_stats_update(tsdn, arena, usize); + if (mapped_add != 0) { + arena_stats_add_zu(tsdn, &arena->stats, + &arena->stats.mapped, mapped_add); + } + arena_stats_unlock(tsdn, &arena->stats); + } + arena_nactive_add(arena, size >> LG_PAGE); + } - return (extent_node_size_get(node) >> LG_PAGE); + return extent; } void -arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache) -{ - - if (cache) { - extent_node_dirty_linkage_init(node); - extent_node_dirty_insert(node, &arena->runs_dirty, - &arena->chunks_cache); - arena->ndirty += arena_chunk_dirty_npages(node); +arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { + if (config_stats) { + arena_stats_lock(tsdn, &arena->stats); + arena_large_dalloc_stats_update(tsdn, arena, + extent_usize_get(extent)); + arena_stats_unlock(tsdn, &arena->stats); } + arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE); } void -arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty) -{ +arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent, + size_t oldusize) { + size_t usize = extent_usize_get(extent); + size_t udiff = oldusize - usize; - if (dirty) { - extent_node_dirty_remove(node); - assert(arena->ndirty >= arena_chunk_dirty_npages(node)); - arena->ndirty -= arena_chunk_dirty_npages(node); + if (config_stats) { + arena_stats_lock(tsdn, &arena->stats); + arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize); + arena_stats_unlock(tsdn, &arena->stats); } + arena_nactive_sub(arena, udiff >> LG_PAGE); } -JEMALLOC_INLINE_C void * -arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info) -{ - void *ret; - unsigned regind; - arena_chunk_map_misc_t *miscelm; - void *rpages; - - assert(run->nfree > 0); - assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info)); - - regind = bitmap_sfu(run->bitmap, &bin_info->bitmap_info); - miscelm = arena_run_to_miscelm(run); - rpages = arena_miscelm_to_rpages(miscelm); - ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset + - (uintptr_t)(bin_info->reg_interval * regind)); - run->nfree--; - return (ret); -} - -JEMALLOC_INLINE_C void -arena_run_reg_dalloc(arena_run_t *run, void *ptr) -{ - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - size_t mapbits = arena_mapbits_get(chunk, pageind); - szind_t binind = arena_ptr_small_binind_get(ptr, mapbits); - arena_bin_info_t *bin_info = &arena_bin_info[binind]; - unsigned regind = 
arena_run_regind(run, bin_info, ptr); - - assert(run->nfree < bin_info->nregs); - /* Freeing an interior pointer can cause assertion failure. */ - assert(((uintptr_t)ptr - - ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) + - (uintptr_t)bin_info->reg0_offset)) % - (uintptr_t)bin_info->reg_interval == 0); - assert((uintptr_t)ptr >= - (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) + - (uintptr_t)bin_info->reg0_offset); - /* Freeing an unallocated pointer can cause assertion failure. */ - assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind)); - - bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind); - run->nfree++; -} - -JEMALLOC_INLINE_C void -arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages) -{ - - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + - (run_ind << LG_PAGE)), (npages << LG_PAGE)); - memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0, - (npages << LG_PAGE)); -} - -JEMALLOC_INLINE_C void -arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind) -{ +void +arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent, + size_t oldusize) { + size_t usize = extent_usize_get(extent); + size_t udiff = usize - oldusize; - JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind - << LG_PAGE)), PAGE); + if (config_stats) { + arena_stats_lock(tsdn, &arena->stats); + arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize); + arena_stats_unlock(tsdn, &arena->stats); + } + arena_nactive_add(arena, udiff >> LG_PAGE); } -JEMALLOC_INLINE_C void -arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind) -{ - size_t i; - UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE)); - - arena_run_page_mark_zeroed(chunk, run_ind); - for (i = 0; i < PAGE / sizeof(size_t); i++) - assert(p[i] == 0); +static ssize_t +arena_decay_ms_read(arena_decay_t *decay) { + return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED); } static void -arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages) -{ - - if (config_stats) { - ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + add_pages - - sub_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive << - LG_PAGE); - if (cactive_diff != 0) - stats_cactive_add(cactive_diff); - } +arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) { + atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED); } static void -arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind, - size_t flag_dirty, size_t flag_decommitted, size_t need_pages) -{ - size_t total_pages, rem_pages; - - assert(flag_dirty == 0 || flag_decommitted == 0); - - total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >> - LG_PAGE; - assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) == - flag_dirty); - assert(need_pages <= total_pages); - rem_pages = total_pages - need_pages; - - arena_avail_remove(arena, chunk, run_ind, total_pages); - if (flag_dirty != 0) - arena_run_dirty_remove(arena, chunk, run_ind, total_pages); - arena_cactive_update(arena, need_pages, 0); - arena->nactive += need_pages; - - /* Keep track of trailing unused pages for later use. */ - if (rem_pages > 0) { - size_t flags = flag_dirty | flag_decommitted; - size_t flag_unzeroed_mask = (flags == 0) ? 
CHUNK_MAP_UNZEROED : - 0; - - arena_mapbits_unallocated_set(chunk, run_ind+need_pages, - (rem_pages << LG_PAGE), flags | - (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) & - flag_unzeroed_mask)); - arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1, - (rem_pages << LG_PAGE), flags | - (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) & - flag_unzeroed_mask)); - if (flag_dirty != 0) { - arena_run_dirty_insert(arena, chunk, run_ind+need_pages, - rem_pages); - } - arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages); - } -} - -static bool -arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size, - bool remove, bool zero) -{ - arena_chunk_t *chunk; - arena_chunk_map_misc_t *miscelm; - size_t flag_dirty, flag_decommitted, run_ind, need_pages; - size_t flag_unzeroed_mask; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - miscelm = arena_run_to_miscelm(run); - run_ind = arena_miscelm_to_pageind(miscelm); - flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); - flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind); - need_pages = (size >> LG_PAGE); - assert(need_pages > 0); - - if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize, - run_ind << LG_PAGE, size, arena->ind)) - return (true); - - if (remove) { - arena_run_split_remove(arena, chunk, run_ind, flag_dirty, - flag_decommitted, need_pages); - } - - if (zero) { - if (flag_decommitted != 0) { - /* The run is untouched, and therefore zeroed. */ - JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void - *)((uintptr_t)chunk + (run_ind << LG_PAGE)), - (need_pages << LG_PAGE)); - } else if (flag_dirty != 0) { - /* The run is dirty, so all pages must be zeroed. */ - arena_run_zero(chunk, run_ind, need_pages); - } else { - /* - * The run is clean, so some pages may be zeroed (i.e. - * never before touched). - */ - size_t i; - for (i = 0; i < need_pages; i++) { - if (arena_mapbits_unzeroed_get(chunk, run_ind+i) - != 0) - arena_run_zero(chunk, run_ind+i, 1); - else if (config_debug) { - arena_run_page_validate_zeroed(chunk, - run_ind+i); - } else { - arena_run_page_mark_zeroed(chunk, - run_ind+i); - } - } - } - } else { - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + - (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); - } - +arena_decay_deadline_init(arena_decay_t *decay) { /* - * Set the last element first, in case the run only contains one page - * (i.e. both statements set the same element). + * Generate a new deadline that is uniformly random within the next + * epoch after the current one. */ - flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? 
- CHUNK_MAP_UNZEROED : 0; - arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty | - (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, - run_ind+need_pages-1))); - arena_mapbits_large_set(chunk, run_ind, size, flag_dirty | - (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind))); - return (false); -} - -static bool -arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) -{ - - return (arena_run_split_large_helper(arena, run, size, true, zero)); -} - -static bool -arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) -{ - - return (arena_run_split_large_helper(arena, run, size, false, zero)); -} - -static bool -arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size, - szind_t binind) -{ - arena_chunk_t *chunk; - arena_chunk_map_misc_t *miscelm; - size_t flag_dirty, flag_decommitted, run_ind, need_pages, i; - - assert(binind != BININD_INVALID); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - miscelm = arena_run_to_miscelm(run); - run_ind = arena_miscelm_to_pageind(miscelm); - flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); - flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind); - need_pages = (size >> LG_PAGE); - assert(need_pages > 0); - - if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize, - run_ind << LG_PAGE, size, arena->ind)) - return (true); + nstime_copy(&decay->deadline, &decay->epoch); + nstime_add(&decay->deadline, &decay->interval); + if (arena_decay_ms_read(decay) > 0) { + nstime_t jitter; - arena_run_split_remove(arena, chunk, run_ind, flag_dirty, - flag_decommitted, need_pages); - - for (i = 0; i < need_pages; i++) { - size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk, - run_ind+i); - arena_mapbits_small_set(chunk, run_ind+i, i, binind, - flag_unzeroed); - if (config_debug && flag_dirty == 0 && flag_unzeroed == 0) - arena_run_page_validate_zeroed(chunk, run_ind+i); + nstime_init(&jitter, prng_range_u64(&decay->jitter_state, + nstime_ns(&decay->interval))); + nstime_add(&decay->deadline, &jitter); } - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + - (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); - return (false); } -static arena_chunk_t * -arena_chunk_init_spare(arena_t *arena) -{ - arena_chunk_t *chunk; - - assert(arena->spare != NULL); - - chunk = arena->spare; - arena->spare = NULL; - - assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); - assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); - assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == - arena_maxrun); - assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == - arena_maxrun); - assert(arena_mapbits_dirty_get(chunk, map_bias) == - arena_mapbits_dirty_get(chunk, chunk_npages-1)); - - return (chunk); +static bool +arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) { + return (nstime_compare(&decay->deadline, time) <= 0); } -static bool -arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero) -{ +static size_t +arena_decay_backlog_npages_limit(const arena_decay_t *decay) { + uint64_t sum; + size_t npages_limit_backlog; + unsigned i; /* - * The extent node notion of "committed" doesn't directly apply to - * arena chunks. Arbitrarily mark them as committed. The commit state - * of runs is tracked individually, and upon chunk deallocation the - * entire chunk is in a consistent commit state. 
+ * For each element of decay_backlog, multiply by the corresponding + * fixed-point smoothstep decay factor. Sum the products, then divide + * to round down to the nearest whole number of pages. */ - extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true); - extent_node_achunk_set(&chunk->node, true); - return (chunk_register(chunk, &chunk->node)); -} - -static arena_chunk_t * -arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks, - bool *zero, bool *commit) -{ - arena_chunk_t *chunk; - - malloc_mutex_unlock(&arena->lock); - - chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_hooks, NULL, - chunksize, chunksize, zero, commit); - if (chunk != NULL && !*commit) { - /* Commit header. */ - if (chunk_hooks->commit(chunk, chunksize, 0, map_bias << - LG_PAGE, arena->ind)) { - chunk_dalloc_wrapper(arena, chunk_hooks, - (void *)chunk, chunksize, *commit); - chunk = NULL; - } - } - if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) { - if (!*commit) { - /* Undo commit of header. */ - chunk_hooks->decommit(chunk, chunksize, 0, map_bias << - LG_PAGE, arena->ind); - } - chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk, - chunksize, *commit); - chunk = NULL; + sum = 0; + for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) { + sum += decay->backlog[i] * h_steps[i]; } + npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP); - malloc_mutex_lock(&arena->lock); - return (chunk); + return npages_limit_backlog; } -static arena_chunk_t * -arena_chunk_alloc_internal(arena_t *arena, bool *zero, bool *commit) -{ - arena_chunk_t *chunk; - chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; +static void +arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) { + size_t npages_delta = (current_npages > decay->nunpurged) ? + current_npages - decay->nunpurged : 0; + decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta; - chunk = chunk_alloc_cache(arena, &chunk_hooks, NULL, chunksize, - chunksize, zero, true); - if (chunk != NULL) { - if (arena_chunk_register(arena, chunk, *zero)) { - chunk_dalloc_cache(arena, &chunk_hooks, chunk, - chunksize, true); - return (NULL); + if (config_debug) { + if (current_npages > decay->ceil_npages) { + decay->ceil_npages = current_npages; } - *commit = true; - } - if (chunk == NULL) { - chunk = arena_chunk_alloc_internal_hard(arena, &chunk_hooks, - zero, commit); - } - - if (config_stats && chunk != NULL) { - arena->stats.mapped += chunksize; - arena->stats.metadata_mapped += (map_bias << LG_PAGE); - } - - return (chunk); -} - -static arena_chunk_t * -arena_chunk_init_hard(arena_t *arena) -{ - arena_chunk_t *chunk; - bool zero, commit; - size_t flag_unzeroed, flag_decommitted, i; - - assert(arena->spare == NULL); - - zero = false; - commit = false; - chunk = arena_chunk_alloc_internal(arena, &zero, &commit); - if (chunk == NULL) - return (NULL); - - /* - * Initialize the map to contain one maximal free untouched run. Mark - * the pages as zeroed if chunk_alloc() returned a zeroed or decommitted - * chunk. - */ - flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED; - flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED; - arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun, - flag_unzeroed | flag_decommitted); - /* - * There is no need to initialize the internal page map entries unless - * the chunk is not zeroed. 
- */ - if (!zero) { - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED( - (void *)arena_bitselm_get(chunk, map_bias+1), - (size_t)((uintptr_t) arena_bitselm_get(chunk, - chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk, - map_bias+1))); - for (i = map_bias+1; i < chunk_npages-1; i++) - arena_mapbits_internal_set(chunk, i, flag_unzeroed); - } else { - JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void - *)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t) - arena_bitselm_get(chunk, chunk_npages-1) - - (uintptr_t)arena_bitselm_get(chunk, map_bias+1))); - if (config_debug) { - for (i = map_bias+1; i < chunk_npages-1; i++) { - assert(arena_mapbits_unzeroed_get(chunk, i) == - flag_unzeroed); - } + size_t npages_limit = arena_decay_backlog_npages_limit(decay); + assert(decay->ceil_npages >= npages_limit); + if (decay->ceil_npages > npages_limit) { + decay->ceil_npages = npages_limit; } } - arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun, - flag_unzeroed); - - return (chunk); -} - -static arena_chunk_t * -arena_chunk_alloc(arena_t *arena) -{ - arena_chunk_t *chunk; - - if (arena->spare != NULL) - chunk = arena_chunk_init_spare(arena); - else { - chunk = arena_chunk_init_hard(arena); - if (chunk == NULL) - return (NULL); - } - - /* Insert the run into the runs_avail tree. */ - arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias); - - return (chunk); } static void -arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk) -{ - - assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); - assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); - assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == - arena_maxrun); - assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == - arena_maxrun); - assert(arena_mapbits_dirty_get(chunk, map_bias) == - arena_mapbits_dirty_get(chunk, chunk_npages-1)); - assert(arena_mapbits_decommitted_get(chunk, map_bias) == - arena_mapbits_decommitted_get(chunk, chunk_npages-1)); - - /* - * Remove run from the runs_avail tree, so that the arena does not use - * it. - */ - arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias); +arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64, + size_t current_npages) { + if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) { + memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) * + sizeof(size_t)); + } else { + size_t nadvance_z = (size_t)nadvance_u64; - if (arena->spare != NULL) { - arena_chunk_t *spare = arena->spare; - chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; - bool committed; + assert((uint64_t)nadvance_z == nadvance_u64); - arena->spare = chunk; - if (arena_mapbits_dirty_get(spare, map_bias) != 0) { - arena_run_dirty_remove(arena, spare, map_bias, - chunk_npages-map_bias); + memmove(decay->backlog, &decay->backlog[nadvance_z], + (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t)); + if (nadvance_z > 1) { + memset(&decay->backlog[SMOOTHSTEP_NSTEPS - + nadvance_z], 0, (nadvance_z-1) * sizeof(size_t)); } + } - chunk_deregister(spare, &spare->node); - - committed = (arena_mapbits_decommitted_get(spare, map_bias) == - 0); - if (!committed) { - /* - * Decommit the header. Mark the chunk as decommitted - * even if header decommit fails, since treating a - * partially committed chunk as committed has a high - * potential for causing later access of decommitted - * memory. 
- */ - chunk_hooks = chunk_hooks_get(arena); - chunk_hooks.decommit(spare, chunksize, 0, map_bias << - LG_PAGE, arena->ind); - } - - chunk_dalloc_cache(arena, &chunk_hooks, (void *)spare, - chunksize, committed); - - if (config_stats) { - arena->stats.mapped -= chunksize; - arena->stats.metadata_mapped -= (map_bias << LG_PAGE); - } - } else - arena->spare = chunk; + arena_decay_backlog_update_last(decay, current_npages); } static void -arena_huge_malloc_stats_update(arena_t *arena, size_t usize) -{ - szind_t index = size2index(usize) - nlclasses - NBINS; - - cassert(config_stats); - - arena->stats.nmalloc_huge++; - arena->stats.allocated_huge += usize; - arena->stats.hstats[index].nmalloc++; - arena->stats.hstats[index].curhchunks++; +arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, + extents_t *extents, size_t current_npages, size_t npages_limit, + bool is_background_thread) { + if (current_npages > npages_limit) { + arena_decay_to_limit(tsdn, arena, decay, extents, false, + npages_limit, current_npages - npages_limit, + is_background_thread); + } } static void -arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize) -{ - szind_t index = size2index(usize) - nlclasses - NBINS; +arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time, + size_t current_npages) { + assert(arena_decay_deadline_reached(decay, time)); - cassert(config_stats); + nstime_t delta; + nstime_copy(&delta, time); + nstime_subtract(&delta, &decay->epoch); - arena->stats.nmalloc_huge--; - arena->stats.allocated_huge -= usize; - arena->stats.hstats[index].nmalloc--; - arena->stats.hstats[index].curhchunks--; -} + uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval); + assert(nadvance_u64 > 0); -static void -arena_huge_dalloc_stats_update(arena_t *arena, size_t usize) -{ - szind_t index = size2index(usize) - nlclasses - NBINS; + /* Add nadvance_u64 decay intervals to epoch. */ + nstime_copy(&delta, &decay->interval); + nstime_imultiply(&delta, nadvance_u64); + nstime_add(&decay->epoch, &delta); - cassert(config_stats); + /* Set a new deadline. */ + arena_decay_deadline_init(decay); - arena->stats.ndalloc_huge++; - arena->stats.allocated_huge -= usize; - arena->stats.hstats[index].ndalloc++; - arena->stats.hstats[index].curhchunks--; + /* Update the backlog. */ + arena_decay_backlog_update(decay, nadvance_u64, current_npages); } static void -arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize) -{ - szind_t index = size2index(usize) - nlclasses - NBINS; - - cassert(config_stats); +arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, + extents_t *extents, const nstime_t *time, bool is_background_thread) { + size_t current_npages = extents_npages_get(extents); + arena_decay_epoch_advance_helper(decay, time, current_npages); - arena->stats.ndalloc_huge--; - arena->stats.allocated_huge += usize; - arena->stats.hstats[index].ndalloc--; - arena->stats.hstats[index].curhchunks++; -} - -static void -arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize) -{ + size_t npages_limit = arena_decay_backlog_npages_limit(decay); + /* We may unlock decay->mtx when try_purge(). Finish logging first. */ + decay->nunpurged = (npages_limit > current_npages) ? 
npages_limit : + current_npages; - arena_huge_dalloc_stats_update(arena, oldsize); - arena_huge_malloc_stats_update(arena, usize); + if (!background_thread_enabled() || is_background_thread) { + arena_decay_try_purge(tsdn, arena, decay, extents, + current_npages, npages_limit, is_background_thread); + } } static void -arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize, - size_t usize) -{ - - arena_huge_dalloc_stats_update_undo(arena, oldsize); - arena_huge_malloc_stats_update_undo(arena, usize); -} - -extent_node_t * -arena_node_alloc(arena_t *arena) -{ - extent_node_t *node; - - malloc_mutex_lock(&arena->node_cache_mtx); - node = ql_last(&arena->node_cache, ql_link); - if (node == NULL) { - malloc_mutex_unlock(&arena->node_cache_mtx); - return (base_alloc(sizeof(extent_node_t))); +arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) { + arena_decay_ms_write(decay, decay_ms); + if (decay_ms > 0) { + nstime_init(&decay->interval, (uint64_t)decay_ms * + KQU(1000000)); + nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS); } - ql_tail_remove(&arena->node_cache, extent_node_t, ql_link); - malloc_mutex_unlock(&arena->node_cache_mtx); - return (node); -} - -void -arena_node_dalloc(arena_t *arena, extent_node_t *node) -{ - malloc_mutex_lock(&arena->node_cache_mtx); - ql_elm_new(node, ql_link); - ql_tail_insert(&arena->node_cache, node, ql_link); - malloc_mutex_unlock(&arena->node_cache_mtx); + nstime_init(&decay->epoch, 0); + nstime_update(&decay->epoch); + decay->jitter_state = (uint64_t)(uintptr_t)decay; + arena_decay_deadline_init(decay); + decay->nunpurged = 0; + memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t)); } -static void * -arena_chunk_alloc_huge_hard(arena_t *arena, chunk_hooks_t *chunk_hooks, - size_t usize, size_t alignment, bool *zero, size_t csize) -{ - void *ret; - bool commit = true; - - ret = chunk_alloc_wrapper(arena, chunk_hooks, NULL, csize, alignment, - zero, &commit); - if (ret == NULL) { - /* Revert optimistic stats updates. */ - malloc_mutex_lock(&arena->lock); - if (config_stats) { - arena_huge_malloc_stats_update_undo(arena, usize); - arena->stats.mapped -= usize; +static bool +arena_decay_init(arena_decay_t *decay, ssize_t decay_ms, + arena_stats_decay_t *stats) { + if (config_debug) { + for (size_t i = 0; i < sizeof(arena_decay_t); i++) { + assert(((char *)decay)[i] == 0); } - arena->nactive -= (usize >> LG_PAGE); - malloc_mutex_unlock(&arena->lock); - } - - return (ret); -} - -void * -arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment, - bool *zero) -{ - void *ret; - chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; - size_t csize = CHUNK_CEILING(usize); - - malloc_mutex_lock(&arena->lock); - - /* Optimistically update stats. 
*/ - if (config_stats) { - arena_huge_malloc_stats_update(arena, usize); - arena->stats.mapped += usize; + decay->ceil_npages = 0; } - arena->nactive += (usize >> LG_PAGE); - - ret = chunk_alloc_cache(arena, &chunk_hooks, NULL, csize, alignment, - zero, true); - malloc_mutex_unlock(&arena->lock); - if (ret == NULL) { - ret = arena_chunk_alloc_huge_hard(arena, &chunk_hooks, usize, - alignment, zero, csize); + if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY, + malloc_mutex_rank_exclusive)) { + return true; } - - if (config_stats && ret != NULL) - stats_cactive_add(usize); - return (ret); -} - -void -arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize) -{ - chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; - size_t csize; - - csize = CHUNK_CEILING(usize); - malloc_mutex_lock(&arena->lock); + decay->purging = false; + arena_decay_reinit(decay, decay_ms); + /* Memory is zeroed, so there is no need to clear stats. */ if (config_stats) { - arena_huge_dalloc_stats_update(arena, usize); - arena->stats.mapped -= usize; - stats_cactive_sub(usize); - } - arena->nactive -= (usize >> LG_PAGE); - - chunk_dalloc_cache(arena, &chunk_hooks, chunk, csize, true); - malloc_mutex_unlock(&arena->lock); -} - -void -arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize, - size_t usize) -{ - - assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize)); - assert(oldsize != usize); - - malloc_mutex_lock(&arena->lock); - if (config_stats) - arena_huge_ralloc_stats_update(arena, oldsize, usize); - if (oldsize < usize) { - size_t udiff = usize - oldsize; - arena->nactive += udiff >> LG_PAGE; - if (config_stats) - stats_cactive_add(udiff); - } else { - size_t udiff = oldsize - usize; - arena->nactive -= udiff >> LG_PAGE; - if (config_stats) - stats_cactive_sub(udiff); + decay->stats = stats; } - malloc_mutex_unlock(&arena->lock); + return false; } -void -arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize, - size_t usize) -{ - size_t udiff = oldsize - usize; - size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize); - - malloc_mutex_lock(&arena->lock); - if (config_stats) { - arena_huge_ralloc_stats_update(arena, oldsize, usize); - if (cdiff != 0) { - arena->stats.mapped -= cdiff; - stats_cactive_sub(udiff); - } +static bool +arena_decay_ms_valid(ssize_t decay_ms) { + if (decay_ms < -1) { + return false; } - arena->nactive -= udiff >> LG_PAGE; - - if (cdiff != 0) { - chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; - void *nchunk = (void *)((uintptr_t)chunk + - CHUNK_CEILING(usize)); - - chunk_dalloc_cache(arena, &chunk_hooks, nchunk, cdiff, true); + if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX * + KQU(1000)) { + return true; } - malloc_mutex_unlock(&arena->lock); + return false; } static bool -arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_hooks_t *chunk_hooks, - void *chunk, size_t oldsize, size_t usize, bool *zero, void *nchunk, - size_t udiff, size_t cdiff) -{ - bool err; - bool commit = true; - - err = (chunk_alloc_wrapper(arena, chunk_hooks, nchunk, cdiff, chunksize, - zero, &commit) == NULL); - if (err) { - /* Revert optimistic stats updates. 
*/ - malloc_mutex_lock(&arena->lock); - if (config_stats) { - arena_huge_ralloc_stats_update_undo(arena, oldsize, - usize); - arena->stats.mapped -= cdiff; +arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, + extents_t *extents, bool is_background_thread) { + malloc_mutex_assert_owner(tsdn, &decay->mtx); + + /* Purge all or nothing if the option is disabled. */ + ssize_t decay_ms = arena_decay_ms_read(decay); + if (decay_ms <= 0) { + if (decay_ms == 0) { + arena_decay_to_limit(tsdn, arena, decay, extents, false, + 0, extents_npages_get(extents), + is_background_thread); } - arena->nactive -= (udiff >> LG_PAGE); - malloc_mutex_unlock(&arena->lock); - } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk, - cdiff, true, arena->ind)) { - chunk_dalloc_arena(arena, chunk_hooks, nchunk, cdiff, *zero, - true); - err = true; - } - return (err); -} - -bool -arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize, - size_t usize, bool *zero) -{ - bool err; - chunk_hooks_t chunk_hooks = chunk_hooks_get(arena); - void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize)); - size_t udiff = usize - oldsize; - size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize); - - malloc_mutex_lock(&arena->lock); - - /* Optimistically update stats. */ - if (config_stats) { - arena_huge_ralloc_stats_update(arena, oldsize, usize); - arena->stats.mapped += cdiff; + return false; } - arena->nactive += (udiff >> LG_PAGE); - err = (chunk_alloc_cache(arena, &arena->chunk_hooks, nchunk, cdiff, - chunksize, zero, true) == NULL); - malloc_mutex_unlock(&arena->lock); - if (err) { - err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks, - chunk, oldsize, usize, zero, nchunk, udiff, - cdiff); - } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk, - cdiff, true, arena->ind)) { - chunk_dalloc_arena(arena, &chunk_hooks, nchunk, cdiff, *zero, - true); - err = true; + nstime_t time; + nstime_init(&time, 0); + nstime_update(&time); + if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time) + > 0)) { + /* + * Time went backwards. Move the epoch back in time and + * generate a new deadline, with the expectation that time + * typically flows forward for long enough periods of time that + * epochs complete. Unfortunately, this strategy is susceptible + * to clock jitter triggering premature epoch advances, but + * clock jitter estimation and compensation isn't feasible here + * because calls into this code are event-driven. + */ + nstime_copy(&decay->epoch, &time); + arena_decay_deadline_init(decay); + } else { + /* Verify that time does not go backwards. */ + assert(nstime_compare(&decay->epoch, &time) <= 0); } - if (config_stats && !err) - stats_cactive_add(udiff); - return (err); -} - -/* - * Do first-best-fit run selection, i.e. select the lowest run that best fits. - * Run sizes are quantized, so not all candidate runs are necessarily exactly - * the same size. 
- */ -static arena_run_t * -arena_run_first_best_fit(arena_t *arena, size_t size) -{ - size_t search_size = run_quantize_first(size); - arena_chunk_map_misc_t *key = arena_miscelm_key_create(search_size); - arena_chunk_map_misc_t *miscelm = - arena_avail_tree_nsearch(&arena->runs_avail, key); - if (miscelm == NULL) - return (NULL); - return (&miscelm->run); -} - -static arena_run_t * -arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero) -{ - arena_run_t *run = arena_run_first_best_fit(arena, s2u(size)); - if (run != NULL) { - if (arena_run_split_large(arena, run, size, zero)) - run = NULL; - } - return (run); -} - -static arena_run_t * -arena_run_alloc_large(arena_t *arena, size_t size, bool zero) -{ - arena_chunk_t *chunk; - arena_run_t *run; - - assert(size <= arena_maxrun); - assert(size == PAGE_CEILING(size)); - - /* Search the arena's chunks for the lowest best fit. */ - run = arena_run_alloc_large_helper(arena, size, zero); - if (run != NULL) - return (run); - /* - * No usable runs. Create a new chunk from which to allocate the run. + * If the deadline has been reached, advance to the current epoch and + * purge to the new limit if necessary. Note that dirty pages created + * during the current epoch are not subject to purge until a future + * epoch, so as a result purging only happens during epoch advances, or + * being triggered by background threads (scheduled event). */ - chunk = arena_chunk_alloc(arena); - if (chunk != NULL) { - run = &arena_miscelm_get(chunk, map_bias)->run; - if (arena_run_split_large(arena, run, size, zero)) - run = NULL; - return (run); + bool advance_epoch = arena_decay_deadline_reached(decay, &time); + if (advance_epoch) { + arena_decay_epoch_advance(tsdn, arena, decay, extents, &time, + is_background_thread); + } else if (is_background_thread) { + arena_decay_try_purge(tsdn, arena, decay, extents, + extents_npages_get(extents), + arena_decay_backlog_npages_limit(decay), + is_background_thread); } - /* - * arena_chunk_alloc() failed, but another thread may have made - * sufficient memory available while this one dropped arena->lock in - * arena_chunk_alloc(), so search one more time. - */ - return (arena_run_alloc_large_helper(arena, size, zero)); + return advance_epoch; } -static arena_run_t * -arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind) -{ - arena_run_t *run = arena_run_first_best_fit(arena, size); - if (run != NULL) { - if (arena_run_split_small(arena, run, size, binind)) - run = NULL; - } - return (run); +static ssize_t +arena_decay_ms_get(arena_decay_t *decay) { + return arena_decay_ms_read(decay); } -static arena_run_t * -arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind) -{ - arena_chunk_t *chunk; - arena_run_t *run; - - assert(size <= arena_maxrun); - assert(size == PAGE_CEILING(size)); - assert(binind != BININD_INVALID); +ssize_t +arena_dirty_decay_ms_get(arena_t *arena) { + return arena_decay_ms_get(&arena->decay_dirty); +} - /* Search the arena's chunks for the lowest best fit. */ - run = arena_run_alloc_small_helper(arena, size, binind); - if (run != NULL) - return (run); +ssize_t +arena_muzzy_decay_ms_get(arena_t *arena) { + return arena_decay_ms_get(&arena->decay_muzzy); +} - /* - * No usable runs. Create a new chunk from which to allocate the run. 
- */ - chunk = arena_chunk_alloc(arena); - if (chunk != NULL) { - run = &arena_miscelm_get(chunk, map_bias)->run; - if (arena_run_split_small(arena, run, size, binind)) - run = NULL; - return (run); +static bool +arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, + extents_t *extents, ssize_t decay_ms) { + if (!arena_decay_ms_valid(decay_ms)) { + return true; } + malloc_mutex_lock(tsdn, &decay->mtx); /* - * arena_chunk_alloc() failed, but another thread may have made - * sufficient memory available while this one dropped arena->lock in - * arena_chunk_alloc(), so search one more time. + * Restart decay backlog from scratch, which may cause many dirty pages + * to be immediately purged. It would conceptually be possible to map + * the old backlog onto the new backlog, but there is no justification + * for such complexity since decay_ms changes are intended to be + * infrequent, either between the {-1, 0, >0} states, or a one-time + * arbitrary change during initial arena configuration. */ - return (arena_run_alloc_small_helper(arena, size, binind)); -} - -static bool -arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult) -{ + arena_decay_reinit(decay, decay_ms); + arena_maybe_decay(tsdn, arena, decay, extents, false); + malloc_mutex_unlock(tsdn, &decay->mtx); - return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t) - << 3)); -} - -ssize_t -arena_lg_dirty_mult_get(arena_t *arena) -{ - ssize_t lg_dirty_mult; - - malloc_mutex_lock(&arena->lock); - lg_dirty_mult = arena->lg_dirty_mult; - malloc_mutex_unlock(&arena->lock); - - return (lg_dirty_mult); + return false; } bool -arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult) -{ - - if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) - return (true); - - malloc_mutex_lock(&arena->lock); - arena->lg_dirty_mult = lg_dirty_mult; - arena_maybe_purge(arena); - malloc_mutex_unlock(&arena->lock); - - return (false); +arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, + ssize_t decay_ms) { + return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty, + &arena->extents_dirty, decay_ms); } -void -arena_maybe_purge(arena_t *arena) -{ - - /* Don't purge if the option is disabled. */ - if (arena->lg_dirty_mult < 0) - return; - /* Don't recursively purge. */ - if (arena->purging) - return; - /* - * Iterate, since preventing recursive purging could otherwise leave too - * many dirty pages. - */ - while (true) { - size_t threshold = (arena->nactive >> arena->lg_dirty_mult); - if (threshold < chunk_npages) - threshold = chunk_npages; - /* - * Don't purge unless the number of purgeable pages exceeds the - * threshold. 
- */ - if (arena->ndirty <= threshold) - return; - arena_purge(arena, false); - } -} - -static size_t -arena_dirty_count(arena_t *arena) -{ - size_t ndirty = 0; - arena_runs_dirty_link_t *rdelm; - extent_node_t *chunkselm; - - for (rdelm = qr_next(&arena->runs_dirty, rd_link), - chunkselm = qr_next(&arena->chunks_cache, cc_link); - rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) { - size_t npages; - - if (rdelm == &chunkselm->rd) { - npages = extent_node_size_get(chunkselm) >> LG_PAGE; - chunkselm = qr_next(chunkselm, cc_link); - } else { - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( - rdelm); - arena_chunk_map_misc_t *miscelm = - arena_rd_to_miscelm(rdelm); - size_t pageind = arena_miscelm_to_pageind(miscelm); - assert(arena_mapbits_allocated_get(chunk, pageind) == - 0); - assert(arena_mapbits_large_get(chunk, pageind) == 0); - assert(arena_mapbits_dirty_get(chunk, pageind) != 0); - npages = arena_mapbits_unallocated_size_get(chunk, - pageind) >> LG_PAGE; - } - ndirty += npages; - } - - return (ndirty); +bool +arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, + ssize_t decay_ms) { + return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy, + &arena->extents_muzzy, decay_ms); } static size_t -arena_compute_npurge(arena_t *arena, bool all) -{ - size_t npurge; +arena_stash_decayed(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit, + size_t npages_decay_max, extent_list_t *decay_extents) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); - /* - * Compute the minimum number of pages that this thread should try to - * purge. - */ - if (!all) { - size_t threshold = (arena->nactive >> arena->lg_dirty_mult); - threshold = threshold < chunk_npages ? chunk_npages : threshold; - - npurge = arena->ndirty - threshold; - } else - npurge = arena->ndirty; - - return (npurge); -} - -static size_t -arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all, - size_t npurge, arena_runs_dirty_link_t *purge_runs_sentinel, - extent_node_t *purge_chunks_sentinel) -{ - arena_runs_dirty_link_t *rdelm, *rdelm_next; - extent_node_t *chunkselm; + /* Stash extents according to npages_limit. */ size_t nstashed = 0; - - /* Stash at least npurge pages. */ - for (rdelm = qr_next(&arena->runs_dirty, rd_link), - chunkselm = qr_next(&arena->chunks_cache, cc_link); - rdelm != &arena->runs_dirty; rdelm = rdelm_next) { - size_t npages; - rdelm_next = qr_next(rdelm, rd_link); - - if (rdelm == &chunkselm->rd) { - extent_node_t *chunkselm_next; - bool zero; - UNUSED void *chunk; - - chunkselm_next = qr_next(chunkselm, cc_link); - /* - * Allocate. chunkselm remains valid due to the - * dalloc_node=false argument to chunk_alloc_cache(). 
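/*
 * Illustrative sketch (not part of this patch): the per-arena setters above,
 * arena_dirty_decay_ms_set() and arena_muzzy_decay_ms_set(), are what the
 * public mallctl namespace ultimately drives.  The control names used below
 * ("arena.<i>.dirty_decay_ms", "arena.<i>.muzzy_decay_ms") are assumed from
 * the jemalloc 5 manual; only the internal functions appear in this diff.
 * A value of -1 disables decay, 0 purges as soon as possible, and a positive
 * value is the decay time in milliseconds.
 */
#include <stdio.h>
#include <sys/types.h>
#include <jemalloc/jemalloc.h>

static int
set_arena_decay(unsigned arena_ind, ssize_t dirty_ms, ssize_t muzzy_ms) {
	char ctl[64];

	/* Routed to arena_dirty_decay_ms_set() for this arena. */
	snprintf(ctl, sizeof(ctl), "arena.%u.dirty_decay_ms", arena_ind);
	if (mallctl(ctl, NULL, NULL, &dirty_ms, sizeof(dirty_ms)) != 0) {
		return -1;
	}

	/* Routed to arena_muzzy_decay_ms_set() for this arena. */
	snprintf(ctl, sizeof(ctl), "arena.%u.muzzy_decay_ms", arena_ind);
	if (mallctl(ctl, NULL, NULL, &muzzy_ms, sizeof(muzzy_ms)) != 0) {
		return -1;
	}
	return 0;
}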
- */ - zero = false; - chunk = chunk_alloc_cache(arena, chunk_hooks, - extent_node_addr_get(chunkselm), - extent_node_size_get(chunkselm), chunksize, &zero, - false); - assert(chunk == extent_node_addr_get(chunkselm)); - assert(zero == extent_node_zeroed_get(chunkselm)); - extent_node_dirty_insert(chunkselm, purge_runs_sentinel, - purge_chunks_sentinel); - npages = extent_node_size_get(chunkselm) >> LG_PAGE; - chunkselm = chunkselm_next; - } else { - arena_chunk_t *chunk = - (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); - arena_chunk_map_misc_t *miscelm = - arena_rd_to_miscelm(rdelm); - size_t pageind = arena_miscelm_to_pageind(miscelm); - arena_run_t *run = &miscelm->run; - size_t run_size = - arena_mapbits_unallocated_size_get(chunk, pageind); - - npages = run_size >> LG_PAGE; - - assert(pageind + npages <= chunk_npages); - assert(arena_mapbits_dirty_get(chunk, pageind) == - arena_mapbits_dirty_get(chunk, pageind+npages-1)); - - /* - * If purging the spare chunk's run, make it available - * prior to allocation. - */ - if (chunk == arena->spare) - arena_chunk_alloc(arena); - - /* Temporarily allocate the free dirty run. */ - arena_run_split_large(arena, run, run_size, false); - /* Stash. */ - if (false) - qr_new(rdelm, rd_link); /* Redundant. */ - else { - assert(qr_next(rdelm, rd_link) == rdelm); - assert(qr_prev(rdelm, rd_link) == rdelm); - } - qr_meld(purge_runs_sentinel, rdelm, rd_link); - } - - nstashed += npages; - if (!all && nstashed >= npurge) - break; + extent_t *extent; + while (nstashed < npages_decay_max && + (extent = extents_evict(tsdn, arena, r_extent_hooks, extents, + npages_limit)) != NULL) { + extent_list_append(decay_extents, extent); + nstashed += extent_size_get(extent) >> LG_PAGE; } - - return (nstashed); + return nstashed; } static size_t -arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks, - arena_runs_dirty_link_t *purge_runs_sentinel, - extent_node_t *purge_chunks_sentinel) -{ - size_t npurged, nmadvise; - arena_runs_dirty_link_t *rdelm; - extent_node_t *chunkselm; - - if (config_stats) +arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents, + bool all, extent_list_t *decay_extents, bool is_background_thread) { + UNUSED size_t nmadvise, nunmapped; + size_t npurged; + + if (config_stats) { nmadvise = 0; + nunmapped = 0; + } npurged = 0; - malloc_mutex_unlock(&arena->lock); - for (rdelm = qr_next(purge_runs_sentinel, rd_link), - chunkselm = qr_next(purge_chunks_sentinel, cc_link); - rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) { - size_t npages; - - if (rdelm == &chunkselm->rd) { - /* - * Don't actually purge the chunk here because 1) - * chunkselm is embedded in the chunk and must remain - * valid, and 2) we deallocate the chunk in - * arena_unstash_purged(), where it is destroyed, - * decommitted, or purged, depending on chunk - * deallocation policy. 
- */ - size_t size = extent_node_size_get(chunkselm); - npages = size >> LG_PAGE; - chunkselm = qr_next(chunkselm, cc_link); - } else { - size_t pageind, run_size, flag_unzeroed, flags, i; - bool decommitted; - arena_chunk_t *chunk = - (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); - arena_chunk_map_misc_t *miscelm = - arena_rd_to_miscelm(rdelm); - pageind = arena_miscelm_to_pageind(miscelm); - run_size = arena_mapbits_large_size_get(chunk, pageind); - npages = run_size >> LG_PAGE; - - assert(pageind + npages <= chunk_npages); - assert(!arena_mapbits_decommitted_get(chunk, pageind)); - assert(!arena_mapbits_decommitted_get(chunk, - pageind+npages-1)); - decommitted = !chunk_hooks->decommit(chunk, chunksize, - pageind << LG_PAGE, npages << LG_PAGE, arena->ind); - if (decommitted) { - flag_unzeroed = 0; - flags = CHUNK_MAP_DECOMMITTED; - } else { - flag_unzeroed = chunk_purge_wrapper(arena, - chunk_hooks, chunk, chunksize, pageind << - LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0; - flags = flag_unzeroed; + ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena); + for (extent_t *extent = extent_list_first(decay_extents); extent != + NULL; extent = extent_list_first(decay_extents)) { + if (config_stats) { + nmadvise++; + } + size_t npages = extent_size_get(extent) >> LG_PAGE; + npurged += npages; + extent_list_remove(decay_extents, extent); + switch (extents_state_get(extents)) { + case extent_state_active: + not_reached(); + case extent_state_dirty: + if (!all && muzzy_decay_ms != 0 && + !extent_purge_lazy_wrapper(tsdn, arena, + r_extent_hooks, extent, 0, + extent_size_get(extent))) { + extents_dalloc(tsdn, arena, r_extent_hooks, + &arena->extents_muzzy, extent); + arena_background_thread_inactivity_check(tsdn, + arena, is_background_thread); + break; } - arena_mapbits_large_set(chunk, pageind+npages-1, 0, - flags); - arena_mapbits_large_set(chunk, pageind, run_size, - flags); - - /* - * Set the unzeroed flag for internal pages, now that - * chunk_purge_wrapper() has returned whether the pages - * were zeroed as a side effect of purging. This chunk - * map modification is safe even though the arena mutex - * isn't currently owned by this thread, because the run - * is marked as allocated, thus protecting it from being - * modified by any other thread. As long as these - * writes don't perturb the first and last elements' - * CHUNK_MAP_ALLOCATED bits, behavior is well defined. - */ - for (i = 1; i < npages-1; i++) { - arena_mapbits_internal_set(chunk, pageind+i, - flag_unzeroed); + /* Fall through. */ + case extent_state_muzzy: + extent_dalloc_wrapper(tsdn, arena, r_extent_hooks, + extent); + if (config_stats) { + nunmapped += npages; } + break; + case extent_state_retained: + default: + not_reached(); } - - npurged += npages; - if (config_stats) - nmadvise++; } - malloc_mutex_lock(&arena->lock); if (config_stats) { - arena->stats.nmadvise += nmadvise; - arena->stats.purged += npurged; + arena_stats_lock(tsdn, &arena->stats); + arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge, + 1); + arena_stats_add_u64(tsdn, &arena->stats, + &decay->stats->nmadvise, nmadvise); + arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged, + npurged); + arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped, + nunmapped << LG_PAGE); + arena_stats_unlock(tsdn, &arena->stats); } - return (npurged); + return npurged; } +/* + * npages_limit: Decay at most npages_decay_max pages without violating the + * invariant: (extents_npages_get(extents) >= npages_limit). 
We need an upper + * bound on number of pages in order to prevent unbounded growth (namely in + * stashed), otherwise unbounded new pages could be added to extents during the + * current decay run, so that the purging thread never finishes. + */ static void -arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks, - arena_runs_dirty_link_t *purge_runs_sentinel, - extent_node_t *purge_chunks_sentinel) -{ - arena_runs_dirty_link_t *rdelm, *rdelm_next; - extent_node_t *chunkselm; - - /* Deallocate chunks/runs. */ - for (rdelm = qr_next(purge_runs_sentinel, rd_link), - chunkselm = qr_next(purge_chunks_sentinel, cc_link); - rdelm != purge_runs_sentinel; rdelm = rdelm_next) { - rdelm_next = qr_next(rdelm, rd_link); - if (rdelm == &chunkselm->rd) { - extent_node_t *chunkselm_next = qr_next(chunkselm, - cc_link); - void *addr = extent_node_addr_get(chunkselm); - size_t size = extent_node_size_get(chunkselm); - bool zeroed = extent_node_zeroed_get(chunkselm); - bool committed = extent_node_committed_get(chunkselm); - extent_node_dirty_remove(chunkselm); - arena_node_dalloc(arena, chunkselm); - chunkselm = chunkselm_next; - chunk_dalloc_arena(arena, chunk_hooks, addr, size, - zeroed, committed); - } else { - arena_chunk_t *chunk = - (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); - arena_chunk_map_misc_t *miscelm = - arena_rd_to_miscelm(rdelm); - size_t pageind = arena_miscelm_to_pageind(miscelm); - bool decommitted = (arena_mapbits_decommitted_get(chunk, - pageind) != 0); - arena_run_t *run = &miscelm->run; - qr_remove(rdelm, rd_link); - arena_run_dalloc(arena, run, false, true, decommitted); - } +arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, + extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max, + bool is_background_thread) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 1); + malloc_mutex_assert_owner(tsdn, &decay->mtx); + + if (decay->purging) { + return; } -} + decay->purging = true; + malloc_mutex_unlock(tsdn, &decay->mtx); -static void -arena_purge(arena_t *arena, bool all) -{ - chunk_hooks_t chunk_hooks = chunk_hooks_get(arena); - size_t npurge, npurgeable, npurged; - arena_runs_dirty_link_t purge_runs_sentinel; - extent_node_t purge_chunks_sentinel; + extent_hooks_t *extent_hooks = extent_hooks_get(arena); - arena->purging = true; + extent_list_t decay_extents; + extent_list_init(&decay_extents); - /* - * Calls to arena_dirty_count() are disabled even for debug builds - * because overhead grows nonlinearly as memory usage increases. 
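/*
 * Conceptual sketch (not jemalloc source) of the page lifecycle implemented
 * by the switch in arena_decay_stashed() above: dirty extents are first
 * lazily purged (e.g. MADV_FREE on Linux) and parked in the "muzzy" state
 * while keeping their virtual mapping; if lazy purging fails, muzzy decay is
 * set to 0, a full purge was requested, or the extent is already muzzy, the
 * extent is unmapped outright.
 */
typedef enum { PS_DIRTY, PS_MUZZY, PS_UNMAPPED } page_state_t;

static page_state_t
decay_step(page_state_t s, int muzzy_decay_enabled, int lazy_purge_ok) {
	switch (s) {
	case PS_DIRTY:
		if (muzzy_decay_enabled && lazy_purge_ok) {
			/*
			 * Analogue of extent_purge_lazy_wrapper() succeeding:
			 * keep the mapping, let the kernel reclaim the pages.
			 */
			return PS_MUZZY;
		}
		/* Fall through: unmap immediately. */
	case PS_MUZZY:
		/* Analogue of extent_dalloc_wrapper(): return to the system. */
		return PS_UNMAPPED;
	default:
		return s;
	}
}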
- */ - if (false && config_debug) { - size_t ndirty = arena_dirty_count(arena); - assert(ndirty == arena->ndirty); + size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents, + npages_limit, npages_decay_max, &decay_extents); + if (npurge != 0) { + UNUSED size_t npurged = arena_decay_stashed(tsdn, arena, + &extent_hooks, decay, extents, all, &decay_extents, + is_background_thread); + assert(npurged == npurge); } - assert((arena->nactive >> arena->lg_dirty_mult) < arena->ndirty || all); - - if (config_stats) - arena->stats.npurge++; - - npurge = arena_compute_npurge(arena, all); - qr_new(&purge_runs_sentinel, rd_link); - extent_node_dirty_linkage_init(&purge_chunks_sentinel); - - npurgeable = arena_stash_dirty(arena, &chunk_hooks, all, npurge, - &purge_runs_sentinel, &purge_chunks_sentinel); - assert(npurgeable >= npurge); - npurged = arena_purge_stashed(arena, &chunk_hooks, &purge_runs_sentinel, - &purge_chunks_sentinel); - assert(npurged == npurgeable); - arena_unstash_purged(arena, &chunk_hooks, &purge_runs_sentinel, - &purge_chunks_sentinel); - arena->purging = false; + malloc_mutex_lock(tsdn, &decay->mtx); + decay->purging = false; } -void -arena_purge_all(arena_t *arena) -{ - - malloc_mutex_lock(&arena->lock); - arena_purge(arena, true); - malloc_mutex_unlock(&arena->lock); -} - -static void -arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size, - size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty, - size_t flag_decommitted) -{ - size_t size = *p_size; - size_t run_ind = *p_run_ind; - size_t run_pages = *p_run_pages; - - /* Try to coalesce forward. */ - if (run_ind + run_pages < chunk_npages && - arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 && - arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty && - arena_mapbits_decommitted_get(chunk, run_ind+run_pages) == - flag_decommitted) { - size_t nrun_size = arena_mapbits_unallocated_size_get(chunk, - run_ind+run_pages); - size_t nrun_pages = nrun_size >> LG_PAGE; - - /* - * Remove successor from runs_avail; the coalesced run is - * inserted later. - */ - assert(arena_mapbits_unallocated_size_get(chunk, - run_ind+run_pages+nrun_pages-1) == nrun_size); - assert(arena_mapbits_dirty_get(chunk, - run_ind+run_pages+nrun_pages-1) == flag_dirty); - assert(arena_mapbits_decommitted_get(chunk, - run_ind+run_pages+nrun_pages-1) == flag_decommitted); - arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages); - - /* - * If the successor is dirty, remove it from the set of dirty - * pages. - */ - if (flag_dirty != 0) { - arena_run_dirty_remove(arena, chunk, run_ind+run_pages, - nrun_pages); - } - - size += nrun_size; - run_pages += nrun_pages; +static bool +arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, + extents_t *extents, bool is_background_thread, bool all) { + if (all) { + malloc_mutex_lock(tsdn, &decay->mtx); + arena_decay_to_limit(tsdn, arena, decay, extents, all, 0, + extents_npages_get(extents), is_background_thread); + malloc_mutex_unlock(tsdn, &decay->mtx); - arena_mapbits_unallocated_size_set(chunk, run_ind, size); - arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, - size); + return false; } - /* Try to coalesce backward. 
*/ - if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, - run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == - flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) == - flag_decommitted) { - size_t prun_size = arena_mapbits_unallocated_size_get(chunk, - run_ind-1); - size_t prun_pages = prun_size >> LG_PAGE; - - run_ind -= prun_pages; - - /* - * Remove predecessor from runs_avail; the coalesced run is - * inserted later. - */ - assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == - prun_size); - assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty); - assert(arena_mapbits_decommitted_get(chunk, run_ind) == - flag_decommitted); - arena_avail_remove(arena, chunk, run_ind, prun_pages); - - /* - * If the predecessor is dirty, remove it from the set of dirty - * pages. - */ - if (flag_dirty != 0) { - arena_run_dirty_remove(arena, chunk, run_ind, - prun_pages); - } + if (malloc_mutex_trylock(tsdn, &decay->mtx)) { + /* No need to wait if another thread is in progress. */ + return true; + } - size += prun_size; - run_pages += prun_pages; + bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents, + is_background_thread); + UNUSED size_t npages_new; + if (epoch_advanced) { + /* Backlog is updated on epoch advance. */ + npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1]; + } + malloc_mutex_unlock(tsdn, &decay->mtx); - arena_mapbits_unallocated_size_set(chunk, run_ind, size); - arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, - size); + if (have_background_thread && background_thread_enabled() && + epoch_advanced && !is_background_thread) { + background_thread_interval_check(tsdn, arena, decay, + npages_new); } - *p_size = size; - *p_run_ind = run_ind; - *p_run_pages = run_pages; + return false; } -static size_t -arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - size_t run_ind) -{ - size_t size; - - assert(run_ind >= map_bias); - assert(run_ind < chunk_npages); - - if (arena_mapbits_large_get(chunk, run_ind) != 0) { - size = arena_mapbits_large_size_get(chunk, run_ind); - assert(size == PAGE || arena_mapbits_large_size_get(chunk, - run_ind+(size>>LG_PAGE)-1) == 0); - } else { - arena_bin_info_t *bin_info = &arena_bin_info[run->binind]; - size = bin_info->run_size; - } - - return (size); +static bool +arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, + bool all) { + return arena_decay_impl(tsdn, arena, &arena->decay_dirty, + &arena->extents_dirty, is_background_thread, all); } static bool -arena_run_decommit(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run) -{ - arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); - size_t run_ind = arena_miscelm_to_pageind(miscelm); - size_t offset = run_ind << LG_PAGE; - size_t length = arena_run_size_get(arena, chunk, run, run_ind); +arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, + bool all) { + return arena_decay_impl(tsdn, arena, &arena->decay_muzzy, + &arena->extents_muzzy, is_background_thread, all); +} - return (arena->chunk_hooks.decommit(chunk, chunksize, offset, length, - arena->ind)); +void +arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) { + if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) { + return; + } + arena_decay_muzzy(tsdn, arena, is_background_thread, all); } static void -arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned, - bool decommitted) -{ - arena_chunk_t *chunk; - arena_chunk_map_misc_t *miscelm; - size_t 
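/*
 * Conceptual sketch of the decay schedule referenced above (an approximation,
 * not the fixed-point h_steps/SMOOTHSTEP table jemalloc actually uses): the
 * decay interval is split into NSTEPS epochs, backlog[i] records how many
 * pages turned dirty in a given epoch with backlog[NSTEPS-1] the newest, and
 * the number of pages allowed to remain unpurged is a smoothstep-weighted sum,
 * so freshly dirtied pages are retained while older ones decay toward zero.
 */
#include <stddef.h>

#define NSTEPS 200	/* stand-in for SMOOTHSTEP_NSTEPS */

/* Fraction of pages of a given age (in epochs) that may remain unpurged. */
static double
retain_fraction(size_t age) {
	if (age >= NSTEPS) {
		return 0.0;
	}
	double x = 1.0 - (double)age / NSTEPS;	/* freshness in [0, 1] */
	return x * x * (3.0 - 2.0 * x);		/* smoothstep */
}

/* Rough analogue of the npages_limit derived from the decay backlog. */
static size_t
backlog_npages_limit(const size_t backlog[NSTEPS]) {
	double limit = 0.0;
	for (size_t i = 0; i < NSTEPS; i++) {
		size_t age = NSTEPS - 1 - i;	/* backlog[NSTEPS-1] is newest */
		limit += retain_fraction(age) * (double)backlog[i];
	}
	return (size_t)limit;
}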
size, run_ind, run_pages, flag_dirty, flag_decommitted; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - miscelm = arena_run_to_miscelm(run); - run_ind = arena_miscelm_to_pageind(miscelm); - assert(run_ind >= map_bias); - assert(run_ind < chunk_npages); - size = arena_run_size_get(arena, chunk, run, run_ind); - run_pages = (size >> LG_PAGE); - arena_cactive_update(arena, 0, run_pages); - arena->nactive -= run_pages; +arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) { + arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE); - /* - * The run is dirty if the caller claims to have dirtied it, as well as - * if it was already dirty before being allocated and the caller - * doesn't claim to have cleaned it. - */ - assert(arena_mapbits_dirty_get(chunk, run_ind) == - arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); - if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind) - != 0) - dirty = true; - flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0; - flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0; - - /* Mark pages as unallocated in the chunk map. */ - if (dirty || decommitted) { - size_t flags = flag_dirty | flag_decommitted; - arena_mapbits_unallocated_set(chunk, run_ind, size, flags); - arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, - flags); - } else { - arena_mapbits_unallocated_set(chunk, run_ind, size, - arena_mapbits_unzeroed_get(chunk, run_ind)); - arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, - arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1)); - } - - arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages, - flag_dirty, flag_decommitted); + extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; + arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab); +} - /* Insert into runs_avail, now that coalescing is complete. */ - assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == - arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1)); - assert(arena_mapbits_dirty_get(chunk, run_ind) == - arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); - assert(arena_mapbits_decommitted_get(chunk, run_ind) == - arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1)); - arena_avail_insert(arena, chunk, run_ind, run_pages); +static void +arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) { + assert(extent_nfree_get(slab) > 0); + extent_heap_insert(&bin->slabs_nonfull, slab); +} - if (dirty) - arena_run_dirty_insert(arena, chunk, run_ind, run_pages); +static void +arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) { + extent_heap_remove(&bin->slabs_nonfull, slab); +} - /* Deallocate chunk if it is now completely unused. */ - if (size == arena_maxrun) { - assert(run_ind == map_bias); - assert(run_pages == (arena_maxrun >> LG_PAGE)); - arena_chunk_dalloc(arena, chunk); +static extent_t * +arena_bin_slabs_nonfull_tryget(bin_t *bin) { + extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull); + if (slab == NULL) { + return NULL; + } + if (config_stats) { + bin->stats.reslabs++; } + return slab; +} +static void +arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) { + assert(extent_nfree_get(slab) == 0); /* - * It is okay to do dirty page processing here even if the chunk was - * deallocated above, since in that case it is the spare. Waiting - * until after possible chunk deallocation to do dirty processing - * allows for an old spare to be fully deallocated, thus decreasing the - * chances of spuriously crossing the dirty page purging threshold. 
+ * Tracking extents is required by arena_reset, which is not allowed + * for auto arenas. Bypass this step to avoid touching the extent + * linkage (often results in cache misses) for auto arenas. */ - if (dirty) - arena_maybe_purge(arena); + if (arena_is_auto(arena)) { + return; + } + extent_list_append(&bin->slabs_full, slab); } static void -arena_run_dalloc_decommit(arena_t *arena, arena_chunk_t *chunk, - arena_run_t *run) -{ - bool committed = arena_run_decommit(arena, chunk, run); - - arena_run_dalloc(arena, run, committed, false, !committed); +arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) { + if (arena_is_auto(arena)) { + return; + } + extent_list_remove(&bin->slabs_full, slab); } -static void -arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - size_t oldsize, size_t newsize) -{ - arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); - size_t pageind = arena_miscelm_to_pageind(miscelm); - size_t head_npages = (oldsize - newsize) >> LG_PAGE; - size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); - size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind); - size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? - CHUNK_MAP_UNZEROED : 0; - - assert(oldsize > newsize); - +void +arena_reset(tsd_t *tsd, arena_t *arena) { /* - * Update the chunk map so that arena_run_dalloc() can treat the - * leading run as separately allocated. Set the last element of each - * run first, in case of single-page runs. + * Locking in this function is unintuitive. The caller guarantees that + * no concurrent operations are happening in this arena, but there are + * still reasons that some locking is necessary: + * + * - Some of the functions in the transitive closure of calls assume + * appropriate locks are held, and in some cases these locks are + * temporarily dropped to avoid lock order reversal or deadlock due to + * reentry. + * - mallctl("epoch", ...) may concurrently refresh stats. While + * strictly speaking this is a "concurrent operation", disallowing + * stats refreshes would impose an inconvenient burden. */ - assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); - arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty | - (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, - pageind+head_npages-1))); - arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty | - (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind))); - if (config_debug) { - UNUSED size_t tail_npages = newsize >> LG_PAGE; - assert(arena_mapbits_large_size_get(chunk, - pageind+head_npages+tail_npages-1) == 0); - assert(arena_mapbits_dirty_get(chunk, - pageind+head_npages+tail_npages-1) == flag_dirty); + /* Large allocations. */ + malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx); + + for (extent_t *extent = extent_list_first(&arena->large); extent != + NULL; extent = extent_list_first(&arena->large)) { + void *ptr = extent_base_get(extent); + size_t usize; + + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx); + alloc_ctx_t alloc_ctx; + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + assert(alloc_ctx.szind != NSIZES); + + if (config_stats || (config_prof && opt_prof)) { + usize = sz_index2size(alloc_ctx.szind); + assert(usize == isalloc(tsd_tsdn(tsd), ptr)); + } + /* Remove large allocation from prof sample set. 
*/ + if (config_prof && opt_prof) { + prof_free(tsd, ptr, usize, &alloc_ctx); + } + large_dalloc(tsd_tsdn(tsd), extent); + malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx); + } + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx); + + /* Bins. */ + for (unsigned i = 0; i < NBINS; i++) { + extent_t *slab; + bin_t *bin = &arena->bins[i]; + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); + if (bin->slabcur != NULL) { + slab = bin->slabcur; + bin->slabcur = NULL; + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); + arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); + } + while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) != + NULL) { + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); + arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); + } + for (slab = extent_list_first(&bin->slabs_full); slab != NULL; + slab = extent_list_first(&bin->slabs_full)) { + arena_bin_slabs_full_remove(arena, bin, slab); + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); + arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); + } + if (config_stats) { + bin->stats.curregs = 0; + bin->stats.curslabs = 0; + } + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); } - arena_mapbits_large_set(chunk, pageind+head_npages, newsize, - flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, - pageind+head_npages))); - arena_run_dalloc(arena, run, false, false, (flag_decommitted != 0)); + atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED); } static void -arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - size_t oldsize, size_t newsize, bool dirty) -{ - arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); - size_t pageind = arena_miscelm_to_pageind(miscelm); - size_t head_npages = newsize >> LG_PAGE; - size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); - size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind); - size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? - CHUNK_MAP_UNZEROED : 0; - arena_chunk_map_misc_t *tail_miscelm; - arena_run_t *tail_run; - - assert(oldsize > newsize); - +arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) { /* - * Update the chunk map so that arena_run_dalloc() can treat the - * trailing run as separately allocated. Set the last element of each - * run first, in case of single-page runs. + * Iterate over the retained extents and destroy them. This gives the + * extent allocator underlying the extent hooks an opportunity to unmap + * all retained memory without having to keep its own metadata + * structures. In practice, virtual memory for dss-allocated extents is + * leaked here, so best practice is to avoid dss for arenas to be + * destroyed, or provide custom extent hooks that track retained + * dss-based extents for later reuse. 
*/ - assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); - arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty | - (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, - pageind+head_npages-1))); - arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty | - (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind))); - - if (config_debug) { - UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE; - assert(arena_mapbits_large_size_get(chunk, - pageind+head_npages+tail_npages-1) == 0); - assert(arena_mapbits_dirty_get(chunk, - pageind+head_npages+tail_npages-1) == flag_dirty); + extent_hooks_t *extent_hooks = extent_hooks_get(arena); + extent_t *extent; + while ((extent = extents_evict(tsdn, arena, &extent_hooks, + &arena->extents_retained, 0)) != NULL) { + extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent); } - arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize, - flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, - pageind+head_npages))); - - tail_miscelm = arena_miscelm_get(chunk, pageind + head_npages); - tail_run = &tail_miscelm->run; - arena_run_dalloc(arena, tail_run, dirty, false, (flag_decommitted != - 0)); } -static arena_run_t * -arena_bin_runs_first(arena_bin_t *bin) -{ - arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs); - if (miscelm != NULL) - return (&miscelm->run); +void +arena_destroy(tsd_t *tsd, arena_t *arena) { + assert(base_ind_get(arena->base) >= narenas_auto); + assert(arena_nthreads_get(arena, false) == 0); + assert(arena_nthreads_get(arena, true) == 0); - return (NULL); -} + /* + * No allocations have occurred since arena_reset() was called. + * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached + * extents, so only retained extents may remain. + */ + assert(extents_npages_get(&arena->extents_dirty) == 0); + assert(extents_npages_get(&arena->extents_muzzy) == 0); -static void -arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run) -{ - arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); + /* Deallocate retained memory. */ + arena_destroy_retained(tsd_tsdn(tsd), arena); - assert(arena_run_tree_search(&bin->runs, miscelm) == NULL); + /* + * Remove the arena pointer from the arenas array. We rely on the fact + * that there is no way for the application to get a dirty read from the + * arenas array unless there is an inherent race in the application + * involving access of an arena being concurrently destroyed. The + * application must synchronize knowledge of the arena's validity, so as + * long as we use an atomic write to update the arenas array, the + * application will get a clean read any time after it synchronizes + * knowledge that the arena is no longer valid. + */ + arena_set(base_ind_get(arena->base), NULL); - arena_run_tree_insert(&bin->runs, miscelm); + /* + * Destroy the base allocator, which manages all metadata ever mapped by + * this arena. 
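/*
 * Illustrative sketch of the manual-arena lifecycle that arena_reset() and
 * arena_destroy() above serve.  The control names ("arenas.create",
 * "arena.<i>.reset", "arena.<i>.destroy") and the MALLOCX_* flags are assumed
 * from jemalloc's public API; only the internal entry points are visible in
 * this diff.  As the comments above note, reset/destroy apply only to
 * explicitly created (non-auto) arenas.
 */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);
	char ctl[64];

	/* Create an explicit arena; auto arenas cannot be reset/destroyed. */
	if (mallctl("arenas.create", &arena_ind, &sz, NULL, 0) != 0) {
		return 1;
	}

	/* Allocate from it, bypassing the tcache so the arena owns the memory. */
	void *p = mallocx(1 << 20, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
	if (p == NULL) {
		return 1;
	}

	/* Discard every allocation in the arena at once (arena_reset()). */
	snprintf(ctl, sizeof(ctl), "arena.%u.reset", arena_ind);
	if (mallctl(ctl, NULL, NULL, NULL, 0) != 0) {
		return 1;
	}
	/* p is now dangling; it must not be used or passed to free(). */

	/* Tear the arena down entirely, releasing retained memory. */
	snprintf(ctl, sizeof(ctl), "arena.%u.destroy", arena_ind);
	return mallctl(ctl, NULL, NULL, NULL, 0) != 0;
}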
+ */ + base_delete(tsd_tsdn(tsd), arena->base); } -static void -arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run) -{ - arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); +static extent_t * +arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, const bin_info_t *bin_info, + szind_t szind) { + extent_t *slab; + bool zero, commit; + + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); - assert(arena_run_tree_search(&bin->runs, miscelm) != NULL); + zero = false; + commit = true; + slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL, + bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit); + + if (config_stats && slab != NULL) { + arena_stats_mapped_add(tsdn, &arena->stats, + bin_info->slab_size); + } - arena_run_tree_remove(&bin->runs, miscelm); + return slab; } -static arena_run_t * -arena_bin_nonfull_run_tryget(arena_bin_t *bin) -{ - arena_run_t *run = arena_bin_runs_first(bin); - if (run != NULL) { - arena_bin_runs_remove(bin, run); - if (config_stats) - bin->stats.reruns++; +static extent_t * +arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, + const bin_info_t *bin_info) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; + szind_t szind = sz_size2index(bin_info->reg_size); + bool zero = false; + bool commit = true; + extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks, + &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true, + binind, &zero, &commit); + if (slab == NULL) { + slab = extents_alloc(tsdn, arena, &extent_hooks, + &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE, + true, binind, &zero, &commit); + } + if (slab == NULL) { + slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks, + bin_info, szind); + if (slab == NULL) { + return NULL; + } } - return (run); + assert(extent_slab_get(slab)); + + /* Initialize slab internals. */ + arena_slab_data_t *slab_data = extent_slab_data_get(slab); + extent_nfree_set(slab, bin_info->nregs); + bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false); + + arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE); + + return slab; } -static arena_run_t * -arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin) -{ - arena_run_t *run; - szind_t binind; - arena_bin_info_t *bin_info; +static extent_t * +arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin, + szind_t binind) { + extent_t *slab; + const bin_info_t *bin_info; - /* Look for a usable run. */ - run = arena_bin_nonfull_run_tryget(bin); - if (run != NULL) - return (run); - /* No existing runs have any space available. */ + /* Look for a usable slab. */ + slab = arena_bin_slabs_nonfull_tryget(bin); + if (slab != NULL) { + return slab; + } + /* No existing slabs have any space available. */ - binind = arena_bin_index(arena, bin); - bin_info = &arena_bin_info[binind]; + bin_info = &bin_infos[binind]; - /* Allocate a new run. */ - malloc_mutex_unlock(&bin->lock); + /* Allocate a new slab. */ + malloc_mutex_unlock(tsdn, &bin->lock); /******************************/ - malloc_mutex_lock(&arena->lock); - run = arena_run_alloc_small(arena, bin_info->run_size, binind); - if (run != NULL) { - /* Initialize run internals. 
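/*
 * Conceptual sketch (not jemalloc source) of the per-slab state that
 * arena_slab_alloc() above initializes via extent_nfree_set() and
 * bitmap_init(): a slab is carved into nregs equal regions, a free count
 * mirrors a free-region bitmap, and allocation takes the lowest free region
 * so low addresses are preferred.
 */
#include <stdint.h>
#include <stddef.h>

#define SLAB_NREGS 64	/* small enough for one 64-bit bitmap word */

typedef struct {
	char *base;		/* first region in the slab */
	size_t reg_size;	/* size of each region */
	unsigned nfree;		/* mirrors extent_nfree_get()/_set() */
	uint64_t free_bits;	/* bit i set => region i is free */
} slab_t;

static void
slab_init(slab_t *slab, char *base, size_t reg_size) {
	slab->base = base;
	slab->reg_size = reg_size;
	slab->nfree = SLAB_NREGS;
	slab->free_bits = ~UINT64_C(0);	/* all regions free */
}

/* Rough analogue of arena_slab_reg_alloc(): take the lowest free region. */
static void *
slab_reg_alloc(slab_t *slab) {
	if (slab->nfree == 0) {
		return NULL;
	}
	unsigned bit = (unsigned)__builtin_ctzll(slab->free_bits); /* gcc/clang */
	slab->free_bits &= ~(UINT64_C(1) << bit);
	slab->nfree--;
	return slab->base + (size_t)bit * slab->reg_size;
}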
*/ - run->binind = binind; - run->nfree = bin_info->nregs; - bitmap_init(run->bitmap, &bin_info->bitmap_info); - } - malloc_mutex_unlock(&arena->lock); + slab = arena_slab_alloc(tsdn, arena, binind, bin_info); /********************************/ - malloc_mutex_lock(&bin->lock); - if (run != NULL) { + malloc_mutex_lock(tsdn, &bin->lock); + if (slab != NULL) { if (config_stats) { - bin->stats.nruns++; - bin->stats.curruns++; + bin->stats.nslabs++; + bin->stats.curslabs++; } - return (run); + return slab; } /* - * arena_run_alloc_small() failed, but another thread may have made + * arena_slab_alloc() failed, but another thread may have made * sufficient memory available while this one dropped bin->lock above, * so search one more time. */ - run = arena_bin_nonfull_run_tryget(bin); - if (run != NULL) - return (run); + slab = arena_bin_slabs_nonfull_tryget(bin); + if (slab != NULL) { + return slab; + } - return (NULL); + return NULL; } -/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */ +/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */ static void * -arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin) -{ - szind_t binind; - arena_bin_info_t *bin_info; - arena_run_t *run; - - binind = arena_bin_index(arena, bin); - bin_info = &arena_bin_info[binind]; - bin->runcur = NULL; - run = arena_bin_nonfull_run_get(arena, bin); - if (bin->runcur != NULL && bin->runcur->nfree > 0) { +arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin, + szind_t binind) { + const bin_info_t *bin_info; + extent_t *slab; + + bin_info = &bin_infos[binind]; + if (!arena_is_auto(arena) && bin->slabcur != NULL) { + arena_bin_slabs_full_insert(arena, bin, bin->slabcur); + bin->slabcur = NULL; + } + slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind); + if (bin->slabcur != NULL) { /* - * Another thread updated runcur while this one ran without the - * bin lock in arena_bin_nonfull_run_get(). + * Another thread updated slabcur while this one ran without the + * bin lock in arena_bin_nonfull_slab_get(). */ - void *ret; - assert(bin->runcur->nfree > 0); - ret = arena_run_reg_alloc(bin->runcur, bin_info); - if (run != NULL) { - arena_chunk_t *chunk; - - /* - * arena_run_alloc_small() may have allocated run, or - * it may have pulled run from the bin's run tree. - * Therefore it is unsafe to make any assumptions about - * how run has previously been used, and - * arena_bin_lower_run() must be called, as if a region - * were just deallocated from the run. - */ - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - if (run->nfree == bin_info->nregs) - arena_dalloc_bin_run(arena, chunk, run, bin); - else - arena_bin_lower_run(arena, chunk, run, bin); + if (extent_nfree_get(bin->slabcur) > 0) { + void *ret = arena_slab_reg_alloc(bin->slabcur, + bin_info); + if (slab != NULL) { + /* + * arena_slab_alloc() may have allocated slab, + * or it may have been pulled from + * slabs_nonfull. Therefore it is unsafe to + * make any assumptions about how slab has + * previously been used, and + * arena_bin_lower_slab() must be called, as if + * a region were just deallocated from the slab. 
+ */ + if (extent_nfree_get(slab) == bin_info->nregs) { + arena_dalloc_bin_slab(tsdn, arena, slab, + bin); + } else { + arena_bin_lower_slab(tsdn, arena, slab, + bin); + } + } + return ret; } - return (ret); - } - if (run == NULL) - return (NULL); + arena_bin_slabs_full_insert(arena, bin, bin->slabcur); + bin->slabcur = NULL; + } - bin->runcur = run; + if (slab == NULL) { + return NULL; + } + bin->slabcur = slab; - assert(bin->runcur->nfree > 0); + assert(extent_nfree_get(bin->slabcur) > 0); - return (arena_run_reg_alloc(bin->runcur, bin_info)); + return arena_slab_reg_alloc(slab, bin_info); } void -arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind, - uint64_t prof_accumbytes) -{ +arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, + cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) { unsigned i, nfill; - arena_bin_t *bin; + bin_t *bin; assert(tbin->ncached == 0); - if (config_prof && arena_prof_accum(arena, prof_accumbytes)) - prof_idump(); + if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) { + prof_idump(tsdn); + } bin = &arena->bins[binind]; - malloc_mutex_lock(&bin->lock); + malloc_mutex_lock(tsdn, &bin->lock); for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> - tbin->lg_fill_div); i < nfill; i++) { - arena_run_t *run; + tcache->lg_fill_div[binind]); i < nfill; i++) { + extent_t *slab; void *ptr; - if ((run = bin->runcur) != NULL && run->nfree > 0) - ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]); - else - ptr = arena_bin_malloc_hard(arena, bin); + if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > + 0) { + ptr = arena_slab_reg_alloc(slab, &bin_infos[binind]); + } else { + ptr = arena_bin_malloc_hard(tsdn, arena, bin, binind); + } if (ptr == NULL) { /* * OOM. tbin->avail isn't yet filled down to its first * element, so the successful allocations (if any) must - * be moved to the base of tbin->avail before bailing - * out. + * be moved just before tbin->avail before bailing out. */ if (i > 0) { - memmove(tbin->avail, &tbin->avail[nfill - i], + memmove(tbin->avail - i, tbin->avail - nfill, i * sizeof(void *)); } break; } if (config_fill && unlikely(opt_junk_alloc)) { - arena_alloc_junk_small(ptr, &arena_bin_info[binind], - true); + arena_alloc_junk_small(ptr, &bin_infos[binind], true); } /* Insert such that low regions get used first. 
*/ - tbin->avail[nfill - 1 - i] = ptr; + *(tbin->avail - nfill + i) = ptr; } if (config_stats) { bin->stats.nmalloc += i; @@ -2033,139 +1294,46 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind, bin->stats.nfills++; tbin->tstats.nrequests = 0; } - malloc_mutex_unlock(&bin->lock); + malloc_mutex_unlock(tsdn, &bin->lock); tbin->ncached = i; + arena_decay_tick(tsdn, arena); } void -arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero) -{ - - if (zero) { - size_t redzone_size = bin_info->redzone_size; - memset((void *)((uintptr_t)ptr - redzone_size), 0xa5, - redzone_size); - memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5, - redzone_size); - } else { - memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5, - bin_info->reg_interval); +arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) { + if (!zero) { + memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size); } } -#ifdef JEMALLOC_JET -#undef arena_redzone_corruption -#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl) -#endif -static void -arena_redzone_corruption(void *ptr, size_t usize, bool after, - size_t offset, uint8_t byte) -{ - - malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p " - "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s", - after ? "after" : "before", ptr, usize, byte); -} -#ifdef JEMALLOC_JET -#undef arena_redzone_corruption -#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption) -arena_redzone_corruption_t *arena_redzone_corruption = - JEMALLOC_N(arena_redzone_corruption_impl); -#endif - static void -arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset) -{ - bool error = false; - - if (opt_junk_alloc) { - size_t size = bin_info->reg_size; - size_t redzone_size = bin_info->redzone_size; - size_t i; - - for (i = 1; i <= redzone_size; i++) { - uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i); - if (*byte != 0xa5) { - error = true; - arena_redzone_corruption(ptr, size, false, i, - *byte); - if (reset) - *byte = 0xa5; - } - } - for (i = 0; i < redzone_size; i++) { - uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i); - if (*byte != 0xa5) { - error = true; - arena_redzone_corruption(ptr, size, true, i, - *byte); - if (reset) - *byte = 0xa5; - } - } - } - - if (opt_abort && error) - abort(); +arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) { + memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size); } +arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small = + arena_dalloc_junk_small_impl; -#ifdef JEMALLOC_JET -#undef arena_dalloc_junk_small -#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl) -#endif -void -arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info) -{ - size_t redzone_size = bin_info->redzone_size; - - arena_redzones_validate(ptr, bin_info, false); - memset((void *)((uintptr_t)ptr - redzone_size), 0x5a, - bin_info->reg_interval); -} -#ifdef JEMALLOC_JET -#undef arena_dalloc_junk_small -#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) -arena_dalloc_junk_small_t *arena_dalloc_junk_small = - JEMALLOC_N(arena_dalloc_junk_small_impl); -#endif - -void -arena_quarantine_junk_small(void *ptr, size_t usize) -{ - szind_t binind; - arena_bin_info_t *bin_info; - cassert(config_fill); - assert(opt_junk_free); - assert(opt_quarantine); - assert(usize <= SMALL_MAXCLASS); - - binind = size2index(usize); - bin_info = &arena_bin_info[binind]; - arena_redzones_validate(ptr, bin_info, true); -} - -void 
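/*
 * Illustrative note (assumption, not shown in this diff): the junk-fill paths
 * above, arena_alloc_junk_small() and arena_dalloc_junk_small(), are gated by
 * opt_junk_alloc/opt_junk_free, which an application can enable at startup
 * through the jemalloc option string, e.g. via the application-defined
 * malloc_conf symbol (je_malloc_conf in prefixed builds).
 */
/* Fill newly allocated small regions and freed regions with junk bytes. */
const char *malloc_conf = "junk:true";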
* -arena_malloc_small(arena_t *arena, size_t size, bool zero) -{ +static void * +arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) { void *ret; - arena_bin_t *bin; - arena_run_t *run; - szind_t binind; + bin_t *bin; + size_t usize; + extent_t *slab; - binind = size2index(size); assert(binind < NBINS); bin = &arena->bins[binind]; - size = index2size(binind); + usize = sz_index2size(binind); - malloc_mutex_lock(&bin->lock); - if ((run = bin->runcur) != NULL && run->nfree > 0) - ret = arena_run_reg_alloc(run, &arena_bin_info[binind]); - else - ret = arena_bin_malloc_hard(arena, bin); + malloc_mutex_lock(tsdn, &bin->lock); + if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) { + ret = arena_slab_reg_alloc(slab, &bin_infos[binind]); + } else { + ret = arena_bin_malloc_hard(tsdn, arena, bin, binind); + } if (ret == NULL) { - malloc_mutex_unlock(&bin->lock); - return (NULL); + malloc_mutex_unlock(tsdn, &bin->lock); + return NULL; } if (config_stats) { @@ -2173,864 +1341,465 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero) bin->stats.nrequests++; bin->stats.curregs++; } - malloc_mutex_unlock(&bin->lock); - if (config_prof && !isthreaded && arena_prof_accum(arena, size)) - prof_idump(); + malloc_mutex_unlock(tsdn, &bin->lock); + if (config_prof && arena_prof_accum(tsdn, arena, usize)) { + prof_idump(tsdn); + } if (!zero) { if (config_fill) { if (unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ret, - &arena_bin_info[binind], false); - } else if (unlikely(opt_zero)) - memset(ret, 0, size); + &bin_infos[binind], false); + } else if (unlikely(opt_zero)) { + memset(ret, 0, usize); + } } - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); } else { if (config_fill && unlikely(opt_junk_alloc)) { - arena_alloc_junk_small(ret, &arena_bin_info[binind], + arena_alloc_junk_small(ret, &bin_infos[binind], true); } - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - memset(ret, 0, size); + memset(ret, 0, usize); } - return (ret); + arena_decay_tick(tsdn, arena); + return ret; } void * -arena_malloc_large(arena_t *arena, size_t size, bool zero) -{ - void *ret; - size_t usize; - uintptr_t random_offset; - arena_run_t *run; - arena_chunk_map_misc_t *miscelm; - UNUSED bool idump; - - /* Large allocation. */ - usize = s2u(size); - malloc_mutex_lock(&arena->lock); - if (config_cache_oblivious) { - uint64_t r; - - /* - * Compute a uniformly distributed offset within the first page - * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64 - * for 4 KiB pages and 64-byte cachelines. 
- */ - prng64(r, LG_PAGE - LG_CACHELINE, arena->offset_state, - UINT64_C(6364136223846793009), - UINT64_C(1442695040888963409)); - random_offset = ((uintptr_t)r) << LG_CACHELINE; - } else - random_offset = 0; - run = arena_run_alloc_large(arena, usize + large_pad, zero); - if (run == NULL) { - malloc_mutex_unlock(&arena->lock); - return (NULL); - } - miscelm = arena_run_to_miscelm(run); - ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) + - random_offset); - if (config_stats) { - szind_t index = size2index(usize) - NBINS; - - arena->stats.nmalloc_large++; - arena->stats.nrequests_large++; - arena->stats.allocated_large += usize; - arena->stats.lstats[index].nmalloc++; - arena->stats.lstats[index].nrequests++; - arena->stats.lstats[index].curruns++; - } - if (config_prof) - idump = arena_prof_accum_locked(arena, usize); - malloc_mutex_unlock(&arena->lock); - if (config_prof && idump) - prof_idump(); +arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, + bool zero) { + assert(!tsdn_null(tsdn) || arena != NULL); - if (!zero) { - if (config_fill) { - if (unlikely(opt_junk_alloc)) - memset(ret, 0xa5, usize); - else if (unlikely(opt_zero)) - memset(ret, 0, usize); - } + if (likely(!tsdn_null(tsdn))) { + arena = arena_choose(tsdn_tsd(tsdn), arena); } - - return (ret); -} - -/* Only handles large allocations that require more than page alignment. */ -static void * -arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, - bool zero) -{ - void *ret; - size_t alloc_size, leadsize, trailsize; - arena_run_t *run; - arena_chunk_t *chunk; - arena_chunk_map_misc_t *miscelm; - void *rpages; - - assert(usize == PAGE_CEILING(usize)); - - arena = arena_choose(tsd, arena); - if (unlikely(arena == NULL)) - return (NULL); - - alignment = PAGE_CEILING(alignment); - alloc_size = usize + large_pad + alignment - PAGE; - - malloc_mutex_lock(&arena->lock); - run = arena_run_alloc_large(arena, alloc_size, false); - if (run == NULL) { - malloc_mutex_unlock(&arena->lock); - return (NULL); - } - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - miscelm = arena_run_to_miscelm(run); - rpages = arena_miscelm_to_rpages(miscelm); - - leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) - - (uintptr_t)rpages; - assert(alloc_size >= leadsize + usize); - trailsize = alloc_size - leadsize - usize - large_pad; - if (leadsize != 0) { - arena_chunk_map_misc_t *head_miscelm = miscelm; - arena_run_t *head_run = run; - - miscelm = arena_miscelm_get(chunk, - arena_miscelm_to_pageind(head_miscelm) + (leadsize >> - LG_PAGE)); - run = &miscelm->run; - - arena_run_trim_head(arena, chunk, head_run, alloc_size, - alloc_size - leadsize); - } - if (trailsize != 0) { - arena_run_trim_tail(arena, chunk, run, usize + large_pad + - trailsize, usize + large_pad, false); - } - if (arena_run_init_large(arena, run, usize + large_pad, zero)) { - size_t run_ind = - arena_miscelm_to_pageind(arena_run_to_miscelm(run)); - bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0); - bool decommitted = (arena_mapbits_decommitted_get(chunk, - run_ind) != 0); - - assert(decommitted); /* Cause of OOM. 
*/ - arena_run_dalloc(arena, run, dirty, false, decommitted); - malloc_mutex_unlock(&arena->lock); - return (NULL); - } - ret = arena_miscelm_to_rpages(miscelm); - - if (config_stats) { - szind_t index = size2index(usize) - NBINS; - - arena->stats.nmalloc_large++; - arena->stats.nrequests_large++; - arena->stats.allocated_large += usize; - arena->stats.lstats[index].nmalloc++; - arena->stats.lstats[index].nrequests++; - arena->stats.lstats[index].curruns++; + if (unlikely(arena == NULL)) { + return NULL; } - malloc_mutex_unlock(&arena->lock); - if (config_fill && !zero) { - if (unlikely(opt_junk_alloc)) - memset(ret, 0xa5, usize); - else if (unlikely(opt_zero)) - memset(ret, 0, usize); + if (likely(size <= SMALL_MAXCLASS)) { + return arena_malloc_small(tsdn, arena, ind, zero); } - return (ret); + return large_malloc(tsdn, arena, sz_index2size(ind), zero); } void * -arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, - bool zero, tcache_t *tcache) -{ +arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, + bool zero, tcache_t *tcache) { void *ret; if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE && (usize & PAGE_MASK) == 0))) { - /* Small; alignment doesn't require special run placement. */ - ret = arena_malloc(tsd, arena, usize, zero, tcache); - } else if (usize <= large_maxclass && alignment <= PAGE) { - /* - * Large; alignment doesn't require special run placement. - * However, the cached pointer may be at a random offset from - * the base of the run, so do some bit manipulation to retrieve - * the base. - */ - ret = arena_malloc(tsd, arena, usize, zero, tcache); - if (config_cache_oblivious) - ret = (void *)((uintptr_t)ret & ~PAGE_MASK); + /* Small; alignment doesn't require special slab placement. */ + ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize), + zero, tcache, true); } else { - if (likely(usize <= large_maxclass)) { - ret = arena_palloc_large(tsd, arena, usize, alignment, - zero); - } else if (likely(alignment <= chunksize)) - ret = huge_malloc(tsd, arena, usize, zero, tcache); - else { - ret = huge_palloc(tsd, arena, usize, alignment, zero, - tcache); + if (likely(alignment <= CACHELINE)) { + ret = large_malloc(tsdn, arena, usize, zero); + } else { + ret = large_palloc(tsdn, arena, usize, alignment, zero); } } - return (ret); + return ret; } void -arena_prof_promoted(const void *ptr, size_t size) -{ - arena_chunk_t *chunk; - size_t pageind; - szind_t binind; - +arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize) { cassert(config_prof); assert(ptr != NULL); - assert(CHUNK_ADDR2BASE(ptr) != ptr); - assert(isalloc(ptr, false) == LARGE_MINCLASS); - assert(isalloc(ptr, true) == LARGE_MINCLASS); - assert(size <= SMALL_MAXCLASS); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - binind = size2index(size); - assert(binind < NBINS); - arena_mapbits_large_binind_set(chunk, pageind, binind); - - assert(isalloc(ptr, false) == LARGE_MINCLASS); - assert(isalloc(ptr, true) == size); -} - -static void -arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, - arena_bin_t *bin) -{ - - /* Dissociate run from bin. 
*/ - if (run == bin->runcur) - bin->runcur = NULL; - else { - szind_t binind = arena_bin_index(extent_node_arena_get( - &chunk->node), bin); - arena_bin_info_t *bin_info = &arena_bin_info[binind]; - - if (bin_info->nregs != 1) { - /* - * This block's conditional is necessary because if the - * run only contains one region, then it never gets - * inserted into the non-full runs tree. - */ - arena_bin_runs_remove(bin, run); - } - } -} + assert(isalloc(tsdn, ptr) == LARGE_MINCLASS); + assert(usize <= SMALL_MAXCLASS); -static void -arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - arena_bin_t *bin) -{ + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); - assert(run != bin->runcur); - assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) == - NULL); + extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true); + arena_t *arena = extent_arena_get(extent); - malloc_mutex_unlock(&bin->lock); - /******************************/ - malloc_mutex_lock(&arena->lock); - arena_run_dalloc_decommit(arena, chunk, run); - malloc_mutex_unlock(&arena->lock); - /****************************/ - malloc_mutex_lock(&bin->lock); - if (config_stats) - bin->stats.curruns--; -} + szind_t szind = sz_size2index(usize); + extent_szind_set(extent, szind); + rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, + szind, false); -static void -arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - arena_bin_t *bin) -{ + prof_accum_cancel(tsdn, &arena->prof_accum, usize); - /* - * Make sure that if bin->runcur is non-NULL, it refers to the lowest - * non-full run. It is okay to NULL runcur out rather than proactively - * keeping it pointing at the lowest non-full run. - */ - if ((uintptr_t)run < (uintptr_t)bin->runcur) { - /* Switch runcur. 
*/ - if (bin->runcur->nfree > 0) - arena_bin_runs_insert(bin, bin->runcur); - bin->runcur = run; - if (config_stats) - bin->stats.reruns++; - } else - arena_bin_runs_insert(bin, run); + assert(isalloc(tsdn, ptr) == usize); } -static void -arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr, - arena_chunk_map_bits_t *bitselm, bool junked) -{ - size_t pageind, rpages_ind; - arena_run_t *run; - arena_bin_t *bin; - arena_bin_info_t *bin_info; - szind_t binind; - - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); - run = &arena_miscelm_get(chunk, rpages_ind)->run; - binind = run->binind; - bin = &arena->bins[binind]; - bin_info = &arena_bin_info[binind]; - - if (!junked && config_fill && unlikely(opt_junk_free)) - arena_dalloc_junk_small(ptr, bin_info); - - arena_run_reg_dalloc(run, ptr); - if (run->nfree == bin_info->nregs) { - arena_dissociate_bin_run(chunk, run, bin); - arena_dalloc_bin_run(arena, chunk, run, bin); - } else if (run->nfree == 1 && run != bin->runcur) - arena_bin_lower_run(arena, chunk, run, bin); - - if (config_stats) { - bin->stats.ndalloc++; - bin->stats.curregs--; - } -} - -void -arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr, - arena_chunk_map_bits_t *bitselm) -{ +static size_t +arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) { + cassert(config_prof); + assert(ptr != NULL); - arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, true); -} + extent_szind_set(extent, NBINS); + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, + NBINS, false); -void -arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t pageind, arena_chunk_map_bits_t *bitselm) -{ - arena_run_t *run; - arena_bin_t *bin; - size_t rpages_ind; + assert(isalloc(tsdn, ptr) == LARGE_MINCLASS); - rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); - run = &arena_miscelm_get(chunk, rpages_ind)->run; - bin = &arena->bins[run->binind]; - malloc_mutex_lock(&bin->lock); - arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false); - malloc_mutex_unlock(&bin->lock); + return LARGE_MINCLASS; } void -arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t pageind) -{ - arena_chunk_map_bits_t *bitselm; +arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache, + bool slow_path) { + cassert(config_prof); + assert(opt_prof); - if (config_debug) { - /* arena_ptr_small_binind_get() does extra sanity checking. 
*/ - assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, - pageind)) != BININD_INVALID); + extent_t *extent = iealloc(tsdn, ptr); + size_t usize = arena_prof_demote(tsdn, extent, ptr); + if (usize <= tcache_maxclass) { + tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, + sz_size2index(usize), slow_path); + } else { + large_dalloc(tsdn, extent); } - bitselm = arena_bitselm_get(chunk, pageind); - arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm); -} - -#ifdef JEMALLOC_JET -#undef arena_dalloc_junk_large -#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl) -#endif -void -arena_dalloc_junk_large(void *ptr, size_t usize) -{ - - if (config_fill && unlikely(opt_junk_free)) - memset(ptr, 0x5a, usize); } -#ifdef JEMALLOC_JET -#undef arena_dalloc_junk_large -#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large) -arena_dalloc_junk_large_t *arena_dalloc_junk_large = - JEMALLOC_N(arena_dalloc_junk_large_impl); -#endif static void -arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk, - void *ptr, bool junked) -{ - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); - arena_run_t *run = &miscelm->run; - - if (config_fill || config_stats) { - size_t usize = arena_mapbits_large_size_get(chunk, pageind) - - large_pad; - - if (!junked) - arena_dalloc_junk_large(ptr, usize); - if (config_stats) { - szind_t index = size2index(usize) - NBINS; +arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) { + /* Dissociate slab from bin. */ + if (slab == bin->slabcur) { + bin->slabcur = NULL; + } else { + szind_t binind = extent_szind_get(slab); + const bin_info_t *bin_info = &bin_infos[binind]; - arena->stats.ndalloc_large++; - arena->stats.allocated_large -= usize; - arena->stats.lstats[index].ndalloc++; - arena->stats.lstats[index].curruns--; + /* + * The following block's conditional is necessary because if the + * slab only contains one region, then it never gets inserted + * into the non-full slabs heap. + */ + if (bin_info->nregs == 1) { + arena_bin_slabs_full_remove(arena, bin, slab); + } else { + arena_bin_slabs_nonfull_remove(bin, slab); } } - - arena_run_dalloc_decommit(arena, chunk, run); -} - -void -arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk, - void *ptr) -{ - - arena_dalloc_large_locked_impl(arena, chunk, ptr, true); } -void -arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr) -{ +static void +arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, + bin_t *bin) { + assert(slab != bin->slabcur); - malloc_mutex_lock(&arena->lock); - arena_dalloc_large_locked_impl(arena, chunk, ptr, false); - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &bin->lock); + /******************************/ + arena_slab_dalloc(tsdn, arena, slab); + /****************************/ + malloc_mutex_lock(tsdn, &bin->lock); + if (config_stats) { + bin->stats.curslabs--; + } } static void -arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t oldsize, size_t size) -{ - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); - arena_run_t *run = &miscelm->run; - - assert(size < oldsize); +arena_bin_lower_slab(UNUSED tsdn_t *tsdn, arena_t *arena, extent_t *slab, + bin_t *bin) { + assert(extent_nfree_get(slab) > 0); /* - * Shrink the run, and make trailing pages available for other - * allocations. 
+ * Make sure that if bin->slabcur is non-NULL, it refers to the + * oldest/lowest non-full slab. It is okay to NULL slabcur out rather + * than proactively keeping it pointing at the oldest/lowest non-full + * slab. */ - malloc_mutex_lock(&arena->lock); - arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size + - large_pad, true); - if (config_stats) { - szind_t oldindex = size2index(oldsize) - NBINS; - szind_t index = size2index(size) - NBINS; - - arena->stats.ndalloc_large++; - arena->stats.allocated_large -= oldsize; - arena->stats.lstats[oldindex].ndalloc++; - arena->stats.lstats[oldindex].curruns--; - - arena->stats.nmalloc_large++; - arena->stats.nrequests_large++; - arena->stats.allocated_large += size; - arena->stats.lstats[index].nmalloc++; - arena->stats.lstats[index].nrequests++; - arena->stats.lstats[index].curruns++; - } - malloc_mutex_unlock(&arena->lock); -} - -static bool -arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t oldsize, size_t usize_min, size_t usize_max, bool zero) -{ - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - size_t npages = (oldsize + large_pad) >> LG_PAGE; - size_t followsize; - - assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) - - large_pad); - - /* Try to extend the run. */ - malloc_mutex_lock(&arena->lock); - if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk, - pageind+npages) != 0) - goto label_fail; - followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages); - if (oldsize + followsize >= usize_min) { - /* - * The next run is available and sufficiently large. Split the - * following run, then merge the first part with the existing - * allocation. - */ - arena_run_t *run; - size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask; - - usize = usize_max; - while (oldsize + followsize < usize) - usize = index2size(size2index(usize)-1); - assert(usize >= usize_min); - assert(usize >= oldsize); - splitsize = usize - oldsize; - if (splitsize == 0) - goto label_fail; - - run = &arena_miscelm_get(chunk, pageind+npages)->run; - if (arena_run_split_large(arena, run, splitsize, zero)) - goto label_fail; - - if (config_cache_oblivious && zero) { - /* - * Zero the trailing bytes of the original allocation's - * last page, since they are in an indeterminate state. - */ - assert(PAGE_CEILING(oldsize) == oldsize); - memset((void *)((uintptr_t)ptr + oldsize), 0, - PAGE_CEILING((uintptr_t)ptr) - (uintptr_t)ptr); + if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) { + /* Switch slabcur. */ + if (extent_nfree_get(bin->slabcur) > 0) { + arena_bin_slabs_nonfull_insert(bin, bin->slabcur); + } else { + arena_bin_slabs_full_insert(arena, bin, bin->slabcur); } - - size = oldsize + splitsize; - npages = (size + large_pad) >> LG_PAGE; - - /* - * Mark the extended run as dirty if either portion of the run - * was dirty before allocation. This is rather pedantic, - * because there's not actually any sequence of events that - * could cause the resulting run to be passed to - * arena_run_dalloc() with the dirty argument set to false - * (which is when dirty flag consistency would really matter). - */ - flag_dirty = arena_mapbits_dirty_get(chunk, pageind) | - arena_mapbits_dirty_get(chunk, pageind+npages-1); - flag_unzeroed_mask = flag_dirty == 0 ? 
CHUNK_MAP_UNZEROED : 0; - arena_mapbits_large_set(chunk, pageind, size + large_pad, - flag_dirty | (flag_unzeroed_mask & - arena_mapbits_unzeroed_get(chunk, pageind))); - arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty | - (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, - pageind+npages-1))); - + bin->slabcur = slab; if (config_stats) { - szind_t oldindex = size2index(oldsize) - NBINS; - szind_t index = size2index(size) - NBINS; - - arena->stats.ndalloc_large++; - arena->stats.allocated_large -= oldsize; - arena->stats.lstats[oldindex].ndalloc++; - arena->stats.lstats[oldindex].curruns--; - - arena->stats.nmalloc_large++; - arena->stats.nrequests_large++; - arena->stats.allocated_large += size; - arena->stats.lstats[index].nmalloc++; - arena->stats.lstats[index].nrequests++; - arena->stats.lstats[index].curruns++; + bin->stats.reslabs++; } - malloc_mutex_unlock(&arena->lock); - return (false); + } else { + arena_bin_slabs_nonfull_insert(bin, slab); } -label_fail: - malloc_mutex_unlock(&arena->lock); - return (true); } -#ifdef JEMALLOC_JET -#undef arena_ralloc_junk_large -#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl) -#endif static void -arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize) -{ +arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab, + void *ptr, bool junked) { + arena_slab_data_t *slab_data = extent_slab_data_get(slab); + szind_t binind = extent_szind_get(slab); + bin_t *bin = &arena->bins[binind]; + const bin_info_t *bin_info = &bin_infos[binind]; + + if (!junked && config_fill && unlikely(opt_junk_free)) { + arena_dalloc_junk_small(ptr, bin_info); + } + + arena_slab_reg_dalloc(slab, slab_data, ptr); + unsigned nfree = extent_nfree_get(slab); + if (nfree == bin_info->nregs) { + arena_dissociate_bin_slab(arena, slab, bin); + arena_dalloc_bin_slab(tsdn, arena, slab, bin); + } else if (nfree == 1 && slab != bin->slabcur) { + arena_bin_slabs_full_remove(arena, bin, slab); + arena_bin_lower_slab(tsdn, arena, slab, bin); + } - if (config_fill && unlikely(opt_junk_free)) { - memset((void *)((uintptr_t)ptr + usize), 0x5a, - old_usize - usize); + if (config_stats) { + bin->stats.ndalloc++; + bin->stats.curregs--; } } -#ifdef JEMALLOC_JET -#undef arena_ralloc_junk_large -#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large) -arena_ralloc_junk_large_t *arena_ralloc_junk_large = - JEMALLOC_N(arena_ralloc_junk_large_impl); -#endif -/* - * Try to resize a large allocation, in order to avoid copying. This will - * always fail if growing an object, and the following run is already in use. - */ -static bool -arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min, - size_t usize_max, bool zero) -{ - arena_chunk_t *chunk; - arena_t *arena; +void +arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent, + void *ptr) { + arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, true); +} - if (oldsize == usize_max) { - /* Current size class is compatible and maximal. 
*/ - return (false); - } +static void +arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) { + szind_t binind = extent_szind_get(extent); + bin_t *bin = &arena->bins[binind]; - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - arena = extent_node_arena_get(&chunk->node); + malloc_mutex_lock(tsdn, &bin->lock); + arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, false); + malloc_mutex_unlock(tsdn, &bin->lock); +} - if (oldsize < usize_max) { - bool ret = arena_ralloc_large_grow(arena, chunk, ptr, oldsize, - usize_min, usize_max, zero); - if (config_fill && !ret && !zero) { - if (unlikely(opt_junk_alloc)) { - memset((void *)((uintptr_t)ptr + oldsize), 0xa5, - isalloc(ptr, config_prof) - oldsize); - } else if (unlikely(opt_zero)) { - memset((void *)((uintptr_t)ptr + oldsize), 0, - isalloc(ptr, config_prof) - oldsize); - } - } - return (ret); - } +void +arena_dalloc_small(tsdn_t *tsdn, void *ptr) { + extent_t *extent = iealloc(tsdn, ptr); + arena_t *arena = extent_arena_get(extent); - assert(oldsize > usize_max); - /* Fill before shrinking in order avoid a race. */ - arena_ralloc_junk_large(ptr, oldsize, usize_max); - arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, usize_max); - return (false); + arena_dalloc_bin(tsdn, arena, extent, ptr); + arena_decay_tick(tsdn, arena); } bool -arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra, - bool zero) -{ - size_t usize_min, usize_max; - - usize_min = s2u(size); - usize_max = s2u(size + extra); - if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) { +arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, + size_t extra, bool zero) { + /* Calls with non-zero extra had to clamp extra. */ + assert(extra == 0 || size + extra <= LARGE_MAXCLASS); + + if (unlikely(size > LARGE_MAXCLASS)) { + return true; + } + + extent_t *extent = iealloc(tsdn, ptr); + size_t usize_min = sz_s2u(size); + size_t usize_max = sz_s2u(size + extra); + if (likely(oldsize <= SMALL_MAXCLASS && usize_min <= SMALL_MAXCLASS)) { /* * Avoid moving the allocation if the size class can be left the * same. */ - if (oldsize <= SMALL_MAXCLASS) { - assert(arena_bin_info[size2index(oldsize)].reg_size == - oldsize); - if ((usize_max <= SMALL_MAXCLASS && - size2index(usize_max) == size2index(oldsize)) || - (size <= oldsize && usize_max >= oldsize)) - return (false); - } else { - if (usize_max > SMALL_MAXCLASS) { - if (!arena_ralloc_large(ptr, oldsize, usize_min, - usize_max, zero)) - return (false); - } + assert(bin_infos[sz_size2index(oldsize)].reg_size == + oldsize); + if ((usize_max > SMALL_MAXCLASS || sz_size2index(usize_max) != + sz_size2index(oldsize)) && (size > oldsize || usize_max < + oldsize)) { + return true; } - /* Reallocation would require a move. 
*/ - return (true); - } else { - return (huge_ralloc_no_move(ptr, oldsize, usize_min, usize_max, - zero)); + arena_decay_tick(tsdn, extent_arena_get(extent)); + return false; + } else if (oldsize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS) { + return large_ralloc_no_move(tsdn, extent, usize_min, usize_max, + zero); } + + return true; } static void * -arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize, - size_t alignment, bool zero, tcache_t *tcache) -{ - - if (alignment == 0) - return (arena_malloc(tsd, arena, usize, zero, tcache)); - usize = sa2u(usize, alignment); - if (usize == 0) - return (NULL); - return (ipalloct(tsd, usize, alignment, zero, tcache, arena)); +arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, bool zero, tcache_t *tcache) { + if (alignment == 0) { + return arena_malloc(tsdn, arena, usize, sz_size2index(usize), + zero, tcache, true); + } + usize = sz_sa2u(usize, alignment); + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { + return NULL; + } + return ipalloct(tsdn, usize, alignment, zero, tcache, arena); } void * -arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, - size_t alignment, bool zero, tcache_t *tcache) -{ - void *ret; - size_t usize; - - usize = s2u(size); - if (usize == 0) - return (NULL); - - if (likely(usize <= large_maxclass)) { - size_t copysize; +arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize, + size_t size, size_t alignment, bool zero, tcache_t *tcache) { + size_t usize = sz_s2u(size); + if (unlikely(usize == 0 || size > LARGE_MAXCLASS)) { + return NULL; + } + if (likely(usize <= SMALL_MAXCLASS)) { /* Try to avoid moving the allocation. */ - if (!arena_ralloc_no_move(ptr, oldsize, usize, 0, zero)) - return (ptr); - - /* - * size and oldsize are different enough that we need to move - * the object. In that case, fall back to allocating new space - * and copying. - */ - ret = arena_ralloc_move_helper(tsd, arena, usize, alignment, - zero, tcache); - if (ret == NULL) - return (NULL); + if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero)) { + return ptr; + } + } - /* - * Junk/zero-filling were already done by - * ipalloc()/arena_malloc(). - */ + if (oldsize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS) { + return large_ralloc(tsdn, arena, iealloc(tsdn, ptr), usize, + alignment, zero, tcache); + } - copysize = (usize < oldsize) ? usize : oldsize; - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize); - memcpy(ret, ptr, copysize); - isqalloc(tsd, ptr, oldsize, tcache); - } else { - ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment, - zero, tcache); + /* + * size and oldsize are different enough that we need to move the + * object. In that case, fall back to allocating new space and copying. + */ + void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment, + zero, tcache); + if (ret == NULL) { + return NULL; } - return (ret); + + /* + * Junk/zero-filling were already done by + * ipalloc()/arena_malloc(). + */ + + size_t copysize = (usize < oldsize) ? 
usize : oldsize; + memcpy(ret, ptr, copysize); + isdalloct(tsdn, ptr, oldsize, tcache, NULL, true); + return ret; } dss_prec_t -arena_dss_prec_get(arena_t *arena) -{ - dss_prec_t ret; - - malloc_mutex_lock(&arena->lock); - ret = arena->dss_prec; - malloc_mutex_unlock(&arena->lock); - return (ret); +arena_dss_prec_get(arena_t *arena) { + return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE); } bool -arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) -{ - - if (!have_dss) +arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) { + if (!have_dss) { return (dss_prec != dss_prec_disabled); - malloc_mutex_lock(&arena->lock); - arena->dss_prec = dss_prec; - malloc_mutex_unlock(&arena->lock); - return (false); + } + atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE); + return false; } ssize_t -arena_lg_dirty_mult_default_get(void) -{ +arena_dirty_decay_ms_default_get(void) { + return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED); +} - return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default)); +bool +arena_dirty_decay_ms_default_set(ssize_t decay_ms) { + if (!arena_decay_ms_valid(decay_ms)) { + return true; + } + atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED); + return false; +} + +ssize_t +arena_muzzy_decay_ms_default_get(void) { + return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED); +} + +bool +arena_muzzy_decay_ms_default_set(ssize_t decay_ms) { + if (!arena_decay_ms_valid(decay_ms)) { + return true; + } + atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED); + return false; } bool -arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult) -{ +arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit, + size_t *new_limit) { + assert(opt_retain); + + pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0); + if (new_limit != NULL) { + size_t limit = *new_limit; + /* Grow no more than the new limit. 
*/ + if ((new_ind = sz_psz2ind(limit + 1) - 1) > + EXTENT_GROW_MAX_PIND) { + return true; + } + } + + malloc_mutex_lock(tsd_tsdn(tsd), &arena->extent_grow_mtx); + if (old_limit != NULL) { + *old_limit = sz_pind2sz(arena->retain_grow_limit); + } + if (new_limit != NULL) { + arena->retain_grow_limit = new_ind; + } + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->extent_grow_mtx); - if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) - return (true); - atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult); - return (false); + return false; +} + +unsigned +arena_nthreads_get(arena_t *arena, bool internal) { + return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED); } void -arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult, - size_t *nactive, size_t *ndirty, arena_stats_t *astats, - malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats, - malloc_huge_stats_t *hstats) -{ - unsigned i; +arena_nthreads_inc(arena_t *arena, bool internal) { + atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED); +} - malloc_mutex_lock(&arena->lock); - *dss = dss_prec_names[arena->dss_prec]; - *lg_dirty_mult = arena->lg_dirty_mult; - *nactive += arena->nactive; - *ndirty += arena->ndirty; - - astats->mapped += arena->stats.mapped; - astats->npurge += arena->stats.npurge; - astats->nmadvise += arena->stats.nmadvise; - astats->purged += arena->stats.purged; - astats->metadata_mapped += arena->stats.metadata_mapped; - astats->metadata_allocated += arena_metadata_allocated_get(arena); - astats->allocated_large += arena->stats.allocated_large; - astats->nmalloc_large += arena->stats.nmalloc_large; - astats->ndalloc_large += arena->stats.ndalloc_large; - astats->nrequests_large += arena->stats.nrequests_large; - astats->allocated_huge += arena->stats.allocated_huge; - astats->nmalloc_huge += arena->stats.nmalloc_huge; - astats->ndalloc_huge += arena->stats.ndalloc_huge; - - for (i = 0; i < nlclasses; i++) { - lstats[i].nmalloc += arena->stats.lstats[i].nmalloc; - lstats[i].ndalloc += arena->stats.lstats[i].ndalloc; - lstats[i].nrequests += arena->stats.lstats[i].nrequests; - lstats[i].curruns += arena->stats.lstats[i].curruns; - } - - for (i = 0; i < nhclasses; i++) { - hstats[i].nmalloc += arena->stats.hstats[i].nmalloc; - hstats[i].ndalloc += arena->stats.hstats[i].ndalloc; - hstats[i].curhchunks += arena->stats.hstats[i].curhchunks; - } - malloc_mutex_unlock(&arena->lock); +void +arena_nthreads_dec(arena_t *arena, bool internal) { + atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED); +} - for (i = 0; i < NBINS; i++) { - arena_bin_t *bin = &arena->bins[i]; - - malloc_mutex_lock(&bin->lock); - bstats[i].nmalloc += bin->stats.nmalloc; - bstats[i].ndalloc += bin->stats.ndalloc; - bstats[i].nrequests += bin->stats.nrequests; - bstats[i].curregs += bin->stats.curregs; - if (config_tcache) { - bstats[i].nfills += bin->stats.nfills; - bstats[i].nflushes += bin->stats.nflushes; - } - bstats[i].nruns += bin->stats.nruns; - bstats[i].reruns += bin->stats.reruns; - bstats[i].curruns += bin->stats.curruns; - malloc_mutex_unlock(&bin->lock); - } +size_t +arena_extent_sn_next(arena_t *arena) { + return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED); } arena_t * -arena_new(unsigned ind) -{ +arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { arena_t *arena; + base_t *base; unsigned i; - arena_bin_t *bin; - /* - * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly - * because there is no way to clean up if 
base_alloc() OOMs. - */ - if (config_stats) { - arena = (arena_t *)base_alloc(CACHELINE_CEILING(sizeof(arena_t)) - + QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t) + - nhclasses) * sizeof(malloc_huge_stats_t)); - } else - arena = (arena_t *)base_alloc(sizeof(arena_t)); - if (arena == NULL) - return (NULL); - - arena->ind = ind; - arena->nthreads = 0; - if (malloc_mutex_init(&arena->lock)) - return (NULL); + if (ind == 0) { + base = b0get(); + } else { + base = base_new(tsdn, ind, extent_hooks); + if (base == NULL) { + return NULL; + } + } + + arena = (arena_t *)base_alloc(tsdn, base, sizeof(arena_t), CACHELINE); + if (arena == NULL) { + goto label_error; + } + + atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED); + atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED); + arena->last_thd = NULL; if (config_stats) { - memset(&arena->stats, 0, sizeof(arena_stats_t)); - arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena - + CACHELINE_CEILING(sizeof(arena_t))); - memset(arena->stats.lstats, 0, nlclasses * - sizeof(malloc_large_stats_t)); - arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena - + CACHELINE_CEILING(sizeof(arena_t)) + - QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t))); - memset(arena->stats.hstats, 0, nhclasses * - sizeof(malloc_huge_stats_t)); - if (config_tcache) - ql_new(&arena->tcache_ql); - } - - if (config_prof) - arena->prof_accumbytes = 0; + if (arena_stats_init(tsdn, &arena->stats)) { + goto label_error; + } + + ql_new(&arena->tcache_ql); + ql_new(&arena->cache_bin_array_descriptor_ql); + if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql", + WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) { + goto label_error; + } + } + + if (config_prof) { + if (prof_accum_init(tsdn, &arena->prof_accum)) { + goto label_error; + } + } if (config_cache_oblivious) { /* @@ -3040,279 +1809,235 @@ arena_new(unsigned ind) * cost of test repeatability. For debug builds, instead use a * deterministic seed. */ - arena->offset_state = config_debug ? ind : - (uint64_t)(uintptr_t)arena; + atomic_store_zu(&arena->offset_state, config_debug ? ind : + (size_t)(uintptr_t)arena, ATOMIC_RELAXED); } - arena->dss_prec = chunk_dss_prec_get(); - - arena->spare = NULL; - - arena->lg_dirty_mult = arena_lg_dirty_mult_default_get(); - arena->purging = false; - arena->nactive = 0; - arena->ndirty = 0; - - arena_avail_tree_new(&arena->runs_avail); - qr_new(&arena->runs_dirty, rd_link); - qr_new(&arena->chunks_cache, cc_link); - - ql_new(&arena->huge); - if (malloc_mutex_init(&arena->huge_mtx)) - return (NULL); + atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED); - extent_tree_szad_new(&arena->chunks_szad_cached); - extent_tree_ad_new(&arena->chunks_ad_cached); - extent_tree_szad_new(&arena->chunks_szad_retained); - extent_tree_ad_new(&arena->chunks_ad_retained); - if (malloc_mutex_init(&arena->chunks_mtx)) - return (NULL); - ql_new(&arena->node_cache); - if (malloc_mutex_init(&arena->node_cache_mtx)) - return (NULL); + atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(), + ATOMIC_RELAXED); - arena->chunk_hooks = chunk_hooks_default; + atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED); - /* Initialize bins. 
*/ - for (i = 0; i < NBINS; i++) { - bin = &arena->bins[i]; - if (malloc_mutex_init(&bin->lock)) - return (NULL); - bin->runcur = NULL; - arena_run_tree_new(&bin->runs); - if (config_stats) - memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); + extent_list_init(&arena->large); + if (malloc_mutex_init(&arena->large_mtx, "arena_large", + WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) { + goto label_error; } - return (arena); -} - -/* - * Calculate bin_info->run_size such that it meets the following constraints: - * - * *) bin_info->run_size <= arena_maxrun - * *) bin_info->nregs <= RUN_MAXREGS - * - * bin_info->nregs and bin_info->reg0_offset are also calculated here, since - * these settings are all interdependent. - */ -static void -bin_info_run_size_calc(arena_bin_info_t *bin_info) -{ - size_t pad_size; - size_t try_run_size, perfect_run_size, actual_run_size; - uint32_t try_nregs, perfect_nregs, actual_nregs; - /* - * Determine redzone size based on minimum alignment and minimum - * redzone size. Add padding to the end of the run if it is needed to - * align the regions. The padding allows each redzone to be half the - * minimum alignment; without the padding, each redzone would have to - * be twice as large in order to maintain alignment. + * Delay coalescing for dirty extents despite the disruptive effect on + * memory layout for best-fit extent allocation, since cached extents + * are likely to be reused soon after deallocation, and the cost of + * merging/splitting extents is non-trivial. */ - if (config_fill && unlikely(opt_redzone)) { - size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) - - 1); - if (align_min <= REDZONE_MINSIZE) { - bin_info->redzone_size = REDZONE_MINSIZE; - pad_size = 0; - } else { - bin_info->redzone_size = align_min >> 1; - pad_size = bin_info->redzone_size; - } - } else { - bin_info->redzone_size = 0; - pad_size = 0; + if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty, + true)) { + goto label_error; } - bin_info->reg_interval = bin_info->reg_size + - (bin_info->redzone_size << 1); - /* - * Compute run size under ideal conditions (no redzones, no limit on run - * size). + * Coalesce muzzy extents immediately, because operations on them are in + * the critical path much less often than for dirty extents. */ - try_run_size = PAGE; - try_nregs = try_run_size / bin_info->reg_size; - do { - perfect_run_size = try_run_size; - perfect_nregs = try_nregs; - - try_run_size += PAGE; - try_nregs = try_run_size / bin_info->reg_size; - } while (perfect_run_size != perfect_nregs * bin_info->reg_size); - assert(perfect_nregs <= RUN_MAXREGS); - - actual_run_size = perfect_run_size; - actual_nregs = (actual_run_size - pad_size) / bin_info->reg_interval; - + if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy, + false)) { + goto label_error; + } /* - * Redzones can require enough padding that not even a single region can - * fit within the number of pages that would normally be dedicated to a - * run for this size class. Increase the run size until at least one - * region fits. + * Coalesce retained extents immediately, in part because they will + * never be evicted (and therefore there's no opportunity for delayed + * coalescing), but also because operations on retained extents are not + * in the critical path. 
*/ - while (actual_nregs == 0) { - assert(config_fill && unlikely(opt_redzone)); + if (extents_init(tsdn, &arena->extents_retained, extent_state_retained, + false)) { + goto label_error; + } - actual_run_size += PAGE; - actual_nregs = (actual_run_size - pad_size) / - bin_info->reg_interval; + if (arena_decay_init(&arena->decay_dirty, + arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) { + goto label_error; + } + if (arena_decay_init(&arena->decay_muzzy, + arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) { + goto label_error; } - /* - * Make sure that the run will fit within an arena chunk. - */ - while (actual_run_size > arena_maxrun) { - actual_run_size -= PAGE; - actual_nregs = (actual_run_size - pad_size) / - bin_info->reg_interval; + arena->extent_grow_next = sz_psz2ind(HUGEPAGE); + arena->retain_grow_limit = EXTENT_GROW_MAX_PIND; + if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow", + WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) { + goto label_error; + } + + extent_avail_new(&arena->extent_avail); + if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail", + WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) { + goto label_error; + } + + /* Initialize bins. */ + for (i = 0; i < NBINS; i++) { + bool err = bin_init(&arena->bins[i]); + if (err) { + goto label_error; + } } - assert(actual_nregs > 0); - assert(actual_run_size == s2u(actual_run_size)); - /* Copy final settings. */ - bin_info->run_size = actual_run_size; - bin_info->nregs = actual_nregs; - bin_info->reg0_offset = actual_run_size - (actual_nregs * - bin_info->reg_interval) - pad_size + bin_info->redzone_size; + arena->base = base; + /* Set arena before creating background threads. */ + arena_set(ind, arena); - if (actual_run_size > small_maxrun) - small_maxrun = actual_run_size; + nstime_init(&arena->create_time, 0); + nstime_update(&arena->create_time); - assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs - * bin_info->reg_interval) + pad_size == bin_info->run_size); + /* We don't support reentrancy for arena 0 bootstrapping. */ + if (ind != 0) { + /* + * If we're here, then arena 0 already exists, so bootstrapping + * is done enough that we should have tsd. 
+ */ + assert(!tsdn_null(tsdn)); + pre_reentrancy(tsdn_tsd(tsdn), arena); + if (hooks_arena_new_hook) { + hooks_arena_new_hook(); + } + post_reentrancy(tsdn_tsd(tsdn)); + } + + return arena; +label_error: + if (ind != 0) { + base_delete(tsdn, base); + } + return NULL; } -static void -bin_info_init(void) -{ - arena_bin_info_t *bin_info; - -#define BIN_INFO_INIT_bin_yes(index, size) \ - bin_info = &arena_bin_info[index]; \ - bin_info->reg_size = size; \ - bin_info_run_size_calc(bin_info); \ - bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs); -#define BIN_INFO_INIT_bin_no(index, size) -#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ - BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)) +void +arena_boot(void) { + arena_dirty_decay_ms_default_set(opt_dirty_decay_ms); + arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms); +#define REGIND_bin_yes(index, reg_size) \ + div_init(&arena_binind_div_info[(index)], (reg_size)); +#define REGIND_bin_no(index, reg_size) +#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \ + lg_delta_lookup) \ + REGIND_bin_##bin(index, (1U<<lg_grp) + (ndelta << lg_delta)) SIZE_CLASSES -#undef BIN_INFO_INIT_bin_yes -#undef BIN_INFO_INIT_bin_no +#undef REGIND_bin_yes +#undef REGIND_bin_no #undef SC } -static bool -small_run_size_init(void) -{ - - assert(small_maxrun != 0); - - small_run_tab = (bool *)base_alloc(sizeof(bool) * (small_maxrun >> - LG_PAGE)); - if (small_run_tab == NULL) - return (true); +void +arena_prefork0(tsdn_t *tsdn, arena_t *arena) { + malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx); + malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx); +} -#define TAB_INIT_bin_yes(index, size) { \ - arena_bin_info_t *bin_info = &arena_bin_info[index]; \ - small_run_tab[bin_info->run_size >> LG_PAGE] = true; \ +void +arena_prefork1(tsdn_t *tsdn, arena_t *arena) { + if (config_stats) { + malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx); } -#define TAB_INIT_bin_no(index, size) -#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ - TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)) - SIZE_CLASSES -#undef TAB_INIT_bin_yes -#undef TAB_INIT_bin_no -#undef SC - - return (false); } -bool -arena_boot(void) -{ - unsigned i; +void +arena_prefork2(tsdn_t *tsdn, arena_t *arena) { + malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx); +} - arena_lg_dirty_mult_default_set(opt_lg_dirty_mult); +void +arena_prefork3(tsdn_t *tsdn, arena_t *arena) { + extents_prefork(tsdn, &arena->extents_dirty); + extents_prefork(tsdn, &arena->extents_muzzy); + extents_prefork(tsdn, &arena->extents_retained); +} - /* - * Compute the header size such that it is large enough to contain the - * page map. The page map is biased to omit entries for the header - * itself, so some iteration is necessary to compute the map bias. - * - * 1) Compute safe header_size and map_bias values that include enough - * space for an unbiased page map. - * 2) Refine map_bias based on (1) to omit the header pages in the page - * map. The resulting map_bias may be one too small. - * 3) Refine map_bias based on (2). The result will be >= the result - * from (2), and will always be correct. 
- */ - map_bias = 0; - for (i = 0; i < 3; i++) { - size_t header_size = offsetof(arena_chunk_t, map_bits) + - ((sizeof(arena_chunk_map_bits_t) + - sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias)); - map_bias = (header_size + PAGE_MASK) >> LG_PAGE; - } - assert(map_bias > 0); - - map_misc_offset = offsetof(arena_chunk_t, map_bits) + - sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias); - - arena_maxrun = chunksize - (map_bias << LG_PAGE); - assert(arena_maxrun > 0); - large_maxclass = index2size(size2index(chunksize)-1); - if (large_maxclass > arena_maxrun) { - /* - * For small chunk sizes it's possible for there to be fewer - * non-header pages available than are necessary to serve the - * size classes just below chunksize. - */ - large_maxclass = arena_maxrun; - } - assert(large_maxclass > 0); - nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS); - nhclasses = NSIZES - nlclasses - NBINS; +void +arena_prefork4(tsdn_t *tsdn, arena_t *arena) { + malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx); +} - bin_info_init(); - return (small_run_size_init()); +void +arena_prefork5(tsdn_t *tsdn, arena_t *arena) { + base_prefork(tsdn, arena->base); } void -arena_prefork(arena_t *arena) -{ - unsigned i; +arena_prefork6(tsdn_t *tsdn, arena_t *arena) { + malloc_mutex_prefork(tsdn, &arena->large_mtx); +} - malloc_mutex_prefork(&arena->lock); - malloc_mutex_prefork(&arena->huge_mtx); - malloc_mutex_prefork(&arena->chunks_mtx); - malloc_mutex_prefork(&arena->node_cache_mtx); - for (i = 0; i < NBINS; i++) - malloc_mutex_prefork(&arena->bins[i].lock); +void +arena_prefork7(tsdn_t *tsdn, arena_t *arena) { + for (unsigned i = 0; i < NBINS; i++) { + bin_prefork(tsdn, &arena->bins[i]); + } } void -arena_postfork_parent(arena_t *arena) -{ +arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) { unsigned i; - for (i = 0; i < NBINS; i++) - malloc_mutex_postfork_parent(&arena->bins[i].lock); - malloc_mutex_postfork_parent(&arena->node_cache_mtx); - malloc_mutex_postfork_parent(&arena->chunks_mtx); - malloc_mutex_postfork_parent(&arena->huge_mtx); - malloc_mutex_postfork_parent(&arena->lock); + for (i = 0; i < NBINS; i++) { + bin_postfork_parent(tsdn, &arena->bins[i]); + } + malloc_mutex_postfork_parent(tsdn, &arena->large_mtx); + base_postfork_parent(tsdn, arena->base); + malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx); + extents_postfork_parent(tsdn, &arena->extents_dirty); + extents_postfork_parent(tsdn, &arena->extents_muzzy); + extents_postfork_parent(tsdn, &arena->extents_retained); + malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx); + malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx); + malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx); + if (config_stats) { + malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx); + } } void -arena_postfork_child(arena_t *arena) -{ +arena_postfork_child(tsdn_t *tsdn, arena_t *arena) { unsigned i; - for (i = 0; i < NBINS; i++) - malloc_mutex_postfork_child(&arena->bins[i].lock); - malloc_mutex_postfork_child(&arena->node_cache_mtx); - malloc_mutex_postfork_child(&arena->chunks_mtx); - malloc_mutex_postfork_child(&arena->huge_mtx); - malloc_mutex_postfork_child(&arena->lock); + atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED); + atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED); + if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) { + arena_nthreads_inc(arena, false); + } + if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) { + arena_nthreads_inc(arena, true); + } + if (config_stats) { + 
ql_new(&arena->tcache_ql); + ql_new(&arena->cache_bin_array_descriptor_ql); + tcache_t *tcache = tcache_get(tsdn_tsd(tsdn)); + if (tcache != NULL && tcache->arena == arena) { + ql_elm_new(tcache, link); + ql_tail_insert(&arena->tcache_ql, tcache, link); + cache_bin_array_descriptor_init( + &tcache->cache_bin_array_descriptor, + tcache->bins_small, tcache->bins_large); + ql_tail_insert(&arena->cache_bin_array_descriptor_ql, + &tcache->cache_bin_array_descriptor, link); + } + } + + for (i = 0; i < NBINS; i++) { + bin_postfork_child(tsdn, &arena->bins[i]); + } + malloc_mutex_postfork_child(tsdn, &arena->large_mtx); + base_postfork_child(tsdn, arena->base); + malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx); + extents_postfork_child(tsdn, &arena->extents_dirty); + extents_postfork_child(tsdn, &arena->extents_muzzy); + extents_postfork_child(tsdn, &arena->extents_retained); + malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx); + malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx); + malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx); + if (config_stats) { + malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx); + } } diff --git a/deps/jemalloc/src/atomic.c b/deps/jemalloc/src/atomic.c deleted file mode 100644 index 77ee31311..000000000 --- a/deps/jemalloc/src/atomic.c +++ /dev/null @@ -1,2 +0,0 @@ -#define JEMALLOC_ATOMIC_C_ -#include "jemalloc/internal/jemalloc_internal.h" diff --git a/deps/jemalloc/src/background_thread.c b/deps/jemalloc/src/background_thread.c new file mode 100644 index 000000000..3517a3bb8 --- /dev/null +++ b/deps/jemalloc/src/background_thread.c @@ -0,0 +1,909 @@ +#define JEMALLOC_BACKGROUND_THREAD_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" + +/******************************************************************************/ +/* Data. */ + +/* This option should be opt-in only. */ +#define BACKGROUND_THREAD_DEFAULT false +/* Read-only after initialization. */ +bool opt_background_thread = BACKGROUND_THREAD_DEFAULT; +size_t opt_max_background_threads = MAX_BACKGROUND_THREAD_LIMIT; + +/* Used for thread creation, termination and stats. */ +malloc_mutex_t background_thread_lock; +/* Indicates global state. Atomic because decay reads this w/o locking. */ +atomic_b_t background_thread_enabled_state; +size_t n_background_threads; +size_t max_background_threads; +/* Thread info per-index. */ +background_thread_info_t *background_thread_info; + +/* False if no necessary runtime support. 
*/ +bool can_enable_background_thread; + +/******************************************************************************/ + +#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER +#include <dlfcn.h> + +static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *, + void *(*)(void *), void *__restrict); + +static void +pthread_create_wrapper_init(void) { +#ifdef JEMALLOC_LAZY_LOCK + if (!isthreaded) { + isthreaded = true; + } +#endif +} + +int +pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr, + void *(*start_routine)(void *), void *__restrict arg) { + pthread_create_wrapper_init(); + + return pthread_create_fptr(thread, attr, start_routine, arg); +} +#endif /* JEMALLOC_PTHREAD_CREATE_WRAPPER */ + +#ifndef JEMALLOC_BACKGROUND_THREAD +#define NOT_REACHED { not_reached(); } +bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED +bool background_threads_enable(tsd_t *tsd) NOT_REACHED +bool background_threads_disable(tsd_t *tsd) NOT_REACHED +void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena, + arena_decay_t *decay, size_t npages_new) NOT_REACHED +void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED +void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED +void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED +void background_thread_postfork_child(tsdn_t *tsdn) NOT_REACHED +bool background_thread_stats_read(tsdn_t *tsdn, + background_thread_stats_t *stats) NOT_REACHED +void background_thread_ctl_init(tsdn_t *tsdn) NOT_REACHED +#undef NOT_REACHED +#else + +static bool background_thread_enabled_at_fork; + +static void +background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) { + background_thread_wakeup_time_set(tsdn, info, 0); + info->npages_to_purge_new = 0; + if (config_stats) { + info->tot_n_runs = 0; + nstime_init(&info->tot_sleep_time, 0); + } +} + +static inline bool +set_current_thread_affinity(UNUSED int cpu) { +#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY) + cpu_set_t cpuset; + CPU_ZERO(&cpuset); + CPU_SET(cpu, &cpuset); + int ret = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset); + + return (ret != 0); +#else + return false; +#endif +} + +/* Threshold for determining when to wake up the background thread. */ +#define BACKGROUND_THREAD_NPAGES_THRESHOLD UINT64_C(1024) +#define BILLION UINT64_C(1000000000) +/* Minimal sleep interval 100 ms. */ +#define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10) + +static inline size_t +decay_npurge_after_interval(arena_decay_t *decay, size_t interval) { + size_t i; + uint64_t sum = 0; + for (i = 0; i < interval; i++) { + sum += decay->backlog[i] * h_steps[i]; + } + for (; i < SMOOTHSTEP_NSTEPS; i++) { + sum += decay->backlog[i] * (h_steps[i] - h_steps[i - interval]); + } + + return (size_t)(sum >> SMOOTHSTEP_BFP); +} + +static uint64_t +arena_decay_compute_purge_interval_impl(tsdn_t *tsdn, arena_decay_t *decay, + extents_t *extents) { + if (malloc_mutex_trylock(tsdn, &decay->mtx)) { + /* Use minimal interval if decay is contended. */ + return BACKGROUND_THREAD_MIN_INTERVAL_NS; + } + + uint64_t interval; + ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED); + if (decay_time <= 0) { + /* Purging is eagerly done or disabled currently. 
*/ + interval = BACKGROUND_THREAD_INDEFINITE_SLEEP; + goto label_done; + } + + uint64_t decay_interval_ns = nstime_ns(&decay->interval); + assert(decay_interval_ns > 0); + size_t npages = extents_npages_get(extents); + if (npages == 0) { + unsigned i; + for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) { + if (decay->backlog[i] > 0) { + break; + } + } + if (i == SMOOTHSTEP_NSTEPS) { + /* No dirty pages recorded. Sleep indefinitely. */ + interval = BACKGROUND_THREAD_INDEFINITE_SLEEP; + goto label_done; + } + } + if (npages <= BACKGROUND_THREAD_NPAGES_THRESHOLD) { + /* Use max interval. */ + interval = decay_interval_ns * SMOOTHSTEP_NSTEPS; + goto label_done; + } + + size_t lb = BACKGROUND_THREAD_MIN_INTERVAL_NS / decay_interval_ns; + size_t ub = SMOOTHSTEP_NSTEPS; + /* Minimal 2 intervals to ensure reaching next epoch deadline. */ + lb = (lb < 2) ? 2 : lb; + if ((decay_interval_ns * ub <= BACKGROUND_THREAD_MIN_INTERVAL_NS) || + (lb + 2 > ub)) { + interval = BACKGROUND_THREAD_MIN_INTERVAL_NS; + goto label_done; + } + + assert(lb + 2 <= ub); + size_t npurge_lb, npurge_ub; + npurge_lb = decay_npurge_after_interval(decay, lb); + if (npurge_lb > BACKGROUND_THREAD_NPAGES_THRESHOLD) { + interval = decay_interval_ns * lb; + goto label_done; + } + npurge_ub = decay_npurge_after_interval(decay, ub); + if (npurge_ub < BACKGROUND_THREAD_NPAGES_THRESHOLD) { + interval = decay_interval_ns * ub; + goto label_done; + } + + unsigned n_search = 0; + size_t target, npurge; + while ((npurge_lb + BACKGROUND_THREAD_NPAGES_THRESHOLD < npurge_ub) + && (lb + 2 < ub)) { + target = (lb + ub) / 2; + npurge = decay_npurge_after_interval(decay, target); + if (npurge > BACKGROUND_THREAD_NPAGES_THRESHOLD) { + ub = target; + npurge_ub = npurge; + } else { + lb = target; + npurge_lb = npurge; + } + assert(n_search++ < lg_floor(SMOOTHSTEP_NSTEPS) + 1); + } + interval = decay_interval_ns * (ub + lb) / 2; +label_done: + interval = (interval < BACKGROUND_THREAD_MIN_INTERVAL_NS) ? + BACKGROUND_THREAD_MIN_INTERVAL_NS : interval; + malloc_mutex_unlock(tsdn, &decay->mtx); + + return interval; +} + +/* Compute purge interval for background threads. */ +static uint64_t +arena_decay_compute_purge_interval(tsdn_t *tsdn, arena_t *arena) { + uint64_t i1, i2; + i1 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_dirty, + &arena->extents_dirty); + if (i1 == BACKGROUND_THREAD_MIN_INTERVAL_NS) { + return i1; + } + i2 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_muzzy, + &arena->extents_muzzy); + + return i1 < i2 ? i1 : i2; +} + +static void +background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info, + uint64_t interval) { + if (config_stats) { + info->tot_n_runs++; + } + info->npages_to_purge_new = 0; + + struct timeval tv; + /* Specific clock required by timedwait. */ + gettimeofday(&tv, NULL); + nstime_t before_sleep; + nstime_init2(&before_sleep, tv.tv_sec, tv.tv_usec * 1000); + + int ret; + if (interval == BACKGROUND_THREAD_INDEFINITE_SLEEP) { + assert(background_thread_indefinite_sleep(info)); + ret = pthread_cond_wait(&info->cond, &info->mtx.lock); + assert(ret == 0); + } else { + assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS && + interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP); + /* We need malloc clock (can be different from tv). 
*/ + nstime_t next_wakeup; + nstime_init(&next_wakeup, 0); + nstime_update(&next_wakeup); + nstime_iadd(&next_wakeup, interval); + assert(nstime_ns(&next_wakeup) < + BACKGROUND_THREAD_INDEFINITE_SLEEP); + background_thread_wakeup_time_set(tsdn, info, + nstime_ns(&next_wakeup)); + + nstime_t ts_wakeup; + nstime_copy(&ts_wakeup, &before_sleep); + nstime_iadd(&ts_wakeup, interval); + struct timespec ts; + ts.tv_sec = (size_t)nstime_sec(&ts_wakeup); + ts.tv_nsec = (size_t)nstime_nsec(&ts_wakeup); + + assert(!background_thread_indefinite_sleep(info)); + ret = pthread_cond_timedwait(&info->cond, &info->mtx.lock, &ts); + assert(ret == ETIMEDOUT || ret == 0); + background_thread_wakeup_time_set(tsdn, info, + BACKGROUND_THREAD_INDEFINITE_SLEEP); + } + if (config_stats) { + gettimeofday(&tv, NULL); + nstime_t after_sleep; + nstime_init2(&after_sleep, tv.tv_sec, tv.tv_usec * 1000); + if (nstime_compare(&after_sleep, &before_sleep) > 0) { + nstime_subtract(&after_sleep, &before_sleep); + nstime_add(&info->tot_sleep_time, &after_sleep); + } + } +} + +static bool +background_thread_pause_check(tsdn_t *tsdn, background_thread_info_t *info) { + if (unlikely(info->state == background_thread_paused)) { + malloc_mutex_unlock(tsdn, &info->mtx); + /* Wait on global lock to update status. */ + malloc_mutex_lock(tsdn, &background_thread_lock); + malloc_mutex_unlock(tsdn, &background_thread_lock); + malloc_mutex_lock(tsdn, &info->mtx); + return true; + } + + return false; +} + +static inline void +background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info, unsigned ind) { + uint64_t min_interval = BACKGROUND_THREAD_INDEFINITE_SLEEP; + unsigned narenas = narenas_total_get(); + + for (unsigned i = ind; i < narenas; i += max_background_threads) { + arena_t *arena = arena_get(tsdn, i, false); + if (!arena) { + continue; + } + arena_decay(tsdn, arena, true, false); + if (min_interval == BACKGROUND_THREAD_MIN_INTERVAL_NS) { + /* Min interval will be used. */ + continue; + } + uint64_t interval = arena_decay_compute_purge_interval(tsdn, + arena); + assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS); + if (min_interval > interval) { + min_interval = interval; + } + } + background_thread_sleep(tsdn, info, min_interval); +} + +static bool +background_threads_disable_single(tsd_t *tsd, background_thread_info_t *info) { + if (info == &background_thread_info[0]) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), + &background_thread_lock); + } else { + malloc_mutex_assert_not_owner(tsd_tsdn(tsd), + &background_thread_lock); + } + + pre_reentrancy(tsd, NULL); + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + bool has_thread; + assert(info->state != background_thread_paused); + if (info->state == background_thread_started) { + has_thread = true; + info->state = background_thread_stopped; + pthread_cond_signal(&info->cond); + } else { + has_thread = false; + } + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + + if (!has_thread) { + post_reentrancy(tsd); + return false; + } + void *ret; + if (pthread_join(info->thread, &ret)) { + post_reentrancy(tsd); + return true; + } + assert(ret == NULL); + n_background_threads--; + post_reentrancy(tsd); + + return false; +} + +static void *background_thread_entry(void *ind_arg); + +static int +background_thread_create_signals_masked(pthread_t *thread, + const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg) { + /* + * Mask signals during thread creation so that the thread inherits + * an empty signal set. 
+ */ + sigset_t set; + sigfillset(&set); + sigset_t oldset; + int mask_err = pthread_sigmask(SIG_SETMASK, &set, &oldset); + if (mask_err != 0) { + return mask_err; + } + int create_err = pthread_create_wrapper(thread, attr, start_routine, + arg); + /* + * Restore the signal mask. Failure to restore the signal mask here + * changes program behavior. + */ + int restore_err = pthread_sigmask(SIG_SETMASK, &oldset, NULL); + if (restore_err != 0) { + malloc_printf("<jemalloc>: background thread creation " + "failed (%d), and signal mask restoration failed " + "(%d)\n", create_err, restore_err); + if (opt_abort) { + abort(); + } + } + return create_err; +} + +static bool +check_background_thread_creation(tsd_t *tsd, unsigned *n_created, + bool *created_threads) { + bool ret = false; + if (likely(*n_created == n_background_threads)) { + return ret; + } + + tsdn_t *tsdn = tsd_tsdn(tsd); + malloc_mutex_unlock(tsdn, &background_thread_info[0].mtx); + for (unsigned i = 1; i < max_background_threads; i++) { + if (created_threads[i]) { + continue; + } + background_thread_info_t *info = &background_thread_info[i]; + malloc_mutex_lock(tsdn, &info->mtx); + /* + * In case of the background_thread_paused state because of + * arena reset, delay the creation. + */ + bool create = (info->state == background_thread_started); + malloc_mutex_unlock(tsdn, &info->mtx); + if (!create) { + continue; + } + + pre_reentrancy(tsd, NULL); + int err = background_thread_create_signals_masked(&info->thread, + NULL, background_thread_entry, (void *)(uintptr_t)i); + post_reentrancy(tsd); + + if (err == 0) { + (*n_created)++; + created_threads[i] = true; + } else { + malloc_printf("<jemalloc>: background thread " + "creation failed (%d)\n", err); + if (opt_abort) { + abort(); + } + } + /* Return to restart the loop since we unlocked. */ + ret = true; + break; + } + malloc_mutex_lock(tsdn, &background_thread_info[0].mtx); + + return ret; +} + +static void +background_thread0_work(tsd_t *tsd) { + /* Thread0 is also responsible for launching / terminating threads. */ + VARIABLE_ARRAY(bool, created_threads, max_background_threads); + unsigned i; + for (i = 1; i < max_background_threads; i++) { + created_threads[i] = false; + } + /* Start working, and create more threads when asked. */ + unsigned n_created = 1; + while (background_thread_info[0].state != background_thread_stopped) { + if (background_thread_pause_check(tsd_tsdn(tsd), + &background_thread_info[0])) { + continue; + } + if (check_background_thread_creation(tsd, &n_created, + (bool *)&created_threads)) { + continue; + } + background_work_sleep_once(tsd_tsdn(tsd), + &background_thread_info[0], 0); + } + + /* + * Shut down other threads at exit. Note that the ctl thread is holding + * the global background_thread mutex (and is waiting) for us. + */ + assert(!background_thread_enabled()); + for (i = 1; i < max_background_threads; i++) { + background_thread_info_t *info = &background_thread_info[i]; + assert(info->state != background_thread_paused); + if (created_threads[i]) { + background_threads_disable_single(tsd, info); + } else { + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + if (info->state != background_thread_stopped) { + /* The thread was not created. 
*/ + assert(info->state == + background_thread_started); + n_background_threads--; + info->state = background_thread_stopped; + } + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + } + } + background_thread_info[0].state = background_thread_stopped; + assert(n_background_threads == 1); +} + +static void +background_work(tsd_t *tsd, unsigned ind) { + background_thread_info_t *info = &background_thread_info[ind]; + + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + background_thread_wakeup_time_set(tsd_tsdn(tsd), info, + BACKGROUND_THREAD_INDEFINITE_SLEEP); + if (ind == 0) { + background_thread0_work(tsd); + } else { + while (info->state != background_thread_stopped) { + if (background_thread_pause_check(tsd_tsdn(tsd), + info)) { + continue; + } + background_work_sleep_once(tsd_tsdn(tsd), info, ind); + } + } + assert(info->state == background_thread_stopped); + background_thread_wakeup_time_set(tsd_tsdn(tsd), info, 0); + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); +} + +static void * +background_thread_entry(void *ind_arg) { + unsigned thread_ind = (unsigned)(uintptr_t)ind_arg; + assert(thread_ind < max_background_threads); +#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP + pthread_setname_np(pthread_self(), "jemalloc_bg_thd"); +#endif + if (opt_percpu_arena != percpu_arena_disabled) { + set_current_thread_affinity((int)thread_ind); + } + /* + * Start periodic background work. We use internal tsd which avoids + * side effects, for example triggering new arena creation (which in + * turn triggers another background thread creation). + */ + background_work(tsd_internal_fetch(), thread_ind); + assert(pthread_equal(pthread_self(), + background_thread_info[thread_ind].thread)); + + return NULL; +} + +static void +background_thread_init(tsd_t *tsd, background_thread_info_t *info) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock); + info->state = background_thread_started; + background_thread_info_init(tsd_tsdn(tsd), info); + n_background_threads++; +} + +/* Create a new background thread if needed. */ +bool +background_thread_create(tsd_t *tsd, unsigned arena_ind) { + assert(have_background_thread); + malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock); + + /* We create at most NCPUs threads. */ + size_t thread_ind = arena_ind % max_background_threads; + background_thread_info_t *info = &background_thread_info[thread_ind]; + + bool need_new_thread; + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + need_new_thread = background_thread_enabled() && + (info->state == background_thread_stopped); + if (need_new_thread) { + background_thread_init(tsd, info); + } + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + if (!need_new_thread) { + return false; + } + if (arena_ind != 0) { + /* Threads are created asynchronously by Thread 0. */ + background_thread_info_t *t0 = &background_thread_info[0]; + malloc_mutex_lock(tsd_tsdn(tsd), &t0->mtx); + assert(t0->state == background_thread_started); + pthread_cond_signal(&t0->cond); + malloc_mutex_unlock(tsd_tsdn(tsd), &t0->mtx); + + return false; + } + + pre_reentrancy(tsd, NULL); + /* + * To avoid complications (besides reentrancy), create internal + * background threads with the underlying pthread_create. 
+ */ + int err = background_thread_create_signals_masked(&info->thread, NULL, + background_thread_entry, (void *)thread_ind); + post_reentrancy(tsd); + + if (err != 0) { + malloc_printf("<jemalloc>: arena 0 background thread creation " + "failed (%d)\n", err); + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + info->state = background_thread_stopped; + n_background_threads--; + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + + return true; + } + + return false; +} + +bool +background_threads_enable(tsd_t *tsd) { + assert(n_background_threads == 0); + assert(background_thread_enabled()); + malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock); + + VARIABLE_ARRAY(bool, marked, max_background_threads); + unsigned i, nmarked; + for (i = 0; i < max_background_threads; i++) { + marked[i] = false; + } + nmarked = 0; + /* Thread 0 is required and created at the end. */ + marked[0] = true; + /* Mark the threads we need to create for thread 0. */ + unsigned n = narenas_total_get(); + for (i = 1; i < n; i++) { + if (marked[i % max_background_threads] || + arena_get(tsd_tsdn(tsd), i, false) == NULL) { + continue; + } + background_thread_info_t *info = &background_thread_info[ + i % max_background_threads]; + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + assert(info->state == background_thread_stopped); + background_thread_init(tsd, info); + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + marked[i % max_background_threads] = true; + if (++nmarked == max_background_threads) { + break; + } + } + + return background_thread_create(tsd, 0); +} + +bool +background_threads_disable(tsd_t *tsd) { + assert(!background_thread_enabled()); + malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock); + + /* Thread 0 will be responsible for terminating other threads. */ + if (background_threads_disable_single(tsd, + &background_thread_info[0])) { + return true; + } + assert(n_background_threads == 0); + + return false; +} + +/* Check if we need to signal the background thread early. */ +void +background_thread_interval_check(tsdn_t *tsdn, arena_t *arena, + arena_decay_t *decay, size_t npages_new) { + background_thread_info_t *info = arena_background_thread_info_get( + arena); + if (malloc_mutex_trylock(tsdn, &info->mtx)) { + /* + * Background thread may hold the mutex for a long period of + * time. We'd like to avoid the variance on application + * threads. So keep this non-blocking, and leave the work to a + * future epoch. + */ + return; + } + + if (info->state != background_thread_started) { + goto label_done; + } + if (malloc_mutex_trylock(tsdn, &decay->mtx)) { + goto label_done; + } + + ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED); + if (decay_time <= 0) { + /* Purging is eagerly done or disabled currently. */ + goto label_done_unlock2; + } + uint64_t decay_interval_ns = nstime_ns(&decay->interval); + assert(decay_interval_ns > 0); + + nstime_t diff; + nstime_init(&diff, background_thread_wakeup_time_get(info)); + if (nstime_compare(&diff, &decay->epoch) <= 0) { + goto label_done_unlock2; + } + nstime_subtract(&diff, &decay->epoch); + if (nstime_ns(&diff) < BACKGROUND_THREAD_MIN_INTERVAL_NS) { + goto label_done_unlock2; + } + + if (npages_new > 0) { + size_t n_epoch = (size_t)(nstime_ns(&diff) / decay_interval_ns); + /* + * Compute how many new pages we would need to purge by the next + * wakeup, which is used to determine if we should signal the + * background thread. 
+ */ + uint64_t npurge_new; + if (n_epoch >= SMOOTHSTEP_NSTEPS) { + npurge_new = npages_new; + } else { + uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1]; + assert(h_steps_max >= + h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]); + npurge_new = npages_new * (h_steps_max - + h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]); + npurge_new >>= SMOOTHSTEP_BFP; + } + info->npages_to_purge_new += npurge_new; + } + + bool should_signal; + if (info->npages_to_purge_new > BACKGROUND_THREAD_NPAGES_THRESHOLD) { + should_signal = true; + } else if (unlikely(background_thread_indefinite_sleep(info)) && + (extents_npages_get(&arena->extents_dirty) > 0 || + extents_npages_get(&arena->extents_muzzy) > 0 || + info->npages_to_purge_new > 0)) { + should_signal = true; + } else { + should_signal = false; + } + + if (should_signal) { + info->npages_to_purge_new = 0; + pthread_cond_signal(&info->cond); + } +label_done_unlock2: + malloc_mutex_unlock(tsdn, &decay->mtx); +label_done: + malloc_mutex_unlock(tsdn, &info->mtx); +} + +void +background_thread_prefork0(tsdn_t *tsdn) { + malloc_mutex_prefork(tsdn, &background_thread_lock); + background_thread_enabled_at_fork = background_thread_enabled(); +} + +void +background_thread_prefork1(tsdn_t *tsdn) { + for (unsigned i = 0; i < max_background_threads; i++) { + malloc_mutex_prefork(tsdn, &background_thread_info[i].mtx); + } +} + +void +background_thread_postfork_parent(tsdn_t *tsdn) { + for (unsigned i = 0; i < max_background_threads; i++) { + malloc_mutex_postfork_parent(tsdn, + &background_thread_info[i].mtx); + } + malloc_mutex_postfork_parent(tsdn, &background_thread_lock); +} + +void +background_thread_postfork_child(tsdn_t *tsdn) { + for (unsigned i = 0; i < max_background_threads; i++) { + malloc_mutex_postfork_child(tsdn, + &background_thread_info[i].mtx); + } + malloc_mutex_postfork_child(tsdn, &background_thread_lock); + if (!background_thread_enabled_at_fork) { + return; + } + + /* Clear background_thread state (reset to disabled for child). 
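
The npages_to_purge_new estimate above scales the newly dirtied pages by the portion of the smoothstep decay curve that will elapse before the next wakeup, using the fixed-point h_steps table. A rough standalone sketch of that arithmetic, with a hypothetical 4-entry table standing in for the real SMOOTHSTEP values:

    #include <stdint.h>
    #include <stdio.h>

    #define NSTEPS 4   /* the real table has SMOOTHSTEP_NSTEPS entries */
    #define BFP    24  /* fixed-point fractional bits (SMOOTHSTEP_BFP) */

    /* Hypothetical monotonically increasing smoothstep values scaled by 2^BFP. */
    static const uint64_t h[NSTEPS] = {
        (uint64_t)(0.10 * (1 << BFP)),
        (uint64_t)(0.50 * (1 << BFP)),
        (uint64_t)(0.90 * (1 << BFP)),
        (uint64_t)(1.00 * (1 << BFP)),
    };

    /* Estimate how many of npages_new will be due for purging after n_epoch
     * decay epochs have elapsed. */
    static uint64_t
    estimate_npurge(uint64_t npages_new, size_t n_epoch) {
        if (n_epoch >= NSTEPS) {
            return npages_new;                    /* decay curve fully elapsed */
        }
        uint64_t remaining = h[NSTEPS - 1] - h[NSTEPS - 1 - n_epoch];
        return (npages_new * remaining) >> BFP;   /* fixed-point multiply */
    }

    int main(void) {
        printf("%llu\n", (unsigned long long)estimate_npurge(1000, 1)); /* ~100 */
        return 0;
    }
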
*/ + malloc_mutex_lock(tsdn, &background_thread_lock); + n_background_threads = 0; + background_thread_enabled_set(tsdn, false); + for (unsigned i = 0; i < max_background_threads; i++) { + background_thread_info_t *info = &background_thread_info[i]; + malloc_mutex_lock(tsdn, &info->mtx); + info->state = background_thread_stopped; + int ret = pthread_cond_init(&info->cond, NULL); + assert(ret == 0); + background_thread_info_init(tsdn, info); + malloc_mutex_unlock(tsdn, &info->mtx); + } + malloc_mutex_unlock(tsdn, &background_thread_lock); +} + +bool +background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) { + assert(config_stats); + malloc_mutex_lock(tsdn, &background_thread_lock); + if (!background_thread_enabled()) { + malloc_mutex_unlock(tsdn, &background_thread_lock); + return true; + } + + stats->num_threads = n_background_threads; + uint64_t num_runs = 0; + nstime_init(&stats->run_interval, 0); + for (unsigned i = 0; i < max_background_threads; i++) { + background_thread_info_t *info = &background_thread_info[i]; + malloc_mutex_lock(tsdn, &info->mtx); + if (info->state != background_thread_stopped) { + num_runs += info->tot_n_runs; + nstime_add(&stats->run_interval, &info->tot_sleep_time); + } + malloc_mutex_unlock(tsdn, &info->mtx); + } + stats->num_runs = num_runs; + if (num_runs > 0) { + nstime_idivide(&stats->run_interval, num_runs); + } + malloc_mutex_unlock(tsdn, &background_thread_lock); + + return false; +} + +#undef BACKGROUND_THREAD_NPAGES_THRESHOLD +#undef BILLION +#undef BACKGROUND_THREAD_MIN_INTERVAL_NS + +static bool +pthread_create_fptr_init(void) { + if (pthread_create_fptr != NULL) { + return false; + } + pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create"); + if (pthread_create_fptr == NULL) { + can_enable_background_thread = false; + if (config_lazy_lock || opt_background_thread) { + malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, " + "\"pthread_create\")\n"); + abort(); + } + } else { + can_enable_background_thread = true; + } + + return false; +} + +/* + * When lazy lock is enabled, we need to make sure setting isthreaded before + * taking any background_thread locks. This is called early in ctl (instead of + * wait for the pthread_create calls to trigger) because the mutex is required + * before creating background threads. 
+ */ +void +background_thread_ctl_init(tsdn_t *tsdn) { + malloc_mutex_assert_not_owner(tsdn, &background_thread_lock); +#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER + pthread_create_fptr_init(); + pthread_create_wrapper_init(); +#endif +} + +#endif /* defined(JEMALLOC_BACKGROUND_THREAD) */ + +bool +background_thread_boot0(void) { + if (!have_background_thread && opt_background_thread) { + malloc_printf("<jemalloc>: option background_thread currently " + "supports pthread only\n"); + return true; + } +#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER + if ((config_lazy_lock || opt_background_thread) && + pthread_create_fptr_init()) { + return true; + } +#endif + return false; +} + +bool +background_thread_boot1(tsdn_t *tsdn) { +#ifdef JEMALLOC_BACKGROUND_THREAD + assert(have_background_thread); + assert(narenas_total_get() > 0); + + if (opt_max_background_threads == MAX_BACKGROUND_THREAD_LIMIT && + ncpus < MAX_BACKGROUND_THREAD_LIMIT) { + opt_max_background_threads = ncpus; + } + max_background_threads = opt_max_background_threads; + + background_thread_enabled_set(tsdn, opt_background_thread); + if (malloc_mutex_init(&background_thread_lock, + "background_thread_global", + WITNESS_RANK_BACKGROUND_THREAD_GLOBAL, + malloc_mutex_rank_exclusive)) { + return true; + } + + background_thread_info = (background_thread_info_t *)base_alloc(tsdn, + b0get(), opt_max_background_threads * + sizeof(background_thread_info_t), CACHELINE); + if (background_thread_info == NULL) { + return true; + } + + for (unsigned i = 0; i < max_background_threads; i++) { + background_thread_info_t *info = &background_thread_info[i]; + /* Thread mutex is rank_inclusive because of thread0. */ + if (malloc_mutex_init(&info->mtx, "background_thread", + WITNESS_RANK_BACKGROUND_THREAD, + malloc_mutex_address_ordered)) { + return true; + } + if (pthread_cond_init(&info->cond, NULL)) { + return true; + } + malloc_mutex_lock(tsdn, &info->mtx); + info->state = background_thread_stopped; + background_thread_info_init(tsdn, info); + malloc_mutex_unlock(tsdn, &info->mtx); + } +#endif + + return false; +} diff --git a/deps/jemalloc/src/base.c b/deps/jemalloc/src/base.c index 7cdcfed86..b0324b5d7 100644 --- a/deps/jemalloc/src/base.c +++ b/deps/jemalloc/src/base.c @@ -1,174 +1,514 @@ -#define JEMALLOC_BASE_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_BASE_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/extent_mmap.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/sz.h" /******************************************************************************/ /* Data. */ -static malloc_mutex_t base_mtx; -static extent_tree_t base_avail_szad; -static extent_node_t *base_nodes; -static size_t base_allocated; -static size_t base_resident; -static size_t base_mapped; +static base_t *b0; + +metadata_thp_mode_t opt_metadata_thp = METADATA_THP_DEFAULT; + +const char *metadata_thp_mode_names[] = { + "disabled", + "auto", + "always" +}; /******************************************************************************/ -/* base_mtx must be held. 
*/ -static extent_node_t * -base_node_try_alloc(void) -{ - extent_node_t *node; +static inline bool +metadata_thp_madvise(void) { + return (metadata_thp_enabled() && + (init_system_thp_mode == thp_mode_default)); +} + +static void * +base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) { + void *addr; + bool zero = true; + bool commit = true; - if (base_nodes == NULL) - return (NULL); - node = base_nodes; - base_nodes = *(extent_node_t **)node; - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t)); - return (node); + /* Use huge page sizes and alignment regardless of opt_metadata_thp. */ + assert(size == HUGEPAGE_CEILING(size)); + size_t alignment = HUGEPAGE; + if (extent_hooks == &extent_hooks_default) { + addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit); + } else { + /* No arena context as we are creating new arenas. */ + tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); + pre_reentrancy(tsd, NULL); + addr = extent_hooks->alloc(extent_hooks, NULL, size, alignment, + &zero, &commit, ind); + post_reentrancy(tsd); + } + + return addr; } -/* base_mtx must be held. */ static void -base_node_dalloc(extent_node_t *node) -{ +base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr, + size_t size) { + /* + * Cascade through dalloc, decommit, purge_forced, and purge_lazy, + * stopping at first success. This cascade is performed for consistency + * with the cascade in extent_dalloc_wrapper() because an application's + * custom hooks may not support e.g. dalloc. This function is only ever + * called as a side effect of arena destruction, so although it might + * seem pointless to do anything besides dalloc here, the application + * may in fact want the end state of all associated virtual memory to be + * in some consistent-but-allocated state. + */ + if (extent_hooks == &extent_hooks_default) { + if (!extent_dalloc_mmap(addr, size)) { + goto label_done; + } + if (!pages_decommit(addr, size)) { + goto label_done; + } + if (!pages_purge_forced(addr, size)) { + goto label_done; + } + if (!pages_purge_lazy(addr, size)) { + goto label_done; + } + /* Nothing worked. This should never happen. */ + not_reached(); + } else { + tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); + pre_reentrancy(tsd, NULL); + if (extent_hooks->dalloc != NULL && + !extent_hooks->dalloc(extent_hooks, addr, size, true, + ind)) { + goto label_post_reentrancy; + } + if (extent_hooks->decommit != NULL && + !extent_hooks->decommit(extent_hooks, addr, size, 0, size, + ind)) { + goto label_post_reentrancy; + } + if (extent_hooks->purge_forced != NULL && + !extent_hooks->purge_forced(extent_hooks, addr, size, 0, + size, ind)) { + goto label_post_reentrancy; + } + if (extent_hooks->purge_lazy != NULL && + !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size, + ind)) { + goto label_post_reentrancy; + } + /* Nothing worked. That's the application's problem. */ + label_post_reentrancy: + post_reentrancy(tsd); + } +label_done: + if (metadata_thp_madvise()) { + /* Set NOHUGEPAGE after unmap to avoid kernel defrag. 
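
The base_unmap() cascade above is a "strongest operation first, stop at the first success" fallback chain. A compact sketch of the pattern, with hypothetical stand-in operations rather than the real extent hooks:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the real hooks; each returns false on success. */
    static bool try_unmap(void *a, size_t s)        { (void)a; (void)s; return true;  }
    static bool try_decommit(void *a, size_t s)     { (void)a; (void)s; return false; }
    static bool try_purge_forced(void *a, size_t s) { (void)a; (void)s; return true;  }
    static bool try_purge_lazy(void *a, size_t s)   { (void)a; (void)s; return true;  }

    /* Strongest release first; stop at the first success so the memory ends up
     * in some consistent state even when a custom hook is missing. */
    static void
    release_cascade(void *addr, size_t size) {
        if (!try_unmap(addr, size))        { puts("unmapped");        return; }
        if (!try_decommit(addr, size))     { puts("decommitted");     return; }
        if (!try_purge_forced(addr, size)) { puts("purged (forced)"); return; }
        if (!try_purge_lazy(addr, size))   { puts("purged (lazy)");   return; }
        puts("nothing worked");
    }

    int main(void) {
        release_cascade((void *)0, 4096);   /* prints "decommitted" here */
        return 0;
    }
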
*/ + assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 && + (size & HUGEPAGE_MASK) == 0); + pages_nohuge(addr, size); + } +} + +static void +base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr, + size_t size) { + size_t sn; + + sn = *extent_sn_next; + (*extent_sn_next)++; - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t)); - *(extent_node_t **)node = base_nodes; - base_nodes = node; + extent_binit(extent, addr, size, sn); } -/* base_mtx must be held. */ -static extent_node_t * -base_chunk_alloc(size_t minsize) -{ - extent_node_t *node; - size_t csize, nsize; - void *addr; +static size_t +base_get_num_blocks(base_t *base, bool with_new_block) { + base_block_t *b = base->blocks; + assert(b != NULL); + + size_t n_blocks = with_new_block ? 2 : 1; + while (b->next != NULL) { + n_blocks++; + b = b->next; + } - assert(minsize != 0); - node = base_node_try_alloc(); - /* Allocate enough space to also carve a node out if necessary. */ - nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0; - csize = CHUNK_CEILING(minsize + nsize); - addr = chunk_alloc_base(csize); - if (addr == NULL) { - if (node != NULL) - base_node_dalloc(node); - return (NULL); - } - base_mapped += csize; - if (node == NULL) { - node = (extent_node_t *)addr; - addr = (void *)((uintptr_t)addr + nsize); - csize -= nsize; + return n_blocks; +} + +static void +base_auto_thp_switch(tsdn_t *tsdn, base_t *base) { + assert(opt_metadata_thp == metadata_thp_auto); + malloc_mutex_assert_owner(tsdn, &base->mtx); + if (base->auto_thp_switched) { + return; + } + /* Called when adding a new block. */ + bool should_switch; + if (base_ind_get(base) != 0) { + should_switch = (base_get_num_blocks(base, true) == + BASE_AUTO_THP_THRESHOLD); + } else { + should_switch = (base_get_num_blocks(base, true) == + BASE_AUTO_THP_THRESHOLD_A0); + } + if (!should_switch) { + return; + } + + base->auto_thp_switched = true; + assert(!config_stats || base->n_thp == 0); + /* Make the initial blocks THP lazily. */ + base_block_t *block = base->blocks; + while (block != NULL) { + assert((block->size & HUGEPAGE_MASK) == 0); + pages_huge(block, block->size); if (config_stats) { - base_allocated += nsize; - base_resident += PAGE_CEILING(nsize); + base->n_thp += HUGEPAGE_CEILING(block->size - + extent_bsize_get(&block->extent)) >> LG_HUGEPAGE; + } + block = block->next; + assert(block == NULL || (base_ind_get(base) == 0)); + } +} + +static void * +base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size, + size_t alignment) { + void *ret; + + assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM)); + assert(size == ALIGNMENT_CEILING(size, alignment)); + + *gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent), + alignment) - (uintptr_t)extent_addr_get(extent); + ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size); + assert(extent_bsize_get(extent) >= *gap_size + size); + extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) + + *gap_size + size), extent_bsize_get(extent) - *gap_size - size, + extent_sn_get(extent)); + return ret; +} + +static void +base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size, + void *addr, size_t size) { + if (extent_bsize_get(extent) > 0) { + /* + * Compute the index for the largest size class that does not + * exceed extent's size. 
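
base_extent_bump_alloc_helper() above carves an aligned allocation off the front of an extent and keeps whatever remains. A self-contained sketch of the same bump-with-alignment-gap arithmetic, using a simplified span struct in place of extent_t (names hypothetical):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ALIGNMENT_CEILING(s, a) (((s) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

    /* A free span of metadata space, analogous to the extent being carved. */
    typedef struct { uintptr_t addr; size_t bsize; } span_t;

    /* Bump-allocate `size` bytes at `alignment` from the front of `span`,
     * returning the aligned address and shrinking the span past the allocation;
     * the bytes skipped to reach alignment are the "gap". */
    static uintptr_t
    bump_alloc(span_t *span, size_t size, size_t alignment) {
        uintptr_t gap = ALIGNMENT_CEILING(span->addr, alignment) - span->addr;
        assert(span->bsize >= gap + size);
        uintptr_t ret = span->addr + gap;
        span->addr += gap + size;
        span->bsize -= gap + size;
        return ret;
    }

    int main(void) {
        span_t span = {0x1003, 4096};
        uintptr_t p = bump_alloc(&span, 64, 16);
        printf("allocated at %#lx, %zu bytes left\n", (unsigned long)p, span.bsize);
        return 0;
    }
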
+ */ + szind_t index_floor = + sz_size2index(extent_bsize_get(extent) + 1) - 1; + extent_heap_insert(&base->avail[index_floor], extent); + } + + if (config_stats) { + base->allocated += size; + /* + * Add one PAGE to base_resident for every page boundary that is + * crossed by the new allocation. Adjust n_thp similarly when + * metadata_thp is enabled. + */ + base->resident += PAGE_CEILING((uintptr_t)addr + size) - + PAGE_CEILING((uintptr_t)addr - gap_size); + assert(base->allocated <= base->resident); + assert(base->resident <= base->mapped); + if (metadata_thp_madvise() && (opt_metadata_thp == + metadata_thp_always || base->auto_thp_switched)) { + base->n_thp += (HUGEPAGE_CEILING((uintptr_t)addr + size) + - HUGEPAGE_CEILING((uintptr_t)addr - gap_size)) >> + LG_HUGEPAGE; + assert(base->mapped >= base->n_thp << LG_HUGEPAGE); + } + } +} + +static void * +base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size, + size_t alignment) { + void *ret; + size_t gap_size; + + ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment); + base_extent_bump_alloc_post(base, extent, gap_size, ret, size); + return ret; +} + +/* + * Allocate a block of virtual memory that is large enough to start with a + * base_block_t header, followed by an object of specified size and alignment. + * On success a pointer to the initialized base_block_t header is returned. + */ +static base_block_t * +base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks, + unsigned ind, pszind_t *pind_last, size_t *extent_sn_next, size_t size, + size_t alignment) { + alignment = ALIGNMENT_CEILING(alignment, QUANTUM); + size_t usize = ALIGNMENT_CEILING(size, alignment); + size_t header_size = sizeof(base_block_t); + size_t gap_size = ALIGNMENT_CEILING(header_size, alignment) - + header_size; + /* + * Create increasingly larger blocks in order to limit the total number + * of disjoint virtual memory ranges. Choose the next size in the page + * size class series (skipping size classes that are not a multiple of + * HUGEPAGE), or a size large enough to satisfy the requested size and + * alignment, whichever is larger. + */ + size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size + + usize)); + pszind_t pind_next = (*pind_last + 1 < NPSIZES) ? *pind_last + 1 : + *pind_last; + size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next)); + size_t block_size = (min_block_size > next_block_size) ? min_block_size + : next_block_size; + base_block_t *block = (base_block_t *)base_map(tsdn, extent_hooks, ind, + block_size); + if (block == NULL) { + return NULL; + } + + if (metadata_thp_madvise()) { + void *addr = (void *)block; + assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 && + (block_size & HUGEPAGE_MASK) == 0); + if (opt_metadata_thp == metadata_thp_always) { + pages_huge(addr, block_size); + } else if (opt_metadata_thp == metadata_thp_auto && + base != NULL) { + /* base != NULL indicates this is not a new base. 
*/ + malloc_mutex_lock(tsdn, &base->mtx); + base_auto_thp_switch(tsdn, base); + if (base->auto_thp_switched) { + pages_huge(addr, block_size); + } + malloc_mutex_unlock(tsdn, &base->mtx); } } - extent_node_init(node, NULL, addr, csize, true, true); - return (node); + + *pind_last = sz_psz2ind(block_size); + block->size = block_size; + block->next = NULL; + assert(block_size >= header_size); + base_extent_init(extent_sn_next, &block->extent, + (void *)((uintptr_t)block + header_size), block_size - header_size); + return block; } /* - * base_alloc() guarantees demand-zeroed memory, in order to make multi-page - * sparse data structures such as radix tree nodes efficient with respect to - * physical memory usage. + * Allocate an extent that is at least as large as specified size, with + * specified alignment. */ -void * -base_alloc(size_t size) -{ - void *ret; - size_t csize, usize; - extent_node_t *node; - extent_node_t key; +static extent_t * +base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) { + malloc_mutex_assert_owner(tsdn, &base->mtx); + extent_hooks_t *extent_hooks = base_extent_hooks_get(base); /* - * Round size up to nearest multiple of the cacheline size, so that - * there is no chance of false cache line sharing. + * Drop mutex during base_block_alloc(), because an extent hook will be + * called. */ - csize = CACHELINE_CEILING(size); - - usize = s2u(csize); - extent_node_init(&key, NULL, NULL, usize, false, false); - malloc_mutex_lock(&base_mtx); - node = extent_tree_szad_nsearch(&base_avail_szad, &key); - if (node != NULL) { - /* Use existing space. */ - extent_tree_szad_remove(&base_avail_szad, node); - } else { - /* Try to allocate more space. */ - node = base_chunk_alloc(csize); + malloc_mutex_unlock(tsdn, &base->mtx); + base_block_t *block = base_block_alloc(tsdn, base, extent_hooks, + base_ind_get(base), &base->pind_last, &base->extent_sn_next, size, + alignment); + malloc_mutex_lock(tsdn, &base->mtx); + if (block == NULL) { + return NULL; } - if (node == NULL) { - ret = NULL; - goto label_return; + block->next = base->blocks; + base->blocks = block; + if (config_stats) { + base->allocated += sizeof(base_block_t); + base->resident += PAGE_CEILING(sizeof(base_block_t)); + base->mapped += block->size; + if (metadata_thp_madvise() && + !(opt_metadata_thp == metadata_thp_auto + && !base->auto_thp_switched)) { + assert(base->n_thp > 0); + base->n_thp += HUGEPAGE_CEILING(sizeof(base_block_t)) >> + LG_HUGEPAGE; + } + assert(base->allocated <= base->resident); + assert(base->resident <= base->mapped); + assert(base->n_thp << LG_HUGEPAGE <= base->mapped); + } + return &block->extent; +} + +base_t * +b0get(void) { + return b0; +} + +base_t * +base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { + pszind_t pind_last = 0; + size_t extent_sn_next = 0; + base_block_t *block = base_block_alloc(tsdn, NULL, extent_hooks, ind, + &pind_last, &extent_sn_next, sizeof(base_t), QUANTUM); + if (block == NULL) { + return NULL; } - ret = extent_node_addr_get(node); - if (extent_node_size_get(node) > csize) { - extent_node_addr_set(node, (void *)((uintptr_t)ret + csize)); - extent_node_size_set(node, extent_node_size_get(node) - csize); - extent_tree_szad_insert(&base_avail_szad, node); - } else - base_node_dalloc(node); + size_t gap_size; + size_t base_alignment = CACHELINE; + size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment); + base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent, + &gap_size, base_size, base_alignment); 
+ base->ind = ind; + atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED); + if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE, + malloc_mutex_rank_exclusive)) { + base_unmap(tsdn, extent_hooks, ind, block, block->size); + return NULL; + } + base->pind_last = pind_last; + base->extent_sn_next = extent_sn_next; + base->blocks = block; + base->auto_thp_switched = false; + for (szind_t i = 0; i < NSIZES; i++) { + extent_heap_new(&base->avail[i]); + } if (config_stats) { - base_allocated += csize; - /* - * Add one PAGE to base_resident for every page boundary that is - * crossed by the new allocation. - */ - base_resident += PAGE_CEILING((uintptr_t)ret + csize) - - PAGE_CEILING((uintptr_t)ret); + base->allocated = sizeof(base_block_t); + base->resident = PAGE_CEILING(sizeof(base_block_t)); + base->mapped = block->size; + base->n_thp = (opt_metadata_thp == metadata_thp_always) && + metadata_thp_madvise() ? HUGEPAGE_CEILING(sizeof(base_block_t)) + >> LG_HUGEPAGE : 0; + assert(base->allocated <= base->resident); + assert(base->resident <= base->mapped); + assert(base->n_thp << LG_HUGEPAGE <= base->mapped); } - JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize); -label_return: - malloc_mutex_unlock(&base_mtx); - return (ret); + base_extent_bump_alloc_post(base, &block->extent, gap_size, base, + base_size); + + return base; } void -base_stats_get(size_t *allocated, size_t *resident, size_t *mapped) -{ +base_delete(tsdn_t *tsdn, base_t *base) { + extent_hooks_t *extent_hooks = base_extent_hooks_get(base); + base_block_t *next = base->blocks; + do { + base_block_t *block = next; + next = block->next; + base_unmap(tsdn, extent_hooks, base_ind_get(base), block, + block->size); + } while (next != NULL); +} - malloc_mutex_lock(&base_mtx); - assert(base_allocated <= base_resident); - assert(base_resident <= base_mapped); - *allocated = base_allocated; - *resident = base_resident; - *mapped = base_mapped; - malloc_mutex_unlock(&base_mtx); +extent_hooks_t * +base_extent_hooks_get(base_t *base) { + return (extent_hooks_t *)atomic_load_p(&base->extent_hooks, + ATOMIC_ACQUIRE); } -bool -base_boot(void) -{ +extent_hooks_t * +base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) { + extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base); + atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELEASE); + return old_extent_hooks; +} - if (malloc_mutex_init(&base_mtx)) - return (true); - extent_tree_szad_new(&base_avail_szad); - base_nodes = NULL; +static void * +base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment, + size_t *esn) { + alignment = QUANTUM_CEILING(alignment); + size_t usize = ALIGNMENT_CEILING(size, alignment); + size_t asize = usize + alignment - QUANTUM; + + extent_t *extent = NULL; + malloc_mutex_lock(tsdn, &base->mtx); + for (szind_t i = sz_size2index(asize); i < NSIZES; i++) { + extent = extent_heap_remove_first(&base->avail[i]); + if (extent != NULL) { + /* Use existing space. */ + break; + } + } + if (extent == NULL) { + /* Try to allocate more space. 
*/ + extent = base_extent_alloc(tsdn, base, usize, alignment); + } + void *ret; + if (extent == NULL) { + ret = NULL; + goto label_return; + } - return (false); + ret = base_extent_bump_alloc(base, extent, usize, alignment); + if (esn != NULL) { + *esn = extent_sn_get(extent); + } +label_return: + malloc_mutex_unlock(tsdn, &base->mtx); + return ret; +} + +/* + * base_alloc() returns zeroed memory, which is always demand-zeroed for the + * auto arenas, in order to make multi-page sparse data structures such as radix + * tree nodes efficient with respect to physical memory usage. Upon success a + * pointer to at least size bytes with specified alignment is returned. Note + * that size is rounded up to the nearest multiple of alignment to avoid false + * sharing. + */ +void * +base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) { + return base_alloc_impl(tsdn, base, size, alignment, NULL); +} + +extent_t * +base_alloc_extent(tsdn_t *tsdn, base_t *base) { + size_t esn; + extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t), + CACHELINE, &esn); + if (extent == NULL) { + return NULL; + } + extent_esn_set(extent, esn); + return extent; } void -base_prefork(void) -{ +base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident, + size_t *mapped, size_t *n_thp) { + cassert(config_stats); - malloc_mutex_prefork(&base_mtx); + malloc_mutex_lock(tsdn, &base->mtx); + assert(base->allocated <= base->resident); + assert(base->resident <= base->mapped); + *allocated = base->allocated; + *resident = base->resident; + *mapped = base->mapped; + *n_thp = base->n_thp; + malloc_mutex_unlock(tsdn, &base->mtx); } void -base_postfork_parent(void) -{ +base_prefork(tsdn_t *tsdn, base_t *base) { + malloc_mutex_prefork(tsdn, &base->mtx); +} - malloc_mutex_postfork_parent(&base_mtx); +void +base_postfork_parent(tsdn_t *tsdn, base_t *base) { + malloc_mutex_postfork_parent(tsdn, &base->mtx); } void -base_postfork_child(void) -{ +base_postfork_child(tsdn_t *tsdn, base_t *base) { + malloc_mutex_postfork_child(tsdn, &base->mtx); +} - malloc_mutex_postfork_child(&base_mtx); +bool +base_boot(tsdn_t *tsdn) { + b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default); + return (b0 == NULL); } diff --git a/deps/jemalloc/src/bin.c b/deps/jemalloc/src/bin.c new file mode 100644 index 000000000..0886bc4ea --- /dev/null +++ b/deps/jemalloc/src/bin.c @@ -0,0 +1,50 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/bin.h" +#include "jemalloc/internal/witness.h" + +const bin_info_t bin_infos[NBINS] = { +#define BIN_INFO_bin_yes(reg_size, slab_size, nregs) \ + {reg_size, slab_size, nregs, BITMAP_INFO_INITIALIZER(nregs)}, +#define BIN_INFO_bin_no(reg_size, slab_size, nregs) +#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \ + lg_delta_lookup) \ + BIN_INFO_bin_##bin((1U<<lg_grp) + (ndelta<<lg_delta), \ + (pgs << LG_PAGE), (pgs << LG_PAGE) / ((1U<<lg_grp) + \ + (ndelta<<lg_delta))) + SIZE_CLASSES +#undef BIN_INFO_bin_yes +#undef BIN_INFO_bin_no +#undef SC +}; + +bool +bin_init(bin_t *bin) { + if (malloc_mutex_init(&bin->lock, "bin", WITNESS_RANK_BIN, + malloc_mutex_rank_exclusive)) { + return true; + } + bin->slabcur = NULL; + extent_heap_new(&bin->slabs_nonfull); + extent_list_init(&bin->slabs_full); + if (config_stats) { + memset(&bin->stats, 0, sizeof(bin_stats_t)); + } + return false; +} + +void +bin_prefork(tsdn_t *tsdn, bin_t *bin) { + malloc_mutex_prefork(tsdn, &bin->lock); +} + 
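
The BIN_INFO_bin_yes expansion above fills each bin_infos row from the size-class table: the region size comes from the class's group and delta, the slab is a whole number of pages, and nregs is simply slab_size / reg_size. A tiny illustration with hypothetical class parameters:

    #include <stdio.h>

    #define LG_PAGE 12  /* assuming 4 KiB pages */

    /* How one size-class row turns into bin_info fields. */
    static void
    show_bin(unsigned lg_grp, unsigned lg_delta, unsigned ndelta, unsigned pgs) {
        unsigned reg_size = (1U << lg_grp) + (ndelta << lg_delta);
        unsigned slab_size = pgs << LG_PAGE;
        unsigned nregs = slab_size / reg_size;
        printf("reg_size=%u slab_size=%u nregs=%u\n", reg_size, slab_size, nregs);
    }

    int main(void) {
        show_bin(4, 4, 2, 1);   /* hypothetical: 48-byte regions, one-page slab */
        return 0;
    }
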
+void +bin_postfork_parent(tsdn_t *tsdn, bin_t *bin) { + malloc_mutex_postfork_parent(tsdn, &bin->lock); +} + +void +bin_postfork_child(tsdn_t *tsdn, bin_t *bin) { + malloc_mutex_postfork_child(tsdn, &bin->lock); +} diff --git a/deps/jemalloc/src/bitmap.c b/deps/jemalloc/src/bitmap.c index c733372b4..468b3178e 100644 --- a/deps/jemalloc/src/bitmap.c +++ b/deps/jemalloc/src/bitmap.c @@ -1,11 +1,15 @@ -#define JEMALLOC_BITMAP_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_BITMAP_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" /******************************************************************************/ +#ifdef BITMAP_USE_TREE + void -bitmap_info_init(bitmap_info_t *binfo, size_t nbits) -{ +bitmap_info_init(bitmap_info_t *binfo, size_t nbits) { unsigned i; size_t group_count; @@ -32,47 +36,86 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits) binfo->nbits = nbits; } -size_t -bitmap_info_ngroups(const bitmap_info_t *binfo) -{ - - return (binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP); -} - -size_t -bitmap_size(size_t nbits) -{ - bitmap_info_t binfo; - - bitmap_info_init(&binfo, nbits); - return (bitmap_info_ngroups(&binfo)); +static size_t +bitmap_info_ngroups(const bitmap_info_t *binfo) { + return binfo->levels[binfo->nlevels].group_offset; } void -bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo) -{ +bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) { size_t extra; unsigned i; /* * Bits are actually inverted with regard to the external bitmap - * interface, so the bitmap starts out with all 1 bits, except for - * trailing unused bits (if any). Note that each group uses bit 0 to - * correspond to the first logical bit in the group, so extra bits - * are the most significant bits of the last group. + * interface. */ - memset(bitmap, 0xffU, binfo->levels[binfo->nlevels].group_offset << - LG_SIZEOF_BITMAP); + + if (fill) { + /* The "filled" bitmap starts out with all 0 bits. */ + memset(bitmap, 0, bitmap_size(binfo)); + return; + } + + /* + * The "empty" bitmap starts out with all 1 bits, except for trailing + * unused bits (if any). Note that each group uses bit 0 to correspond + * to the first logical bit in the group, so extra bits are the most + * significant bits of the last group. 
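
To make the inverted-bitmap initialization concrete: an "empty" bitmap is all ones, and the unused high-order bits of the final group are cleared with a right shift. A small sketch assuming 64-bit groups:

    #include <stdint.h>
    #include <stdio.h>

    #define GROUP_NBITS      64
    #define GROUP_NBITS_MASK (GROUP_NBITS - 1)

    /* Initialize an "empty" inverted bitmap: every usable bit is 1, and the
     * unused high-order bits of the last group are shifted away. */
    static void
    bitmap_init_empty(uint64_t *groups, size_t ngroups, size_t nbits) {
        for (size_t i = 0; i < ngroups; i++) {
            groups[i] = ~(uint64_t)0;
        }
        size_t extra = (GROUP_NBITS - (nbits & GROUP_NBITS_MASK)) &
            GROUP_NBITS_MASK;
        if (extra != 0) {
            groups[ngroups - 1] >>= extra;
        }
    }

    int main(void) {
        uint64_t groups[2];
        bitmap_init_empty(groups, 2, 70);    /* 70 bits: last group keeps 6 bits */
        printf("%016llx %016llx\n", (unsigned long long)groups[0],
            (unsigned long long)groups[1]);  /* ffffffffffffffff 000000000000003f */
        return 0;
    }
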
+ */ + memset(bitmap, 0xffU, bitmap_size(binfo)); extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; - if (extra != 0) + if (extra != 0) { bitmap[binfo->levels[1].group_offset - 1] >>= extra; + } for (i = 1; i < binfo->nlevels; i++) { size_t group_count = binfo->levels[i].group_offset - binfo->levels[i-1].group_offset; extra = (BITMAP_GROUP_NBITS - (group_count & BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; - if (extra != 0) + if (extra != 0) { bitmap[binfo->levels[i+1].group_offset - 1] >>= extra; + } + } +} + +#else /* BITMAP_USE_TREE */ + +void +bitmap_info_init(bitmap_info_t *binfo, size_t nbits) { + assert(nbits > 0); + assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS)); + + binfo->ngroups = BITMAP_BITS2GROUPS(nbits); + binfo->nbits = nbits; +} + +static size_t +bitmap_info_ngroups(const bitmap_info_t *binfo) { + return binfo->ngroups; +} + +void +bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) { + size_t extra; + + if (fill) { + memset(bitmap, 0, bitmap_size(binfo)); + return; + } + + memset(bitmap, 0xffU, bitmap_size(binfo)); + extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) + & BITMAP_GROUP_NBITS_MASK; + if (extra != 0) { + bitmap[binfo->ngroups - 1] >>= extra; } } + +#endif /* BITMAP_USE_TREE */ + +size_t +bitmap_size(const bitmap_info_t *binfo) { + return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP); +} diff --git a/deps/jemalloc/src/chunk.c b/deps/jemalloc/src/chunk.c deleted file mode 100644 index 6ba1ca7a5..000000000 --- a/deps/jemalloc/src/chunk.c +++ /dev/null @@ -1,761 +0,0 @@ -#define JEMALLOC_CHUNK_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. */ - -const char *opt_dss = DSS_DEFAULT; -size_t opt_lg_chunk = 0; - -/* Used exclusively for gdump triggering. */ -static size_t curchunks; -static size_t highchunks; - -rtree_t chunks_rtree; - -/* Various chunk-related settings. */ -size_t chunksize; -size_t chunksize_mask; /* (chunksize - 1). */ -size_t chunk_npages; - -static void *chunk_alloc_default(void *new_addr, size_t size, - size_t alignment, bool *zero, bool *commit, unsigned arena_ind); -static bool chunk_dalloc_default(void *chunk, size_t size, bool committed, - unsigned arena_ind); -static bool chunk_commit_default(void *chunk, size_t size, size_t offset, - size_t length, unsigned arena_ind); -static bool chunk_decommit_default(void *chunk, size_t size, size_t offset, - size_t length, unsigned arena_ind); -static bool chunk_purge_default(void *chunk, size_t size, size_t offset, - size_t length, unsigned arena_ind); -static bool chunk_split_default(void *chunk, size_t size, size_t size_a, - size_t size_b, bool committed, unsigned arena_ind); -static bool chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, - size_t size_b, bool committed, unsigned arena_ind); - -const chunk_hooks_t chunk_hooks_default = { - chunk_alloc_default, - chunk_dalloc_default, - chunk_commit_default, - chunk_decommit_default, - chunk_purge_default, - chunk_split_default, - chunk_merge_default -}; - -/******************************************************************************/ -/* - * Function prototypes for static functions that are referenced prior to - * definition. 
- */ - -static void chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks, - extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache, - void *chunk, size_t size, bool zeroed, bool committed); - -/******************************************************************************/ - -static chunk_hooks_t -chunk_hooks_get_locked(arena_t *arena) -{ - - return (arena->chunk_hooks); -} - -chunk_hooks_t -chunk_hooks_get(arena_t *arena) -{ - chunk_hooks_t chunk_hooks; - - malloc_mutex_lock(&arena->chunks_mtx); - chunk_hooks = chunk_hooks_get_locked(arena); - malloc_mutex_unlock(&arena->chunks_mtx); - - return (chunk_hooks); -} - -chunk_hooks_t -chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks) -{ - chunk_hooks_t old_chunk_hooks; - - malloc_mutex_lock(&arena->chunks_mtx); - old_chunk_hooks = arena->chunk_hooks; - /* - * Copy each field atomically so that it is impossible for readers to - * see partially updated pointers. There are places where readers only - * need one hook function pointer (therefore no need to copy the - * entirety of arena->chunk_hooks), and stale reads do not affect - * correctness, so they perform unlocked reads. - */ -#define ATOMIC_COPY_HOOK(n) do { \ - union { \ - chunk_##n##_t **n; \ - void **v; \ - } u; \ - u.n = &arena->chunk_hooks.n; \ - atomic_write_p(u.v, chunk_hooks->n); \ -} while (0) - ATOMIC_COPY_HOOK(alloc); - ATOMIC_COPY_HOOK(dalloc); - ATOMIC_COPY_HOOK(commit); - ATOMIC_COPY_HOOK(decommit); - ATOMIC_COPY_HOOK(purge); - ATOMIC_COPY_HOOK(split); - ATOMIC_COPY_HOOK(merge); -#undef ATOMIC_COPY_HOOK - malloc_mutex_unlock(&arena->chunks_mtx); - - return (old_chunk_hooks); -} - -static void -chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks, - bool locked) -{ - static const chunk_hooks_t uninitialized_hooks = - CHUNK_HOOKS_INITIALIZER; - - if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) == - 0) { - *chunk_hooks = locked ? chunk_hooks_get_locked(arena) : - chunk_hooks_get(arena); - } -} - -static void -chunk_hooks_assure_initialized_locked(arena_t *arena, - chunk_hooks_t *chunk_hooks) -{ - - chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true); -} - -static void -chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks) -{ - - chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false); -} - -bool -chunk_register(const void *chunk, const extent_node_t *node) -{ - - assert(extent_node_addr_get(node) == chunk); - - if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node)) - return (true); - if (config_prof && opt_prof) { - size_t size = extent_node_size_get(node); - size_t nadd = (size == 0) ? 1 : size / chunksize; - size_t cur = atomic_add_z(&curchunks, nadd); - size_t high = atomic_read_z(&highchunks); - while (cur > high && atomic_cas_z(&highchunks, high, cur)) { - /* - * Don't refresh cur, because it may have decreased - * since this thread lost the highchunks update race. - */ - high = atomic_read_z(&highchunks); - } - if (cur > high && prof_gdump_get_unlocked()) - prof_gdump(); - } - - return (false); -} - -void -chunk_deregister(const void *chunk, const extent_node_t *node) -{ - bool err; - - err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL); - assert(!err); - if (config_prof && opt_prof) { - size_t size = extent_node_size_get(node); - size_t nsub = (size == 0) ? 1 : size / chunksize; - assert(atomic_read_z(&curchunks) >= nsub); - atomic_sub_z(&curchunks, nsub); - } -} - -/* - * Do first-best-fit chunk selection, i.e. select the lowest chunk that best - * fits. 
- */ -static extent_node_t * -chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad, - extent_tree_t *chunks_ad, size_t size) -{ - extent_node_t key; - - assert(size == CHUNK_CEILING(size)); - - extent_node_init(&key, arena, NULL, size, false, false); - return (extent_tree_szad_nsearch(chunks_szad, &key)); -} - -static void * -chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks, - extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache, - void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, - bool dalloc_node) -{ - void *ret; - extent_node_t *node; - size_t alloc_size, leadsize, trailsize; - bool zeroed, committed; - - assert(new_addr == NULL || alignment == chunksize); - /* - * Cached chunks use the node linkage embedded in their headers, in - * which case dalloc_node is true, and new_addr is non-NULL because - * we're operating on a specific chunk. - */ - assert(dalloc_node || new_addr != NULL); - - alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize)); - /* Beware size_t wrap-around. */ - if (alloc_size < size) - return (NULL); - malloc_mutex_lock(&arena->chunks_mtx); - chunk_hooks_assure_initialized_locked(arena, chunk_hooks); - if (new_addr != NULL) { - extent_node_t key; - extent_node_init(&key, arena, new_addr, alloc_size, false, - false); - node = extent_tree_ad_search(chunks_ad, &key); - } else { - node = chunk_first_best_fit(arena, chunks_szad, chunks_ad, - alloc_size); - } - if (node == NULL || (new_addr != NULL && extent_node_size_get(node) < - size)) { - malloc_mutex_unlock(&arena->chunks_mtx); - return (NULL); - } - leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node), - alignment) - (uintptr_t)extent_node_addr_get(node); - assert(new_addr == NULL || leadsize == 0); - assert(extent_node_size_get(node) >= leadsize + size); - trailsize = extent_node_size_get(node) - leadsize - size; - ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize); - zeroed = extent_node_zeroed_get(node); - if (zeroed) - *zero = true; - committed = extent_node_committed_get(node); - if (committed) - *commit = true; - /* Split the lead. */ - if (leadsize != 0 && - chunk_hooks->split(extent_node_addr_get(node), - extent_node_size_get(node), leadsize, size, false, arena->ind)) { - malloc_mutex_unlock(&arena->chunks_mtx); - return (NULL); - } - /* Remove node from the tree. */ - extent_tree_szad_remove(chunks_szad, node); - extent_tree_ad_remove(chunks_ad, node); - arena_chunk_cache_maybe_remove(arena, node, cache); - if (leadsize != 0) { - /* Insert the leading space as a smaller chunk. */ - extent_node_size_set(node, leadsize); - extent_tree_szad_insert(chunks_szad, node); - extent_tree_ad_insert(chunks_ad, node); - arena_chunk_cache_maybe_insert(arena, node, cache); - node = NULL; - } - if (trailsize != 0) { - /* Split the trail. */ - if (chunk_hooks->split(ret, size + trailsize, size, - trailsize, false, arena->ind)) { - if (dalloc_node && node != NULL) - arena_node_dalloc(arena, node); - malloc_mutex_unlock(&arena->chunks_mtx); - chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, - cache, ret, size + trailsize, zeroed, committed); - return (NULL); - } - /* Insert the trailing space as a smaller chunk. 
*/ - if (node == NULL) { - node = arena_node_alloc(arena); - if (node == NULL) { - malloc_mutex_unlock(&arena->chunks_mtx); - chunk_record(arena, chunk_hooks, chunks_szad, - chunks_ad, cache, ret, size + trailsize, - zeroed, committed); - return (NULL); - } - } - extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size), - trailsize, zeroed, committed); - extent_tree_szad_insert(chunks_szad, node); - extent_tree_ad_insert(chunks_ad, node); - arena_chunk_cache_maybe_insert(arena, node, cache); - node = NULL; - } - if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) { - malloc_mutex_unlock(&arena->chunks_mtx); - chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache, - ret, size, zeroed, committed); - return (NULL); - } - malloc_mutex_unlock(&arena->chunks_mtx); - - assert(dalloc_node || node != NULL); - if (dalloc_node && node != NULL) - arena_node_dalloc(arena, node); - if (*zero) { - if (!zeroed) - memset(ret, 0, size); - else if (config_debug) { - size_t i; - size_t *p = (size_t *)(uintptr_t)ret; - - JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size); - for (i = 0; i < size / sizeof(size_t); i++) - assert(p[i] == 0); - } - } - return (ret); -} - -/* - * If the caller specifies (!*zero), it is still possible to receive zeroed - * memory, in which case *zero is toggled to true. arena_chunk_alloc() takes - * advantage of this to avoid demanding zeroed chunks, but taking advantage of - * them if they are returned. - */ -static void * -chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment, - bool *zero, bool *commit, dss_prec_t dss_prec) -{ - void *ret; - chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; - - assert(size != 0); - assert((size & chunksize_mask) == 0); - assert(alignment != 0); - assert((alignment & chunksize_mask) == 0); - - /* Retained. */ - if ((ret = chunk_recycle(arena, &chunk_hooks, - &arena->chunks_szad_retained, &arena->chunks_ad_retained, false, - new_addr, size, alignment, zero, commit, true)) != NULL) - return (ret); - - /* "primary" dss. */ - if (have_dss && dss_prec == dss_prec_primary && (ret = - chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) != - NULL) - return (ret); - /* - * mmap. Requesting an address is not implemented for - * chunk_alloc_mmap(), so only call it if (new_addr == NULL). - */ - if (new_addr == NULL && (ret = chunk_alloc_mmap(size, alignment, zero, - commit)) != NULL) - return (ret); - /* "secondary" dss. */ - if (have_dss && dss_prec == dss_prec_secondary && (ret = - chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) != - NULL) - return (ret); - - /* All strategies for allocation failed. */ - return (NULL); -} - -void * -chunk_alloc_base(size_t size) -{ - void *ret; - bool zero, commit; - - /* - * Directly call chunk_alloc_mmap() rather than chunk_alloc_core() - * because it's critical that chunk_alloc_base() return untouched - * demand-zeroed virtual memory. 
- */ - zero = true; - commit = true; - ret = chunk_alloc_mmap(size, chunksize, &zero, &commit); - if (ret == NULL) - return (NULL); - if (config_valgrind) - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - - return (ret); -} - -void * -chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr, - size_t size, size_t alignment, bool *zero, bool dalloc_node) -{ - void *ret; - bool commit; - - assert(size != 0); - assert((size & chunksize_mask) == 0); - assert(alignment != 0); - assert((alignment & chunksize_mask) == 0); - - commit = true; - ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached, - &arena->chunks_ad_cached, true, new_addr, size, alignment, zero, - &commit, dalloc_node); - if (ret == NULL) - return (NULL); - assert(commit); - if (config_valgrind) - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - return (ret); -} - -static arena_t * -chunk_arena_get(unsigned arena_ind) -{ - arena_t *arena; - - /* Dodge tsd for a0 in order to avoid bootstrapping issues. */ - arena = (arena_ind == 0) ? a0get() : arena_get(tsd_fetch(), arena_ind, - false, true); - /* - * The arena we're allocating on behalf of must have been initialized - * already. - */ - assert(arena != NULL); - return (arena); -} - -static void * -chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero, - bool *commit, unsigned arena_ind) -{ - void *ret; - arena_t *arena; - - arena = chunk_arena_get(arena_ind); - ret = chunk_alloc_core(arena, new_addr, size, alignment, zero, - commit, arena->dss_prec); - if (ret == NULL) - return (NULL); - if (config_valgrind) - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - - return (ret); -} - -void * -chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr, - size_t size, size_t alignment, bool *zero, bool *commit) -{ - void *ret; - - chunk_hooks_assure_initialized(arena, chunk_hooks); - ret = chunk_hooks->alloc(new_addr, size, alignment, zero, commit, - arena->ind); - if (ret == NULL) - return (NULL); - if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default) - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize); - return (ret); -} - -static void -chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks, - extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache, - void *chunk, size_t size, bool zeroed, bool committed) -{ - bool unzeroed; - extent_node_t *node, *prev; - extent_node_t key; - - assert(!cache || !zeroed); - unzeroed = cache || !zeroed; - JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size); - - malloc_mutex_lock(&arena->chunks_mtx); - chunk_hooks_assure_initialized_locked(arena, chunk_hooks); - extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, - false, false); - node = extent_tree_ad_nsearch(chunks_ad, &key); - /* Try to coalesce forward. */ - if (node != NULL && extent_node_addr_get(node) == - extent_node_addr_get(&key) && extent_node_committed_get(node) == - committed && !chunk_hooks->merge(chunk, size, - extent_node_addr_get(node), extent_node_size_get(node), false, - arena->ind)) { - /* - * Coalesce chunk with the following address range. This does - * not change the position within chunks_ad, so only - * remove/insert from/into chunks_szad. 
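
The forward-coalescing step described above grows the following node downward to absorb the freed range, which is why only the size/address tree needs a remove/insert while the node's position in the address tree is unchanged. A minimal sketch of the merge itself on plain (addr, size) pairs:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* A free range of address space. */
    typedef struct { uintptr_t addr; size_t size; } range_t;

    /* If `next` starts exactly where the freed range ends, absorb the freed
     * range by moving next's start backwards; an address-ordered index is
     * unaffected, but a size-ordered index would need remove/re-insert
     * because the size grew. */
    static bool
    coalesce_forward(const range_t *freed, range_t *next) {
        if (next->addr != freed->addr + freed->size) {
            return false;                      /* not adjacent */
        }
        next->addr = freed->addr;
        next->size += freed->size;
        return true;
    }

    int main(void) {
        range_t freed = {0x200000, 0x100000};
        range_t next  = {0x300000, 0x200000};
        if (coalesce_forward(&freed, &next)) {
            printf("coalesced: addr=%#lx size=%#zx\n",
                (unsigned long)next.addr, next.size);
        }
        return 0;
    }
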
- */ - extent_tree_szad_remove(chunks_szad, node); - arena_chunk_cache_maybe_remove(arena, node, cache); - extent_node_addr_set(node, chunk); - extent_node_size_set(node, size + extent_node_size_get(node)); - extent_node_zeroed_set(node, extent_node_zeroed_get(node) && - !unzeroed); - extent_tree_szad_insert(chunks_szad, node); - arena_chunk_cache_maybe_insert(arena, node, cache); - } else { - /* Coalescing forward failed, so insert a new node. */ - node = arena_node_alloc(arena); - if (node == NULL) { - /* - * Node allocation failed, which is an exceedingly - * unlikely failure. Leak chunk after making sure its - * pages have already been purged, so that this is only - * a virtual memory leak. - */ - if (cache) { - chunk_purge_wrapper(arena, chunk_hooks, chunk, - size, 0, size); - } - goto label_return; - } - extent_node_init(node, arena, chunk, size, !unzeroed, - committed); - extent_tree_ad_insert(chunks_ad, node); - extent_tree_szad_insert(chunks_szad, node); - arena_chunk_cache_maybe_insert(arena, node, cache); - } - - /* Try to coalesce backward. */ - prev = extent_tree_ad_prev(chunks_ad, node); - if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) + - extent_node_size_get(prev)) == chunk && - extent_node_committed_get(prev) == committed && - !chunk_hooks->merge(extent_node_addr_get(prev), - extent_node_size_get(prev), chunk, size, false, arena->ind)) { - /* - * Coalesce chunk with the previous address range. This does - * not change the position within chunks_ad, so only - * remove/insert node from/into chunks_szad. - */ - extent_tree_szad_remove(chunks_szad, prev); - extent_tree_ad_remove(chunks_ad, prev); - arena_chunk_cache_maybe_remove(arena, prev, cache); - extent_tree_szad_remove(chunks_szad, node); - arena_chunk_cache_maybe_remove(arena, node, cache); - extent_node_addr_set(node, extent_node_addr_get(prev)); - extent_node_size_set(node, extent_node_size_get(prev) + - extent_node_size_get(node)); - extent_node_zeroed_set(node, extent_node_zeroed_get(prev) && - extent_node_zeroed_get(node)); - extent_tree_szad_insert(chunks_szad, node); - arena_chunk_cache_maybe_insert(arena, node, cache); - - arena_node_dalloc(arena, prev); - } - -label_return: - malloc_mutex_unlock(&arena->chunks_mtx); -} - -void -chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, - size_t size, bool committed) -{ - - assert(chunk != NULL); - assert(CHUNK_ADDR2BASE(chunk) == chunk); - assert(size != 0); - assert((size & chunksize_mask) == 0); - - chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached, - &arena->chunks_ad_cached, true, chunk, size, false, committed); - arena_maybe_purge(arena); -} - -void -chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, - size_t size, bool zeroed, bool committed) -{ - - assert(chunk != NULL); - assert(CHUNK_ADDR2BASE(chunk) == chunk); - assert(size != 0); - assert((size & chunksize_mask) == 0); - - chunk_hooks_assure_initialized(arena, chunk_hooks); - /* Try to deallocate. */ - if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind)) - return; - /* Try to decommit; purge if that fails. 
*/ - if (committed) { - committed = chunk_hooks->decommit(chunk, size, 0, size, - arena->ind); - } - zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size, - arena->ind); - chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained, - &arena->chunks_ad_retained, false, chunk, size, zeroed, committed); -} - -static bool -chunk_dalloc_default(void *chunk, size_t size, bool committed, - unsigned arena_ind) -{ - - if (!have_dss || !chunk_in_dss(chunk)) - return (chunk_dalloc_mmap(chunk, size)); - return (true); -} - -void -chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, - size_t size, bool committed) -{ - - chunk_hooks_assure_initialized(arena, chunk_hooks); - chunk_hooks->dalloc(chunk, size, committed, arena->ind); - if (config_valgrind && chunk_hooks->dalloc != chunk_dalloc_default) - JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size); -} - -static bool -chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length, - unsigned arena_ind) -{ - - return (pages_commit((void *)((uintptr_t)chunk + (uintptr_t)offset), - length)); -} - -static bool -chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length, - unsigned arena_ind) -{ - - return (pages_decommit((void *)((uintptr_t)chunk + (uintptr_t)offset), - length)); -} - -bool -chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length) -{ - - assert(chunk != NULL); - assert(CHUNK_ADDR2BASE(chunk) == chunk); - assert((offset & PAGE_MASK) == 0); - assert(length != 0); - assert((length & PAGE_MASK) == 0); - - return (pages_purge((void *)((uintptr_t)chunk + (uintptr_t)offset), - length)); -} - -static bool -chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length, - unsigned arena_ind) -{ - - return (chunk_purge_arena(chunk_arena_get(arena_ind), chunk, offset, - length)); -} - -bool -chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, - size_t size, size_t offset, size_t length) -{ - - chunk_hooks_assure_initialized(arena, chunk_hooks); - return (chunk_hooks->purge(chunk, size, offset, length, arena->ind)); -} - -static bool -chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b, - bool committed, unsigned arena_ind) -{ - - if (!maps_coalesce) - return (true); - return (false); -} - -static bool -chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b, - bool committed, unsigned arena_ind) -{ - - if (!maps_coalesce) - return (true); - if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b)) - return (true); - - return (false); -} - -static rtree_node_elm_t * -chunks_rtree_node_alloc(size_t nelms) -{ - - return ((rtree_node_elm_t *)base_alloc(nelms * - sizeof(rtree_node_elm_t))); -} - -bool -chunk_boot(void) -{ -#ifdef _WIN32 - SYSTEM_INFO info; - GetSystemInfo(&info); - - /* - * Verify actual page size is equal to or an integral multiple of - * configured page size. - */ - if (info.dwPageSize & ((1U << LG_PAGE) - 1)) - return (true); - - /* - * Configure chunksize (if not set) to match granularity (usually 64K), - * so pages_map will always take fast path. - */ - if (!opt_lg_chunk) { - opt_lg_chunk = jemalloc_ffs((int)info.dwAllocationGranularity) - - 1; - } -#else - if (!opt_lg_chunk) - opt_lg_chunk = LG_CHUNK_DEFAULT; -#endif - - /* Set variables according to the value of opt_lg_chunk. 
*/ - chunksize = (ZU(1) << opt_lg_chunk); - assert(chunksize >= PAGE); - chunksize_mask = chunksize - 1; - chunk_npages = (chunksize >> LG_PAGE); - - if (have_dss && chunk_dss_boot()) - return (true); - if (rtree_new(&chunks_rtree, (ZU(1) << (LG_SIZEOF_PTR+3)) - - opt_lg_chunk, chunks_rtree_node_alloc, NULL)) - return (true); - - return (false); -} - -void -chunk_prefork(void) -{ - - chunk_dss_prefork(); -} - -void -chunk_postfork_parent(void) -{ - - chunk_dss_postfork_parent(); -} - -void -chunk_postfork_child(void) -{ - - chunk_dss_postfork_child(); -} diff --git a/deps/jemalloc/src/chunk_dss.c b/deps/jemalloc/src/chunk_dss.c deleted file mode 100644 index 61fc91696..000000000 --- a/deps/jemalloc/src/chunk_dss.c +++ /dev/null @@ -1,214 +0,0 @@ -#define JEMALLOC_CHUNK_DSS_C_ -#include "jemalloc/internal/jemalloc_internal.h" -/******************************************************************************/ -/* Data. */ - -const char *dss_prec_names[] = { - "disabled", - "primary", - "secondary", - "N/A" -}; - -/* Current dss precedence default, used when creating new arenas. */ -static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT; - -/* - * Protects sbrk() calls. This avoids malloc races among threads, though it - * does not protect against races with threads that call sbrk() directly. - */ -static malloc_mutex_t dss_mtx; - -/* Base address of the DSS. */ -static void *dss_base; -/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */ -static void *dss_prev; -/* Current upper limit on DSS addresses. */ -static void *dss_max; - -/******************************************************************************/ - -static void * -chunk_dss_sbrk(intptr_t increment) -{ - -#ifdef JEMALLOC_DSS - return (sbrk(increment)); -#else - not_implemented(); - return (NULL); -#endif -} - -dss_prec_t -chunk_dss_prec_get(void) -{ - dss_prec_t ret; - - if (!have_dss) - return (dss_prec_disabled); - malloc_mutex_lock(&dss_mtx); - ret = dss_prec_default; - malloc_mutex_unlock(&dss_mtx); - return (ret); -} - -bool -chunk_dss_prec_set(dss_prec_t dss_prec) -{ - - if (!have_dss) - return (dss_prec != dss_prec_disabled); - malloc_mutex_lock(&dss_mtx); - dss_prec_default = dss_prec; - malloc_mutex_unlock(&dss_mtx); - return (false); -} - -void * -chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment, - bool *zero, bool *commit) -{ - cassert(have_dss); - assert(size > 0 && (size & chunksize_mask) == 0); - assert(alignment > 0 && (alignment & chunksize_mask) == 0); - - /* - * sbrk() uses a signed increment argument, so take care not to - * interpret a huge allocation request as a negative increment. - */ - if ((intptr_t)size < 0) - return (NULL); - - malloc_mutex_lock(&dss_mtx); - if (dss_prev != (void *)-1) { - - /* - * The loop is necessary to recover from races with other - * threads that are using the DSS for something other than - * malloc. - */ - do { - void *ret, *cpad, *dss_next; - size_t gap_size, cpad_size; - intptr_t incr; - /* Avoid an unnecessary system call. */ - if (new_addr != NULL && dss_max != new_addr) - break; - - /* Get the current end of the DSS. */ - dss_max = chunk_dss_sbrk(0); - - /* Make sure the earlier condition still holds. */ - if (new_addr != NULL && dss_max != new_addr) - break; - - /* - * Calculate how much padding is necessary to - * chunk-align the end of the DSS. - */ - gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) & - chunksize_mask; - /* - * Compute how much chunk-aligned pad space (if any) is - * necessary to satisfy alignment. 
This space can be - * recycled for later use. - */ - cpad = (void *)((uintptr_t)dss_max + gap_size); - ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max, - alignment); - cpad_size = (uintptr_t)ret - (uintptr_t)cpad; - dss_next = (void *)((uintptr_t)ret + size); - if ((uintptr_t)ret < (uintptr_t)dss_max || - (uintptr_t)dss_next < (uintptr_t)dss_max) { - /* Wrap-around. */ - malloc_mutex_unlock(&dss_mtx); - return (NULL); - } - incr = gap_size + cpad_size + size; - dss_prev = chunk_dss_sbrk(incr); - if (dss_prev == dss_max) { - /* Success. */ - dss_max = dss_next; - malloc_mutex_unlock(&dss_mtx); - if (cpad_size != 0) { - chunk_hooks_t chunk_hooks = - CHUNK_HOOKS_INITIALIZER; - chunk_dalloc_wrapper(arena, - &chunk_hooks, cpad, cpad_size, - true); - } - if (*zero) { - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED( - ret, size); - memset(ret, 0, size); - } - if (!*commit) - *commit = pages_decommit(ret, size); - return (ret); - } - } while (dss_prev != (void *)-1); - } - malloc_mutex_unlock(&dss_mtx); - - return (NULL); -} - -bool -chunk_in_dss(void *chunk) -{ - bool ret; - - cassert(have_dss); - - malloc_mutex_lock(&dss_mtx); - if ((uintptr_t)chunk >= (uintptr_t)dss_base - && (uintptr_t)chunk < (uintptr_t)dss_max) - ret = true; - else - ret = false; - malloc_mutex_unlock(&dss_mtx); - - return (ret); -} - -bool -chunk_dss_boot(void) -{ - - cassert(have_dss); - - if (malloc_mutex_init(&dss_mtx)) - return (true); - dss_base = chunk_dss_sbrk(0); - dss_prev = dss_base; - dss_max = dss_base; - - return (false); -} - -void -chunk_dss_prefork(void) -{ - - if (have_dss) - malloc_mutex_prefork(&dss_mtx); -} - -void -chunk_dss_postfork_parent(void) -{ - - if (have_dss) - malloc_mutex_postfork_parent(&dss_mtx); -} - -void -chunk_dss_postfork_child(void) -{ - - if (have_dss) - malloc_mutex_postfork_child(&dss_mtx); -} - -/******************************************************************************/ diff --git a/deps/jemalloc/src/chunk_mmap.c b/deps/jemalloc/src/chunk_mmap.c deleted file mode 100644 index b9ba74191..000000000 --- a/deps/jemalloc/src/chunk_mmap.c +++ /dev/null @@ -1,80 +0,0 @@ -#define JEMALLOC_CHUNK_MMAP_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ - -static void * -chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit) -{ - void *ret; - size_t alloc_size; - - alloc_size = size + alignment - PAGE; - /* Beware size_t wrap-around. */ - if (alloc_size < size) - return (NULL); - do { - void *pages; - size_t leadsize; - pages = pages_map(NULL, alloc_size); - if (pages == NULL) - return (NULL); - leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) - - (uintptr_t)pages; - ret = pages_trim(pages, alloc_size, leadsize, size); - } while (ret == NULL); - - assert(ret != NULL); - *zero = true; - if (!*commit) - *commit = pages_decommit(ret, size); - return (ret); -} - -void * -chunk_alloc_mmap(size_t size, size_t alignment, bool *zero, bool *commit) -{ - void *ret; - size_t offset; - - /* - * Ideally, there would be a way to specify alignment to mmap() (like - * NetBSD has), but in the absence of such a feature, we have to work - * hard to efficiently create aligned mappings. The reliable, but - * slow method is to create a mapping that is over-sized, then trim the - * excess. However, that always results in one or two calls to - * pages_unmap(). 
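
The padding computation in chunk_alloc_dss() above breaks down into a gap that chunk-aligns the current break and an optional chunk-aligned pad (cpad) that reaches the requested alignment; the sbrk increment is the sum of both plus the request. A worked example with a hypothetical break address and sizes:

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGNMENT_CEILING(s, a) (((s) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

    int main(void) {
        /* Hypothetical values: current sbrk break, 2 MiB chunks, 4 MiB alignment. */
        uintptr_t dss_max   = 0x804321;
        size_t    chunksize = 0x200000;
        size_t    alignment = 0x400000;
        size_t    size      = 0x200000;

        /* Pad so the returned chunk starts chunk-aligned... */
        size_t gap_size = (chunksize - (dss_max & (chunksize - 1))) &
            (chunksize - 1);
        /* ...and note any chunk-aligned pad needed to reach the requested
         * alignment; that pad can be recycled later. */
        uintptr_t cpad = dss_max + gap_size;
        uintptr_t ret  = ALIGNMENT_CEILING(dss_max, alignment);
        size_t cpad_size = ret - cpad;
        intptr_t incr = (intptr_t)(gap_size + cpad_size + size); /* sbrk arg */

        printf("gap=%#zx cpad_size=%#zx ret=%#lx incr=%#lx\n",
            gap_size, cpad_size, (unsigned long)ret, (unsigned long)incr);
        return 0;
    }
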
- * - * Optimistically try mapping precisely the right amount before falling - * back to the slow method, with the expectation that the optimistic - * approach works most of the time. - */ - - assert(alignment != 0); - assert((alignment & chunksize_mask) == 0); - - ret = pages_map(NULL, size); - if (ret == NULL) - return (NULL); - offset = ALIGNMENT_ADDR2OFFSET(ret, alignment); - if (offset != 0) { - pages_unmap(ret, size); - return (chunk_alloc_mmap_slow(size, alignment, zero, commit)); - } - - assert(ret != NULL); - *zero = true; - if (!*commit) - *commit = pages_decommit(ret, size); - return (ret); -} - -bool -chunk_dalloc_mmap(void *chunk, size_t size) -{ - - if (config_munmap) - pages_unmap(chunk, size); - - return (!config_munmap); -} diff --git a/deps/jemalloc/src/ckh.c b/deps/jemalloc/src/ckh.c index 53a1c1ef1..e95e0a3ed 100644 --- a/deps/jemalloc/src/ckh.c +++ b/deps/jemalloc/src/ckh.c @@ -34,8 +34,18 @@ * respectively. * ******************************************************************************/ -#define JEMALLOC_CKH_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_CKH_C_ +#include "jemalloc/internal/jemalloc_preamble.h" + +#include "jemalloc/internal/ckh.h" + +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/prng.h" +#include "jemalloc/internal/util.h" /******************************************************************************/ /* Function prototypes for non-inline static functions. */ @@ -49,27 +59,26 @@ static void ckh_shrink(tsd_t *tsd, ckh_t *ckh); * Search bucket for key and return the cell number if found; SIZE_T_MAX * otherwise. */ -JEMALLOC_INLINE_C size_t -ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) -{ +static size_t +ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) { ckhc_t *cell; unsigned i; for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; - if (cell->key != NULL && ckh->keycomp(key, cell->key)) - return ((bucket << LG_CKH_BUCKET_CELLS) + i); + if (cell->key != NULL && ckh->keycomp(key, cell->key)) { + return (bucket << LG_CKH_BUCKET_CELLS) + i; + } } - return (SIZE_T_MAX); + return SIZE_T_MAX; } /* * Search table for key and return cell number if found; SIZE_T_MAX otherwise. */ -JEMALLOC_INLINE_C size_t -ckh_isearch(ckh_t *ckh, const void *key) -{ +static size_t +ckh_isearch(ckh_t *ckh, const void *key) { size_t hashes[2], bucket, cell; assert(ckh != NULL); @@ -79,19 +88,19 @@ ckh_isearch(ckh_t *ckh, const void *key) /* Search primary bucket. */ bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); cell = ckh_bucket_search(ckh, bucket, key); - if (cell != SIZE_T_MAX) - return (cell); + if (cell != SIZE_T_MAX) { + return cell; + } /* Search secondary bucket. */ bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); cell = ckh_bucket_search(ckh, bucket, key); - return (cell); + return cell; } -JEMALLOC_INLINE_C bool +static bool ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, - const void *data) -{ + const void *data) { ckhc_t *cell; unsigned offset, i; @@ -99,7 +108,8 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, * Cycle through the cells in the bucket, starting at a random position. * The randomness avoids worst-case search overhead as buckets fill up. 
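The deleted chunk_alloc_mmap() works as its comment says: optimistically mmap() exactly size bytes and keep the mapping only if it already satisfies the alignment, otherwise over-allocate and trim the excess. A minimal POSIX-flavored sketch of the same idea follows; map_aligned() is a hypothetical helper rather than jemalloc's code, it trims with two munmap() calls where jemalloc uses pages_trim(), and it assumes size and alignment are page multiples with alignment a power of two.

/* Sketch: obtain a size-byte mapping aligned to `alignment` (hypothetical helper). */
#define _DEFAULT_SOURCE
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

static void *map_aligned(size_t size, size_t alignment) {
	/* Optimistic attempt: map exactly the right amount and hope it is aligned. */
	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return NULL;
	if (((uintptr_t)p & (alignment - 1)) == 0)
		return p;
	/* Misaligned: give it back, over-allocate, then trim the slack. */
	munmap(p, size);
	size_t alloc_size = size + alignment;
	void *raw = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (raw == MAP_FAILED)
		return NULL;
	uintptr_t base = (uintptr_t)raw;
	uintptr_t aligned = (base + alignment - 1) & ~((uintptr_t)alignment - 1);
	size_t lead = aligned - base;
	size_t trail = alloc_size - lead - size;
	if (lead != 0)
		munmap(raw, lead);                        /* drop leading slack */
	if (trail != 0)
		munmap((void *)(aligned + size), trail);  /* drop trailing slack */
	return (void *)aligned;
}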
*/ - prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C); + offset = (unsigned)prng_lg_range_u64(&ckh->prng_state, + LG_CKH_BUCKET_CELLS); for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))]; @@ -107,11 +117,11 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, cell->key = key; cell->data = data; ckh->count++; - return (false); + return false; } } - return (true); + return true; } /* @@ -120,10 +130,9 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, * eviction/relocation procedure until either success or detection of an * eviction/relocation bucket cycle. */ -JEMALLOC_INLINE_C bool +static bool ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, - void const **argdata) -{ + void const **argdata) { const void *key, *data, *tkey, *tdata; ckhc_t *cell; size_t hashes[2], bucket, tbucket; @@ -141,7 +150,8 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, * were an item for which both hashes indicated the same * bucket. */ - prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C); + i = (unsigned)prng_lg_range_u64(&ckh->prng_state, + LG_CKH_BUCKET_CELLS); cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; assert(cell->key != NULL); @@ -181,18 +191,18 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, if (tbucket == argbucket) { *argkey = key; *argdata = data; - return (true); + return true; } bucket = tbucket; - if (!ckh_try_bucket_insert(ckh, bucket, key, data)) - return (false); + if (!ckh_try_bucket_insert(ckh, bucket, key, data)) { + return false; + } } } -JEMALLOC_INLINE_C bool -ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) -{ +static bool +ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) { size_t hashes[2], bucket; const void *key = *argkey; const void *data = *argdata; @@ -201,27 +211,28 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) /* Try to insert in primary bucket. */ bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); - if (!ckh_try_bucket_insert(ckh, bucket, key, data)) - return (false); + if (!ckh_try_bucket_insert(ckh, bucket, key, data)) { + return false; + } /* Try to insert in secondary bucket. */ bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); - if (!ckh_try_bucket_insert(ckh, bucket, key, data)) - return (false); + if (!ckh_try_bucket_insert(ckh, bucket, key, data)) { + return false; + } /* * Try to find a place for this item via iterative eviction/relocation. */ - return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata)); + return ckh_evict_reloc_insert(ckh, bucket, argkey, argdata); } /* * Try to rebuild the hash table from scratch by inserting all items from the * old table into the new. 
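The control flow in ckh_isearch()/ckh_try_insert() above is standard cuckoo hashing: two hash values give every key exactly two candidate buckets, so a lookup probes at most two buckets (each holding 1 << LG_CKH_BUCKET_CELLS cells) and an insert tries the primary, then the secondary, and only then starts evicting. The toy below shows the two-choice skeleton with one cell per bucket and made-up hash functions; it is not jemalloc's code, and it simply reports failure where ckh_evict_reloc_insert() would begin relocating occupants.

/* Toy two-choice (cuckoo-style) table: each key has exactly two candidate buckets. */
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

#define LG_BUCKETS 4
#define NBUCKETS   ((size_t)1 << LG_BUCKETS)

struct cell { const char *key; int value; };
static struct cell tab[NBUCKETS];

/* Two independent toy hashes standing in for the real 128-bit hash pair. */
static size_t h1(const char *k) {
	size_t h = 2166136261u;
	while (*k) h = (h ^ (unsigned char)*k++) * 16777619u;
	return h;
}
static size_t h2(const char *k) {
	size_t h = 5381;
	while (*k) h = h * 33 + (unsigned char)*k++;
	return h;
}

static bool toy_search(const char *key, int *out) {
	size_t b1 = h1(key) & (NBUCKETS - 1);   /* primary bucket */
	size_t b2 = h2(key) & (NBUCKETS - 1);   /* secondary bucket */
	if (tab[b1].key != NULL && strcmp(tab[b1].key, key) == 0) { *out = tab[b1].value; return true; }
	if (tab[b2].key != NULL && strcmp(tab[b2].key, key) == 0) { *out = tab[b2].value; return true; }
	return false;                            /* the key cannot live anywhere else */
}

/* false = success, true = failure, matching the ckh return convention. */
static bool toy_insert(const char *key, int value) {
	size_t b1 = h1(key) & (NBUCKETS - 1);
	size_t b2 = h2(key) & (NBUCKETS - 1);
	if (tab[b1].key == NULL) { tab[b1] = (struct cell){key, value}; return false; }
	if (tab[b2].key == NULL) { tab[b2] = (struct cell){key, value}; return false; }
	/* Both candidates occupied: the real ckh would evict one occupant to its
	 * alternate bucket and retry; the toy just gives up. */
	return true;
}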
*/ -JEMALLOC_INLINE_C bool -ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) -{ +static bool +ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) { size_t count, i, nins; const void *key, *data; @@ -233,22 +244,20 @@ ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) data = aTab[i].data; if (ckh_try_insert(ckh, &key, &data)) { ckh->count = count; - return (true); + return true; } nins++; } } - return (false); + return false; } static bool -ckh_grow(tsd_t *tsd, ckh_t *ckh) -{ +ckh_grow(tsd_t *tsd, ckh_t *ckh) { bool ret; ckhc_t *tab, *ttab; - size_t lg_curcells; - unsigned lg_prevbuckets; + unsigned lg_prevbuckets, lg_curcells; #ifdef CKH_COUNT ckh->ngrows++; @@ -265,13 +274,13 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh) size_t usize; lg_curcells++; - usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); - if (usize == 0) { + usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { ret = true; goto label_return; } - tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, - true, NULL); + tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, + true, NULL, true, arena_ichoose(tsd, NULL)); if (tab == NULL) { ret = true; goto label_return; @@ -283,27 +292,26 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh) ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; if (!ckh_rebuild(ckh, tab)) { - idalloctm(tsd, tab, tcache_get(tsd, false), true); + idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true); break; } /* Rebuilding failed, so back out partially rebuilt table. */ - idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true); + idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true); ckh->tab = tab; ckh->lg_curbuckets = lg_prevbuckets; } ret = false; label_return: - return (ret); + return ret; } static void -ckh_shrink(tsd_t *tsd, ckh_t *ckh) -{ +ckh_shrink(tsd_t *tsd, ckh_t *ckh) { ckhc_t *tab, *ttab; - size_t lg_curcells, usize; - unsigned lg_prevbuckets; + size_t usize; + unsigned lg_prevbuckets, lg_curcells; /* * It is possible (though unlikely, given well behaved hashes) that the @@ -311,11 +319,12 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) */ lg_prevbuckets = ckh->lg_curbuckets; lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1; - usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); - if (usize == 0) + usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { return; - tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true, - NULL); + } + tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL, + true, arena_ichoose(tsd, NULL)); if (tab == NULL) { /* * An OOM error isn't worth propagating, since it doesn't @@ -330,7 +339,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; if (!ckh_rebuild(ckh, tab)) { - idalloctm(tsd, tab, tcache_get(tsd, false), true); + idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true); #ifdef CKH_COUNT ckh->nshrinks++; #endif @@ -338,7 +347,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) } /* Rebuilding failed, so back out partially rebuilt table. 
*/ - idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true); + idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true); ckh->tab = tab; ckh->lg_curbuckets = lg_prevbuckets; #ifdef CKH_COUNT @@ -348,8 +357,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, - ckh_keycomp_t *keycomp) -{ + ckh_keycomp_t *keycomp) { bool ret; size_t mincells, usize; unsigned lg_mincells; @@ -379,20 +387,21 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2; for (lg_mincells = LG_CKH_BUCKET_CELLS; (ZU(1) << lg_mincells) < mincells; - lg_mincells++) - ; /* Do nothing. */ + lg_mincells++) { + /* Do nothing. */ + } ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; ckh->hash = hash; ckh->keycomp = keycomp; - usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE); - if (usize == 0) { + usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE); + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { ret = true; goto label_return; } - ckh->tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true, - NULL); + ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, + NULL, true, arena_ichoose(tsd, NULL)); if (ckh->tab == NULL) { ret = true; goto label_return; @@ -400,13 +409,11 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ret = false; label_return: - return (ret); + return ret; } void -ckh_delete(tsd_t *tsd, ckh_t *ckh) -{ - +ckh_delete(tsd_t *tsd, ckh_t *ckh) { assert(ckh != NULL); #ifdef CKH_VERBOSE @@ -421,43 +428,42 @@ ckh_delete(tsd_t *tsd, ckh_t *ckh) (unsigned long long)ckh->nrelocs); #endif - idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true); - if (config_debug) - memset(ckh, 0x5a, sizeof(ckh_t)); + idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true); + if (config_debug) { + memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t)); + } } size_t -ckh_count(ckh_t *ckh) -{ - +ckh_count(ckh_t *ckh) { assert(ckh != NULL); - return (ckh->count); + return ckh->count; } bool -ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) -{ +ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) { size_t i, ncells; for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS)); i < ncells; i++) { if (ckh->tab[i].key != NULL) { - if (key != NULL) + if (key != NULL) { *key = (void *)ckh->tab[i].key; - if (data != NULL) + } + if (data != NULL) { *data = (void *)ckh->tab[i].data; + } *tabind = i + 1; - return (false); + return false; } } - return (true); + return true; } bool -ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) -{ +ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) { bool ret; assert(ckh != NULL); @@ -476,23 +482,24 @@ ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) ret = false; label_return: - return (ret); + return ret; } bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, - void **data) -{ + void **data) { size_t cell; assert(ckh != NULL); cell = ckh_isearch(ckh, searchkey); if (cell != SIZE_T_MAX) { - if (key != NULL) + if (key != NULL) { *key = (void *)ckh->tab[cell].key; - if (data != NULL) + } + if (data != NULL) { *data = (void *)ckh->tab[cell].data; + } ckh->tab[cell].key = NULL; ckh->tab[cell].data = NULL; /* Not necessary. 
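One detail of ckh_grow()/ckh_shrink() worth spelling out: the new table is swapped in first, ckh_rebuild() then reinserts every item from the old table, and only the failure path swaps the old table back, so a resize that hits an eviction cycle leaves the hash fully usable. The sketch below condenses that install-then-rollback shape with hypothetical names and plain calloc()/free() in place of ipallocztm()/idalloctm(); unlike the real ckh_grow(), it does not retry with a still larger table.

/* Sketch of the install/rebuild/rollback shape used by ckh_grow() and ckh_shrink(). */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

struct table { void **cells; size_t ncells; };

/* Stand-in for ckh_rebuild(): true means "rebuild failed" (e.g. an eviction cycle). */
static bool reinsert_all(struct table *t, void **old_cells, size_t old_ncells) {
	for (size_t i = 0; i < old_ncells; i++) {
		if (old_cells[i] == NULL)
			continue;
		size_t slot = ((uintptr_t)old_cells[i] >> 4) % t->ncells; /* toy placement */
		if (t->cells[slot] != NULL)
			return true;
		t->cells[slot] = old_cells[i];
	}
	return false;
}

/* false = success, true = failure, matching the ckh return convention. */
static bool grow(struct table *t) {
	size_t new_ncells = t->ncells * 2;
	void **new_cells = calloc(new_ncells, sizeof(*new_cells));
	if (new_cells == NULL)
		return true;                     /* OOM: table left untouched */
	void **old_cells = t->cells;
	size_t old_ncells = t->ncells;
	t->cells = new_cells;                    /* install the new table first */
	t->ncells = new_ncells;
	if (!reinsert_all(t, old_cells, old_ncells)) {
		free(old_cells);                 /* rebuild succeeded; old table dies */
		return false;
	}
	free(new_cells);                         /* back out the partially rebuilt table */
	t->cells = old_cells;
	t->ncells = old_ncells;
	return true;
}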
*/ @@ -505,51 +512,47 @@ ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, ckh_shrink(tsd, ckh); } - return (false); + return false; } - return (true); + return true; } bool -ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) -{ +ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) { size_t cell; assert(ckh != NULL); cell = ckh_isearch(ckh, searchkey); if (cell != SIZE_T_MAX) { - if (key != NULL) + if (key != NULL) { *key = (void *)ckh->tab[cell].key; - if (data != NULL) + } + if (data != NULL) { *data = (void *)ckh->tab[cell].data; - return (false); + } + return false; } - return (true); + return true; } void -ckh_string_hash(const void *key, size_t r_hash[2]) -{ - +ckh_string_hash(const void *key, size_t r_hash[2]) { hash(key, strlen((const char *)key), 0x94122f33U, r_hash); } bool -ckh_string_keycomp(const void *k1, const void *k2) -{ - - assert(k1 != NULL); - assert(k2 != NULL); +ckh_string_keycomp(const void *k1, const void *k2) { + assert(k1 != NULL); + assert(k2 != NULL); - return (strcmp((char *)k1, (char *)k2) ? false : true); + return !strcmp((char *)k1, (char *)k2); } void -ckh_pointer_hash(const void *key, size_t r_hash[2]) -{ +ckh_pointer_hash(const void *key, size_t r_hash[2]) { union { const void *v; size_t i; @@ -561,8 +564,6 @@ ckh_pointer_hash(const void *key, size_t r_hash[2]) } bool -ckh_pointer_keycomp(const void *k1, const void *k2) -{ - - return ((k1 == k2) ? true : false); +ckh_pointer_keycomp(const void *k1, const void *k2) { + return (k1 == k2); } diff --git a/deps/jemalloc/src/ctl.c b/deps/jemalloc/src/ctl.c index 3de8e602d..1e713a3d1 100644 --- a/deps/jemalloc/src/ctl.c +++ b/deps/jemalloc/src/ctl.c @@ -1,69 +1,63 @@ -#define JEMALLOC_CTL_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_CTL_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/extent_dss.h" +#include "jemalloc/internal/extent_mmap.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/nstime.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/util.h" /******************************************************************************/ /* Data. */ /* * ctl_mtx protects the following: - * - ctl_stats.* + * - ctl_stats->* */ static malloc_mutex_t ctl_mtx; static bool ctl_initialized; -static uint64_t ctl_epoch; -static ctl_stats_t ctl_stats; +static ctl_stats_t *ctl_stats; +static ctl_arenas_t *ctl_arenas; /******************************************************************************/ /* Helpers for named and indexed nodes. */ -JEMALLOC_INLINE_C const ctl_named_node_t * -ctl_named_node(const ctl_node_t *node) -{ - +static const ctl_named_node_t * +ctl_named_node(const ctl_node_t *node) { return ((node->named) ? (const ctl_named_node_t *)node : NULL); } -JEMALLOC_INLINE_C const ctl_named_node_t * -ctl_named_children(const ctl_named_node_t *node, int index) -{ +static const ctl_named_node_t * +ctl_named_children(const ctl_named_node_t *node, size_t index) { const ctl_named_node_t *children = ctl_named_node(node->children); return (children ? &children[index] : NULL); } -JEMALLOC_INLINE_C const ctl_indexed_node_t * -ctl_indexed_node(const ctl_node_t *node) -{ - +static const ctl_indexed_node_t * +ctl_indexed_node(const ctl_node_t *node) { return (!node->named ? 
(const ctl_indexed_node_t *)node : NULL); } /******************************************************************************/ /* Function prototypes for non-inline static functions. */ -#define CTL_PROTO(n) \ -static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \ - size_t *oldlenp, void *newp, size_t newlen); - -#define INDEX_PROTO(n) \ -static const ctl_named_node_t *n##_index(const size_t *mib, \ - size_t miblen, size_t i); - -static bool ctl_arena_init(ctl_arena_stats_t *astats); -static void ctl_arena_clear(ctl_arena_stats_t *astats); -static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, - arena_t *arena); -static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, - ctl_arena_stats_t *astats); -static void ctl_arena_refresh(arena_t *arena, unsigned i); -static bool ctl_grow(void); -static void ctl_refresh(void); -static bool ctl_init(void); -static int ctl_lookup(const char *name, ctl_node_t const **nodesp, - size_t *mibp, size_t *depthp); +#define CTL_PROTO(n) \ +static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ + void *oldp, size_t *oldlenp, void *newp, size_t newlen); + +#define INDEX_PROTO(n) \ +static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \ + const size_t *mib, size_t miblen, size_t i); CTL_PROTO(version) CTL_PROTO(epoch) +CTL_PROTO(background_thread) +CTL_PROTO(max_background_threads) CTL_PROTO(thread_tcache_enabled) CTL_PROTO(thread_tcache_flush) CTL_PROTO(thread_prof_name) @@ -77,29 +71,33 @@ CTL_PROTO(config_cache_oblivious) CTL_PROTO(config_debug) CTL_PROTO(config_fill) CTL_PROTO(config_lazy_lock) -CTL_PROTO(config_munmap) +CTL_PROTO(config_malloc_conf) CTL_PROTO(config_prof) CTL_PROTO(config_prof_libgcc) CTL_PROTO(config_prof_libunwind) CTL_PROTO(config_stats) -CTL_PROTO(config_tcache) -CTL_PROTO(config_tls) CTL_PROTO(config_utrace) -CTL_PROTO(config_valgrind) CTL_PROTO(config_xmalloc) CTL_PROTO(opt_abort) +CTL_PROTO(opt_abort_conf) +CTL_PROTO(opt_metadata_thp) +CTL_PROTO(opt_retain) CTL_PROTO(opt_dss) -CTL_PROTO(opt_lg_chunk) CTL_PROTO(opt_narenas) -CTL_PROTO(opt_lg_dirty_mult) +CTL_PROTO(opt_percpu_arena) +CTL_PROTO(opt_background_thread) +CTL_PROTO(opt_max_background_threads) +CTL_PROTO(opt_dirty_decay_ms) +CTL_PROTO(opt_muzzy_decay_ms) CTL_PROTO(opt_stats_print) +CTL_PROTO(opt_stats_print_opts) CTL_PROTO(opt_junk) CTL_PROTO(opt_zero) -CTL_PROTO(opt_quarantine) -CTL_PROTO(opt_redzone) CTL_PROTO(opt_utrace) CTL_PROTO(opt_xmalloc) CTL_PROTO(opt_tcache) +CTL_PROTO(opt_thp) +CTL_PROTO(opt_lg_extent_max_active_fit) CTL_PROTO(opt_lg_tcache_max) CTL_PROTO(opt_prof) CTL_PROTO(opt_prof_prefix) @@ -114,31 +112,34 @@ CTL_PROTO(opt_prof_accum) CTL_PROTO(tcache_create) CTL_PROTO(tcache_flush) CTL_PROTO(tcache_destroy) +CTL_PROTO(arena_i_initialized) +CTL_PROTO(arena_i_decay) CTL_PROTO(arena_i_purge) -static void arena_purge(unsigned arena_ind); +CTL_PROTO(arena_i_reset) +CTL_PROTO(arena_i_destroy) CTL_PROTO(arena_i_dss) -CTL_PROTO(arena_i_lg_dirty_mult) -CTL_PROTO(arena_i_chunk_hooks) +CTL_PROTO(arena_i_dirty_decay_ms) +CTL_PROTO(arena_i_muzzy_decay_ms) +CTL_PROTO(arena_i_extent_hooks) +CTL_PROTO(arena_i_retain_grow_limit) INDEX_PROTO(arena_i) CTL_PROTO(arenas_bin_i_size) CTL_PROTO(arenas_bin_i_nregs) -CTL_PROTO(arenas_bin_i_run_size) +CTL_PROTO(arenas_bin_i_slab_size) INDEX_PROTO(arenas_bin_i) -CTL_PROTO(arenas_lrun_i_size) -INDEX_PROTO(arenas_lrun_i) -CTL_PROTO(arenas_hchunk_i_size) -INDEX_PROTO(arenas_hchunk_i) +CTL_PROTO(arenas_lextent_i_size) +INDEX_PROTO(arenas_lextent_i) CTL_PROTO(arenas_narenas) -CTL_PROTO(arenas_initialized) 
-CTL_PROTO(arenas_lg_dirty_mult) +CTL_PROTO(arenas_dirty_decay_ms) +CTL_PROTO(arenas_muzzy_decay_ms) CTL_PROTO(arenas_quantum) CTL_PROTO(arenas_page) CTL_PROTO(arenas_tcache_max) CTL_PROTO(arenas_nbins) CTL_PROTO(arenas_nhbins) -CTL_PROTO(arenas_nlruns) -CTL_PROTO(arenas_nhchunks) -CTL_PROTO(arenas_extend) +CTL_PROTO(arenas_nlextents) +CTL_PROTO(arenas_create) +CTL_PROTO(arenas_lookup) CTL_PROTO(prof_thread_active_init) CTL_PROTO(prof_active) CTL_PROTO(prof_dump) @@ -154,67 +155,94 @@ CTL_PROTO(stats_arenas_i_large_allocated) CTL_PROTO(stats_arenas_i_large_nmalloc) CTL_PROTO(stats_arenas_i_large_ndalloc) CTL_PROTO(stats_arenas_i_large_nrequests) -CTL_PROTO(stats_arenas_i_huge_allocated) -CTL_PROTO(stats_arenas_i_huge_nmalloc) -CTL_PROTO(stats_arenas_i_huge_ndalloc) -CTL_PROTO(stats_arenas_i_huge_nrequests) CTL_PROTO(stats_arenas_i_bins_j_nmalloc) CTL_PROTO(stats_arenas_i_bins_j_ndalloc) CTL_PROTO(stats_arenas_i_bins_j_nrequests) CTL_PROTO(stats_arenas_i_bins_j_curregs) CTL_PROTO(stats_arenas_i_bins_j_nfills) CTL_PROTO(stats_arenas_i_bins_j_nflushes) -CTL_PROTO(stats_arenas_i_bins_j_nruns) -CTL_PROTO(stats_arenas_i_bins_j_nreruns) -CTL_PROTO(stats_arenas_i_bins_j_curruns) +CTL_PROTO(stats_arenas_i_bins_j_nslabs) +CTL_PROTO(stats_arenas_i_bins_j_nreslabs) +CTL_PROTO(stats_arenas_i_bins_j_curslabs) INDEX_PROTO(stats_arenas_i_bins_j) -CTL_PROTO(stats_arenas_i_lruns_j_nmalloc) -CTL_PROTO(stats_arenas_i_lruns_j_ndalloc) -CTL_PROTO(stats_arenas_i_lruns_j_nrequests) -CTL_PROTO(stats_arenas_i_lruns_j_curruns) -INDEX_PROTO(stats_arenas_i_lruns_j) -CTL_PROTO(stats_arenas_i_hchunks_j_nmalloc) -CTL_PROTO(stats_arenas_i_hchunks_j_ndalloc) -CTL_PROTO(stats_arenas_i_hchunks_j_nrequests) -CTL_PROTO(stats_arenas_i_hchunks_j_curhchunks) -INDEX_PROTO(stats_arenas_i_hchunks_j) +CTL_PROTO(stats_arenas_i_lextents_j_nmalloc) +CTL_PROTO(stats_arenas_i_lextents_j_ndalloc) +CTL_PROTO(stats_arenas_i_lextents_j_nrequests) +CTL_PROTO(stats_arenas_i_lextents_j_curlextents) +INDEX_PROTO(stats_arenas_i_lextents_j) CTL_PROTO(stats_arenas_i_nthreads) +CTL_PROTO(stats_arenas_i_uptime) CTL_PROTO(stats_arenas_i_dss) -CTL_PROTO(stats_arenas_i_lg_dirty_mult) +CTL_PROTO(stats_arenas_i_dirty_decay_ms) +CTL_PROTO(stats_arenas_i_muzzy_decay_ms) CTL_PROTO(stats_arenas_i_pactive) CTL_PROTO(stats_arenas_i_pdirty) +CTL_PROTO(stats_arenas_i_pmuzzy) CTL_PROTO(stats_arenas_i_mapped) -CTL_PROTO(stats_arenas_i_npurge) -CTL_PROTO(stats_arenas_i_nmadvise) -CTL_PROTO(stats_arenas_i_purged) -CTL_PROTO(stats_arenas_i_metadata_mapped) -CTL_PROTO(stats_arenas_i_metadata_allocated) +CTL_PROTO(stats_arenas_i_retained) +CTL_PROTO(stats_arenas_i_dirty_npurge) +CTL_PROTO(stats_arenas_i_dirty_nmadvise) +CTL_PROTO(stats_arenas_i_dirty_purged) +CTL_PROTO(stats_arenas_i_muzzy_npurge) +CTL_PROTO(stats_arenas_i_muzzy_nmadvise) +CTL_PROTO(stats_arenas_i_muzzy_purged) +CTL_PROTO(stats_arenas_i_base) +CTL_PROTO(stats_arenas_i_internal) +CTL_PROTO(stats_arenas_i_metadata_thp) +CTL_PROTO(stats_arenas_i_tcache_bytes) +CTL_PROTO(stats_arenas_i_resident) INDEX_PROTO(stats_arenas_i) -CTL_PROTO(stats_cactive) CTL_PROTO(stats_allocated) CTL_PROTO(stats_active) +CTL_PROTO(stats_background_thread_num_threads) +CTL_PROTO(stats_background_thread_num_runs) +CTL_PROTO(stats_background_thread_run_interval) CTL_PROTO(stats_metadata) +CTL_PROTO(stats_metadata_thp) CTL_PROTO(stats_resident) CTL_PROTO(stats_mapped) +CTL_PROTO(stats_retained) + +#define MUTEX_STATS_CTL_PROTO_GEN(n) \ +CTL_PROTO(stats_##n##_num_ops) \ +CTL_PROTO(stats_##n##_num_wait) \ 
+CTL_PROTO(stats_##n##_num_spin_acq) \ +CTL_PROTO(stats_##n##_num_owner_switch) \ +CTL_PROTO(stats_##n##_total_wait_time) \ +CTL_PROTO(stats_##n##_max_wait_time) \ +CTL_PROTO(stats_##n##_max_num_thds) + +/* Global mutexes. */ +#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(mutexes_##mtx) +MUTEX_PROF_GLOBAL_MUTEXES +#undef OP + +/* Per arena mutexes. */ +#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(arenas_i_mutexes_##mtx) +MUTEX_PROF_ARENA_MUTEXES +#undef OP + +/* Arena bin mutexes. */ +MUTEX_STATS_CTL_PROTO_GEN(arenas_i_bins_j_mutex) +#undef MUTEX_STATS_CTL_PROTO_GEN + +CTL_PROTO(stats_mutexes_reset) /******************************************************************************/ /* mallctl tree. */ -/* Maximum tree depth. */ -#define CTL_MAX_DEPTH 6 - -#define NAME(n) {true}, n -#define CHILD(t, c) \ +#define NAME(n) {true}, n +#define CHILD(t, c) \ sizeof(c##_node) / sizeof(ctl_##t##_node_t), \ (ctl_node_t *)c##_node, \ NULL -#define CTL(c) 0, NULL, c##_ctl +#define CTL(c) 0, NULL, c##_ctl /* * Only handles internal indexed nodes, since there are currently no external * ones. */ -#define INDEX(i) {false}, i##_index +#define INDEX(i) {false}, i##_index static const ctl_named_node_t thread_tcache_node[] = { {NAME("enabled"), CTL(thread_tcache_enabled)}, @@ -241,32 +269,36 @@ static const ctl_named_node_t config_node[] = { {NAME("debug"), CTL(config_debug)}, {NAME("fill"), CTL(config_fill)}, {NAME("lazy_lock"), CTL(config_lazy_lock)}, - {NAME("munmap"), CTL(config_munmap)}, + {NAME("malloc_conf"), CTL(config_malloc_conf)}, {NAME("prof"), CTL(config_prof)}, {NAME("prof_libgcc"), CTL(config_prof_libgcc)}, {NAME("prof_libunwind"), CTL(config_prof_libunwind)}, {NAME("stats"), CTL(config_stats)}, - {NAME("tcache"), CTL(config_tcache)}, - {NAME("tls"), CTL(config_tls)}, {NAME("utrace"), CTL(config_utrace)}, - {NAME("valgrind"), CTL(config_valgrind)}, {NAME("xmalloc"), CTL(config_xmalloc)} }; static const ctl_named_node_t opt_node[] = { {NAME("abort"), CTL(opt_abort)}, + {NAME("abort_conf"), CTL(opt_abort_conf)}, + {NAME("metadata_thp"), CTL(opt_metadata_thp)}, + {NAME("retain"), CTL(opt_retain)}, {NAME("dss"), CTL(opt_dss)}, - {NAME("lg_chunk"), CTL(opt_lg_chunk)}, {NAME("narenas"), CTL(opt_narenas)}, - {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)}, + {NAME("percpu_arena"), CTL(opt_percpu_arena)}, + {NAME("background_thread"), CTL(opt_background_thread)}, + {NAME("max_background_threads"), CTL(opt_max_background_threads)}, + {NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)}, + {NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)}, {NAME("stats_print"), CTL(opt_stats_print)}, + {NAME("stats_print_opts"), CTL(opt_stats_print_opts)}, {NAME("junk"), CTL(opt_junk)}, {NAME("zero"), CTL(opt_zero)}, - {NAME("quarantine"), CTL(opt_quarantine)}, - {NAME("redzone"), CTL(opt_redzone)}, {NAME("utrace"), CTL(opt_utrace)}, {NAME("xmalloc"), CTL(opt_xmalloc)}, {NAME("tcache"), CTL(opt_tcache)}, + {NAME("thp"), CTL(opt_thp)}, + {NAME("lg_extent_max_active_fit"), CTL(opt_lg_extent_max_active_fit)}, {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)}, {NAME("prof"), CTL(opt_prof)}, {NAME("prof_prefix"), CTL(opt_prof_prefix)}, @@ -287,10 +319,16 @@ static const ctl_named_node_t tcache_node[] = { }; static const ctl_named_node_t arena_i_node[] = { + {NAME("initialized"), CTL(arena_i_initialized)}, + {NAME("decay"), CTL(arena_i_decay)}, {NAME("purge"), CTL(arena_i_purge)}, + {NAME("reset"), CTL(arena_i_reset)}, + {NAME("destroy"), CTL(arena_i_destroy)}, {NAME("dss"), CTL(arena_i_dss)}, - {NAME("lg_dirty_mult"), 
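Each leaf registered in the opt_node table above surfaces to applications as a dotted name under "opt." through the public mallctl() entry point (spelled je_mallctl() in Redis, which builds its bundled jemalloc with a je_ symbol prefix). A minimal read-only usage sketch:

/* Read two of the opt.* leaves defined above via the public interface. */
#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
	unsigned narenas;
	size_t sz = sizeof(narenas);
	if (mallctl("opt.narenas", &narenas, &sz, NULL, 0) == 0)
		printf("opt.narenas = %u\n", narenas);

	bool background_thread;
	sz = sizeof(background_thread);
	if (mallctl("opt.background_thread", &background_thread, &sz, NULL, 0) == 0)
		printf("opt.background_thread = %s\n", background_thread ? "true" : "false");
	return 0;
}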
CTL(arena_i_lg_dirty_mult)}, - {NAME("chunk_hooks"), CTL(arena_i_chunk_hooks)} + {NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)}, + {NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)}, + {NAME("extent_hooks"), CTL(arena_i_extent_hooks)}, + {NAME("retain_grow_limit"), CTL(arena_i_retain_grow_limit)} }; static const ctl_named_node_t super_arena_i_node[] = { {NAME(""), CHILD(named, arena_i)} @@ -303,7 +341,7 @@ static const ctl_indexed_node_t arena_node[] = { static const ctl_named_node_t arenas_bin_i_node[] = { {NAME("size"), CTL(arenas_bin_i_size)}, {NAME("nregs"), CTL(arenas_bin_i_nregs)}, - {NAME("run_size"), CTL(arenas_bin_i_run_size)} + {NAME("slab_size"), CTL(arenas_bin_i_slab_size)} }; static const ctl_named_node_t super_arenas_bin_i_node[] = { {NAME(""), CHILD(named, arenas_bin_i)} @@ -313,43 +351,31 @@ static const ctl_indexed_node_t arenas_bin_node[] = { {INDEX(arenas_bin_i)} }; -static const ctl_named_node_t arenas_lrun_i_node[] = { - {NAME("size"), CTL(arenas_lrun_i_size)} +static const ctl_named_node_t arenas_lextent_i_node[] = { + {NAME("size"), CTL(arenas_lextent_i_size)} }; -static const ctl_named_node_t super_arenas_lrun_i_node[] = { - {NAME(""), CHILD(named, arenas_lrun_i)} +static const ctl_named_node_t super_arenas_lextent_i_node[] = { + {NAME(""), CHILD(named, arenas_lextent_i)} }; -static const ctl_indexed_node_t arenas_lrun_node[] = { - {INDEX(arenas_lrun_i)} -}; - -static const ctl_named_node_t arenas_hchunk_i_node[] = { - {NAME("size"), CTL(arenas_hchunk_i_size)} -}; -static const ctl_named_node_t super_arenas_hchunk_i_node[] = { - {NAME(""), CHILD(named, arenas_hchunk_i)} -}; - -static const ctl_indexed_node_t arenas_hchunk_node[] = { - {INDEX(arenas_hchunk_i)} +static const ctl_indexed_node_t arenas_lextent_node[] = { + {INDEX(arenas_lextent_i)} }; static const ctl_named_node_t arenas_node[] = { {NAME("narenas"), CTL(arenas_narenas)}, - {NAME("initialized"), CTL(arenas_initialized)}, - {NAME("lg_dirty_mult"), CTL(arenas_lg_dirty_mult)}, + {NAME("dirty_decay_ms"), CTL(arenas_dirty_decay_ms)}, + {NAME("muzzy_decay_ms"), CTL(arenas_muzzy_decay_ms)}, {NAME("quantum"), CTL(arenas_quantum)}, {NAME("page"), CTL(arenas_page)}, {NAME("tcache_max"), CTL(arenas_tcache_max)}, {NAME("nbins"), CTL(arenas_nbins)}, {NAME("nhbins"), CTL(arenas_nhbins)}, {NAME("bin"), CHILD(indexed, arenas_bin)}, - {NAME("nlruns"), CTL(arenas_nlruns)}, - {NAME("lrun"), CHILD(indexed, arenas_lrun)}, - {NAME("nhchunks"), CTL(arenas_nhchunks)}, - {NAME("hchunk"), CHILD(indexed, arenas_hchunk)}, - {NAME("extend"), CTL(arenas_extend)} + {NAME("nlextents"), CTL(arenas_nlextents)}, + {NAME("lextent"), CHILD(indexed, arenas_lextent)}, + {NAME("create"), CTL(arenas_create)}, + {NAME("lookup"), CTL(arenas_lookup)} }; static const ctl_named_node_t prof_node[] = { @@ -362,11 +388,6 @@ static const ctl_named_node_t prof_node[] = { {NAME("lg_sample"), CTL(lg_prof_sample)} }; -static const ctl_named_node_t stats_arenas_i_metadata_node[] = { - {NAME("mapped"), CTL(stats_arenas_i_metadata_mapped)}, - {NAME("allocated"), CTL(stats_arenas_i_metadata_allocated)} -}; - static const ctl_named_node_t stats_arenas_i_small_node[] = { {NAME("allocated"), CTL(stats_arenas_i_small_allocated)}, {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)}, @@ -381,13 +402,27 @@ static const ctl_named_node_t stats_arenas_i_large_node[] = { {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)} }; -static const ctl_named_node_t stats_arenas_i_huge_node[] = { - {NAME("allocated"), CTL(stats_arenas_i_huge_allocated)}, - 
{NAME("nmalloc"), CTL(stats_arenas_i_huge_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_huge_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_huge_nrequests)} +#define MUTEX_PROF_DATA_NODE(prefix) \ +static const ctl_named_node_t stats_##prefix##_node[] = { \ + {NAME("num_ops"), \ + CTL(stats_##prefix##_num_ops)}, \ + {NAME("num_wait"), \ + CTL(stats_##prefix##_num_wait)}, \ + {NAME("num_spin_acq"), \ + CTL(stats_##prefix##_num_spin_acq)}, \ + {NAME("num_owner_switch"), \ + CTL(stats_##prefix##_num_owner_switch)}, \ + {NAME("total_wait_time"), \ + CTL(stats_##prefix##_total_wait_time)}, \ + {NAME("max_wait_time"), \ + CTL(stats_##prefix##_max_wait_time)}, \ + {NAME("max_num_thds"), \ + CTL(stats_##prefix##_max_num_thds)} \ + /* Note that # of current waiting thread not provided. */ \ }; +MUTEX_PROF_DATA_NODE(arenas_i_bins_j_mutex) + static const ctl_named_node_t stats_arenas_i_bins_j_node[] = { {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)}, {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)}, @@ -395,10 +430,12 @@ static const ctl_named_node_t stats_arenas_i_bins_j_node[] = { {NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)}, {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)}, {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)}, - {NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)}, - {NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)}, - {NAME("curruns"), CTL(stats_arenas_i_bins_j_curruns)} + {NAME("nslabs"), CTL(stats_arenas_i_bins_j_nslabs)}, + {NAME("nreslabs"), CTL(stats_arenas_i_bins_j_nreslabs)}, + {NAME("curslabs"), CTL(stats_arenas_i_bins_j_curslabs)}, + {NAME("mutex"), CHILD(named, stats_arenas_i_bins_j_mutex)} }; + static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = { {NAME(""), CHILD(named, stats_arenas_i_bins_j)} }; @@ -407,51 +444,57 @@ static const ctl_indexed_node_t stats_arenas_i_bins_node[] = { {INDEX(stats_arenas_i_bins_j)} }; -static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = { - {NAME("nmalloc"), CTL(stats_arenas_i_lruns_j_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_lruns_j_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)}, - {NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)} +static const ctl_named_node_t stats_arenas_i_lextents_j_node[] = { + {NAME("nmalloc"), CTL(stats_arenas_i_lextents_j_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_lextents_j_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_lextents_j_nrequests)}, + {NAME("curlextents"), CTL(stats_arenas_i_lextents_j_curlextents)} }; -static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = { - {NAME(""), CHILD(named, stats_arenas_i_lruns_j)} +static const ctl_named_node_t super_stats_arenas_i_lextents_j_node[] = { + {NAME(""), CHILD(named, stats_arenas_i_lextents_j)} }; -static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = { - {INDEX(stats_arenas_i_lruns_j)} +static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = { + {INDEX(stats_arenas_i_lextents_j)} }; -static const ctl_named_node_t stats_arenas_i_hchunks_j_node[] = { - {NAME("nmalloc"), CTL(stats_arenas_i_hchunks_j_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_hchunks_j_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_hchunks_j_nrequests)}, - {NAME("curhchunks"), CTL(stats_arenas_i_hchunks_j_curhchunks)} -}; -static const ctl_named_node_t super_stats_arenas_i_hchunks_j_node[] = { - {NAME(""), CHILD(named, stats_arenas_i_hchunks_j)} -}; +#define OP(mtx) MUTEX_PROF_DATA_NODE(arenas_i_mutexes_##mtx) +MUTEX_PROF_ARENA_MUTEXES +#undef OP 
-static const ctl_indexed_node_t stats_arenas_i_hchunks_node[] = { - {INDEX(stats_arenas_i_hchunks_j)} +static const ctl_named_node_t stats_arenas_i_mutexes_node[] = { +#define OP(mtx) {NAME(#mtx), CHILD(named, stats_arenas_i_mutexes_##mtx)}, +MUTEX_PROF_ARENA_MUTEXES +#undef OP }; static const ctl_named_node_t stats_arenas_i_node[] = { {NAME("nthreads"), CTL(stats_arenas_i_nthreads)}, + {NAME("uptime"), CTL(stats_arenas_i_uptime)}, {NAME("dss"), CTL(stats_arenas_i_dss)}, - {NAME("lg_dirty_mult"), CTL(stats_arenas_i_lg_dirty_mult)}, + {NAME("dirty_decay_ms"), CTL(stats_arenas_i_dirty_decay_ms)}, + {NAME("muzzy_decay_ms"), CTL(stats_arenas_i_muzzy_decay_ms)}, {NAME("pactive"), CTL(stats_arenas_i_pactive)}, {NAME("pdirty"), CTL(stats_arenas_i_pdirty)}, + {NAME("pmuzzy"), CTL(stats_arenas_i_pmuzzy)}, {NAME("mapped"), CTL(stats_arenas_i_mapped)}, - {NAME("npurge"), CTL(stats_arenas_i_npurge)}, - {NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)}, - {NAME("purged"), CTL(stats_arenas_i_purged)}, - {NAME("metadata"), CHILD(named, stats_arenas_i_metadata)}, + {NAME("retained"), CTL(stats_arenas_i_retained)}, + {NAME("dirty_npurge"), CTL(stats_arenas_i_dirty_npurge)}, + {NAME("dirty_nmadvise"), CTL(stats_arenas_i_dirty_nmadvise)}, + {NAME("dirty_purged"), CTL(stats_arenas_i_dirty_purged)}, + {NAME("muzzy_npurge"), CTL(stats_arenas_i_muzzy_npurge)}, + {NAME("muzzy_nmadvise"), CTL(stats_arenas_i_muzzy_nmadvise)}, + {NAME("muzzy_purged"), CTL(stats_arenas_i_muzzy_purged)}, + {NAME("base"), CTL(stats_arenas_i_base)}, + {NAME("internal"), CTL(stats_arenas_i_internal)}, + {NAME("metadata_thp"), CTL(stats_arenas_i_metadata_thp)}, + {NAME("tcache_bytes"), CTL(stats_arenas_i_tcache_bytes)}, + {NAME("resident"), CTL(stats_arenas_i_resident)}, {NAME("small"), CHILD(named, stats_arenas_i_small)}, {NAME("large"), CHILD(named, stats_arenas_i_large)}, - {NAME("huge"), CHILD(named, stats_arenas_i_huge)}, {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)}, - {NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)}, - {NAME("hchunks"), CHILD(indexed, stats_arenas_i_hchunks)} + {NAME("lextents"), CHILD(indexed, stats_arenas_i_lextents)}, + {NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)} }; static const ctl_named_node_t super_stats_arenas_i_node[] = { {NAME(""), CHILD(named, stats_arenas_i)} @@ -461,19 +504,43 @@ static const ctl_indexed_node_t stats_arenas_node[] = { {INDEX(stats_arenas_i)} }; +static const ctl_named_node_t stats_background_thread_node[] = { + {NAME("num_threads"), CTL(stats_background_thread_num_threads)}, + {NAME("num_runs"), CTL(stats_background_thread_num_runs)}, + {NAME("run_interval"), CTL(stats_background_thread_run_interval)} +}; + +#define OP(mtx) MUTEX_PROF_DATA_NODE(mutexes_##mtx) +MUTEX_PROF_GLOBAL_MUTEXES +#undef OP + +static const ctl_named_node_t stats_mutexes_node[] = { +#define OP(mtx) {NAME(#mtx), CHILD(named, stats_mutexes_##mtx)}, +MUTEX_PROF_GLOBAL_MUTEXES +#undef OP + {NAME("reset"), CTL(stats_mutexes_reset)} +}; +#undef MUTEX_PROF_DATA_NODE + static const ctl_named_node_t stats_node[] = { - {NAME("cactive"), CTL(stats_cactive)}, {NAME("allocated"), CTL(stats_allocated)}, {NAME("active"), CTL(stats_active)}, {NAME("metadata"), CTL(stats_metadata)}, + {NAME("metadata_thp"), CTL(stats_metadata_thp)}, {NAME("resident"), CTL(stats_resident)}, {NAME("mapped"), CTL(stats_mapped)}, + {NAME("retained"), CTL(stats_retained)}, + {NAME("background_thread"), + CHILD(named, stats_background_thread)}, + {NAME("mutexes"), CHILD(named, stats_mutexes)}, {NAME("arenas"), CHILD(indexed, 
stats_arenas)} }; static const ctl_named_node_t root_node[] = { {NAME("version"), CTL(version)}, {NAME("epoch"), CTL(epoch)}, + {NAME("background_thread"), CTL(background_thread)}, + {NAME("max_background_threads"), CTL(max_background_threads)}, {NAME("thread"), CHILD(named, thread)}, {NAME("config"), CHILD(named, config)}, {NAME("opt"), CHILD(named, opt)}, @@ -494,312 +561,519 @@ static const ctl_named_node_t super_root_node[] = { /******************************************************************************/ -static bool -ctl_arena_init(ctl_arena_stats_t *astats) -{ +/* + * Sets *dst + *src non-atomically. This is safe, since everything is + * synchronized by the ctl mutex. + */ +static void +ctl_accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) { +#ifdef JEMALLOC_ATOMIC_U64 + uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED); + uint64_t cur_src = atomic_load_u64(src, ATOMIC_RELAXED); + atomic_store_u64(dst, cur_dst + cur_src, ATOMIC_RELAXED); +#else + *dst += *src; +#endif +} + +/* Likewise: with ctl mutex synchronization, reading is simple. */ +static uint64_t +ctl_arena_stats_read_u64(arena_stats_u64_t *p) { +#ifdef JEMALLOC_ATOMIC_U64 + return atomic_load_u64(p, ATOMIC_RELAXED); +#else + return *p; +#endif +} + +static void +accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) { + size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED); + size_t cur_src = atomic_load_zu(src, ATOMIC_RELAXED); + atomic_store_zu(dst, cur_dst + cur_src, ATOMIC_RELAXED); +} + +/******************************************************************************/ - if (astats->lstats == NULL) { - astats->lstats = (malloc_large_stats_t *)a0malloc(nlclasses * - sizeof(malloc_large_stats_t)); - if (astats->lstats == NULL) - return (true); +static unsigned +arenas_i2a_impl(size_t i, bool compat, bool validate) { + unsigned a; + + switch (i) { + case MALLCTL_ARENAS_ALL: + a = 0; + break; + case MALLCTL_ARENAS_DESTROYED: + a = 1; + break; + default: + if (compat && i == ctl_arenas->narenas) { + /* + * Provide deprecated backward compatibility for + * accessing the merged stats at index narenas rather + * than via MALLCTL_ARENAS_ALL. This is scheduled for + * removal in 6.0.0. + */ + a = 0; + } else if (validate && i >= ctl_arenas->narenas) { + a = UINT_MAX; + } else { + /* + * This function should never be called for an index + * more than one past the range of indices that have + * initialized ctl data. 
+ */ + assert(i < ctl_arenas->narenas || (!validate && i == + ctl_arenas->narenas)); + a = (unsigned)i + 2; + } + break; } - if (astats->hstats == NULL) { - astats->hstats = (malloc_huge_stats_t *)a0malloc(nhclasses * - sizeof(malloc_huge_stats_t)); - if (astats->hstats == NULL) - return (true); + return a; +} + +static unsigned +arenas_i2a(size_t i) { + return arenas_i2a_impl(i, true, false); +} + +static ctl_arena_t * +arenas_i_impl(tsd_t *tsd, size_t i, bool compat, bool init) { + ctl_arena_t *ret; + + assert(!compat || !init); + + ret = ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)]; + if (init && ret == NULL) { + if (config_stats) { + struct container_s { + ctl_arena_t ctl_arena; + ctl_arena_stats_t astats; + }; + struct container_s *cont = + (struct container_s *)base_alloc(tsd_tsdn(tsd), + b0get(), sizeof(struct container_s), QUANTUM); + if (cont == NULL) { + return NULL; + } + ret = &cont->ctl_arena; + ret->astats = &cont->astats; + } else { + ret = (ctl_arena_t *)base_alloc(tsd_tsdn(tsd), b0get(), + sizeof(ctl_arena_t), QUANTUM); + if (ret == NULL) { + return NULL; + } + } + ret->arena_ind = (unsigned)i; + ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)] = ret; } - return (false); + assert(ret == NULL || arenas_i2a(ret->arena_ind) == arenas_i2a(i)); + return ret; } -static void -ctl_arena_clear(ctl_arena_stats_t *astats) -{ +static ctl_arena_t * +arenas_i(size_t i) { + ctl_arena_t *ret = arenas_i_impl(tsd_fetch(), i, true, false); + assert(ret != NULL); + return ret; +} - astats->dss = dss_prec_names[dss_prec_limit]; - astats->lg_dirty_mult = -1; - astats->pactive = 0; - astats->pdirty = 0; +static void +ctl_arena_clear(ctl_arena_t *ctl_arena) { + ctl_arena->nthreads = 0; + ctl_arena->dss = dss_prec_names[dss_prec_limit]; + ctl_arena->dirty_decay_ms = -1; + ctl_arena->muzzy_decay_ms = -1; + ctl_arena->pactive = 0; + ctl_arena->pdirty = 0; + ctl_arena->pmuzzy = 0; if (config_stats) { - memset(&astats->astats, 0, sizeof(arena_stats_t)); - astats->allocated_small = 0; - astats->nmalloc_small = 0; - astats->ndalloc_small = 0; - astats->nrequests_small = 0; - memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t)); - memset(astats->lstats, 0, nlclasses * - sizeof(malloc_large_stats_t)); - memset(astats->hstats, 0, nhclasses * - sizeof(malloc_huge_stats_t)); + memset(&ctl_arena->astats->astats, 0, sizeof(arena_stats_t)); + ctl_arena->astats->allocated_small = 0; + ctl_arena->astats->nmalloc_small = 0; + ctl_arena->astats->ndalloc_small = 0; + ctl_arena->astats->nrequests_small = 0; + memset(ctl_arena->astats->bstats, 0, NBINS * + sizeof(bin_stats_t)); + memset(ctl_arena->astats->lstats, 0, (NSIZES - NBINS) * + sizeof(arena_stats_large_t)); } } static void -ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena) -{ +ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) { unsigned i; - arena_stats_merge(arena, &cstats->dss, &cstats->lg_dirty_mult, - &cstats->pactive, &cstats->pdirty, &cstats->astats, cstats->bstats, - cstats->lstats, cstats->hstats); - - for (i = 0; i < NBINS; i++) { - cstats->allocated_small += cstats->bstats[i].curregs * - index2size(i); - cstats->nmalloc_small += cstats->bstats[i].nmalloc; - cstats->ndalloc_small += cstats->bstats[i].ndalloc; - cstats->nrequests_small += cstats->bstats[i].nrequests; + if (config_stats) { + arena_stats_merge(tsdn, arena, &ctl_arena->nthreads, + &ctl_arena->dss, &ctl_arena->dirty_decay_ms, + &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive, + &ctl_arena->pdirty, &ctl_arena->pmuzzy, + 
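The only non-obvious piece of the new bookkeeping above is the slot layout behind arenas_i2a_impl(): ctl_arenas->arenas[0] holds the merged "ALL" view, slot 1 holds stats accumulated from destroyed arenas, and real arena i lives at slot i + 2, with the old index narenas still accepted as a deprecated alias for "ALL". A compressed restatement, with the sentinels passed as parameters and the validation path omitted:

/* Slot layout of ctl_arenas->arenas[]: [ALL][DESTROYED][arena 0][arena 1]... */
#include <stddef.h>

static unsigned slot_for_index(size_t i, size_t narenas,
    size_t arenas_all, size_t arenas_destroyed) {
	if (i == arenas_all)
		return 0;               /* merged stats across all arenas */
	if (i == arenas_destroyed)
		return 1;               /* stats carried over from destroyed arenas */
	if (i == narenas)
		return 0;               /* deprecated alias for "ALL", kept for compat */
	return (unsigned)i + 2;         /* real arena i */
}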
&ctl_arena->astats->astats, ctl_arena->astats->bstats, + ctl_arena->astats->lstats); + + for (i = 0; i < NBINS; i++) { + ctl_arena->astats->allocated_small += + ctl_arena->astats->bstats[i].curregs * + sz_index2size(i); + ctl_arena->astats->nmalloc_small += + ctl_arena->astats->bstats[i].nmalloc; + ctl_arena->astats->ndalloc_small += + ctl_arena->astats->bstats[i].ndalloc; + ctl_arena->astats->nrequests_small += + ctl_arena->astats->bstats[i].nrequests; + } + } else { + arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads, + &ctl_arena->dss, &ctl_arena->dirty_decay_ms, + &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive, + &ctl_arena->pdirty, &ctl_arena->pmuzzy); } } static void -ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats) -{ +ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena, + bool destroyed) { unsigned i; - sstats->pactive += astats->pactive; - sstats->pdirty += astats->pdirty; - - sstats->astats.mapped += astats->astats.mapped; - sstats->astats.npurge += astats->astats.npurge; - sstats->astats.nmadvise += astats->astats.nmadvise; - sstats->astats.purged += astats->astats.purged; - - sstats->astats.metadata_mapped += astats->astats.metadata_mapped; - sstats->astats.metadata_allocated += astats->astats.metadata_allocated; - - sstats->allocated_small += astats->allocated_small; - sstats->nmalloc_small += astats->nmalloc_small; - sstats->ndalloc_small += astats->ndalloc_small; - sstats->nrequests_small += astats->nrequests_small; - - sstats->astats.allocated_large += astats->astats.allocated_large; - sstats->astats.nmalloc_large += astats->astats.nmalloc_large; - sstats->astats.ndalloc_large += astats->astats.ndalloc_large; - sstats->astats.nrequests_large += astats->astats.nrequests_large; - - sstats->astats.allocated_huge += astats->astats.allocated_huge; - sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge; - sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge; - - for (i = 0; i < NBINS; i++) { - sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc; - sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc; - sstats->bstats[i].nrequests += astats->bstats[i].nrequests; - sstats->bstats[i].curregs += astats->bstats[i].curregs; - if (config_tcache) { - sstats->bstats[i].nfills += astats->bstats[i].nfills; - sstats->bstats[i].nflushes += - astats->bstats[i].nflushes; - } - sstats->bstats[i].nruns += astats->bstats[i].nruns; - sstats->bstats[i].reruns += astats->bstats[i].reruns; - sstats->bstats[i].curruns += astats->bstats[i].curruns; + if (!destroyed) { + ctl_sdarena->nthreads += ctl_arena->nthreads; + ctl_sdarena->pactive += ctl_arena->pactive; + ctl_sdarena->pdirty += ctl_arena->pdirty; + ctl_sdarena->pmuzzy += ctl_arena->pmuzzy; + } else { + assert(ctl_arena->nthreads == 0); + assert(ctl_arena->pactive == 0); + assert(ctl_arena->pdirty == 0); + assert(ctl_arena->pmuzzy == 0); } - for (i = 0; i < nlclasses; i++) { - sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc; - sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc; - sstats->lstats[i].nrequests += astats->lstats[i].nrequests; - sstats->lstats[i].curruns += astats->lstats[i].curruns; - } + if (config_stats) { + ctl_arena_stats_t *sdstats = ctl_sdarena->astats; + ctl_arena_stats_t *astats = ctl_arena->astats; + + if (!destroyed) { + accum_atomic_zu(&sdstats->astats.mapped, + &astats->astats.mapped); + accum_atomic_zu(&sdstats->astats.retained, + &astats->astats.retained); + } + + ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge, + 
&astats->astats.decay_dirty.npurge); + ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.nmadvise, + &astats->astats.decay_dirty.nmadvise); + ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.purged, + &astats->astats.decay_dirty.purged); + + ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.npurge, + &astats->astats.decay_muzzy.npurge); + ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.nmadvise, + &astats->astats.decay_muzzy.nmadvise); + ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.purged, + &astats->astats.decay_muzzy.purged); + +#define OP(mtx) malloc_mutex_prof_merge( \ + &(sdstats->astats.mutex_prof_data[ \ + arena_prof_mutex_##mtx]), \ + &(astats->astats.mutex_prof_data[ \ + arena_prof_mutex_##mtx])); +MUTEX_PROF_ARENA_MUTEXES +#undef OP + if (!destroyed) { + accum_atomic_zu(&sdstats->astats.base, + &astats->astats.base); + accum_atomic_zu(&sdstats->astats.internal, + &astats->astats.internal); + accum_atomic_zu(&sdstats->astats.resident, + &astats->astats.resident); + accum_atomic_zu(&sdstats->astats.metadata_thp, + &astats->astats.metadata_thp); + } else { + assert(atomic_load_zu( + &astats->astats.internal, ATOMIC_RELAXED) == 0); + } + + if (!destroyed) { + sdstats->allocated_small += astats->allocated_small; + } else { + assert(astats->allocated_small == 0); + } + sdstats->nmalloc_small += astats->nmalloc_small; + sdstats->ndalloc_small += astats->ndalloc_small; + sdstats->nrequests_small += astats->nrequests_small; - for (i = 0; i < nhclasses; i++) { - sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc; - sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc; - sstats->hstats[i].curhchunks += astats->hstats[i].curhchunks; + if (!destroyed) { + accum_atomic_zu(&sdstats->astats.allocated_large, + &astats->astats.allocated_large); + } else { + assert(atomic_load_zu(&astats->astats.allocated_large, + ATOMIC_RELAXED) == 0); + } + ctl_accum_arena_stats_u64(&sdstats->astats.nmalloc_large, + &astats->astats.nmalloc_large); + ctl_accum_arena_stats_u64(&sdstats->astats.ndalloc_large, + &astats->astats.ndalloc_large); + ctl_accum_arena_stats_u64(&sdstats->astats.nrequests_large, + &astats->astats.nrequests_large); + + accum_atomic_zu(&sdstats->astats.tcache_bytes, + &astats->astats.tcache_bytes); + + if (ctl_arena->arena_ind == 0) { + sdstats->astats.uptime = astats->astats.uptime; + } + + for (i = 0; i < NBINS; i++) { + sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc; + sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc; + sdstats->bstats[i].nrequests += + astats->bstats[i].nrequests; + if (!destroyed) { + sdstats->bstats[i].curregs += + astats->bstats[i].curregs; + } else { + assert(astats->bstats[i].curregs == 0); + } + sdstats->bstats[i].nfills += astats->bstats[i].nfills; + sdstats->bstats[i].nflushes += + astats->bstats[i].nflushes; + sdstats->bstats[i].nslabs += astats->bstats[i].nslabs; + sdstats->bstats[i].reslabs += astats->bstats[i].reslabs; + if (!destroyed) { + sdstats->bstats[i].curslabs += + astats->bstats[i].curslabs; + } else { + assert(astats->bstats[i].curslabs == 0); + } + malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data, + &astats->bstats[i].mutex_data); + } + + for (i = 0; i < NSIZES - NBINS; i++) { + ctl_accum_arena_stats_u64(&sdstats->lstats[i].nmalloc, + &astats->lstats[i].nmalloc); + ctl_accum_arena_stats_u64(&sdstats->lstats[i].ndalloc, + &astats->lstats[i].ndalloc); + ctl_accum_arena_stats_u64(&sdstats->lstats[i].nrequests, + &astats->lstats[i].nrequests); + if (!destroyed) { + 
sdstats->lstats[i].curlextents += + astats->lstats[i].curlextents; + } else { + assert(astats->lstats[i].curlextents == 0); + } + } } } static void -ctl_arena_refresh(arena_t *arena, unsigned i) -{ - ctl_arena_stats_t *astats = &ctl_stats.arenas[i]; - ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas]; +ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena, + unsigned i, bool destroyed) { + ctl_arena_t *ctl_arena = arenas_i(i); + + ctl_arena_clear(ctl_arena); + ctl_arena_stats_amerge(tsdn, ctl_arena, arena); + /* Merge into sum stats as well. */ + ctl_arena_stats_sdmerge(ctl_sdarena, ctl_arena, destroyed); +} - ctl_arena_clear(astats); +static unsigned +ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) { + unsigned arena_ind; + ctl_arena_t *ctl_arena; - sstats->nthreads += astats->nthreads; - if (config_stats) { - ctl_arena_stats_amerge(astats, arena); - /* Merge into sum stats as well. */ - ctl_arena_stats_smerge(sstats, astats); + if ((ctl_arena = ql_last(&ctl_arenas->destroyed, destroyed_link)) != + NULL) { + ql_remove(&ctl_arenas->destroyed, ctl_arena, destroyed_link); + arena_ind = ctl_arena->arena_ind; } else { - astats->pactive += arena->nactive; - astats->pdirty += arena->ndirty; - /* Merge into sum stats as well. */ - sstats->pactive += arena->nactive; - sstats->pdirty += arena->ndirty; + arena_ind = ctl_arenas->narenas; } -} -static bool -ctl_grow(void) -{ - ctl_arena_stats_t *astats; + /* Trigger stats allocation. */ + if (arenas_i_impl(tsd, arena_ind, false, true) == NULL) { + return UINT_MAX; + } /* Initialize new arena. */ - if (arena_init(ctl_stats.narenas) == NULL) - return (true); - - /* Allocate extended arena stats. */ - astats = (ctl_arena_stats_t *)a0malloc((ctl_stats.narenas + 2) * - sizeof(ctl_arena_stats_t)); - if (astats == NULL) - return (true); - - /* Initialize the new astats element. */ - memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) * - sizeof(ctl_arena_stats_t)); - memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t)); - if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) { - a0dalloc(astats); - return (true); - } - /* Swap merged stats to their new location. */ - { - ctl_arena_stats_t tstats; - memcpy(&tstats, &astats[ctl_stats.narenas], - sizeof(ctl_arena_stats_t)); - memcpy(&astats[ctl_stats.narenas], - &astats[ctl_stats.narenas + 1], sizeof(ctl_arena_stats_t)); - memcpy(&astats[ctl_stats.narenas + 1], &tstats, - sizeof(ctl_arena_stats_t)); + if (arena_init(tsd_tsdn(tsd), arena_ind, extent_hooks) == NULL) { + return UINT_MAX; } - a0dalloc(ctl_stats.arenas); - ctl_stats.arenas = astats; - ctl_stats.narenas++; - return (false); + if (arena_ind == ctl_arenas->narenas) { + ctl_arenas->narenas++; + } + + return arena_ind; +} + +static void +ctl_background_thread_stats_read(tsdn_t *tsdn) { + background_thread_stats_t *stats = &ctl_stats->background_thread; + if (!have_background_thread || + background_thread_stats_read(tsdn, stats)) { + memset(stats, 0, sizeof(background_thread_stats_t)); + nstime_init(&stats->run_interval, 0); + } } static void -ctl_refresh(void) -{ - tsd_t *tsd; +ctl_refresh(tsdn_t *tsdn) { unsigned i; - bool refreshed; - VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas); + ctl_arena_t *ctl_sarena = arenas_i(MALLCTL_ARENAS_ALL); + VARIABLE_ARRAY(arena_t *, tarenas, ctl_arenas->narenas); /* * Clear sum stats, since they will be merged into by * ctl_arena_refresh(). 
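The merge path above leans on the two small helpers introduced earlier, ctl_accum_arena_stats_u64() and accum_atomic_zu(): because every ctl-side reader and writer already holds ctl_mtx, a plain load/add/store with relaxed ordering is sufficient, and the atomic types only matter for counters that arena code updates concurrently. A self-contained C11 sketch of that accumulate-under-a-lock pattern, with hypothetical names rather than jemalloc's:

/* Accumulate one relaxed atomic counter into another while holding a mutex. */
#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

static pthread_mutex_t merge_mtx = PTHREAD_MUTEX_INITIALIZER;

/* Non-atomic read-modify-write; safe only because callers serialize on merge_mtx. */
static void accum_relaxed(atomic_size_t *dst, atomic_size_t *src) {
	size_t cur_dst = atomic_load_explicit(dst, memory_order_relaxed);
	size_t cur_src = atomic_load_explicit(src, memory_order_relaxed);
	atomic_store_explicit(dst, cur_dst + cur_src, memory_order_relaxed);
}

static void merge_counter(atomic_size_t *dst, atomic_size_t *src) {
	pthread_mutex_lock(&merge_mtx);
	accum_relaxed(dst, src);
	pthread_mutex_unlock(&merge_mtx);
}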
*/ - ctl_stats.arenas[ctl_stats.narenas].nthreads = 0; - ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]); - - tsd = tsd_fetch(); - for (i = 0, refreshed = false; i < ctl_stats.narenas; i++) { - tarenas[i] = arena_get(tsd, i, false, false); - if (tarenas[i] == NULL && !refreshed) { - tarenas[i] = arena_get(tsd, i, false, true); - refreshed = true; - } - } + ctl_arena_clear(ctl_sarena); - for (i = 0; i < ctl_stats.narenas; i++) { - if (tarenas[i] != NULL) - ctl_stats.arenas[i].nthreads = arena_nbound(i); - else - ctl_stats.arenas[i].nthreads = 0; + for (i = 0; i < ctl_arenas->narenas; i++) { + tarenas[i] = arena_get(tsdn, i, false); } - for (i = 0; i < ctl_stats.narenas; i++) { + for (i = 0; i < ctl_arenas->narenas; i++) { + ctl_arena_t *ctl_arena = arenas_i(i); bool initialized = (tarenas[i] != NULL); - ctl_stats.arenas[i].initialized = initialized; - if (initialized) - ctl_arena_refresh(tarenas[i], i); + ctl_arena->initialized = initialized; + if (initialized) { + ctl_arena_refresh(tsdn, tarenas[i], ctl_sarena, i, + false); + } } if (config_stats) { - size_t base_allocated, base_resident, base_mapped; - base_stats_get(&base_allocated, &base_resident, &base_mapped); - ctl_stats.allocated = - ctl_stats.arenas[ctl_stats.narenas].allocated_small + - ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large + - ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge; - ctl_stats.active = - (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE); - ctl_stats.metadata = base_allocated + - ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped + - ctl_stats.arenas[ctl_stats.narenas].astats - .metadata_allocated; - ctl_stats.resident = base_resident + - ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped + - ((ctl_stats.arenas[ctl_stats.narenas].pactive + - ctl_stats.arenas[ctl_stats.narenas].pdirty) << LG_PAGE); - ctl_stats.mapped = base_mapped + - ctl_stats.arenas[ctl_stats.narenas].astats.mapped; - } - - ctl_epoch++; + ctl_stats->allocated = ctl_sarena->astats->allocated_small + + atomic_load_zu(&ctl_sarena->astats->astats.allocated_large, + ATOMIC_RELAXED); + ctl_stats->active = (ctl_sarena->pactive << LG_PAGE); + ctl_stats->metadata = atomic_load_zu( + &ctl_sarena->astats->astats.base, ATOMIC_RELAXED) + + atomic_load_zu(&ctl_sarena->astats->astats.internal, + ATOMIC_RELAXED); + ctl_stats->metadata_thp = atomic_load_zu( + &ctl_sarena->astats->astats.metadata_thp, ATOMIC_RELAXED); + ctl_stats->resident = atomic_load_zu( + &ctl_sarena->astats->astats.resident, ATOMIC_RELAXED); + ctl_stats->mapped = atomic_load_zu( + &ctl_sarena->astats->astats.mapped, ATOMIC_RELAXED); + ctl_stats->retained = atomic_load_zu( + &ctl_sarena->astats->astats.retained, ATOMIC_RELAXED); + + ctl_background_thread_stats_read(tsdn); + +#define READ_GLOBAL_MUTEX_PROF_DATA(i, mtx) \ + malloc_mutex_lock(tsdn, &mtx); \ + malloc_mutex_prof_read(tsdn, &ctl_stats->mutex_prof_data[i], &mtx); \ + malloc_mutex_unlock(tsdn, &mtx); + + if (config_prof && opt_prof) { + READ_GLOBAL_MUTEX_PROF_DATA(global_prof_mutex_prof, + bt2gctx_mtx); + } + if (have_background_thread) { + READ_GLOBAL_MUTEX_PROF_DATA( + global_prof_mutex_background_thread, + background_thread_lock); + } else { + memset(&ctl_stats->mutex_prof_data[ + global_prof_mutex_background_thread], 0, + sizeof(mutex_prof_data_t)); + } + /* We own ctl mutex already. 
*/ + malloc_mutex_prof_read(tsdn, + &ctl_stats->mutex_prof_data[global_prof_mutex_ctl], + &ctl_mtx); +#undef READ_GLOBAL_MUTEX_PROF_DATA + } + ctl_arenas->epoch++; } static bool -ctl_init(void) -{ +ctl_init(tsd_t *tsd) { bool ret; + tsdn_t *tsdn = tsd_tsdn(tsd); - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_lock(tsdn, &ctl_mtx); if (!ctl_initialized) { + ctl_arena_t *ctl_sarena, *ctl_darena; + unsigned i; + + /* + * Allocate demand-zeroed space for pointers to the full + * range of supported arena indices. + */ + if (ctl_arenas == NULL) { + ctl_arenas = (ctl_arenas_t *)base_alloc(tsdn, + b0get(), sizeof(ctl_arenas_t), QUANTUM); + if (ctl_arenas == NULL) { + ret = true; + goto label_return; + } + } + + if (config_stats && ctl_stats == NULL) { + ctl_stats = (ctl_stats_t *)base_alloc(tsdn, b0get(), + sizeof(ctl_stats_t), QUANTUM); + if (ctl_stats == NULL) { + ret = true; + goto label_return; + } + } + /* - * Allocate space for one extra arena stats element, which - * contains summed stats across all arenas. + * Allocate space for the current full range of arenas + * here rather than doing it lazily elsewhere, in order + * to limit when OOM-caused errors can occur. */ - ctl_stats.narenas = narenas_total_get(); - ctl_stats.arenas = (ctl_arena_stats_t *)a0malloc( - (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t)); - if (ctl_stats.arenas == NULL) { + if ((ctl_sarena = arenas_i_impl(tsd, MALLCTL_ARENAS_ALL, false, + true)) == NULL) { ret = true; goto label_return; } - memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) * - sizeof(ctl_arena_stats_t)); + ctl_sarena->initialized = true; + if ((ctl_darena = arenas_i_impl(tsd, MALLCTL_ARENAS_DESTROYED, + false, true)) == NULL) { + ret = true; + goto label_return; + } + ctl_arena_clear(ctl_darena); /* - * Initialize all stats structures, regardless of whether they - * ever get used. Lazy initialization would allow errors to - * cause inconsistent state to be viewable by the application. + * Don't toggle ctl_darena to initialized until an arena is + * actually destroyed, so that arena.<i>.initialized can be used + * to query whether the stats are relevant. 
*/ - if (config_stats) { - unsigned i; - for (i = 0; i <= ctl_stats.narenas; i++) { - if (ctl_arena_init(&ctl_stats.arenas[i])) { - unsigned j; - for (j = 0; j < i; j++) { - a0dalloc( - ctl_stats.arenas[j].lstats); - a0dalloc( - ctl_stats.arenas[j].hstats); - } - a0dalloc(ctl_stats.arenas); - ctl_stats.arenas = NULL; - ret = true; - goto label_return; - } + + ctl_arenas->narenas = narenas_total_get(); + for (i = 0; i < ctl_arenas->narenas; i++) { + if (arenas_i_impl(tsd, i, false, true) == NULL) { + ret = true; + goto label_return; } } - ctl_stats.arenas[ctl_stats.narenas].initialized = true; - ctl_epoch = 0; - ctl_refresh(); + ql_new(&ctl_arenas->destroyed); + ctl_refresh(tsdn); + ctl_initialized = true; } ret = false; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + malloc_mutex_unlock(tsdn, &ctl_mtx); + return ret; } static int -ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp, - size_t *depthp) -{ +ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp, + size_t *mibp, size_t *depthp) { int ret; const char *elm, *tdot, *dot; size_t elen, i, j; @@ -827,9 +1101,10 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp, if (strlen(child->name) == elen && strncmp(elm, child->name, elen) == 0) { node = child; - if (nodesp != NULL) + if (nodesp != NULL) { nodesp[i] = (const ctl_node_t *)node; + } mibp[i] = j; break; } @@ -850,14 +1125,15 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp, } inode = ctl_indexed_node(node->children); - node = inode->index(mibp, *depthp, (size_t)index); + node = inode->index(tsdn, mibp, *depthp, (size_t)index); if (node == NULL) { ret = ENOENT; goto label_return; } - if (nodesp != NULL) + if (nodesp != NULL) { nodesp[i] = (const ctl_node_t *)node; + } mibp[i] = (size_t)index; } @@ -890,33 +1166,33 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp, ret = 0; label_return: - return (ret); + return ret; } int -ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) -{ +ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) { int ret; size_t depth; ctl_node_t const *nodes[CTL_MAX_DEPTH]; size_t mib[CTL_MAX_DEPTH]; const ctl_named_node_t *node; - if (!ctl_initialized && ctl_init()) { + if (!ctl_initialized && ctl_init(tsd)) { ret = EAGAIN; goto label_return; } depth = CTL_MAX_DEPTH; - ret = ctl_lookup(name, nodes, mib, &depth); - if (ret != 0) + ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth); + if (ret != 0) { goto label_return; + } node = ctl_named_node(nodes[depth-1]); - if (node != NULL && node->ctl) - ret = node->ctl(mib, depth, oldp, oldlenp, newp, newlen); - else { + if (node != NULL && node->ctl) { + ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen); + } else { /* The name refers to a partial path through the ctl tree. 
*/ ret = ENOENT; } @@ -926,29 +1202,27 @@ label_return: } int -ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp) -{ +ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) { int ret; - if (!ctl_initialized && ctl_init()) { + if (!ctl_initialized && ctl_init(tsd)) { ret = EAGAIN; goto label_return; } - ret = ctl_lookup(name, NULL, mibp, miblenp); + ret = ctl_lookup(tsd_tsdn(tsd), name, NULL, mibp, miblenp); label_return: return(ret); } int -ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ +ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; const ctl_named_node_t *node; size_t i; - if (!ctl_initialized && ctl_init()) { + if (!ctl_initialized && ctl_init(tsd)) { ret = EAGAIN; goto label_return; } @@ -970,7 +1244,7 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, /* Indexed element. */ inode = ctl_indexed_node(node->children); - node = inode->index(mib, miblen, mib[i]); + node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]); if (node == NULL) { ret = ENOENT; goto label_return; @@ -979,9 +1253,9 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, } /* Call the ctl function. */ - if (node && node->ctl) - ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen); - else { + if (node && node->ctl) { + ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen); + } else { /* Partial MIB. */ ret = ENOENT; } @@ -991,56 +1265,50 @@ label_return: } bool -ctl_boot(void) -{ - - if (malloc_mutex_init(&ctl_mtx)) - return (true); +ctl_boot(void) { + if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL, + malloc_mutex_rank_exclusive)) { + return true; + } ctl_initialized = false; - return (false); + return false; } void -ctl_prefork(void) -{ - - malloc_mutex_prefork(&ctl_mtx); +ctl_prefork(tsdn_t *tsdn) { + malloc_mutex_prefork(tsdn, &ctl_mtx); } void -ctl_postfork_parent(void) -{ - - malloc_mutex_postfork_parent(&ctl_mtx); +ctl_postfork_parent(tsdn_t *tsdn) { + malloc_mutex_postfork_parent(tsdn, &ctl_mtx); } void -ctl_postfork_child(void) -{ - - malloc_mutex_postfork_child(&ctl_mtx); +ctl_postfork_child(tsdn_t *tsdn) { + malloc_mutex_postfork_child(tsdn, &ctl_mtx); } /******************************************************************************/ /* *_ctl() functions. 
*/ -#define READONLY() do { \ +#define READONLY() do { \ if (newp != NULL || newlen != 0) { \ ret = EPERM; \ goto label_return; \ } \ } while (0) -#define WRITEONLY() do { \ +#define WRITEONLY() do { \ if (oldp != NULL || oldlenp != NULL) { \ ret = EPERM; \ goto label_return; \ } \ } while (0) -#define READ_XOR_WRITE() do { \ +#define READ_XOR_WRITE() do { \ if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \ newlen != 0)) { \ ret = EPERM; \ @@ -1048,7 +1316,7 @@ ctl_postfork_child(void) } \ } while (0) -#define READ(v, t) do { \ +#define READ(v, t) do { \ if (oldp != NULL && oldlenp != NULL) { \ if (*oldlenp != sizeof(t)) { \ size_t copylen = (sizeof(t) <= *oldlenp) \ @@ -1061,7 +1329,7 @@ ctl_postfork_child(void) } \ } while (0) -#define WRITE(v, t) do { \ +#define WRITE(v, t) do { \ if (newp != NULL) { \ if (newlen != sizeof(t)) { \ ret = EINVAL; \ @@ -1071,101 +1339,109 @@ ctl_postfork_child(void) } \ } while (0) +#define MIB_UNSIGNED(v, i) do { \ + if (mib[i] > UINT_MAX) { \ + ret = EFAULT; \ + goto label_return; \ + } \ + v = (unsigned)mib[i]; \ +} while (0) + /* * There's a lot of code duplication in the following macros due to limitations * in how nested cpp macros are expanded. */ -#define CTL_RO_CLGEN(c, l, n, v, t) \ +#define CTL_RO_CLGEN(c, l, n, v, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ - if (!(c)) \ - return (ENOENT); \ - if (l) \ - malloc_mutex_lock(&ctl_mtx); \ + if (!(c)) { \ + return ENOENT; \ + } \ + if (l) { \ + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ + } \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ - if (l) \ - malloc_mutex_unlock(&ctl_mtx); \ - return (ret); \ + if (l) { \ + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ + } \ + return ret; \ } -#define CTL_RO_CGEN(c, n, v, t) \ +#define CTL_RO_CGEN(c, n, v, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ - if (!(c)) \ - return (ENOENT); \ - malloc_mutex_lock(&ctl_mtx); \ + if (!(c)) { \ + return ENOENT; \ + } \ + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ - malloc_mutex_unlock(&ctl_mtx); \ - return (ret); \ + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ + return ret; \ } -#define CTL_RO_GEN(n, v, t) \ +#define CTL_RO_GEN(n, v, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ - malloc_mutex_lock(&ctl_mtx); \ + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ - malloc_mutex_unlock(&ctl_mtx); \ - return (ret); \ + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ + return ret; \ } /* * ctl_mtx is not acquired, under the assumption that no pertinent data will * mutate during the call. 
*/ -#define CTL_RO_NL_CGEN(c, n, v, t) \ +#define CTL_RO_NL_CGEN(c, n, v, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ - if (!(c)) \ - return (ENOENT); \ + if (!(c)) { \ + return ENOENT; \ + } \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ - return (ret); \ + return ret; \ } -#define CTL_RO_NL_GEN(n, v, t) \ +#define CTL_RO_NL_GEN(n, v, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ @@ -1175,45 +1451,42 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ \ ret = 0; \ label_return: \ - return (ret); \ + return ret; \ } -#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \ +#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ - tsd_t *tsd; \ \ - if (!(c)) \ - return (ENOENT); \ + if (!(c)) { \ + return ENOENT; \ + } \ READONLY(); \ - tsd = tsd_fetch(); \ oldval = (m(tsd)); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ - return (ret); \ + return ret; \ } -#define CTL_RO_BOOL_CONFIG_GEN(n) \ +#define CTL_RO_CONFIG_GEN(n, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ - bool oldval; \ + t oldval; \ \ READONLY(); \ oldval = n; \ - READ(oldval, bool); \ + READ(oldval, t); \ \ ret = 0; \ label_return: \ - return (ret); \ + return ret; \ } /******************************************************************************/ @@ -1221,57 +1494,187 @@ label_return: \ CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *) static int -epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ +epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; UNUSED uint64_t newval; - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); WRITE(newval, uint64_t); - if (newp != NULL) - ctl_refresh(); - READ(ctl_epoch, uint64_t); + if (newp != NULL) { + ctl_refresh(tsd_tsdn(tsd)); + } + READ(ctl_arenas->epoch, uint64_t); + + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; +} + +static int +background_thread_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + bool oldval; + + if (!have_background_thread) { + return ENOENT; + } + background_thread_ctl_init(tsd_tsdn(tsd)); + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); + if (newp == NULL) { + oldval = background_thread_enabled(); + READ(oldval, bool); + } else { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + oldval = background_thread_enabled(); + READ(oldval, bool); + + bool newval = *(bool *)newp; 
+ if (newval == oldval) { + ret = 0; + goto label_return; + } + + background_thread_enabled_set(tsd_tsdn(tsd), newval); + if (newval) { + if (!can_enable_background_thread) { + malloc_printf("<jemalloc>: Error in dlsym(" + "RTLD_NEXT, \"pthread_create\"). Cannot " + "enable background_thread\n"); + ret = EFAULT; + goto label_return; + } + if (background_threads_enable(tsd)) { + ret = EFAULT; + goto label_return; + } + } else { + if (background_threads_disable(tsd)) { + ret = EFAULT; + goto label_return; + } + } + } + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + + return ret; +} + +static int +max_background_threads_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + size_t oldval; + + if (!have_background_thread) { + return ENOENT; + } + background_thread_ctl_init(tsd_tsdn(tsd)); + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); + if (newp == NULL) { + oldval = max_background_threads; + READ(oldval, size_t); + } else { + if (newlen != sizeof(size_t)) { + ret = EINVAL; + goto label_return; + } + oldval = max_background_threads; + READ(oldval, size_t); + + size_t newval = *(size_t *)newp; + if (newval == oldval) { + ret = 0; + goto label_return; + } + if (newval > opt_max_background_threads) { + ret = EINVAL; + goto label_return; + } + if (background_thread_enabled()) { + if (!can_enable_background_thread) { + malloc_printf("<jemalloc>: Error in dlsym(" + "RTLD_NEXT, \"pthread_create\"). Cannot " + "enable background_thread\n"); + ret = EFAULT; + goto label_return; + } + background_thread_enabled_set(tsd_tsdn(tsd), false); + if (background_threads_disable(tsd)) { + ret = EFAULT; + goto label_return; + } + max_background_threads = newval; + background_thread_enabled_set(tsd_tsdn(tsd), true); + if (background_threads_enable(tsd)) { + ret = EFAULT; + goto label_return; + } + } else { + max_background_threads = newval; + } + } ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + + return ret; } /******************************************************************************/ -CTL_RO_BOOL_CONFIG_GEN(config_cache_oblivious) -CTL_RO_BOOL_CONFIG_GEN(config_debug) -CTL_RO_BOOL_CONFIG_GEN(config_fill) -CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock) -CTL_RO_BOOL_CONFIG_GEN(config_munmap) -CTL_RO_BOOL_CONFIG_GEN(config_prof) -CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc) -CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind) -CTL_RO_BOOL_CONFIG_GEN(config_stats) -CTL_RO_BOOL_CONFIG_GEN(config_tcache) -CTL_RO_BOOL_CONFIG_GEN(config_tls) -CTL_RO_BOOL_CONFIG_GEN(config_utrace) -CTL_RO_BOOL_CONFIG_GEN(config_valgrind) -CTL_RO_BOOL_CONFIG_GEN(config_xmalloc) +CTL_RO_CONFIG_GEN(config_cache_oblivious, bool) +CTL_RO_CONFIG_GEN(config_debug, bool) +CTL_RO_CONFIG_GEN(config_fill, bool) +CTL_RO_CONFIG_GEN(config_lazy_lock, bool) +CTL_RO_CONFIG_GEN(config_malloc_conf, const char *) +CTL_RO_CONFIG_GEN(config_prof, bool) +CTL_RO_CONFIG_GEN(config_prof_libgcc, bool) +CTL_RO_CONFIG_GEN(config_prof_libunwind, bool) +CTL_RO_CONFIG_GEN(config_stats, bool) +CTL_RO_CONFIG_GEN(config_utrace, bool) +CTL_RO_CONFIG_GEN(config_xmalloc, bool) /******************************************************************************/ CTL_RO_NL_GEN(opt_abort, opt_abort, bool) +CTL_RO_NL_GEN(opt_abort_conf, 
opt_abort_conf, bool) +CTL_RO_NL_GEN(opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp], + const char *) +CTL_RO_NL_GEN(opt_retain, opt_retain, bool) CTL_RO_NL_GEN(opt_dss, opt_dss, const char *) -CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t) -CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t) -CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t) +CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned) +CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena], + const char *) +CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool) +CTL_RO_NL_GEN(opt_max_background_threads, opt_max_background_threads, size_t) +CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t) +CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t) CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool) +CTL_RO_NL_GEN(opt_stats_print_opts, opt_stats_print_opts, const char *) CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *) -CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t) -CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool) CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool) CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool) CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool) -CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool) -CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t) +CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool) +CTL_RO_NL_GEN(opt_thp, thp_mode_names[opt_thp], const char *) +CTL_RO_NL_GEN(opt_lg_extent_max_active_fit, opt_lg_extent_max_active_fit, + size_t) +CTL_RO_NL_GEN(opt_lg_tcache_max, opt_lg_tcache_max, ssize_t) CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *) CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) @@ -1287,53 +1690,59 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool) /******************************************************************************/ static int -thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ +thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; - tsd_t *tsd; arena_t *oldarena; unsigned newind, oldind; - tsd = tsd_fetch(); oldarena = arena_choose(tsd, NULL); - if (oldarena == NULL) - return (EAGAIN); - - malloc_mutex_lock(&ctl_mtx); - newind = oldind = oldarena->ind; + if (oldarena == NULL) { + return EAGAIN; + } + newind = oldind = arena_ind_get(oldarena); WRITE(newind, unsigned); READ(oldind, unsigned); + if (newind != oldind) { arena_t *newarena; - if (newind >= ctl_stats.narenas) { + if (newind >= narenas_total_get()) { /* New arena index is out of range. */ ret = EFAULT; goto label_return; } + if (have_percpu_arena && + PERCPU_ARENA_ENABLED(opt_percpu_arena)) { + if (newind < percpu_arena_ind_limit(opt_percpu_arena)) { + /* + * If perCPU arena is enabled, thread_arena + * control is not allowed for the auto arena + * range. + */ + ret = EPERM; + goto label_return; + } + } + /* Initialize arena if necessary. */ - newarena = arena_get(tsd, newind, true, true); + newarena = arena_get(tsd_tsdn(tsd), newind, true); if (newarena == NULL) { ret = EAGAIN; goto label_return; } /* Set new arena/tcache associations. 
*/ arena_migrate(tsd, oldind, newind); - if (config_tcache) { - tcache_t *tcache = tsd_tcache_get(tsd); - if (tcache != NULL) { - tcache_arena_reassociate(tcache, oldarena, - newarena); - } + if (tcache_available(tsd)) { + tcache_arena_reassociate(tsd_tsdn(tsd), + tsd_tcachep_get(tsd), newarena); } } ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + return ret; } CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get, @@ -1346,100 +1755,94 @@ CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp, tsd_thread_deallocatedp_get, uint64_t *) static int -thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ +thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; - if (!config_tcache) - return (ENOENT); - - oldval = tcache_enabled_get(); + oldval = tcache_enabled_get(tsd); if (newp != NULL) { if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; } - tcache_enabled_set(*(bool *)newp); + tcache_enabled_set(tsd, *(bool *)newp); } READ(oldval, bool); ret = 0; label_return: - return (ret); + return ret; } static int -thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ +thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; - if (!config_tcache) - return (ENOENT); + if (!tcache_available(tsd)) { + ret = EFAULT; + goto label_return; + } READONLY(); WRITEONLY(); - tcache_flush(); + tcache_flush(tsd); ret = 0; label_return: - return (ret); + return ret; } static int -thread_prof_name_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ +thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; - if (!config_prof) - return (ENOENT); + if (!config_prof) { + return ENOENT; + } READ_XOR_WRITE(); if (newp != NULL) { - tsd_t *tsd; - if (newlen != sizeof(const char *)) { ret = EINVAL; goto label_return; } - tsd = tsd_fetch(); - if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) != - 0) + 0) { goto label_return; + } } else { - const char *oldname = prof_thread_name_get(); + const char *oldname = prof_thread_name_get(tsd); READ(oldname, const char *); } ret = 0; label_return: - return (ret); + return ret; } static int -thread_prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ +thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; - if (!config_prof) - return (ENOENT); + if (!config_prof) { + return ENOENT; + } - oldval = prof_thread_active_get(); + oldval = prof_thread_active_get(tsd); if (newp != NULL) { if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; } - if (prof_thread_active_set(*(bool *)newp)) { + if (prof_thread_active_set(tsd, *(bool *)newp)) { ret = EAGAIN; goto label_return; } @@ -1448,25 +1851,17 @@ thread_prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, ret = 0; label_return: - return (ret); + return ret; } /******************************************************************************/ static int -tcache_create_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ 
+tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; - tsd_t *tsd; unsigned tcache_ind; - if (!config_tcache) - return (ENOENT); - - tsd = tsd_fetch(); - - malloc_mutex_lock(&ctl_mtx); READONLY(); if (tcaches_create(tsd, &tcache_ind)) { ret = EFAULT; @@ -1476,23 +1871,15 @@ tcache_create_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + return ret; } static int -tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ +tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; - tsd_t *tsd; unsigned tcache_ind; - if (!config_tcache) - return (ENOENT); - - tsd = tsd_fetch(); - WRITEONLY(); tcache_ind = UINT_MAX; WRITE(tcache_ind, unsigned); @@ -1504,22 +1891,15 @@ tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, ret = 0; label_return: - return (ret); + return ret; } static int -tcache_destroy_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ +tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; - tsd_t *tsd; unsigned tcache_ind; - if (!config_tcache) - return (ENOENT); - - tsd = tsd_fetch(); - WRITEONLY(); tcache_ind = UINT_MAX; WRITE(tcache_ind, unsigned); @@ -1531,71 +1911,239 @@ tcache_destroy_ctl(const size_t *mib, size_t miblen, void *oldp, ret = 0; label_return: - return (ret); + return ret; } /******************************************************************************/ -/* ctl_mutex must be held during execution of this function. */ +static int +arena_i_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + tsdn_t *tsdn = tsd_tsdn(tsd); + unsigned arena_ind; + bool initialized; + + READONLY(); + MIB_UNSIGNED(arena_ind, 1); + + malloc_mutex_lock(tsdn, &ctl_mtx); + initialized = arenas_i(arena_ind)->initialized; + malloc_mutex_unlock(tsdn, &ctl_mtx); + + READ(initialized, bool); + + ret = 0; +label_return: + return ret; +} + static void -arena_purge(unsigned arena_ind) -{ - tsd_t *tsd; - unsigned i; - bool refreshed; - VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas); - - tsd = tsd_fetch(); - for (i = 0, refreshed = false; i < ctl_stats.narenas; i++) { - tarenas[i] = arena_get(tsd, i, false, false); - if (tarenas[i] == NULL && !refreshed) { - tarenas[i] = arena_get(tsd, i, false, true); - refreshed = true; - } - } +arena_i_decay(tsdn_t *tsdn, unsigned arena_ind, bool all) { + malloc_mutex_lock(tsdn, &ctl_mtx); + { + unsigned narenas = ctl_arenas->narenas; - if (arena_ind == ctl_stats.narenas) { - unsigned i; - for (i = 0; i < ctl_stats.narenas; i++) { - if (tarenas[i] != NULL) - arena_purge_all(tarenas[i]); + /* + * Access via index narenas is deprecated, and scheduled for + * removal in 6.0.0. + */ + if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == narenas) { + unsigned i; + VARIABLE_ARRAY(arena_t *, tarenas, narenas); + + for (i = 0; i < narenas; i++) { + tarenas[i] = arena_get(tsdn, i, false); + } + + /* + * No further need to hold ctl_mtx, since narenas and + * tarenas contain everything needed below. 
+ */ + malloc_mutex_unlock(tsdn, &ctl_mtx); + + for (i = 0; i < narenas; i++) { + if (tarenas[i] != NULL) { + arena_decay(tsdn, tarenas[i], false, + all); + } + } + } else { + arena_t *tarena; + + assert(arena_ind < narenas); + + tarena = arena_get(tsdn, arena_ind, false); + + /* No further need to hold ctl_mtx. */ + malloc_mutex_unlock(tsdn, &ctl_mtx); + + if (tarena != NULL) { + arena_decay(tsdn, tarena, false, all); + } } - } else { - assert(arena_ind < ctl_stats.narenas); - if (tarenas[arena_ind] != NULL) - arena_purge_all(tarenas[arena_ind]); } } static int -arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ +arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned arena_ind; + + READONLY(); + WRITEONLY(); + MIB_UNSIGNED(arena_ind, 1); + arena_i_decay(tsd_tsdn(tsd), arena_ind, false); + + ret = 0; +label_return: + return ret; +} + +static int +arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; + unsigned arena_ind; READONLY(); WRITEONLY(); - malloc_mutex_lock(&ctl_mtx); - arena_purge(mib[1]); - malloc_mutex_unlock(&ctl_mtx); + MIB_UNSIGNED(arena_ind, 1); + arena_i_decay(tsd_tsdn(tsd), arena_ind, true); ret = 0; label_return: - return (ret); + return ret; } static int -arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ +arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen, unsigned *arena_ind, + arena_t **arena) { + int ret; + + READONLY(); + WRITEONLY(); + MIB_UNSIGNED(*arena_ind, 1); + + *arena = arena_get(tsd_tsdn(tsd), *arena_ind, false); + if (*arena == NULL || arena_is_auto(*arena)) { + ret = EFAULT; + goto label_return; + } + + ret = 0; +label_return: + return ret; +} + +static void +arena_reset_prepare_background_thread(tsd_t *tsd, unsigned arena_ind) { + /* Temporarily disable the background thread during arena reset. 
*/ + if (have_background_thread) { + malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); + if (background_thread_enabled()) { + unsigned ind = arena_ind % ncpus; + background_thread_info_t *info = + &background_thread_info[ind]; + assert(info->state == background_thread_started); + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + info->state = background_thread_paused; + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + } + } +} + +static void +arena_reset_finish_background_thread(tsd_t *tsd, unsigned arena_ind) { + if (have_background_thread) { + if (background_thread_enabled()) { + unsigned ind = arena_ind % ncpus; + background_thread_info_t *info = + &background_thread_info[ind]; + assert(info->state == background_thread_paused); + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + info->state = background_thread_started; + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + } + malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); + } +} + +static int +arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned arena_ind; + arena_t *arena; + + ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp, + newp, newlen, &arena_ind, &arena); + if (ret != 0) { + return ret; + } + + arena_reset_prepare_background_thread(tsd, arena_ind); + arena_reset(tsd, arena); + arena_reset_finish_background_thread(tsd, arena_ind); + + return ret; +} + +static int +arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned arena_ind; + arena_t *arena; + ctl_arena_t *ctl_darena, *ctl_arena; + + ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp, + newp, newlen, &arena_ind, &arena); + if (ret != 0) { + goto label_return; + } + + if (arena_nthreads_get(arena, false) != 0 || arena_nthreads_get(arena, + true) != 0) { + ret = EFAULT; + goto label_return; + } + + arena_reset_prepare_background_thread(tsd, arena_ind); + /* Merge stats after resetting and purging arena. */ + arena_reset(tsd, arena); + arena_decay(tsd_tsdn(tsd), arena, false, true); + ctl_darena = arenas_i(MALLCTL_ARENAS_DESTROYED); + ctl_darena->initialized = true; + ctl_arena_refresh(tsd_tsdn(tsd), arena, ctl_darena, arena_ind, true); + /* Destroy arena. */ + arena_destroy(tsd, arena); + ctl_arena = arenas_i(arena_ind); + ctl_arena->initialized = false; + /* Record arena index for later recycling via arenas.create. 
*/ + ql_elm_new(ctl_arena, destroyed_link); + ql_tail_insert(&ctl_arenas->destroyed, ctl_arena, destroyed_link); + arena_reset_finish_background_thread(tsd, arena_ind); + + assert(ret == 0); +label_return: + return ret; +} + +static int +arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; const char *dss = NULL; - unsigned arena_ind = mib[1]; + unsigned arena_ind; dss_prec_t dss_prec_old = dss_prec_limit; dss_prec_t dss_prec = dss_prec_limit; - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); WRITE(dss, const char *); + MIB_UNSIGNED(arena_ind, 1); if (dss != NULL) { int i; bool match = false; @@ -1614,21 +2162,26 @@ arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, } } - if (arena_ind < ctl_stats.narenas) { - arena_t *arena = arena_get(tsd_fetch(), arena_ind, false, true); - if (arena == NULL || (dss_prec != dss_prec_limit && - arena_dss_prec_set(arena, dss_prec))) { + /* + * Access via index narenas is deprecated, and scheduled for removal in + * 6.0.0. + */ + if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == + ctl_arenas->narenas) { + if (dss_prec != dss_prec_limit && + extent_dss_prec_set(dss_prec)) { ret = EFAULT; goto label_return; } - dss_prec_old = arena_dss_prec_get(arena); + dss_prec_old = extent_dss_prec_get(); } else { - if (dss_prec != dss_prec_limit && - chunk_dss_prec_set(dss_prec)) { + arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false); + if (arena == NULL || (dss_prec != dss_prec_limit && + arena_dss_prec_set(arena, dss_prec))) { ret = EFAULT; goto label_return; } - dss_prec_old = chunk_dss_prec_get(); + dss_prec_old = arena_dss_prec_get(arena); } dss = dss_prec_names[dss_prec_old]; @@ -1636,26 +2189,27 @@ arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; } static int -arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ +arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) { int ret; - unsigned arena_ind = mib[1]; + unsigned arena_ind; arena_t *arena; - arena = arena_get(tsd_fetch(), arena_ind, false, true); + MIB_UNSIGNED(arena_ind, 1); + arena = arena_get(tsd_tsdn(tsd), arena_ind, false); if (arena == NULL) { ret = EFAULT; goto label_return; } if (oldp != NULL && oldlenp != NULL) { - size_t oldval = arena_lg_dirty_mult_get(arena); + size_t oldval = dirty ? arena_dirty_decay_ms_get(arena) : + arena_muzzy_decay_ms_get(arena); READ(oldval, ssize_t); } if (newp != NULL) { @@ -1663,7 +2217,9 @@ arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp, ret = EINVAL; goto label_return; } - if (arena_lg_dirty_mult_set(arena, *(ssize_t *)newp)) { + if (dirty ? 
arena_dirty_decay_ms_set(tsd_tsdn(tsd), arena, + *(ssize_t *)newp) : arena_muzzy_decay_ms_set(tsd_tsdn(tsd), + arena, *(ssize_t *)newp)) { ret = EFAULT; goto label_return; } @@ -1671,29 +2227,67 @@ arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp, ret = 0; label_return: - return (ret); + return ret; } static int -arena_i_chunk_hooks_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ +arena_i_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, + newlen, true); +} + +static int +arena_i_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, + newlen, false); +} + +static int +arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; - unsigned arena_ind = mib[1]; + unsigned arena_ind; arena_t *arena; - malloc_mutex_lock(&ctl_mtx); - if (arena_ind < narenas_total_get() && (arena = - arena_get(tsd_fetch(), arena_ind, false, true)) != NULL) { - if (newp != NULL) { - chunk_hooks_t old_chunk_hooks, new_chunk_hooks; - WRITE(new_chunk_hooks, chunk_hooks_t); - old_chunk_hooks = chunk_hooks_set(arena, - &new_chunk_hooks); - READ(old_chunk_hooks, chunk_hooks_t); + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + MIB_UNSIGNED(arena_ind, 1); + if (arena_ind < narenas_total_get()) { + extent_hooks_t *old_extent_hooks; + arena = arena_get(tsd_tsdn(tsd), arena_ind, false); + if (arena == NULL) { + if (arena_ind >= narenas_auto) { + ret = EFAULT; + goto label_return; + } + old_extent_hooks = + (extent_hooks_t *)&extent_hooks_default; + READ(old_extent_hooks, extent_hooks_t *); + if (newp != NULL) { + /* Initialize a new arena as a side effect. */ + extent_hooks_t *new_extent_hooks + JEMALLOC_CC_SILENCE_INIT(NULL); + WRITE(new_extent_hooks, extent_hooks_t *); + arena = arena_init(tsd_tsdn(tsd), arena_ind, + new_extent_hooks); + if (arena == NULL) { + ret = EFAULT; + goto label_return; + } + } } else { - chunk_hooks_t old_chunk_hooks = chunk_hooks_get(arena); - READ(old_chunk_hooks, chunk_hooks_t); + if (newp != NULL) { + extent_hooks_t *new_extent_hooks + JEMALLOC_CC_SILENCE_INIT(NULL); + WRITE(new_extent_hooks, extent_hooks_t *); + old_extent_hooks = extent_hooks_set(tsd, arena, + new_extent_hooks); + READ(old_extent_hooks, extent_hooks_t *); + } else { + old_extent_hooks = extent_hooks_get(arena); + READ(old_extent_hooks, extent_hooks_t *); + } } } else { ret = EFAULT; @@ -1701,85 +2295,100 @@ arena_i_chunk_hooks_ctl(const size_t *mib, size_t miblen, void *oldp, } ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; } -static const ctl_named_node_t * -arena_i_index(const size_t *mib, size_t miblen, size_t i) -{ - const ctl_named_node_t * ret; +static int +arena_i_retain_grow_limit_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned arena_ind; + arena_t *arena; - malloc_mutex_lock(&ctl_mtx); - if (i > ctl_stats.narenas) { - ret = NULL; - goto label_return; + if (!opt_retain) { + /* Only relevant when retain is enabled. 
*/ + return ENOENT; + } + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + MIB_UNSIGNED(arena_ind, 1); + if (arena_ind < narenas_total_get() && (arena = + arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) { + size_t old_limit, new_limit; + if (newp != NULL) { + WRITE(new_limit, size_t); + } + bool err = arena_retain_grow_limit_get_set(tsd, arena, + &old_limit, newp != NULL ? &new_limit : NULL); + if (!err) { + READ(old_limit, size_t); + ret = 0; + } else { + ret = EFAULT; + } + } else { + ret = EFAULT; + } +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; +} + +static const ctl_named_node_t * +arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { + const ctl_named_node_t *ret; + + malloc_mutex_lock(tsdn, &ctl_mtx); + switch (i) { + case MALLCTL_ARENAS_ALL: + case MALLCTL_ARENAS_DESTROYED: + break; + default: + if (i > ctl_arenas->narenas) { + ret = NULL; + goto label_return; + } + break; } ret = super_arena_i_node; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + malloc_mutex_unlock(tsdn, &ctl_mtx); + return ret; } /******************************************************************************/ static int -arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ +arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned narenas; - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); READONLY(); if (*oldlenp != sizeof(unsigned)) { ret = EINVAL; goto label_return; } - narenas = ctl_stats.narenas; + narenas = ctl_arenas->narenas; READ(narenas, unsigned); ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} - -static int -arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - unsigned nread, i; - - malloc_mutex_lock(&ctl_mtx); - READONLY(); - if (*oldlenp != ctl_stats.narenas * sizeof(bool)) { - ret = EINVAL; - nread = (*oldlenp < ctl_stats.narenas * sizeof(bool)) - ? (*oldlenp / sizeof(bool)) : ctl_stats.narenas; - } else { - ret = 0; - nread = ctl_stats.narenas; - } - - for (i = 0; i < nread; i++) - ((bool *)oldp)[i] = ctl_stats.arenas[i].initialized; - -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; } static int -arenas_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ +arenas_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) { int ret; if (oldp != NULL && oldlenp != NULL) { - size_t oldval = arena_lg_dirty_mult_default_get(); + size_t oldval = (dirty ? arena_dirty_decay_ms_default_get() : + arena_muzzy_decay_ms_default_get()); READ(oldval, ssize_t); } if (newp != NULL) { @@ -1787,7 +2396,8 @@ arenas_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp, ret = EINVAL; goto label_return; } - if (arena_lg_dirty_mult_default_set(*(ssize_t *)newp)) { + if (dirty ? 
arena_dirty_decay_ms_default_set(*(ssize_t *)newp) + : arena_muzzy_decay_ms_default_set(*(ssize_t *)newp)) { ret = EFAULT; goto label_return; } @@ -1795,193 +2405,229 @@ arenas_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp, ret = 0; label_return: - return (ret); + return ret; +} + +static int +arenas_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, + newlen, true); +} + +static int +arenas_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, + newlen, false); } CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t) CTL_RO_NL_GEN(arenas_page, PAGE, size_t) -CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t) +CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t) CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned) -CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned) -CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t) -CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t) -CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t) +CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned) +CTL_RO_NL_GEN(arenas_bin_i_size, bin_infos[mib[2]].reg_size, size_t) +CTL_RO_NL_GEN(arenas_bin_i_nregs, bin_infos[mib[2]].nregs, uint32_t) +CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t) static const ctl_named_node_t * -arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i) -{ - - if (i > NBINS) - return (NULL); - return (super_arenas_bin_i_node); +arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { + if (i > NBINS) { + return NULL; + } + return super_arenas_bin_i_node; } -CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned) -CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+mib[2]), size_t) +CTL_RO_NL_GEN(arenas_nlextents, NSIZES - NBINS, unsigned) +CTL_RO_NL_GEN(arenas_lextent_i_size, sz_index2size(NBINS+(szind_t)mib[2]), + size_t) static const ctl_named_node_t * -arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i) -{ - - if (i > nlclasses) - return (NULL); - return (super_arenas_lrun_i_node); +arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, + size_t i) { + if (i > NSIZES - NBINS) { + return NULL; + } + return super_arenas_lextent_i_node; } -CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned) -CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+mib[2]), size_t) -static const ctl_named_node_t * -arenas_hchunk_i_index(const size_t *mib, size_t miblen, size_t i) -{ +static int +arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + extent_hooks_t *extent_hooks; + unsigned arena_ind; + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); - if (i > nhclasses) - return (NULL); - return (super_arenas_hchunk_i_node); + extent_hooks = (extent_hooks_t *)&extent_hooks_default; + WRITE(extent_hooks, extent_hooks_t *); + if ((arena_ind = ctl_arena_init(tsd, extent_hooks)) == UINT_MAX) { + ret = EAGAIN; + goto label_return; + } + READ(arena_ind, unsigned); + + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; } static int -arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t 
newlen) -{ +arenas_lookup_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; - unsigned narenas; + unsigned arena_ind; + void *ptr; + extent_t *extent; + arena_t *arena; - malloc_mutex_lock(&ctl_mtx); - READONLY(); - if (ctl_grow()) { - ret = EAGAIN; + ptr = NULL; + ret = EINVAL; + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + WRITE(ptr, void *); + extent = iealloc(tsd_tsdn(tsd), ptr); + if (extent == NULL) + goto label_return; + + arena = extent_arena_get(extent); + if (arena == NULL) goto label_return; - } - narenas = ctl_stats.narenas - 1; - READ(narenas, unsigned); + + arena_ind = arena_ind_get(arena); + READ(arena_ind, unsigned); ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; } /******************************************************************************/ static int -prof_thread_active_init_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ +prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; - if (!config_prof) - return (ENOENT); + if (!config_prof) { + return ENOENT; + } if (newp != NULL) { if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; } - oldval = prof_thread_active_init_set(*(bool *)newp); - } else - oldval = prof_thread_active_init_get(); + oldval = prof_thread_active_init_set(tsd_tsdn(tsd), + *(bool *)newp); + } else { + oldval = prof_thread_active_init_get(tsd_tsdn(tsd)); + } READ(oldval, bool); ret = 0; label_return: - return (ret); + return ret; } static int -prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ +prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; - if (!config_prof) - return (ENOENT); + if (!config_prof) { + return ENOENT; + } if (newp != NULL) { if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; } - oldval = prof_active_set(*(bool *)newp); - } else - oldval = prof_active_get(); + oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp); + } else { + oldval = prof_active_get(tsd_tsdn(tsd)); + } READ(oldval, bool); ret = 0; label_return: - return (ret); + return ret; } static int -prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ +prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; const char *filename = NULL; - if (!config_prof) - return (ENOENT); + if (!config_prof) { + return ENOENT; + } WRITEONLY(); WRITE(filename, const char *); - if (prof_mdump(filename)) { + if (prof_mdump(tsd, filename)) { ret = EFAULT; goto label_return; } ret = 0; label_return: - return (ret); + return ret; } static int -prof_gdump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ +prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; - if (!config_prof) - return (ENOENT); + if (!config_prof) { + return ENOENT; + } if (newp != NULL) { if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; } - oldval = prof_gdump_set(*(bool *)newp); - } else - oldval = prof_gdump_get(); + oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp); + } 
else { + oldval = prof_gdump_get(tsd_tsdn(tsd)); + } READ(oldval, bool); ret = 0; label_return: - return (ret); + return ret; } static int -prof_reset_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ +prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; size_t lg_sample = lg_prof_sample; - tsd_t *tsd; - if (!config_prof) - return (ENOENT); + if (!config_prof) { + return ENOENT; + } WRITEONLY(); WRITE(lg_sample, size_t); - if (lg_sample >= (sizeof(uint64_t) << 3)) + if (lg_sample >= (sizeof(uint64_t) << 3)) { lg_sample = (sizeof(uint64_t) << 3) - 1; - - tsd = tsd_fetch(); + } prof_reset(tsd, lg_sample); ret = 0; label_return: - return (ret); + return ret; } CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t) @@ -1989,135 +2635,249 @@ CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t) /******************************************************************************/ -CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *) -CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t) -CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t) -CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t) -CTL_RO_CGEN(config_stats, stats_resident, ctl_stats.resident, size_t) -CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t) - -CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *) -CTL_RO_GEN(stats_arenas_i_lg_dirty_mult, ctl_stats.arenas[mib[2]].lg_dirty_mult, +CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t) +CTL_RO_CGEN(config_stats, stats_active, ctl_stats->active, size_t) +CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats->metadata, size_t) +CTL_RO_CGEN(config_stats, stats_metadata_thp, ctl_stats->metadata_thp, size_t) +CTL_RO_CGEN(config_stats, stats_resident, ctl_stats->resident, size_t) +CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats->mapped, size_t) +CTL_RO_CGEN(config_stats, stats_retained, ctl_stats->retained, size_t) + +CTL_RO_CGEN(config_stats, stats_background_thread_num_threads, + ctl_stats->background_thread.num_threads, size_t) +CTL_RO_CGEN(config_stats, stats_background_thread_num_runs, + ctl_stats->background_thread.num_runs, uint64_t) +CTL_RO_CGEN(config_stats, stats_background_thread_run_interval, + nstime_ns(&ctl_stats->background_thread.run_interval), uint64_t) + +CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *) +CTL_RO_GEN(stats_arenas_i_dirty_decay_ms, arenas_i(mib[2])->dirty_decay_ms, + ssize_t) +CTL_RO_GEN(stats_arenas_i_muzzy_decay_ms, arenas_i(mib[2])->muzzy_decay_ms, ssize_t) -CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned) -CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t) -CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t) +CTL_RO_GEN(stats_arenas_i_nthreads, arenas_i(mib[2])->nthreads, unsigned) +CTL_RO_GEN(stats_arenas_i_uptime, + nstime_ns(&arenas_i(mib[2])->astats->astats.uptime), uint64_t) +CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t) +CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t) +CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_mapped, - ctl_stats.arenas[mib[2]].astats.mapped, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_npurge, - ctl_stats.arenas[mib[2]].astats.npurge, uint64_t) 
-CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise, - ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_purged, - ctl_stats.arenas[mib[2]].astats.purged, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_mapped, - ctl_stats.arenas[mib[2]].astats.metadata_mapped, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_allocated, - ctl_stats.arenas[mib[2]].astats.metadata_allocated, size_t) + atomic_load_zu(&arenas_i(mib[2])->astats->astats.mapped, ATOMIC_RELAXED), + size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_retained, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.retained, ATOMIC_RELAXED), + size_t) + +CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.decay_dirty.npurge), uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.decay_dirty.nmadvise), uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.decay_dirty.purged), uint64_t) + +CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.decay_muzzy.npurge), uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.decay_muzzy.nmadvise), uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.decay_muzzy.purged), uint64_t) + +CTL_RO_CGEN(config_stats, stats_arenas_i_base, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.base, ATOMIC_RELAXED), + size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_internal, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED), + size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_thp, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.metadata_thp, + ATOMIC_RELAXED), size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.tcache_bytes, + ATOMIC_RELAXED), size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_resident, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.resident, ATOMIC_RELAXED), + size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated, - ctl_stats.arenas[mib[2]].allocated_small, size_t) + arenas_i(mib[2])->astats->allocated_small, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc, - ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t) + arenas_i(mib[2])->astats->nmalloc_small, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc, - ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t) + arenas_i(mib[2])->astats->ndalloc_small, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests, - ctl_stats.arenas[mib[2]].nrequests_small, uint64_t) + arenas_i(mib[2])->astats->nrequests_small, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated, - ctl_stats.arenas[mib[2]].astats.allocated_large, size_t) + atomic_load_zu(&arenas_i(mib[2])->astats->astats.allocated_large, + ATOMIC_RELAXED), size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc, - ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t) + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc, - ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t) + ctl_arena_stats_read_u64( + 
&arenas_i(mib[2])->astats->astats.ndalloc_large), uint64_t) +/* + * Note: "nmalloc" here instead of "nrequests" in the read. This is intentional. + */ CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests, - ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_huge_allocated, - ctl_stats.arenas[mib[2]].astats.allocated_huge, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nmalloc, - ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_huge_ndalloc, - ctl_stats.arenas[mib[2]].astats.ndalloc_huge, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nrequests, - ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t) /* Intentional. */ + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t) /* Intentional. */ + +/* Lock profiling related APIs below. */ +#define RO_MUTEX_CTL_GEN(n, l) \ +CTL_RO_CGEN(config_stats, stats_##n##_num_ops, \ + l.n_lock_ops, uint64_t) \ +CTL_RO_CGEN(config_stats, stats_##n##_num_wait, \ + l.n_wait_times, uint64_t) \ +CTL_RO_CGEN(config_stats, stats_##n##_num_spin_acq, \ + l.n_spin_acquired, uint64_t) \ +CTL_RO_CGEN(config_stats, stats_##n##_num_owner_switch, \ + l.n_owner_switches, uint64_t) \ +CTL_RO_CGEN(config_stats, stats_##n##_total_wait_time, \ + nstime_ns(&l.tot_wait_time), uint64_t) \ +CTL_RO_CGEN(config_stats, stats_##n##_max_wait_time, \ + nstime_ns(&l.max_wait_time), uint64_t) \ +CTL_RO_CGEN(config_stats, stats_##n##_max_num_thds, \ + l.max_n_thds, uint32_t) + +/* Global mutexes. */ +#define OP(mtx) \ + RO_MUTEX_CTL_GEN(mutexes_##mtx, \ + ctl_stats->mutex_prof_data[global_prof_mutex_##mtx]) +MUTEX_PROF_GLOBAL_MUTEXES +#undef OP + +/* Per arena mutexes */ +#define OP(mtx) RO_MUTEX_CTL_GEN(arenas_i_mutexes_##mtx, \ + arenas_i(mib[2])->astats->astats.mutex_prof_data[arena_prof_mutex_##mtx]) +MUTEX_PROF_ARENA_MUTEXES +#undef OP + +/* tcache bin mutex */ +RO_MUTEX_CTL_GEN(arenas_i_bins_j_mutex, + arenas_i(mib[2])->astats->bstats[mib[4]].mutex_data) +#undef RO_MUTEX_CTL_GEN + +/* Resets all mutex stats, including global, arena and bin mutexes. 
*/ +static int +stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + if (!config_stats) { + return ENOENT; + } -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc, - ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs, - ctl_stats.arenas[mib[2]].bstats[mib[4]].curregs, size_t) -CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t) -CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns, - ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns, - ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t) + tsdn_t *tsdn = tsd_tsdn(tsd); -static const ctl_named_node_t * -stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j) -{ +#define MUTEX_PROF_RESET(mtx) \ + malloc_mutex_lock(tsdn, &mtx); \ + malloc_mutex_prof_data_reset(tsdn, &mtx); \ + malloc_mutex_unlock(tsdn, &mtx); - if (j > NBINS) - return (NULL); - return (super_stats_arenas_i_bins_j_node); -} + /* Global mutexes: ctl and prof. */ + MUTEX_PROF_RESET(ctl_mtx); + if (have_background_thread) { + MUTEX_PROF_RESET(background_thread_lock); + } + if (config_prof && opt_prof) { + MUTEX_PROF_RESET(bt2gctx_mtx); + } -CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc, - ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc, - ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests, - ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns, - ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t) -static const ctl_named_node_t * -stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j) -{ + /* Per arena mutexes. 
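+ * Every initialized arena is visited below; slots for which arena_get()
+ * returns NULL are skipped, and each arena-level mutex plus every bin's lock
+ * goes through the same lock/reset/unlock sequence (MUTEX_PROF_RESET) used
+ * for the global mutexes above.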
*/ + unsigned n = narenas_total_get(); - if (j > nlclasses) - return (NULL); - return (super_stats_arenas_i_lruns_j_node); + for (unsigned i = 0; i < n; i++) { + arena_t *arena = arena_get(tsdn, i, false); + if (!arena) { + continue; + } + MUTEX_PROF_RESET(arena->large_mtx); + MUTEX_PROF_RESET(arena->extent_avail_mtx); + MUTEX_PROF_RESET(arena->extents_dirty.mtx); + MUTEX_PROF_RESET(arena->extents_muzzy.mtx); + MUTEX_PROF_RESET(arena->extents_retained.mtx); + MUTEX_PROF_RESET(arena->decay_dirty.mtx); + MUTEX_PROF_RESET(arena->decay_muzzy.mtx); + MUTEX_PROF_RESET(arena->tcache_ql_mtx); + MUTEX_PROF_RESET(arena->base->mtx); + + for (szind_t i = 0; i < NBINS; i++) { + bin_t *bin = &arena->bins[i]; + MUTEX_PROF_RESET(bin->lock); + } + } +#undef MUTEX_PROF_RESET + return 0; } -CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nmalloc, - ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_ndalloc, - ctl_stats.arenas[mib[2]].hstats[mib[4]].ndalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nrequests, - ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, /* Intentional. */ - uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks, - ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc, + arenas_i(mib[2])->astats->bstats[mib[4]].nmalloc, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc, + arenas_i(mib[2])->astats->bstats[mib[4]].ndalloc, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests, + arenas_i(mib[2])->astats->bstats[mib[4]].nrequests, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs, + arenas_i(mib[2])->astats->bstats[mib[4]].curregs, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nfills, + arenas_i(mib[2])->astats->bstats[mib[4]].nfills, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nflushes, + arenas_i(mib[2])->astats->bstats[mib[4]].nflushes, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs, + arenas_i(mib[2])->astats->bstats[mib[4]].nslabs, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs, + arenas_i(mib[2])->astats->bstats[mib[4]].reslabs, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs, + arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t) static const ctl_named_node_t * -stats_arenas_i_hchunks_j_index(const size_t *mib, size_t miblen, size_t j) -{ +stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, + size_t j) { + if (j > NBINS) { + return NULL; + } + return super_stats_arenas_i_bins_j_node; +} - if (j > nhclasses) - return (NULL); - return (super_stats_arenas_i_hchunks_j_node); +CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc), uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc), uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->lstats[mib[4]].nrequests), uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents, + arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t) + +static const ctl_named_node_t * +stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, + size_t j) { + if (j > NSIZES - NBINS) { + return NULL; + } + return 
super_stats_arenas_i_lextents_j_node; } static const ctl_named_node_t * -stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i) -{ - const ctl_named_node_t * ret; +stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { + const ctl_named_node_t *ret; + size_t a; - malloc_mutex_lock(&ctl_mtx); - if (i > ctl_stats.narenas || !ctl_stats.arenas[i].initialized) { + malloc_mutex_lock(tsdn, &ctl_mtx); + a = arenas_i2a_impl(i, true, true); + if (a == UINT_MAX || !ctl_arenas->arenas[a]->initialized) { ret = NULL; goto label_return; } ret = super_stats_arenas_i_node; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + malloc_mutex_unlock(tsdn, &ctl_mtx); + return ret; } diff --git a/deps/jemalloc/src/div.c b/deps/jemalloc/src/div.c new file mode 100644 index 000000000..808892a13 --- /dev/null +++ b/deps/jemalloc/src/div.c @@ -0,0 +1,55 @@ +#include "jemalloc/internal/jemalloc_preamble.h" + +#include "jemalloc/internal/div.h" + +#include "jemalloc/internal/assert.h" + +/* + * Suppose we have n = q * d, all integers. We know n and d, and want q = n / d. + * + * For any k, we have (here, all division is exact; not C-style rounding): + * floor(ceil(2^k / d) * n / 2^k) = floor((2^k + r) / d * n / 2^k), where + * r = (-2^k) mod d. + * + * Expanding this out: + * ... = floor(2^k / d * n / 2^k + r / d * n / 2^k) + * = floor(n / d + (r / d) * (n / 2^k)). + * + * The fractional part of n / d is 0 (because of the assumption that d divides n + * exactly), so we have: + * ... = n / d + floor((r / d) * (n / 2^k)) + * + * So that our initial expression is equal to the quantity we seek, so long as + * (r / d) * (n / 2^k) < 1. + * + * r is a remainder mod d, so r < d and r / d < 1 always. We can make + * n / 2 ^ k < 1 by setting k = 32. This gets us a value of magic that works. + */ + +void +div_init(div_info_t *div_info, size_t d) { + /* Nonsensical. */ + assert(d != 0); + /* + * This would make the value of magic too high to fit into a uint32_t + * (we would want magic = 2^32 exactly). This would mess with code gen + * on 32-bit machines. + */ + assert(d != 1); + + uint64_t two_to_k = ((uint64_t)1 << 32); + uint32_t magic = (uint32_t)(two_to_k / d); + + /* + * We want magic = ceil(2^k / d), but C gives us floor. We have to + * increment it unless the result was exact (i.e. unless d is a power of + * two). + */ + if (two_to_k % d != 0) { + magic++; + } + div_info->magic = magic; +#ifdef JEMALLOC_DEBUG + div_info->d = d; +#endif +} diff --git a/deps/jemalloc/src/extent.c b/deps/jemalloc/src/extent.c index 13f94411c..09d6d7718 100644 --- a/deps/jemalloc/src/extent.c +++ b/deps/jemalloc/src/extent.c @@ -1,53 +1,2177 @@ -#define JEMALLOC_EXTENT_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_EXTENT_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/extent_dss.h" +#include "jemalloc/internal/extent_mmap.h" +#include "jemalloc/internal/ph.h" +#include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/mutex_pool.h" + +/******************************************************************************/ +/* Data. */ + +rtree_t extents_rtree; +/* Keyed by the address of the extent_t being protected. 
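+ * In other words, extents do not embed a lock of their own: extent_lock()
+ * and extent_unlock() map the extent_t's address onto one of the mutexes in
+ * this shared pool, so unrelated extents may occasionally contend on the
+ * same underlying mutex.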
*/ +mutex_pool_t extent_mutex_pool; + +size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT; + +static const bitmap_info_t extents_bitmap_info = + BITMAP_INFO_INITIALIZER(NPSIZES+1); + +static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, + size_t size, size_t alignment, bool *zero, bool *commit, + unsigned arena_ind); +static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, + size_t size, bool committed, unsigned arena_ind); +static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, + size_t size, bool committed, unsigned arena_ind); +static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t offset, size_t length, unsigned arena_ind); +static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length, bool growing_retained); +static bool extent_decommit_default(extent_hooks_t *extent_hooks, + void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); +#ifdef PAGES_CAN_PURGE_LAZY +static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t offset, size_t length, unsigned arena_ind); +#endif +static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length, bool growing_retained); +#ifdef PAGES_CAN_PURGE_FORCED +static bool extent_purge_forced_default(extent_hooks_t *extent_hooks, + void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); +#endif +static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length, bool growing_retained); +#ifdef JEMALLOC_MAPS_COALESCE +static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t size_a, size_t size_b, bool committed, + unsigned arena_ind); +#endif +static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, + szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b, + bool growing_retained); +#ifdef JEMALLOC_MAPS_COALESCE +static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, + size_t size_a, void *addr_b, size_t size_b, bool committed, + unsigned arena_ind); +#endif +static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b, + bool growing_retained); + +const extent_hooks_t extent_hooks_default = { + extent_alloc_default, + extent_dalloc_default, + extent_destroy_default, + extent_commit_default, + extent_decommit_default +#ifdef PAGES_CAN_PURGE_LAZY + , + extent_purge_lazy_default +#else + , + NULL +#endif +#ifdef PAGES_CAN_PURGE_FORCED + , + extent_purge_forced_default +#else + , + NULL +#endif +#ifdef JEMALLOC_MAPS_COALESCE + , + extent_split_default, + extent_merge_default +#endif +}; + +/* Used exclusively for gdump triggering. */ +static atomic_zu_t curpages; +static atomic_zu_t highpages; + +/******************************************************************************/ +/* + * Function prototypes for static functions that are referenced prior to + * definition. 
+ */ + +static void extent_deregister(tsdn_t *tsdn, extent_t *extent); +static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr, + size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind, + bool *zero, bool *commit, bool growing_retained); +static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, + extent_t *extent, bool *coalesced, bool growing_retained); +static void extent_record(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent, + bool growing_retained); /******************************************************************************/ -JEMALLOC_INLINE_C size_t -extent_quantize(size_t size) -{ +ph_gen(UNUSED, extent_avail_, extent_tree_t, extent_t, ph_link, + extent_esnead_comp) + +typedef enum { + lock_result_success, + lock_result_failure, + lock_result_no_extent +} lock_result_t; + +static lock_result_t +extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm, + extent_t **result) { + extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree, + elm, true); + + if (extent1 == NULL) { + return lock_result_no_extent; + } + /* + * It's possible that the extent changed out from under us, and with it + * the leaf->extent mapping. We have to recheck while holding the lock. + */ + extent_lock(tsdn, extent1); + extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn, + &extents_rtree, elm, true); + + if (extent1 == extent2) { + *result = extent1; + return lock_result_success; + } else { + extent_unlock(tsdn, extent1); + return lock_result_failure; + } +} + +/* + * Returns a pool-locked extent_t * if there's one associated with the given + * address, and NULL otherwise. 
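+ * The lookup below retries on lock_result_failure because, as noted in
+ * extent_rtree_leaf_elm_try_lock(), the leaf's extent mapping can change
+ * between the unlocked read and acquisition of the extent's lock.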
+ */ +static extent_t * +extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr) { + extent_t *ret = NULL; + rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree, + rtree_ctx, (uintptr_t)addr, false, false); + if (elm == NULL) { + return NULL; + } + lock_result_t lock_result; + do { + lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret); + } while (lock_result == lock_result_failure); + return ret; +} + +extent_t * +extent_alloc(tsdn_t *tsdn, arena_t *arena) { + malloc_mutex_lock(tsdn, &arena->extent_avail_mtx); + extent_t *extent = extent_avail_first(&arena->extent_avail); + if (extent == NULL) { + malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); + return base_alloc_extent(tsdn, arena->base); + } + extent_avail_remove(&arena->extent_avail, extent); + malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); + return extent; +} + +void +extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { + malloc_mutex_lock(tsdn, &arena->extent_avail_mtx); + extent_avail_insert(&arena->extent_avail, extent); + malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); +} + +extent_hooks_t * +extent_hooks_get(arena_t *arena) { + return base_extent_hooks_get(arena->base); +} + +extent_hooks_t * +extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) { + background_thread_info_t *info; + if (have_background_thread) { + info = arena_background_thread_info_get(arena); + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + } + extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks); + if (have_background_thread) { + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + } + + return ret; +} + +static void +extent_hooks_assure_initialized(arena_t *arena, + extent_hooks_t **r_extent_hooks) { + if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) { + *r_extent_hooks = extent_hooks_get(arena); + } +} + +#ifndef JEMALLOC_JET +static +#endif +size_t +extent_size_quantize_floor(size_t size) { + size_t ret; + pszind_t pind; + + assert(size > 0); + assert((size & PAGE_MASK) == 0); + + pind = sz_psz2ind(size - sz_large_pad + 1); + if (pind == 0) { + /* + * Avoid underflow. This short-circuit would also do the right + * thing for all sizes in the range for which there are + * PAGE-spaced size classes, but it's simplest to just handle + * the one case that would cause erroneous results. + */ + return size; + } + ret = sz_pind2sz(pind - 1) + sz_large_pad; + assert(ret <= size); + return ret; +} + +#ifndef JEMALLOC_JET +static +#endif +size_t +extent_size_quantize_ceil(size_t size) { + size_t ret; + + assert(size > 0); + assert(size - sz_large_pad <= LARGE_MAXCLASS); + assert((size & PAGE_MASK) == 0); + + ret = extent_size_quantize_floor(size); + if (ret < size) { + /* + * Skip a quantization that may have an adequately large extent, + * because under-sized extents may be mixed in. This only + * happens when an unusual size is requested, i.e. for aligned + * allocation, and is just one of several places where linear + * search would potentially find sufficiently aligned available + * memory somewhere lower. + */ + ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) + + sz_large_pad; + } + return ret; +} + +/* Generate pairing heap functions. 
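+ * (ph_gen() expands to the extent_heap_new/empty/first/insert/remove
+ * operations used by the extents_* routines below, with heap order given by
+ * extent_snad_comp, i.e. serial number, then address.)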
*/ +ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp) + +bool +extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state, + bool delay_coalesce) { + if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS, + malloc_mutex_rank_exclusive)) { + return true; + } + for (unsigned i = 0; i < NPSIZES+1; i++) { + extent_heap_new(&extents->heaps[i]); + } + bitmap_init(extents->bitmap, &extents_bitmap_info, true); + extent_list_init(&extents->lru); + atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED); + extents->state = state; + extents->delay_coalesce = delay_coalesce; + return false; +} + +extent_state_t +extents_state_get(const extents_t *extents) { + return extents->state; +} + +size_t +extents_npages_get(extents_t *extents) { + return atomic_load_zu(&extents->npages, ATOMIC_RELAXED); +} + +static void +extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) { + malloc_mutex_assert_owner(tsdn, &extents->mtx); + assert(extent_state_get(extent) == extents->state); + + size_t size = extent_size_get(extent); + size_t psz = extent_size_quantize_floor(size); + pszind_t pind = sz_psz2ind(psz); + if (extent_heap_empty(&extents->heaps[pind])) { + bitmap_unset(extents->bitmap, &extents_bitmap_info, + (size_t)pind); + } + extent_heap_insert(&extents->heaps[pind], extent); + extent_list_append(&extents->lru, extent); + size_t npages = size >> LG_PAGE; + /* + * All modifications to npages hold the mutex (as asserted above), so we + * don't need an atomic fetch-add; we can get by with a load followed by + * a store. + */ + size_t cur_extents_npages = + atomic_load_zu(&extents->npages, ATOMIC_RELAXED); + atomic_store_zu(&extents->npages, cur_extents_npages + npages, + ATOMIC_RELAXED); +} + +static void +extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) { + malloc_mutex_assert_owner(tsdn, &extents->mtx); + assert(extent_state_get(extent) == extents->state); + + size_t size = extent_size_get(extent); + size_t psz = extent_size_quantize_floor(size); + pszind_t pind = sz_psz2ind(psz); + extent_heap_remove(&extents->heaps[pind], extent); + if (extent_heap_empty(&extents->heaps[pind])) { + bitmap_set(extents->bitmap, &extents_bitmap_info, + (size_t)pind); + } + extent_list_remove(&extents->lru, extent); + size_t npages = size >> LG_PAGE; + /* + * As in extents_insert_locked, we hold extents->mtx and so don't need + * atomic operations for updating extents->npages. + */ + size_t cur_extents_npages = + atomic_load_zu(&extents->npages, ATOMIC_RELAXED); + assert(cur_extents_npages >= npages); + atomic_store_zu(&extents->npages, + cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED); +} + +/* + * Find an extent with size [min_size, max_size) to satisfy the alignment + * requirement. For each size, try only the first extent in the heap. 
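+ * Worked example (illustrative numbers): for a 16 KiB alignment and a
+ * min_size of 8 KiB, a 32 KiB extent whose page-aligned base sits 4 KiB past
+ * a 16 KiB boundary gives next_align = base + 12 KiB, leadsize = 12 KiB, and
+ * 32 KiB - 12 KiB >= 8 KiB, so that extent satisfies the request.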
+ */ +static extent_t * +extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size, + size_t alignment) { + pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size)); + pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size)); + + for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, + &extents_bitmap_info, (size_t)pind); i < pind_max; i = + (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, + (size_t)i+1)) { + assert(i < NPSIZES); + assert(!extent_heap_empty(&extents->heaps[i])); + extent_t *extent = extent_heap_first(&extents->heaps[i]); + uintptr_t base = (uintptr_t)extent_base_get(extent); + size_t candidate_size = extent_size_get(extent); + assert(candidate_size >= min_size); + + uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base, + PAGE_CEILING(alignment)); + if (base > next_align || base + candidate_size <= next_align) { + /* Overflow or not crossing the next alignment. */ + continue; + } + + size_t leadsize = next_align - base; + if (candidate_size - leadsize >= min_size) { + return extent; + } + } + + return NULL; +} + +/* Do any-best-fit extent selection, i.e. select any extent that best fits. */ +static extent_t * +extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, + size_t size) { + pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size)); + pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, + (size_t)pind); + if (i < NPSIZES+1) { + /* + * In order to reduce fragmentation, avoid reusing and splitting + * large extents for much smaller sizes. + */ + if ((sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) { + return NULL; + } + assert(!extent_heap_empty(&extents->heaps[i])); + extent_t *extent = extent_heap_first(&extents->heaps[i]); + assert(extent_size_get(extent) >= size); + return extent; + } + + return NULL; +} + +/* + * Do first-fit extent selection, i.e. select the oldest/lowest extent that is + * large enough. + */ +static extent_t * +extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, + size_t size) { + extent_t *ret = NULL; + + pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size)); + for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, + &extents_bitmap_info, (size_t)pind); i < NPSIZES+1; i = + (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, + (size_t)i+1)) { + assert(!extent_heap_empty(&extents->heaps[i])); + extent_t *extent = extent_heap_first(&extents->heaps[i]); + assert(extent_size_get(extent) >= size); + if (ret == NULL || extent_snad_comp(extent, ret) < 0) { + ret = extent; + } + if (i == NPSIZES) { + break; + } + assert(i < NPSIZES); + } + + return ret; +} + +/* + * Do {best,first}-fit extent selection, where the selection policy choice is + * based on extents->delay_coalesce. Best-fit selection requires less + * searching, but its layout policy is less stable and may cause higher virtual + * memory fragmentation as a side effect. + */ +static extent_t * +extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, + size_t esize, size_t alignment) { + malloc_mutex_assert_owner(tsdn, &extents->mtx); + + size_t max_size = esize + PAGE_CEILING(alignment) - PAGE; + /* Beware size_t wrap-around. */ + if (max_size < esize) { + return NULL; + } + + extent_t *extent = extents->delay_coalesce ? 
+ extents_best_fit_locked(tsdn, arena, extents, max_size) : + extents_first_fit_locked(tsdn, arena, extents, max_size); + + if (alignment > PAGE && extent == NULL) { + /* + * max_size guarantees the alignment requirement but is rather + * pessimistic. Next we try to satisfy the aligned allocation + * with sizes in [esize, max_size). + */ + extent = extents_fit_alignment(extents, esize, max_size, + alignment); + } + + return extent; +} + +static bool +extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, + extent_t *extent) { + extent_state_set(extent, extent_state_active); + bool coalesced; + extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx, + extents, extent, &coalesced, false); + extent_state_set(extent, extents_state_get(extents)); + + if (!coalesced) { + return true; + } + extents_insert_locked(tsdn, extents, extent); + return false; +} + +extent_t * +extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, void *new_addr, size_t size, size_t pad, + size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { + assert(size + pad != 0); + assert(alignment != 0); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents, + new_addr, size, pad, alignment, slab, szind, zero, commit, false); + assert(extent == NULL || extent_dumpable_get(extent)); + return extent; +} + +void +extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, extent_t *extent) { + assert(extent_base_get(extent) != NULL); + assert(extent_size_get(extent) != 0); + assert(extent_dumpable_get(extent)); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extent_addr_set(extent, extent_base_get(extent)); + extent_zeroed_set(extent, false); + + extent_record(tsdn, arena, r_extent_hooks, extents, extent, false); +} + +extent_t * +extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, size_t npages_min) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + malloc_mutex_lock(tsdn, &extents->mtx); + + /* + * Get the LRU coalesced extent, if any. If coalescing was delayed, + * the loop will iterate until the LRU extent is fully coalesced. + */ + extent_t *extent; + while (true) { + /* Get the LRU extent, if any. */ + extent = extent_list_first(&extents->lru); + if (extent == NULL) { + goto label_return; + } + /* Check the eviction limit. */ + size_t extents_npages = atomic_load_zu(&extents->npages, + ATOMIC_RELAXED); + if (extents_npages <= npages_min) { + extent = NULL; + goto label_return; + } + extents_remove_locked(tsdn, extents, extent); + if (!extents->delay_coalesce) { + break; + } + /* Try to coalesce. */ + if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks, + rtree_ctx, extents, extent)) { + break; + } + /* + * The LRU extent was just coalesced and the result placed in + * the LRU at its neighbor's position. Start over. + */ + } + + /* + * Either mark the extent active or deregister it to protect against + * concurrent operations. 
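+ * (Dirty and muzzy extents are handed back still registered in the rtree,
+ * merely flipped to the active state; an extent evicted from the retained
+ * set is deregistered instead, presumably because its backing memory is
+ * about to be unmapped.)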
+ */ + switch (extents_state_get(extents)) { + case extent_state_active: + not_reached(); + case extent_state_dirty: + case extent_state_muzzy: + extent_state_set(extent, extent_state_active); + break; + case extent_state_retained: + extent_deregister(tsdn, extent); + break; + default: + not_reached(); + } + +label_return: + malloc_mutex_unlock(tsdn, &extents->mtx); + return extent; +} + +static void +extents_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, extent_t *extent, bool growing_retained) { + /* + * Leak extent after making sure its pages have already been purged, so + * that this is only a virtual memory leak. + */ + if (extents_state_get(extents) == extent_state_dirty) { + if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, + extent, 0, extent_size_get(extent), growing_retained)) { + extent_purge_forced_impl(tsdn, arena, r_extent_hooks, + extent, 0, extent_size_get(extent), + growing_retained); + } + } + extent_dalloc(tsdn, arena, extent); +} + +void +extents_prefork(tsdn_t *tsdn, extents_t *extents) { + malloc_mutex_prefork(tsdn, &extents->mtx); +} + +void +extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) { + malloc_mutex_postfork_parent(tsdn, &extents->mtx); +} + +void +extents_postfork_child(tsdn_t *tsdn, extents_t *extents) { + malloc_mutex_postfork_child(tsdn, &extents->mtx); +} + +static void +extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, + extent_t *extent) { + assert(extent_arena_get(extent) == arena); + assert(extent_state_get(extent) == extent_state_active); + + extent_state_set(extent, extents_state_get(extents)); + extents_insert_locked(tsdn, extents, extent); +} + +static void +extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents, + extent_t *extent) { + malloc_mutex_lock(tsdn, &extents->mtx); + extent_deactivate_locked(tsdn, arena, extents, extent); + malloc_mutex_unlock(tsdn, &extents->mtx); +} + +static void +extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, + extent_t *extent) { + assert(extent_arena_get(extent) == arena); + assert(extent_state_get(extent) == extents_state_get(extents)); + + extents_remove_locked(tsdn, extents, extent); + extent_state_set(extent, extent_state_active); +} + +static bool +extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, + const extent_t *extent, bool dependent, bool init_missing, + rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) { + *r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_base_get(extent), dependent, init_missing); + if (!dependent && *r_elm_a == NULL) { + return true; + } + assert(*r_elm_a != NULL); + + *r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_last_get(extent), dependent, init_missing); + if (!dependent && *r_elm_b == NULL) { + return true; + } + assert(*r_elm_b != NULL); + + return false; +} + +static void +extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a, + rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) { + rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab); + if (elm_b != NULL) { + rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind, + slab); + } +} + +static void +extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent, + szind_t szind) { + assert(extent_slab_get(extent)); + + /* Register interior. 
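+ * Only the strictly interior pages: the first and last pages are written
+ * separately (e.g. via elm_a/elm_b in extent_register_impl()), hence the
+ * loop starts at 1 and stops one page short of the end.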
*/ + for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) { + rtree_write(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_base_get(extent) + (uintptr_t)(i << + LG_PAGE), extent, szind, true); + } +} + +static void +extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) { + cassert(config_prof); + /* prof_gdump() requirement. */ + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + if (opt_prof && extent_state_get(extent) == extent_state_active) { + size_t nadd = extent_size_get(extent) >> LG_PAGE; + size_t cur = atomic_fetch_add_zu(&curpages, nadd, + ATOMIC_RELAXED) + nadd; + size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED); + while (cur > high && !atomic_compare_exchange_weak_zu( + &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) { + /* + * Don't refresh cur, because it may have decreased + * since this thread lost the highpages update race. + * Note that high is updated in case of CAS failure. + */ + } + if (cur > high && prof_gdump_get_unlocked()) { + prof_gdump(tsdn); + } + } +} + +static void +extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) { + cassert(config_prof); + + if (opt_prof && extent_state_get(extent) == extent_state_active) { + size_t nsub = extent_size_get(extent) >> LG_PAGE; + assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub); + atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED); + } +} + +static bool +extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + rtree_leaf_elm_t *elm_a, *elm_b; + + /* + * We need to hold the lock to protect against a concurrent coalesce + * operation that sees us in a partial state. + */ + extent_lock(tsdn, extent); + + if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true, + &elm_a, &elm_b)) { + return true; + } + + szind_t szind = extent_szind_get_maybe_invalid(extent); + bool slab = extent_slab_get(extent); + extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab); + if (slab) { + extent_interior_register(tsdn, rtree_ctx, extent, szind); + } + + extent_unlock(tsdn, extent); + + if (config_prof && gdump_add) { + extent_gdump_add(tsdn, extent); + } + + return false; +} + +static bool +extent_register(tsdn_t *tsdn, extent_t *extent) { + return extent_register_impl(tsdn, extent, true); +} + +static bool +extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) { + return extent_register_impl(tsdn, extent, false); +} + +static void +extent_reregister(tsdn_t *tsdn, extent_t *extent) { + bool err = extent_register(tsdn, extent); + assert(!err); +} + +/* + * Removes all pointers to the given extent from the global rtree indices for + * its interior. This is relevant for slab extents, for which we need to do + * metadata lookups at places other than the head of the extent. We deregister + * on the interior, then, when an extent moves from being an active slab to an + * inactive state. + */ +static void +extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, + extent_t *extent) { + size_t i; + + assert(extent_slab_get(extent)); + + for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) { + rtree_clear(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_base_get(extent) + (uintptr_t)(i << + LG_PAGE)); + } +} + +/* + * Removes all pointers to the given extent from the global rtree. 
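+ * Both the head and tail mappings are cleared via
+ * extent_rtree_write_acquired(), and for slabs the interior pages are
+ * cleared as well, after which the slab flag is dropped.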
+ */ +static void +extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + rtree_leaf_elm_t *elm_a, *elm_b; + extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false, + &elm_a, &elm_b); + + extent_lock(tsdn, extent); + + extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, NSIZES, false); + if (extent_slab_get(extent)) { + extent_interior_deregister(tsdn, rtree_ctx, extent); + extent_slab_set(extent, false); + } + + extent_unlock(tsdn, extent); + + if (config_prof && gdump) { + extent_gdump_sub(tsdn, extent); + } +} + +static void +extent_deregister(tsdn_t *tsdn, extent_t *extent) { + extent_deregister_impl(tsdn, extent, true); +} + +static void +extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) { + extent_deregister_impl(tsdn, extent, false); +} + +/* + * Tries to find and remove an extent from extents that can be used for the + * given allocation request. + */ +static extent_t * +extent_recycle_extract(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, + void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, + bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 1 : 0); + assert(alignment > 0); + if (config_debug && new_addr != NULL) { + /* + * Non-NULL new_addr has two use cases: + * + * 1) Recycle a known-extant extent, e.g. during purging. + * 2) Perform in-place expanding reallocation. + * + * Regardless of use case, new_addr must either refer to a + * non-existing extent, or to the base of an extant extent, + * since only active slabs support interior lookups (which of + * course cannot be recycled). + */ + assert(PAGE_ADDR2BASE(new_addr) == new_addr); + assert(pad == 0); + assert(alignment <= PAGE); + } + + size_t esize = size + pad; + malloc_mutex_lock(tsdn, &extents->mtx); + extent_hooks_assure_initialized(arena, r_extent_hooks); + extent_t *extent; + if (new_addr != NULL) { + extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr); + if (extent != NULL) { + /* + * We might null-out extent to report an error, but we + * still need to unlock the associated mutex after. + */ + extent_t *unlock_extent = extent; + assert(extent_base_get(extent) == new_addr); + if (extent_arena_get(extent) != arena || + extent_size_get(extent) < esize || + extent_state_get(extent) != + extents_state_get(extents)) { + extent = NULL; + } + extent_unlock(tsdn, unlock_extent); + } + } else { + extent = extents_fit_locked(tsdn, arena, extents, esize, + alignment); + } + if (extent == NULL) { + malloc_mutex_unlock(tsdn, &extents->mtx); + return NULL; + } + + extent_activate_locked(tsdn, arena, extents, extent); + malloc_mutex_unlock(tsdn, &extents->mtx); + + return extent; +} + +/* + * Given an allocation request and an extent guaranteed to be able to satisfy + * it, this splits off lead and trail extents, leaving extent pointing to an + * extent satisfying the allocation. + * This function doesn't put lead or trail into any extents_t; it's the caller's + * job to ensure that they can be reused. + */ +typedef enum { + /* + * Split successfully. lead, extent, and trail, are modified to extents + * describing the ranges before, in, and after the given allocation. + */ + extent_split_interior_ok, + /* + * The extent can't satisfy the given allocation request. None of the + * input extent_t *s are touched. 
+ */ + extent_split_interior_cant_alloc, + /* + * In a potentially invalid state. Must leak (if *to_leak is non-NULL), + * and salvage what's still salvageable (if *to_salvage is non-NULL). + * None of lead, extent, or trail are valid. + */ + extent_split_interior_error +} extent_split_interior_result_t; + +static extent_split_interior_result_t +extent_split_interior(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, + /* The result of splitting, in case of success. */ + extent_t **extent, extent_t **lead, extent_t **trail, + /* The mess to clean up, in case of error. */ + extent_t **to_leak, extent_t **to_salvage, + void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, + szind_t szind, bool growing_retained) { + size_t esize = size + pad; + size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent), + PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent); + assert(new_addr == NULL || leadsize == 0); + if (extent_size_get(*extent) < leadsize + esize) { + return extent_split_interior_cant_alloc; + } + size_t trailsize = extent_size_get(*extent) - leadsize - esize; + + *lead = NULL; + *trail = NULL; + *to_leak = NULL; + *to_salvage = NULL; + + /* Split the lead. */ + if (leadsize != 0) { + *lead = *extent; + *extent = extent_split_impl(tsdn, arena, r_extent_hooks, + *lead, leadsize, NSIZES, false, esize + trailsize, szind, + slab, growing_retained); + if (*extent == NULL) { + *to_leak = *lead; + *lead = NULL; + return extent_split_interior_error; + } + } + + /* Split the trail. */ + if (trailsize != 0) { + *trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent, + esize, szind, slab, trailsize, NSIZES, false, + growing_retained); + if (*trail == NULL) { + *to_leak = *extent; + *to_salvage = *lead; + *lead = NULL; + *extent = NULL; + return extent_split_interior_error; + } + } + + if (leadsize == 0 && trailsize == 0) { + /* + * Splitting causes szind to be set as a side effect, but no + * splitting occurred. + */ + extent_szind_set(*extent, szind); + if (szind != NSIZES) { + rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_addr_get(*extent), szind, slab); + if (slab && extent_size_get(*extent) > PAGE) { + rtree_szind_slab_update(tsdn, &extents_rtree, + rtree_ctx, + (uintptr_t)extent_past_get(*extent) - + (uintptr_t)PAGE, szind, slab); + } + } + } + + return extent_split_interior_ok; +} + +/* + * This fulfills the indicated allocation request out of the given extent (which + * the caller should have ensured was big enough). If there's any unused space + * before or after the resulting allocation, that space is given its own extent + * and put back into extents. 
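+ * Schematically (sizes illustrative):
+ *   before: [------------ candidate extent ------------]
+ *   after:  [ lead ][ requested allocation ][  trail   ]
+ * where a non-empty lead/trail is deactivated back into extents and only the
+ * middle piece is returned to the caller.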
+ */ +static extent_t * +extent_recycle_split(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, + void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, + szind_t szind, extent_t *extent, bool growing_retained) { + extent_t *lead; + extent_t *trail; + extent_t *to_leak; + extent_t *to_salvage; + + extent_split_interior_result_t result = extent_split_interior( + tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail, + &to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind, + growing_retained); + + if (result == extent_split_interior_ok) { + if (lead != NULL) { + extent_deactivate(tsdn, arena, extents, lead); + } + if (trail != NULL) { + extent_deactivate(tsdn, arena, extents, trail); + } + return extent; + } else { + /* + * We should have picked an extent that was large enough to + * fulfill our allocation request. + */ + assert(result == extent_split_interior_error); + if (to_salvage != NULL) { + extent_deregister(tsdn, to_salvage); + } + if (to_leak != NULL) { + void *leak = extent_base_get(to_leak); + extent_deregister_no_gdump_sub(tsdn, to_leak); + extents_leak(tsdn, arena, r_extent_hooks, extents, + to_leak, growing_retained); + assert(extent_lock_from_addr(tsdn, rtree_ctx, leak) + == NULL); + } + return NULL; + } + unreachable(); +} + +/* + * Tries to satisfy the given allocation request by reusing one of the extents + * in the given extents_t. + */ +static extent_t * +extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, void *new_addr, size_t size, size_t pad, + size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit, + bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 
1 : 0); + assert(new_addr == NULL || !slab); + assert(pad == 0 || !slab); + assert(!*zero || !slab); + + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks, + rtree_ctx, extents, new_addr, size, pad, alignment, slab, + growing_retained); + if (extent == NULL) { + return NULL; + } + + extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx, + extents, new_addr, size, pad, alignment, slab, szind, extent, + growing_retained); + if (extent == NULL) { + return NULL; + } + + if (*commit && !extent_committed_get(extent)) { + if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, + 0, extent_size_get(extent), growing_retained)) { + extent_record(tsdn, arena, r_extent_hooks, extents, + extent, growing_retained); + return NULL; + } + extent_zeroed_set(extent, true); + } + + if (extent_committed_get(extent)) { + *commit = true; + } + if (extent_zeroed_get(extent)) { + *zero = true; + } + + if (pad != 0) { + extent_addr_randomize(tsdn, extent, alignment); + } + assert(extent_state_get(extent) == extent_state_active); + if (slab) { + extent_slab_set(extent, slab); + extent_interior_register(tsdn, rtree_ctx, extent, szind); + } + + if (*zero) { + void *addr = extent_base_get(extent); + size_t size = extent_size_get(extent); + if (!extent_zeroed_get(extent)) { + if (pages_purge_forced(addr, size)) { + memset(addr, 0, size); + } + } else if (config_debug) { + size_t *p = (size_t *)(uintptr_t)addr; + for (size_t i = 0; i < size / sizeof(size_t); i++) { + assert(p[i] == 0); + } + } + } + return extent; +} + +/* + * If the caller specifies (!*zero), it is still possible to receive zeroed + * memory, in which case *zero is toggled to true. arena_extent_alloc() takes + * advantage of this to avoid demanding zeroed extents, but taking advantage of + * them if they are returned. + */ +static void * +extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, + size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) { + void *ret; + + assert(size != 0); + assert(alignment != 0); + + /* "primary" dss. */ + if (have_dss && dss_prec == dss_prec_primary && (ret = + extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, + commit)) != NULL) { + return ret; + } + /* mmap. */ + if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit)) + != NULL) { + return ret; + } + /* "secondary" dss. */ + if (have_dss && dss_prec == dss_prec_secondary && (ret = + extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, + commit)) != NULL) { + return ret; + } + + /* All strategies for allocation failed. */ + return NULL; +} + +static void * +extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr, + size_t size, size_t alignment, bool *zero, bool *commit) { + void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero, + commit, (dss_prec_t)atomic_load_u(&arena->dss_prec, + ATOMIC_RELAXED)); + if (have_madvise_huge && ret) { + pages_set_thp_state(ret, size); + } + return ret; +} + +static void * +extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size, + size_t alignment, bool *zero, bool *commit, unsigned arena_ind) { + tsdn_t *tsdn; + arena_t *arena; + + tsdn = tsdn_fetch(); + arena = arena_get(tsdn, arena_ind, false); + /* + * The arena we're allocating on behalf of must have been initialized + * already. 
+ */ + assert(arena != NULL); + + return extent_alloc_default_impl(tsdn, arena, new_addr, size, + alignment, zero, commit); +} + +static void +extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) { + tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); + if (arena == arena_get(tsd_tsdn(tsd), 0, false)) { + /* + * The only legitimate case of customized extent hooks for a0 is + * hooks with no allocation activities. One such example is to + * place metadata on pre-allocated resources such as huge pages. + * In that case, rely on reentrancy_level checks to catch + * infinite recursions. + */ + pre_reentrancy(tsd, NULL); + } else { + pre_reentrancy(tsd, arena); + } +} + +static void +extent_hook_post_reentrancy(tsdn_t *tsdn) { + tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); + post_reentrancy(tsd); +} + +/* + * If virtual memory is retained, create increasingly larger extents from which + * to split requested extents in order to limit the total number of disjoint + * virtual memory ranges retained by each arena. + */ +static extent_t * +extent_grow_retained(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment, + bool slab, szind_t szind, bool *zero, bool *commit) { + malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx); + assert(pad == 0 || !slab); + assert(!*zero || !slab); + + size_t esize = size + pad; + size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE; + /* Beware size_t wrap-around. */ + if (alloc_size_min < esize) { + goto label_err; + } + /* + * Find the next extent size in the series that would be large enough to + * satisfy this request. + */ + pszind_t egn_skip = 0; + size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip); + while (alloc_size < alloc_size_min) { + egn_skip++; + if (arena->extent_grow_next + egn_skip == NPSIZES) { + /* Outside legal range. 
*/ + goto label_err; + } + assert(arena->extent_grow_next + egn_skip < NPSIZES); + alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip); + } + + extent_t *extent = extent_alloc(tsdn, arena); + if (extent == NULL) { + goto label_err; + } + bool zeroed = false; + bool committed = false; + + void *ptr; + if (*r_extent_hooks == &extent_hooks_default) { + ptr = extent_alloc_default_impl(tsdn, arena, NULL, + alloc_size, PAGE, &zeroed, &committed); + } else { + extent_hook_pre_reentrancy(tsdn, arena); + ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL, + alloc_size, PAGE, &zeroed, &committed, + arena_ind_get(arena)); + extent_hook_post_reentrancy(tsdn); + } + + extent_init(extent, arena, ptr, alloc_size, false, NSIZES, + arena_extent_sn_next(arena), extent_state_active, zeroed, + committed, true); + if (ptr == NULL) { + extent_dalloc(tsdn, arena, extent); + goto label_err; + } + + if (extent_register_no_gdump_add(tsdn, extent)) { + extents_leak(tsdn, arena, r_extent_hooks, + &arena->extents_retained, extent, true); + goto label_err; + } + + if (extent_zeroed_get(extent) && extent_committed_get(extent)) { + *zero = true; + } + if (extent_committed_get(extent)) { + *commit = true; + } + + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + extent_t *lead; + extent_t *trail; + extent_t *to_leak; + extent_t *to_salvage; + extent_split_interior_result_t result = extent_split_interior( + tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail, + &to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind, + true); + + if (result == extent_split_interior_ok) { + if (lead != NULL) { + extent_record(tsdn, arena, r_extent_hooks, + &arena->extents_retained, lead, true); + } + if (trail != NULL) { + extent_record(tsdn, arena, r_extent_hooks, + &arena->extents_retained, trail, true); + } + } else { + /* + * We should have allocated a sufficiently large extent; the + * cant_alloc case should not occur. + */ + assert(result == extent_split_interior_error); + if (to_salvage != NULL) { + if (config_prof) { + extent_gdump_add(tsdn, to_salvage); + } + extent_record(tsdn, arena, r_extent_hooks, + &arena->extents_retained, to_salvage, true); + } + if (to_leak != NULL) { + extent_deregister_no_gdump_sub(tsdn, to_leak); + extents_leak(tsdn, arena, r_extent_hooks, + &arena->extents_retained, to_leak, true); + } + goto label_err; + } + + if (*commit && !extent_committed_get(extent)) { + if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0, + extent_size_get(extent), true)) { + extent_record(tsdn, arena, r_extent_hooks, + &arena->extents_retained, extent, true); + goto label_err; + } + extent_zeroed_set(extent, true); + } + + /* + * Increment extent_grow_next if doing so wouldn't exceed the allowed + * range. + */ + if (arena->extent_grow_next + egn_skip + 1 <= + arena->retain_grow_limit) { + arena->extent_grow_next += egn_skip + 1; + } else { + arena->extent_grow_next = arena->retain_grow_limit; + } + /* All opportunities for failure are past. */ + malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); + + if (config_prof) { + /* Adjust gdump stats now that extent is final size. 
*/ + extent_gdump_add(tsdn, extent); + } + if (pad != 0) { + extent_addr_randomize(tsdn, extent, alignment); + } + if (slab) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, + &rtree_ctx_fallback); + + extent_slab_set(extent, true); + extent_interior_register(tsdn, rtree_ctx, extent, szind); + } + if (*zero && !extent_zeroed_get(extent)) { + void *addr = extent_base_get(extent); + size_t size = extent_size_get(extent); + if (pages_purge_forced(addr, size)) { + memset(addr, 0, size); + } + } + + return extent; +label_err: + malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); + return NULL; +} + +static extent_t * +extent_alloc_retained(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, + size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { + assert(size != 0); + assert(alignment != 0); + + malloc_mutex_lock(tsdn, &arena->extent_grow_mtx); + + extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, + &arena->extents_retained, new_addr, size, pad, alignment, slab, + szind, zero, commit, true); + if (extent != NULL) { + malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); + if (config_prof) { + extent_gdump_add(tsdn, extent); + } + } else if (opt_retain && new_addr == NULL) { + extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size, + pad, alignment, slab, szind, zero, commit); + /* extent_grow_retained() always releases extent_grow_mtx. */ + } else { + malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); + } + malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx); + + return extent; +} + +static extent_t * +extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, + size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { + size_t esize = size + pad; + extent_t *extent = extent_alloc(tsdn, arena); + if (extent == NULL) { + return NULL; + } + void *addr; + if (*r_extent_hooks == &extent_hooks_default) { + /* Call directly to propagate tsdn. 
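+ * (extent_alloc_default() would re-derive tsdn via tsdn_fetch(); calling
+ * extent_alloc_default_impl() keeps the caller's tsdn and skips the hook
+ * indirection.)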
*/ + addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize, + alignment, zero, commit); + } else { + extent_hook_pre_reentrancy(tsdn, arena); + addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr, + esize, alignment, zero, commit, arena_ind_get(arena)); + extent_hook_post_reentrancy(tsdn); + } + if (addr == NULL) { + extent_dalloc(tsdn, arena, extent); + return NULL; + } + extent_init(extent, arena, addr, esize, slab, szind, + arena_extent_sn_next(arena), extent_state_active, *zero, *commit, + true); + if (pad != 0) { + extent_addr_randomize(tsdn, extent, alignment); + } + if (extent_register(tsdn, extent)) { + extents_leak(tsdn, arena, r_extent_hooks, + &arena->extents_retained, extent, false); + return NULL; + } + + return extent; +} + +extent_t * +extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, + size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + extent_hooks_assure_initialized(arena, r_extent_hooks); + + extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks, + new_addr, size, pad, alignment, slab, szind, zero, commit); + if (extent == NULL) { + if (opt_retain && new_addr != NULL) { + /* + * When retain is enabled and new_addr is set, we do not + * attempt extent_alloc_wrapper_hard which does mmap + * that is very unlikely to succeed (unless it happens + * to be at the end). + */ + return NULL; + } + extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks, + new_addr, size, pad, alignment, slab, szind, zero, commit); + } + + assert(extent == NULL || extent_dumpable_get(extent)); + return extent; +} + +static bool +extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner, + const extent_t *outer) { + assert(extent_arena_get(inner) == arena); + if (extent_arena_get(outer) != arena) { + return false; + } + + assert(extent_state_get(inner) == extent_state_active); + if (extent_state_get(outer) != extents->state) { + return false; + } + + if (extent_committed_get(inner) != extent_committed_get(outer)) { + return false; + } + + return true; +} + +static bool +extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, extent_t *inner, extent_t *outer, bool forward, + bool growing_retained) { + assert(extent_can_coalesce(arena, extents, inner, outer)); + + extent_activate_locked(tsdn, arena, extents, outer); + + malloc_mutex_unlock(tsdn, &extents->mtx); + bool err = extent_merge_impl(tsdn, arena, r_extent_hooks, + forward ? inner : outer, forward ? outer : inner, growing_retained); + malloc_mutex_lock(tsdn, &extents->mtx); + + if (err) { + extent_deactivate_locked(tsdn, arena, extents, outer); + } + + return err; +} + +static extent_t * +extent_try_coalesce(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, + extent_t *extent, bool *coalesced, bool growing_retained) { /* - * Round down to the nearest chunk size that can actually be requested - * during normal huge allocation. + * Continue attempting to coalesce until failure, to protect against + * races with other threads that are thwarted by this one. */ - return (index2size(size2index(size + 1) - 1)); + bool again; + do { + again = false; + + /* Try to coalesce forward. 
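+ * ("Forward" means merging with the neighbor that begins at
+ * extent_past_get(extent); the backward pass below mirrors this with the
+ * neighbor ending at extent_before_get(extent).) */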
*/ + extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx, + extent_past_get(extent)); + if (next != NULL) { + /* + * extents->mtx only protects against races for + * like-state extents, so call extent_can_coalesce() + * before releasing next's pool lock. + */ + bool can_coalesce = extent_can_coalesce(arena, extents, + extent, next); + + extent_unlock(tsdn, next); + + if (can_coalesce && !extent_coalesce(tsdn, arena, + r_extent_hooks, extents, extent, next, true, + growing_retained)) { + if (extents->delay_coalesce) { + /* Do minimal coalescing. */ + *coalesced = true; + return extent; + } + again = true; + } + } + + /* Try to coalesce backward. */ + extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx, + extent_before_get(extent)); + if (prev != NULL) { + bool can_coalesce = extent_can_coalesce(arena, extents, + extent, prev); + extent_unlock(tsdn, prev); + + if (can_coalesce && !extent_coalesce(tsdn, arena, + r_extent_hooks, extents, extent, prev, false, + growing_retained)) { + extent = prev; + if (extents->delay_coalesce) { + /* Do minimal coalescing. */ + *coalesced = true; + return extent; + } + again = true; + } + } + } while (again); + + if (extents->delay_coalesce) { + *coalesced = false; + } + return extent; +} + +/* + * Does the metadata management portions of putting an unused extent into the + * given extents_t (coalesces, deregisters slab interiors, the heap operations). + */ +static void +extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, extent_t *extent, bool growing_retained) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + assert((extents_state_get(extents) != extent_state_dirty && + extents_state_get(extents) != extent_state_muzzy) || + !extent_zeroed_get(extent)); + + malloc_mutex_lock(tsdn, &extents->mtx); + extent_hooks_assure_initialized(arena, r_extent_hooks); + + extent_szind_set(extent, NSIZES); + if (extent_slab_get(extent)) { + extent_interior_deregister(tsdn, rtree_ctx, extent); + extent_slab_set(extent, false); + } + + assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_base_get(extent), true) == extent); + + if (!extents->delay_coalesce) { + extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, + rtree_ctx, extents, extent, NULL, growing_retained); + } else if (extent_size_get(extent) >= LARGE_MINCLASS) { + /* Always coalesce large extents eagerly. 
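+ * The loop below keeps retrying while a merge happened and the extent grew
+ * by at least LARGE_MINCLASS, so a run of adjacent large extents collapses
+ * in one pass even when delay_coalesce is set.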
*/ + bool coalesced; + size_t prev_size; + do { + prev_size = extent_size_get(extent); + assert(extent_state_get(extent) == extent_state_active); + extent = extent_try_coalesce(tsdn, arena, + r_extent_hooks, rtree_ctx, extents, extent, + &coalesced, growing_retained); + } while (coalesced && + extent_size_get(extent) >= prev_size + LARGE_MINCLASS); + } + extent_deactivate_locked(tsdn, arena, extents, extent); + + malloc_mutex_unlock(tsdn, &extents->mtx); +} + +void +extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { + extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; + + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + if (extent_register(tsdn, extent)) { + extents_leak(tsdn, arena, &extent_hooks, + &arena->extents_retained, extent, false); + return; + } + extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent); +} + +static bool +extent_dalloc_default_impl(void *addr, size_t size) { + if (!have_dss || !extent_in_dss(addr)) { + return extent_dalloc_mmap(addr, size); + } + return true; +} + +static bool +extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size, + bool committed, unsigned arena_ind) { + return extent_dalloc_default_impl(addr, size); +} + +static bool +extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent) { + bool err; + + assert(extent_base_get(extent) != NULL); + assert(extent_size_get(extent) != 0); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extent_addr_set(extent, extent_base_get(extent)); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + /* Try to deallocate. */ + if (*r_extent_hooks == &extent_hooks_default) { + /* Call directly to propagate tsdn. */ + err = extent_dalloc_default_impl(extent_base_get(extent), + extent_size_get(extent)); + } else { + extent_hook_pre_reentrancy(tsdn, arena); + err = ((*r_extent_hooks)->dalloc == NULL || + (*r_extent_hooks)->dalloc(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), + extent_committed_get(extent), arena_ind_get(arena))); + extent_hook_post_reentrancy(tsdn); + } + + if (!err) { + extent_dalloc(tsdn, arena, extent); + } + + return err; } -JEMALLOC_INLINE_C int -extent_szad_comp(extent_node_t *a, extent_node_t *b) -{ - int ret; - size_t a_qsize = extent_quantize(extent_node_size_get(a)); - size_t b_qsize = extent_quantize(extent_node_size_get(b)); +void +extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent) { + assert(extent_dumpable_get(extent)); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); /* - * Compare based on quantized size rather than size, in order to sort - * equally useful extents only by address. + * Deregister first to avoid a race with other allocating threads, and + * reregister if deallocation fails. */ - ret = (a_qsize > b_qsize) - (a_qsize < b_qsize); - if (ret == 0) { - uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a); - uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b); + extent_deregister(tsdn, extent); + if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, extent)) { + return; + } + + extent_reregister(tsdn, extent); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_pre_reentrancy(tsdn, arena); + } + /* Try to decommit; purge if that fails. 
*/ + bool zeroed; + if (!extent_committed_get(extent)) { + zeroed = true; + } else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent, + 0, extent_size_get(extent))) { + zeroed = true; + } else if ((*r_extent_hooks)->purge_forced != NULL && + !(*r_extent_hooks)->purge_forced(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), 0, + extent_size_get(extent), arena_ind_get(arena))) { + zeroed = true; + } else if (extent_state_get(extent) == extent_state_muzzy || + ((*r_extent_hooks)->purge_lazy != NULL && + !(*r_extent_hooks)->purge_lazy(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), 0, + extent_size_get(extent), arena_ind_get(arena)))) { + zeroed = false; + } else { + zeroed = false; + } + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_post_reentrancy(tsdn); + } + extent_zeroed_set(extent, zeroed); + + if (config_prof) { + extent_gdump_sub(tsdn, extent); + } + + extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained, + extent, false); +} + +static void +extent_destroy_default_impl(void *addr, size_t size) { + if (!have_dss || !extent_in_dss(addr)) { + pages_unmap(addr, size); + } +} + +static void +extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size, + bool committed, unsigned arena_ind) { + extent_destroy_default_impl(addr, size); +} + +void +extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent) { + assert(extent_base_get(extent) != NULL); + assert(extent_size_get(extent) != 0); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + /* Deregister first to avoid a race with other allocating threads. */ + extent_deregister(tsdn, extent); + + extent_addr_set(extent, extent_base_get(extent)); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + /* Try to destroy; silently fail otherwise. */ + if (*r_extent_hooks == &extent_hooks_default) { + /* Call directly to propagate tsdn. */ + extent_destroy_default_impl(extent_base_get(extent), + extent_size_get(extent)); + } else if ((*r_extent_hooks)->destroy != NULL) { + extent_hook_pre_reentrancy(tsdn, arena); + (*r_extent_hooks)->destroy(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), + extent_committed_get(extent), arena_ind_get(arena)); + extent_hook_post_reentrancy(tsdn); + } + + extent_dalloc(tsdn, arena, extent); +} + +static bool +extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t offset, size_t length, unsigned arena_ind) { + return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset), + length); +} + +static bool +extent_commit_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length, bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 
1 : 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_pre_reentrancy(tsdn, arena); + } + bool err = ((*r_extent_hooks)->commit == NULL || + (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent), + extent_size_get(extent), offset, length, arena_ind_get(arena))); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_post_reentrancy(tsdn); + } + extent_committed_set(extent, extent_committed_get(extent) || !err); + return err; +} + +bool +extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length) { + return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset, + length, false); +} + +static bool +extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t offset, size_t length, unsigned arena_ind) { + return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset), + length); +} + +bool +extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_pre_reentrancy(tsdn, arena); + } + bool err = ((*r_extent_hooks)->decommit == NULL || + (*r_extent_hooks)->decommit(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), offset, length, + arena_ind_get(arena))); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_post_reentrancy(tsdn); + } + extent_committed_set(extent, extent_committed_get(extent) && err); + return err; +} + +#ifdef PAGES_CAN_PURGE_LAZY +static bool +extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t offset, size_t length, unsigned arena_ind) { + assert(addr != NULL); + assert((offset & PAGE_MASK) == 0); + assert(length != 0); + assert((length & PAGE_MASK) == 0); + + return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset), + length); +} +#endif + +static bool +extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length, bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 
1 : 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + + if ((*r_extent_hooks)->purge_lazy == NULL) { + return true; + } + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_pre_reentrancy(tsdn, arena); + } + bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), offset, length, + arena_ind_get(arena)); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_post_reentrancy(tsdn); + } + + return err; +} + +bool +extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length) { + return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent, + offset, length, false); +} + +#ifdef PAGES_CAN_PURGE_FORCED +static bool +extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t offset, size_t length, unsigned arena_ind) { + assert(addr != NULL); + assert((offset & PAGE_MASK) == 0); + assert(length != 0); + assert((length & PAGE_MASK) == 0); + + return pages_purge_forced((void *)((uintptr_t)addr + + (uintptr_t)offset), length); +} +#endif + +static bool +extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length, bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 1 : 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + + if ((*r_extent_hooks)->purge_forced == NULL) { + return true; + } + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_pre_reentrancy(tsdn, arena); + } + bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), offset, length, + arena_ind_get(arena)); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_post_reentrancy(tsdn); + } + return err; +} + +bool +extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length) { + return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent, + offset, length, false); +} + +#ifdef JEMALLOC_MAPS_COALESCE +static bool +extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t size_a, size_t size_b, bool committed, unsigned arena_ind) { + return !maps_coalesce; +} +#endif + +/* + * Accepts the extent to split, and the characteristics of each side of the + * split. The 'a' parameters go with the 'lead' of the resulting pair of + * extents (the lower addressed portion of the split), and the 'b' parameters go + * with the trail (the higher addressed portion). This makes 'extent' the lead, + * and returns the trail (except in case of error). + */ +static extent_t * +extent_split_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, + szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b, + bool growing_retained) { + assert(extent_size_get(extent) == size_a + size_b); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 
1 : 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + + if ((*r_extent_hooks)->split == NULL) { + return NULL; + } + + extent_t *trail = extent_alloc(tsdn, arena); + if (trail == NULL) { + goto label_error_a; + } + + extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) + + size_a), size_b, slab_b, szind_b, extent_sn_get(extent), + extent_state_get(extent), extent_zeroed_get(extent), + extent_committed_get(extent), extent_dumpable_get(extent)); + + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + rtree_leaf_elm_t *lead_elm_a, *lead_elm_b; + { + extent_t lead; + + extent_init(&lead, arena, extent_addr_get(extent), size_a, + slab_a, szind_a, extent_sn_get(extent), + extent_state_get(extent), extent_zeroed_get(extent), + extent_committed_get(extent), extent_dumpable_get(extent)); - ret = (a_addr > b_addr) - (a_addr < b_addr); + extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false, + true, &lead_elm_a, &lead_elm_b); } + rtree_leaf_elm_t *trail_elm_a, *trail_elm_b; + extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true, + &trail_elm_a, &trail_elm_b); - return (ret); + if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL + || trail_elm_b == NULL) { + goto label_error_b; + } + + extent_lock2(tsdn, extent, trail); + + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_pre_reentrancy(tsdn, arena); + } + bool err = (*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent), + size_a + size_b, size_a, size_b, extent_committed_get(extent), + arena_ind_get(arena)); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_post_reentrancy(tsdn); + } + if (err) { + goto label_error_c; + } + + extent_size_set(extent, size_a); + extent_szind_set(extent, szind_a); + + extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent, + szind_a, slab_a); + extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail, + szind_b, slab_b); + + extent_unlock2(tsdn, extent, trail); + + return trail; +label_error_c: + extent_unlock2(tsdn, extent, trail); +label_error_b: + extent_dalloc(tsdn, arena, trail); +label_error_a: + return NULL; } -/* Generate red-black tree functions. */ -rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, szad_link, - extent_szad_comp) +extent_t * +extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, + szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) { + return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a, + szind_a, slab_a, size_b, szind_b, slab_b, false); +} -JEMALLOC_INLINE_C int -extent_ad_comp(extent_node_t *a, extent_node_t *b) -{ - uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a); - uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b); +static bool +extent_merge_default_impl(void *addr_a, void *addr_b) { + if (!maps_coalesce) { + return true; + } + if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) { + return true; + } + + return false; +} - return ((a_addr > b_addr) - (a_addr < b_addr)); +#ifdef JEMALLOC_MAPS_COALESCE +static bool +extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, + void *addr_b, size_t size_b, bool committed, unsigned arena_ind) { + return extent_merge_default_impl(addr_a, addr_b); } +#endif -/* Generate red-black tree functions. 
*/ -rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp) +static bool +extent_merge_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b, + bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 1 : 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + + if ((*r_extent_hooks)->merge == NULL) { + return true; + } + + bool err; + if (*r_extent_hooks == &extent_hooks_default) { + /* Call directly to propagate tsdn. */ + err = extent_merge_default_impl(extent_base_get(a), + extent_base_get(b)); + } else { + extent_hook_pre_reentrancy(tsdn, arena); + err = (*r_extent_hooks)->merge(*r_extent_hooks, + extent_base_get(a), extent_size_get(a), extent_base_get(b), + extent_size_get(b), extent_committed_get(a), + arena_ind_get(arena)); + extent_hook_post_reentrancy(tsdn); + } + + if (err) { + return true; + } + + /* + * The rtree writes must happen while all the relevant elements are + * owned, so the following code uses decomposed helper functions rather + * than extent_{,de}register() to do things in the right order. + */ + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b; + extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a, + &a_elm_b); + extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a, + &b_elm_b); + + extent_lock2(tsdn, a, b); + + if (a_elm_b != NULL) { + rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL, + NSIZES, false); + } + if (b_elm_b != NULL) { + rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL, + NSIZES, false); + } else { + b_elm_b = b_elm_a; + } + + extent_size_set(a, extent_size_get(a) + extent_size_get(b)); + extent_szind_set(a, NSIZES); + extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ? + extent_sn_get(a) : extent_sn_get(b)); + extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b)); + + extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, NSIZES, false); + + extent_unlock2(tsdn, a, b); + + extent_dalloc(tsdn, extent_arena_get(b), b); + + return false; +} + +bool +extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) { + return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false); +} + +bool +extent_boot(void) { + if (rtree_new(&extents_rtree, true)) { + return true; + } + + if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool", + WITNESS_RANK_EXTENT_POOL)) { + return true; + } + + if (have_dss) { + extent_dss_boot(); + } + + return false; +} diff --git a/deps/jemalloc/src/extent_dss.c b/deps/jemalloc/src/extent_dss.c new file mode 100644 index 000000000..2b1ea9caf --- /dev/null +++ b/deps/jemalloc/src/extent_dss.c @@ -0,0 +1,270 @@ +#define JEMALLOC_EXTENT_DSS_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/extent_dss.h" +#include "jemalloc/internal/spin.h" + +/******************************************************************************/ +/* Data. */ + +const char *opt_dss = DSS_DEFAULT; + +const char *dss_prec_names[] = { + "disabled", + "primary", + "secondary", + "N/A" +}; + +/* + * Current dss precedence default, used when creating new arenas. 
NB: This is + * stored as unsigned rather than dss_prec_t because in principle there's no + * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use + * atomic operations to synchronize the setting. + */ +static atomic_u_t dss_prec_default = ATOMIC_INIT( + (unsigned)DSS_PREC_DEFAULT); + +/* Base address of the DSS. */ +static void *dss_base; +/* Atomic boolean indicating whether a thread is currently extending DSS. */ +static atomic_b_t dss_extending; +/* Atomic boolean indicating whether the DSS is exhausted. */ +static atomic_b_t dss_exhausted; +/* Atomic current upper limit on DSS addresses. */ +static atomic_p_t dss_max; + +/******************************************************************************/ + +static void * +extent_dss_sbrk(intptr_t increment) { +#ifdef JEMALLOC_DSS + return sbrk(increment); +#else + not_implemented(); + return NULL; +#endif +} + +dss_prec_t +extent_dss_prec_get(void) { + dss_prec_t ret; + + if (!have_dss) { + return dss_prec_disabled; + } + ret = (dss_prec_t)atomic_load_u(&dss_prec_default, ATOMIC_ACQUIRE); + return ret; +} + +bool +extent_dss_prec_set(dss_prec_t dss_prec) { + if (!have_dss) { + return (dss_prec != dss_prec_disabled); + } + atomic_store_u(&dss_prec_default, (unsigned)dss_prec, ATOMIC_RELEASE); + return false; +} + +static void +extent_dss_extending_start(void) { + spin_t spinner = SPIN_INITIALIZER; + while (true) { + bool expected = false; + if (atomic_compare_exchange_weak_b(&dss_extending, &expected, + true, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) { + break; + } + spin_adaptive(&spinner); + } +} + +static void +extent_dss_extending_finish(void) { + assert(atomic_load_b(&dss_extending, ATOMIC_RELAXED)); + + atomic_store_b(&dss_extending, false, ATOMIC_RELEASE); +} + +static void * +extent_dss_max_update(void *new_addr) { + /* + * Get the current end of the DSS as max_cur and assure that dss_max is + * up to date. + */ + void *max_cur = extent_dss_sbrk(0); + if (max_cur == (void *)-1) { + return NULL; + } + atomic_store_p(&dss_max, max_cur, ATOMIC_RELEASE); + /* Fixed new_addr can only be supported if it is at the edge of DSS. */ + if (new_addr != NULL && max_cur != new_addr) { + return NULL; + } + return max_cur; +} + +void * +extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, + size_t alignment, bool *zero, bool *commit) { + extent_t *gap; + + cassert(have_dss); + assert(size > 0); + assert(alignment > 0); + + /* + * sbrk() uses a signed increment argument, so take care not to + * interpret a large allocation request as a negative increment. + */ + if ((intptr_t)size < 0) { + return NULL; + } + + gap = extent_alloc(tsdn, arena); + if (gap == NULL) { + return NULL; + } + + extent_dss_extending_start(); + if (!atomic_load_b(&dss_exhausted, ATOMIC_ACQUIRE)) { + /* + * The loop is necessary to recover from races with other + * threads that are using the DSS for something other than + * malloc. + */ + while (true) { + void *max_cur = extent_dss_max_update(new_addr); + if (max_cur == NULL) { + goto label_oom; + } + + /* + * Compute how much page-aligned gap space (if any) is + * necessary to satisfy alignment. This space can be + * recycled for later use. 
+ */ + void *gap_addr_page = (void *)(PAGE_CEILING( + (uintptr_t)max_cur)); + void *ret = (void *)ALIGNMENT_CEILING( + (uintptr_t)gap_addr_page, alignment); + size_t gap_size_page = (uintptr_t)ret - + (uintptr_t)gap_addr_page; + if (gap_size_page != 0) { + extent_init(gap, arena, gap_addr_page, + gap_size_page, false, NSIZES, + arena_extent_sn_next(arena), + extent_state_active, false, true, true); + } + /* + * Compute the address just past the end of the desired + * allocation space. + */ + void *dss_next = (void *)((uintptr_t)ret + size); + if ((uintptr_t)ret < (uintptr_t)max_cur || + (uintptr_t)dss_next < (uintptr_t)max_cur) { + goto label_oom; /* Wrap-around. */ + } + /* Compute the increment, including subpage bytes. */ + void *gap_addr_subpage = max_cur; + size_t gap_size_subpage = (uintptr_t)ret - + (uintptr_t)gap_addr_subpage; + intptr_t incr = gap_size_subpage + size; + + assert((uintptr_t)max_cur + incr == (uintptr_t)ret + + size); + + /* Try to allocate. */ + void *dss_prev = extent_dss_sbrk(incr); + if (dss_prev == max_cur) { + /* Success. */ + atomic_store_p(&dss_max, dss_next, + ATOMIC_RELEASE); + extent_dss_extending_finish(); + + if (gap_size_page != 0) { + extent_dalloc_gap(tsdn, arena, gap); + } else { + extent_dalloc(tsdn, arena, gap); + } + if (!*commit) { + *commit = pages_decommit(ret, size); + } + if (*zero && *commit) { + extent_hooks_t *extent_hooks = + EXTENT_HOOKS_INITIALIZER; + extent_t extent; + + extent_init(&extent, arena, ret, size, + size, false, NSIZES, + extent_state_active, false, true, + true); + if (extent_purge_forced_wrapper(tsdn, + arena, &extent_hooks, &extent, 0, + size)) { + memset(ret, 0, size); + } + } + return ret; + } + /* + * Failure, whether due to OOM or a race with a raw + * sbrk() call from outside the allocator. + */ + if (dss_prev == (void *)-1) { + /* OOM. 
*/ + atomic_store_b(&dss_exhausted, true, + ATOMIC_RELEASE); + goto label_oom; + } + } + } +label_oom: + extent_dss_extending_finish(); + extent_dalloc(tsdn, arena, gap); + return NULL; +} + +static bool +extent_in_dss_helper(void *addr, void *max) { + return ((uintptr_t)addr >= (uintptr_t)dss_base && (uintptr_t)addr < + (uintptr_t)max); +} + +bool +extent_in_dss(void *addr) { + cassert(have_dss); + + return extent_in_dss_helper(addr, atomic_load_p(&dss_max, + ATOMIC_ACQUIRE)); +} + +bool +extent_dss_mergeable(void *addr_a, void *addr_b) { + void *max; + + cassert(have_dss); + + if ((uintptr_t)addr_a < (uintptr_t)dss_base && (uintptr_t)addr_b < + (uintptr_t)dss_base) { + return true; + } + + max = atomic_load_p(&dss_max, ATOMIC_ACQUIRE); + return (extent_in_dss_helper(addr_a, max) == + extent_in_dss_helper(addr_b, max)); +} + +void +extent_dss_boot(void) { + cassert(have_dss); + + dss_base = extent_dss_sbrk(0); + atomic_store_b(&dss_extending, false, ATOMIC_RELAXED); + atomic_store_b(&dss_exhausted, dss_base == (void *)-1, ATOMIC_RELAXED); + atomic_store_p(&dss_max, dss_base, ATOMIC_RELAXED); +} + +/******************************************************************************/ diff --git a/deps/jemalloc/src/extent_mmap.c b/deps/jemalloc/src/extent_mmap.c new file mode 100644 index 000000000..8d607dc80 --- /dev/null +++ b/deps/jemalloc/src/extent_mmap.c @@ -0,0 +1,42 @@ +#define JEMALLOC_EXTENT_MMAP_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/extent_mmap.h" + +/******************************************************************************/ +/* Data. */ + +bool opt_retain = +#ifdef JEMALLOC_RETAIN + true +#else + false +#endif + ; + +/******************************************************************************/ + +void * +extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero, + bool *commit) { + void *ret = pages_map(new_addr, size, ALIGNMENT_CEILING(alignment, + PAGE), commit); + if (ret == NULL) { + return NULL; + } + assert(ret != NULL); + if (*commit) { + *zero = true; + } + return ret; +} + +bool +extent_dalloc_mmap(void *addr, size_t size) { + if (!opt_retain) { + pages_unmap(addr, size); + } + return opt_retain; +} diff --git a/deps/jemalloc/src/hash.c b/deps/jemalloc/src/hash.c index cfa4da027..7b2bdc2bd 100644 --- a/deps/jemalloc/src/hash.c +++ b/deps/jemalloc/src/hash.c @@ -1,2 +1,3 @@ -#define JEMALLOC_HASH_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_HASH_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" diff --git a/deps/jemalloc/src/hooks.c b/deps/jemalloc/src/hooks.c new file mode 100644 index 000000000..6266ecd47 --- /dev/null +++ b/deps/jemalloc/src/hooks.c @@ -0,0 +1,12 @@ +#include "jemalloc/internal/jemalloc_preamble.h" + +/* + * The hooks are a little bit screwy -- they're not genuinely exported in the + * sense that we want them available to end-users, but we do want them visible + * from outside the generated library, so that we can use them in test code. 
+ */ +JEMALLOC_EXPORT +void (*hooks_arena_new_hook)() = NULL; + +JEMALLOC_EXPORT +void (*hooks_libc_hook)() = NULL; diff --git a/deps/jemalloc/src/huge.c b/deps/jemalloc/src/huge.c deleted file mode 100644 index 1e9a66512..000000000 --- a/deps/jemalloc/src/huge.c +++ /dev/null @@ -1,435 +0,0 @@ -#define JEMALLOC_HUGE_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ - -static extent_node_t * -huge_node_get(const void *ptr) -{ - extent_node_t *node; - - node = chunk_lookup(ptr, true); - assert(!extent_node_achunk_get(node)); - - return (node); -} - -static bool -huge_node_set(const void *ptr, extent_node_t *node) -{ - - assert(extent_node_addr_get(node) == ptr); - assert(!extent_node_achunk_get(node)); - return (chunk_register(ptr, node)); -} - -static void -huge_node_unset(const void *ptr, const extent_node_t *node) -{ - - chunk_deregister(ptr, node); -} - -void * -huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero, - tcache_t *tcache) -{ - size_t usize; - - usize = s2u(size); - if (usize == 0) { - /* size_t overflow. */ - return (NULL); - } - - return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache)); -} - -void * -huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment, - bool zero, tcache_t *tcache) -{ - void *ret; - size_t usize; - extent_node_t *node; - bool is_zeroed; - - /* Allocate one or more contiguous chunks for this request. */ - - usize = sa2u(size, alignment); - if (unlikely(usize == 0)) - return (NULL); - assert(usize >= chunksize); - - /* Allocate an extent node with which to track the chunk. */ - node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)), - CACHELINE, false, tcache, true, arena); - if (node == NULL) - return (NULL); - - /* - * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that - * it is possible to make correct junk/zero fill decisions below. - */ - is_zeroed = zero; - arena = arena_choose(tsd, arena); - if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena, - size, alignment, &is_zeroed)) == NULL) { - idalloctm(tsd, node, tcache, true); - return (NULL); - } - - extent_node_init(node, arena, ret, size, is_zeroed, true); - - if (huge_node_set(ret, node)) { - arena_chunk_dalloc_huge(arena, ret, size); - idalloctm(tsd, node, tcache, true); - return (NULL); - } - - /* Insert node into huge. */ - malloc_mutex_lock(&arena->huge_mtx); - ql_elm_new(node, ql_link); - ql_tail_insert(&arena->huge, node, ql_link); - malloc_mutex_unlock(&arena->huge_mtx); - - if (zero || (config_fill && unlikely(opt_zero))) { - if (!is_zeroed) - memset(ret, 0, size); - } else if (config_fill && unlikely(opt_junk_alloc)) - memset(ret, 0xa5, size); - - return (ret); -} - -#ifdef JEMALLOC_JET -#undef huge_dalloc_junk -#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl) -#endif -static void -huge_dalloc_junk(void *ptr, size_t usize) -{ - - if (config_fill && have_dss && unlikely(opt_junk_free)) { - /* - * Only bother junk filling if the chunk isn't about to be - * unmapped. 
- */ - if (!config_munmap || (have_dss && chunk_in_dss(ptr))) - memset(ptr, 0x5a, usize); - } -} -#ifdef JEMALLOC_JET -#undef huge_dalloc_junk -#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk) -huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl); -#endif - -static void -huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min, - size_t usize_max, bool zero) -{ - size_t usize, usize_next; - extent_node_t *node; - arena_t *arena; - chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; - bool pre_zeroed, post_zeroed; - - /* Increase usize to incorporate extra. */ - for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1)) - <= oldsize; usize = usize_next) - ; /* Do nothing. */ - - if (oldsize == usize) - return; - - node = huge_node_get(ptr); - arena = extent_node_arena_get(node); - pre_zeroed = extent_node_zeroed_get(node); - - /* Fill if necessary (shrinking). */ - if (oldsize > usize) { - size_t sdiff = oldsize - usize; - if (config_fill && unlikely(opt_junk_free)) { - memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff); - post_zeroed = false; - } else { - post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks, - ptr, CHUNK_CEILING(oldsize), usize, sdiff); - } - } else - post_zeroed = pre_zeroed; - - malloc_mutex_lock(&arena->huge_mtx); - /* Update the size of the huge allocation. */ - assert(extent_node_size_get(node) != usize); - extent_node_size_set(node, usize); - /* Update zeroed. */ - extent_node_zeroed_set(node, post_zeroed); - malloc_mutex_unlock(&arena->huge_mtx); - - arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize); - - /* Fill if necessary (growing). */ - if (oldsize < usize) { - if (zero || (config_fill && unlikely(opt_zero))) { - if (!pre_zeroed) { - memset((void *)((uintptr_t)ptr + oldsize), 0, - usize - oldsize); - } - } else if (config_fill && unlikely(opt_junk_alloc)) { - memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize - - oldsize); - } - } -} - -static bool -huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize) -{ - extent_node_t *node; - arena_t *arena; - chunk_hooks_t chunk_hooks; - size_t cdiff; - bool pre_zeroed, post_zeroed; - - node = huge_node_get(ptr); - arena = extent_node_arena_get(node); - pre_zeroed = extent_node_zeroed_get(node); - chunk_hooks = chunk_hooks_get(arena); - - assert(oldsize > usize); - - /* Split excess chunks. */ - cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize); - if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize), - CHUNK_CEILING(usize), cdiff, true, arena->ind)) - return (true); - - if (oldsize > usize) { - size_t sdiff = oldsize - usize; - if (config_fill && unlikely(opt_junk_free)) { - huge_dalloc_junk((void *)((uintptr_t)ptr + usize), - sdiff); - post_zeroed = false; - } else { - post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks, - CHUNK_ADDR2BASE((uintptr_t)ptr + usize), - CHUNK_CEILING(oldsize), - CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff); - } - } else - post_zeroed = pre_zeroed; - - malloc_mutex_lock(&arena->huge_mtx); - /* Update the size of the huge allocation. */ - extent_node_size_set(node, usize); - /* Update zeroed. */ - extent_node_zeroed_set(node, post_zeroed); - malloc_mutex_unlock(&arena->huge_mtx); - - /* Zap the excess chunks. 
*/ - arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize); - - return (false); -} - -static bool -huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero) { - extent_node_t *node; - arena_t *arena; - bool is_zeroed_subchunk, is_zeroed_chunk; - - node = huge_node_get(ptr); - arena = extent_node_arena_get(node); - malloc_mutex_lock(&arena->huge_mtx); - is_zeroed_subchunk = extent_node_zeroed_get(node); - malloc_mutex_unlock(&arena->huge_mtx); - - /* - * Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so - * that it is possible to make correct junk/zero fill decisions below. - */ - is_zeroed_chunk = zero; - - if (arena_chunk_ralloc_huge_expand(arena, ptr, oldsize, usize, - &is_zeroed_chunk)) - return (true); - - malloc_mutex_lock(&arena->huge_mtx); - /* Update the size of the huge allocation. */ - extent_node_size_set(node, usize); - malloc_mutex_unlock(&arena->huge_mtx); - - if (zero || (config_fill && unlikely(opt_zero))) { - if (!is_zeroed_subchunk) { - memset((void *)((uintptr_t)ptr + oldsize), 0, - CHUNK_CEILING(oldsize) - oldsize); - } - if (!is_zeroed_chunk) { - memset((void *)((uintptr_t)ptr + - CHUNK_CEILING(oldsize)), 0, usize - - CHUNK_CEILING(oldsize)); - } - } else if (config_fill && unlikely(opt_junk_alloc)) { - memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize - - oldsize); - } - - return (false); -} - -bool -huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min, - size_t usize_max, bool zero) -{ - - assert(s2u(oldsize) == oldsize); - - /* Both allocations must be huge to avoid a move. */ - if (oldsize < chunksize || usize_max < chunksize) - return (true); - - if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) { - /* Attempt to expand the allocation in-place. */ - if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max, zero)) - return (false); - /* Try again, this time with usize_min. */ - if (usize_min < usize_max && CHUNK_CEILING(usize_min) > - CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(ptr, - oldsize, usize_min, zero)) - return (false); - } - - /* - * Avoid moving the allocation if the existing chunk size accommodates - * the new size. - */ - if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min) - && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) { - huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max, - zero); - return (false); - } - - /* Attempt to shrink the allocation in-place. */ - if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) - return (huge_ralloc_no_move_shrink(ptr, oldsize, usize_max)); - return (true); -} - -static void * -huge_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize, - size_t alignment, bool zero, tcache_t *tcache) -{ - - if (alignment <= chunksize) - return (huge_malloc(tsd, arena, usize, zero, tcache)); - return (huge_palloc(tsd, arena, usize, alignment, zero, tcache)); -} - -void * -huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize, - size_t alignment, bool zero, tcache_t *tcache) -{ - void *ret; - size_t copysize; - - /* Try to avoid moving the allocation. */ - if (!huge_ralloc_no_move(ptr, oldsize, usize, usize, zero)) - return (ptr); - - /* - * usize and oldsize are different enough that we need to use a - * different size class. In that case, fall back to allocating new - * space and copying. - */ - ret = huge_ralloc_move_helper(tsd, arena, usize, alignment, zero, - tcache); - if (ret == NULL) - return (NULL); - - copysize = (usize < oldsize) ? 
usize : oldsize; - memcpy(ret, ptr, copysize); - isqalloc(tsd, ptr, oldsize, tcache); - return (ret); -} - -void -huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache) -{ - extent_node_t *node; - arena_t *arena; - - node = huge_node_get(ptr); - arena = extent_node_arena_get(node); - huge_node_unset(ptr, node); - malloc_mutex_lock(&arena->huge_mtx); - ql_remove(&arena->huge, node, ql_link); - malloc_mutex_unlock(&arena->huge_mtx); - - huge_dalloc_junk(extent_node_addr_get(node), - extent_node_size_get(node)); - arena_chunk_dalloc_huge(extent_node_arena_get(node), - extent_node_addr_get(node), extent_node_size_get(node)); - idalloctm(tsd, node, tcache, true); -} - -arena_t * -huge_aalloc(const void *ptr) -{ - - return (extent_node_arena_get(huge_node_get(ptr))); -} - -size_t -huge_salloc(const void *ptr) -{ - size_t size; - extent_node_t *node; - arena_t *arena; - - node = huge_node_get(ptr); - arena = extent_node_arena_get(node); - malloc_mutex_lock(&arena->huge_mtx); - size = extent_node_size_get(node); - malloc_mutex_unlock(&arena->huge_mtx); - - return (size); -} - -prof_tctx_t * -huge_prof_tctx_get(const void *ptr) -{ - prof_tctx_t *tctx; - extent_node_t *node; - arena_t *arena; - - node = huge_node_get(ptr); - arena = extent_node_arena_get(node); - malloc_mutex_lock(&arena->huge_mtx); - tctx = extent_node_prof_tctx_get(node); - malloc_mutex_unlock(&arena->huge_mtx); - - return (tctx); -} - -void -huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx) -{ - extent_node_t *node; - arena_t *arena; - - node = huge_node_get(ptr); - arena = extent_node_arena_get(node); - malloc_mutex_lock(&arena->huge_mtx); - extent_node_prof_tctx_set(node, tctx); - malloc_mutex_unlock(&arena->huge_mtx); -} - -void -huge_prof_tctx_reset(const void *ptr) -{ - - huge_prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U); -} diff --git a/deps/jemalloc/src/jemalloc.c b/deps/jemalloc/src/jemalloc.c index 54a3b1ba1..f93c16fa3 100644 --- a/deps/jemalloc/src/jemalloc.c +++ b/deps/jemalloc/src/jemalloc.c @@ -1,11 +1,32 @@ -#define JEMALLOC_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/extent_dss.h" +#include "jemalloc/internal/extent_mmap.h" +#include "jemalloc/internal/jemalloc_internal_types.h" +#include "jemalloc/internal/log.h" +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/spin.h" +#include "jemalloc/internal/sz.h" +#include "jemalloc/internal/ticker.h" +#include "jemalloc/internal/util.h" /******************************************************************************/ /* Data. */ /* Runtime configuration options. 
*/ -const char *je_malloc_conf JEMALLOC_ATTR(weak); +const char *je_malloc_conf +#ifndef _WIN32 + JEMALLOC_ATTR(weak) +#endif + ; bool opt_abort = #ifdef JEMALLOC_DEBUG true @@ -13,6 +34,13 @@ bool opt_abort = false #endif ; +bool opt_abort_conf = +#ifdef JEMALLOC_DEBUG + true +#else + false +#endif + ; const char *opt_junk = #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) "true" @@ -35,20 +63,15 @@ bool opt_junk_free = #endif ; -size_t opt_quarantine = ZU(0); -bool opt_redzone = false; bool opt_utrace = false; bool opt_xmalloc = false; bool opt_zero = false; -size_t opt_narenas = 0; - -/* Initialized to true if the process is running inside Valgrind. */ -bool in_valgrind; +unsigned opt_narenas = 0; unsigned ncpus; -/* Protects arenas initialization (arenas, narenas_total). */ -static malloc_mutex_t arenas_lock; +/* Protects arenas initialization. */ +malloc_mutex_t arenas_lock; /* * Arenas that are used to service external requests. Not all elements of the * arenas array are necessarily used; arenas are created lazily as needed. @@ -56,11 +79,14 @@ static malloc_mutex_t arenas_lock; * arenas[0..narenas_auto) are used for automatic multiplexing of threads and * arenas. arenas[narenas_auto..narenas_total) are only used if the application * takes some action to create them and allocate from them. + * + * Points to an arena_t. */ -static arena_t **arenas; -static unsigned narenas_total; +JEMALLOC_ALIGNED(CACHELINE) +atomic_p_t arenas[MALLOCX_ARENA_LIMIT]; +static atomic_u_t narenas_total; /* Use narenas_total_*(). */ static arena_t *a0; /* arenas[0]; read-only after initialization. */ -static unsigned narenas_auto; /* Read-only after initialization. */ +unsigned narenas_auto; /* Read-only after initialization. */ typedef enum { malloc_init_uninitialized = 3, @@ -70,95 +96,18 @@ typedef enum { } malloc_init_t; static malloc_init_t malloc_init_state = malloc_init_uninitialized; -JEMALLOC_ALIGNED(CACHELINE) -const size_t index2size_tab[NSIZES] = { -#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ - ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)), - SIZE_CLASSES -#undef SC -}; +/* False should be the common case. Set to true to trigger initialization. 
*/ +bool malloc_slow = true; -JEMALLOC_ALIGNED(CACHELINE) -const uint8_t size2index_tab[] = { -#if LG_TINY_MIN == 0 -#warning "Dangerous LG_TINY_MIN" -#define S2B_0(i) i, -#elif LG_TINY_MIN == 1 -#warning "Dangerous LG_TINY_MIN" -#define S2B_1(i) i, -#elif LG_TINY_MIN == 2 -#warning "Dangerous LG_TINY_MIN" -#define S2B_2(i) i, -#elif LG_TINY_MIN == 3 -#define S2B_3(i) i, -#elif LG_TINY_MIN == 4 -#define S2B_4(i) i, -#elif LG_TINY_MIN == 5 -#define S2B_5(i) i, -#elif LG_TINY_MIN == 6 -#define S2B_6(i) i, -#elif LG_TINY_MIN == 7 -#define S2B_7(i) i, -#elif LG_TINY_MIN == 8 -#define S2B_8(i) i, -#elif LG_TINY_MIN == 9 -#define S2B_9(i) i, -#elif LG_TINY_MIN == 10 -#define S2B_10(i) i, -#elif LG_TINY_MIN == 11 -#define S2B_11(i) i, -#else -#error "Unsupported LG_TINY_MIN" -#endif -#if LG_TINY_MIN < 1 -#define S2B_1(i) S2B_0(i) S2B_0(i) -#endif -#if LG_TINY_MIN < 2 -#define S2B_2(i) S2B_1(i) S2B_1(i) -#endif -#if LG_TINY_MIN < 3 -#define S2B_3(i) S2B_2(i) S2B_2(i) -#endif -#if LG_TINY_MIN < 4 -#define S2B_4(i) S2B_3(i) S2B_3(i) -#endif -#if LG_TINY_MIN < 5 -#define S2B_5(i) S2B_4(i) S2B_4(i) -#endif -#if LG_TINY_MIN < 6 -#define S2B_6(i) S2B_5(i) S2B_5(i) -#endif -#if LG_TINY_MIN < 7 -#define S2B_7(i) S2B_6(i) S2B_6(i) -#endif -#if LG_TINY_MIN < 8 -#define S2B_8(i) S2B_7(i) S2B_7(i) -#endif -#if LG_TINY_MIN < 9 -#define S2B_9(i) S2B_8(i) S2B_8(i) -#endif -#if LG_TINY_MIN < 10 -#define S2B_10(i) S2B_9(i) S2B_9(i) -#endif -#if LG_TINY_MIN < 11 -#define S2B_11(i) S2B_10(i) S2B_10(i) -#endif -#define S2B_no(i) -#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ - S2B_##lg_delta_lookup(index) - SIZE_CLASSES -#undef S2B_3 -#undef S2B_4 -#undef S2B_5 -#undef S2B_6 -#undef S2B_7 -#undef S2B_8 -#undef S2B_9 -#undef S2B_10 -#undef S2B_11 -#undef S2B_no -#undef SC +/* When malloc_slow is true, set the corresponding bits for sanity check. */ +enum { + flag_opt_junk_alloc = (1U), + flag_opt_junk_free = (1U << 1), + flag_opt_zero = (1U << 2), + flag_opt_utrace = (1U << 3), + flag_opt_xmalloc = (1U << 4) }; +static uint8_t malloc_slow_flags; #ifdef JEMALLOC_THREADED_INIT /* Used to let the initializing thread recursively allocate. */ @@ -183,19 +132,21 @@ static bool init_lock_initialized = false; JEMALLOC_ATTR(constructor) static void WINAPI -_init_init_lock(void) -{ - - /* If another constructor in the same binary is using mallctl to - * e.g. setup chunk hooks, it may end up running before this one, - * and malloc_init_hard will crash trying to lock the uninitialized - * lock. So we force an initialization of the lock in - * malloc_init_hard as well. We don't try to care about atomicity - * of the accessed to the init_lock_initialized boolean, since it - * really only matters early in the process creation, before any - * separate thread normally starts doing anything. */ - if (!init_lock_initialized) - malloc_mutex_init(&init_lock); +_init_init_lock(void) { + /* + * If another constructor in the same binary is using mallctl to e.g. + * set up extent hooks, it may end up running before this one, and + * malloc_init_hard will crash trying to lock the uninitialized lock. So + * we force an initialization of the lock in malloc_init_hard as well. + * We don't try to care about atomicity of the accessed to the + * init_lock_initialized boolean, since it really only matters early in + * the process creation, before any separate thread normally starts + * doing anything. 
+ */ + if (!init_lock_initialized) { + malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT, + malloc_mutex_rank_exclusive); + } init_lock_initialized = true; } @@ -231,6 +182,9 @@ typedef struct { # define UTRACE(a, b, c) #endif +/* Whether encountered any invalid config options. */ +static bool had_conf_error = false; + /******************************************************************************/ /* * Function prototypes for static functions that are referenced prior to @@ -245,91 +199,54 @@ static bool malloc_init_hard(void); * Begin miscellaneous support functions. */ -JEMALLOC_ALWAYS_INLINE_C bool -malloc_initialized(void) -{ - +bool +malloc_initialized(void) { return (malloc_init_state == malloc_init_initialized); } -JEMALLOC_ALWAYS_INLINE_C void -malloc_thread_init(void) -{ - - /* - * TSD initialization can't be safely done as a side effect of - * deallocation, because it is possible for a thread to do nothing but - * deallocate its TLS data via free(), in which case writing to TLS - * would cause write-after-free memory corruption. The quarantine - * facility *only* gets used as a side effect of deallocation, so make - * a best effort attempt at initializing its TSD by hooking all - * allocation events. - */ - if (config_fill && unlikely(opt_quarantine)) - quarantine_alloc_hook(); -} - -JEMALLOC_ALWAYS_INLINE_C bool -malloc_init_a0(void) -{ - - if (unlikely(malloc_init_state == malloc_init_uninitialized)) - return (malloc_init_hard_a0()); - return (false); +JEMALLOC_ALWAYS_INLINE bool +malloc_init_a0(void) { + if (unlikely(malloc_init_state == malloc_init_uninitialized)) { + return malloc_init_hard_a0(); + } + return false; } -JEMALLOC_ALWAYS_INLINE_C bool -malloc_init(void) -{ - - if (unlikely(!malloc_initialized()) && malloc_init_hard()) - return (true); - malloc_thread_init(); - - return (false); +JEMALLOC_ALWAYS_INLINE bool +malloc_init(void) { + if (unlikely(!malloc_initialized()) && malloc_init_hard()) { + return true; + } + return false; } /* - * The a0*() functions are used instead of i[mcd]alloc() in situations that + * The a0*() functions are used instead of i{d,}alloc() in situations that * cannot tolerate TLS variable access. 
*/ -arena_t * -a0get(void) -{ - - assert(a0 != NULL); - return (a0); -} - static void * -a0ialloc(size_t size, bool zero, bool is_metadata) -{ - - if (unlikely(malloc_init_a0())) - return (NULL); +a0ialloc(size_t size, bool zero, bool is_internal) { + if (unlikely(malloc_init_a0())) { + return NULL; + } - return (iallocztm(NULL, size, zero, false, is_metadata, a0get())); + return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL, + is_internal, arena_get(TSDN_NULL, 0, true), true); } static void -a0idalloc(void *ptr, bool is_metadata) -{ - - idalloctm(NULL, ptr, false, is_metadata); +a0idalloc(void *ptr, bool is_internal) { + idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true); } void * -a0malloc(size_t size) -{ - - return (a0ialloc(size, false, true)); +a0malloc(size_t size) { + return a0ialloc(size, false, true); } void -a0dalloc(void *ptr) -{ - +a0dalloc(void *ptr) { a0idalloc(ptr, true); } @@ -340,18 +257,16 @@ a0dalloc(void *ptr) */ void * -bootstrap_malloc(size_t size) -{ - - if (unlikely(size == 0)) +bootstrap_malloc(size_t size) { + if (unlikely(size == 0)) { size = 1; + } - return (a0ialloc(size, false, false)); + return a0ialloc(size, false, false); } void * -bootstrap_calloc(size_t num, size_t size) -{ +bootstrap_calloc(size_t num, size_t size) { size_t num_size; num_size = num * size; @@ -360,237 +275,261 @@ bootstrap_calloc(size_t num, size_t size) num_size = 1; } - return (a0ialloc(num_size, true, false)); + return a0ialloc(num_size, true, false); } void -bootstrap_free(void *ptr) -{ - - if (unlikely(ptr == NULL)) +bootstrap_free(void *ptr) { + if (unlikely(ptr == NULL)) { return; + } a0idalloc(ptr, false); } +void +arena_set(unsigned ind, arena_t *arena) { + atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE); +} + +static void +narenas_total_set(unsigned narenas) { + atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE); +} + +static void +narenas_total_inc(void) { + atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE); +} + +unsigned +narenas_total_get(void) { + return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE); +} + /* Create a new arena and insert it into the arenas array at index ind. */ static arena_t * -arena_init_locked(unsigned ind) -{ +arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { arena_t *arena; - /* Expand arenas if necessary. */ - assert(ind <= narenas_total); - if (ind > MALLOCX_ARENA_MAX) - return (NULL); - if (ind == narenas_total) { - unsigned narenas_new = narenas_total + 1; - arena_t **arenas_new = - (arena_t **)a0malloc(CACHELINE_CEILING(narenas_new * - sizeof(arena_t *))); - if (arenas_new == NULL) - return (NULL); - memcpy(arenas_new, arenas, narenas_total * sizeof(arena_t *)); - arenas_new[ind] = NULL; - /* - * Deallocate only if arenas came from a0malloc() (not - * base_alloc()). - */ - if (narenas_total != narenas_auto) - a0dalloc(arenas); - arenas = arenas_new; - narenas_total = narenas_new; + assert(ind <= narenas_total_get()); + if (ind >= MALLOCX_ARENA_LIMIT) { + return NULL; + } + if (ind == narenas_total_get()) { + narenas_total_inc(); } /* * Another thread may have already initialized arenas[ind] if it's an * auto arena. */ - arena = arenas[ind]; + arena = arena_get(tsdn, ind, false); if (arena != NULL) { assert(ind < narenas_auto); - return (arena); + return arena; } /* Actually initialize the arena. 
*/ - arena = arenas[ind] = arena_new(ind); - return (arena); -} + arena = arena_new(tsdn, ind, extent_hooks); -arena_t * -arena_init(unsigned ind) -{ - arena_t *arena; - - malloc_mutex_lock(&arenas_lock); - arena = arena_init_locked(ind); - malloc_mutex_unlock(&arenas_lock); - return (arena); + return arena; } -unsigned -narenas_total_get(void) -{ - unsigned narenas; - - malloc_mutex_lock(&arenas_lock); - narenas = narenas_total; - malloc_mutex_unlock(&arenas_lock); - - return (narenas); +static void +arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) { + if (ind == 0) { + return; + } + if (have_background_thread) { + bool err; + malloc_mutex_lock(tsdn, &background_thread_lock); + err = background_thread_create(tsdn_tsd(tsdn), ind); + malloc_mutex_unlock(tsdn, &background_thread_lock); + if (err) { + malloc_printf("<jemalloc>: error in background thread " + "creation for arena %u. Abort.\n", ind); + abort(); + } + } } -static void -arena_bind_locked(tsd_t *tsd, unsigned ind) -{ +arena_t * +arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { arena_t *arena; - arena = arenas[ind]; - arena->nthreads++; + malloc_mutex_lock(tsdn, &arenas_lock); + arena = arena_init_locked(tsdn, ind, extent_hooks); + malloc_mutex_unlock(tsdn, &arenas_lock); - if (tsd_nominal(tsd)) - tsd_arena_set(tsd, arena); + arena_new_create_background_thread(tsdn, ind); + + return arena; } static void -arena_bind(tsd_t *tsd, unsigned ind) -{ +arena_bind(tsd_t *tsd, unsigned ind, bool internal) { + arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false); + arena_nthreads_inc(arena, internal); - malloc_mutex_lock(&arenas_lock); - arena_bind_locked(tsd, ind); - malloc_mutex_unlock(&arenas_lock); + if (internal) { + tsd_iarena_set(tsd, arena); + } else { + tsd_arena_set(tsd, arena); + } } void -arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) -{ +arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) { arena_t *oldarena, *newarena; - malloc_mutex_lock(&arenas_lock); - oldarena = arenas[oldind]; - newarena = arenas[newind]; - oldarena->nthreads--; - newarena->nthreads++; - malloc_mutex_unlock(&arenas_lock); + oldarena = arena_get(tsd_tsdn(tsd), oldind, false); + newarena = arena_get(tsd_tsdn(tsd), newind, false); + arena_nthreads_dec(oldarena, false); + arena_nthreads_inc(newarena, false); tsd_arena_set(tsd, newarena); } -unsigned -arena_nbound(unsigned ind) -{ - unsigned nthreads; - - malloc_mutex_lock(&arenas_lock); - nthreads = arenas[ind]->nthreads; - malloc_mutex_unlock(&arenas_lock); - return (nthreads); -} - static void -arena_unbind(tsd_t *tsd, unsigned ind) -{ +arena_unbind(tsd_t *tsd, unsigned ind, bool internal) { arena_t *arena; - malloc_mutex_lock(&arenas_lock); - arena = arenas[ind]; - arena->nthreads--; - malloc_mutex_unlock(&arenas_lock); - tsd_arena_set(tsd, NULL); + arena = arena_get(tsd_tsdn(tsd), ind, false); + arena_nthreads_dec(arena, internal); + + if (internal) { + tsd_iarena_set(tsd, NULL); + } else { + tsd_arena_set(tsd, NULL); + } } -arena_t * -arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing) -{ - arena_t *arena; - arena_t **arenas_cache = tsd_arenas_cache_get(tsd); - unsigned narenas_cache = tsd_narenas_cache_get(tsd); +arena_tdata_t * +arena_tdata_get_hard(tsd_t *tsd, unsigned ind) { + arena_tdata_t *tdata, *arenas_tdata_old; + arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); + unsigned narenas_tdata_old, i; + unsigned narenas_tdata = tsd_narenas_tdata_get(tsd); unsigned narenas_actual = narenas_total_get(); - /* Deallocate old cache 
if it's too small. */ - if (arenas_cache != NULL && narenas_cache < narenas_actual) { - a0dalloc(arenas_cache); - arenas_cache = NULL; - narenas_cache = 0; - tsd_arenas_cache_set(tsd, arenas_cache); - tsd_narenas_cache_set(tsd, narenas_cache); - } - - /* Allocate cache if it's missing. */ - if (arenas_cache == NULL) { - bool *arenas_cache_bypassp = tsd_arenas_cache_bypassp_get(tsd); - assert(ind < narenas_actual || !init_if_missing); - narenas_cache = (ind < narenas_actual) ? narenas_actual : ind+1; - - if (tsd_nominal(tsd) && !*arenas_cache_bypassp) { - *arenas_cache_bypassp = true; - arenas_cache = (arena_t **)a0malloc(sizeof(arena_t *) * - narenas_cache); - *arenas_cache_bypassp = false; + /* + * Dissociate old tdata array (and set up for deallocation upon return) + * if it's too small. + */ + if (arenas_tdata != NULL && narenas_tdata < narenas_actual) { + arenas_tdata_old = arenas_tdata; + narenas_tdata_old = narenas_tdata; + arenas_tdata = NULL; + narenas_tdata = 0; + tsd_arenas_tdata_set(tsd, arenas_tdata); + tsd_narenas_tdata_set(tsd, narenas_tdata); + } else { + arenas_tdata_old = NULL; + narenas_tdata_old = 0; + } + + /* Allocate tdata array if it's missing. */ + if (arenas_tdata == NULL) { + bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd); + narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1; + + if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) { + *arenas_tdata_bypassp = true; + arenas_tdata = (arena_tdata_t *)a0malloc( + sizeof(arena_tdata_t) * narenas_tdata); + *arenas_tdata_bypassp = false; } - if (arenas_cache == NULL) { - /* - * This function must always tell the truth, even if - * it's slow, so don't let OOM, thread cleanup (note - * tsd_nominal check), nor recursive allocation - * avoidance (note arenas_cache_bypass check) get in the - * way. - */ - if (ind >= narenas_actual) - return (NULL); - malloc_mutex_lock(&arenas_lock); - arena = arenas[ind]; - malloc_mutex_unlock(&arenas_lock); - return (arena); + if (arenas_tdata == NULL) { + tdata = NULL; + goto label_return; } - assert(tsd_nominal(tsd) && !*arenas_cache_bypassp); - tsd_arenas_cache_set(tsd, arenas_cache); - tsd_narenas_cache_set(tsd, narenas_cache); + assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp); + tsd_arenas_tdata_set(tsd, arenas_tdata); + tsd_narenas_tdata_set(tsd, narenas_tdata); } /* - * Copy to cache. It's possible that the actual number of arenas has - * increased since narenas_total_get() was called above, but that causes - * no correctness issues unless two threads concurrently execute the - * arenas.extend mallctl, which we trust mallctl synchronization to + * Copy to tdata array. It's possible that the actual number of arenas + * has increased since narenas_total_get() was called above, but that + * causes no correctness issues unless two threads concurrently execute + * the arenas.create mallctl, which we trust mallctl synchronization to * prevent. */ - malloc_mutex_lock(&arenas_lock); - memcpy(arenas_cache, arenas, sizeof(arena_t *) * narenas_actual); - malloc_mutex_unlock(&arenas_lock); - if (narenas_cache > narenas_actual) { - memset(&arenas_cache[narenas_actual], 0, sizeof(arena_t *) * - (narenas_cache - narenas_actual)); + + /* Copy/initialize tickers. 
*/ + for (i = 0; i < narenas_actual; i++) { + if (i < narenas_tdata_old) { + ticker_copy(&arenas_tdata[i].decay_ticker, + &arenas_tdata_old[i].decay_ticker); + } else { + ticker_init(&arenas_tdata[i].decay_ticker, + DECAY_NTICKS_PER_UPDATE); + } + } + if (narenas_tdata > narenas_actual) { + memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t) + * (narenas_tdata - narenas_actual)); } - /* Read the refreshed cache, and init the arena if necessary. */ - arena = arenas_cache[ind]; - if (init_if_missing && arena == NULL) - arena = arenas_cache[ind] = arena_init(ind); - return (arena); + /* Read the refreshed tdata array. */ + tdata = &arenas_tdata[ind]; +label_return: + if (arenas_tdata_old != NULL) { + a0dalloc(arenas_tdata_old); + } + return tdata; } /* Slow path, called only by arena_choose(). */ arena_t * -arena_choose_hard(tsd_t *tsd) -{ - arena_t *ret; +arena_choose_hard(tsd_t *tsd, bool internal) { + arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL); + + if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) { + unsigned choose = percpu_arena_choose(); + ret = arena_get(tsd_tsdn(tsd), choose, true); + assert(ret != NULL); + arena_bind(tsd, arena_ind_get(ret), false); + arena_bind(tsd, arena_ind_get(ret), true); + + return ret; + } if (narenas_auto > 1) { - unsigned i, choose, first_null; + unsigned i, j, choose[2], first_null; + bool is_new_arena[2]; + + /* + * Determine binding for both non-internal and internal + * allocation. + * + * choose[0]: For application allocation. + * choose[1]: For internal metadata allocation. + */ + + for (j = 0; j < 2; j++) { + choose[j] = 0; + is_new_arena[j] = false; + } - choose = 0; first_null = narenas_auto; - malloc_mutex_lock(&arenas_lock); - assert(a0get() != NULL); + malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock); + assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL); for (i = 1; i < narenas_auto; i++) { - if (arenas[i] != NULL) { + if (arena_get(tsd_tsdn(tsd), i, false) != NULL) { /* * Choose the first arena that has the lowest * number of threads assigned to it. */ - if (arenas[i]->nthreads < - arenas[choose]->nthreads) - choose = i; + for (j = 0; j < 2; j++) { + if (arena_nthreads_get(arena_get( + tsd_tsdn(tsd), i, false), !!j) < + arena_nthreads_get(arena_get( + tsd_tsdn(tsd), choose[j], false), + !!j)) { + choose[j] = i; + } + } } else if (first_null == narenas_auto) { /* * Record the index of the first uninitialized @@ -605,89 +544,99 @@ arena_choose_hard(tsd_t *tsd) } } - if (arenas[choose]->nthreads == 0 - || first_null == narenas_auto) { - /* - * Use an unloaded arena, or the least loaded arena if - * all arenas are already initialized. - */ - ret = arenas[choose]; - } else { - /* Initialize a new arena. */ - choose = first_null; - ret = arena_init_locked(choose); - if (ret == NULL) { - malloc_mutex_unlock(&arenas_lock); - return (NULL); + for (j = 0; j < 2; j++) { + if (arena_nthreads_get(arena_get(tsd_tsdn(tsd), + choose[j], false), !!j) == 0 || first_null == + narenas_auto) { + /* + * Use an unloaded arena, or the least loaded + * arena if all arenas are already initialized. + */ + if (!!j == internal) { + ret = arena_get(tsd_tsdn(tsd), + choose[j], false); + } + } else { + arena_t *arena; + + /* Initialize a new arena. 
*/ + choose[j] = first_null; + arena = arena_init_locked(tsd_tsdn(tsd), + choose[j], + (extent_hooks_t *)&extent_hooks_default); + if (arena == NULL) { + malloc_mutex_unlock(tsd_tsdn(tsd), + &arenas_lock); + return NULL; + } + is_new_arena[j] = true; + if (!!j == internal) { + ret = arena; + } } + arena_bind(tsd, choose[j], !!j); } - arena_bind_locked(tsd, choose); - malloc_mutex_unlock(&arenas_lock); - } else { - ret = a0get(); - arena_bind(tsd, 0); - } + malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock); - return (ret); -} + for (j = 0; j < 2; j++) { + if (is_new_arena[j]) { + assert(choose[j] > 0); + arena_new_create_background_thread( + tsd_tsdn(tsd), choose[j]); + } + } -void -thread_allocated_cleanup(tsd_t *tsd) -{ + } else { + ret = arena_get(tsd_tsdn(tsd), 0, false); + arena_bind(tsd, 0, false); + arena_bind(tsd, 0, true); + } - /* Do nothing. */ + return ret; } void -thread_deallocated_cleanup(tsd_t *tsd) -{ +iarena_cleanup(tsd_t *tsd) { + arena_t *iarena; - /* Do nothing. */ + iarena = tsd_iarena_get(tsd); + if (iarena != NULL) { + arena_unbind(tsd, arena_ind_get(iarena), true); + } } void -arena_cleanup(tsd_t *tsd) -{ +arena_cleanup(tsd_t *tsd) { arena_t *arena; arena = tsd_arena_get(tsd); - if (arena != NULL) - arena_unbind(tsd, arena->ind); -} - -void -arenas_cache_cleanup(tsd_t *tsd) -{ - arena_t **arenas_cache; - - arenas_cache = tsd_arenas_cache_get(tsd); - if (arenas_cache != NULL) { - tsd_arenas_cache_set(tsd, NULL); - a0dalloc(arenas_cache); + if (arena != NULL) { + arena_unbind(tsd, arena_ind_get(arena), false); } } void -narenas_cache_cleanup(tsd_t *tsd) -{ - - /* Do nothing. */ -} +arenas_tdata_cleanup(tsd_t *tsd) { + arena_tdata_t *arenas_tdata; -void -arenas_cache_bypass_cleanup(tsd_t *tsd) -{ + /* Prevent tsd->arenas_tdata from being (re)created. */ + *tsd_arenas_tdata_bypassp_get(tsd) = true; - /* Do nothing. */ + arenas_tdata = tsd_arenas_tdata_get(tsd); + if (arenas_tdata != NULL) { + tsd_arenas_tdata_set(tsd, NULL); + a0dalloc(arenas_tdata); + } } static void -stats_print_atexit(void) -{ - - if (config_tcache && config_stats) { +stats_print_atexit(void) { + if (config_stats) { + tsdn_t *tsdn; unsigned narenas, i; + tsdn = tsdn_fetch(); + /* * Merge stats from extant threads. This is racy, since * individual threads do not lock when recording tcache stats @@ -696,25 +645,45 @@ stats_print_atexit(void) * continue to allocate. */ for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { - arena_t *arena = arenas[i]; + arena_t *arena = arena_get(tsdn, i, false); if (arena != NULL) { tcache_t *tcache; - /* - * tcache_stats_merge() locks bins, so if any - * code is introduced that acquires both arena - * and bin locks in the opposite order, - * deadlocks may result. - */ - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); ql_foreach(tcache, &arena->tcache_ql, link) { - tcache_stats_merge(tcache, arena); + tcache_stats_merge(tsdn, tcache, arena); } - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, + &arena->tcache_ql_mtx); } } } - je_malloc_stats_print(NULL, NULL, NULL); + je_malloc_stats_print(NULL, NULL, opt_stats_print_opts); +} + +/* + * Ensure that we don't hold any locks upon entry to or exit from allocator + * code (in a "broad" sense that doesn't count a reentrant allocation as an + * entrance or exit). 
+ */ +JEMALLOC_ALWAYS_INLINE void +check_entry_exit_locking(tsdn_t *tsdn) { + if (!config_debug) { + return; + } + if (tsdn_null(tsdn)) { + return; + } + tsd_t *tsd = tsdn_tsd(tsdn); + /* + * It's possible we hold locks at entry/exit if we're in a nested + * allocation. + */ + int8_t reentrancy_level = tsd_reentrancy_level_get(tsd); + if (reentrancy_level != 0) { + return; + } + witness_assert_lockless(tsdn_witness_tsdp_get(tsdn)); } /* @@ -725,38 +694,76 @@ stats_print_atexit(void) * Begin initialization functions. */ -#ifndef JEMALLOC_HAVE_SECURE_GETENV static char * -secure_getenv(const char *name) -{ - +jemalloc_secure_getenv(const char *name) { +#ifdef JEMALLOC_HAVE_SECURE_GETENV + return secure_getenv(name); +#else # ifdef JEMALLOC_HAVE_ISSETUGID - if (issetugid() != 0) - return (NULL); + if (issetugid() != 0) { + return NULL; + } # endif - return (getenv(name)); -} + return getenv(name); #endif +} static unsigned -malloc_ncpus(void) -{ +malloc_ncpus(void) { long result; #ifdef _WIN32 SYSTEM_INFO si; GetSystemInfo(&si); result = si.dwNumberOfProcessors; +#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT) + /* + * glibc >= 2.6 has the CPU_COUNT macro. + * + * glibc's sysconf() uses isspace(). glibc allocates for the first time + * *before* setting up the isspace tables. Therefore we need a + * different method to get the number of CPUs. + */ + { + cpu_set_t set; + + pthread_getaffinity_np(pthread_self(), sizeof(set), &set); + result = CPU_COUNT(&set); + } #else result = sysconf(_SC_NPROCESSORS_ONLN); #endif return ((result == -1) ? 1 : (unsigned)result); } +static void +init_opt_stats_print_opts(const char *v, size_t vlen) { + size_t opts_len = strlen(opt_stats_print_opts); + assert(opts_len <= stats_print_tot_num_options); + + for (size_t i = 0; i < vlen; i++) { + switch (v[i]) { +#define OPTION(o, v, d, s) case o: break; + STATS_PRINT_OPTIONS +#undef OPTION + default: continue; + } + + if (strchr(opt_stats_print_opts, v[i]) != NULL) { + /* Ignore repeated. */ + continue; + } + + opt_stats_print_opts[opts_len++] = v[i]; + opt_stats_print_opts[opts_len] = '\0'; + assert(opts_len <= stats_print_tot_num_options); + } + assert(opts_len == strlen(opt_stats_print_opts)); +} + static bool malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, - char const **v_p, size_t *vlen_p) -{ + char const **v_p, size_t *vlen_p) { bool accept; const char *opts = *opts_p; @@ -790,10 +797,10 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, malloc_write("<jemalloc>: Conf string ends " "with key\n"); } - return (true); + return true; default: malloc_write("<jemalloc>: Malformed conf string\n"); - return (true); + return true; } } @@ -826,48 +833,55 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, } *opts_p = opts; - return (false); + return false; } static void -malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, - size_t vlen) -{ +malloc_abort_invalid_conf(void) { + assert(opt_abort_conf); + malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf " + "value (see above).\n"); + abort(); +} +static void +malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, + size_t vlen) { malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k, (int)vlen, v); + /* If abort_conf is set, error out after processing all options. 
*/ + had_conf_error = true; } static void -malloc_conf_init(void) -{ +malloc_slow_flag_init(void) { + /* + * Combine the runtime options into malloc_slow for fast path. Called + * after processing all the options. + */ + malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0) + | (opt_junk_free ? flag_opt_junk_free : 0) + | (opt_zero ? flag_opt_zero : 0) + | (opt_utrace ? flag_opt_utrace : 0) + | (opt_xmalloc ? flag_opt_xmalloc : 0); + + malloc_slow = (malloc_slow_flags != 0); +} + +static void +malloc_conf_init(void) { unsigned i; char buf[PATH_MAX + 1]; const char *opts, *k, *v; size_t klen, vlen; - /* - * Automatically configure valgrind before processing options. The - * valgrind option remains in jemalloc 3.x for compatibility reasons. - */ - if (config_valgrind) { - in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false; - if (config_fill && unlikely(in_valgrind)) { - opt_junk = "false"; - opt_junk_alloc = false; - opt_junk_free = false; - assert(!opt_zero); - opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT; - opt_redzone = true; - } - if (config_tcache && unlikely(in_valgrind)) - opt_tcache = false; - } - - for (i = 0; i < 3; i++) { + for (i = 0; i < 4; i++) { /* Get runtime configuration. */ switch (i) { case 0: + opts = config_malloc_conf; + break; + case 1: if (je_malloc_conf != NULL) { /* * Use options that were compiled into the @@ -880,8 +894,8 @@ malloc_conf_init(void) opts = buf; } break; - case 1: { - int linklen = 0; + case 2: { + ssize_t linklen = 0; #ifndef _WIN32 int saved_errno = errno; const char *linkname = @@ -907,7 +921,7 @@ malloc_conf_init(void) buf[linklen] = '\0'; opts = buf; break; - } case 2: { + } case 3: { const char *envname = #ifdef JEMALLOC_PREFIX JEMALLOC_CPREFIX"MALLOC_CONF" @@ -916,7 +930,7 @@ malloc_conf_init(void) #endif ; - if ((opts = secure_getenv(envname)) != NULL) { + if ((opts = jemalloc_secure_getenv(envname)) != NULL) { /* * Do nothing; opts is already initialized to * the value of the MALLOC_CONF environment @@ -936,25 +950,28 @@ malloc_conf_init(void) while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v, &vlen)) { -#define CONF_MATCH(n) \ +#define CONF_MATCH(n) \ (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0) -#define CONF_MATCH_VALUE(n) \ +#define CONF_MATCH_VALUE(n) \ (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0) -#define CONF_HANDLE_BOOL(o, n, cont) \ +#define CONF_HANDLE_BOOL(o, n) \ if (CONF_MATCH(n)) { \ - if (CONF_MATCH_VALUE("true")) \ + if (CONF_MATCH_VALUE("true")) { \ o = true; \ - else if (CONF_MATCH_VALUE("false")) \ + } else if (CONF_MATCH_VALUE("false")) { \ o = false; \ - else { \ + } else { \ malloc_conf_error( \ "Invalid conf value", \ k, klen, v, vlen); \ } \ - if (cont) \ - continue; \ + continue; \ } -#define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \ +#define CONF_MIN_no(um, min) false +#define CONF_MIN_yes(um, min) ((um) < (min)) +#define CONF_MAX_no(um, max) false +#define CONF_MAX_yes(um, max) ((um) > (max)) +#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \ if (CONF_MATCH(n)) { \ uintmax_t um; \ char *end; \ @@ -967,25 +984,39 @@ malloc_conf_init(void) "Invalid conf value", \ k, klen, v, vlen); \ } else if (clip) { \ - if ((min) != 0 && um < (min)) \ - o = (min); \ - else if (um > (max)) \ - o = (max); \ - else \ - o = um; \ + if (CONF_MIN_##check_min(um, \ + (t)(min))) { \ + o = (t)(min); \ + } else if ( \ + CONF_MAX_##check_max(um, \ + (t)(max))) { \ + o = (t)(max); \ + } else { \ + o = (t)um; \ + } \ } else { \ - if (((min) != 0 && um < (min)) \ - || um > 
(max)) { \ + if (CONF_MIN_##check_min(um, \ + (t)(min)) || \ + CONF_MAX_##check_max(um, \ + (t)(max))) { \ malloc_conf_error( \ "Out-of-range " \ "conf value", \ k, klen, v, vlen); \ - } else \ - o = um; \ + } else { \ + o = (t)um; \ + } \ } \ continue; \ } -#define CONF_HANDLE_SSIZE_T(o, n, min, max) \ +#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \ + clip) \ + CONF_HANDLE_T_U(unsigned, o, n, min, max, \ + check_min, check_max, clip) +#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \ + CONF_HANDLE_T_U(size_t, o, n, min, max, \ + check_min, check_max, clip) +#define CONF_HANDLE_SSIZE_T(o, n, min, max) \ if (CONF_MATCH(n)) { \ long l; \ char *end; \ @@ -1002,11 +1033,12 @@ malloc_conf_init(void) malloc_conf_error( \ "Out-of-range conf value", \ k, klen, v, vlen); \ - } else \ + } else { \ o = l; \ + } \ continue; \ } -#define CONF_HANDLE_CHAR_P(o, n, d) \ +#define CONF_HANDLE_CHAR_P(o, n, d) \ if (CONF_MATCH(n)) { \ size_t cpylen = (vlen <= \ sizeof(o)-1) ? vlen : \ @@ -1016,25 +1048,33 @@ malloc_conf_init(void) continue; \ } - CONF_HANDLE_BOOL(opt_abort, "abort", true) - /* - * Chunks always require at least one header page, - * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and - * possibly an additional page in the presence of - * redzones. In order to simplify options processing, - * use a conservative bound that accommodates all these - * constraints. - */ - CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE + - LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1), - (sizeof(size_t) << 3) - 1, true) + CONF_HANDLE_BOOL(opt_abort, "abort") + CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf") + if (strncmp("metadata_thp", k, klen) == 0) { + int i; + bool match = false; + for (i = 0; i < metadata_thp_mode_limit; i++) { + if (strncmp(metadata_thp_mode_names[i], + v, vlen) == 0) { + opt_metadata_thp = i; + match = true; + break; + } + } + if (!match) { + malloc_conf_error("Invalid conf value", + k, klen, v, vlen); + } + continue; + } + CONF_HANDLE_BOOL(opt_retain, "retain") if (strncmp("dss", k, klen) == 0) { int i; bool match = false; for (i = 0; i < dss_prec_limit; i++) { if (strncmp(dss_prec_names[i], v, vlen) == 0) { - if (chunk_dss_prec_set(i)) { + if (extent_dss_prec_set(i)) { malloc_conf_error( "Error setting dss", k, klen, v, vlen); @@ -1052,11 +1092,21 @@ malloc_conf_init(void) } continue; } - CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1, - SIZE_T_MAX, false) - CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult", - -1, (sizeof(size_t) << 3) - 1) - CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true) + CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1, + UINT_MAX, yes, no, false) + CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms, + "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < + QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) : + SSIZE_MAX); + CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms, + "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < + QU(SSIZE_MAX) ? 
NSTIME_SEC_MAX * KQU(1000) : + SSIZE_MAX); + CONF_HANDLE_BOOL(opt_stats_print, "stats_print") + if (CONF_MATCH("stats_print_opts")) { + init_opt_stats_print_opts(v, vlen); + continue; + } if (config_fill) { if (CONF_MATCH("junk")) { if (CONF_MATCH_VALUE("true")) { @@ -1082,74 +1132,121 @@ malloc_conf_init(void) } continue; } - CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine", - 0, SIZE_T_MAX, false) - CONF_HANDLE_BOOL(opt_redzone, "redzone", true) - CONF_HANDLE_BOOL(opt_zero, "zero", true) + CONF_HANDLE_BOOL(opt_zero, "zero") } if (config_utrace) { - CONF_HANDLE_BOOL(opt_utrace, "utrace", true) + CONF_HANDLE_BOOL(opt_utrace, "utrace") } if (config_xmalloc) { - CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true) + CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc") } - if (config_tcache) { - CONF_HANDLE_BOOL(opt_tcache, "tcache", - !config_valgrind || !in_valgrind) - if (CONF_MATCH("tcache")) { - assert(config_valgrind && in_valgrind); - if (opt_tcache) { - opt_tcache = false; - malloc_conf_error( - "tcache cannot be enabled " - "while running inside Valgrind", - k, klen, v, vlen); + CONF_HANDLE_BOOL(opt_tcache, "tcache") + CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit, + "lg_extent_max_active_fit", 0, + (sizeof(size_t) << 3), yes, yes, false) + CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max", + -1, (sizeof(size_t) << 3) - 1) + if (strncmp("percpu_arena", k, klen) == 0) { + bool match = false; + for (int i = percpu_arena_mode_names_base; i < + percpu_arena_mode_names_limit; i++) { + if (strncmp(percpu_arena_mode_names[i], + v, vlen) == 0) { + if (!have_percpu_arena) { + malloc_conf_error( + "No getcpu support", + k, klen, v, vlen); + } + opt_percpu_arena = i; + match = true; + break; } - continue; } - CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, - "lg_tcache_max", -1, - (sizeof(size_t) << 3) - 1) + if (!match) { + malloc_conf_error("Invalid conf value", + k, klen, v, vlen); + } + continue; } + CONF_HANDLE_BOOL(opt_background_thread, + "background_thread"); + CONF_HANDLE_SIZE_T(opt_max_background_threads, + "max_background_threads", 1, + opt_max_background_threads, yes, yes, + true); if (config_prof) { - CONF_HANDLE_BOOL(opt_prof, "prof", true) + CONF_HANDLE_BOOL(opt_prof, "prof") CONF_HANDLE_CHAR_P(opt_prof_prefix, "prof_prefix", "jeprof") - CONF_HANDLE_BOOL(opt_prof_active, "prof_active", - true) + CONF_HANDLE_BOOL(opt_prof_active, "prof_active") CONF_HANDLE_BOOL(opt_prof_thread_active_init, - "prof_thread_active_init", true) + "prof_thread_active_init") CONF_HANDLE_SIZE_T(opt_lg_prof_sample, - "lg_prof_sample", 0, - (sizeof(uint64_t) << 3) - 1, true) - CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum", - true) + "lg_prof_sample", 0, (sizeof(uint64_t) << 3) + - 1, no, yes, true) + CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum") CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, "lg_prof_interval", -1, (sizeof(uint64_t) << 3) - 1) - CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump", - true) - CONF_HANDLE_BOOL(opt_prof_final, "prof_final", - true) - CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak", - true) + CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump") + CONF_HANDLE_BOOL(opt_prof_final, "prof_final") + CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak") + } + if (config_log) { + if (CONF_MATCH("log")) { + size_t cpylen = ( + vlen <= sizeof(log_var_names) ? 
+ vlen : sizeof(log_var_names) - 1); + strncpy(log_var_names, v, cpylen); + log_var_names[cpylen] = '\0'; + continue; + } + } + if (CONF_MATCH("thp")) { + bool match = false; + for (int i = 0; i < thp_mode_names_limit; i++) { + if (strncmp(thp_mode_names[i],v, vlen) + == 0) { + if (!have_madvise_huge) { + malloc_conf_error( + "No THP support", + k, klen, v, vlen); + } + opt_thp = i; + match = true; + break; + } + } + if (!match) { + malloc_conf_error("Invalid conf value", + k, klen, v, vlen); + } + continue; } malloc_conf_error("Invalid conf pair", k, klen, v, vlen); #undef CONF_MATCH +#undef CONF_MATCH_VALUE #undef CONF_HANDLE_BOOL +#undef CONF_MIN_no +#undef CONF_MIN_yes +#undef CONF_MAX_no +#undef CONF_MAX_yes +#undef CONF_HANDLE_T_U +#undef CONF_HANDLE_UNSIGNED #undef CONF_HANDLE_SIZE_T #undef CONF_HANDLE_SSIZE_T #undef CONF_HANDLE_CHAR_P } + if (opt_abort_conf && had_conf_error) { + malloc_abort_invalid_conf(); + } } + atomic_store_b(&log_init_done, true, ATOMIC_RELEASE); } -/* init_lock must be held. */ static bool -malloc_init_hard_needed(void) -{ - +malloc_init_hard_needed(void) { if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state == malloc_init_recursible)) { /* @@ -1157,193 +1254,322 @@ malloc_init_hard_needed(void) * acquired init_lock, or this thread is the initializing * thread, and it is recursively allocating. */ - return (false); + return false; } #ifdef JEMALLOC_THREADED_INIT if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) { /* Busy-wait until the initializing thread completes. */ + spin_t spinner = SPIN_INITIALIZER; do { - malloc_mutex_unlock(&init_lock); - CPU_SPINWAIT; - malloc_mutex_lock(&init_lock); + malloc_mutex_unlock(TSDN_NULL, &init_lock); + spin_adaptive(&spinner); + malloc_mutex_lock(TSDN_NULL, &init_lock); } while (!malloc_initialized()); - return (false); + return false; } #endif - return (true); + return true; } -/* init_lock must be held. */ static bool -malloc_init_hard_a0_locked(void) -{ - +malloc_init_hard_a0_locked() { malloc_initializer = INITIALIZER; - if (config_prof) + if (config_prof) { prof_boot0(); + } malloc_conf_init(); if (opt_stats_print) { /* Print statistics at exit. */ if (atexit(stats_print_atexit) != 0) { malloc_write("<jemalloc>: Error in atexit()\n"); - if (opt_abort) + if (opt_abort) { abort(); + } } } - if (base_boot()) - return (true); - if (chunk_boot()) - return (true); - if (ctl_boot()) - return (true); - if (config_prof) + if (pages_boot()) { + return true; + } + if (base_boot(TSDN_NULL)) { + return true; + } + if (extent_boot()) { + return true; + } + if (ctl_boot()) { + return true; + } + if (config_prof) { prof_boot1(); - if (arena_boot()) - return (true); - if (config_tcache && tcache_boot()) - return (true); - if (malloc_mutex_init(&arenas_lock)) - return (true); + } + arena_boot(); + if (tcache_boot(TSDN_NULL)) { + return true; + } + if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS, + malloc_mutex_rank_exclusive)) { + return true; + } /* * Create enough scaffolding to allow recursive allocation in * malloc_ncpus(). */ - narenas_total = narenas_auto = 1; - arenas = &a0; + narenas_auto = 1; memset(arenas, 0, sizeof(arena_t *) * narenas_auto); /* * Initialize one arena here. The rest are lazily created in * arena_choose_hard(). 
*/ - if (arena_init(0) == NULL) - return (true); + if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default) + == NULL) { + return true; + } + a0 = arena_get(TSDN_NULL, 0, false); malloc_init_state = malloc_init_a0_initialized; - return (false); + + return false; } static bool -malloc_init_hard_a0(void) -{ +malloc_init_hard_a0(void) { bool ret; - malloc_mutex_lock(&init_lock); + malloc_mutex_lock(TSDN_NULL, &init_lock); ret = malloc_init_hard_a0_locked(); - malloc_mutex_unlock(&init_lock); - return (ret); + malloc_mutex_unlock(TSDN_NULL, &init_lock); + return ret; } -/* - * Initialize data structures which may trigger recursive allocation. - * - * init_lock must be held. - */ -static void -malloc_init_hard_recursible(void) -{ - +/* Initialize data structures which may trigger recursive allocation. */ +static bool +malloc_init_hard_recursible(void) { malloc_init_state = malloc_init_recursible; - malloc_mutex_unlock(&init_lock); ncpus = malloc_ncpus(); -#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \ - && !defined(_WIN32) && !defined(__native_client__)) - /* LinuxThreads's pthread_atfork() allocates. */ +#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \ + && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \ + !defined(__native_client__)) + /* LinuxThreads' pthread_atfork() allocates. */ if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, jemalloc_postfork_child) != 0) { malloc_write("<jemalloc>: Error in pthread_atfork()\n"); - if (opt_abort) + if (opt_abort) { abort(); + } + return true; } #endif - malloc_mutex_lock(&init_lock); + + if (background_thread_boot0()) { + return true; + } + + return false; } -/* init_lock must be held. */ -static bool -malloc_init_hard_finish(void) -{ +static unsigned +malloc_narenas_default(void) { + assert(ncpus > 0); + /* + * For SMP systems, create more than one arena per CPU by + * default. + */ + if (ncpus > 1) { + return ncpus << 2; + } else { + return 1; + } +} + +static percpu_arena_mode_t +percpu_arena_as_initialized(percpu_arena_mode_t mode) { + assert(!malloc_initialized()); + assert(mode <= percpu_arena_disabled); + + if (mode != percpu_arena_disabled) { + mode += percpu_arena_mode_enabled_base; + } - if (mutex_boot()) - return (true); + return mode; +} +static bool +malloc_init_narenas(void) { + assert(ncpus > 0); + + if (opt_percpu_arena != percpu_arena_disabled) { + if (!have_percpu_arena || malloc_getcpu() < 0) { + opt_percpu_arena = percpu_arena_disabled; + malloc_printf("<jemalloc>: perCPU arena getcpu() not " + "available. Setting narenas to %u.\n", opt_narenas ? + opt_narenas : malloc_narenas_default()); + if (opt_abort) { + abort(); + } + } else { + if (ncpus >= MALLOCX_ARENA_LIMIT) { + malloc_printf("<jemalloc>: narenas w/ percpu" + "arena beyond limit (%d)\n", ncpus); + if (opt_abort) { + abort(); + } + return true; + } + /* NB: opt_percpu_arena isn't fully initialized yet. */ + if (percpu_arena_as_initialized(opt_percpu_arena) == + per_phycpu_arena && ncpus % 2 != 0) { + malloc_printf("<jemalloc>: invalid " + "configuration -- per physical CPU arena " + "with odd number (%u) of CPUs (no hyper " + "threading?).\n", ncpus); + if (opt_abort) + abort(); + } + unsigned n = percpu_arena_ind_limit( + percpu_arena_as_initialized(opt_percpu_arena)); + if (opt_narenas < n) { + /* + * If narenas is specified with percpu_arena + * enabled, actual narenas is set as the greater + * of the two. percpu_arena_choose will be free + * to use any of the arenas based on CPU + * id. 
This is conservative (at a small cost) + * but ensures correctness. + * + * If for some reason the ncpus determined at + * boot is not the actual number (e.g. because + * of affinity setting from numactl), reserving + * narenas this way provides a workaround for + * percpu_arena. + */ + opt_narenas = n; + } + } + } if (opt_narenas == 0) { - /* - * For SMP systems, create more than one arena per CPU by - * default. - */ - if (ncpus > 1) - opt_narenas = ncpus << 2; - else - opt_narenas = 1; + opt_narenas = malloc_narenas_default(); } + assert(opt_narenas > 0); + narenas_auto = opt_narenas; /* - * Make sure that the arenas array can be allocated. In practice, this - * limit is enough to allow the allocator to function, but the ctl - * machinery will fail to allocate memory at far lower limits. + * Limit the number of arenas to the indexing range of MALLOCX_ARENA(). */ - if (narenas_auto > chunksize / sizeof(arena_t *)) { - narenas_auto = chunksize / sizeof(arena_t *); + if (narenas_auto >= MALLOCX_ARENA_LIMIT) { + narenas_auto = MALLOCX_ARENA_LIMIT - 1; malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n", narenas_auto); } - narenas_total = narenas_auto; + narenas_total_set(narenas_auto); - /* Allocate and initialize arenas. */ - arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total); - if (arenas == NULL) - return (true); - /* - * Zero the array. In practice, this should always be pre-zeroed, - * since it was just mmap()ed, but let's be sure. - */ - memset(arenas, 0, sizeof(arena_t *) * narenas_total); - /* Copy the pointer to the one arena that was already initialized. */ - arenas[0] = a0; + return false; +} + +static void +malloc_init_percpu(void) { + opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena); +} + +static bool +malloc_init_hard_finish(void) { + if (malloc_mutex_boot()) { + return true; + } malloc_init_state = malloc_init_initialized; - return (false); + malloc_slow_flag_init(); + + return false; +} + +static void +malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) { + malloc_mutex_assert_owner(tsdn, &init_lock); + malloc_mutex_unlock(tsdn, &init_lock); + if (reentrancy_set) { + assert(!tsdn_null(tsdn)); + tsd_t *tsd = tsdn_tsd(tsdn); + assert(tsd_reentrancy_level_get(tsd) > 0); + post_reentrancy(tsd); + } } static bool -malloc_init_hard(void) -{ +malloc_init_hard(void) { + tsd_t *tsd; #if defined(_WIN32) && _WIN32_WINNT < 0x0600 _init_init_lock(); #endif - malloc_mutex_lock(&init_lock); + malloc_mutex_lock(TSDN_NULL, &init_lock); + +#define UNLOCK_RETURN(tsdn, ret, reentrancy) \ + malloc_init_hard_cleanup(tsdn, reentrancy); \ + return ret; + if (!malloc_init_hard_needed()) { - malloc_mutex_unlock(&init_lock); - return (false); + UNLOCK_RETURN(TSDN_NULL, false, false) } if (malloc_init_state != malloc_init_a0_initialized && malloc_init_hard_a0_locked()) { - malloc_mutex_unlock(&init_lock); - return (true); + UNLOCK_RETURN(TSDN_NULL, true, false) } - if (malloc_tsd_boot0()) { - malloc_mutex_unlock(&init_lock); - return (true); + + malloc_mutex_unlock(TSDN_NULL, &init_lock); + /* Recursive allocation relies on functional tsd. */ + tsd = malloc_tsd_boot0(); + if (tsd == NULL) { + return true; + } + if (malloc_init_hard_recursible()) { + return true; + } + + malloc_mutex_lock(tsd_tsdn(tsd), &init_lock); + /* Set reentrancy level to 1 during init. */ + pre_reentrancy(tsd, NULL); + /* Initialize narenas before prof_boot2 (for allocation). 
*/ + if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) { + UNLOCK_RETURN(tsd_tsdn(tsd), true, true) } - if (config_prof && prof_boot2()) { - malloc_mutex_unlock(&init_lock); - return (true); + if (config_prof && prof_boot2(tsd)) { + UNLOCK_RETURN(tsd_tsdn(tsd), true, true) } - malloc_init_hard_recursible(); + malloc_init_percpu(); if (malloc_init_hard_finish()) { - malloc_mutex_unlock(&init_lock); - return (true); + UNLOCK_RETURN(tsd_tsdn(tsd), true, true) } + post_reentrancy(tsd); + malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); - malloc_mutex_unlock(&init_lock); + witness_assert_lockless(witness_tsd_tsdn( + tsd_witness_tsdp_get_unsafe(tsd))); malloc_tsd_boot1(); - return (false); + /* Update TSD after tsd_boot1. */ + tsd = tsd_fetch(); + if (opt_background_thread) { + assert(have_background_thread); + /* + * Need to finish init & unlock first before creating background + * threads (pthread_create depends on malloc). ctl_init (which + * sets isthreaded) needs to be called without holding any lock. + */ + background_thread_ctl_init(tsd_tsdn(tsd)); + + malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); + bool err = background_thread_create(tsd, 0); + malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); + if (err) { + return true; + } + } +#undef UNLOCK_RETURN + return false; } /* @@ -1351,463 +1577,772 @@ malloc_init_hard(void) */ /******************************************************************************/ /* - * Begin malloc(3)-compatible functions. + * Begin allocation-path internal functions and data structures. */ -static void * -imalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) -{ - void *p; +/* + * Settings determined by the documented behavior of the allocation functions. + */ +typedef struct static_opts_s static_opts_t; +struct static_opts_s { + /* Whether or not allocation size may overflow. */ + bool may_overflow; + /* Whether or not allocations of size 0 should be treated as size 1. */ + bool bump_empty_alloc; + /* + * Whether to assert that allocations are not of size 0 (after any + * bumping). + */ + bool assert_nonempty_alloc; - if (tctx == NULL) - return (NULL); - if (usize <= SMALL_MAXCLASS) { - p = imalloc(tsd, LARGE_MINCLASS); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); - } else - p = imalloc(tsd, usize); + /* + * Whether or not to modify the 'result' argument to malloc in case of + * error. + */ + bool null_out_result_on_error; + /* Whether to set errno when we encounter an error condition. */ + bool set_errno_on_error; - return (p); -} + /* + * The minimum valid alignment for functions requesting aligned storage. + */ + size_t min_alignment; -JEMALLOC_ALWAYS_INLINE_C void * -imalloc_prof(tsd_t *tsd, size_t usize) -{ - void *p; - prof_tctx_t *tctx; + /* The error string to use if we oom. */ + const char *oom_string; + /* The error string to use if the passed-in alignment is invalid. */ + const char *invalid_alignment_string; - tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true); - if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) - p = imalloc_prof_sample(tsd, usize, tctx); - else - p = imalloc(tsd, usize); - if (unlikely(p == NULL)) { - prof_alloc_rollback(tsd, tctx, true); - return (NULL); - } - prof_malloc(p, usize, tctx); + /* + * False if we're configured to skip some time-consuming operations. + * + * This isn't really a malloc "behavior", but it acts as a useful + * summary of several other static (or at least, static after program + * initialization) options. 
+ */ + bool slow; +}; - return (p); +JEMALLOC_ALWAYS_INLINE void +static_opts_init(static_opts_t *static_opts) { + static_opts->may_overflow = false; + static_opts->bump_empty_alloc = false; + static_opts->assert_nonempty_alloc = false; + static_opts->null_out_result_on_error = false; + static_opts->set_errno_on_error = false; + static_opts->min_alignment = 0; + static_opts->oom_string = ""; + static_opts->invalid_alignment_string = ""; + static_opts->slow = false; } -JEMALLOC_ALWAYS_INLINE_C void * -imalloc_body(size_t size, tsd_t **tsd, size_t *usize) -{ +/* + * These correspond to the macros in jemalloc/jemalloc_macros.h. Broadly, we + * should have one constant here per magic value there. Note however that the + * representations need not be related. + */ +#define TCACHE_IND_NONE ((unsigned)-1) +#define TCACHE_IND_AUTOMATIC ((unsigned)-2) +#define ARENA_IND_AUTOMATIC ((unsigned)-1) + +typedef struct dynamic_opts_s dynamic_opts_t; +struct dynamic_opts_s { + void **result; + size_t num_items; + size_t item_size; + size_t alignment; + bool zero; + unsigned tcache_ind; + unsigned arena_ind; +}; + +JEMALLOC_ALWAYS_INLINE void +dynamic_opts_init(dynamic_opts_t *dynamic_opts) { + dynamic_opts->result = NULL; + dynamic_opts->num_items = 0; + dynamic_opts->item_size = 0; + dynamic_opts->alignment = 0; + dynamic_opts->zero = false; + dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC; + dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC; +} + +/* ind is ignored if dopts->alignment > 0. */ +JEMALLOC_ALWAYS_INLINE void * +imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, + size_t size, size_t usize, szind_t ind) { + tcache_t *tcache; + arena_t *arena; - if (unlikely(malloc_init())) - return (NULL); - *tsd = tsd_fetch(); + /* Fill in the tcache. */ + if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) { + if (likely(!sopts->slow)) { + /* Getting tcache ptr unconditionally. */ + tcache = tsd_tcachep_get(tsd); + assert(tcache == tcache_get(tsd)); + } else { + tcache = tcache_get(tsd); + } + } else if (dopts->tcache_ind == TCACHE_IND_NONE) { + tcache = NULL; + } else { + tcache = tcaches_get(tsd, dopts->tcache_ind); + } - if (config_prof && opt_prof) { - *usize = s2u(size); - if (unlikely(*usize == 0)) - return (NULL); - return (imalloc_prof(*tsd, *usize)); + /* Fill in the arena. */ + if (dopts->arena_ind == ARENA_IND_AUTOMATIC) { + /* + * In case of automatic arena management, we defer arena + * computation until as late as we can, hoping to fill the + * allocation out of the tcache. + */ + arena = NULL; + } else { + arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true); + } + + if (unlikely(dopts->alignment != 0)) { + return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment, + dopts->zero, tcache, arena); } - if (config_stats || (config_valgrind && unlikely(in_valgrind))) - *usize = s2u(size); - return (imalloc(*tsd, size)); + return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false, + arena, sopts->slow); } -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) -je_malloc(size_t size) -{ +JEMALLOC_ALWAYS_INLINE void * +imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, + size_t usize, szind_t ind) { void *ret; - tsd_t *tsd; - size_t usize JEMALLOC_CC_SILENCE_INIT(0); - if (size == 0) - size = 1; + /* + * For small allocations, sampling bumps the usize. If so, we allocate + * from the ind_large bucket. 
+ */ + szind_t ind_large; + size_t bumped_usize = usize; - ret = imalloc_body(size, &tsd, &usize); - if (unlikely(ret == NULL)) { - if (config_xmalloc && unlikely(opt_xmalloc)) { - malloc_write("<jemalloc>: Error in malloc(): " - "out of memory\n"); - abort(); + if (usize <= SMALL_MAXCLASS) { + assert(((dopts->alignment == 0) ? sz_s2u(LARGE_MINCLASS) : + sz_sa2u(LARGE_MINCLASS, dopts->alignment)) + == LARGE_MINCLASS); + ind_large = sz_size2index(LARGE_MINCLASS); + bumped_usize = sz_s2u(LARGE_MINCLASS); + ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize, + bumped_usize, ind_large); + if (unlikely(ret == NULL)) { + return NULL; } - set_errno(ENOMEM); - } - if (config_stats && likely(ret != NULL)) { - assert(usize == isalloc(ret, config_prof)); - *tsd_thread_allocatedp_get(tsd) += usize; + arena_prof_promote(tsd_tsdn(tsd), ret, usize); + } else { + ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind); } - UTRACE(0, size, ret); - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false); - return (ret); + + return ret; } -static void * -imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize, - prof_tctx_t *tctx) -{ - void *p; +/* + * Returns true if the allocation will overflow, and false otherwise. Sets + * *size to the product either way. + */ +JEMALLOC_ALWAYS_INLINE bool +compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts, + size_t *size) { + /* + * This function is just num_items * item_size, except that we may have + * to check for overflow. + */ - if (tctx == NULL) - return (NULL); - if (usize <= SMALL_MAXCLASS) { - assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS); - p = ipalloc(tsd, LARGE_MINCLASS, alignment, false); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); - } else - p = ipalloc(tsd, usize, alignment, false); + if (!may_overflow) { + assert(dopts->num_items == 1); + *size = dopts->item_size; + return false; + } - return (p); -} + /* A size_t with its high-half bits all set to 1. */ + static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2); -JEMALLOC_ALWAYS_INLINE_C void * -imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize) -{ - void *p; - prof_tctx_t *tctx; + *size = dopts->item_size * dopts->num_items; - tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true); - if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) - p = imemalign_prof_sample(tsd, alignment, usize, tctx); - else - p = ipalloc(tsd, usize, alignment, false); - if (unlikely(p == NULL)) { - prof_alloc_rollback(tsd, tctx, true); - return (NULL); + if (unlikely(*size == 0)) { + return (dopts->num_items != 0 && dopts->item_size != 0); } - prof_malloc(p, usize, tctx); - return (p); + /* + * We got a non-zero size, but we don't know if we overflowed to get + * there. To avoid having to do a divide, we'll be clever and note that + * if both A and B can be represented in N/2 bits, then their product + * can be represented in N bits (without the possibility of overflow). + */ + if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) { + return false; + } + if (likely(*size / dopts->item_size == dopts->num_items)) { + return false; + } + return true; } -JEMALLOC_ATTR(nonnull(1)) -static int -imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment) -{ - int ret; - tsd_t *tsd; - size_t usize; - void *result; +JEMALLOC_ALWAYS_INLINE int +imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { + /* Where the actual allocated memory will live. 
*/ + void *allocation = NULL; + /* Filled in by compute_size_with_overflow below. */ + size_t size = 0; + /* + * For unaligned allocations, we need only ind. For aligned + * allocations, or in case of stats or profiling we need usize. + * + * These are actually dead stores, in that their values are reset before + * any branch on their value is taken. Sometimes though, it's + * convenient to pass them as arguments before this point. To avoid + * undefined behavior then, we initialize them with dummy stores. + */ + szind_t ind = 0; + size_t usize = 0; - assert(min_alignment != 0); + /* Reentrancy is only checked on slow path. */ + int8_t reentrancy_level; - if (unlikely(malloc_init())) { - result = NULL; + /* Compute the amount of memory the user wants. */ + if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts, + &size))) { goto label_oom; } - tsd = tsd_fetch(); - if (size == 0) - size = 1; - /* Make sure that alignment is a large enough power of 2. */ - if (unlikely(((alignment - 1) & alignment) != 0 - || (alignment < min_alignment))) { - if (config_xmalloc && unlikely(opt_xmalloc)) { - malloc_write("<jemalloc>: Error allocating " - "aligned memory: invalid alignment\n"); - abort(); + /* Validate the user input. */ + if (sopts->bump_empty_alloc) { + if (unlikely(size == 0)) { + size = 1; } - result = NULL; - ret = EINVAL; - goto label_return; } - usize = sa2u(size, alignment); - if (unlikely(usize == 0)) { - result = NULL; - goto label_oom; + if (sopts->assert_nonempty_alloc) { + assert (size != 0); } - if (config_prof && opt_prof) - result = imemalign_prof(tsd, alignment, usize); - else - result = ipalloc(tsd, usize, alignment, false); - if (unlikely(result == NULL)) - goto label_oom; - assert(((uintptr_t)result & (alignment - 1)) == ZU(0)); + if (unlikely(dopts->alignment < sopts->min_alignment + || (dopts->alignment & (dopts->alignment - 1)) != 0)) { + goto label_invalid_alignment; + } - *memptr = result; - ret = 0; -label_return: - if (config_stats && likely(result != NULL)) { - assert(usize == isalloc(result, config_prof)); + /* This is the beginning of the "core" algorithm. */ + + if (dopts->alignment == 0) { + ind = sz_size2index(size); + if (unlikely(ind >= NSIZES)) { + goto label_oom; + } + if (config_stats || (config_prof && opt_prof)) { + usize = sz_index2size(ind); + assert(usize > 0 && usize <= LARGE_MAXCLASS); + } + } else { + usize = sz_sa2u(size, dopts->alignment); + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { + goto label_oom; + } + } + + check_entry_exit_locking(tsd_tsdn(tsd)); + + /* + * If we need to handle reentrancy, we can do it out of a + * known-initialized arena (i.e. arena 0). + */ + reentrancy_level = tsd_reentrancy_level_get(tsd); + if (sopts->slow && unlikely(reentrancy_level > 0)) { + /* + * We should never specify particular arenas or tcaches from + * within our internal allocations. + */ + assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC || + dopts->tcache_ind == TCACHE_IND_NONE); + assert(dopts->arena_ind == ARENA_IND_AUTOMATIC); + dopts->tcache_ind = TCACHE_IND_NONE; + /* We know that arena 0 has already been initialized. */ + dopts->arena_ind = 0; + } + + /* If profiling is on, get our profiling context. */ + if (config_prof && opt_prof) { + /* + * Note that if we're going down this path, usize must have been + * initialized in the previous if statement. 
+ */ + prof_tctx_t *tctx = prof_alloc_prep( + tsd, usize, prof_active_get_unlocked(), true); + + alloc_ctx_t alloc_ctx; + if (likely((uintptr_t)tctx == (uintptr_t)1U)) { + alloc_ctx.slab = (usize <= SMALL_MAXCLASS); + allocation = imalloc_no_sample( + sopts, dopts, tsd, usize, usize, ind); + } else if ((uintptr_t)tctx > (uintptr_t)1U) { + /* + * Note that ind might still be 0 here. This is fine; + * imalloc_sample ignores ind if dopts->alignment > 0. + */ + allocation = imalloc_sample( + sopts, dopts, tsd, usize, ind); + alloc_ctx.slab = false; + } else { + allocation = NULL; + } + + if (unlikely(allocation == NULL)) { + prof_alloc_rollback(tsd, tctx, true); + goto label_oom; + } + prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx); + } else { + /* + * If dopts->alignment > 0, then ind is still 0, but usize was + * computed in the previous if statement. Down the positive + * alignment path, imalloc_no_sample ignores ind and size + * (relying only on usize). + */ + allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize, + ind); + if (unlikely(allocation == NULL)) { + goto label_oom; + } + } + + /* + * Allocation has been done at this point. We still have some + * post-allocation work to do though. + */ + assert(dopts->alignment == 0 + || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0)); + + if (config_stats) { + assert(usize == isalloc(tsd_tsdn(tsd), allocation)); *tsd_thread_allocatedp_get(tsd) += usize; } - UTRACE(0, size, result); - return (ret); + + if (sopts->slow) { + UTRACE(0, size, allocation); + } + + /* Success! */ + check_entry_exit_locking(tsd_tsdn(tsd)); + *dopts->result = allocation; + return 0; + label_oom: - assert(result == NULL); + if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) { + malloc_write(sopts->oom_string); + abort(); + } + + if (sopts->slow) { + UTRACE(NULL, size, NULL); + } + + check_entry_exit_locking(tsd_tsdn(tsd)); + + if (sopts->set_errno_on_error) { + set_errno(ENOMEM); + } + + if (sopts->null_out_result_on_error) { + *dopts->result = NULL; + } + + return ENOMEM; + + /* + * This label is only jumped to by one goto; we move it out of line + * anyways to avoid obscuring the non-error paths, and for symmetry with + * the oom case. + */ +label_invalid_alignment: if (config_xmalloc && unlikely(opt_xmalloc)) { - malloc_write("<jemalloc>: Error allocating aligned memory: " - "out of memory\n"); + malloc_write(sopts->invalid_alignment_string); abort(); } - ret = ENOMEM; - goto label_return; + + if (sopts->set_errno_on_error) { + set_errno(EINVAL); + } + + if (sopts->slow) { + UTRACE(NULL, size, NULL); + } + + check_entry_exit_locking(tsd_tsdn(tsd)); + + if (sopts->null_out_result_on_error) { + *dopts->result = NULL; + } + + return EINVAL; +} + +/* Returns the errno-style error code of the allocation. */ +JEMALLOC_ALWAYS_INLINE int +imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) { + if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) { + if (config_xmalloc && unlikely(opt_xmalloc)) { + malloc_write(sopts->oom_string); + abort(); + } + UTRACE(NULL, dopts->num_items * dopts->item_size, NULL); + set_errno(ENOMEM); + *dopts->result = NULL; + + return ENOMEM; + } + + /* We always need the tsd. Let's grab it right away. */ + tsd_t *tsd = tsd_fetch(); + assert(tsd); + if (likely(tsd_fast(tsd))) { + /* Fast and common path. 
*/ + tsd_assert_fast(tsd); + sopts->slow = false; + return imalloc_body(sopts, dopts, tsd); + } else { + sopts->slow = true; + return imalloc_body(sopts, dopts, tsd); + } +} +/******************************************************************************/ +/* + * Begin malloc(3)-compatible functions. + */ + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) +je_malloc(size_t size) { + void *ret; + static_opts_t sopts; + dynamic_opts_t dopts; + + LOG("core.malloc.entry", "size: %zu", size); + + static_opts_init(&sopts); + dynamic_opts_init(&dopts); + + sopts.bump_empty_alloc = true; + sopts.null_out_result_on_error = true; + sopts.set_errno_on_error = true; + sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n"; + + dopts.result = &ret; + dopts.num_items = 1; + dopts.item_size = size; + + imalloc(&sopts, &dopts); + + LOG("core.malloc.exit", "result: %p", ret); + + return ret; } JEMALLOC_EXPORT int JEMALLOC_NOTHROW JEMALLOC_ATTR(nonnull(1)) -je_posix_memalign(void **memptr, size_t alignment, size_t size) -{ - int ret = imemalign(memptr, alignment, size, sizeof(void *)); - JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr, - config_prof), false); - return (ret); +je_posix_memalign(void **memptr, size_t alignment, size_t size) { + int ret; + static_opts_t sopts; + dynamic_opts_t dopts; + + LOG("core.posix_memalign.entry", "mem ptr: %p, alignment: %zu, " + "size: %zu", memptr, alignment, size); + + static_opts_init(&sopts); + dynamic_opts_init(&dopts); + + sopts.bump_empty_alloc = true; + sopts.min_alignment = sizeof(void *); + sopts.oom_string = + "<jemalloc>: Error allocating aligned memory: out of memory\n"; + sopts.invalid_alignment_string = + "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; + + dopts.result = memptr; + dopts.num_items = 1; + dopts.item_size = size; + dopts.alignment = alignment; + + ret = imalloc(&sopts, &dopts); + + LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret, + *memptr); + + return ret; } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2) -je_aligned_alloc(size_t alignment, size_t size) -{ +je_aligned_alloc(size_t alignment, size_t size) { void *ret; - int err; - if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) { - ret = NULL; - set_errno(err); - } - JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof), - false); - return (ret); -} + static_opts_t sopts; + dynamic_opts_t dopts; -static void * -icalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) -{ - void *p; + LOG("core.aligned_alloc.entry", "alignment: %zu, size: %zu\n", + alignment, size); - if (tctx == NULL) - return (NULL); - if (usize <= SMALL_MAXCLASS) { - p = icalloc(tsd, LARGE_MINCLASS); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); - } else - p = icalloc(tsd, usize); + static_opts_init(&sopts); + dynamic_opts_init(&dopts); - return (p); -} + sopts.bump_empty_alloc = true; + sopts.null_out_result_on_error = true; + sopts.set_errno_on_error = true; + sopts.min_alignment = 1; + sopts.oom_string = + "<jemalloc>: Error allocating aligned memory: out of memory\n"; + sopts.invalid_alignment_string = + "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; -JEMALLOC_ALWAYS_INLINE_C void * -icalloc_prof(tsd_t *tsd, size_t usize) -{ - void *p; - prof_tctx_t *tctx; + dopts.result = &ret; + dopts.num_items = 1; + dopts.item_size = 
size; + dopts.alignment = alignment; - tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true); - if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) - p = icalloc_prof_sample(tsd, usize, tctx); - else - p = icalloc(tsd, usize); - if (unlikely(p == NULL)) { - prof_alloc_rollback(tsd, tctx, true); - return (NULL); - } - prof_malloc(p, usize, tctx); + imalloc(&sopts, &dopts); - return (p); + LOG("core.aligned_alloc.exit", "result: %p", ret); + + return ret; } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) -je_calloc(size_t num, size_t size) -{ +je_calloc(size_t num, size_t size) { void *ret; - tsd_t *tsd; - size_t num_size; - size_t usize JEMALLOC_CC_SILENCE_INIT(0); + static_opts_t sopts; + dynamic_opts_t dopts; - if (unlikely(malloc_init())) { - num_size = 0; - ret = NULL; - goto label_return; - } - tsd = tsd_fetch(); + LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size); - num_size = num * size; - if (unlikely(num_size == 0)) { - if (num == 0 || size == 0) - num_size = 1; - else { - ret = NULL; - goto label_return; - } - /* - * Try to avoid division here. We know that it isn't possible to - * overflow during multiplication if neither operand uses any of the - * most significant half of the bits in a size_t. - */ - } else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) << - 2))) && (num_size / size != num))) { - /* size_t overflow. */ - ret = NULL; - goto label_return; - } + static_opts_init(&sopts); + dynamic_opts_init(&dopts); - if (config_prof && opt_prof) { - usize = s2u(num_size); - if (unlikely(usize == 0)) { - ret = NULL; - goto label_return; - } - ret = icalloc_prof(tsd, usize); - } else { - if (config_stats || (config_valgrind && unlikely(in_valgrind))) - usize = s2u(num_size); - ret = icalloc(tsd, num_size); - } + sopts.may_overflow = true; + sopts.bump_empty_alloc = true; + sopts.null_out_result_on_error = true; + sopts.set_errno_on_error = true; + sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n"; -label_return: - if (unlikely(ret == NULL)) { - if (config_xmalloc && unlikely(opt_xmalloc)) { - malloc_write("<jemalloc>: Error in calloc(): out of " - "memory\n"); - abort(); - } - set_errno(ENOMEM); - } - if (config_stats && likely(ret != NULL)) { - assert(usize == isalloc(ret, config_prof)); - *tsd_thread_allocatedp_get(tsd) += usize; - } - UTRACE(0, num_size, ret); - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true); - return (ret); + dopts.result = &ret; + dopts.num_items = num; + dopts.item_size = size; + dopts.zero = true; + + imalloc(&sopts, &dopts); + + LOG("core.calloc.exit", "result: %p", ret); + + return ret; } static void * irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, - prof_tctx_t *tctx) -{ + prof_tctx_t *tctx) { void *p; - if (tctx == NULL) - return (NULL); + if (tctx == NULL) { + return NULL; + } if (usize <= SMALL_MAXCLASS) { p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); - } else + if (p == NULL) { + return NULL; + } + arena_prof_promote(tsd_tsdn(tsd), p, usize); + } else { p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); + } - return (p); + return p; } -JEMALLOC_ALWAYS_INLINE_C void * -irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize) -{ +JEMALLOC_ALWAYS_INLINE void * +irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, + alloc_ctx_t *alloc_ctx) { void *p; bool 
prof_active; prof_tctx_t *old_tctx, *tctx; prof_active = prof_active_get_unlocked(); - old_tctx = prof_tctx_get(old_ptr); + old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); tctx = prof_alloc_prep(tsd, usize, prof_active, true); - if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) + if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx); - else + } else { p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); + } if (unlikely(p == NULL)) { prof_alloc_rollback(tsd, tctx, true); - return (NULL); + return NULL; } prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize, old_tctx); - return (p); + return p; } -JEMALLOC_INLINE_C void -ifree(tsd_t *tsd, void *ptr, tcache_t *tcache) -{ - size_t usize; - UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); +JEMALLOC_ALWAYS_INLINE void +ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) { + if (!slow_path) { + tsd_assert_fast(tsd); + } + check_entry_exit_locking(tsd_tsdn(tsd)); + if (tsd_reentrancy_level_get(tsd) != 0) { + assert(slow_path); + } assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); + alloc_ctx_t alloc_ctx; + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + assert(alloc_ctx.szind != NSIZES); + + size_t usize; if (config_prof && opt_prof) { - usize = isalloc(ptr, config_prof); - prof_free(tsd, ptr, usize); - } else if (config_stats || config_valgrind) - usize = isalloc(ptr, config_prof); - if (config_stats) + usize = sz_index2size(alloc_ctx.szind); + prof_free(tsd, ptr, usize, &alloc_ctx); + } else if (config_stats) { + usize = sz_index2size(alloc_ctx.szind); + } + if (config_stats) { *tsd_thread_deallocatedp_get(tsd) += usize; - if (config_valgrind && unlikely(in_valgrind)) - rzsize = p2rz(ptr); - iqalloc(tsd, ptr, tcache); - JEMALLOC_VALGRIND_FREE(ptr, rzsize); + } + + if (likely(!slow_path)) { + idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, + false); + } else { + idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, + true); + } } -JEMALLOC_INLINE_C void -isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache) -{ - UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); +JEMALLOC_ALWAYS_INLINE void +isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) { + if (!slow_path) { + tsd_assert_fast(tsd); + } + check_entry_exit_locking(tsd_tsdn(tsd)); + if (tsd_reentrancy_level_get(tsd) != 0) { + assert(slow_path); + } assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); - if (config_prof && opt_prof) - prof_free(tsd, ptr, usize); - if (config_stats) + alloc_ctx_t alloc_ctx, *ctx; + if (!config_cache_oblivious && ((uintptr_t)ptr & PAGE_MASK) != 0) { + /* + * When cache_oblivious is disabled and ptr is not page aligned, + * the allocation was not sampled -- usize can be used to + * determine szind directly. 
+ */ + alloc_ctx.szind = sz_size2index(usize); + alloc_ctx.slab = true; + ctx = &alloc_ctx; + if (config_debug) { + alloc_ctx_t dbg_ctx; + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, + rtree_ctx, (uintptr_t)ptr, true, &dbg_ctx.szind, + &dbg_ctx.slab); + assert(dbg_ctx.szind == alloc_ctx.szind); + assert(dbg_ctx.slab == alloc_ctx.slab); + } + } else if (config_prof && opt_prof) { + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + assert(alloc_ctx.szind == sz_size2index(usize)); + ctx = &alloc_ctx; + } else { + ctx = NULL; + } + + if (config_prof && opt_prof) { + prof_free(tsd, ptr, usize, ctx); + } + if (config_stats) { *tsd_thread_deallocatedp_get(tsd) += usize; - if (config_valgrind && unlikely(in_valgrind)) - rzsize = p2rz(ptr); - isqalloc(tsd, ptr, usize, tcache); - JEMALLOC_VALGRIND_FREE(ptr, rzsize); + } + + if (likely(!slow_path)) { + isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false); + } else { + isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true); + } } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ALLOC_SIZE(2) -je_realloc(void *ptr, size_t size) -{ +je_realloc(void *ptr, size_t size) { void *ret; - tsd_t *tsd JEMALLOC_CC_SILENCE_INIT(NULL); + tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL); size_t usize JEMALLOC_CC_SILENCE_INIT(0); size_t old_usize = 0; - UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); + + LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size); if (unlikely(size == 0)) { if (ptr != NULL) { /* realloc(ptr, 0) is equivalent to free(ptr). */ UTRACE(ptr, 0, 0); - tsd = tsd_fetch(); - ifree(tsd, ptr, tcache_get(tsd, false)); - return (NULL); + tcache_t *tcache; + tsd_t *tsd = tsd_fetch(); + if (tsd_reentrancy_level_get(tsd) == 0) { + tcache = tcache_get(tsd); + } else { + tcache = NULL; + } + ifree(tsd, ptr, tcache, true); + + LOG("core.realloc.exit", "result: %p", NULL); + return NULL; } size = 1; } if (likely(ptr != NULL)) { assert(malloc_initialized() || IS_INITIALIZER); - malloc_thread_init(); - tsd = tsd_fetch(); + tsd_t *tsd = tsd_fetch(); - old_usize = isalloc(ptr, config_prof); - if (config_valgrind && unlikely(in_valgrind)) - old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize); + check_entry_exit_locking(tsd_tsdn(tsd)); + alloc_ctx_t alloc_ctx; + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + assert(alloc_ctx.szind != NSIZES); + old_usize = sz_index2size(alloc_ctx.szind); + assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); if (config_prof && opt_prof) { - usize = s2u(size); - ret = unlikely(usize == 0) ? NULL : irealloc_prof(tsd, - ptr, old_usize, usize); + usize = sz_s2u(size); + ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ? + NULL : irealloc_prof(tsd, ptr, old_usize, usize, + &alloc_ctx); } else { - if (config_stats || (config_valgrind && - unlikely(in_valgrind))) - usize = s2u(size); + if (config_stats) { + usize = sz_s2u(size); + } ret = iralloc(tsd, ptr, old_usize, size, 0, false); } + tsdn = tsd_tsdn(tsd); } else { /* realloc(NULL, size) is equivalent to malloc(size). 
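As the surrounding comments note, je_realloc() keeps the conventional edge-case semantics: a zero size frees the pointer and returns NULL, and a NULL pointer degenerates to malloc(). A tiny usage sketch, assuming the je_-prefixed public API from this tree's jemalloc/jemalloc.h and arbitrary sizes:

#include <jemalloc/jemalloc.h>

static void
realloc_semantics_sketch(void) {
        void *p = je_realloc(NULL, 100);        /* behaves like je_malloc(100) */
        p = je_realloc(p, 200);                 /* may move; old contents preserved */
        p = je_realloc(p, 0);                   /* frees p and returns NULL, as above */
        (void)p;
}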
*/ - ret = imalloc_body(size, &tsd, &usize); + void *ret = je_malloc(size); + LOG("core.realloc.exit", "result: %p", ret); + return ret; } if (unlikely(ret == NULL)) { @@ -1819,25 +2354,54 @@ je_realloc(void *ptr, size_t size) set_errno(ENOMEM); } if (config_stats && likely(ret != NULL)) { - assert(usize == isalloc(ret, config_prof)); + tsd_t *tsd; + + assert(usize == isalloc(tsdn, ret)); + tsd = tsdn_tsd(tsdn); *tsd_thread_allocatedp_get(tsd) += usize; *tsd_thread_deallocatedp_get(tsd) += old_usize; } UTRACE(ptr, size, ret); - JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize, - old_rzsize, true, false); - return (ret); + check_entry_exit_locking(tsdn); + + LOG("core.realloc.exit", "result: %p", ret); + return ret; } JEMALLOC_EXPORT void JEMALLOC_NOTHROW -je_free(void *ptr) -{ +je_free(void *ptr) { + LOG("core.free.entry", "ptr: %p", ptr); UTRACE(ptr, 0, 0); if (likely(ptr != NULL)) { - tsd_t *tsd = tsd_fetch(); - ifree(tsd, ptr, tcache_get(tsd, false)); + /* + * We avoid setting up tsd fully (e.g. tcache, arena binding) + * based on only free() calls -- other activities trigger the + * minimal to full transition. This is because free() may + * happen during thread shutdown after tls deallocation: if a + * thread never had any malloc activities until then, a + * fully-setup tsd won't be destructed properly. + */ + tsd_t *tsd = tsd_fetch_min(); + check_entry_exit_locking(tsd_tsdn(tsd)); + + tcache_t *tcache; + if (likely(tsd_fast(tsd))) { + tsd_assert_fast(tsd); + /* Unconditionally get tcache ptr on fast path. */ + tcache = tsd_tcachep_get(tsd); + ifree(tsd, ptr, tcache, false); + } else { + if (likely(tsd_reentrancy_level_get(tsd) == 0)) { + tcache = tcache_get(tsd); + } else { + tcache = NULL; + } + ifree(tsd, ptr, tcache, true); + } + check_entry_exit_locking(tsd_tsdn(tsd)); } + LOG("core.free.exit", ""); } /* @@ -1852,13 +2416,34 @@ je_free(void *ptr) JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) -je_memalign(size_t alignment, size_t size) -{ - void *ret JEMALLOC_CC_SILENCE_INIT(NULL); - if (unlikely(imemalign(&ret, alignment, size, 1) != 0)) - ret = NULL; - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false); - return (ret); +je_memalign(size_t alignment, size_t size) { + void *ret; + static_opts_t sopts; + dynamic_opts_t dopts; + + LOG("core.memalign.entry", "alignment: %zu, size: %zu\n", alignment, + size); + + static_opts_init(&sopts); + dynamic_opts_init(&dopts); + + sopts.bump_empty_alloc = true; + sopts.min_alignment = 1; + sopts.oom_string = + "<jemalloc>: Error allocating aligned memory: out of memory\n"; + sopts.invalid_alignment_string = + "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; + sopts.null_out_result_on_error = true; + + dopts.result = &ret; + dopts.num_items = 1; + dopts.item_size = size; + dopts.alignment = alignment; + + imalloc(&sopts, &dopts); + + LOG("core.memalign.exit", "result: %p", ret); + return ret; } #endif @@ -1866,25 +2451,38 @@ je_memalign(size_t alignment, size_t size) JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) -je_valloc(size_t size) -{ - void *ret JEMALLOC_CC_SILENCE_INIT(NULL); - if (unlikely(imemalign(&ret, PAGE, size, 1) != 0)) - ret = NULL; - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false); - return (ret); +je_valloc(size_t size) { + void *ret; + + static_opts_t sopts; + dynamic_opts_t dopts; + + LOG("core.valloc.entry", "size: %zu\n", size); + + static_opts_init(&sopts); + 
dynamic_opts_init(&dopts); + + sopts.bump_empty_alloc = true; + sopts.null_out_result_on_error = true; + sopts.min_alignment = PAGE; + sopts.oom_string = + "<jemalloc>: Error allocating aligned memory: out of memory\n"; + sopts.invalid_alignment_string = + "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; + + dopts.result = &ret; + dopts.num_items = 1; + dopts.item_size = size; + dopts.alignment = PAGE; + + imalloc(&sopts, &dopts); + + LOG("core.valloc.exit", "result: %p\n", ret); + return ret; } #endif -/* - * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has - * #define je_malloc malloc - */ -#define malloc_is_malloc 1 -#define is_malloc_(a) malloc_is_ ## a -#define is_malloc(a) is_malloc_(a) - -#if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK)) +#if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK) /* * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible * to inconsistently reference libc's malloc(3)-compatible functions @@ -1897,10 +2495,44 @@ je_valloc(size_t size) JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free; JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc; JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc; -# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK +# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) = je_memalign; -# endif +# endif + +# ifdef CPU_COUNT +/* + * To enable static linking with glibc, the libc specific malloc interface must + * be implemented also, so none of glibc's malloc.o functions are added to the + * link. + */ +# define ALIAS(je_fn) __attribute__((alias (#je_fn), used)) +/* To force macro expansion of je_ prefix before stringification. */ +# define PREALIAS(je_fn) ALIAS(je_fn) +# ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC +void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc); +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_FREE +void __libc_free(void* ptr) PREALIAS(je_free); +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC +void *__libc_malloc(size_t size) PREALIAS(je_malloc); +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN +void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign); +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC +void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc); +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC +void *__libc_valloc(size_t size) PREALIAS(je_valloc); +# endif +# ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN +int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign); +# endif +# undef PREALIAS +# undef ALIAS +# endif #endif /* @@ -1911,225 +2543,99 @@ JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) = * Begin non-standard functions. 
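je_memalign() and je_valloc() above are now thin shims over the shared imalloc() core, differing only in min_alignment and the fixed PAGE alignment. A small usage sketch (sizes arbitrary, assuming the je_-prefixed public API from this tree's jemalloc/jemalloc.h):

#include <jemalloc/jemalloc.h>

static void
legacy_aligned_sketch(void) {
        void *a = je_memalign(64, 100); /* 64-byte aligned; size need not be a multiple */
        void *b = je_valloc(100);       /* page-aligned, i.e. memalign(PAGE, 100) */
        je_free(a);
        je_free(b);
}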
*/ -JEMALLOC_ALWAYS_INLINE_C bool -imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize, - size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena) -{ - - if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) { - *alignment = 0; - *usize = s2u(size); - } else { - *alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags); - *usize = sa2u(size, *alignment); - } - assert(*usize != 0); - *zero = MALLOCX_ZERO_GET(flags); - if ((flags & MALLOCX_TCACHE_MASK) != 0) { - if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) - *tcache = NULL; - else - *tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); - } else - *tcache = tcache_get(tsd, true); - if ((flags & MALLOCX_ARENA_MASK) != 0) { - unsigned arena_ind = MALLOCX_ARENA_GET(flags); - *arena = arena_get(tsd, arena_ind, true, true); - if (unlikely(*arena == NULL)) - return (true); - } else - *arena = NULL; - return (false); -} - -JEMALLOC_ALWAYS_INLINE_C bool -imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize, - size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena) -{ - - if (likely(flags == 0)) { - *usize = s2u(size); - assert(*usize != 0); - *alignment = 0; - *zero = false; - *tcache = tcache_get(tsd, true); - *arena = NULL; - return (false); - } else { - return (imallocx_flags_decode_hard(tsd, size, flags, usize, - alignment, zero, tcache, arena)); - } -} - -JEMALLOC_ALWAYS_INLINE_C void * -imallocx_flags(tsd_t *tsd, size_t usize, size_t alignment, bool zero, - tcache_t *tcache, arena_t *arena) -{ +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) +je_mallocx(size_t size, int flags) { + void *ret; + static_opts_t sopts; + dynamic_opts_t dopts; - if (unlikely(alignment != 0)) - return (ipalloct(tsd, usize, alignment, zero, tcache, arena)); - if (unlikely(zero)) - return (icalloct(tsd, usize, tcache, arena)); - return (imalloct(tsd, usize, tcache, arena)); -} + LOG("core.mallocx.entry", "size: %zu, flags: %d", size, flags); -static void * -imallocx_prof_sample(tsd_t *tsd, size_t usize, size_t alignment, bool zero, - tcache_t *tcache, arena_t *arena) -{ - void *p; + static_opts_init(&sopts); + dynamic_opts_init(&dopts); - if (usize <= SMALL_MAXCLASS) { - assert(((alignment == 0) ? 
s2u(LARGE_MINCLASS) : - sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS); - p = imallocx_flags(tsd, LARGE_MINCLASS, alignment, zero, tcache, - arena); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); - } else - p = imallocx_flags(tsd, usize, alignment, zero, tcache, arena); + sopts.assert_nonempty_alloc = true; + sopts.null_out_result_on_error = true; + sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n"; - return (p); -} - -JEMALLOC_ALWAYS_INLINE_C void * -imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize) -{ - void *p; - size_t alignment; - bool zero; - tcache_t *tcache; - arena_t *arena; - prof_tctx_t *tctx; - - if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment, - &zero, &tcache, &arena))) - return (NULL); - tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true); - if (likely((uintptr_t)tctx == (uintptr_t)1U)) - p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena); - else if ((uintptr_t)tctx > (uintptr_t)1U) { - p = imallocx_prof_sample(tsd, *usize, alignment, zero, tcache, - arena); - } else - p = NULL; - if (unlikely(p == NULL)) { - prof_alloc_rollback(tsd, tctx, true); - return (NULL); - } - prof_malloc(p, *usize, tctx); + dopts.result = &ret; + dopts.num_items = 1; + dopts.item_size = size; + if (unlikely(flags != 0)) { + if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) { + dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags); + } - assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); - return (p); -} + dopts.zero = MALLOCX_ZERO_GET(flags); -JEMALLOC_ALWAYS_INLINE_C void * -imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize) -{ - void *p; - size_t alignment; - bool zero; - tcache_t *tcache; - arena_t *arena; + if ((flags & MALLOCX_TCACHE_MASK) != 0) { + if ((flags & MALLOCX_TCACHE_MASK) + == MALLOCX_TCACHE_NONE) { + dopts.tcache_ind = TCACHE_IND_NONE; + } else { + dopts.tcache_ind = MALLOCX_TCACHE_GET(flags); + } + } else { + dopts.tcache_ind = TCACHE_IND_AUTOMATIC; + } - if (likely(flags == 0)) { - if (config_stats || (config_valgrind && unlikely(in_valgrind))) - *usize = s2u(size); - return (imalloc(tsd, size)); + if ((flags & MALLOCX_ARENA_MASK) != 0) + dopts.arena_ind = MALLOCX_ARENA_GET(flags); } - if (unlikely(imallocx_flags_decode_hard(tsd, size, flags, usize, - &alignment, &zero, &tcache, &arena))) - return (NULL); - p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena); - assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); - return (p); -} + imalloc(&sopts, &dopts); -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) -je_mallocx(size_t size, int flags) -{ - tsd_t *tsd; - void *p; - size_t usize; - - assert(size != 0); - - if (unlikely(malloc_init())) - goto label_oom; - tsd = tsd_fetch(); - - if (config_prof && opt_prof) - p = imallocx_prof(tsd, size, flags, &usize); - else - p = imallocx_no_prof(tsd, size, flags, &usize); - if (unlikely(p == NULL)) - goto label_oom; - - if (config_stats) { - assert(usize == isalloc(p, config_prof)); - *tsd_thread_allocatedp_get(tsd) += usize; - } - UTRACE(0, size, p); - JEMALLOC_VALGRIND_MALLOC(true, p, usize, MALLOCX_ZERO_GET(flags)); - return (p); -label_oom: - if (config_xmalloc && unlikely(opt_xmalloc)) { - malloc_write("<jemalloc>: Error in mallocx(): out of memory\n"); - abort(); - } - UTRACE(0, size, 0); - return (NULL); + LOG("core.mallocx.exit", "result: %p", ret); + return ret; } static void * 
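The flag decoding in je_mallocx() above consumes the public MALLOCX_* encoding (alignment in the low bits, zero/tcache/arena selectors above it). A usage sketch of the producer side (size arbitrary, assuming the je_-prefixed public API from this tree's jemalloc/jemalloc.h):

#include <jemalloc/jemalloc.h>

static void
mallocx_flags_sketch(void) {
        /* 4 KiB-aligned, zero-filled allocation, bypassing the thread cache. */
        void *p = je_mallocx(1000,
            MALLOCX_ALIGN(4096) | MALLOCX_ZERO | MALLOCX_TCACHE_NONE);
        if (p != NULL) {
                je_dallocx(p, MALLOCX_TCACHE_NONE);
        }
}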
-irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, +irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize, size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena, - prof_tctx_t *tctx) -{ + prof_tctx_t *tctx) { void *p; - if (tctx == NULL) - return (NULL); + if (tctx == NULL) { + return NULL; + } if (usize <= SMALL_MAXCLASS) { - p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment, - zero, tcache, arena); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); + p = iralloct(tsdn, old_ptr, old_usize, LARGE_MINCLASS, + alignment, zero, tcache, arena); + if (p == NULL) { + return NULL; + } + arena_prof_promote(tsdn, p, usize); } else { - p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero, + p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero, tcache, arena); } - return (p); + return p; } -JEMALLOC_ALWAYS_INLINE_C void * +JEMALLOC_ALWAYS_INLINE void * irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, size_t alignment, size_t *usize, bool zero, tcache_t *tcache, - arena_t *arena) -{ + arena_t *arena, alloc_ctx_t *alloc_ctx) { void *p; bool prof_active; prof_tctx_t *old_tctx, *tctx; prof_active = prof_active_get_unlocked(); - old_tctx = prof_tctx_get(old_ptr); - tctx = prof_alloc_prep(tsd, *usize, prof_active, true); + old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); + tctx = prof_alloc_prep(tsd, *usize, prof_active, false); if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { - p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize, - alignment, zero, tcache, arena, tctx); + p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize, + *usize, alignment, zero, tcache, arena, tctx); } else { - p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero, - tcache, arena); + p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment, + zero, tcache, arena); } if (unlikely(p == NULL)) { - prof_alloc_rollback(tsd, tctx, true); - return (NULL); + prof_alloc_rollback(tsd, tctx, false); + return NULL; } if (p == old_ptr && alignment != 0) { @@ -2141,69 +2647,84 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, * be the same as the current usize because of in-place large * reallocation. Therefore, query the actual value of usize. 
*/ - *usize = isalloc(p, config_prof); + *usize = isalloc(tsd_tsdn(tsd), p); } - prof_realloc(tsd, p, *usize, tctx, prof_active, true, old_ptr, + prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr, old_usize, old_tctx); - return (p); + return p; } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN void JEMALLOC_NOTHROW * JEMALLOC_ALLOC_SIZE(2) -je_rallocx(void *ptr, size_t size, int flags) -{ +je_rallocx(void *ptr, size_t size, int flags) { void *p; tsd_t *tsd; size_t usize; size_t old_usize; - UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); size_t alignment = MALLOCX_ALIGN_GET(flags); bool zero = flags & MALLOCX_ZERO; arena_t *arena; tcache_t *tcache; + LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr, + size, flags); + + assert(ptr != NULL); assert(size != 0); assert(malloc_initialized() || IS_INITIALIZER); - malloc_thread_init(); tsd = tsd_fetch(); + check_entry_exit_locking(tsd_tsdn(tsd)); if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) { unsigned arena_ind = MALLOCX_ARENA_GET(flags); - arena = arena_get(tsd, arena_ind, true, true); - if (unlikely(arena == NULL)) + arena = arena_get(tsd_tsdn(tsd), arena_ind, true); + if (unlikely(arena == NULL)) { goto label_oom; - } else + } + } else { arena = NULL; + } if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { - if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) + if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { tcache = NULL; - else + } else { tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); - } else - tcache = tcache_get(tsd, true); - - old_usize = isalloc(ptr, config_prof); - if (config_valgrind && unlikely(in_valgrind)) - old_rzsize = u2rz(old_usize); + } + } else { + tcache = tcache_get(tsd); + } + alloc_ctx_t alloc_ctx; + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + assert(alloc_ctx.szind != NSIZES); + old_usize = sz_index2size(alloc_ctx.szind); + assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); if (config_prof && opt_prof) { - usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment); - assert(usize != 0); + usize = (alignment == 0) ? 
+ sz_s2u(size) : sz_sa2u(size, alignment); + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { + goto label_oom; + } p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize, - zero, tcache, arena); - if (unlikely(p == NULL)) + zero, tcache, arena, &alloc_ctx); + if (unlikely(p == NULL)) { goto label_oom; + } } else { - p = iralloct(tsd, ptr, old_usize, size, alignment, zero, - tcache, arena); - if (unlikely(p == NULL)) + p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment, + zero, tcache, arena); + if (unlikely(p == NULL)) { goto label_oom; - if (config_stats || (config_valgrind && unlikely(in_valgrind))) - usize = isalloc(p, config_prof); + } + if (config_stats) { + usize = isalloc(tsd_tsdn(tsd), p); + } } assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); @@ -2212,277 +2733,426 @@ je_rallocx(void *ptr, size_t size, int flags) *tsd_thread_deallocatedp_get(tsd) += old_usize; } UTRACE(ptr, size, p); - JEMALLOC_VALGRIND_REALLOC(true, p, usize, false, ptr, old_usize, - old_rzsize, false, zero); - return (p); + check_entry_exit_locking(tsd_tsdn(tsd)); + + LOG("core.rallocx.exit", "result: %p", p); + return p; label_oom: if (config_xmalloc && unlikely(opt_xmalloc)) { malloc_write("<jemalloc>: Error in rallocx(): out of memory\n"); abort(); } UTRACE(ptr, size, 0); - return (NULL); + check_entry_exit_locking(tsd_tsdn(tsd)); + + LOG("core.rallocx.exit", "result: %p", NULL); + return NULL; } -JEMALLOC_ALWAYS_INLINE_C size_t -ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra, - size_t alignment, bool zero) -{ +JEMALLOC_ALWAYS_INLINE size_t +ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, + size_t extra, size_t alignment, bool zero) { size_t usize; - if (ixalloc(ptr, old_usize, size, extra, alignment, zero)) - return (old_usize); - usize = isalloc(ptr, config_prof); + if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) { + return old_usize; + } + usize = isalloc(tsdn, ptr); - return (usize); + return usize; } static size_t -ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra, - size_t alignment, bool zero, prof_tctx_t *tctx) -{ +ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, + size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) { size_t usize; - if (tctx == NULL) - return (old_usize); - usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, zero); + if (tctx == NULL) { + return old_usize; + } + usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment, + zero); - return (usize); + return usize; } -JEMALLOC_ALWAYS_INLINE_C size_t +JEMALLOC_ALWAYS_INLINE size_t ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, - size_t extra, size_t alignment, bool zero) -{ + size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) { size_t usize_max, usize; bool prof_active; prof_tctx_t *old_tctx, *tctx; prof_active = prof_active_get_unlocked(); - old_tctx = prof_tctx_get(ptr); + old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx); /* * usize isn't knowable before ixalloc() returns when extra is non-zero. * Therefore, compute its maximum possible value and use that in * prof_alloc_prep() to decide whether to capture a backtrace. * prof_realloc() will use the actual usize to decide whether to sample. */ - usize_max = (alignment == 0) ? 
s2u(size+extra) : sa2u(size+extra, - alignment); - assert(usize_max != 0); + if (alignment == 0) { + usize_max = sz_s2u(size+extra); + assert(usize_max > 0 && usize_max <= LARGE_MAXCLASS); + } else { + usize_max = sz_sa2u(size+extra, alignment); + if (unlikely(usize_max == 0 || usize_max > LARGE_MAXCLASS)) { + /* + * usize_max is out of range, and chances are that + * allocation will fail, but use the maximum possible + * value and carry on with prof_alloc_prep(), just in + * case allocation succeeds. + */ + usize_max = LARGE_MAXCLASS; + } + } tctx = prof_alloc_prep(tsd, usize_max, prof_active, false); + if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { - usize = ixallocx_prof_sample(ptr, old_usize, size, extra, - alignment, zero, tctx); + usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize, + size, extra, alignment, zero, tctx); } else { - usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, - zero); + usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, + extra, alignment, zero); } if (usize == old_usize) { prof_alloc_rollback(tsd, tctx, false); - return (usize); + return usize; } prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize, old_tctx); - return (usize); + return usize; } JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW -je_xallocx(void *ptr, size_t size, size_t extra, int flags) -{ +je_xallocx(void *ptr, size_t size, size_t extra, int flags) { tsd_t *tsd; size_t usize, old_usize; - UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); size_t alignment = MALLOCX_ALIGN_GET(flags); bool zero = flags & MALLOCX_ZERO; + LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, " + "flags: %d", ptr, size, extra, flags); + assert(ptr != NULL); assert(size != 0); assert(SIZE_T_MAX - size >= extra); assert(malloc_initialized() || IS_INITIALIZER); - malloc_thread_init(); tsd = tsd_fetch(); - - old_usize = isalloc(ptr, config_prof); - - /* Clamp extra if necessary to avoid (size + extra) overflow. */ - if (unlikely(size + extra > HUGE_MAXCLASS)) { - /* Check for size overflow. */ - if (unlikely(size > HUGE_MAXCLASS)) { - usize = old_usize; - goto label_not_resized; - } - extra = HUGE_MAXCLASS - size; + check_entry_exit_locking(tsd_tsdn(tsd)); + + alloc_ctx_t alloc_ctx; + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + assert(alloc_ctx.szind != NSIZES); + old_usize = sz_index2size(alloc_ctx.szind); + assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); + /* + * The API explicitly absolves itself of protecting against (size + + * extra) numerical overflow, but we may need to clamp extra to avoid + * exceeding LARGE_MAXCLASS. + * + * Ordinarily, size limit checking is handled deeper down, but here we + * have to check as part of (size + extra) clamping, since we need the + * clamped value in the above helper functions. 
+ */ + if (unlikely(size > LARGE_MAXCLASS)) { + usize = old_usize; + goto label_not_resized; + } + if (unlikely(LARGE_MAXCLASS - size < extra)) { + extra = LARGE_MAXCLASS - size; } - - if (config_valgrind && unlikely(in_valgrind)) - old_rzsize = u2rz(old_usize); if (config_prof && opt_prof) { usize = ixallocx_prof(tsd, ptr, old_usize, size, extra, - alignment, zero); + alignment, zero, &alloc_ctx); } else { - usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, - zero); + usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, + extra, alignment, zero); } - if (unlikely(usize == old_usize)) + if (unlikely(usize == old_usize)) { goto label_not_resized; + } if (config_stats) { *tsd_thread_allocatedp_get(tsd) += usize; *tsd_thread_deallocatedp_get(tsd) += old_usize; } - JEMALLOC_VALGRIND_REALLOC(false, ptr, usize, false, ptr, old_usize, - old_rzsize, false, zero); label_not_resized: UTRACE(ptr, size, ptr); - return (usize); + check_entry_exit_locking(tsd_tsdn(tsd)); + + LOG("core.xallocx.exit", "result: %zu", usize); + return usize; } JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW JEMALLOC_ATTR(pure) -je_sallocx(const void *ptr, int flags) -{ +je_sallocx(const void *ptr, UNUSED int flags) { size_t usize; + tsdn_t *tsdn; + + LOG("core.sallocx.entry", "ptr: %p, flags: %d", ptr, flags); assert(malloc_initialized() || IS_INITIALIZER); - malloc_thread_init(); + assert(ptr != NULL); + + tsdn = tsdn_fetch(); + check_entry_exit_locking(tsdn); + + if (config_debug || force_ivsalloc) { + usize = ivsalloc(tsdn, ptr); + assert(force_ivsalloc || usize != 0); + } else { + usize = isalloc(tsdn, ptr); + } - if (config_ivsalloc) - usize = ivsalloc(ptr, config_prof); - else - usize = isalloc(ptr, config_prof); + check_entry_exit_locking(tsdn); - return (usize); + LOG("core.sallocx.exit", "result: %zu", usize); + return usize; } JEMALLOC_EXPORT void JEMALLOC_NOTHROW -je_dallocx(void *ptr, int flags) -{ - tsd_t *tsd; - tcache_t *tcache; +je_dallocx(void *ptr, int flags) { + LOG("core.dallocx.entry", "ptr: %p, flags: %d", ptr, flags); assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); - tsd = tsd_fetch(); + tsd_t *tsd = tsd_fetch(); + bool fast = tsd_fast(tsd); + check_entry_exit_locking(tsd_tsdn(tsd)); + + tcache_t *tcache; if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { - if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) + /* Not allowed to be reentrant and specify a custom tcache. 
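je_xallocx() above only ever resizes in place and returns the resulting usable size, so callers compare that against what they asked for and fall back to rallocx() when in-place growth was not possible. A sketch of that pattern (p is assumed to be an existing jemalloc allocation; sizes arbitrary):

#include <jemalloc/jemalloc.h>

static void *
grow_in_place_sketch(void *p) {
        size_t usable = je_xallocx(p, 2000, 0, 0);
        if (usable < 2000) {
                /* Could not grow in place; rallocx() may move the data. */
                p = je_rallocx(p, 2000, 0);
        }
        return p;
}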
*/ + assert(tsd_reentrancy_level_get(tsd) == 0); + if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { tcache = NULL; - else + } else { tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); - } else - tcache = tcache_get(tsd, false); + } + } else { + if (likely(fast)) { + tcache = tsd_tcachep_get(tsd); + assert(tcache == tcache_get(tsd)); + } else { + if (likely(tsd_reentrancy_level_get(tsd) == 0)) { + tcache = tcache_get(tsd); + } else { + tcache = NULL; + } + } + } UTRACE(ptr, 0, 0); - ifree(tsd_fetch(), ptr, tcache); + if (likely(fast)) { + tsd_assert_fast(tsd); + ifree(tsd, ptr, tcache, false); + } else { + ifree(tsd, ptr, tcache, true); + } + check_entry_exit_locking(tsd_tsdn(tsd)); + + LOG("core.dallocx.exit", ""); } -JEMALLOC_ALWAYS_INLINE_C size_t -inallocx(size_t size, int flags) -{ - size_t usize; +JEMALLOC_ALWAYS_INLINE size_t +inallocx(tsdn_t *tsdn, size_t size, int flags) { + check_entry_exit_locking(tsdn); - if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) - usize = s2u(size); - else - usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags)); - assert(usize != 0); - return (usize); + size_t usize; + if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) { + usize = sz_s2u(size); + } else { + usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags)); + } + check_entry_exit_locking(tsdn); + return usize; } JEMALLOC_EXPORT void JEMALLOC_NOTHROW -je_sdallocx(void *ptr, size_t size, int flags) -{ - tsd_t *tsd; - tcache_t *tcache; - size_t usize; - +je_sdallocx(void *ptr, size_t size, int flags) { assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); - usize = inallocx(size, flags); - assert(usize == isalloc(ptr, config_prof)); - tsd = tsd_fetch(); + LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr, + size, flags); + + tsd_t *tsd = tsd_fetch(); + bool fast = tsd_fast(tsd); + size_t usize = inallocx(tsd_tsdn(tsd), size, flags); + assert(usize == isalloc(tsd_tsdn(tsd), ptr)); + check_entry_exit_locking(tsd_tsdn(tsd)); + + tcache_t *tcache; if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { - if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) + /* Not allowed to be reentrant and specify a custom tcache. 
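inallocx() above maps a (size, flags) pair to the usable size it implies, and je_sdallocx() asserts that this matches the allocation being freed, so callers simply hand back their original request. A sketch (size arbitrary, assuming the je_-prefixed public API from this tree's jemalloc/jemalloc.h):

#include <jemalloc/jemalloc.h>

static void
sized_dalloc_sketch(void) {
        void *p = je_mallocx(100, 0);
        if (p != NULL) {
                /* Same size/flags as the original allocation. */
                je_sdallocx(p, 100, 0);
        }
}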
*/ + assert(tsd_reentrancy_level_get(tsd) == 0); + if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { tcache = NULL; - else + } else { tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); - } else - tcache = tcache_get(tsd, false); + } + } else { + if (likely(fast)) { + tcache = tsd_tcachep_get(tsd); + assert(tcache == tcache_get(tsd)); + } else { + if (likely(tsd_reentrancy_level_get(tsd) == 0)) { + tcache = tcache_get(tsd); + } else { + tcache = NULL; + } + } + } UTRACE(ptr, 0, 0); - isfree(tsd, ptr, usize, tcache); + if (likely(fast)) { + tsd_assert_fast(tsd); + isfree(tsd, ptr, usize, tcache, false); + } else { + isfree(tsd, ptr, usize, tcache, true); + } + check_entry_exit_locking(tsd_tsdn(tsd)); + + LOG("core.sdallocx.exit", ""); } JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW JEMALLOC_ATTR(pure) -je_nallocx(size_t size, int flags) -{ +je_nallocx(size_t size, int flags) { + size_t usize; + tsdn_t *tsdn; assert(size != 0); - if (unlikely(malloc_init())) - return (0); + if (unlikely(malloc_init())) { + LOG("core.nallocx.exit", "result: %zu", ZU(0)); + return 0; + } + + tsdn = tsdn_fetch(); + check_entry_exit_locking(tsdn); + + usize = inallocx(tsdn, size, flags); + if (unlikely(usize > LARGE_MAXCLASS)) { + LOG("core.nallocx.exit", "result: %zu", ZU(0)); + return 0; + } - return (inallocx(size, flags)); + check_entry_exit_locking(tsdn); + LOG("core.nallocx.exit", "result: %zu", usize); + return usize; } JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) -{ + size_t newlen) { + int ret; + tsd_t *tsd; + + LOG("core.mallctl.entry", "name: %s", name); - if (unlikely(malloc_init())) - return (EAGAIN); + if (unlikely(malloc_init())) { + LOG("core.mallctl.exit", "result: %d", EAGAIN); + return EAGAIN; + } + + tsd = tsd_fetch(); + check_entry_exit_locking(tsd_tsdn(tsd)); + ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen); + check_entry_exit_locking(tsd_tsdn(tsd)); - return (ctl_byname(name, oldp, oldlenp, newp, newlen)); + LOG("core.mallctl.exit", "result: %d", ret); + return ret; } JEMALLOC_EXPORT int JEMALLOC_NOTHROW -je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) -{ +je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) { + int ret; - if (unlikely(malloc_init())) - return (EAGAIN); + LOG("core.mallctlnametomib.entry", "name: %s", name); - return (ctl_nametomib(name, mibp, miblenp)); + if (unlikely(malloc_init())) { + LOG("core.mallctlnametomib.exit", "result: %d", EAGAIN); + return EAGAIN; + } + + tsd_t *tsd = tsd_fetch(); + check_entry_exit_locking(tsd_tsdn(tsd)); + ret = ctl_nametomib(tsd, name, mibp, miblenp); + check_entry_exit_locking(tsd_tsdn(tsd)); + + LOG("core.mallctlnametomib.exit", "result: %d", ret); + return ret; } JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ + void *newp, size_t newlen) { + int ret; + tsd_t *tsd; - if (unlikely(malloc_init())) - return (EAGAIN); + LOG("core.mallctlbymib.entry", ""); - return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen)); + if (unlikely(malloc_init())) { + LOG("core.mallctlbymib.exit", "result: %d", EAGAIN); + return EAGAIN; + } + + tsd = tsd_fetch(); + check_entry_exit_locking(tsd_tsdn(tsd)); + ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen); + check_entry_exit_locking(tsd_tsdn(tsd)); + LOG("core.mallctlbymib.exit", "result: %d", ret); + return ret; } JEMALLOC_EXPORT void JEMALLOC_NOTHROW 
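The mallctl entry points above now fetch tsd and run the entry/exit locking checks, but their external contract is unchanged. A typical read, using the standard "arenas.narenas" control as an example (the control name comes from the public mallctl namespace, not from this diff):

#include <jemalloc/jemalloc.h>

static void
mallctl_read_sketch(void) {
        unsigned narenas;
        size_t sz = sizeof(narenas);
        if (je_mallctl("arenas.narenas", &narenas, &sz, NULL, 0) == 0) {
                /* narenas now holds the number of configured arenas. */
        }
}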
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, - const char *opts) -{ + const char *opts) { + tsdn_t *tsdn; + + LOG("core.malloc_stats_print.entry", ""); + tsdn = tsdn_fetch(); + check_entry_exit_locking(tsdn); stats_print(write_cb, cbopaque, opts); + check_entry_exit_locking(tsdn); + LOG("core.malloc_stats_print.exit", ""); } JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW -je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) -{ +je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) { size_t ret; + tsdn_t *tsdn; + + LOG("core.malloc_usable_size.entry", "ptr: %p", ptr); assert(malloc_initialized() || IS_INITIALIZER); - malloc_thread_init(); - if (config_ivsalloc) - ret = ivsalloc(ptr, config_prof); - else - ret = (ptr == NULL) ? 0 : isalloc(ptr, config_prof); + tsdn = tsdn_fetch(); + check_entry_exit_locking(tsdn); + + if (unlikely(ptr == NULL)) { + ret = 0; + } else { + if (config_debug || force_ivsalloc) { + ret = ivsalloc(tsdn, ptr); + assert(force_ivsalloc || ret != 0); + } else { + ret = isalloc(tsdn, ptr); + } + } - return (ret); + check_entry_exit_locking(tsdn); + LOG("core.malloc_usable_size.exit", "result: %zu", ret); + return ret; } /* @@ -2507,13 +3177,13 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) * to trigger the deadlock described above, but doing so would involve forking * via a library constructor that runs before jemalloc's runs. */ +#ifndef JEMALLOC_JET JEMALLOC_ATTR(constructor) static void -jemalloc_constructor(void) -{ - +jemalloc_constructor(void) { malloc_init(); } +#endif #ifndef JEMALLOC_MUTEX_INIT_CB void @@ -2523,24 +3193,69 @@ JEMALLOC_EXPORT void _malloc_prefork(void) #endif { - unsigned i; + tsd_t *tsd; + unsigned i, j, narenas; + arena_t *arena; #ifdef JEMALLOC_MUTEX_INIT_CB - if (!malloc_initialized()) + if (!malloc_initialized()) { return; + } #endif assert(malloc_initialized()); + tsd = tsd_fetch(); + + narenas = narenas_total_get(); + + witness_prefork(tsd_witness_tsdp_get(tsd)); /* Acquire all mutexes in a safe order. */ - ctl_prefork(); - prof_prefork(); - malloc_mutex_prefork(&arenas_lock); - for (i = 0; i < narenas_total; i++) { - if (arenas[i] != NULL) - arena_prefork(arenas[i]); + ctl_prefork(tsd_tsdn(tsd)); + tcache_prefork(tsd_tsdn(tsd)); + malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock); + if (have_background_thread) { + background_thread_prefork0(tsd_tsdn(tsd)); + } + prof_prefork0(tsd_tsdn(tsd)); + if (have_background_thread) { + background_thread_prefork1(tsd_tsdn(tsd)); + } + /* Break arena prefork into stages to preserve lock order. 
*/ + for (i = 0; i < 8; i++) { + for (j = 0; j < narenas; j++) { + if ((arena = arena_get(tsd_tsdn(tsd), j, false)) != + NULL) { + switch (i) { + case 0: + arena_prefork0(tsd_tsdn(tsd), arena); + break; + case 1: + arena_prefork1(tsd_tsdn(tsd), arena); + break; + case 2: + arena_prefork2(tsd_tsdn(tsd), arena); + break; + case 3: + arena_prefork3(tsd_tsdn(tsd), arena); + break; + case 4: + arena_prefork4(tsd_tsdn(tsd), arena); + break; + case 5: + arena_prefork5(tsd_tsdn(tsd), arena); + break; + case 6: + arena_prefork6(tsd_tsdn(tsd), arena); + break; + case 7: + arena_prefork7(tsd_tsdn(tsd), arena); + break; + default: not_reached(); + } + } + } } - chunk_prefork(); - base_prefork(); + prof_prefork1(tsd_tsdn(tsd)); } #ifndef JEMALLOC_MUTEX_INIT_CB @@ -2551,75 +3266,61 @@ JEMALLOC_EXPORT void _malloc_postfork(void) #endif { - unsigned i; + tsd_t *tsd; + unsigned i, narenas; #ifdef JEMALLOC_MUTEX_INIT_CB - if (!malloc_initialized()) + if (!malloc_initialized()) { return; + } #endif assert(malloc_initialized()); + tsd = tsd_fetch(); + + witness_postfork_parent(tsd_witness_tsdp_get(tsd)); /* Release all mutexes, now that fork() has completed. */ - base_postfork_parent(); - chunk_postfork_parent(); - for (i = 0; i < narenas_total; i++) { - if (arenas[i] != NULL) - arena_postfork_parent(arenas[i]); + for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { + arena_t *arena; + + if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) { + arena_postfork_parent(tsd_tsdn(tsd), arena); + } } - malloc_mutex_postfork_parent(&arenas_lock); - prof_postfork_parent(); - ctl_postfork_parent(); + prof_postfork_parent(tsd_tsdn(tsd)); + if (have_background_thread) { + background_thread_postfork_parent(tsd_tsdn(tsd)); + } + malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock); + tcache_postfork_parent(tsd_tsdn(tsd)); + ctl_postfork_parent(tsd_tsdn(tsd)); } void -jemalloc_postfork_child(void) -{ - unsigned i; +jemalloc_postfork_child(void) { + tsd_t *tsd; + unsigned i, narenas; assert(malloc_initialized()); + tsd = tsd_fetch(); + + witness_postfork_child(tsd_witness_tsdp_get(tsd)); /* Release all mutexes, now that fork() has completed. */ - base_postfork_child(); - chunk_postfork_child(); - for (i = 0; i < narenas_total; i++) { - if (arenas[i] != NULL) - arena_postfork_child(arenas[i]); + for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { + arena_t *arena; + + if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) { + arena_postfork_child(tsd_tsdn(tsd), arena); + } + } + prof_postfork_child(tsd_tsdn(tsd)); + if (have_background_thread) { + background_thread_postfork_child(tsd_tsdn(tsd)); } - malloc_mutex_postfork_child(&arenas_lock); - prof_postfork_child(); - ctl_postfork_child(); + malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock); + tcache_postfork_child(tsd_tsdn(tsd)); + ctl_postfork_child(tsd_tsdn(tsd)); } /******************************************************************************/ - -/* Helps the application decide if a pointer is worth re-allocating in order to reduce fragmentation. - * returns 0 if the allocation is in the currently active run, - * or when it is not causing any frag issue (large or huge bin) - * returns the bin utilization and run utilization both in fixed point 16:16. - * If the application decides to re-allocate it should use MALLOCX_TCACHE_NONE when doing so. 
*/ -JEMALLOC_EXPORT int JEMALLOC_NOTHROW -je_get_defrag_hint(void* ptr, int *bin_util, int *run_util) { - int defrag = 0; - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (likely(chunk != ptr)) { /* indication that this is not a HUGE alloc */ - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - size_t mapbits = arena_mapbits_get(chunk, pageind); - if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) { /* indication that this is not a LARGE alloc */ - arena_t *arena = extent_node_arena_get(&chunk->node); - size_t rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); - arena_run_t *run = &arena_miscelm_get(chunk, rpages_ind)->run; - arena_bin_t *bin = &arena->bins[run->binind]; - malloc_mutex_lock(&bin->lock); - /* runs that are in the same chunk in as the current chunk, are likely to be the next currun */ - if (chunk != (arena_chunk_t *)CHUNK_ADDR2BASE(bin->runcur)) { - arena_bin_info_t *bin_info = &arena_bin_info[run->binind]; - size_t availregs = bin_info->nregs * bin->stats.curruns; - *bin_util = ((long long)bin->stats.curregs<<16) / availregs; - *run_util = ((long long)(bin_info->nregs - run->nfree)<<16) / bin_info->nregs; - defrag = 1; - } - malloc_mutex_unlock(&bin->lock); - } - } - return defrag; -} diff --git a/deps/jemalloc/src/jemalloc_cpp.cpp b/deps/jemalloc/src/jemalloc_cpp.cpp new file mode 100644 index 000000000..f0ceddae3 --- /dev/null +++ b/deps/jemalloc/src/jemalloc_cpp.cpp @@ -0,0 +1,141 @@ +#include <mutex> +#include <new> + +#define JEMALLOC_CPP_CPP_ +#ifdef __cplusplus +extern "C" { +#endif + +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#ifdef __cplusplus +} +#endif + +// All operators in this file are exported. + +// Possibly alias hidden versions of malloc and sdallocx to avoid an extra plt +// thunk? +// +// extern __typeof (sdallocx) sdallocx_int +// __attribute ((alias ("sdallocx"), +// visibility ("hidden"))); +// +// ... but it needs to work with jemalloc namespaces. + +void *operator new(std::size_t size); +void *operator new[](std::size_t size); +void *operator new(std::size_t size, const std::nothrow_t &) noexcept; +void *operator new[](std::size_t size, const std::nothrow_t &) noexcept; +void operator delete(void *ptr) noexcept; +void operator delete[](void *ptr) noexcept; +void operator delete(void *ptr, const std::nothrow_t &) noexcept; +void operator delete[](void *ptr, const std::nothrow_t &) noexcept; + +#if __cpp_sized_deallocation >= 201309 +/* C++14's sized-delete operators. */ +void operator delete(void *ptr, std::size_t size) noexcept; +void operator delete[](void *ptr, std::size_t size) noexcept; +#endif + +JEMALLOC_NOINLINE +static void * +handleOOM(std::size_t size, bool nothrow) { + void *ptr = nullptr; + + while (ptr == nullptr) { + std::new_handler handler; + // GCC-4.8 and clang 4.0 do not have std::get_new_handler. 
+ { + static std::mutex mtx; + std::lock_guard<std::mutex> lock(mtx); + + handler = std::set_new_handler(nullptr); + std::set_new_handler(handler); + } + if (handler == nullptr) + break; + + try { + handler(); + } catch (const std::bad_alloc &) { + break; + } + + ptr = je_malloc(size); + } + + if (ptr == nullptr && !nothrow) + std::__throw_bad_alloc(); + return ptr; +} + +template <bool IsNoExcept> +JEMALLOC_ALWAYS_INLINE +void * +newImpl(std::size_t size) noexcept(IsNoExcept) { + void *ptr = je_malloc(size); + if (likely(ptr != nullptr)) + return ptr; + + return handleOOM(size, IsNoExcept); +} + +void * +operator new(std::size_t size) { + return newImpl<false>(size); +} + +void * +operator new[](std::size_t size) { + return newImpl<false>(size); +} + +void * +operator new(std::size_t size, const std::nothrow_t &) noexcept { + return newImpl<true>(size); +} + +void * +operator new[](std::size_t size, const std::nothrow_t &) noexcept { + return newImpl<true>(size); +} + +void +operator delete(void *ptr) noexcept { + je_free(ptr); +} + +void +operator delete[](void *ptr) noexcept { + je_free(ptr); +} + +void +operator delete(void *ptr, const std::nothrow_t &) noexcept { + je_free(ptr); +} + +void operator delete[](void *ptr, const std::nothrow_t &) noexcept { + je_free(ptr); +} + +#if __cpp_sized_deallocation >= 201309 + +void +operator delete(void *ptr, std::size_t size) noexcept { + if (unlikely(ptr == nullptr)) { + return; + } + je_sdallocx(ptr, size, /*flags=*/0); +} + +void operator delete[](void *ptr, std::size_t size) noexcept { + if (unlikely(ptr == nullptr)) { + return; + } + je_sdallocx(ptr, size, /*flags=*/0); +} + +#endif // __cpp_sized_deallocation diff --git a/deps/jemalloc/src/large.c b/deps/jemalloc/src/large.c new file mode 100644 index 000000000..27a2c6798 --- /dev/null +++ b/deps/jemalloc/src/large.c @@ -0,0 +1,371 @@ +#define JEMALLOC_LARGE_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/extent_mmap.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/util.h" + +/******************************************************************************/ + +void * +large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) { + assert(usize == sz_s2u(usize)); + + return large_palloc(tsdn, arena, usize, CACHELINE, zero); +} + +void * +large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, + bool zero) { + size_t ausize; + extent_t *extent; + bool is_zeroed; + UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false); + + assert(!tsdn_null(tsdn) || arena != NULL); + + ausize = sz_sa2u(usize, alignment); + if (unlikely(ausize == 0 || ausize > LARGE_MAXCLASS)) { + return NULL; + } + + if (config_fill && unlikely(opt_zero)) { + zero = true; + } + /* + * Copy zero into is_zeroed and pass the copy when allocating the + * extent, so that it is possible to make correct junk/zero fill + * decisions below, even if is_zeroed ends up true when zero is false. + */ + is_zeroed = zero; + if (likely(!tsdn_null(tsdn))) { + arena = arena_choose(tsdn_tsd(tsdn), arena); + } + if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn, + arena, usize, alignment, &is_zeroed)) == NULL) { + return NULL; + } + + /* See comments in arena_bin_slabs_full_insert(). */ + if (!arena_is_auto(arena)) { + /* Insert extent into large. 
*/ + malloc_mutex_lock(tsdn, &arena->large_mtx); + extent_list_append(&arena->large, extent); + malloc_mutex_unlock(tsdn, &arena->large_mtx); + } + if (config_prof && arena_prof_accum(tsdn, arena, usize)) { + prof_idump(tsdn); + } + + if (zero) { + assert(is_zeroed); + } else if (config_fill && unlikely(opt_junk_alloc)) { + memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK, + extent_usize_get(extent)); + } + + arena_decay_tick(tsdn, arena); + return extent_addr_get(extent); +} + +static void +large_dalloc_junk_impl(void *ptr, size_t size) { + memset(ptr, JEMALLOC_FREE_JUNK, size); +} +large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk = large_dalloc_junk_impl; + +static void +large_dalloc_maybe_junk_impl(void *ptr, size_t size) { + if (config_fill && have_dss && unlikely(opt_junk_free)) { + /* + * Only bother junk filling if the extent isn't about to be + * unmapped. + */ + if (opt_retain || (have_dss && extent_in_dss(ptr))) { + large_dalloc_junk(ptr, size); + } + } +} +large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk = + large_dalloc_maybe_junk_impl; + +static bool +large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) { + arena_t *arena = extent_arena_get(extent); + size_t oldusize = extent_usize_get(extent); + extent_hooks_t *extent_hooks = extent_hooks_get(arena); + size_t diff = extent_size_get(extent) - (usize + sz_large_pad); + + assert(oldusize > usize); + + if (extent_hooks->split == NULL) { + return true; + } + + /* Split excess pages. */ + if (diff != 0) { + extent_t *trail = extent_split_wrapper(tsdn, arena, + &extent_hooks, extent, usize + sz_large_pad, + sz_size2index(usize), false, diff, NSIZES, false); + if (trail == NULL) { + return true; + } + + if (config_fill && unlikely(opt_junk_free)) { + large_dalloc_maybe_junk(extent_addr_get(trail), + extent_size_get(trail)); + } + + arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, trail); + } + + arena_extent_ralloc_large_shrink(tsdn, arena, extent, oldusize); + + return false; +} + +static bool +large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize, + bool zero) { + arena_t *arena = extent_arena_get(extent); + size_t oldusize = extent_usize_get(extent); + extent_hooks_t *extent_hooks = extent_hooks_get(arena); + size_t trailsize = usize - oldusize; + + if (extent_hooks->merge == NULL) { + return true; + } + + if (config_fill && unlikely(opt_zero)) { + zero = true; + } + /* + * Copy zero into is_zeroed_trail and pass the copy when allocating the + * extent, so that it is possible to make correct junk/zero fill + * decisions below, even if is_zeroed_trail ends up true when zero is + * false. 
+ */ + bool is_zeroed_trail = zero; + bool commit = true; + extent_t *trail; + bool new_mapping; + if ((trail = extents_alloc(tsdn, arena, &extent_hooks, + &arena->extents_dirty, extent_past_get(extent), trailsize, 0, + CACHELINE, false, NSIZES, &is_zeroed_trail, &commit)) != NULL + || (trail = extents_alloc(tsdn, arena, &extent_hooks, + &arena->extents_muzzy, extent_past_get(extent), trailsize, 0, + CACHELINE, false, NSIZES, &is_zeroed_trail, &commit)) != NULL) { + if (config_stats) { + new_mapping = false; + } + } else { + if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks, + extent_past_get(extent), trailsize, 0, CACHELINE, false, + NSIZES, &is_zeroed_trail, &commit)) == NULL) { + return true; + } + if (config_stats) { + new_mapping = true; + } + } + + if (extent_merge_wrapper(tsdn, arena, &extent_hooks, extent, trail)) { + extent_dalloc_wrapper(tsdn, arena, &extent_hooks, trail); + return true; + } + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + szind_t szind = sz_size2index(usize); + extent_szind_set(extent, szind); + rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_addr_get(extent), szind, false); + + if (config_stats && new_mapping) { + arena_stats_mapped_add(tsdn, &arena->stats, trailsize); + } + + if (zero) { + if (config_cache_oblivious) { + /* + * Zero the trailing bytes of the original allocation's + * last page, since they are in an indeterminate state. + * There will always be trailing bytes, because ptr's + * offset from the beginning of the extent is a multiple + * of CACHELINE in [0 .. PAGE). + */ + void *zbase = (void *) + ((uintptr_t)extent_addr_get(extent) + oldusize); + void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase + + PAGE)); + size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase; + assert(nzero > 0); + memset(zbase, 0, nzero); + } + assert(is_zeroed_trail); + } else if (config_fill && unlikely(opt_junk_alloc)) { + memset((void *)((uintptr_t)extent_addr_get(extent) + oldusize), + JEMALLOC_ALLOC_JUNK, usize - oldusize); + } + + arena_extent_ralloc_large_expand(tsdn, arena, extent, oldusize); + + return false; +} + +bool +large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min, + size_t usize_max, bool zero) { + size_t oldusize = extent_usize_get(extent); + + /* The following should have been caught by callers. */ + assert(usize_min > 0 && usize_max <= LARGE_MAXCLASS); + /* Both allocation sizes must be large to avoid a move. */ + assert(oldusize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS); + + if (usize_max > oldusize) { + /* Attempt to expand the allocation in-place. */ + if (!large_ralloc_no_move_expand(tsdn, extent, usize_max, + zero)) { + arena_decay_tick(tsdn, extent_arena_get(extent)); + return false; + } + /* Try again, this time with usize_min. */ + if (usize_min < usize_max && usize_min > oldusize && + large_ralloc_no_move_expand(tsdn, extent, usize_min, + zero)) { + arena_decay_tick(tsdn, extent_arena_get(extent)); + return false; + } + } + + /* + * Avoid moving the allocation if the existing extent size accommodates + * the new size. + */ + if (oldusize >= usize_min && oldusize <= usize_max) { + arena_decay_tick(tsdn, extent_arena_get(extent)); + return false; + } + + /* Attempt to shrink the allocation in-place. 
*/ + if (oldusize > usize_max) { + if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) { + arena_decay_tick(tsdn, extent_arena_get(extent)); + return false; + } + } + return true; +} + +static void * +large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, bool zero) { + if (alignment <= CACHELINE) { + return large_malloc(tsdn, arena, usize, zero); + } + return large_palloc(tsdn, arena, usize, alignment, zero); +} + +void * +large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize, + size_t alignment, bool zero, tcache_t *tcache) { + size_t oldusize = extent_usize_get(extent); + + /* The following should have been caught by callers. */ + assert(usize > 0 && usize <= LARGE_MAXCLASS); + /* Both allocation sizes must be large to avoid a move. */ + assert(oldusize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS); + + /* Try to avoid moving the allocation. */ + if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) { + return extent_addr_get(extent); + } + + /* + * usize and old size are different enough that we need to use a + * different size class. In that case, fall back to allocating new + * space and copying. + */ + void *ret = large_ralloc_move_helper(tsdn, arena, usize, alignment, + zero); + if (ret == NULL) { + return NULL; + } + + size_t copysize = (usize < oldusize) ? usize : oldusize; + memcpy(ret, extent_addr_get(extent), copysize); + isdalloct(tsdn, extent_addr_get(extent), oldusize, tcache, NULL, true); + return ret; +} + +/* + * junked_locked indicates whether the extent's data have been junk-filled, and + * whether the arena's large_mtx is currently held. + */ +static void +large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent, + bool junked_locked) { + if (!junked_locked) { + /* See comments in arena_bin_slabs_full_insert(). 
*/ + if (!arena_is_auto(arena)) { + malloc_mutex_lock(tsdn, &arena->large_mtx); + extent_list_remove(&arena->large, extent); + malloc_mutex_unlock(tsdn, &arena->large_mtx); + } + large_dalloc_maybe_junk(extent_addr_get(extent), + extent_usize_get(extent)); + } else { + malloc_mutex_assert_owner(tsdn, &arena->large_mtx); + if (!arena_is_auto(arena)) { + extent_list_remove(&arena->large, extent); + } + } + arena_extent_dalloc_large_prep(tsdn, arena, extent); +} + +static void +large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { + extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; + arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, extent); +} + +void +large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent) { + large_dalloc_prep_impl(tsdn, extent_arena_get(extent), extent, true); +} + +void +large_dalloc_finish(tsdn_t *tsdn, extent_t *extent) { + large_dalloc_finish_impl(tsdn, extent_arena_get(extent), extent); +} + +void +large_dalloc(tsdn_t *tsdn, extent_t *extent) { + arena_t *arena = extent_arena_get(extent); + large_dalloc_prep_impl(tsdn, arena, extent, false); + large_dalloc_finish_impl(tsdn, arena, extent); + arena_decay_tick(tsdn, arena); +} + +size_t +large_salloc(tsdn_t *tsdn, const extent_t *extent) { + return extent_usize_get(extent); +} + +prof_tctx_t * +large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent) { + return extent_prof_tctx_get(extent); +} + +void +large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx) { + extent_prof_tctx_set(extent, tctx); +} + +void +large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent) { + large_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U); +} diff --git a/deps/jemalloc/src/log.c b/deps/jemalloc/src/log.c new file mode 100644 index 000000000..778902fb9 --- /dev/null +++ b/deps/jemalloc/src/log.c @@ -0,0 +1,78 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/log.h" + +char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE]; +atomic_b_t log_init_done = ATOMIC_INIT(false); + +/* + * Returns true if we were able to pick out a segment. Fills in r_segment_end + * with a pointer to the first character after the end of the string. + */ +static const char * +log_var_extract_segment(const char* segment_begin) { + const char *end; + for (end = segment_begin; *end != '\0' && *end != '|'; end++) { + } + return end; +} + +static bool +log_var_matches_segment(const char *segment_begin, const char *segment_end, + const char *log_var_begin, const char *log_var_end) { + assert(segment_begin <= segment_end); + assert(log_var_begin < log_var_end); + + ptrdiff_t segment_len = segment_end - segment_begin; + ptrdiff_t log_var_len = log_var_end - log_var_begin; + /* The special '.' segment matches everything. */ + if (segment_len == 1 && *segment_begin == '.') { + return true; + } + if (segment_len == log_var_len) { + return strncmp(segment_begin, log_var_begin, segment_len) == 0; + } else if (segment_len < log_var_len) { + return strncmp(segment_begin, log_var_begin, segment_len) == 0 + && log_var_begin[segment_len] == '.'; + } else { + return false; + } +} + +unsigned +log_var_update_state(log_var_t *log_var) { + const char *log_var_begin = log_var->name; + const char *log_var_end = log_var->name + strlen(log_var->name); + + /* Pointer to one before the beginning of the current segment. */ + const char *segment_begin = log_var_names; + + /* + * If log_init done is false, we haven't parsed the malloc conf yet. 
To + * avoid log-spew, we default to not displaying anything. + */ + if (!atomic_load_b(&log_init_done, ATOMIC_ACQUIRE)) { + return LOG_INITIALIZED_NOT_ENABLED; + } + + while (true) { + const char *segment_end = log_var_extract_segment( + segment_begin); + assert(segment_end < log_var_names + JEMALLOC_LOG_VAR_BUFSIZE); + if (log_var_matches_segment(segment_begin, segment_end, + log_var_begin, log_var_end)) { + atomic_store_u(&log_var->state, LOG_ENABLED, + ATOMIC_RELAXED); + return LOG_ENABLED; + } + if (*segment_end == '\0') { + /* Hit the end of the segment string with no match. */ + atomic_store_u(&log_var->state, + LOG_INITIALIZED_NOT_ENABLED, ATOMIC_RELAXED); + return LOG_INITIALIZED_NOT_ENABLED; + } + /* Otherwise, skip the delimiter and continue. */ + segment_begin = segment_end + 1; + } +} diff --git a/deps/jemalloc/src/util.c b/deps/jemalloc/src/malloc_io.c index 4cb0d6c1e..7bdc13f95 100644 --- a/deps/jemalloc/src/util.c +++ b/deps/jemalloc/src/malloc_io.c @@ -1,59 +1,76 @@ -#define assert(e) do { \ +#define JEMALLOC_MALLOC_IO_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/util.h" + +#ifdef assert +# undef assert +#endif +#ifdef not_reached +# undef not_reached +#endif +#ifdef not_implemented +# undef not_implemented +#endif +#ifdef assert_not_implemented +# undef assert_not_implemented +#endif + +/* + * Define simple versions of assertion macros that won't recurse in case + * of assertion failures in malloc_*printf(). + */ +#define assert(e) do { \ if (config_debug && !(e)) { \ malloc_write("<jemalloc>: Failed assertion\n"); \ abort(); \ } \ } while (0) -#define not_reached() do { \ +#define not_reached() do { \ if (config_debug) { \ malloc_write("<jemalloc>: Unreachable code reached\n"); \ abort(); \ } \ + unreachable(); \ } while (0) -#define not_implemented() do { \ +#define not_implemented() do { \ if (config_debug) { \ malloc_write("<jemalloc>: Not implemented\n"); \ abort(); \ } \ } while (0) -#define JEMALLOC_UTIL_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define assert_not_implemented(e) do { \ + if (unlikely(config_debug && !(e))) { \ + not_implemented(); \ + } \ +} while (0) /******************************************************************************/ /* Function prototypes for non-inline static functions. 
*/ -static void wrtmessage(void *cbopaque, const char *s); -#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1) -static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s, +static void wrtmessage(void *cbopaque, const char *s); +#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1) +static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p); -#define D2S_BUFSIZE (1 + U2S_BUFSIZE) -static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p); -#define O2S_BUFSIZE (1 + U2S_BUFSIZE) -static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p); -#define X2S_BUFSIZE (2 + U2S_BUFSIZE) -static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, +#define D2S_BUFSIZE (1 + U2S_BUFSIZE) +static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p); +#define O2S_BUFSIZE (1 + U2S_BUFSIZE) +static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p); +#define X2S_BUFSIZE (2 + U2S_BUFSIZE) +static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p); /******************************************************************************/ /* malloc_message() setup. */ static void -wrtmessage(void *cbopaque, const char *s) -{ - -#ifdef SYS_write - /* - * Use syscall(2) rather than write(2) when possible in order to avoid - * the possibility of memory allocation within libc. This is necessary - * on FreeBSD; most operating systems do not have this problem though. - */ - UNUSED int result = syscall(SYS_write, STDERR_FILENO, s, strlen(s)); -#else - UNUSED int result = write(STDERR_FILENO, s, strlen(s)); -#endif +wrtmessage(void *cbopaque, const char *s) { + malloc_write_fd(STDERR_FILENO, s, strlen(s)); } JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s); @@ -63,13 +80,12 @@ JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s); * je_malloc_message(...) throughout the code. */ void -malloc_write(const char *s) -{ - - if (je_malloc_message != NULL) +malloc_write(const char *s) { + if (je_malloc_message != NULL) { je_malloc_message(NULL, s); - else + } else { wrtmessage(NULL, s); + } } /* @@ -77,28 +93,25 @@ malloc_write(const char *s) * provide a wrapper. 
*/ int -buferror(int err, char *buf, size_t buflen) -{ - +buferror(int err, char *buf, size_t buflen) { #ifdef _WIN32 FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0, - (LPSTR)buf, buflen, NULL); - return (0); -#elif defined(__GLIBC__) && defined(_GNU_SOURCE) + (LPSTR)buf, (DWORD)buflen, NULL); + return 0; +#elif defined(JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE) && defined(_GNU_SOURCE) char *b = strerror_r(err, buf, buflen); if (b != buf) { strncpy(buf, b, buflen); buf[buflen-1] = '\0'; } - return (0); + return 0; #else - return (strerror_r(err, buf, buflen)); + return strerror_r(err, buf, buflen); #endif } uintmax_t -malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) -{ +malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) { uintmax_t ret, digit; unsigned b; bool neg; @@ -143,10 +156,12 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) switch (p[1]) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': - if (b == 0) + if (b == 0) { b = 8; - if (b == 8) + } + if (b == 8) { p++; + } break; case 'X': case 'x': switch (p[2]) { @@ -156,10 +171,12 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) case 'F': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': - if (b == 0) + if (b == 0) { b = 16; - if (b == 16) + } + if (b == 16) { p += 2; + } break; default: break; @@ -171,8 +188,9 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) goto label_return; } } - if (b == 0) + if (b == 0) { b = 10; + } /* Convert. */ ret = 0; @@ -190,8 +208,9 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) } p++; } - if (neg) - ret = -ret; + if (neg) { + ret = (uintmax_t)(-((intmax_t)ret)); + } if (p == ns) { /* No conversion performed. */ @@ -205,15 +224,15 @@ label_return: if (p == ns) { /* No characters were converted. */ *endptr = (char *)nptr; - } else + } else { *endptr = (char *)p; + } } - return (ret); + return ret; } static char * -u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) -{ +u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) { unsigned i; i = U2S_BUFSIZE - 1; @@ -251,23 +270,25 @@ u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) }} *slen_p = U2S_BUFSIZE - 1 - i; - return (&s[i]); + return &s[i]; } static char * -d2s(intmax_t x, char sign, char *s, size_t *slen_p) -{ +d2s(intmax_t x, char sign, char *s, size_t *slen_p) { bool neg; - if ((neg = (x < 0))) + if ((neg = (x < 0))) { x = -x; + } s = u2s(x, 10, false, s, slen_p); - if (neg) + if (neg) { sign = '-'; + } switch (sign) { case '-': - if (!neg) + if (!neg) { break; + } /* Fall through. */ case ' ': case '+': @@ -277,73 +298,70 @@ d2s(intmax_t x, char sign, char *s, size_t *slen_p) break; default: not_reached(); } - return (s); + return s; } static char * -o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p) -{ - +o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p) { s = u2s(x, 8, false, s, slen_p); if (alt_form && *s != '0') { s--; (*slen_p)++; *s = '0'; } - return (s); + return s; } static char * -x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) -{ - +x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) { s = u2s(x, 16, uppercase, s, slen_p); if (alt_form) { s -= 2; (*slen_p) += 2; memcpy(s, uppercase ? 
"0X" : "0x", 2); } - return (s); + return s; } -int -malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) -{ - int ret; +size_t +malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) { size_t i; const char *f; -#define APPEND_C(c) do { \ - if (i < size) \ +#define APPEND_C(c) do { \ + if (i < size) { \ str[i] = (c); \ + } \ i++; \ } while (0) -#define APPEND_S(s, slen) do { \ +#define APPEND_S(s, slen) do { \ if (i < size) { \ size_t cpylen = (slen <= size - i) ? slen : size - i; \ memcpy(&str[i], s, cpylen); \ } \ i += slen; \ } while (0) -#define APPEND_PADDED_S(s, slen, width, left_justify) do { \ +#define APPEND_PADDED_S(s, slen, width, left_justify) do { \ /* Left padding. */ \ size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \ (size_t)width - slen : 0); \ if (!left_justify && pad_len != 0) { \ size_t j; \ - for (j = 0; j < pad_len; j++) \ + for (j = 0; j < pad_len; j++) { \ APPEND_C(' '); \ + } \ } \ /* Value. */ \ APPEND_S(s, slen); \ /* Right padding. */ \ if (left_justify && pad_len != 0) { \ size_t j; \ - for (j = 0; j < pad_len; j++) \ + for (j = 0; j < pad_len; j++) { \ APPEND_C(' '); \ + } \ } \ } while (0) -#define GET_ARG_NUMERIC(val, len) do { \ +#define GET_ARG_NUMERIC(val, len) do { \ switch (len) { \ case '?': \ val = va_arg(ap, int); \ @@ -400,6 +418,8 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) int prec = -1; int width = -1; unsigned char len = '?'; + char *s; + size_t slen; f++; /* Flags. */ @@ -449,10 +469,11 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) break; } /* Width/precision separator. */ - if (*f == '.') + if (*f == '.') { f++; - else + } else { goto label_length; + } /* Precision. */ switch (*f) { case '*': @@ -479,8 +500,9 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) if (*f == 'l') { len = 'q'; f++; - } else + } else { len = 'l'; + } break; case 'q': case 'j': case 't': case 'z': len = *f; @@ -490,8 +512,6 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) } /* Conversion specifier. */ switch (*f) { - char *s; - size_t slen; case '%': /* %% */ APPEND_C(*f); @@ -573,37 +593,35 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) }} } label_out: - if (i < size) + if (i < size) { str[i] = '\0'; - else + } else { str[size - 1] = '\0'; - ret = i; + } #undef APPEND_C #undef APPEND_S #undef APPEND_PADDED_S #undef GET_ARG_NUMERIC - return (ret); + return i; } JEMALLOC_FORMAT_PRINTF(3, 4) -int -malloc_snprintf(char *str, size_t size, const char *format, ...) -{ - int ret; +size_t +malloc_snprintf(char *str, size_t size, const char *format, ...) { + size_t ret; va_list ap; va_start(ap, format); ret = malloc_vsnprintf(str, size, format, ap); va_end(ap); - return (ret); + return ret; } void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, - const char *format, va_list ap) -{ + const char *format, va_list ap) { char buf[MALLOC_PRINTF_BUFSIZE]; if (write_cb == NULL) { @@ -628,8 +646,7 @@ malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, JEMALLOC_FORMAT_PRINTF(3, 4) void malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque, - const char *format, ...) -{ + const char *format, ...) { va_list ap; va_start(ap, format); @@ -640,11 +657,20 @@ malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque, /* Print to stderr in such a way as to avoid memory allocation. 
*/ JEMALLOC_FORMAT_PRINTF(1, 2) void -malloc_printf(const char *format, ...) -{ +malloc_printf(const char *format, ...) { va_list ap; va_start(ap, format); malloc_vcprintf(NULL, NULL, format, ap); va_end(ap); } + +/* + * Restore normal assertion macros, in order to make it possible to compile all + * C files as a single concatenation. + */ +#undef assert +#undef not_reached +#undef not_implemented +#undef assert_not_implemented +#include "jemalloc/internal/assert.h" diff --git a/deps/jemalloc/src/mb.c b/deps/jemalloc/src/mb.c deleted file mode 100644 index dc2c0a256..000000000 --- a/deps/jemalloc/src/mb.c +++ /dev/null @@ -1,2 +0,0 @@ -#define JEMALLOC_MB_C_ -#include "jemalloc/internal/jemalloc_internal.h" diff --git a/deps/jemalloc/src/mutex.c b/deps/jemalloc/src/mutex.c index 2d47af976..30222b3e5 100644 --- a/deps/jemalloc/src/mutex.c +++ b/deps/jemalloc/src/mutex.c @@ -1,12 +1,13 @@ -#define JEMALLOC_MUTEX_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_MUTEX_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" -#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) -#include <dlfcn.h> -#endif +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/spin.h" #ifndef _CRT_SPINCOUNT -#define _CRT_SPINCOUNT 4000 +#define _CRT_SPINCOUNT 4000 #endif /******************************************************************************/ @@ -20,10 +21,6 @@ static bool postpone_init = true; static malloc_mutex_t *postponed_mutexes = NULL; #endif -#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) -static void pthread_create_once(void); -#endif - /******************************************************************************/ /* * We intercept pthread_create() calls in order to toggle isthreaded if the @@ -31,33 +28,11 @@ static void pthread_create_once(void); */ #if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) -static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *, - void *(*)(void *), void *__restrict); - -static void -pthread_create_once(void) -{ - - pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create"); - if (pthread_create_fptr == NULL) { - malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, " - "\"pthread_create\")\n"); - abort(); - } - - isthreaded = true; -} - JEMALLOC_EXPORT int pthread_create(pthread_t *__restrict thread, const pthread_attr_t *__restrict attr, void *(*start_routine)(void *), - void *__restrict arg) -{ - static pthread_once_t once_control = PTHREAD_ONCE_INIT; - - pthread_once(&once_control, pthread_create_once); - - return (pthread_create_fptr(thread, attr, start_routine, arg)); + void *__restrict arg) { + return pthread_create_wrapper(thread, attr, start_routine, arg); } #endif @@ -68,18 +43,108 @@ JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, void *(calloc_cb)(size_t, size_t)); #endif -bool -malloc_mutex_init(malloc_mutex_t *mutex) -{ +void +malloc_mutex_lock_slow(malloc_mutex_t *mutex) { + mutex_prof_data_t *data = &mutex->prof_data; + UNUSED nstime_t before = NSTIME_ZERO_INITIALIZER; + + if (ncpus == 1) { + goto label_spin_done; + } + + int cnt = 0, max_cnt = MALLOC_MUTEX_MAX_SPIN; + do { + spin_cpu_spinwait(); + if (!malloc_mutex_trylock_final(mutex)) { + data->n_spin_acquired++; + return; + } + } while (cnt++ < max_cnt); + + if (!config_stats) { + /* Only spin is useful when stats is off. 
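
malloc_mutex_lock_slow() above spins on a bounded number of trylock attempts before giving up and blocking (and, when stats are enabled, it also records how long it waited). A stripped-down sketch of the same spin-then-block idea on top of plain pthreads; the spin count of 128 and the x86 pause hint are arbitrary choices here, not jemalloc's values:

	#include <pthread.h>

	/* Spin-then-block acquisition: a bounded number of cheap trylocks
	 * (wins if the holder releases quickly), then a blocking lock. */
	static void
	lock_slow(pthread_mutex_t *m) {
		for (int i = 0; i < 128; i++) {
			if (pthread_mutex_trylock(m) == 0) {
				return;	/* acquired on the spin path */
			}
	#if defined(__x86_64__) || defined(__i386__)
			__builtin_ia32_pause();	/* ease contention while spinning */
	#endif
		}
		pthread_mutex_lock(m);	/* true slow path: block */
	}
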
*/ + malloc_mutex_lock_final(mutex); + return; + } +label_spin_done: + nstime_update(&before); + /* Copy before to after to avoid clock skews. */ + nstime_t after; + nstime_copy(&after, &before); + uint32_t n_thds = atomic_fetch_add_u32(&data->n_waiting_thds, 1, + ATOMIC_RELAXED) + 1; + /* One last try as above two calls may take quite some cycles. */ + if (!malloc_mutex_trylock_final(mutex)) { + atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED); + data->n_spin_acquired++; + return; + } + + /* True slow path. */ + malloc_mutex_lock_final(mutex); + /* Update more slow-path only counters. */ + atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED); + nstime_update(&after); + + nstime_t delta; + nstime_copy(&delta, &after); + nstime_subtract(&delta, &before); + data->n_wait_times++; + nstime_add(&data->tot_wait_time, &delta); + if (nstime_compare(&data->max_wait_time, &delta) < 0) { + nstime_copy(&data->max_wait_time, &delta); + } + if (n_thds > data->max_n_thds) { + data->max_n_thds = n_thds; + } +} + +static void +mutex_prof_data_init(mutex_prof_data_t *data) { + memset(data, 0, sizeof(mutex_prof_data_t)); + nstime_init(&data->max_wait_time, 0); + nstime_init(&data->tot_wait_time, 0); + data->prev_owner = NULL; +} + +void +malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex) { + malloc_mutex_assert_owner(tsdn, mutex); + mutex_prof_data_init(&mutex->prof_data); +} + +static int +mutex_addr_comp(const witness_t *witness1, void *mutex1, + const witness_t *witness2, void *mutex2) { + assert(mutex1 != NULL); + assert(mutex2 != NULL); + uintptr_t mu1int = (uintptr_t)mutex1; + uintptr_t mu2int = (uintptr_t)mutex2; + if (mu1int < mu2int) { + return -1; + } else if (mu1int == mu2int) { + return 0; + } else { + return 1; + } +} + +bool +malloc_mutex_init(malloc_mutex_t *mutex, const char *name, + witness_rank_t rank, malloc_mutex_lock_order_t lock_order) { + mutex_prof_data_init(&mutex->prof_data); #ifdef _WIN32 # if _WIN32_WINNT >= 0x0600 InitializeSRWLock(&mutex->lock); # else if (!InitializeCriticalSectionAndSpinCount(&mutex->lock, - _CRT_SPINCOUNT)) - return (true); + _CRT_SPINCOUNT)) { + return true; + } # endif +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + mutex->lock = OS_UNFAIR_LOCK_INIT; #elif (defined(JEMALLOC_OSSPIN)) mutex->lock = 0; #elif (defined(JEMALLOC_MUTEX_INIT_CB)) @@ -88,66 +153,72 @@ malloc_mutex_init(malloc_mutex_t *mutex) postponed_mutexes = mutex; } else { if (_pthread_mutex_init_calloc_cb(&mutex->lock, - bootstrap_calloc) != 0) - return (true); + bootstrap_calloc) != 0) { + return true; + } } #else pthread_mutexattr_t attr; - if (pthread_mutexattr_init(&attr) != 0) - return (true); + if (pthread_mutexattr_init(&attr) != 0) { + return true; + } pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE); if (pthread_mutex_init(&mutex->lock, &attr) != 0) { pthread_mutexattr_destroy(&attr); - return (true); + return true; } pthread_mutexattr_destroy(&attr); #endif - return (false); + if (config_debug) { + mutex->lock_order = lock_order; + if (lock_order == malloc_mutex_address_ordered) { + witness_init(&mutex->witness, name, rank, + mutex_addr_comp, mutex); + } else { + witness_init(&mutex->witness, name, rank, NULL, NULL); + } + } + return false; } void -malloc_mutex_prefork(malloc_mutex_t *mutex) -{ - - malloc_mutex_lock(mutex); +malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) { + malloc_mutex_lock(tsdn, mutex); } void -malloc_mutex_postfork_parent(malloc_mutex_t *mutex) -{ - - malloc_mutex_unlock(mutex); +malloc_mutex_postfork_parent(tsdn_t 
*tsdn, malloc_mutex_t *mutex) { + malloc_mutex_unlock(tsdn, mutex); } void -malloc_mutex_postfork_child(malloc_mutex_t *mutex) -{ - +malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) { #ifdef JEMALLOC_MUTEX_INIT_CB - malloc_mutex_unlock(mutex); + malloc_mutex_unlock(tsdn, mutex); #else - if (malloc_mutex_init(mutex)) { + if (malloc_mutex_init(mutex, mutex->witness.name, + mutex->witness.rank, mutex->lock_order)) { malloc_printf("<jemalloc>: Error re-initializing mutex in " "child\n"); - if (opt_abort) + if (opt_abort) { abort(); + } } #endif } bool -mutex_boot(void) -{ - +malloc_mutex_boot(void) { #ifdef JEMALLOC_MUTEX_INIT_CB postpone_init = false; while (postponed_mutexes != NULL) { if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock, - bootstrap_calloc) != 0) - return (true); + bootstrap_calloc) != 0) { + return true; + } postponed_mutexes = postponed_mutexes->postponed_next; } #endif - return (false); + return false; } diff --git a/deps/jemalloc/src/mutex_pool.c b/deps/jemalloc/src/mutex_pool.c new file mode 100644 index 000000000..f24d10e44 --- /dev/null +++ b/deps/jemalloc/src/mutex_pool.c @@ -0,0 +1,18 @@ +#define JEMALLOC_MUTEX_POOL_C_ + +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/mutex_pool.h" + +bool +mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank) { + for (int i = 0; i < MUTEX_POOL_SIZE; ++i) { + if (malloc_mutex_init(&pool->mutexes[i], name, rank, + malloc_mutex_address_ordered)) { + return true; + } + } + return false; +} diff --git a/deps/jemalloc/src/nstime.c b/deps/jemalloc/src/nstime.c new file mode 100644 index 000000000..71db35396 --- /dev/null +++ b/deps/jemalloc/src/nstime.c @@ -0,0 +1,170 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/nstime.h" + +#include "jemalloc/internal/assert.h" + +#define BILLION UINT64_C(1000000000) +#define MILLION UINT64_C(1000000) + +void +nstime_init(nstime_t *time, uint64_t ns) { + time->ns = ns; +} + +void +nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec) { + time->ns = sec * BILLION + nsec; +} + +uint64_t +nstime_ns(const nstime_t *time) { + return time->ns; +} + +uint64_t +nstime_msec(const nstime_t *time) { + return time->ns / MILLION; +} + +uint64_t +nstime_sec(const nstime_t *time) { + return time->ns / BILLION; +} + +uint64_t +nstime_nsec(const nstime_t *time) { + return time->ns % BILLION; +} + +void +nstime_copy(nstime_t *time, const nstime_t *source) { + *time = *source; +} + +int +nstime_compare(const nstime_t *a, const nstime_t *b) { + return (a->ns > b->ns) - (a->ns < b->ns); +} + +void +nstime_add(nstime_t *time, const nstime_t *addend) { + assert(UINT64_MAX - time->ns >= addend->ns); + + time->ns += addend->ns; +} + +void +nstime_iadd(nstime_t *time, uint64_t addend) { + assert(UINT64_MAX - time->ns >= addend); + + time->ns += addend; +} + +void +nstime_subtract(nstime_t *time, const nstime_t *subtrahend) { + assert(nstime_compare(time, subtrahend) >= 0); + + time->ns -= subtrahend->ns; +} + +void +nstime_isubtract(nstime_t *time, uint64_t subtrahend) { + assert(time->ns >= subtrahend); + + time->ns -= subtrahend; +} + +void +nstime_imultiply(nstime_t *time, uint64_t multiplier) { + assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) << + 2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns)); + + time->ns *= 
multiplier; +} + +void +nstime_idivide(nstime_t *time, uint64_t divisor) { + assert(divisor != 0); + + time->ns /= divisor; +} + +uint64_t +nstime_divide(const nstime_t *time, const nstime_t *divisor) { + assert(divisor->ns != 0); + + return time->ns / divisor->ns; +} + +#ifdef _WIN32 +# define NSTIME_MONOTONIC true +static void +nstime_get(nstime_t *time) { + FILETIME ft; + uint64_t ticks_100ns; + + GetSystemTimeAsFileTime(&ft); + ticks_100ns = (((uint64_t)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; + + nstime_init(time, ticks_100ns * 100); +} +#elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE) +# define NSTIME_MONOTONIC true +static void +nstime_get(nstime_t *time) { + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC_COARSE, &ts); + nstime_init2(time, ts.tv_sec, ts.tv_nsec); +} +#elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC) +# define NSTIME_MONOTONIC true +static void +nstime_get(nstime_t *time) { + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC, &ts); + nstime_init2(time, ts.tv_sec, ts.tv_nsec); +} +#elif defined(JEMALLOC_HAVE_MACH_ABSOLUTE_TIME) +# define NSTIME_MONOTONIC true +static void +nstime_get(nstime_t *time) { + nstime_init(time, mach_absolute_time()); +} +#else +# define NSTIME_MONOTONIC false +static void +nstime_get(nstime_t *time) { + struct timeval tv; + + gettimeofday(&tv, NULL); + nstime_init2(time, tv.tv_sec, tv.tv_usec * 1000); +} +#endif + +static bool +nstime_monotonic_impl(void) { + return NSTIME_MONOTONIC; +#undef NSTIME_MONOTONIC +} +nstime_monotonic_t *JET_MUTABLE nstime_monotonic = nstime_monotonic_impl; + +static bool +nstime_update_impl(nstime_t *time) { + nstime_t old_time; + + nstime_copy(&old_time, time); + nstime_get(time); + + /* Handle non-monotonic clocks. */ + if (unlikely(nstime_compare(&old_time, time) > 0)) { + nstime_copy(time, &old_time); + return true; + } + + return false; +} +nstime_update_t *JET_MUTABLE nstime_update = nstime_update_impl; diff --git a/deps/jemalloc/src/pages.c b/deps/jemalloc/src/pages.c index 83a167f67..26002692d 100644 --- a/deps/jemalloc/src/pages.c +++ b/deps/jemalloc/src/pages.c @@ -1,49 +1,133 @@ -#define JEMALLOC_PAGES_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_PAGES_C_ +#include "jemalloc/internal/jemalloc_preamble.h" + +#include "jemalloc/internal/pages.h" + +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/malloc_io.h" + +#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT +#include <sys/sysctl.h> +#ifdef __FreeBSD__ +#include <vm/vm_param.h> +#endif +#endif /******************************************************************************/ +/* Data. */ -void * -pages_map(void *addr, size_t size) -{ - void *ret; +/* Actual operating system page size, detected during bootstrap, <= PAGE. */ +static size_t os_page; + +#ifndef _WIN32 +# define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE) +# define PAGES_PROT_DECOMMIT (PROT_NONE) +static int mmap_flags; +#endif +static bool os_overcommits; + +const char *thp_mode_names[] = { + "default", + "always", + "never", + "not supported" +}; +thp_mode_t opt_thp = THP_MODE_DEFAULT; +thp_mode_t init_system_thp_mode; + +/* Runtime support for lazy purge. Irrelevant when !pages_can_purge_lazy. */ +static bool pages_can_purge_lazy_runtime = true; +/******************************************************************************/ +/* + * Function prototypes for static functions that are referenced prior to + * definition. 
+ */ + +static void os_pages_unmap(void *addr, size_t size); + +/******************************************************************************/ + +static void * +os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) { + assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr); + assert(ALIGNMENT_CEILING(size, os_page) == size); assert(size != 0); + if (os_overcommits) { + *commit = true; + } + + void *ret; #ifdef _WIN32 /* * If VirtualAlloc can't allocate at the given address when one is * given, it fails and returns NULL. */ - ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, + ret = VirtualAlloc(addr, size, MEM_RESERVE | (*commit ? MEM_COMMIT : 0), PAGE_READWRITE); #else /* * We don't use MAP_FIXED here, because it can cause the *replacement* * of existing mappings, and we only want to create new mappings. */ - ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, - -1, 0); + { + int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT; + + ret = mmap(addr, size, prot, mmap_flags, -1, 0); + } assert(ret != NULL); - if (ret == MAP_FAILED) + if (ret == MAP_FAILED) { ret = NULL; - else if (addr != NULL && ret != addr) { + } else if (addr != NULL && ret != addr) { /* * We succeeded in mapping memory, but not in the right place. */ - pages_unmap(ret, size); + os_pages_unmap(ret, size); ret = NULL; } #endif - assert(ret == NULL || (addr == NULL && ret != addr) - || (addr != NULL && ret == addr)); - return (ret); + assert(ret == NULL || (addr == NULL && ret != addr) || (addr != NULL && + ret == addr)); + return ret; } -void -pages_unmap(void *addr, size_t size) -{ +static void * +os_pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size, + bool *commit) { + void *ret = (void *)((uintptr_t)addr + leadsize); + + assert(alloc_size >= leadsize + size); +#ifdef _WIN32 + os_pages_unmap(addr, alloc_size); + void *new_addr = os_pages_map(ret, size, PAGE, commit); + if (new_addr == ret) { + return ret; + } + if (new_addr != NULL) { + os_pages_unmap(new_addr, size); + } + return NULL; +#else + size_t trailsize = alloc_size - leadsize - size; + + if (leadsize != 0) { + os_pages_unmap(addr, leadsize); + } + if (trailsize != 0) { + os_pages_unmap((void *)((uintptr_t)ret + size), trailsize); + } + return ret; +#endif +} + +static void +os_pages_unmap(void *addr, size_t size) { + assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr); + assert(ALIGNMENT_CEILING(size, os_page) == size); #ifdef _WIN32 if (VirtualFree(addr, 0, MEM_RELEASE) == 0) @@ -56,118 +140,467 @@ pages_unmap(void *addr, size_t size) buferror(get_errno(), buf, sizeof(buf)); malloc_printf("<jemalloc>: Error in " #ifdef _WIN32 - "VirtualFree" + "VirtualFree" #else - "munmap" + "munmap" #endif - "(): %s\n", buf); - if (opt_abort) + "(): %s\n", buf); + if (opt_abort) { abort(); + } + } +} + +static void * +pages_map_slow(size_t size, size_t alignment, bool *commit) { + size_t alloc_size = size + alignment - os_page; + /* Beware size_t wrap-around. 
*/ + if (alloc_size < size) { + return NULL; } + + void *ret; + do { + void *pages = os_pages_map(NULL, alloc_size, alignment, commit); + if (pages == NULL) { + return NULL; + } + size_t leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) + - (uintptr_t)pages; + ret = os_pages_trim(pages, alloc_size, leadsize, size, commit); + } while (ret == NULL); + + assert(ret != NULL); + assert(PAGE_ADDR2BASE(ret) == ret); + return ret; } void * -pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size) -{ - void *ret = (void *)((uintptr_t)addr + leadsize); +pages_map(void *addr, size_t size, size_t alignment, bool *commit) { + assert(alignment >= PAGE); + assert(ALIGNMENT_ADDR2BASE(addr, alignment) == addr); - assert(alloc_size >= leadsize + size); -#ifdef _WIN32 - { - void *new_addr; + /* + * Ideally, there would be a way to specify alignment to mmap() (like + * NetBSD has), but in the absence of such a feature, we have to work + * hard to efficiently create aligned mappings. The reliable, but + * slow method is to create a mapping that is over-sized, then trim the + * excess. However, that always results in one or two calls to + * os_pages_unmap(), and it can leave holes in the process's virtual + * memory map if memory grows downward. + * + * Optimistically try mapping precisely the right amount before falling + * back to the slow method, with the expectation that the optimistic + * approach works most of the time. + */ - pages_unmap(addr, alloc_size); - new_addr = pages_map(ret, size); - if (new_addr == ret) - return (ret); - if (new_addr) - pages_unmap(new_addr, size); - return (NULL); + void *ret = os_pages_map(addr, size, os_page, commit); + if (ret == NULL || ret == addr) { + return ret; } -#else - { - size_t trailsize = alloc_size - leadsize - size; - - if (leadsize != 0) - pages_unmap(addr, leadsize); - if (trailsize != 0) - pages_unmap((void *)((uintptr_t)ret + size), trailsize); - return (ret); + assert(addr == NULL); + if (ALIGNMENT_ADDR2OFFSET(ret, alignment) != 0) { + os_pages_unmap(ret, size); + return pages_map_slow(size, alignment, commit); } -#endif + + assert(PAGE_ADDR2BASE(ret) == ret); + return ret; +} + +void +pages_unmap(void *addr, size_t size) { + assert(PAGE_ADDR2BASE(addr) == addr); + assert(PAGE_CEILING(size) == size); + + os_pages_unmap(addr, size); } static bool -pages_commit_impl(void *addr, size_t size, bool commit) -{ +pages_commit_impl(void *addr, size_t size, bool commit) { + assert(PAGE_ADDR2BASE(addr) == addr); + assert(PAGE_CEILING(size) == size); -#ifndef _WIN32 - /* - * The following decommit/commit implementation is functional, but - * always disabled because it doesn't add value beyong improved - * debugging (at the cost of extra system calls) on systems that - * overcommit. - */ - if (false) { - int prot = commit ? (PROT_READ | PROT_WRITE) : PROT_NONE; - void *result = mmap(addr, size, prot, MAP_PRIVATE | MAP_ANON | - MAP_FIXED, -1, 0); - if (result == MAP_FAILED) - return (true); + if (os_overcommits) { + return true; + } + +#ifdef _WIN32 + return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT, + PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT))); +#else + { + int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT; + void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED, + -1, 0); + if (result == MAP_FAILED) { + return true; + } if (result != addr) { /* * We succeeded in mapping memory, but not in the right * place. 
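
The comment in pages_map() above describes the over-allocate-and-trim technique for aligned mappings: map more than needed, then unmap the misaligned head and the unused tail. A simplified sketch of that slow path using plain mmap/munmap; it assumes size and alignment are page multiples and alignment is a power of two, and it omits the retry loop and commit handling of the real code:

	#include <stdint.h>
	#include <sys/mman.h>
	#include <unistd.h>

	/* Map `size` bytes aligned to `alignment` by over-allocating and
	 * trimming the misaligned head and the leftover tail. */
	static void *
	map_aligned(size_t size, size_t alignment) {
		size_t page = (size_t)sysconf(_SC_PAGESIZE);
		size_t alloc_size = size + alignment - page;
		if (alloc_size < size) {
			return NULL;	/* size_t wrap-around */
		}
		void *p = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED) {
			return NULL;
		}
		uintptr_t addr = (uintptr_t)p;
		uintptr_t aligned = (addr + (alignment - 1)) &
		    ~((uintptr_t)alignment - 1);
		size_t lead = (size_t)(aligned - addr);
		size_t trail = alloc_size - lead - size;
		if (lead != 0) {
			munmap(p, lead);	/* drop misaligned head */
		}
		if (trail != 0) {
			munmap((void *)(aligned + size), trail);	/* drop tail */
		}
		return (void *)aligned;
	}

As the upstream comment notes, trimming always costs one or two extra unmap calls, which is why pages_map() first tries a plain mapping and only falls back to this path when the result happens to be misaligned.
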
*/ - pages_unmap(result, size); - return (true); + os_pages_unmap(result, size); + return true; } - return (false); + return false; } #endif - return (true); } bool -pages_commit(void *addr, size_t size) -{ - - return (pages_commit_impl(addr, size, true)); +pages_commit(void *addr, size_t size) { + return pages_commit_impl(addr, size, true); } bool -pages_decommit(void *addr, size_t size) -{ - - return (pages_commit_impl(addr, size, false)); +pages_decommit(void *addr, size_t size) { + return pages_commit_impl(addr, size, false); } bool -pages_purge(void *addr, size_t size) -{ - bool unzeroed; +pages_purge_lazy(void *addr, size_t size) { + assert(PAGE_ADDR2BASE(addr) == addr); + assert(PAGE_CEILING(size) == size); + + if (!pages_can_purge_lazy) { + return true; + } + if (!pages_can_purge_lazy_runtime) { + /* + * Built with lazy purge enabled, but detected it was not + * supported on the current system. + */ + return true; + } #ifdef _WIN32 VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE); - unzeroed = true; -#elif defined(JEMALLOC_HAVE_MADVISE) -# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED -# define JEMALLOC_MADV_PURGE MADV_DONTNEED -# define JEMALLOC_MADV_ZEROS true -# elif defined(JEMALLOC_PURGE_MADVISE_FREE) -# define JEMALLOC_MADV_PURGE MADV_FREE -# define JEMALLOC_MADV_ZEROS false + return false; +#elif defined(JEMALLOC_PURGE_MADVISE_FREE) + return (madvise(addr, size, +# ifdef MADV_FREE + MADV_FREE # else -# error "No madvise(2) flag defined for purging unused dirty pages." + JEMALLOC_MADV_FREE # endif - int err = madvise(addr, size, JEMALLOC_MADV_PURGE); - unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0); -# undef JEMALLOC_MADV_PURGE -# undef JEMALLOC_MADV_ZEROS + ) != 0); +#elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \ + !defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS) + return (madvise(addr, size, MADV_DONTNEED) != 0); +#else + not_reached(); +#endif +} + +bool +pages_purge_forced(void *addr, size_t size) { + assert(PAGE_ADDR2BASE(addr) == addr); + assert(PAGE_CEILING(size) == size); + + if (!pages_can_purge_forced) { + return true; + } + +#if defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \ + defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS) + return (madvise(addr, size, MADV_DONTNEED) != 0); +#elif defined(JEMALLOC_MAPS_COALESCE) + /* Try to overlay a new demand-zeroed mapping. 
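
pages_purge_lazy() and pages_purge_forced() above boil down to two madvise(2) flavours: MADV_FREE lets the kernel reclaim the pages lazily (their contents become undefined), while MADV_DONTNEED drops them immediately so later reads return zero-filled pages. A Linux-oriented sketch of that distinction, not jemalloc's code:

	#include <stdbool.h>
	#include <stddef.h>
	#include <sys/mman.h>

	/* Tell the kernel that [addr, addr+size) no longer needs its contents
	 * but should stay mapped.  Returns true on failure, matching the
	 * convention of the functions above. */
	static bool
	purge_pages(void *addr, size_t size, bool forced) {
	#ifdef MADV_FREE
		if (!forced) {
			return madvise(addr, size, MADV_FREE) != 0;	/* lazy */
		}
	#endif
		return madvise(addr, size, MADV_DONTNEED) != 0;	/* immediate */
	}
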
*/ + return pages_commit(addr, size); +#else + not_reached(); +#endif +} + +static bool +pages_huge_impl(void *addr, size_t size, bool aligned) { + if (aligned) { + assert(HUGEPAGE_ADDR2BASE(addr) == addr); + assert(HUGEPAGE_CEILING(size) == size); + } +#ifdef JEMALLOC_HAVE_MADVISE_HUGE + return (madvise(addr, size, MADV_HUGEPAGE) != 0); +#else + return true; +#endif +} + +bool +pages_huge(void *addr, size_t size) { + return pages_huge_impl(addr, size, true); +} + +static bool +pages_huge_unaligned(void *addr, size_t size) { + return pages_huge_impl(addr, size, false); +} + +static bool +pages_nohuge_impl(void *addr, size_t size, bool aligned) { + if (aligned) { + assert(HUGEPAGE_ADDR2BASE(addr) == addr); + assert(HUGEPAGE_CEILING(size) == size); + } + +#ifdef JEMALLOC_HAVE_MADVISE_HUGE + return (madvise(addr, size, MADV_NOHUGEPAGE) != 0); +#else + return false; +#endif +} + +bool +pages_nohuge(void *addr, size_t size) { + return pages_nohuge_impl(addr, size, true); +} + +static bool +pages_nohuge_unaligned(void *addr, size_t size) { + return pages_nohuge_impl(addr, size, false); +} + +bool +pages_dontdump(void *addr, size_t size) { + assert(PAGE_ADDR2BASE(addr) == addr); + assert(PAGE_CEILING(size) == size); +#ifdef JEMALLOC_MADVISE_DONTDUMP + return madvise(addr, size, MADV_DONTDUMP) != 0; +#else + return false; +#endif +} + +bool +pages_dodump(void *addr, size_t size) { + assert(PAGE_ADDR2BASE(addr) == addr); + assert(PAGE_CEILING(size) == size); +#ifdef JEMALLOC_MADVISE_DONTDUMP + return madvise(addr, size, MADV_DODUMP) != 0; +#else + return false; +#endif +} + + +static size_t +os_page_detect(void) { +#ifdef _WIN32 + SYSTEM_INFO si; + GetSystemInfo(&si); + return si.dwPageSize; +#elif defined(__FreeBSD__) + return getpagesize(); #else - /* Last resort no-op. */ - unzeroed = true; + long result = sysconf(_SC_PAGESIZE); + if (result == -1) { + return LG_PAGE; + } + return (size_t)result; #endif - return (unzeroed); } +#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT +static bool +os_overcommits_sysctl(void) { + int vm_overcommit; + size_t sz; + + sz = sizeof(vm_overcommit); +#if defined(__FreeBSD__) && defined(VM_OVERCOMMIT) + int mib[2]; + + mib[0] = CTL_VM; + mib[1] = VM_OVERCOMMIT; + if (sysctl(mib, 2, &vm_overcommit, &sz, NULL, 0) != 0) { + return false; /* Error. */ + } +#else + if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0) { + return false; /* Error. */ + } +#endif + + return ((vm_overcommit & 0x3) == 0); +} +#endif + +#ifdef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY +/* + * Use syscall(2) rather than {open,read,close}(2) when possible to avoid + * reentry during bootstrapping if another library has interposed system call + * wrappers. 
+ */ +static bool +os_overcommits_proc(void) { + int fd; + char buf[1]; + +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open) + #if defined(O_CLOEXEC) + fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY | + O_CLOEXEC); + #else + fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY); + if (fd != -1) { + fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC); + } + #endif +#elif defined(JEMALLOC_USE_SYSCALL) && defined(SYS_openat) + #if defined(O_CLOEXEC) + fd = (int)syscall(SYS_openat, + AT_FDCWD, "/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC); + #else + fd = (int)syscall(SYS_openat, + AT_FDCWD, "/proc/sys/vm/overcommit_memory", O_RDONLY); + if (fd != -1) { + fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC); + } + #endif +#else + #if defined(O_CLOEXEC) + fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC); + #else + fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY); + if (fd != -1) { + fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC); + } + #endif +#endif + + if (fd == -1) { + return false; /* Error. */ + } + + ssize_t nread = malloc_read_fd(fd, &buf, sizeof(buf)); +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close) + syscall(SYS_close, fd); +#else + close(fd); +#endif + + if (nread < 1) { + return false; /* Error. */ + } + /* + * /proc/sys/vm/overcommit_memory meanings: + * 0: Heuristic overcommit. + * 1: Always overcommit. + * 2: Never overcommit. + */ + return (buf[0] == '0' || buf[0] == '1'); +} +#endif + +void +pages_set_thp_state (void *ptr, size_t size) { + if (opt_thp == thp_mode_default || opt_thp == init_system_thp_mode) { + return; + } + assert(opt_thp != thp_mode_not_supported && + init_system_thp_mode != thp_mode_not_supported); + + if (opt_thp == thp_mode_always + && init_system_thp_mode != thp_mode_never) { + assert(init_system_thp_mode == thp_mode_default); + pages_huge_unaligned(ptr, size); + } else if (opt_thp == thp_mode_never) { + assert(init_system_thp_mode == thp_mode_default || + init_system_thp_mode == thp_mode_always); + pages_nohuge_unaligned(ptr, size); + } +} + +static void +init_thp_state(void) { + if (!have_madvise_huge) { + if (metadata_thp_enabled() && opt_abort) { + malloc_write("<jemalloc>: no MADV_HUGEPAGE support\n"); + abort(); + } + goto label_error; + } + + static const char sys_state_madvise[] = "always [madvise] never\n"; + static const char sys_state_always[] = "[always] madvise never\n"; + static const char sys_state_never[] = "always madvise [never]\n"; + char buf[sizeof(sys_state_madvise)]; + +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open) + int fd = (int)syscall(SYS_open, + "/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY); +#else + int fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY); +#endif + if (fd == -1) { + goto label_error; + } + + ssize_t nread = malloc_read_fd(fd, &buf, sizeof(buf)); +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close) + syscall(SYS_close, fd); +#else + close(fd); +#endif + + if (strncmp(buf, sys_state_madvise, (size_t)nread) == 0) { + init_system_thp_mode = thp_mode_default; + } else if (strncmp(buf, sys_state_always, (size_t)nread) == 0) { + init_system_thp_mode = thp_mode_always; + } else if (strncmp(buf, sys_state_never, (size_t)nread) == 0) { + init_system_thp_mode = thp_mode_never; + } else { + goto label_error; + } + return; +label_error: + opt_thp = init_system_thp_mode = thp_mode_not_supported; +} + +bool +pages_boot(void) { + os_page = os_page_detect(); + if (os_page > PAGE) { + 
malloc_write("<jemalloc>: Unsupported system page size\n"); + if (opt_abort) { + abort(); + } + return true; + } + +#ifndef _WIN32 + mmap_flags = MAP_PRIVATE | MAP_ANON; +#endif + +#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT + os_overcommits = os_overcommits_sysctl(); +#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY) + os_overcommits = os_overcommits_proc(); +# ifdef MAP_NORESERVE + if (os_overcommits) { + mmap_flags |= MAP_NORESERVE; + } +# endif +#else + os_overcommits = false; +#endif + + init_thp_state(); + + /* Detect lazy purge runtime support. */ + if (pages_can_purge_lazy) { + bool committed = false; + void *madv_free_page = os_pages_map(NULL, PAGE, PAGE, &committed); + if (madv_free_page == NULL) { + return true; + } + assert(pages_can_purge_lazy_runtime); + if (pages_purge_lazy(madv_free_page, PAGE)) { + pages_can_purge_lazy_runtime = false; + } + os_pages_unmap(madv_free_page, PAGE); + } + + return false; +} diff --git a/deps/jemalloc/src/prng.c b/deps/jemalloc/src/prng.c new file mode 100644 index 000000000..83c04bf9b --- /dev/null +++ b/deps/jemalloc/src/prng.c @@ -0,0 +1,3 @@ +#define JEMALLOC_PRNG_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" diff --git a/deps/jemalloc/src/prof.c b/deps/jemalloc/src/prof.c index 5d2b9598f..13df641a0 100644 --- a/deps/jemalloc/src/prof.c +++ b/deps/jemalloc/src/prof.c @@ -1,14 +1,29 @@ -#define JEMALLOC_PROF_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_PROF_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/ckh.h" +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/mutex.h" + /******************************************************************************/ #ifdef JEMALLOC_PROF_LIBUNWIND -#define UNW_LOCAL_ONLY +#define UNW_LOCAL_ONLY #include <libunwind.h> #endif #ifdef JEMALLOC_PROF_LIBGCC +/* + * We have a circular dependency -- jemalloc_internal.h tells us if we should + * use libgcc's unwinding functionality, but after we've included that, we've + * already hooked _Unwind_Backtrace. We'll temporarily disable hooking. + */ +#undef _Unwind_Backtrace #include <unwind.h> +#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, hooks_libc_hook) #endif /******************************************************************************/ @@ -63,7 +78,7 @@ size_t lg_prof_sample; * creating/destroying mutexes. */ static malloc_mutex_t *gctx_locks; -static unsigned cum_gctxs; /* Atomic counter. */ +static atomic_u_t cum_gctxs; /* Atomic counter. */ /* * Table of mutexes that are shared among tdata's. No operations require @@ -78,7 +93,8 @@ static malloc_mutex_t *tdata_locks; * structure that knows about all backtraces currently captured. */ static ckh_t bt2gctx; -static malloc_mutex_t bt2gctx_mtx; +/* Non static to enable profiling. */ +malloc_mutex_t bt2gctx_mtx; /* * Tree of all extant prof_tdata_t structures, regardless of state, @@ -109,7 +125,7 @@ static char prof_dump_buf[ 1 #endif ]; -static unsigned prof_dump_buf_end; +static size_t prof_dump_buf_end; static int prof_dump_fd; /* Do not dump any profiles until bootstrapping is complete. */ @@ -121,20 +137,19 @@ static bool prof_booted = false; * definition. 
*/ -static bool prof_tctx_should_destroy(prof_tctx_t *tctx); +static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx); static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx); -static bool prof_tdata_should_destroy(prof_tdata_t *tdata, +static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, bool even_if_attached); static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached); -static char *prof_thread_name_alloc(tsd_t *tsd, const char *thread_name); +static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name); /******************************************************************************/ /* Red-black trees. */ -JEMALLOC_INLINE_C int -prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) -{ +static int +prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) { uint64_t a_thr_uid = a->thr_uid; uint64_t b_thr_uid = b->thr_uid; int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid); @@ -150,30 +165,29 @@ prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) b_tctx_uid); } } - return (ret); + return ret; } rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t, tctx_link, prof_tctx_comp) -JEMALLOC_INLINE_C int -prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) -{ +static int +prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) { unsigned a_len = a->bt.len; unsigned b_len = b->bt.len; unsigned comp_len = (a_len < b_len) ? a_len : b_len; int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *)); - if (ret == 0) + if (ret == 0) { ret = (a_len > b_len) - (a_len < b_len); - return (ret); + } + return ret; } rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link, prof_gctx_comp) -JEMALLOC_INLINE_C int -prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) -{ +static int +prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) { int ret; uint64_t a_uid = a->thr_uid; uint64_t b_uid = b->thr_uid; @@ -185,7 +199,7 @@ prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim)); } - return (ret); + return ret; } rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link, @@ -194,8 +208,7 @@ rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link, /******************************************************************************/ void -prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) -{ +prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) { prof_tdata_t *tdata; cassert(config_prof); @@ -208,27 +221,28 @@ prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) * programs. 
*/ tdata = prof_tdata_get(tsd, true); - if (tdata != NULL) + if (tdata != NULL) { prof_sample_threshold_update(tdata); + } } if ((uintptr_t)tctx > (uintptr_t)1U) { - malloc_mutex_lock(tctx->tdata->lock); + malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); tctx->prepared = false; - if (prof_tctx_should_destroy(tctx)) + if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) { prof_tctx_destroy(tsd, tctx); - else - malloc_mutex_unlock(tctx->tdata->lock); + } else { + malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); + } } } void -prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx) -{ +prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, + prof_tctx_t *tctx) { + prof_tctx_set(tsdn, ptr, usize, NULL, tctx); - prof_tctx_set(ptr, usize, tctx); - - malloc_mutex_lock(tctx->tdata->lock); + malloc_mutex_lock(tsdn, tctx->tdata->lock); tctx->cnts.curobjs++; tctx->cnts.curbytes += usize; if (opt_prof_accum) { @@ -236,39 +250,34 @@ prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx) tctx->cnts.accumbytes += usize; } tctx->prepared = false; - malloc_mutex_unlock(tctx->tdata->lock); + malloc_mutex_unlock(tsdn, tctx->tdata->lock); } void -prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) -{ - - malloc_mutex_lock(tctx->tdata->lock); +prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) { + malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); assert(tctx->cnts.curobjs > 0); assert(tctx->cnts.curbytes >= usize); tctx->cnts.curobjs--; tctx->cnts.curbytes -= usize; - if (prof_tctx_should_destroy(tctx)) + if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) { prof_tctx_destroy(tsd, tctx); - else - malloc_mutex_unlock(tctx->tdata->lock); + } else { + malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); + } } void -bt_init(prof_bt_t *bt, void **vec) -{ - +bt_init(prof_bt_t *bt, void **vec) { cassert(config_prof); bt->vec = vec; bt->len = 0; } -JEMALLOC_INLINE_C void -prof_enter(tsd_t *tsd, prof_tdata_t *tdata) -{ - +static void +prof_enter(tsd_t *tsd, prof_tdata_t *tdata) { cassert(config_prof); assert(tdata == prof_tdata_get(tsd, false)); @@ -277,17 +286,15 @@ prof_enter(tsd_t *tsd, prof_tdata_t *tdata) tdata->enq = true; } - malloc_mutex_lock(&bt2gctx_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); } -JEMALLOC_INLINE_C void -prof_leave(tsd_t *tsd, prof_tdata_t *tdata) -{ - +static void +prof_leave(tsd_t *tsd, prof_tdata_t *tdata) { cassert(config_prof); assert(tdata == prof_tdata_get(tsd, false)); - malloc_mutex_unlock(&bt2gctx_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); if (tdata != NULL) { bool idump, gdump; @@ -299,17 +306,18 @@ prof_leave(tsd_t *tsd, prof_tdata_t *tdata) gdump = tdata->enq_gdump; tdata->enq_gdump = false; - if (idump) - prof_idump(); - if (gdump) - prof_gdump(); + if (idump) { + prof_idump(tsd_tsdn(tsd)); + } + if (gdump) { + prof_gdump(tsd_tsdn(tsd)); + } } } #ifdef JEMALLOC_PROF_LIBUNWIND void -prof_backtrace(prof_bt_t *bt) -{ +prof_backtrace(prof_bt_t *bt) { int nframes; cassert(config_prof); @@ -317,42 +325,41 @@ prof_backtrace(prof_bt_t *bt) assert(bt->vec != NULL); nframes = unw_backtrace(bt->vec, PROF_BT_MAX); - if (nframes <= 0) + if (nframes <= 0) { return; + } bt->len = nframes; } #elif (defined(JEMALLOC_PROF_LIBGCC)) static _Unwind_Reason_Code -prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) -{ - +prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) { cassert(config_prof); - return (_URC_NO_REASON); + return 
_URC_NO_REASON; } static _Unwind_Reason_Code -prof_unwind_callback(struct _Unwind_Context *context, void *arg) -{ +prof_unwind_callback(struct _Unwind_Context *context, void *arg) { prof_unwind_data_t *data = (prof_unwind_data_t *)arg; void *ip; cassert(config_prof); ip = (void *)_Unwind_GetIP(context); - if (ip == NULL) - return (_URC_END_OF_STACK); + if (ip == NULL) { + return _URC_END_OF_STACK; + } data->bt->vec[data->bt->len] = ip; data->bt->len++; - if (data->bt->len == data->max) - return (_URC_END_OF_STACK); + if (data->bt->len == data->max) { + return _URC_END_OF_STACK; + } - return (_URC_NO_REASON); + return _URC_NO_REASON; } void -prof_backtrace(prof_bt_t *bt) -{ +prof_backtrace(prof_bt_t *bt) { prof_unwind_data_t data = {bt, PROF_BT_MAX}; cassert(config_prof); @@ -361,20 +368,22 @@ prof_backtrace(prof_bt_t *bt) } #elif (defined(JEMALLOC_PROF_GCC)) void -prof_backtrace(prof_bt_t *bt) -{ -#define BT_FRAME(i) \ +prof_backtrace(prof_bt_t *bt) { +#define BT_FRAME(i) \ if ((i) < PROF_BT_MAX) { \ void *p; \ - if (__builtin_frame_address(i) == 0) \ + if (__builtin_frame_address(i) == 0) { \ return; \ + } \ p = __builtin_return_address(i); \ - if (p == NULL) \ + if (p == NULL) { \ return; \ + } \ bt->vec[(i)] = p; \ bt->len = (i) + 1; \ - } else \ - return; + } else { \ + return; \ + } cassert(config_prof); @@ -522,40 +531,36 @@ prof_backtrace(prof_bt_t *bt) } #else void -prof_backtrace(prof_bt_t *bt) -{ - +prof_backtrace(prof_bt_t *bt) { cassert(config_prof); not_reached(); } #endif static malloc_mutex_t * -prof_gctx_mutex_choose(void) -{ - unsigned ngctxs = atomic_add_u(&cum_gctxs, 1); +prof_gctx_mutex_choose(void) { + unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED); - return (&gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]); + return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]; } static malloc_mutex_t * -prof_tdata_mutex_choose(uint64_t thr_uid) -{ - - return (&tdata_locks[thr_uid % PROF_NTDATA_LOCKS]); +prof_tdata_mutex_choose(uint64_t thr_uid) { + return &tdata_locks[thr_uid % PROF_NTDATA_LOCKS]; } static prof_gctx_t * -prof_gctx_create(tsd_t *tsd, prof_bt_t *bt) -{ +prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) { /* * Create a single allocation that has space for vec of length bt->len. */ - prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsd, offsetof(prof_gctx_t, - vec) + (bt->len * sizeof(void *)), false, tcache_get(tsd, true), - true, NULL); - if (gctx == NULL) - return (NULL); + size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *)); + prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size, + sz_size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true), + true); + if (gctx == NULL) { + return NULL; + } gctx->lock = prof_gctx_mutex_choose(); /* * Set nlimbo to 1, in order to avoid a race condition with @@ -567,14 +572,12 @@ prof_gctx_create(tsd_t *tsd, prof_bt_t *bt) memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *)); gctx->bt.vec = gctx->vec; gctx->bt.len = bt->len; - return (gctx); + return gctx; } static void prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx, - prof_tdata_t *tdata) -{ - + prof_tdata_t *tdata) { cassert(config_prof); /* @@ -585,62 +588,66 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx, * into this function. */ prof_enter(tsd, tdata_self); - malloc_mutex_lock(gctx->lock); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); assert(gctx->nlimbo != 0); if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) { /* Remove gctx from bt2gctx. 
*/ - if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) + if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) { not_reached(); + } prof_leave(tsd, tdata_self); /* Destroy gctx. */ - malloc_mutex_unlock(gctx->lock); - idalloctm(tsd, gctx, tcache_get(tsd, false), true); + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + idalloctm(tsd_tsdn(tsd), gctx, NULL, NULL, true, true); } else { /* * Compensate for increment in prof_tctx_destroy() or * prof_lookup(). */ gctx->nlimbo--; - malloc_mutex_unlock(gctx->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); prof_leave(tsd, tdata_self); } } -/* tctx->tdata->lock must be held. */ static bool -prof_tctx_should_destroy(prof_tctx_t *tctx) -{ +prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) { + malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); - if (opt_prof_accum) - return (false); - if (tctx->cnts.curobjs != 0) - return (false); - if (tctx->prepared) - return (false); - return (true); + if (opt_prof_accum) { + return false; + } + if (tctx->cnts.curobjs != 0) { + return false; + } + if (tctx->prepared) { + return false; + } + return true; } static bool -prof_gctx_should_destroy(prof_gctx_t *gctx) -{ - - if (opt_prof_accum) - return (false); - if (!tctx_tree_empty(&gctx->tctxs)) - return (false); - if (gctx->nlimbo != 0) - return (false); - return (true); +prof_gctx_should_destroy(prof_gctx_t *gctx) { + if (opt_prof_accum) { + return false; + } + if (!tctx_tree_empty(&gctx->tctxs)) { + return false; + } + if (gctx->nlimbo != 0) { + return false; + } + return true; } -/* tctx->tdata->lock is held upon entry, and released before return. */ static void -prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) -{ +prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) { prof_tdata_t *tdata = tctx->tdata; prof_gctx_t *gctx = tctx->gctx; bool destroy_tdata, destroy_tctx, destroy_gctx; + malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); + assert(tctx->cnts.curobjs == 0); assert(tctx->cnts.curbytes == 0); assert(!opt_prof_accum); @@ -648,10 +655,10 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) assert(tctx->cnts.accumbytes == 0); ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL); - destroy_tdata = prof_tdata_should_destroy(tdata, false); - malloc_mutex_unlock(tdata->lock); + destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false); + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); - malloc_mutex_lock(gctx->lock); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); switch (tctx->state) { case prof_tctx_state_nominal: tctx_tree_remove(&gctx->tctxs, tctx); @@ -673,8 +680,9 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) */ gctx->nlimbo++; destroy_gctx = true; - } else + } else { destroy_gctx = false; + } break; case prof_tctx_state_dumping: /* @@ -691,27 +699,30 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) destroy_tctx = false; destroy_gctx = false; } - malloc_mutex_unlock(gctx->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); if (destroy_gctx) { prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx, tdata); } - if (destroy_tdata) + malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock); + + if (destroy_tdata) { prof_tdata_destroy(tsd, tdata, false); + } - if (destroy_tctx) - idalloctm(tsd, tctx, tcache_get(tsd, false), true); + if (destroy_tctx) { + idalloctm(tsd_tsdn(tsd), tctx, NULL, NULL, true, true); + } } static bool prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, - void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) -{ + void **p_btkey, prof_gctx_t **p_gctx, 
bool *p_new_gctx) { union { prof_gctx_t *p; void *v; - } gctx; + } gctx, tgctx; union { prof_bt_t *p; void *v; @@ -721,40 +732,57 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, prof_enter(tsd, tdata); if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { /* bt has never been seen before. Insert it. */ - gctx.p = prof_gctx_create(tsd, bt); - if (gctx.v == NULL) { - prof_leave(tsd, tdata); - return (true); + prof_leave(tsd, tdata); + tgctx.p = prof_gctx_create(tsd_tsdn(tsd), bt); + if (tgctx.v == NULL) { + return true; } - btkey.p = &gctx.p->bt; - if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) { - /* OOM. */ - prof_leave(tsd, tdata); - idalloctm(tsd, gctx.v, tcache_get(tsd, false), true); - return (true); + prof_enter(tsd, tdata); + if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { + gctx.p = tgctx.p; + btkey.p = &gctx.p->bt; + if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) { + /* OOM. */ + prof_leave(tsd, tdata); + idalloctm(tsd_tsdn(tsd), gctx.v, NULL, NULL, + true, true); + return true; + } + new_gctx = true; + } else { + new_gctx = false; } - new_gctx = true; } else { + tgctx.v = NULL; + new_gctx = false; + } + + if (!new_gctx) { /* * Increment nlimbo, in order to avoid a race condition with * prof_tctx_destroy()/prof_gctx_try_destroy(). */ - malloc_mutex_lock(gctx.p->lock); + malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock); gctx.p->nlimbo++; - malloc_mutex_unlock(gctx.p->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock); new_gctx = false; + + if (tgctx.v != NULL) { + /* Lost race to insert. */ + idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true, + true); + } } prof_leave(tsd, tdata); *p_btkey = btkey.v; *p_gctx = gctx.p; *p_new_gctx = new_gctx; - return (false); + return false; } prof_tctx_t * -prof_lookup(tsd_t *tsd, prof_bt_t *bt) -{ +prof_lookup(tsd_t *tsd, prof_bt_t *bt) { union { prof_tctx_t *p; void *v; @@ -765,16 +793,17 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) cassert(config_prof); tdata = prof_tdata_get(tsd, false); - if (tdata == NULL) - return (NULL); + if (tdata == NULL) { + return NULL; + } - malloc_mutex_lock(tdata->lock); + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v); - if (!not_found) /* Note double negative! */ + if (!not_found) { /* Note double negative! */ ret.p->prepared = true; - malloc_mutex_unlock(tdata->lock); + } + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); if (not_found) { - tcache_t *tcache; void *btkey; prof_gctx_t *gctx; bool new_gctx, error; @@ -784,17 +813,19 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) * cache. */ if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx, - &new_gctx)) - return (NULL); + &new_gctx)) { + return NULL; + } /* Link a prof_tctx_t into gctx for this thread. 
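The reworked prof_lookup_global() above now drops the global bt2gctx lock before calling prof_gctx_create(), re-searches the table once the lock is re-acquired, and frees the freshly created gctx if another thread won the race to insert one ("Lost race to insert"). A standalone sketch of that lookup-or-insert pattern, using hypothetical names and a toy linked-list table in place of jemalloc's cuckoo hash:

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

typedef struct node_s { char key[32]; struct node_s *next; } node_t;

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static node_t *table_head = NULL;	/* toy "table": a linked list */

static node_t *
table_search(const char *key) {
	for (node_t *n = table_head; n != NULL; n = n->next) {
		if (strcmp(n->key, key) == 0) {
			return n;
		}
	}
	return NULL;
}

node_t *
lookup_or_insert(const char *key) {
	pthread_mutex_lock(&table_lock);
	node_t *n = table_search(key);
	pthread_mutex_unlock(&table_lock);
	if (n != NULL) {
		return n;
	}

	/* Allocate with the lock dropped, as prof_gctx_create() now is. */
	node_t *fresh = malloc(sizeof(*fresh));
	if (fresh == NULL) {
		return NULL;
	}
	strncpy(fresh->key, key, sizeof(fresh->key) - 1);
	fresh->key[sizeof(fresh->key) - 1] = '\0';

	pthread_mutex_lock(&table_lock);
	n = table_search(key);		/* re-check under the lock */
	if (n == NULL) {
		fresh->next = table_head;	/* we won the race: insert */
		table_head = fresh;
		n = fresh;
		fresh = NULL;
	}
	pthread_mutex_unlock(&table_lock);

	if (fresh != NULL) {
		free(fresh);		/* lost the race: discard the spare */
	}
	return n;
}

Allocating outside the lock keeps the prof_enter()/prof_leave() critical sections short, at the cost of occasionally creating and immediately discarding an object when two threads race on the same backtrace.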
*/ - tcache = tcache_get(tsd, true); - ret.v = iallocztm(tsd, sizeof(prof_tctx_t), false, tcache, true, - NULL); + ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t), + sz_size2index(sizeof(prof_tctx_t)), false, NULL, true, + arena_ichoose(tsd, NULL), true); if (ret.p == NULL) { - if (new_gctx) + if (new_gctx) { prof_gctx_try_destroy(tsd, tdata, gctx, tdata); - return (NULL); + } + return NULL; } ret.p->tdata = tdata; ret.p->thr_uid = tdata->thr_uid; @@ -804,47 +835,48 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) ret.p->tctx_uid = tdata->tctx_uid_next++; ret.p->prepared = true; ret.p->state = prof_tctx_state_initializing; - malloc_mutex_lock(tdata->lock); + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v); - malloc_mutex_unlock(tdata->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); if (error) { - if (new_gctx) + if (new_gctx) { prof_gctx_try_destroy(tsd, tdata, gctx, tdata); - idalloctm(tsd, ret.v, tcache, true); - return (NULL); + } + idalloctm(tsd_tsdn(tsd), ret.v, NULL, NULL, true, true); + return NULL; } - malloc_mutex_lock(gctx->lock); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); ret.p->state = prof_tctx_state_nominal; tctx_tree_insert(&gctx->tctxs, ret.p); gctx->nlimbo--; - malloc_mutex_unlock(gctx->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); } - return (ret.p); + return ret.p; } +/* + * The bodies of this function and prof_leakcheck() are compiled out unless heap + * profiling is enabled, so that it is possible to compile jemalloc with + * floating point support completely disabled. Avoiding floating point code is + * important on memory-constrained systems, but it also enables a workaround for + * versions of glibc that don't properly save/restore floating point registers + * during dynamic lazy symbol loading (which internally calls into whatever + * malloc implementation happens to be integrated into the application). Note + * that some compilers (e.g. gcc 4.8) may use floating point registers for fast + * memory moves, so jemalloc must be compiled with such optimizations disabled + * (e.g. + * -mno-sse) in order for the workaround to be complete. + */ void -prof_sample_threshold_update(prof_tdata_t *tdata) -{ - /* - * The body of this function is compiled out unless heap profiling is - * enabled, so that it is possible to compile jemalloc with floating - * point support completely disabled. Avoiding floating point code is - * important on memory-constrained systems, but it also enables a - * workaround for versions of glibc that don't properly save/restore - * floating point registers during dynamic lazy symbol loading (which - * internally calls into whatever malloc implementation happens to be - * integrated into the application). Note that some compilers (e.g. - * gcc 4.8) may use floating point registers for fast memory moves, so - * jemalloc must be compiled with such optimizations disabled (e.g. - * -mno-sse) in order for the workaround to be complete. 
- */ +prof_sample_threshold_update(prof_tdata_t *tdata) { #ifdef JEMALLOC_PROF uint64_t r; double u; - if (!config_prof) + if (!config_prof) { return; + } if (lg_prof_sample == 0) { tdata->bytes_until_sample = 0; @@ -869,8 +901,7 @@ prof_sample_threshold_update(prof_tdata_t *tdata) * pp 500 * (http://luc.devroye.org/rnbookindex.html) */ - prng64(r, 53, tdata->prng_state, UINT64_C(6364136223846793005), - UINT64_C(1442695040888963407)); + r = prng_lg_range_u64(&tdata->prng_state, 53); u = (double)r * (1.0/9007199254740992.0L); tdata->bytes_until_sample = (uint64_t)(log(u) / log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample)))) @@ -880,101 +911,91 @@ prof_sample_threshold_update(prof_tdata_t *tdata) #ifdef JEMALLOC_JET static prof_tdata_t * -prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) -{ +prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, + void *arg) { size_t *tdata_count = (size_t *)arg; (*tdata_count)++; - return (NULL); + return NULL; } size_t -prof_tdata_count(void) -{ +prof_tdata_count(void) { size_t tdata_count = 0; + tsdn_t *tsdn; - malloc_mutex_lock(&tdatas_mtx); + tsdn = tsdn_fetch(); + malloc_mutex_lock(tsdn, &tdatas_mtx); tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter, (void *)&tdata_count); - malloc_mutex_unlock(&tdatas_mtx); + malloc_mutex_unlock(tsdn, &tdatas_mtx); - return (tdata_count); + return tdata_count; } -#endif -#ifdef JEMALLOC_JET size_t -prof_bt_count(void) -{ +prof_bt_count(void) { size_t bt_count; tsd_t *tsd; prof_tdata_t *tdata; tsd = tsd_fetch(); tdata = prof_tdata_get(tsd, false); - if (tdata == NULL) - return (0); + if (tdata == NULL) { + return 0; + } - malloc_mutex_lock(&bt2gctx_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); bt_count = ckh_count(&bt2gctx); - malloc_mutex_unlock(&bt2gctx_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); - return (bt_count); + return bt_count; } #endif -#ifdef JEMALLOC_JET -#undef prof_dump_open -#define prof_dump_open JEMALLOC_N(prof_dump_open_impl) -#endif static int -prof_dump_open(bool propagate_err, const char *filename) -{ +prof_dump_open_impl(bool propagate_err, const char *filename) { int fd; fd = creat(filename, 0644); if (fd == -1 && !propagate_err) { malloc_printf("<jemalloc>: creat(\"%s\"), 0644) failed\n", filename); - if (opt_abort) + if (opt_abort) { abort(); + } } - return (fd); + return fd; } -#ifdef JEMALLOC_JET -#undef prof_dump_open -#define prof_dump_open JEMALLOC_N(prof_dump_open) -prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl); -#endif +prof_dump_open_t *JET_MUTABLE prof_dump_open = prof_dump_open_impl; static bool -prof_dump_flush(bool propagate_err) -{ +prof_dump_flush(bool propagate_err) { bool ret = false; ssize_t err; cassert(config_prof); - err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end); + err = malloc_write_fd(prof_dump_fd, prof_dump_buf, prof_dump_buf_end); if (err == -1) { if (!propagate_err) { malloc_write("<jemalloc>: write() failed during heap " "profile flush\n"); - if (opt_abort) + if (opt_abort) { abort(); + } } ret = true; } prof_dump_buf_end = 0; - return (ret); + return ret; } static bool -prof_dump_close(bool propagate_err) -{ +prof_dump_close(bool propagate_err) { bool ret; assert(prof_dump_fd != -1); @@ -982,13 +1003,12 @@ prof_dump_close(bool propagate_err) close(prof_dump_fd); prof_dump_fd = -1; - return (ret); + return ret; } static bool -prof_dump_write(bool propagate_err, const char *s) -{ - unsigned i, slen, n; +prof_dump_write(bool propagate_err, const char *s) 
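The new prof_sample_threshold_update() body above draws the number of bytes until the next sampled allocation by inverse-transform sampling of a geometric distribution whose mean is 2^lg_prof_sample, following the Devroye reference cited in the code. A minimal standalone sketch of that computation (the helper name is hypothetical, and the +0.5 offset, which keeps log() away from zero, is a slight deviation from the diff):

#include <math.h>
#include <stdint.h>

/* r53 is a uniformly distributed 53-bit random value. */
static uint64_t
next_sample_interval(uint64_t r53, unsigned lg_sample) {
	/* Map the 53-bit value into (0, 1); the +0.5 avoids u == 0. */
	double u = ((double)r53 + 0.5) * (1.0 / 9007199254740992.0);
	/* Per-byte sampling probability; the mean interval is 2^lg_sample. */
	double p = 1.0 / (double)((uint64_t)1U << lg_sample);
	/* Inverse CDF of the geometric distribution. */
	return (uint64_t)(log(u) / log(1.0 - p)) + 1;
}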
{ + size_t i, slen, n; cassert(config_prof); @@ -996,9 +1016,11 @@ prof_dump_write(bool propagate_err, const char *s) slen = strlen(s); while (i < slen) { /* Flush the buffer if it is full. */ - if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) - if (prof_dump_flush(propagate_err) && propagate_err) - return (true); + if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { + if (prof_dump_flush(propagate_err) && propagate_err) { + return true; + } + } if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) { /* Finish writing. */ @@ -1012,13 +1034,12 @@ prof_dump_write(bool propagate_err, const char *s) i += n; } - return (false); + return false; } JEMALLOC_FORMAT_PRINTF(2, 3) static bool -prof_dump_printf(bool propagate_err, const char *format, ...) -{ +prof_dump_printf(bool propagate_err, const char *format, ...) { bool ret; va_list ap; char buf[PROF_PRINTF_BUFSIZE]; @@ -1028,23 +1049,22 @@ prof_dump_printf(bool propagate_err, const char *format, ...) va_end(ap); ret = prof_dump_write(propagate_err, buf); - return (ret); + return ret; } -/* tctx->tdata->lock is held. */ static void -prof_tctx_merge_tdata(prof_tctx_t *tctx, prof_tdata_t *tdata) -{ +prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) { + malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); - malloc_mutex_lock(tctx->gctx->lock); + malloc_mutex_lock(tsdn, tctx->gctx->lock); switch (tctx->state) { case prof_tctx_state_initializing: - malloc_mutex_unlock(tctx->gctx->lock); + malloc_mutex_unlock(tsdn, tctx->gctx->lock); return; case prof_tctx_state_nominal: tctx->state = prof_tctx_state_dumping; - malloc_mutex_unlock(tctx->gctx->lock); + malloc_mutex_unlock(tsdn, tctx->gctx->lock); memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t)); @@ -1063,10 +1083,9 @@ prof_tctx_merge_tdata(prof_tctx_t *tctx, prof_tdata_t *tdata) } } -/* gctx->lock is held. */ static void -prof_tctx_merge_gctx(prof_tctx_t *tctx, prof_gctx_t *gctx) -{ +prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) { + malloc_mutex_assert_owner(tsdn, gctx->lock); gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs; gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes; @@ -1076,10 +1095,11 @@ prof_tctx_merge_gctx(prof_tctx_t *tctx, prof_gctx_t *gctx) } } -/* tctx->gctx is held. */ static prof_tctx_t * -prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) -{ +prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { + tsdn_t *tsdn = (tsdn_t *)arg; + + malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); switch (tctx->state) { case prof_tctx_state_nominal: @@ -1087,20 +1107,26 @@ prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) break; case prof_tctx_state_dumping: case prof_tctx_state_purgatory: - prof_tctx_merge_gctx(tctx, tctx->gctx); + prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx); break; default: not_reached(); } - return (NULL); + return NULL; } -/* gctx->lock is held. 
*/ +struct prof_tctx_dump_iter_arg_s { + tsdn_t *tsdn; + bool propagate_err; +}; + static prof_tctx_t * -prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) -{ - bool propagate_err = *(bool *)arg; +prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) { + struct prof_tctx_dump_iter_arg_s *arg = + (struct prof_tctx_dump_iter_arg_s *)opaque; + + malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock); switch (tctx->state) { case prof_tctx_state_initializing: @@ -1109,25 +1135,27 @@ prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) break; case prof_tctx_state_dumping: case prof_tctx_state_purgatory: - if (prof_dump_printf(propagate_err, + if (prof_dump_printf(arg->propagate_err, " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": " "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs, tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs, - tctx->dump_cnts.accumbytes)) - return (tctx); + tctx->dump_cnts.accumbytes)) { + return tctx; + } break; default: not_reached(); } - return (NULL); + return NULL; } -/* tctx->gctx is held. */ static prof_tctx_t * -prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) -{ +prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { + tsdn_t *tsdn = (tsdn_t *)arg; prof_tctx_t *ret; + malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); + switch (tctx->state) { case prof_tctx_state_nominal: /* New since dumping started; ignore. */ @@ -1144,16 +1172,14 @@ prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) ret = NULL; label_return: - return (ret); + return ret; } static void -prof_dump_gctx_prep(prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) -{ - +prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) { cassert(config_prof); - malloc_mutex_lock(gctx->lock); + malloc_mutex_lock(tsdn, gctx->lock); /* * Increment nlimbo so that gctx won't go away before dump. 
@@ -1165,26 +1191,32 @@ prof_dump_gctx_prep(prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t)); - malloc_mutex_unlock(gctx->lock); + malloc_mutex_unlock(tsdn, gctx->lock); } -static prof_gctx_t * -prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg) -{ - size_t *leak_ngctx = (size_t *)arg; +struct prof_gctx_merge_iter_arg_s { + tsdn_t *tsdn; + size_t leak_ngctx; +}; - malloc_mutex_lock(gctx->lock); - tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, NULL); - if (gctx->cnt_summed.curobjs != 0) - (*leak_ngctx)++; - malloc_mutex_unlock(gctx->lock); +static prof_gctx_t * +prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { + struct prof_gctx_merge_iter_arg_s *arg = + (struct prof_gctx_merge_iter_arg_s *)opaque; + + malloc_mutex_lock(arg->tsdn, gctx->lock); + tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, + (void *)arg->tsdn); + if (gctx->cnt_summed.curobjs != 0) { + arg->leak_ngctx++; + } + malloc_mutex_unlock(arg->tsdn, gctx->lock); - return (NULL); + return NULL; } static void -prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) -{ +prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) { prof_tdata_t *tdata = prof_tdata_get(tsd, false); prof_gctx_t *gctx; @@ -1196,7 +1228,7 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) */ while ((gctx = gctx_tree_first(gctxs)) != NULL) { gctx_tree_remove(gctxs, gctx); - malloc_mutex_lock(gctx->lock); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); { prof_tctx_t *next; @@ -1204,34 +1236,43 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) do { prof_tctx_t *to_destroy = tctx_tree_iter(&gctx->tctxs, next, - prof_tctx_finish_iter, NULL); + prof_tctx_finish_iter, + (void *)tsd_tsdn(tsd)); if (to_destroy != NULL) { next = tctx_tree_next(&gctx->tctxs, to_destroy); tctx_tree_remove(&gctx->tctxs, to_destroy); - idalloctm(tsd, to_destroy, - tcache_get(tsd, false), true); - } else + idalloctm(tsd_tsdn(tsd), to_destroy, + NULL, NULL, true, true); + } else { next = NULL; + } } while (next != NULL); } gctx->nlimbo--; if (prof_gctx_should_destroy(gctx)) { gctx->nlimbo++; - malloc_mutex_unlock(gctx->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); prof_gctx_try_destroy(tsd, tdata, gctx, tdata); - } else - malloc_mutex_unlock(gctx->lock); + } else { + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + } } } +struct prof_tdata_merge_iter_arg_s { + tsdn_t *tsdn; + prof_cnt_t cnt_all; +}; + static prof_tdata_t * -prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) -{ - prof_cnt_t *cnt_all = (prof_cnt_t *)arg; +prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, + void *opaque) { + struct prof_tdata_merge_iter_arg_s *arg = + (struct prof_tdata_merge_iter_arg_s *)opaque; - malloc_mutex_lock(tdata->lock); + malloc_mutex_lock(arg->tsdn, tdata->lock); if (!tdata->expired) { size_t tabind; union { @@ -1242,29 +1283,32 @@ prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) tdata->dumping = true; memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t)); for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL, - &tctx.v);) - prof_tctx_merge_tdata(tctx.p, tdata); + &tctx.v);) { + prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata); + } - cnt_all->curobjs += tdata->cnt_summed.curobjs; - cnt_all->curbytes += tdata->cnt_summed.curbytes; + arg->cnt_all.curobjs += tdata->cnt_summed.curobjs; + arg->cnt_all.curbytes += tdata->cnt_summed.curbytes; if (opt_prof_accum) { - cnt_all->accumobjs += 
tdata->cnt_summed.accumobjs; - cnt_all->accumbytes += tdata->cnt_summed.accumbytes; + arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs; + arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes; } - } else + } else { tdata->dumping = false; - malloc_mutex_unlock(tdata->lock); + } + malloc_mutex_unlock(arg->tsdn, tdata->lock); - return (NULL); + return NULL; } static prof_tdata_t * -prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) -{ +prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, + void *arg) { bool propagate_err = *(bool *)arg; - if (!tdata->dumping) - return (NULL); + if (!tdata->dumping) { + return NULL; + } if (prof_dump_printf(propagate_err, " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n", @@ -1272,48 +1316,42 @@ prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs, tdata->cnt_summed.accumbytes, (tdata->thread_name != NULL) ? " " : "", - (tdata->thread_name != NULL) ? tdata->thread_name : "")) - return (tdata); - return (NULL); + (tdata->thread_name != NULL) ? tdata->thread_name : "")) { + return tdata; + } + return NULL; } -#ifdef JEMALLOC_JET -#undef prof_dump_header -#define prof_dump_header JEMALLOC_N(prof_dump_header_impl) -#endif static bool -prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all) -{ +prof_dump_header_impl(tsdn_t *tsdn, bool propagate_err, + const prof_cnt_t *cnt_all) { bool ret; if (prof_dump_printf(propagate_err, "heap_v2/%"FMTu64"\n" " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs, - cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) - return (true); + cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) { + return true; + } - malloc_mutex_lock(&tdatas_mtx); + malloc_mutex_lock(tsdn, &tdatas_mtx); ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter, (void *)&propagate_err) != NULL); - malloc_mutex_unlock(&tdatas_mtx); - return (ret); + malloc_mutex_unlock(tsdn, &tdatas_mtx); + return ret; } -#ifdef JEMALLOC_JET -#undef prof_dump_header -#define prof_dump_header JEMALLOC_N(prof_dump_header) -prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl); -#endif +prof_dump_header_t *JET_MUTABLE prof_dump_header = prof_dump_header_impl; -/* gctx->lock is held. */ static bool -prof_dump_gctx(bool propagate_err, prof_gctx_t *gctx, const prof_bt_t *bt, - prof_gctx_tree_t *gctxs) -{ +prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx, + const prof_bt_t *bt, prof_gctx_tree_t *gctxs) { bool ret; unsigned i; + struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg; cassert(config_prof); + malloc_mutex_assert_owner(tsdn, gctx->lock); /* Avoid dumping such gctx's that have no useful data. */ if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) || @@ -1347,21 +1385,23 @@ prof_dump_gctx(bool propagate_err, prof_gctx_t *gctx, const prof_bt_t *bt, goto label_return; } + prof_tctx_dump_iter_arg.tsdn = tsdn; + prof_tctx_dump_iter_arg.propagate_err = propagate_err; if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter, - (void *)&propagate_err) != NULL) { + (void *)&prof_tctx_dump_iter_arg) != NULL) { ret = true; goto label_return; } ret = false; label_return: - return (ret); + return ret; } +#ifndef _WIN32 JEMALLOC_FORMAT_PRINTF(1, 2) static int -prof_open_maps(const char *format, ...) -{ +prof_open_maps(const char *format, ...) 
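For orientation, the format strings in prof_dump_header_impl() and prof_tdata_dump_iter() above emit dump text shaped like the following (the 524288 = 2^19 sample period and all counts are made-up values):

heap_v2/524288
 t*: 142: 6815744 [0: 0]
 t0: 97: 4718592 [0: 0] main
 t1: 45: 2097152 [0: 0]

Each per-backtrace section dumped via prof_tctx_dump_iter() then repeats per-thread "t<thr_uid>: <curobjs>: <curbytes> [<accumobjs>: <accumbytes>]" lines in the same shape, and prof_dump_maps() appends the contents of the process's maps file at the end of the dump.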
{ int mfd; va_list ap; char filename[PATH_MAX + 1]; @@ -1369,27 +1409,47 @@ prof_open_maps(const char *format, ...) va_start(ap, format); malloc_vsnprintf(filename, sizeof(filename), format, ap); va_end(ap); + +#if defined(O_CLOEXEC) + mfd = open(filename, O_RDONLY | O_CLOEXEC); +#else mfd = open(filename, O_RDONLY); + if (mfd != -1) { + fcntl(mfd, F_SETFD, fcntl(mfd, F_GETFD) | FD_CLOEXEC); + } +#endif + + return mfd; +} +#endif - return (mfd); +static int +prof_getpid(void) { +#ifdef _WIN32 + return GetCurrentProcessId(); +#else + return getpid(); +#endif } static bool -prof_dump_maps(bool propagate_err) -{ +prof_dump_maps(bool propagate_err) { bool ret; int mfd; cassert(config_prof); #ifdef __FreeBSD__ mfd = prof_open_maps("/proc/curproc/map"); +#elif defined(_WIN32) + mfd = -1; // Not implemented #else { - int pid = getpid(); + int pid = prof_getpid(); mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid); - if (mfd == -1) + if (mfd == -1) { mfd = prof_open_maps("/proc/%d/maps", pid); + } } #endif if (mfd != -1) { @@ -1411,8 +1471,9 @@ prof_dump_maps(bool propagate_err) goto label_return; } } - nread = read(mfd, &prof_dump_buf[prof_dump_buf_end], - PROF_DUMP_BUFSIZE - prof_dump_buf_end); + nread = malloc_read_fd(mfd, + &prof_dump_buf[prof_dump_buf_end], PROF_DUMP_BUFSIZE + - prof_dump_buf_end); } while (nread > 0); } else { ret = true; @@ -1421,152 +1482,263 @@ prof_dump_maps(bool propagate_err) ret = false; label_return: - if (mfd != -1) + if (mfd != -1) { close(mfd); - return (ret); + } + return ret; } +/* + * See prof_sample_threshold_update() comment for why the body of this function + * is conditionally compiled. + */ static void prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx, - const char *filename) -{ - + const char *filename) { +#ifdef JEMALLOC_PROF + /* + * Scaling is equivalent AdjustSamples() in jeprof, but the result may + * differ slightly from what jeprof reports, because here we scale the + * summary values, whereas jeprof scales each context individually and + * reports the sums of the scaled values. + */ if (cnt_all->curbytes != 0) { - malloc_printf("<jemalloc>: Leak summary: %"FMTu64" byte%s, %" - FMTu64" object%s, %zu context%s\n", - cnt_all->curbytes, (cnt_all->curbytes != 1) ? "s" : "", - cnt_all->curobjs, (cnt_all->curobjs != 1) ? "s" : "", - leak_ngctx, (leak_ngctx != 1) ? "s" : ""); + double sample_period = (double)((uint64_t)1 << lg_prof_sample); + double ratio = (((double)cnt_all->curbytes) / + (double)cnt_all->curobjs) / sample_period; + double scale_factor = 1.0 / (1.0 - exp(-ratio)); + uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes) + * scale_factor); + uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) * + scale_factor); + + malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64 + " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n", + curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs != + 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? 
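The leak-summary scaling added to prof_leakcheck() above can be sanity-checked with a few hypothetical numbers (the 2^19-byte sample period and the dump counters below are invented for illustration):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

int
main(void) {
	unsigned lg_prof_sample = 19;	/* hypothetical sample period: 2^19 bytes */
	uint64_t curbytes = 1048576;	/* hypothetical leaked bytes seen in the dump */
	uint64_t curobjs = 4;		/* hypothetical leaked objects seen in the dump */

	double sample_period = (double)((uint64_t)1 << lg_prof_sample);
	double ratio = ((double)curbytes / (double)curobjs) / sample_period;
	double scale_factor = 1.0 / (1.0 - exp(-ratio));

	/* ratio == 0.5, scale_factor ~= 2.54: prints ~2664950 bytes, ~10 objects. */
	printf("~%.0f bytes, ~%.0f objects\n",
	    (double)curbytes * scale_factor, (double)curobjs * scale_factor);
	return 0;
}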
"s" : ""); malloc_printf( "<jemalloc>: Run jeprof on \"%s\" for leak detail\n", filename); } +#endif } +struct prof_gctx_dump_iter_arg_s { + tsdn_t *tsdn; + bool propagate_err; +}; + static prof_gctx_t * -prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg) -{ +prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { prof_gctx_t *ret; - bool propagate_err = *(bool *)arg; + struct prof_gctx_dump_iter_arg_s *arg = + (struct prof_gctx_dump_iter_arg_s *)opaque; - malloc_mutex_lock(gctx->lock); + malloc_mutex_lock(arg->tsdn, gctx->lock); - if (prof_dump_gctx(propagate_err, gctx, &gctx->bt, gctxs)) { + if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt, + gctxs)) { ret = gctx; goto label_return; } ret = NULL; label_return: - malloc_mutex_unlock(gctx->lock); - return (ret); + malloc_mutex_unlock(arg->tsdn, gctx->lock); + return ret; } -static bool -prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck) -{ - prof_tdata_t *tdata; - prof_cnt_t cnt_all; +static void +prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata, + struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg, + struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg, + prof_gctx_tree_t *gctxs) { size_t tabind; union { prof_gctx_t *p; void *v; } gctx; - size_t leak_ngctx; - prof_gctx_tree_t gctxs; - - cassert(config_prof); - tdata = prof_tdata_get(tsd, true); - if (tdata == NULL) - return (true); - - malloc_mutex_lock(&prof_dump_mtx); prof_enter(tsd, tdata); /* * Put gctx's in limbo and clear their counters in preparation for * summing. */ - gctx_tree_new(&gctxs); - for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) - prof_dump_gctx_prep(gctx.p, &gctxs); + gctx_tree_new(gctxs); + for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) { + prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs); + } /* * Iterate over tdatas, and for the non-expired ones snapshot their tctx * stats and merge them into the associated gctx's. */ - memset(&cnt_all, 0, sizeof(prof_cnt_t)); - malloc_mutex_lock(&tdatas_mtx); - tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, (void *)&cnt_all); - malloc_mutex_unlock(&tdatas_mtx); + prof_tdata_merge_iter_arg->tsdn = tsd_tsdn(tsd); + memset(&prof_tdata_merge_iter_arg->cnt_all, 0, sizeof(prof_cnt_t)); + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, + (void *)prof_tdata_merge_iter_arg); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); /* Merge tctx stats into gctx's. */ - leak_ngctx = 0; - gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter, (void *)&leak_ngctx); + prof_gctx_merge_iter_arg->tsdn = tsd_tsdn(tsd); + prof_gctx_merge_iter_arg->leak_ngctx = 0; + gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter, + (void *)prof_gctx_merge_iter_arg); prof_leave(tsd, tdata); +} +static bool +prof_dump_file(tsd_t *tsd, bool propagate_err, const char *filename, + bool leakcheck, prof_tdata_t *tdata, + struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg, + struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg, + struct prof_gctx_dump_iter_arg_s *prof_gctx_dump_iter_arg, + prof_gctx_tree_t *gctxs) { /* Create dump file. */ - if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) - goto label_open_close_error; + if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) { + return true; + } /* Dump profile header. 
*/ - if (prof_dump_header(propagate_err, &cnt_all)) + if (prof_dump_header(tsd_tsdn(tsd), propagate_err, + &prof_tdata_merge_iter_arg->cnt_all)) { goto label_write_error; + } /* Dump per gctx profile stats. */ - if (gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter, - (void *)&propagate_err) != NULL) + prof_gctx_dump_iter_arg->tsdn = tsd_tsdn(tsd); + prof_gctx_dump_iter_arg->propagate_err = propagate_err; + if (gctx_tree_iter(gctxs, NULL, prof_gctx_dump_iter, + (void *)prof_gctx_dump_iter_arg) != NULL) { goto label_write_error; + } /* Dump /proc/<pid>/maps if possible. */ - if (prof_dump_maps(propagate_err)) + if (prof_dump_maps(propagate_err)) { goto label_write_error; + } + + if (prof_dump_close(propagate_err)) { + return true; + } + + return false; +label_write_error: + prof_dump_close(propagate_err); + return true; +} - if (prof_dump_close(propagate_err)) - goto label_open_close_error; +static bool +prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, + bool leakcheck) { + cassert(config_prof); + assert(tsd_reentrancy_level_get(tsd) == 0); + prof_tdata_t * tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) { + return true; + } + + pre_reentrancy(tsd, NULL); + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); + + prof_gctx_tree_t gctxs; + struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg; + struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg; + struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg; + prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg, + &prof_gctx_merge_iter_arg, &gctxs); + bool err = prof_dump_file(tsd, propagate_err, filename, leakcheck, tdata, + &prof_tdata_merge_iter_arg, &prof_gctx_merge_iter_arg, + &prof_gctx_dump_iter_arg, &gctxs); prof_gctx_finish(tsd, &gctxs); - malloc_mutex_unlock(&prof_dump_mtx); - if (leakcheck) - prof_leakcheck(&cnt_all, leak_ngctx, filename); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); + post_reentrancy(tsd); - return (false); -label_write_error: - prof_dump_close(propagate_err); -label_open_close_error: + if (err) { + return true; + } + + if (leakcheck) { + prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all, + prof_gctx_merge_iter_arg.leak_ngctx, filename); + } + return false; +} + +#ifdef JEMALLOC_JET +void +prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs, + uint64_t *accumbytes) { + tsd_t *tsd; + prof_tdata_t *tdata; + struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg; + struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg; + prof_gctx_tree_t gctxs; + + tsd = tsd_fetch(); + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) { + if (curobjs != NULL) { + *curobjs = 0; + } + if (curbytes != NULL) { + *curbytes = 0; + } + if (accumobjs != NULL) { + *accumobjs = 0; + } + if (accumbytes != NULL) { + *accumbytes = 0; + } + return; + } + + prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg, + &prof_gctx_merge_iter_arg, &gctxs); prof_gctx_finish(tsd, &gctxs); - malloc_mutex_unlock(&prof_dump_mtx); - return (true); + + if (curobjs != NULL) { + *curobjs = prof_tdata_merge_iter_arg.cnt_all.curobjs; + } + if (curbytes != NULL) { + *curbytes = prof_tdata_merge_iter_arg.cnt_all.curbytes; + } + if (accumobjs != NULL) { + *accumobjs = prof_tdata_merge_iter_arg.cnt_all.accumobjs; + } + if (accumbytes != NULL) { + *accumbytes = prof_tdata_merge_iter_arg.cnt_all.accumbytes; + } } +#endif -#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1) -#define VSEQ_INVALID UINT64_C(0xffffffffffffffff) +#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1) +#define VSEQ_INVALID 
UINT64_C(0xffffffffffffffff) static void -prof_dump_filename(char *filename, char v, uint64_t vseq) -{ - +prof_dump_filename(char *filename, char v, uint64_t vseq) { cassert(config_prof); if (vseq != VSEQ_INVALID) { /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, "%s.%d.%"FMTu64".%c%"FMTu64".heap", - opt_prof_prefix, (int)getpid(), prof_dump_seq, v, vseq); + opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq); } else { /* "<prefix>.<pid>.<seq>.<v>.heap" */ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, "%s.%d.%"FMTu64".%c.heap", - opt_prof_prefix, (int)getpid(), prof_dump_seq, v); + opt_prof_prefix, prof_getpid(), prof_dump_seq, v); } prof_dump_seq++; } static void -prof_fdump(void) -{ +prof_fdump(void) { tsd_t *tsd; char filename[DUMP_FILENAME_BUFSIZE]; @@ -1574,30 +1746,53 @@ prof_fdump(void) assert(opt_prof_final); assert(opt_prof_prefix[0] != '\0'); - if (!prof_booted) + if (!prof_booted) { return; + } tsd = tsd_fetch(); + assert(tsd_reentrancy_level_get(tsd) == 0); - malloc_mutex_lock(&prof_dump_seq_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump_filename(filename, 'f', VSEQ_INVALID); - malloc_mutex_unlock(&prof_dump_seq_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump(tsd, false, filename, opt_prof_leak); } +bool +prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum) { + cassert(config_prof); + +#ifndef JEMALLOC_ATOMIC_U64 + if (malloc_mutex_init(&prof_accum->mtx, "prof_accum", + WITNESS_RANK_PROF_ACCUM, malloc_mutex_rank_exclusive)) { + return true; + } + prof_accum->accumbytes = 0; +#else + atomic_store_u64(&prof_accum->accumbytes, 0, ATOMIC_RELAXED); +#endif + return false; +} + void -prof_idump(void) -{ +prof_idump(tsdn_t *tsdn) { tsd_t *tsd; prof_tdata_t *tdata; cassert(config_prof); - if (!prof_booted) + if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) { return; - tsd = tsd_fetch(); + } + tsd = tsdn_tsd(tsdn); + if (tsd_reentrancy_level_get(tsd) > 0) { + return; + } + tdata = prof_tdata_get(tsd, false); - if (tdata == NULL) + if (tdata == NULL) { return; + } if (tdata->enq) { tdata->enq_idump = true; return; @@ -1605,53 +1800,56 @@ prof_idump(void) if (opt_prof_prefix[0] != '\0') { char filename[PATH_MAX + 1]; - malloc_mutex_lock(&prof_dump_seq_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump_filename(filename, 'i', prof_dump_iseq); prof_dump_iseq++; - malloc_mutex_unlock(&prof_dump_seq_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump(tsd, false, filename, false); } } bool -prof_mdump(const char *filename) -{ - tsd_t *tsd; - char filename_buf[DUMP_FILENAME_BUFSIZE]; - +prof_mdump(tsd_t *tsd, const char *filename) { cassert(config_prof); + assert(tsd_reentrancy_level_get(tsd) == 0); - if (!opt_prof || !prof_booted) - return (true); - tsd = tsd_fetch(); - + if (!opt_prof || !prof_booted) { + return true; + } + char filename_buf[DUMP_FILENAME_BUFSIZE]; if (filename == NULL) { /* No filename specified, so automatically generate one. 
*/ - if (opt_prof_prefix[0] == '\0') - return (true); - malloc_mutex_lock(&prof_dump_seq_mtx); + if (opt_prof_prefix[0] == '\0') { + return true; + } + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump_filename(filename_buf, 'm', prof_dump_mseq); prof_dump_mseq++; - malloc_mutex_unlock(&prof_dump_seq_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); filename = filename_buf; } - return (prof_dump(tsd, true, filename, false)); + return prof_dump(tsd, true, filename, false); } void -prof_gdump(void) -{ +prof_gdump(tsdn_t *tsdn) { tsd_t *tsd; prof_tdata_t *tdata; cassert(config_prof); - if (!prof_booted) + if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) { return; - tsd = tsd_fetch(); + } + tsd = tsdn_tsd(tsdn); + if (tsd_reentrancy_level_get(tsd) > 0) { + return; + } + tdata = prof_tdata_get(tsd, false); - if (tdata == NULL) + if (tdata == NULL) { return; + } if (tdata->enq) { tdata->enq_gdump = true; return; @@ -1659,17 +1857,16 @@ prof_gdump(void) if (opt_prof_prefix[0] != '\0') { char filename[DUMP_FILENAME_BUFSIZE]; - malloc_mutex_lock(&prof_dump_seq_mtx); + malloc_mutex_lock(tsdn, &prof_dump_seq_mtx); prof_dump_filename(filename, 'u', prof_dump_useq); prof_dump_useq++; - malloc_mutex_unlock(&prof_dump_seq_mtx); + malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx); prof_dump(tsd, false, filename, false); } } static void -prof_bt_hash(const void *key, size_t r_hash[2]) -{ +prof_bt_hash(const void *key, size_t r_hash[2]) { prof_bt_t *bt = (prof_bt_t *)key; cassert(config_prof); @@ -1678,46 +1875,44 @@ prof_bt_hash(const void *key, size_t r_hash[2]) } static bool -prof_bt_keycomp(const void *k1, const void *k2) -{ +prof_bt_keycomp(const void *k1, const void *k2) { const prof_bt_t *bt1 = (prof_bt_t *)k1; const prof_bt_t *bt2 = (prof_bt_t *)k2; cassert(config_prof); - if (bt1->len != bt2->len) - return (false); + if (bt1->len != bt2->len) { + return false; + } return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0); } -JEMALLOC_INLINE_C uint64_t -prof_thr_uid_alloc(void) -{ +static uint64_t +prof_thr_uid_alloc(tsdn_t *tsdn) { uint64_t thr_uid; - malloc_mutex_lock(&next_thr_uid_mtx); + malloc_mutex_lock(tsdn, &next_thr_uid_mtx); thr_uid = next_thr_uid; next_thr_uid++; - malloc_mutex_unlock(&next_thr_uid_mtx); + malloc_mutex_unlock(tsdn, &next_thr_uid_mtx); - return (thr_uid); + return thr_uid; } static prof_tdata_t * prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, - char *thread_name, bool active) -{ + char *thread_name, bool active) { prof_tdata_t *tdata; - tcache_t *tcache; cassert(config_prof); /* Initialize an empty cache for this thread. 
*/ - tcache = tcache_get(tsd, true); - tdata = (prof_tdata_t *)iallocztm(tsd, sizeof(prof_tdata_t), false, - tcache, true, NULL); - if (tdata == NULL) - return (NULL); + tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t), + sz_size2index(sizeof(prof_tdata_t)), false, NULL, true, + arena_get(TSDN_NULL, 0, true), true); + if (tdata == NULL) { + return NULL; + } tdata->lock = prof_tdata_mutex_choose(thr_uid); tdata->thr_uid = thr_uid; @@ -1727,10 +1922,10 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, tdata->expired = false; tdata->tctx_uid_next = 0; - if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, - prof_bt_hash, prof_bt_keycomp)) { - idalloctm(tsd, tdata, tcache, true); - return (NULL); + if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash, + prof_bt_keycomp)) { + idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true); + return NULL; } tdata->prng_state = (uint64_t)(uintptr_t)tdata; @@ -1743,328 +1938,326 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, tdata->dumping = false; tdata->active = active; - malloc_mutex_lock(&tdatas_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); tdata_tree_insert(&tdatas, tdata); - malloc_mutex_unlock(&tdatas_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); - return (tdata); + return tdata; } prof_tdata_t * -prof_tdata_init(tsd_t *tsd) -{ +prof_tdata_init(tsd_t *tsd) { + return prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0, + NULL, prof_thread_active_init_get(tsd_tsdn(tsd))); +} - return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(), 0, NULL, - prof_thread_active_init_get())); +static bool +prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) { + if (tdata->attached && !even_if_attached) { + return false; + } + if (ckh_count(&tdata->bt2tctx) != 0) { + return false; + } + return true; } -/* tdata->lock must be held. */ static bool -prof_tdata_should_destroy(prof_tdata_t *tdata, bool even_if_attached) -{ +prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, + bool even_if_attached) { + malloc_mutex_assert_owner(tsdn, tdata->lock); - if (tdata->attached && !even_if_attached) - return (false); - if (ckh_count(&tdata->bt2tctx) != 0) - return (false); - return (true); + return prof_tdata_should_destroy_unlocked(tdata, even_if_attached); } -/* tdatas_mtx must be held. 
*/ static void prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata, - bool even_if_attached) -{ - tcache_t *tcache; - - assert(prof_tdata_should_destroy(tdata, even_if_attached)); - assert(tsd_prof_tdata_get(tsd) != tdata); + bool even_if_attached) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx); tdata_tree_remove(&tdatas, tdata); - tcache = tcache_get(tsd, false); - if (tdata->thread_name != NULL) - idalloctm(tsd, tdata->thread_name, tcache, true); + assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached)); + + if (tdata->thread_name != NULL) { + idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true, + true); + } ckh_delete(tsd, &tdata->bt2tctx); - idalloctm(tsd, tdata, tcache, true); + idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true); } static void -prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) -{ - - malloc_mutex_lock(&tdatas_mtx); +prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) { + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); prof_tdata_destroy_locked(tsd, tdata, even_if_attached); - malloc_mutex_unlock(&tdatas_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); } static void -prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) -{ +prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) { bool destroy_tdata; - malloc_mutex_lock(tdata->lock); + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); if (tdata->attached) { - destroy_tdata = prof_tdata_should_destroy(tdata, true); + destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, + true); /* * Only detach if !destroy_tdata, because detaching would allow * another thread to win the race to destroy tdata. */ - if (!destroy_tdata) + if (!destroy_tdata) { tdata->attached = false; + } tsd_prof_tdata_set(tsd, NULL); - } else + } else { destroy_tdata = false; - malloc_mutex_unlock(tdata->lock); - if (destroy_tdata) + } + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); + if (destroy_tdata) { prof_tdata_destroy(tsd, tdata, true); + } } prof_tdata_t * -prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) -{ +prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) { uint64_t thr_uid = tdata->thr_uid; uint64_t thr_discrim = tdata->thr_discrim + 1; char *thread_name = (tdata->thread_name != NULL) ? - prof_thread_name_alloc(tsd, tdata->thread_name) : NULL; + prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL; bool active = tdata->active; prof_tdata_detach(tsd, tdata); - return (prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name, - active)); + return prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name, + active); } static bool -prof_tdata_expire(prof_tdata_t *tdata) -{ +prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) { bool destroy_tdata; - malloc_mutex_lock(tdata->lock); + malloc_mutex_lock(tsdn, tdata->lock); if (!tdata->expired) { tdata->expired = true; destroy_tdata = tdata->attached ? false : - prof_tdata_should_destroy(tdata, false); - } else + prof_tdata_should_destroy(tsdn, tdata, false); + } else { destroy_tdata = false; - malloc_mutex_unlock(tdata->lock); + } + malloc_mutex_unlock(tsdn, tdata->lock); - return (destroy_tdata); + return destroy_tdata; } static prof_tdata_t * -prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) -{ +prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, + void *arg) { + tsdn_t *tsdn = (tsdn_t *)arg; - return (prof_tdata_expire(tdata) ? tdata : NULL); + return (prof_tdata_expire(tsdn, tdata) ? 
tdata : NULL); } void -prof_reset(tsd_t *tsd, size_t lg_sample) -{ +prof_reset(tsd_t *tsd, size_t lg_sample) { prof_tdata_t *next; assert(lg_sample < (sizeof(uint64_t) << 3)); - malloc_mutex_lock(&prof_dump_mtx); - malloc_mutex_lock(&tdatas_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); lg_prof_sample = lg_sample; next = NULL; do { prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next, - prof_tdata_reset_iter, NULL); + prof_tdata_reset_iter, (void *)tsd); if (to_destroy != NULL) { next = tdata_tree_next(&tdatas, to_destroy); prof_tdata_destroy_locked(tsd, to_destroy, false); - } else + } else { next = NULL; + } } while (next != NULL); - malloc_mutex_unlock(&tdatas_mtx); - malloc_mutex_unlock(&prof_dump_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); } void -prof_tdata_cleanup(tsd_t *tsd) -{ +prof_tdata_cleanup(tsd_t *tsd) { prof_tdata_t *tdata; - if (!config_prof) + if (!config_prof) { return; + } tdata = tsd_prof_tdata_get(tsd); - if (tdata != NULL) + if (tdata != NULL) { prof_tdata_detach(tsd, tdata); + } } bool -prof_active_get(void) -{ +prof_active_get(tsdn_t *tsdn) { bool prof_active_current; - malloc_mutex_lock(&prof_active_mtx); + malloc_mutex_lock(tsdn, &prof_active_mtx); prof_active_current = prof_active; - malloc_mutex_unlock(&prof_active_mtx); - return (prof_active_current); + malloc_mutex_unlock(tsdn, &prof_active_mtx); + return prof_active_current; } bool -prof_active_set(bool active) -{ +prof_active_set(tsdn_t *tsdn, bool active) { bool prof_active_old; - malloc_mutex_lock(&prof_active_mtx); + malloc_mutex_lock(tsdn, &prof_active_mtx); prof_active_old = prof_active; prof_active = active; - malloc_mutex_unlock(&prof_active_mtx); - return (prof_active_old); + malloc_mutex_unlock(tsdn, &prof_active_mtx); + return prof_active_old; } const char * -prof_thread_name_get(void) -{ - tsd_t *tsd; +prof_thread_name_get(tsd_t *tsd) { prof_tdata_t *tdata; - tsd = tsd_fetch(); tdata = prof_tdata_get(tsd, true); - if (tdata == NULL) - return (""); + if (tdata == NULL) { + return ""; + } return (tdata->thread_name != NULL ? tdata->thread_name : ""); } static char * -prof_thread_name_alloc(tsd_t *tsd, const char *thread_name) -{ +prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) { char *ret; size_t size; - if (thread_name == NULL) - return (NULL); + if (thread_name == NULL) { + return NULL; + } size = strlen(thread_name) + 1; - if (size == 1) - return (""); + if (size == 1) { + return ""; + } - ret = iallocztm(tsd, size, false, tcache_get(tsd, true), true, NULL); - if (ret == NULL) - return (NULL); + ret = iallocztm(tsdn, size, sz_size2index(size), false, NULL, true, + arena_get(TSDN_NULL, 0, true), true); + if (ret == NULL) { + return NULL; + } memcpy(ret, thread_name, size); - return (ret); + return ret; } int -prof_thread_name_set(tsd_t *tsd, const char *thread_name) -{ +prof_thread_name_set(tsd_t *tsd, const char *thread_name) { prof_tdata_t *tdata; unsigned i; char *s; tdata = prof_tdata_get(tsd, true); - if (tdata == NULL) - return (EAGAIN); + if (tdata == NULL) { + return EAGAIN; + } /* Validate input. 
*/ - if (thread_name == NULL) - return (EFAULT); + if (thread_name == NULL) { + return EFAULT; + } for (i = 0; thread_name[i] != '\0'; i++) { char c = thread_name[i]; - if (!isgraph(c) && !isblank(c)) - return (EFAULT); + if (!isgraph(c) && !isblank(c)) { + return EFAULT; + } } - s = prof_thread_name_alloc(tsd, thread_name); - if (s == NULL) - return (EAGAIN); + s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name); + if (s == NULL) { + return EAGAIN; + } if (tdata->thread_name != NULL) { - idalloctm(tsd, tdata->thread_name, tcache_get(tsd, false), + idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true, true); tdata->thread_name = NULL; } - if (strlen(s) > 0) + if (strlen(s) > 0) { tdata->thread_name = s; - return (0); + } + return 0; } bool -prof_thread_active_get(void) -{ - tsd_t *tsd; +prof_thread_active_get(tsd_t *tsd) { prof_tdata_t *tdata; - tsd = tsd_fetch(); tdata = prof_tdata_get(tsd, true); - if (tdata == NULL) - return (false); - return (tdata->active); + if (tdata == NULL) { + return false; + } + return tdata->active; } bool -prof_thread_active_set(bool active) -{ - tsd_t *tsd; +prof_thread_active_set(tsd_t *tsd, bool active) { prof_tdata_t *tdata; - tsd = tsd_fetch(); tdata = prof_tdata_get(tsd, true); - if (tdata == NULL) - return (true); + if (tdata == NULL) { + return true; + } tdata->active = active; - return (false); + return false; } bool -prof_thread_active_init_get(void) -{ +prof_thread_active_init_get(tsdn_t *tsdn) { bool active_init; - malloc_mutex_lock(&prof_thread_active_init_mtx); + malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx); active_init = prof_thread_active_init; - malloc_mutex_unlock(&prof_thread_active_init_mtx); - return (active_init); + malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx); + return active_init; } bool -prof_thread_active_init_set(bool active_init) -{ +prof_thread_active_init_set(tsdn_t *tsdn, bool active_init) { bool active_init_old; - malloc_mutex_lock(&prof_thread_active_init_mtx); + malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx); active_init_old = prof_thread_active_init; prof_thread_active_init = active_init; - malloc_mutex_unlock(&prof_thread_active_init_mtx); - return (active_init_old); + malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx); + return active_init_old; } bool -prof_gdump_get(void) -{ +prof_gdump_get(tsdn_t *tsdn) { bool prof_gdump_current; - malloc_mutex_lock(&prof_gdump_mtx); + malloc_mutex_lock(tsdn, &prof_gdump_mtx); prof_gdump_current = prof_gdump_val; - malloc_mutex_unlock(&prof_gdump_mtx); - return (prof_gdump_current); + malloc_mutex_unlock(tsdn, &prof_gdump_mtx); + return prof_gdump_current; } bool -prof_gdump_set(bool gdump) -{ +prof_gdump_set(tsdn_t *tsdn, bool gdump) { bool prof_gdump_old; - malloc_mutex_lock(&prof_gdump_mtx); + malloc_mutex_lock(tsdn, &prof_gdump_mtx); prof_gdump_old = prof_gdump_val; prof_gdump_val = gdump; - malloc_mutex_unlock(&prof_gdump_mtx); - return (prof_gdump_old); + malloc_mutex_unlock(tsdn, &prof_gdump_mtx); + return prof_gdump_old; } void -prof_boot0(void) -{ - +prof_boot0(void) { cassert(config_prof); memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT, @@ -2072,9 +2265,7 @@ prof_boot0(void) } void -prof_boot1(void) -{ - +prof_boot1(void) { cassert(config_prof); /* @@ -2098,72 +2289,98 @@ prof_boot1(void) } bool -prof_boot2(void) -{ - +prof_boot2(tsd_t *tsd) { cassert(config_prof); if (opt_prof) { - tsd_t *tsd; unsigned i; lg_prof_sample = opt_lg_prof_sample; prof_active = opt_prof_active; - if (malloc_mutex_init(&prof_active_mtx)) - return (true); + if 
(malloc_mutex_init(&prof_active_mtx, "prof_active", + WITNESS_RANK_PROF_ACTIVE, malloc_mutex_rank_exclusive)) { + return true; + } prof_gdump_val = opt_prof_gdump; - if (malloc_mutex_init(&prof_gdump_mtx)) - return (true); + if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump", + WITNESS_RANK_PROF_GDUMP, malloc_mutex_rank_exclusive)) { + return true; + } prof_thread_active_init = opt_prof_thread_active_init; - if (malloc_mutex_init(&prof_thread_active_init_mtx)) - return (true); + if (malloc_mutex_init(&prof_thread_active_init_mtx, + "prof_thread_active_init", + WITNESS_RANK_PROF_THREAD_ACTIVE_INIT, + malloc_mutex_rank_exclusive)) { + return true; + } - tsd = tsd_fetch(); if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash, - prof_bt_keycomp)) - return (true); - if (malloc_mutex_init(&bt2gctx_mtx)) - return (true); + prof_bt_keycomp)) { + return true; + } + if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx", + WITNESS_RANK_PROF_BT2GCTX, malloc_mutex_rank_exclusive)) { + return true; + } tdata_tree_new(&tdatas); - if (malloc_mutex_init(&tdatas_mtx)) - return (true); + if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas", + WITNESS_RANK_PROF_TDATAS, malloc_mutex_rank_exclusive)) { + return true; + } next_thr_uid = 0; - if (malloc_mutex_init(&next_thr_uid_mtx)) - return (true); + if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid", + WITNESS_RANK_PROF_NEXT_THR_UID, malloc_mutex_rank_exclusive)) { + return true; + } - if (malloc_mutex_init(&prof_dump_seq_mtx)) - return (true); - if (malloc_mutex_init(&prof_dump_mtx)) - return (true); + if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq", + WITNESS_RANK_PROF_DUMP_SEQ, malloc_mutex_rank_exclusive)) { + return true; + } + if (malloc_mutex_init(&prof_dump_mtx, "prof_dump", + WITNESS_RANK_PROF_DUMP, malloc_mutex_rank_exclusive)) { + return true; + } if (opt_prof_final && opt_prof_prefix[0] != '\0' && atexit(prof_fdump) != 0) { malloc_write("<jemalloc>: Error in atexit()\n"); - if (opt_abort) + if (opt_abort) { abort(); + } } - gctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS * - sizeof(malloc_mutex_t)); - if (gctx_locks == NULL) - return (true); + gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), + b0get(), PROF_NCTX_LOCKS * sizeof(malloc_mutex_t), + CACHELINE); + if (gctx_locks == NULL) { + return true; + } for (i = 0; i < PROF_NCTX_LOCKS; i++) { - if (malloc_mutex_init(&gctx_locks[i])) - return (true); + if (malloc_mutex_init(&gctx_locks[i], "prof_gctx", + WITNESS_RANK_PROF_GCTX, + malloc_mutex_rank_exclusive)) { + return true; + } } - tdata_locks = (malloc_mutex_t *)base_alloc(PROF_NTDATA_LOCKS * - sizeof(malloc_mutex_t)); - if (tdata_locks == NULL) - return (true); + tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), + b0get(), PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t), + CACHELINE); + if (tdata_locks == NULL) { + return true; + } for (i = 0; i < PROF_NTDATA_LOCKS; i++) { - if (malloc_mutex_init(&tdata_locks[i])) - return (true); + if (malloc_mutex_init(&tdata_locks[i], "prof_tdata", + WITNESS_RANK_PROF_TDATA, + malloc_mutex_rank_exclusive)) { + return true; + } } } @@ -2177,60 +2394,79 @@ prof_boot2(void) prof_booted = true; - return (false); + return false; } void -prof_prefork(void) -{ - - if (opt_prof) { +prof_prefork0(tsdn_t *tsdn) { + if (config_prof && opt_prof) { unsigned i; - malloc_mutex_prefork(&tdatas_mtx); - malloc_mutex_prefork(&bt2gctx_mtx); - malloc_mutex_prefork(&next_thr_uid_mtx); - malloc_mutex_prefork(&prof_dump_seq_mtx); - for (i = 0; i < PROF_NCTX_LOCKS; i++) - 
malloc_mutex_prefork(&gctx_locks[i]); - for (i = 0; i < PROF_NTDATA_LOCKS; i++) - malloc_mutex_prefork(&tdata_locks[i]); + malloc_mutex_prefork(tsdn, &prof_dump_mtx); + malloc_mutex_prefork(tsdn, &bt2gctx_mtx); + malloc_mutex_prefork(tsdn, &tdatas_mtx); + for (i = 0; i < PROF_NTDATA_LOCKS; i++) { + malloc_mutex_prefork(tsdn, &tdata_locks[i]); + } + for (i = 0; i < PROF_NCTX_LOCKS; i++) { + malloc_mutex_prefork(tsdn, &gctx_locks[i]); + } } } void -prof_postfork_parent(void) -{ +prof_prefork1(tsdn_t *tsdn) { + if (config_prof && opt_prof) { + malloc_mutex_prefork(tsdn, &prof_active_mtx); + malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx); + malloc_mutex_prefork(tsdn, &prof_gdump_mtx); + malloc_mutex_prefork(tsdn, &next_thr_uid_mtx); + malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx); + } +} - if (opt_prof) { +void +prof_postfork_parent(tsdn_t *tsdn) { + if (config_prof && opt_prof) { unsigned i; - for (i = 0; i < PROF_NTDATA_LOCKS; i++) - malloc_mutex_postfork_parent(&tdata_locks[i]); - for (i = 0; i < PROF_NCTX_LOCKS; i++) - malloc_mutex_postfork_parent(&gctx_locks[i]); - malloc_mutex_postfork_parent(&prof_dump_seq_mtx); - malloc_mutex_postfork_parent(&next_thr_uid_mtx); - malloc_mutex_postfork_parent(&bt2gctx_mtx); - malloc_mutex_postfork_parent(&tdatas_mtx); + malloc_mutex_postfork_parent(tsdn, + &prof_thread_active_init_mtx); + malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_active_mtx); + for (i = 0; i < PROF_NCTX_LOCKS; i++) { + malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]); + } + for (i = 0; i < PROF_NTDATA_LOCKS; i++) { + malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]); + } + malloc_mutex_postfork_parent(tsdn, &tdatas_mtx); + malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx); } } void -prof_postfork_child(void) -{ - - if (opt_prof) { +prof_postfork_child(tsdn_t *tsdn) { + if (config_prof && opt_prof) { unsigned i; - for (i = 0; i < PROF_NTDATA_LOCKS; i++) - malloc_mutex_postfork_child(&tdata_locks[i]); - for (i = 0; i < PROF_NCTX_LOCKS; i++) - malloc_mutex_postfork_child(&gctx_locks[i]); - malloc_mutex_postfork_child(&prof_dump_seq_mtx); - malloc_mutex_postfork_child(&next_thr_uid_mtx); - malloc_mutex_postfork_child(&bt2gctx_mtx); - malloc_mutex_postfork_child(&tdatas_mtx); + malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx); + malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx); + malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx); + malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx); + malloc_mutex_postfork_child(tsdn, &prof_active_mtx); + for (i = 0; i < PROF_NCTX_LOCKS; i++) { + malloc_mutex_postfork_child(tsdn, &gctx_locks[i]); + } + for (i = 0; i < PROF_NTDATA_LOCKS; i++) { + malloc_mutex_postfork_child(tsdn, &tdata_locks[i]); + } + malloc_mutex_postfork_child(tsdn, &tdatas_mtx); + malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx); + malloc_mutex_postfork_child(tsdn, &prof_dump_mtx); } } diff --git a/deps/jemalloc/src/quarantine.c b/deps/jemalloc/src/quarantine.c deleted file mode 100644 index 6c43dfcaa..000000000 --- a/deps/jemalloc/src/quarantine.c +++ /dev/null @@ -1,183 +0,0 @@ -#define JEMALLOC_QUARANTINE_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/* - * Quarantine pointers close to NULL are used to encode state information that - * is used for cleaning up during thread shutdown. 
- */ -#define QUARANTINE_STATE_REINCARNATED ((quarantine_t *)(uintptr_t)1) -#define QUARANTINE_STATE_PURGATORY ((quarantine_t *)(uintptr_t)2) -#define QUARANTINE_STATE_MAX QUARANTINE_STATE_PURGATORY - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. */ - -static quarantine_t *quarantine_grow(tsd_t *tsd, quarantine_t *quarantine); -static void quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine); -static void quarantine_drain(tsd_t *tsd, quarantine_t *quarantine, - size_t upper_bound); - -/******************************************************************************/ - -static quarantine_t * -quarantine_init(tsd_t *tsd, size_t lg_maxobjs) -{ - quarantine_t *quarantine; - - assert(tsd_nominal(tsd)); - - quarantine = (quarantine_t *)iallocztm(tsd, offsetof(quarantine_t, objs) - + ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)), false, - tcache_get(tsd, true), true, NULL); - if (quarantine == NULL) - return (NULL); - quarantine->curbytes = 0; - quarantine->curobjs = 0; - quarantine->first = 0; - quarantine->lg_maxobjs = lg_maxobjs; - - return (quarantine); -} - -void -quarantine_alloc_hook_work(tsd_t *tsd) -{ - quarantine_t *quarantine; - - if (!tsd_nominal(tsd)) - return; - - quarantine = quarantine_init(tsd, LG_MAXOBJS_INIT); - /* - * Check again whether quarantine has been initialized, because - * quarantine_init() may have triggered recursive initialization. - */ - if (tsd_quarantine_get(tsd) == NULL) - tsd_quarantine_set(tsd, quarantine); - else - idalloctm(tsd, quarantine, tcache_get(tsd, false), true); -} - -static quarantine_t * -quarantine_grow(tsd_t *tsd, quarantine_t *quarantine) -{ - quarantine_t *ret; - - ret = quarantine_init(tsd, quarantine->lg_maxobjs + 1); - if (ret == NULL) { - quarantine_drain_one(tsd, quarantine); - return (quarantine); - } - - ret->curbytes = quarantine->curbytes; - ret->curobjs = quarantine->curobjs; - if (quarantine->first + quarantine->curobjs <= (ZU(1) << - quarantine->lg_maxobjs)) { - /* objs ring buffer data are contiguous. */ - memcpy(ret->objs, &quarantine->objs[quarantine->first], - quarantine->curobjs * sizeof(quarantine_obj_t)); - } else { - /* objs ring buffer data wrap around. 
*/ - size_t ncopy_a = (ZU(1) << quarantine->lg_maxobjs) - - quarantine->first; - size_t ncopy_b = quarantine->curobjs - ncopy_a; - - memcpy(ret->objs, &quarantine->objs[quarantine->first], ncopy_a - * sizeof(quarantine_obj_t)); - memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b * - sizeof(quarantine_obj_t)); - } - idalloctm(tsd, quarantine, tcache_get(tsd, false), true); - - tsd_quarantine_set(tsd, ret); - return (ret); -} - -static void -quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine) -{ - quarantine_obj_t *obj = &quarantine->objs[quarantine->first]; - assert(obj->usize == isalloc(obj->ptr, config_prof)); - idalloctm(tsd, obj->ptr, NULL, false); - quarantine->curbytes -= obj->usize; - quarantine->curobjs--; - quarantine->first = (quarantine->first + 1) & ((ZU(1) << - quarantine->lg_maxobjs) - 1); -} - -static void -quarantine_drain(tsd_t *tsd, quarantine_t *quarantine, size_t upper_bound) -{ - - while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0) - quarantine_drain_one(tsd, quarantine); -} - -void -quarantine(tsd_t *tsd, void *ptr) -{ - quarantine_t *quarantine; - size_t usize = isalloc(ptr, config_prof); - - cassert(config_fill); - assert(opt_quarantine); - - if ((quarantine = tsd_quarantine_get(tsd)) == NULL) { - idalloctm(tsd, ptr, NULL, false); - return; - } - /* - * Drain one or more objects if the quarantine size limit would be - * exceeded by appending ptr. - */ - if (quarantine->curbytes + usize > opt_quarantine) { - size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine - - usize : 0; - quarantine_drain(tsd, quarantine, upper_bound); - } - /* Grow the quarantine ring buffer if it's full. */ - if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs)) - quarantine = quarantine_grow(tsd, quarantine); - /* quarantine_grow() must free a slot if it fails to grow. */ - assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs)); - /* Append ptr if its size doesn't exceed the quarantine size. */ - if (quarantine->curbytes + usize <= opt_quarantine) { - size_t offset = (quarantine->first + quarantine->curobjs) & - ((ZU(1) << quarantine->lg_maxobjs) - 1); - quarantine_obj_t *obj = &quarantine->objs[offset]; - obj->ptr = ptr; - obj->usize = usize; - quarantine->curbytes += usize; - quarantine->curobjs++; - if (config_fill && unlikely(opt_junk_free)) { - /* - * Only do redzone validation if Valgrind isn't in - * operation. - */ - if ((!config_valgrind || likely(!in_valgrind)) - && usize <= SMALL_MAXCLASS) - arena_quarantine_junk_small(ptr, usize); - else - memset(ptr, 0x5a, usize); - } - } else { - assert(quarantine->curbytes == 0); - idalloctm(tsd, ptr, NULL, false); - } -} - -void -quarantine_cleanup(tsd_t *tsd) -{ - quarantine_t *quarantine; - - if (!config_fill) - return; - - quarantine = tsd_quarantine_get(tsd); - if (quarantine != NULL) { - quarantine_drain(tsd, quarantine, 0); - idalloctm(tsd, quarantine, tcache_get(tsd, false), true); - tsd_quarantine_set(tsd, NULL); - } -} diff --git a/deps/jemalloc/src/rtree.c b/deps/jemalloc/src/rtree.c index af0d97e75..53702cf72 100644 --- a/deps/jemalloc/src/rtree.c +++ b/deps/jemalloc/src/rtree.c @@ -1,127 +1,320 @@ -#define JEMALLOC_RTREE_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_RTREE_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" -static unsigned -hmin(unsigned ha, unsigned hb) -{ +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/mutex.h" - return (ha < hb ? 
ha : hb); +/* + * Only the most significant bits of keys passed to rtree_{read,write}() are + * used. + */ +bool +rtree_new(rtree_t *rtree, bool zeroed) { +#ifdef JEMALLOC_JET + if (!zeroed) { + memset(rtree, 0, sizeof(rtree_t)); /* Clear root. */ + } +#else + assert(zeroed); +#endif + + if (malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE, + malloc_mutex_rank_exclusive)) { + return true; + } + + return false; } -/* Only the most significant bits of keys passed to rtree_[gs]et() are used. */ -bool -rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc, - rtree_node_dalloc_t *dalloc) -{ - unsigned bits_in_leaf, height, i; - - assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3)); - - bits_in_leaf = (bits % RTREE_BITS_PER_LEVEL) == 0 ? RTREE_BITS_PER_LEVEL - : (bits % RTREE_BITS_PER_LEVEL); - if (bits > bits_in_leaf) { - height = 1 + (bits - bits_in_leaf) / RTREE_BITS_PER_LEVEL; - if ((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf != bits) - height++; - } else - height = 1; - assert((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf == bits); - - rtree->alloc = alloc; - rtree->dalloc = dalloc; - rtree->height = height; - - /* Root level. */ - rtree->levels[0].subtree = NULL; - rtree->levels[0].bits = (height > 1) ? RTREE_BITS_PER_LEVEL : - bits_in_leaf; - rtree->levels[0].cumbits = rtree->levels[0].bits; - /* Interior levels. */ - for (i = 1; i < height-1; i++) { - rtree->levels[i].subtree = NULL; - rtree->levels[i].bits = RTREE_BITS_PER_LEVEL; - rtree->levels[i].cumbits = rtree->levels[i-1].cumbits + - RTREE_BITS_PER_LEVEL; - } - /* Leaf level. */ - if (height > 1) { - rtree->levels[height-1].subtree = NULL; - rtree->levels[height-1].bits = bits_in_leaf; - rtree->levels[height-1].cumbits = bits; - } - - /* Compute lookup table to be used by rtree_start_level(). */ - for (i = 0; i < RTREE_HEIGHT_MAX; i++) { - rtree->start_level[i] = hmin(RTREE_HEIGHT_MAX - 1 - i, height - - 1); - } - - return (false); +static rtree_node_elm_t * +rtree_node_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { + return (rtree_node_elm_t *)base_alloc(tsdn, b0get(), nelms * + sizeof(rtree_node_elm_t), CACHELINE); } +rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc = rtree_node_alloc_impl; static void -rtree_delete_subtree(rtree_t *rtree, rtree_node_elm_t *node, unsigned level) -{ +rtree_node_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *node) { + /* Nodes are never deleted during normal operation. */ + not_reached(); +} +UNUSED rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc = + rtree_node_dalloc_impl; - if (level + 1 < rtree->height) { - size_t nchildren, i; +static rtree_leaf_elm_t * +rtree_leaf_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { + return (rtree_leaf_elm_t *)base_alloc(tsdn, b0get(), nelms * + sizeof(rtree_leaf_elm_t), CACHELINE); +} +rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc = rtree_leaf_alloc_impl; - nchildren = ZU(1) << rtree->levels[level].bits; - for (i = 0; i < nchildren; i++) { - rtree_node_elm_t *child = node[i].child; - if (child != NULL) - rtree_delete_subtree(rtree, child, level + 1); +static void +rtree_leaf_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *leaf) { + /* Leaves are never deleted during normal operation. 
*/ + not_reached(); +} +UNUSED rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc = + rtree_leaf_dalloc_impl; + +#ifdef JEMALLOC_JET +# if RTREE_HEIGHT > 1 +static void +rtree_delete_subtree(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *subtree, + unsigned level) { + size_t nchildren = ZU(1) << rtree_levels[level].bits; + if (level + 2 < RTREE_HEIGHT) { + for (size_t i = 0; i < nchildren; i++) { + rtree_node_elm_t *node = + (rtree_node_elm_t *)atomic_load_p(&subtree[i].child, + ATOMIC_RELAXED); + if (node != NULL) { + rtree_delete_subtree(tsdn, rtree, node, level + + 1); + } } + } else { + for (size_t i = 0; i < nchildren; i++) { + rtree_leaf_elm_t *leaf = + (rtree_leaf_elm_t *)atomic_load_p(&subtree[i].child, + ATOMIC_RELAXED); + if (leaf != NULL) { + rtree_leaf_dalloc(tsdn, rtree, leaf); + } + } + } + + if (subtree != rtree->root) { + rtree_node_dalloc(tsdn, rtree, subtree); } - rtree->dalloc(node); } +# endif void -rtree_delete(rtree_t *rtree) -{ - unsigned i; +rtree_delete(tsdn_t *tsdn, rtree_t *rtree) { +# if RTREE_HEIGHT > 1 + rtree_delete_subtree(tsdn, rtree, rtree->root, 0); +# endif +} +#endif + +static rtree_node_elm_t * +rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level, + atomic_p_t *elmp) { + malloc_mutex_lock(tsdn, &rtree->init_lock); + /* + * If *elmp is non-null, then it was initialized with the init lock + * held, so we can get by with 'relaxed' here. + */ + rtree_node_elm_t *node = atomic_load_p(elmp, ATOMIC_RELAXED); + if (node == NULL) { + node = rtree_node_alloc(tsdn, rtree, ZU(1) << + rtree_levels[level].bits); + if (node == NULL) { + malloc_mutex_unlock(tsdn, &rtree->init_lock); + return NULL; + } + /* + * Even though we hold the lock, a later reader might not; we + * need release semantics. + */ + atomic_store_p(elmp, node, ATOMIC_RELEASE); + } + malloc_mutex_unlock(tsdn, &rtree->init_lock); + + return node; +} - for (i = 0; i < rtree->height; i++) { - rtree_node_elm_t *subtree = rtree->levels[i].subtree; - if (subtree != NULL) - rtree_delete_subtree(rtree, subtree, i); +static rtree_leaf_elm_t * +rtree_leaf_init(tsdn_t *tsdn, rtree_t *rtree, atomic_p_t *elmp) { + malloc_mutex_lock(tsdn, &rtree->init_lock); + /* + * If *elmp is non-null, then it was initialized with the init lock + * held, so we can get by with 'relaxed' here. + */ + rtree_leaf_elm_t *leaf = atomic_load_p(elmp, ATOMIC_RELAXED); + if (leaf == NULL) { + leaf = rtree_leaf_alloc(tsdn, rtree, ZU(1) << + rtree_levels[RTREE_HEIGHT-1].bits); + if (leaf == NULL) { + malloc_mutex_unlock(tsdn, &rtree->init_lock); + return NULL; + } + /* + * Even though we hold the lock, a later reader might not; we + * need release semantics. + */ + atomic_store_p(elmp, leaf, ATOMIC_RELEASE); } + malloc_mutex_unlock(tsdn, &rtree->init_lock); + + return leaf; +} + +static bool +rtree_node_valid(rtree_node_elm_t *node) { + return ((uintptr_t)node != (uintptr_t)0); +} + +static bool +rtree_leaf_valid(rtree_leaf_elm_t *leaf) { + return ((uintptr_t)leaf != (uintptr_t)0); } static rtree_node_elm_t * -rtree_node_init(rtree_t *rtree, unsigned level, rtree_node_elm_t **elmp) -{ +rtree_child_node_tryread(rtree_node_elm_t *elm, bool dependent) { rtree_node_elm_t *node; - if (atomic_cas_p((void **)elmp, NULL, RTREE_NODE_INITIALIZING)) { - /* - * Another thread is already in the process of initializing. - * Spin-wait until initialization is complete. 
- */ - do { - CPU_SPINWAIT; - node = atomic_read_p((void **)elmp); - } while (node == RTREE_NODE_INITIALIZING); + if (dependent) { + node = (rtree_node_elm_t *)atomic_load_p(&elm->child, + ATOMIC_RELAXED); + } else { + node = (rtree_node_elm_t *)atomic_load_p(&elm->child, + ATOMIC_ACQUIRE); + } + + assert(!dependent || node != NULL); + return node; +} + +static rtree_node_elm_t * +rtree_child_node_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm, + unsigned level, bool dependent) { + rtree_node_elm_t *node; + + node = rtree_child_node_tryread(elm, dependent); + if (!dependent && unlikely(!rtree_node_valid(node))) { + node = rtree_node_init(tsdn, rtree, level + 1, &elm->child); + } + assert(!dependent || node != NULL); + return node; +} + +static rtree_leaf_elm_t * +rtree_child_leaf_tryread(rtree_node_elm_t *elm, bool dependent) { + rtree_leaf_elm_t *leaf; + + if (dependent) { + leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child, + ATOMIC_RELAXED); } else { - node = rtree->alloc(ZU(1) << rtree->levels[level].bits); - if (node == NULL) - return (NULL); - atomic_write_p((void **)elmp, node); + leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child, + ATOMIC_ACQUIRE); } - return (node); + assert(!dependent || leaf != NULL); + return leaf; } -rtree_node_elm_t * -rtree_subtree_read_hard(rtree_t *rtree, unsigned level) -{ +static rtree_leaf_elm_t * +rtree_child_leaf_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm, + unsigned level, bool dependent) { + rtree_leaf_elm_t *leaf; - return (rtree_node_init(rtree, level, &rtree->levels[level].subtree)); + leaf = rtree_child_leaf_tryread(elm, dependent); + if (!dependent && unlikely(!rtree_leaf_valid(leaf))) { + leaf = rtree_leaf_init(tsdn, rtree, &elm->child); + } + assert(!dependent || leaf != NULL); + return leaf; } -rtree_node_elm_t * -rtree_child_read_hard(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level) -{ +rtree_leaf_elm_t * +rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, bool dependent, bool init_missing) { + rtree_node_elm_t *node; + rtree_leaf_elm_t *leaf; +#if RTREE_HEIGHT > 1 + node = rtree->root; +#else + leaf = rtree->root; +#endif + + if (config_debug) { + uintptr_t leafkey = rtree_leafkey(key); + for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) { + assert(rtree_ctx->cache[i].leafkey != leafkey); + } + for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) { + assert(rtree_ctx->l2_cache[i].leafkey != leafkey); + } + } + +#define RTREE_GET_CHILD(level) { \ + assert(level < RTREE_HEIGHT-1); \ + if (level != 0 && !dependent && \ + unlikely(!rtree_node_valid(node))) { \ + return NULL; \ + } \ + uintptr_t subkey = rtree_subkey(key, level); \ + if (level + 2 < RTREE_HEIGHT) { \ + node = init_missing ? \ + rtree_child_node_read(tsdn, rtree, \ + &node[subkey], level, dependent) : \ + rtree_child_node_tryread(&node[subkey], \ + dependent); \ + } else { \ + leaf = init_missing ? \ + rtree_child_leaf_read(tsdn, rtree, \ + &node[subkey], level, dependent) : \ + rtree_child_leaf_tryread(&node[subkey], \ + dependent); \ + } \ + } + /* + * Cache replacement upon hard lookup (i.e. L1 & L2 rtree cache miss): + * (1) evict last entry in L2 cache; (2) move the collision slot from L1 + * cache down to L2; and 3) fill L1. 
+ */ +#define RTREE_GET_LEAF(level) { \ + assert(level == RTREE_HEIGHT-1); \ + if (!dependent && unlikely(!rtree_leaf_valid(leaf))) { \ + return NULL; \ + } \ + if (RTREE_CTX_NCACHE_L2 > 1) { \ + memmove(&rtree_ctx->l2_cache[1], \ + &rtree_ctx->l2_cache[0], \ + sizeof(rtree_ctx_cache_elm_t) * \ + (RTREE_CTX_NCACHE_L2 - 1)); \ + } \ + size_t slot = rtree_cache_direct_map(key); \ + rtree_ctx->l2_cache[0].leafkey = \ + rtree_ctx->cache[slot].leafkey; \ + rtree_ctx->l2_cache[0].leaf = \ + rtree_ctx->cache[slot].leaf; \ + uintptr_t leafkey = rtree_leafkey(key); \ + rtree_ctx->cache[slot].leafkey = leafkey; \ + rtree_ctx->cache[slot].leaf = leaf; \ + uintptr_t subkey = rtree_subkey(key, level); \ + return &leaf[subkey]; \ + } + if (RTREE_HEIGHT > 1) { + RTREE_GET_CHILD(0) + } + if (RTREE_HEIGHT > 2) { + RTREE_GET_CHILD(1) + } + if (RTREE_HEIGHT > 3) { + for (unsigned i = 2; i < RTREE_HEIGHT-1; i++) { + RTREE_GET_CHILD(i) + } + } + RTREE_GET_LEAF(RTREE_HEIGHT-1) +#undef RTREE_GET_CHILD +#undef RTREE_GET_LEAF + not_reached(); +} - return (rtree_node_init(rtree, level, &elm->child)); +void +rtree_ctx_data_init(rtree_ctx_t *ctx) { + for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) { + rtree_ctx_cache_elm_t *cache = &ctx->cache[i]; + cache->leafkey = RTREE_LEAFKEY_INVALID; + cache->leaf = NULL; + } + for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) { + rtree_ctx_cache_elm_t *cache = &ctx->l2_cache[i]; + cache->leafkey = RTREE_LEAFKEY_INVALID; + cache->leaf = NULL; + } } diff --git a/deps/jemalloc/src/stats.c b/deps/jemalloc/src/stats.c index 154c3e74c..08b9507cf 100644 --- a/deps/jemalloc/src/stats.c +++ b/deps/jemalloc/src/stats.c @@ -1,374 +1,1235 @@ -#define JEMALLOC_STATS_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_STATS_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" -#define CTL_GET(n, v, t) do { \ +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/emitter.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/mutex_prof.h" + +const char *global_mutex_names[mutex_prof_num_global_mutexes] = { +#define OP(mtx) #mtx, + MUTEX_PROF_GLOBAL_MUTEXES +#undef OP +}; + +const char *arena_mutex_names[mutex_prof_num_arena_mutexes] = { +#define OP(mtx) #mtx, + MUTEX_PROF_ARENA_MUTEXES +#undef OP +}; + +#define CTL_GET(n, v, t) do { \ size_t sz = sizeof(t); \ - xmallctl(n, v, &sz, NULL, 0); \ + xmallctl(n, (void *)v, &sz, NULL, 0); \ } while (0) -#define CTL_M2_GET(n, i, v, t) do { \ - size_t mib[6]; \ +#define CTL_M2_GET(n, i, v, t) do { \ + size_t mib[CTL_MAX_DEPTH]; \ size_t miblen = sizeof(mib) / sizeof(size_t); \ size_t sz = sizeof(t); \ xmallctlnametomib(n, mib, &miblen); \ mib[2] = (i); \ - xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ + xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ } while (0) -#define CTL_M2_M4_GET(n, i, j, v, t) do { \ - size_t mib[6]; \ +#define CTL_M2_M4_GET(n, i, j, v, t) do { \ + size_t mib[CTL_MAX_DEPTH]; \ size_t miblen = sizeof(mib) / sizeof(size_t); \ size_t sz = sizeof(t); \ xmallctlnametomib(n, mib, &miblen); \ mib[2] = (i); \ mib[4] = (j); \ - xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ + xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ } while (0) /******************************************************************************/ /* Data. 
*/ -bool opt_stats_print = false; - -size_t stats_cactive = 0; +bool opt_stats_print = false; +char opt_stats_print_opts[stats_print_tot_num_options+1] = ""; /******************************************************************************/ -/* Function prototypes for non-inline static functions. */ -static void stats_arena_bins_print(void (*write_cb)(void *, const char *), - void *cbopaque, unsigned i); -static void stats_arena_lruns_print(void (*write_cb)(void *, const char *), - void *cbopaque, unsigned i); -static void stats_arena_hchunks_print( - void (*write_cb)(void *, const char *), void *cbopaque, unsigned i); -static void stats_arena_print(void (*write_cb)(void *, const char *), - void *cbopaque, unsigned i, bool bins, bool large, bool huge); +/* Calculate x.yyy and output a string (takes a fixed sized char array). */ +static bool +get_rate_str(uint64_t dividend, uint64_t divisor, char str[6]) { + if (divisor == 0 || dividend > divisor) { + /* The rate is not supposed to be greater than 1. */ + return true; + } + if (dividend > 0) { + assert(UINT64_MAX / dividend >= 1000); + } + + unsigned n = (unsigned)((dividend * 1000) / divisor); + if (n < 10) { + malloc_snprintf(str, 6, "0.00%u", n); + } else if (n < 100) { + malloc_snprintf(str, 6, "0.0%u", n); + } else if (n < 1000) { + malloc_snprintf(str, 6, "0.%u", n); + } else { + malloc_snprintf(str, 6, "1"); + } -/******************************************************************************/ + return false; +} + +#define MUTEX_CTL_STR_MAX_LENGTH 128 +static void +gen_mutex_ctl_str(char *str, size_t buf_len, const char *prefix, + const char *mutex, const char *counter) { + malloc_snprintf(str, buf_len, "stats.%s.%s.%s", prefix, mutex, counter); +} + +static void +mutex_stats_init_cols(emitter_row_t *row, const char *table_name, + emitter_col_t *name, + emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], + emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters]) { + mutex_prof_uint64_t_counter_ind_t k_uint64_t = 0; + mutex_prof_uint32_t_counter_ind_t k_uint32_t = 0; + + emitter_col_t *col; + + if (name != NULL) { + emitter_col_init(name, row); + name->justify = emitter_justify_left; + name->width = 21; + name->type = emitter_type_title; + name->str_val = table_name; + } + +#define WIDTH_uint32_t 12 +#define WIDTH_uint64_t 16 +#define OP(counter, counter_type, human) \ + col = &col_##counter_type[k_##counter_type]; \ + ++k_##counter_type; \ + emitter_col_init(col, row); \ + col->justify = emitter_justify_right; \ + col->width = WIDTH_##counter_type; \ + col->type = emitter_type_title; \ + col->str_val = human; + MUTEX_PROF_COUNTERS +#undef OP +#undef WIDTH_uint32_t +#undef WIDTH_uint64_t +} + +static void +mutex_stats_read_global(const char *name, emitter_col_t *col_name, + emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], + emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters]) { + char cmd[MUTEX_CTL_STR_MAX_LENGTH]; + + col_name->str_val = name; + + emitter_col_t *dst; +#define EMITTER_TYPE_uint32_t emitter_type_uint32 +#define EMITTER_TYPE_uint64_t emitter_type_uint64 +#define OP(counter, counter_type, human) \ + dst = &col_##counter_type[mutex_counter_##counter]; \ + dst->type = EMITTER_TYPE_##counter_type; \ + gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \ + "mutexes", name, #counter); \ + CTL_GET(cmd, (counter_type *)&dst->bool_val, counter_type); + MUTEX_PROF_COUNTERS +#undef OP +#undef EMITTER_TYPE_uint32_t +#undef EMITTER_TYPE_uint64_t +} static void -stats_arena_bins_print(void 
(*write_cb)(void *, const char *), void *cbopaque, - unsigned i) -{ +mutex_stats_read_arena(unsigned arena_ind, mutex_prof_arena_ind_t mutex_ind, + const char *name, emitter_col_t *col_name, + emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], + emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters]) { + char cmd[MUTEX_CTL_STR_MAX_LENGTH]; + + col_name->str_val = name; + + emitter_col_t *dst; +#define EMITTER_TYPE_uint32_t emitter_type_uint32 +#define EMITTER_TYPE_uint64_t emitter_type_uint64 +#define OP(counter, counter_type, human) \ + dst = &col_##counter_type[mutex_counter_##counter]; \ + dst->type = EMITTER_TYPE_##counter_type; \ + gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \ + "arenas.0.mutexes", arena_mutex_names[mutex_ind], #counter);\ + CTL_M2_GET(cmd, arena_ind, \ + (counter_type *)&dst->bool_val, counter_type); + MUTEX_PROF_COUNTERS +#undef OP +#undef EMITTER_TYPE_uint32_t +#undef EMITTER_TYPE_uint64_t +} + +static void +mutex_stats_read_arena_bin(unsigned arena_ind, unsigned bin_ind, + emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], + emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters]) { + char cmd[MUTEX_CTL_STR_MAX_LENGTH]; + emitter_col_t *dst; + +#define EMITTER_TYPE_uint32_t emitter_type_uint32 +#define EMITTER_TYPE_uint64_t emitter_type_uint64 +#define OP(counter, counter_type, human) \ + dst = &col_##counter_type[mutex_counter_##counter]; \ + dst->type = EMITTER_TYPE_##counter_type; \ + gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \ + "arenas.0.bins.0","mutex", #counter); \ + CTL_M2_M4_GET(cmd, arena_ind, bin_ind, \ + (counter_type *)&dst->bool_val, counter_type); + MUTEX_PROF_COUNTERS +#undef OP +#undef EMITTER_TYPE_uint32_t +#undef EMITTER_TYPE_uint64_t +} + +/* "row" can be NULL to avoid emitting in table mode. 
*/ +static void +mutex_stats_emit(emitter_t *emitter, emitter_row_t *row, + emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], + emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters]) { + if (row != NULL) { + emitter_table_row(emitter, row); + } + + mutex_prof_uint64_t_counter_ind_t k_uint64_t = 0; + mutex_prof_uint32_t_counter_ind_t k_uint32_t = 0; + + emitter_col_t *col; + +#define EMITTER_TYPE_uint32_t emitter_type_uint32 +#define EMITTER_TYPE_uint64_t emitter_type_uint64 +#define OP(counter, type, human) \ + col = &col_##type[k_##type]; \ + ++k_##type; \ + emitter_json_kv(emitter, #counter, EMITTER_TYPE_##type, \ + (const void *)&col->bool_val); + MUTEX_PROF_COUNTERS; +#undef OP +#undef EMITTER_TYPE_uint32_t +#undef EMITTER_TYPE_uint64_t +} + +static void +stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i) { size_t page; - bool config_tcache, in_gap; + bool in_gap, in_gap_prev; unsigned nbins, j; CTL_GET("arenas.page", &page, size_t); - CTL_GET("config.tcache", &config_tcache, bool); - if (config_tcache) { - malloc_cprintf(write_cb, cbopaque, - "bins: size ind allocated nmalloc" - " ndalloc nrequests curregs curruns regs" - " pgs util nfills nflushes newruns" - " reruns\n"); - } else { - malloc_cprintf(write_cb, cbopaque, - "bins: size ind allocated nmalloc" - " ndalloc nrequests curregs curruns regs" - " pgs util newruns reruns\n"); - } CTL_GET("arenas.nbins", &nbins, unsigned); + + emitter_row_t header_row; + emitter_row_init(&header_row); + + emitter_row_t row; + emitter_row_init(&row); +#define COL(name, left_or_right, col_width, etype) \ + emitter_col_t col_##name; \ + emitter_col_init(&col_##name, &row); \ + col_##name.justify = emitter_justify_##left_or_right; \ + col_##name.width = col_width; \ + col_##name.type = emitter_type_##etype; \ + emitter_col_t header_col_##name; \ + emitter_col_init(&header_col_##name, &header_row); \ + header_col_##name.justify = emitter_justify_##left_or_right; \ + header_col_##name.width = col_width; \ + header_col_##name.type = emitter_type_title; \ + header_col_##name.str_val = #name; + + COL(size, right, 20, size) + COL(ind, right, 4, unsigned) + COL(allocated, right, 13, uint64) + COL(nmalloc, right, 13, uint64) + COL(ndalloc, right, 13, uint64) + COL(nrequests, right, 13, uint64) + COL(curregs, right, 13, size) + COL(curslabs, right, 13, size) + COL(regs, right, 5, unsigned) + COL(pgs, right, 4, size) + /* To buffer a right- and left-justified column. */ + COL(justify_spacer, right, 1, title) + COL(util, right, 6, title) + COL(nfills, right, 13, uint64) + COL(nflushes, right, 13, uint64) + COL(nslabs, right, 13, uint64) + COL(nreslabs, right, 13, uint64) +#undef COL + + /* Don't want to actually print the name. */ + header_col_justify_spacer.str_val = " "; + col_justify_spacer.str_val = " "; + + + emitter_col_t col_mutex64[mutex_prof_num_uint64_t_counters]; + emitter_col_t col_mutex32[mutex_prof_num_uint32_t_counters]; + + emitter_col_t header_mutex64[mutex_prof_num_uint64_t_counters]; + emitter_col_t header_mutex32[mutex_prof_num_uint32_t_counters]; + + if (mutex) { + mutex_stats_init_cols(&row, NULL, NULL, col_mutex64, + col_mutex32); + mutex_stats_init_cols(&header_row, NULL, NULL, header_mutex64, + header_mutex32); + } + + /* + * We print a "bins:" header as part of the table row; we need to adjust + * the header size column to compensate. 
+ */ + header_col_size.width -=5; + emitter_table_printf(emitter, "bins:"); + emitter_table_row(emitter, &header_row); + emitter_json_arr_begin(emitter, "bins"); + for (j = 0, in_gap = false; j < nbins; j++) { - uint64_t nruns; + uint64_t nslabs; + size_t reg_size, slab_size, curregs; + size_t curslabs; + uint32_t nregs; + uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes; + uint64_t nreslabs; - CTL_M2_M4_GET("stats.arenas.0.bins.0.nruns", i, j, &nruns, + CTL_M2_M4_GET("stats.arenas.0.bins.0.nslabs", i, j, &nslabs, uint64_t); - if (nruns == 0) - in_gap = true; - else { - size_t reg_size, run_size, curregs, availregs, milli; - size_t curruns; - uint32_t nregs; - uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes; - uint64_t reruns; - char util[6]; /* "x.yyy". */ - - if (in_gap) { - malloc_cprintf(write_cb, cbopaque, - " ---\n"); - in_gap = false; - } - CTL_M2_GET("arenas.bin.0.size", j, ®_size, size_t); - CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t); - CTL_M2_GET("arenas.bin.0.run_size", j, &run_size, - size_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, - &nmalloc, uint64_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, - &ndalloc, uint64_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, - &curregs, size_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j, - &nrequests, uint64_t); - if (config_tcache) { - CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, - j, &nfills, uint64_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", - i, j, &nflushes, uint64_t); - } - CTL_M2_M4_GET("stats.arenas.0.bins.0.nreruns", i, j, - &reruns, uint64_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.curruns", i, j, - &curruns, size_t); - - availregs = nregs * curruns; - milli = (availregs != 0) ? (1000 * curregs) / availregs - : 1000; - assert(milli <= 1000); - if (milli < 10) { - malloc_snprintf(util, sizeof(util), - "0.00%zu", milli); - } else if (milli < 100) { - malloc_snprintf(util, sizeof(util), "0.0%zu", - milli); - } else if (milli < 1000) { - malloc_snprintf(util, sizeof(util), "0.%zu", - milli); - } else - malloc_snprintf(util, sizeof(util), "1"); + in_gap_prev = in_gap; + in_gap = (nslabs == 0); - if (config_tcache) { - malloc_cprintf(write_cb, cbopaque, - "%20zu %3u %12zu %12"FMTu64 - " %12"FMTu64" %12"FMTu64" %12zu" - " %12zu %4u %3zu %-5s %12"FMTu64 - " %12"FMTu64" %12"FMTu64" %12"FMTu64"\n", - reg_size, j, curregs * reg_size, nmalloc, - ndalloc, nrequests, curregs, curruns, nregs, - run_size / page, util, nfills, nflushes, - nruns, reruns); - } else { - malloc_cprintf(write_cb, cbopaque, - "%20zu %3u %12zu %12"FMTu64 - " %12"FMTu64" %12"FMTu64" %12zu" - " %12zu %4u %3zu %-5s %12"FMTu64 - " %12"FMTu64"\n", - reg_size, j, curregs * reg_size, nmalloc, - ndalloc, nrequests, curregs, curruns, nregs, - run_size / page, util, nruns, reruns); - } + if (in_gap_prev && !in_gap) { + emitter_table_printf(emitter, + " ---\n"); } - } - if (in_gap) { - malloc_cprintf(write_cb, cbopaque, - " ---\n"); - } -} -static void -stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque, - unsigned i) -{ - unsigned nbins, nlruns, j; - bool in_gap; - - malloc_cprintf(write_cb, cbopaque, - "large: size ind allocated nmalloc ndalloc" - " nrequests curruns\n"); - CTL_GET("arenas.nbins", &nbins, unsigned); - CTL_GET("arenas.nlruns", &nlruns, unsigned); - for (j = 0, in_gap = false; j < nlruns; j++) { - uint64_t nmalloc, ndalloc, nrequests; - size_t run_size, curruns; + CTL_M2_GET("arenas.bin.0.size", j, ®_size, size_t); + 
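/*
 * Editorial aside (not part of the patch): the "util" column produced by this
 * loop is curregs / (nregs * curslabs), rendered as a fixed-point "x.yyy"
 * string without floating point; get_rate_str() earlier in this file performs
 * the same truncating arithmetic. A minimal standalone sketch of that
 * computation follows; bin_util_str() is a hypothetical helper name used only
 * for illustration.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void
bin_util_str(size_t curregs, uint32_t nregs, size_t curslabs, char out[6]) {
	size_t availregs = (size_t)nregs * curslabs;

	/* No slabs allocated: report full utilization, as the caller does. */
	if (availregs == 0) {
		snprintf(out, 6, "1");
		return;
	}
	/*
	 * Thousandths of utilization, truncated (e.g. 337 -> "0.337"). The
	 * patched code additionally flags curregs > availregs, which can only
	 * happen when the two counters raced between separate mallctl reads.
	 */
	unsigned milli = (unsigned)(((uint64_t)curregs * 1000) / availregs);
	if (milli >= 1000) {
		snprintf(out, 6, "1");
	} else {
		snprintf(out, 6, "0.%03u", milli);
	}
}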
CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t); + CTL_M2_GET("arenas.bin.0.slab_size", j, &slab_size, size_t); - CTL_M2_M4_GET("stats.arenas.0.lruns.0.nmalloc", i, j, &nmalloc, + CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, &nmalloc, uint64_t); - CTL_M2_M4_GET("stats.arenas.0.lruns.0.ndalloc", i, j, &ndalloc, + CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, &ndalloc, uint64_t); - CTL_M2_M4_GET("stats.arenas.0.lruns.0.nrequests", i, j, + CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, &curregs, + size_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j, &nrequests, uint64_t); - if (nrequests == 0) - in_gap = true; - else { - CTL_M2_GET("arenas.lrun.0.size", j, &run_size, size_t); - CTL_M2_M4_GET("stats.arenas.0.lruns.0.curruns", i, j, - &curruns, size_t); - if (in_gap) { - malloc_cprintf(write_cb, cbopaque, - " ---\n"); - in_gap = false; + CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j, &nfills, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j, &nflushes, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nreslabs", i, j, &nreslabs, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.curslabs", i, j, &curslabs, + size_t); + + if (mutex) { + mutex_stats_read_arena_bin(i, j, col_mutex64, + col_mutex32); + } + + emitter_json_arr_obj_begin(emitter); + emitter_json_kv(emitter, "nmalloc", emitter_type_uint64, + &nmalloc); + emitter_json_kv(emitter, "ndalloc", emitter_type_uint64, + &ndalloc); + emitter_json_kv(emitter, "curregs", emitter_type_size, + &curregs); + emitter_json_kv(emitter, "nrequests", emitter_type_uint64, + &nrequests); + emitter_json_kv(emitter, "nfills", emitter_type_uint64, + &nfills); + emitter_json_kv(emitter, "nflushes", emitter_type_uint64, + &nflushes); + emitter_json_kv(emitter, "nreslabs", emitter_type_uint64, + &nreslabs); + emitter_json_kv(emitter, "curslabs", emitter_type_size, + &curslabs); + if (mutex) { + emitter_json_dict_begin(emitter, "mutex"); + mutex_stats_emit(emitter, NULL, col_mutex64, + col_mutex32); + emitter_json_dict_end(emitter); + } + emitter_json_arr_obj_end(emitter); + + size_t availregs = nregs * curslabs; + char util[6]; + if (get_rate_str((uint64_t)curregs, (uint64_t)availregs, util)) + { + if (availregs == 0) { + malloc_snprintf(util, sizeof(util), "1"); + } else if (curregs > availregs) { + /* + * Race detected: the counters were read in + * separate mallctl calls and concurrent + * operations happened in between. In this case + * no meaningful utilization can be computed. + */ + malloc_snprintf(util, sizeof(util), " race"); + } else { + not_reached(); } - malloc_cprintf(write_cb, cbopaque, - "%20zu %3u %12zu %12"FMTu64" %12"FMTu64 - " %12"FMTu64" %12zu\n", - run_size, nbins + j, curruns * run_size, nmalloc, - ndalloc, nrequests, curruns); } + + col_size.size_val = reg_size; + col_ind.unsigned_val = j; + col_allocated.size_val = curregs * reg_size; + col_nmalloc.uint64_val = nmalloc; + col_ndalloc.uint64_val = ndalloc; + col_nrequests.uint64_val = nrequests; + col_curregs.size_val = curregs; + col_curslabs.size_val = curslabs; + col_regs.unsigned_val = nregs; + col_pgs.size_val = slab_size / page; + col_util.str_val = util; + col_nfills.uint64_val = nfills; + col_nflushes.uint64_val = nflushes; + col_nslabs.uint64_val = nslabs; + col_nreslabs.uint64_val = nreslabs; + + /* + * Note that mutex columns were initialized above, if mutex == + * true. + */ + + emitter_table_row(emitter, &row); } + emitter_json_arr_end(emitter); /* Close "bins". 
*/ + if (in_gap) { - malloc_cprintf(write_cb, cbopaque, - " ---\n"); + emitter_table_printf(emitter, " ---\n"); } } static void -stats_arena_hchunks_print(void (*write_cb)(void *, const char *), - void *cbopaque, unsigned i) -{ - unsigned nbins, nlruns, nhchunks, j; - bool in_gap; - - malloc_cprintf(write_cb, cbopaque, - "huge: size ind allocated nmalloc ndalloc" - " nrequests curhchunks\n"); +stats_arena_lextents_print(emitter_t *emitter, unsigned i) { + unsigned nbins, nlextents, j; + bool in_gap, in_gap_prev; + CTL_GET("arenas.nbins", &nbins, unsigned); - CTL_GET("arenas.nlruns", &nlruns, unsigned); - CTL_GET("arenas.nhchunks", &nhchunks, unsigned); - for (j = 0, in_gap = false; j < nhchunks; j++) { + CTL_GET("arenas.nlextents", &nlextents, unsigned); + + emitter_row_t header_row; + emitter_row_init(&header_row); + emitter_row_t row; + emitter_row_init(&row); + +#define COL(name, left_or_right, col_width, etype) \ + emitter_col_t header_##name; \ + emitter_col_init(&header_##name, &header_row); \ + header_##name.justify = emitter_justify_##left_or_right; \ + header_##name.width = col_width; \ + header_##name.type = emitter_type_title; \ + header_##name.str_val = #name; \ + \ + emitter_col_t col_##name; \ + emitter_col_init(&col_##name, &row); \ + col_##name.justify = emitter_justify_##left_or_right; \ + col_##name.width = col_width; \ + col_##name.type = emitter_type_##etype; + + COL(size, right, 20, size) + COL(ind, right, 4, unsigned) + COL(allocated, right, 13, size) + COL(nmalloc, right, 13, uint64) + COL(ndalloc, right, 13, uint64) + COL(nrequests, right, 13, uint64) + COL(curlextents, right, 13, size) +#undef COL + + /* As with bins, we label the large extents table. */ + header_size.width -= 6; + emitter_table_printf(emitter, "large:"); + emitter_table_row(emitter, &header_row); + emitter_json_arr_begin(emitter, "lextents"); + + for (j = 0, in_gap = false; j < nlextents; j++) { uint64_t nmalloc, ndalloc, nrequests; - size_t hchunk_size, curhchunks; + size_t lextent_size, curlextents; - CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nmalloc", i, j, + CTL_M2_M4_GET("stats.arenas.0.lextents.0.nmalloc", i, j, &nmalloc, uint64_t); - CTL_M2_M4_GET("stats.arenas.0.hchunks.0.ndalloc", i, j, + CTL_M2_M4_GET("stats.arenas.0.lextents.0.ndalloc", i, j, &ndalloc, uint64_t); - CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nrequests", i, j, + CTL_M2_M4_GET("stats.arenas.0.lextents.0.nrequests", i, j, &nrequests, uint64_t); - if (nrequests == 0) - in_gap = true; - else { - CTL_M2_GET("arenas.hchunk.0.size", j, &hchunk_size, - size_t); - CTL_M2_M4_GET("stats.arenas.0.hchunks.0.curhchunks", i, - j, &curhchunks, size_t); - if (in_gap) { - malloc_cprintf(write_cb, cbopaque, - " ---\n"); - in_gap = false; - } - malloc_cprintf(write_cb, cbopaque, - "%20zu %3u %12zu %12"FMTu64" %12"FMTu64 - " %12"FMTu64" %12zu\n", - hchunk_size, nbins + nlruns + j, - curhchunks * hchunk_size, nmalloc, ndalloc, - nrequests, curhchunks); + in_gap_prev = in_gap; + in_gap = (nrequests == 0); + + if (in_gap_prev && !in_gap) { + emitter_table_printf(emitter, + " ---\n"); + } + + CTL_M2_GET("arenas.lextent.0.size", j, &lextent_size, size_t); + CTL_M2_M4_GET("stats.arenas.0.lextents.0.curlextents", i, j, + &curlextents, size_t); + + emitter_json_arr_obj_begin(emitter); + emitter_json_kv(emitter, "curlextents", emitter_type_size, + &curlextents); + emitter_json_arr_obj_end(emitter); + + col_size.size_val = lextent_size; + col_ind.unsigned_val = nbins + j; + col_allocated.size_val = curlextents * lextent_size; + col_nmalloc.uint64_val = 
nmalloc; + col_ndalloc.uint64_val = ndalloc; + col_nrequests.uint64_val = nrequests; + col_curlextents.size_val = curlextents; + + if (!in_gap) { + emitter_table_row(emitter, &row); } } + emitter_json_arr_end(emitter); /* Close "lextents". */ if (in_gap) { - malloc_cprintf(write_cb, cbopaque, - " ---\n"); + emitter_table_printf(emitter, " ---\n"); } } static void -stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque, - unsigned i, bool bins, bool large, bool huge) -{ +stats_arena_mutexes_print(emitter_t *emitter, unsigned arena_ind) { + emitter_row_t row; + emitter_col_t col_name; + emitter_col_t col64[mutex_prof_num_uint64_t_counters]; + emitter_col_t col32[mutex_prof_num_uint32_t_counters]; + + emitter_row_init(&row); + mutex_stats_init_cols(&row, "", &col_name, col64, col32); + + emitter_json_dict_begin(emitter, "mutexes"); + emitter_table_row(emitter, &row); + + for (mutex_prof_arena_ind_t i = 0; i < mutex_prof_num_arena_mutexes; + i++) { + const char *name = arena_mutex_names[i]; + emitter_json_dict_begin(emitter, name); + mutex_stats_read_arena(arena_ind, i, name, &col_name, col64, + col32); + mutex_stats_emit(emitter, &row, col64, col32); + emitter_json_dict_end(emitter); /* Close the mutex dict. */ + } + emitter_json_dict_end(emitter); /* End "mutexes". */ +} + +static void +stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large, + bool mutex) { unsigned nthreads; const char *dss; - ssize_t lg_dirty_mult; - size_t page, pactive, pdirty, mapped; - size_t metadata_mapped, metadata_allocated; - uint64_t npurge, nmadvise, purged; + ssize_t dirty_decay_ms, muzzy_decay_ms; + size_t page, pactive, pdirty, pmuzzy, mapped, retained; + size_t base, internal, resident, metadata_thp; + uint64_t dirty_npurge, dirty_nmadvise, dirty_purged; + uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged; size_t small_allocated; uint64_t small_nmalloc, small_ndalloc, small_nrequests; size_t large_allocated; uint64_t large_nmalloc, large_ndalloc, large_nrequests; - size_t huge_allocated; - uint64_t huge_nmalloc, huge_ndalloc, huge_nrequests; + size_t tcache_bytes; + uint64_t uptime; CTL_GET("arenas.page", &page, size_t); CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned); - malloc_cprintf(write_cb, cbopaque, - "assigned threads: %u\n", nthreads); + emitter_kv(emitter, "nthreads", "assigned threads", + emitter_type_unsigned, &nthreads); + + CTL_M2_GET("stats.arenas.0.uptime", i, &uptime, uint64_t); + emitter_kv(emitter, "uptime_ns", "uptime", emitter_type_uint64, + &uptime); + CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *); - malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n", - dss); - CTL_M2_GET("stats.arenas.0.lg_dirty_mult", i, &lg_dirty_mult, ssize_t); - if (lg_dirty_mult >= 0) { - malloc_cprintf(write_cb, cbopaque, - "min active:dirty page ratio: %u:1\n", - (1U << lg_dirty_mult)); - } else { - malloc_cprintf(write_cb, cbopaque, - "min active:dirty page ratio: N/A\n"); - } + emitter_kv(emitter, "dss", "dss allocation precedence", + emitter_type_string, &dss); + + CTL_M2_GET("stats.arenas.0.dirty_decay_ms", i, &dirty_decay_ms, + ssize_t); + CTL_M2_GET("stats.arenas.0.muzzy_decay_ms", i, &muzzy_decay_ms, + ssize_t); CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t); CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t); - CTL_M2_GET("stats.arenas.0.npurge", i, &npurge, uint64_t); - CTL_M2_GET("stats.arenas.0.nmadvise", i, &nmadvise, uint64_t); - CTL_M2_GET("stats.arenas.0.purged", i, &purged, uint64_t); - 
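/*
 * Editorial aside (not part of the patch): the CTL_M2_GET()/CTL_M2_M4_GET()
 * macros used throughout stats_arena_print() resolve a mallctl name template
 * once and then patch the arena (and bin) index into the resulting MIB before
 * each read. A minimal sketch of the same pattern against jemalloc's public
 * API; it assumes unprefixed symbol names (this tree builds them with the
 * je_ prefix, cf. je_mallctl below), and print_arena_dirty_purges() is a
 * hypothetical helper.
 */
#include <jemalloc/jemalloc.h>
#include <stdint.h>
#include <stdio.h>

static void
print_arena_dirty_purges(unsigned arena_ind) {
	uint64_t epoch = 1, npurge = 0;
	size_t sz = sizeof(epoch);
	size_t mib[8];
	size_t miblen = sizeof(mib) / sizeof(size_t);

	/* Advance the epoch so the stats.* namespace reflects current state. */
	mallctl("epoch", &epoch, &sz, &epoch, sz);

	/* Resolve the template once; "0" is a placeholder for the arena index. */
	if (mallctlnametomib("stats.arenas.0.dirty_npurge", mib, &miblen) != 0) {
		return;
	}
	mib[2] = arena_ind;	/* Patch the arena index, as CTL_M2_GET does. */

	sz = sizeof(npurge);
	if (mallctlbymib(mib, miblen, &npurge, &sz, NULL, 0) == 0) {
		printf("arena %u: %llu dirty purge sweeps\n", arena_ind,
		    (unsigned long long)npurge);
	}
}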
malloc_cprintf(write_cb, cbopaque, - "dirty pages: %zu:%zu active:dirty, %"FMTu64" sweep%s, %"FMTu64 - " madvise%s, %"FMTu64" purged\n", pactive, pdirty, npurge, npurge == - 1 ? "" : "s", nmadvise, nmadvise == 1 ? "" : "s", purged); - - malloc_cprintf(write_cb, cbopaque, - " allocated nmalloc ndalloc" - " nrequests\n"); - CTL_M2_GET("stats.arenas.0.small.allocated", i, &small_allocated, - size_t); - CTL_M2_GET("stats.arenas.0.small.nmalloc", i, &small_nmalloc, uint64_t); - CTL_M2_GET("stats.arenas.0.small.ndalloc", i, &small_ndalloc, uint64_t); - CTL_M2_GET("stats.arenas.0.small.nrequests", i, &small_nrequests, - uint64_t); - malloc_cprintf(write_cb, cbopaque, - "small: %12zu %12"FMTu64" %12"FMTu64 - " %12"FMTu64"\n", - small_allocated, small_nmalloc, small_ndalloc, small_nrequests); - CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated, - size_t); - CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t); - CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t); - CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests, + CTL_M2_GET("stats.arenas.0.pmuzzy", i, &pmuzzy, size_t); + CTL_M2_GET("stats.arenas.0.dirty_npurge", i, &dirty_npurge, uint64_t); + CTL_M2_GET("stats.arenas.0.dirty_nmadvise", i, &dirty_nmadvise, uint64_t); - malloc_cprintf(write_cb, cbopaque, - "large: %12zu %12"FMTu64" %12"FMTu64 - " %12"FMTu64"\n", - large_allocated, large_nmalloc, large_ndalloc, large_nrequests); - CTL_M2_GET("stats.arenas.0.huge.allocated", i, &huge_allocated, size_t); - CTL_M2_GET("stats.arenas.0.huge.nmalloc", i, &huge_nmalloc, uint64_t); - CTL_M2_GET("stats.arenas.0.huge.ndalloc", i, &huge_ndalloc, uint64_t); - CTL_M2_GET("stats.arenas.0.huge.nrequests", i, &huge_nrequests, + CTL_M2_GET("stats.arenas.0.dirty_purged", i, &dirty_purged, uint64_t); + CTL_M2_GET("stats.arenas.0.muzzy_npurge", i, &muzzy_npurge, uint64_t); + CTL_M2_GET("stats.arenas.0.muzzy_nmadvise", i, &muzzy_nmadvise, uint64_t); - malloc_cprintf(write_cb, cbopaque, - "huge: %12zu %12"FMTu64" %12"FMTu64 - " %12"FMTu64"\n", - huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests); - malloc_cprintf(write_cb, cbopaque, - "total: %12zu %12"FMTu64" %12"FMTu64 - " %12"FMTu64"\n", - small_allocated + large_allocated + huge_allocated, - small_nmalloc + large_nmalloc + huge_nmalloc, - small_ndalloc + large_ndalloc + huge_ndalloc, - small_nrequests + large_nrequests + huge_nrequests); - malloc_cprintf(write_cb, cbopaque, - "active: %12zu\n", pactive * page); - CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t); - malloc_cprintf(write_cb, cbopaque, - "mapped: %12zu\n", mapped); - CTL_M2_GET("stats.arenas.0.metadata.mapped", i, &metadata_mapped, - size_t); - CTL_M2_GET("stats.arenas.0.metadata.allocated", i, &metadata_allocated, - size_t); - malloc_cprintf(write_cb, cbopaque, - "metadata: mapped: %zu, allocated: %zu\n", - metadata_mapped, metadata_allocated); - - if (bins) - stats_arena_bins_print(write_cb, cbopaque, i); - if (large) - stats_arena_lruns_print(write_cb, cbopaque, i); - if (huge) - stats_arena_hchunks_print(write_cb, cbopaque, i); + CTL_M2_GET("stats.arenas.0.muzzy_purged", i, &muzzy_purged, uint64_t); + + emitter_row_t decay_row; + emitter_row_init(&decay_row); + + /* JSON-style emission. 
*/ + emitter_json_kv(emitter, "dirty_decay_ms", emitter_type_ssize, + &dirty_decay_ms); + emitter_json_kv(emitter, "muzzy_decay_ms", emitter_type_ssize, + &muzzy_decay_ms); + + emitter_json_kv(emitter, "pactive", emitter_type_size, &pactive); + emitter_json_kv(emitter, "pdirty", emitter_type_size, &pdirty); + emitter_json_kv(emitter, "pmuzzy", emitter_type_size, &pmuzzy); + + emitter_json_kv(emitter, "dirty_npurge", emitter_type_uint64, + &dirty_npurge); + emitter_json_kv(emitter, "dirty_nmadvise", emitter_type_uint64, + &dirty_nmadvise); + emitter_json_kv(emitter, "dirty_purged", emitter_type_uint64, + &dirty_purged); + + emitter_json_kv(emitter, "muzzy_npurge", emitter_type_uint64, + &muzzy_npurge); + emitter_json_kv(emitter, "muzzy_nmadvise", emitter_type_uint64, + &muzzy_nmadvise); + emitter_json_kv(emitter, "muzzy_purged", emitter_type_uint64, + &muzzy_purged); + + /* Table-style emission. */ + emitter_col_t decay_type; + emitter_col_init(&decay_type, &decay_row); + decay_type.justify = emitter_justify_right; + decay_type.width = 9; + decay_type.type = emitter_type_title; + decay_type.str_val = "decaying:"; + + emitter_col_t decay_time; + emitter_col_init(&decay_time, &decay_row); + decay_time.justify = emitter_justify_right; + decay_time.width = 6; + decay_time.type = emitter_type_title; + decay_time.str_val = "time"; + + emitter_col_t decay_npages; + emitter_col_init(&decay_npages, &decay_row); + decay_npages.justify = emitter_justify_right; + decay_npages.width = 13; + decay_npages.type = emitter_type_title; + decay_npages.str_val = "npages"; + + emitter_col_t decay_sweeps; + emitter_col_init(&decay_sweeps, &decay_row); + decay_sweeps.justify = emitter_justify_right; + decay_sweeps.width = 13; + decay_sweeps.type = emitter_type_title; + decay_sweeps.str_val = "sweeps"; + + emitter_col_t decay_madvises; + emitter_col_init(&decay_madvises, &decay_row); + decay_madvises.justify = emitter_justify_right; + decay_madvises.width = 13; + decay_madvises.type = emitter_type_title; + decay_madvises.str_val = "madvises"; + + emitter_col_t decay_purged; + emitter_col_init(&decay_purged, &decay_row); + decay_purged.justify = emitter_justify_right; + decay_purged.width = 13; + decay_purged.type = emitter_type_title; + decay_purged.str_val = "purged"; + + /* Title row. */ + emitter_table_row(emitter, &decay_row); + + /* Dirty row. */ + decay_type.str_val = "dirty:"; + + if (dirty_decay_ms >= 0) { + decay_time.type = emitter_type_ssize; + decay_time.ssize_val = dirty_decay_ms; + } else { + decay_time.type = emitter_type_title; + decay_time.str_val = "N/A"; + } + + decay_npages.type = emitter_type_size; + decay_npages.size_val = pdirty; + + decay_sweeps.type = emitter_type_uint64; + decay_sweeps.uint64_val = dirty_npurge; + + decay_madvises.type = emitter_type_uint64; + decay_madvises.uint64_val = dirty_nmadvise; + + decay_purged.type = emitter_type_uint64; + decay_purged.uint64_val = dirty_purged; + + emitter_table_row(emitter, &decay_row); + + /* Muzzy row. 
*/ + decay_type.str_val = "muzzy:"; + + if (muzzy_decay_ms >= 0) { + decay_time.type = emitter_type_ssize; + decay_time.ssize_val = muzzy_decay_ms; + } else { + decay_time.type = emitter_type_title; + decay_time.str_val = "N/A"; + } + + decay_npages.type = emitter_type_size; + decay_npages.size_val = pmuzzy; + + decay_sweeps.type = emitter_type_uint64; + decay_sweeps.uint64_val = muzzy_npurge; + + decay_madvises.type = emitter_type_uint64; + decay_madvises.uint64_val = muzzy_nmadvise; + + decay_purged.type = emitter_type_uint64; + decay_purged.uint64_val = muzzy_purged; + + emitter_table_row(emitter, &decay_row); + + /* Small / large / total allocation counts. */ + emitter_row_t alloc_count_row; + emitter_row_init(&alloc_count_row); + + emitter_col_t alloc_count_title; + emitter_col_init(&alloc_count_title, &alloc_count_row); + alloc_count_title.justify = emitter_justify_left; + alloc_count_title.width = 25; + alloc_count_title.type = emitter_type_title; + alloc_count_title.str_val = ""; + + emitter_col_t alloc_count_allocated; + emitter_col_init(&alloc_count_allocated, &alloc_count_row); + alloc_count_allocated.justify = emitter_justify_right; + alloc_count_allocated.width = 12; + alloc_count_allocated.type = emitter_type_title; + alloc_count_allocated.str_val = "allocated"; + + emitter_col_t alloc_count_nmalloc; + emitter_col_init(&alloc_count_nmalloc, &alloc_count_row); + alloc_count_nmalloc.justify = emitter_justify_right; + alloc_count_nmalloc.width = 12; + alloc_count_nmalloc.type = emitter_type_title; + alloc_count_nmalloc.str_val = "nmalloc"; + + emitter_col_t alloc_count_ndalloc; + emitter_col_init(&alloc_count_ndalloc, &alloc_count_row); + alloc_count_ndalloc.justify = emitter_justify_right; + alloc_count_ndalloc.width = 12; + alloc_count_ndalloc.type = emitter_type_title; + alloc_count_ndalloc.str_val = "ndalloc"; + + emitter_col_t alloc_count_nrequests; + emitter_col_init(&alloc_count_nrequests, &alloc_count_row); + alloc_count_nrequests.justify = emitter_justify_right; + alloc_count_nrequests.width = 12; + alloc_count_nrequests.type = emitter_type_title; + alloc_count_nrequests.str_val = "nrequests"; + + emitter_table_row(emitter, &alloc_count_row); + +#define GET_AND_EMIT_ALLOC_STAT(small_or_large, name, valtype) \ + CTL_M2_GET("stats.arenas.0." #small_or_large "." #name, i, \ + &small_or_large##_##name, valtype##_t); \ + emitter_json_kv(emitter, #name, emitter_type_##valtype, \ + &small_or_large##_##name); \ + alloc_count_##name.type = emitter_type_##valtype; \ + alloc_count_##name.valtype##_val = small_or_large##_##name; + + emitter_json_dict_begin(emitter, "small"); + alloc_count_title.str_val = "small:"; + + GET_AND_EMIT_ALLOC_STAT(small, allocated, size) + GET_AND_EMIT_ALLOC_STAT(small, nmalloc, uint64) + GET_AND_EMIT_ALLOC_STAT(small, ndalloc, uint64) + GET_AND_EMIT_ALLOC_STAT(small, nrequests, uint64) + + emitter_table_row(emitter, &alloc_count_row); + emitter_json_dict_end(emitter); /* Close "small". */ + + emitter_json_dict_begin(emitter, "large"); + alloc_count_title.str_val = "large:"; + + GET_AND_EMIT_ALLOC_STAT(large, allocated, size) + GET_AND_EMIT_ALLOC_STAT(large, nmalloc, uint64) + GET_AND_EMIT_ALLOC_STAT(large, ndalloc, uint64) + GET_AND_EMIT_ALLOC_STAT(large, nrequests, uint64) + + emitter_table_row(emitter, &alloc_count_row); + emitter_json_dict_end(emitter); /* Close "large". */ + +#undef GET_AND_EMIT_ALLOC_STAT + + /* Aggregated small + large stats are emitter only in table mode. 
*/ + alloc_count_title.str_val = "total:"; + alloc_count_allocated.size_val = small_allocated + large_allocated; + alloc_count_nmalloc.uint64_val = small_nmalloc + large_nmalloc; + alloc_count_ndalloc.uint64_val = small_ndalloc + large_ndalloc; + alloc_count_nrequests.uint64_val = small_nrequests + large_nrequests; + emitter_table_row(emitter, &alloc_count_row); + + emitter_row_t mem_count_row; + emitter_row_init(&mem_count_row); + + emitter_col_t mem_count_title; + emitter_col_init(&mem_count_title, &mem_count_row); + mem_count_title.justify = emitter_justify_left; + mem_count_title.width = 25; + mem_count_title.type = emitter_type_title; + mem_count_title.str_val = ""; + + emitter_col_t mem_count_val; + emitter_col_init(&mem_count_val, &mem_count_row); + mem_count_val.justify = emitter_justify_right; + mem_count_val.width = 12; + mem_count_val.type = emitter_type_title; + mem_count_val.str_val = ""; + + emitter_table_row(emitter, &mem_count_row); + mem_count_val.type = emitter_type_size; + + /* Active count in bytes is emitted only in table mode. */ + mem_count_title.str_val = "active:"; + mem_count_val.size_val = pactive * page; + emitter_table_row(emitter, &mem_count_row); + +#define GET_AND_EMIT_MEM_STAT(stat) \ + CTL_M2_GET("stats.arenas.0."#stat, i, &stat, size_t); \ + emitter_json_kv(emitter, #stat, emitter_type_size, &stat); \ + mem_count_title.str_val = #stat":"; \ + mem_count_val.size_val = stat; \ + emitter_table_row(emitter, &mem_count_row); + + GET_AND_EMIT_MEM_STAT(mapped) + GET_AND_EMIT_MEM_STAT(retained) + GET_AND_EMIT_MEM_STAT(base) + GET_AND_EMIT_MEM_STAT(internal) + GET_AND_EMIT_MEM_STAT(metadata_thp) + GET_AND_EMIT_MEM_STAT(tcache_bytes) + GET_AND_EMIT_MEM_STAT(resident) +#undef GET_AND_EMIT_MEM_STAT + + if (mutex) { + stats_arena_mutexes_print(emitter, i); + } + if (bins) { + stats_arena_bins_print(emitter, mutex, i); + } + if (large) { + stats_arena_lextents_print(emitter, i); + } +} + +static void +stats_general_print(emitter_t *emitter) { + const char *cpv; + bool bv, bv2; + unsigned uv; + uint32_t u32v; + uint64_t u64v; + ssize_t ssv, ssv2; + size_t sv, bsz, usz, ssz, sssz, cpsz; + + bsz = sizeof(bool); + usz = sizeof(unsigned); + ssz = sizeof(size_t); + sssz = sizeof(ssize_t); + cpsz = sizeof(const char *); + + CTL_GET("version", &cpv, const char *); + emitter_kv(emitter, "version", "Version", emitter_type_string, &cpv); + + /* config. */ + emitter_dict_begin(emitter, "config", "Build-time option settings"); +#define CONFIG_WRITE_BOOL(name) \ + do { \ + CTL_GET("config."#name, &bv, bool); \ + emitter_kv(emitter, #name, "config."#name, \ + emitter_type_bool, &bv); \ + } while (0) + + CONFIG_WRITE_BOOL(cache_oblivious); + CONFIG_WRITE_BOOL(debug); + CONFIG_WRITE_BOOL(fill); + CONFIG_WRITE_BOOL(lazy_lock); + emitter_kv(emitter, "malloc_conf", "config.malloc_conf", + emitter_type_string, &config_malloc_conf); + + CONFIG_WRITE_BOOL(prof); + CONFIG_WRITE_BOOL(prof_libgcc); + CONFIG_WRITE_BOOL(prof_libunwind); + CONFIG_WRITE_BOOL(stats); + CONFIG_WRITE_BOOL(utrace); + CONFIG_WRITE_BOOL(xmalloc); +#undef CONFIG_WRITE_BOOL + emitter_dict_end(emitter); /* Close "config" dict. */ + + /* opt. 
*/ +#define OPT_WRITE(name, var, size, emitter_type) \ + if (je_mallctl("opt."name, (void *)&var, &size, NULL, 0) == \ + 0) { \ + emitter_kv(emitter, name, "opt."name, emitter_type, \ + &var); \ + } + +#define OPT_WRITE_MUTABLE(name, var1, var2, size, emitter_type, \ + altname) \ + if (je_mallctl("opt."name, (void *)&var1, &size, NULL, 0) == \ + 0 && je_mallctl(altname, (void *)&var2, &size, NULL, 0) \ + == 0) { \ + emitter_kv_note(emitter, name, "opt."name, \ + emitter_type, &var1, altname, emitter_type, \ + &var2); \ + } + +#define OPT_WRITE_BOOL(name) OPT_WRITE(name, bv, bsz, emitter_type_bool) +#define OPT_WRITE_BOOL_MUTABLE(name, altname) \ + OPT_WRITE_MUTABLE(name, bv, bv2, bsz, emitter_type_bool, altname) + +#define OPT_WRITE_UNSIGNED(name) \ + OPT_WRITE(name, uv, usz, emitter_type_unsigned) + +#define OPT_WRITE_SSIZE_T(name) \ + OPT_WRITE(name, ssv, sssz, emitter_type_ssize) +#define OPT_WRITE_SSIZE_T_MUTABLE(name, altname) \ + OPT_WRITE_MUTABLE(name, ssv, ssv2, sssz, emitter_type_ssize, \ + altname) + +#define OPT_WRITE_CHAR_P(name) \ + OPT_WRITE(name, cpv, cpsz, emitter_type_string) + + emitter_dict_begin(emitter, "opt", "Run-time option settings"); + + OPT_WRITE_BOOL("abort") + OPT_WRITE_BOOL("abort_conf") + OPT_WRITE_BOOL("retain") + OPT_WRITE_CHAR_P("dss") + OPT_WRITE_UNSIGNED("narenas") + OPT_WRITE_CHAR_P("percpu_arena") + OPT_WRITE_CHAR_P("metadata_thp") + OPT_WRITE_BOOL_MUTABLE("background_thread", "background_thread") + OPT_WRITE_SSIZE_T_MUTABLE("dirty_decay_ms", "arenas.dirty_decay_ms") + OPT_WRITE_SSIZE_T_MUTABLE("muzzy_decay_ms", "arenas.muzzy_decay_ms") + OPT_WRITE_UNSIGNED("lg_extent_max_active_fit") + OPT_WRITE_CHAR_P("junk") + OPT_WRITE_BOOL("zero") + OPT_WRITE_BOOL("utrace") + OPT_WRITE_BOOL("xmalloc") + OPT_WRITE_BOOL("tcache") + OPT_WRITE_SSIZE_T("lg_tcache_max") + OPT_WRITE_CHAR_P("thp") + OPT_WRITE_BOOL("prof") + OPT_WRITE_CHAR_P("prof_prefix") + OPT_WRITE_BOOL_MUTABLE("prof_active", "prof.active") + OPT_WRITE_BOOL_MUTABLE("prof_thread_active_init", + "prof.thread_active_init") + OPT_WRITE_SSIZE_T_MUTABLE("lg_prof_sample", "prof.lg_sample") + OPT_WRITE_BOOL("prof_accum") + OPT_WRITE_SSIZE_T("lg_prof_interval") + OPT_WRITE_BOOL("prof_gdump") + OPT_WRITE_BOOL("prof_final") + OPT_WRITE_BOOL("prof_leak") + OPT_WRITE_BOOL("stats_print") + OPT_WRITE_CHAR_P("stats_print_opts") + + emitter_dict_end(emitter); + +#undef OPT_WRITE +#undef OPT_WRITE_MUTABLE +#undef OPT_WRITE_BOOL +#undef OPT_WRITE_BOOL_MUTABLE +#undef OPT_WRITE_UNSIGNED +#undef OPT_WRITE_SSIZE_T +#undef OPT_WRITE_SSIZE_T_MUTABLE +#undef OPT_WRITE_CHAR_P + + /* prof. */ + if (config_prof) { + emitter_dict_begin(emitter, "prof", "Profiling settings"); + + CTL_GET("prof.thread_active_init", &bv, bool); + emitter_kv(emitter, "thread_active_init", + "prof.thread_active_init", emitter_type_bool, &bv); + + CTL_GET("prof.active", &bv, bool); + emitter_kv(emitter, "active", "prof.active", emitter_type_bool, + &bv); + + CTL_GET("prof.gdump", &bv, bool); + emitter_kv(emitter, "gdump", "prof.gdump", emitter_type_bool, + &bv); + + CTL_GET("prof.interval", &u64v, uint64_t); + emitter_kv(emitter, "interval", "prof.interval", + emitter_type_uint64, &u64v); + + CTL_GET("prof.lg_sample", &ssv, ssize_t); + emitter_kv(emitter, "lg_sample", "prof.lg_sample", + emitter_type_ssize, &ssv); + + emitter_dict_end(emitter); /* Close "prof". */ + } + + /* arenas. */ + /* + * The json output sticks arena info into an "arenas" dict; the table + * output puts them at the top-level. 
+ */ + emitter_json_dict_begin(emitter, "arenas"); + + CTL_GET("arenas.narenas", &uv, unsigned); + emitter_kv(emitter, "narenas", "Arenas", emitter_type_unsigned, &uv); + + /* + * Decay settings are emitted only in json mode; in table mode, they're + * emitted as notes with the opt output, above. + */ + CTL_GET("arenas.dirty_decay_ms", &ssv, ssize_t); + emitter_json_kv(emitter, "dirty_decay_ms", emitter_type_ssize, &ssv); + + CTL_GET("arenas.muzzy_decay_ms", &ssv, ssize_t); + emitter_json_kv(emitter, "muzzy_decay_ms", emitter_type_ssize, &ssv); + + CTL_GET("arenas.quantum", &sv, size_t); + emitter_kv(emitter, "quantum", "Quantum size", emitter_type_size, &sv); + + CTL_GET("arenas.page", &sv, size_t); + emitter_kv(emitter, "page", "Page size", emitter_type_size, &sv); + + if (je_mallctl("arenas.tcache_max", (void *)&sv, &ssz, NULL, 0) == 0) { + emitter_kv(emitter, "tcache_max", + "Maximum thread-cached size class", emitter_type_size, &sv); + } + + unsigned nbins; + CTL_GET("arenas.nbins", &nbins, unsigned); + emitter_kv(emitter, "nbins", "Number of bin size classes", + emitter_type_unsigned, &nbins); + + unsigned nhbins; + CTL_GET("arenas.nhbins", &nhbins, unsigned); + emitter_kv(emitter, "nhbins", "Number of thread-cache bin size classes", + emitter_type_unsigned, &nhbins); + + /* + * We do enough mallctls in a loop that we actually want to omit them + * (not just omit the printing). + */ + if (emitter->output == emitter_output_json) { + emitter_json_arr_begin(emitter, "bin"); + for (unsigned i = 0; i < nbins; i++) { + emitter_json_arr_obj_begin(emitter); + + CTL_M2_GET("arenas.bin.0.size", i, &sv, size_t); + emitter_json_kv(emitter, "size", emitter_type_size, + &sv); + + CTL_M2_GET("arenas.bin.0.nregs", i, &u32v, uint32_t); + emitter_json_kv(emitter, "nregs", emitter_type_uint32, + &u32v); + + CTL_M2_GET("arenas.bin.0.slab_size", i, &sv, size_t); + emitter_json_kv(emitter, "slab_size", emitter_type_size, + &sv); + + emitter_json_arr_obj_end(emitter); + } + emitter_json_arr_end(emitter); /* Close "bin". */ + } + + unsigned nlextents; + CTL_GET("arenas.nlextents", &nlextents, unsigned); + emitter_kv(emitter, "nlextents", "Number of large size classes", + emitter_type_unsigned, &nlextents); + + if (emitter->output == emitter_output_json) { + emitter_json_arr_begin(emitter, "lextent"); + for (unsigned i = 0; i < nlextents; i++) { + emitter_json_arr_obj_begin(emitter); + + CTL_M2_GET("arenas.lextent.0.size", i, &sv, size_t); + emitter_json_kv(emitter, "size", emitter_type_size, + &sv); + + emitter_json_arr_obj_end(emitter); + } + emitter_json_arr_end(emitter); /* Close "lextent". */ + } + + emitter_json_dict_end(emitter); /* Close "arenas" */ +} + +static void +stats_print_helper(emitter_t *emitter, bool merged, bool destroyed, + bool unmerged, bool bins, bool large, bool mutex) { + /* + * These should be deleted. We keep them around for a while, to aid in + * the transition to the emitter code. 
+ */ + size_t allocated, active, metadata, metadata_thp, resident, mapped, + retained; + size_t num_background_threads; + uint64_t background_thread_num_runs, background_thread_run_interval; + + CTL_GET("stats.allocated", &allocated, size_t); + CTL_GET("stats.active", &active, size_t); + CTL_GET("stats.metadata", &metadata, size_t); + CTL_GET("stats.metadata_thp", &metadata_thp, size_t); + CTL_GET("stats.resident", &resident, size_t); + CTL_GET("stats.mapped", &mapped, size_t); + CTL_GET("stats.retained", &retained, size_t); + + if (have_background_thread) { + CTL_GET("stats.background_thread.num_threads", + &num_background_threads, size_t); + CTL_GET("stats.background_thread.num_runs", + &background_thread_num_runs, uint64_t); + CTL_GET("stats.background_thread.run_interval", + &background_thread_run_interval, uint64_t); + } else { + num_background_threads = 0; + background_thread_num_runs = 0; + background_thread_run_interval = 0; + } + + /* Generic global stats. */ + emitter_json_dict_begin(emitter, "stats"); + emitter_json_kv(emitter, "allocated", emitter_type_size, &allocated); + emitter_json_kv(emitter, "active", emitter_type_size, &active); + emitter_json_kv(emitter, "metadata", emitter_type_size, &metadata); + emitter_json_kv(emitter, "metadata_thp", emitter_type_size, + &metadata_thp); + emitter_json_kv(emitter, "resident", emitter_type_size, &resident); + emitter_json_kv(emitter, "mapped", emitter_type_size, &mapped); + emitter_json_kv(emitter, "retained", emitter_type_size, &retained); + + emitter_table_printf(emitter, "Allocated: %zu, active: %zu, " + "metadata: %zu (n_thp %zu), resident: %zu, mapped: %zu, " + "retained: %zu\n", allocated, active, metadata, metadata_thp, + resident, mapped, retained); + + /* Background thread stats. */ + emitter_json_dict_begin(emitter, "background_thread"); + emitter_json_kv(emitter, "num_threads", emitter_type_size, + &num_background_threads); + emitter_json_kv(emitter, "num_runs", emitter_type_uint64, + &background_thread_num_runs); + emitter_json_kv(emitter, "run_interval", emitter_type_uint64, + &background_thread_run_interval); + emitter_json_dict_end(emitter); /* Close "background_thread". */ + + emitter_table_printf(emitter, "Background threads: %zu, " + "num_runs: %"FMTu64", run_interval: %"FMTu64" ns\n", + num_background_threads, background_thread_num_runs, + background_thread_run_interval); + + if (mutex) { + emitter_row_t row; + emitter_col_t name; + emitter_col_t col64[mutex_prof_num_uint64_t_counters]; + emitter_col_t col32[mutex_prof_num_uint32_t_counters]; + + emitter_row_init(&row); + mutex_stats_init_cols(&row, "", &name, col64, col32); + + emitter_table_row(emitter, &row); + emitter_json_dict_begin(emitter, "mutexes"); + + for (int i = 0; i < mutex_prof_num_global_mutexes; i++) { + mutex_stats_read_global(global_mutex_names[i], &name, + col64, col32); + emitter_json_dict_begin(emitter, global_mutex_names[i]); + mutex_stats_emit(emitter, &row, col64, col32); + emitter_json_dict_end(emitter); + } + + emitter_json_dict_end(emitter); /* Close "mutexes". */ + } + + emitter_json_dict_end(emitter); /* Close "stats". 
*/ + + if (merged || destroyed || unmerged) { + unsigned narenas; + + emitter_json_dict_begin(emitter, "stats.arenas"); + + CTL_GET("arenas.narenas", &narenas, unsigned); + size_t mib[3]; + size_t miblen = sizeof(mib) / sizeof(size_t); + size_t sz; + VARIABLE_ARRAY(bool, initialized, narenas); + bool destroyed_initialized; + unsigned i, j, ninitialized; + + xmallctlnametomib("arena.0.initialized", mib, &miblen); + for (i = ninitialized = 0; i < narenas; i++) { + mib[1] = i; + sz = sizeof(bool); + xmallctlbymib(mib, miblen, &initialized[i], &sz, + NULL, 0); + if (initialized[i]) { + ninitialized++; + } + } + mib[1] = MALLCTL_ARENAS_DESTROYED; + sz = sizeof(bool); + xmallctlbymib(mib, miblen, &destroyed_initialized, &sz, + NULL, 0); + + /* Merged stats. */ + if (merged && (ninitialized > 1 || !unmerged)) { + /* Print merged arena stats. */ + emitter_table_printf(emitter, "Merged arenas stats:\n"); + emitter_json_dict_begin(emitter, "merged"); + stats_arena_print(emitter, MALLCTL_ARENAS_ALL, bins, + large, mutex); + emitter_json_dict_end(emitter); /* Close "merged". */ + } + + /* Destroyed stats. */ + if (destroyed_initialized && destroyed) { + /* Print destroyed arena stats. */ + emitter_table_printf(emitter, + "Destroyed arenas stats:\n"); + emitter_json_dict_begin(emitter, "destroyed"); + stats_arena_print(emitter, MALLCTL_ARENAS_DESTROYED, + bins, large, mutex); + emitter_json_dict_end(emitter); /* Close "destroyed". */ + } + + /* Unmerged stats. */ + if (unmerged) { + for (i = j = 0; i < narenas; i++) { + if (initialized[i]) { + char arena_ind_str[20]; + malloc_snprintf(arena_ind_str, + sizeof(arena_ind_str), "%u", i); + emitter_json_dict_begin(emitter, + arena_ind_str); + emitter_table_printf(emitter, + "arenas[%s]:\n", arena_ind_str); + stats_arena_print(emitter, i, bins, + large, mutex); + /* Close "<arena-ind>". */ + emitter_json_dict_end(emitter); + } + } + } + emitter_json_dict_end(emitter); /* Close "stats.arenas". */ + } } void stats_print(void (*write_cb)(void *, const char *), void *cbopaque, - const char *opts) -{ + const char *opts) { int err; uint64_t epoch; size_t u64sz; - bool general = true; - bool merged = true; - bool unmerged = true; - bool bins = true; - bool large = true; - bool huge = true; +#define OPTION(o, v, d, s) bool v = d; + STATS_PRINT_OPTIONS +#undef OPTION /* * Refresh stats, in case mallctl() was called by the application. 
@@ -379,7 +1240,8 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque, * */ epoch = 1; u64sz = sizeof(uint64_t); - err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t)); + err = je_mallctl("epoch", (void *)&epoch, &u64sz, (void *)&epoch, + sizeof(uint64_t)); if (err != 0) { if (err == EAGAIN) { malloc_write("<jemalloc>: Memory allocation failure in " @@ -392,249 +1254,33 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque, } if (opts != NULL) { - unsigned i; - - for (i = 0; opts[i] != '\0'; i++) { + for (unsigned i = 0; opts[i] != '\0'; i++) { switch (opts[i]) { - case 'g': - general = false; - break; - case 'm': - merged = false; - break; - case 'a': - unmerged = false; - break; - case 'b': - bins = false; - break; - case 'l': - large = false; - break; - case 'h': - huge = false; - break; +#define OPTION(o, v, d, s) case o: v = s; break; + STATS_PRINT_OPTIONS +#undef OPTION default:; } } } - malloc_cprintf(write_cb, cbopaque, - "___ Begin jemalloc statistics ___\n"); - if (general) { - const char *cpv; - bool bv; - unsigned uv; - ssize_t ssv; - size_t sv, bsz, ssz, sssz, cpsz; - - bsz = sizeof(bool); - ssz = sizeof(size_t); - sssz = sizeof(ssize_t); - cpsz = sizeof(const char *); - - CTL_GET("version", &cpv, const char *); - malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv); - CTL_GET("config.debug", &bv, bool); - malloc_cprintf(write_cb, cbopaque, "Assertions %s\n", - bv ? "enabled" : "disabled"); - -#define OPT_WRITE_BOOL(n) \ - if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0) { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": %s\n", bv ? "true" : "false"); \ - } -#define OPT_WRITE_BOOL_MUTABLE(n, m) { \ - bool bv2; \ - if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0 && \ - je_mallctl(#m, &bv2, &bsz, NULL, 0) == 0) { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": %s ("#m": %s)\n", bv ? "true" \ - : "false", bv2 ? 
"true" : "false"); \ - } \ -} -#define OPT_WRITE_SIZE_T(n) \ - if (je_mallctl("opt."#n, &sv, &ssz, NULL, 0) == 0) { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": %zu\n", sv); \ - } -#define OPT_WRITE_SSIZE_T(n) \ - if (je_mallctl("opt."#n, &ssv, &sssz, NULL, 0) == 0) { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": %zd\n", ssv); \ - } -#define OPT_WRITE_SSIZE_T_MUTABLE(n, m) { \ - ssize_t ssv2; \ - if (je_mallctl("opt."#n, &ssv, &sssz, NULL, 0) == 0 && \ - je_mallctl(#m, &ssv2, &sssz, NULL, 0) == 0) { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": %zd ("#m": %zd)\n", \ - ssv, ssv2); \ - } \ -} -#define OPT_WRITE_CHAR_P(n) \ - if (je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0) == 0) { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": \"%s\"\n", cpv); \ - } - - malloc_cprintf(write_cb, cbopaque, - "Run-time option settings:\n"); - OPT_WRITE_BOOL(abort) - OPT_WRITE_SIZE_T(lg_chunk) - OPT_WRITE_CHAR_P(dss) - OPT_WRITE_SIZE_T(narenas) - OPT_WRITE_SSIZE_T_MUTABLE(lg_dirty_mult, arenas.lg_dirty_mult) - OPT_WRITE_BOOL(stats_print) - OPT_WRITE_CHAR_P(junk) - OPT_WRITE_SIZE_T(quarantine) - OPT_WRITE_BOOL(redzone) - OPT_WRITE_BOOL(zero) - OPT_WRITE_BOOL(utrace) - OPT_WRITE_BOOL(valgrind) - OPT_WRITE_BOOL(xmalloc) - OPT_WRITE_BOOL(tcache) - OPT_WRITE_SSIZE_T(lg_tcache_max) - OPT_WRITE_BOOL(prof) - OPT_WRITE_CHAR_P(prof_prefix) - OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active) - OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init, - prof.thread_active_init) - OPT_WRITE_SSIZE_T(lg_prof_sample) - OPT_WRITE_BOOL(prof_accum) - OPT_WRITE_SSIZE_T(lg_prof_interval) - OPT_WRITE_BOOL(prof_gdump) - OPT_WRITE_BOOL(prof_final) - OPT_WRITE_BOOL(prof_leak) - -#undef OPT_WRITE_BOOL -#undef OPT_WRITE_BOOL_MUTABLE -#undef OPT_WRITE_SIZE_T -#undef OPT_WRITE_SSIZE_T -#undef OPT_WRITE_CHAR_P - - malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus); - - CTL_GET("arenas.narenas", &uv, unsigned); - malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv); - - malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n", - sizeof(void *)); - - CTL_GET("arenas.quantum", &sv, size_t); - malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", - sv); - - CTL_GET("arenas.page", &sv, size_t); - malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv); + emitter_t emitter; + emitter_init(&emitter, + json ? 
emitter_output_json : emitter_output_table, write_cb, + cbopaque); + emitter_begin(&emitter); + emitter_table_printf(&emitter, "___ Begin jemalloc statistics ___\n"); + emitter_json_dict_begin(&emitter, "jemalloc"); - CTL_GET("arenas.lg_dirty_mult", &ssv, ssize_t); - if (ssv >= 0) { - malloc_cprintf(write_cb, cbopaque, - "Min active:dirty page ratio per arena: %u:1\n", - (1U << ssv)); - } else { - malloc_cprintf(write_cb, cbopaque, - "Min active:dirty page ratio per arena: N/A\n"); - } - if (je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0) == 0) { - malloc_cprintf(write_cb, cbopaque, - "Maximum thread-cached size class: %zu\n", sv); - } - if (je_mallctl("opt.prof", &bv, &bsz, NULL, 0) == 0 && bv) { - CTL_GET("prof.lg_sample", &sv, size_t); - malloc_cprintf(write_cb, cbopaque, - "Average profile sample interval: %"FMTu64 - " (2^%zu)\n", (((uint64_t)1U) << sv), sv); - - CTL_GET("opt.lg_prof_interval", &ssv, ssize_t); - if (ssv >= 0) { - malloc_cprintf(write_cb, cbopaque, - "Average profile dump interval: %"FMTu64 - " (2^%zd)\n", - (((uint64_t)1U) << ssv), ssv); - } else { - malloc_cprintf(write_cb, cbopaque, - "Average profile dump interval: N/A\n"); - } - } - CTL_GET("opt.lg_chunk", &sv, size_t); - malloc_cprintf(write_cb, cbopaque, - "Chunk size: %zu (2^%zu)\n", (ZU(1) << sv), sv); + if (general) { + stats_general_print(&emitter); } - if (config_stats) { - size_t *cactive; - size_t allocated, active, metadata, resident, mapped; - - CTL_GET("stats.cactive", &cactive, size_t *); - CTL_GET("stats.allocated", &allocated, size_t); - CTL_GET("stats.active", &active, size_t); - CTL_GET("stats.metadata", &metadata, size_t); - CTL_GET("stats.resident", &resident, size_t); - CTL_GET("stats.mapped", &mapped, size_t); - malloc_cprintf(write_cb, cbopaque, - "Allocated: %zu, active: %zu, metadata: %zu," - " resident: %zu, mapped: %zu\n", - allocated, active, metadata, resident, mapped); - malloc_cprintf(write_cb, cbopaque, - "Current active ceiling: %zu\n", - atomic_read_z(cactive)); - - if (merged) { - unsigned narenas; - - CTL_GET("arenas.narenas", &narenas, unsigned); - { - VARIABLE_ARRAY(bool, initialized, narenas); - size_t isz; - unsigned i, ninitialized; - - isz = sizeof(bool) * narenas; - xmallctl("arenas.initialized", initialized, - &isz, NULL, 0); - for (i = ninitialized = 0; i < narenas; i++) { - if (initialized[i]) - ninitialized++; - } - - if (ninitialized > 1 || !unmerged) { - /* Print merged arena stats. */ - malloc_cprintf(write_cb, cbopaque, - "\nMerged arenas stats:\n"); - stats_arena_print(write_cb, cbopaque, - narenas, bins, large, huge); - } - } - } - - if (unmerged) { - unsigned narenas; - - /* Print stats for each arena. */ - - CTL_GET("arenas.narenas", &narenas, unsigned); - { - VARIABLE_ARRAY(bool, initialized, narenas); - size_t isz; - unsigned i; - - isz = sizeof(bool) * narenas; - xmallctl("arenas.initialized", initialized, - &isz, NULL, 0); - - for (i = 0; i < narenas; i++) { - if (initialized[i]) { - malloc_cprintf(write_cb, - cbopaque, - "\narenas[%u]:\n", i); - stats_arena_print(write_cb, - cbopaque, i, bins, large, - huge); - } - } - } - } + stats_print_helper(&emitter, merged, destroyed, unmerged, + bins, large, mutex); } - malloc_cprintf(write_cb, cbopaque, "--- End jemalloc statistics ---\n"); + + emitter_json_dict_end(&emitter); /* Closes the "jemalloc" dict. 
*/ + emitter_table_printf(&emitter, "--- End jemalloc statistics ---\n"); + emitter_end(&emitter); } diff --git a/deps/jemalloc/src/sz.c b/deps/jemalloc/src/sz.c new file mode 100644 index 000000000..9de77e45f --- /dev/null +++ b/deps/jemalloc/src/sz.c @@ -0,0 +1,107 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/sz.h" + +JEMALLOC_ALIGNED(CACHELINE) +const size_t sz_pind2sz_tab[NPSIZES+1] = { +#define PSZ_yes(lg_grp, ndelta, lg_delta) \ + (((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))), +#define PSZ_no(lg_grp, ndelta, lg_delta) +#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \ + PSZ_##psz(lg_grp, ndelta, lg_delta) + SIZE_CLASSES +#undef PSZ_yes +#undef PSZ_no +#undef SC + (LARGE_MAXCLASS + PAGE) +}; + +JEMALLOC_ALIGNED(CACHELINE) +const size_t sz_index2size_tab[NSIZES] = { +#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \ + ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)), + SIZE_CLASSES +#undef SC +}; + +JEMALLOC_ALIGNED(CACHELINE) +const uint8_t sz_size2index_tab[] = { +#if LG_TINY_MIN == 0 +/* The div module doesn't support division by 1. */ +#error "Unsupported LG_TINY_MIN" +#define S2B_0(i) i, +#elif LG_TINY_MIN == 1 +#warning "Dangerous LG_TINY_MIN" +#define S2B_1(i) i, +#elif LG_TINY_MIN == 2 +#warning "Dangerous LG_TINY_MIN" +#define S2B_2(i) i, +#elif LG_TINY_MIN == 3 +#define S2B_3(i) i, +#elif LG_TINY_MIN == 4 +#define S2B_4(i) i, +#elif LG_TINY_MIN == 5 +#define S2B_5(i) i, +#elif LG_TINY_MIN == 6 +#define S2B_6(i) i, +#elif LG_TINY_MIN == 7 +#define S2B_7(i) i, +#elif LG_TINY_MIN == 8 +#define S2B_8(i) i, +#elif LG_TINY_MIN == 9 +#define S2B_9(i) i, +#elif LG_TINY_MIN == 10 +#define S2B_10(i) i, +#elif LG_TINY_MIN == 11 +#define S2B_11(i) i, +#else +#error "Unsupported LG_TINY_MIN" +#endif +#if LG_TINY_MIN < 1 +#define S2B_1(i) S2B_0(i) S2B_0(i) +#endif +#if LG_TINY_MIN < 2 +#define S2B_2(i) S2B_1(i) S2B_1(i) +#endif +#if LG_TINY_MIN < 3 +#define S2B_3(i) S2B_2(i) S2B_2(i) +#endif +#if LG_TINY_MIN < 4 +#define S2B_4(i) S2B_3(i) S2B_3(i) +#endif +#if LG_TINY_MIN < 5 +#define S2B_5(i) S2B_4(i) S2B_4(i) +#endif +#if LG_TINY_MIN < 6 +#define S2B_6(i) S2B_5(i) S2B_5(i) +#endif +#if LG_TINY_MIN < 7 +#define S2B_7(i) S2B_6(i) S2B_6(i) +#endif +#if LG_TINY_MIN < 8 +#define S2B_8(i) S2B_7(i) S2B_7(i) +#endif +#if LG_TINY_MIN < 9 +#define S2B_9(i) S2B_8(i) S2B_8(i) +#endif +#if LG_TINY_MIN < 10 +#define S2B_10(i) S2B_9(i) S2B_9(i) +#endif +#if LG_TINY_MIN < 11 +#define S2B_11(i) S2B_10(i) S2B_10(i) +#endif +#define S2B_no(i) +#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \ + S2B_##lg_delta_lookup(index) + SIZE_CLASSES +#undef S2B_3 +#undef S2B_4 +#undef S2B_5 +#undef S2B_6 +#undef S2B_7 +#undef S2B_8 +#undef S2B_9 +#undef S2B_10 +#undef S2B_11 +#undef S2B_no +#undef SC +}; diff --git a/deps/jemalloc/src/tcache.c b/deps/jemalloc/src/tcache.c index fdafd0c62..a769a6b17 100644 --- a/deps/jemalloc/src/tcache.c +++ b/deps/jemalloc/src/tcache.c @@ -1,5 +1,10 @@ -#define JEMALLOC_TCACHE_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_TCACHE_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/size_classes.h" /******************************************************************************/ /* Data. 
*/ @@ -7,10 +12,10 @@ bool opt_tcache = true; ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT; -tcache_bin_info_t *tcache_bin_info; +cache_bin_info_t *tcache_bin_info; static unsigned stack_nelms; /* Total stack elms per tcache. */ -size_t nhbins; +unsigned nhbins; size_t tcache_maxclass; tcaches_t *tcaches; @@ -21,21 +26,26 @@ static unsigned tcaches_past; /* Head of singly linked list tracking available tcaches elements. */ static tcaches_t *tcaches_avail; -/******************************************************************************/ +/* Protects tcaches{,_past,_avail}. */ +static malloc_mutex_t tcaches_mtx; -size_t tcache_salloc(const void *ptr) -{ +/******************************************************************************/ - return (arena_salloc(ptr, false)); +size_t +tcache_salloc(tsdn_t *tsdn, const void *ptr) { + return arena_salloc(tsdn, ptr); } void -tcache_event_hard(tsd_t *tsd, tcache_t *tcache) -{ +tcache_event_hard(tsd_t *tsd, tcache_t *tcache) { szind_t binind = tcache->next_gc_bin; - tcache_bin_t *tbin = &tcache->tbins[binind]; - tcache_bin_info_t *tbin_info = &tcache_bin_info[binind]; + cache_bin_t *tbin; + if (binind < NBINS) { + tbin = tcache_small_bin_get(tcache, binind); + } else { + tbin = tcache_large_bin_get(tcache, binind); + } if (tbin->low_water > 0) { /* * Flush (ceiling) 3/4 of the objects below the low water mark. @@ -44,75 +54,84 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache) tcache_bin_flush_small(tsd, tcache, tbin, binind, tbin->ncached - tbin->low_water + (tbin->low_water >> 2)); + /* + * Reduce fill count by 2X. Limit lg_fill_div such that + * the fill count is always at least 1. + */ + cache_bin_info_t *tbin_info = &tcache_bin_info[binind]; + if ((tbin_info->ncached_max >> + (tcache->lg_fill_div[binind] + 1)) >= 1) { + tcache->lg_fill_div[binind]++; + } } else { tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached - tbin->low_water + (tbin->low_water >> 2), tcache); } - /* - * Reduce fill count by 2X. Limit lg_fill_div such that the - * fill count is always at least 1. - */ - if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1) - tbin->lg_fill_div++; } else if (tbin->low_water < 0) { /* - * Increase fill count by 2X. Make sure lg_fill_div stays - * greater than 0. + * Increase fill count by 2X for small bins. Make sure + * lg_fill_div stays greater than 0. */ - if (tbin->lg_fill_div > 1) - tbin->lg_fill_div--; + if (binind < NBINS && tcache->lg_fill_div[binind] > 1) { + tcache->lg_fill_div[binind]--; + } } tbin->low_water = tbin->ncached; tcache->next_gc_bin++; - if (tcache->next_gc_bin == nhbins) + if (tcache->next_gc_bin == nhbins) { tcache->next_gc_bin = 0; - tcache->ev_cnt = 0; + } } void * -tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache, - tcache_bin_t *tbin, szind_t binind) -{ +tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, + cache_bin_t *tbin, szind_t binind, bool *tcache_success) { void *ret; - arena_tcache_fill_small(arena, tbin, binind, config_prof ? - tcache->prof_accumbytes : 0); - if (config_prof) + assert(tcache->arena != NULL); + arena_tcache_fill_small(tsdn, arena, tcache, tbin, binind, + config_prof ? 
tcache->prof_accumbytes : 0); + if (config_prof) { tcache->prof_accumbytes = 0; - ret = tcache_alloc_easy(tbin); + } + ret = cache_bin_alloc_easy(tbin, tcache_success); - return (ret); + return ret; } void -tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, - szind_t binind, unsigned rem) -{ - arena_t *arena; - void *ptr; - unsigned i, nflush, ndeferred; +tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, + szind_t binind, unsigned rem) { bool merged_stats = false; assert(binind < NBINS); - assert(rem <= tbin->ncached); + assert((cache_bin_sz_t)rem <= tbin->ncached); - arena = arena_choose(tsd, NULL); + arena_t *arena = tcache->arena; assert(arena != NULL); - for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { + unsigned nflush = tbin->ncached - rem; + VARIABLE_ARRAY(extent_t *, item_extent, nflush); + /* Look up extent once per item. */ + for (unsigned i = 0 ; i < nflush; i++) { + item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i)); + } + + while (nflush > 0) { /* Lock the arena bin associated with the first object. */ - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( - tbin->avail[0]); - arena_t *bin_arena = extent_node_arena_get(&chunk->node); - arena_bin_t *bin = &bin_arena->bins[binind]; + extent_t *extent = item_extent[0]; + arena_t *bin_arena = extent_arena_get(extent); + bin_t *bin = &bin_arena->bins[binind]; if (config_prof && bin_arena == arena) { - if (arena_prof_accum(arena, tcache->prof_accumbytes)) - prof_idump(); + if (arena_prof_accum(tsd_tsdn(tsd), arena, + tcache->prof_accumbytes)) { + prof_idump(tsd_tsdn(tsd)); + } tcache->prof_accumbytes = 0; } - malloc_mutex_lock(&bin->lock); + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); if (config_stats && bin_arena == arena) { assert(!merged_stats); merged_stats = true; @@ -120,18 +139,15 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, bin->stats.nrequests += tbin->tstats.nrequests; tbin->tstats.nrequests = 0; } - ndeferred = 0; - for (i = 0; i < nflush; i++) { - ptr = tbin->avail[i]; - assert(ptr != NULL); - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (extent_node_arena_get(&chunk->node) == bin_arena) { - size_t pageind = ((uintptr_t)ptr - - (uintptr_t)chunk) >> LG_PAGE; - arena_chunk_map_bits_t *bitselm = - arena_bitselm_get(chunk, pageind); - arena_dalloc_bin_junked_locked(bin_arena, chunk, - ptr, bitselm); + unsigned ndeferred = 0; + for (unsigned i = 0; i < nflush; i++) { + void *ptr = *(tbin->avail - 1 - i); + extent = item_extent[i]; + assert(ptr != NULL && extent != NULL); + + if (extent_arena_get(extent) == bin_arena) { + arena_dalloc_bin_junked_locked(tsd_tsdn(tsd), + bin_arena, extent, ptr); } else { /* * This object was allocated via a different @@ -139,80 +155,97 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, * locked. Stash the object, so that it can be * handled in a future pass. */ - tbin->avail[ndeferred] = ptr; + *(tbin->avail - 1 - ndeferred) = ptr; + item_extent[ndeferred] = extent; ndeferred++; } } - malloc_mutex_unlock(&bin->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); + arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred); + nflush = ndeferred; } if (config_stats && !merged_stats) { /* * The flush loop didn't happen to flush to this thread's * arena, so the stats didn't get merged. Manually do so now. 
*/ - arena_bin_t *bin = &arena->bins[binind]; - malloc_mutex_lock(&bin->lock); + bin_t *bin = &arena->bins[binind]; + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); bin->stats.nflushes++; bin->stats.nrequests += tbin->tstats.nrequests; tbin->tstats.nrequests = 0; - malloc_mutex_unlock(&bin->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); } - memmove(tbin->avail, &tbin->avail[tbin->ncached - rem], - rem * sizeof(void *)); + memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem * + sizeof(void *)); tbin->ncached = rem; - if ((int)tbin->ncached < tbin->low_water) + if (tbin->ncached < tbin->low_water) { tbin->low_water = tbin->ncached; + } } void -tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, - unsigned rem, tcache_t *tcache) -{ - arena_t *arena; - void *ptr; - unsigned i, nflush, ndeferred; +tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind, + unsigned rem, tcache_t *tcache) { bool merged_stats = false; assert(binind < nhbins); - assert(rem <= tbin->ncached); + assert((cache_bin_sz_t)rem <= tbin->ncached); - arena = arena_choose(tsd, NULL); + arena_t *arena = tcache->arena; assert(arena != NULL); - for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { + unsigned nflush = tbin->ncached - rem; + VARIABLE_ARRAY(extent_t *, item_extent, nflush); + /* Look up extent once per item. */ + for (unsigned i = 0 ; i < nflush; i++) { + item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i)); + } + + while (nflush > 0) { /* Lock the arena associated with the first object. */ - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( - tbin->avail[0]); - arena_t *locked_arena = extent_node_arena_get(&chunk->node); + extent_t *extent = item_extent[0]; + arena_t *locked_arena = extent_arena_get(extent); UNUSED bool idump; - if (config_prof) + if (config_prof) { idump = false; - malloc_mutex_lock(&locked_arena->lock); + } + + malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->large_mtx); + for (unsigned i = 0; i < nflush; i++) { + void *ptr = *(tbin->avail - 1 - i); + assert(ptr != NULL); + extent = item_extent[i]; + if (extent_arena_get(extent) == locked_arena) { + large_dalloc_prep_junked_locked(tsd_tsdn(tsd), + extent); + } + } if ((config_prof || config_stats) && locked_arena == arena) { if (config_prof) { - idump = arena_prof_accum_locked(arena, + idump = arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes); tcache->prof_accumbytes = 0; } if (config_stats) { merged_stats = true; - arena->stats.nrequests_large += - tbin->tstats.nrequests; - arena->stats.lstats[binind - NBINS].nrequests += - tbin->tstats.nrequests; + arena_stats_large_nrequests_add(tsd_tsdn(tsd), + &arena->stats, binind, + tbin->tstats.nrequests); tbin->tstats.nrequests = 0; } } - ndeferred = 0; - for (i = 0; i < nflush; i++) { - ptr = tbin->avail[i]; - assert(ptr != NULL); - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (extent_node_arena_get(&chunk->node) == - locked_arena) { - arena_dalloc_large_junked_locked(locked_arena, - chunk, ptr); + malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->large_mtx); + + unsigned ndeferred = 0; + for (unsigned i = 0; i < nflush; i++) { + void *ptr = *(tbin->avail - 1 - i); + extent = item_extent[i]; + assert(ptr != NULL && extent != NULL); + + if (extent_arena_get(extent) == locked_arena) { + large_dalloc_finish(tsd_tsdn(tsd), extent); } else { /* * This object was allocated via a different @@ -220,62 +253,64 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, * Stash the object, so that 
it can be handled * in a future pass. */ - tbin->avail[ndeferred] = ptr; + *(tbin->avail - 1 - ndeferred) = ptr; + item_extent[ndeferred] = extent; ndeferred++; } } - malloc_mutex_unlock(&locked_arena->lock); - if (config_prof && idump) - prof_idump(); + if (config_prof && idump) { + prof_idump(tsd_tsdn(tsd)); + } + arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush - + ndeferred); + nflush = ndeferred; } if (config_stats && !merged_stats) { /* * The flush loop didn't happen to flush to this thread's * arena, so the stats didn't get merged. Manually do so now. */ - malloc_mutex_lock(&arena->lock); - arena->stats.nrequests_large += tbin->tstats.nrequests; - arena->stats.lstats[binind - NBINS].nrequests += - tbin->tstats.nrequests; + arena_stats_large_nrequests_add(tsd_tsdn(tsd), &arena->stats, + binind, tbin->tstats.nrequests); tbin->tstats.nrequests = 0; - malloc_mutex_unlock(&arena->lock); } - memmove(tbin->avail, &tbin->avail[tbin->ncached - rem], - rem * sizeof(void *)); + memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem * + sizeof(void *)); tbin->ncached = rem; - if ((int)tbin->ncached < tbin->low_water) + if (tbin->ncached < tbin->low_water) { tbin->low_water = tbin->ncached; + } } void -tcache_arena_associate(tcache_t *tcache, arena_t *arena) -{ +tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { + assert(tcache->arena == NULL); + tcache->arena = arena; if (config_stats) { /* Link into list of extant tcaches. */ - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); + ql_elm_new(tcache, link); ql_tail_insert(&arena->tcache_ql, tcache, link); - malloc_mutex_unlock(&arena->lock); - } -} - -void -tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena, arena_t *newarena) -{ + cache_bin_array_descriptor_init( + &tcache->cache_bin_array_descriptor, tcache->bins_small, + tcache->bins_large); + ql_tail_insert(&arena->cache_bin_array_descriptor_ql, + &tcache->cache_bin_array_descriptor, link); - tcache_arena_dissociate(tcache, oldarena); - tcache_arena_associate(tcache, newarena); + malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx); + } } -void -tcache_arena_dissociate(tcache_t *tcache, arena_t *arena) -{ - +static void +tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache) { + arena_t *arena = tcache->arena; + assert(arena != NULL); if (config_stats) { /* Unlink from list of extant tcaches. */ - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); if (config_debug) { bool in_ql = false; tcache_t *iter; @@ -288,240 +323,364 @@ tcache_arena_dissociate(tcache_t *tcache, arena_t *arena) assert(in_ql); } ql_remove(&arena->tcache_ql, tcache, link); - tcache_stats_merge(tcache, arena); - malloc_mutex_unlock(&arena->lock); + ql_remove(&arena->cache_bin_array_descriptor_ql, + &tcache->cache_bin_array_descriptor, link); + tcache_stats_merge(tsdn, tcache, arena); + malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx); } + tcache->arena = NULL; } -tcache_t * -tcache_get_hard(tsd_t *tsd) -{ - arena_t *arena; +void +tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { + tcache_arena_dissociate(tsdn, tcache); + tcache_arena_associate(tsdn, tcache, arena); +} + +bool +tsd_tcache_enabled_data_init(tsd_t *tsd) { + /* Called upon tsd initialization. */ + tsd_tcache_enabled_set(tsd, opt_tcache); + tsd_slow_update(tsd); + + if (opt_tcache) { + /* Trigger tcache init. 
*/ + tsd_tcache_data_init(tsd); + } - if (!tcache_enabled_get()) { - if (tsd_nominal(tsd)) - tcache_enabled_set(false); /* Memoize. */ - return (NULL); + return false; +} + +/* Initialize auto tcache (embedded in TSD). */ +static void +tcache_init(tsd_t *tsd, tcache_t *tcache, void *avail_stack) { + memset(&tcache->link, 0, sizeof(ql_elm(tcache_t))); + tcache->prof_accumbytes = 0; + tcache->next_gc_bin = 0; + tcache->arena = NULL; + + ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR); + + size_t stack_offset = 0; + assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0); + memset(tcache->bins_small, 0, sizeof(cache_bin_t) * NBINS); + memset(tcache->bins_large, 0, sizeof(cache_bin_t) * (nhbins - NBINS)); + unsigned i = 0; + for (; i < NBINS; i++) { + tcache->lg_fill_div[i] = 1; + stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *); + /* + * avail points past the available space. Allocations will + * access the slots toward higher addresses (for the benefit of + * prefetch). + */ + tcache_small_bin_get(tcache, i)->avail = + (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset); + } + for (; i < nhbins; i++) { + stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *); + tcache_large_bin_get(tcache, i)->avail = + (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset); } - arena = arena_choose(tsd, NULL); - if (unlikely(arena == NULL)) - return (NULL); - return (tcache_create(tsd, arena)); + assert(stack_offset == stack_nelms * sizeof(void *)); } +/* Initialize auto tcache (embedded in TSD). */ +bool +tsd_tcache_data_init(tsd_t *tsd) { + tcache_t *tcache = tsd_tcachep_get_unsafe(tsd); + assert(tcache_small_bin_get(tcache, 0)->avail == NULL); + size_t size = stack_nelms * sizeof(void *); + /* Avoid false cacheline sharing. */ + size = sz_sa2u(size, CACHELINE); + + void *avail_array = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, + NULL, true, arena_get(TSDN_NULL, 0, true)); + if (avail_array == NULL) { + return true; + } + + tcache_init(tsd, tcache, avail_array); + /* + * Initialization is a bit tricky here. After malloc init is done, all + * threads can rely on arena_choose and associate tcache accordingly. + * However, the thread that does actual malloc bootstrapping relies on + * functional tsd, and it can only rely on a0. In that case, we + * associate its tcache to a0 temporarily, and later on + * arena_choose_hard() will re-associate properly. + */ + tcache->arena = NULL; + arena_t *arena; + if (!malloc_initialized()) { + /* If in initialization, assign to a0. */ + arena = arena_get(tsd_tsdn(tsd), 0, false); + tcache_arena_associate(tsd_tsdn(tsd), tcache, arena); + } else { + arena = arena_choose(tsd, NULL); + /* This may happen if thread.tcache.enabled is used. */ + if (tcache->arena == NULL) { + tcache_arena_associate(tsd_tsdn(tsd), tcache, arena); + } + } + assert(arena == tcache->arena); + + return false; +} + +/* Created manual tcache for tcache.create mallctl. */ tcache_t * -tcache_create(tsd_t *tsd, arena_t *arena) -{ +tcache_create_explicit(tsd_t *tsd) { tcache_t *tcache; size_t size, stack_offset; - unsigned i; - size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins); + size = sizeof(tcache_t); /* Naturally align the pointer stacks. */ size = PTR_CEILING(size); stack_offset = size; size += stack_nelms * sizeof(void *); /* Avoid false cacheline sharing. 
*/ - size = sa2u(size, CACHELINE); - - tcache = ipallocztm(tsd, size, CACHELINE, true, false, true, a0get()); - if (tcache == NULL) - return (NULL); + size = sz_sa2u(size, CACHELINE); - tcache_arena_associate(tcache, arena); - - assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0); - for (i = 0; i < nhbins; i++) { - tcache->tbins[i].lg_fill_div = 1; - tcache->tbins[i].avail = (void **)((uintptr_t)tcache + - (uintptr_t)stack_offset); - stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *); + tcache = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, NULL, true, + arena_get(TSDN_NULL, 0, true)); + if (tcache == NULL) { + return NULL; } - return (tcache); + tcache_init(tsd, tcache, + (void *)((uintptr_t)tcache + (uintptr_t)stack_offset)); + tcache_arena_associate(tsd_tsdn(tsd), tcache, arena_ichoose(tsd, NULL)); + + return tcache; } static void -tcache_destroy(tsd_t *tsd, tcache_t *tcache) -{ - arena_t *arena; - unsigned i; +tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) { + assert(tcache->arena != NULL); - arena = arena_choose(tsd, NULL); - tcache_arena_dissociate(tcache, arena); - - for (i = 0; i < NBINS; i++) { - tcache_bin_t *tbin = &tcache->tbins[i]; + for (unsigned i = 0; i < NBINS; i++) { + cache_bin_t *tbin = tcache_small_bin_get(tcache, i); tcache_bin_flush_small(tsd, tcache, tbin, i, 0); - if (config_stats && tbin->tstats.nrequests != 0) { - arena_bin_t *bin = &arena->bins[i]; - malloc_mutex_lock(&bin->lock); - bin->stats.nrequests += tbin->tstats.nrequests; - malloc_mutex_unlock(&bin->lock); + if (config_stats) { + assert(tbin->tstats.nrequests == 0); } } - - for (; i < nhbins; i++) { - tcache_bin_t *tbin = &tcache->tbins[i]; + for (unsigned i = NBINS; i < nhbins; i++) { + cache_bin_t *tbin = tcache_large_bin_get(tcache, i); tcache_bin_flush_large(tsd, tbin, i, 0, tcache); - if (config_stats && tbin->tstats.nrequests != 0) { - malloc_mutex_lock(&arena->lock); - arena->stats.nrequests_large += tbin->tstats.nrequests; - arena->stats.lstats[i - NBINS].nrequests += - tbin->tstats.nrequests; - malloc_mutex_unlock(&arena->lock); + if (config_stats) { + assert(tbin->tstats.nrequests == 0); } } if (config_prof && tcache->prof_accumbytes > 0 && - arena_prof_accum(arena, tcache->prof_accumbytes)) - prof_idump(); - - idalloctm(tsd, tcache, false, true); + arena_prof_accum(tsd_tsdn(tsd), tcache->arena, + tcache->prof_accumbytes)) { + prof_idump(tsd_tsdn(tsd)); + } } void -tcache_cleanup(tsd_t *tsd) -{ - tcache_t *tcache; - - if (!config_tcache) - return; +tcache_flush(tsd_t *tsd) { + assert(tcache_available(tsd)); + tcache_flush_cache(tsd, tsd_tcachep_get(tsd)); +} - if ((tcache = tsd_tcache_get(tsd)) != NULL) { - tcache_destroy(tsd, tcache); - tsd_tcache_set(tsd, NULL); +static void +tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) { + tcache_flush_cache(tsd, tcache); + tcache_arena_dissociate(tsd_tsdn(tsd), tcache); + + if (tsd_tcache) { + /* Release the avail array for the TSD embedded auto tcache. */ + void *avail_array = + (void *)((uintptr_t)tcache_small_bin_get(tcache, 0)->avail - + (uintptr_t)tcache_bin_info[0].ncached_max * sizeof(void *)); + idalloctm(tsd_tsdn(tsd), avail_array, NULL, NULL, true, true); + } else { + /* Release both the tcache struct and avail array. */ + idalloctm(tsd_tsdn(tsd), tcache, NULL, NULL, true, true); } } +/* For auto tcache (embedded in TSD) only. 
*/ void -tcache_enabled_cleanup(tsd_t *tsd) -{ +tcache_cleanup(tsd_t *tsd) { + tcache_t *tcache = tsd_tcachep_get(tsd); + if (!tcache_available(tsd)) { + assert(tsd_tcache_enabled_get(tsd) == false); + if (config_debug) { + assert(tcache_small_bin_get(tcache, 0)->avail == NULL); + } + return; + } + assert(tsd_tcache_enabled_get(tsd)); + assert(tcache_small_bin_get(tcache, 0)->avail != NULL); - /* Do nothing. */ + tcache_destroy(tsd, tcache, true); + if (config_debug) { + tcache_small_bin_get(tcache, 0)->avail = NULL; + } } -/* Caller must own arena->lock. */ void -tcache_stats_merge(tcache_t *tcache, arena_t *arena) -{ +tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { unsigned i; cassert(config_stats); /* Merge and reset tcache stats. */ for (i = 0; i < NBINS; i++) { - arena_bin_t *bin = &arena->bins[i]; - tcache_bin_t *tbin = &tcache->tbins[i]; - malloc_mutex_lock(&bin->lock); + bin_t *bin = &arena->bins[i]; + cache_bin_t *tbin = tcache_small_bin_get(tcache, i); + malloc_mutex_lock(tsdn, &bin->lock); bin->stats.nrequests += tbin->tstats.nrequests; - malloc_mutex_unlock(&bin->lock); + malloc_mutex_unlock(tsdn, &bin->lock); tbin->tstats.nrequests = 0; } for (; i < nhbins; i++) { - malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS]; - tcache_bin_t *tbin = &tcache->tbins[i]; - arena->stats.nrequests_large += tbin->tstats.nrequests; - lstats->nrequests += tbin->tstats.nrequests; + cache_bin_t *tbin = tcache_large_bin_get(tcache, i); + arena_stats_large_nrequests_add(tsdn, &arena->stats, i, + tbin->tstats.nrequests); tbin->tstats.nrequests = 0; } } -bool -tcaches_create(tsd_t *tsd, unsigned *r_ind) -{ - tcache_t *tcache; - tcaches_t *elm; +static bool +tcaches_create_prep(tsd_t *tsd) { + bool err; + + malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); if (tcaches == NULL) { - tcaches = base_alloc(sizeof(tcache_t *) * - (MALLOCX_TCACHE_MAX+1)); - if (tcaches == NULL) - return (true); + tcaches = base_alloc(tsd_tsdn(tsd), b0get(), sizeof(tcache_t *) + * (MALLOCX_TCACHE_MAX+1), CACHELINE); + if (tcaches == NULL) { + err = true; + goto label_return; + } } - if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) - return (true); - tcache = tcache_create(tsd, a0get()); - if (tcache == NULL) - return (true); + if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) { + err = true; + goto label_return; + } + + err = false; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); + return err; +} +bool +tcaches_create(tsd_t *tsd, unsigned *r_ind) { + witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0); + + bool err; + + if (tcaches_create_prep(tsd)) { + err = true; + goto label_return; + } + + tcache_t *tcache = tcache_create_explicit(tsd); + if (tcache == NULL) { + err = true; + goto label_return; + } + + tcaches_t *elm; + malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); if (tcaches_avail != NULL) { elm = tcaches_avail; tcaches_avail = tcaches_avail->next; elm->tcache = tcache; - *r_ind = elm - tcaches; + *r_ind = (unsigned)(elm - tcaches); } else { elm = &tcaches[tcaches_past]; elm->tcache = tcache; *r_ind = tcaches_past; tcaches_past++; } + malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); - return (false); + err = false; +label_return: + witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0); + return err; } -static void -tcaches_elm_flush(tsd_t *tsd, tcaches_t *elm) -{ +static tcache_t * +tcaches_elm_remove(tsd_t *tsd, tcaches_t *elm) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), &tcaches_mtx); - if (elm->tcache == NULL) - 
return; - tcache_destroy(tsd, elm->tcache); + if (elm->tcache == NULL) { + return NULL; + } + tcache_t *tcache = elm->tcache; elm->tcache = NULL; + return tcache; } void -tcaches_flush(tsd_t *tsd, unsigned ind) -{ - - tcaches_elm_flush(tsd, &tcaches[ind]); +tcaches_flush(tsd_t *tsd, unsigned ind) { + malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); + tcache_t *tcache = tcaches_elm_remove(tsd, &tcaches[ind]); + malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); + if (tcache != NULL) { + tcache_destroy(tsd, tcache, false); + } } void -tcaches_destroy(tsd_t *tsd, unsigned ind) -{ +tcaches_destroy(tsd_t *tsd, unsigned ind) { + malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); tcaches_t *elm = &tcaches[ind]; - tcaches_elm_flush(tsd, elm); + tcache_t *tcache = tcaches_elm_remove(tsd, elm); elm->next = tcaches_avail; tcaches_avail = elm; + malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); + if (tcache != NULL) { + tcache_destroy(tsd, tcache, false); + } } bool -tcache_boot(void) -{ - unsigned i; - - /* - * If necessary, clamp opt_lg_tcache_max, now that large_maxclass is - * known. - */ - if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS) +tcache_boot(tsdn_t *tsdn) { + /* If necessary, clamp opt_lg_tcache_max. */ + if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) < + SMALL_MAXCLASS) { tcache_maxclass = SMALL_MAXCLASS; - else if ((1U << opt_lg_tcache_max) > large_maxclass) - tcache_maxclass = large_maxclass; - else - tcache_maxclass = (1U << opt_lg_tcache_max); + } else { + tcache_maxclass = (ZU(1) << opt_lg_tcache_max); + } + + if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES, + malloc_mutex_rank_exclusive)) { + return true; + } - nhbins = size2index(tcache_maxclass) + 1; + nhbins = sz_size2index(tcache_maxclass) + 1; /* Initialize tcache_bin_info. 
*/ - tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins * - sizeof(tcache_bin_info_t)); - if (tcache_bin_info == NULL) - return (true); + tcache_bin_info = (cache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins + * sizeof(cache_bin_info_t), CACHELINE); + if (tcache_bin_info == NULL) { + return true; + } stack_nelms = 0; + unsigned i; for (i = 0; i < NBINS; i++) { - if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) { + if ((bin_infos[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) { tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_SMALL_MIN; - } else if ((arena_bin_info[i].nregs << 1) <= + } else if ((bin_infos[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) { tcache_bin_info[i].ncached_max = - (arena_bin_info[i].nregs << 1); + (bin_infos[i].nregs << 1); } else { tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_SMALL_MAX; @@ -533,5 +692,26 @@ tcache_boot(void) stack_nelms += tcache_bin_info[i].ncached_max; } - return (false); + return false; +} + +void +tcache_prefork(tsdn_t *tsdn) { + if (!config_prof && opt_tcache) { + malloc_mutex_prefork(tsdn, &tcaches_mtx); + } +} + +void +tcache_postfork_parent(tsdn_t *tsdn) { + if (!config_prof && opt_tcache) { + malloc_mutex_postfork_parent(tsdn, &tcaches_mtx); + } +} + +void +tcache_postfork_child(tsdn_t *tsdn) { + if (!config_prof && opt_tcache) { + malloc_mutex_postfork_child(tsdn, &tcaches_mtx); + } } diff --git a/deps/jemalloc/src/ticker.c b/deps/jemalloc/src/ticker.c new file mode 100644 index 000000000..d7b8cd26c --- /dev/null +++ b/deps/jemalloc/src/ticker.c @@ -0,0 +1,3 @@ +#define JEMALLOC_TICKER_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" diff --git a/deps/jemalloc/src/tsd.c b/deps/jemalloc/src/tsd.c index 9ffe9afef..c1430682d 100644 --- a/deps/jemalloc/src/tsd.c +++ b/deps/jemalloc/src/tsd.c @@ -1,5 +1,10 @@ -#define JEMALLOC_TSD_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_TSD_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/rtree.h" /******************************************************************************/ /* Data. */ @@ -7,29 +12,158 @@ static unsigned ncleanups; static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX]; -malloc_tsd_data(, , tsd_t, TSD_INITIALIZER) +#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP +__thread tsd_t JEMALLOC_TLS_MODEL tsd_tls = TSD_INITIALIZER; +__thread bool JEMALLOC_TLS_MODEL tsd_initialized = false; +bool tsd_booted = false; +#elif (defined(JEMALLOC_TLS)) +__thread tsd_t JEMALLOC_TLS_MODEL tsd_tls = TSD_INITIALIZER; +pthread_key_t tsd_tsd; +bool tsd_booted = false; +#elif (defined(_WIN32)) +DWORD tsd_tsd; +tsd_wrapper_t tsd_boot_wrapper = {false, TSD_INITIALIZER}; +bool tsd_booted = false; +#else + +/* + * This contains a mutex, but it's pretty convenient to allow the mutex code to + * have a dependency on tsd. So we define the struct here, and only refer to it + * by pointer in the header. 
+ */ +struct tsd_init_head_s { + ql_head(tsd_init_block_t) blocks; + malloc_mutex_t lock; +}; + +pthread_key_t tsd_tsd; +tsd_init_head_t tsd_init_head = { + ql_head_initializer(blocks), + MALLOC_MUTEX_INITIALIZER +}; +tsd_wrapper_t tsd_boot_wrapper = { + false, + TSD_INITIALIZER +}; +bool tsd_booted = false; +#endif + /******************************************************************************/ -void * -malloc_tsd_malloc(size_t size) -{ +void +tsd_slow_update(tsd_t *tsd) { + if (tsd_nominal(tsd)) { + if (malloc_slow || !tsd_tcache_enabled_get(tsd) || + tsd_reentrancy_level_get(tsd) > 0) { + tsd->state = tsd_state_nominal_slow; + } else { + tsd->state = tsd_state_nominal; + } + } +} - return (a0malloc(CACHELINE_CEILING(size))); +static bool +tsd_data_init(tsd_t *tsd) { + /* + * We initialize the rtree context first (before the tcache), since the + * tcache initialization depends on it. + */ + rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd)); + + /* + * A nondeterministic seed based on the address of tsd reduces + * the likelihood of lockstep non-uniform cache index + * utilization among identical concurrent processes, but at the + * cost of test repeatability. For debug builds, instead use a + * deterministic seed. + */ + *tsd_offset_statep_get(tsd) = config_debug ? 0 : + (uint64_t)(uintptr_t)tsd; + + return tsd_tcache_enabled_data_init(tsd); } -void -malloc_tsd_dalloc(void *wrapper) -{ +static void +assert_tsd_data_cleanup_done(tsd_t *tsd) { + assert(!tsd_nominal(tsd)); + assert(*tsd_arenap_get_unsafe(tsd) == NULL); + assert(*tsd_iarenap_get_unsafe(tsd) == NULL); + assert(*tsd_arenas_tdata_bypassp_get_unsafe(tsd) == true); + assert(*tsd_arenas_tdatap_get_unsafe(tsd) == NULL); + assert(*tsd_tcache_enabledp_get_unsafe(tsd) == false); + assert(*tsd_prof_tdatap_get_unsafe(tsd) == NULL); +} - a0dalloc(wrapper); +static bool +tsd_data_init_nocleanup(tsd_t *tsd) { + assert(tsd->state == tsd_state_reincarnated || + tsd->state == tsd_state_minimal_initialized); + /* + * During reincarnation, there is no guarantee that the cleanup function + * will be called (deallocation may happen after all tsd destructors). + * We set up tsd in a way that no cleanup is needed. + */ + rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd)); + *tsd_arenas_tdata_bypassp_get(tsd) = true; + *tsd_tcache_enabledp_get_unsafe(tsd) = false; + *tsd_reentrancy_levelp_get(tsd) = 1; + assert_tsd_data_cleanup_done(tsd); + + return false; } -void -malloc_tsd_no_cleanup(void *arg) -{ +tsd_t * +tsd_fetch_slow(tsd_t *tsd, bool minimal) { + assert(!tsd_fast(tsd)); + + if (tsd->state == tsd_state_nominal_slow) { + /* On slow path but no work needed. */ + assert(malloc_slow || !tsd_tcache_enabled_get(tsd) || + tsd_reentrancy_level_get(tsd) > 0 || + *tsd_arenas_tdata_bypassp_get(tsd)); + } else if (tsd->state == tsd_state_uninitialized) { + if (!minimal) { + tsd->state = tsd_state_nominal; + tsd_slow_update(tsd); + /* Trigger cleanup handler registration. */ + tsd_set(tsd); + tsd_data_init(tsd); + } else { + tsd->state = tsd_state_minimal_initialized; + tsd_set(tsd); + tsd_data_init_nocleanup(tsd); + } + } else if (tsd->state == tsd_state_minimal_initialized) { + if (!minimal) { + /* Switch to fully initialized. 
*/ + tsd->state = tsd_state_nominal; + assert(*tsd_reentrancy_levelp_get(tsd) >= 1); + (*tsd_reentrancy_levelp_get(tsd))--; + tsd_slow_update(tsd); + tsd_data_init(tsd); + } else { + assert_tsd_data_cleanup_done(tsd); + } + } else if (tsd->state == tsd_state_purgatory) { + tsd->state = tsd_state_reincarnated; + tsd_set(tsd); + tsd_data_init_nocleanup(tsd); + } else { + assert(tsd->state == tsd_state_reincarnated); + } + + return tsd; +} + +void * +malloc_tsd_malloc(size_t size) { + return a0malloc(CACHELINE_CEILING(size)); +} - not_reached(); +void +malloc_tsd_dalloc(void *wrapper) { + a0dalloc(wrapper); } #if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32) @@ -37,21 +171,22 @@ malloc_tsd_no_cleanup(void *arg) JEMALLOC_EXPORT #endif void -_malloc_thread_cleanup(void) -{ +_malloc_thread_cleanup(void) { bool pending[MALLOC_TSD_CLEANUPS_MAX], again; unsigned i; - for (i = 0; i < ncleanups; i++) + for (i = 0; i < ncleanups; i++) { pending[i] = true; + } do { again = false; for (i = 0; i < ncleanups; i++) { if (pending[i]) { pending[i] = cleanups[i](); - if (pending[i]) + if (pending[i]) { again = true; + } } } } while (again); @@ -59,28 +194,44 @@ _malloc_thread_cleanup(void) #endif void -malloc_tsd_cleanup_register(bool (*f)(void)) -{ - +malloc_tsd_cleanup_register(bool (*f)(void)) { assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX); cleanups[ncleanups] = f; ncleanups++; } +static void +tsd_do_data_cleanup(tsd_t *tsd) { + prof_tdata_cleanup(tsd); + iarena_cleanup(tsd); + arena_cleanup(tsd); + arenas_tdata_cleanup(tsd); + tcache_cleanup(tsd); + witnesses_cleanup(tsd_witness_tsdp_get_unsafe(tsd)); +} + void -tsd_cleanup(void *arg) -{ +tsd_cleanup(void *arg) { tsd_t *tsd = (tsd_t *)arg; switch (tsd->state) { case tsd_state_uninitialized: /* Do nothing. */ break; + case tsd_state_minimal_initialized: + /* This implies the thread only did free() in its life time. */ + /* Fall through. */ + case tsd_state_reincarnated: + /* + * Reincarnated means another destructor deallocated memory + * after the destructor was called. Cleanup isn't required but + * is still called for testing and completeness. + */ + assert_tsd_data_cleanup_done(tsd); + /* Fall through. */ case tsd_state_nominal: -#define O(n, t) \ - n##_cleanup(tsd); -MALLOC_TSD -#undef O + case tsd_state_nominal_slow: + tsd_do_data_cleanup(tsd); tsd->state = tsd_state_purgatory; tsd_set(tsd); break; @@ -92,44 +243,43 @@ MALLOC_TSD * nothing, and do not request another callback. */ break; - case tsd_state_reincarnated: - /* - * Another destructor deallocated memory after this destructor - * was called. Reset state to tsd_state_purgatory and request - * another callback. - */ - tsd->state = tsd_state_purgatory; - tsd_set(tsd); - break; default: not_reached(); } +#ifdef JEMALLOC_JET + test_callback_t test_callback = *tsd_test_callbackp_get_unsafe(tsd); + int *data = tsd_test_datap_get_unsafe(tsd); + if (test_callback != NULL) { + test_callback(data); + } +#endif } -bool -malloc_tsd_boot0(void) -{ +tsd_t * +malloc_tsd_boot0(void) { + tsd_t *tsd; ncleanups = 0; - if (tsd_boot0()) - return (true); - *tsd_arenas_cache_bypassp_get(tsd_fetch()) = true; - return (false); + if (tsd_boot0()) { + return NULL; + } + tsd = tsd_fetch(); + *tsd_arenas_tdata_bypassp_get(tsd) = true; + return tsd; } void -malloc_tsd_boot1(void) -{ - +malloc_tsd_boot1(void) { tsd_boot1(); - *tsd_arenas_cache_bypassp_get(tsd_fetch()) = false; + tsd_t *tsd = tsd_fetch(); + /* malloc_slow has been set properly. Update tsd_slow. 
*/ + tsd_slow_update(tsd); + *tsd_arenas_tdata_bypassp_get(tsd) = false; } #ifdef _WIN32 static BOOL WINAPI -_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) -{ - +_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) { switch (fdwReason) { #ifdef JEMALLOC_LAZY_LOCK case DLL_THREAD_ATTACH: @@ -142,52 +292,60 @@ _tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) default: break; } - return (true); + return true; } +/* + * We need to be able to say "read" here (in the "pragma section"), but have + * hooked "read". We won't read for the rest of the file, so we can get away + * with unhooking. + */ +#ifdef read +# undef read +#endif + #ifdef _MSC_VER # ifdef _M_IX86 # pragma comment(linker, "/INCLUDE:__tls_used") +# pragma comment(linker, "/INCLUDE:_tls_callback") # else # pragma comment(linker, "/INCLUDE:_tls_used") +# pragma comment(linker, "/INCLUDE:tls_callback") # endif # pragma section(".CRT$XLY",long,read) #endif JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used) -static BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL, +BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) = _tls_callback; #endif #if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ !defined(_WIN32)) void * -tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) -{ +tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) { pthread_t self = pthread_self(); tsd_init_block_t *iter; /* Check whether this thread has already inserted into the list. */ - malloc_mutex_lock(&head->lock); + malloc_mutex_lock(TSDN_NULL, &head->lock); ql_foreach(iter, &head->blocks, link) { if (iter->thread == self) { - malloc_mutex_unlock(&head->lock); - return (iter->data); + malloc_mutex_unlock(TSDN_NULL, &head->lock); + return iter->data; } } /* Insert block into list. */ ql_elm_new(block, link); block->thread = self; ql_tail_insert(&head->blocks, block, link); - malloc_mutex_unlock(&head->lock); - return (NULL); + malloc_mutex_unlock(TSDN_NULL, &head->lock); + return NULL; } void -tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block) -{ - - malloc_mutex_lock(&head->lock); +tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block) { + malloc_mutex_lock(TSDN_NULL, &head->lock); ql_remove(&head->blocks, block, link); - malloc_mutex_unlock(&head->lock); + malloc_mutex_unlock(TSDN_NULL, &head->lock); } #endif diff --git a/deps/jemalloc/src/valgrind.c b/deps/jemalloc/src/valgrind.c deleted file mode 100644 index 8e7ef3a2e..000000000 --- a/deps/jemalloc/src/valgrind.c +++ /dev/null @@ -1,34 +0,0 @@ -#include "jemalloc/internal/jemalloc_internal.h" -#ifndef JEMALLOC_VALGRIND -# error "This source file is for Valgrind integration." 
-#endif - -#include <valgrind/memcheck.h> - -void -valgrind_make_mem_noaccess(void *ptr, size_t usize) -{ - - VALGRIND_MAKE_MEM_NOACCESS(ptr, usize); -} - -void -valgrind_make_mem_undefined(void *ptr, size_t usize) -{ - - VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize); -} - -void -valgrind_make_mem_defined(void *ptr, size_t usize) -{ - - VALGRIND_MAKE_MEM_DEFINED(ptr, usize); -} - -void -valgrind_freelike_block(void *ptr, size_t usize) -{ - - VALGRIND_FREELIKE_BLOCK(ptr, usize); -} diff --git a/deps/jemalloc/src/witness.c b/deps/jemalloc/src/witness.c new file mode 100644 index 000000000..f42b72ad1 --- /dev/null +++ b/deps/jemalloc/src/witness.c @@ -0,0 +1,100 @@ +#define JEMALLOC_WITNESS_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/malloc_io.h" + +void +witness_init(witness_t *witness, const char *name, witness_rank_t rank, + witness_comp_t *comp, void *opaque) { + witness->name = name; + witness->rank = rank; + witness->comp = comp; + witness->opaque = opaque; +} + +static void +witness_lock_error_impl(const witness_list_t *witnesses, + const witness_t *witness) { + witness_t *w; + + malloc_printf("<jemalloc>: Lock rank order reversal:"); + ql_foreach(w, witnesses, link) { + malloc_printf(" %s(%u)", w->name, w->rank); + } + malloc_printf(" %s(%u)\n", witness->name, witness->rank); + abort(); +} +witness_lock_error_t *JET_MUTABLE witness_lock_error = witness_lock_error_impl; + +static void +witness_owner_error_impl(const witness_t *witness) { + malloc_printf("<jemalloc>: Should own %s(%u)\n", witness->name, + witness->rank); + abort(); +} +witness_owner_error_t *JET_MUTABLE witness_owner_error = + witness_owner_error_impl; + +static void +witness_not_owner_error_impl(const witness_t *witness) { + malloc_printf("<jemalloc>: Should not own %s(%u)\n", witness->name, + witness->rank); + abort(); +} +witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error = + witness_not_owner_error_impl; + +static void +witness_depth_error_impl(const witness_list_t *witnesses, + witness_rank_t rank_inclusive, unsigned depth) { + witness_t *w; + + malloc_printf("<jemalloc>: Should own %u lock%s of rank >= %u:", depth, + (depth != 1) ? "s" : "", rank_inclusive); + ql_foreach(w, witnesses, link) { + malloc_printf(" %s(%u)", w->name, w->rank); + } + malloc_printf("\n"); + abort(); +} +witness_depth_error_t *JET_MUTABLE witness_depth_error = + witness_depth_error_impl; + +void +witnesses_cleanup(witness_tsd_t *witness_tsd) { + witness_assert_lockless(witness_tsd_tsdn(witness_tsd)); + + /* Do nothing. 
*/ +} + +void +witness_prefork(witness_tsd_t *witness_tsd) { + if (!config_debug) { + return; + } + witness_tsd->forking = true; +} + +void +witness_postfork_parent(witness_tsd_t *witness_tsd) { + if (!config_debug) { + return; + } + witness_tsd->forking = false; +} + +void +witness_postfork_child(witness_tsd_t *witness_tsd) { + if (!config_debug) { + return; + } +#ifndef JEMALLOC_MUTEX_INIT_CB + witness_list_t *witnesses; + + witnesses = &witness_tsd->witnesses; + ql_new(witnesses); +#endif + witness_tsd->forking = false; +} diff --git a/deps/jemalloc/src/zone.c b/deps/jemalloc/src/zone.c index 12e1734a9..23dfdd04a 100644 --- a/deps/jemalloc/src/zone.c +++ b/deps/jemalloc/src/zone.c @@ -1,10 +1,83 @@ -#include "jemalloc/internal/jemalloc_internal.h" +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" + #ifndef JEMALLOC_ZONE # error "This source file is for zones on Darwin (OS X)." #endif +/* Definitions of the following structs in malloc/malloc.h might be too old + * for the built binary to run on newer versions of OSX. So use the newest + * possible version of those structs. + */ +typedef struct _malloc_zone_t { + void *reserved1; + void *reserved2; + size_t (*size)(struct _malloc_zone_t *, const void *); + void *(*malloc)(struct _malloc_zone_t *, size_t); + void *(*calloc)(struct _malloc_zone_t *, size_t, size_t); + void *(*valloc)(struct _malloc_zone_t *, size_t); + void (*free)(struct _malloc_zone_t *, void *); + void *(*realloc)(struct _malloc_zone_t *, void *, size_t); + void (*destroy)(struct _malloc_zone_t *); + const char *zone_name; + unsigned (*batch_malloc)(struct _malloc_zone_t *, size_t, void **, unsigned); + void (*batch_free)(struct _malloc_zone_t *, void **, unsigned); + struct malloc_introspection_t *introspect; + unsigned version; + void *(*memalign)(struct _malloc_zone_t *, size_t, size_t); + void (*free_definite_size)(struct _malloc_zone_t *, void *, size_t); + size_t (*pressure_relief)(struct _malloc_zone_t *, size_t); +} malloc_zone_t; + +typedef struct { + vm_address_t address; + vm_size_t size; +} vm_range_t; + +typedef struct malloc_statistics_t { + unsigned blocks_in_use; + size_t size_in_use; + size_t max_size_in_use; + size_t size_allocated; +} malloc_statistics_t; + +typedef kern_return_t memory_reader_t(task_t, vm_address_t, vm_size_t, void **); + +typedef void vm_range_recorder_t(task_t, void *, unsigned type, vm_range_t *, unsigned); + +typedef struct malloc_introspection_t { + kern_return_t (*enumerator)(task_t, void *, unsigned, vm_address_t, memory_reader_t, vm_range_recorder_t); + size_t (*good_size)(malloc_zone_t *, size_t); + boolean_t (*check)(malloc_zone_t *); + void (*print)(malloc_zone_t *, boolean_t); + void (*log)(malloc_zone_t *, void *); + void (*force_lock)(malloc_zone_t *); + void (*force_unlock)(malloc_zone_t *); + void (*statistics)(malloc_zone_t *, malloc_statistics_t *); + boolean_t (*zone_locked)(malloc_zone_t *); + boolean_t (*enable_discharge_checking)(malloc_zone_t *); + boolean_t (*disable_discharge_checking)(malloc_zone_t *); + void (*discharge)(malloc_zone_t *, void *); +#ifdef __BLOCKS__ + void (*enumerate_discharged_pointers)(malloc_zone_t *, void (^)(void *, void *)); +#else + void *enumerate_unavailable_without_blocks; +#endif + void (*reinit_lock)(malloc_zone_t *); +} malloc_introspection_t; + +extern kern_return_t malloc_get_all_zones(task_t, memory_reader_t, vm_address_t **, unsigned *); + +extern malloc_zone_t 
*malloc_default_zone(void); + +extern void malloc_zone_register(malloc_zone_t *zone); + +extern void malloc_zone_unregister(malloc_zone_t *zone); + /* - * The malloc_default_purgeable_zone function is only available on >= 10.6. + * The malloc_default_purgeable_zone() function is only available on >= 10.6. * We need to check whether it is present at runtime, thus the weak_import. */ extern malloc_zone_t *malloc_default_purgeable_zone(void) @@ -13,30 +86,43 @@ JEMALLOC_ATTR(weak_import); /******************************************************************************/ /* Data. */ -static malloc_zone_t zone; -static struct malloc_introspection_t zone_introspect; +static malloc_zone_t *default_zone, *purgeable_zone; +static malloc_zone_t jemalloc_zone; +static struct malloc_introspection_t jemalloc_zone_introspect; +static pid_t zone_force_lock_pid = -1; /******************************************************************************/ /* Function prototypes for non-inline static functions. */ -static size_t zone_size(malloc_zone_t *zone, void *ptr); +static size_t zone_size(malloc_zone_t *zone, const void *ptr); static void *zone_malloc(malloc_zone_t *zone, size_t size); static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size); static void *zone_valloc(malloc_zone_t *zone, size_t size); static void zone_free(malloc_zone_t *zone, void *ptr); static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size); -#if (JEMALLOC_ZONE_VERSION >= 5) static void *zone_memalign(malloc_zone_t *zone, size_t alignment, -#endif -#if (JEMALLOC_ZONE_VERSION >= 6) size_t size); static void zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size); -#endif -static void *zone_destroy(malloc_zone_t *zone); +static void zone_destroy(malloc_zone_t *zone); +static unsigned zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, + void **results, unsigned num_requested); +static void zone_batch_free(struct _malloc_zone_t *zone, + void **to_be_freed, unsigned num_to_be_freed); +static size_t zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal); static size_t zone_good_size(malloc_zone_t *zone, size_t size); +static kern_return_t zone_enumerator(task_t task, void *data, unsigned type_mask, + vm_address_t zone_address, memory_reader_t reader, + vm_range_recorder_t recorder); +static boolean_t zone_check(malloc_zone_t *zone); +static void zone_print(malloc_zone_t *zone, boolean_t verbose); +static void zone_log(malloc_zone_t *zone, void *address); static void zone_force_lock(malloc_zone_t *zone); static void zone_force_unlock(malloc_zone_t *zone); +static void zone_statistics(malloc_zone_t *zone, + malloc_statistics_t *stats); +static boolean_t zone_locked(malloc_zone_t *zone); +static void zone_reinit_lock(malloc_zone_t *zone); /******************************************************************************/ /* @@ -44,9 +130,7 @@ static void zone_force_unlock(malloc_zone_t *zone); */ static size_t -zone_size(malloc_zone_t *zone, void *ptr) -{ - +zone_size(malloc_zone_t *zone, const void *ptr) { /* * There appear to be places within Darwin (such as setenv(3)) that * cause calls to this function with pointers that *no* zone owns. If @@ -54,40 +138,33 @@ zone_size(malloc_zone_t *zone, void *ptr) * our zone into two parts, and use one as the default allocator and * the other as the default deallocator/reallocator. Since that will * not work in practice, we must check all pointers to assure that they - * reside within a mapped chunk before determining size. 
+ * reside within a mapped extent before determining size. */ - return (ivsalloc(ptr, config_prof)); + return ivsalloc(tsdn_fetch(), ptr); } static void * -zone_malloc(malloc_zone_t *zone, size_t size) -{ - - return (je_malloc(size)); +zone_malloc(malloc_zone_t *zone, size_t size) { + return je_malloc(size); } static void * -zone_calloc(malloc_zone_t *zone, size_t num, size_t size) -{ - - return (je_calloc(num, size)); +zone_calloc(malloc_zone_t *zone, size_t num, size_t size) { + return je_calloc(num, size); } static void * -zone_valloc(malloc_zone_t *zone, size_t size) -{ +zone_valloc(malloc_zone_t *zone, size_t size) { void *ret = NULL; /* Assignment avoids useless compiler warning. */ je_posix_memalign(&ret, PAGE, size); - return (ret); + return ret; } static void -zone_free(malloc_zone_t *zone, void *ptr) -{ - - if (ivsalloc(ptr, config_prof) != 0) { +zone_free(malloc_zone_t *zone, void *ptr) { + if (ivsalloc(tsdn_fetch(), ptr) != 0) { je_free(ptr); return; } @@ -96,155 +173,235 @@ zone_free(malloc_zone_t *zone, void *ptr) } static void * -zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) -{ - - if (ivsalloc(ptr, config_prof) != 0) - return (je_realloc(ptr, size)); +zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) { + if (ivsalloc(tsdn_fetch(), ptr) != 0) { + return je_realloc(ptr, size); + } - return (realloc(ptr, size)); + return realloc(ptr, size); } -#if (JEMALLOC_ZONE_VERSION >= 5) static void * -zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) -{ +zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) { void *ret = NULL; /* Assignment avoids useless compiler warning. */ je_posix_memalign(&ret, alignment, size); - return (ret); + return ret; } -#endif -#if (JEMALLOC_ZONE_VERSION >= 6) static void -zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) -{ +zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) { + size_t alloc_size; - if (ivsalloc(ptr, config_prof) != 0) { - assert(ivsalloc(ptr, config_prof) == size); + alloc_size = ivsalloc(tsdn_fetch(), ptr); + if (alloc_size != 0) { + assert(alloc_size == size); je_free(ptr); return; } free(ptr); } -#endif - -static void * -zone_destroy(malloc_zone_t *zone) -{ +static void +zone_destroy(malloc_zone_t *zone) { /* This function should never be called. 
*/ not_reached(); - return (NULL); +} + +static unsigned +zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results, + unsigned num_requested) { + unsigned i; + + for (i = 0; i < num_requested; i++) { + results[i] = je_malloc(size); + if (!results[i]) + break; + } + + return i; +} + +static void +zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed, + unsigned num_to_be_freed) { + unsigned i; + + for (i = 0; i < num_to_be_freed; i++) { + zone_free(zone, to_be_freed[i]); + to_be_freed[i] = NULL; + } } static size_t -zone_good_size(malloc_zone_t *zone, size_t size) -{ +zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal) { + return 0; +} - if (size == 0) +static size_t +zone_good_size(malloc_zone_t *zone, size_t size) { + if (size == 0) { size = 1; - return (s2u(size)); + } + return sz_s2u(size); } -static void -zone_force_lock(malloc_zone_t *zone) -{ +static kern_return_t +zone_enumerator(task_t task, void *data, unsigned type_mask, + vm_address_t zone_address, memory_reader_t reader, + vm_range_recorder_t recorder) { + return KERN_SUCCESS; +} - if (isthreaded) - jemalloc_prefork(); +static boolean_t +zone_check(malloc_zone_t *zone) { + return true; } static void -zone_force_unlock(malloc_zone_t *zone) -{ +zone_print(malloc_zone_t *zone, boolean_t verbose) { +} - if (isthreaded) - jemalloc_postfork_parent(); +static void +zone_log(malloc_zone_t *zone, void *address) { } -JEMALLOC_ATTR(constructor) -void -register_zone(void) -{ +static void +zone_force_lock(malloc_zone_t *zone) { + if (isthreaded) { + /* + * See the note in zone_force_unlock, below, to see why we need + * this. + */ + assert(zone_force_lock_pid == -1); + zone_force_lock_pid = getpid(); + jemalloc_prefork(); + } +} +static void +zone_force_unlock(malloc_zone_t *zone) { /* - * If something else replaced the system default zone allocator, don't - * register jemalloc's. + * zone_force_lock and zone_force_unlock are the entry points to the + * forking machinery on OS X. The tricky thing is, the child is not + * allowed to unlock mutexes locked in the parent, even if owned by the + * forking thread (and the mutex type we use in OS X will fail an assert + * if we try). In the child, we can get away with reinitializing all + * the mutexes, which has the effect of unlocking them. In the parent, + * doing this would mean we wouldn't wake any waiters blocked on the + * mutexes we unlock. So, we record the pid of the current thread in + * zone_force_lock, and use that to detect if we're in the parent or + * child here, to decide which unlock logic we need. 
*/ - malloc_zone_t *default_zone = malloc_default_zone(); - malloc_zone_t *purgeable_zone = NULL; - if (!default_zone->zone_name || - strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) { - return; + if (isthreaded) { + assert(zone_force_lock_pid != -1); + if (getpid() == zone_force_lock_pid) { + jemalloc_postfork_parent(); + } else { + jemalloc_postfork_child(); + } + zone_force_lock_pid = -1; } +} - zone.size = (void *)zone_size; - zone.malloc = (void *)zone_malloc; - zone.calloc = (void *)zone_calloc; - zone.valloc = (void *)zone_valloc; - zone.free = (void *)zone_free; - zone.realloc = (void *)zone_realloc; - zone.destroy = (void *)zone_destroy; - zone.zone_name = "jemalloc_zone"; - zone.batch_malloc = NULL; - zone.batch_free = NULL; - zone.introspect = &zone_introspect; - zone.version = JEMALLOC_ZONE_VERSION; -#if (JEMALLOC_ZONE_VERSION >= 5) - zone.memalign = zone_memalign; -#endif -#if (JEMALLOC_ZONE_VERSION >= 6) - zone.free_definite_size = zone_free_definite_size; -#endif -#if (JEMALLOC_ZONE_VERSION >= 8) - zone.pressure_relief = NULL; -#endif +static void +zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) { + /* We make no effort to actually fill the values */ + stats->blocks_in_use = 0; + stats->size_in_use = 0; + stats->max_size_in_use = 0; + stats->size_allocated = 0; +} - zone_introspect.enumerator = NULL; - zone_introspect.good_size = (void *)zone_good_size; - zone_introspect.check = NULL; - zone_introspect.print = NULL; - zone_introspect.log = NULL; - zone_introspect.force_lock = (void *)zone_force_lock; - zone_introspect.force_unlock = (void *)zone_force_unlock; - zone_introspect.statistics = NULL; -#if (JEMALLOC_ZONE_VERSION >= 6) - zone_introspect.zone_locked = NULL; -#endif -#if (JEMALLOC_ZONE_VERSION >= 7) - zone_introspect.enable_discharge_checking = NULL; - zone_introspect.disable_discharge_checking = NULL; - zone_introspect.discharge = NULL; +static boolean_t +zone_locked(malloc_zone_t *zone) { + /* Pretend no lock is being held */ + return false; +} + +static void +zone_reinit_lock(malloc_zone_t *zone) { + /* As of OSX 10.12, this function is only used when force_unlock would + * be used if the zone version were < 9. So just use force_unlock. 
*/ + zone_force_unlock(zone); +} + +static void +zone_init(void) { + jemalloc_zone.size = zone_size; + jemalloc_zone.malloc = zone_malloc; + jemalloc_zone.calloc = zone_calloc; + jemalloc_zone.valloc = zone_valloc; + jemalloc_zone.free = zone_free; + jemalloc_zone.realloc = zone_realloc; + jemalloc_zone.destroy = zone_destroy; + jemalloc_zone.zone_name = "jemalloc_zone"; + jemalloc_zone.batch_malloc = zone_batch_malloc; + jemalloc_zone.batch_free = zone_batch_free; + jemalloc_zone.introspect = &jemalloc_zone_introspect; + jemalloc_zone.version = 9; + jemalloc_zone.memalign = zone_memalign; + jemalloc_zone.free_definite_size = zone_free_definite_size; + jemalloc_zone.pressure_relief = zone_pressure_relief; + + jemalloc_zone_introspect.enumerator = zone_enumerator; + jemalloc_zone_introspect.good_size = zone_good_size; + jemalloc_zone_introspect.check = zone_check; + jemalloc_zone_introspect.print = zone_print; + jemalloc_zone_introspect.log = zone_log; + jemalloc_zone_introspect.force_lock = zone_force_lock; + jemalloc_zone_introspect.force_unlock = zone_force_unlock; + jemalloc_zone_introspect.statistics = zone_statistics; + jemalloc_zone_introspect.zone_locked = zone_locked; + jemalloc_zone_introspect.enable_discharge_checking = NULL; + jemalloc_zone_introspect.disable_discharge_checking = NULL; + jemalloc_zone_introspect.discharge = NULL; #ifdef __BLOCKS__ - zone_introspect.enumerate_discharged_pointers = NULL; + jemalloc_zone_introspect.enumerate_discharged_pointers = NULL; #else - zone_introspect.enumerate_unavailable_without_blocks = NULL; -#endif + jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL; #endif + jemalloc_zone_introspect.reinit_lock = zone_reinit_lock; +} + +static malloc_zone_t * +zone_default_get(void) { + malloc_zone_t **zones = NULL; + unsigned int num_zones = 0; /* - * The default purgeable zone is created lazily by OSX's libc. It uses - * the default zone when it is created for "small" allocations - * (< 15 KiB), but assumes the default zone is a scalable_zone. This - * obviously fails when the default zone is the jemalloc zone, so - * malloc_default_purgeable_zone is called beforehand so that the - * default purgeable zone is created when the default zone is still - * a scalable_zone. As purgeable zones only exist on >= 10.6, we need - * to check for the existence of malloc_default_purgeable_zone() at - * run time. + * On OSX 10.12, malloc_default_zone returns a special zone that is not + * present in the list of registered zones. That zone uses a "lite zone" + * if one is present (apparently enabled when malloc stack logging is + * enabled), or the first registered zone otherwise. In practice this + * means unless malloc stack logging is enabled, the first registered + * zone is the default. So get the list of zones to get the first one, + * instead of relying on malloc_default_zone. */ - if (malloc_default_purgeable_zone != NULL) - purgeable_zone = malloc_default_purgeable_zone(); + if (KERN_SUCCESS != malloc_get_all_zones(0, NULL, + (vm_address_t**)&zones, &num_zones)) { + /* + * Reset the value in case the failure happened after it was + * set. + */ + num_zones = 0; + } - /* Register the custom zone. At this point it won't be the default. */ - malloc_zone_register(&zone); + if (num_zones) { + return zones[0]; + } + + return malloc_default_zone(); +} + +/* As written, this function can only promote jemalloc_zone. 
*/ +static void +zone_promote(void) { + malloc_zone_t *zone; do { - default_zone = malloc_default_zone(); /* * Unregister and reregister the default zone. On OSX >= 10.6, * unregistering takes the last registered zone and places it @@ -255,6 +412,7 @@ register_zone(void) */ malloc_zone_unregister(default_zone); malloc_zone_register(default_zone); + /* * On OSX 10.6, having the default purgeable zone appear before * the default zone makes some things crash because it thinks it @@ -266,9 +424,46 @@ register_zone(void) * above, i.e. the default zone. Registering it again then puts * it at the end, obviously after the default zone. */ - if (purgeable_zone) { + if (purgeable_zone != NULL) { malloc_zone_unregister(purgeable_zone); malloc_zone_register(purgeable_zone); } - } while (malloc_default_zone() != &zone); + + zone = zone_default_get(); + } while (zone != &jemalloc_zone); +} + +JEMALLOC_ATTR(constructor) +void +zone_register(void) { + /* + * If something else replaced the system default zone allocator, don't + * register jemalloc's. + */ + default_zone = zone_default_get(); + if (!default_zone->zone_name || strcmp(default_zone->zone_name, + "DefaultMallocZone") != 0) { + return; + } + + /* + * The default purgeable zone is created lazily by OSX's libc. It uses + * the default zone when it is created for "small" allocations + * (< 15 KiB), but assumes the default zone is a scalable_zone. This + * obviously fails when the default zone is the jemalloc zone, so + * malloc_default_purgeable_zone() is called beforehand so that the + * default purgeable zone is created when the default zone is still + * a scalable_zone. As purgeable zones only exist on >= 10.6, we need + * to check for the existence of malloc_default_purgeable_zone() at + * run time. + */ + purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL : + malloc_default_purgeable_zone(); + + /* Register the custom zone. At this point it won't be the default. */ + zone_init(); + malloc_zone_register(&jemalloc_zone); + + /* Promote the custom zone to be default. */ + zone_promote(); } |
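The pid check in zone_force_lock()/zone_force_unlock() above exists because the Darwin zone introspection interface exposes only a single force_unlock hook, which the fork machinery invokes in both the parent and the child. The following minimal, self-contained sketch illustrates that idiom outside of jemalloc; it is not jemalloc code, and the names (prefork_pid, demo_lock, demo_prefork, demo_postfork) are hypothetical. It registers the same post-fork handler for parent and child via pthread_atfork() to mimic the single-hook constraint, records getpid() before the fork, and compares it afterwards to decide whether to unlock (parent) or reinitialize (child).

/*
 * Sketch only: the fork-detection idiom used by zone_force_lock() and
 * zone_force_unlock(), demonstrated with a plain pthread mutex.
 */
#include <pthread.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static pid_t prefork_pid = -1;			/* analogue of zone_force_lock_pid */
static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

static void
demo_prefork(void) {
	prefork_pid = getpid();			/* remember which process locked */
	pthread_mutex_lock(&demo_lock);		/* analogue of jemalloc_prefork() */
}

static void
demo_postfork(void) {
	if (getpid() == prefork_pid) {
		/* Parent: a real unlock, so blocked waiters are woken. */
		pthread_mutex_unlock(&demo_lock);
	} else {
		/*
		 * Child: it may not unlock a mutex locked in the parent, so
		 * reinitialize it instead, which is effectively what a
		 * postfork_child path does to "unlock" its mutexes.
		 */
		pthread_mutex_init(&demo_lock, NULL);
	}
	prefork_pid = -1;
}

int
main(void) {
	/* Same handler for parent and child, mimicking the single hook. */
	pthread_atfork(demo_prefork, demo_postfork, demo_postfork);

	pid_t pid = fork();
	if (pid == 0) {
		pthread_mutex_lock(&demo_lock);	/* child: no deadlock */
		pthread_mutex_unlock(&demo_lock);
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	pthread_mutex_lock(&demo_lock);		/* parent: was genuinely unlocked */
	pthread_mutex_unlock(&demo_lock);
	puts("parent and child both reacquired demo_lock");
	return 0;
}

Compiled with -pthread, the child reacquires the reinitialized mutex while the parent's waiters see a normal unlock, which is the behavior the zone hooks above need to preserve across fork().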