Diffstat (limited to 'extra/jemalloc/src')
-rw-r--r--  extra/jemalloc/src/arena.c       2385
-rw-r--r--  extra/jemalloc/src/atomic.c         2
-rw-r--r--  extra/jemalloc/src/base.c         142
-rw-r--r--  extra/jemalloc/src/bitmap.c        90
-rw-r--r--  extra/jemalloc/src/chunk.c        385
-rw-r--r--  extra/jemalloc/src/chunk_dss.c    197
-rw-r--r--  extra/jemalloc/src/chunk_mmap.c   210
-rw-r--r--  extra/jemalloc/src/ckh.c          563
-rw-r--r--  extra/jemalloc/src/ctl.c         1673
-rw-r--r--  extra/jemalloc/src/extent.c        39
-rw-r--r--  extra/jemalloc/src/hash.c           2
-rw-r--r--  extra/jemalloc/src/huge.c         313
-rw-r--r--  extra/jemalloc/src/jemalloc.c    1868
-rw-r--r--  extra/jemalloc/src/mb.c             2
-rw-r--r--  extra/jemalloc/src/mutex.c        149
-rw-r--r--  extra/jemalloc/src/prof.c        1283
-rw-r--r--  extra/jemalloc/src/quarantine.c   190
-rw-r--r--  extra/jemalloc/src/rtree.c         67
-rw-r--r--  extra/jemalloc/src/stats.c        549
-rw-r--r--  extra/jemalloc/src/tcache.c       476
-rw-r--r--  extra/jemalloc/src/tsd.c          107
-rw-r--r--  extra/jemalloc/src/util.c         641
-rw-r--r--  extra/jemalloc/src/zone.c         258
23 files changed, 0 insertions, 11591 deletions
diff --git a/extra/jemalloc/src/arena.c b/extra/jemalloc/src/arena.c
deleted file mode 100644
index d28b629a1e1..00000000000
--- a/extra/jemalloc/src/arena.c
+++ /dev/null
@@ -1,2385 +0,0 @@
-#define JEMALLOC_ARENA_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-/* Data. */
-
-ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
-arena_bin_info_t arena_bin_info[NBINS];
-
-JEMALLOC_ALIGNED(CACHELINE)
-const uint8_t small_size2bin[] = {
-#define S2B_8(i) i,
-#define S2B_16(i) S2B_8(i) S2B_8(i)
-#define S2B_32(i) S2B_16(i) S2B_16(i)
-#define S2B_64(i) S2B_32(i) S2B_32(i)
-#define S2B_128(i) S2B_64(i) S2B_64(i)
-#define S2B_256(i) S2B_128(i) S2B_128(i)
-#define S2B_512(i) S2B_256(i) S2B_256(i)
-#define S2B_1024(i) S2B_512(i) S2B_512(i)
-#define S2B_2048(i) S2B_1024(i) S2B_1024(i)
-#define S2B_4096(i) S2B_2048(i) S2B_2048(i)
-#define S2B_8192(i) S2B_4096(i) S2B_4096(i)
-#define SIZE_CLASS(bin, delta, size) \
- S2B_##delta(bin)
- SIZE_CLASSES
-#undef S2B_8
-#undef S2B_16
-#undef S2B_32
-#undef S2B_64
-#undef S2B_128
-#undef S2B_256
-#undef S2B_512
-#undef S2B_1024
-#undef S2B_2048
-#undef S2B_4096
-#undef S2B_8192
-#undef SIZE_CLASS
-};
-
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static void arena_avail_insert(arena_t *arena, arena_chunk_t *chunk,
- size_t pageind, size_t npages, bool maybe_adjac_pred,
- bool maybe_adjac_succ);
-static void arena_avail_remove(arena_t *arena, arena_chunk_t *chunk,
- size_t pageind, size_t npages, bool maybe_adjac_pred,
- bool maybe_adjac_succ);
-static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
- bool large, size_t binind, bool zero);
-static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
-static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
-static arena_run_t *arena_run_alloc_helper(arena_t *arena, size_t size,
- bool large, size_t binind, bool zero);
-static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
- size_t binind, bool zero);
-static arena_chunk_t *chunks_dirty_iter_cb(arena_chunk_tree_t *tree,
- arena_chunk_t *chunk, void *arg);
-static void arena_purge(arena_t *arena, bool all);
-static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
- bool cleaned);
-static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
- arena_run_t *run, size_t oldsize, size_t newsize);
-static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
- arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
-static arena_run_t *arena_bin_runs_first(arena_bin_t *bin);
-static void arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run);
-static void arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run);
-static arena_run_t *arena_bin_nonfull_run_tryget(arena_bin_t *bin);
-static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
-static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
-static void arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
- arena_bin_t *bin);
-static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
- arena_run_t *run, arena_bin_t *bin);
-static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
- arena_run_t *run, arena_bin_t *bin);
-static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
- void *ptr, size_t oldsize, size_t size);
-static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
- void *ptr, size_t oldsize, size_t size, size_t extra, bool zero);
-static bool arena_ralloc_large(void *ptr, size_t oldsize, size_t size,
- size_t extra, bool zero);
-static size_t bin_info_run_size_calc(arena_bin_info_t *bin_info,
- size_t min_run_size);
-static void bin_info_init(void);
-
-/******************************************************************************/
-
-static inline int
-arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
-{
- uintptr_t a_mapelm = (uintptr_t)a;
- uintptr_t b_mapelm = (uintptr_t)b;
-
- assert(a != NULL);
- assert(b != NULL);
-
- return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
-}
-
-/* Generate red-black tree functions. */
-rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t,
- u.rb_link, arena_run_comp)
-
-static inline int
-arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
-{
- int ret;
- size_t a_size = a->bits & ~PAGE_MASK;
- size_t b_size = b->bits & ~PAGE_MASK;
-
- ret = (a_size > b_size) - (a_size < b_size);
- if (ret == 0) {
- uintptr_t a_mapelm, b_mapelm;
-
- if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY)
- a_mapelm = (uintptr_t)a;
- else {
- /*
- * Treat keys as though they are lower than anything
- * else.
- */
- a_mapelm = 0;
- }
- b_mapelm = (uintptr_t)b;
-
- ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
- }
-
- return (ret);
-}
-
-/* Generate red-black tree functions. */
-rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
- u.rb_link, arena_avail_comp)
-
-static inline int
-arena_chunk_dirty_comp(arena_chunk_t *a, arena_chunk_t *b)
-{
-
- assert(a != NULL);
- assert(b != NULL);
-
- /*
- * Short-circuit for self comparison. The following comparison code
- * would come to the same result, but at the cost of executing the slow
- * path.
- */
- if (a == b)
- return (0);
-
- /*
- * Order such that chunks with higher fragmentation are "less than"
- * those with lower fragmentation -- purging order is from "least" to
- * "greatest". Fragmentation is measured as:
- *
- * mean current avail run size
- * --------------------------------
- * mean defragmented avail run size
- *
- *              navail
- *           -----------
- *           nruns_avail          nruns_avail-nruns_adjac
- * = =========================== = -----------------------
- *      navail                         nruns_avail
- *      -----------------------
- *      nruns_avail-nruns_adjac
- *
- * The following code multiplies away the denominator prior to
- * comparison, in order to avoid division.
- *
- */
- {
- size_t a_val = (a->nruns_avail - a->nruns_adjac) *
- b->nruns_avail;
- size_t b_val = (b->nruns_avail - b->nruns_adjac) *
- a->nruns_avail;
-
- if (a_val < b_val)
- return (1);
- if (a_val > b_val)
- return (-1);
- }
- /*
- * Break ties by chunk address. For fragmented chunks, report lower
- * addresses as "lower", so that fragmentation reduction happens first
- * at lower addresses. However, use the opposite ordering for
- * unfragmented chunks, in order to increase the chances of
- * re-allocating dirty runs.
- */
- {
- uintptr_t a_chunk = (uintptr_t)a;
- uintptr_t b_chunk = (uintptr_t)b;
- int ret = ((a_chunk > b_chunk) - (a_chunk < b_chunk));
- if (a->nruns_adjac == 0) {
- assert(b->nruns_adjac == 0);
- ret = -ret;
- }
- return (ret);
- }
-}
-
-/* Generate red-black tree functions. */
-rb_gen(static UNUSED, arena_chunk_dirty_, arena_chunk_tree_t, arena_chunk_t,
- dirty_link, arena_chunk_dirty_comp)
-
-static inline bool
-arena_avail_adjac_pred(arena_chunk_t *chunk, size_t pageind)
-{
- bool ret;
-
- if (pageind-1 < map_bias)
- ret = false;
- else {
- ret = (arena_mapbits_allocated_get(chunk, pageind-1) == 0);
- assert(ret == false || arena_mapbits_dirty_get(chunk,
- pageind-1) != arena_mapbits_dirty_get(chunk, pageind));
- }
- return (ret);
-}
-
-static inline bool
-arena_avail_adjac_succ(arena_chunk_t *chunk, size_t pageind, size_t npages)
-{
- bool ret;
-
- if (pageind+npages == chunk_npages)
- ret = false;
- else {
- assert(pageind+npages < chunk_npages);
- ret = (arena_mapbits_allocated_get(chunk, pageind+npages) == 0);
- assert(ret == false || arena_mapbits_dirty_get(chunk, pageind)
- != arena_mapbits_dirty_get(chunk, pageind+npages));
- }
- return (ret);
-}
-
-static inline bool
-arena_avail_adjac(arena_chunk_t *chunk, size_t pageind, size_t npages)
-{
-
- return (arena_avail_adjac_pred(chunk, pageind) ||
- arena_avail_adjac_succ(chunk, pageind, npages));
-}
-
-static void
-arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
- size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
-{
-
- assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
- LG_PAGE));
-
- /*
- * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
- * removed and reinserted even if the run to be inserted is clean.
- */
- if (chunk->ndirty != 0)
- arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);
-
- if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
- chunk->nruns_adjac++;
- if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
- chunk->nruns_adjac++;
- chunk->nruns_avail++;
- assert(chunk->nruns_avail > chunk->nruns_adjac);
-
- if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
- arena->ndirty += npages;
- chunk->ndirty += npages;
- }
- if (chunk->ndirty != 0)
- arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);
-
- arena_avail_tree_insert(&arena->runs_avail, arena_mapp_get(chunk,
- pageind));
-}
-
-static void
-arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
- size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
-{
-
- assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
- LG_PAGE));
-
- /*
- * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
- * removed and reinserted even if the run to be removed is clean.
- */
- if (chunk->ndirty != 0)
- arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);
-
- if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
- chunk->nruns_adjac--;
- if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
- chunk->nruns_adjac--;
- chunk->nruns_avail--;
- assert(chunk->nruns_avail > chunk->nruns_adjac || (chunk->nruns_avail
- == 0 && chunk->nruns_adjac == 0));
-
- if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
- arena->ndirty -= npages;
- chunk->ndirty -= npages;
- }
- if (chunk->ndirty != 0)
- arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);
-
- arena_avail_tree_remove(&arena->runs_avail, arena_mapp_get(chunk,
- pageind));
-}
-
-static inline void *
-arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
-{
- void *ret;
- unsigned regind;
- bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
- (uintptr_t)bin_info->bitmap_offset);
-
- assert(run->nfree > 0);
- assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false);
-
- regind = bitmap_sfu(bitmap, &bin_info->bitmap_info);
- ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
- (uintptr_t)(bin_info->reg_interval * regind));
- run->nfree--;
- if (regind == run->nextind)
- run->nextind++;
- assert(regind < run->nextind);
- return (ret);
-}
-
-static inline void
-arena_run_reg_dalloc(arena_run_t *run, void *ptr)
-{
- arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- size_t mapbits = arena_mapbits_get(chunk, pageind);
- size_t binind = arena_ptr_small_binind_get(ptr, mapbits);
- arena_bin_info_t *bin_info = &arena_bin_info[binind];
- unsigned regind = arena_run_regind(run, bin_info, ptr);
- bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
- (uintptr_t)bin_info->bitmap_offset);
-
- assert(run->nfree < bin_info->nregs);
- /* Freeing an interior pointer can cause assertion failure. */
- assert(((uintptr_t)ptr - ((uintptr_t)run +
- (uintptr_t)bin_info->reg0_offset)) %
- (uintptr_t)bin_info->reg_interval == 0);
- assert((uintptr_t)ptr >= (uintptr_t)run +
- (uintptr_t)bin_info->reg0_offset);
- /* Freeing an unallocated pointer can cause assertion failure. */
- assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind));
-
- bitmap_unset(bitmap, &bin_info->bitmap_info, regind);
- run->nfree++;
-}
-
-static inline void
-arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
-{
-
- VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
- LG_PAGE)), (npages << LG_PAGE));
- memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
- (npages << LG_PAGE));
-}
-
-static inline void
-arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
-{
-
- VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind <<
- LG_PAGE)), PAGE);
-}
-
-static inline void
-arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
-{
- size_t i;
- UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
-
- arena_run_page_mark_zeroed(chunk, run_ind);
- for (i = 0; i < PAGE / sizeof(size_t); i++)
- assert(p[i] == 0);
-}
-
-static void
-arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
- size_t binind, bool zero)
-{
- arena_chunk_t *chunk;
- size_t run_ind, total_pages, need_pages, rem_pages, i;
- size_t flag_dirty;
-
- assert((large && binind == BININD_INVALID) || (large == false && binind
- != BININD_INVALID));
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
- flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
- total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
- LG_PAGE;
- assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
- flag_dirty);
- need_pages = (size >> LG_PAGE);
- assert(need_pages > 0);
- assert(need_pages <= total_pages);
- rem_pages = total_pages - need_pages;
-
- arena_avail_remove(arena, chunk, run_ind, total_pages, true, true);
- if (config_stats) {
- /*
- * Update stats_cactive if nactive is crossing a chunk
- * multiple.
- */
- size_t cactive_diff = CHUNK_CEILING((arena->nactive +
- need_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
- LG_PAGE);
- if (cactive_diff != 0)
- stats_cactive_add(cactive_diff);
- }
- arena->nactive += need_pages;
-
- /* Keep track of trailing unused pages for later use. */
- if (rem_pages > 0) {
- if (flag_dirty != 0) {
- arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
- (rem_pages << LG_PAGE), CHUNK_MAP_DIRTY);
- arena_mapbits_unallocated_set(chunk,
- run_ind+total_pages-1, (rem_pages << LG_PAGE),
- CHUNK_MAP_DIRTY);
- } else {
- arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
- (rem_pages << LG_PAGE),
- arena_mapbits_unzeroed_get(chunk,
- run_ind+need_pages));
- arena_mapbits_unallocated_set(chunk,
- run_ind+total_pages-1, (rem_pages << LG_PAGE),
- arena_mapbits_unzeroed_get(chunk,
- run_ind+total_pages-1));
- }
- arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages,
- false, true);
- }
-
- /*
- * Update the page map separately for large vs. small runs, since it is
- * possible to avoid iteration for large mallocs.
- */
- if (large) {
- if (zero) {
- if (flag_dirty == 0) {
- /*
- * The run is clean, so some pages may be
- * zeroed (i.e. never before touched).
- */
- for (i = 0; i < need_pages; i++) {
- if (arena_mapbits_unzeroed_get(chunk,
- run_ind+i) != 0) {
- arena_run_zero(chunk, run_ind+i,
- 1);
- } else if (config_debug) {
- arena_run_page_validate_zeroed(
- chunk, run_ind+i);
- } else {
- arena_run_page_mark_zeroed(
- chunk, run_ind+i);
- }
- }
- } else {
- /*
- * The run is dirty, so all pages must be
- * zeroed.
- */
- arena_run_zero(chunk, run_ind, need_pages);
- }
- } else {
- VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
- (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
- }
-
- /*
- * Set the last element first, in case the run only contains one
- * page (i.e. both statements set the same element).
- */
- arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0,
- flag_dirty);
- arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
- } else {
- assert(zero == false);
- /*
- * Propagate the dirty and unzeroed flags to the allocated
- * small run, so that arena_dalloc_bin_run() has the ability to
- * conditionally trim clean pages.
- */
- arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty);
- /*
- * The first page will always be dirtied during small run
- * initialization, so a validation failure here would not
- * actually cause an observable failure.
- */
- if (config_debug && flag_dirty == 0 &&
- arena_mapbits_unzeroed_get(chunk, run_ind) == 0)
- arena_run_page_validate_zeroed(chunk, run_ind);
- for (i = 1; i < need_pages - 1; i++) {
- arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
- if (config_debug && flag_dirty == 0 &&
- arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0) {
- arena_run_page_validate_zeroed(chunk,
- run_ind+i);
- }
- }
- arena_mapbits_small_set(chunk, run_ind+need_pages-1,
- need_pages-1, binind, flag_dirty);
- if (config_debug && flag_dirty == 0 &&
- arena_mapbits_unzeroed_get(chunk, run_ind+need_pages-1) ==
- 0) {
- arena_run_page_validate_zeroed(chunk,
- run_ind+need_pages-1);
- }
- VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
- (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
- }
-}
-
-static arena_chunk_t *
-arena_chunk_alloc(arena_t *arena)
-{
- arena_chunk_t *chunk;
- size_t i;
-
- if (arena->spare != NULL) {
- chunk = arena->spare;
- arena->spare = NULL;
-
- assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
- assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
- assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
- arena_maxclass);
- assert(arena_mapbits_unallocated_size_get(chunk,
- chunk_npages-1) == arena_maxclass);
- assert(arena_mapbits_dirty_get(chunk, map_bias) ==
- arena_mapbits_dirty_get(chunk, chunk_npages-1));
- } else {
- bool zero;
- size_t unzeroed;
-
- zero = false;
- malloc_mutex_unlock(&arena->lock);
- chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize,
- false, &zero, arena->dss_prec);
- malloc_mutex_lock(&arena->lock);
- if (chunk == NULL)
- return (NULL);
- if (config_stats)
- arena->stats.mapped += chunksize;
-
- chunk->arena = arena;
-
- /*
- * Claim that no pages are in use, since the header is merely
- * overhead.
- */
- chunk->ndirty = 0;
-
- chunk->nruns_avail = 0;
- chunk->nruns_adjac = 0;
-
- /*
- * Initialize the map to contain one maximal free untouched run.
- * Mark the pages as zeroed iff chunk_alloc() returned a zeroed
- * chunk.
- */
- unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
- arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass,
- unzeroed);
- /*
- * There is no need to initialize the internal page map entries
- * unless the chunk is not zeroed.
- */
- if (zero == false) {
- VALGRIND_MAKE_MEM_UNDEFINED(
- (void *)arena_mapp_get(chunk, map_bias+1),
- (size_t)((uintptr_t) arena_mapp_get(chunk,
- chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk,
- map_bias+1)));
- for (i = map_bias+1; i < chunk_npages-1; i++)
- arena_mapbits_unzeroed_set(chunk, i, unzeroed);
- } else {
- VALGRIND_MAKE_MEM_DEFINED(
- (void *)arena_mapp_get(chunk, map_bias+1),
- (size_t)((uintptr_t) arena_mapp_get(chunk,
- chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk,
- map_bias+1)));
- if (config_debug) {
- for (i = map_bias+1; i < chunk_npages-1; i++) {
- assert(arena_mapbits_unzeroed_get(chunk,
- i) == unzeroed);
- }
- }
- }
- arena_mapbits_unallocated_set(chunk, chunk_npages-1,
- arena_maxclass, unzeroed);
- }
-
- /* Insert the run into the runs_avail tree. */
- arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias,
- false, false);
-
- return (chunk);
-}
-
-static void
-arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
-{
- assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
- assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
- assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
- arena_maxclass);
- assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
- arena_maxclass);
- assert(arena_mapbits_dirty_get(chunk, map_bias) ==
- arena_mapbits_dirty_get(chunk, chunk_npages-1));
-
- /*
- * Remove run from the runs_avail tree, so that the arena does not use
- * it.
- */
- arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias,
- false, false);
-
- if (arena->spare != NULL) {
- arena_chunk_t *spare = arena->spare;
-
- arena->spare = chunk;
- malloc_mutex_unlock(&arena->lock);
- chunk_dealloc((void *)spare, chunksize, true);
- malloc_mutex_lock(&arena->lock);
- if (config_stats)
- arena->stats.mapped -= chunksize;
- } else
- arena->spare = chunk;
-}
-
-static arena_run_t *
-arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind,
- bool zero)
-{
- arena_run_t *run;
- arena_chunk_map_t *mapelm, key;
-
- key.bits = size | CHUNK_MAP_KEY;
- mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
- if (mapelm != NULL) {
- arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
- size_t pageind = (((uintptr_t)mapelm -
- (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
- + map_bias;
-
- run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
- LG_PAGE));
- arena_run_split(arena, run, size, large, binind, zero);
- return (run);
- }
-
- return (NULL);
-}
-
-static arena_run_t *
-arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
- bool zero)
-{
- arena_chunk_t *chunk;
- arena_run_t *run;
-
- assert(size <= arena_maxclass);
- assert((size & PAGE_MASK) == 0);
- assert((large && binind == BININD_INVALID) || (large == false && binind
- != BININD_INVALID));
-
- /* Search the arena's chunks for the lowest best fit. */
- run = arena_run_alloc_helper(arena, size, large, binind, zero);
- if (run != NULL)
- return (run);
-
- /*
- * No usable runs. Create a new chunk from which to allocate the run.
- */
- chunk = arena_chunk_alloc(arena);
- if (chunk != NULL) {
- run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
- arena_run_split(arena, run, size, large, binind, zero);
- return (run);
- }
-
- /*
- * arena_chunk_alloc() failed, but another thread may have made
- * sufficient memory available while this one dropped arena->lock in
- * arena_chunk_alloc(), so search one more time.
- */
- return (arena_run_alloc_helper(arena, size, large, binind, zero));
-}
-
-static inline void
-arena_maybe_purge(arena_t *arena)
-{
- size_t npurgeable, threshold;
-
- /* Don't purge if the option is disabled. */
- if (opt_lg_dirty_mult < 0)
- return;
- /* Don't purge if all dirty pages are already being purged. */
- if (arena->ndirty <= arena->npurgatory)
- return;
- npurgeable = arena->ndirty - arena->npurgatory;
- threshold = (arena->nactive >> opt_lg_dirty_mult);
- /*
- * Don't purge unless the number of purgeable pages exceeds the
- * threshold.
- */
- if (npurgeable <= threshold)
- return;
-
- arena_purge(arena, false);
-}
-
-static inline size_t
-arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
-{
- size_t npurged;
- ql_head(arena_chunk_map_t) mapelms;
- arena_chunk_map_t *mapelm;
- size_t pageind, npages;
- size_t nmadvise;
-
- ql_new(&mapelms);
-
- /*
- * If chunk is the spare, temporarily re-allocate it, 1) so that its
- * run is reinserted into runs_avail, and 2) so that it cannot be
- * completely discarded by another thread while arena->lock is dropped
- * by this thread. Note that the arena_run_dalloc() call will
- * implicitly deallocate the chunk, so no explicit action is required
- * in this function to deallocate the chunk.
- *
- * Note that once a chunk contains dirty pages, it cannot again contain
- * a single run unless 1) it is a dirty run, or 2) this function purges
- * dirty pages and causes the transition to a single clean run. Thus
- * (chunk == arena->spare) is possible, but it is not possible for
- * this function to be called on the spare unless it contains a dirty
- * run.
- */
- if (chunk == arena->spare) {
- assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
- assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0);
-
- arena_chunk_alloc(arena);
- }
-
- if (config_stats)
- arena->stats.purged += chunk->ndirty;
-
- /*
- * Operate on all dirty runs if there is no clean/dirty run
- * fragmentation.
- */
- if (chunk->nruns_adjac == 0)
- all = true;
-
- /*
- * Temporarily allocate free dirty runs within chunk. If all is false,
- * only operate on dirty runs that are fragments; otherwise operate on
- * all dirty runs.
- */
- for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
- mapelm = arena_mapp_get(chunk, pageind);
- if (arena_mapbits_allocated_get(chunk, pageind) == 0) {
- size_t run_size =
- arena_mapbits_unallocated_size_get(chunk, pageind);
-
- npages = run_size >> LG_PAGE;
- assert(pageind + npages <= chunk_npages);
- assert(arena_mapbits_dirty_get(chunk, pageind) ==
- arena_mapbits_dirty_get(chunk, pageind+npages-1));
-
- if (arena_mapbits_dirty_get(chunk, pageind) != 0 &&
- (all || arena_avail_adjac(chunk, pageind,
- npages))) {
- arena_run_t *run = (arena_run_t *)((uintptr_t)
- chunk + (uintptr_t)(pageind << LG_PAGE));
-
- arena_run_split(arena, run, run_size, true,
- BININD_INVALID, false);
- /* Append to list for later processing. */
- ql_elm_new(mapelm, u.ql_link);
- ql_tail_insert(&mapelms, mapelm, u.ql_link);
- }
- } else {
- /* Skip run. */
- if (arena_mapbits_large_get(chunk, pageind) != 0) {
- npages = arena_mapbits_large_size_get(chunk,
- pageind) >> LG_PAGE;
- } else {
- size_t binind;
- arena_bin_info_t *bin_info;
- arena_run_t *run = (arena_run_t *)((uintptr_t)
- chunk + (uintptr_t)(pageind << LG_PAGE));
-
- assert(arena_mapbits_small_runind_get(chunk,
- pageind) == 0);
- binind = arena_bin_index(arena, run->bin);
- bin_info = &arena_bin_info[binind];
- npages = bin_info->run_size >> LG_PAGE;
- }
- }
- }
- assert(pageind == chunk_npages);
- assert(chunk->ndirty == 0 || all == false);
- assert(chunk->nruns_adjac == 0);
-
- malloc_mutex_unlock(&arena->lock);
- if (config_stats)
- nmadvise = 0;
- npurged = 0;
- ql_foreach(mapelm, &mapelms, u.ql_link) {
- bool unzeroed;
- size_t flag_unzeroed, i;
-
- pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
- sizeof(arena_chunk_map_t)) + map_bias;
- npages = arena_mapbits_large_size_get(chunk, pageind) >>
- LG_PAGE;
- assert(pageind + npages <= chunk_npages);
- unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind <<
- LG_PAGE)), (npages << LG_PAGE));
- flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;
- /*
- * Set the unzeroed flag for all pages, now that pages_purge()
- * has returned whether the pages were zeroed as a side effect
- * of purging. This chunk map modification is safe even though
- * the arena mutex isn't currently owned by this thread,
- * because the run is marked as allocated, thus protecting it
- * from being modified by any other thread. As long as these
- * writes don't perturb the first and last elements'
- * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
- */
- for (i = 0; i < npages; i++) {
- arena_mapbits_unzeroed_set(chunk, pageind+i,
- flag_unzeroed);
- }
- npurged += npages;
- if (config_stats)
- nmadvise++;
- }
- malloc_mutex_lock(&arena->lock);
- if (config_stats)
- arena->stats.nmadvise += nmadvise;
-
- /* Deallocate runs. */
- for (mapelm = ql_first(&mapelms); mapelm != NULL;
- mapelm = ql_first(&mapelms)) {
- arena_run_t *run;
-
- pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
- sizeof(arena_chunk_map_t)) + map_bias;
- run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind <<
- LG_PAGE));
- ql_remove(&mapelms, mapelm, u.ql_link);
- arena_run_dalloc(arena, run, false, true);
- }
-
- return (npurged);
-}
-
-static arena_chunk_t *
-chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg)
-{
- size_t *ndirty = (size_t *)arg;
-
- assert(chunk->ndirty != 0);
- *ndirty += chunk->ndirty;
- return (NULL);
-}
-
-static void
-arena_purge(arena_t *arena, bool all)
-{
- arena_chunk_t *chunk;
- size_t npurgatory;
- if (config_debug) {
- size_t ndirty = 0;
-
- arena_chunk_dirty_iter(&arena->chunks_dirty, NULL,
- chunks_dirty_iter_cb, (void *)&ndirty);
- assert(ndirty == arena->ndirty);
- }
- assert(arena->ndirty > arena->npurgatory || all);
- assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
- arena->npurgatory) || all);
-
- if (config_stats)
- arena->stats.npurge++;
-
- /*
- * Compute the minimum number of pages that this thread should try to
- * purge, and add the result to arena->npurgatory. This will keep
- * multiple threads from racing to reduce ndirty below the threshold.
- */
- {
- size_t npurgeable = arena->ndirty - arena->npurgatory;
-
- if (all == false) {
- size_t threshold = (arena->nactive >>
- opt_lg_dirty_mult);
-
- npurgatory = npurgeable - threshold;
- } else
- npurgatory = npurgeable;
- }
- arena->npurgatory += npurgatory;
-
- while (npurgatory > 0) {
- size_t npurgeable, npurged, nunpurged;
-
- /* Get next chunk with dirty pages. */
- chunk = arena_chunk_dirty_first(&arena->chunks_dirty);
- if (chunk == NULL) {
- /*
- * This thread was unable to purge as many pages as
- * originally intended, due to races with other threads
- * that either did some of the purging work, or re-used
- * dirty pages.
- */
- arena->npurgatory -= npurgatory;
- return;
- }
- npurgeable = chunk->ndirty;
- assert(npurgeable != 0);
-
- if (npurgeable > npurgatory && chunk->nruns_adjac == 0) {
- /*
- * This thread will purge all the dirty pages in chunk,
- * so set npurgatory to reflect this thread's intent to
- * purge the pages. This tends to reduce the chances
- * of the following scenario:
- *
- * 1) This thread sets arena->npurgatory such that
- * (arena->ndirty - arena->npurgatory) is at the
- * threshold.
- * 2) This thread drops arena->lock.
- * 3) Another thread causes one or more pages to be
- * dirtied, and immediately determines that it must
- * purge dirty pages.
- *
- * If this scenario *does* play out, that's okay,
- * because all of the purging work being done really
- * needs to happen.
- */
- arena->npurgatory += npurgeable - npurgatory;
- npurgatory = npurgeable;
- }
-
- /*
- * Keep track of how many pages are purgeable, versus how many
- * actually get purged, and adjust counters accordingly.
- */
- arena->npurgatory -= npurgeable;
- npurgatory -= npurgeable;
- npurged = arena_chunk_purge(arena, chunk, all);
- nunpurged = npurgeable - npurged;
- arena->npurgatory += nunpurged;
- npurgatory += nunpurged;
- }
-}
-
-void
-arena_purge_all(arena_t *arena)
-{
-
- malloc_mutex_lock(&arena->lock);
- arena_purge(arena, true);
- malloc_mutex_unlock(&arena->lock);
-}
-
-static void
-arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
-{
- arena_chunk_t *chunk;
- size_t size, run_ind, run_pages, flag_dirty;
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
- assert(run_ind >= map_bias);
- assert(run_ind < chunk_npages);
- if (arena_mapbits_large_get(chunk, run_ind) != 0) {
- size = arena_mapbits_large_size_get(chunk, run_ind);
- assert(size == PAGE ||
- arena_mapbits_large_size_get(chunk,
- run_ind+(size>>LG_PAGE)-1) == 0);
- } else {
- size_t binind = arena_bin_index(arena, run->bin);
- arena_bin_info_t *bin_info = &arena_bin_info[binind];
- size = bin_info->run_size;
- }
- run_pages = (size >> LG_PAGE);
- if (config_stats) {
- /*
- * Update stats_cactive if nactive is crossing a chunk
- * multiple.
- */
- size_t cactive_diff = CHUNK_CEILING(arena->nactive << LG_PAGE) -
- CHUNK_CEILING((arena->nactive - run_pages) << LG_PAGE);
- if (cactive_diff != 0)
- stats_cactive_sub(cactive_diff);
- }
- arena->nactive -= run_pages;
-
- /*
- * The run is dirty if the caller claims to have dirtied it, as well as
- * if it was already dirty before being allocated and the caller
- * doesn't claim to have cleaned it.
- */
- assert(arena_mapbits_dirty_get(chunk, run_ind) ==
- arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
- if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0)
- dirty = true;
- flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
-
- /* Mark pages as unallocated in the chunk map. */
- if (dirty) {
- arena_mapbits_unallocated_set(chunk, run_ind, size,
- CHUNK_MAP_DIRTY);
- arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
- CHUNK_MAP_DIRTY);
- } else {
- arena_mapbits_unallocated_set(chunk, run_ind, size,
- arena_mapbits_unzeroed_get(chunk, run_ind));
- arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
- arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
- }
-
- /* Try to coalesce forward. */
- if (run_ind + run_pages < chunk_npages &&
- arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
- arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) {
- size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
- run_ind+run_pages);
- size_t nrun_pages = nrun_size >> LG_PAGE;
-
- /*
- * Remove successor from runs_avail; the coalesced run is
- * inserted later.
- */
- assert(arena_mapbits_unallocated_size_get(chunk,
- run_ind+run_pages+nrun_pages-1) == nrun_size);
- assert(arena_mapbits_dirty_get(chunk,
- run_ind+run_pages+nrun_pages-1) == flag_dirty);
- arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages,
- false, true);
-
- size += nrun_size;
- run_pages += nrun_pages;
-
- arena_mapbits_unallocated_size_set(chunk, run_ind, size);
- arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
- size);
- }
-
- /* Try to coalesce backward. */
- if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, run_ind-1)
- == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == flag_dirty) {
- size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
- run_ind-1);
- size_t prun_pages = prun_size >> LG_PAGE;
-
- run_ind -= prun_pages;
-
- /*
- * Remove predecessor from runs_avail; the coalesced run is
- * inserted later.
- */
- assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
- prun_size);
- assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
- arena_avail_remove(arena, chunk, run_ind, prun_pages, true,
- false);
-
- size += prun_size;
- run_pages += prun_pages;
-
- arena_mapbits_unallocated_size_set(chunk, run_ind, size);
- arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
- size);
- }
-
- /* Insert into runs_avail, now that coalescing is complete. */
- assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
- arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
- assert(arena_mapbits_dirty_get(chunk, run_ind) ==
- arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
- arena_avail_insert(arena, chunk, run_ind, run_pages, true, true);
-
- /* Deallocate chunk if it is now completely unused. */
- if (size == arena_maxclass) {
- assert(run_ind == map_bias);
- assert(run_pages == (arena_maxclass >> LG_PAGE));
- arena_chunk_dealloc(arena, chunk);
- }
-
- /*
- * It is okay to do dirty page processing here even if the chunk was
- * deallocated above, since in that case it is the spare. Waiting
- * until after possible chunk deallocation to do dirty processing
- * allows for an old spare to be fully deallocated, thus decreasing the
- * chances of spuriously crossing the dirty page purging threshold.
- */
- if (dirty)
- arena_maybe_purge(arena);
-}
-
-static void
-arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
- size_t oldsize, size_t newsize)
-{
- size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
- size_t head_npages = (oldsize - newsize) >> LG_PAGE;
- size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
-
- assert(oldsize > newsize);
-
- /*
- * Update the chunk map so that arena_run_dalloc() can treat the
- * leading run as separately allocated. Set the last element of each
- * run first, in case of single-page runs.
- */
- assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
- arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
- arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty);
-
- if (config_debug) {
- UNUSED size_t tail_npages = newsize >> LG_PAGE;
- assert(arena_mapbits_large_size_get(chunk,
- pageind+head_npages+tail_npages-1) == 0);
- assert(arena_mapbits_dirty_get(chunk,
- pageind+head_npages+tail_npages-1) == flag_dirty);
- }
- arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
- flag_dirty);
-
- arena_run_dalloc(arena, run, false, false);
-}
-
-static void
-arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
- size_t oldsize, size_t newsize, bool dirty)
-{
- size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
- size_t head_npages = newsize >> LG_PAGE;
- size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
-
- assert(oldsize > newsize);
-
- /*
- * Update the chunk map so that arena_run_dalloc() can treat the
- * trailing run as separately allocated. Set the last element of each
- * run first, in case of single-page runs.
- */
- assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
- arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
- arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty);
-
- if (config_debug) {
- UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
- assert(arena_mapbits_large_size_get(chunk,
- pageind+head_npages+tail_npages-1) == 0);
- assert(arena_mapbits_dirty_get(chunk,
- pageind+head_npages+tail_npages-1) == flag_dirty);
- }
- arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
- flag_dirty);
-
- arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
- dirty, false);
-}
-
-static arena_run_t *
-arena_bin_runs_first(arena_bin_t *bin)
-{
- arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs);
- if (mapelm != NULL) {
- arena_chunk_t *chunk;
- size_t pageind;
- arena_run_t *run;
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
- pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
- sizeof(arena_chunk_map_t))) + map_bias;
- run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
- arena_mapbits_small_runind_get(chunk, pageind)) <<
- LG_PAGE));
- return (run);
- }
-
- return (NULL);
-}
-
-static void
-arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
-{
- arena_chunk_t *chunk = CHUNK_ADDR2BASE(run);
- size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
- arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
-
- assert(arena_run_tree_search(&bin->runs, mapelm) == NULL);
-
- arena_run_tree_insert(&bin->runs, mapelm);
-}
-
-static void
-arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
-{
- arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
- arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
-
- assert(arena_run_tree_search(&bin->runs, mapelm) != NULL);
-
- arena_run_tree_remove(&bin->runs, mapelm);
-}
-
-static arena_run_t *
-arena_bin_nonfull_run_tryget(arena_bin_t *bin)
-{
- arena_run_t *run = arena_bin_runs_first(bin);
- if (run != NULL) {
- arena_bin_runs_remove(bin, run);
- if (config_stats)
- bin->stats.reruns++;
- }
- return (run);
-}
-
-static arena_run_t *
-arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
-{
- arena_run_t *run;
- size_t binind;
- arena_bin_info_t *bin_info;
-
- /* Look for a usable run. */
- run = arena_bin_nonfull_run_tryget(bin);
- if (run != NULL)
- return (run);
- /* No existing runs have any space available. */
-
- binind = arena_bin_index(arena, bin);
- bin_info = &arena_bin_info[binind];
-
- /* Allocate a new run. */
- malloc_mutex_unlock(&bin->lock);
- /******************************/
- malloc_mutex_lock(&arena->lock);
- run = arena_run_alloc(arena, bin_info->run_size, false, binind, false);
- if (run != NULL) {
- bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
- (uintptr_t)bin_info->bitmap_offset);
-
- /* Initialize run internals. */
- run->bin = bin;
- run->nextind = 0;
- run->nfree = bin_info->nregs;
- bitmap_init(bitmap, &bin_info->bitmap_info);
- }
- malloc_mutex_unlock(&arena->lock);
- /********************************/
- malloc_mutex_lock(&bin->lock);
- if (run != NULL) {
- if (config_stats) {
- bin->stats.nruns++;
- bin->stats.curruns++;
- }
- return (run);
- }
-
- /*
- * arena_run_alloc() failed, but another thread may have made
- * sufficient memory available while this one dropped bin->lock above,
- * so search one more time.
- */
- run = arena_bin_nonfull_run_tryget(bin);
- if (run != NULL)
- return (run);
-
- return (NULL);
-}
-
-/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
-static void *
-arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
-{
- void *ret;
- size_t binind;
- arena_bin_info_t *bin_info;
- arena_run_t *run;
-
- binind = arena_bin_index(arena, bin);
- bin_info = &arena_bin_info[binind];
- bin->runcur = NULL;
- run = arena_bin_nonfull_run_get(arena, bin);
- if (bin->runcur != NULL && bin->runcur->nfree > 0) {
- /*
- * Another thread updated runcur while this one ran without the
- * bin lock in arena_bin_nonfull_run_get().
- */
- assert(bin->runcur->nfree > 0);
- ret = arena_run_reg_alloc(bin->runcur, bin_info);
- if (run != NULL) {
- arena_chunk_t *chunk;
-
- /*
- * arena_run_alloc() may have allocated run, or it may
- * have pulled run from the bin's run tree. Therefore
- * it is unsafe to make any assumptions about how run
- * has previously been used, and arena_bin_lower_run()
- * must be called, as if a region were just deallocated
- * from the run.
- */
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- if (run->nfree == bin_info->nregs)
- arena_dalloc_bin_run(arena, chunk, run, bin);
- else
- arena_bin_lower_run(arena, chunk, run, bin);
- }
- return (ret);
- }
-
- if (run == NULL)
- return (NULL);
-
- bin->runcur = run;
-
- assert(bin->runcur->nfree > 0);
-
- return (arena_run_reg_alloc(bin->runcur, bin_info));
-}
-
-void
-arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
- uint64_t prof_accumbytes)
-{
- unsigned i, nfill;
- arena_bin_t *bin;
- arena_run_t *run;
- void *ptr;
-
- assert(tbin->ncached == 0);
-
- if (config_prof && arena_prof_accum(arena, prof_accumbytes))
- prof_idump();
- bin = &arena->bins[binind];
- malloc_mutex_lock(&bin->lock);
- for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
- tbin->lg_fill_div); i < nfill; i++) {
- if ((run = bin->runcur) != NULL && run->nfree > 0)
- ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
- else
- ptr = arena_bin_malloc_hard(arena, bin);
- if (ptr == NULL)
- break;
- if (config_fill && opt_junk) {
- arena_alloc_junk_small(ptr, &arena_bin_info[binind],
- true);
- }
- /* Insert such that low regions get used first. */
- tbin->avail[nfill - 1 - i] = ptr;
- }
- if (config_stats) {
- bin->stats.allocated += i * arena_bin_info[binind].reg_size;
- bin->stats.nmalloc += i;
- bin->stats.nrequests += tbin->tstats.nrequests;
- bin->stats.nfills++;
- tbin->tstats.nrequests = 0;
- }
- malloc_mutex_unlock(&bin->lock);
- tbin->ncached = i;
-}
-
-void
-arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
-{
-
- if (zero) {
- size_t redzone_size = bin_info->redzone_size;
- memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
- redzone_size);
- memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
- redzone_size);
- } else {
- memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
- bin_info->reg_interval);
- }
-}
-
-void
-arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
-{
- size_t size = bin_info->reg_size;
- size_t redzone_size = bin_info->redzone_size;
- size_t i;
- bool error = false;
-
- for (i = 1; i <= redzone_size; i++) {
- unsigned byte;
- if ((byte = *(uint8_t *)((uintptr_t)ptr - i)) != 0xa5) {
- error = true;
- malloc_printf("<jemalloc>: Corrupt redzone "
- "%zu byte%s before %p (size %zu), byte=%#x\n", i,
- (i == 1) ? "" : "s", ptr, size, byte);
- }
- }
- for (i = 0; i < redzone_size; i++) {
- unsigned byte;
- if ((byte = *(uint8_t *)((uintptr_t)ptr + size + i)) != 0xa5) {
- error = true;
- malloc_printf("<jemalloc>: Corrupt redzone "
- "%zu byte%s after end of %p (size %zu), byte=%#x\n",
- i, (i == 1) ? "" : "s", ptr, size, byte);
- }
- }
- if (opt_abort && error)
- abort();
-
- memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
- bin_info->reg_interval);
-}
-
-void *
-arena_malloc_small(arena_t *arena, size_t size, bool zero)
-{
- void *ret;
- arena_bin_t *bin;
- arena_run_t *run;
- size_t binind;
-
- binind = SMALL_SIZE2BIN(size);
- assert(binind < NBINS);
- bin = &arena->bins[binind];
- size = arena_bin_info[binind].reg_size;
-
- malloc_mutex_lock(&bin->lock);
- if ((run = bin->runcur) != NULL && run->nfree > 0)
- ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
- else
- ret = arena_bin_malloc_hard(arena, bin);
-
- if (ret == NULL) {
- malloc_mutex_unlock(&bin->lock);
- return (NULL);
- }
-
- if (config_stats) {
- bin->stats.allocated += size;
- bin->stats.nmalloc++;
- bin->stats.nrequests++;
- }
- malloc_mutex_unlock(&bin->lock);
- if (config_prof && isthreaded == false && arena_prof_accum(arena, size))
- prof_idump();
-
- if (zero == false) {
- if (config_fill) {
- if (opt_junk) {
- arena_alloc_junk_small(ret,
- &arena_bin_info[binind], false);
- } else if (opt_zero)
- memset(ret, 0, size);
- }
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
- } else {
- if (config_fill && opt_junk) {
- arena_alloc_junk_small(ret, &arena_bin_info[binind],
- true);
- }
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
- memset(ret, 0, size);
- }
-
- return (ret);
-}
-
-void *
-arena_malloc_large(arena_t *arena, size_t size, bool zero)
-{
- void *ret;
- UNUSED bool idump;
-
- /* Large allocation. */
- size = PAGE_CEILING(size);
- malloc_mutex_lock(&arena->lock);
- ret = (void *)arena_run_alloc(arena, size, true, BININD_INVALID, zero);
- if (ret == NULL) {
- malloc_mutex_unlock(&arena->lock);
- return (NULL);
- }
- if (config_stats) {
- arena->stats.nmalloc_large++;
- arena->stats.nrequests_large++;
- arena->stats.allocated_large += size;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
- }
- if (config_prof)
- idump = arena_prof_accum_locked(arena, size);
- malloc_mutex_unlock(&arena->lock);
- if (config_prof && idump)
- prof_idump();
-
- if (zero == false) {
- if (config_fill) {
- if (opt_junk)
- memset(ret, 0xa5, size);
- else if (opt_zero)
- memset(ret, 0, size);
- }
- }
-
- return (ret);
-}
-
-/* Only handles large allocations that require more than page alignment. */
-void *
-arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
-{
- void *ret;
- size_t alloc_size, leadsize, trailsize;
- arena_run_t *run;
- arena_chunk_t *chunk;
-
- assert((size & PAGE_MASK) == 0);
-
- alignment = PAGE_CEILING(alignment);
- alloc_size = size + alignment - PAGE;
-
- malloc_mutex_lock(&arena->lock);
- run = arena_run_alloc(arena, alloc_size, true, BININD_INVALID, zero);
- if (run == NULL) {
- malloc_mutex_unlock(&arena->lock);
- return (NULL);
- }
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
-
- leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) -
- (uintptr_t)run;
- assert(alloc_size >= leadsize + size);
- trailsize = alloc_size - leadsize - size;
- ret = (void *)((uintptr_t)run + leadsize);
- if (leadsize != 0) {
- arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size -
- leadsize);
- }
- if (trailsize != 0) {
- arena_run_trim_tail(arena, chunk, ret, size + trailsize, size,
- false);
- }
-
- if (config_stats) {
- arena->stats.nmalloc_large++;
- arena->stats.nrequests_large++;
- arena->stats.allocated_large += size;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
- }
- malloc_mutex_unlock(&arena->lock);
-
- if (config_fill && zero == false) {
- if (opt_junk)
- memset(ret, 0xa5, size);
- else if (opt_zero)
- memset(ret, 0, size);
- }
- return (ret);
-}
-
-void
-arena_prof_promoted(const void *ptr, size_t size)
-{
- arena_chunk_t *chunk;
- size_t pageind, binind;
-
- cassert(config_prof);
- assert(ptr != NULL);
- assert(CHUNK_ADDR2BASE(ptr) != ptr);
- assert(isalloc(ptr, false) == PAGE);
- assert(isalloc(ptr, true) == PAGE);
- assert(size <= SMALL_MAXCLASS);
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- binind = SMALL_SIZE2BIN(size);
- assert(binind < NBINS);
- arena_mapbits_large_binind_set(chunk, pageind, binind);
-
- assert(isalloc(ptr, false) == PAGE);
- assert(isalloc(ptr, true) == size);
-}
-
-static void
-arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
- arena_bin_t *bin)
-{
-
- /* Dissociate run from bin. */
- if (run == bin->runcur)
- bin->runcur = NULL;
- else {
- size_t binind = arena_bin_index(chunk->arena, bin);
- arena_bin_info_t *bin_info = &arena_bin_info[binind];
-
- if (bin_info->nregs != 1) {
- /*
- * This block's conditional is necessary because if the
- * run only contains one region, then it never gets
- * inserted into the non-full runs tree.
- */
- arena_bin_runs_remove(bin, run);
- }
- }
-}
-
-static void
-arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
- arena_bin_t *bin)
-{
- size_t binind;
- arena_bin_info_t *bin_info;
- size_t npages, run_ind, past;
-
- assert(run != bin->runcur);
- assert(arena_run_tree_search(&bin->runs,
- arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE))
- == NULL);
-
- binind = arena_bin_index(chunk->arena, run->bin);
- bin_info = &arena_bin_info[binind];
-
- malloc_mutex_unlock(&bin->lock);
- /******************************/
- npages = bin_info->run_size >> LG_PAGE;
- run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
- past = (size_t)(PAGE_CEILING((uintptr_t)run +
- (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
- bin_info->reg_interval - bin_info->redzone_size) -
- (uintptr_t)chunk) >> LG_PAGE);
- malloc_mutex_lock(&arena->lock);
-
- /*
- * If the run was originally clean, and some pages were never touched,
- * trim the clean pages before deallocating the dirty portion of the
- * run.
- */
- assert(arena_mapbits_dirty_get(chunk, run_ind) ==
- arena_mapbits_dirty_get(chunk, run_ind+npages-1));
- if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind <
- npages) {
- /* Trim clean pages. Convert to large run beforehand. */
- assert(npages > 0);
- arena_mapbits_large_set(chunk, run_ind, bin_info->run_size, 0);
- arena_mapbits_large_set(chunk, run_ind+npages-1, 0, 0);
- arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
- ((past - run_ind) << LG_PAGE), false);
- /* npages = past - run_ind; */
- }
- arena_run_dalloc(arena, run, true, false);
- malloc_mutex_unlock(&arena->lock);
- /****************************/
- malloc_mutex_lock(&bin->lock);
- if (config_stats)
- bin->stats.curruns--;
-}
-
-static void
-arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
- arena_bin_t *bin)
-{
-
- /*
- * Make sure that if bin->runcur is non-NULL, it refers to the lowest
- * non-full run. It is okay to NULL runcur out rather than proactively
- * keeping it pointing at the lowest non-full run.
- */
- if ((uintptr_t)run < (uintptr_t)bin->runcur) {
- /* Switch runcur. */
- if (bin->runcur->nfree > 0)
- arena_bin_runs_insert(bin, bin->runcur);
- bin->runcur = run;
- if (config_stats)
- bin->stats.reruns++;
- } else
- arena_bin_runs_insert(bin, run);
-}
-
-void
-arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- arena_chunk_map_t *mapelm)
-{
- size_t pageind;
- arena_run_t *run;
- arena_bin_t *bin;
- arena_bin_info_t *bin_info;
- size_t size, binind;
-
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
- arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
- bin = run->bin;
- binind = arena_ptr_small_binind_get(ptr, mapelm->bits);
- bin_info = &arena_bin_info[binind];
- if (config_fill || config_stats)
- size = bin_info->reg_size;
-
- if (config_fill && opt_junk)
- arena_dalloc_junk_small(ptr, bin_info);
-
- arena_run_reg_dalloc(run, ptr);
- if (run->nfree == bin_info->nregs) {
- arena_dissociate_bin_run(chunk, run, bin);
- arena_dalloc_bin_run(arena, chunk, run, bin);
- } else if (run->nfree == 1 && run != bin->runcur)
- arena_bin_lower_run(arena, chunk, run, bin);
-
- if (config_stats) {
- bin->stats.allocated -= size;
- bin->stats.ndalloc++;
- }
-}
-
-void
-arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- size_t pageind, arena_chunk_map_t *mapelm)
-{
- arena_run_t *run;
- arena_bin_t *bin;
-
- run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
- arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
- bin = run->bin;
- malloc_mutex_lock(&bin->lock);
- arena_dalloc_bin_locked(arena, chunk, ptr, mapelm);
- malloc_mutex_unlock(&bin->lock);
-}
-
-void
-arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- size_t pageind)
-{
- arena_chunk_map_t *mapelm;
-
- if (config_debug) {
- /* arena_ptr_small_binind_get() does extra sanity checking. */
- assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
- pageind)) != BININD_INVALID);
- }
- mapelm = arena_mapp_get(chunk, pageind);
- arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
-}
-
-void
-arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
-{
-
- if (config_fill || config_stats) {
- size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- size_t size = arena_mapbits_large_size_get(chunk, pageind);
-
- if (config_fill && config_stats && opt_junk)
- memset(ptr, 0x5a, size);
- if (config_stats) {
- arena->stats.ndalloc_large++;
- arena->stats.allocated_large -= size;
- arena->stats.lstats[(size >> LG_PAGE) - 1].ndalloc++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].curruns--;
- }
- }
-
- arena_run_dalloc(arena, (arena_run_t *)ptr, true, false);
-}
-
-void
-arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
-{
-
- malloc_mutex_lock(&arena->lock);
- arena_dalloc_large_locked(arena, chunk, ptr);
- malloc_mutex_unlock(&arena->lock);
-}
-
-static void
-arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- size_t oldsize, size_t size)
-{
-
- assert(size < oldsize);
-
- /*
- * Shrink the run, and make trailing pages available for other
- * allocations.
- */
- malloc_mutex_lock(&arena->lock);
- arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
- true);
- if (config_stats) {
- arena->stats.ndalloc_large++;
- arena->stats.allocated_large -= oldsize;
- arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
- arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
-
- arena->stats.nmalloc_large++;
- arena->stats.nrequests_large++;
- arena->stats.allocated_large += size;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
- }
- malloc_mutex_unlock(&arena->lock);
-}
-
-static bool
-arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- size_t oldsize, size_t size, size_t extra, bool zero)
-{
- size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- size_t npages = oldsize >> LG_PAGE;
- size_t followsize;
-
- assert(oldsize == arena_mapbits_large_size_get(chunk, pageind));
-
- /* Try to extend the run. */
- assert(size + extra > oldsize);
- malloc_mutex_lock(&arena->lock);
- if (pageind + npages < chunk_npages &&
- arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
- (followsize = arena_mapbits_unallocated_size_get(chunk,
- pageind+npages)) >= size - oldsize) {
- /*
- * The next run is available and sufficiently large. Split the
- * following run, then merge the first part with the existing
- * allocation.
- */
- size_t flag_dirty;
- size_t splitsize = (oldsize + followsize <= size + extra)
- ? followsize : size + extra - oldsize;
- arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
- ((pageind+npages) << LG_PAGE)), splitsize, true,
- BININD_INVALID, zero);
-
- size = oldsize + splitsize;
- npages = size >> LG_PAGE;
-
- /*
- * Mark the extended run as dirty if either portion of the run
- * was dirty before allocation. This is rather pedantic,
- * because there's not actually any sequence of events that
- * could cause the resulting run to be passed to
- * arena_run_dalloc() with the dirty argument set to false
- * (which is when dirty flag consistency would really matter).
- */
- flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
- arena_mapbits_dirty_get(chunk, pageind+npages-1);
- arena_mapbits_large_set(chunk, pageind, size, flag_dirty);
- arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty);
-
- if (config_stats) {
- arena->stats.ndalloc_large++;
- arena->stats.allocated_large -= oldsize;
- arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
- arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
-
- arena->stats.nmalloc_large++;
- arena->stats.nrequests_large++;
- arena->stats.allocated_large += size;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
- }
- malloc_mutex_unlock(&arena->lock);
- return (false);
- }
- malloc_mutex_unlock(&arena->lock);
-
- return (true);
-}
-
-/*
- * Try to resize a large allocation, in order to avoid copying. This will
- * always fail when growing an object whose following run is already in use.
- */
-static bool
-arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
- bool zero)
-{
- size_t psize;
-
- psize = PAGE_CEILING(size + extra);
- if (psize == oldsize) {
- /* Same size class. */
- if (config_fill && opt_junk && size < oldsize) {
- memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize -
- size);
- }
- return (false);
- } else {
- arena_chunk_t *chunk;
- arena_t *arena;
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- arena = chunk->arena;
-
- if (psize < oldsize) {
-			/* Fill before shrinking in order to avoid a race. */
- if (config_fill && opt_junk) {
- memset((void *)((uintptr_t)ptr + size), 0x5a,
- oldsize - size);
- }
- arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
- psize);
- return (false);
- } else {
- bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
- oldsize, PAGE_CEILING(size),
- psize - PAGE_CEILING(size), zero);
- if (config_fill && ret == false && zero == false &&
- opt_zero) {
- memset((void *)((uintptr_t)ptr + oldsize), 0,
- size - oldsize);
- }
- return (ret);
- }
- }
-}
-
-void *
-arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
- bool zero)
-{
-
- /*
- * Avoid moving the allocation if the size class can be left the same.
- */
- if (oldsize <= arena_maxclass) {
- if (oldsize <= SMALL_MAXCLASS) {
- assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size
- == oldsize);
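-			/*
-			 * The allocation can stay in place if size + extra
-			 * still maps to the same small size class, or if
-			 * oldsize already lies within [size, size + extra].
-			 * E.g. shrinking a region in a hypothetical 160-byte
-			 * class to 150 bytes needs no move, since 150 also
-			 * maps to that class.
-			 */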
- if ((size + extra <= SMALL_MAXCLASS &&
- SMALL_SIZE2BIN(size + extra) ==
- SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
- size + extra >= oldsize)) {
- if (config_fill && opt_junk && size < oldsize) {
- memset((void *)((uintptr_t)ptr + size),
- 0x5a, oldsize - size);
- }
- return (ptr);
- }
- } else {
- assert(size <= arena_maxclass);
- if (size + extra > SMALL_MAXCLASS) {
- if (arena_ralloc_large(ptr, oldsize, size,
- extra, zero) == false)
- return (ptr);
- }
- }
- }
-
- /* Reallocation would require a move. */
- return (NULL);
-}
-
-void *
-arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
- size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
- bool try_tcache_dalloc)
-{
- void *ret;
- size_t copysize;
-
- /* Try to avoid moving the allocation. */
- ret = arena_ralloc_no_move(ptr, oldsize, size, extra, zero);
- if (ret != NULL)
- return (ret);
-
- /*
- * size and oldsize are different enough that we need to move the
- * object. In that case, fall back to allocating new space and
- * copying.
- */
- if (alignment != 0) {
- size_t usize = sa2u(size + extra, alignment);
- if (usize == 0)
- return (NULL);
- ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
- } else
- ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc);
-
- if (ret == NULL) {
- if (extra == 0)
- return (NULL);
- /* Try again, this time without extra. */
- if (alignment != 0) {
- size_t usize = sa2u(size, alignment);
- if (usize == 0)
- return (NULL);
- ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
- arena);
- } else
- ret = arena_malloc(arena, size, zero, try_tcache_alloc);
-
- if (ret == NULL)
- return (NULL);
- }
-
-	/* Junk/zero-filling were already done by ipallocx()/arena_malloc(). */
-
- /*
- * Copy at most size bytes (not size+extra), since the caller has no
- * expectation that the extra bytes will be reliably preserved.
- */
- copysize = (size < oldsize) ? size : oldsize;
- VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
- memcpy(ret, ptr, copysize);
- iqallocx(ptr, try_tcache_dalloc);
- return (ret);
-}
-
-dss_prec_t
-arena_dss_prec_get(arena_t *arena)
-{
- dss_prec_t ret;
-
- malloc_mutex_lock(&arena->lock);
- ret = arena->dss_prec;
- malloc_mutex_unlock(&arena->lock);
- return (ret);
-}
-
-void
-arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
-{
-
- malloc_mutex_lock(&arena->lock);
- arena->dss_prec = dss_prec;
- malloc_mutex_unlock(&arena->lock);
-}
-
-void
-arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
- size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
- malloc_large_stats_t *lstats)
-{
- unsigned i;
-
- malloc_mutex_lock(&arena->lock);
- *dss = dss_prec_names[arena->dss_prec];
- *nactive += arena->nactive;
- *ndirty += arena->ndirty;
-
- astats->mapped += arena->stats.mapped;
- astats->npurge += arena->stats.npurge;
- astats->nmadvise += arena->stats.nmadvise;
- astats->purged += arena->stats.purged;
- astats->allocated_large += arena->stats.allocated_large;
- astats->nmalloc_large += arena->stats.nmalloc_large;
- astats->ndalloc_large += arena->stats.ndalloc_large;
- astats->nrequests_large += arena->stats.nrequests_large;
-
- for (i = 0; i < nlclasses; i++) {
- lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
- lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
- lstats[i].nrequests += arena->stats.lstats[i].nrequests;
- lstats[i].curruns += arena->stats.lstats[i].curruns;
- }
- malloc_mutex_unlock(&arena->lock);
-
- for (i = 0; i < NBINS; i++) {
- arena_bin_t *bin = &arena->bins[i];
-
- malloc_mutex_lock(&bin->lock);
- bstats[i].allocated += bin->stats.allocated;
- bstats[i].nmalloc += bin->stats.nmalloc;
- bstats[i].ndalloc += bin->stats.ndalloc;
- bstats[i].nrequests += bin->stats.nrequests;
- if (config_tcache) {
- bstats[i].nfills += bin->stats.nfills;
- bstats[i].nflushes += bin->stats.nflushes;
- }
- bstats[i].nruns += bin->stats.nruns;
- bstats[i].reruns += bin->stats.reruns;
- bstats[i].curruns += bin->stats.curruns;
- malloc_mutex_unlock(&bin->lock);
- }
-}
-
-bool
-arena_new(arena_t *arena, unsigned ind)
-{
- unsigned i;
- arena_bin_t *bin;
-
- arena->ind = ind;
- arena->nthreads = 0;
-
- if (malloc_mutex_init(&arena->lock))
- return (true);
-
- if (config_stats) {
- memset(&arena->stats, 0, sizeof(arena_stats_t));
- arena->stats.lstats =
- (malloc_large_stats_t *)base_alloc(nlclasses *
- sizeof(malloc_large_stats_t));
- if (arena->stats.lstats == NULL)
- return (true);
- memset(arena->stats.lstats, 0, nlclasses *
- sizeof(malloc_large_stats_t));
- if (config_tcache)
- ql_new(&arena->tcache_ql);
- }
-
- if (config_prof)
- arena->prof_accumbytes = 0;
-
- arena->dss_prec = chunk_dss_prec_get();
-
- /* Initialize chunks. */
- arena_chunk_dirty_new(&arena->chunks_dirty);
- arena->spare = NULL;
-
- arena->nactive = 0;
- arena->ndirty = 0;
- arena->npurgatory = 0;
-
- arena_avail_tree_new(&arena->runs_avail);
-
- /* Initialize bins. */
- for (i = 0; i < NBINS; i++) {
- bin = &arena->bins[i];
- if (malloc_mutex_init(&bin->lock))
- return (true);
- bin->runcur = NULL;
- arena_run_tree_new(&bin->runs);
- if (config_stats)
- memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
- }
-
- return (false);
-}
-
-/*
- * Calculate bin_info->run_size such that it meets the following constraints:
- *
- * *) bin_info->run_size >= min_run_size
- * *) bin_info->run_size <= arena_maxclass
- * *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
- * *) bin_info->nregs <= RUN_MAXREGS
- *
- * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also
- * calculated here, since these settings are all interdependent.
- */
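-/*
- * Illustrative example of the sizing loop below, using hypothetical sizes
- * (4 KiB pages, a 64-byte arena_run_t, 64-bit bitmap groups, no redzones,
- * reg_size == 64): with a one-page run, 63 regions would leave only 64 bytes
- * before the first region, which cannot hold the 72-byte header (struct plus
- * 8-byte bitmap), so nregs drops to 62 and regions start at offset 128.
- */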
-static size_t
-bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
-{
- size_t pad_size;
- size_t try_run_size, good_run_size;
- uint32_t try_nregs, good_nregs;
- uint32_t try_hdr_size, good_hdr_size;
- uint32_t try_bitmap_offset, good_bitmap_offset;
- uint32_t try_ctx0_offset, good_ctx0_offset;
- uint32_t try_redzone0_offset, good_redzone0_offset;
-
- assert(min_run_size >= PAGE);
- assert(min_run_size <= arena_maxclass);
-
- /*
- * Determine redzone size based on minimum alignment and minimum
- * redzone size. Add padding to the end of the run if it is needed to
- * align the regions. The padding allows each redzone to be half the
- * minimum alignment; without the padding, each redzone would have to
- * be twice as large in order to maintain alignment.
- */
- if (config_fill && opt_redzone) {
- size_t align_min = ZU(1) << (ffs(bin_info->reg_size) - 1);
- if (align_min <= REDZONE_MINSIZE) {
- bin_info->redzone_size = REDZONE_MINSIZE;
- pad_size = 0;
- } else {
- bin_info->redzone_size = align_min >> 1;
- pad_size = bin_info->redzone_size;
- }
- } else {
- bin_info->redzone_size = 0;
- pad_size = 0;
- }
- bin_info->reg_interval = bin_info->reg_size +
- (bin_info->redzone_size << 1);
-
- /*
- * Calculate known-valid settings before entering the run_size
- * expansion loop, so that the first part of the loop always copies
- * valid settings.
- *
- * The do..while loop iteratively reduces the number of regions until
- * the run header and the regions no longer overlap. A closed formula
- * would be quite messy, since there is an interdependency between the
- * header's mask length and the number of regions.
- */
- try_run_size = min_run_size;
- try_nregs = ((try_run_size - sizeof(arena_run_t)) /
- bin_info->reg_interval)
- + 1; /* Counter-act try_nregs-- in loop. */
- if (try_nregs > RUN_MAXREGS) {
- try_nregs = RUN_MAXREGS
- + 1; /* Counter-act try_nregs-- in loop. */
- }
- do {
- try_nregs--;
- try_hdr_size = sizeof(arena_run_t);
- /* Pad to a long boundary. */
- try_hdr_size = LONG_CEILING(try_hdr_size);
- try_bitmap_offset = try_hdr_size;
- /* Add space for bitmap. */
- try_hdr_size += bitmap_size(try_nregs);
- if (config_prof && opt_prof && prof_promote == false) {
- /* Pad to a quantum boundary. */
- try_hdr_size = QUANTUM_CEILING(try_hdr_size);
- try_ctx0_offset = try_hdr_size;
- /* Add space for one (prof_ctx_t *) per region. */
- try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
- } else
- try_ctx0_offset = 0;
- try_redzone0_offset = try_run_size - (try_nregs *
- bin_info->reg_interval) - pad_size;
- } while (try_hdr_size > try_redzone0_offset);
-
- /* run_size expansion loop. */
- do {
- /*
- * Copy valid settings before trying more aggressive settings.
- */
- good_run_size = try_run_size;
- good_nregs = try_nregs;
- good_hdr_size = try_hdr_size;
- good_bitmap_offset = try_bitmap_offset;
- good_ctx0_offset = try_ctx0_offset;
- good_redzone0_offset = try_redzone0_offset;
-
- /* Try more aggressive settings. */
- try_run_size += PAGE;
- try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) /
- bin_info->reg_interval)
- + 1; /* Counter-act try_nregs-- in loop. */
- if (try_nregs > RUN_MAXREGS) {
- try_nregs = RUN_MAXREGS
- + 1; /* Counter-act try_nregs-- in loop. */
- }
- do {
- try_nregs--;
- try_hdr_size = sizeof(arena_run_t);
- /* Pad to a long boundary. */
- try_hdr_size = LONG_CEILING(try_hdr_size);
- try_bitmap_offset = try_hdr_size;
- /* Add space for bitmap. */
- try_hdr_size += bitmap_size(try_nregs);
- if (config_prof && opt_prof && prof_promote == false) {
- /* Pad to a quantum boundary. */
- try_hdr_size = QUANTUM_CEILING(try_hdr_size);
- try_ctx0_offset = try_hdr_size;
- /*
- * Add space for one (prof_ctx_t *) per region.
- */
- try_hdr_size += try_nregs *
- sizeof(prof_ctx_t *);
- }
- try_redzone0_offset = try_run_size - (try_nregs *
- bin_info->reg_interval) - pad_size;
- } while (try_hdr_size > try_redzone0_offset);
-	} while (try_run_size <= arena_maxclass
- && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) >
- RUN_MAX_OVRHD_RELAX
- && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
- && try_nregs < RUN_MAXREGS);
-
- assert(good_hdr_size <= good_redzone0_offset);
-
- /* Copy final settings. */
- bin_info->run_size = good_run_size;
- bin_info->nregs = good_nregs;
- bin_info->bitmap_offset = good_bitmap_offset;
- bin_info->ctx0_offset = good_ctx0_offset;
- bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size;
-
- assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
- * bin_info->reg_interval) + pad_size == bin_info->run_size);
-
- return (good_run_size);
-}
-
-static void
-bin_info_init(void)
-{
- arena_bin_info_t *bin_info;
- size_t prev_run_size = PAGE;
-
-#define SIZE_CLASS(bin, delta, size) \
- bin_info = &arena_bin_info[bin]; \
- bin_info->reg_size = size; \
- prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\
- bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
- SIZE_CLASSES
-#undef SIZE_CLASS
-}
-
-void
-arena_boot(void)
-{
- size_t header_size;
- unsigned i;
-
- /*
- * Compute the header size such that it is large enough to contain the
- * page map. The page map is biased to omit entries for the header
- * itself, so some iteration is necessary to compute the map bias.
- *
- * 1) Compute safe header_size and map_bias values that include enough
- * space for an unbiased page map.
- * 2) Refine map_bias based on (1) to omit the header pages in the page
- * map. The resulting map_bias may be one too small.
- * 3) Refine map_bias based on (2). The result will be >= the result
- * from (2), and will always be correct.
- */
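-	/*
-	 * Worked example with hypothetical sizes (4 MiB chunks, 4 KiB pages, a
-	 * 64-byte header prefix, 32-byte map entries): pass 1 sizes the map
-	 * for all 1024 pages (32832 bytes -> map_bias = 9); pass 2 drops those
-	 * 9 header pages from the map (32544 bytes -> map_bias = 8); pass 3
-	 * confirms that 8 is stable.
-	 */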
- map_bias = 0;
- for (i = 0; i < 3; i++) {
- header_size = offsetof(arena_chunk_t, map) +
- (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
- map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK)
- != 0);
- }
- assert(map_bias > 0);
-
- arena_maxclass = chunksize - (map_bias << LG_PAGE);
-
- bin_info_init();
-}
-
-void
-arena_prefork(arena_t *arena)
-{
- unsigned i;
-
- malloc_mutex_prefork(&arena->lock);
- for (i = 0; i < NBINS; i++)
- malloc_mutex_prefork(&arena->bins[i].lock);
-}
-
-void
-arena_postfork_parent(arena_t *arena)
-{
- unsigned i;
-
- for (i = 0; i < NBINS; i++)
- malloc_mutex_postfork_parent(&arena->bins[i].lock);
- malloc_mutex_postfork_parent(&arena->lock);
-}
-
-void
-arena_postfork_child(arena_t *arena)
-{
- unsigned i;
-
- for (i = 0; i < NBINS; i++)
- malloc_mutex_postfork_child(&arena->bins[i].lock);
- malloc_mutex_postfork_child(&arena->lock);
-}
diff --git a/extra/jemalloc/src/atomic.c b/extra/jemalloc/src/atomic.c
deleted file mode 100644
index 77ee313113b..00000000000
--- a/extra/jemalloc/src/atomic.c
+++ /dev/null
@@ -1,2 +0,0 @@
-#define JEMALLOC_ATOMIC_C_
-#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/extra/jemalloc/src/base.c b/extra/jemalloc/src/base.c
deleted file mode 100644
index 4e62e8fa918..00000000000
--- a/extra/jemalloc/src/base.c
+++ /dev/null
@@ -1,142 +0,0 @@
-#define JEMALLOC_BASE_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-/* Data. */
-
-static malloc_mutex_t base_mtx;
-
-/*
- * Current pages that are being used for internal memory allocations. These
- * pages are carved up in cacheline-size quanta, so that there is no chance of
- * false cache line sharing.
- */
-static void *base_pages;
-static void *base_next_addr;
-static void *base_past_addr; /* Addr immediately past base_pages. */
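-/*
- * Singly linked free list of recycled extent nodes; the first sizeof(void *)
- * bytes of each free node are reused to store the next pointer.
- */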
-static extent_node_t *base_nodes;
-
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static bool base_pages_alloc(size_t minsize);
-
-/******************************************************************************/
-
-static bool
-base_pages_alloc(size_t minsize)
-{
- size_t csize;
- bool zero;
-
- assert(minsize != 0);
- csize = CHUNK_CEILING(minsize);
- zero = false;
- base_pages = chunk_alloc(csize, chunksize, true, &zero,
- chunk_dss_prec_get());
- if (base_pages == NULL)
- return (true);
- base_next_addr = base_pages;
- base_past_addr = (void *)((uintptr_t)base_pages + csize);
-
- return (false);
-}
-
-void *
-base_alloc(size_t size)
-{
- void *ret;
- size_t csize;
-
- /* Round size up to nearest multiple of the cacheline size. */
- csize = CACHELINE_CEILING(size);
-
- malloc_mutex_lock(&base_mtx);
- /* Make sure there's enough space for the allocation. */
- if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
- if (base_pages_alloc(csize)) {
- malloc_mutex_unlock(&base_mtx);
- return (NULL);
- }
- }
- /* Allocate. */
- ret = base_next_addr;
- base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
- malloc_mutex_unlock(&base_mtx);
- VALGRIND_MAKE_MEM_UNDEFINED(ret, csize);
-
- return (ret);
-}
-
-void *
-base_calloc(size_t number, size_t size)
-{
- void *ret = base_alloc(number * size);
-
- if (ret != NULL)
- memset(ret, 0, number * size);
-
- return (ret);
-}
-
-extent_node_t *
-base_node_alloc(void)
-{
- extent_node_t *ret;
-
- malloc_mutex_lock(&base_mtx);
- if (base_nodes != NULL) {
- ret = base_nodes;
- base_nodes = *(extent_node_t **)ret;
- malloc_mutex_unlock(&base_mtx);
- VALGRIND_MAKE_MEM_UNDEFINED(ret, sizeof(extent_node_t));
- } else {
- malloc_mutex_unlock(&base_mtx);
- ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
- }
-
- return (ret);
-}
-
-void
-base_node_dealloc(extent_node_t *node)
-{
-
- VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
- malloc_mutex_lock(&base_mtx);
- *(extent_node_t **)node = base_nodes;
- base_nodes = node;
- malloc_mutex_unlock(&base_mtx);
-}
-
-bool
-base_boot(void)
-{
-
- base_nodes = NULL;
- if (malloc_mutex_init(&base_mtx))
- return (true);
-
- return (false);
-}
-
-void
-base_prefork(void)
-{
-
- malloc_mutex_prefork(&base_mtx);
-}
-
-void
-base_postfork_parent(void)
-{
-
- malloc_mutex_postfork_parent(&base_mtx);
-}
-
-void
-base_postfork_child(void)
-{
-
- malloc_mutex_postfork_child(&base_mtx);
-}
diff --git a/extra/jemalloc/src/bitmap.c b/extra/jemalloc/src/bitmap.c
deleted file mode 100644
index b47e2629093..00000000000
--- a/extra/jemalloc/src/bitmap.c
+++ /dev/null
@@ -1,90 +0,0 @@
-#define JEMALLOC_BITMAP_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static size_t bits2groups(size_t nbits);
-
-/******************************************************************************/
-
-static size_t
-bits2groups(size_t nbits)
-{
-
- return ((nbits >> LG_BITMAP_GROUP_NBITS) +
- !!(nbits & BITMAP_GROUP_NBITS_MASK));
-}
-
-void
-bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
-{
- unsigned i;
- size_t group_count;
-
- assert(nbits > 0);
- assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
-
- /*
- * Compute the number of groups necessary to store nbits bits, and
- * progressively work upward through the levels until reaching a level
- * that requires only one group.
- */
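-	/*
-	 * Example, assuming 64-bit groups: for nbits == 200, level 0 needs 4
-	 * groups and level 1 needs a single group summarizing those 4, so
-	 * nlevels == 2 and 5 groups are used in total.
-	 */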
- binfo->levels[0].group_offset = 0;
- group_count = bits2groups(nbits);
- for (i = 1; group_count > 1; i++) {
- assert(i < BITMAP_MAX_LEVELS);
- binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
- + group_count;
- group_count = bits2groups(group_count);
- }
- binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
- + group_count;
- binfo->nlevels = i;
- binfo->nbits = nbits;
-}
-
-size_t
-bitmap_info_ngroups(const bitmap_info_t *binfo)
-{
-
- return (binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP);
-}
-
-size_t
-bitmap_size(size_t nbits)
-{
- bitmap_info_t binfo;
-
- bitmap_info_init(&binfo, nbits);
- return (bitmap_info_ngroups(&binfo));
-}
-
-void
-bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
-{
- size_t extra;
- unsigned i;
-
- /*
- * Bits are actually inverted with regard to the external bitmap
- * interface, so the bitmap starts out with all 1 bits, except for
- * trailing unused bits (if any). Note that each group uses bit 0 to
- * correspond to the first logical bit in the group, so extra bits
- * are the most significant bits of the last group.
- */
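-	/*
-	 * E.g. with 64-bit groups and nbits == 21, the single level-0 group is
-	 * set to all ones and then shifted right by 43, leaving only the low
-	 * 21 bits set.
-	 */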
- memset(bitmap, 0xffU, binfo->levels[binfo->nlevels].group_offset <<
- LG_SIZEOF_BITMAP);
- extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
- & BITMAP_GROUP_NBITS_MASK;
- if (extra != 0)
- bitmap[binfo->levels[1].group_offset - 1] >>= extra;
- for (i = 1; i < binfo->nlevels; i++) {
- size_t group_count = binfo->levels[i].group_offset -
- binfo->levels[i-1].group_offset;
- extra = (BITMAP_GROUP_NBITS - (group_count &
- BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK;
- if (extra != 0)
- bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
- }
-}
diff --git a/extra/jemalloc/src/chunk.c b/extra/jemalloc/src/chunk.c
deleted file mode 100644
index 044f76be96c..00000000000
--- a/extra/jemalloc/src/chunk.c
+++ /dev/null
@@ -1,385 +0,0 @@
-#define JEMALLOC_CHUNK_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-/* Data. */
-
-const char *opt_dss = DSS_DEFAULT;
-size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
-
-malloc_mutex_t chunks_mtx;
-chunk_stats_t stats_chunks;
-
-/*
- * Trees of chunks that were previously allocated (trees differ only in node
- * ordering). These are used when allocating chunks, in an attempt to re-use
- * address space. Depending on function, different tree orderings are needed,
- * which is why there are two trees with the same contents.
- */
-static extent_tree_t chunks_szad_mmap;
-static extent_tree_t chunks_ad_mmap;
-static extent_tree_t chunks_szad_dss;
-static extent_tree_t chunks_ad_dss;
-
-rtree_t *chunks_rtree;
-
-/* Various chunk-related settings. */
-size_t chunksize;
-size_t chunksize_mask; /* (chunksize - 1). */
-size_t chunk_npages;
-size_t map_bias;
-size_t arena_maxclass; /* Max size class for arenas. */
-
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static void *chunk_recycle(extent_tree_t *chunks_szad,
- extent_tree_t *chunks_ad, size_t size, size_t alignment, bool base,
- bool *zero);
-static void chunk_record(extent_tree_t *chunks_szad,
- extent_tree_t *chunks_ad, void *chunk, size_t size);
-
-/******************************************************************************/
-
-static void *
-chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
- size_t alignment, bool base, bool *zero)
-{
- void *ret;
- extent_node_t *node;
- extent_node_t key;
- size_t alloc_size, leadsize, trailsize;
- bool zeroed;
-
- if (base) {
- /*
- * This function may need to call base_node_{,de}alloc(), but
- * the current chunk allocation request is on behalf of the
- * base allocator. Avoid deadlock (and if that weren't an
- * issue, potential for infinite recursion) by returning NULL.
- */
- return (NULL);
- }
-
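-	/*
-	 * Over-search by (alignment - chunksize) so that any extent found can
-	 * be trimmed to an aligned piece; e.g. a 4 MiB request with 8 MiB
-	 * alignment searches for an 8 MiB extent (4 + 8 - 4).
-	 */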
- alloc_size = size + alignment - chunksize;
- /* Beware size_t wrap-around. */
- if (alloc_size < size)
- return (NULL);
- key.addr = NULL;
- key.size = alloc_size;
- malloc_mutex_lock(&chunks_mtx);
- node = extent_tree_szad_nsearch(chunks_szad, &key);
- if (node == NULL) {
- malloc_mutex_unlock(&chunks_mtx);
- return (NULL);
- }
- leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
- (uintptr_t)node->addr;
- assert(node->size >= leadsize + size);
- trailsize = node->size - leadsize - size;
- ret = (void *)((uintptr_t)node->addr + leadsize);
- zeroed = node->zeroed;
- if (zeroed)
- *zero = true;
- /* Remove node from the tree. */
- extent_tree_szad_remove(chunks_szad, node);
- extent_tree_ad_remove(chunks_ad, node);
- if (leadsize != 0) {
- /* Insert the leading space as a smaller chunk. */
- node->size = leadsize;
- extent_tree_szad_insert(chunks_szad, node);
- extent_tree_ad_insert(chunks_ad, node);
- node = NULL;
- }
- if (trailsize != 0) {
- /* Insert the trailing space as a smaller chunk. */
- if (node == NULL) {
- /*
- * An additional node is required, but
- * base_node_alloc() can cause a new base chunk to be
- * allocated. Drop chunks_mtx in order to avoid
- * deadlock, and if node allocation fails, deallocate
- * the result before returning an error.
- */
- malloc_mutex_unlock(&chunks_mtx);
- node = base_node_alloc();
- if (node == NULL) {
- chunk_dealloc(ret, size, true);
- return (NULL);
- }
- malloc_mutex_lock(&chunks_mtx);
- }
- node->addr = (void *)((uintptr_t)(ret) + size);
- node->size = trailsize;
- node->zeroed = zeroed;
- extent_tree_szad_insert(chunks_szad, node);
- extent_tree_ad_insert(chunks_ad, node);
- node = NULL;
- }
- malloc_mutex_unlock(&chunks_mtx);
-
- if (node != NULL)
- base_node_dealloc(node);
- if (*zero) {
- if (zeroed == false)
- memset(ret, 0, size);
- else if (config_debug) {
- size_t i;
- size_t *p = (size_t *)(uintptr_t)ret;
-
- VALGRIND_MAKE_MEM_DEFINED(ret, size);
- for (i = 0; i < size / sizeof(size_t); i++)
- assert(p[i] == 0);
- }
- }
- return (ret);
-}
-
-/*
- * If the caller specifies (*zero == false), it is still possible to receive
- * zeroed memory, in which case *zero is toggled to true. arena_chunk_alloc()
- * takes advantage of this to avoid demanding zeroed chunks, while still
- * taking advantage of them if they are returned.
- */
-void *
-chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
- dss_prec_t dss_prec)
-{
- void *ret;
-
- assert(size != 0);
- assert((size & chunksize_mask) == 0);
- assert(alignment != 0);
- assert((alignment & chunksize_mask) == 0);
-
- /* "primary" dss. */
- if (config_dss && dss_prec == dss_prec_primary) {
- if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
- alignment, base, zero)) != NULL)
- goto label_return;
- if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
- goto label_return;
- }
- /* mmap. */
- if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, size,
- alignment, base, zero)) != NULL)
- goto label_return;
- if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
- goto label_return;
- /* "secondary" dss. */
- if (config_dss && dss_prec == dss_prec_secondary) {
- if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
- alignment, base, zero)) != NULL)
- goto label_return;
- if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
- goto label_return;
- }
-
- /* All strategies for allocation failed. */
- ret = NULL;
-label_return:
- if (ret != NULL) {
- if (config_ivsalloc && base == false) {
- if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
- chunk_dealloc(ret, size, true);
- return (NULL);
- }
- }
- if (config_stats || config_prof) {
- bool gdump;
- malloc_mutex_lock(&chunks_mtx);
- if (config_stats)
- stats_chunks.nchunks += (size / chunksize);
- stats_chunks.curchunks += (size / chunksize);
- if (stats_chunks.curchunks > stats_chunks.highchunks) {
- stats_chunks.highchunks =
- stats_chunks.curchunks;
- if (config_prof)
- gdump = true;
- } else if (config_prof)
- gdump = false;
- malloc_mutex_unlock(&chunks_mtx);
- if (config_prof && opt_prof && opt_prof_gdump && gdump)
- prof_gdump();
- }
- if (config_valgrind)
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
- }
- assert(CHUNK_ADDR2BASE(ret) == ret);
- return (ret);
-}
-
-static void
-chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
- size_t size)
-{
- bool unzeroed;
- extent_node_t *xnode, *node, *prev, key;
-
- unzeroed = pages_purge(chunk, size);
- VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
-
- /*
- * Allocate a node before acquiring chunks_mtx even though it might not
- * be needed, because base_node_alloc() may cause a new base chunk to
- * be allocated, which could cause deadlock if chunks_mtx were already
- * held.
- */
- xnode = base_node_alloc();
-
- malloc_mutex_lock(&chunks_mtx);
- key.addr = (void *)((uintptr_t)chunk + size);
- node = extent_tree_ad_nsearch(chunks_ad, &key);
- /* Try to coalesce forward. */
- if (node != NULL && node->addr == key.addr) {
- /*
- * Coalesce chunk with the following address range. This does
- * not change the position within chunks_ad, so only
- * remove/insert from/into chunks_szad.
- */
- extent_tree_szad_remove(chunks_szad, node);
- node->addr = chunk;
- node->size += size;
- node->zeroed = (node->zeroed && (unzeroed == false));
- extent_tree_szad_insert(chunks_szad, node);
- if (xnode != NULL)
- base_node_dealloc(xnode);
- } else {
- /* Coalescing forward failed, so insert a new node. */
- if (xnode == NULL) {
- /*
- * base_node_alloc() failed, which is an exceedingly
- * unlikely failure. Leak chunk; its pages have
- * already been purged, so this is only a virtual
- * memory leak.
- */
- malloc_mutex_unlock(&chunks_mtx);
- return;
- }
- node = xnode;
- node->addr = chunk;
- node->size = size;
- node->zeroed = (unzeroed == false);
- extent_tree_ad_insert(chunks_ad, node);
- extent_tree_szad_insert(chunks_szad, node);
- }
-
- /* Try to coalesce backward. */
- prev = extent_tree_ad_prev(chunks_ad, node);
- if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
- chunk) {
- /*
- * Coalesce chunk with the previous address range. This does
- * not change the position within chunks_ad, so only
- * remove/insert node from/into chunks_szad.
- */
- extent_tree_szad_remove(chunks_szad, prev);
- extent_tree_ad_remove(chunks_ad, prev);
-
- extent_tree_szad_remove(chunks_szad, node);
- node->addr = prev->addr;
- node->size += prev->size;
- node->zeroed = (node->zeroed && prev->zeroed);
- extent_tree_szad_insert(chunks_szad, node);
-
- base_node_dealloc(prev);
- }
- malloc_mutex_unlock(&chunks_mtx);
-}
-
-void
-chunk_unmap(void *chunk, size_t size)
-{
- assert(chunk != NULL);
- assert(CHUNK_ADDR2BASE(chunk) == chunk);
- assert(size != 0);
- assert((size & chunksize_mask) == 0);
-
- if (config_dss && chunk_in_dss(chunk))
- chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
- else if (chunk_dealloc_mmap(chunk, size))
- chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
-}
-
-void
-chunk_dealloc(void *chunk, size_t size, bool unmap)
-{
-
- assert(chunk != NULL);
- assert(CHUNK_ADDR2BASE(chunk) == chunk);
- assert(size != 0);
- assert((size & chunksize_mask) == 0);
-
- if (config_ivsalloc)
- rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
- if (config_stats || config_prof) {
- malloc_mutex_lock(&chunks_mtx);
- assert(stats_chunks.curchunks >= (size / chunksize));
- stats_chunks.curchunks -= (size / chunksize);
- malloc_mutex_unlock(&chunks_mtx);
- }
-
- if (unmap)
- chunk_unmap(chunk, size);
-}
-
-bool
-chunk_boot(void)
-{
-
- /* Set variables according to the value of opt_lg_chunk. */
- chunksize = (ZU(1) << opt_lg_chunk);
- assert(chunksize >= PAGE);
- chunksize_mask = chunksize - 1;
- chunk_npages = (chunksize >> LG_PAGE);
-
- if (config_stats || config_prof) {
- if (malloc_mutex_init(&chunks_mtx))
- return (true);
- memset(&stats_chunks, 0, sizeof(chunk_stats_t));
- }
- if (config_dss && chunk_dss_boot())
- return (true);
- extent_tree_szad_new(&chunks_szad_mmap);
- extent_tree_ad_new(&chunks_ad_mmap);
- extent_tree_szad_new(&chunks_szad_dss);
- extent_tree_ad_new(&chunks_ad_dss);
- if (config_ivsalloc) {
- chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
- opt_lg_chunk);
- if (chunks_rtree == NULL)
- return (true);
- }
-
- return (false);
-}
-
-void
-chunk_prefork(void)
-{
-
- malloc_mutex_lock(&chunks_mtx);
- if (config_ivsalloc)
- rtree_prefork(chunks_rtree);
- chunk_dss_prefork();
-}
-
-void
-chunk_postfork_parent(void)
-{
-
- chunk_dss_postfork_parent();
- if (config_ivsalloc)
- rtree_postfork_parent(chunks_rtree);
- malloc_mutex_postfork_parent(&chunks_mtx);
-}
-
-void
-chunk_postfork_child(void)
-{
-
- chunk_dss_postfork_child();
- if (config_ivsalloc)
- rtree_postfork_child(chunks_rtree);
- malloc_mutex_postfork_child(&chunks_mtx);
-}
diff --git a/extra/jemalloc/src/chunk_dss.c b/extra/jemalloc/src/chunk_dss.c
deleted file mode 100644
index 24781cc52dc..00000000000
--- a/extra/jemalloc/src/chunk_dss.c
+++ /dev/null
@@ -1,197 +0,0 @@
-#define JEMALLOC_CHUNK_DSS_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-/******************************************************************************/
-/* Data. */
-
-const char *dss_prec_names[] = {
- "disabled",
- "primary",
- "secondary",
- "N/A"
-};
-
-/* Current dss precedence default, used when creating new arenas. */
-static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT;
-
-/*
- * Protects sbrk() calls. This avoids malloc races among threads, though it
- * does not protect against races with threads that call sbrk() directly.
- */
-static malloc_mutex_t dss_mtx;
-
-/* Base address of the DSS. */
-static void *dss_base;
-/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
-static void *dss_prev;
-/* Current upper limit on DSS addresses. */
-static void *dss_max;
-
-/******************************************************************************/
-
-#ifndef JEMALLOC_HAVE_SBRK
-static void *
-sbrk(intptr_t increment)
-{
-
- not_implemented();
-
- return (NULL);
-}
-#endif
-
-dss_prec_t
-chunk_dss_prec_get(void)
-{
- dss_prec_t ret;
-
- if (config_dss == false)
- return (dss_prec_disabled);
- malloc_mutex_lock(&dss_mtx);
- ret = dss_prec_default;
- malloc_mutex_unlock(&dss_mtx);
- return (ret);
-}
-
-bool
-chunk_dss_prec_set(dss_prec_t dss_prec)
-{
-
- if (config_dss == false)
- return (true);
- malloc_mutex_lock(&dss_mtx);
- dss_prec_default = dss_prec;
- malloc_mutex_unlock(&dss_mtx);
- return (false);
-}
-
-void *
-chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
-{
- void *ret;
-
- cassert(config_dss);
- assert(size > 0 && (size & chunksize_mask) == 0);
- assert(alignment > 0 && (alignment & chunksize_mask) == 0);
-
- /*
- * sbrk() uses a signed increment argument, so take care not to
- * interpret a huge allocation request as a negative increment.
- */
- if ((intptr_t)size < 0)
- return (NULL);
-
- malloc_mutex_lock(&dss_mtx);
- if (dss_prev != (void *)-1) {
- size_t gap_size, cpad_size;
- void *cpad, *dss_next;
- intptr_t incr;
-
- /*
- * The loop is necessary to recover from races with other
- * threads that are using the DSS for something other than
- * malloc.
- */
- do {
- /* Get the current end of the DSS. */
- dss_max = sbrk(0);
- /*
- * Calculate how much padding is necessary to
- * chunk-align the end of the DSS.
- */
- gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
- chunksize_mask;
- /*
- * Compute how much chunk-aligned pad space (if any) is
- * necessary to satisfy alignment. This space can be
- * recycled for later use.
- */
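-			/*
-			 * When alignment equals chunksize, ret coincides with
-			 * cpad and cpad_size is 0; only larger alignments
-			 * leave a chunk-aligned pad, which is handed back via
-			 * chunk_unmap() below.
-			 */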
- cpad = (void *)((uintptr_t)dss_max + gap_size);
- ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
- alignment);
- cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
- dss_next = (void *)((uintptr_t)ret + size);
- if ((uintptr_t)ret < (uintptr_t)dss_max ||
- (uintptr_t)dss_next < (uintptr_t)dss_max) {
- /* Wrap-around. */
- malloc_mutex_unlock(&dss_mtx);
- return (NULL);
- }
- incr = gap_size + cpad_size + size;
- dss_prev = sbrk(incr);
- if (dss_prev == dss_max) {
- /* Success. */
- dss_max = dss_next;
- malloc_mutex_unlock(&dss_mtx);
- if (cpad_size != 0)
- chunk_unmap(cpad, cpad_size);
- if (*zero) {
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
- memset(ret, 0, size);
- }
- return (ret);
- }
- } while (dss_prev != (void *)-1);
- }
- malloc_mutex_unlock(&dss_mtx);
-
- return (NULL);
-}
-
-bool
-chunk_in_dss(void *chunk)
-{
- bool ret;
-
- cassert(config_dss);
-
- malloc_mutex_lock(&dss_mtx);
- if ((uintptr_t)chunk >= (uintptr_t)dss_base
- && (uintptr_t)chunk < (uintptr_t)dss_max)
- ret = true;
- else
- ret = false;
- malloc_mutex_unlock(&dss_mtx);
-
- return (ret);
-}
-
-bool
-chunk_dss_boot(void)
-{
-
- cassert(config_dss);
-
- if (malloc_mutex_init(&dss_mtx))
- return (true);
- dss_base = sbrk(0);
- dss_prev = dss_base;
- dss_max = dss_base;
-
- return (false);
-}
-
-void
-chunk_dss_prefork(void)
-{
-
- if (config_dss)
- malloc_mutex_prefork(&dss_mtx);
-}
-
-void
-chunk_dss_postfork_parent(void)
-{
-
- if (config_dss)
- malloc_mutex_postfork_parent(&dss_mtx);
-}
-
-void
-chunk_dss_postfork_child(void)
-{
-
- if (config_dss)
- malloc_mutex_postfork_child(&dss_mtx);
-}
-
-/******************************************************************************/
diff --git a/extra/jemalloc/src/chunk_mmap.c b/extra/jemalloc/src/chunk_mmap.c
deleted file mode 100644
index 8a42e75915f..00000000000
--- a/extra/jemalloc/src/chunk_mmap.c
+++ /dev/null
@@ -1,210 +0,0 @@
-#define JEMALLOC_CHUNK_MMAP_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static void *pages_map(void *addr, size_t size);
-static void pages_unmap(void *addr, size_t size);
-static void *chunk_alloc_mmap_slow(size_t size, size_t alignment,
- bool *zero);
-
-/******************************************************************************/
-
-static void *
-pages_map(void *addr, size_t size)
-{
- void *ret;
-
- assert(size != 0);
-
-#ifdef _WIN32
- /*
- * If VirtualAlloc can't allocate at the given address when one is
- * given, it fails and returns NULL.
- */
- ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
- PAGE_READWRITE);
-#else
- /*
- * We don't use MAP_FIXED here, because it can cause the *replacement*
- * of existing mappings, and we only want to create new mappings.
- */
- ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
- -1, 0);
- assert(ret != NULL);
-
- if (ret == MAP_FAILED)
- ret = NULL;
- else if (addr != NULL && ret != addr) {
- /*
- * We succeeded in mapping memory, but not in the right place.
- */
- if (munmap(ret, size) == -1) {
- char buf[BUFERROR_BUF];
-
- buferror(buf, sizeof(buf));
-			malloc_printf("<jemalloc>: Error in munmap(): %s\n",
- buf);
- if (opt_abort)
- abort();
- }
- ret = NULL;
- }
-#endif
- assert(ret == NULL || (addr == NULL && ret != addr)
- || (addr != NULL && ret == addr));
- return (ret);
-}
-
-static void
-pages_unmap(void *addr, size_t size)
-{
-
-#ifdef _WIN32
- if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
-#else
- if (munmap(addr, size) == -1)
-#endif
- {
- char buf[BUFERROR_BUF];
-
- buferror(buf, sizeof(buf));
- malloc_printf("<jemalloc>: Error in "
-#ifdef _WIN32
- "VirtualFree"
-#else
- "munmap"
-#endif
- "(): %s\n", buf);
- if (opt_abort)
- abort();
- }
-}
-
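-/*
- * Trim an over-sized mapping of alloc_size bytes at addr down to the
- * size-byte span that starts leadsize bytes in.  On Windows the entire
- * mapping is released and the target span re-mapped; elsewhere the leading
- * and trailing excess is simply munmap()ed.
- */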
-static void *
-pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
-{
- void *ret = (void *)((uintptr_t)addr + leadsize);
-
- assert(alloc_size >= leadsize + size);
-#ifdef _WIN32
- {
- void *new_addr;
-
- pages_unmap(addr, alloc_size);
- new_addr = pages_map(ret, size);
- if (new_addr == ret)
- return (ret);
- if (new_addr)
- pages_unmap(new_addr, size);
- return (NULL);
- }
-#else
- {
- size_t trailsize = alloc_size - leadsize - size;
-
- if (leadsize != 0)
- pages_unmap(addr, leadsize);
- if (trailsize != 0)
- pages_unmap((void *)((uintptr_t)ret + size), trailsize);
- return (ret);
- }
-#endif
-}
-
-bool
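-/*
- * Tell the kernel that the pages in [addr, addr+length) are unused.  Returns
- * true if the pages may still hold their old contents afterward (e.g.
- * MADV_FREE or MEM_RESET), false if they are guaranteed to read back as
- * zeros (e.g. Linux MADV_DONTNEED).
- */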
-pages_purge(void *addr, size_t length)
-{
- bool unzeroed;
-
-#ifdef _WIN32
- VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE);
- unzeroed = true;
-#else
-# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
-# define JEMALLOC_MADV_PURGE MADV_DONTNEED
-# define JEMALLOC_MADV_ZEROS true
-# elif defined(JEMALLOC_PURGE_MADVISE_FREE)
-# define JEMALLOC_MADV_PURGE MADV_FREE
-# define JEMALLOC_MADV_ZEROS false
-# else
-# error "No method defined for purging unused dirty pages."
-# endif
- int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
- unzeroed = (JEMALLOC_MADV_ZEROS == false || err != 0);
-# undef JEMALLOC_MADV_PURGE
-# undef JEMALLOC_MADV_ZEROS
-#endif
- return (unzeroed);
-}
-
-static void *
-chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero)
-{
- void *ret, *pages;
- size_t alloc_size, leadsize;
-
- alloc_size = size + alignment - PAGE;
- /* Beware size_t wrap-around. */
- if (alloc_size < size)
- return (NULL);
- do {
- pages = pages_map(NULL, alloc_size);
- if (pages == NULL)
- return (NULL);
- leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
- (uintptr_t)pages;
- ret = pages_trim(pages, alloc_size, leadsize, size);
- } while (ret == NULL);
-
- assert(ret != NULL);
- *zero = true;
- return (ret);
-}
-
-void *
-chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
-{
- void *ret;
- size_t offset;
-
- /*
- * Ideally, there would be a way to specify alignment to mmap() (like
- * NetBSD has), but in the absence of such a feature, we have to work
- * hard to efficiently create aligned mappings. The reliable, but
- * slow method is to create a mapping that is over-sized, then trim the
- * excess. However, that always results in one or two calls to
- * pages_unmap().
- *
- * Optimistically try mapping precisely the right amount before falling
- * back to the slow method, with the expectation that the optimistic
- * approach works most of the time.
- */
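-	/*
-	 * For example, with 4 MiB chunks, a mapping that comes back 2 MiB past
-	 * a chunk boundary (offset != 0) is unmapped and the slow path
-	 * over-allocates size + alignment - PAGE bytes, then trims the
-	 * unaligned ends.
-	 */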
-
- assert(alignment != 0);
- assert((alignment & chunksize_mask) == 0);
-
- ret = pages_map(NULL, size);
- if (ret == NULL)
- return (NULL);
- offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
- if (offset != 0) {
- pages_unmap(ret, size);
- return (chunk_alloc_mmap_slow(size, alignment, zero));
- }
-
- assert(ret != NULL);
- *zero = true;
- return (ret);
-}
-
-bool
-chunk_dealloc_mmap(void *chunk, size_t size)
-{
-
- if (config_munmap)
- pages_unmap(chunk, size);
-
- return (config_munmap == false);
-}
diff --git a/extra/jemalloc/src/ckh.c b/extra/jemalloc/src/ckh.c
deleted file mode 100644
index 2f38348bb85..00000000000
--- a/extra/jemalloc/src/ckh.c
+++ /dev/null
@@ -1,563 +0,0 @@
-/*
- *******************************************************************************
- * Implementation of (2^1+,2) cuckoo hashing, where 2^1+ indicates that each
- * hash bucket contains 2^n cells, for n >= 1, and 2 indicates that two hash
- * functions are employed. The original cuckoo hashing algorithm was described
- * in:
- *
- * Pagh, R., F.F. Rodler (2004) Cuckoo Hashing. Journal of Algorithms
- * 51(2):122-144.
- *
- * Generalization of cuckoo hashing was discussed in:
- *
- * Erlingsson, U., M. Manasse, F. McSherry (2006) A cool and practical
- * alternative to traditional hash tables. In Proceedings of the 7th
- * Workshop on Distributed Data and Structures (WDAS'06), Santa Clara, CA,
- * January 2006.
- *
- * This implementation uses precisely two hash functions because that is the
- * fewest that can work, and supporting multiple hashes is an implementation
- * burden. Here is a reproduction of Figure 1 from Erlingsson et al. (2006)
- * that shows approximate expected maximum load factors for various
- * configurations:
- *
- * | #cells/bucket |
- * #hashes | 1 | 2 | 4 | 8 |
- * --------+-------+-------+-------+-------+
- * 1 | 0.006 | 0.006 | 0.03 | 0.12 |
- * 2 | 0.49 | 0.86 |>0.93< |>0.96< |
- * 3 | 0.91 | 0.97 | 0.98 | 0.999 |
- * 4 | 0.97 | 0.99 | 0.999 | |
- *
- * The number of cells per bucket is chosen such that a bucket fits in one cache
- * line. So, on 32- and 64-bit systems, we use (8,2) and (4,2) cuckoo hashing,
- * respectively.
- *
- ******************************************************************************/
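-/*
- * Minimal usage sketch of this internal API (illustrative only).  ckh_new()
- * and ckh_insert() return true only on error, and ckh_search() returns false
- * when the key is found:
- *
- *	ckh_t ckh;
- *	void *data;
- *
- *	if (ckh_new(&ckh, 16, ckh_string_hash, ckh_string_keycomp))
- *		... handle OOM ...
- *	if (ckh_insert(&ckh, "answer", "42"))
- *		... handle OOM ...
- *	if (ckh_search(&ckh, "answer", NULL, &data) == false)
- *		... data now points to "42" ...
- *	ckh_delete(&ckh);
- */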
-#define JEMALLOC_CKH_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static bool ckh_grow(ckh_t *ckh);
-static void ckh_shrink(ckh_t *ckh);
-
-/******************************************************************************/
-
-/*
- * Search bucket for key and return the cell number if found; SIZE_T_MAX
- * otherwise.
- */
-JEMALLOC_INLINE size_t
-ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
-{
- ckhc_t *cell;
- unsigned i;
-
- for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
- cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
- if (cell->key != NULL && ckh->keycomp(key, cell->key))
- return ((bucket << LG_CKH_BUCKET_CELLS) + i);
- }
-
- return (SIZE_T_MAX);
-}
-
-/*
- * Search table for key and return cell number if found; SIZE_T_MAX otherwise.
- */
-JEMALLOC_INLINE size_t
-ckh_isearch(ckh_t *ckh, const void *key)
-{
- size_t hashes[2], bucket, cell;
-
- assert(ckh != NULL);
-
- ckh->hash(key, hashes);
-
- /* Search primary bucket. */
- bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
- cell = ckh_bucket_search(ckh, bucket, key);
- if (cell != SIZE_T_MAX)
- return (cell);
-
- /* Search secondary bucket. */
- bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
- cell = ckh_bucket_search(ckh, bucket, key);
- return (cell);
-}
-
-JEMALLOC_INLINE bool
-ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
- const void *data)
-{
- ckhc_t *cell;
- unsigned offset, i;
-
- /*
- * Cycle through the cells in the bucket, starting at a random position.
- * The randomness avoids worst-case search overhead as buckets fill up.
- */
- prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
- for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
- cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
- ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
- if (cell->key == NULL) {
- cell->key = key;
- cell->data = data;
- ckh->count++;
- return (false);
- }
- }
-
- return (true);
-}
-
-/*
- * No space is available in bucket. Randomly evict an item, then try to find an
- * alternate location for that item. Iteratively repeat this
- * eviction/relocation procedure until either success or detection of an
- * eviction/relocation bucket cycle.
- */
-JEMALLOC_INLINE bool
-ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
- void const **argdata)
-{
- const void *key, *data, *tkey, *tdata;
- ckhc_t *cell;
- size_t hashes[2], bucket, tbucket;
- unsigned i;
-
- bucket = argbucket;
- key = *argkey;
- data = *argdata;
- while (true) {
- /*
- * Choose a random item within the bucket to evict. This is
- * critical to correct function, because without (eventually)
- * evicting all items within a bucket during iteration, it
- * would be possible to get stuck in an infinite loop if there
- * were an item for which both hashes indicated the same
- * bucket.
- */
- prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
- cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
- assert(cell->key != NULL);
-
- /* Swap cell->{key,data} and {key,data} (evict). */
- tkey = cell->key; tdata = cell->data;
- cell->key = key; cell->data = data;
- key = tkey; data = tdata;
-
-#ifdef CKH_COUNT
- ckh->nrelocs++;
-#endif
-
- /* Find the alternate bucket for the evicted item. */
- ckh->hash(key, hashes);
- tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
- if (tbucket == bucket) {
- tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets)
- - 1);
- /*
- * It may be that (tbucket == bucket) still, if the
- * item's hashes both indicate this bucket. However,
- * we are guaranteed to eventually escape this bucket
- * during iteration, assuming pseudo-random item
- * selection (true randomness would make infinite
- * looping a remote possibility). The reason we can
- * never get trapped forever is that there are two
- * cases:
- *
- * 1) This bucket == argbucket, so we will quickly
- * detect an eviction cycle and terminate.
- * 2) An item was evicted to this bucket from another,
- * which means that at least one item in this bucket
- * has hashes that indicate distinct buckets.
- */
- }
- /* Check for a cycle. */
- if (tbucket == argbucket) {
- *argkey = key;
- *argdata = data;
- return (true);
- }
-
- bucket = tbucket;
- if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
- return (false);
- }
-}
-
-JEMALLOC_INLINE bool
-ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
-{
- size_t hashes[2], bucket;
- const void *key = *argkey;
- const void *data = *argdata;
-
- ckh->hash(key, hashes);
-
- /* Try to insert in primary bucket. */
- bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
- if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
- return (false);
-
- /* Try to insert in secondary bucket. */
- bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
- if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
- return (false);
-
- /*
- * Try to find a place for this item via iterative eviction/relocation.
- */
- return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata));
-}
-
-/*
- * Try to rebuild the hash table from scratch by inserting all items from the
- * old table into the new.
- */
-JEMALLOC_INLINE bool
-ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
-{
- size_t count, i, nins;
- const void *key, *data;
-
- count = ckh->count;
- ckh->count = 0;
- for (i = nins = 0; nins < count; i++) {
- if (aTab[i].key != NULL) {
- key = aTab[i].key;
- data = aTab[i].data;
- if (ckh_try_insert(ckh, &key, &data)) {
- ckh->count = count;
- return (true);
- }
- nins++;
- }
- }
-
- return (false);
-}
-
-static bool
-ckh_grow(ckh_t *ckh)
-{
- bool ret;
- ckhc_t *tab, *ttab;
- size_t lg_curcells;
- unsigned lg_prevbuckets;
-
-#ifdef CKH_COUNT
- ckh->ngrows++;
-#endif
-
- /*
- * It is possible (though unlikely, given well behaved hashes) that the
- * table will have to be doubled more than once in order to create a
- * usable table.
- */
- lg_prevbuckets = ckh->lg_curbuckets;
- lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS;
- while (true) {
- size_t usize;
-
- lg_curcells++;
- usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
- if (usize == 0) {
- ret = true;
- goto label_return;
- }
- tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
- if (tab == NULL) {
- ret = true;
- goto label_return;
- }
- /* Swap in new table. */
- ttab = ckh->tab;
- ckh->tab = tab;
- tab = ttab;
- ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
-
- if (ckh_rebuild(ckh, tab) == false) {
- idalloc(tab);
- break;
- }
-
- /* Rebuilding failed, so back out partially rebuilt table. */
- idalloc(ckh->tab);
- ckh->tab = tab;
- ckh->lg_curbuckets = lg_prevbuckets;
- }
-
- ret = false;
-label_return:
- return (ret);
-}
-
-static void
-ckh_shrink(ckh_t *ckh)
-{
- ckhc_t *tab, *ttab;
- size_t lg_curcells, usize;
- unsigned lg_prevbuckets;
-
- /*
- * It is possible (though unlikely, given well behaved hashes) that the
- * table rebuild will fail.
- */
- lg_prevbuckets = ckh->lg_curbuckets;
- lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
- usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
- if (usize == 0)
- return;
- tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
- if (tab == NULL) {
- /*
- * An OOM error isn't worth propagating, since it doesn't
- * prevent this or future operations from proceeding.
- */
- return;
- }
- /* Swap in new table. */
- ttab = ckh->tab;
- ckh->tab = tab;
- tab = ttab;
- ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
-
- if (ckh_rebuild(ckh, tab) == false) {
- idalloc(tab);
-#ifdef CKH_COUNT
- ckh->nshrinks++;
-#endif
- return;
- }
-
- /* Rebuilding failed, so back out partially rebuilt table. */
- idalloc(ckh->tab);
- ckh->tab = tab;
- ckh->lg_curbuckets = lg_prevbuckets;
-#ifdef CKH_COUNT
- ckh->nshrinkfails++;
-#endif
-}
-
-bool
-ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
-{
- bool ret;
- size_t mincells, usize;
- unsigned lg_mincells;
-
- assert(minitems > 0);
- assert(hash != NULL);
- assert(keycomp != NULL);
-
-#ifdef CKH_COUNT
- ckh->ngrows = 0;
- ckh->nshrinks = 0;
- ckh->nshrinkfails = 0;
- ckh->ninserts = 0;
- ckh->nrelocs = 0;
-#endif
- ckh->prng_state = 42; /* Value doesn't really matter. */
- ckh->count = 0;
-
- /*
-	 * Find the minimum power of 2 that is large enough to fit minitems
-	 * entries.  We are using (2+,2) cuckoo hashing, which has an expected
-	 * maximum load factor of at least ~0.86, so 0.75 is a conservative load
-	 * factor that will typically allow minitems to fit without ever
- * growing the table.
- */
- assert(LG_CKH_BUCKET_CELLS > 0);
- mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
- for (lg_mincells = LG_CKH_BUCKET_CELLS;
- (ZU(1) << lg_mincells) < mincells;
- lg_mincells++)
- ; /* Do nothing. */
- ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
- ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
- ckh->hash = hash;
- ckh->keycomp = keycomp;
-
- usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
- if (usize == 0) {
- ret = true;
- goto label_return;
- }
- ckh->tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
- if (ckh->tab == NULL) {
- ret = true;
- goto label_return;
- }
-
- ret = false;
-label_return:
- return (ret);
-}
-
-void
-ckh_delete(ckh_t *ckh)
-{
-
- assert(ckh != NULL);
-
-#ifdef CKH_VERBOSE
- malloc_printf(
- "%s(%p): ngrows: %"PRIu64", nshrinks: %"PRIu64","
- " nshrinkfails: %"PRIu64", ninserts: %"PRIu64","
- " nrelocs: %"PRIu64"\n", __func__, ckh,
- (unsigned long long)ckh->ngrows,
- (unsigned long long)ckh->nshrinks,
- (unsigned long long)ckh->nshrinkfails,
- (unsigned long long)ckh->ninserts,
- (unsigned long long)ckh->nrelocs);
-#endif
-
- idalloc(ckh->tab);
- if (config_debug)
- memset(ckh, 0x5a, sizeof(ckh_t));
-}
-
-size_t
-ckh_count(ckh_t *ckh)
-{
-
- assert(ckh != NULL);
-
- return (ckh->count);
-}
-
-bool
-ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)
-{
- size_t i, ncells;
-
- for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets +
- LG_CKH_BUCKET_CELLS)); i < ncells; i++) {
- if (ckh->tab[i].key != NULL) {
- if (key != NULL)
- *key = (void *)ckh->tab[i].key;
- if (data != NULL)
- *data = (void *)ckh->tab[i].data;
- *tabind = i + 1;
- return (false);
- }
- }
-
- return (true);
-}
-
-bool
-ckh_insert(ckh_t *ckh, const void *key, const void *data)
-{
- bool ret;
-
- assert(ckh != NULL);
- assert(ckh_search(ckh, key, NULL, NULL));
-
-#ifdef CKH_COUNT
- ckh->ninserts++;
-#endif
-
- while (ckh_try_insert(ckh, &key, &data)) {
- if (ckh_grow(ckh)) {
- ret = true;
- goto label_return;
- }
- }
-
- ret = false;
-label_return:
- return (ret);
-}
-
-bool
-ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
-{
- size_t cell;
-
- assert(ckh != NULL);
-
- cell = ckh_isearch(ckh, searchkey);
- if (cell != SIZE_T_MAX) {
- if (key != NULL)
- *key = (void *)ckh->tab[cell].key;
- if (data != NULL)
- *data = (void *)ckh->tab[cell].data;
- ckh->tab[cell].key = NULL;
- ckh->tab[cell].data = NULL; /* Not necessary. */
-
- ckh->count--;
- /* Try to halve the table if it is less than 1/4 full. */
- if (ckh->count < (ZU(1) << (ckh->lg_curbuckets
- + LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
- > ckh->lg_minbuckets) {
- /* Ignore error due to OOM. */
- ckh_shrink(ckh);
- }
-
- return (false);
- }
-
- return (true);
-}
-
-bool
-ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
-{
- size_t cell;
-
- assert(ckh != NULL);
-
- cell = ckh_isearch(ckh, searchkey);
- if (cell != SIZE_T_MAX) {
- if (key != NULL)
- *key = (void *)ckh->tab[cell].key;
- if (data != NULL)
- *data = (void *)ckh->tab[cell].data;
- return (false);
- }
-
- return (true);
-}
-
-void
-ckh_string_hash(const void *key, size_t r_hash[2])
-{
-
- hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
-}
-
-bool
-ckh_string_keycomp(const void *k1, const void *k2)
-{
-
- assert(k1 != NULL);
- assert(k2 != NULL);
-
- return (strcmp((char *)k1, (char *)k2) ? false : true);
-}
-
-void
-ckh_pointer_hash(const void *key, size_t r_hash[2])
-{
- union {
- const void *v;
- size_t i;
- } u;
-
- assert(sizeof(u.v) == sizeof(u.i));
- u.v = key;
- hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash);
-}
-
-bool
-ckh_pointer_keycomp(const void *k1, const void *k2)
-{
-
- return ((k1 == k2) ? true : false);
-}
diff --git a/extra/jemalloc/src/ctl.c b/extra/jemalloc/src/ctl.c
deleted file mode 100644
index 7ce4fc4d573..00000000000
--- a/extra/jemalloc/src/ctl.c
+++ /dev/null
@@ -1,1673 +0,0 @@
-#define JEMALLOC_CTL_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-/* Data. */
-
-/*
- * ctl_mtx protects the following:
- * - ctl_stats.*
- * - opt_prof_active
- */
-static malloc_mutex_t ctl_mtx;
-static bool ctl_initialized;
-static uint64_t ctl_epoch;
-static ctl_stats_t ctl_stats;
-
-/******************************************************************************/
-/* Helpers for named and indexed nodes. */
-
-static inline const ctl_named_node_t *
-ctl_named_node(const ctl_node_t *node)
-{
-
- return ((node->named) ? (const ctl_named_node_t *)node : NULL);
-}
-
-static inline const ctl_named_node_t *
-ctl_named_children(const ctl_named_node_t *node, int index)
-{
- const ctl_named_node_t *children = ctl_named_node(node->children);
-
- return (children ? &children[index] : NULL);
-}
-
-static inline const ctl_indexed_node_t *
-ctl_indexed_node(const ctl_node_t *node)
-{
-
- return ((node->named == false) ? (const ctl_indexed_node_t *)node :
- NULL);
-}
-
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-#define CTL_PROTO(n) \
-static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen);
-
-#define INDEX_PROTO(n) \
-static const ctl_named_node_t *n##_index(const size_t *mib, \
- size_t miblen, size_t i);
-
-static bool ctl_arena_init(ctl_arena_stats_t *astats);
-static void ctl_arena_clear(ctl_arena_stats_t *astats);
-static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats,
- arena_t *arena);
-static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
- ctl_arena_stats_t *astats);
-static void ctl_arena_refresh(arena_t *arena, unsigned i);
-static bool ctl_grow(void);
-static void ctl_refresh(void);
-static bool ctl_init(void);
-static int ctl_lookup(const char *name, ctl_node_t const **nodesp,
- size_t *mibp, size_t *depthp);
-
-CTL_PROTO(version)
-CTL_PROTO(epoch)
-CTL_PROTO(thread_tcache_enabled)
-CTL_PROTO(thread_tcache_flush)
-CTL_PROTO(thread_arena)
-CTL_PROTO(thread_allocated)
-CTL_PROTO(thread_allocatedp)
-CTL_PROTO(thread_deallocated)
-CTL_PROTO(thread_deallocatedp)
-CTL_PROTO(config_debug)
-CTL_PROTO(config_dss)
-CTL_PROTO(config_fill)
-CTL_PROTO(config_lazy_lock)
-CTL_PROTO(config_mremap)
-CTL_PROTO(config_munmap)
-CTL_PROTO(config_prof)
-CTL_PROTO(config_prof_libgcc)
-CTL_PROTO(config_prof_libunwind)
-CTL_PROTO(config_stats)
-CTL_PROTO(config_tcache)
-CTL_PROTO(config_tls)
-CTL_PROTO(config_utrace)
-CTL_PROTO(config_valgrind)
-CTL_PROTO(config_xmalloc)
-CTL_PROTO(opt_abort)
-CTL_PROTO(opt_dss)
-CTL_PROTO(opt_lg_chunk)
-CTL_PROTO(opt_narenas)
-CTL_PROTO(opt_lg_dirty_mult)
-CTL_PROTO(opt_stats_print)
-CTL_PROTO(opt_junk)
-CTL_PROTO(opt_zero)
-CTL_PROTO(opt_quarantine)
-CTL_PROTO(opt_redzone)
-CTL_PROTO(opt_utrace)
-CTL_PROTO(opt_valgrind)
-CTL_PROTO(opt_xmalloc)
-CTL_PROTO(opt_tcache)
-CTL_PROTO(opt_lg_tcache_max)
-CTL_PROTO(opt_prof)
-CTL_PROTO(opt_prof_prefix)
-CTL_PROTO(opt_prof_active)
-CTL_PROTO(opt_lg_prof_sample)
-CTL_PROTO(opt_lg_prof_interval)
-CTL_PROTO(opt_prof_gdump)
-CTL_PROTO(opt_prof_final)
-CTL_PROTO(opt_prof_leak)
-CTL_PROTO(opt_prof_accum)
-CTL_PROTO(arena_i_purge)
-static void arena_purge(unsigned arena_ind);
-CTL_PROTO(arena_i_dss)
-INDEX_PROTO(arena_i)
-CTL_PROTO(arenas_bin_i_size)
-CTL_PROTO(arenas_bin_i_nregs)
-CTL_PROTO(arenas_bin_i_run_size)
-INDEX_PROTO(arenas_bin_i)
-CTL_PROTO(arenas_lrun_i_size)
-INDEX_PROTO(arenas_lrun_i)
-CTL_PROTO(arenas_narenas)
-CTL_PROTO(arenas_initialized)
-CTL_PROTO(arenas_quantum)
-CTL_PROTO(arenas_page)
-CTL_PROTO(arenas_tcache_max)
-CTL_PROTO(arenas_nbins)
-CTL_PROTO(arenas_nhbins)
-CTL_PROTO(arenas_nlruns)
-CTL_PROTO(arenas_purge)
-CTL_PROTO(arenas_extend)
-CTL_PROTO(prof_active)
-CTL_PROTO(prof_dump)
-CTL_PROTO(prof_interval)
-CTL_PROTO(stats_chunks_current)
-CTL_PROTO(stats_chunks_total)
-CTL_PROTO(stats_chunks_high)
-CTL_PROTO(stats_huge_allocated)
-CTL_PROTO(stats_huge_nmalloc)
-CTL_PROTO(stats_huge_ndalloc)
-CTL_PROTO(stats_arenas_i_small_allocated)
-CTL_PROTO(stats_arenas_i_small_nmalloc)
-CTL_PROTO(stats_arenas_i_small_ndalloc)
-CTL_PROTO(stats_arenas_i_small_nrequests)
-CTL_PROTO(stats_arenas_i_large_allocated)
-CTL_PROTO(stats_arenas_i_large_nmalloc)
-CTL_PROTO(stats_arenas_i_large_ndalloc)
-CTL_PROTO(stats_arenas_i_large_nrequests)
-CTL_PROTO(stats_arenas_i_bins_j_allocated)
-CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
-CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
-CTL_PROTO(stats_arenas_i_bins_j_nrequests)
-CTL_PROTO(stats_arenas_i_bins_j_nfills)
-CTL_PROTO(stats_arenas_i_bins_j_nflushes)
-CTL_PROTO(stats_arenas_i_bins_j_nruns)
-CTL_PROTO(stats_arenas_i_bins_j_nreruns)
-CTL_PROTO(stats_arenas_i_bins_j_curruns)
-INDEX_PROTO(stats_arenas_i_bins_j)
-CTL_PROTO(stats_arenas_i_lruns_j_nmalloc)
-CTL_PROTO(stats_arenas_i_lruns_j_ndalloc)
-CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
-CTL_PROTO(stats_arenas_i_lruns_j_curruns)
-INDEX_PROTO(stats_arenas_i_lruns_j)
-CTL_PROTO(stats_arenas_i_nthreads)
-CTL_PROTO(stats_arenas_i_dss)
-CTL_PROTO(stats_arenas_i_pactive)
-CTL_PROTO(stats_arenas_i_pdirty)
-CTL_PROTO(stats_arenas_i_mapped)
-CTL_PROTO(stats_arenas_i_npurge)
-CTL_PROTO(stats_arenas_i_nmadvise)
-CTL_PROTO(stats_arenas_i_purged)
-INDEX_PROTO(stats_arenas_i)
-CTL_PROTO(stats_cactive)
-CTL_PROTO(stats_allocated)
-CTL_PROTO(stats_active)
-CTL_PROTO(stats_mapped)
-
-/******************************************************************************/
-/* mallctl tree. */
-
-/* Maximum tree depth. */
-#define CTL_MAX_DEPTH 6
-
-#define NAME(n) {true}, n
-#define CHILD(t, c) \
- sizeof(c##_node) / sizeof(ctl_##t##_node_t), \
- (ctl_node_t *)c##_node, \
- NULL
-#define CTL(c) 0, NULL, c##_ctl
-
-/*
- * Only handles internal indexed nodes, since there are currently no external
- * ones.
- */
-#define INDEX(i) {false}, i##_index
-
-static const ctl_named_node_t tcache_node[] = {
- {NAME("enabled"), CTL(thread_tcache_enabled)},
- {NAME("flush"), CTL(thread_tcache_flush)}
-};
-
-static const ctl_named_node_t thread_node[] = {
- {NAME("arena"), CTL(thread_arena)},
- {NAME("allocated"), CTL(thread_allocated)},
- {NAME("allocatedp"), CTL(thread_allocatedp)},
- {NAME("deallocated"), CTL(thread_deallocated)},
- {NAME("deallocatedp"), CTL(thread_deallocatedp)},
- {NAME("tcache"), CHILD(named, tcache)}
-};
-
-static const ctl_named_node_t config_node[] = {
- {NAME("debug"), CTL(config_debug)},
- {NAME("dss"), CTL(config_dss)},
- {NAME("fill"), CTL(config_fill)},
- {NAME("lazy_lock"), CTL(config_lazy_lock)},
- {NAME("mremap"), CTL(config_mremap)},
- {NAME("munmap"), CTL(config_munmap)},
- {NAME("prof"), CTL(config_prof)},
- {NAME("prof_libgcc"), CTL(config_prof_libgcc)},
- {NAME("prof_libunwind"), CTL(config_prof_libunwind)},
- {NAME("stats"), CTL(config_stats)},
- {NAME("tcache"), CTL(config_tcache)},
- {NAME("tls"), CTL(config_tls)},
- {NAME("utrace"), CTL(config_utrace)},
- {NAME("valgrind"), CTL(config_valgrind)},
- {NAME("xmalloc"), CTL(config_xmalloc)}
-};
-
-static const ctl_named_node_t opt_node[] = {
- {NAME("abort"), CTL(opt_abort)},
- {NAME("dss"), CTL(opt_dss)},
- {NAME("lg_chunk"), CTL(opt_lg_chunk)},
- {NAME("narenas"), CTL(opt_narenas)},
- {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)},
- {NAME("stats_print"), CTL(opt_stats_print)},
- {NAME("junk"), CTL(opt_junk)},
- {NAME("zero"), CTL(opt_zero)},
- {NAME("quarantine"), CTL(opt_quarantine)},
- {NAME("redzone"), CTL(opt_redzone)},
- {NAME("utrace"), CTL(opt_utrace)},
- {NAME("valgrind"), CTL(opt_valgrind)},
- {NAME("xmalloc"), CTL(opt_xmalloc)},
- {NAME("tcache"), CTL(opt_tcache)},
- {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)},
- {NAME("prof"), CTL(opt_prof)},
- {NAME("prof_prefix"), CTL(opt_prof_prefix)},
- {NAME("prof_active"), CTL(opt_prof_active)},
- {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
- {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
- {NAME("prof_gdump"), CTL(opt_prof_gdump)},
- {NAME("prof_final"), CTL(opt_prof_final)},
- {NAME("prof_leak"), CTL(opt_prof_leak)},
- {NAME("prof_accum"), CTL(opt_prof_accum)}
-};
-
-static const ctl_named_node_t arena_i_node[] = {
- {NAME("purge"), CTL(arena_i_purge)},
- {NAME("dss"), CTL(arena_i_dss)}
-};
-static const ctl_named_node_t super_arena_i_node[] = {
- {NAME(""), CHILD(named, arena_i)}
-};
-
-static const ctl_indexed_node_t arena_node[] = {
- {INDEX(arena_i)}
-};
-
-static const ctl_named_node_t arenas_bin_i_node[] = {
- {NAME("size"), CTL(arenas_bin_i_size)},
- {NAME("nregs"), CTL(arenas_bin_i_nregs)},
- {NAME("run_size"), CTL(arenas_bin_i_run_size)}
-};
-static const ctl_named_node_t super_arenas_bin_i_node[] = {
- {NAME(""), CHILD(named, arenas_bin_i)}
-};
-
-static const ctl_indexed_node_t arenas_bin_node[] = {
- {INDEX(arenas_bin_i)}
-};
-
-static const ctl_named_node_t arenas_lrun_i_node[] = {
- {NAME("size"), CTL(arenas_lrun_i_size)}
-};
-static const ctl_named_node_t super_arenas_lrun_i_node[] = {
- {NAME(""), CHILD(named, arenas_lrun_i)}
-};
-
-static const ctl_indexed_node_t arenas_lrun_node[] = {
- {INDEX(arenas_lrun_i)}
-};
-
-static const ctl_named_node_t arenas_node[] = {
- {NAME("narenas"), CTL(arenas_narenas)},
- {NAME("initialized"), CTL(arenas_initialized)},
- {NAME("quantum"), CTL(arenas_quantum)},
- {NAME("page"), CTL(arenas_page)},
- {NAME("tcache_max"), CTL(arenas_tcache_max)},
- {NAME("nbins"), CTL(arenas_nbins)},
- {NAME("nhbins"), CTL(arenas_nhbins)},
- {NAME("bin"), CHILD(indexed, arenas_bin)},
- {NAME("nlruns"), CTL(arenas_nlruns)},
- {NAME("lrun"), CHILD(indexed, arenas_lrun)},
- {NAME("purge"), CTL(arenas_purge)},
- {NAME("extend"), CTL(arenas_extend)}
-};
-
-static const ctl_named_node_t prof_node[] = {
- {NAME("active"), CTL(prof_active)},
- {NAME("dump"), CTL(prof_dump)},
- {NAME("interval"), CTL(prof_interval)}
-};
-
-static const ctl_named_node_t stats_chunks_node[] = {
- {NAME("current"), CTL(stats_chunks_current)},
- {NAME("total"), CTL(stats_chunks_total)},
- {NAME("high"), CTL(stats_chunks_high)}
-};
-
-static const ctl_named_node_t stats_huge_node[] = {
- {NAME("allocated"), CTL(stats_huge_allocated)},
- {NAME("nmalloc"), CTL(stats_huge_nmalloc)},
- {NAME("ndalloc"), CTL(stats_huge_ndalloc)}
-};
-
-static const ctl_named_node_t stats_arenas_i_small_node[] = {
- {NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
- {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
- {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)},
- {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)}
-};
-
-static const ctl_named_node_t stats_arenas_i_large_node[] = {
- {NAME("allocated"), CTL(stats_arenas_i_large_allocated)},
- {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)},
- {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)},
- {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}
-};
-
-static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
- {NAME("allocated"), CTL(stats_arenas_i_bins_j_allocated)},
- {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
- {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)},
- {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)},
- {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)},
- {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)},
- {NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)},
- {NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)},
- {NAME("curruns"), CTL(stats_arenas_i_bins_j_curruns)}
-};
-static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
- {NAME(""), CHILD(named, stats_arenas_i_bins_j)}
-};
-
-static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
- {INDEX(stats_arenas_i_bins_j)}
-};
-
-static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = {
- {NAME("nmalloc"), CTL(stats_arenas_i_lruns_j_nmalloc)},
- {NAME("ndalloc"), CTL(stats_arenas_i_lruns_j_ndalloc)},
- {NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)},
- {NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)}
-};
-static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = {
- {NAME(""), CHILD(named, stats_arenas_i_lruns_j)}
-};
-
-static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = {
- {INDEX(stats_arenas_i_lruns_j)}
-};
-
-static const ctl_named_node_t stats_arenas_i_node[] = {
- {NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
- {NAME("dss"), CTL(stats_arenas_i_dss)},
- {NAME("pactive"), CTL(stats_arenas_i_pactive)},
- {NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
- {NAME("mapped"), CTL(stats_arenas_i_mapped)},
- {NAME("npurge"), CTL(stats_arenas_i_npurge)},
- {NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)},
- {NAME("purged"), CTL(stats_arenas_i_purged)},
- {NAME("small"), CHILD(named, stats_arenas_i_small)},
- {NAME("large"), CHILD(named, stats_arenas_i_large)},
- {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
- {NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)}
-};
-static const ctl_named_node_t super_stats_arenas_i_node[] = {
- {NAME(""), CHILD(named, stats_arenas_i)}
-};
-
-static const ctl_indexed_node_t stats_arenas_node[] = {
- {INDEX(stats_arenas_i)}
-};
-
-static const ctl_named_node_t stats_node[] = {
- {NAME("cactive"), CTL(stats_cactive)},
- {NAME("allocated"), CTL(stats_allocated)},
- {NAME("active"), CTL(stats_active)},
- {NAME("mapped"), CTL(stats_mapped)},
- {NAME("chunks"), CHILD(named, stats_chunks)},
- {NAME("huge"), CHILD(named, stats_huge)},
- {NAME("arenas"), CHILD(indexed, stats_arenas)}
-};
-
-static const ctl_named_node_t root_node[] = {
- {NAME("version"), CTL(version)},
- {NAME("epoch"), CTL(epoch)},
- {NAME("thread"), CHILD(named, thread)},
- {NAME("config"), CHILD(named, config)},
- {NAME("opt"), CHILD(named, opt)},
- {NAME("arena"), CHILD(indexed, arena)},
- {NAME("arenas"), CHILD(named, arenas)},
- {NAME("prof"), CHILD(named, prof)},
- {NAME("stats"), CHILD(named, stats)}
-};
-static const ctl_named_node_t super_root_node[] = {
- {NAME(""), CHILD(named, root)}
-};
-
-#undef NAME
-#undef CHILD
-#undef CTL
-#undef INDEX
-
-/******************************************************************************/
-
-static bool
-ctl_arena_init(ctl_arena_stats_t *astats)
-{
-
- if (astats->lstats == NULL) {
- astats->lstats = (malloc_large_stats_t *)base_alloc(nlclasses *
- sizeof(malloc_large_stats_t));
- if (astats->lstats == NULL)
- return (true);
- }
-
- return (false);
-}
-
-static void
-ctl_arena_clear(ctl_arena_stats_t *astats)
-{
-
- astats->dss = dss_prec_names[dss_prec_limit];
- astats->pactive = 0;
- astats->pdirty = 0;
- if (config_stats) {
- memset(&astats->astats, 0, sizeof(arena_stats_t));
- astats->allocated_small = 0;
- astats->nmalloc_small = 0;
- astats->ndalloc_small = 0;
- astats->nrequests_small = 0;
- memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t));
- memset(astats->lstats, 0, nlclasses *
- sizeof(malloc_large_stats_t));
- }
-}
-
-static void
-ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
-{
- unsigned i;
-
- arena_stats_merge(arena, &cstats->dss, &cstats->pactive,
- &cstats->pdirty, &cstats->astats, cstats->bstats, cstats->lstats);
-
- for (i = 0; i < NBINS; i++) {
- cstats->allocated_small += cstats->bstats[i].allocated;
- cstats->nmalloc_small += cstats->bstats[i].nmalloc;
- cstats->ndalloc_small += cstats->bstats[i].ndalloc;
- cstats->nrequests_small += cstats->bstats[i].nrequests;
- }
-}
-
-static void
-ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
-{
- unsigned i;
-
- sstats->pactive += astats->pactive;
- sstats->pdirty += astats->pdirty;
-
- sstats->astats.mapped += astats->astats.mapped;
- sstats->astats.npurge += astats->astats.npurge;
- sstats->astats.nmadvise += astats->astats.nmadvise;
- sstats->astats.purged += astats->astats.purged;
-
- sstats->allocated_small += astats->allocated_small;
- sstats->nmalloc_small += astats->nmalloc_small;
- sstats->ndalloc_small += astats->ndalloc_small;
- sstats->nrequests_small += astats->nrequests_small;
-
- sstats->astats.allocated_large += astats->astats.allocated_large;
- sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
- sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
- sstats->astats.nrequests_large += astats->astats.nrequests_large;
-
- for (i = 0; i < nlclasses; i++) {
- sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
- sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
- sstats->lstats[i].nrequests += astats->lstats[i].nrequests;
- sstats->lstats[i].curruns += astats->lstats[i].curruns;
- }
-
- for (i = 0; i < NBINS; i++) {
- sstats->bstats[i].allocated += astats->bstats[i].allocated;
- sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
- sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
- sstats->bstats[i].nrequests += astats->bstats[i].nrequests;
- if (config_tcache) {
- sstats->bstats[i].nfills += astats->bstats[i].nfills;
- sstats->bstats[i].nflushes +=
- astats->bstats[i].nflushes;
- }
- sstats->bstats[i].nruns += astats->bstats[i].nruns;
- sstats->bstats[i].reruns += astats->bstats[i].reruns;
- sstats->bstats[i].curruns += astats->bstats[i].curruns;
- }
-}
-
-static void
-ctl_arena_refresh(arena_t *arena, unsigned i)
-{
- ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
- ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas];
-
- ctl_arena_clear(astats);
-
- sstats->nthreads += astats->nthreads;
- if (config_stats) {
- ctl_arena_stats_amerge(astats, arena);
- /* Merge into sum stats as well. */
- ctl_arena_stats_smerge(sstats, astats);
- } else {
- astats->pactive += arena->nactive;
- astats->pdirty += arena->ndirty;
- /* Merge into sum stats as well. */
- sstats->pactive += arena->nactive;
- sstats->pdirty += arena->ndirty;
- }
-}
-
-static bool
-ctl_grow(void)
-{
- size_t astats_size;
- ctl_arena_stats_t *astats;
- arena_t **tarenas;
-
- /* Extend arena stats and arenas arrays. */
- astats_size = (ctl_stats.narenas + 2) * sizeof(ctl_arena_stats_t);
- if (ctl_stats.narenas == narenas_auto) {
- /* ctl_stats.arenas and arenas came from base_alloc(). */
- astats = (ctl_arena_stats_t *)imalloc(astats_size);
- if (astats == NULL)
- return (true);
- memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
- sizeof(ctl_arena_stats_t));
-
- tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) *
- sizeof(arena_t *));
- if (tarenas == NULL) {
- idalloc(astats);
- return (true);
- }
- memcpy(tarenas, arenas, ctl_stats.narenas * sizeof(arena_t *));
- } else {
- astats = (ctl_arena_stats_t *)iralloc(ctl_stats.arenas,
- astats_size, 0, 0, false, false);
- if (astats == NULL)
- return (true);
-
- tarenas = (arena_t **)iralloc(arenas, (ctl_stats.narenas + 1) *
- sizeof(arena_t *), 0, 0, false, false);
- if (tarenas == NULL)
- return (true);
- }
- /* Initialize the new astats and arenas elements. */
- memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
- if (ctl_arena_init(&astats[ctl_stats.narenas + 1]))
- return (true);
- tarenas[ctl_stats.narenas] = NULL;
- /* Swap merged stats to their new location. */
- {
- ctl_arena_stats_t tstats;
- memcpy(&tstats, &astats[ctl_stats.narenas],
- sizeof(ctl_arena_stats_t));
- memcpy(&astats[ctl_stats.narenas],
- &astats[ctl_stats.narenas + 1], sizeof(ctl_arena_stats_t));
- memcpy(&astats[ctl_stats.narenas + 1], &tstats,
- sizeof(ctl_arena_stats_t));
- }
- ctl_stats.arenas = astats;
- ctl_stats.narenas++;
- malloc_mutex_lock(&arenas_lock);
- arenas = tarenas;
- narenas_total++;
- arenas_extend(narenas_total - 1);
- malloc_mutex_unlock(&arenas_lock);
-
- return (false);
-}
-
-static void
-ctl_refresh(void)
-{
- unsigned i;
- VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
-
- if (config_stats) {
- malloc_mutex_lock(&chunks_mtx);
- ctl_stats.chunks.current = stats_chunks.curchunks;
- ctl_stats.chunks.total = stats_chunks.nchunks;
- ctl_stats.chunks.high = stats_chunks.highchunks;
- malloc_mutex_unlock(&chunks_mtx);
-
- malloc_mutex_lock(&huge_mtx);
- ctl_stats.huge.allocated = huge_allocated;
- ctl_stats.huge.nmalloc = huge_nmalloc;
- ctl_stats.huge.ndalloc = huge_ndalloc;
- malloc_mutex_unlock(&huge_mtx);
- }
-
-	/*
-	 * Clear sum stats, since ctl_arena_refresh() will merge into them
-	 * below.
-	 */
- ctl_stats.arenas[ctl_stats.narenas].nthreads = 0;
- ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);
-
- malloc_mutex_lock(&arenas_lock);
- memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
- for (i = 0; i < ctl_stats.narenas; i++) {
- if (arenas[i] != NULL)
- ctl_stats.arenas[i].nthreads = arenas[i]->nthreads;
- else
- ctl_stats.arenas[i].nthreads = 0;
- }
- malloc_mutex_unlock(&arenas_lock);
- for (i = 0; i < ctl_stats.narenas; i++) {
- bool initialized = (tarenas[i] != NULL);
-
- ctl_stats.arenas[i].initialized = initialized;
- if (initialized)
- ctl_arena_refresh(tarenas[i], i);
- }
-
- if (config_stats) {
- ctl_stats.allocated =
- ctl_stats.arenas[ctl_stats.narenas].allocated_small
- + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large
- + ctl_stats.huge.allocated;
- ctl_stats.active =
- (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE)
- + ctl_stats.huge.allocated;
- ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
- }
-
- ctl_epoch++;
-}
-
-static bool
-ctl_init(void)
-{
- bool ret;
-
- malloc_mutex_lock(&ctl_mtx);
- if (ctl_initialized == false) {
- /*
- * Allocate space for one extra arena stats element, which
- * contains summed stats across all arenas.
- */
- assert(narenas_auto == narenas_total_get());
- ctl_stats.narenas = narenas_auto;
- ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc(
- (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t));
- if (ctl_stats.arenas == NULL) {
- ret = true;
- goto label_return;
- }
- memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) *
- sizeof(ctl_arena_stats_t));
-
- /*
- * Initialize all stats structures, regardless of whether they
- * ever get used. Lazy initialization would allow errors to
- * cause inconsistent state to be viewable by the application.
- */
- if (config_stats) {
- unsigned i;
- for (i = 0; i <= ctl_stats.narenas; i++) {
- if (ctl_arena_init(&ctl_stats.arenas[i])) {
- ret = true;
- goto label_return;
- }
- }
- }
- ctl_stats.arenas[ctl_stats.narenas].initialized = true;
-
- ctl_epoch = 0;
- ctl_refresh();
- ctl_initialized = true;
- }
-
- ret = false;
-label_return:
- malloc_mutex_unlock(&ctl_mtx);
- return (ret);
-}
-
-static int
-ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
- size_t *depthp)
-{
- int ret;
- const char *elm, *tdot, *dot;
- size_t elen, i, j;
- const ctl_named_node_t *node;
-
- elm = name;
- /* Equivalent to strchrnul(). */
- dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0');
- elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
- if (elen == 0) {
- ret = ENOENT;
- goto label_return;
- }
- node = super_root_node;
- for (i = 0; i < *depthp; i++) {
- assert(node);
- assert(node->nchildren > 0);
- if (ctl_named_node(node->children) != NULL) {
- const ctl_named_node_t *pnode = node;
-
- /* Children are named. */
- for (j = 0; j < node->nchildren; j++) {
- const ctl_named_node_t *child =
- ctl_named_children(node, j);
- if (strlen(child->name) == elen &&
- strncmp(elm, child->name, elen) == 0) {
- node = child;
- if (nodesp != NULL)
- nodesp[i] =
- (const ctl_node_t *)node;
- mibp[i] = j;
- break;
- }
- }
- if (node == pnode) {
- ret = ENOENT;
- goto label_return;
- }
- } else {
- uintmax_t index;
- const ctl_indexed_node_t *inode;
-
- /* Children are indexed. */
- index = malloc_strtoumax(elm, NULL, 10);
- if (index == UINTMAX_MAX || index > SIZE_T_MAX) {
- ret = ENOENT;
- goto label_return;
- }
-
- inode = ctl_indexed_node(node->children);
- node = inode->index(mibp, *depthp, (size_t)index);
- if (node == NULL) {
- ret = ENOENT;
- goto label_return;
- }
-
- if (nodesp != NULL)
- nodesp[i] = (const ctl_node_t *)node;
- mibp[i] = (size_t)index;
- }
-
- if (node->ctl != NULL) {
- /* Terminal node. */
- if (*dot != '\0') {
- /*
- * The name contains more elements than are
- * in this path through the tree.
- */
- ret = ENOENT;
- goto label_return;
- }
- /* Complete lookup successful. */
- *depthp = i + 1;
- break;
- }
-
- /* Update elm. */
- if (*dot == '\0') {
- /* No more elements. */
- ret = ENOENT;
- goto label_return;
- }
- elm = &dot[1];
- dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
- strchr(elm, '\0');
- elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
- }
-
- ret = 0;
-label_return:
- return (ret);
-}
-
-int
-ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
- size_t newlen)
-{
- int ret;
- size_t depth;
- ctl_node_t const *nodes[CTL_MAX_DEPTH];
- size_t mib[CTL_MAX_DEPTH];
- const ctl_named_node_t *node;
-
- if (ctl_initialized == false && ctl_init()) {
- ret = EAGAIN;
- goto label_return;
- }
-
- depth = CTL_MAX_DEPTH;
- ret = ctl_lookup(name, nodes, mib, &depth);
- if (ret != 0)
- goto label_return;
-
- node = ctl_named_node(nodes[depth-1]);
- if (node != NULL && node->ctl)
- ret = node->ctl(mib, depth, oldp, oldlenp, newp, newlen);
- else {
- /* The name refers to a partial path through the ctl tree. */
- ret = ENOENT;
- }
-
-label_return:
-	return (ret);
-}
-
-int
-ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp)
-{
- int ret;
-
- if (ctl_initialized == false && ctl_init()) {
- ret = EAGAIN;
- goto label_return;
- }
-
- ret = ctl_lookup(name, NULL, mibp, miblenp);
-label_return:
-	return (ret);
-}
-
-int
-ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
-{
- int ret;
- const ctl_named_node_t *node;
- size_t i;
-
- if (ctl_initialized == false && ctl_init()) {
- ret = EAGAIN;
- goto label_return;
- }
-
- /* Iterate down the tree. */
- node = super_root_node;
- for (i = 0; i < miblen; i++) {
- assert(node);
- assert(node->nchildren > 0);
- if (ctl_named_node(node->children) != NULL) {
- /* Children are named. */
- if (node->nchildren <= mib[i]) {
- ret = ENOENT;
- goto label_return;
- }
- node = ctl_named_children(node, mib[i]);
- } else {
- const ctl_indexed_node_t *inode;
-
- /* Indexed element. */
- inode = ctl_indexed_node(node->children);
- node = inode->index(mib, miblen, mib[i]);
- if (node == NULL) {
- ret = ENOENT;
- goto label_return;
- }
- }
- }
-
- /* Call the ctl function. */
- if (node && node->ctl)
- ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen);
- else {
- /* Partial MIB. */
- ret = ENOENT;
- }
-
-label_return:
-	return (ret);
-}
-
-bool
-ctl_boot(void)
-{
-
- if (malloc_mutex_init(&ctl_mtx))
- return (true);
-
- ctl_initialized = false;
-
- return (false);
-}
-
-void
-ctl_prefork(void)
-{
-
- malloc_mutex_lock(&ctl_mtx);
-}
-
-void
-ctl_postfork_parent(void)
-{
-
- malloc_mutex_postfork_parent(&ctl_mtx);
-}
-
-void
-ctl_postfork_child(void)
-{
-
- malloc_mutex_postfork_child(&ctl_mtx);
-}
-
-/******************************************************************************/
-/* *_ctl() functions. */
-
-#define READONLY() do { \
- if (newp != NULL || newlen != 0) { \
- ret = EPERM; \
- goto label_return; \
- } \
-} while (0)
-
-#define WRITEONLY() do { \
- if (oldp != NULL || oldlenp != NULL) { \
- ret = EPERM; \
- goto label_return; \
- } \
-} while (0)
-
-#define READ(v, t) do { \
- if (oldp != NULL && oldlenp != NULL) { \
- if (*oldlenp != sizeof(t)) { \
- size_t copylen = (sizeof(t) <= *oldlenp) \
- ? sizeof(t) : *oldlenp; \
- memcpy(oldp, (void *)&(v), copylen); \
- ret = EINVAL; \
- goto label_return; \
- } else \
- *(t *)oldp = (v); \
- } \
-} while (0)
-
-#define WRITE(v, t) do { \
- if (newp != NULL) { \
- if (newlen != sizeof(t)) { \
- ret = EINVAL; \
- goto label_return; \
- } \
- (v) = *(t *)newp; \
- } \
-} while (0)
-
-/*
- * There's a lot of code duplication in the following macros due to limitations
- * in how nested cpp macros are expanded.
- */
-#define CTL_RO_CLGEN(c, l, n, v, t) \
-static int \
-n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
- void *newp, size_t newlen) \
-{ \
- int ret; \
- t oldval; \
- \
- if ((c) == false) \
- return (ENOENT); \
- if (l) \
- malloc_mutex_lock(&ctl_mtx); \
- READONLY(); \
- oldval = (v); \
- READ(oldval, t); \
- \
- ret = 0; \
-label_return: \
- if (l) \
- malloc_mutex_unlock(&ctl_mtx); \
- return (ret); \
-}
-
-#define CTL_RO_CGEN(c, n, v, t) \
-static int \
-n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
- void *newp, size_t newlen) \
-{ \
- int ret; \
- t oldval; \
- \
- if ((c) == false) \
- return (ENOENT); \
- malloc_mutex_lock(&ctl_mtx); \
- READONLY(); \
- oldval = (v); \
- READ(oldval, t); \
- \
- ret = 0; \
-label_return: \
- malloc_mutex_unlock(&ctl_mtx); \
- return (ret); \
-}
-
-#define CTL_RO_GEN(n, v, t) \
-static int \
-n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
- void *newp, size_t newlen) \
-{ \
- int ret; \
- t oldval; \
- \
- malloc_mutex_lock(&ctl_mtx); \
- READONLY(); \
- oldval = (v); \
- READ(oldval, t); \
- \
- ret = 0; \
-label_return: \
- malloc_mutex_unlock(&ctl_mtx); \
- return (ret); \
-}
-
-/*
- * ctl_mtx is not acquired, under the assumption that no pertinent data will
- * mutate during the call.
- */
-#define CTL_RO_NL_CGEN(c, n, v, t) \
-static int \
-n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
- void *newp, size_t newlen) \
-{ \
- int ret; \
- t oldval; \
- \
- if ((c) == false) \
- return (ENOENT); \
- READONLY(); \
- oldval = (v); \
- READ(oldval, t); \
- \
- ret = 0; \
-label_return: \
- return (ret); \
-}
-
-#define CTL_RO_NL_GEN(n, v, t) \
-static int \
-n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
- void *newp, size_t newlen) \
-{ \
- int ret; \
- t oldval; \
- \
- READONLY(); \
- oldval = (v); \
- READ(oldval, t); \
- \
- ret = 0; \
-label_return: \
- return (ret); \
-}
-
-#define CTL_RO_BOOL_CONFIG_GEN(n) \
-static int \
-n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
- void *newp, size_t newlen) \
-{ \
- int ret; \
- bool oldval; \
- \
- READONLY(); \
- oldval = n; \
- READ(oldval, bool); \
- \
- ret = 0; \
-label_return: \
- return (ret); \
-}
-
-CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
-
-static int
-epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
-{
- int ret;
- uint64_t newval __attribute__((unused));
-
- malloc_mutex_lock(&ctl_mtx);
- WRITE(newval, uint64_t);
- if (newp != NULL)
- ctl_refresh();
- READ(ctl_epoch, uint64_t);
-
- ret = 0;
-label_return:
- malloc_mutex_unlock(&ctl_mtx);
- return (ret);
-}
-
-static int
-thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
- int ret;
- bool oldval;
-
- if (config_tcache == false)
- return (ENOENT);
-
- oldval = tcache_enabled_get();
- if (newp != NULL) {
- if (newlen != sizeof(bool)) {
- ret = EINVAL;
- goto label_return;
- }
- tcache_enabled_set(*(bool *)newp);
- }
- READ(oldval, bool);
-
- ret = 0;
-label_return:
- return (ret);
-}
-
-static int
-thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
- int ret;
-
- if (config_tcache == false)
- return (ENOENT);
-
- READONLY();
- WRITEONLY();
-
- tcache_flush();
-
- ret = 0;
-label_return:
- return (ret);
-}
-
-static int
-thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
-{
- int ret;
- unsigned newind, oldind;
-
- malloc_mutex_lock(&ctl_mtx);
- newind = oldind = choose_arena(NULL)->ind;
- WRITE(newind, unsigned);
- READ(oldind, unsigned);
- if (newind != oldind) {
- arena_t *arena;
-
- if (newind >= ctl_stats.narenas) {
- /* New arena index is out of range. */
- ret = EFAULT;
- goto label_return;
- }
-
- /* Initialize arena if necessary. */
- malloc_mutex_lock(&arenas_lock);
- if ((arena = arenas[newind]) == NULL && (arena =
- arenas_extend(newind)) == NULL) {
- malloc_mutex_unlock(&arenas_lock);
- ret = EAGAIN;
- goto label_return;
- }
- assert(arena == arenas[newind]);
- arenas[oldind]->nthreads--;
- arenas[newind]->nthreads++;
- malloc_mutex_unlock(&arenas_lock);
-
- /* Set new arena association. */
- if (config_tcache) {
- tcache_t *tcache;
- if ((uintptr_t)(tcache = *tcache_tsd_get()) >
- (uintptr_t)TCACHE_STATE_MAX) {
- tcache_arena_dissociate(tcache);
- tcache_arena_associate(tcache, arena);
- }
- }
- arenas_tsd_set(&arena);
- }
-
- ret = 0;
-label_return:
- malloc_mutex_unlock(&ctl_mtx);
- return (ret);
-}
-
-CTL_RO_NL_CGEN(config_stats, thread_allocated,
- thread_allocated_tsd_get()->allocated, uint64_t)
-CTL_RO_NL_CGEN(config_stats, thread_allocatedp,
- &thread_allocated_tsd_get()->allocated, uint64_t *)
-CTL_RO_NL_CGEN(config_stats, thread_deallocated,
- thread_allocated_tsd_get()->deallocated, uint64_t)
-CTL_RO_NL_CGEN(config_stats, thread_deallocatedp,
- &thread_allocated_tsd_get()->deallocated, uint64_t *)
-
-/******************************************************************************/
-
-CTL_RO_BOOL_CONFIG_GEN(config_debug)
-CTL_RO_BOOL_CONFIG_GEN(config_dss)
-CTL_RO_BOOL_CONFIG_GEN(config_fill)
-CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
-CTL_RO_BOOL_CONFIG_GEN(config_mremap)
-CTL_RO_BOOL_CONFIG_GEN(config_munmap)
-CTL_RO_BOOL_CONFIG_GEN(config_prof)
-CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
-CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind)
-CTL_RO_BOOL_CONFIG_GEN(config_stats)
-CTL_RO_BOOL_CONFIG_GEN(config_tcache)
-CTL_RO_BOOL_CONFIG_GEN(config_tls)
-CTL_RO_BOOL_CONFIG_GEN(config_utrace)
-CTL_RO_BOOL_CONFIG_GEN(config_valgrind)
-CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)
-
-/******************************************************************************/
-
-CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
-CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
-CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
-CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
-CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
-CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
-CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool)
-CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
-CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
-CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
-CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
-CTL_RO_NL_CGEN(config_valgrind, opt_valgrind, opt_valgrind, bool)
-CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
-CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
-CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
-CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
-CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
-CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */
-CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
-CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
-CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
-CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
-CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
-CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
-
-/******************************************************************************/
-
-/* ctl_mtx must be held during execution of this function. */
-static void
-arena_purge(unsigned arena_ind)
-{
- VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
-
- malloc_mutex_lock(&arenas_lock);
- memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
- malloc_mutex_unlock(&arenas_lock);
-
- if (arena_ind == ctl_stats.narenas) {
- unsigned i;
- for (i = 0; i < ctl_stats.narenas; i++) {
- if (tarenas[i] != NULL)
- arena_purge_all(tarenas[i]);
- }
- } else {
- assert(arena_ind < ctl_stats.narenas);
- if (tarenas[arena_ind] != NULL)
- arena_purge_all(tarenas[arena_ind]);
- }
-}
-
-static int
-arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
-{
- int ret;
-
- READONLY();
- WRITEONLY();
- malloc_mutex_lock(&ctl_mtx);
- arena_purge(mib[1]);
- malloc_mutex_unlock(&ctl_mtx);
-
- ret = 0;
-label_return:
- return (ret);
-}
-
-static int
-arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
-{
- int ret, i;
- bool match, err;
- const char *dss;
- unsigned arena_ind = mib[1];
- dss_prec_t dss_prec_old = dss_prec_limit;
- dss_prec_t dss_prec = dss_prec_limit;
-
- malloc_mutex_lock(&ctl_mtx);
- WRITE(dss, const char *);
- match = false;
- for (i = 0; i < dss_prec_limit; i++) {
- if (strcmp(dss_prec_names[i], dss) == 0) {
- dss_prec = i;
- match = true;
- break;
- }
- }
- if (match == false) {
- ret = EINVAL;
- goto label_return;
- }
-
- if (arena_ind < ctl_stats.narenas) {
- arena_t *arena = arenas[arena_ind];
- if (arena != NULL) {
- dss_prec_old = arena_dss_prec_get(arena);
- arena_dss_prec_set(arena, dss_prec);
- err = false;
- } else
- err = true;
- } else {
- dss_prec_old = chunk_dss_prec_get();
- err = chunk_dss_prec_set(dss_prec);
- }
- dss = dss_prec_names[dss_prec_old];
- READ(dss, const char *);
- if (err) {
- ret = EFAULT;
- goto label_return;
- }
-
- ret = 0;
-label_return:
- malloc_mutex_unlock(&ctl_mtx);
- return (ret);
-}
-
-static const ctl_named_node_t *
-arena_i_index(const size_t *mib, size_t miblen, size_t i)
-{
- const ctl_named_node_t * ret;
-
- malloc_mutex_lock(&ctl_mtx);
- if (i > ctl_stats.narenas) {
- ret = NULL;
- goto label_return;
- }
-
- ret = super_arena_i_node;
-label_return:
- malloc_mutex_unlock(&ctl_mtx);
- return (ret);
-}
-
-
-/******************************************************************************/
-
-CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
-CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
-CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
-static const ctl_named_node_t *
-arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
-{
-
- if (i > NBINS)
- return (NULL);
- return (super_arenas_bin_i_node);
-}
-
-CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t)
-static const ctl_named_node_t *
-arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
-{
-
- if (i > nlclasses)
- return (NULL);
- return (super_arenas_lrun_i_node);
-}
-
-static int
-arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
- int ret;
- unsigned narenas;
-
- malloc_mutex_lock(&ctl_mtx);
- READONLY();
- if (*oldlenp != sizeof(unsigned)) {
- ret = EINVAL;
- goto label_return;
- }
- narenas = ctl_stats.narenas;
- READ(narenas, unsigned);
-
- ret = 0;
-label_return:
- malloc_mutex_unlock(&ctl_mtx);
- return (ret);
-}
-
-static int
-arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
- int ret;
- unsigned nread, i;
-
- malloc_mutex_lock(&ctl_mtx);
- READONLY();
- if (*oldlenp != ctl_stats.narenas * sizeof(bool)) {
- ret = EINVAL;
- nread = (*oldlenp < ctl_stats.narenas * sizeof(bool))
- ? (*oldlenp / sizeof(bool)) : ctl_stats.narenas;
- } else {
- ret = 0;
- nread = ctl_stats.narenas;
- }
-
- for (i = 0; i < nread; i++)
- ((bool *)oldp)[i] = ctl_stats.arenas[i].initialized;
-
-label_return:
- malloc_mutex_unlock(&ctl_mtx);
- return (ret);
-}
-
-CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
-CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
-CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
-CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
-CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned)
-CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t)
-
-static int
-arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
-{
- int ret;
- unsigned arena_ind;
-
- malloc_mutex_lock(&ctl_mtx);
- WRITEONLY();
- arena_ind = UINT_MAX;
- WRITE(arena_ind, unsigned);
- if (newp != NULL && arena_ind >= ctl_stats.narenas)
- ret = EFAULT;
- else {
- if (arena_ind == UINT_MAX)
- arena_ind = ctl_stats.narenas;
- arena_purge(arena_ind);
- ret = 0;
- }
-
-label_return:
- malloc_mutex_unlock(&ctl_mtx);
- return (ret);
-}
-
-static int
-arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
-{
- int ret;
- unsigned narenas;
-
- malloc_mutex_lock(&ctl_mtx);
- READONLY();
- if (ctl_grow()) {
- ret = EAGAIN;
- goto label_return;
- }
- narenas = ctl_stats.narenas - 1;
- READ(narenas, unsigned);
-
- ret = 0;
-label_return:
- malloc_mutex_unlock(&ctl_mtx);
- return (ret);
-}
-
-/******************************************************************************/
-
-static int
-prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
-{
- int ret;
- bool oldval;
-
- if (config_prof == false)
- return (ENOENT);
-
- malloc_mutex_lock(&ctl_mtx); /* Protect opt_prof_active. */
- oldval = opt_prof_active;
- if (newp != NULL) {
- /*
- * The memory barriers will tend to make opt_prof_active
- * propagate faster on systems with weak memory ordering.
- */
- mb_write();
- WRITE(opt_prof_active, bool);
- mb_write();
- }
- READ(oldval, bool);
-
- ret = 0;
-label_return:
- malloc_mutex_unlock(&ctl_mtx);
- return (ret);
-}
-
-static int
-prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
-{
- int ret;
- const char *filename = NULL;
-
- if (config_prof == false)
- return (ENOENT);
-
- WRITEONLY();
- WRITE(filename, const char *);
-
- if (prof_mdump(filename)) {
- ret = EFAULT;
- goto label_return;
- }
-
- ret = 0;
-label_return:
- return (ret);
-}
-
-CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
-
-/******************************************************************************/
-
-CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current,
- size_t)
-CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t)
-CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t)
-CTL_RO_CGEN(config_stats, stats_huge_allocated, huge_allocated, size_t)
-CTL_RO_CGEN(config_stats, stats_huge_nmalloc, huge_nmalloc, uint64_t)
-CTL_RO_CGEN(config_stats, stats_huge_ndalloc, huge_ndalloc, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
- ctl_stats.arenas[mib[2]].allocated_small, size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
- ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
- ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
- ctl_stats.arenas[mib[2]].nrequests_small, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
- ctl_stats.arenas[mib[2]].astats.allocated_large, size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
- ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
- ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
- ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t)
-
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_allocated,
- ctl_stats.arenas[mib[2]].bstats[mib[4]].allocated, size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
- ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
- ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
- ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t)
-CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills,
- ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t)
-CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes,
- ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns,
- ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns,
- ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
- ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)
-
-static const ctl_named_node_t *
-stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j)
-{
-
- if (j > NBINS)
- return (NULL);
- return (super_stats_arenas_i_bins_j_node);
-}
-
-CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc,
- ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc,
- ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests,
- ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
- ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
-
-static const ctl_named_node_t *
-stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
-{
-
- if (j > nlclasses)
- return (NULL);
- return (super_stats_arenas_i_lruns_j_node);
-}
-
-CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
-CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
-CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
-CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
- ctl_stats.arenas[mib[2]].astats.mapped, size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
- ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
- ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
- ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
-
-static const ctl_named_node_t *
-stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
-{
- const ctl_named_node_t * ret;
-
- malloc_mutex_lock(&ctl_mtx);
- if (i > ctl_stats.narenas || ctl_stats.arenas[i].initialized == false) {
- ret = NULL;
- goto label_return;
- }
-
- ret = super_stats_arenas_i_node;
-label_return:
- malloc_mutex_unlock(&ctl_mtx);
- return (ret);
-}
-
-CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
-CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
-CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
-CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
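
The three entry points above (ctl_byname(), ctl_nametomib(), ctl_bymib()) are the internal backends behind the public mallctl*() wrappers; a name such as "stats.allocated" is resolved by ctl_lookup() walking super_root_node one dot-separated component at a time. A minimal sketch of driving them directly (the sketch function name is illustrative; writing "epoch" triggers ctl_refresh(), so the statistics read afterwards reflect the current state):

static void
ctl_usage_sketch(void)	/* Illustrative sketch. */
{
	uint64_t epoch = 1;
	size_t allocated, sz;
	size_t mib[CTL_MAX_DEPTH], miblen;

	/* Bump the epoch, which refreshes the cached statistics. */
	ctl_byname("epoch", NULL, NULL, &epoch, sizeof(epoch));

	/* Read a value by name... */
	sz = sizeof(allocated);
	ctl_byname("stats.allocated", &allocated, &sz, NULL, 0);

	/* ...or translate the name to a MIB once and reuse it. */
	miblen = CTL_MAX_DEPTH;
	if (ctl_nametomib("stats.allocated", mib, &miblen) == 0)
		ctl_bymib(mib, miblen, &allocated, &sz, NULL, 0);
}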
diff --git a/extra/jemalloc/src/extent.c b/extra/jemalloc/src/extent.c
deleted file mode 100644
index 8c09b486ed8..00000000000
--- a/extra/jemalloc/src/extent.c
+++ /dev/null
@@ -1,39 +0,0 @@
-#define JEMALLOC_EXTENT_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-
-static inline int
-extent_szad_comp(extent_node_t *a, extent_node_t *b)
-{
- int ret;
- size_t a_size = a->size;
- size_t b_size = b->size;
-
- ret = (a_size > b_size) - (a_size < b_size);
- if (ret == 0) {
- uintptr_t a_addr = (uintptr_t)a->addr;
- uintptr_t b_addr = (uintptr_t)b->addr;
-
- ret = (a_addr > b_addr) - (a_addr < b_addr);
- }
-
- return (ret);
-}
-
-/* Generate red-black tree functions. */
-rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, link_szad,
- extent_szad_comp)
-
-static inline int
-extent_ad_comp(extent_node_t *a, extent_node_t *b)
-{
- uintptr_t a_addr = (uintptr_t)a->addr;
- uintptr_t b_addr = (uintptr_t)b->addr;
-
- return ((a_addr > b_addr) - (a_addr < b_addr));
-}
-
-/* Generate red-black tree functions. */
-rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad,
- extent_ad_comp)
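
extent_szad_comp() keys the szad tree on size first and breaks ties by address, so lookups for a given size land on the lowest-addressed extent of that size. Both comparators rely on the branch-free three-way idiom (a > b) - (a < b), which evaluates to -1, 0, or 1 and, unlike a plain subtraction, cannot overflow for size_t or uintptr_t operands. A standalone illustration of the idiom:

#include <stdio.h>

static int
three_way(unsigned long a, unsigned long b)
{

	return ((a > b) - (a < b));	/* -1 if a < b, 0 if equal, 1 if a > b. */
}

int
main(void)
{

	printf("%d %d %d\n", three_way(1, 2), three_way(2, 2), three_way(3, 2));
	return (0);
}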
diff --git a/extra/jemalloc/src/hash.c b/extra/jemalloc/src/hash.c
deleted file mode 100644
index cfa4da0275c..00000000000
--- a/extra/jemalloc/src/hash.c
+++ /dev/null
@@ -1,2 +0,0 @@
-#define JEMALLOC_HASH_C_
-#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/extra/jemalloc/src/huge.c b/extra/jemalloc/src/huge.c
deleted file mode 100644
index aa08d43d362..00000000000
--- a/extra/jemalloc/src/huge.c
+++ /dev/null
@@ -1,313 +0,0 @@
-#define JEMALLOC_HUGE_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-/* Data. */
-
-uint64_t huge_nmalloc;
-uint64_t huge_ndalloc;
-size_t huge_allocated;
-
-malloc_mutex_t huge_mtx;
-
-/******************************************************************************/
-
-/* Tree of chunks that are stand-alone huge allocations. */
-static extent_tree_t huge;
-
-void *
-huge_malloc(size_t size, bool zero)
-{
-
- return (huge_palloc(size, chunksize, zero));
-}
-
-void *
-huge_palloc(size_t size, size_t alignment, bool zero)
-{
- void *ret;
- size_t csize;
- extent_node_t *node;
- bool is_zeroed;
-
- /* Allocate one or more contiguous chunks for this request. */
-
- csize = CHUNK_CEILING(size);
- if (csize == 0) {
- /* size is large enough to cause size_t wrap-around. */
- return (NULL);
- }
-
- /* Allocate an extent node with which to track the chunk. */
- node = base_node_alloc();
- if (node == NULL)
- return (NULL);
-
- /*
- * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
- * it is possible to make correct junk/zero fill decisions below.
- */
- is_zeroed = zero;
- ret = chunk_alloc(csize, alignment, false, &is_zeroed,
- chunk_dss_prec_get());
- if (ret == NULL) {
- base_node_dealloc(node);
- return (NULL);
- }
-
- /* Insert node into huge. */
- node->addr = ret;
- node->size = csize;
-
- malloc_mutex_lock(&huge_mtx);
- extent_tree_ad_insert(&huge, node);
- if (config_stats) {
- stats_cactive_add(csize);
- huge_nmalloc++;
- huge_allocated += csize;
- }
- malloc_mutex_unlock(&huge_mtx);
-
- if (config_fill && zero == false) {
- if (opt_junk)
- memset(ret, 0xa5, csize);
- else if (opt_zero && is_zeroed == false)
- memset(ret, 0, csize);
- }
-
- return (ret);
-}
-
-void *
-huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
-{
-
- /*
- * Avoid moving the allocation if the size class can be left the same.
- */
- if (oldsize > arena_maxclass
- && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
- && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
- assert(CHUNK_CEILING(oldsize) == oldsize);
- if (config_fill && opt_junk && size < oldsize) {
- memset((void *)((uintptr_t)ptr + size), 0x5a,
- oldsize - size);
- }
- return (ptr);
- }
-
- /* Reallocation would require a move. */
- return (NULL);
-}
-
-void *
-huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
- size_t alignment, bool zero, bool try_tcache_dalloc)
-{
- void *ret;
- size_t copysize;
-
- /* Try to avoid moving the allocation. */
- ret = huge_ralloc_no_move(ptr, oldsize, size, extra);
- if (ret != NULL)
- return (ret);
-
- /*
- * size and oldsize are different enough that we need to use a
- * different size class. In that case, fall back to allocating new
- * space and copying.
- */
- if (alignment > chunksize)
- ret = huge_palloc(size + extra, alignment, zero);
- else
- ret = huge_malloc(size + extra, zero);
-
- if (ret == NULL) {
- if (extra == 0)
- return (NULL);
- /* Try again, this time without extra. */
- if (alignment > chunksize)
- ret = huge_palloc(size, alignment, zero);
- else
- ret = huge_malloc(size, zero);
-
- if (ret == NULL)
- return (NULL);
- }
-
- /*
- * Copy at most size bytes (not size+extra), since the caller has no
- * expectation that the extra bytes will be reliably preserved.
- */
- copysize = (size < oldsize) ? size : oldsize;
-
-#ifdef JEMALLOC_MREMAP
- /*
- * Use mremap(2) if this is a huge-->huge reallocation, and neither the
- * source nor the destination are in dss.
- */
- if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
- == false && chunk_in_dss(ret) == false))) {
- size_t newsize = huge_salloc(ret);
-
- /*
- * Remove ptr from the tree of huge allocations before
- * performing the remap operation, in order to avoid the
- * possibility of another thread acquiring that mapping before
- * this one removes it from the tree.
- */
- huge_dalloc(ptr, false);
- if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
- ret) == MAP_FAILED) {
- /*
- * Assuming no chunk management bugs in the allocator,
- * the only documented way an error can occur here is
- * if the application changed the map type for a
- * portion of the old allocation. This is firmly in
- * undefined behavior territory, so write a diagnostic
- * message, and optionally abort.
- */
- char buf[BUFERROR_BUF];
-
- buferror(buf, sizeof(buf));
- malloc_printf("<jemalloc>: Error in mremap(): %s\n",
- buf);
- if (opt_abort)
- abort();
- memcpy(ret, ptr, copysize);
- chunk_dealloc_mmap(ptr, oldsize);
- }
- } else
-#endif
- {
- memcpy(ret, ptr, copysize);
- iqallocx(ptr, try_tcache_dalloc);
- }
- return (ret);
-}
-
-void
-huge_dalloc(void *ptr, bool unmap)
-{
- extent_node_t *node, key;
-
- malloc_mutex_lock(&huge_mtx);
-
- /* Extract from tree of huge allocations. */
- key.addr = ptr;
- node = extent_tree_ad_search(&huge, &key);
- assert(node != NULL);
- assert(node->addr == ptr);
- extent_tree_ad_remove(&huge, node);
-
- if (config_stats) {
- stats_cactive_sub(node->size);
- huge_ndalloc++;
- huge_allocated -= node->size;
- }
-
- malloc_mutex_unlock(&huge_mtx);
-
- if (unmap && config_fill && config_dss && opt_junk)
- memset(node->addr, 0x5a, node->size);
-
- chunk_dealloc(node->addr, node->size, unmap);
-
- base_node_dealloc(node);
-}
-
-size_t
-huge_salloc(const void *ptr)
-{
- size_t ret;
- extent_node_t *node, key;
-
- malloc_mutex_lock(&huge_mtx);
-
- /* Extract from tree of huge allocations. */
- key.addr = __DECONST(void *, ptr);
- node = extent_tree_ad_search(&huge, &key);
- assert(node != NULL);
-
- ret = node->size;
-
- malloc_mutex_unlock(&huge_mtx);
-
- return (ret);
-}
-
-prof_ctx_t *
-huge_prof_ctx_get(const void *ptr)
-{
- prof_ctx_t *ret;
- extent_node_t *node, key;
-
- malloc_mutex_lock(&huge_mtx);
-
- /* Extract from tree of huge allocations. */
- key.addr = __DECONST(void *, ptr);
- node = extent_tree_ad_search(&huge, &key);
- assert(node != NULL);
-
- ret = node->prof_ctx;
-
- malloc_mutex_unlock(&huge_mtx);
-
- return (ret);
-}
-
-void
-huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
-{
- extent_node_t *node, key;
-
- malloc_mutex_lock(&huge_mtx);
-
- /* Extract from tree of huge allocations. */
- key.addr = __DECONST(void *, ptr);
- node = extent_tree_ad_search(&huge, &key);
- assert(node != NULL);
-
- node->prof_ctx = ctx;
-
- malloc_mutex_unlock(&huge_mtx);
-}
-
-bool
-huge_boot(void)
-{
-
- /* Initialize chunks data. */
- if (malloc_mutex_init(&huge_mtx))
- return (true);
- extent_tree_ad_new(&huge);
-
- if (config_stats) {
- huge_nmalloc = 0;
- huge_ndalloc = 0;
- huge_allocated = 0;
- }
-
- return (false);
-}
-
-void
-huge_prefork(void)
-{
-
- malloc_mutex_prefork(&huge_mtx);
-}
-
-void
-huge_postfork_parent(void)
-{
-
- malloc_mutex_postfork_parent(&huge_mtx);
-}
-
-void
-huge_postfork_child(void)
-{
-
- malloc_mutex_postfork_child(&huge_mtx);
-}
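
huge_ralloc_no_move() above can keep a huge allocation in place only while the chunk-rounded request still matches the chunk-rounded old size. A worked sketch of that check, assuming CHUNK_CEILING() rounds its argument up to the next multiple of chunksize (its definition in the chunk headers); the helper name is illustrative:

/* Illustrative; mirrors the in-place test in huge_ralloc_no_move(). */
static bool
huge_can_resize_in_place(size_t oldsize, size_t size, size_t extra)
{

	return (oldsize > arena_maxclass
	    && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size + extra));
}

With the default 4 MiB chunks, an 8 MiB huge allocation can be resized in place to any request larger than 4 MiB and at most 8 MiB (with extra == 0), since all of those sizes round up to the same two-chunk extent; anything else falls through to the allocate-copy-free path in huge_ralloc().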
diff --git a/extra/jemalloc/src/jemalloc.c b/extra/jemalloc/src/jemalloc.c
deleted file mode 100644
index bc350ed953b..00000000000
--- a/extra/jemalloc/src/jemalloc.c
+++ /dev/null
@@ -1,1868 +0,0 @@
-#define JEMALLOC_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-/* Data. */
-
-malloc_tsd_data(, arenas, arena_t *, NULL)
-malloc_tsd_data(, thread_allocated, thread_allocated_t,
- THREAD_ALLOCATED_INITIALIZER)
-
-/* Runtime configuration options. */
-const char *je_malloc_conf;
-bool opt_abort =
-#ifdef JEMALLOC_DEBUG
- true
-#else
- false
-#endif
- ;
-bool opt_junk =
-#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
- true
-#else
- false
-#endif
- ;
-size_t opt_quarantine = ZU(0);
-bool opt_redzone = false;
-bool opt_utrace = false;
-bool opt_valgrind = false;
-bool opt_xmalloc = false;
-bool opt_zero = false;
-size_t opt_narenas = 0;
-
-unsigned ncpus;
-
-malloc_mutex_t arenas_lock;
-arena_t **arenas;
-unsigned narenas_total;
-unsigned narenas_auto;
-
-/* Set to true once the allocator has been initialized. */
-static bool malloc_initialized = false;
-
-#ifdef JEMALLOC_THREADED_INIT
-/* Used to let the initializing thread recursively allocate. */
-# define NO_INITIALIZER ((unsigned long)0)
-# define INITIALIZER pthread_self()
-# define IS_INITIALIZER (malloc_initializer == pthread_self())
-static pthread_t malloc_initializer = NO_INITIALIZER;
-#else
-# define NO_INITIALIZER false
-# define INITIALIZER true
-# define IS_INITIALIZER malloc_initializer
-static bool malloc_initializer = NO_INITIALIZER;
-#endif
-
-/* Used to avoid initialization races. */
-#ifdef _WIN32
-static malloc_mutex_t init_lock;
-
-JEMALLOC_ATTR(constructor)
-static void WINAPI
-_init_init_lock(void)
-{
-
- malloc_mutex_init(&init_lock);
-}
-
-#ifdef _MSC_VER
-# pragma section(".CRT$XCU", read)
-JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
-static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
-#endif
-
-#else
-static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
-#endif
-
-typedef struct {
- void *p; /* Input pointer (as in realloc(p, s)). */
- size_t s; /* Request size. */
- void *r; /* Result pointer. */
-} malloc_utrace_t;
-
-#ifdef JEMALLOC_UTRACE
-# define UTRACE(a, b, c) do { \
- if (opt_utrace) { \
- int utrace_serrno = errno; \
- malloc_utrace_t ut; \
- ut.p = (a); \
- ut.s = (b); \
- ut.r = (c); \
- utrace(&ut, sizeof(ut)); \
- errno = utrace_serrno; \
- } \
-} while (0)
-#else
-# define UTRACE(a, b, c)
-#endif
-
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static void stats_print_atexit(void);
-static unsigned malloc_ncpus(void);
-static bool malloc_conf_next(char const **opts_p, char const **k_p,
- size_t *klen_p, char const **v_p, size_t *vlen_p);
-static void malloc_conf_error(const char *msg, const char *k, size_t klen,
- const char *v, size_t vlen);
-static void malloc_conf_init(void);
-static bool malloc_init_hard(void);
-static int imemalign(void **memptr, size_t alignment, size_t size,
- size_t min_alignment);
-
-/******************************************************************************/
-/*
- * Begin miscellaneous support functions.
- */
-
-/* Create a new arena and insert it into the arenas array at index ind. */
-arena_t *
-arenas_extend(unsigned ind)
-{
- arena_t *ret;
-
- ret = (arena_t *)base_alloc(sizeof(arena_t));
- if (ret != NULL && arena_new(ret, ind) == false) {
- arenas[ind] = ret;
- return (ret);
- }
- /* Only reached if there is an OOM error. */
-
- /*
- * OOM here is quite inconvenient to propagate, since dealing with it
- * would require a check for failure in the fast path. Instead, punt
- * by using arenas[0]. In practice, this is an extremely unlikely
- * failure.
- */
- malloc_write("<jemalloc>: Error initializing arena\n");
- if (opt_abort)
- abort();
-
- return (arenas[0]);
-}
-
-/* Slow path, called only by choose_arena(). */
-arena_t *
-choose_arena_hard(void)
-{
- arena_t *ret;
-
- if (narenas_auto > 1) {
- unsigned i, choose, first_null;
-
- choose = 0;
- first_null = narenas_auto;
- malloc_mutex_lock(&arenas_lock);
- assert(arenas[0] != NULL);
- for (i = 1; i < narenas_auto; i++) {
- if (arenas[i] != NULL) {
- /*
- * Choose the first arena that has the lowest
- * number of threads assigned to it.
- */
- if (arenas[i]->nthreads <
- arenas[choose]->nthreads)
- choose = i;
- } else if (first_null == narenas_auto) {
- /*
- * Record the index of the first uninitialized
- * arena, in case all extant arenas are in use.
- *
- * NB: It is possible for there to be
- * discontinuities in terms of initialized
- * versus uninitialized arenas, due to the
- * "thread.arena" mallctl.
- */
- first_null = i;
- }
- }
-
- if (arenas[choose]->nthreads == 0
- || first_null == narenas_auto) {
- /*
- * Use an unloaded arena, or the least loaded arena if
- * all arenas are already initialized.
- */
- ret = arenas[choose];
- } else {
- /* Initialize a new arena. */
- ret = arenas_extend(first_null);
- }
- ret->nthreads++;
- malloc_mutex_unlock(&arenas_lock);
- } else {
- ret = arenas[0];
- malloc_mutex_lock(&arenas_lock);
- ret->nthreads++;
- malloc_mutex_unlock(&arenas_lock);
- }
-
- arenas_tsd_set(&ret);
-
- return (ret);
-}
-
-static void
-stats_print_atexit(void)
-{
-
- if (config_tcache && config_stats) {
- unsigned narenas, i;
-
- /*
- * Merge stats from extant threads. This is racy, since
- * individual threads do not lock when recording tcache stats
- * events. As a consequence, the final stats may be slightly
- * out of date by the time they are reported, if other threads
- * continue to allocate.
- */
- for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
- arena_t *arena = arenas[i];
- if (arena != NULL) {
- tcache_t *tcache;
-
- /*
- * tcache_stats_merge() locks bins, so if any
- * code is introduced that acquires both arena
- * and bin locks in the opposite order,
- * deadlocks may result.
- */
- malloc_mutex_lock(&arena->lock);
- ql_foreach(tcache, &arena->tcache_ql, link) {
- tcache_stats_merge(tcache, arena);
- }
- malloc_mutex_unlock(&arena->lock);
- }
- }
- }
- je_malloc_stats_print(NULL, NULL, NULL);
-}
-
-/*
- * End miscellaneous support functions.
- */
-/******************************************************************************/
-/*
- * Begin initialization functions.
- */
-
-static unsigned
-malloc_ncpus(void)
-{
- unsigned ret;
- long result;
-
-#ifdef _WIN32
- SYSTEM_INFO si;
- GetSystemInfo(&si);
- result = si.dwNumberOfProcessors;
-#else
- result = sysconf(_SC_NPROCESSORS_ONLN);
-#endif
- if (result == -1) {
- /* Error. */
- ret = 1;
- } else {
- ret = (unsigned)result;
- }
-
- return (ret);
-}
-
-void
-arenas_cleanup(void *arg)
-{
- arena_t *arena = *(arena_t **)arg;
-
- malloc_mutex_lock(&arenas_lock);
- arena->nthreads--;
- malloc_mutex_unlock(&arenas_lock);
-}
-
-static JEMALLOC_ATTR(always_inline) void
-malloc_thread_init(void)
-{
-
- /*
- * TSD initialization can't be safely done as a side effect of
- * deallocation, because it is possible for a thread to do nothing but
- * deallocate its TLS data via free(), in which case writing to TLS
- * would cause write-after-free memory corruption. The quarantine
- * facility *only* gets used as a side effect of deallocation, so make
- * a best effort attempt at initializing its TSD by hooking all
- * allocation events.
- */
- if (config_fill && opt_quarantine)
- quarantine_alloc_hook();
-}
-
-static JEMALLOC_ATTR(always_inline) bool
-malloc_init(void)
-{
-
- if (malloc_initialized == false && malloc_init_hard())
- return (true);
- malloc_thread_init();
-
- return (false);
-}
-
-static bool
-malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
- char const **v_p, size_t *vlen_p)
-{
- bool accept;
- const char *opts = *opts_p;
-
- *k_p = opts;
-
- for (accept = false; accept == false;) {
- switch (*opts) {
- case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
- case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
- case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
- case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
- case 'Y': case 'Z':
- case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
- case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
- case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
- case 's': case 't': case 'u': case 'v': case 'w': case 'x':
- case 'y': case 'z':
- case '0': case '1': case '2': case '3': case '4': case '5':
- case '6': case '7': case '8': case '9':
- case '_':
- opts++;
- break;
- case ':':
- opts++;
- *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
- *v_p = opts;
- accept = true;
- break;
- case '\0':
- if (opts != *opts_p) {
- malloc_write("<jemalloc>: Conf string ends "
- "with key\n");
- }
- return (true);
- default:
- malloc_write("<jemalloc>: Malformed conf string\n");
- return (true);
- }
- }
-
- for (accept = false; accept == false;) {
- switch (*opts) {
- case ',':
- opts++;
- /*
- * Look ahead one character here, because the next time
- * this function is called, it will assume that end of
- * input has been cleanly reached if no input remains,
- * but we have optimistically already consumed the
- * comma if one exists.
- */
- if (*opts == '\0') {
- malloc_write("<jemalloc>: Conf string ends "
- "with comma\n");
- }
- *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
- accept = true;
- break;
- case '\0':
- *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
- accept = true;
- break;
- default:
- opts++;
- break;
- }
- }
-
- *opts_p = opts;
- return (false);
-}
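/*
 * Illustrative sketch of the contract the parser above provides to its caller
 * (malloc_conf_init() below): successive calls peel "key:value" pairs off a
 * comma-separated option string. The returned k/v pointers alias the input and
 * are not NUL-terminated, hence the explicit lengths.
 */
	const char *opts = "junk:true,narenas:4";
	const char *k, *v;
	size_t klen, vlen;

	while (*opts != '\0' &&
	    malloc_conf_next(&opts, &k, &klen, &v, &vlen) == false) {
		/* 1st pair: k="junk", klen=4, v="true", vlen=4. */
		/* 2nd pair: k="narenas", klen=7, v="4", vlen=1. */
		malloc_printf("%.*s => %.*s\n", (int)klen, k, (int)vlen, v);
	}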
-
-static void
-malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
- size_t vlen)
-{
-
- malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
- (int)vlen, v);
-}
-
-static void
-malloc_conf_init(void)
-{
- unsigned i;
- char buf[PATH_MAX + 1];
- const char *opts, *k, *v;
- size_t klen, vlen;
-
- /*
- * Automatically configure valgrind before processing options. The
- * valgrind option remains in jemalloc 3.x for compatibility reasons.
- */
- if (config_valgrind) {
- opt_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
- if (config_fill && opt_valgrind) {
- opt_junk = false;
- assert(opt_zero == false);
- opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
- opt_redzone = true;
- }
- if (config_tcache && opt_valgrind)
- opt_tcache = false;
- }
-
- for (i = 0; i < 3; i++) {
- /* Get runtime configuration. */
- switch (i) {
- case 0:
- if (je_malloc_conf != NULL) {
- /*
- * Use options that were compiled into the
- * program.
- */
- opts = je_malloc_conf;
- } else {
- /* No configuration specified. */
- buf[0] = '\0';
- opts = buf;
- }
- break;
- case 1: {
-#ifndef _WIN32
- int linklen;
- const char *linkname =
-# ifdef JEMALLOC_PREFIX
- "/etc/"JEMALLOC_PREFIX"malloc.conf"
-# else
- "/etc/malloc.conf"
-# endif
- ;
-
- if ((linklen = readlink(linkname, buf,
- sizeof(buf) - 1)) != -1) {
- /*
- * Use the contents of the "/etc/malloc.conf"
- * symbolic link's name.
- */
- buf[linklen] = '\0';
- opts = buf;
- } else
-#endif
- {
- /* No configuration specified. */
- buf[0] = '\0';
- opts = buf;
- }
- break;
- } case 2: {
- const char *envname =
-#ifdef JEMALLOC_PREFIX
- JEMALLOC_CPREFIX"MALLOC_CONF"
-#else
- "MALLOC_CONF"
-#endif
- ;
-
- if ((opts = getenv(envname)) != NULL) {
- /*
- * Do nothing; opts is already initialized to
- * the value of the MALLOC_CONF environment
- * variable.
- */
- } else {
- /* No configuration specified. */
- buf[0] = '\0';
- opts = buf;
- }
- break;
- } default:
- /* NOTREACHED */
- assert(false);
- buf[0] = '\0';
- opts = buf;
- }
-
- while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
- &vlen) == false) {
-#define CONF_HANDLE_BOOL(o, n) \
- if (sizeof(n)-1 == klen && strncmp(n, k, \
- klen) == 0) { \
- if (strncmp("true", v, vlen) == 0 && \
- vlen == sizeof("true")-1) \
- o = true; \
- else if (strncmp("false", v, vlen) == \
- 0 && vlen == sizeof("false")-1) \
- o = false; \
- else { \
- malloc_conf_error( \
- "Invalid conf value", \
- k, klen, v, vlen); \
- } \
- continue; \
- }
-#define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \
- if (sizeof(n)-1 == klen && strncmp(n, k, \
- klen) == 0) { \
- uintmax_t um; \
- char *end; \
- \
- set_errno(0); \
- um = malloc_strtoumax(v, &end, 0); \
- if (get_errno() != 0 || (uintptr_t)end -\
- (uintptr_t)v != vlen) { \
- malloc_conf_error( \
- "Invalid conf value", \
- k, klen, v, vlen); \
- } else if (clip) { \
- if (um < min) \
- o = min; \
- else if (um > max) \
- o = max; \
- else \
- o = um; \
- } else { \
- if (um < min || um > max) { \
- malloc_conf_error( \
- "Out-of-range " \
- "conf value", \
- k, klen, v, vlen); \
- } else \
- o = um; \
- } \
- continue; \
- }
-#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
- if (sizeof(n)-1 == klen && strncmp(n, k, \
- klen) == 0) { \
- long l; \
- char *end; \
- \
- set_errno(0); \
- l = strtol(v, &end, 0); \
- if (get_errno() != 0 || (uintptr_t)end -\
- (uintptr_t)v != vlen) { \
- malloc_conf_error( \
- "Invalid conf value", \
- k, klen, v, vlen); \
- } else if (l < (ssize_t)min || l > \
- (ssize_t)max) { \
- malloc_conf_error( \
- "Out-of-range conf value", \
- k, klen, v, vlen); \
- } else \
- o = l; \
- continue; \
- }
-#define CONF_HANDLE_CHAR_P(o, n, d) \
- if (sizeof(n)-1 == klen && strncmp(n, k, \
- klen) == 0) { \
- size_t cpylen = (vlen <= \
- sizeof(o)-1) ? vlen : \
- sizeof(o)-1; \
- strncpy(o, v, cpylen); \
- o[cpylen] = '\0'; \
- continue; \
- }
-
- CONF_HANDLE_BOOL(opt_abort, "abort")
- /*
- * Chunks always require at least one header page, plus
- * one data page in the absence of redzones, or three
- * pages in the presence of redzones. In order to
- * simplify options processing, fix the limit based on
- * config_fill.
- */
- CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
- (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1,
- true)
- if (strncmp("dss", k, klen) == 0) {
- int i;
- bool match = false;
- for (i = 0; i < dss_prec_limit; i++) {
- if (strncmp(dss_prec_names[i], v, vlen)
- == 0) {
- if (chunk_dss_prec_set(i)) {
- malloc_conf_error(
- "Error setting dss",
- k, klen, v, vlen);
- } else {
- opt_dss =
- dss_prec_names[i];
- match = true;
- break;
- }
- }
- }
- if (match == false) {
- malloc_conf_error("Invalid conf value",
- k, klen, v, vlen);
- }
- continue;
- }
- CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
- SIZE_T_MAX, false)
- CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
- -1, (sizeof(size_t) << 3) - 1)
- CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
- if (config_fill) {
- CONF_HANDLE_BOOL(opt_junk, "junk")
- CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
- 0, SIZE_T_MAX, false)
- CONF_HANDLE_BOOL(opt_redzone, "redzone")
- CONF_HANDLE_BOOL(opt_zero, "zero")
- }
- if (config_utrace) {
- CONF_HANDLE_BOOL(opt_utrace, "utrace")
- }
- if (config_valgrind) {
- CONF_HANDLE_BOOL(opt_valgrind, "valgrind")
- }
- if (config_xmalloc) {
- CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
- }
- if (config_tcache) {
- CONF_HANDLE_BOOL(opt_tcache, "tcache")
- CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
- "lg_tcache_max", -1,
- (sizeof(size_t) << 3) - 1)
- }
- if (config_prof) {
- CONF_HANDLE_BOOL(opt_prof, "prof")
- CONF_HANDLE_CHAR_P(opt_prof_prefix,
- "prof_prefix", "jeprof")
- CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
- CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
- "lg_prof_sample", 0,
- (sizeof(uint64_t) << 3) - 1)
- CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
- CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
- "lg_prof_interval", -1,
- (sizeof(uint64_t) << 3) - 1)
- CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
- CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
- CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
- }
- malloc_conf_error("Invalid conf pair", k, klen, v,
- vlen);
-#undef CONF_HANDLE_BOOL
-#undef CONF_HANDLE_SIZE_T
-#undef CONF_HANDLE_SSIZE_T
-#undef CONF_HANDLE_CHAR_P
- }
- }
-}
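/*
 * Usage sketch (documented jemalloc 3.x configuration interfaces; public names
 * assume a build without JEMALLOC_PREFIX): the same option string can be
 * compiled in, supplied as the /etc/malloc.conf symlink target, or exported in
 * the environment, and later sources override earlier ones.
 */
const char *malloc_conf = "narenas:2,lg_chunk:22,stats_print:true";
/* Or at run time:  MALLOC_CONF="junk:true,quarantine:1048576" ./a.out */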
-
-static bool
-malloc_init_hard(void)
-{
- arena_t *init_arenas[1];
-
- malloc_mutex_lock(&init_lock);
- if (malloc_initialized || IS_INITIALIZER) {
- /*
- * Another thread initialized the allocator before this one
- * acquired init_lock, or this thread is the initializing
- * thread, and it is recursively allocating.
- */
- malloc_mutex_unlock(&init_lock);
- return (false);
- }
-#ifdef JEMALLOC_THREADED_INIT
- if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
- /* Busy-wait until the initializing thread completes. */
- do {
- malloc_mutex_unlock(&init_lock);
- CPU_SPINWAIT;
- malloc_mutex_lock(&init_lock);
- } while (malloc_initialized == false);
- malloc_mutex_unlock(&init_lock);
- return (false);
- }
-#endif
- malloc_initializer = INITIALIZER;
-
- malloc_tsd_boot();
- if (config_prof)
- prof_boot0();
-
- malloc_conf_init();
-
-#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
- && !defined(_WIN32))
- /* Register fork handlers. */
- if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
- jemalloc_postfork_child) != 0) {
- malloc_write("<jemalloc>: Error in pthread_atfork()\n");
- if (opt_abort)
- abort();
- }
-#endif
-
- if (opt_stats_print) {
- /* Print statistics at exit. */
- if (atexit(stats_print_atexit) != 0) {
- malloc_write("<jemalloc>: Error in atexit()\n");
- if (opt_abort)
- abort();
- }
- }
-
- if (base_boot()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
- }
-
- if (chunk_boot()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
- }
-
- if (ctl_boot()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
- }
-
- if (config_prof)
- prof_boot1();
-
- arena_boot();
-
- if (config_tcache && tcache_boot0()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
- }
-
- if (huge_boot()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
- }
-
- if (malloc_mutex_init(&arenas_lock))
- return (true);
-
- /*
- * Create enough scaffolding to allow recursive allocation in
- * malloc_ncpus().
- */
- narenas_total = narenas_auto = 1;
- arenas = init_arenas;
- memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
-
- /*
- * Initialize one arena here. The rest are lazily created in
- * choose_arena_hard().
- */
- arenas_extend(0);
- if (arenas[0] == NULL) {
- malloc_mutex_unlock(&init_lock);
- return (true);
- }
-
- /* Initialize allocation counters before any allocations can occur. */
- if (config_stats && thread_allocated_tsd_boot()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
- }
-
- if (arenas_tsd_boot()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
- }
-
- if (config_tcache && tcache_boot1()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
- }
-
- if (config_fill && quarantine_boot()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
- }
-
- if (config_prof && prof_boot2()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
- }
-
- /* Get number of CPUs. */
- malloc_mutex_unlock(&init_lock);
- ncpus = malloc_ncpus();
- malloc_mutex_lock(&init_lock);
-
- if (mutex_boot()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
- }
-
- if (opt_narenas == 0) {
- /*
- * For SMP systems, create more than one arena per CPU by
- * default.
- */
- if (ncpus > 1)
- opt_narenas = ncpus << 2;
- else
- opt_narenas = 1;
- }
- narenas_auto = opt_narenas;
- /*
- * Make sure that the arenas array can be allocated. In practice, this
- * limit is enough to allow the allocator to function, but the ctl
- * machinery will fail to allocate memory at far lower limits.
- */
- if (narenas_auto > chunksize / sizeof(arena_t *)) {
- narenas_auto = chunksize / sizeof(arena_t *);
- malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
- narenas_auto);
- }
- narenas_total = narenas_auto;
-
- /* Allocate and initialize arenas. */
- arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
- if (arenas == NULL) {
- malloc_mutex_unlock(&init_lock);
- return (true);
- }
- /*
- * Zero the array. In practice, this should always be pre-zeroed,
- * since it was just mmap()ed, but let's be sure.
- */
- memset(arenas, 0, sizeof(arena_t *) * narenas_total);
- /* Copy the pointer to the one arena that was already initialized. */
- arenas[0] = init_arenas[0];
-
- malloc_initialized = true;
- malloc_mutex_unlock(&init_lock);
- return (false);
-}
-
-/*
- * End initialization functions.
- */
-/******************************************************************************/
-/*
- * Begin malloc(3)-compatible functions.
- */
-
-void *
-je_malloc(size_t size)
-{
- void *ret;
- size_t usize JEMALLOC_CC_SILENCE_INIT(0);
- prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
-
- if (malloc_init()) {
- ret = NULL;
- goto label_oom;
- }
-
- if (size == 0)
- size = 1;
-
- if (config_prof && opt_prof) {
- usize = s2u(size);
- PROF_ALLOC_PREP(1, usize, cnt);
- if (cnt == NULL) {
- ret = NULL;
- goto label_oom;
- }
- if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
- SMALL_MAXCLASS) {
- ret = imalloc(SMALL_MAXCLASS+1);
- if (ret != NULL)
- arena_prof_promoted(ret, usize);
- } else
- ret = imalloc(size);
- } else {
- if (config_stats || (config_valgrind && opt_valgrind))
- usize = s2u(size);
- ret = imalloc(size);
- }
-
-label_oom:
- if (ret == NULL) {
- if (config_xmalloc && opt_xmalloc) {
- malloc_write("<jemalloc>: Error in malloc(): "
- "out of memory\n");
- abort();
- }
- set_errno(ENOMEM);
- }
- if (config_prof && opt_prof && ret != NULL)
- prof_malloc(ret, usize, cnt);
- if (config_stats && ret != NULL) {
- assert(usize == isalloc(ret, config_prof));
- thread_allocated_tsd_get()->allocated += usize;
- }
- UTRACE(0, size, ret);
- JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
- return (ret);
-}
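/*
 * A note on the prof_promote / (SMALL_MAXCLASS+1) pattern above (high-level
 * summary; see arena.c and prof.c in this same removal for the authoritative
 * details): when heap profiling actually samples a request that would land in
 * a small size class (cnt != (prof_thr_cnt_t *)(uintptr_t)1U), the request is
 * redirected to the smallest large size class so its profiling context can be
 * tracked the way large allocations are tracked, and arena_prof_promoted()
 * records the originally requested usize so size accounting still reflects
 * the small class. The same pattern recurs in calloc(), realloc(), and the
 * *allocm() entry points below.
 */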
-
-JEMALLOC_ATTR(nonnull(1))
-#ifdef JEMALLOC_PROF
-/*
- * Avoid any uncertainty as to how many backtrace frames to ignore in
- * PROF_ALLOC_PREP().
- */
-JEMALLOC_NOINLINE
-#endif
-static int
-imemalign(void **memptr, size_t alignment, size_t size,
- size_t min_alignment)
-{
- int ret;
- size_t usize;
- void *result;
- prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
-
- assert(min_alignment != 0);
-
- if (malloc_init())
- result = NULL;
- else {
- if (size == 0)
- size = 1;
-
- /* Make sure that alignment is a large enough power of 2. */
- if (((alignment - 1) & alignment) != 0
- || (alignment < min_alignment)) {
- if (config_xmalloc && opt_xmalloc) {
- malloc_write("<jemalloc>: Error allocating "
- "aligned memory: invalid alignment\n");
- abort();
- }
- result = NULL;
- ret = EINVAL;
- goto label_return;
- }
-
- usize = sa2u(size, alignment);
- if (usize == 0) {
- result = NULL;
- ret = ENOMEM;
- goto label_return;
- }
-
- if (config_prof && opt_prof) {
- PROF_ALLOC_PREP(2, usize, cnt);
- if (cnt == NULL) {
- result = NULL;
- ret = EINVAL;
- } else {
- if (prof_promote && (uintptr_t)cnt !=
- (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
- assert(sa2u(SMALL_MAXCLASS+1,
- alignment) != 0);
- result = ipalloc(sa2u(SMALL_MAXCLASS+1,
- alignment), alignment, false);
- if (result != NULL) {
- arena_prof_promoted(result,
- usize);
- }
- } else {
- result = ipalloc(usize, alignment,
- false);
- }
- }
- } else
- result = ipalloc(usize, alignment, false);
- }
-
- if (result == NULL) {
- if (config_xmalloc && opt_xmalloc) {
- malloc_write("<jemalloc>: Error allocating aligned "
- "memory: out of memory\n");
- abort();
- }
- ret = ENOMEM;
- goto label_return;
- }
-
- *memptr = result;
- ret = 0;
-
-label_return:
- if (config_stats && result != NULL) {
- assert(usize == isalloc(result, config_prof));
- thread_allocated_tsd_get()->allocated += usize;
- }
- if (config_prof && opt_prof && result != NULL)
- prof_malloc(result, usize, cnt);
- UTRACE(0, size, result);
- return (ret);
-}
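/*
 * The alignment check above is the usual power-of-two test; a minimal sketch
 * with a hypothetical helper name:
 */
static bool
alignment_is_pow2(size_t alignment)
{

	/* A power of two has exactly one bit set, so clearing it leaves zero. */
	return (alignment != 0 && ((alignment - 1) & alignment) == 0);
}
/* e.g. 64: 63 & 64 == 0 (accepted); 48: 47 & 48 == 32 (rejected, EINVAL). */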
-
-int
-je_posix_memalign(void **memptr, size_t alignment, size_t size)
-{
- int ret = imemalign(memptr, alignment, size, sizeof(void *));
- JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
- config_prof), false);
- return (ret);
-}
-
-void *
-je_aligned_alloc(size_t alignment, size_t size)
-{
- void *ret;
- int err;
-
- if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
- ret = NULL;
- set_errno(err);
- }
- JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
- false);
- return (ret);
-}
-
-void *
-je_calloc(size_t num, size_t size)
-{
- void *ret;
- size_t num_size;
- size_t usize JEMALLOC_CC_SILENCE_INIT(0);
- prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
-
- if (malloc_init()) {
- num_size = 0;
- ret = NULL;
- goto label_return;
- }
-
- num_size = num * size;
- if (num_size == 0) {
- if (num == 0 || size == 0)
- num_size = 1;
- else {
- ret = NULL;
- goto label_return;
- }
- /*
- * Try to avoid division here. We know that it isn't possible to
- * overflow during multiplication if neither operand uses any of the
- * most significant half of the bits in a size_t.
- */
- } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
- && (num_size / size != num)) {
- /* size_t overflow. */
- ret = NULL;
- goto label_return;
- }
-
- if (config_prof && opt_prof) {
- usize = s2u(num_size);
- PROF_ALLOC_PREP(1, usize, cnt);
- if (cnt == NULL) {
- ret = NULL;
- goto label_return;
- }
- if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
- <= SMALL_MAXCLASS) {
- ret = icalloc(SMALL_MAXCLASS+1);
- if (ret != NULL)
- arena_prof_promoted(ret, usize);
- } else
- ret = icalloc(num_size);
- } else {
- if (config_stats || (config_valgrind && opt_valgrind))
- usize = s2u(num_size);
- ret = icalloc(num_size);
- }
-
-label_return:
- if (ret == NULL) {
- if (config_xmalloc && opt_xmalloc) {
- malloc_write("<jemalloc>: Error in calloc(): out of "
- "memory\n");
- abort();
- }
- set_errno(ENOMEM);
- }
-
- if (config_prof && opt_prof && ret != NULL)
- prof_malloc(ret, usize, cnt);
- if (config_stats && ret != NULL) {
- assert(usize == isalloc(ret, config_prof));
- thread_allocated_tsd_get()->allocated += usize;
- }
- UTRACE(0, num_size, ret);
- JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
- return (ret);
-}
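/*
 * The overflow test above, restated as a helper (hypothetical name; the real
 * check is inlined): division is only paid for when at least one operand uses
 * the upper half of size_t's bits, because two operands confined to the lower
 * half cannot overflow when multiplied.
 */
static bool
calloc_size_overflows(size_t num, size_t size)
{
	size_t num_size = num * size;

	if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2))) == 0)
		return (false);	/* Both operands fit in the low half. */
	return (size != 0 && num_size / size != num);
}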
-
-void *
-je_realloc(void *ptr, size_t size)
-{
- void *ret;
- size_t usize JEMALLOC_CC_SILENCE_INIT(0);
- size_t old_size = 0;
- size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
- prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
- prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);
-
- if (size == 0) {
- if (ptr != NULL) {
-			/* realloc(ptr, 0) is equivalent to free(ptr). */
- assert(malloc_initialized || IS_INITIALIZER);
- if (config_prof) {
- old_size = isalloc(ptr, true);
- if (config_valgrind && opt_valgrind)
- old_rzsize = p2rz(ptr);
- } else if (config_stats) {
- old_size = isalloc(ptr, false);
- if (config_valgrind && opt_valgrind)
- old_rzsize = u2rz(old_size);
- } else if (config_valgrind && opt_valgrind) {
- old_size = isalloc(ptr, false);
- old_rzsize = u2rz(old_size);
- }
- if (config_prof && opt_prof) {
- old_ctx = prof_ctx_get(ptr);
- cnt = NULL;
- }
- iqalloc(ptr);
- ret = NULL;
- goto label_return;
- } else
- size = 1;
- }
-
- if (ptr != NULL) {
- assert(malloc_initialized || IS_INITIALIZER);
- malloc_thread_init();
-
- if (config_prof) {
- old_size = isalloc(ptr, true);
- if (config_valgrind && opt_valgrind)
- old_rzsize = p2rz(ptr);
- } else if (config_stats) {
- old_size = isalloc(ptr, false);
- if (config_valgrind && opt_valgrind)
- old_rzsize = u2rz(old_size);
- } else if (config_valgrind && opt_valgrind) {
- old_size = isalloc(ptr, false);
- old_rzsize = u2rz(old_size);
- }
- if (config_prof && opt_prof) {
- usize = s2u(size);
- old_ctx = prof_ctx_get(ptr);
- PROF_ALLOC_PREP(1, usize, cnt);
- if (cnt == NULL) {
- old_ctx = NULL;
- ret = NULL;
- goto label_oom;
- }
- if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
- usize <= SMALL_MAXCLASS) {
- ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
- false, false);
- if (ret != NULL)
- arena_prof_promoted(ret, usize);
- else
- old_ctx = NULL;
- } else {
- ret = iralloc(ptr, size, 0, 0, false, false);
- if (ret == NULL)
- old_ctx = NULL;
- }
- } else {
- if (config_stats || (config_valgrind && opt_valgrind))
- usize = s2u(size);
- ret = iralloc(ptr, size, 0, 0, false, false);
- }
-
-label_oom:
- if (ret == NULL) {
- if (config_xmalloc && opt_xmalloc) {
- malloc_write("<jemalloc>: Error in realloc(): "
- "out of memory\n");
- abort();
- }
- set_errno(ENOMEM);
- }
- } else {
- /* realloc(NULL, size) is equivalent to malloc(size). */
- if (config_prof && opt_prof)
- old_ctx = NULL;
- if (malloc_init()) {
- if (config_prof && opt_prof)
- cnt = NULL;
- ret = NULL;
- } else {
- if (config_prof && opt_prof) {
- usize = s2u(size);
- PROF_ALLOC_PREP(1, usize, cnt);
- if (cnt == NULL)
- ret = NULL;
- else {
- if (prof_promote && (uintptr_t)cnt !=
- (uintptr_t)1U && usize <=
- SMALL_MAXCLASS) {
- ret = imalloc(SMALL_MAXCLASS+1);
- if (ret != NULL) {
- arena_prof_promoted(ret,
- usize);
- }
- } else
- ret = imalloc(size);
- }
- } else {
- if (config_stats || (config_valgrind &&
- opt_valgrind))
- usize = s2u(size);
- ret = imalloc(size);
- }
- }
-
- if (ret == NULL) {
- if (config_xmalloc && opt_xmalloc) {
- malloc_write("<jemalloc>: Error in realloc(): "
- "out of memory\n");
- abort();
- }
- set_errno(ENOMEM);
- }
- }
-
-label_return:
- if (config_prof && opt_prof)
- prof_realloc(ret, usize, cnt, old_size, old_ctx);
- if (config_stats && ret != NULL) {
- thread_allocated_t *ta;
- assert(usize == isalloc(ret, config_prof));
- ta = thread_allocated_tsd_get();
- ta->allocated += usize;
- ta->deallocated += old_size;
- }
- UTRACE(ptr, size, ret);
- JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
- return (ret);
-}
-
-void
-je_free(void *ptr)
-{
-
- UTRACE(ptr, 0, 0);
- if (ptr != NULL) {
- size_t usize;
- size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
-
- assert(malloc_initialized || IS_INITIALIZER);
-
- if (config_prof && opt_prof) {
- usize = isalloc(ptr, config_prof);
- prof_free(ptr, usize);
- } else if (config_stats || config_valgrind)
- usize = isalloc(ptr, config_prof);
- if (config_stats)
- thread_allocated_tsd_get()->deallocated += usize;
- if (config_valgrind && opt_valgrind)
- rzsize = p2rz(ptr);
- iqalloc(ptr);
- JEMALLOC_VALGRIND_FREE(ptr, rzsize);
- }
-}
-
-/*
- * End malloc(3)-compatible functions.
- */
-/******************************************************************************/
-/*
- * Begin non-standard override functions.
- */
-
-#ifdef JEMALLOC_OVERRIDE_MEMALIGN
-void *
-je_memalign(size_t alignment, size_t size)
-{
- void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
- imemalign(&ret, alignment, size, 1);
- JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
- return (ret);
-}
-#endif
-
-#ifdef JEMALLOC_OVERRIDE_VALLOC
-void *
-je_valloc(size_t size)
-{
- void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
- imemalign(&ret, PAGE, size, 1);
- JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
- return (ret);
-}
-#endif
-
-/*
- * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
- * #define je_malloc malloc
- */
-#define malloc_is_malloc 1
-#define is_malloc_(a) malloc_is_ ## a
-#define is_malloc(a) is_malloc_(a)
-
-#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
-/*
- * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
- * to inconsistently reference libc's malloc(3)-compatible functions
- * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
- *
- * These definitions interpose hooks in glibc. The functions are actually
- * passed an extra argument for the caller return address, which will be
- * ignored.
- */
-JEMALLOC_EXPORT void (* __free_hook)(void *ptr) = je_free;
-JEMALLOC_EXPORT void *(* __malloc_hook)(size_t size) = je_malloc;
-JEMALLOC_EXPORT void *(* __realloc_hook)(void *ptr, size_t size) = je_realloc;
-JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) =
- je_memalign;
-#endif
-
-/*
- * End non-standard override functions.
- */
-/******************************************************************************/
-/*
- * Begin non-standard functions.
- */
-
-size_t
-je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
-{
- size_t ret;
-
- assert(malloc_initialized || IS_INITIALIZER);
- malloc_thread_init();
-
- if (config_ivsalloc)
- ret = ivsalloc(ptr, config_prof);
- else
- ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
-
- return (ret);
-}
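/*
 * Example (standard jemalloc API; unprefixed build assumed): the size class
 * backing an allocation may exceed the request, and all of it is usable.
 *
 *	void *p = malloc(100);
 *	size_t n = malloc_usable_size(p);	// >= 100; the backing size class
 */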
-
-void
-je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *opts)
-{
-
- stats_print(write_cb, cbopaque, opts);
-}
-
-int
-je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
- size_t newlen)
-{
-
- if (malloc_init())
- return (EAGAIN);
-
- return (ctl_byname(name, oldp, oldlenp, newp, newlen));
-}
-
-int
-je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
-{
-
- if (malloc_init())
- return (EAGAIN);
-
- return (ctl_nametomib(name, mibp, miblenp));
-}
-
-int
-je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
-{
-
- if (malloc_init())
- return (EAGAIN);
-
- return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
-}
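/*
 * Typical mallctl() usage (documented interface; unprefixed names assumed):
 * write the epoch to refresh cached statistics, then read a counter.
 */
	uint64_t epoch = 1;
	size_t allocated, sz;

	sz = sizeof(epoch);
	mallctl("epoch", &epoch, &sz, &epoch, sz);

	sz = sizeof(allocated);
	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
		malloc_printf("allocated: %zu\n", allocated);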
-
-/*
- * End non-standard functions.
- */
-/******************************************************************************/
-/*
- * Begin experimental functions.
- */
-#ifdef JEMALLOC_EXPERIMENTAL
-
-static JEMALLOC_ATTR(always_inline) void *
-iallocm(size_t usize, size_t alignment, bool zero, bool try_tcache,
- arena_t *arena)
-{
-
- assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
- alignment)));
-
- if (alignment != 0)
- return (ipallocx(usize, alignment, zero, try_tcache, arena));
- else if (zero)
- return (icallocx(usize, try_tcache, arena));
- else
- return (imallocx(usize, try_tcache, arena));
-}
-
-int
-je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
-{
- void *p;
- size_t usize;
- size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
- & (SIZE_T_MAX-1));
- bool zero = flags & ALLOCM_ZERO;
- unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
- arena_t *arena;
- bool try_tcache;
-
- assert(ptr != NULL);
- assert(size != 0);
-
- if (malloc_init())
- goto label_oom;
-
- if (arena_ind != UINT_MAX) {
- arena = arenas[arena_ind];
- try_tcache = false;
- } else {
- arena = NULL;
- try_tcache = true;
- }
-
- usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
- if (usize == 0)
- goto label_oom;
-
- if (config_prof && opt_prof) {
- prof_thr_cnt_t *cnt;
-
- PROF_ALLOC_PREP(1, usize, cnt);
- if (cnt == NULL)
- goto label_oom;
- if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
- SMALL_MAXCLASS) {
- size_t usize_promoted = (alignment == 0) ?
- s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
- alignment);
- assert(usize_promoted != 0);
- p = iallocm(usize_promoted, alignment, zero,
- try_tcache, arena);
- if (p == NULL)
- goto label_oom;
- arena_prof_promoted(p, usize);
- } else {
- p = iallocm(usize, alignment, zero, try_tcache, arena);
- if (p == NULL)
- goto label_oom;
- }
- prof_malloc(p, usize, cnt);
- } else {
- p = iallocm(usize, alignment, zero, try_tcache, arena);
- if (p == NULL)
- goto label_oom;
- }
- if (rsize != NULL)
- *rsize = usize;
-
- *ptr = p;
- if (config_stats) {
- assert(usize == isalloc(p, config_prof));
- thread_allocated_tsd_get()->allocated += usize;
- }
- UTRACE(0, size, p);
- JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
- return (ALLOCM_SUCCESS);
-label_oom:
- if (config_xmalloc && opt_xmalloc) {
- malloc_write("<jemalloc>: Error in allocm(): "
- "out of memory\n");
- abort();
- }
- *ptr = NULL;
- UTRACE(0, size, 0);
- return (ALLOCM_ERR_OOM);
-}
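/*
 * Experimental-API usage sketch (ALLOCM_ALIGN(), ALLOCM_ZERO, and
 * ALLOCM_ARENA() come from the same experimental jemalloc.h interface):
 * ALLOCM_ALIGN() packs lg(alignment) into the low bits decoded above, and
 * ALLOCM_ARENA() selects an explicit arena via the bits above 8.
 */
	void *p;
	size_t rsize;

	if (allocm(&p, &rsize, 4096, ALLOCM_ALIGN(64) | ALLOCM_ZERO) ==
	    ALLOCM_SUCCESS) {
		/* p is 64-byte aligned and zeroed; rsize is the usable size. */
	}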
-
-int
-je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
-{
- void *p, *q;
- size_t usize;
- size_t old_size;
- size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
- size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
- & (SIZE_T_MAX-1));
- bool zero = flags & ALLOCM_ZERO;
- bool no_move = flags & ALLOCM_NO_MOVE;
- unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
- bool try_tcache_alloc, try_tcache_dalloc;
- arena_t *arena;
-
- assert(ptr != NULL);
- assert(*ptr != NULL);
- assert(size != 0);
- assert(SIZE_T_MAX - size >= extra);
- assert(malloc_initialized || IS_INITIALIZER);
- malloc_thread_init();
-
- if (arena_ind != UINT_MAX) {
- arena_chunk_t *chunk;
- try_tcache_alloc = true;
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(*ptr);
- try_tcache_dalloc = (chunk == *ptr || chunk->arena !=
- arenas[arena_ind]);
- arena = arenas[arena_ind];
- } else {
- try_tcache_alloc = true;
- try_tcache_dalloc = true;
- arena = NULL;
- }
-
- p = *ptr;
- if (config_prof && opt_prof) {
- prof_thr_cnt_t *cnt;
-
- /*
- * usize isn't knowable before iralloc() returns when extra is
- * non-zero. Therefore, compute its maximum possible value and
- * use that in PROF_ALLOC_PREP() to decide whether to capture a
- * backtrace. prof_realloc() will use the actual usize to
- * decide whether to sample.
- */
- size_t max_usize = (alignment == 0) ? s2u(size+extra) :
- sa2u(size+extra, alignment);
- prof_ctx_t *old_ctx = prof_ctx_get(p);
- old_size = isalloc(p, true);
- if (config_valgrind && opt_valgrind)
- old_rzsize = p2rz(p);
- PROF_ALLOC_PREP(1, max_usize, cnt);
- if (cnt == NULL)
- goto label_oom;
- /*
- * Use minimum usize to determine whether promotion may happen.
- */
- if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
- && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
- <= SMALL_MAXCLASS) {
- q = irallocx(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
- size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
- alignment, zero, no_move, try_tcache_alloc,
- try_tcache_dalloc, arena);
- if (q == NULL)
- goto label_err;
- if (max_usize < PAGE) {
- usize = max_usize;
- arena_prof_promoted(q, usize);
- } else
- usize = isalloc(q, config_prof);
- } else {
- q = irallocx(p, size, extra, alignment, zero, no_move,
- try_tcache_alloc, try_tcache_dalloc, arena);
- if (q == NULL)
- goto label_err;
- usize = isalloc(q, config_prof);
- }
- prof_realloc(q, usize, cnt, old_size, old_ctx);
- if (rsize != NULL)
- *rsize = usize;
- } else {
- if (config_stats) {
- old_size = isalloc(p, false);
- if (config_valgrind && opt_valgrind)
- old_rzsize = u2rz(old_size);
- } else if (config_valgrind && opt_valgrind) {
- old_size = isalloc(p, false);
- old_rzsize = u2rz(old_size);
- }
- q = irallocx(p, size, extra, alignment, zero, no_move,
- try_tcache_alloc, try_tcache_dalloc, arena);
- if (q == NULL)
- goto label_err;
- if (config_stats)
- usize = isalloc(q, config_prof);
- if (rsize != NULL) {
- if (config_stats == false)
- usize = isalloc(q, config_prof);
- *rsize = usize;
- }
- }
-
- *ptr = q;
- if (config_stats) {
- thread_allocated_t *ta;
- ta = thread_allocated_tsd_get();
- ta->allocated += usize;
- ta->deallocated += old_size;
- }
- UTRACE(p, size, q);
- JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
- return (ALLOCM_SUCCESS);
-label_err:
- if (no_move) {
- UTRACE(p, size, q);
- return (ALLOCM_ERR_NOT_MOVED);
- }
-label_oom:
- if (config_xmalloc && opt_xmalloc) {
- malloc_write("<jemalloc>: Error in rallocm(): "
- "out of memory\n");
- abort();
- }
- UTRACE(p, size, 0);
- return (ALLOCM_ERR_OOM);
-}
-
-int
-je_sallocm(const void *ptr, size_t *rsize, int flags)
-{
- size_t sz;
-
- assert(malloc_initialized || IS_INITIALIZER);
- malloc_thread_init();
-
- if (config_ivsalloc)
- sz = ivsalloc(ptr, config_prof);
- else {
- assert(ptr != NULL);
- sz = isalloc(ptr, config_prof);
- }
- assert(rsize != NULL);
- *rsize = sz;
-
- return (ALLOCM_SUCCESS);
-}
-
-int
-je_dallocm(void *ptr, int flags)
-{
- size_t usize;
- size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
- unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
- bool try_tcache;
-
- assert(ptr != NULL);
- assert(malloc_initialized || IS_INITIALIZER);
-
- if (arena_ind != UINT_MAX) {
- arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- try_tcache = (chunk == ptr || chunk->arena !=
- arenas[arena_ind]);
- } else
- try_tcache = true;
-
- UTRACE(ptr, 0, 0);
- if (config_stats || config_valgrind)
- usize = isalloc(ptr, config_prof);
- if (config_prof && opt_prof) {
- if (config_stats == false && config_valgrind == false)
- usize = isalloc(ptr, config_prof);
- prof_free(ptr, usize);
- }
- if (config_stats)
- thread_allocated_tsd_get()->deallocated += usize;
- if (config_valgrind && opt_valgrind)
- rzsize = p2rz(ptr);
- iqallocx(ptr, try_tcache);
- JEMALLOC_VALGRIND_FREE(ptr, rzsize);
-
- return (ALLOCM_SUCCESS);
-}
-
-int
-je_nallocm(size_t *rsize, size_t size, int flags)
-{
- size_t usize;
- size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
- & (SIZE_T_MAX-1));
-
- assert(size != 0);
-
- if (malloc_init())
- return (ALLOCM_ERR_OOM);
-
- usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
- if (usize == 0)
- return (ALLOCM_ERR_OOM);
-
- if (rsize != NULL)
- *rsize = usize;
- return (ALLOCM_SUCCESS);
-}
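/*
 * Example: nallocm() reports the usable size a request would map to without
 * allocating anything, so callers can size buffers to the real backing class.
 */
	size_t rsize;

	if (nallocm(&rsize, 100, 0) == ALLOCM_SUCCESS) {
		/* rsize is s2u(100), i.e. the size class that 100 maps to. */
	}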
-
-#endif
-/*
- * End experimental functions.
- */
-/******************************************************************************/
-/*
- * The following functions are used by threading libraries for protection of
- * malloc during fork().
- */
-
-/*
- * If an application creates a thread before doing any allocation in the main
- * thread, then calls fork(2) in the main thread followed by memory allocation
- * in the child process, a race can occur that results in deadlock within the
- * child: the main thread may have forked while the created thread had
- * partially initialized the allocator. Ordinarily jemalloc prevents
- * fork/malloc races via the following functions it registers during
- * initialization using pthread_atfork(), but of course that does no good if
- * the allocator isn't fully initialized at fork time. The following library
- * constructor is a partial solution to this problem. It may still be possible
- * to trigger the deadlock described above, but doing so would involve forking
- * via a library constructor that runs before jemalloc's runs.
- */
-JEMALLOC_ATTR(constructor)
-static void
-jemalloc_constructor(void)
-{
-
- malloc_init();
-}
-
-#ifndef JEMALLOC_MUTEX_INIT_CB
-void
-jemalloc_prefork(void)
-#else
-JEMALLOC_EXPORT void
-_malloc_prefork(void)
-#endif
-{
- unsigned i;
-
-#ifdef JEMALLOC_MUTEX_INIT_CB
- if (malloc_initialized == false)
- return;
-#endif
- assert(malloc_initialized);
-
- /* Acquire all mutexes in a safe order. */
- ctl_prefork();
- prof_prefork();
- malloc_mutex_prefork(&arenas_lock);
- for (i = 0; i < narenas_total; i++) {
- if (arenas[i] != NULL)
- arena_prefork(arenas[i]);
- }
- chunk_prefork();
- base_prefork();
- huge_prefork();
-}
-
-#ifndef JEMALLOC_MUTEX_INIT_CB
-void
-jemalloc_postfork_parent(void)
-#else
-JEMALLOC_EXPORT void
-_malloc_postfork(void)
-#endif
-{
- unsigned i;
-
-#ifdef JEMALLOC_MUTEX_INIT_CB
- if (malloc_initialized == false)
- return;
-#endif
- assert(malloc_initialized);
-
- /* Release all mutexes, now that fork() has completed. */
- huge_postfork_parent();
- base_postfork_parent();
- chunk_postfork_parent();
- for (i = 0; i < narenas_total; i++) {
- if (arenas[i] != NULL)
- arena_postfork_parent(arenas[i]);
- }
- malloc_mutex_postfork_parent(&arenas_lock);
- prof_postfork_parent();
- ctl_postfork_parent();
-}
-
-void
-jemalloc_postfork_child(void)
-{
- unsigned i;
-
- assert(malloc_initialized);
-
- /* Release all mutexes, now that fork() has completed. */
- huge_postfork_child();
- base_postfork_child();
- chunk_postfork_child();
- for (i = 0; i < narenas_total; i++) {
- if (arenas[i] != NULL)
- arena_postfork_child(arenas[i]);
- }
- malloc_mutex_postfork_child(&arenas_lock);
- prof_postfork_child();
- ctl_postfork_child();
-}
-
-/******************************************************************************/
-/*
- * The following functions are used for TLS allocation/deallocation in static
- * binaries on FreeBSD. The primary difference between these and i[mcd]alloc()
- * is that these avoid accessing TLS variables.
- */
-
-static void *
-a0alloc(size_t size, bool zero)
-{
-
- if (malloc_init())
- return (NULL);
-
- if (size == 0)
- size = 1;
-
- if (size <= arena_maxclass)
- return (arena_malloc(arenas[0], size, zero, false));
- else
- return (huge_malloc(size, zero));
-}
-
-void *
-a0malloc(size_t size)
-{
-
- return (a0alloc(size, false));
-}
-
-void *
-a0calloc(size_t num, size_t size)
-{
-
- return (a0alloc(num * size, true));
-}
-
-void
-a0free(void *ptr)
-{
- arena_chunk_t *chunk;
-
- if (ptr == NULL)
- return;
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (chunk != ptr)
- arena_dalloc(chunk->arena, chunk, ptr, false);
- else
- huge_dalloc(ptr, true);
-}
-
-/******************************************************************************/
diff --git a/extra/jemalloc/src/mb.c b/extra/jemalloc/src/mb.c
deleted file mode 100644
index dc2c0a256fd..00000000000
--- a/extra/jemalloc/src/mb.c
+++ /dev/null
@@ -1,2 +0,0 @@
-#define JEMALLOC_MB_C_
-#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/extra/jemalloc/src/mutex.c b/extra/jemalloc/src/mutex.c
deleted file mode 100644
index 55e18c23713..00000000000
--- a/extra/jemalloc/src/mutex.c
+++ /dev/null
@@ -1,149 +0,0 @@
-#define JEMALLOC_MUTEX_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
-#include <dlfcn.h>
-#endif
-
-#ifndef _CRT_SPINCOUNT
-#define _CRT_SPINCOUNT 4000
-#endif
-
-/******************************************************************************/
-/* Data. */
-
-#ifdef JEMALLOC_LAZY_LOCK
-bool isthreaded = false;
-#endif
-#ifdef JEMALLOC_MUTEX_INIT_CB
-static bool postpone_init = true;
-static malloc_mutex_t *postponed_mutexes = NULL;
-#endif
-
-#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
-static void pthread_create_once(void);
-#endif
-
-/******************************************************************************/
-/*
- * We intercept pthread_create() calls in order to toggle isthreaded if the
- * process goes multi-threaded.
- */
-
-#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
-static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
- void *(*)(void *), void *__restrict);
-
-static void
-pthread_create_once(void)
-{
-
- pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
- if (pthread_create_fptr == NULL) {
- malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
- "\"pthread_create\")\n");
- abort();
- }
-
- isthreaded = true;
-}
-
-JEMALLOC_EXPORT int
-pthread_create(pthread_t *__restrict thread,
- const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
- void *__restrict arg)
-{
- static pthread_once_t once_control = PTHREAD_ONCE_INIT;
-
- pthread_once(&once_control, pthread_create_once);
-
- return (pthread_create_fptr(thread, attr, start_routine, arg));
-}
-#endif
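/*
 * Why toggling isthreaded matters (simplified sketch of the pthreads fast
 * path; the real malloc_mutex_lock() lives in the mutex header and also has
 * Win32 and OSSpin variants): with JEMALLOC_LAZY_LOCK, locking is skipped
 * entirely until the interposed pthread_create() above proves the process is
 * multi-threaded.
 *
 *	JEMALLOC_INLINE void
 *	malloc_mutex_lock(malloc_mutex_t *mutex)
 *	{
 *
 *		if (isthreaded)
 *			pthread_mutex_lock(&mutex->lock);
 *	}
 */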
-
-/******************************************************************************/
-
-#ifdef JEMALLOC_MUTEX_INIT_CB
-JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
- void *(calloc_cb)(size_t, size_t));
-#endif
-
-bool
-malloc_mutex_init(malloc_mutex_t *mutex)
-{
-
-#ifdef _WIN32
- if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
- _CRT_SPINCOUNT))
- return (true);
-#elif (defined(JEMALLOC_OSSPIN))
- mutex->lock = 0;
-#elif (defined(JEMALLOC_MUTEX_INIT_CB))
- if (postpone_init) {
- mutex->postponed_next = postponed_mutexes;
- postponed_mutexes = mutex;
- } else {
- if (_pthread_mutex_init_calloc_cb(&mutex->lock, base_calloc) !=
- 0)
- return (true);
- }
-#else
- pthread_mutexattr_t attr;
-
- if (pthread_mutexattr_init(&attr) != 0)
- return (true);
- pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
- if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
- pthread_mutexattr_destroy(&attr);
- return (true);
- }
- pthread_mutexattr_destroy(&attr);
-#endif
- return (false);
-}
-
-void
-malloc_mutex_prefork(malloc_mutex_t *mutex)
-{
-
- malloc_mutex_lock(mutex);
-}
-
-void
-malloc_mutex_postfork_parent(malloc_mutex_t *mutex)
-{
-
- malloc_mutex_unlock(mutex);
-}
-
-void
-malloc_mutex_postfork_child(malloc_mutex_t *mutex)
-{
-
-#ifdef JEMALLOC_MUTEX_INIT_CB
- malloc_mutex_unlock(mutex);
-#else
- if (malloc_mutex_init(mutex)) {
- malloc_printf("<jemalloc>: Error re-initializing mutex in "
- "child\n");
- if (opt_abort)
- abort();
- }
-#endif
-}
-
-bool
-mutex_boot(void)
-{
-
-#ifdef JEMALLOC_MUTEX_INIT_CB
- postpone_init = false;
- while (postponed_mutexes != NULL) {
- if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
- base_calloc) != 0)
- return (true);
- postponed_mutexes = postponed_mutexes->postponed_next;
- }
-#endif
- return (false);
-}
diff --git a/extra/jemalloc/src/prof.c b/extra/jemalloc/src/prof.c
deleted file mode 100644
index c133b95c2c6..00000000000
--- a/extra/jemalloc/src/prof.c
+++ /dev/null
@@ -1,1283 +0,0 @@
-#define JEMALLOC_PROF_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-/******************************************************************************/
-
-#ifdef JEMALLOC_PROF_LIBUNWIND
-#define UNW_LOCAL_ONLY
-#include <libunwind.h>
-#endif
-
-#ifdef JEMALLOC_PROF_LIBGCC
-#include <unwind.h>
-#endif
-
-/******************************************************************************/
-/* Data. */
-
-malloc_tsd_data(, prof_tdata, prof_tdata_t *, NULL)
-
-bool opt_prof = false;
-bool opt_prof_active = true;
-size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
-ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
-bool opt_prof_gdump = false;
-bool opt_prof_final = true;
-bool opt_prof_leak = false;
-bool opt_prof_accum = false;
-char opt_prof_prefix[PATH_MAX + 1];
-
-uint64_t prof_interval = 0;
-bool prof_promote;
-
-/*
- * Table of mutexes that are shared among ctx's. These are leaf locks, so
- * there is no problem with using them for more than one ctx at the same time.
- * The primary motivation for this sharing though is that ctx's are ephemeral,
- * and destroying mutexes causes complications for systems that allocate when
- * creating/destroying mutexes.
- */
-static malloc_mutex_t *ctx_locks;
-static unsigned cum_ctxs; /* Atomic counter. */
-
-/*
- * Global hash of (prof_bt_t *)-->(prof_ctx_t *). This is the master data
- * structure that knows about all backtraces currently captured.
- */
-static ckh_t bt2ctx;
-static malloc_mutex_t bt2ctx_mtx;
-
-static malloc_mutex_t prof_dump_seq_mtx;
-static uint64_t prof_dump_seq;
-static uint64_t prof_dump_iseq;
-static uint64_t prof_dump_mseq;
-static uint64_t prof_dump_useq;
-
-/*
- * This buffer is rather large for stack allocation, so use a single buffer for
- * all profile dumps. The buffer is implicitly protected by bt2ctx_mtx, since
- * it must be locked anyway during dumping.
- */
-static char prof_dump_buf[PROF_DUMP_BUFSIZE];
-static unsigned prof_dump_buf_end;
-static int prof_dump_fd;
-
-/* Do not dump any profiles until bootstrapping is complete. */
-static bool prof_booted = false;
-
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static prof_bt_t *bt_dup(prof_bt_t *bt);
-static void bt_destroy(prof_bt_t *bt);
-#ifdef JEMALLOC_PROF_LIBGCC
-static _Unwind_Reason_Code prof_unwind_init_callback(
- struct _Unwind_Context *context, void *arg);
-static _Unwind_Reason_Code prof_unwind_callback(
- struct _Unwind_Context *context, void *arg);
-#endif
-static bool prof_flush(bool propagate_err);
-static bool prof_write(bool propagate_err, const char *s);
-static bool prof_printf(bool propagate_err, const char *format, ...)
- JEMALLOC_ATTR(format(printf, 2, 3));
-static void prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all,
- size_t *leak_nctx);
-static void prof_ctx_destroy(prof_ctx_t *ctx);
-static void prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt);
-static bool prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx,
- prof_bt_t *bt);
-static bool prof_dump_maps(bool propagate_err);
-static bool prof_dump(bool propagate_err, const char *filename,
- bool leakcheck);
-static void prof_dump_filename(char *filename, char v, int64_t vseq);
-static void prof_fdump(void);
-static void prof_bt_hash(const void *key, size_t r_hash[2]);
-static bool prof_bt_keycomp(const void *k1, const void *k2);
-static malloc_mutex_t *prof_ctx_mutex_choose(void);
-
-/******************************************************************************/
-
-void
-bt_init(prof_bt_t *bt, void **vec)
-{
-
- cassert(config_prof);
-
- bt->vec = vec;
- bt->len = 0;
-}
-
-static void
-bt_destroy(prof_bt_t *bt)
-{
-
- cassert(config_prof);
-
- idalloc(bt);
-}
-
-static prof_bt_t *
-bt_dup(prof_bt_t *bt)
-{
- prof_bt_t *ret;
-
- cassert(config_prof);
-
- /*
- * Create a single allocation that has space for vec immediately
- * following the prof_bt_t structure. The backtraces that get
- * stored in the backtrace caches are copied from stack-allocated
- * temporary variables, so size is known at creation time. Making this
- * a contiguous object improves cache locality.
- */
- ret = (prof_bt_t *)imalloc(QUANTUM_CEILING(sizeof(prof_bt_t)) +
- (bt->len * sizeof(void *)));
- if (ret == NULL)
- return (NULL);
- ret->vec = (void **)((uintptr_t)ret +
- QUANTUM_CEILING(sizeof(prof_bt_t)));
- memcpy(ret->vec, bt->vec, bt->len * sizeof(void *));
- ret->len = bt->len;
-
- return (ret);
-}
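/*
 * Layout produced above (illustration): one contiguous object, so a single
 * imalloc()/idalloc() pair manages both the header and the copied frames.
 *
 *	ret:  [ prof_bt_t header | pad to QUANTUM | void *vec[bt->len] ]
 *	                                          ^ ret->vec points here
 */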
-
-static inline void
-prof_enter(prof_tdata_t *prof_tdata)
-{
-
- cassert(config_prof);
-
- assert(prof_tdata->enq == false);
- prof_tdata->enq = true;
-
- malloc_mutex_lock(&bt2ctx_mtx);
-}
-
-static inline void
-prof_leave(prof_tdata_t *prof_tdata)
-{
- bool idump, gdump;
-
- cassert(config_prof);
-
- malloc_mutex_unlock(&bt2ctx_mtx);
-
- assert(prof_tdata->enq);
- prof_tdata->enq = false;
- idump = prof_tdata->enq_idump;
- prof_tdata->enq_idump = false;
- gdump = prof_tdata->enq_gdump;
- prof_tdata->enq_gdump = false;
-
- if (idump)
- prof_idump();
- if (gdump)
- prof_gdump();
-}
-
-#ifdef JEMALLOC_PROF_LIBUNWIND
-void
-prof_backtrace(prof_bt_t *bt, unsigned nignore)
-{
- unw_context_t uc;
- unw_cursor_t cursor;
- unsigned i;
- int err;
-
- cassert(config_prof);
- assert(bt->len == 0);
- assert(bt->vec != NULL);
-
- unw_getcontext(&uc);
- unw_init_local(&cursor, &uc);
-
- /* Throw away (nignore+1) stack frames, if that many exist. */
- for (i = 0; i < nignore + 1; i++) {
- err = unw_step(&cursor);
- if (err <= 0)
- return;
- }
-
- /*
- * Iterate over stack frames until there are no more, or until no space
- * remains in bt.
- */
- for (i = 0; i < PROF_BT_MAX; i++) {
- unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *)&bt->vec[i]);
- bt->len++;
- err = unw_step(&cursor);
- if (err <= 0)
- break;
- }
-}
-#elif (defined(JEMALLOC_PROF_LIBGCC))
-static _Unwind_Reason_Code
-prof_unwind_init_callback(struct _Unwind_Context *context, void *arg)
-{
-
- cassert(config_prof);
-
- return (_URC_NO_REASON);
-}
-
-static _Unwind_Reason_Code
-prof_unwind_callback(struct _Unwind_Context *context, void *arg)
-{
- prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
-
- cassert(config_prof);
-
- if (data->nignore > 0)
- data->nignore--;
- else {
- data->bt->vec[data->bt->len] = (void *)_Unwind_GetIP(context);
- data->bt->len++;
- if (data->bt->len == data->max)
- return (_URC_END_OF_STACK);
- }
-
- return (_URC_NO_REASON);
-}
-
-void
-prof_backtrace(prof_bt_t *bt, unsigned nignore)
-{
- prof_unwind_data_t data = {bt, nignore, PROF_BT_MAX};
-
- cassert(config_prof);
-
- _Unwind_Backtrace(prof_unwind_callback, &data);
-}
-#elif (defined(JEMALLOC_PROF_GCC))
-void
-prof_backtrace(prof_bt_t *bt, unsigned nignore)
-{
-#define BT_FRAME(i) \
- if ((i) < nignore + PROF_BT_MAX) { \
- void *p; \
- if (__builtin_frame_address(i) == 0) \
- return; \
- p = __builtin_return_address(i); \
- if (p == NULL) \
- return; \
- if (i >= nignore) { \
- bt->vec[(i) - nignore] = p; \
- bt->len = (i) - nignore + 1; \
- } \
- } else \
- return;
-
- cassert(config_prof);
- assert(nignore <= 3);
-
- BT_FRAME(0)
- BT_FRAME(1)
- BT_FRAME(2)
- BT_FRAME(3)
- BT_FRAME(4)
- BT_FRAME(5)
- BT_FRAME(6)
- BT_FRAME(7)
- BT_FRAME(8)
- BT_FRAME(9)
-
- BT_FRAME(10)
- BT_FRAME(11)
- BT_FRAME(12)
- BT_FRAME(13)
- BT_FRAME(14)
- BT_FRAME(15)
- BT_FRAME(16)
- BT_FRAME(17)
- BT_FRAME(18)
- BT_FRAME(19)
-
- BT_FRAME(20)
- BT_FRAME(21)
- BT_FRAME(22)
- BT_FRAME(23)
- BT_FRAME(24)
- BT_FRAME(25)
- BT_FRAME(26)
- BT_FRAME(27)
- BT_FRAME(28)
- BT_FRAME(29)
-
- BT_FRAME(30)
- BT_FRAME(31)
- BT_FRAME(32)
- BT_FRAME(33)
- BT_FRAME(34)
- BT_FRAME(35)
- BT_FRAME(36)
- BT_FRAME(37)
- BT_FRAME(38)
- BT_FRAME(39)
-
- BT_FRAME(40)
- BT_FRAME(41)
- BT_FRAME(42)
- BT_FRAME(43)
- BT_FRAME(44)
- BT_FRAME(45)
- BT_FRAME(46)
- BT_FRAME(47)
- BT_FRAME(48)
- BT_FRAME(49)
-
- BT_FRAME(50)
- BT_FRAME(51)
- BT_FRAME(52)
- BT_FRAME(53)
- BT_FRAME(54)
- BT_FRAME(55)
- BT_FRAME(56)
- BT_FRAME(57)
- BT_FRAME(58)
- BT_FRAME(59)
-
- BT_FRAME(60)
- BT_FRAME(61)
- BT_FRAME(62)
- BT_FRAME(63)
- BT_FRAME(64)
- BT_FRAME(65)
- BT_FRAME(66)
- BT_FRAME(67)
- BT_FRAME(68)
- BT_FRAME(69)
-
- BT_FRAME(70)
- BT_FRAME(71)
- BT_FRAME(72)
- BT_FRAME(73)
- BT_FRAME(74)
- BT_FRAME(75)
- BT_FRAME(76)
- BT_FRAME(77)
- BT_FRAME(78)
- BT_FRAME(79)
-
- BT_FRAME(80)
- BT_FRAME(81)
- BT_FRAME(82)
- BT_FRAME(83)
- BT_FRAME(84)
- BT_FRAME(85)
- BT_FRAME(86)
- BT_FRAME(87)
- BT_FRAME(88)
- BT_FRAME(89)
-
- BT_FRAME(90)
- BT_FRAME(91)
- BT_FRAME(92)
- BT_FRAME(93)
- BT_FRAME(94)
- BT_FRAME(95)
- BT_FRAME(96)
- BT_FRAME(97)
- BT_FRAME(98)
- BT_FRAME(99)
-
- BT_FRAME(100)
- BT_FRAME(101)
- BT_FRAME(102)
- BT_FRAME(103)
- BT_FRAME(104)
- BT_FRAME(105)
- BT_FRAME(106)
- BT_FRAME(107)
- BT_FRAME(108)
- BT_FRAME(109)
-
- BT_FRAME(110)
- BT_FRAME(111)
- BT_FRAME(112)
- BT_FRAME(113)
- BT_FRAME(114)
- BT_FRAME(115)
- BT_FRAME(116)
- BT_FRAME(117)
- BT_FRAME(118)
- BT_FRAME(119)
-
- BT_FRAME(120)
- BT_FRAME(121)
- BT_FRAME(122)
- BT_FRAME(123)
- BT_FRAME(124)
- BT_FRAME(125)
- BT_FRAME(126)
- BT_FRAME(127)
-
- /* Extras to compensate for nignore. */
- BT_FRAME(128)
- BT_FRAME(129)
- BT_FRAME(130)
-#undef BT_FRAME
-}
-#else
-void
-prof_backtrace(prof_bt_t *bt, unsigned nignore)
-{
-
- cassert(config_prof);
- assert(false);
-}
-#endif
-
-prof_thr_cnt_t *
-prof_lookup(prof_bt_t *bt)
-{
- union {
- prof_thr_cnt_t *p;
- void *v;
- } ret;
- prof_tdata_t *prof_tdata;
-
- cassert(config_prof);
-
- prof_tdata = prof_tdata_get(false);
- if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
- return (NULL);
-
- if (ckh_search(&prof_tdata->bt2cnt, bt, NULL, &ret.v)) {
- union {
- prof_bt_t *p;
- void *v;
- } btkey;
- union {
- prof_ctx_t *p;
- void *v;
- } ctx;
- bool new_ctx;
-
- /*
- * This thread's cache lacks bt. Look for it in the global
- * cache.
- */
- prof_enter(prof_tdata);
- if (ckh_search(&bt2ctx, bt, &btkey.v, &ctx.v)) {
- /* bt has never been seen before. Insert it. */
- ctx.v = imalloc(sizeof(prof_ctx_t));
- if (ctx.v == NULL) {
- prof_leave(prof_tdata);
- return (NULL);
- }
- btkey.p = bt_dup(bt);
- if (btkey.v == NULL) {
- prof_leave(prof_tdata);
- idalloc(ctx.v);
- return (NULL);
- }
- ctx.p->bt = btkey.p;
- ctx.p->lock = prof_ctx_mutex_choose();
- /*
- * Set nlimbo to 1, in order to avoid a race condition
- * with prof_ctx_merge()/prof_ctx_destroy().
- */
- ctx.p->nlimbo = 1;
- memset(&ctx.p->cnt_merged, 0, sizeof(prof_cnt_t));
- ql_new(&ctx.p->cnts_ql);
- if (ckh_insert(&bt2ctx, btkey.v, ctx.v)) {
- /* OOM. */
- prof_leave(prof_tdata);
- idalloc(btkey.v);
- idalloc(ctx.v);
- return (NULL);
- }
- new_ctx = true;
- } else {
- /*
- * Increment nlimbo, in order to avoid a race condition
- * with prof_ctx_merge()/prof_ctx_destroy().
- */
- malloc_mutex_lock(ctx.p->lock);
- ctx.p->nlimbo++;
- malloc_mutex_unlock(ctx.p->lock);
- new_ctx = false;
- }
- prof_leave(prof_tdata);
-
-		/* Link a prof_thr_cnt_t into ctx for this thread. */
- if (ckh_count(&prof_tdata->bt2cnt) == PROF_TCMAX) {
- assert(ckh_count(&prof_tdata->bt2cnt) > 0);
- /*
- * Flush the least recently used cnt in order to keep
- * bt2cnt from becoming too large.
- */
- ret.p = ql_last(&prof_tdata->lru_ql, lru_link);
- assert(ret.v != NULL);
- if (ckh_remove(&prof_tdata->bt2cnt, ret.p->ctx->bt,
- NULL, NULL))
- assert(false);
- ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
- prof_ctx_merge(ret.p->ctx, ret.p);
- /* ret can now be re-used. */
- } else {
- assert(ckh_count(&prof_tdata->bt2cnt) < PROF_TCMAX);
- /* Allocate and partially initialize a new cnt. */
- ret.v = imalloc(sizeof(prof_thr_cnt_t));
- if (ret.p == NULL) {
- if (new_ctx)
- prof_ctx_destroy(ctx.p);
- return (NULL);
- }
- ql_elm_new(ret.p, cnts_link);
- ql_elm_new(ret.p, lru_link);
- }
- /* Finish initializing ret. */
- ret.p->ctx = ctx.p;
- ret.p->epoch = 0;
- memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
- if (ckh_insert(&prof_tdata->bt2cnt, btkey.v, ret.v)) {
- if (new_ctx)
- prof_ctx_destroy(ctx.p);
- idalloc(ret.v);
- return (NULL);
- }
- ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
- malloc_mutex_lock(ctx.p->lock);
- ql_tail_insert(&ctx.p->cnts_ql, ret.p, cnts_link);
- ctx.p->nlimbo--;
- malloc_mutex_unlock(ctx.p->lock);
- } else {
- /* Move ret to the front of the LRU. */
- ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
- ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
- }
-
- return (ret.p);
-}
-
-static bool
-prof_flush(bool propagate_err)
-{
- bool ret = false;
- ssize_t err;
-
- cassert(config_prof);
-
- err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
- if (err == -1) {
- if (propagate_err == false) {
- malloc_write("<jemalloc>: write() failed during heap "
- "profile flush\n");
- if (opt_abort)
- abort();
- }
- ret = true;
- }
- prof_dump_buf_end = 0;
-
- return (ret);
-}
-
-static bool
-prof_write(bool propagate_err, const char *s)
-{
- unsigned i, slen, n;
-
- cassert(config_prof);
-
- i = 0;
- slen = strlen(s);
- while (i < slen) {
- /* Flush the buffer if it is full. */
- if (prof_dump_buf_end == PROF_DUMP_BUFSIZE)
- if (prof_flush(propagate_err) && propagate_err)
- return (true);
-
- if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
- /* Finish writing. */
- n = slen - i;
- } else {
- /* Write as much of s as will fit. */
- n = PROF_DUMP_BUFSIZE - prof_dump_buf_end;
- }
- memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
- prof_dump_buf_end += n;
- i += n;
- }
-
- return (false);
-}
-
-JEMALLOC_ATTR(format(printf, 2, 3))
-static bool
-prof_printf(bool propagate_err, const char *format, ...)
-{
- bool ret;
- va_list ap;
- char buf[PROF_PRINTF_BUFSIZE];
-
- va_start(ap, format);
- malloc_vsnprintf(buf, sizeof(buf), format, ap);
- va_end(ap);
- ret = prof_write(propagate_err, buf);
-
- return (ret);
-}
-
-static void
-prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx)
-{
- prof_thr_cnt_t *thr_cnt;
- prof_cnt_t tcnt;
-
- cassert(config_prof);
-
- malloc_mutex_lock(ctx->lock);
-
- memcpy(&ctx->cnt_summed, &ctx->cnt_merged, sizeof(prof_cnt_t));
- ql_foreach(thr_cnt, &ctx->cnts_ql, cnts_link) {
- volatile unsigned *epoch = &thr_cnt->epoch;
-
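- /*
- * Writers bump epoch to an odd value before mutating cnts and
- * back to an even value afterward, so retry until the copy
- * below was taken while epoch was even and unchanged.
- */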
- while (true) {
- unsigned epoch0 = *epoch;
-
- /* Make sure epoch is even. */
- if (epoch0 & 1U)
- continue;
-
- memcpy(&tcnt, &thr_cnt->cnts, sizeof(prof_cnt_t));
-
- /* Terminate if epoch didn't change while reading. */
- if (*epoch == epoch0)
- break;
- }
-
- ctx->cnt_summed.curobjs += tcnt.curobjs;
- ctx->cnt_summed.curbytes += tcnt.curbytes;
- if (opt_prof_accum) {
- ctx->cnt_summed.accumobjs += tcnt.accumobjs;
- ctx->cnt_summed.accumbytes += tcnt.accumbytes;
- }
- }
-
- if (ctx->cnt_summed.curobjs != 0)
- (*leak_nctx)++;
-
- /* Add to cnt_all. */
- cnt_all->curobjs += ctx->cnt_summed.curobjs;
- cnt_all->curbytes += ctx->cnt_summed.curbytes;
- if (opt_prof_accum) {
- cnt_all->accumobjs += ctx->cnt_summed.accumobjs;
- cnt_all->accumbytes += ctx->cnt_summed.accumbytes;
- }
-
- malloc_mutex_unlock(ctx->lock);
-}
-
-static void
-prof_ctx_destroy(prof_ctx_t *ctx)
-{
- prof_tdata_t *prof_tdata;
-
- cassert(config_prof);
-
- /*
- * Check that ctx is still unused by any thread cache before destroying
- * it. prof_lookup() increments ctx->nlimbo in order to avoid a race
- * condition with this function, as does prof_ctx_merge() in order to
- * avoid a race between the main body of prof_ctx_merge() and entry
- * into this function.
- */
- prof_tdata = prof_tdata_get(false);
- assert((uintptr_t)prof_tdata > (uintptr_t)PROF_TDATA_STATE_MAX);
- prof_enter(prof_tdata);
- malloc_mutex_lock(ctx->lock);
- if (ql_first(&ctx->cnts_ql) == NULL && ctx->cnt_merged.curobjs == 0 &&
- ctx->nlimbo == 1) {
- assert(ctx->cnt_merged.curbytes == 0);
- assert(ctx->cnt_merged.accumobjs == 0);
- assert(ctx->cnt_merged.accumbytes == 0);
- /* Remove ctx from bt2ctx. */
- if (ckh_remove(&bt2ctx, ctx->bt, NULL, NULL))
- assert(false);
- prof_leave(prof_tdata);
- /* Destroy ctx. */
- malloc_mutex_unlock(ctx->lock);
- bt_destroy(ctx->bt);
- idalloc(ctx);
- } else {
- /*
- * Compensate for increment in prof_ctx_merge() or
- * prof_lookup().
- */
- ctx->nlimbo--;
- malloc_mutex_unlock(ctx->lock);
- prof_leave(prof_tdata);
- }
-}
-
-static void
-prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt)
-{
- bool destroy;
-
- cassert(config_prof);
-
- /* Merge cnt stats and detach from ctx. */
- malloc_mutex_lock(ctx->lock);
- ctx->cnt_merged.curobjs += cnt->cnts.curobjs;
- ctx->cnt_merged.curbytes += cnt->cnts.curbytes;
- ctx->cnt_merged.accumobjs += cnt->cnts.accumobjs;
- ctx->cnt_merged.accumbytes += cnt->cnts.accumbytes;
- ql_remove(&ctx->cnts_ql, cnt, cnts_link);
- if (opt_prof_accum == false && ql_first(&ctx->cnts_ql) == NULL &&
- ctx->cnt_merged.curobjs == 0 && ctx->nlimbo == 0) {
- /*
- * Increment ctx->nlimbo in order to keep another thread from
- * winning the race to destroy ctx while this one has ctx->lock
- * dropped. Without this, it would be possible for another
- * thread to:
- *
- * 1) Sample an allocation associated with ctx.
- * 2) Deallocate the sampled object.
- * 3) Successfully prof_ctx_destroy(ctx).
- *
- * The result would be that ctx no longer exists by the time
- * this thread accesses it in prof_ctx_destroy().
- */
- ctx->nlimbo++;
- destroy = true;
- } else
- destroy = false;
- malloc_mutex_unlock(ctx->lock);
- if (destroy)
- prof_ctx_destroy(ctx);
-}
-
-static bool
-prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, prof_bt_t *bt)
-{
- unsigned i;
-
- cassert(config_prof);
-
- /*
- * Current statistics can sum to 0 as a result of unmerged per thread
- * statistics. Additionally, interval- and growth-triggered dumps can
- * occur between the time a ctx is created and when its statistics are
- * filled in. Avoid dumping any ctx that is an artifact of either
- * implementation detail.
- */
- if ((opt_prof_accum == false && ctx->cnt_summed.curobjs == 0) ||
- (opt_prof_accum && ctx->cnt_summed.accumobjs == 0)) {
- assert(ctx->cnt_summed.curobjs == 0);
- assert(ctx->cnt_summed.curbytes == 0);
- assert(ctx->cnt_summed.accumobjs == 0);
- assert(ctx->cnt_summed.accumbytes == 0);
- return (false);
- }
-
- if (prof_printf(propagate_err, "%"PRId64": %"PRId64
- " [%"PRIu64": %"PRIu64"] @",
- ctx->cnt_summed.curobjs, ctx->cnt_summed.curbytes,
- ctx->cnt_summed.accumobjs, ctx->cnt_summed.accumbytes))
- return (true);
-
- for (i = 0; i < bt->len; i++) {
- if (prof_printf(propagate_err, " %#"PRIxPTR,
- (uintptr_t)bt->vec[i]))
- return (true);
- }
-
- if (prof_write(propagate_err, "\n"))
- return (true);
-
- return (false);
-}
-
-static bool
-prof_dump_maps(bool propagate_err)
-{
- int mfd;
- char filename[PATH_MAX + 1];
-
- cassert(config_prof);
-
- malloc_snprintf(filename, sizeof(filename), "/proc/%d/maps",
- (int)getpid());
- mfd = open(filename, O_RDONLY);
- if (mfd != -1) {
- ssize_t nread;
-
- if (prof_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
- propagate_err)
- return (true);
- nread = 0;
- do {
- prof_dump_buf_end += nread;
- if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
- /* Make space in prof_dump_buf before read(). */
- if (prof_flush(propagate_err) && propagate_err)
- return (true);
- }
- nread = read(mfd, &prof_dump_buf[prof_dump_buf_end],
- PROF_DUMP_BUFSIZE - prof_dump_buf_end);
- } while (nread > 0);
- close(mfd);
- } else
- return (true);
-
- return (false);
-}
-
-static bool
-prof_dump(bool propagate_err, const char *filename, bool leakcheck)
-{
- prof_tdata_t *prof_tdata;
- prof_cnt_t cnt_all;
- size_t tabind;
- union {
- prof_bt_t *p;
- void *v;
- } bt;
- union {
- prof_ctx_t *p;
- void *v;
- } ctx;
- size_t leak_nctx;
-
- cassert(config_prof);
-
- prof_tdata = prof_tdata_get(false);
- if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
- return (true);
- prof_enter(prof_tdata);
- prof_dump_fd = creat(filename, 0644);
- if (prof_dump_fd == -1) {
- if (propagate_err == false) {
- malloc_printf(
- "<jemalloc>: creat(\"%s\"), 0644) failed\n",
- filename);
- if (opt_abort)
- abort();
- }
- goto label_error;
- }
-
- /* Merge per thread profile stats, and sum them in cnt_all. */
- memset(&cnt_all, 0, sizeof(prof_cnt_t));
- leak_nctx = 0;
- for (tabind = 0; ckh_iter(&bt2ctx, &tabind, NULL, &ctx.v) == false;)
- prof_ctx_sum(ctx.p, &cnt_all, &leak_nctx);
-
- /* Dump profile header. */
- if (opt_lg_prof_sample == 0) {
- if (prof_printf(propagate_err,
- "heap profile: %"PRId64": %"PRId64
- " [%"PRIu64": %"PRIu64"] @ heapprofile\n",
- cnt_all.curobjs, cnt_all.curbytes,
- cnt_all.accumobjs, cnt_all.accumbytes))
- goto label_error;
- } else {
- if (prof_printf(propagate_err,
- "heap profile: %"PRId64": %"PRId64
- " [%"PRIu64": %"PRIu64"] @ heap_v2/%"PRIu64"\n",
- cnt_all.curobjs, cnt_all.curbytes,
- cnt_all.accumobjs, cnt_all.accumbytes,
- ((uint64_t)1U << opt_lg_prof_sample)))
- goto label_error;
- }
-
- /* Dump per ctx profile stats. */
- for (tabind = 0; ckh_iter(&bt2ctx, &tabind, &bt.v, &ctx.v)
- == false;) {
- if (prof_dump_ctx(propagate_err, ctx.p, bt.p))
- goto label_error;
- }
-
- /* Dump /proc/<pid>/maps if possible. */
- if (prof_dump_maps(propagate_err))
- goto label_error;
-
- if (prof_flush(propagate_err))
- goto label_error;
- close(prof_dump_fd);
- prof_leave(prof_tdata);
-
- if (leakcheck && cnt_all.curbytes != 0) {
- malloc_printf("<jemalloc>: Leak summary: %"PRId64" byte%s, %"
- PRId64" object%s, %zu context%s\n",
- cnt_all.curbytes, (cnt_all.curbytes != 1) ? "s" : "",
- cnt_all.curobjs, (cnt_all.curobjs != 1) ? "s" : "",
- leak_nctx, (leak_nctx != 1) ? "s" : "");
- malloc_printf(
- "<jemalloc>: Run pprof on \"%s\" for leak detail\n",
- filename);
- }
-
- return (false);
-label_error:
- prof_leave(prof_tdata);
- return (true);
-}
-
-#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
-static void
-prof_dump_filename(char *filename, char v, int64_t vseq)
-{
-
- cassert(config_prof);
-
- if (vseq != UINT64_C(0xffffffffffffffff)) {
- /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
- malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
- "%s.%d.%"PRIu64".%c%"PRId64".heap",
- opt_prof_prefix, (int)getpid(), prof_dump_seq, v, vseq);
- } else {
- /* "<prefix>.<pid>.<seq>.<v>.heap" */
- malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
- "%s.%d.%"PRIu64".%c.heap",
- opt_prof_prefix, (int)getpid(), prof_dump_seq, v);
- }
- prof_dump_seq++;
-}
-
-static void
-prof_fdump(void)
-{
- char filename[DUMP_FILENAME_BUFSIZE];
-
- cassert(config_prof);
-
- if (prof_booted == false)
- return;
-
- if (opt_prof_final && opt_prof_prefix[0] != '\0') {
- malloc_mutex_lock(&prof_dump_seq_mtx);
- prof_dump_filename(filename, 'f', UINT64_C(0xffffffffffffffff));
- malloc_mutex_unlock(&prof_dump_seq_mtx);
- prof_dump(false, filename, opt_prof_leak);
- }
-}
-
-void
-prof_idump(void)
-{
- prof_tdata_t *prof_tdata;
- char filename[PATH_MAX + 1];
-
- cassert(config_prof);
-
- if (prof_booted == false)
- return;
- prof_tdata = prof_tdata_get(false);
- if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
- return;
- if (prof_tdata->enq) {
- prof_tdata->enq_idump = true;
- return;
- }
-
- if (opt_prof_prefix[0] != '\0') {
- malloc_mutex_lock(&prof_dump_seq_mtx);
- prof_dump_filename(filename, 'i', prof_dump_iseq);
- prof_dump_iseq++;
- malloc_mutex_unlock(&prof_dump_seq_mtx);
- prof_dump(false, filename, false);
- }
-}
-
-bool
-prof_mdump(const char *filename)
-{
- char filename_buf[DUMP_FILENAME_BUFSIZE];
-
- cassert(config_prof);
-
- if (opt_prof == false || prof_booted == false)
- return (true);
-
- if (filename == NULL) {
- /* No filename specified, so automatically generate one. */
- if (opt_prof_prefix[0] == '\0')
- return (true);
- malloc_mutex_lock(&prof_dump_seq_mtx);
- prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
- prof_dump_mseq++;
- malloc_mutex_unlock(&prof_dump_seq_mtx);
- filename = filename_buf;
- }
- return (prof_dump(true, filename, false));
-}
-
-void
-prof_gdump(void)
-{
- prof_tdata_t *prof_tdata;
- char filename[DUMP_FILENAME_BUFSIZE];
-
- cassert(config_prof);
-
- if (prof_booted == false)
- return;
- prof_tdata = prof_tdata_get(false);
- if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
- return;
- if (prof_tdata->enq) {
- prof_tdata->enq_gdump = true;
- return;
- }
-
- if (opt_prof_prefix[0] != '\0') {
- malloc_mutex_lock(&prof_dump_seq_mtx);
- prof_dump_filename(filename, 'u', prof_dump_useq);
- prof_dump_useq++;
- malloc_mutex_unlock(&prof_dump_seq_mtx);
- prof_dump(false, filename, false);
- }
-}
-
-static void
-prof_bt_hash(const void *key, size_t r_hash[2])
-{
- prof_bt_t *bt = (prof_bt_t *)key;
-
- cassert(config_prof);
-
- hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash);
-}
-
-static bool
-prof_bt_keycomp(const void *k1, const void *k2)
-{
- const prof_bt_t *bt1 = (prof_bt_t *)k1;
- const prof_bt_t *bt2 = (prof_bt_t *)k2;
-
- cassert(config_prof);
-
- if (bt1->len != bt2->len)
- return (false);
- return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
-}
-
-static malloc_mutex_t *
-prof_ctx_mutex_choose(void)
-{
- unsigned nctxs = atomic_add_u(&cum_ctxs, 1);
-
- return (&ctx_locks[(nctxs - 1) % PROF_NCTX_LOCKS]);
-}
-
-prof_tdata_t *
-prof_tdata_init(void)
-{
- prof_tdata_t *prof_tdata;
-
- cassert(config_prof);
-
- /* Initialize an empty cache for this thread. */
- prof_tdata = (prof_tdata_t *)imalloc(sizeof(prof_tdata_t));
- if (prof_tdata == NULL)
- return (NULL);
-
- if (ckh_new(&prof_tdata->bt2cnt, PROF_CKH_MINITEMS,
- prof_bt_hash, prof_bt_keycomp)) {
- idalloc(prof_tdata);
- return (NULL);
- }
- ql_new(&prof_tdata->lru_ql);
-
- prof_tdata->vec = imalloc(sizeof(void *) * PROF_BT_MAX);
- if (prof_tdata->vec == NULL) {
- ckh_delete(&prof_tdata->bt2cnt);
- idalloc(prof_tdata);
- return (NULL);
- }
-
- prof_tdata->prng_state = 0;
- prof_tdata->threshold = 0;
- prof_tdata->accum = 0;
-
- prof_tdata->enq = false;
- prof_tdata->enq_idump = false;
- prof_tdata->enq_gdump = false;
-
- prof_tdata_tsd_set(&prof_tdata);
-
- return (prof_tdata);
-}
-
-void
-prof_tdata_cleanup(void *arg)
-{
- prof_thr_cnt_t *cnt;
- prof_tdata_t *prof_tdata = *(prof_tdata_t **)arg;
-
- cassert(config_prof);
-
- if (prof_tdata == PROF_TDATA_STATE_REINCARNATED) {
- /*
- * Another destructor deallocated memory after this destructor
- * was called. Reset prof_tdata to PROF_TDATA_STATE_PURGATORY
- * in order to receive another callback.
- */
- prof_tdata = PROF_TDATA_STATE_PURGATORY;
- prof_tdata_tsd_set(&prof_tdata);
- } else if (prof_tdata == PROF_TDATA_STATE_PURGATORY) {
- /*
- * The previous time this destructor was called, we set the key
- * to PROF_TDATA_STATE_PURGATORY so that other destructors
- * wouldn't cause re-creation of the prof_tdata. This time, do
- * nothing, so that the destructor will not be called again.
- */
- } else if (prof_tdata != NULL) {
- /*
- * Delete the hash table. All of its contents can still be
- * iterated over via the LRU.
- */
- ckh_delete(&prof_tdata->bt2cnt);
- /*
- * Iteratively merge cnt's into the global stats and delete
- * them.
- */
- while ((cnt = ql_last(&prof_tdata->lru_ql, lru_link)) != NULL) {
- ql_remove(&prof_tdata->lru_ql, cnt, lru_link);
- prof_ctx_merge(cnt->ctx, cnt);
- idalloc(cnt);
- }
- idalloc(prof_tdata->vec);
- idalloc(prof_tdata);
- prof_tdata = PROF_TDATA_STATE_PURGATORY;
- prof_tdata_tsd_set(&prof_tdata);
- }
-}
-
-void
-prof_boot0(void)
-{
-
- cassert(config_prof);
-
- memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
- sizeof(PROF_PREFIX_DEFAULT));
-}
-
-void
-prof_boot1(void)
-{
-
- cassert(config_prof);
-
- /*
- * opt_prof and prof_promote must be in their final state before any
- * arenas are initialized, so this function must be executed early.
- */
-
- if (opt_prof_leak && opt_prof == false) {
- /*
- * Enable opt_prof, but in such a way that profiles are never
- * automatically dumped.
- */
- opt_prof = true;
- opt_prof_gdump = false;
- } else if (opt_prof) {
- if (opt_lg_prof_interval >= 0) {
- prof_interval = (((uint64_t)1U) <<
- opt_lg_prof_interval);
- }
- }
-
- prof_promote = (opt_prof && opt_lg_prof_sample > LG_PAGE);
-}
-
-bool
-prof_boot2(void)
-{
-
- cassert(config_prof);
-
- if (opt_prof) {
- unsigned i;
-
- if (ckh_new(&bt2ctx, PROF_CKH_MINITEMS, prof_bt_hash,
- prof_bt_keycomp))
- return (true);
- if (malloc_mutex_init(&bt2ctx_mtx))
- return (true);
- if (prof_tdata_tsd_boot()) {
- malloc_write(
- "<jemalloc>: Error in pthread_key_create()\n");
- abort();
- }
-
- if (malloc_mutex_init(&prof_dump_seq_mtx))
- return (true);
-
- if (atexit(prof_fdump) != 0) {
- malloc_write("<jemalloc>: Error in atexit()\n");
- if (opt_abort)
- abort();
- }
-
- ctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS *
- sizeof(malloc_mutex_t));
- if (ctx_locks == NULL)
- return (true);
- for (i = 0; i < PROF_NCTX_LOCKS; i++) {
- if (malloc_mutex_init(&ctx_locks[i]))
- return (true);
- }
- }
-
-#ifdef JEMALLOC_PROF_LIBGCC
- /*
- * Cause the backtracing machinery to allocate its internal state
- * before enabling profiling.
- */
- _Unwind_Backtrace(prof_unwind_init_callback, NULL);
-#endif
-
- prof_booted = true;
-
- return (false);
-}
-
-void
-prof_prefork(void)
-{
-
- if (opt_prof) {
- unsigned i;
-
- malloc_mutex_lock(&bt2ctx_mtx);
- malloc_mutex_lock(&prof_dump_seq_mtx);
- for (i = 0; i < PROF_NCTX_LOCKS; i++)
- malloc_mutex_lock(&ctx_locks[i]);
- }
-}
-
-void
-prof_postfork_parent(void)
-{
-
- if (opt_prof) {
- unsigned i;
-
- for (i = 0; i < PROF_NCTX_LOCKS; i++)
- malloc_mutex_postfork_parent(&ctx_locks[i]);
- malloc_mutex_postfork_parent(&prof_dump_seq_mtx);
- malloc_mutex_postfork_parent(&bt2ctx_mtx);
- }
-}
-
-void
-prof_postfork_child(void)
-{
-
- if (opt_prof) {
- unsigned i;
-
- for (i = 0; i < PROF_NCTX_LOCKS; i++)
- malloc_mutex_postfork_child(&ctx_locks[i]);
- malloc_mutex_postfork_child(&prof_dump_seq_mtx);
- malloc_mutex_postfork_child(&bt2ctx_mtx);
- }
-}
-
-/******************************************************************************/
diff --git a/extra/jemalloc/src/quarantine.c b/extra/jemalloc/src/quarantine.c
deleted file mode 100644
index f96a948d5c7..00000000000
--- a/extra/jemalloc/src/quarantine.c
+++ /dev/null
@@ -1,190 +0,0 @@
-#define JEMALLOC_QUARANTINE_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/*
- * quarantine pointers close to NULL are used to encode state information that
- * is used for cleaning up during thread shutdown.
- */
-#define QUARANTINE_STATE_REINCARNATED ((quarantine_t *)(uintptr_t)1)
-#define QUARANTINE_STATE_PURGATORY ((quarantine_t *)(uintptr_t)2)
-#define QUARANTINE_STATE_MAX QUARANTINE_STATE_PURGATORY
-
-/******************************************************************************/
-/* Data. */
-
-malloc_tsd_data(, quarantine, quarantine_t *, NULL)
-
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static quarantine_t *quarantine_grow(quarantine_t *quarantine);
-static void quarantine_drain_one(quarantine_t *quarantine);
-static void quarantine_drain(quarantine_t *quarantine, size_t upper_bound);
-
-/******************************************************************************/
-
-quarantine_t *
-quarantine_init(size_t lg_maxobjs)
-{
- quarantine_t *quarantine;
-
- quarantine = (quarantine_t *)imalloc(offsetof(quarantine_t, objs) +
- ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)));
- if (quarantine == NULL)
- return (NULL);
- quarantine->curbytes = 0;
- quarantine->curobjs = 0;
- quarantine->first = 0;
- quarantine->lg_maxobjs = lg_maxobjs;
-
- quarantine_tsd_set(&quarantine);
-
- return (quarantine);
-}
-
-static quarantine_t *
-quarantine_grow(quarantine_t *quarantine)
-{
- quarantine_t *ret;
-
- ret = quarantine_init(quarantine->lg_maxobjs + 1);
- if (ret == NULL) {
- quarantine_drain_one(quarantine);
- return (quarantine);
- }
-
- ret->curbytes = quarantine->curbytes;
- ret->curobjs = quarantine->curobjs;
- if (quarantine->first + quarantine->curobjs <= (ZU(1) <<
- quarantine->lg_maxobjs)) {
- /* objs ring buffer data are contiguous. */
- memcpy(ret->objs, &quarantine->objs[quarantine->first],
- quarantine->curobjs * sizeof(quarantine_obj_t));
- } else {
- /* objs ring buffer data wrap around. */
- size_t ncopy_a = (ZU(1) << quarantine->lg_maxobjs) -
- quarantine->first;
- size_t ncopy_b = quarantine->curobjs - ncopy_a;
-
- memcpy(ret->objs, &quarantine->objs[quarantine->first], ncopy_a
- * sizeof(quarantine_obj_t));
- memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
- sizeof(quarantine_obj_t));
- }
- idalloc(quarantine);
-
- return (ret);
-}
-
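- /*
- * Free the oldest quarantined object and advance the ring buffer head;
- * the ring holds (1 << lg_maxobjs) slots, hence the mask when wrapping
- * first.
- */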
-static void
-quarantine_drain_one(quarantine_t *quarantine)
-{
- quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
- assert(obj->usize == isalloc(obj->ptr, config_prof));
- idalloc(obj->ptr);
- quarantine->curbytes -= obj->usize;
- quarantine->curobjs--;
- quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
- quarantine->lg_maxobjs) - 1);
-}
-
-static void
-quarantine_drain(quarantine_t *quarantine, size_t upper_bound)
-{
-
- while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0)
- quarantine_drain_one(quarantine);
-}
-
-void
-quarantine(void *ptr)
-{
- quarantine_t *quarantine;
- size_t usize = isalloc(ptr, config_prof);
-
- cassert(config_fill);
- assert(opt_quarantine);
-
- quarantine = *quarantine_tsd_get();
- if ((uintptr_t)quarantine <= (uintptr_t)QUARANTINE_STATE_MAX) {
- if (quarantine == QUARANTINE_STATE_PURGATORY) {
- /*
- * Make a note that quarantine() was called after
- * quarantine_cleanup() was called.
- */
- quarantine = QUARANTINE_STATE_REINCARNATED;
- quarantine_tsd_set(&quarantine);
- }
- idalloc(ptr);
- return;
- }
- /*
- * Drain one or more objects if the quarantine size limit would be
- * exceeded by appending ptr.
- */
- if (quarantine->curbytes + usize > opt_quarantine) {
- size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
- - usize : 0;
- quarantine_drain(quarantine, upper_bound);
- }
- /* Grow the quarantine ring buffer if it's full. */
- if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
- quarantine = quarantine_grow(quarantine);
- /* quarantine_grow() must free a slot if it fails to grow. */
- assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs));
- /* Append ptr if its size doesn't exceed the quarantine size. */
- if (quarantine->curbytes + usize <= opt_quarantine) {
- size_t offset = (quarantine->first + quarantine->curobjs) &
- ((ZU(1) << quarantine->lg_maxobjs) - 1);
- quarantine_obj_t *obj = &quarantine->objs[offset];
- obj->ptr = ptr;
- obj->usize = usize;
- quarantine->curbytes += usize;
- quarantine->curobjs++;
- if (opt_junk)
- memset(ptr, 0x5a, usize);
- } else {
- assert(quarantine->curbytes == 0);
- idalloc(ptr);
- }
-}
-
-void
-quarantine_cleanup(void *arg)
-{
- quarantine_t *quarantine = *(quarantine_t **)arg;
-
- if (quarantine == QUARANTINE_STATE_REINCARNATED) {
- /*
- * Another destructor deallocated memory after this destructor
- * was called. Reset quarantine to QUARANTINE_STATE_PURGATORY
- * in order to receive another callback.
- */
- quarantine = QUARANTINE_STATE_PURGATORY;
- quarantine_tsd_set(&quarantine);
- } else if (quarantine == QUARANTINE_STATE_PURGATORY) {
- /*
- * The previous time this destructor was called, we set the key
- * to QUARANTINE_STATE_PURGATORY so that other destructors
- * wouldn't cause re-creation of the quarantine. This time, do
- * nothing, so that the destructor will not be called again.
- */
- } else if (quarantine != NULL) {
- quarantine_drain(quarantine, 0);
- idalloc(quarantine);
- quarantine = QUARANTINE_STATE_PURGATORY;
- quarantine_tsd_set(&quarantine);
- }
-}
-
-bool
-quarantine_boot(void)
-{
-
- cassert(config_fill);
-
- if (quarantine_tsd_boot())
- return (true);
-
- return (false);
-}
diff --git a/extra/jemalloc/src/rtree.c b/extra/jemalloc/src/rtree.c
deleted file mode 100644
index 90c6935a0ed..00000000000
--- a/extra/jemalloc/src/rtree.c
+++ /dev/null
@@ -1,67 +0,0 @@
-#define JEMALLOC_RTREE_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-rtree_t *
-rtree_new(unsigned bits)
-{
- rtree_t *ret;
- unsigned bits_per_level, height, i;
-
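- /*
- * Choose bits_per_level so that a node of RTREE_NODESIZE bytes holds a
- * pointer for each possible subkey value; level 0 absorbs any remainder
- * when bits is not an exact multiple of bits_per_level.
- */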
- bits_per_level = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(void *)))) - 1;
- height = bits / bits_per_level;
- if (height * bits_per_level != bits)
- height++;
- assert(height * bits_per_level >= bits);
-
- ret = (rtree_t*)base_alloc(offsetof(rtree_t, level2bits) +
- (sizeof(unsigned) * height));
- if (ret == NULL)
- return (NULL);
- memset(ret, 0, offsetof(rtree_t, level2bits) + (sizeof(unsigned) *
- height));
-
- if (malloc_mutex_init(&ret->mutex)) {
- /* Leak the rtree. */
- return (NULL);
- }
- ret->height = height;
- if (bits_per_level * height > bits)
- ret->level2bits[0] = bits % bits_per_level;
- else
- ret->level2bits[0] = bits_per_level;
- for (i = 1; i < height; i++)
- ret->level2bits[i] = bits_per_level;
-
- ret->root = (void**)base_alloc(sizeof(void *) << ret->level2bits[0]);
- if (ret->root == NULL) {
- /*
- * We leak the rtree here, since there's no generic base
- * deallocation.
- */
- return (NULL);
- }
- memset(ret->root, 0, sizeof(void *) << ret->level2bits[0]);
-
- return (ret);
-}
-
-void
-rtree_prefork(rtree_t *rtree)
-{
-
- malloc_mutex_prefork(&rtree->mutex);
-}
-
-void
-rtree_postfork_parent(rtree_t *rtree)
-{
-
- malloc_mutex_postfork_parent(&rtree->mutex);
-}
-
-void
-rtree_postfork_child(rtree_t *rtree)
-{
-
- malloc_mutex_postfork_child(&rtree->mutex);
-}
diff --git a/extra/jemalloc/src/stats.c b/extra/jemalloc/src/stats.c
deleted file mode 100644
index 43f87af6700..00000000000
--- a/extra/jemalloc/src/stats.c
+++ /dev/null
@@ -1,549 +0,0 @@
-#define JEMALLOC_STATS_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
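- /*
- * CTL_GET reads a mallctl value by name.  The _I/_J/_IJ variants
- * translate the name to a MIB once and then patch the arena index (i)
- * and/or the bin/large-run index (j) into the appropriate MIB slots.
- */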
-#define CTL_GET(n, v, t) do { \
- size_t sz = sizeof(t); \
- xmallctl(n, v, &sz, NULL, 0); \
-} while (0)
-
-#define CTL_I_GET(n, v, t) do { \
- size_t mib[6]; \
- size_t miblen = sizeof(mib) / sizeof(size_t); \
- size_t sz = sizeof(t); \
- xmallctlnametomib(n, mib, &miblen); \
- mib[2] = i; \
- xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
-} while (0)
-
-#define CTL_J_GET(n, v, t) do { \
- size_t mib[6]; \
- size_t miblen = sizeof(mib) / sizeof(size_t); \
- size_t sz = sizeof(t); \
- xmallctlnametomib(n, mib, &miblen); \
- mib[2] = j; \
- xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
-} while (0)
-
-#define CTL_IJ_GET(n, v, t) do { \
- size_t mib[6]; \
- size_t miblen = sizeof(mib) / sizeof(size_t); \
- size_t sz = sizeof(t); \
- xmallctlnametomib(n, mib, &miblen); \
- mib[2] = i; \
- mib[4] = j; \
- xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
-} while (0)
-
-/******************************************************************************/
-/* Data. */
-
-bool opt_stats_print = false;
-
-size_t stats_cactive = 0;
-
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static void stats_arena_bins_print(void (*write_cb)(void *, const char *),
- void *cbopaque, unsigned i);
-static void stats_arena_lruns_print(void (*write_cb)(void *, const char *),
- void *cbopaque, unsigned i);
-static void stats_arena_print(void (*write_cb)(void *, const char *),
- void *cbopaque, unsigned i, bool bins, bool large);
-
-/******************************************************************************/
-
-static void
-stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
- unsigned i)
-{
- size_t page;
- bool config_tcache;
- unsigned nbins, j, gap_start;
-
- CTL_GET("arenas.page", &page, size_t);
-
- CTL_GET("config.tcache", &config_tcache, bool);
- if (config_tcache) {
- malloc_cprintf(write_cb, cbopaque,
- "bins: bin size regs pgs allocated nmalloc"
- " ndalloc nrequests nfills nflushes"
- " newruns reruns curruns\n");
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "bins: bin size regs pgs allocated nmalloc"
- " ndalloc newruns reruns curruns\n");
- }
- CTL_GET("arenas.nbins", &nbins, unsigned);
- for (j = 0, gap_start = UINT_MAX; j < nbins; j++) {
- uint64_t nruns;
-
- CTL_IJ_GET("stats.arenas.0.bins.0.nruns", &nruns, uint64_t);
- if (nruns == 0) {
- if (gap_start == UINT_MAX)
- gap_start = j;
- } else {
- size_t reg_size, run_size, allocated;
- uint32_t nregs;
- uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
- uint64_t reruns;
- size_t curruns;
-
- if (gap_start != UINT_MAX) {
- if (j > gap_start + 1) {
- /* Gap of more than one size class. */
- malloc_cprintf(write_cb, cbopaque,
- "[%u..%u]\n", gap_start,
- j - 1);
- } else {
- /* Gap of one size class. */
- malloc_cprintf(write_cb, cbopaque,
- "[%u]\n", gap_start);
- }
- gap_start = UINT_MAX;
- }
- CTL_J_GET("arenas.bin.0.size", &reg_size, size_t);
- CTL_J_GET("arenas.bin.0.nregs", &nregs, uint32_t);
- CTL_J_GET("arenas.bin.0.run_size", &run_size, size_t);
- CTL_IJ_GET("stats.arenas.0.bins.0.allocated",
- &allocated, size_t);
- CTL_IJ_GET("stats.arenas.0.bins.0.nmalloc",
- &nmalloc, uint64_t);
- CTL_IJ_GET("stats.arenas.0.bins.0.ndalloc",
- &ndalloc, uint64_t);
- if (config_tcache) {
- CTL_IJ_GET("stats.arenas.0.bins.0.nrequests",
- &nrequests, uint64_t);
- CTL_IJ_GET("stats.arenas.0.bins.0.nfills",
- &nfills, uint64_t);
- CTL_IJ_GET("stats.arenas.0.bins.0.nflushes",
- &nflushes, uint64_t);
- }
- CTL_IJ_GET("stats.arenas.0.bins.0.nreruns", &reruns,
- uint64_t);
- CTL_IJ_GET("stats.arenas.0.bins.0.curruns", &curruns,
- size_t);
- if (config_tcache) {
- malloc_cprintf(write_cb, cbopaque,
- "%13u %5zu %4u %3zu %12zu %12"PRIu64
- " %12"PRIu64" %12"PRIu64" %12"PRIu64
- " %12"PRIu64" %12"PRIu64" %12"PRIu64
- " %12zu\n",
- j, reg_size, nregs, run_size / page,
- allocated, nmalloc, ndalloc, nrequests,
- nfills, nflushes, nruns, reruns, curruns);
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "%13u %5zu %4u %3zu %12zu %12"PRIu64
- " %12"PRIu64" %12"PRIu64" %12"PRIu64
- " %12zu\n",
- j, reg_size, nregs, run_size / page,
- allocated, nmalloc, ndalloc, nruns, reruns,
- curruns);
- }
- }
- }
- if (gap_start != UINT_MAX) {
- if (j > gap_start + 1) {
- /* Gap of more than one size class. */
- malloc_cprintf(write_cb, cbopaque, "[%u..%u]\n",
- gap_start, j - 1);
- } else {
- /* Gap of one size class. */
- malloc_cprintf(write_cb, cbopaque, "[%u]\n", gap_start);
- }
- }
-}
-
-static void
-stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
- unsigned i)
-{
- size_t page, nlruns, j;
- ssize_t gap_start;
-
- CTL_GET("arenas.page", &page, size_t);
-
- malloc_cprintf(write_cb, cbopaque,
- "large: size pages nmalloc ndalloc nrequests"
- " curruns\n");
- CTL_GET("arenas.nlruns", &nlruns, size_t);
- for (j = 0, gap_start = -1; j < nlruns; j++) {
- uint64_t nmalloc, ndalloc, nrequests;
- size_t run_size, curruns;
-
- CTL_IJ_GET("stats.arenas.0.lruns.0.nmalloc", &nmalloc,
- uint64_t);
- CTL_IJ_GET("stats.arenas.0.lruns.0.ndalloc", &ndalloc,
- uint64_t);
- CTL_IJ_GET("stats.arenas.0.lruns.0.nrequests", &nrequests,
- uint64_t);
- if (nrequests == 0) {
- if (gap_start == -1)
- gap_start = j;
- } else {
- CTL_J_GET("arenas.lrun.0.size", &run_size, size_t);
- CTL_IJ_GET("stats.arenas.0.lruns.0.curruns", &curruns,
- size_t);
- if (gap_start != -1) {
- malloc_cprintf(write_cb, cbopaque, "[%zu]\n",
- j - gap_start);
- gap_start = -1;
- }
- malloc_cprintf(write_cb, cbopaque,
- "%13zu %5zu %12"PRIu64" %12"PRIu64" %12"PRIu64
- " %12zu\n",
- run_size, run_size / page, nmalloc, ndalloc,
- nrequests, curruns);
- }
- }
- if (gap_start != -1)
- malloc_cprintf(write_cb, cbopaque, "[%zu]\n", j - gap_start);
-}
-
-static void
-stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
- unsigned i, bool bins, bool large)
-{
- unsigned nthreads;
- const char *dss;
- size_t page, pactive, pdirty, mapped;
- uint64_t npurge, nmadvise, purged;
- size_t small_allocated;
- uint64_t small_nmalloc, small_ndalloc, small_nrequests;
- size_t large_allocated;
- uint64_t large_nmalloc, large_ndalloc, large_nrequests;
-
- CTL_GET("arenas.page", &page, size_t);
-
- CTL_I_GET("stats.arenas.0.nthreads", &nthreads, unsigned);
- malloc_cprintf(write_cb, cbopaque,
- "assigned threads: %u\n", nthreads);
- CTL_I_GET("stats.arenas.0.dss", &dss, const char *);
- malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n",
- dss);
- CTL_I_GET("stats.arenas.0.pactive", &pactive, size_t);
- CTL_I_GET("stats.arenas.0.pdirty", &pdirty, size_t);
- CTL_I_GET("stats.arenas.0.npurge", &npurge, uint64_t);
- CTL_I_GET("stats.arenas.0.nmadvise", &nmadvise, uint64_t);
- CTL_I_GET("stats.arenas.0.purged", &purged, uint64_t);
- malloc_cprintf(write_cb, cbopaque,
- "dirty pages: %zu:%zu active:dirty, %"PRIu64" sweep%s,"
- " %"PRIu64" madvise%s, %"PRIu64" purged\n",
- pactive, pdirty, npurge, npurge == 1 ? "" : "s",
- nmadvise, nmadvise == 1 ? "" : "s", purged);
-
- malloc_cprintf(write_cb, cbopaque,
- " allocated nmalloc ndalloc nrequests\n");
- CTL_I_GET("stats.arenas.0.small.allocated", &small_allocated, size_t);
- CTL_I_GET("stats.arenas.0.small.nmalloc", &small_nmalloc, uint64_t);
- CTL_I_GET("stats.arenas.0.small.ndalloc", &small_ndalloc, uint64_t);
- CTL_I_GET("stats.arenas.0.small.nrequests", &small_nrequests, uint64_t);
- malloc_cprintf(write_cb, cbopaque,
- "small: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
- small_allocated, small_nmalloc, small_ndalloc, small_nrequests);
- CTL_I_GET("stats.arenas.0.large.allocated", &large_allocated, size_t);
- CTL_I_GET("stats.arenas.0.large.nmalloc", &large_nmalloc, uint64_t);
- CTL_I_GET("stats.arenas.0.large.ndalloc", &large_ndalloc, uint64_t);
- CTL_I_GET("stats.arenas.0.large.nrequests", &large_nrequests, uint64_t);
- malloc_cprintf(write_cb, cbopaque,
- "large: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
- large_allocated, large_nmalloc, large_ndalloc, large_nrequests);
- malloc_cprintf(write_cb, cbopaque,
- "total: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
- small_allocated + large_allocated,
- small_nmalloc + large_nmalloc,
- small_ndalloc + large_ndalloc,
- small_nrequests + large_nrequests);
- malloc_cprintf(write_cb, cbopaque, "active: %12zu\n", pactive * page);
- CTL_I_GET("stats.arenas.0.mapped", &mapped, size_t);
- malloc_cprintf(write_cb, cbopaque, "mapped: %12zu\n", mapped);
-
- if (bins)
- stats_arena_bins_print(write_cb, cbopaque, i);
- if (large)
- stats_arena_lruns_print(write_cb, cbopaque, i);
-}
-
-void
-stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *opts)
-{
- int err;
- uint64_t epoch;
- size_t u64sz;
- bool general = true;
- bool merged = true;
- bool unmerged = true;
- bool bins = true;
- bool large = true;
-
- /*
- * Refresh stats, in case mallctl() was called by the application.
- *
- * Check for OOM here, since refreshing the ctl cache can trigger
- * allocation. In practice, none of the subsequent mallctl()-related
- * calls in this function will cause OOM if this one succeeds.
- */
- epoch = 1;
- u64sz = sizeof(uint64_t);
- err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t));
- if (err != 0) {
- if (err == EAGAIN) {
- malloc_write("<jemalloc>: Memory allocation failure in "
- "mallctl(\"epoch\", ...)\n");
- return;
- }
- malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
- "...)\n");
- abort();
- }
-
- if (opts != NULL) {
- unsigned i;
-
- for (i = 0; opts[i] != '\0'; i++) {
- switch (opts[i]) {
- case 'g':
- general = false;
- break;
- case 'm':
- merged = false;
- break;
- case 'a':
- unmerged = false;
- break;
- case 'b':
- bins = false;
- break;
- case 'l':
- large = false;
- break;
- default:;
- }
- }
- }
-
- malloc_cprintf(write_cb, cbopaque,
- "___ Begin jemalloc statistics ___\n");
- if (general) {
- int err;
- const char *cpv;
- bool bv;
- unsigned uv;
- ssize_t ssv;
- size_t sv, bsz, ssz, sssz, cpsz;
-
- bsz = sizeof(bool);
- ssz = sizeof(size_t);
- sssz = sizeof(ssize_t);
- cpsz = sizeof(const char *);
-
- CTL_GET("version", &cpv, const char *);
- malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv);
- CTL_GET("config.debug", &bv, bool);
- malloc_cprintf(write_cb, cbopaque, "Assertions %s\n",
- bv ? "enabled" : "disabled");
-
-#define OPT_WRITE_BOOL(n) \
- if ((err = je_mallctl("opt."#n, &bv, &bsz, NULL, 0)) \
- == 0) { \
- malloc_cprintf(write_cb, cbopaque, \
- " opt."#n": %s\n", bv ? "true" : "false"); \
- }
-#define OPT_WRITE_SIZE_T(n) \
- if ((err = je_mallctl("opt."#n, &sv, &ssz, NULL, 0)) \
- == 0) { \
- malloc_cprintf(write_cb, cbopaque, \
- " opt."#n": %zu\n", sv); \
- }
-#define OPT_WRITE_SSIZE_T(n) \
- if ((err = je_mallctl("opt."#n, &ssv, &sssz, NULL, 0)) \
- == 0) { \
- malloc_cprintf(write_cb, cbopaque, \
- " opt."#n": %zd\n", ssv); \
- }
-#define OPT_WRITE_CHAR_P(n) \
- if ((err = je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0)) \
- == 0) { \
- malloc_cprintf(write_cb, cbopaque, \
- " opt."#n": \"%s\"\n", cpv); \
- }
-
- malloc_cprintf(write_cb, cbopaque,
- "Run-time option settings:\n");
- OPT_WRITE_BOOL(abort)
- OPT_WRITE_SIZE_T(lg_chunk)
- OPT_WRITE_CHAR_P(dss)
- OPT_WRITE_SIZE_T(narenas)
- OPT_WRITE_SSIZE_T(lg_dirty_mult)
- OPT_WRITE_BOOL(stats_print)
- OPT_WRITE_BOOL(junk)
- OPT_WRITE_SIZE_T(quarantine)
- OPT_WRITE_BOOL(redzone)
- OPT_WRITE_BOOL(zero)
- OPT_WRITE_BOOL(utrace)
- OPT_WRITE_BOOL(valgrind)
- OPT_WRITE_BOOL(xmalloc)
- OPT_WRITE_BOOL(tcache)
- OPT_WRITE_SSIZE_T(lg_tcache_max)
- OPT_WRITE_BOOL(prof)
- OPT_WRITE_CHAR_P(prof_prefix)
- OPT_WRITE_BOOL(prof_active)
- OPT_WRITE_SSIZE_T(lg_prof_sample)
- OPT_WRITE_BOOL(prof_accum)
- OPT_WRITE_SSIZE_T(lg_prof_interval)
- OPT_WRITE_BOOL(prof_gdump)
- OPT_WRITE_BOOL(prof_final)
- OPT_WRITE_BOOL(prof_leak)
-
-#undef OPT_WRITE_BOOL
-#undef OPT_WRITE_SIZE_T
-#undef OPT_WRITE_SSIZE_T
-#undef OPT_WRITE_CHAR_P
-
- malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus);
-
- CTL_GET("arenas.narenas", &uv, unsigned);
- malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv);
-
- malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n",
- sizeof(void *));
-
- CTL_GET("arenas.quantum", &sv, size_t);
- malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv);
-
- CTL_GET("arenas.page", &sv, size_t);
- malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);
-
- CTL_GET("opt.lg_dirty_mult", &ssv, ssize_t);
- if (ssv >= 0) {
- malloc_cprintf(write_cb, cbopaque,
- "Min active:dirty page ratio per arena: %u:1\n",
- (1U << ssv));
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "Min active:dirty page ratio per arena: N/A\n");
- }
- if ((err = je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0))
- == 0) {
- malloc_cprintf(write_cb, cbopaque,
- "Maximum thread-cached size class: %zu\n", sv);
- }
- if ((err = je_mallctl("opt.prof", &bv, &bsz, NULL, 0)) == 0 &&
- bv) {
- CTL_GET("opt.lg_prof_sample", &sv, size_t);
- malloc_cprintf(write_cb, cbopaque,
- "Average profile sample interval: %"PRIu64
- " (2^%zu)\n", (((uint64_t)1U) << sv), sv);
-
- CTL_GET("opt.lg_prof_interval", &ssv, ssize_t);
- if (ssv >= 0) {
- malloc_cprintf(write_cb, cbopaque,
- "Average profile dump interval: %"PRIu64
- " (2^%zd)\n",
- (((uint64_t)1U) << ssv), ssv);
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "Average profile dump interval: N/A\n");
- }
- }
- CTL_GET("opt.lg_chunk", &sv, size_t);
- malloc_cprintf(write_cb, cbopaque, "Chunk size: %zu (2^%zu)\n",
- (ZU(1) << sv), sv);
- }
-
- if (config_stats) {
- size_t *cactive;
- size_t allocated, active, mapped;
- size_t chunks_current, chunks_high;
- uint64_t chunks_total;
- size_t huge_allocated;
- uint64_t huge_nmalloc, huge_ndalloc;
-
- CTL_GET("stats.cactive", &cactive, size_t *);
- CTL_GET("stats.allocated", &allocated, size_t);
- CTL_GET("stats.active", &active, size_t);
- CTL_GET("stats.mapped", &mapped, size_t);
- malloc_cprintf(write_cb, cbopaque,
- "Allocated: %zu, active: %zu, mapped: %zu\n",
- allocated, active, mapped);
- malloc_cprintf(write_cb, cbopaque,
- "Current active ceiling: %zu\n", atomic_read_z(cactive));
-
- /* Print chunk stats. */
- CTL_GET("stats.chunks.total", &chunks_total, uint64_t);
- CTL_GET("stats.chunks.high", &chunks_high, size_t);
- CTL_GET("stats.chunks.current", &chunks_current, size_t);
- malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
- "highchunks curchunks\n");
- malloc_cprintf(write_cb, cbopaque,
- " %13"PRIu64" %12zu %12zu\n",
- chunks_total, chunks_high, chunks_current);
-
- /* Print huge stats. */
- CTL_GET("stats.huge.nmalloc", &huge_nmalloc, uint64_t);
- CTL_GET("stats.huge.ndalloc", &huge_ndalloc, uint64_t);
- CTL_GET("stats.huge.allocated", &huge_allocated, size_t);
- malloc_cprintf(write_cb, cbopaque,
- "huge: nmalloc ndalloc allocated\n");
- malloc_cprintf(write_cb, cbopaque,
- " %12"PRIu64" %12"PRIu64" %12zu\n",
- huge_nmalloc, huge_ndalloc, huge_allocated);
-
- if (merged) {
- unsigned narenas;
-
- CTL_GET("arenas.narenas", &narenas, unsigned);
- {
- VARIABLE_ARRAY(bool, initialized, narenas);
- size_t isz;
- unsigned i, ninitialized;
-
- isz = sizeof(bool) * narenas;
- xmallctl("arenas.initialized", initialized,
- &isz, NULL, 0);
- for (i = ninitialized = 0; i < narenas; i++) {
- if (initialized[i])
- ninitialized++;
- }
-
- if (ninitialized > 1 || unmerged == false) {
- /* Print merged arena stats. */
- malloc_cprintf(write_cb, cbopaque,
- "\nMerged arenas stats:\n");
- stats_arena_print(write_cb, cbopaque,
- narenas, bins, large);
- }
- }
- }
-
- if (unmerged) {
- unsigned narenas;
-
- /* Print stats for each arena. */
-
- CTL_GET("arenas.narenas", &narenas, unsigned);
- {
- VARIABLE_ARRAY(bool, initialized, narenas);
- size_t isz;
- unsigned i;
-
- isz = sizeof(bool) * narenas;
- xmallctl("arenas.initialized", initialized,
- &isz, NULL, 0);
-
- for (i = 0; i < narenas; i++) {
- if (initialized[i]) {
- malloc_cprintf(write_cb,
- cbopaque,
- "\narenas[%u]:\n", i);
- stats_arena_print(write_cb,
- cbopaque, i, bins, large);
- }
- }
- }
- }
- }
- malloc_cprintf(write_cb, cbopaque, "--- End jemalloc statistics ---\n");
-}
diff --git a/extra/jemalloc/src/tcache.c b/extra/jemalloc/src/tcache.c
deleted file mode 100644
index 98ed19edd52..00000000000
--- a/extra/jemalloc/src/tcache.c
+++ /dev/null
@@ -1,476 +0,0 @@
-#define JEMALLOC_TCACHE_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-/* Data. */
-
-malloc_tsd_data(, tcache, tcache_t *, NULL)
-malloc_tsd_data(, tcache_enabled, tcache_enabled_t, tcache_enabled_default)
-
-bool opt_tcache = true;
-ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
-
-tcache_bin_info_t *tcache_bin_info;
-static unsigned stack_nelms; /* Total stack elms per tcache. */
-
-size_t nhbins;
-size_t tcache_maxclass;
-
-/******************************************************************************/
-
- size_t
- tcache_salloc(const void *ptr)
-{
-
- return (arena_salloc(ptr, false));
-}
-
-void
-tcache_event_hard(tcache_t *tcache)
-{
- size_t binind = tcache->next_gc_bin;
- tcache_bin_t *tbin = &tcache->tbins[binind];
- tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
-
- if (tbin->low_water > 0) {
- /*
- * Flush (ceiling) 3/4 of the objects below the low water mark.
- */
- if (binind < NBINS) {
- tcache_bin_flush_small(tbin, binind, tbin->ncached -
- tbin->low_water + (tbin->low_water >> 2), tcache);
- } else {
- tcache_bin_flush_large(tbin, binind, tbin->ncached -
- tbin->low_water + (tbin->low_water >> 2), tcache);
- }
- /*
- * Reduce fill count by 2X. Limit lg_fill_div such that the
- * fill count is always at least 1.
- */
- if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1)
- tbin->lg_fill_div++;
- } else if (tbin->low_water < 0) {
- /*
- * Increase fill count by 2X. Make sure lg_fill_div stays
- * greater than 0.
- */
- if (tbin->lg_fill_div > 1)
- tbin->lg_fill_div--;
- }
- tbin->low_water = tbin->ncached;
-
- tcache->next_gc_bin++;
- if (tcache->next_gc_bin == nhbins)
- tcache->next_gc_bin = 0;
- tcache->ev_cnt = 0;
-}
-
-void *
-tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
-{
- void *ret;
-
- arena_tcache_fill_small(tcache->arena, tbin, binind,
- config_prof ? tcache->prof_accumbytes : 0);
- if (config_prof)
- tcache->prof_accumbytes = 0;
- ret = tcache_alloc_easy(tbin);
-
- return (ret);
-}
-
-void
-tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
- tcache_t *tcache)
-{
- void *ptr;
- unsigned i, nflush, ndeferred;
- bool merged_stats = false;
-
- assert(binind < NBINS);
- assert(rem <= tbin->ncached);
-
- for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
- /* Lock the arena bin associated with the first object. */
- arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
- tbin->avail[0]);
- arena_t *arena = chunk->arena;
- arena_bin_t *bin = &arena->bins[binind];
-
- if (config_prof && arena == tcache->arena) {
- if (arena_prof_accum(arena, tcache->prof_accumbytes))
- prof_idump();
- tcache->prof_accumbytes = 0;
- }
-
- malloc_mutex_lock(&bin->lock);
- if (config_stats && arena == tcache->arena) {
- assert(merged_stats == false);
- merged_stats = true;
- bin->stats.nflushes++;
- bin->stats.nrequests += tbin->tstats.nrequests;
- tbin->tstats.nrequests = 0;
- }
- ndeferred = 0;
- for (i = 0; i < nflush; i++) {
- ptr = tbin->avail[i];
- assert(ptr != NULL);
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (chunk->arena == arena) {
- size_t pageind = ((uintptr_t)ptr -
- (uintptr_t)chunk) >> LG_PAGE;
- arena_chunk_map_t *mapelm =
- arena_mapp_get(chunk, pageind);
- if (config_fill && opt_junk) {
- arena_alloc_junk_small(ptr,
- &arena_bin_info[binind], true);
- }
- arena_dalloc_bin_locked(arena, chunk, ptr,
- mapelm);
- } else {
- /*
- * This object was allocated via a different
- * arena bin than the one that is currently
- * locked. Stash the object, so that it can be
- * handled in a future pass.
- */
- tbin->avail[ndeferred] = ptr;
- ndeferred++;
- }
- }
- malloc_mutex_unlock(&bin->lock);
- }
- if (config_stats && merged_stats == false) {
- /*
- * The flush loop didn't happen to flush to this thread's
- * arena, so the stats didn't get merged. Manually do so now.
- */
- arena_bin_t *bin = &tcache->arena->bins[binind];
- malloc_mutex_lock(&bin->lock);
- bin->stats.nflushes++;
- bin->stats.nrequests += tbin->tstats.nrequests;
- tbin->tstats.nrequests = 0;
- malloc_mutex_unlock(&bin->lock);
- }
-
- memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
- rem * sizeof(void *));
- tbin->ncached = rem;
- if ((int)tbin->ncached < tbin->low_water)
- tbin->low_water = tbin->ncached;
-}
-
-void
-tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
- tcache_t *tcache)
-{
- void *ptr;
- unsigned i, nflush, ndeferred;
- bool merged_stats = false;
-
- assert(binind < nhbins);
- assert(rem <= tbin->ncached);
-
- for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
- /* Lock the arena associated with the first object. */
- arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
- tbin->avail[0]);
- arena_t *arena = chunk->arena;
- UNUSED bool idump;
-
- if (config_prof)
- idump = false;
- malloc_mutex_lock(&arena->lock);
- if ((config_prof || config_stats) && arena == tcache->arena) {
- if (config_prof) {
- idump = arena_prof_accum_locked(arena,
- tcache->prof_accumbytes);
- tcache->prof_accumbytes = 0;
- }
- if (config_stats) {
- merged_stats = true;
- arena->stats.nrequests_large +=
- tbin->tstats.nrequests;
- arena->stats.lstats[binind - NBINS].nrequests +=
- tbin->tstats.nrequests;
- tbin->tstats.nrequests = 0;
- }
- }
- ndeferred = 0;
- for (i = 0; i < nflush; i++) {
- ptr = tbin->avail[i];
- assert(ptr != NULL);
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (chunk->arena == arena)
- arena_dalloc_large_locked(arena, chunk, ptr);
- else {
- /*
- * This object was allocated via a different
- * arena than the one that is currently locked.
- * Stash the object, so that it can be handled
- * in a future pass.
- */
- tbin->avail[ndeferred] = ptr;
- ndeferred++;
- }
- }
- malloc_mutex_unlock(&arena->lock);
- if (config_prof && idump)
- prof_idump();
- }
- if (config_stats && merged_stats == false) {
- /*
- * The flush loop didn't happen to flush to this thread's
- * arena, so the stats didn't get merged. Manually do so now.
- */
- arena_t *arena = tcache->arena;
- malloc_mutex_lock(&arena->lock);
- arena->stats.nrequests_large += tbin->tstats.nrequests;
- arena->stats.lstats[binind - NBINS].nrequests +=
- tbin->tstats.nrequests;
- tbin->tstats.nrequests = 0;
- malloc_mutex_unlock(&arena->lock);
- }
-
- memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
- rem * sizeof(void *));
- tbin->ncached = rem;
- if ((int)tbin->ncached < tbin->low_water)
- tbin->low_water = tbin->ncached;
-}
-
-void
-tcache_arena_associate(tcache_t *tcache, arena_t *arena)
-{
-
- if (config_stats) {
- /* Link into list of extant tcaches. */
- malloc_mutex_lock(&arena->lock);
- ql_elm_new(tcache, link);
- ql_tail_insert(&arena->tcache_ql, tcache, link);
- malloc_mutex_unlock(&arena->lock);
- }
- tcache->arena = arena;
-}
-
-void
-tcache_arena_dissociate(tcache_t *tcache)
-{
-
- if (config_stats) {
- /* Unlink from list of extant tcaches. */
- malloc_mutex_lock(&tcache->arena->lock);
- ql_remove(&tcache->arena->tcache_ql, tcache, link);
- malloc_mutex_unlock(&tcache->arena->lock);
- tcache_stats_merge(tcache, tcache->arena);
- }
-}
-
-tcache_t *
-tcache_create(arena_t *arena)
-{
- tcache_t *tcache;
- size_t size, stack_offset;
- unsigned i;
-
- size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
- /* Naturally align the pointer stacks. */
- size = PTR_CEILING(size);
- stack_offset = size;
- size += stack_nelms * sizeof(void *);
- /*
- * Round up to the nearest multiple of the cacheline size, in order to
- * avoid the possibility of false cacheline sharing.
- *
- * That this works relies on the same logic as in ipalloc(), but we
- * cannot directly call ipalloc() here due to tcache bootstrapping
- * issues.
- */
- size = (size + CACHELINE_MASK) & (-CACHELINE);
-
- if (size <= SMALL_MAXCLASS)
- tcache = (tcache_t *)arena_malloc_small(arena, size, true);
- else if (size <= tcache_maxclass)
- tcache = (tcache_t *)arena_malloc_large(arena, size, true);
- else
- tcache = (tcache_t *)icallocx(size, false, arena);
-
- if (tcache == NULL)
- return (NULL);
-
- tcache_arena_associate(tcache, arena);
-
- assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
- for (i = 0; i < nhbins; i++) {
- tcache->tbins[i].lg_fill_div = 1;
- tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
- (uintptr_t)stack_offset);
- stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
- }
-
- tcache_tsd_set(&tcache);
-
- return (tcache);
-}
-
-void
-tcache_destroy(tcache_t *tcache)
-{
- unsigned i;
- size_t tcache_size;
-
- tcache_arena_dissociate(tcache);
-
- for (i = 0; i < NBINS; i++) {
- tcache_bin_t *tbin = &tcache->tbins[i];
- tcache_bin_flush_small(tbin, i, 0, tcache);
-
- if (config_stats && tbin->tstats.nrequests != 0) {
- arena_t *arena = tcache->arena;
- arena_bin_t *bin = &arena->bins[i];
- malloc_mutex_lock(&bin->lock);
- bin->stats.nrequests += tbin->tstats.nrequests;
- malloc_mutex_unlock(&bin->lock);
- }
- }
-
- for (; i < nhbins; i++) {
- tcache_bin_t *tbin = &tcache->tbins[i];
- tcache_bin_flush_large(tbin, i, 0, tcache);
-
- if (config_stats && tbin->tstats.nrequests != 0) {
- arena_t *arena = tcache->arena;
- malloc_mutex_lock(&arena->lock);
- arena->stats.nrequests_large += tbin->tstats.nrequests;
- arena->stats.lstats[i - NBINS].nrequests +=
- tbin->tstats.nrequests;
- malloc_mutex_unlock(&arena->lock);
- }
- }
-
- if (config_prof && tcache->prof_accumbytes > 0 &&
- arena_prof_accum(tcache->arena, tcache->prof_accumbytes))
- prof_idump();
-
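- /*
- * Release the tcache itself, mirroring the size-based allocation choice
- * made in tcache_create().
- */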
- tcache_size = arena_salloc(tcache, false);
- if (tcache_size <= SMALL_MAXCLASS) {
- arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
- arena_t *arena = chunk->arena;
- size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
- LG_PAGE;
- arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
-
- arena_dalloc_bin(arena, chunk, tcache, pageind, mapelm);
- } else if (tcache_size <= tcache_maxclass) {
- arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
- arena_t *arena = chunk->arena;
-
- arena_dalloc_large(arena, chunk, tcache);
- } else
- idallocx(tcache, false);
-}
-
-void
-tcache_thread_cleanup(void *arg)
-{
- tcache_t *tcache = *(tcache_t **)arg;
-
- if (tcache == TCACHE_STATE_DISABLED) {
- /* Do nothing. */
- } else if (tcache == TCACHE_STATE_REINCARNATED) {
- /*
- * Another destructor called an allocator function after this
- * destructor was called. Reset tcache to
- * TCACHE_STATE_PURGATORY in order to receive another callback.
- */
- tcache = TCACHE_STATE_PURGATORY;
- tcache_tsd_set(&tcache);
- } else if (tcache == TCACHE_STATE_PURGATORY) {
- /*
- * The previous time this destructor was called, we set the key
- * to TCACHE_STATE_PURGATORY so that other destructors wouldn't
- * cause re-creation of the tcache. This time, do nothing, so
- * that the destructor will not be called again.
- */
- } else if (tcache != NULL) {
- assert(tcache != TCACHE_STATE_PURGATORY);
- tcache_destroy(tcache);
- tcache = TCACHE_STATE_PURGATORY;
- tcache_tsd_set(&tcache);
- }
-}
-
-void
-tcache_stats_merge(tcache_t *tcache, arena_t *arena)
-{
- unsigned i;
-
- /* Merge and reset tcache stats. */
- for (i = 0; i < NBINS; i++) {
- arena_bin_t *bin = &arena->bins[i];
- tcache_bin_t *tbin = &tcache->tbins[i];
- malloc_mutex_lock(&bin->lock);
- bin->stats.nrequests += tbin->tstats.nrequests;
- malloc_mutex_unlock(&bin->lock);
- tbin->tstats.nrequests = 0;
- }
-
- for (; i < nhbins; i++) {
- malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
- tcache_bin_t *tbin = &tcache->tbins[i];
- arena->stats.nrequests_large += tbin->tstats.nrequests;
- lstats->nrequests += tbin->tstats.nrequests;
- tbin->tstats.nrequests = 0;
- }
-}
-
-bool
-tcache_boot0(void)
-{
- unsigned i;
-
- /*
- * If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is
- * known.
- */
- if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
- tcache_maxclass = SMALL_MAXCLASS;
- else if ((1U << opt_lg_tcache_max) > arena_maxclass)
- tcache_maxclass = arena_maxclass;
- else
- tcache_maxclass = (1U << opt_lg_tcache_max);
-
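- /*
- * Small bins plus one large bin per page multiple up to tcache_maxclass.
- */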
- nhbins = NBINS + (tcache_maxclass >> LG_PAGE);
-
- /* Initialize tcache_bin_info. */
- tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
- sizeof(tcache_bin_info_t));
- if (tcache_bin_info == NULL)
- return (true);
- stack_nelms = 0;
- for (i = 0; i < NBINS; i++) {
- if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) {
- tcache_bin_info[i].ncached_max =
- (arena_bin_info[i].nregs << 1);
- } else {
- tcache_bin_info[i].ncached_max =
- TCACHE_NSLOTS_SMALL_MAX;
- }
- stack_nelms += tcache_bin_info[i].ncached_max;
- }
- for (; i < nhbins; i++) {
- tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
- stack_nelms += tcache_bin_info[i].ncached_max;
- }
-
- return (false);
-}
-
-bool
-tcache_boot1(void)
-{
-
- if (tcache_tsd_boot() || tcache_enabled_tsd_boot())
- return (true);
-
- return (false);
-}
diff --git a/extra/jemalloc/src/tsd.c b/extra/jemalloc/src/tsd.c
deleted file mode 100644
index 961a546329c..00000000000
--- a/extra/jemalloc/src/tsd.c
+++ /dev/null
@@ -1,107 +0,0 @@
-#define JEMALLOC_TSD_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-/* Data. */
-
-static unsigned ncleanups;
-static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX];
-
-/******************************************************************************/
-
-void *
-malloc_tsd_malloc(size_t size)
-{
-
- /* Avoid choose_arena() in order to dodge bootstrapping issues. */
- return (arena_malloc(arenas[0], size, false, false));
-}
-
-void
-malloc_tsd_dalloc(void *wrapper)
-{
-
- idalloc(wrapper);
-}
-
-void
-malloc_tsd_no_cleanup(void *arg)
-{
-
- not_reached();
-}
-
-#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32)
-#ifndef _WIN32
-JEMALLOC_EXPORT
-#endif
-void
-_malloc_thread_cleanup(void)
-{
- bool pending[MALLOC_TSD_CLEANUPS_MAX], again;
- unsigned i;
-
- for (i = 0; i < ncleanups; i++)
- pending[i] = true;
-
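- /*
- * A cleanup may lazily recreate state that another cleanup then has to
- * tear down again; each callback returns true while it still has work,
- * so iterate until none report pending work.
- */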
- do {
- again = false;
- for (i = 0; i < ncleanups; i++) {
- if (pending[i]) {
- pending[i] = cleanups[i]();
- if (pending[i])
- again = true;
- }
- }
- } while (again);
-}
-#endif
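The sweep above terminates because each registered cleanup returns true only while it still has work left, for instance when it must run after some other cleanup has completed. A self-contained sketch of that contract follows; the callback names are invented for the example.

#include <stdbool.h>
#include <stdio.h>

static bool cache_flushed = false;

static bool
release_arena(void)
{
    if (!cache_flushed)
        return (true);          /* still pending; call again */
    puts("arena released");
    return (false);
}

static bool
flush_cache(void)
{
    cache_flushed = true;
    puts("cache flushed");
    return (false);
}

static bool (*cleanups[])(void) = {release_arena, flush_cache};

int
main(void)
{
    bool pending[2] = {true, true}, again;
    unsigned i;

    do {
        again = false;
        for (i = 0; i < 2; i++) {
            if (pending[i] && (pending[i] = cleanups[i]()))
                again = true;
        }
    } while (again);
    return (0);
}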
-
-void
-malloc_tsd_cleanup_register(bool (*f)(void))
-{
-
- assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX);
- cleanups[ncleanups] = f;
- ncleanups++;
-}
-
-void
-malloc_tsd_boot(void)
-{
-
- ncleanups = 0;
-}
-
-#ifdef _WIN32
-static BOOL WINAPI
-_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
-{
-
- switch (fdwReason) {
-#ifdef JEMALLOC_LAZY_LOCK
- case DLL_THREAD_ATTACH:
- isthreaded = true;
- break;
-#endif
- case DLL_THREAD_DETACH:
- _malloc_thread_cleanup();
- break;
- default:
- break;
- }
- return (true);
-}
-
-#ifdef _MSC_VER
-# ifdef _M_IX86
-# pragma comment(linker, "/INCLUDE:__tls_used")
-# else
-# pragma comment(linker, "/INCLUDE:_tls_used")
-# endif
-# pragma section(".CRT$XLY",long,read)
-#endif
-JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used)
-static const BOOL (WINAPI *tls_callback)(HINSTANCE hinstDLL,
- DWORD fdwReason, LPVOID lpvReserved) = _tls_callback;
-#endif
diff --git a/extra/jemalloc/src/util.c b/extra/jemalloc/src/util.c
deleted file mode 100644
index b3a01143698..00000000000
--- a/extra/jemalloc/src/util.c
+++ /dev/null
@@ -1,641 +0,0 @@
-#define assert(e) do { \
- if (config_debug && !(e)) { \
- malloc_write("<jemalloc>: Failed assertion\n"); \
- abort(); \
- } \
-} while (0)
-
-#define not_reached() do { \
- if (config_debug) { \
- malloc_write("<jemalloc>: Unreachable code reached\n"); \
- abort(); \
- } \
-} while (0)
-
-#define not_implemented() do { \
- if (config_debug) { \
- malloc_write("<jemalloc>: Not implemented\n"); \
- abort(); \
- } \
-} while (0)
-
-#define JEMALLOC_UTIL_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static void wrtmessage(void *cbopaque, const char *s);
-#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1)
-static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s,
- size_t *slen_p);
-#define D2S_BUFSIZE (1 + U2S_BUFSIZE)
-static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p);
-#define O2S_BUFSIZE (1 + U2S_BUFSIZE)
-static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p);
-#define X2S_BUFSIZE (2 + U2S_BUFSIZE)
-static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s,
- size_t *slen_p);
-
-/******************************************************************************/
-
-/* malloc_message() setup. */
-static void
-wrtmessage(void *cbopaque, const char *s)
-{
-
-#ifdef SYS_write
- /*
- * Use syscall(2) rather than write(2) when possible in order to avoid
- * the possibility of memory allocation within libc. This is necessary
- * on FreeBSD; most operating systems do not have this problem though.
- */
- UNUSED int result = syscall(SYS_write, STDERR_FILENO, s, strlen(s));
-#else
- UNUSED int result = write(STDERR_FILENO, s, strlen(s));
-#endif
-}
-
-JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s);
-
-/*
- * Wrapper around malloc_message() that avoids the need for
- * je_malloc_message(...) throughout the code.
- */
-void
-malloc_write(const char *s)
-{
-
- if (je_malloc_message != NULL)
- je_malloc_message(NULL, s);
- else
- wrtmessage(NULL, s);
-}
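malloc_write() funnels every diagnostic message the allocator emits through the replaceable hook declared above. Assuming an unprefixed build, that hook is exported under the documented name malloc_message, and an application can override it roughly like this (routing to stderr here is only an example); link against jemalloc.

#include <stdio.h>

/* Public, user-assignable hook exported by the allocator (unprefixed build). */
extern void (*malloc_message)(void *cbopaque, const char *s);

static void
log_malloc_message(void *cbopaque, const char *s)
{
    (void)cbopaque;
    fputs(s, stderr);   /* or append to an application log */
}

int
main(void)
{
    malloc_message = log_malloc_message;
    /* From here on, allocator diagnostics go through the hook. */
    return (0);
}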
-
-/*
- * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
- * provide a wrapper.
- */
-int
-buferror(char *buf, size_t buflen)
-{
-
-#ifdef _WIN32
- FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, GetLastError(), 0,
- (LPSTR)buf, buflen, NULL);
- return (0);
-#elif defined(_GNU_SOURCE)
- char *b = strerror_r(errno, buf, buflen);
- if (b != buf) {
- strncpy(buf, b, buflen);
- buf[buflen-1] = '\0';
- }
- return (0);
-#else
- return (strerror_r(errno, buf, buflen));
-#endif
-}
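The _GNU_SOURCE branch exists because glibc's strerror_r() returns a char * that may point at a static string instead of filling the caller's buffer, unlike the int-returning POSIX variant. A small stand-alone demonstration of that behaviour, valid only on glibc:

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
    char buf[64];
    char *msg = strerror_r(EINVAL, buf, sizeof(buf));

    /* glibc may ignore buf entirely and hand back its own string. */
    printf("EINVAL -> \"%s\" (caller buffer used: %s)\n", msg,
        (msg == buf) ? "yes" : "no");
    return (0);
}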
-
-uintmax_t
-malloc_strtoumax(const char *nptr, char **endptr, int base)
-{
- uintmax_t ret, digit;
- int b;
- bool neg;
- const char *p, *ns;
-
- if (base < 0 || base == 1 || base > 36) {
- set_errno(EINVAL);
- return (UINTMAX_MAX);
- }
- b = base;
-
- /* Swallow leading whitespace and get sign, if any. */
- neg = false;
- p = nptr;
- while (true) {
- switch (*p) {
- case '\t': case '\n': case '\v': case '\f': case '\r': case ' ':
- p++;
- break;
- case '-':
- neg = true;
- /* Fall through. */
- case '+':
- p++;
- /* Fall through. */
- default:
- goto label_prefix;
- }
- }
-
- /* Get prefix, if any. */
- label_prefix:
- /*
- * Note where the first non-whitespace/sign character is so that it is
- * possible to tell whether any digits are consumed (e.g., " 0" vs.
- * " -x").
- */
- ns = p;
- if (*p == '0') {
- switch (p[1]) {
- case '0': case '1': case '2': case '3': case '4': case '5':
- case '6': case '7':
- if (b == 0)
- b = 8;
- if (b == 8)
- p++;
- break;
- case 'x':
- switch (p[2]) {
- case '0': case '1': case '2': case '3': case '4':
- case '5': case '6': case '7': case '8': case '9':
- case 'A': case 'B': case 'C': case 'D': case 'E':
- case 'F':
- case 'a': case 'b': case 'c': case 'd': case 'e':
- case 'f':
- if (b == 0)
- b = 16;
- if (b == 16)
- p += 2;
- break;
- default:
- break;
- }
- break;
- default:
- break;
- }
- }
- if (b == 0)
- b = 10;
-
- /* Convert. */
- ret = 0;
- while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b)
- || (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b)
- || (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) {
- uintmax_t pret = ret;
- ret *= b;
- ret += digit;
- if (ret < pret) {
- /* Overflow. */
- set_errno(ERANGE);
- return (UINTMAX_MAX);
- }
- p++;
- }
- if (neg)
- ret = -ret;
-
- if (endptr != NULL) {
- if (p == ns) {
- /* No characters were converted. */
- *endptr = (char *)nptr;
- } else
- *endptr = (char *)p;
- }
-
- return (ret);
-}
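The function above mirrors the semantics of the standard strtoumax(3) so that option parsing never has to call into libc: base 0 auto-detects the 0x and 0 prefixes, overflow yields UINTMAX_MAX with errno set to ERANGE, and *endptr ends up just past the last converted digit (or back at nptr when nothing converts). A quick sketch of those semantics using the libc function, since the internal one is not exported:

#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
    const char *opt = "  0x1fMiB";
    char *end;
    uintmax_t v = strtoumax(opt, &end, 0); /* base 0: detect 0x prefix */

    printf("value = %ju, unparsed tail = \"%s\"\n", v, end);
    /* Prints: value = 31, unparsed tail = "MiB" */
    return (0);
}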
-
-static char *
-u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p)
-{
- unsigned i;
-
- i = U2S_BUFSIZE - 1;
- s[i] = '\0';
- switch (base) {
- case 10:
- do {
- i--;
- s[i] = "0123456789"[x % (uint64_t)10];
- x /= (uint64_t)10;
- } while (x > 0);
- break;
- case 16: {
- const char *digits = (uppercase)
- ? "0123456789ABCDEF"
- : "0123456789abcdef";
-
- do {
- i--;
- s[i] = digits[x & 0xf];
- x >>= 4;
- } while (x > 0);
- break;
- } default: {
- const char *digits = (uppercase)
- ? "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
- : "0123456789abcdefghijklmnopqrstuvwxyz";
-
- assert(base >= 2 && base <= 36);
- do {
- i--;
- s[i] = digits[x % (uint64_t)base];
- x /= (uint64_t)base;
- } while (x > 0);
- }}
-
- *slen_p = U2S_BUFSIZE - 1 - i;
- return (&s[i]);
-}
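u2s() builds the string backwards from the end of the caller's buffer, so no digit count has to be computed up front and no reversal pass is needed; the returned pointer simply marks the first digit written. A stripped-down, decimal-only sketch of the same technique, with an invented name:

#include <stddef.h>
#include <stdio.h>

/* Fill buf from the back; return a pointer to the first digit. */
static char *
u2dec(unsigned long long x, char *buf, size_t buflen)
{
    size_t i = buflen - 1;

    buf[i] = '\0';
    do {
        buf[--i] = (char)('0' + x % 10);
        x /= 10;
    } while (x > 0);
    return (&buf[i]);
}

int
main(void)
{
    char buf[21];   /* enough for 2^64-1 plus the terminating NUL */

    printf("%s\n", u2dec(18446744073709551615ULL, buf, sizeof(buf)));
    return (0);
}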
-
-static char *
-d2s(intmax_t x, char sign, char *s, size_t *slen_p)
-{
- bool neg;
-
- if ((neg = (x < 0)))
- x = -x;
- s = u2s(x, 10, false, s, slen_p);
- if (neg)
- sign = '-';
- switch (sign) {
- case '-':
- if (neg == false)
- break;
- /* Fall through. */
- case ' ':
- case '+':
- s--;
- (*slen_p)++;
- *s = sign;
- break;
- default: not_reached();
- }
- return (s);
-}
-
-static char *
-o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p)
-{
-
- s = u2s(x, 8, false, s, slen_p);
- if (alt_form && *s != '0') {
- s--;
- (*slen_p)++;
- *s = '0';
- }
- return (s);
-}
-
-static char *
-x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p)
-{
-
- s = u2s(x, 16, uppercase, s, slen_p);
- if (alt_form) {
- s -= 2;
- (*slen_p) += 2;
- memcpy(s, uppercase ? "0X" : "0x", 2);
- }
- return (s);
-}
-
-int
-malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
-{
- int ret;
- size_t i;
- const char *f;
-
-#define APPEND_C(c) do { \
- if (i < size) \
- str[i] = (c); \
- i++; \
-} while (0)
-#define APPEND_S(s, slen) do { \
- if (i < size) { \
- size_t cpylen = (slen <= size - i) ? slen : size - i; \
- memcpy(&str[i], s, cpylen); \
- } \
- i += slen; \
-} while (0)
-#define APPEND_PADDED_S(s, slen, width, left_justify) do { \
- /* Left padding. */ \
- size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \
- (size_t)width - slen : 0); \
- if (left_justify == false && pad_len != 0) { \
- size_t j; \
- for (j = 0; j < pad_len; j++) \
- APPEND_C(' '); \
- } \
- /* Value. */ \
- APPEND_S(s, slen); \
- /* Right padding. */ \
- if (left_justify && pad_len != 0) { \
- size_t j; \
- for (j = 0; j < pad_len; j++) \
- APPEND_C(' '); \
- } \
-} while (0)
-#define GET_ARG_NUMERIC(val, len) do { \
- switch (len) { \
- case '?': \
- val = va_arg(ap, int); \
- break; \
- case '?' | 0x80: \
- val = va_arg(ap, unsigned int); \
- break; \
- case 'l': \
- val = va_arg(ap, long); \
- break; \
- case 'l' | 0x80: \
- val = va_arg(ap, unsigned long); \
- break; \
- case 'q': \
- val = va_arg(ap, long long); \
- break; \
- case 'q' | 0x80: \
- val = va_arg(ap, unsigned long long); \
- break; \
- case 'j': \
- val = va_arg(ap, intmax_t); \
- break; \
- case 't': \
- val = va_arg(ap, ptrdiff_t); \
- break; \
- case 'z': \
- val = va_arg(ap, ssize_t); \
- break; \
- case 'z' | 0x80: \
- val = va_arg(ap, size_t); \
- break; \
- case 'p': /* Synthetic; used for %p. */ \
- val = va_arg(ap, uintptr_t); \
- break; \
- default: not_reached(); \
- } \
-} while (0)
-
- i = 0;
- f = format;
- while (true) {
- switch (*f) {
- case '\0': goto label_out;
- case '%': {
- bool alt_form = false;
- bool left_justify = false;
- bool plus_space = false;
- bool plus_plus = false;
- int prec = -1;
- int width = -1;
- unsigned char len = '?';
-
- f++;
- if (*f == '%') {
- /* %% */
- APPEND_C(*f);
- break;
- }
- /* Flags. */
- while (true) {
- switch (*f) {
- case '#':
- assert(alt_form == false);
- alt_form = true;
- break;
- case '-':
- assert(left_justify == false);
- left_justify = true;
- break;
- case ' ':
- assert(plus_space == false);
- plus_space = true;
- break;
- case '+':
- assert(plus_plus == false);
- plus_plus = true;
- break;
- default: goto label_width;
- }
- f++;
- }
- /* Width. */
- label_width:
- switch (*f) {
- case '*':
- width = va_arg(ap, int);
- f++;
- break;
- case '0': case '1': case '2': case '3': case '4':
- case '5': case '6': case '7': case '8': case '9': {
- uintmax_t uwidth;
- set_errno(0);
- uwidth = malloc_strtoumax(f, (char **)&f, 10);
- assert(uwidth != UINTMAX_MAX || get_errno() !=
- ERANGE);
- width = (int)uwidth;
- if (*f == '.') {
- f++;
- goto label_precision;
- } else
- goto label_length;
- break;
- } case '.':
- f++;
- goto label_precision;
- default: goto label_length;
- }
- /* Precision. */
- label_precision:
- switch (*f) {
- case '*':
- prec = va_arg(ap, int);
- f++;
- break;
- case '0': case '1': case '2': case '3': case '4':
- case '5': case '6': case '7': case '8': case '9': {
- uintmax_t uprec;
- set_errno(0);
- uprec = malloc_strtoumax(f, (char **)&f, 10);
- assert(uprec != UINTMAX_MAX || get_errno() !=
- ERANGE);
- prec = (int)uprec;
- break;
- }
- default: break;
- }
- /* Length. */
- label_length:
- switch (*f) {
- case 'l':
- f++;
- if (*f == 'l') {
- len = 'q';
- f++;
- } else
- len = 'l';
- break;
- case 'j':
- len = 'j';
- f++;
- break;
- case 't':
- len = 't';
- f++;
- break;
- case 'z':
- len = 'z';
- f++;
- break;
- default: break;
- }
- /* Conversion specifier. */
- switch (*f) {
- char *s;
- size_t slen;
- case 'd': case 'i': {
- intmax_t val JEMALLOC_CC_SILENCE_INIT(0);
- char buf[D2S_BUFSIZE];
-
- GET_ARG_NUMERIC(val, len);
- s = d2s(val, (plus_plus ? '+' : (plus_space ?
- ' ' : '-')), buf, &slen);
- APPEND_PADDED_S(s, slen, width, left_justify);
- f++;
- break;
- } case 'o': {
- uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
- char buf[O2S_BUFSIZE];
-
- GET_ARG_NUMERIC(val, len | 0x80);
- s = o2s(val, alt_form, buf, &slen);
- APPEND_PADDED_S(s, slen, width, left_justify);
- f++;
- break;
- } case 'u': {
- uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
- char buf[U2S_BUFSIZE];
-
- GET_ARG_NUMERIC(val, len | 0x80);
- s = u2s(val, 10, false, buf, &slen);
- APPEND_PADDED_S(s, slen, width, left_justify);
- f++;
- break;
- } case 'x': case 'X': {
- uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
- char buf[X2S_BUFSIZE];
-
- GET_ARG_NUMERIC(val, len | 0x80);
- s = x2s(val, alt_form, *f == 'X', buf, &slen);
- APPEND_PADDED_S(s, slen, width, left_justify);
- f++;
- break;
- } case 'c': {
- unsigned char val;
- char buf[2];
-
- assert(len == '?' || len == 'l');
- assert_not_implemented(len != 'l');
- val = va_arg(ap, int);
- buf[0] = val;
- buf[1] = '\0';
- APPEND_PADDED_S(buf, 1, width, left_justify);
- f++;
- break;
- } case 's':
- assert(len == '?' || len == 'l');
- assert_not_implemented(len != 'l');
- s = va_arg(ap, char *);
- slen = (prec == -1) ? strlen(s) : prec;
- APPEND_PADDED_S(s, slen, width, left_justify);
- f++;
- break;
- case 'p': {
- uintmax_t val;
- char buf[X2S_BUFSIZE];
-
- GET_ARG_NUMERIC(val, 'p');
- s = x2s(val, true, false, buf, &slen);
- APPEND_PADDED_S(s, slen, width, left_justify);
- f++;
- break;
- }
- default: not_implemented();
- }
- break;
- } default: {
- APPEND_C(*f);
- f++;
- break;
- }}
- }
- label_out:
- if (i < size)
- str[i] = '\0';
- else
- str[size - 1] = '\0';
- ret = i;
-
-#undef APPEND_C
-#undef APPEND_S
-#undef APPEND_PADDED_S
-#undef GET_ARG_NUMERIC
- return (ret);
-}
-
-JEMALLOC_ATTR(format(printf, 3, 4))
-int
-malloc_snprintf(char *str, size_t size, const char *format, ...)
-{
- int ret;
- va_list ap;
-
- va_start(ap, format);
- ret = malloc_vsnprintf(str, size, format, ap);
- va_end(ap);
-
- return (ret);
-}
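Like the C99 snprintf() it mirrors, malloc_vsnprintf() returns the length the fully formatted string would have had (ret is taken from i before truncation is applied), so callers detect truncation by comparing the result against the buffer size; the jemalloc version differs mainly in supporting a reduced conversion set and never allocating. The return-value contract, illustrated with libc snprintf() for a self-contained example:

#include <stdio.h>

int
main(void)
{
    char buf[8];
    int needed = snprintf(buf, sizeof(buf), "pid: %d", 123456);

    /* needed >= (int)sizeof(buf) signals truncation. */
    printf("stored \"%s\", full string needs %d bytes\n", buf, needed + 1);
    return (0);
}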
-
-void
-malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *format, va_list ap)
-{
- char buf[MALLOC_PRINTF_BUFSIZE];
-
- if (write_cb == NULL) {
- /*
- * The caller did not provide an alternate write_cb callback
- * function, so use the default one. malloc_write() is an
- * inline function, so use malloc_message() directly here.
- */
- write_cb = (je_malloc_message != NULL) ? je_malloc_message :
- wrtmessage;
- cbopaque = NULL;
- }
-
- malloc_vsnprintf(buf, sizeof(buf), format, ap);
- write_cb(cbopaque, buf);
-}
-
-/*
- * Print to a callback function in such a way as to (hopefully) avoid memory
- * allocation.
- */
-JEMALLOC_ATTR(format(printf, 3, 4))
-void
-malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *format, ...)
-{
- va_list ap;
-
- va_start(ap, format);
- malloc_vcprintf(write_cb, cbopaque, format, ap);
- va_end(ap);
-}
-
-/* Print to stderr in such a way as to avoid memory allocation. */
-JEMALLOC_ATTR(format(printf, 1, 2))
-void
-malloc_printf(const char *format, ...)
-{
- va_list ap;
-
- va_start(ap, format);
- malloc_vcprintf(NULL, NULL, format, ap);
- va_end(ap);
-}
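The write_cb/cbopaque pair threaded through these printers is the same callback convention jemalloc exposes publicly via malloc_stats_print(). Assuming an unprefixed build that exports that symbol, a caller can redirect the whole statistics report into its own buffer instead of stderr, for example:

#include <stdio.h>
#include <string.h>

extern void malloc_stats_print(void (*write_cb)(void *, const char *),
    void *cbopaque, const char *opts);

/* Append each chunk of report text to a caller-owned buffer. */
static void
collect(void *cbopaque, const char *s)
{
    char *buf = cbopaque;

    strncat(buf, s, 4096 - strlen(buf) - 1);
}

int
main(void)
{
    static char report[4096] = "";

    malloc_stats_print(collect, report, NULL);
    printf("%.200s...\n", report);
    return (0);
}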
diff --git a/extra/jemalloc/src/zone.c b/extra/jemalloc/src/zone.c
deleted file mode 100644
index c62c183f65e..00000000000
--- a/extra/jemalloc/src/zone.c
+++ /dev/null
@@ -1,258 +0,0 @@
-#include "jemalloc/internal/jemalloc_internal.h"
-#ifndef JEMALLOC_ZONE
-# error "This source file is for zones on Darwin (OS X)."
-#endif
-
-/*
- * The malloc_default_purgeable_zone function is only available on >= 10.6.
- * We need to check whether it is present at runtime, thus the weak_import.
- */
-extern malloc_zone_t *malloc_default_purgeable_zone(void)
-JEMALLOC_ATTR(weak_import);
-
-/******************************************************************************/
-/* Data. */
-
-static malloc_zone_t zone;
-static struct malloc_introspection_t zone_introspect;
-
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static size_t zone_size(malloc_zone_t *zone, void *ptr);
-static void *zone_malloc(malloc_zone_t *zone, size_t size);
-static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size);
-static void *zone_valloc(malloc_zone_t *zone, size_t size);
-static void zone_free(malloc_zone_t *zone, void *ptr);
-static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
-#if (JEMALLOC_ZONE_VERSION >= 5)
-static void *zone_memalign(malloc_zone_t *zone, size_t alignment,
- size_t size);
-#endif
-#if (JEMALLOC_ZONE_VERSION >= 6)
-static void zone_free_definite_size(malloc_zone_t *zone, void *ptr,
- size_t size);
-#endif
-static void *zone_destroy(malloc_zone_t *zone);
-static size_t zone_good_size(malloc_zone_t *zone, size_t size);
-static void zone_force_lock(malloc_zone_t *zone);
-static void zone_force_unlock(malloc_zone_t *zone);
-
-/******************************************************************************/
-/*
- * Functions.
- */
-
-static size_t
-zone_size(malloc_zone_t *zone, void *ptr)
-{
-
- /*
- * There appear to be places within Darwin (such as setenv(3)) that
- * cause calls to this function with pointers that *no* zone owns. If
- * we knew that all pointers were owned by *some* zone, we could split
- * our zone into two parts, and use one as the default allocator and
- * the other as the default deallocator/reallocator. Since that will
- * not work in practice, we must check all pointers to ensure that they
- * reside within a mapped chunk before determining size.
- */
- return (ivsalloc(ptr, config_prof));
-}
-
-static void *
-zone_malloc(malloc_zone_t *zone, size_t size)
-{
-
- return (je_malloc(size));
-}
-
-static void *
-zone_calloc(malloc_zone_t *zone, size_t num, size_t size)
-{
-
- return (je_calloc(num, size));
-}
-
-static void *
-zone_valloc(malloc_zone_t *zone, size_t size)
-{
- void *ret = NULL; /* Assignment avoids useless compiler warning. */
-
- je_posix_memalign(&ret, PAGE, size);
-
- return (ret);
-}
-
-static void
-zone_free(malloc_zone_t *zone, void *ptr)
-{
-
- if (ivsalloc(ptr, config_prof) != 0) {
- je_free(ptr);
- return;
- }
-
- free(ptr);
-}
-
-static void *
-zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
-{
-
- if (ivsalloc(ptr, config_prof) != 0)
- return (je_realloc(ptr, size));
-
- return (realloc(ptr, size));
-}
-
-#if (JEMALLOC_ZONE_VERSION >= 5)
-static void *
-zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
-{
- void *ret = NULL; /* Assignment avoids useless compiler warning. */
-
- je_posix_memalign(&ret, alignment, size);
-
- return (ret);
-}
-#endif
-
-#if (JEMALLOC_ZONE_VERSION >= 6)
-static void
-zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
-{
-
- if (ivsalloc(ptr, config_prof) != 0) {
- assert(ivsalloc(ptr, config_prof) == size);
- je_free(ptr);
- return;
- }
-
- free(ptr);
-}
-#endif
-
-static void *
-zone_destroy(malloc_zone_t *zone)
-{
-
- /* This function should never be called. */
- assert(false);
- return (NULL);
-}
-
-static size_t
-zone_good_size(malloc_zone_t *zone, size_t size)
-{
-
- if (size == 0)
- size = 1;
- return (s2u(size));
-}
-
-static void
-zone_force_lock(malloc_zone_t *zone)
-{
-
- if (isthreaded)
- jemalloc_prefork();
-}
-
-static void
-zone_force_unlock(malloc_zone_t *zone)
-{
-
- if (isthreaded)
- jemalloc_postfork_parent();
-}
-
-JEMALLOC_ATTR(constructor)
-void
-register_zone(void)
-{
-
- /*
- * If something else replaced the system default zone allocator, don't
- * register jemalloc's.
- */
- malloc_zone_t *default_zone = malloc_default_zone();
- if (!default_zone->zone_name ||
- strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) {
- return;
- }
-
- zone.size = (void *)zone_size;
- zone.malloc = (void *)zone_malloc;
- zone.calloc = (void *)zone_calloc;
- zone.valloc = (void *)zone_valloc;
- zone.free = (void *)zone_free;
- zone.realloc = (void *)zone_realloc;
- zone.destroy = (void *)zone_destroy;
- zone.zone_name = "jemalloc_zone";
- zone.batch_malloc = NULL;
- zone.batch_free = NULL;
- zone.introspect = &zone_introspect;
- zone.version = JEMALLOC_ZONE_VERSION;
-#if (JEMALLOC_ZONE_VERSION >= 5)
- zone.memalign = zone_memalign;
-#endif
-#if (JEMALLOC_ZONE_VERSION >= 6)
- zone.free_definite_size = zone_free_definite_size;
-#endif
-#if (JEMALLOC_ZONE_VERSION >= 8)
- zone.pressure_relief = NULL;
-#endif
-
- zone_introspect.enumerator = NULL;
- zone_introspect.good_size = (void *)zone_good_size;
- zone_introspect.check = NULL;
- zone_introspect.print = NULL;
- zone_introspect.log = NULL;
- zone_introspect.force_lock = (void *)zone_force_lock;
- zone_introspect.force_unlock = (void *)zone_force_unlock;
- zone_introspect.statistics = NULL;
-#if (JEMALLOC_ZONE_VERSION >= 6)
- zone_introspect.zone_locked = NULL;
-#endif
-#if (JEMALLOC_ZONE_VERSION >= 7)
- zone_introspect.enable_discharge_checking = NULL;
- zone_introspect.disable_discharge_checking = NULL;
- zone_introspect.discharge = NULL;
-#ifdef __BLOCKS__
- zone_introspect.enumerate_discharged_pointers = NULL;
-#else
- zone_introspect.enumerate_unavailable_without_blocks = NULL;
-#endif
-#endif
-
- /*
- * The default purgeable zone is created lazily by OSX's libc. It uses
- * the default zone when it is created for "small" allocations
- * (< 15 KiB), but assumes the default zone is a scalable_zone. This
- * obviously fails when the default zone is the jemalloc zone, so
- * malloc_default_purgeable_zone is called beforehand so that the
- * default purgeable zone is created when the default zone is still
- * a scalable_zone. As purgeable zones only exist on >= 10.6, we need
- * to check for the existence of malloc_default_purgeable_zone() at
- * run time.
- */
- if (malloc_default_purgeable_zone != NULL)
- malloc_default_purgeable_zone();
-
- /* Register the custom zone. At this point it won't be the default. */
- malloc_zone_register(&zone);
-
- /*
- * Unregister and reregister the default zone. On OSX >= 10.6,
- * unregistering takes the last registered zone and places it at the
- * location of the specified zone. Unregistering the default zone thus
- * makes the last registered one the default. On OSX < 10.6,
- * unregistering shifts all registered zones. The first registered zone
- * then becomes the default.
- */
- do {
- default_zone = malloc_default_zone();
- malloc_zone_unregister(default_zone);
- malloc_zone_register(default_zone);
- } while (malloc_default_zone() != &zone);
-}
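Once the constructor above has completed the unregister/re-register loop, malloc_default_zone() should return jemalloc's zone. A tiny Darwin-only check of that outcome, purely illustrative since it only inspects the zone_name set above:

#include <malloc/malloc.h>
#include <stdio.h>

int
main(void)
{
    malloc_zone_t *dz = malloc_default_zone();

    printf("default zone: %s\n",
        (dz->zone_name != NULL) ? dz->zone_name : "(unnamed)");
    return (0);
}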