author    antirez <antirez@gmail.com>  2017-04-22 13:12:42 +0200
committer antirez <antirez@gmail.com>  2017-04-22 13:17:07 +0200
commit    e3b8492e83822aa618d084732f8ab997bf41d7f3 (patch)
tree      c1ed5fc9cb15c3a85b6202ad16bde4fe8f012b4b /deps/jemalloc/src
parent    238cebdd5eb0fd0d8f2db87d895a007bc492dcdb (diff)
download  redis-e3b8492e83822aa618d084732f8ab997bf41d7f3.tar.gz
Revert "Jemalloc updated to 4.4.0."
This reverts commit 36c1acc222d29e6e2dc9fc25362e4faa471111bd.
Diffstat (limited to 'deps/jemalloc/src')
-rw-r--r--                deps/jemalloc/src/arena.c       2021
-rw-r--r--                deps/jemalloc/src/base.c          73
-rw-r--r--                deps/jemalloc/src/bitmap.c        59
-rw-r--r--                deps/jemalloc/src/chunk.c        426
-rw-r--r--                deps/jemalloc/src/chunk_dss.c    182
-rw-r--r--                deps/jemalloc/src/chunk_mmap.c    18
-rw-r--r--                deps/jemalloc/src/ckh.c           43
-rw-r--r--                deps/jemalloc/src/ctl.c          761
-rw-r--r--                deps/jemalloc/src/extent.c        70
-rw-r--r--                deps/jemalloc/src/huge.c         238
-rw-r--r--                deps/jemalloc/src/jemalloc.c    1492
-rw-r--r--                deps/jemalloc/src/mutex.c         23
-rw-r--r--                deps/jemalloc/src/nstime.c       194
-rw-r--r--                deps/jemalloc/src/pages.c        177
-rw-r--r--                deps/jemalloc/src/prng.c           2
-rw-r--r--                deps/jemalloc/src/prof.c         664
-rw-r--r--                deps/jemalloc/src/quarantine.c    50
-rw-r--r--                deps/jemalloc/src/rtree.c          9
-rw-r--r--                deps/jemalloc/src/spin.c           2
-rw-r--r-- [-rwxr-xr-x]   deps/jemalloc/src/stats.c       1220
-rw-r--r-- [-rwxr-xr-x]   deps/jemalloc/src/tcache.c       170
-rw-r--r--                deps/jemalloc/src/ticker.c         2
-rw-r--r--                deps/jemalloc/src/tsd.c           28
-rw-r--r-- [-rwxr-xr-x]   deps/jemalloc/src/util.c          42
-rw-r--r--                deps/jemalloc/src/witness.c      136
-rw-r--r--                deps/jemalloc/src/zone.c         198
26 files changed, 2965 insertions, 5335 deletions
diff --git a/deps/jemalloc/src/arena.c b/deps/jemalloc/src/arena.c
index 648a8da3a..3081519cc 100644
--- a/deps/jemalloc/src/arena.c
+++ b/deps/jemalloc/src/arena.c
@@ -4,23 +4,16 @@
/******************************************************************************/
/* Data. */
-purge_mode_t opt_purge = PURGE_DEFAULT;
-const char *purge_mode_names[] = {
- "ratio",
- "decay",
- "N/A"
-};
ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
static ssize_t lg_dirty_mult_default;
-ssize_t opt_decay_time = DECAY_TIME_DEFAULT;
-static ssize_t decay_time_default;
-
arena_bin_info_t arena_bin_info[NBINS];
size_t map_bias;
size_t map_misc_offset;
size_t arena_maxrun; /* Max run size for arenas. */
size_t large_maxclass; /* Max large size class. */
+static size_t small_maxrun; /* Max run size used for small size classes. */
+static bool *small_run_tab; /* Valid small run page multiples. */
unsigned nlclasses; /* Number of large size classes. */
unsigned nhclasses; /* Number of huge size classes. */
@@ -30,57 +23,60 @@ unsigned nhclasses; /* Number of huge size classes. */
* definition.
*/
-static void arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena,
- arena_chunk_t *chunk);
-static void arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena,
- size_t ndirty_limit);
-static void arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run,
- bool dirty, bool cleaned, bool decommitted);
-static void arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena,
- arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
-static void arena_bin_lower_run(arena_t *arena, arena_run_t *run,
- arena_bin_t *bin);
+static void arena_purge(arena_t *arena, bool all);
+static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
+ bool cleaned, bool decommitted);
+static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
+ arena_run_t *run, arena_bin_t *bin);
+static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
+ arena_run_t *run, arena_bin_t *bin);
/******************************************************************************/
-JEMALLOC_INLINE_C size_t
-arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
+#define CHUNK_MAP_KEY ((uintptr_t)0x1U)
+
+JEMALLOC_INLINE_C arena_chunk_map_misc_t *
+arena_miscelm_key_create(size_t size)
{
- arena_chunk_t *chunk;
- size_t pageind, mapbits;
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
- pageind = arena_miscelm_to_pageind(miscelm);
- mapbits = arena_mapbits_get(chunk, pageind);
- return (arena_mapbits_size_decode(mapbits));
+ return ((arena_chunk_map_misc_t *)(arena_mapbits_size_encode(size) |
+ CHUNK_MAP_KEY));
}
-JEMALLOC_INLINE_C const extent_node_t *
-arena_miscelm_extent_get(const arena_chunk_map_misc_t *miscelm)
+JEMALLOC_INLINE_C bool
+arena_miscelm_is_key(const arena_chunk_map_misc_t *miscelm)
{
- arena_chunk_t *chunk;
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
- return (&chunk->node);
+ return (((uintptr_t)miscelm & CHUNK_MAP_KEY) != 0);
}
-JEMALLOC_INLINE_C int
-arena_sn_comp(const arena_chunk_map_misc_t *a, const arena_chunk_map_misc_t *b)
+#undef CHUNK_MAP_KEY
+
+JEMALLOC_INLINE_C size_t
+arena_miscelm_key_size_get(const arena_chunk_map_misc_t *miscelm)
{
- size_t a_sn, b_sn;
- assert(a != NULL);
- assert(b != NULL);
+ assert(arena_miscelm_is_key(miscelm));
+
+ return (arena_mapbits_size_decode((uintptr_t)miscelm));
+}
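/*
 * [Editor's sketch] The CHUNK_MAP_KEY trick above packs a search size
 * into a fake pointer so tree lookups need no heap-allocated key.  This
 * standalone reduction assumes real map elements are at least 2-byte
 * aligned, leaving bit 0 free as a tag; the plain shift stands in for
 * arena_mapbits_size_encode()/decode().
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define KEY_TAG ((uintptr_t)0x1U)

typedef struct map_misc_s map_misc_t;	/* opaque; only addresses used */

static map_misc_t *
key_create(size_t size)
{
	/* Encode the size in the upper bits, mark bit 0 as "key". */
	return ((map_misc_t *)(((uintptr_t)size << 1) | KEY_TAG));
}

static bool
is_key(const map_misc_t *elm)
{
	return (((uintptr_t)elm & KEY_TAG) != 0);
}

static size_t
key_size_get(const map_misc_t *elm)
{
	assert(is_key(elm));
	return ((size_t)((uintptr_t)elm >> 1));
}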
- a_sn = extent_node_sn_get(arena_miscelm_extent_get(a));
- b_sn = extent_node_sn_get(arena_miscelm_extent_get(b));
+JEMALLOC_INLINE_C size_t
+arena_miscelm_size_get(arena_chunk_map_misc_t *miscelm)
+{
+ arena_chunk_t *chunk;
+ size_t pageind, mapbits;
- return ((a_sn > b_sn) - (a_sn < b_sn));
+ assert(!arena_miscelm_is_key(miscelm));
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
+ pageind = arena_miscelm_to_pageind(miscelm);
+ mapbits = arena_mapbits_get(chunk, pageind);
+ return (arena_mapbits_size_decode(mapbits));
}
JEMALLOC_INLINE_C int
-arena_ad_comp(const arena_chunk_map_misc_t *a,
- const arena_chunk_map_misc_t *b)
+arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
{
uintptr_t a_miscelm = (uintptr_t)a;
uintptr_t b_miscelm = (uintptr_t)b;
@@ -91,79 +87,74 @@ arena_ad_comp(const arena_chunk_map_misc_t *a,
return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
}
-JEMALLOC_INLINE_C int
-arena_snad_comp(const arena_chunk_map_misc_t *a,
- const arena_chunk_map_misc_t *b)
+/* Generate red-black tree functions. */
+rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t,
+ rb_link, arena_run_comp)
+
+static size_t
+run_quantize(size_t size)
{
- int ret;
+ size_t qsize;
- assert(a != NULL);
- assert(b != NULL);
+ assert(size != 0);
+ assert(size == PAGE_CEILING(size));
- ret = arena_sn_comp(a, b);
- if (ret != 0)
- return (ret);
+ /* Don't change sizes that are valid small run sizes. */
+ if (size <= small_maxrun && small_run_tab[size >> LG_PAGE])
+ return (size);
- ret = arena_ad_comp(a, b);
- return (ret);
+ /*
+ * Round down to the nearest run size that can actually be requested
+ * during normal large allocation. Add large_pad so that cache index
+ * randomization can offset the allocation from the page boundary.
+ */
+ qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad;
+ if (qsize <= SMALL_MAXCLASS + large_pad)
+ return (run_quantize(size - large_pad));
+ assert(qsize <= size);
+ return (qsize);
}
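/*
 * [Editor's sketch] run_quantize() rounds a run size down to the
 * nearest size a normal large allocation would actually request, so
 * runs able to serve the same requests compare equal.  The class table
 * below is a toy stand-in for index2size()/size2index(), with large_pad
 * taken as 0.
 */
#include <stddef.h>
#include <stdio.h>

static const size_t classes[] = {
	16384, 20480, 24576, 28672, 32768, 40960, 49152
};

static size_t
quantize_floor(size_t size)
{
	size_t q = classes[0];
	unsigned i;

	for (i = 0; i < sizeof(classes) / sizeof(classes[0]); i++) {
		if (classes[i] <= size)
			q = classes[i];	/* largest class <= size */
	}
	return (q);
}

int
main(void)
{
	/* A 9-page (36 KiB) run backs at most a 32 KiB request. */
	printf("%zu -> %zu\n", (size_t)36864, quantize_floor(36864));
	return (0);
}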
-/* Generate pairing heap functions. */
-ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
- ph_link, arena_snad_comp)
-
-#ifdef JEMALLOC_JET
-#undef run_quantize_floor
-#define run_quantize_floor JEMALLOC_N(n_run_quantize_floor)
-#endif
static size_t
-run_quantize_floor(size_t size)
+run_quantize_next(size_t size)
{
- size_t ret;
- pszind_t pind;
-
- assert(size > 0);
- assert(size <= HUGE_MAXCLASS);
- assert((size & PAGE_MASK) == 0);
+ size_t large_run_size_next;
assert(size != 0);
assert(size == PAGE_CEILING(size));
- pind = psz2ind(size - large_pad + 1);
- if (pind == 0) {
- /*
- * Avoid underflow. This short-circuit would also do the right
- * thing for all sizes in the range for which there are
- * PAGE-spaced size classes, but it's simplest to just handle
- * the one case that would cause erroneous results.
- */
- return (size);
+ /*
+ * Return the next quantized size greater than the input size.
+ * Quantized sizes comprise the union of run sizes that back small
+ * region runs, and run sizes that back large regions with no explicit
+ * alignment constraints.
+ */
+
+ if (size > SMALL_MAXCLASS) {
+ large_run_size_next = PAGE_CEILING(index2size(size2index(size -
+ large_pad) + 1) + large_pad);
+ } else
+ large_run_size_next = SIZE_T_MAX;
+ if (size >= small_maxrun)
+ return (large_run_size_next);
+
+ while (true) {
+ size += PAGE;
+ assert(size <= small_maxrun);
+ if (small_run_tab[size >> LG_PAGE]) {
+ if (large_run_size_next < size)
+ return (large_run_size_next);
+ return (size);
+ }
}
- ret = pind2sz(pind - 1) + large_pad;
- assert(ret <= size);
- return (ret);
}
-#ifdef JEMALLOC_JET
-#undef run_quantize_floor
-#define run_quantize_floor JEMALLOC_N(run_quantize_floor)
-run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor);
-#endif
-#ifdef JEMALLOC_JET
-#undef run_quantize_ceil
-#define run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil)
-#endif
static size_t
-run_quantize_ceil(size_t size)
+run_quantize_first(size_t size)
{
- size_t ret;
-
- assert(size > 0);
- assert(size <= HUGE_MAXCLASS);
- assert((size & PAGE_MASK) == 0);
+ size_t qsize = run_quantize(size);
- ret = run_quantize_floor(size);
- if (ret < size) {
+ if (qsize < size) {
/*
* Skip a quantization that may have an adequately large run,
* because under-sized runs may be mixed in. This only happens
@@ -172,50 +163,72 @@ run_quantize_ceil(size_t size)
* search would potentially find sufficiently aligned available
* memory somewhere lower.
*/
- ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad;
+ qsize = run_quantize_next(size);
+ }
+ return (qsize);
+}
+
+JEMALLOC_INLINE_C int
+arena_avail_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
+{
+ int ret;
+ uintptr_t a_miscelm = (uintptr_t)a;
+ size_t a_qsize = run_quantize(arena_miscelm_is_key(a) ?
+ arena_miscelm_key_size_get(a) : arena_miscelm_size_get(a));
+ size_t b_qsize = run_quantize(arena_miscelm_size_get(b));
+
+ /*
+ * Compare based on quantized size rather than size, in order to sort
+ * equally useful runs only by address.
+ */
+ ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
+ if (ret == 0) {
+ if (!arena_miscelm_is_key(a)) {
+ uintptr_t b_miscelm = (uintptr_t)b;
+
+ ret = (a_miscelm > b_miscelm) - (a_miscelm < b_miscelm);
+ } else {
+ /*
+ * Treat keys as if they are lower than anything else.
+ */
+ ret = -1;
+ }
}
+
return (ret);
}
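/*
 * [Editor's sketch] The ordering arena_avail_comp() defines: quantized
 * size is the primary key, address breaks ties, and synthetic search
 * keys sort below real elements of equal size so a tree nsearch lands
 * on the lowest-addressed run that still fits.  Stand-in types only.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
	size_t qsize;	/* quantized run size */
	bool is_key;	/* synthetic search key? */
} avail_elm_t;

static int
avail_comp(const avail_elm_t *a, const avail_elm_t *b)
{
	/* Branchless three-way compare, as in the original. */
	int ret = (a->qsize > b->qsize) - (a->qsize < b->qsize);

	if (ret != 0)
		return (ret);
	if (a->is_key)
		return (-1);	/* keys rank below equal-sized elements */
	{
		uintptr_t pa = (uintptr_t)a, pb = (uintptr_t)b;
		return ((pa > pb) - (pa < pb));
	}
}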
-#ifdef JEMALLOC_JET
-#undef run_quantize_ceil
-#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
-run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil);
-#endif
+
+/* Generate red-black tree functions. */
+rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t,
+ arena_chunk_map_misc_t, rb_link, arena_avail_comp)
static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages)
{
- pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
- arena_miscelm_get_const(chunk, pageind))));
+
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
- assert((npages << LG_PAGE) < chunksize);
- assert(pind2sz(pind) <= chunksize);
- arena_run_heap_insert(&arena->runs_avail[pind],
- arena_miscelm_get_mutable(chunk, pageind));
+ arena_avail_tree_insert(&arena->runs_avail, arena_miscelm_get(chunk,
+ pageind));
}
static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages)
{
- pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
- arena_miscelm_get_const(chunk, pageind))));
+
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
- assert((npages << LG_PAGE) < chunksize);
- assert(pind2sz(pind) <= chunksize);
- arena_run_heap_remove(&arena->runs_avail[pind],
- arena_miscelm_get_mutable(chunk, pageind));
+ arena_avail_tree_remove(&arena->runs_avail, arena_miscelm_get(chunk,
+ pageind));
}
static void
arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages)
{
- arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
- pageind);
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
@@ -232,8 +245,7 @@ static void
arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages)
{
- arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
- pageind);
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
@@ -280,14 +292,14 @@ JEMALLOC_INLINE_C void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
void *ret;
- size_t regind;
+ unsigned regind;
arena_chunk_map_misc_t *miscelm;
void *rpages;
assert(run->nfree > 0);
assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));
- regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
+ regind = bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
miscelm = arena_run_to_miscelm(run);
rpages = arena_miscelm_to_rpages(miscelm);
ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
@@ -304,7 +316,7 @@ arena_run_reg_dalloc(arena_run_t *run, void *ptr)
size_t mapbits = arena_mapbits_get(chunk, pageind);
szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
- size_t regind = arena_run_regind(run, bin_info, ptr);
+ unsigned regind = arena_run_regind(run, bin_info, ptr);
assert(run->nfree < bin_info->nregs);
/* Freeing an interior pointer can cause assertion failure. */
@@ -352,30 +364,16 @@ arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
}
static void
-arena_nactive_add(arena_t *arena, size_t add_pages)
+arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages)
{
if (config_stats) {
- size_t cactive_add = CHUNK_CEILING((arena->nactive +
- add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
+ ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + add_pages
+ - sub_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
LG_PAGE);
- if (cactive_add != 0)
- stats_cactive_add(cactive_add);
- }
- arena->nactive += add_pages;
-}
-
-static void
-arena_nactive_sub(arena_t *arena, size_t sub_pages)
-{
-
- if (config_stats) {
- size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
- CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
- if (cactive_sub != 0)
- stats_cactive_sub(cactive_sub);
+ if (cactive_diff != 0)
+ stats_cactive_add(cactive_diff);
}
- arena->nactive -= sub_pages;
}
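/*
 * [Editor's worked example] cactive is tracked at chunk granularity, so
 * activity only moves the stat when it crosses a chunk boundary.
 * Assuming 4 KiB pages (LG_PAGE = 12) and 2 MiB chunks (512 pages):
 * growing nactive from 500 to 520 pages crosses one boundary, so
 * cactive rises by 2 MiB; growing from 100 to 120 pages changes
 * nothing, since both round up to a single chunk.
 */
#include <stddef.h>

#define LG_PAGE		12
#define CHUNKSIZE	((size_t)2 << 20)
#define CHUNK_CEILING(s) (((s) + CHUNKSIZE - 1) & ~(CHUNKSIZE - 1))

/* Signed change in chunk-rounded active bytes, as computed above. */
static ptrdiff_t
cactive_diff(size_t nactive, size_t add_pages, size_t sub_pages)
{
	return ((ptrdiff_t)CHUNK_CEILING((nactive + add_pages - sub_pages)
	    << LG_PAGE) - (ptrdiff_t)CHUNK_CEILING(nactive << LG_PAGE));
}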
static void
@@ -396,7 +394,8 @@ arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
arena_avail_remove(arena, chunk, run_ind, total_pages);
if (flag_dirty != 0)
arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
- arena_nactive_add(arena, need_pages);
+ arena_cactive_update(arena, need_pages, 0);
+ arena->nactive += need_pages;
/* Keep track of trailing unused pages for later use. */
if (rem_pages > 0) {
@@ -568,8 +567,7 @@ arena_chunk_init_spare(arena_t *arena)
}
static bool
-arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- size_t sn, bool zero)
+arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero)
{
/*
@@ -578,67 +576,64 @@ arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
* of runs is tracked individually, and upon chunk deallocation the
* entire chunk is in a consistent commit state.
*/
- extent_node_init(&chunk->node, arena, chunk, chunksize, sn, zero, true);
+ extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
extent_node_achunk_set(&chunk->node, true);
- return (chunk_register(tsdn, chunk, &chunk->node));
+ return (chunk_register(chunk, &chunk->node));
}
static arena_chunk_t *
-arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
+arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ bool *zero, bool *commit)
{
arena_chunk_t *chunk;
- size_t sn;
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
- chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
- NULL, chunksize, chunksize, &sn, zero, commit);
+ chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_hooks, NULL,
+ chunksize, chunksize, zero, commit);
if (chunk != NULL && !*commit) {
/* Commit header. */
if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
LG_PAGE, arena->ind)) {
- chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
- (void *)chunk, chunksize, sn, *zero, *commit);
+ chunk_dalloc_wrapper(arena, chunk_hooks,
+ (void *)chunk, chunksize, *commit);
chunk = NULL;
}
}
- if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, sn,
- *zero)) {
+ if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) {
if (!*commit) {
/* Undo commit of header. */
chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
LG_PAGE, arena->ind);
}
- chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk,
- chunksize, sn, *zero, *commit);
+ chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk,
+ chunksize, *commit);
chunk = NULL;
}
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
return (chunk);
}
static arena_chunk_t *
-arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
- bool *commit)
+arena_chunk_alloc_internal(arena_t *arena, bool *zero, bool *commit)
{
arena_chunk_t *chunk;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
- size_t sn;
- chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
- chunksize, &sn, zero, commit, true);
+ chunk = chunk_alloc_cache(arena, &chunk_hooks, NULL, chunksize,
+ chunksize, zero, true);
if (chunk != NULL) {
- if (arena_chunk_register(tsdn, arena, chunk, sn, *zero)) {
- chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
- chunksize, sn, true);
+ if (arena_chunk_register(arena, chunk, *zero)) {
+ chunk_dalloc_cache(arena, &chunk_hooks, chunk,
+ chunksize, true);
return (NULL);
}
+ *commit = true;
}
if (chunk == NULL) {
- chunk = arena_chunk_alloc_internal_hard(tsdn, arena,
- &chunk_hooks, zero, commit);
+ chunk = arena_chunk_alloc_internal_hard(arena, &chunk_hooks,
+ zero, commit);
}
if (config_stats && chunk != NULL) {
@@ -650,7 +645,7 @@ arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
}
static arena_chunk_t *
-arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
+arena_chunk_init_hard(arena_t *arena)
{
arena_chunk_t *chunk;
bool zero, commit;
@@ -660,16 +655,14 @@ arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
zero = false;
commit = false;
- chunk = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit);
+ chunk = arena_chunk_alloc_internal(arena, &zero, &commit);
if (chunk == NULL)
return (NULL);
- chunk->hugepage = true;
-
/*
* Initialize the map to contain one maximal free untouched run. Mark
- * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed
- * or decommitted chunk.
+ * the pages as zeroed if chunk_alloc() returned a zeroed or decommitted
+ * chunk.
*/
flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
@@ -681,18 +674,17 @@ arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
*/
if (!zero) {
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
- (void *)arena_bitselm_get_const(chunk, map_bias+1),
- (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
- chunk_npages-1) -
- (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
+ (void *)arena_bitselm_get(chunk, map_bias+1),
+ (size_t)((uintptr_t) arena_bitselm_get(chunk,
+ chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk,
+ map_bias+1)));
for (i = map_bias+1; i < chunk_npages-1; i++)
arena_mapbits_internal_set(chunk, i, flag_unzeroed);
} else {
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
- *)arena_bitselm_get_const(chunk, map_bias+1),
- (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
- chunk_npages-1) -
- (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
+ *)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t)
+ arena_bitselm_get(chunk, chunk_npages-1) -
+ (uintptr_t)arena_bitselm_get(chunk, map_bias+1)));
if (config_debug) {
for (i = map_bias+1; i < chunk_npages-1; i++) {
assert(arena_mapbits_unzeroed_get(chunk, i) ==
@@ -707,85 +699,28 @@ arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
}
static arena_chunk_t *
-arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
+arena_chunk_alloc(arena_t *arena)
{
arena_chunk_t *chunk;
if (arena->spare != NULL)
chunk = arena_chunk_init_spare(arena);
else {
- chunk = arena_chunk_init_hard(tsdn, arena);
+ chunk = arena_chunk_init_hard(arena);
if (chunk == NULL)
return (NULL);
}
- ql_elm_new(&chunk->node, ql_link);
- ql_tail_insert(&arena->achunks, &chunk->node, ql_link);
+ /* Insert the run into the runs_avail tree. */
arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);
return (chunk);
}
static void
-arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
-{
- size_t sn, hugepage;
- bool committed;
- chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
-
- chunk_deregister(chunk, &chunk->node);
-
- sn = extent_node_sn_get(&chunk->node);
- hugepage = chunk->hugepage;
- committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
- if (!committed) {
- /*
- * Decommit the header. Mark the chunk as decommitted even if
- * header decommit fails, since treating a partially committed
- * chunk as committed has a high potential for causing later
- * access of decommitted memory.
- */
- chunk_hooks = chunk_hooks_get(tsdn, arena);
- chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
- arena->ind);
- }
- if (!hugepage) {
- /*
- * Convert chunk back to the default state, so that all
- * subsequent chunk allocations start out with chunks that can
- * be backed by transparent huge pages.
- */
- pages_huge(chunk, chunksize);
- }
-
- chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
- sn, committed);
-
- if (config_stats) {
- arena->stats.mapped -= chunksize;
- arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
- }
-}
-
-static void
-arena_spare_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *spare)
+arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
{
- assert(arena->spare != spare);
-
- if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
- arena_run_dirty_remove(arena, spare, map_bias,
- chunk_npages-map_bias);
- }
-
- arena_chunk_discard(tsdn, arena, spare);
-}
-
-static void
-arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
-{
- arena_chunk_t *spare;
-
assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
@@ -797,14 +732,49 @@ arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
arena_mapbits_decommitted_get(chunk, chunk_npages-1));
- /* Remove run from runs_avail, so that the arena does not use it. */
+ /*
+ * Remove run from the runs_avail tree, so that the arena does not use
+ * it.
+ */
arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);
- ql_remove(&arena->achunks, &chunk->node, ql_link);
- spare = arena->spare;
- arena->spare = chunk;
- if (spare != NULL)
- arena_spare_discard(tsdn, arena, spare);
+ if (arena->spare != NULL) {
+ arena_chunk_t *spare = arena->spare;
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+ bool committed;
+
+ arena->spare = chunk;
+ if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
+ arena_run_dirty_remove(arena, spare, map_bias,
+ chunk_npages-map_bias);
+ }
+
+ chunk_deregister(spare, &spare->node);
+
+ committed = (arena_mapbits_decommitted_get(spare, map_bias) ==
+ 0);
+ if (!committed) {
+ /*
+ * Decommit the header. Mark the chunk as decommitted
+ * even if header decommit fails, since treating a
+ * partially committed chunk as committed has a high
+ * potential for causing later access of decommitted
+ * memory.
+ */
+ chunk_hooks = chunk_hooks_get(arena);
+ chunk_hooks.decommit(spare, chunksize, 0, map_bias <<
+ LG_PAGE, arena->ind);
+ }
+
+ chunk_dalloc_cache(arena, &chunk_hooks, (void *)spare,
+ chunksize, committed);
+
+ if (config_stats) {
+ arena->stats.mapped -= chunksize;
+ arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
+ }
+ } else
+ arena->spare = chunk;
}
static void
@@ -847,17 +817,6 @@ arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
}
static void
-arena_huge_reset_stats_cancel(arena_t *arena, size_t usize)
-{
- szind_t index = size2index(usize) - nlclasses - NBINS;
-
- cassert(config_stats);
-
- arena->stats.ndalloc_huge++;
- arena->stats.hstats[index].ndalloc--;
-}
-
-static void
arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
{
szind_t index = size2index(usize) - nlclasses - NBINS;
@@ -888,240 +847,243 @@ arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
}
extent_node_t *
-arena_node_alloc(tsdn_t *tsdn, arena_t *arena)
+arena_node_alloc(arena_t *arena)
{
extent_node_t *node;
- malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
+ malloc_mutex_lock(&arena->node_cache_mtx);
node = ql_last(&arena->node_cache, ql_link);
if (node == NULL) {
- malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
- return (base_alloc(tsdn, sizeof(extent_node_t)));
+ malloc_mutex_unlock(&arena->node_cache_mtx);
+ return (base_alloc(sizeof(extent_node_t)));
}
ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
- malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
+ malloc_mutex_unlock(&arena->node_cache_mtx);
return (node);
}
void
-arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node)
+arena_node_dalloc(arena_t *arena, extent_node_t *node)
{
- malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
+ malloc_mutex_lock(&arena->node_cache_mtx);
ql_elm_new(node, ql_link);
ql_tail_insert(&arena->node_cache, node, ql_link);
- malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
+ malloc_mutex_unlock(&arena->node_cache_mtx);
}
static void *
-arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, size_t *sn,
- bool *zero, size_t csize)
+arena_chunk_alloc_huge_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ size_t usize, size_t alignment, bool *zero, size_t csize)
{
void *ret;
bool commit = true;
- ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
- alignment, sn, zero, &commit);
+ ret = chunk_alloc_wrapper(arena, chunk_hooks, NULL, csize, alignment,
+ zero, &commit);
if (ret == NULL) {
/* Revert optimistic stats updates. */
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
if (config_stats) {
arena_huge_malloc_stats_update_undo(arena, usize);
arena->stats.mapped -= usize;
}
- arena_nactive_sub(arena, usize >> LG_PAGE);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ arena->nactive -= (usize >> LG_PAGE);
+ malloc_mutex_unlock(&arena->lock);
}
return (ret);
}
void *
-arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
- size_t alignment, size_t *sn, bool *zero)
+arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
+ bool *zero)
{
void *ret;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
size_t csize = CHUNK_CEILING(usize);
- bool commit = true;
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
/* Optimistically update stats. */
if (config_stats) {
arena_huge_malloc_stats_update(arena, usize);
arena->stats.mapped += usize;
}
- arena_nactive_add(arena, usize >> LG_PAGE);
+ arena->nactive += (usize >> LG_PAGE);
- ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize,
- alignment, sn, zero, &commit, true);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ ret = chunk_alloc_cache(arena, &chunk_hooks, NULL, csize, alignment,
+ zero, true);
+ malloc_mutex_unlock(&arena->lock);
if (ret == NULL) {
- ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
- usize, alignment, sn, zero, csize);
+ ret = arena_chunk_alloc_huge_hard(arena, &chunk_hooks, usize,
+ alignment, zero, csize);
}
+ if (config_stats && ret != NULL)
+ stats_cactive_add(usize);
return (ret);
}
void
-arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize,
- size_t sn)
+arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize)
{
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
size_t csize;
csize = CHUNK_CEILING(usize);
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
if (config_stats) {
arena_huge_dalloc_stats_update(arena, usize);
arena->stats.mapped -= usize;
+ stats_cactive_sub(usize);
}
- arena_nactive_sub(arena, usize >> LG_PAGE);
+ arena->nactive -= (usize >> LG_PAGE);
- chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, sn, true);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ chunk_dalloc_cache(arena, &chunk_hooks, chunk, csize, true);
+ malloc_mutex_unlock(&arena->lock);
}
void
-arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk,
- size_t oldsize, size_t usize)
+arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize,
+ size_t usize)
{
assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
assert(oldsize != usize);
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
if (config_stats)
arena_huge_ralloc_stats_update(arena, oldsize, usize);
- if (oldsize < usize)
- arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
- else
- arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ if (oldsize < usize) {
+ size_t udiff = usize - oldsize;
+ arena->nactive += udiff >> LG_PAGE;
+ if (config_stats)
+ stats_cactive_add(udiff);
+ } else {
+ size_t udiff = oldsize - usize;
+ arena->nactive -= udiff >> LG_PAGE;
+ if (config_stats)
+ stats_cactive_sub(udiff);
+ }
+ malloc_mutex_unlock(&arena->lock);
}
void
-arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
- size_t oldsize, size_t usize, size_t sn)
+arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize,
+ size_t usize)
{
size_t udiff = oldsize - usize;
size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
if (config_stats) {
arena_huge_ralloc_stats_update(arena, oldsize, usize);
- if (cdiff != 0)
+ if (cdiff != 0) {
arena->stats.mapped -= cdiff;
+ stats_cactive_sub(udiff);
+ }
}
- arena_nactive_sub(arena, udiff >> LG_PAGE);
+ arena->nactive -= udiff >> LG_PAGE;
if (cdiff != 0) {
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
void *nchunk = (void *)((uintptr_t)chunk +
CHUNK_CEILING(usize));
- chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
- sn, true);
+ chunk_dalloc_cache(arena, &chunk_hooks, nchunk, cdiff, true);
}
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
}
static bool
-arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
- size_t *sn, bool *zero, void *nchunk, size_t udiff, size_t cdiff)
+arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *chunk, size_t oldsize, size_t usize, bool *zero, void *nchunk,
+ size_t udiff, size_t cdiff)
{
bool err;
bool commit = true;
- err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
- chunksize, sn, zero, &commit) == NULL);
+ err = (chunk_alloc_wrapper(arena, chunk_hooks, nchunk, cdiff, chunksize,
+ zero, &commit) == NULL);
if (err) {
/* Revert optimistic stats updates. */
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
if (config_stats) {
arena_huge_ralloc_stats_update_undo(arena, oldsize,
usize);
arena->stats.mapped -= cdiff;
}
- arena_nactive_sub(arena, udiff >> LG_PAGE);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ arena->nactive -= (udiff >> LG_PAGE);
+ malloc_mutex_unlock(&arena->lock);
} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
cdiff, true, arena->ind)) {
- chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
- *sn, *zero, true);
+ chunk_dalloc_arena(arena, chunk_hooks, nchunk, cdiff, *zero,
+ true);
err = true;
}
return (err);
}
bool
-arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
- size_t oldsize, size_t usize, bool *zero)
+arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
+ size_t usize, bool *zero)
{
bool err;
- chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
+ chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
size_t udiff = usize - oldsize;
size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
- size_t sn;
- bool commit = true;
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
/* Optimistically update stats. */
if (config_stats) {
arena_huge_ralloc_stats_update(arena, oldsize, usize);
arena->stats.mapped += cdiff;
}
- arena_nactive_add(arena, udiff >> LG_PAGE);
+ arena->nactive += (udiff >> LG_PAGE);
- err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
- chunksize, &sn, zero, &commit, true) == NULL);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ err = (chunk_alloc_cache(arena, &arena->chunk_hooks, nchunk, cdiff,
+ chunksize, zero, true) == NULL);
+ malloc_mutex_unlock(&arena->lock);
if (err) {
- err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena,
- &chunk_hooks, chunk, oldsize, usize, &sn, zero, nchunk,
- udiff, cdiff);
+ err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks,
+ chunk, oldsize, usize, zero, nchunk, udiff,
+ cdiff);
} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
cdiff, true, arena->ind)) {
- chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
- sn, *zero, true);
+ chunk_dalloc_arena(arena, &chunk_hooks, nchunk, cdiff, *zero,
+ true);
err = true;
}
+ if (config_stats && !err)
+ stats_cactive_add(udiff);
return (err);
}
/*
* Do first-best-fit run selection, i.e. select the lowest run that best fits.
- * Run sizes are indexed, so not all candidate runs are necessarily exactly the
- * same size.
+ * Run sizes are quantized, so not all candidate runs are necessarily exactly
+ * the same size.
*/
static arena_run_t *
arena_run_first_best_fit(arena_t *arena, size_t size)
{
- pszind_t pind, i;
-
- pind = psz2ind(run_quantize_ceil(size));
-
- for (i = pind; pind2sz(i) <= chunksize; i++) {
- arena_chunk_map_misc_t *miscelm = arena_run_heap_first(
- &arena->runs_avail[i]);
- if (miscelm != NULL)
- return (&miscelm->run);
- }
-
- return (NULL);
+ size_t search_size = run_quantize_first(size);
+ arena_chunk_map_misc_t *key = arena_miscelm_key_create(search_size);
+ arena_chunk_map_misc_t *miscelm =
+ arena_avail_tree_nsearch(&arena->runs_avail, key);
+ if (miscelm == NULL)
+ return (NULL);
+ return (&miscelm->run);
}
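/*
 * [Editor's sketch] With the (quantized size, address) ordering,
 * first-best-fit reduces to a single nsearch: the least element >= the
 * key.  A sorted array scan stands in for the red-black tree here.
 */
#include <stddef.h>
#include <stdint.h>

typedef struct {
	size_t qsize;
	uintptr_t addr;
} run_t;

/* Least element >= (size, 0), i.e. lowest-addressed smallest fit. */
static const run_t *
first_best_fit(const run_t *sorted, size_t n, size_t size)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (sorted[i].qsize >= size)
			return (&sorted[i]);
	}
	return (NULL);
}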
static arena_run_t *
arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{
- arena_run_t *run = arena_run_first_best_fit(arena, size);
+ arena_run_t *run = arena_run_first_best_fit(arena, s2u(size));
if (run != NULL) {
if (arena_run_split_large(arena, run, size, zero))
run = NULL;
@@ -1130,7 +1092,7 @@ arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
}
static arena_run_t *
-arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
+arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
{
arena_chunk_t *chunk;
arena_run_t *run;
@@ -1146,9 +1108,9 @@ arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
/*
* No usable runs. Create a new chunk from which to allocate the run.
*/
- chunk = arena_chunk_alloc(tsdn, arena);
+ chunk = arena_chunk_alloc(arena);
if (chunk != NULL) {
- run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
+ run = &arena_miscelm_get(chunk, map_bias)->run;
if (arena_run_split_large(arena, run, size, zero))
run = NULL;
return (run);
@@ -1174,7 +1136,7 @@ arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
}
static arena_run_t *
-arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
+arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind)
{
arena_chunk_t *chunk;
arena_run_t *run;
@@ -1191,9 +1153,9 @@ arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
/*
* No usable runs. Create a new chunk from which to allocate the run.
*/
- chunk = arena_chunk_alloc(tsdn, arena);
+ chunk = arena_chunk_alloc(arena);
if (chunk != NULL) {
- run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
+ run = &arena_miscelm_get(chunk, map_bias)->run;
if (arena_run_split_small(arena, run, size, binind))
run = NULL;
return (run);
@@ -1216,239 +1178,42 @@ arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
}
ssize_t
-arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena)
+arena_lg_dirty_mult_get(arena_t *arena)
{
ssize_t lg_dirty_mult;
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
lg_dirty_mult = arena->lg_dirty_mult;
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
return (lg_dirty_mult);
}
bool
-arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult)
+arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult)
{
if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
return (true);
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
arena->lg_dirty_mult = lg_dirty_mult;
- arena_maybe_purge(tsdn, arena);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ arena_maybe_purge(arena);
+ malloc_mutex_unlock(&arena->lock);
return (false);
}
-static void
-arena_decay_deadline_init(arena_t *arena)
-{
-
- assert(opt_purge == purge_mode_decay);
-
- /*
- * Generate a new deadline that is uniformly random within the next
- * epoch after the current one.
- */
- nstime_copy(&arena->decay.deadline, &arena->decay.epoch);
- nstime_add(&arena->decay.deadline, &arena->decay.interval);
- if (arena->decay.time > 0) {
- nstime_t jitter;
-
- nstime_init(&jitter, prng_range_u64(&arena->decay.jitter_state,
- nstime_ns(&arena->decay.interval)));
- nstime_add(&arena->decay.deadline, &jitter);
- }
-}
-
-static bool
-arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
-{
-
- assert(opt_purge == purge_mode_decay);
-
- return (nstime_compare(&arena->decay.deadline, time) <= 0);
-}
-
-static size_t
-arena_decay_backlog_npages_limit(const arena_t *arena)
-{
- static const uint64_t h_steps[] = {
-#define STEP(step, h, x, y) \
- h,
- SMOOTHSTEP
-#undef STEP
- };
- uint64_t sum;
- size_t npages_limit_backlog;
- unsigned i;
-
- assert(opt_purge == purge_mode_decay);
-
- /*
- * For each element of decay_backlog, multiply by the corresponding
- * fixed-point smoothstep decay factor. Sum the products, then divide
- * to round down to the nearest whole number of pages.
- */
- sum = 0;
- for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
- sum += arena->decay.backlog[i] * h_steps[i];
- npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
-
- return (npages_limit_backlog);
-}
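/*
 * [Editor's sketch] The backlog computation being removed: each epoch's
 * count of newly dirtied pages is weighted by a fixed-point smoothstep
 * coefficient, and the shifted sum gives the dirty-page limit.  This
 * 4-step table is illustrative only; jemalloc's SMOOTHSTEP table is far
 * longer, with SMOOTHSTEP_BFP bits of precision.
 */
#include <stddef.h>
#include <stdint.h>

#define NSTEPS	4
#define BFP	16		/* 1.0 == (1 << 16) fixed point */

/* Oldest epoch first (almost fully decayed) to newest (full weight). */
static const uint64_t h_steps[NSTEPS] = {4096, 20480, 45056, 61440};

static size_t
backlog_npages_limit(const size_t backlog[NSTEPS])
{
	uint64_t sum = 0;
	unsigned i;

	for (i = 0; i < NSTEPS; i++)
		sum += (uint64_t)backlog[i] * h_steps[i];
	return ((size_t)(sum >> BFP));	/* round down to whole pages */
}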
-
-static void
-arena_decay_backlog_update_last(arena_t *arena)
-{
- size_t ndirty_delta = (arena->ndirty > arena->decay.ndirty) ?
- arena->ndirty - arena->decay.ndirty : 0;
- arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
-}
-
-static void
-arena_decay_backlog_update(arena_t *arena, uint64_t nadvance_u64)
-{
-
- if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
- memset(arena->decay.backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
- sizeof(size_t));
- } else {
- size_t nadvance_z = (size_t)nadvance_u64;
-
- assert((uint64_t)nadvance_z == nadvance_u64);
-
- memmove(arena->decay.backlog, &arena->decay.backlog[nadvance_z],
- (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
- if (nadvance_z > 1) {
- memset(&arena->decay.backlog[SMOOTHSTEP_NSTEPS -
- nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
- }
- }
-
- arena_decay_backlog_update_last(arena);
-}
-
-static void
-arena_decay_epoch_advance_helper(arena_t *arena, const nstime_t *time)
-{
- uint64_t nadvance_u64;
- nstime_t delta;
-
- assert(opt_purge == purge_mode_decay);
- assert(arena_decay_deadline_reached(arena, time));
-
- nstime_copy(&delta, time);
- nstime_subtract(&delta, &arena->decay.epoch);
- nadvance_u64 = nstime_divide(&delta, &arena->decay.interval);
- assert(nadvance_u64 > 0);
-
- /* Add nadvance_u64 decay intervals to epoch. */
- nstime_copy(&delta, &arena->decay.interval);
- nstime_imultiply(&delta, nadvance_u64);
- nstime_add(&arena->decay.epoch, &delta);
-
- /* Set a new deadline. */
- arena_decay_deadline_init(arena);
-
- /* Update the backlog. */
- arena_decay_backlog_update(arena, nadvance_u64);
-}
-
-static void
-arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena)
-{
- size_t ndirty_limit = arena_decay_backlog_npages_limit(arena);
-
- if (arena->ndirty > ndirty_limit)
- arena_purge_to_limit(tsdn, arena, ndirty_limit);
- arena->decay.ndirty = arena->ndirty;
-}
-
-static void
-arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, const nstime_t *time)
-{
-
- arena_decay_epoch_advance_helper(arena, time);
- arena_decay_epoch_advance_purge(tsdn, arena);
-}
-
-static void
-arena_decay_init(arena_t *arena, ssize_t decay_time)
-{
-
- arena->decay.time = decay_time;
- if (decay_time > 0) {
- nstime_init2(&arena->decay.interval, decay_time, 0);
- nstime_idivide(&arena->decay.interval, SMOOTHSTEP_NSTEPS);
- }
-
- nstime_init(&arena->decay.epoch, 0);
- nstime_update(&arena->decay.epoch);
- arena->decay.jitter_state = (uint64_t)(uintptr_t)arena;
- arena_decay_deadline_init(arena);
- arena->decay.ndirty = arena->ndirty;
- memset(arena->decay.backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
-}
-
-static bool
-arena_decay_time_valid(ssize_t decay_time)
-{
-
- if (decay_time < -1)
- return (false);
- if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX)
- return (true);
- return (false);
-}
-
-ssize_t
-arena_decay_time_get(tsdn_t *tsdn, arena_t *arena)
-{
- ssize_t decay_time;
-
- malloc_mutex_lock(tsdn, &arena->lock);
- decay_time = arena->decay.time;
- malloc_mutex_unlock(tsdn, &arena->lock);
-
- return (decay_time);
-}
-
-bool
-arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time)
-{
-
- if (!arena_decay_time_valid(decay_time))
- return (true);
-
- malloc_mutex_lock(tsdn, &arena->lock);
- /*
- * Restart decay backlog from scratch, which may cause many dirty pages
- * to be immediately purged. It would conceptually be possible to map
- * the old backlog onto the new backlog, but there is no justification
- * for such complexity since decay_time changes are intended to be
- * infrequent, either between the {-1, 0, >0} states, or a one-time
- * arbitrary change during initial arena configuration.
- */
- arena_decay_init(arena, decay_time);
- arena_maybe_purge(tsdn, arena);
- malloc_mutex_unlock(tsdn, &arena->lock);
-
- return (false);
-}
-
-static void
-arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena)
+void
+arena_maybe_purge(arena_t *arena)
{
- assert(opt_purge == purge_mode_ratio);
-
/* Don't purge if the option is disabled. */
if (arena->lg_dirty_mult < 0)
return;
-
+ /* Don't recursively purge. */
+ if (arena->purging)
+ return;
/*
* Iterate, since preventing recursive purging could otherwise leave too
* many dirty pages.
@@ -1463,68 +1228,10 @@ arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena)
*/
if (arena->ndirty <= threshold)
return;
- arena_purge_to_limit(tsdn, arena, threshold);
+ arena_purge(arena, false);
}
}
-static void
-arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena)
-{
- nstime_t time;
-
- assert(opt_purge == purge_mode_decay);
-
- /* Purge all or nothing if the option is disabled. */
- if (arena->decay.time <= 0) {
- if (arena->decay.time == 0)
- arena_purge_to_limit(tsdn, arena, 0);
- return;
- }
-
- nstime_init(&time, 0);
- nstime_update(&time);
- if (unlikely(!nstime_monotonic() && nstime_compare(&arena->decay.epoch,
- &time) > 0)) {
- /*
- * Time went backwards. Move the epoch back in time and
- * generate a new deadline, with the expectation that time
- * typically flows forward for long enough periods of time that
- * epochs complete. Unfortunately, this strategy is susceptible
- * to clock jitter triggering premature epoch advances, but
- * clock jitter estimation and compensation isn't feasible here
- * because calls into this code are event-driven.
- */
- nstime_copy(&arena->decay.epoch, &time);
- arena_decay_deadline_init(arena);
- } else {
- /* Verify that time does not go backwards. */
- assert(nstime_compare(&arena->decay.epoch, &time) <= 0);
- }
-
- /*
- * If the deadline has been reached, advance to the current epoch and
- * purge to the new limit if necessary. Note that dirty pages created
- * during the current epoch are not subject to purge until a future
- * epoch, so as a result purging only happens during epoch advances.
- */
- if (arena_decay_deadline_reached(arena, &time))
- arena_decay_epoch_advance(tsdn, arena, &time);
-}
-
-void
-arena_maybe_purge(tsdn_t *tsdn, arena_t *arena)
-{
-
- /* Don't recursively purge. */
- if (arena->purging)
- return;
-
- if (opt_purge == purge_mode_ratio)
- arena_maybe_purge_ratio(tsdn, arena);
- else
- arena_maybe_purge_decay(tsdn, arena);
-}
-
static size_t
arena_dirty_count(arena_t *arena)
{
@@ -1560,15 +1267,35 @@ arena_dirty_count(arena_t *arena)
}
static size_t
-arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
+arena_compute_npurge(arena_t *arena, bool all)
+{
+ size_t npurge;
+
+ /*
+ * Compute the minimum number of pages that this thread should try to
+ * purge.
+ */
+ if (!all) {
+ size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
+ threshold = threshold < chunk_npages ? chunk_npages : threshold;
+
+ npurge = arena->ndirty - threshold;
+ } else
+ npurge = arena->ndirty;
+
+ return (npurge);
+}
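/*
 * [Editor's worked example] With lg_dirty_mult = 3 the arena tolerates
 * one dirty page per eight active pages.  Assuming 65536 active pages,
 * 12000 dirty pages, and chunk_npages = 512:
 *
 *   threshold = max(65536 >> 3, 512) = 8192
 *   npurge    = 12000 - 8192        = 3808 pages
 *
 * The caller guarantees ndirty exceeds the threshold, so the
 * subtraction cannot underflow.
 */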
+
+static size_t
+arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all,
+ size_t npurge, arena_runs_dirty_link_t *purge_runs_sentinel,
extent_node_t *purge_chunks_sentinel)
{
arena_runs_dirty_link_t *rdelm, *rdelm_next;
extent_node_t *chunkselm;
size_t nstashed = 0;
- /* Stash runs/chunks according to ndirty_limit. */
+ /* Stash at least npurge pages. */
for (rdelm = qr_next(&arena->runs_dirty, rd_link),
chunkselm = qr_next(&arena->chunks_cache, cc_link);
rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
@@ -1577,32 +1304,24 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
if (rdelm == &chunkselm->rd) {
extent_node_t *chunkselm_next;
- size_t sn;
- bool zero, commit;
+ bool zero;
UNUSED void *chunk;
- npages = extent_node_size_get(chunkselm) >> LG_PAGE;
- if (opt_purge == purge_mode_decay && arena->ndirty -
- (nstashed + npages) < ndirty_limit)
- break;
-
chunkselm_next = qr_next(chunkselm, cc_link);
/*
* Allocate. chunkselm remains valid due to the
* dalloc_node=false argument to chunk_alloc_cache().
*/
zero = false;
- commit = false;
- chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
+ chunk = chunk_alloc_cache(arena, chunk_hooks,
extent_node_addr_get(chunkselm),
- extent_node_size_get(chunkselm), chunksize, &sn,
- &zero, &commit, false);
+ extent_node_size_get(chunkselm), chunksize, &zero,
+ false);
assert(chunk == extent_node_addr_get(chunkselm));
assert(zero == extent_node_zeroed_get(chunkselm));
extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
purge_chunks_sentinel);
- assert(npages == (extent_node_size_get(chunkselm) >>
- LG_PAGE));
+ npages = extent_node_size_get(chunkselm) >> LG_PAGE;
chunkselm = chunkselm_next;
} else {
arena_chunk_t *chunk =
@@ -1615,9 +1334,6 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
arena_mapbits_unallocated_size_get(chunk, pageind);
npages = run_size >> LG_PAGE;
- if (opt_purge == purge_mode_decay && arena->ndirty -
- (nstashed + npages) < ndirty_limit)
- break;
assert(pageind + npages <= chunk_npages);
assert(arena_mapbits_dirty_get(chunk, pageind) ==
@@ -1628,7 +1344,7 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
* prior to allocation.
*/
if (chunk == arena->spare)
- arena_chunk_alloc(tsdn, arena);
+ arena_chunk_alloc(arena);
/* Temporarily allocate the free dirty run. */
arena_run_split_large(arena, run, run_size, false);
@@ -1643,8 +1359,7 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
}
nstashed += npages;
- if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
- ndirty_limit)
+ if (!all && nstashed >= npurge)
break;
}
@@ -1652,7 +1367,7 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
}
static size_t
-arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
arena_runs_dirty_link_t *purge_runs_sentinel,
extent_node_t *purge_chunks_sentinel)
{
@@ -1664,7 +1379,7 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
nmadvise = 0;
npurged = 0;
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
for (rdelm = qr_next(purge_runs_sentinel, rd_link),
chunkselm = qr_next(purge_chunks_sentinel, cc_link);
rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
@@ -1693,17 +1408,6 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
run_size = arena_mapbits_large_size_get(chunk, pageind);
npages = run_size >> LG_PAGE;
- /*
- * If this is the first run purged within chunk, mark
- * the chunk as non-huge. This will prevent all use of
- * transparent huge pages for this chunk until the chunk
- * as a whole is deallocated.
- */
- if (chunk->hugepage) {
- pages_nohuge(chunk, chunksize);
- chunk->hugepage = false;
- }
-
assert(pageind + npages <= chunk_npages);
assert(!arena_mapbits_decommitted_get(chunk, pageind));
assert(!arena_mapbits_decommitted_get(chunk,
@@ -1714,7 +1418,7 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
flag_unzeroed = 0;
flags = CHUNK_MAP_DECOMMITTED;
} else {
- flag_unzeroed = chunk_purge_wrapper(tsdn, arena,
+ flag_unzeroed = chunk_purge_wrapper(arena,
chunk_hooks, chunk, chunksize, pageind <<
LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
flags = flag_unzeroed;
@@ -1745,7 +1449,7 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
if (config_stats)
nmadvise++;
}
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
if (config_stats) {
arena->stats.nmadvise += nmadvise;
@@ -1756,7 +1460,7 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
}
static void
-arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
arena_runs_dirty_link_t *purge_runs_sentinel,
extent_node_t *purge_chunks_sentinel)
{
@@ -1773,14 +1477,13 @@ arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
cc_link);
void *addr = extent_node_addr_get(chunkselm);
size_t size = extent_node_size_get(chunkselm);
- size_t sn = extent_node_sn_get(chunkselm);
bool zeroed = extent_node_zeroed_get(chunkselm);
bool committed = extent_node_committed_get(chunkselm);
extent_node_dirty_remove(chunkselm);
- arena_node_dalloc(tsdn, arena, chunkselm);
+ arena_node_dalloc(arena, chunkselm);
chunkselm = chunkselm_next;
- chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
- size, sn, zeroed, committed);
+ chunk_dalloc_arena(arena, chunk_hooks, addr, size,
+ zeroed, committed);
} else {
arena_chunk_t *chunk =
(arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
@@ -1791,26 +1494,16 @@ arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
pageind) != 0);
arena_run_t *run = &miscelm->run;
qr_remove(rdelm, rd_link);
- arena_run_dalloc(tsdn, arena, run, false, true,
- decommitted);
+ arena_run_dalloc(arena, run, false, true, decommitted);
}
}
}
-/*
- * NB: ndirty_limit is interpreted differently depending on opt_purge:
- * - purge_mode_ratio: Purge as few dirty run/chunks as possible to reach the
- * desired state:
- * (arena->ndirty <= ndirty_limit)
- * - purge_mode_decay: Purge as many dirty runs/chunks as possible without
- * violating the invariant:
- * (arena->ndirty >= ndirty_limit)
- */
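/*
 * [Editor's sketch] The two stopping rules the removed comment
 * describes, reduced to a single predicate; purge_mode_t here mirrors
 * the opt_purge enum this revert deletes.
 */
#include <stdbool.h>
#include <stddef.h>

typedef enum { purge_mode_ratio, purge_mode_decay } purge_mode_t;

static bool
purge_should_stop(purge_mode_t mode, size_t ndirty, size_t next_npages,
    size_t ndirty_limit)
{
	if (mode == purge_mode_ratio)
		return (ndirty <= ndirty_limit);	/* target reached */
	/* decay: stop before dropping below the invariant floor */
	return (ndirty - next_npages < ndirty_limit);
}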
static void
-arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
+arena_purge(arena_t *arena, bool all)
{
- chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
- size_t npurge, npurged;
+ chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
+ size_t npurge, npurgeable, npurged;
arena_runs_dirty_link_t purge_runs_sentinel;
extent_node_t purge_chunks_sentinel;
@@ -1824,183 +1517,34 @@ arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
size_t ndirty = arena_dirty_count(arena);
assert(ndirty == arena->ndirty);
}
- assert(opt_purge != purge_mode_ratio || (arena->nactive >>
- arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);
+ assert((arena->nactive >> arena->lg_dirty_mult) < arena->ndirty || all);
+
+ if (config_stats)
+ arena->stats.npurge++;
+ npurge = arena_compute_npurge(arena, all);
qr_new(&purge_runs_sentinel, rd_link);
extent_node_dirty_linkage_init(&purge_chunks_sentinel);
- npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit,
- &purge_runs_sentinel, &purge_chunks_sentinel);
- if (npurge == 0)
- goto label_return;
- npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks,
+ npurgeable = arena_stash_dirty(arena, &chunk_hooks, all, npurge,
&purge_runs_sentinel, &purge_chunks_sentinel);
- assert(npurged == npurge);
- arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel,
+ assert(npurgeable >= npurge);
+ npurged = arena_purge_stashed(arena, &chunk_hooks, &purge_runs_sentinel,
+ &purge_chunks_sentinel);
+ assert(npurged == npurgeable);
+ arena_unstash_purged(arena, &chunk_hooks, &purge_runs_sentinel,
&purge_chunks_sentinel);
- if (config_stats)
- arena->stats.npurge++;
-
-label_return:
arena->purging = false;
}
void
-arena_purge(tsdn_t *tsdn, arena_t *arena, bool all)
+arena_purge_all(arena_t *arena)
{
- malloc_mutex_lock(tsdn, &arena->lock);
- if (all)
- arena_purge_to_limit(tsdn, arena, 0);
- else
- arena_maybe_purge(tsdn, arena);
- malloc_mutex_unlock(tsdn, &arena->lock);
-}
-
-static void
-arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
-{
- size_t pageind, npages;
-
- cassert(config_prof);
- assert(opt_prof);
-
- /*
- * Iterate over the allocated runs and remove profiled allocations from
- * the sample set.
- */
- for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
- if (arena_mapbits_allocated_get(chunk, pageind) != 0) {
- if (arena_mapbits_large_get(chunk, pageind) != 0) {
- void *ptr = (void *)((uintptr_t)chunk + (pageind
- << LG_PAGE));
- size_t usize = isalloc(tsd_tsdn(tsd), ptr,
- config_prof);
-
- prof_free(tsd, ptr, usize);
- npages = arena_mapbits_large_size_get(chunk,
- pageind) >> LG_PAGE;
- } else {
- /* Skip small run. */
- size_t binind = arena_mapbits_binind_get(chunk,
- pageind);
- arena_bin_info_t *bin_info =
- &arena_bin_info[binind];
- npages = bin_info->run_size >> LG_PAGE;
- }
- } else {
- /* Skip unallocated run. */
- npages = arena_mapbits_unallocated_size_get(chunk,
- pageind) >> LG_PAGE;
- }
- assert(pageind + npages <= chunk_npages);
- }
-}
-
-void
-arena_reset(tsd_t *tsd, arena_t *arena)
-{
- unsigned i;
- extent_node_t *node;
-
- /*
- * Locking in this function is unintuitive. The caller guarantees that
- * no concurrent operations are happening in this arena, but there are
- * still reasons that some locking is necessary:
- *
- * - Some of the functions in the transitive closure of calls assume
- * appropriate locks are held, and in some cases these locks are
- * temporarily dropped to avoid lock order reversal or deadlock due to
- * reentry.
- * - mallctl("epoch", ...) may concurrently refresh stats. While
- * strictly speaking this is a "concurrent operation", disallowing
- * stats refreshes would impose an inconvenient burden.
- */
-
- /* Remove large allocations from prof sample set. */
- if (config_prof && opt_prof) {
- ql_foreach(node, &arena->achunks, ql_link) {
- arena_achunk_prof_reset(tsd, arena,
- extent_node_addr_get(node));
- }
- }
-
- /* Reset curruns for large size classes. */
- if (config_stats) {
- for (i = 0; i < nlclasses; i++)
- arena->stats.lstats[i].curruns = 0;
- }
-
- /* Huge allocations. */
- malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
- for (node = ql_last(&arena->huge, ql_link); node != NULL; node =
- ql_last(&arena->huge, ql_link)) {
- void *ptr = extent_node_addr_get(node);
- size_t usize;
-
- malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
- if (config_stats || (config_prof && opt_prof))
- usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
- /* Remove huge allocation from prof sample set. */
- if (config_prof && opt_prof)
- prof_free(tsd, ptr, usize);
- huge_dalloc(tsd_tsdn(tsd), ptr);
- malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
- /* Cancel out unwanted effects on stats. */
- if (config_stats)
- arena_huge_reset_stats_cancel(arena, usize);
- }
- malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
-
- malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
-
- /* Bins. */
- for (i = 0; i < NBINS; i++) {
- arena_bin_t *bin = &arena->bins[i];
- malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
- bin->runcur = NULL;
- arena_run_heap_new(&bin->runs);
- if (config_stats) {
- bin->stats.curregs = 0;
- bin->stats.curruns = 0;
- }
- malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
- }
-
- /*
- * Re-initialize runs_dirty such that the chunks_cache and runs_dirty
- * chains directly correspond.
- */
- qr_new(&arena->runs_dirty, rd_link);
- for (node = qr_next(&arena->chunks_cache, cc_link);
- node != &arena->chunks_cache; node = qr_next(node, cc_link)) {
- qr_new(&node->rd, rd_link);
- qr_meld(&arena->runs_dirty, &node->rd, rd_link);
- }
-
- /* Arena chunks. */
- for (node = ql_last(&arena->achunks, ql_link); node != NULL; node =
- ql_last(&arena->achunks, ql_link)) {
- ql_remove(&arena->achunks, node, ql_link);
- arena_chunk_discard(tsd_tsdn(tsd), arena,
- extent_node_addr_get(node));
- }
-
- /* Spare. */
- if (arena->spare != NULL) {
- arena_chunk_discard(tsd_tsdn(tsd), arena, arena->spare);
- arena->spare = NULL;
- }
-
- assert(!arena->purging);
- arena->nactive = 0;
-
- for (i = 0; i < NPSIZES; i++)
- arena_run_heap_new(&arena->runs_avail[i]);
-
- malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
+ malloc_mutex_lock(&arena->lock);
+ arena_purge(arena, true);
+ malloc_mutex_unlock(&arena->lock);
}
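
/*
 * The qr_new()/qr_meld() calls in the removed arena_reset() above rebuild
 * runs_dirty as a ring that mirrors chunks_cache. Below is a minimal sketch
 * of that circular-ring splice pattern — a simplified stand-in, not
 * jemalloc's actual qr.h macros.
 */
#include <stdio.h>

typedef struct ring_s {
	struct ring_s *prev, *next;
	int id;
} ring_t;

static void
ring_new(ring_t *r, int id)
{

	/* A fresh node is a one-element ring pointing at itself. */
	r->prev = r->next = r;
	r->id = id;
}

static void
ring_meld(ring_t *a, ring_t *b)
{
	/* Splice ring b into ring a by exchanging neighbor links. */
	ring_t *a_next = a->next;

	a->next = b->next;
	b->next->prev = a;
	b->next = a_next;
	a_next->prev = b;
}

int
main(void)
{
	ring_t head, n1, n2;

	ring_new(&head, 0);
	ring_new(&n1, 1);
	ring_new(&n2, 2);
	ring_meld(&head, &n1);
	ring_meld(&head, &n2);
	for (ring_t *r = head.next; r != &head; r = r->next)
		printf("%d\n", r->id); /* Visits 2, then 1. */
	return (0);
}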
static void
@@ -2116,9 +1660,21 @@ arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
return (size);
}
+static bool
+arena_run_decommit(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run)
+{
+ arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
+ size_t run_ind = arena_miscelm_to_pageind(miscelm);
+ size_t offset = run_ind << LG_PAGE;
+ size_t length = arena_run_size_get(arena, chunk, run, run_ind);
+
+ return (arena->chunk_hooks.decommit(chunk, chunksize, offset, length,
+ arena->ind));
+}
+
static void
-arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty,
- bool cleaned, bool decommitted)
+arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned,
+ bool decommitted)
{
arena_chunk_t *chunk;
arena_chunk_map_misc_t *miscelm;
@@ -2131,7 +1687,8 @@ arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty,
assert(run_ind < chunk_npages);
size = arena_run_size_get(arena, chunk, run, run_ind);
run_pages = (size >> LG_PAGE);
- arena_nactive_sub(arena, run_pages);
+ arena_cactive_update(arena, 0, run_pages);
+ arena->nactive -= run_pages;
/*
* The run is dirty if the caller claims to have dirtied it, as well as
@@ -2178,7 +1735,7 @@ arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty,
if (size == arena_maxrun) {
assert(run_ind == map_bias);
assert(run_pages == (arena_maxrun >> LG_PAGE));
- arena_chunk_dalloc(tsdn, arena, chunk);
+ arena_chunk_dalloc(arena, chunk);
}
/*
@@ -2189,12 +1746,21 @@ arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty,
* chances of spuriously crossing the dirty page purging threshold.
*/
if (dirty)
- arena_maybe_purge(tsdn, arena);
+ arena_maybe_purge(arena);
}
static void
-arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- arena_run_t *run, size_t oldsize, size_t newsize)
+arena_run_dalloc_decommit(arena_t *arena, arena_chunk_t *chunk,
+ arena_run_t *run)
+{
+ bool committed = arena_run_decommit(arena, chunk, run);
+
+ arena_run_dalloc(arena, run, committed, false, !committed);
+}
+
+static void
+arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+ size_t oldsize, size_t newsize)
{
arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
size_t pageind = arena_miscelm_to_pageind(miscelm);
@@ -2229,13 +1795,12 @@ arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
pageind+head_npages)));
- arena_run_dalloc(tsdn, arena, run, false, false, (flag_decommitted !=
- 0));
+ arena_run_dalloc(arena, run, false, false, (flag_decommitted != 0));
}
static void
-arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- arena_run_t *run, size_t oldsize, size_t newsize, bool dirty)
+arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+ size_t oldsize, size_t newsize, bool dirty)
{
arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
size_t pageind = arena_miscelm_to_pageind(miscelm);
@@ -2272,10 +1837,20 @@ arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
pageind+head_npages)));
- tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages);
+ tail_miscelm = arena_miscelm_get(chunk, pageind + head_npages);
tail_run = &tail_miscelm->run;
- arena_run_dalloc(tsdn, arena, tail_run, dirty, false, (flag_decommitted
- != 0));
+ arena_run_dalloc(arena, tail_run, dirty, false, (flag_decommitted !=
+ 0));
+}
+
+static arena_run_t *
+arena_bin_runs_first(arena_bin_t *bin)
+{
+ arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs);
+ if (miscelm != NULL)
+ return (&miscelm->run);
+
+ return (NULL);
}
static void
@@ -2283,25 +1858,35 @@ arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
{
arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
- arena_run_heap_insert(&bin->runs, miscelm);
+ assert(arena_run_tree_search(&bin->runs, miscelm) == NULL);
+
+ arena_run_tree_insert(&bin->runs, miscelm);
}
-static arena_run_t *
-arena_bin_nonfull_run_tryget(arena_bin_t *bin)
+static void
+arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
{
- arena_chunk_map_misc_t *miscelm;
+ arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
- miscelm = arena_run_heap_remove_first(&bin->runs);
- if (miscelm == NULL)
- return (NULL);
- if (config_stats)
- bin->stats.reruns++;
+ assert(arena_run_tree_search(&bin->runs, miscelm) != NULL);
- return (&miscelm->run);
+ arena_run_tree_remove(&bin->runs, miscelm);
+}
+
+static arena_run_t *
+arena_bin_nonfull_run_tryget(arena_bin_t *bin)
+{
+ arena_run_t *run = arena_bin_runs_first(bin);
+ if (run != NULL) {
+ arena_bin_runs_remove(bin, run);
+ if (config_stats)
+ bin->stats.reruns++;
+ }
+ return (run);
}
static arena_run_t *
-arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
+arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
{
arena_run_t *run;
szind_t binind;
@@ -2317,19 +1902,19 @@ arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
bin_info = &arena_bin_info[binind];
/* Allocate a new run. */
- malloc_mutex_unlock(tsdn, &bin->lock);
+ malloc_mutex_unlock(&bin->lock);
/******************************/
- malloc_mutex_lock(tsdn, &arena->lock);
- run = arena_run_alloc_small(tsdn, arena, bin_info->run_size, binind);
+ malloc_mutex_lock(&arena->lock);
+ run = arena_run_alloc_small(arena, bin_info->run_size, binind);
if (run != NULL) {
/* Initialize run internals. */
run->binind = binind;
run->nfree = bin_info->nregs;
bitmap_init(run->bitmap, &bin_info->bitmap_info);
}
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
/********************************/
- malloc_mutex_lock(tsdn, &bin->lock);
+ malloc_mutex_lock(&bin->lock);
if (run != NULL) {
if (config_stats) {
bin->stats.nruns++;
@@ -2352,7 +1937,7 @@ arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
static void *
-arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
+arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
{
szind_t binind;
arena_bin_info_t *bin_info;
@@ -2361,7 +1946,7 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
binind = arena_bin_index(arena, bin);
bin_info = &arena_bin_info[binind];
bin->runcur = NULL;
- run = arena_bin_nonfull_run_get(tsdn, arena, bin);
+ run = arena_bin_nonfull_run_get(arena, bin);
if (bin->runcur != NULL && bin->runcur->nfree > 0) {
/*
* Another thread updated runcur while this one ran without the
@@ -2382,11 +1967,10 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
* were just deallocated from the run.
*/
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- if (run->nfree == bin_info->nregs) {
- arena_dalloc_bin_run(tsdn, arena, chunk, run,
- bin);
- } else
- arena_bin_lower_run(arena, run, bin);
+ if (run->nfree == bin_info->nregs)
+ arena_dalloc_bin_run(arena, chunk, run, bin);
+ else
+ arena_bin_lower_run(arena, chunk, run, bin);
}
return (ret);
}
@@ -2402,18 +1986,18 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
}
void
-arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
- szind_t binind, uint64_t prof_accumbytes)
+arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind,
+ uint64_t prof_accumbytes)
{
unsigned i, nfill;
arena_bin_t *bin;
assert(tbin->ncached == 0);
- if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes))
- prof_idump(tsdn);
+ if (config_prof && arena_prof_accum(arena, prof_accumbytes))
+ prof_idump();
bin = &arena->bins[binind];
- malloc_mutex_lock(tsdn, &bin->lock);
+ malloc_mutex_lock(&bin->lock);
for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
tbin->lg_fill_div); i < nfill; i++) {
arena_run_t *run;
@@ -2421,15 +2005,16 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
if ((run = bin->runcur) != NULL && run->nfree > 0)
ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
else
- ptr = arena_bin_malloc_hard(tsdn, arena, bin);
+ ptr = arena_bin_malloc_hard(arena, bin);
if (ptr == NULL) {
/*
* OOM. tbin->avail isn't yet filled down to its first
* element, so the successful allocations (if any) must
- * be moved just before tbin->avail before bailing out.
+ * be moved to the base of tbin->avail before bailing
+ * out.
*/
if (i > 0) {
- memmove(tbin->avail - i, tbin->avail - nfill,
+ memmove(tbin->avail, &tbin->avail[nfill - i],
i * sizeof(void *));
}
break;
@@ -2439,7 +2024,7 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
true);
}
/* Insert such that low regions get used first. */
- *(tbin->avail - nfill + i) = ptr;
+ tbin->avail[nfill - 1 - i] = ptr;
}
if (config_stats) {
bin->stats.nmalloc += i;
@@ -2448,31 +2033,29 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
bin->stats.nfills++;
tbin->tstats.nrequests = 0;
}
- malloc_mutex_unlock(tsdn, &bin->lock);
+ malloc_mutex_unlock(&bin->lock);
tbin->ncached = i;
- arena_decay_tick(tsdn, arena);
}
void
arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
{
- size_t redzone_size = bin_info->redzone_size;
-
if (zero) {
- memset((void *)((uintptr_t)ptr - redzone_size),
- JEMALLOC_ALLOC_JUNK, redzone_size);
- memset((void *)((uintptr_t)ptr + bin_info->reg_size),
- JEMALLOC_ALLOC_JUNK, redzone_size);
+ size_t redzone_size = bin_info->redzone_size;
+ memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
+ redzone_size);
+ memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
+ redzone_size);
} else {
- memset((void *)((uintptr_t)ptr - redzone_size),
- JEMALLOC_ALLOC_JUNK, bin_info->reg_interval);
+ memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
+ bin_info->reg_interval);
}
}
#ifdef JEMALLOC_JET
#undef arena_redzone_corruption
-#define arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption)
+#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl)
#endif
static void
arena_redzone_corruption(void *ptr, size_t usize, bool after,
@@ -2487,7 +2070,7 @@ arena_redzone_corruption(void *ptr, size_t usize, bool after,
#undef arena_redzone_corruption
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
arena_redzone_corruption_t *arena_redzone_corruption =
- JEMALLOC_N(n_arena_redzone_corruption);
+ JEMALLOC_N(arena_redzone_corruption_impl);
#endif
static void
@@ -2502,22 +2085,22 @@ arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
for (i = 1; i <= redzone_size; i++) {
uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
- if (*byte != JEMALLOC_ALLOC_JUNK) {
+ if (*byte != 0xa5) {
error = true;
arena_redzone_corruption(ptr, size, false, i,
*byte);
if (reset)
- *byte = JEMALLOC_ALLOC_JUNK;
+ *byte = 0xa5;
}
}
for (i = 0; i < redzone_size; i++) {
uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
- if (*byte != JEMALLOC_ALLOC_JUNK) {
+ if (*byte != 0xa5) {
error = true;
arena_redzone_corruption(ptr, size, true, i,
*byte);
if (reset)
- *byte = JEMALLOC_ALLOC_JUNK;
+ *byte = 0xa5;
}
}
}
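
/*
 * For reference, the validation above scans both redzones for the 0xa5
 * fill byte written at allocation time. A self-contained sketch of the
 * same check follows, with an assumed fixed 16-byte redzone rather than
 * the per-bin redzone size jemalloc computes.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define	REDZONE_SIZE	16	/* Assumed; jemalloc derives this per bin. */
#define	ALLOC_JUNK	0xa5

/* Returns nonzero if either redzone around the region was clobbered. */
static int
redzones_corrupt(const uint8_t *reg, size_t reg_size)
{
	size_t i;

	for (i = 1; i <= REDZONE_SIZE; i++) {
		if (reg[-(ptrdiff_t)i] != ALLOC_JUNK)
			return (1);
	}
	for (i = 0; i < REDZONE_SIZE; i++) {
		if (reg[reg_size + i] != ALLOC_JUNK)
			return (1);
	}
	return (0);
}

int
main(void)
{
	uint8_t buf[REDZONE_SIZE + 32 + REDZONE_SIZE];
	uint8_t *reg = buf + REDZONE_SIZE;

	memset(buf, ALLOC_JUNK, sizeof(buf));
	assert(!redzones_corrupt(reg, 32));
	reg[32] = 0;				/* One-byte overflow... */
	assert(redzones_corrupt(reg, 32));	/* ...is caught. */
	return (0);
}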
@@ -2528,7 +2111,7 @@ arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small
-#define arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small)
+#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl)
#endif
void
arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
@@ -2536,14 +2119,14 @@ arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
size_t redzone_size = bin_info->redzone_size;
arena_redzones_validate(ptr, bin_info, false);
- memset((void *)((uintptr_t)ptr - redzone_size), JEMALLOC_FREE_JUNK,
+ memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
bin_info->reg_interval);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
arena_dalloc_junk_small_t *arena_dalloc_junk_small =
- JEMALLOC_N(n_arena_dalloc_junk_small);
+ JEMALLOC_N(arena_dalloc_junk_small_impl);
#endif
void
@@ -2561,26 +2144,27 @@ arena_quarantine_junk_small(void *ptr, size_t usize)
arena_redzones_validate(ptr, bin_info, true);
}
-static void *
-arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
+void *
+arena_malloc_small(arena_t *arena, size_t size, bool zero)
{
void *ret;
arena_bin_t *bin;
- size_t usize;
arena_run_t *run;
+ szind_t binind;
+ binind = size2index(size);
assert(binind < NBINS);
bin = &arena->bins[binind];
- usize = index2size(binind);
+ size = index2size(binind);
- malloc_mutex_lock(tsdn, &bin->lock);
+ malloc_mutex_lock(&bin->lock);
if ((run = bin->runcur) != NULL && run->nfree > 0)
ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
else
- ret = arena_bin_malloc_hard(tsdn, arena, bin);
+ ret = arena_bin_malloc_hard(arena, bin);
if (ret == NULL) {
- malloc_mutex_unlock(tsdn, &bin->lock);
+ malloc_mutex_unlock(&bin->lock);
return (NULL);
}
@@ -2589,9 +2173,9 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
bin->stats.nrequests++;
bin->stats.curregs++;
}
- malloc_mutex_unlock(tsdn, &bin->lock);
- if (config_prof && !isthreaded && arena_prof_accum(tsdn, arena, usize))
- prof_idump(tsdn);
+ malloc_mutex_unlock(&bin->lock);
+ if (config_prof && !isthreaded && arena_prof_accum(arena, size))
+ prof_idump();
if (!zero) {
if (config_fill) {
@@ -2599,35 +2183,34 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
arena_alloc_junk_small(ret,
&arena_bin_info[binind], false);
} else if (unlikely(opt_zero))
- memset(ret, 0, usize);
+ memset(ret, 0, size);
}
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
} else {
if (config_fill && unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret, &arena_bin_info[binind],
true);
}
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
- memset(ret, 0, usize);
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+ memset(ret, 0, size);
}
- arena_decay_tick(tsdn, arena);
return (ret);
}
void *
-arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
+arena_malloc_large(arena_t *arena, size_t size, bool zero)
{
void *ret;
size_t usize;
uintptr_t random_offset;
arena_run_t *run;
arena_chunk_map_misc_t *miscelm;
- UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
+ UNUSED bool idump;
/* Large allocation. */
- usize = index2size(binind);
- malloc_mutex_lock(tsdn, &arena->lock);
+ usize = s2u(size);
+ malloc_mutex_lock(&arena->lock);
if (config_cache_oblivious) {
uint64_t r;
@@ -2636,21 +2219,22 @@ arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
* that is a multiple of the cacheline size, e.g. [0 .. 63) * 64
* for 4 KiB pages and 64-byte cachelines.
*/
- r = prng_lg_range_zu(&arena->offset_state, LG_PAGE -
- LG_CACHELINE, false);
+ prng64(r, LG_PAGE - LG_CACHELINE, arena->offset_state,
+ UINT64_C(6364136223846793009),
+ UINT64_C(1442695040888963409));
random_offset = ((uintptr_t)r) << LG_CACHELINE;
} else
random_offset = 0;
- run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero);
+ run = arena_run_alloc_large(arena, usize + large_pad, zero);
if (run == NULL) {
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
return (NULL);
}
miscelm = arena_run_to_miscelm(run);
ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
random_offset);
if (config_stats) {
- szind_t index = binind - NBINS;
+ szind_t index = size2index(usize) - NBINS;
arena->stats.nmalloc_large++;
arena->stats.nrequests_large++;
@@ -2661,45 +2245,25 @@ arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
}
if (config_prof)
idump = arena_prof_accum_locked(arena, usize);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
if (config_prof && idump)
- prof_idump(tsdn);
+ prof_idump();
if (!zero) {
if (config_fill) {
if (unlikely(opt_junk_alloc))
- memset(ret, JEMALLOC_ALLOC_JUNK, usize);
+ memset(ret, 0xa5, usize);
else if (unlikely(opt_zero))
memset(ret, 0, usize);
}
}
- arena_decay_tick(tsdn, arena);
return (ret);
}
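
/*
 * The prng64() call restored above is a linear congruential generator
 * using Knuth's MMIX constants, and the result is shifted so the offset
 * is always a cacheline multiple within one page. A self-contained sketch
 * of that derivation, assuming 4 KiB pages and 64-byte cachelines per the
 * comment above; the seed and loop here are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define	LG_PAGE		12
#define	LG_CACHELINE	6

static uint64_t lcg_state = 42;	/* Arbitrary seed for the sketch. */

static uint64_t
lcg_next_bits(unsigned lg_range)
{

	lcg_state = lcg_state * UINT64_C(6364136223846793009) +
	    UINT64_C(1442695040888963409);
	/* The high bits have the longest period; keep lg_range of them. */
	return (lcg_state >> (64 - lg_range));
}

int
main(void)
{
	int i;

	/* Offsets are cacheline multiples in [0 .. PAGE): {0, 64, ..., 4032}. */
	for (i = 0; i < 4; i++) {
		uintptr_t off = (uintptr_t)lcg_next_bits(LG_PAGE -
		    LG_CACHELINE) << LG_CACHELINE;
		printf("random_offset = %zu\n", (size_t)off);
	}
	return (0);
}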
-void *
-arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
- bool zero)
-{
-
- assert(!tsdn_null(tsdn) || arena != NULL);
-
- if (likely(!tsdn_null(tsdn)))
- arena = arena_choose(tsdn_tsd(tsdn), arena);
- if (unlikely(arena == NULL))
- return (NULL);
-
- if (likely(size <= SMALL_MAXCLASS))
- return (arena_malloc_small(tsdn, arena, ind, zero));
- if (likely(size <= large_maxclass))
- return (arena_malloc_large(tsdn, arena, ind, zero));
- return (huge_malloc(tsdn, arena, index2size(ind), zero));
-}
-
/* Only handles large allocations that require more than page alignment. */
static void *
-arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
+arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
bool zero)
{
void *ret;
@@ -2709,21 +2273,19 @@ arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
arena_chunk_map_misc_t *miscelm;
void *rpages;
- assert(!tsdn_null(tsdn) || arena != NULL);
assert(usize == PAGE_CEILING(usize));
- if (likely(!tsdn_null(tsdn)))
- arena = arena_choose(tsdn_tsd(tsdn), arena);
+ arena = arena_choose(tsd, arena);
if (unlikely(arena == NULL))
return (NULL);
alignment = PAGE_CEILING(alignment);
alloc_size = usize + large_pad + alignment - PAGE;
- malloc_mutex_lock(tsdn, &arena->lock);
- run = arena_run_alloc_large(tsdn, arena, alloc_size, false);
+ malloc_mutex_lock(&arena->lock);
+ run = arena_run_alloc_large(arena, alloc_size, false);
if (run == NULL) {
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
return (NULL);
}
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
@@ -2738,16 +2300,16 @@ arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
arena_chunk_map_misc_t *head_miscelm = miscelm;
arena_run_t *head_run = run;
- miscelm = arena_miscelm_get_mutable(chunk,
+ miscelm = arena_miscelm_get(chunk,
arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
LG_PAGE));
run = &miscelm->run;
- arena_run_trim_head(tsdn, arena, chunk, head_run, alloc_size,
+ arena_run_trim_head(arena, chunk, head_run, alloc_size,
alloc_size - leadsize);
}
if (trailsize != 0) {
- arena_run_trim_tail(tsdn, arena, chunk, run, usize + large_pad +
+ arena_run_trim_tail(arena, chunk, run, usize + large_pad +
trailsize, usize + large_pad, false);
}
if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
@@ -2758,8 +2320,8 @@ arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
run_ind) != 0);
assert(decommitted); /* Cause of OOM. */
- arena_run_dalloc(tsdn, arena, run, dirty, false, decommitted);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ arena_run_dalloc(arena, run, dirty, false, decommitted);
+ malloc_mutex_unlock(&arena->lock);
return (NULL);
}
ret = arena_miscelm_to_rpages(miscelm);
@@ -2774,20 +2336,19 @@ arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
arena->stats.lstats[index].nrequests++;
arena->stats.lstats[index].curruns++;
}
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
if (config_fill && !zero) {
if (unlikely(opt_junk_alloc))
- memset(ret, JEMALLOC_ALLOC_JUNK, usize);
+ memset(ret, 0xa5, usize);
else if (unlikely(opt_zero))
memset(ret, 0, usize);
}
- arena_decay_tick(tsdn, arena);
return (ret);
}
void *
-arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
+arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
bool zero, tcache_t *tcache)
{
void *ret;
@@ -2795,8 +2356,7 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
&& (usize & PAGE_MASK) == 0))) {
/* Small; alignment doesn't require special run placement. */
- ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
- tcache, true);
+ ret = arena_malloc(tsd, arena, usize, zero, tcache);
} else if (usize <= large_maxclass && alignment <= PAGE) {
/*
* Large; alignment doesn't require special run placement.
@@ -2804,25 +2364,25 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
* the base of the run, so do some bit manipulation to retrieve
* the base.
*/
- ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
- tcache, true);
+ ret = arena_malloc(tsd, arena, usize, zero, tcache);
if (config_cache_oblivious)
ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
} else {
if (likely(usize <= large_maxclass)) {
- ret = arena_palloc_large(tsdn, arena, usize, alignment,
+ ret = arena_palloc_large(tsd, arena, usize, alignment,
zero);
} else if (likely(alignment <= chunksize))
- ret = huge_malloc(tsdn, arena, usize, zero);
+ ret = huge_malloc(tsd, arena, usize, zero, tcache);
else {
- ret = huge_palloc(tsdn, arena, usize, alignment, zero);
+ ret = huge_palloc(tsd, arena, usize, alignment, zero,
+ tcache);
}
}
return (ret);
}
void
-arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size)
+arena_prof_promoted(const void *ptr, size_t size)
{
arena_chunk_t *chunk;
size_t pageind;
@@ -2831,8 +2391,8 @@ arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size)
cassert(config_prof);
assert(ptr != NULL);
assert(CHUNK_ADDR2BASE(ptr) != ptr);
- assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
- assert(isalloc(tsdn, ptr, true) == LARGE_MINCLASS);
+ assert(isalloc(ptr, false) == LARGE_MINCLASS);
+ assert(isalloc(ptr, true) == LARGE_MINCLASS);
assert(size <= SMALL_MAXCLASS);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
@@ -2841,8 +2401,8 @@ arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size)
assert(binind < NBINS);
arena_mapbits_large_binind_set(chunk, pageind, binind);
- assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
- assert(isalloc(tsdn, ptr, true) == size);
+ assert(isalloc(ptr, false) == LARGE_MINCLASS);
+ assert(isalloc(ptr, true) == size);
}
static void
@@ -2858,51 +2418,48 @@ arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
&chunk->node), bin);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
- /*
- * The following block's conditional is necessary because if the
- * run only contains one region, then it never gets inserted
- * into the non-full runs tree.
- */
if (bin_info->nregs != 1) {
- arena_chunk_map_misc_t *miscelm =
- arena_run_to_miscelm(run);
-
- arena_run_heap_remove(&bin->runs, miscelm);
+ /*
+ * This block's conditional is necessary because if the
+ * run only contains one region, then it never gets
+ * inserted into the non-full runs tree.
+ */
+ arena_bin_runs_remove(bin, run);
}
}
}
static void
-arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- arena_run_t *run, arena_bin_t *bin)
+arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+ arena_bin_t *bin)
{
assert(run != bin->runcur);
+ assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) ==
+ NULL);
- malloc_mutex_unlock(tsdn, &bin->lock);
+ malloc_mutex_unlock(&bin->lock);
/******************************/
- malloc_mutex_lock(tsdn, &arena->lock);
- arena_run_dalloc(tsdn, arena, run, true, false, false);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
+ arena_run_dalloc_decommit(arena, chunk, run);
+ malloc_mutex_unlock(&arena->lock);
/****************************/
- malloc_mutex_lock(tsdn, &bin->lock);
+ malloc_mutex_lock(&bin->lock);
if (config_stats)
bin->stats.curruns--;
}
static void
-arena_bin_lower_run(arena_t *arena, arena_run_t *run, arena_bin_t *bin)
+arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+ arena_bin_t *bin)
{
/*
- * Make sure that if bin->runcur is non-NULL, it refers to the
- * oldest/lowest non-full run. It is okay to NULL runcur out rather
- * than proactively keeping it pointing at the oldest/lowest non-full
- * run.
+ * Make sure that if bin->runcur is non-NULL, it refers to the lowest
+ * non-full run. It is okay to NULL runcur out rather than proactively
+ * keeping it pointing at the lowest non-full run.
*/
- if (bin->runcur != NULL &&
- arena_snad_comp(arena_run_to_miscelm(bin->runcur),
- arena_run_to_miscelm(run)) > 0) {
+ if ((uintptr_t)run < (uintptr_t)bin->runcur) {
/* Switch runcur. */
if (bin->runcur->nfree > 0)
arena_bin_runs_insert(bin, bin->runcur);
@@ -2914,8 +2471,8 @@ arena_bin_lower_run(arena_t *arena, arena_run_t *run, arena_bin_t *bin)
}
static void
-arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- void *ptr, arena_chunk_map_bits_t *bitselm, bool junked)
+arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ arena_chunk_map_bits_t *bitselm, bool junked)
{
size_t pageind, rpages_ind;
arena_run_t *run;
@@ -2925,7 +2482,7 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
- run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
+ run = &arena_miscelm_get(chunk, rpages_ind)->run;
binind = run->binind;
bin = &arena->bins[binind];
bin_info = &arena_bin_info[binind];
@@ -2936,9 +2493,9 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
arena_run_reg_dalloc(run, ptr);
if (run->nfree == bin_info->nregs) {
arena_dissociate_bin_run(chunk, run, bin);
- arena_dalloc_bin_run(tsdn, arena, chunk, run, bin);
+ arena_dalloc_bin_run(arena, chunk, run, bin);
} else if (run->nfree == 1 && run != bin->runcur)
- arena_bin_lower_run(arena, run, bin);
+ arena_bin_lower_run(arena, chunk, run, bin);
if (config_stats) {
bin->stats.ndalloc++;
@@ -2947,15 +2504,15 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
}
void
-arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
- arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm)
+arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ arena_chunk_map_bits_t *bitselm)
{
- arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, true);
+ arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, true);
}
void
-arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr,
+arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind, arena_chunk_map_bits_t *bitselm)
{
arena_run_t *run;
@@ -2963,16 +2520,16 @@ arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t rpages_ind;
rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
- run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
+ run = &arena_miscelm_get(chunk, rpages_ind)->run;
bin = &arena->bins[run->binind];
- malloc_mutex_lock(tsdn, &bin->lock);
- arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, false);
- malloc_mutex_unlock(tsdn, &bin->lock);
+ malloc_mutex_lock(&bin->lock);
+ arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false);
+ malloc_mutex_unlock(&bin->lock);
}
void
-arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- void *ptr, size_t pageind)
+arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ size_t pageind)
{
arena_chunk_map_bits_t *bitselm;
@@ -2981,36 +2538,34 @@ arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
pageind)) != BININD_INVALID);
}
- bitselm = arena_bitselm_get_mutable(chunk, pageind);
- arena_dalloc_bin(tsdn, arena, chunk, ptr, pageind, bitselm);
- arena_decay_tick(tsdn, arena);
+ bitselm = arena_bitselm_get(chunk, pageind);
+ arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
-#define arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large)
+#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl)
#endif
void
arena_dalloc_junk_large(void *ptr, size_t usize)
{
if (config_fill && unlikely(opt_junk_free))
- memset(ptr, JEMALLOC_FREE_JUNK, usize);
+ memset(ptr, 0x5a, usize);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
arena_dalloc_junk_large_t *arena_dalloc_junk_large =
- JEMALLOC_N(n_arena_dalloc_junk_large);
+ JEMALLOC_N(arena_dalloc_junk_large_impl);
#endif
static void
-arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena,
- arena_chunk_t *chunk, void *ptr, bool junked)
+arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, bool junked)
{
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
- pageind);
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
arena_run_t *run = &miscelm->run;
if (config_fill || config_stats) {
@@ -3029,35 +2584,32 @@ arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena,
}
}
- arena_run_dalloc(tsdn, arena, run, true, false, false);
+ arena_run_dalloc_decommit(arena, chunk, run);
}
void
-arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
- arena_chunk_t *chunk, void *ptr)
+arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
+ void *ptr)
{
- arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, true);
+ arena_dalloc_large_locked_impl(arena, chunk, ptr, true);
}
void
-arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- void *ptr)
+arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
{
- malloc_mutex_lock(tsdn, &arena->lock);
- arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, false);
- malloc_mutex_unlock(tsdn, &arena->lock);
- arena_decay_tick(tsdn, arena);
+ malloc_mutex_lock(&arena->lock);
+ arena_dalloc_large_locked_impl(arena, chunk, ptr, false);
+ malloc_mutex_unlock(&arena->lock);
}
static void
-arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- void *ptr, size_t oldsize, size_t size)
+arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ size_t oldsize, size_t size)
{
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
- pageind);
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
arena_run_t *run = &miscelm->run;
assert(size < oldsize);
@@ -3066,8 +2618,8 @@ arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
* Shrink the run, and make trailing pages available for other
* allocations.
*/
- malloc_mutex_lock(tsdn, &arena->lock);
- arena_run_trim_tail(tsdn, arena, chunk, run, oldsize + large_pad, size +
+ malloc_mutex_lock(&arena->lock);
+ arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size +
large_pad, true);
if (config_stats) {
szind_t oldindex = size2index(oldsize) - NBINS;
@@ -3085,12 +2637,12 @@ arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
arena->stats.lstats[index].nrequests++;
arena->stats.lstats[index].curruns++;
}
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
}
static bool
-arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
+arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
{
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t npages = (oldsize + large_pad) >> LG_PAGE;
@@ -3100,7 +2652,7 @@ arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
large_pad);
/* Try to extend the run. */
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
pageind+npages) != 0)
goto label_fail;
@@ -3123,7 +2675,7 @@ arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
if (splitsize == 0)
goto label_fail;
- run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run;
+ run = &arena_miscelm_get(chunk, pageind+npages)->run;
if (arena_run_split_large(arena, run, splitsize, zero))
goto label_fail;
@@ -3131,16 +2683,10 @@ arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
/*
* Zero the trailing bytes of the original allocation's
* last page, since they are in an indeterminate state.
- * There will always be trailing bytes, because ptr's
- * offset from the beginning of the run is a multiple of
- * CACHELINE in [0 .. PAGE).
*/
- void *zbase = (void *)((uintptr_t)ptr + oldsize);
- void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
- PAGE));
- size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
- assert(nzero > 0);
- memset(zbase, 0, nzero);
+ assert(PAGE_CEILING(oldsize) == oldsize);
+ memset((void *)((uintptr_t)ptr + oldsize), 0,
+ PAGE_CEILING((uintptr_t)ptr) - (uintptr_t)ptr);
}
size = oldsize + splitsize;
@@ -3180,24 +2726,24 @@ arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
arena->stats.lstats[index].nrequests++;
arena->stats.lstats[index].curruns++;
}
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
return (false);
}
label_fail:
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
return (true);
}
#ifdef JEMALLOC_JET
#undef arena_ralloc_junk_large
-#define arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large)
+#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl)
#endif
static void
arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
{
if (config_fill && unlikely(opt_junk_free)) {
- memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK,
+ memset((void *)((uintptr_t)ptr + usize), 0x5a,
old_usize - usize);
}
}
@@ -3205,7 +2751,7 @@ arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
#undef arena_ralloc_junk_large
#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
arena_ralloc_junk_large_t *arena_ralloc_junk_large =
- JEMALLOC_N(n_arena_ralloc_junk_large);
+ JEMALLOC_N(arena_ralloc_junk_large_impl);
#endif
/*
@@ -3213,7 +2759,7 @@ arena_ralloc_junk_large_t *arena_ralloc_junk_large =
* always fail if growing an object, and the following run is already in use.
*/
static bool
-arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
+arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min,
size_t usize_max, bool zero)
{
arena_chunk_t *chunk;
@@ -3228,16 +2774,15 @@ arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
arena = extent_node_arena_get(&chunk->node);
if (oldsize < usize_max) {
- bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr,
- oldsize, usize_min, usize_max, zero);
+ bool ret = arena_ralloc_large_grow(arena, chunk, ptr, oldsize,
+ usize_min, usize_max, zero);
if (config_fill && !ret && !zero) {
if (unlikely(opt_junk_alloc)) {
- memset((void *)((uintptr_t)ptr + oldsize),
- JEMALLOC_ALLOC_JUNK,
- isalloc(tsdn, ptr, config_prof) - oldsize);
+ memset((void *)((uintptr_t)ptr + oldsize), 0xa5,
+ isalloc(ptr, config_prof) - oldsize);
} else if (unlikely(opt_zero)) {
memset((void *)((uintptr_t)ptr + oldsize), 0,
- isalloc(tsdn, ptr, config_prof) - oldsize);
+ isalloc(ptr, config_prof) - oldsize);
}
}
return (ret);
@@ -3246,27 +2791,19 @@ arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
assert(oldsize > usize_max);
	/* Fill before shrinking in order to avoid a race. */
arena_ralloc_junk_large(ptr, oldsize, usize_max);
- arena_ralloc_large_shrink(tsdn, arena, chunk, ptr, oldsize, usize_max);
+ arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, usize_max);
return (false);
}
bool
-arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
- size_t extra, bool zero)
+arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
+ bool zero)
{
size_t usize_min, usize_max;
- /* Calls with non-zero extra had to clamp extra. */
- assert(extra == 0 || size + extra <= HUGE_MAXCLASS);
-
- if (unlikely(size > HUGE_MAXCLASS))
- return (true);
-
usize_min = s2u(size);
usize_max = s2u(size + extra);
if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
- arena_chunk_t *chunk;
-
/*
* Avoid moving the allocation if the size class can be left the
* same.
@@ -3274,39 +2811,37 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
if (oldsize <= SMALL_MAXCLASS) {
assert(arena_bin_info[size2index(oldsize)].reg_size ==
oldsize);
- if ((usize_max > SMALL_MAXCLASS ||
- size2index(usize_max) != size2index(oldsize)) &&
- (size > oldsize || usize_max < oldsize))
- return (true);
+ if ((usize_max <= SMALL_MAXCLASS &&
+ size2index(usize_max) == size2index(oldsize)) ||
+ (size <= oldsize && usize_max >= oldsize))
+ return (false);
} else {
- if (usize_max <= SMALL_MAXCLASS)
- return (true);
- if (arena_ralloc_large(tsdn, ptr, oldsize, usize_min,
- usize_max, zero))
- return (true);
+ if (usize_max > SMALL_MAXCLASS) {
+ if (!arena_ralloc_large(ptr, oldsize, usize_min,
+ usize_max, zero))
+ return (false);
+ }
}
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- arena_decay_tick(tsdn, extent_node_arena_get(&chunk->node));
- return (false);
+ /* Reallocation would require a move. */
+ return (true);
} else {
- return (huge_ralloc_no_move(tsdn, ptr, oldsize, usize_min,
- usize_max, zero));
+ return (huge_ralloc_no_move(ptr, oldsize, usize_min, usize_max,
+ zero));
}
}
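
/*
 * The small-class branch above boils down to: reallocation can stay in
 * place iff the old and new sizes share a size class. A toy sketch of
 * that test, using made-up 16-byte-spaced classes rather than jemalloc's
 * real size2index() table.
 */
#include <assert.h>
#include <stddef.h>

static size_t
toy_index(size_t size)
{

	return ((size + 15) / 16);	/* Smallest 16-byte class >= size. */
}

static int
toy_ralloc_no_move(size_t oldsize, size_t size)
{

	/* false means "no move needed", matching the convention above. */
	return (toy_index(size) != toy_index(oldsize));
}

int
main(void)
{

	assert(toy_ralloc_no_move(40, 45) == 0);	/* Both in the 48 class. */
	assert(toy_ralloc_no_move(40, 50) != 0);	/* 48 -> 64: must move. */
	return (0);
}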
static void *
-arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
+arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
size_t alignment, bool zero, tcache_t *tcache)
{
if (alignment == 0)
- return (arena_malloc(tsdn, arena, usize, size2index(usize),
- zero, tcache, true));
+ return (arena_malloc(tsd, arena, usize, zero, tcache));
usize = sa2u(usize, alignment);
- if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
+ if (usize == 0)
return (NULL);
- return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
+ return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
}
void *
@@ -3317,15 +2852,14 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t usize;
usize = s2u(size);
- if (unlikely(usize == 0 || size > HUGE_MAXCLASS))
+ if (usize == 0)
return (NULL);
if (likely(usize <= large_maxclass)) {
size_t copysize;
/* Try to avoid moving the allocation. */
- if (!arena_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, 0,
- zero))
+ if (!arena_ralloc_no_move(ptr, oldsize, usize, 0, zero))
return (ptr);
/*
@@ -3333,8 +2867,8 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
* the object. In that case, fall back to allocating new space
* and copying.
*/
- ret = arena_ralloc_move_helper(tsd_tsdn(tsd), arena, usize,
- alignment, zero, tcache);
+ ret = arena_ralloc_move_helper(tsd, arena, usize, alignment,
+ zero, tcache);
if (ret == NULL)
return (NULL);
@@ -3346,7 +2880,7 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
copysize = (usize < oldsize) ? usize : oldsize;
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
memcpy(ret, ptr, copysize);
- isqalloc(tsd, ptr, oldsize, tcache, true);
+ isqalloc(tsd, ptr, oldsize, tcache);
} else {
ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
zero, tcache);
@@ -3355,25 +2889,25 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
}
dss_prec_t
-arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena)
+arena_dss_prec_get(arena_t *arena)
{
dss_prec_t ret;
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
ret = arena->dss_prec;
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
return (ret);
}
bool
-arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec)
+arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
{
if (!have_dss)
return (dss_prec != dss_prec_disabled);
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
arena->dss_prec = dss_prec;
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
return (false);
}
@@ -3388,76 +2922,27 @@ bool
arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
{
- if (opt_purge != purge_mode_ratio)
- return (true);
if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
return (true);
atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult);
return (false);
}
-ssize_t
-arena_decay_time_default_get(void)
-{
-
- return ((ssize_t)atomic_read_z((size_t *)&decay_time_default));
-}
-
-bool
-arena_decay_time_default_set(ssize_t decay_time)
-{
-
- if (opt_purge != purge_mode_decay)
- return (true);
- if (!arena_decay_time_valid(decay_time))
- return (true);
- atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time);
- return (false);
-}
-
-static void
-arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
- const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
- size_t *nactive, size_t *ndirty)
-{
-
- *nthreads += arena_nthreads_get(arena, false);
- *dss = dss_prec_names[arena->dss_prec];
- *lg_dirty_mult = arena->lg_dirty_mult;
- *decay_time = arena->decay.time;
- *nactive += arena->nactive;
- *ndirty += arena->ndirty;
-}
-
void
-arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
- const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
- size_t *nactive, size_t *ndirty)
-{
-
- malloc_mutex_lock(tsdn, &arena->lock);
- arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
- decay_time, nactive, ndirty);
- malloc_mutex_unlock(tsdn, &arena->lock);
-}
-
-void
-arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
- const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
+arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult,
size_t *nactive, size_t *ndirty, arena_stats_t *astats,
malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
malloc_huge_stats_t *hstats)
{
unsigned i;
- cassert(config_stats);
-
- malloc_mutex_lock(tsdn, &arena->lock);
- arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
- decay_time, nactive, ndirty);
+ malloc_mutex_lock(&arena->lock);
+ *dss = dss_prec_names[arena->dss_prec];
+ *lg_dirty_mult = arena->lg_dirty_mult;
+ *nactive += arena->nactive;
+ *ndirty += arena->ndirty;
astats->mapped += arena->stats.mapped;
- astats->retained += arena->stats.retained;
astats->npurge += arena->stats.npurge;
astats->nmadvise += arena->stats.nmadvise;
astats->purged += arena->stats.purged;
@@ -3483,12 +2968,12 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
}
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
- malloc_mutex_lock(tsdn, &bin->lock);
+ malloc_mutex_lock(&bin->lock);
bstats[i].nmalloc += bin->stats.nmalloc;
bstats[i].ndalloc += bin->stats.ndalloc;
bstats[i].nrequests += bin->stats.nrequests;
@@ -3500,61 +2985,33 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
bstats[i].nruns += bin->stats.nruns;
bstats[i].reruns += bin->stats.reruns;
bstats[i].curruns += bin->stats.curruns;
- malloc_mutex_unlock(tsdn, &bin->lock);
+ malloc_mutex_unlock(&bin->lock);
}
}
-unsigned
-arena_nthreads_get(arena_t *arena, bool internal)
-{
-
- return (atomic_read_u(&arena->nthreads[internal]));
-}
-
-void
-arena_nthreads_inc(arena_t *arena, bool internal)
-{
-
- atomic_add_u(&arena->nthreads[internal], 1);
-}
-
-void
-arena_nthreads_dec(arena_t *arena, bool internal)
-{
-
- atomic_sub_u(&arena->nthreads[internal], 1);
-}
-
-size_t
-arena_extent_sn_next(arena_t *arena)
-{
-
- return (atomic_add_z(&arena->extent_sn_next, 1) - 1);
-}
-
arena_t *
-arena_new(tsdn_t *tsdn, unsigned ind)
+arena_new(unsigned ind)
{
arena_t *arena;
unsigned i;
+ arena_bin_t *bin;
/*
* Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
* because there is no way to clean up if base_alloc() OOMs.
*/
if (config_stats) {
- arena = (arena_t *)base_alloc(tsdn,
- CACHELINE_CEILING(sizeof(arena_t)) +
- QUANTUM_CEILING((nlclasses * sizeof(malloc_large_stats_t)))
- + (nhclasses * sizeof(malloc_huge_stats_t)));
+ arena = (arena_t *)base_alloc(CACHELINE_CEILING(sizeof(arena_t))
+ + QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t) +
+ nhclasses) * sizeof(malloc_huge_stats_t));
} else
- arena = (arena_t *)base_alloc(tsdn, sizeof(arena_t));
+ arena = (arena_t *)base_alloc(sizeof(arena_t));
if (arena == NULL)
return (NULL);
arena->ind = ind;
- arena->nthreads[0] = arena->nthreads[1] = 0;
- if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA))
+ arena->nthreads = 0;
+ if (malloc_mutex_init(&arena->lock))
return (NULL);
if (config_stats) {
@@ -3584,15 +3041,11 @@ arena_new(tsdn_t *tsdn, unsigned ind)
* deterministic seed.
*/
arena->offset_state = config_debug ? ind :
- (size_t)(uintptr_t)arena;
+ (uint64_t)(uintptr_t)arena;
}
arena->dss_prec = chunk_dss_prec_get();
- ql_new(&arena->achunks);
-
- arena->extent_sn_next = 0;
-
arena->spare = NULL;
arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
@@ -3600,42 +3053,33 @@ arena_new(tsdn_t *tsdn, unsigned ind)
arena->nactive = 0;
arena->ndirty = 0;
- for (i = 0; i < NPSIZES; i++)
- arena_run_heap_new(&arena->runs_avail[i]);
-
+ arena_avail_tree_new(&arena->runs_avail);
qr_new(&arena->runs_dirty, rd_link);
qr_new(&arena->chunks_cache, cc_link);
- if (opt_purge == purge_mode_decay)
- arena_decay_init(arena, arena_decay_time_default_get());
-
ql_new(&arena->huge);
- if (malloc_mutex_init(&arena->huge_mtx, "arena_huge",
- WITNESS_RANK_ARENA_HUGE))
+ if (malloc_mutex_init(&arena->huge_mtx))
return (NULL);
- extent_tree_szsnad_new(&arena->chunks_szsnad_cached);
+ extent_tree_szad_new(&arena->chunks_szad_cached);
extent_tree_ad_new(&arena->chunks_ad_cached);
- extent_tree_szsnad_new(&arena->chunks_szsnad_retained);
+ extent_tree_szad_new(&arena->chunks_szad_retained);
extent_tree_ad_new(&arena->chunks_ad_retained);
- if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
- WITNESS_RANK_ARENA_CHUNKS))
+ if (malloc_mutex_init(&arena->chunks_mtx))
return (NULL);
ql_new(&arena->node_cache);
- if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache",
- WITNESS_RANK_ARENA_NODE_CACHE))
+ if (malloc_mutex_init(&arena->node_cache_mtx))
return (NULL);
arena->chunk_hooks = chunk_hooks_default;
/* Initialize bins. */
for (i = 0; i < NBINS; i++) {
- arena_bin_t *bin = &arena->bins[i];
- if (malloc_mutex_init(&bin->lock, "arena_bin",
- WITNESS_RANK_ARENA_BIN))
+ bin = &arena->bins[i];
+ if (malloc_mutex_init(&bin->lock))
return (NULL);
bin->runcur = NULL;
- arena_run_heap_new(&bin->runs);
+ arena_run_tree_new(&bin->runs);
if (config_stats)
memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
}
@@ -3667,7 +3111,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
* be twice as large in order to maintain alignment.
*/
if (config_fill && unlikely(opt_redzone)) {
- size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1);
+ size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) -
+ 1);
if (align_min <= REDZONE_MINSIZE) {
bin_info->redzone_size = REDZONE_MINSIZE;
pad_size = 0;
@@ -3687,19 +3132,18 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
* size).
*/
try_run_size = PAGE;
- try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
+ try_nregs = try_run_size / bin_info->reg_size;
do {
perfect_run_size = try_run_size;
perfect_nregs = try_nregs;
try_run_size += PAGE;
- try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
+ try_nregs = try_run_size / bin_info->reg_size;
} while (perfect_run_size != perfect_nregs * bin_info->reg_size);
assert(perfect_nregs <= RUN_MAXREGS);
actual_run_size = perfect_run_size;
- actual_nregs = (uint32_t)((actual_run_size - pad_size) /
- bin_info->reg_interval);
+ actual_nregs = (actual_run_size - pad_size) / bin_info->reg_interval;
/*
* Redzones can require enough padding that not even a single region can
@@ -3711,8 +3155,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
assert(config_fill && unlikely(opt_redzone));
actual_run_size += PAGE;
- actual_nregs = (uint32_t)((actual_run_size - pad_size) /
- bin_info->reg_interval);
+ actual_nregs = (actual_run_size - pad_size) /
+ bin_info->reg_interval;
}
/*
@@ -3720,8 +3164,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
*/
while (actual_run_size > arena_maxrun) {
actual_run_size -= PAGE;
- actual_nregs = (uint32_t)((actual_run_size - pad_size) /
- bin_info->reg_interval);
+ actual_nregs = (actual_run_size - pad_size) /
+ bin_info->reg_interval;
}
assert(actual_nregs > 0);
assert(actual_run_size == s2u(actual_run_size));
@@ -3729,8 +3173,11 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
/* Copy final settings. */
bin_info->run_size = actual_run_size;
bin_info->nregs = actual_nregs;
- bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs *
- bin_info->reg_interval) - pad_size + bin_info->redzone_size);
+ bin_info->reg0_offset = actual_run_size - (actual_nregs *
+ bin_info->reg_interval) - pad_size + bin_info->redzone_size;
+
+ if (actual_run_size > small_maxrun)
+ small_maxrun = actual_run_size;
assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
* bin_info->reg_interval) + pad_size == bin_info->run_size);
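
/*
 * The do/while search in bin_info_run_size_calc() above finds the smallest
 * page multiple that is also an exact multiple of the region size. A
 * standalone sketch of that loop follows; PAGE is assumed to be 4096 here.
 */
#include <stddef.h>
#include <stdio.h>

#define	PAGE	((size_t)4096)

static size_t
perfect_run_size(size_t reg_size)
{
	size_t try_run_size = PAGE;
	size_t try_nregs = try_run_size / reg_size;
	size_t run_size, nregs;

	do {
		run_size = try_run_size;
		nregs = try_nregs;
		try_run_size += PAGE;
		try_nregs = try_run_size / reg_size;
	} while (run_size != nregs * reg_size);
	return (run_size);
}

int
main(void)
{

	printf("%zu\n", perfect_run_size(96));	/* 12288: 3 pages, 128 regs. */
	printf("%zu\n", perfect_run_size(80));	/* 20480: 5 pages, 256 regs. */
	return (0);
}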
@@ -3747,7 +3194,7 @@ bin_info_init(void)
bin_info_run_size_calc(bin_info); \
bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
#define BIN_INFO_INIT_bin_no(index, size)
-#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
+#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
SIZE_CLASSES
#undef BIN_INFO_INIT_bin_yes
@@ -3755,13 +3202,38 @@ bin_info_init(void)
#undef SC
}
-void
+static bool
+small_run_size_init(void)
+{
+
+ assert(small_maxrun != 0);
+
+ small_run_tab = (bool *)base_alloc(sizeof(bool) * (small_maxrun >>
+ LG_PAGE));
+ if (small_run_tab == NULL)
+ return (true);
+
+#define TAB_INIT_bin_yes(index, size) { \
+ arena_bin_info_t *bin_info = &arena_bin_info[index]; \
+ small_run_tab[bin_info->run_size >> LG_PAGE] = true; \
+ }
+#define TAB_INIT_bin_no(index, size)
+#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
+ TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
+ SIZE_CLASSES
+#undef TAB_INIT_bin_yes
+#undef TAB_INIT_bin_no
+#undef SC
+
+ return (false);
+}
+
+bool
arena_boot(void)
{
unsigned i;
arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
- arena_decay_time_default_set(opt_decay_time);
/*
* Compute the header size such that it is large enough to contain the
@@ -3803,61 +3275,44 @@ arena_boot(void)
nhclasses = NSIZES - nlclasses - NBINS;
bin_info_init();
+ return (small_run_size_init());
}
void
-arena_prefork0(tsdn_t *tsdn, arena_t *arena)
-{
-
- malloc_mutex_prefork(tsdn, &arena->lock);
-}
-
-void
-arena_prefork1(tsdn_t *tsdn, arena_t *arena)
-{
-
- malloc_mutex_prefork(tsdn, &arena->chunks_mtx);
-}
-
-void
-arena_prefork2(tsdn_t *tsdn, arena_t *arena)
-{
-
- malloc_mutex_prefork(tsdn, &arena->node_cache_mtx);
-}
-
-void
-arena_prefork3(tsdn_t *tsdn, arena_t *arena)
+arena_prefork(arena_t *arena)
{
unsigned i;
+ malloc_mutex_prefork(&arena->lock);
+ malloc_mutex_prefork(&arena->huge_mtx);
+ malloc_mutex_prefork(&arena->chunks_mtx);
+ malloc_mutex_prefork(&arena->node_cache_mtx);
for (i = 0; i < NBINS; i++)
- malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
- malloc_mutex_prefork(tsdn, &arena->huge_mtx);
+ malloc_mutex_prefork(&arena->bins[i].lock);
}
void
-arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
+arena_postfork_parent(arena_t *arena)
{
unsigned i;
- malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx);
for (i = 0; i < NBINS; i++)
- malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
- malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx);
- malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx);
- malloc_mutex_postfork_parent(tsdn, &arena->lock);
+ malloc_mutex_postfork_parent(&arena->bins[i].lock);
+ malloc_mutex_postfork_parent(&arena->node_cache_mtx);
+ malloc_mutex_postfork_parent(&arena->chunks_mtx);
+ malloc_mutex_postfork_parent(&arena->huge_mtx);
+ malloc_mutex_postfork_parent(&arena->lock);
}
void
-arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
+arena_postfork_child(arena_t *arena)
{
unsigned i;
- malloc_mutex_postfork_child(tsdn, &arena->huge_mtx);
for (i = 0; i < NBINS; i++)
- malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
- malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx);
- malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx);
- malloc_mutex_postfork_child(tsdn, &arena->lock);
+ malloc_mutex_postfork_child(&arena->bins[i].lock);
+ malloc_mutex_postfork_child(&arena->node_cache_mtx);
+ malloc_mutex_postfork_child(&arena->chunks_mtx);
+ malloc_mutex_postfork_child(&arena->huge_mtx);
+ malloc_mutex_postfork_child(&arena->lock);
}
diff --git a/deps/jemalloc/src/base.c b/deps/jemalloc/src/base.c
index 5681a3f36..7cdcfed86 100644
--- a/deps/jemalloc/src/base.c
+++ b/deps/jemalloc/src/base.c
@@ -5,8 +5,7 @@
/* Data. */
static malloc_mutex_t base_mtx;
-static size_t base_extent_sn_next;
-static extent_tree_t base_avail_szsnad;
+static extent_tree_t base_avail_szad;
static extent_node_t *base_nodes;
static size_t base_allocated;
static size_t base_resident;
@@ -14,13 +13,12 @@ static size_t base_mapped;
/******************************************************************************/
+/* base_mtx must be held. */
static extent_node_t *
-base_node_try_alloc(tsdn_t *tsdn)
+base_node_try_alloc(void)
{
extent_node_t *node;
- malloc_mutex_assert_owner(tsdn, &base_mtx);
-
if (base_nodes == NULL)
return (NULL);
node = base_nodes;
@@ -29,42 +27,33 @@ base_node_try_alloc(tsdn_t *tsdn)
return (node);
}
+/* base_mtx must be held. */
static void
-base_node_dalloc(tsdn_t *tsdn, extent_node_t *node)
+base_node_dalloc(extent_node_t *node)
{
- malloc_mutex_assert_owner(tsdn, &base_mtx);
-
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
*(extent_node_t **)node = base_nodes;
base_nodes = node;
}
-static void
-base_extent_node_init(extent_node_t *node, void *addr, size_t size)
-{
- size_t sn = atomic_add_z(&base_extent_sn_next, 1) - 1;
-
- extent_node_init(node, NULL, addr, size, sn, true, true);
-}
-
+/* base_mtx must be held. */
static extent_node_t *
-base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
+base_chunk_alloc(size_t minsize)
{
extent_node_t *node;
size_t csize, nsize;
void *addr;
- malloc_mutex_assert_owner(tsdn, &base_mtx);
assert(minsize != 0);
- node = base_node_try_alloc(tsdn);
+ node = base_node_try_alloc();
/* Allocate enough space to also carve a node out if necessary. */
nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
csize = CHUNK_CEILING(minsize + nsize);
addr = chunk_alloc_base(csize);
if (addr == NULL) {
if (node != NULL)
- base_node_dalloc(tsdn, node);
+ base_node_dalloc(node);
return (NULL);
}
base_mapped += csize;
@@ -77,7 +66,7 @@ base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
base_resident += PAGE_CEILING(nsize);
}
}
- base_extent_node_init(node, addr, csize);
+ extent_node_init(node, NULL, addr, csize, true, true);
return (node);
}
@@ -87,7 +76,7 @@ base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
* physical memory usage.
*/
void *
-base_alloc(tsdn_t *tsdn, size_t size)
+base_alloc(size_t size)
{
void *ret;
size_t csize, usize;
@@ -101,15 +90,15 @@ base_alloc(tsdn_t *tsdn, size_t size)
csize = CACHELINE_CEILING(size);
usize = s2u(csize);
- extent_node_init(&key, NULL, NULL, usize, 0, false, false);
- malloc_mutex_lock(tsdn, &base_mtx);
- node = extent_tree_szsnad_nsearch(&base_avail_szsnad, &key);
+ extent_node_init(&key, NULL, NULL, usize, false, false);
+ malloc_mutex_lock(&base_mtx);
+ node = extent_tree_szad_nsearch(&base_avail_szad, &key);
if (node != NULL) {
/* Use existing space. */
- extent_tree_szsnad_remove(&base_avail_szsnad, node);
+ extent_tree_szad_remove(&base_avail_szad, node);
} else {
/* Try to allocate more space. */
- node = base_chunk_alloc(tsdn, csize);
+ node = base_chunk_alloc(csize);
}
if (node == NULL) {
ret = NULL;
@@ -120,9 +109,9 @@ base_alloc(tsdn_t *tsdn, size_t size)
if (extent_node_size_get(node) > csize) {
extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
extent_node_size_set(node, extent_node_size_get(node) - csize);
- extent_tree_szsnad_insert(&base_avail_szsnad, node);
+ extent_tree_szad_insert(&base_avail_szad, node);
} else
- base_node_dalloc(tsdn, node);
+ base_node_dalloc(node);
if (config_stats) {
base_allocated += csize;
/*
@@ -134,54 +123,52 @@ base_alloc(tsdn_t *tsdn, size_t size)
}
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
label_return:
- malloc_mutex_unlock(tsdn, &base_mtx);
+ malloc_mutex_unlock(&base_mtx);
return (ret);
}
void
-base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
- size_t *mapped)
+base_stats_get(size_t *allocated, size_t *resident, size_t *mapped)
{
- malloc_mutex_lock(tsdn, &base_mtx);
+ malloc_mutex_lock(&base_mtx);
assert(base_allocated <= base_resident);
assert(base_resident <= base_mapped);
*allocated = base_allocated;
*resident = base_resident;
*mapped = base_mapped;
- malloc_mutex_unlock(tsdn, &base_mtx);
+ malloc_mutex_unlock(&base_mtx);
}
bool
base_boot(void)
{
- if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE))
+ if (malloc_mutex_init(&base_mtx))
return (true);
- base_extent_sn_next = 0;
- extent_tree_szsnad_new(&base_avail_szsnad);
+ extent_tree_szad_new(&base_avail_szad);
base_nodes = NULL;
return (false);
}
void
-base_prefork(tsdn_t *tsdn)
+base_prefork(void)
{
- malloc_mutex_prefork(tsdn, &base_mtx);
+ malloc_mutex_prefork(&base_mtx);
}
void
-base_postfork_parent(tsdn_t *tsdn)
+base_postfork_parent(void)
{
- malloc_mutex_postfork_parent(tsdn, &base_mtx);
+ malloc_mutex_postfork_parent(&base_mtx);
}
void
-base_postfork_child(tsdn_t *tsdn)
+base_postfork_child(void)
{
- malloc_mutex_postfork_child(tsdn, &base_mtx);
+ malloc_mutex_postfork_child(&base_mtx);
}
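With the serial-number ("sn") dimension reverted away, base.c keeps its free metadata extents in a tree ordered by size, then address ("szad"), so base_alloc() can nsearch() for the smallest extent that still fits, preferring the lowest address among equals, and split off the remainder. A hypothetical comparator sketching that ordering (the real code goes through jemalloc's rb-tree macros):

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	void	*addr;
	size_t	 size;
} extent_sketch_t;

/* Size-major, address-minor: an nsearch() for a given size lands on the
 * lowest-addressed extent among the smallest ones that fit. */
static int
extent_szad_comp(const extent_sketch_t *a, const extent_sketch_t *b)
{
	if (a->size != b->size)
		return ((a->size < b->size) ? -1 : 1);
	if (a->addr != b->addr)
		return (((uintptr_t)a->addr < (uintptr_t)b->addr) ? -1 : 1);
	return (0);
}

int
main(void)
{
	extent_sketch_t a = { (void *)0x1000, 4096 };
	extent_sketch_t b = { (void *)0x9000, 4096 };

	/* Same size, lower address: a sorts first (-1). */
	printf("%d\n", extent_szad_comp(&a, &b));
	return (0);
}
```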
diff --git a/deps/jemalloc/src/bitmap.c b/deps/jemalloc/src/bitmap.c
index ac0f3b381..c733372b4 100644
--- a/deps/jemalloc/src/bitmap.c
+++ b/deps/jemalloc/src/bitmap.c
@@ -3,8 +3,6 @@
/******************************************************************************/
-#ifdef USE_TREE
-
void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
{
@@ -34,11 +32,20 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
binfo->nbits = nbits;
}
-static size_t
+size_t
bitmap_info_ngroups(const bitmap_info_t *binfo)
{
- return (binfo->levels[binfo->nlevels].group_offset);
+ return (binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP);
+}
+
+size_t
+bitmap_size(size_t nbits)
+{
+ bitmap_info_t binfo;
+
+ bitmap_info_init(&binfo, nbits);
+ return (bitmap_info_ngroups(&binfo));
}
void
@@ -54,7 +61,8 @@ bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
* correspond to the first logical bit in the group, so extra bits
* are the most significant bits of the last group.
*/
- memset(bitmap, 0xffU, bitmap_size(binfo));
+ memset(bitmap, 0xffU, binfo->levels[binfo->nlevels].group_offset <<
+ LG_SIZEOF_BITMAP);
extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
& BITMAP_GROUP_NBITS_MASK;
if (extra != 0)
@@ -68,44 +76,3 @@ bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
}
}
-
-#else /* USE_TREE */
-
-void
-bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
-{
-
- assert(nbits > 0);
- assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
-
- binfo->ngroups = BITMAP_BITS2GROUPS(nbits);
- binfo->nbits = nbits;
-}
-
-static size_t
-bitmap_info_ngroups(const bitmap_info_t *binfo)
-{
-
- return (binfo->ngroups);
-}
-
-void
-bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
-{
- size_t extra;
-
- memset(bitmap, 0xffU, bitmap_size(binfo));
- extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
- & BITMAP_GROUP_NBITS_MASK;
- if (extra != 0)
- bitmap[binfo->ngroups - 1] >>= extra;
-}
-
-#endif /* USE_TREE */
-
-size_t
-bitmap_size(const bitmap_info_t *binfo)
-{
-
- return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP);
-}
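The bitmap.c hunk removes the flat (non-USE_TREE) variant and moves the size computation back into bitmap_info_ngroups()/bitmap_size(nbits). In the surviving multi-level layout, each level stores one summary bit per group of the level below, so finding a set bit costs one ffs per level. A standalone sketch of the per-level group math (GROUP_NBITS is illustrative, not jemalloc's generated constant):

```c
#include <stddef.h>
#include <stdio.h>

#define GROUP_NBITS 64	/* bits per bitmap group on LP64 (illustrative) */

static size_t
bits2groups(size_t nbits)
{
	return ((nbits + GROUP_NBITS - 1) / GROUP_NBITS);
}

int
main(void)
{
	size_t nbits = 512 * 64;	/* e.g. regions tracked by one run */
	size_t total = 0;
	size_t level = 0;

	for (;;) {
		size_t groups = bits2groups(nbits);

		total += groups;
		printf("level %zu: %zu groups\n", level, groups);
		if (groups == 1)
			break;
		nbits = groups;	/* next level summarizes this one */
		level++;
	}
	/* The sum over all levels is what bitmap_size() reports, in groups. */
	printf("total groups: %zu\n", total);
	return (0);
}
```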
diff --git a/deps/jemalloc/src/chunk.c b/deps/jemalloc/src/chunk.c
index c1c514a86..6ba1ca7a5 100644
--- a/deps/jemalloc/src/chunk.c
+++ b/deps/jemalloc/src/chunk.c
@@ -49,10 +49,9 @@ const chunk_hooks_t chunk_hooks_default = {
* definition.
*/
-static void chunk_record(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szsnad,
- extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, size_t sn,
- bool zeroed, bool committed);
+static void chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
+ void *chunk, size_t size, bool zeroed, bool committed);
/******************************************************************************/
@@ -64,23 +63,23 @@ chunk_hooks_get_locked(arena_t *arena)
}
chunk_hooks_t
-chunk_hooks_get(tsdn_t *tsdn, arena_t *arena)
+chunk_hooks_get(arena_t *arena)
{
chunk_hooks_t chunk_hooks;
- malloc_mutex_lock(tsdn, &arena->chunks_mtx);
+ malloc_mutex_lock(&arena->chunks_mtx);
chunk_hooks = chunk_hooks_get_locked(arena);
- malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+ malloc_mutex_unlock(&arena->chunks_mtx);
return (chunk_hooks);
}
chunk_hooks_t
-chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks)
+chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
{
chunk_hooks_t old_chunk_hooks;
- malloc_mutex_lock(tsdn, &arena->chunks_mtx);
+ malloc_mutex_lock(&arena->chunks_mtx);
old_chunk_hooks = arena->chunk_hooks;
/*
* Copy each field atomically so that it is impossible for readers to
@@ -105,14 +104,14 @@ chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks)
ATOMIC_COPY_HOOK(split);
ATOMIC_COPY_HOOK(merge);
#undef ATOMIC_COPY_HOOK
- malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+ malloc_mutex_unlock(&arena->chunks_mtx);
return (old_chunk_hooks);
}
static void
-chunk_hooks_assure_initialized_impl(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, bool locked)
+chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ bool locked)
{
static const chunk_hooks_t uninitialized_hooks =
CHUNK_HOOKS_INITIALIZER;
@@ -120,28 +119,27 @@ chunk_hooks_assure_initialized_impl(tsdn_t *tsdn, arena_t *arena,
if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
0) {
*chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
- chunk_hooks_get(tsdn, arena);
+ chunk_hooks_get(arena);
}
}
static void
-chunk_hooks_assure_initialized_locked(tsdn_t *tsdn, arena_t *arena,
+chunk_hooks_assure_initialized_locked(arena_t *arena,
chunk_hooks_t *chunk_hooks)
{
- chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, true);
+ chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true);
}
static void
-chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks)
+chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks)
{
- chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, false);
+ chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false);
}
bool
-chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
+chunk_register(const void *chunk, const extent_node_t *node)
{
assert(extent_node_addr_get(node) == chunk);
@@ -161,7 +159,7 @@ chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
high = atomic_read_z(&highchunks);
}
if (cur > high && prof_gdump_get_unlocked())
- prof_gdump(tsdn);
+ prof_gdump();
}
return (false);
@@ -183,35 +181,33 @@ chunk_deregister(const void *chunk, const extent_node_t *node)
}
/*
- * Do first-best-fit chunk selection, i.e. select the oldest/lowest chunk that
- * best fits.
+ * Do first-best-fit chunk selection, i.e. select the lowest chunk that best
+ * fits.
*/
static extent_node_t *
-chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szsnad, size_t size)
+chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
+ extent_tree_t *chunks_ad, size_t size)
{
extent_node_t key;
assert(size == CHUNK_CEILING(size));
- extent_node_init(&key, arena, NULL, size, 0, false, false);
- return (extent_tree_szsnad_nsearch(chunks_szsnad, &key));
+ extent_node_init(&key, arena, NULL, size, false, false);
+ return (extent_tree_szad_nsearch(chunks_szad, &key));
}
static void *
-chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
- void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
- bool *commit, bool dalloc_node)
+chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
+ void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
+ bool dalloc_node)
{
void *ret;
extent_node_t *node;
size_t alloc_size, leadsize, trailsize;
bool zeroed, committed;
- assert(CHUNK_CEILING(size) == size);
- assert(alignment > 0);
assert(new_addr == NULL || alignment == chunksize);
- assert(CHUNK_ADDR2BASE(new_addr) == new_addr);
/*
* Cached chunks use the node linkage embedded in their headers, in
* which case dalloc_node is true, and new_addr is non-NULL because
@@ -219,23 +215,24 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
*/
assert(dalloc_node || new_addr != NULL);
- alloc_size = size + CHUNK_CEILING(alignment) - chunksize;
+ alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
/* Beware size_t wrap-around. */
if (alloc_size < size)
return (NULL);
- malloc_mutex_lock(tsdn, &arena->chunks_mtx);
- chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
+ malloc_mutex_lock(&arena->chunks_mtx);
+ chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
if (new_addr != NULL) {
extent_node_t key;
- extent_node_init(&key, arena, new_addr, alloc_size, 0, false,
+ extent_node_init(&key, arena, new_addr, alloc_size, false,
false);
node = extent_tree_ad_search(chunks_ad, &key);
} else {
- node = chunk_first_best_fit(arena, chunks_szsnad, alloc_size);
+ node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
+ alloc_size);
}
if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
size)) {
- malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+ malloc_mutex_unlock(&arena->chunks_mtx);
return (NULL);
}
leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
@@ -244,7 +241,6 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
assert(extent_node_size_get(node) >= leadsize + size);
trailsize = extent_node_size_get(node) - leadsize - size;
ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
- *sn = extent_node_sn_get(node);
zeroed = extent_node_zeroed_get(node);
if (zeroed)
*zero = true;
@@ -255,17 +251,17 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
if (leadsize != 0 &&
chunk_hooks->split(extent_node_addr_get(node),
extent_node_size_get(node), leadsize, size, false, arena->ind)) {
- malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+ malloc_mutex_unlock(&arena->chunks_mtx);
return (NULL);
}
/* Remove node from the tree. */
- extent_tree_szsnad_remove(chunks_szsnad, node);
+ extent_tree_szad_remove(chunks_szad, node);
extent_tree_ad_remove(chunks_ad, node);
arena_chunk_cache_maybe_remove(arena, node, cache);
if (leadsize != 0) {
/* Insert the leading space as a smaller chunk. */
extent_node_size_set(node, leadsize);
- extent_tree_szsnad_insert(chunks_szsnad, node);
+ extent_tree_szad_insert(chunks_szad, node);
extent_tree_ad_insert(chunks_ad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
node = NULL;
@@ -275,42 +271,41 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
if (chunk_hooks->split(ret, size + trailsize, size,
trailsize, false, arena->ind)) {
if (dalloc_node && node != NULL)
- arena_node_dalloc(tsdn, arena, node);
- malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
- chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad,
- chunks_ad, cache, ret, size + trailsize, *sn,
- zeroed, committed);
+ arena_node_dalloc(arena, node);
+ malloc_mutex_unlock(&arena->chunks_mtx);
+ chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad,
+ cache, ret, size + trailsize, zeroed, committed);
return (NULL);
}
/* Insert the trailing space as a smaller chunk. */
if (node == NULL) {
- node = arena_node_alloc(tsdn, arena);
+ node = arena_node_alloc(arena);
if (node == NULL) {
- malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
- chunk_record(tsdn, arena, chunk_hooks,
- chunks_szsnad, chunks_ad, cache, ret, size
- + trailsize, *sn, zeroed, committed);
+ malloc_mutex_unlock(&arena->chunks_mtx);
+ chunk_record(arena, chunk_hooks, chunks_szad,
+ chunks_ad, cache, ret, size + trailsize,
+ zeroed, committed);
return (NULL);
}
}
extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
- trailsize, *sn, zeroed, committed);
- extent_tree_szsnad_insert(chunks_szsnad, node);
+ trailsize, zeroed, committed);
+ extent_tree_szad_insert(chunks_szad, node);
extent_tree_ad_insert(chunks_ad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
node = NULL;
}
if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
- malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
- chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad, chunks_ad,
- cache, ret, size, *sn, zeroed, committed);
+ malloc_mutex_unlock(&arena->chunks_mtx);
+ chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache,
+ ret, size, zeroed, committed);
return (NULL);
}
- malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+ malloc_mutex_unlock(&arena->chunks_mtx);
assert(dalloc_node || node != NULL);
if (dalloc_node && node != NULL)
- arena_node_dalloc(tsdn, arena, node);
+ arena_node_dalloc(arena, node);
if (*zero) {
if (!zeroed)
memset(ret, 0, size);
@@ -318,11 +313,10 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
size_t i;
size_t *p = (size_t *)(uintptr_t)ret;
+ JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
for (i = 0; i < size / sizeof(size_t); i++)
assert(p[i] == 0);
}
- if (config_valgrind)
- JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
}
return (ret);
}
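chunk_recycle(), reverted above, satisfies a request from a cached or retained extent: it finds the first best fit in the szad tree, splits off a leading pad to reach the requested alignment and a trailing remainder, and returns both leftovers to the tree. A worked example of that split arithmetic, with made-up addresses and sizes:

```c
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Round up to a power-of-two alignment, as ALIGNMENT_CEILING() does. */
#define ALIGNMENT_CEILING(a, align) \
	(((a) + ((align) - 1)) & ~((uintptr_t)(align) - 1))

int
main(void)
{
	uintptr_t node_addr = 0x200000;	/* free extent found in the tree */
	size_t node_size = 5 << 20;	/* 5 MiB available */
	size_t size = 2 << 20;		/* 2 MiB requested */
	size_t alignment = 4 << 20;	/* 4 MiB alignment */

	uintptr_t ret = ALIGNMENT_CEILING(node_addr, alignment);
	size_t leadsize = ret - node_addr;
	size_t trailsize = node_size - leadsize - size;

	assert(node_size >= leadsize + size);
	/* lead and trail are reinserted as smaller extents; ret is returned. */
	printf("lead=0x%zx ret=0x%" PRIxPTR " trail=0x%zx\n",
	    leadsize, ret, trailsize);
	return (0);
}
```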
@@ -334,29 +328,39 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
* them if they are returned.
*/
static void *
-chunk_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
- size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec)
+chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
+ bool *zero, bool *commit, dss_prec_t dss_prec)
{
void *ret;
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
assert(size != 0);
assert((size & chunksize_mask) == 0);
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
+ /* Retained. */
+ if ((ret = chunk_recycle(arena, &chunk_hooks,
+ &arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
+ new_addr, size, alignment, zero, commit, true)) != NULL)
+ return (ret);
+
/* "primary" dss. */
if (have_dss && dss_prec == dss_prec_primary && (ret =
- chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
- commit)) != NULL)
- return (ret);
- /* mmap. */
- if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) !=
+ chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
NULL)
return (ret);
+ /*
+ * mmap. Requesting an address is not implemented for
+ * chunk_alloc_mmap(), so only call it if (new_addr == NULL).
+ */
+ if (new_addr == NULL && (ret = chunk_alloc_mmap(size, alignment, zero,
+ commit)) != NULL)
+ return (ret);
/* "secondary" dss. */
if (have_dss && dss_prec == dss_prec_secondary && (ret =
- chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
- commit)) != NULL)
+ chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
+ NULL)
return (ret);
/* All strategies for allocation failed. */
@@ -376,7 +380,7 @@ chunk_alloc_base(size_t size)
*/
zero = true;
commit = true;
- ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit);
+ ret = chunk_alloc_mmap(size, chunksize, &zero, &commit);
if (ret == NULL)
return (NULL);
if (config_valgrind)
@@ -386,33 +390,37 @@ chunk_alloc_base(size_t size)
}
void *
-chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
- bool *commit, bool dalloc_node)
+chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
+ size_t size, size_t alignment, bool *zero, bool dalloc_node)
{
void *ret;
+ bool commit;
assert(size != 0);
assert((size & chunksize_mask) == 0);
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
- ret = chunk_recycle(tsdn, arena, chunk_hooks,
- &arena->chunks_szsnad_cached, &arena->chunks_ad_cached, true,
- new_addr, size, alignment, sn, zero, commit, dalloc_node);
+ commit = true;
+ ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached,
+ &arena->chunks_ad_cached, true, new_addr, size, alignment, zero,
+ &commit, dalloc_node);
if (ret == NULL)
return (NULL);
+ assert(commit);
if (config_valgrind)
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
return (ret);
}
static arena_t *
-chunk_arena_get(tsdn_t *tsdn, unsigned arena_ind)
+chunk_arena_get(unsigned arena_ind)
{
arena_t *arena;
- arena = arena_get(tsdn, arena_ind, false);
+ /* Dodge tsd for a0 in order to avoid bootstrapping issues. */
+ arena = (arena_ind == 0) ? a0get() : arena_get(tsd_fetch(), arena_ind,
+ false, true);
/*
* The arena we're allocating on behalf of must have been initialized
* already.
@@ -422,12 +430,14 @@ chunk_arena_get(tsdn_t *tsdn, unsigned arena_ind)
}
static void *
-chunk_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
- size_t size, size_t alignment, bool *zero, bool *commit)
+chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
+ bool *commit, unsigned arena_ind)
{
void *ret;
+ arena_t *arena;
- ret = chunk_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
+ arena = chunk_arena_get(arena_ind);
+ ret = chunk_alloc_core(arena, new_addr, size, alignment, zero,
commit, arena->dss_prec);
if (ret == NULL)
return (NULL);
@@ -437,80 +447,26 @@ chunk_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
return (ret);
}
-static void *
-chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
- bool *commit, unsigned arena_ind)
-{
- tsdn_t *tsdn;
- arena_t *arena;
-
- tsdn = tsdn_fetch();
- arena = chunk_arena_get(tsdn, arena_ind);
-
- return (chunk_alloc_default_impl(tsdn, arena, new_addr, size, alignment,
- zero, commit));
-}
-
-static void *
-chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
- bool *commit)
-{
- void *ret;
-
- assert(size != 0);
- assert((size & chunksize_mask) == 0);
- assert(alignment != 0);
- assert((alignment & chunksize_mask) == 0);
-
- ret = chunk_recycle(tsdn, arena, chunk_hooks,
- &arena->chunks_szsnad_retained, &arena->chunks_ad_retained, false,
- new_addr, size, alignment, sn, zero, commit, true);
-
- if (config_stats && ret != NULL)
- arena->stats.retained -= size;
-
- return (ret);
-}
-
void *
-chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
- bool *commit)
+chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
+ size_t size, size_t alignment, bool *zero, bool *commit)
{
void *ret;
- chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
-
- ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
- alignment, sn, zero, commit);
- if (ret == NULL) {
- if (chunk_hooks->alloc == chunk_alloc_default) {
- /* Call directly to propagate tsdn. */
- ret = chunk_alloc_default_impl(tsdn, arena, new_addr,
- size, alignment, zero, commit);
- } else {
- ret = chunk_hooks->alloc(new_addr, size, alignment,
- zero, commit, arena->ind);
- }
-
- if (ret == NULL)
- return (NULL);
-
- *sn = arena_extent_sn_next(arena);
-
- if (config_valgrind && chunk_hooks->alloc !=
- chunk_alloc_default)
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
- }
-
+ chunk_hooks_assure_initialized(arena, chunk_hooks);
+ ret = chunk_hooks->alloc(new_addr, size, alignment, zero, commit,
+ arena->ind);
+ if (ret == NULL)
+ return (NULL);
+ if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
return (ret);
}
static void
-chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
- void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
+chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
+ void *chunk, size_t size, bool zeroed, bool committed)
{
bool unzeroed;
extent_node_t *node, *prev;
@@ -520,9 +476,9 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
unzeroed = cache || !zeroed;
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
- malloc_mutex_lock(tsdn, &arena->chunks_mtx);
- chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
- extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, 0,
+ malloc_mutex_lock(&arena->chunks_mtx);
+ chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
+ extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
false, false);
node = extent_tree_ad_nsearch(chunks_ad, &key);
/* Try to coalesce forward. */
@@ -534,21 +490,19 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
/*
* Coalesce chunk with the following address range. This does
* not change the position within chunks_ad, so only
- * remove/insert from/into chunks_szsnad.
+ * remove/insert from/into chunks_szad.
*/
- extent_tree_szsnad_remove(chunks_szsnad, node);
+ extent_tree_szad_remove(chunks_szad, node);
arena_chunk_cache_maybe_remove(arena, node, cache);
extent_node_addr_set(node, chunk);
extent_node_size_set(node, size + extent_node_size_get(node));
- if (sn < extent_node_sn_get(node))
- extent_node_sn_set(node, sn);
extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
!unzeroed);
- extent_tree_szsnad_insert(chunks_szsnad, node);
+ extent_tree_szad_insert(chunks_szad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
} else {
/* Coalescing forward failed, so insert a new node. */
- node = arena_node_alloc(tsdn, arena);
+ node = arena_node_alloc(arena);
if (node == NULL) {
/*
* Node allocation failed, which is an exceedingly
@@ -557,15 +511,15 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
* a virtual memory leak.
*/
if (cache) {
- chunk_purge_wrapper(tsdn, arena, chunk_hooks,
- chunk, size, 0, size);
+ chunk_purge_wrapper(arena, chunk_hooks, chunk,
+ size, 0, size);
}
goto label_return;
}
- extent_node_init(node, arena, chunk, size, sn, !unzeroed,
+ extent_node_init(node, arena, chunk, size, !unzeroed,
committed);
extent_tree_ad_insert(chunks_ad, node);
- extent_tree_szsnad_insert(chunks_szsnad, node);
+ extent_tree_szad_insert(chunks_szad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
}
@@ -579,33 +533,31 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
/*
* Coalesce chunk with the previous address range. This does
* not change the position within chunks_ad, so only
- * remove/insert node from/into chunks_szsnad.
+ * remove/insert node from/into chunks_szad.
*/
- extent_tree_szsnad_remove(chunks_szsnad, prev);
+ extent_tree_szad_remove(chunks_szad, prev);
extent_tree_ad_remove(chunks_ad, prev);
arena_chunk_cache_maybe_remove(arena, prev, cache);
- extent_tree_szsnad_remove(chunks_szsnad, node);
+ extent_tree_szad_remove(chunks_szad, node);
arena_chunk_cache_maybe_remove(arena, node, cache);
extent_node_addr_set(node, extent_node_addr_get(prev));
extent_node_size_set(node, extent_node_size_get(prev) +
extent_node_size_get(node));
- if (extent_node_sn_get(prev) < extent_node_sn_get(node))
- extent_node_sn_set(node, extent_node_sn_get(prev));
extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
extent_node_zeroed_get(node));
- extent_tree_szsnad_insert(chunks_szsnad, node);
+ extent_tree_szad_insert(chunks_szad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
- arena_node_dalloc(tsdn, arena, prev);
+ arena_node_dalloc(arena, prev);
}
label_return:
- malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+ malloc_mutex_unlock(&arena->chunks_mtx);
}
void
-chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *chunk, size_t size, size_t sn, bool committed)
+chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
+ size_t size, bool committed)
{
assert(chunk != NULL);
@@ -613,49 +565,24 @@ chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
assert(size != 0);
assert((size & chunksize_mask) == 0);
- chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_cached,
- &arena->chunks_ad_cached, true, chunk, size, sn, false,
- committed);
- arena_maybe_purge(tsdn, arena);
-}
-
-static bool
-chunk_dalloc_default_impl(void *chunk, size_t size)
-{
-
- if (!have_dss || !chunk_in_dss(chunk))
- return (chunk_dalloc_mmap(chunk, size));
- return (true);
-}
-
-static bool
-chunk_dalloc_default(void *chunk, size_t size, bool committed,
- unsigned arena_ind)
-{
-
- return (chunk_dalloc_default_impl(chunk, size));
+ chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached,
+ &arena->chunks_ad_cached, true, chunk, size, false, committed);
+ arena_maybe_purge(arena);
}
void
-chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
+chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
+ size_t size, bool zeroed, bool committed)
{
- bool err;
assert(chunk != NULL);
assert(CHUNK_ADDR2BASE(chunk) == chunk);
assert(size != 0);
assert((size & chunksize_mask) == 0);
- chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
+ chunk_hooks_assure_initialized(arena, chunk_hooks);
/* Try to deallocate. */
- if (chunk_hooks->dalloc == chunk_dalloc_default) {
- /* Call directly to propagate tsdn. */
- err = chunk_dalloc_default_impl(chunk, size);
- } else
- err = chunk_hooks->dalloc(chunk, size, committed, arena->ind);
-
- if (!err)
+ if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind))
return;
/* Try to decommit; purge if that fails. */
if (committed) {
@@ -664,12 +591,29 @@ chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
}
zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
arena->ind);
- chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_retained,
- &arena->chunks_ad_retained, false, chunk, size, sn, zeroed,
- committed);
+ chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained,
+ &arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
+}
+
+static bool
+chunk_dalloc_default(void *chunk, size_t size, bool committed,
+ unsigned arena_ind)
+{
- if (config_stats)
- arena->stats.retained += size;
+ if (!have_dss || !chunk_in_dss(chunk))
+ return (chunk_dalloc_mmap(chunk, size));
+ return (true);
+}
+
+void
+chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
+ size_t size, bool committed)
+{
+
+ chunk_hooks_assure_initialized(arena, chunk_hooks);
+ chunk_hooks->dalloc(chunk, size, committed, arena->ind);
+ if (config_valgrind && chunk_hooks->dalloc != chunk_dalloc_default)
+ JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
}
static bool
@@ -690,9 +634,8 @@ chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
length));
}
-static bool
-chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
- unsigned arena_ind)
+bool
+chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
{
assert(chunk != NULL);
@@ -705,12 +648,21 @@ chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
length));
}
+static bool
+chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
+ unsigned arena_ind)
+{
+
+ return (chunk_purge_arena(chunk_arena_get(arena_ind), chunk, offset,
+ length));
+}
+
bool
-chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *chunk, size_t size, size_t offset, size_t length)
+chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
+ size_t size, size_t offset, size_t length)
{
- chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
+ chunk_hooks_assure_initialized(arena, chunk_hooks);
return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
}
@@ -725,30 +677,23 @@ chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
}
static bool
-chunk_merge_default_impl(void *chunk_a, void *chunk_b)
+chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
+ bool committed, unsigned arena_ind)
{
if (!maps_coalesce)
return (true);
- if (have_dss && !chunk_dss_mergeable(chunk_a, chunk_b))
+ if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b))
return (true);
return (false);
}
-static bool
-chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
- bool committed, unsigned arena_ind)
-{
-
- return (chunk_merge_default_impl(chunk_a, chunk_b));
-}
-
static rtree_node_elm_t *
chunks_rtree_node_alloc(size_t nelms)
{
- return ((rtree_node_elm_t *)base_alloc(TSDN_NULL, nelms *
+ return ((rtree_node_elm_t *)base_alloc(nelms *
sizeof(rtree_node_elm_t)));
}
@@ -771,7 +716,7 @@ chunk_boot(void)
* so pages_map will always take fast path.
*/
if (!opt_lg_chunk) {
- opt_lg_chunk = ffs_u((unsigned)info.dwAllocationGranularity)
+ opt_lg_chunk = jemalloc_ffs((int)info.dwAllocationGranularity)
- 1;
}
#else
@@ -785,11 +730,32 @@ chunk_boot(void)
chunksize_mask = chunksize - 1;
chunk_npages = (chunksize >> LG_PAGE);
- if (have_dss)
- chunk_dss_boot();
- if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
- opt_lg_chunk), chunks_rtree_node_alloc, NULL))
+ if (have_dss && chunk_dss_boot())
+ return (true);
+ if (rtree_new(&chunks_rtree, (ZU(1) << (LG_SIZEOF_PTR+3)) -
+ opt_lg_chunk, chunks_rtree_node_alloc, NULL))
return (true);
return (false);
}
+
+void
+chunk_prefork(void)
+{
+
+ chunk_dss_prefork();
+}
+
+void
+chunk_postfork_parent(void)
+{
+
+ chunk_dss_postfork_parent();
+}
+
+void
+chunk_postfork_child(void)
+{
+
+ chunk_dss_postfork_child();
+}
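The same file restores chunk_alloc_core()'s source ordering: retained memory first, then the "primary" source chosen by the arena's dss precedence, then the remaining source (4.4.0 had hoisted the retained step out into chunk_alloc_wrapper()). The shape is a plain first-success fallback chain; a generic sketch with hypothetical allocator stubs standing in for the real sources:

```c
#include <stddef.h>
#include <stdlib.h>

typedef void *(*alloc_fn_t)(size_t size);

/* Stand-ins for the real sources (recycled/retained, dss, mmap). */
static void *from_retained(size_t size) { (void)size; return (NULL); }
static void *from_dss(size_t size)      { (void)size; return (NULL); }
static void *from_mmap(size_t size)     { return (malloc(size)); }

static void *
alloc_first(const alloc_fn_t *chain, size_t nfns, size_t size)
{
	for (size_t i = 0; i < nfns; i++) {
		void *ret = chain[i](size);

		if (ret != NULL)
			return (ret);
	}
	return (NULL);	/* all strategies for allocation failed */
}

int
main(void)
{
	const alloc_fn_t chain[] = { from_retained, from_dss, from_mmap };
	void *p = alloc_first(chain, 3, 4096);

	free(p);
	return (p == NULL);
}
```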
diff --git a/deps/jemalloc/src/chunk_dss.c b/deps/jemalloc/src/chunk_dss.c
index ee3f83888..61fc91696 100644
--- a/deps/jemalloc/src/chunk_dss.c
+++ b/deps/jemalloc/src/chunk_dss.c
@@ -10,19 +10,20 @@ const char *dss_prec_names[] = {
"N/A"
};
+/* Current dss precedence default, used when creating new arenas. */
+static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT;
+
/*
- * Current dss precedence default, used when creating new arenas. NB: This is
- * stored as unsigned rather than dss_prec_t because in principle there's no
- * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use
- * atomic operations to synchronize the setting.
+ * Protects sbrk() calls. This avoids malloc races among threads, though it
+ * does not protect against races with threads that call sbrk() directly.
*/
-static unsigned dss_prec_default = (unsigned)DSS_PREC_DEFAULT;
+static malloc_mutex_t dss_mtx;
/* Base address of the DSS. */
static void *dss_base;
-/* Atomic boolean indicating whether the DSS is exhausted. */
-static unsigned dss_exhausted;
-/* Atomic current upper limit on DSS addresses. */
+/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
+static void *dss_prev;
+/* Current upper limit on DSS addresses. */
static void *dss_max;
/******************************************************************************/
@@ -46,7 +47,9 @@ chunk_dss_prec_get(void)
if (!have_dss)
return (dss_prec_disabled);
- ret = (dss_prec_t)atomic_read_u(&dss_prec_default);
+ malloc_mutex_lock(&dss_mtx);
+ ret = dss_prec_default;
+ malloc_mutex_unlock(&dss_mtx);
return (ret);
}
@@ -56,46 +59,15 @@ chunk_dss_prec_set(dss_prec_t dss_prec)
if (!have_dss)
return (dss_prec != dss_prec_disabled);
- atomic_write_u(&dss_prec_default, (unsigned)dss_prec);
+ malloc_mutex_lock(&dss_mtx);
+ dss_prec_default = dss_prec;
+ malloc_mutex_unlock(&dss_mtx);
return (false);
}
-static void *
-chunk_dss_max_update(void *new_addr)
-{
- void *max_cur;
- spin_t spinner;
-
- /*
- * Get the current end of the DSS as max_cur and assure that dss_max is
- * up to date.
- */
- spin_init(&spinner);
- while (true) {
- void *max_prev = atomic_read_p(&dss_max);
-
- max_cur = chunk_dss_sbrk(0);
- if ((uintptr_t)max_prev > (uintptr_t)max_cur) {
- /*
- * Another thread optimistically updated dss_max. Wait
- * for it to finish.
- */
- spin_adaptive(&spinner);
- continue;
- }
- if (!atomic_cas_p(&dss_max, max_prev, max_cur))
- break;
- }
- /* Fixed new_addr can only be supported if it is at the edge of DSS. */
- if (new_addr != NULL && max_cur != new_addr)
- return (NULL);
-
- return (max_cur);
-}
-
void *
-chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
- size_t alignment, bool *zero, bool *commit)
+chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
+ bool *zero, bool *commit)
{
cassert(have_dss);
assert(size > 0 && (size & chunksize_mask) == 0);
@@ -108,20 +80,28 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
if ((intptr_t)size < 0)
return (NULL);
- if (!atomic_read_u(&dss_exhausted)) {
+ malloc_mutex_lock(&dss_mtx);
+ if (dss_prev != (void *)-1) {
+
/*
* The loop is necessary to recover from races with other
* threads that are using the DSS for something other than
* malloc.
*/
- while (true) {
- void *ret, *cpad, *max_cur, *dss_next, *dss_prev;
+ do {
+ void *ret, *cpad, *dss_next;
size_t gap_size, cpad_size;
intptr_t incr;
+ /* Avoid an unnecessary system call. */
+ if (new_addr != NULL && dss_max != new_addr)
+ break;
+
+ /* Get the current end of the DSS. */
+ dss_max = chunk_dss_sbrk(0);
- max_cur = chunk_dss_max_update(new_addr);
- if (max_cur == NULL)
- goto label_oom;
+ /* Make sure the earlier condition still holds. */
+ if (new_addr != NULL && dss_max != new_addr)
+ break;
/*
* Calculate how much padding is necessary to
@@ -140,29 +120,22 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
dss_next = (void *)((uintptr_t)ret + size);
if ((uintptr_t)ret < (uintptr_t)dss_max ||
- (uintptr_t)dss_next < (uintptr_t)dss_max)
- goto label_oom; /* Wrap-around. */
+ (uintptr_t)dss_next < (uintptr_t)dss_max) {
+ /* Wrap-around. */
+ malloc_mutex_unlock(&dss_mtx);
+ return (NULL);
+ }
incr = gap_size + cpad_size + size;
-
- /*
- * Optimistically update dss_max, and roll back below if
- * sbrk() fails. No other thread will try to extend the
- * DSS while dss_max is greater than the current DSS
- * max reported by sbrk(0).
- */
- if (atomic_cas_p(&dss_max, max_cur, dss_next))
- continue;
-
- /* Try to allocate. */
dss_prev = chunk_dss_sbrk(incr);
- if (dss_prev == max_cur) {
+ if (dss_prev == dss_max) {
/* Success. */
+ dss_max = dss_next;
+ malloc_mutex_unlock(&dss_mtx);
if (cpad_size != 0) {
chunk_hooks_t chunk_hooks =
CHUNK_HOOKS_INITIALIZER;
- chunk_dalloc_wrapper(tsdn, arena,
+ chunk_dalloc_wrapper(arena,
&chunk_hooks, cpad, cpad_size,
- arena_extent_sn_next(arena), false,
true);
}
if (*zero) {
@@ -174,65 +147,68 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
*commit = pages_decommit(ret, size);
return (ret);
}
-
- /*
- * Failure, whether due to OOM or a race with a raw
- * sbrk() call from outside the allocator. Try to roll
- * back optimistic dss_max update; if rollback fails,
- * it's due to another caller of this function having
- * succeeded since this invocation started, in which
- * case rollback is not necessary.
- */
- atomic_cas_p(&dss_max, dss_next, max_cur);
- if (dss_prev == (void *)-1) {
- /* OOM. */
- atomic_write_u(&dss_exhausted, (unsigned)true);
- goto label_oom;
- }
- }
+ } while (dss_prev != (void *)-1);
}
-label_oom:
- return (NULL);
-}
-
-static bool
-chunk_in_dss_helper(void *chunk, void *max)
-{
+ malloc_mutex_unlock(&dss_mtx);
- return ((uintptr_t)chunk >= (uintptr_t)dss_base && (uintptr_t)chunk <
- (uintptr_t)max);
+ return (NULL);
}
bool
chunk_in_dss(void *chunk)
{
+ bool ret;
cassert(have_dss);
- return (chunk_in_dss_helper(chunk, atomic_read_p(&dss_max)));
+ malloc_mutex_lock(&dss_mtx);
+ if ((uintptr_t)chunk >= (uintptr_t)dss_base
+ && (uintptr_t)chunk < (uintptr_t)dss_max)
+ ret = true;
+ else
+ ret = false;
+ malloc_mutex_unlock(&dss_mtx);
+
+ return (ret);
}
bool
-chunk_dss_mergeable(void *chunk_a, void *chunk_b)
+chunk_dss_boot(void)
{
- void *max;
cassert(have_dss);
- max = atomic_read_p(&dss_max);
- return (chunk_in_dss_helper(chunk_a, max) ==
- chunk_in_dss_helper(chunk_b, max));
+ if (malloc_mutex_init(&dss_mtx))
+ return (true);
+ dss_base = chunk_dss_sbrk(0);
+ dss_prev = dss_base;
+ dss_max = dss_base;
+
+ return (false);
}
void
-chunk_dss_boot(void)
+chunk_dss_prefork(void)
{
- cassert(have_dss);
+ if (have_dss)
+ malloc_mutex_prefork(&dss_mtx);
+}
- dss_base = chunk_dss_sbrk(0);
- dss_exhausted = (unsigned)(dss_base == (void *)-1);
- dss_max = dss_base;
+void
+chunk_dss_postfork_parent(void)
+{
+
+ if (have_dss)
+ malloc_mutex_postfork_parent(&dss_mtx);
+}
+
+void
+chunk_dss_postfork_child(void)
+{
+
+ if (have_dss)
+ malloc_mutex_postfork_child(&dss_mtx);
}
/******************************************************************************/
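chunk_dss.c trades 4.4.0's lock-free dss_max/dss_exhausted atomics (and the spin-wait in chunk_dss_max_update()) for a single dss_mtx held around every sbrk() call. Greatly simplified, and assuming sbrk() is available and the alignment is a power of two that is a multiple of the page size, the locked allocation step looks roughly like this (a sketch, not the reverted function):

```c
#include <pthread.h>
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>

static pthread_mutex_t dss_mtx_sketch = PTHREAD_MUTEX_INITIALIZER;

static void *
dss_alloc_sketch(size_t size, size_t alignment)
{
	void *ret = NULL;

	pthread_mutex_lock(&dss_mtx_sketch);
	void *max = sbrk(0);	/* current end of the data segment */
	if (max != (void *)-1) {
		/* Pad up to the next aligned address past the break. */
		uintptr_t gap = (-(uintptr_t)max) & (alignment - 1);

		/*
		 * sbrk() returns the previous break on success; comparing it
		 * to max detects a race with an outside sbrk() caller.
		 */
		if (sbrk((intptr_t)(gap + size)) == max)
			ret = (void *)((uintptr_t)max + gap);
	}
	pthread_mutex_unlock(&dss_mtx_sketch);
	return (ret);
}
```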
diff --git a/deps/jemalloc/src/chunk_mmap.c b/deps/jemalloc/src/chunk_mmap.c
index 73fc497af..b9ba74191 100644
--- a/deps/jemalloc/src/chunk_mmap.c
+++ b/deps/jemalloc/src/chunk_mmap.c
@@ -16,22 +16,23 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
do {
void *pages;
size_t leadsize;
- pages = pages_map(NULL, alloc_size, commit);
+ pages = pages_map(NULL, alloc_size);
if (pages == NULL)
return (NULL);
leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
(uintptr_t)pages;
- ret = pages_trim(pages, alloc_size, leadsize, size, commit);
+ ret = pages_trim(pages, alloc_size, leadsize, size);
} while (ret == NULL);
assert(ret != NULL);
*zero = true;
+ if (!*commit)
+ *commit = pages_decommit(ret, size);
return (ret);
}
void *
-chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
- bool *commit)
+chunk_alloc_mmap(size_t size, size_t alignment, bool *zero, bool *commit)
{
void *ret;
size_t offset;
@@ -52,10 +53,9 @@ chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
- ret = pages_map(new_addr, size, commit);
- if (ret == NULL || ret == new_addr)
- return (ret);
- assert(new_addr == NULL);
+ ret = pages_map(NULL, size);
+ if (ret == NULL)
+ return (NULL);
offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
if (offset != 0) {
pages_unmap(ret, size);
@@ -64,6 +64,8 @@ chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
assert(ret != NULL);
*zero = true;
+ if (!*commit)
+ *commit = pages_decommit(ret, size);
return (ret);
}
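chunk_alloc_mmap_slow(), whose callers change above, relies on the classic over-map-and-trim trick for alignment: map size plus slop, then unmap the misaligned head and the surplus tail. A self-contained sketch assuming size and alignment are page multiples and alignment is a power of two (error handling trimmed):

```c
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

static void *
mmap_aligned_sketch(size_t size, size_t alignment)
{
	size_t alloc_size = size + alignment;	/* slop for trimming */
	char *pages = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (pages == MAP_FAILED)
		return (NULL);
	/* Unmap the misaligned head, if any. */
	uintptr_t lead = (-(uintptr_t)pages) & (alignment - 1);
	if (lead != 0)
		munmap(pages, lead);
	/* Unmap whatever is left past the aligned region. */
	size_t trail = alloc_size - lead - size;
	if (trail != 0)
		munmap(pages + lead + size, trail);
	return (pages + lead);
}

int
main(void)
{
	void *p = mmap_aligned_sketch(1 << 21, 1 << 21);	/* 2 MiB aligned */

	return (p == NULL);
}
```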
diff --git a/deps/jemalloc/src/ckh.c b/deps/jemalloc/src/ckh.c
index 159bd8ae1..53a1c1ef1 100644
--- a/deps/jemalloc/src/ckh.c
+++ b/deps/jemalloc/src/ckh.c
@@ -99,8 +99,7 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
* Cycle through the cells in the bucket, starting at a random position.
* The randomness avoids worst-case search overhead as buckets fill up.
*/
- offset = (unsigned)prng_lg_range_u64(&ckh->prng_state,
- LG_CKH_BUCKET_CELLS);
+ prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
@@ -142,8 +141,7 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
* were an item for which both hashes indicated the same
* bucket.
*/
- i = (unsigned)prng_lg_range_u64(&ckh->prng_state,
- LG_CKH_BUCKET_CELLS);
+ prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
assert(cell->key != NULL);
@@ -249,7 +247,8 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
{
bool ret;
ckhc_t *tab, *ttab;
- unsigned lg_prevbuckets, lg_curcells;
+ size_t lg_curcells;
+ unsigned lg_prevbuckets;
#ifdef CKH_COUNT
ckh->ngrows++;
@@ -267,12 +266,12 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
lg_curcells++;
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
- if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
+ if (usize == 0) {
ret = true;
goto label_return;
}
- tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE,
- true, NULL, true, arena_ichoose(tsd, NULL));
+ tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL,
+ true, NULL);
if (tab == NULL) {
ret = true;
goto label_return;
@@ -284,12 +283,12 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (!ckh_rebuild(ckh, tab)) {
- idalloctm(tsd_tsdn(tsd), tab, NULL, true, true);
+ idalloctm(tsd, tab, tcache_get(tsd, false), true);
break;
}
/* Rebuilding failed, so back out partially rebuilt table. */
- idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true);
+ idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
}
@@ -303,8 +302,8 @@ static void
ckh_shrink(tsd_t *tsd, ckh_t *ckh)
{
ckhc_t *tab, *ttab;
- size_t usize;
- unsigned lg_prevbuckets, lg_curcells;
+ size_t lg_curcells, usize;
+ unsigned lg_prevbuckets;
/*
* It is possible (though unlikely, given well behaved hashes) that the
@@ -313,10 +312,10 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
lg_prevbuckets = ckh->lg_curbuckets;
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
- if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
+ if (usize == 0)
return;
- tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL,
- true, arena_ichoose(tsd, NULL));
+ tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
+ NULL);
if (tab == NULL) {
/*
* An OOM error isn't worth propagating, since it doesn't
@@ -331,7 +330,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (!ckh_rebuild(ckh, tab)) {
- idalloctm(tsd_tsdn(tsd), tab, NULL, true, true);
+ idalloctm(tsd, tab, tcache_get(tsd, false), true);
#ifdef CKH_COUNT
ckh->nshrinks++;
#endif
@@ -339,7 +338,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
}
/* Rebuilding failed, so back out partially rebuilt table. */
- idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true);
+ idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
#ifdef CKH_COUNT
@@ -388,12 +387,12 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ckh->keycomp = keycomp;
usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
- if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
+ if (usize == 0) {
ret = true;
goto label_return;
}
- ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true,
- NULL, true, arena_ichoose(tsd, NULL));
+ ckh->tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
+ NULL);
if (ckh->tab == NULL) {
ret = true;
goto label_return;
@@ -422,9 +421,9 @@ ckh_delete(tsd_t *tsd, ckh_t *ckh)
(unsigned long long)ckh->nrelocs);
#endif
- idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true);
+ idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
if (config_debug)
- memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
+ memset(ckh, 0x5a, sizeof(ckh_t));
}
size_t
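The ckh.c hunks swap 4.4.0's prng_lg_range_u64() calls back to the older prng32() macro when picking the starting cell within a bucket; randomizing the probe start keeps bucket scans from always visiting cells in the same order as buckets fill. A sketch of that kind of LCG-driven offset (the multiplier and increment are illustrative, not jemalloc's CKH_A/CKH_C):

```c
#include <stdint.h>
#include <stdio.h>

#define LG_BUCKET_CELLS 2	/* 4 cells per bucket, as in ckh */

static uint32_t
prng32_sketch(uint32_t *state)
{
	*state = *state * 1103515245u + 12345u;		/* LCG step */
	return (*state >> (32 - LG_BUCKET_CELLS));	/* top lg(n) bits */
}

int
main(void)
{
	uint32_t state = 42;

	for (int i = 0; i < 4; i++)
		printf("start cell %u\n", prng32_sketch(&state));
	return (0);
}
```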
diff --git a/deps/jemalloc/src/ctl.c b/deps/jemalloc/src/ctl.c
index bc78b2055..3de8e602d 100644
--- a/deps/jemalloc/src/ctl.c
+++ b/deps/jemalloc/src/ctl.c
@@ -24,7 +24,7 @@ ctl_named_node(const ctl_node_t *node)
}
JEMALLOC_INLINE_C const ctl_named_node_t *
-ctl_named_children(const ctl_named_node_t *node, size_t index)
+ctl_named_children(const ctl_named_node_t *node, int index)
{
const ctl_named_node_t *children = ctl_named_node(node->children);
@@ -42,25 +42,25 @@ ctl_indexed_node(const ctl_node_t *node)
/* Function prototypes for non-inline static functions. */
#define CTL_PROTO(n) \
-static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
- void *oldp, size_t *oldlenp, void *newp, size_t newlen);
+static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen);
#define INDEX_PROTO(n) \
-static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \
- const size_t *mib, size_t miblen, size_t i);
+static const ctl_named_node_t *n##_index(const size_t *mib, \
+ size_t miblen, size_t i);
static bool ctl_arena_init(ctl_arena_stats_t *astats);
static void ctl_arena_clear(ctl_arena_stats_t *astats);
-static void ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats,
+static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats,
arena_t *arena);
static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
ctl_arena_stats_t *astats);
-static void ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i);
-static bool ctl_grow(tsdn_t *tsdn);
-static void ctl_refresh(tsdn_t *tsdn);
-static bool ctl_init(tsdn_t *tsdn);
-static int ctl_lookup(tsdn_t *tsdn, const char *name,
- ctl_node_t const **nodesp, size_t *mibp, size_t *depthp);
+static void ctl_arena_refresh(arena_t *arena, unsigned i);
+static bool ctl_grow(void);
+static void ctl_refresh(void);
+static bool ctl_init(void);
+static int ctl_lookup(const char *name, ctl_node_t const **nodesp,
+ size_t *mibp, size_t *depthp);
CTL_PROTO(version)
CTL_PROTO(epoch)
@@ -77,7 +77,6 @@ CTL_PROTO(config_cache_oblivious)
CTL_PROTO(config_debug)
CTL_PROTO(config_fill)
CTL_PROTO(config_lazy_lock)
-CTL_PROTO(config_malloc_conf)
CTL_PROTO(config_munmap)
CTL_PROTO(config_prof)
CTL_PROTO(config_prof_libgcc)
@@ -92,9 +91,7 @@ CTL_PROTO(opt_abort)
CTL_PROTO(opt_dss)
CTL_PROTO(opt_lg_chunk)
CTL_PROTO(opt_narenas)
-CTL_PROTO(opt_purge)
CTL_PROTO(opt_lg_dirty_mult)
-CTL_PROTO(opt_decay_time)
CTL_PROTO(opt_stats_print)
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
@@ -117,13 +114,10 @@ CTL_PROTO(opt_prof_accum)
CTL_PROTO(tcache_create)
CTL_PROTO(tcache_flush)
CTL_PROTO(tcache_destroy)
-static void arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all);
CTL_PROTO(arena_i_purge)
-CTL_PROTO(arena_i_decay)
-CTL_PROTO(arena_i_reset)
+static void arena_purge(unsigned arena_ind);
CTL_PROTO(arena_i_dss)
CTL_PROTO(arena_i_lg_dirty_mult)
-CTL_PROTO(arena_i_decay_time)
CTL_PROTO(arena_i_chunk_hooks)
INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size)
@@ -137,7 +131,6 @@ INDEX_PROTO(arenas_hchunk_i)
CTL_PROTO(arenas_narenas)
CTL_PROTO(arenas_initialized)
CTL_PROTO(arenas_lg_dirty_mult)
-CTL_PROTO(arenas_decay_time)
CTL_PROTO(arenas_quantum)
CTL_PROTO(arenas_page)
CTL_PROTO(arenas_tcache_max)
@@ -188,11 +181,9 @@ INDEX_PROTO(stats_arenas_i_hchunks_j)
CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_dss)
CTL_PROTO(stats_arenas_i_lg_dirty_mult)
-CTL_PROTO(stats_arenas_i_decay_time)
CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty)
CTL_PROTO(stats_arenas_i_mapped)
-CTL_PROTO(stats_arenas_i_retained)
CTL_PROTO(stats_arenas_i_npurge)
CTL_PROTO(stats_arenas_i_nmadvise)
CTL_PROTO(stats_arenas_i_purged)
@@ -205,7 +196,6 @@ CTL_PROTO(stats_active)
CTL_PROTO(stats_metadata)
CTL_PROTO(stats_resident)
CTL_PROTO(stats_mapped)
-CTL_PROTO(stats_retained)
/******************************************************************************/
/* mallctl tree. */
@@ -251,7 +241,6 @@ static const ctl_named_node_t config_node[] = {
{NAME("debug"), CTL(config_debug)},
{NAME("fill"), CTL(config_fill)},
{NAME("lazy_lock"), CTL(config_lazy_lock)},
- {NAME("malloc_conf"), CTL(config_malloc_conf)},
{NAME("munmap"), CTL(config_munmap)},
{NAME("prof"), CTL(config_prof)},
{NAME("prof_libgcc"), CTL(config_prof_libgcc)},
@@ -269,9 +258,7 @@ static const ctl_named_node_t opt_node[] = {
{NAME("dss"), CTL(opt_dss)},
{NAME("lg_chunk"), CTL(opt_lg_chunk)},
{NAME("narenas"), CTL(opt_narenas)},
- {NAME("purge"), CTL(opt_purge)},
{NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)},
- {NAME("decay_time"), CTL(opt_decay_time)},
{NAME("stats_print"), CTL(opt_stats_print)},
{NAME("junk"), CTL(opt_junk)},
{NAME("zero"), CTL(opt_zero)},
@@ -301,11 +288,8 @@ static const ctl_named_node_t tcache_node[] = {
static const ctl_named_node_t arena_i_node[] = {
{NAME("purge"), CTL(arena_i_purge)},
- {NAME("decay"), CTL(arena_i_decay)},
- {NAME("reset"), CTL(arena_i_reset)},
{NAME("dss"), CTL(arena_i_dss)},
{NAME("lg_dirty_mult"), CTL(arena_i_lg_dirty_mult)},
- {NAME("decay_time"), CTL(arena_i_decay_time)},
{NAME("chunk_hooks"), CTL(arena_i_chunk_hooks)}
};
static const ctl_named_node_t super_arena_i_node[] = {
@@ -355,7 +339,6 @@ static const ctl_named_node_t arenas_node[] = {
{NAME("narenas"), CTL(arenas_narenas)},
{NAME("initialized"), CTL(arenas_initialized)},
{NAME("lg_dirty_mult"), CTL(arenas_lg_dirty_mult)},
- {NAME("decay_time"), CTL(arenas_decay_time)},
{NAME("quantum"), CTL(arenas_quantum)},
{NAME("page"), CTL(arenas_page)},
{NAME("tcache_max"), CTL(arenas_tcache_max)},
@@ -456,11 +439,9 @@ static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
{NAME("dss"), CTL(stats_arenas_i_dss)},
{NAME("lg_dirty_mult"), CTL(stats_arenas_i_lg_dirty_mult)},
- {NAME("decay_time"), CTL(stats_arenas_i_decay_time)},
{NAME("pactive"), CTL(stats_arenas_i_pactive)},
{NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
{NAME("mapped"), CTL(stats_arenas_i_mapped)},
- {NAME("retained"), CTL(stats_arenas_i_retained)},
{NAME("npurge"), CTL(stats_arenas_i_npurge)},
{NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)},
{NAME("purged"), CTL(stats_arenas_i_purged)},
@@ -487,7 +468,6 @@ static const ctl_named_node_t stats_node[] = {
{NAME("metadata"), CTL(stats_metadata)},
{NAME("resident"), CTL(stats_resident)},
{NAME("mapped"), CTL(stats_mapped)},
- {NAME("retained"), CTL(stats_retained)},
{NAME("arenas"), CHILD(indexed, stats_arenas)}
};
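The ctl.c hunks above prune 4.4.0-only names (config.malloc_conf, opt.purge, the decay_time entries, stats.retained, ...) from the mallctl tree, a static hierarchy of named nodes resolved one path component at a time. A toy model of that lookup (hypothetical structures, far simpler than ctl_named_node_t):

```c
#include <stdio.h>
#include <string.h>

typedef struct node node_t;
struct node {
	const char	*name;
	const node_t	*children;	/* NULL for leaves */
	size_t		 nchildren;
	size_t		 value;		/* leaf payload */
};

static const node_t stats_children[] = {
	{"mapped",   NULL, 0, 1 << 20},
	{"resident", NULL, 0, 1 << 19},
};
static const node_t root[] = {
	{"stats", stats_children, 2, 0},
};

static const node_t *
lookup(const node_t *nodes, size_t n, const char *name)
{
	for (size_t i = 0; i < n; i++) {
		if (strcmp(nodes[i].name, name) == 0)
			return (&nodes[i]);
	}
	return (NULL);
}

int
main(void)
{
	/* Resolve "stats.mapped" one component per tree level. */
	const node_t *n = lookup(root, 1, "stats");

	n = lookup(n->children, n->nchildren, "mapped");
	printf("stats.mapped = %zu\n", n->value);
	return (0);
}
```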
@@ -539,10 +519,8 @@ static void
ctl_arena_clear(ctl_arena_stats_t *astats)
{
- astats->nthreads = 0;
astats->dss = dss_prec_names[dss_prec_limit];
astats->lg_dirty_mult = -1;
- astats->decay_time = -1;
astats->pactive = 0;
astats->pdirty = 0;
if (config_stats) {
@@ -560,27 +538,20 @@ ctl_arena_clear(ctl_arena_stats_t *astats)
}
static void
-ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats, arena_t *arena)
+ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
{
unsigned i;
- if (config_stats) {
- arena_stats_merge(tsdn, arena, &cstats->nthreads, &cstats->dss,
- &cstats->lg_dirty_mult, &cstats->decay_time,
- &cstats->pactive, &cstats->pdirty, &cstats->astats,
- cstats->bstats, cstats->lstats, cstats->hstats);
-
- for (i = 0; i < NBINS; i++) {
- cstats->allocated_small += cstats->bstats[i].curregs *
- index2size(i);
- cstats->nmalloc_small += cstats->bstats[i].nmalloc;
- cstats->ndalloc_small += cstats->bstats[i].ndalloc;
- cstats->nrequests_small += cstats->bstats[i].nrequests;
- }
- } else {
- arena_basic_stats_merge(tsdn, arena, &cstats->nthreads,
- &cstats->dss, &cstats->lg_dirty_mult, &cstats->decay_time,
- &cstats->pactive, &cstats->pdirty);
+ arena_stats_merge(arena, &cstats->dss, &cstats->lg_dirty_mult,
+ &cstats->pactive, &cstats->pdirty, &cstats->astats, cstats->bstats,
+ cstats->lstats, cstats->hstats);
+
+ for (i = 0; i < NBINS; i++) {
+ cstats->allocated_small += cstats->bstats[i].curregs *
+ index2size(i);
+ cstats->nmalloc_small += cstats->bstats[i].nmalloc;
+ cstats->ndalloc_small += cstats->bstats[i].ndalloc;
+ cstats->nrequests_small += cstats->bstats[i].nrequests;
}
}
@@ -589,91 +560,89 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
{
unsigned i;
- sstats->nthreads += astats->nthreads;
sstats->pactive += astats->pactive;
sstats->pdirty += astats->pdirty;
- if (config_stats) {
- sstats->astats.mapped += astats->astats.mapped;
- sstats->astats.retained += astats->astats.retained;
- sstats->astats.npurge += astats->astats.npurge;
- sstats->astats.nmadvise += astats->astats.nmadvise;
- sstats->astats.purged += astats->astats.purged;
-
- sstats->astats.metadata_mapped +=
- astats->astats.metadata_mapped;
- sstats->astats.metadata_allocated +=
- astats->astats.metadata_allocated;
-
- sstats->allocated_small += astats->allocated_small;
- sstats->nmalloc_small += astats->nmalloc_small;
- sstats->ndalloc_small += astats->ndalloc_small;
- sstats->nrequests_small += astats->nrequests_small;
-
- sstats->astats.allocated_large +=
- astats->astats.allocated_large;
- sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
- sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
- sstats->astats.nrequests_large +=
- astats->astats.nrequests_large;
-
- sstats->astats.allocated_huge += astats->astats.allocated_huge;
- sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge;
- sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge;
-
- for (i = 0; i < NBINS; i++) {
- sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
- sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
- sstats->bstats[i].nrequests +=
- astats->bstats[i].nrequests;
- sstats->bstats[i].curregs += astats->bstats[i].curregs;
- if (config_tcache) {
- sstats->bstats[i].nfills +=
- astats->bstats[i].nfills;
- sstats->bstats[i].nflushes +=
- astats->bstats[i].nflushes;
- }
- sstats->bstats[i].nruns += astats->bstats[i].nruns;
- sstats->bstats[i].reruns += astats->bstats[i].reruns;
- sstats->bstats[i].curruns += astats->bstats[i].curruns;
+ sstats->astats.mapped += astats->astats.mapped;
+ sstats->astats.npurge += astats->astats.npurge;
+ sstats->astats.nmadvise += astats->astats.nmadvise;
+ sstats->astats.purged += astats->astats.purged;
+
+ sstats->astats.metadata_mapped += astats->astats.metadata_mapped;
+ sstats->astats.metadata_allocated += astats->astats.metadata_allocated;
+
+ sstats->allocated_small += astats->allocated_small;
+ sstats->nmalloc_small += astats->nmalloc_small;
+ sstats->ndalloc_small += astats->ndalloc_small;
+ sstats->nrequests_small += astats->nrequests_small;
+
+ sstats->astats.allocated_large += astats->astats.allocated_large;
+ sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
+ sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
+ sstats->astats.nrequests_large += astats->astats.nrequests_large;
+
+ sstats->astats.allocated_huge += astats->astats.allocated_huge;
+ sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge;
+ sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge;
+
+ for (i = 0; i < NBINS; i++) {
+ sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
+ sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
+ sstats->bstats[i].nrequests += astats->bstats[i].nrequests;
+ sstats->bstats[i].curregs += astats->bstats[i].curregs;
+ if (config_tcache) {
+ sstats->bstats[i].nfills += astats->bstats[i].nfills;
+ sstats->bstats[i].nflushes +=
+ astats->bstats[i].nflushes;
}
+ sstats->bstats[i].nruns += astats->bstats[i].nruns;
+ sstats->bstats[i].reruns += astats->bstats[i].reruns;
+ sstats->bstats[i].curruns += astats->bstats[i].curruns;
+ }
- for (i = 0; i < nlclasses; i++) {
- sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
- sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
- sstats->lstats[i].nrequests +=
- astats->lstats[i].nrequests;
- sstats->lstats[i].curruns += astats->lstats[i].curruns;
- }
+ for (i = 0; i < nlclasses; i++) {
+ sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
+ sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
+ sstats->lstats[i].nrequests += astats->lstats[i].nrequests;
+ sstats->lstats[i].curruns += astats->lstats[i].curruns;
+ }
- for (i = 0; i < nhclasses; i++) {
- sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc;
- sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc;
- sstats->hstats[i].curhchunks +=
- astats->hstats[i].curhchunks;
- }
+ for (i = 0; i < nhclasses; i++) {
+ sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc;
+ sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc;
+ sstats->hstats[i].curhchunks += astats->hstats[i].curhchunks;
}
}
static void
-ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i)
+ctl_arena_refresh(arena_t *arena, unsigned i)
{
ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas];
ctl_arena_clear(astats);
- ctl_arena_stats_amerge(tsdn, astats, arena);
- /* Merge into sum stats as well. */
- ctl_arena_stats_smerge(sstats, astats);
+
+ sstats->nthreads += astats->nthreads;
+ if (config_stats) {
+ ctl_arena_stats_amerge(astats, arena);
+ /* Merge into sum stats as well. */
+ ctl_arena_stats_smerge(sstats, astats);
+ } else {
+ astats->pactive += arena->nactive;
+ astats->pdirty += arena->ndirty;
+ /* Merge into sum stats as well. */
+ sstats->pactive += arena->nactive;
+ sstats->pdirty += arena->ndirty;
+ }
}
static bool
-ctl_grow(tsdn_t *tsdn)
+ctl_grow(void)
{
ctl_arena_stats_t *astats;
/* Initialize new arena. */
- if (arena_init(tsdn, ctl_stats.narenas) == NULL)
+ if (arena_init(ctl_stats.narenas) == NULL)
return (true);
/* Allocate extended arena stats. */
@@ -708,32 +677,47 @@ ctl_grow(tsdn_t *tsdn)
}
static void
-ctl_refresh(tsdn_t *tsdn)
+ctl_refresh(void)
{
+ tsd_t *tsd;
unsigned i;
+ bool refreshed;
VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
/*
* Clear sum stats, since they will be merged into by
* ctl_arena_refresh().
*/
+ ctl_stats.arenas[ctl_stats.narenas].nthreads = 0;
ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);
- for (i = 0; i < ctl_stats.narenas; i++)
- tarenas[i] = arena_get(tsdn, i, false);
+ tsd = tsd_fetch();
+ for (i = 0, refreshed = false; i < ctl_stats.narenas; i++) {
+ tarenas[i] = arena_get(tsd, i, false, false);
+ if (tarenas[i] == NULL && !refreshed) {
+ tarenas[i] = arena_get(tsd, i, false, true);
+ refreshed = true;
+ }
+ }
+
+ for (i = 0; i < ctl_stats.narenas; i++) {
+ if (tarenas[i] != NULL)
+ ctl_stats.arenas[i].nthreads = arena_nbound(i);
+ else
+ ctl_stats.arenas[i].nthreads = 0;
+ }
for (i = 0; i < ctl_stats.narenas; i++) {
bool initialized = (tarenas[i] != NULL);
ctl_stats.arenas[i].initialized = initialized;
if (initialized)
- ctl_arena_refresh(tsdn, tarenas[i], i);
+ ctl_arena_refresh(tarenas[i], i);
}
if (config_stats) {
size_t base_allocated, base_resident, base_mapped;
- base_stats_get(tsdn, &base_allocated, &base_resident,
- &base_mapped);
+ base_stats_get(&base_allocated, &base_resident, &base_mapped);
ctl_stats.allocated =
ctl_stats.arenas[ctl_stats.narenas].allocated_small +
ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large +
@@ -750,19 +734,17 @@ ctl_refresh(tsdn_t *tsdn)
ctl_stats.arenas[ctl_stats.narenas].pdirty) << LG_PAGE);
ctl_stats.mapped = base_mapped +
ctl_stats.arenas[ctl_stats.narenas].astats.mapped;
- ctl_stats.retained =
- ctl_stats.arenas[ctl_stats.narenas].astats.retained;
}
ctl_epoch++;
}
static bool
-ctl_init(tsdn_t *tsdn)
+ctl_init(void)
{
bool ret;
- malloc_mutex_lock(tsdn, &ctl_mtx);
+ malloc_mutex_lock(&ctl_mtx);
if (!ctl_initialized) {
/*
* Allocate space for one extra arena stats element, which
@@ -804,19 +786,19 @@ ctl_init(tsdn_t *tsdn)
ctl_stats.arenas[ctl_stats.narenas].initialized = true;
ctl_epoch = 0;
- ctl_refresh(tsdn);
+ ctl_refresh();
ctl_initialized = true;
}
ret = false;
label_return:
- malloc_mutex_unlock(tsdn, &ctl_mtx);
+ malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
static int
-ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
- size_t *mibp, size_t *depthp)
+ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
+ size_t *depthp)
{
int ret;
const char *elm, *tdot, *dot;
@@ -868,7 +850,7 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
}
inode = ctl_indexed_node(node->children);
- node = inode->index(tsdn, mibp, *depthp, (size_t)index);
+ node = inode->index(mibp, *depthp, (size_t)index);
if (node == NULL) {
ret = ENOENT;
goto label_return;
@@ -912,8 +894,8 @@ label_return:
}
int
-ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
+ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
+ size_t newlen)
{
int ret;
size_t depth;
@@ -921,19 +903,19 @@ ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
size_t mib[CTL_MAX_DEPTH];
const ctl_named_node_t *node;
- if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) {
+ if (!ctl_initialized && ctl_init()) {
ret = EAGAIN;
goto label_return;
}
depth = CTL_MAX_DEPTH;
- ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth);
+ ret = ctl_lookup(name, nodes, mib, &depth);
if (ret != 0)
goto label_return;
node = ctl_named_node(nodes[depth-1]);
if (node != NULL && node->ctl)
- ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
+ ret = node->ctl(mib, depth, oldp, oldlenp, newp, newlen);
else {
/* The name refers to a partial path through the ctl tree. */
ret = ENOENT;
@@ -944,29 +926,29 @@ label_return:
}
int
-ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp, size_t *miblenp)
+ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp)
{
int ret;
- if (!ctl_initialized && ctl_init(tsdn)) {
+ if (!ctl_initialized && ctl_init()) {
ret = EAGAIN;
goto label_return;
}
- ret = ctl_lookup(tsdn, name, NULL, mibp, miblenp);
+ ret = ctl_lookup(name, NULL, mibp, miblenp);
label_return:
return(ret);
}
int
-ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
+ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
{
int ret;
const ctl_named_node_t *node;
size_t i;
- if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) {
+ if (!ctl_initialized && ctl_init()) {
ret = EAGAIN;
goto label_return;
}
@@ -978,7 +960,7 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
assert(node->nchildren > 0);
if (ctl_named_node(node->children) != NULL) {
/* Children are named. */
- if (node->nchildren <= (unsigned)mib[i]) {
+ if (node->nchildren <= mib[i]) {
ret = ENOENT;
goto label_return;
}
@@ -988,7 +970,7 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
/* Indexed element. */
inode = ctl_indexed_node(node->children);
- node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]);
+ node = inode->index(mib, miblen, mib[i]);
if (node == NULL) {
ret = ENOENT;
goto label_return;
@@ -998,7 +980,7 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
/* Call the ctl function. */
if (node && node->ctl)
- ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
+ ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen);
else {
/* Partial MIB. */
ret = ENOENT;
@@ -1012,7 +994,7 @@ bool
ctl_boot(void)
{
- if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL))
+ if (malloc_mutex_init(&ctl_mtx))
return (true);
ctl_initialized = false;
@@ -1021,24 +1003,24 @@ ctl_boot(void)
}
void
-ctl_prefork(tsdn_t *tsdn)
+ctl_prefork(void)
{
- malloc_mutex_prefork(tsdn, &ctl_mtx);
+ malloc_mutex_prefork(&ctl_mtx);
}
void
-ctl_postfork_parent(tsdn_t *tsdn)
+ctl_postfork_parent(void)
{
- malloc_mutex_postfork_parent(tsdn, &ctl_mtx);
+ malloc_mutex_postfork_parent(&ctl_mtx);
}
void
-ctl_postfork_child(tsdn_t *tsdn)
+ctl_postfork_child(void)
{
- malloc_mutex_postfork_child(tsdn, &ctl_mtx);
+ malloc_mutex_postfork_child(&ctl_mtx);
}
/******************************************************************************/
@@ -1095,8 +1077,8 @@ ctl_postfork_child(tsdn_t *tsdn)
*/
#define CTL_RO_CLGEN(c, l, n, v, t) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) \
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
+ void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
@@ -1104,7 +1086,7 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
if (!(c)) \
return (ENOENT); \
if (l) \
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
+ malloc_mutex_lock(&ctl_mtx); \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
@@ -1112,47 +1094,47 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
ret = 0; \
label_return: \
if (l) \
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
+ malloc_mutex_unlock(&ctl_mtx); \
return (ret); \
}
#define CTL_RO_CGEN(c, n, v, t) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) \
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
+ void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
\
if (!(c)) \
return (ENOENT); \
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
+ malloc_mutex_lock(&ctl_mtx); \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
+ malloc_mutex_unlock(&ctl_mtx); \
return (ret); \
}
#define CTL_RO_GEN(n, v, t) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) \
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
+ void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
\
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
+ malloc_mutex_lock(&ctl_mtx); \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
+ malloc_mutex_unlock(&ctl_mtx); \
return (ret); \
}
@@ -1162,8 +1144,8 @@ label_return: \
*/
#define CTL_RO_NL_CGEN(c, n, v, t) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) \
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
+ void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
@@ -1181,8 +1163,8 @@ label_return: \
#define CTL_RO_NL_GEN(n, v, t) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) \
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
+ void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
@@ -1198,15 +1180,17 @@ label_return: \
#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) \
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
+ void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
+ tsd_t *tsd; \
\
if (!(c)) \
return (ENOENT); \
READONLY(); \
+ tsd = tsd_fetch(); \
oldval = (m(tsd)); \
READ(oldval, t); \
\
@@ -1215,17 +1199,17 @@ label_return: \
return (ret); \
}
-#define CTL_RO_CONFIG_GEN(n, t) \
+#define CTL_RO_BOOL_CONFIG_GEN(n) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) \
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
+ void *newp, size_t newlen) \
{ \
int ret; \
- t oldval; \
+ bool oldval; \
\
READONLY(); \
oldval = n; \
- READ(oldval, t); \
+ READ(oldval, bool); \
\
ret = 0; \
label_return: \
@@ -1237,51 +1221,48 @@ label_return: \
CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
static int
-epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
+epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
{
int ret;
UNUSED uint64_t newval;
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_lock(&ctl_mtx);
WRITE(newval, uint64_t);
if (newp != NULL)
- ctl_refresh(tsd_tsdn(tsd));
+ ctl_refresh();
READ(ctl_epoch, uint64_t);
ret = 0;
label_return:
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
/******************************************************************************/
-CTL_RO_CONFIG_GEN(config_cache_oblivious, bool)
-CTL_RO_CONFIG_GEN(config_debug, bool)
-CTL_RO_CONFIG_GEN(config_fill, bool)
-CTL_RO_CONFIG_GEN(config_lazy_lock, bool)
-CTL_RO_CONFIG_GEN(config_malloc_conf, const char *)
-CTL_RO_CONFIG_GEN(config_munmap, bool)
-CTL_RO_CONFIG_GEN(config_prof, bool)
-CTL_RO_CONFIG_GEN(config_prof_libgcc, bool)
-CTL_RO_CONFIG_GEN(config_prof_libunwind, bool)
-CTL_RO_CONFIG_GEN(config_stats, bool)
-CTL_RO_CONFIG_GEN(config_tcache, bool)
-CTL_RO_CONFIG_GEN(config_tls, bool)
-CTL_RO_CONFIG_GEN(config_utrace, bool)
-CTL_RO_CONFIG_GEN(config_valgrind, bool)
-CTL_RO_CONFIG_GEN(config_xmalloc, bool)
+CTL_RO_BOOL_CONFIG_GEN(config_cache_oblivious)
+CTL_RO_BOOL_CONFIG_GEN(config_debug)
+CTL_RO_BOOL_CONFIG_GEN(config_fill)
+CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
+CTL_RO_BOOL_CONFIG_GEN(config_munmap)
+CTL_RO_BOOL_CONFIG_GEN(config_prof)
+CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
+CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind)
+CTL_RO_BOOL_CONFIG_GEN(config_stats)
+CTL_RO_BOOL_CONFIG_GEN(config_tcache)
+CTL_RO_BOOL_CONFIG_GEN(config_tls)
+CTL_RO_BOOL_CONFIG_GEN(config_utrace)
+CTL_RO_BOOL_CONFIG_GEN(config_valgrind)
+CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)
/******************************************************************************/
CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
-CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
-CTL_RO_NL_GEN(opt_purge, purge_mode_names[opt_purge], const char *)
+CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
-CTL_RO_NL_GEN(opt_decay_time, opt_decay_time, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
@@ -1306,18 +1287,20 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
/******************************************************************************/
static int
-thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
+thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
{
int ret;
+ tsd_t *tsd;
arena_t *oldarena;
unsigned newind, oldind;
+ tsd = tsd_fetch();
oldarena = arena_choose(tsd, NULL);
if (oldarena == NULL)
return (EAGAIN);
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_lock(&ctl_mtx);
newind = oldind = oldarena->ind;
WRITE(newind, unsigned);
READ(oldind, unsigned);
@@ -1331,7 +1314,7 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
}
/* Initialize arena if necessary. */
- newarena = arena_get(tsd_tsdn(tsd), newind, true);
+ newarena = arena_get(tsd, newind, true, true);
if (newarena == NULL) {
ret = EAGAIN;
goto label_return;
@@ -1341,15 +1324,15 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
if (config_tcache) {
tcache_t *tcache = tsd_tcache_get(tsd);
if (tcache != NULL) {
- tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
- oldarena, newarena);
+ tcache_arena_reassociate(tcache, oldarena,
+ newarena);
}
}
}
ret = 0;
label_return:
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
@@ -1363,8 +1346,8 @@ CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
tsd_thread_deallocatedp_get, uint64_t *)
static int
-thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen)
+thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
bool oldval;
@@ -1388,8 +1371,8 @@ label_return:
}
static int
-thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen)
+thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
@@ -1407,7 +1390,7 @@ label_return:
}
static int
-thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+thread_prof_name_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
@@ -1418,16 +1401,20 @@ thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
READ_XOR_WRITE();
if (newp != NULL) {
+ tsd_t *tsd;
+
if (newlen != sizeof(const char *)) {
ret = EINVAL;
goto label_return;
}
+ tsd = tsd_fetch();
+
if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
0)
goto label_return;
} else {
- const char *oldname = prof_thread_name_get(tsd);
+ const char *oldname = prof_thread_name_get();
READ(oldname, const char *);
}
@@ -1437,7 +1424,7 @@ label_return:
}
static int
-thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+thread_prof_active_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
@@ -1446,13 +1433,13 @@ thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
if (!config_prof)
return (ENOENT);
- oldval = prof_thread_active_get(tsd);
+ oldval = prof_thread_active_get();
if (newp != NULL) {
if (newlen != sizeof(bool)) {
ret = EINVAL;
goto label_return;
}
- if (prof_thread_active_set(tsd, *(bool *)newp)) {
+ if (prof_thread_active_set(*(bool *)newp)) {
ret = EAGAIN;
goto label_return;
}
@@ -1467,16 +1454,19 @@ label_return:
/******************************************************************************/
static int
-tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
+tcache_create_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
{
int ret;
+ tsd_t *tsd;
unsigned tcache_ind;
if (!config_tcache)
return (ENOENT);
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ tsd = tsd_fetch();
+
+ malloc_mutex_lock(&ctl_mtx);
READONLY();
if (tcaches_create(tsd, &tcache_ind)) {
ret = EFAULT;
@@ -1486,20 +1476,23 @@ tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
static int
-tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
+tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
{
int ret;
+ tsd_t *tsd;
unsigned tcache_ind;
if (!config_tcache)
return (ENOENT);
+ tsd = tsd_fetch();
+
WRITEONLY();
tcache_ind = UINT_MAX;
WRITE(tcache_ind, unsigned);
@@ -1515,15 +1508,18 @@ label_return:
}
static int
-tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+tcache_destroy_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
+ tsd_t *tsd;
unsigned tcache_ind;
if (!config_tcache)
return (ENOENT);
+ tsd = tsd_fetch();
+
WRITEONLY();
tcache_ind = UINT_MAX;
WRITE(tcache_ind, unsigned);
@@ -1540,105 +1536,48 @@ label_return:
/******************************************************************************/
+/* ctl_mutex must be held during execution of this function. */
static void
-arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all)
+arena_purge(unsigned arena_ind)
{
+ tsd_t *tsd;
+ unsigned i;
+ bool refreshed;
+ VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
- malloc_mutex_lock(tsdn, &ctl_mtx);
- {
- unsigned narenas = ctl_stats.narenas;
-
- if (arena_ind == narenas) {
- unsigned i;
- VARIABLE_ARRAY(arena_t *, tarenas, narenas);
-
- for (i = 0; i < narenas; i++)
- tarenas[i] = arena_get(tsdn, i, false);
-
- /*
- * No further need to hold ctl_mtx, since narenas and
- * tarenas contain everything needed below.
- */
- malloc_mutex_unlock(tsdn, &ctl_mtx);
-
- for (i = 0; i < narenas; i++) {
- if (tarenas[i] != NULL)
- arena_purge(tsdn, tarenas[i], all);
- }
- } else {
- arena_t *tarena;
-
- assert(arena_ind < narenas);
-
- tarena = arena_get(tsdn, arena_ind, false);
-
- /* No further need to hold ctl_mtx. */
- malloc_mutex_unlock(tsdn, &ctl_mtx);
-
- if (tarena != NULL)
- arena_purge(tsdn, tarena, all);
+ tsd = tsd_fetch();
+ for (i = 0, refreshed = false; i < ctl_stats.narenas; i++) {
+ tarenas[i] = arena_get(tsd, i, false, false);
+ if (tarenas[i] == NULL && !refreshed) {
+ tarenas[i] = arena_get(tsd, i, false, true);
+ refreshed = true;
}
}
-}
-
-static int
-arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
- int ret;
- READONLY();
- WRITEONLY();
- arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], true);
-
- ret = 0;
-label_return:
- return (ret);
-}
-
-static int
-arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
- int ret;
-
- READONLY();
- WRITEONLY();
- arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], false);
-
- ret = 0;
-label_return:
- return (ret);
+ if (arena_ind == ctl_stats.narenas) {
+ unsigned i;
+ for (i = 0; i < ctl_stats.narenas; i++) {
+ if (tarenas[i] != NULL)
+ arena_purge_all(tarenas[i]);
+ }
+ } else {
+ assert(arena_ind < ctl_stats.narenas);
+ if (tarenas[arena_ind] != NULL)
+ arena_purge_all(tarenas[arena_ind]);
+ }
}
static int
-arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
+arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
{
int ret;
- unsigned arena_ind;
- arena_t *arena;
READONLY();
WRITEONLY();
-
- if ((config_valgrind && unlikely(in_valgrind)) || (config_fill &&
- unlikely(opt_quarantine))) {
- ret = EFAULT;
- goto label_return;
- }
-
- arena_ind = (unsigned)mib[1];
- if (config_debug) {
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
- assert(arena_ind < ctl_stats.narenas);
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
- }
- assert(arena_ind >= opt_narenas);
-
- arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
-
- arena_reset(tsd, arena);
+ malloc_mutex_lock(&ctl_mtx);
+ arena_purge(mib[1]);
+ malloc_mutex_unlock(&ctl_mtx);
ret = 0;
label_return:
@@ -1646,16 +1585,16 @@ label_return:
}
static int
-arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
+arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
{
int ret;
const char *dss = NULL;
- unsigned arena_ind = (unsigned)mib[1];
+ unsigned arena_ind = mib[1];
dss_prec_t dss_prec_old = dss_prec_limit;
dss_prec_t dss_prec = dss_prec_limit;
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_lock(&ctl_mtx);
WRITE(dss, const char *);
if (dss != NULL) {
int i;
@@ -1676,13 +1615,13 @@ arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
}
if (arena_ind < ctl_stats.narenas) {
- arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
+ arena_t *arena = arena_get(tsd_fetch(), arena_ind, false, true);
if (arena == NULL || (dss_prec != dss_prec_limit &&
- arena_dss_prec_set(tsd_tsdn(tsd), arena, dss_prec))) {
+ arena_dss_prec_set(arena, dss_prec))) {
ret = EFAULT;
goto label_return;
}
- dss_prec_old = arena_dss_prec_get(tsd_tsdn(tsd), arena);
+ dss_prec_old = arena_dss_prec_get(arena);
} else {
if (dss_prec != dss_prec_limit &&
chunk_dss_prec_set(dss_prec)) {
@@ -1697,61 +1636,26 @@ arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
static int
-arena_i_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen)
-{
- int ret;
- unsigned arena_ind = (unsigned)mib[1];
- arena_t *arena;
-
- arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
- if (arena == NULL) {
- ret = EFAULT;
- goto label_return;
- }
-
- if (oldp != NULL && oldlenp != NULL) {
- size_t oldval = arena_lg_dirty_mult_get(tsd_tsdn(tsd), arena);
- READ(oldval, ssize_t);
- }
- if (newp != NULL) {
- if (newlen != sizeof(ssize_t)) {
- ret = EINVAL;
- goto label_return;
- }
- if (arena_lg_dirty_mult_set(tsd_tsdn(tsd), arena,
- *(ssize_t *)newp)) {
- ret = EFAULT;
- goto label_return;
- }
- }
-
- ret = 0;
-label_return:
- return (ret);
-}
-
-static int
-arena_i_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
- unsigned arena_ind = (unsigned)mib[1];
+ unsigned arena_ind = mib[1];
arena_t *arena;
- arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
+ arena = arena_get(tsd_fetch(), arena_ind, false, true);
if (arena == NULL) {
ret = EFAULT;
goto label_return;
}
if (oldp != NULL && oldlenp != NULL) {
- size_t oldval = arena_decay_time_get(tsd_tsdn(tsd), arena);
+ size_t oldval = arena_lg_dirty_mult_get(arena);
READ(oldval, ssize_t);
}
if (newp != NULL) {
@@ -1759,8 +1663,7 @@ arena_i_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = EINVAL;
goto label_return;
}
- if (arena_decay_time_set(tsd_tsdn(tsd), arena,
- *(ssize_t *)newp)) {
+ if (arena_lg_dirty_mult_set(arena, *(ssize_t *)newp)) {
ret = EFAULT;
goto label_return;
}
@@ -1772,25 +1675,24 @@ label_return:
}
static int
-arena_i_chunk_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen)
+arena_i_chunk_hooks_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
- unsigned arena_ind = (unsigned)mib[1];
+ unsigned arena_ind = mib[1];
arena_t *arena;
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_lock(&ctl_mtx);
if (arena_ind < narenas_total_get() && (arena =
- arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
+ arena_get(tsd_fetch(), arena_ind, false, true)) != NULL) {
if (newp != NULL) {
chunk_hooks_t old_chunk_hooks, new_chunk_hooks;
WRITE(new_chunk_hooks, chunk_hooks_t);
- old_chunk_hooks = chunk_hooks_set(tsd_tsdn(tsd), arena,
+ old_chunk_hooks = chunk_hooks_set(arena,
&new_chunk_hooks);
READ(old_chunk_hooks, chunk_hooks_t);
} else {
- chunk_hooks_t old_chunk_hooks =
- chunk_hooks_get(tsd_tsdn(tsd), arena);
+ chunk_hooks_t old_chunk_hooks = chunk_hooks_get(arena);
READ(old_chunk_hooks, chunk_hooks_t);
}
} else {
@@ -1799,16 +1701,16 @@ arena_i_chunk_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
}
ret = 0;
label_return:
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
static const ctl_named_node_t *
-arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
+arena_i_index(const size_t *mib, size_t miblen, size_t i)
{
- const ctl_named_node_t *ret;
+ const ctl_named_node_t * ret;
- malloc_mutex_lock(tsdn, &ctl_mtx);
+ malloc_mutex_lock(&ctl_mtx);
if (i > ctl_stats.narenas) {
ret = NULL;
goto label_return;
@@ -1816,20 +1718,20 @@ arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
ret = super_arena_i_node;
label_return:
- malloc_mutex_unlock(tsdn, &ctl_mtx);
+ malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
/******************************************************************************/
static int
-arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
unsigned narenas;
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_lock(&ctl_mtx);
READONLY();
if (*oldlenp != sizeof(unsigned)) {
ret = EINVAL;
@@ -1840,23 +1742,23 @@ arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
static int
-arenas_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
unsigned nread, i;
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_lock(&ctl_mtx);
READONLY();
if (*oldlenp != ctl_stats.narenas * sizeof(bool)) {
ret = EINVAL;
nread = (*oldlenp < ctl_stats.narenas * sizeof(bool))
- ? (unsigned)(*oldlenp / sizeof(bool)) : ctl_stats.narenas;
+ ? (*oldlenp / sizeof(bool)) : ctl_stats.narenas;
} else {
ret = 0;
nread = ctl_stats.narenas;
@@ -1866,13 +1768,13 @@ arenas_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
((bool *)oldp)[i] = ctl_stats.arenas[i].initialized;
label_return:
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
static int
-arenas_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen)
+arenas_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
@@ -1896,32 +1798,6 @@ label_return:
return (ret);
}
-static int
-arenas_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
- int ret;
-
- if (oldp != NULL && oldlenp != NULL) {
- size_t oldval = arena_decay_time_default_get();
- READ(oldval, ssize_t);
- }
- if (newp != NULL) {
- if (newlen != sizeof(ssize_t)) {
- ret = EINVAL;
- goto label_return;
- }
- if (arena_decay_time_default_set(*(ssize_t *)newp)) {
- ret = EFAULT;
- goto label_return;
- }
- }
-
- ret = 0;
-label_return:
- return (ret);
-}
-
CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
@@ -1931,7 +1807,7 @@ CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
static const ctl_named_node_t *
-arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
+arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
{
if (i > NBINS)
@@ -1940,9 +1816,9 @@ arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
}
CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned)
-CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+(szind_t)mib[2]), size_t)
+CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+mib[2]), size_t)
static const ctl_named_node_t *
-arenas_lrun_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
+arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
{
if (i > nlclasses)
@@ -1951,10 +1827,9 @@ arenas_lrun_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
}
CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned)
-CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+(szind_t)mib[2]),
- size_t)
+CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+mib[2]), size_t)
static const ctl_named_node_t *
-arenas_hchunk_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
+arenas_hchunk_i_index(const size_t *mib, size_t miblen, size_t i)
{
if (i > nhclasses)
@@ -1963,15 +1838,15 @@ arenas_hchunk_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
}
static int
-arenas_extend_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
+arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
{
int ret;
unsigned narenas;
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_lock(&ctl_mtx);
READONLY();
- if (ctl_grow(tsd_tsdn(tsd))) {
+ if (ctl_grow()) {
ret = EAGAIN;
goto label_return;
}
@@ -1980,15 +1855,15 @@ arenas_extend_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
/******************************************************************************/
static int
-prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen)
+prof_thread_active_init_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
bool oldval;
@@ -2001,10 +1876,9 @@ prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
ret = EINVAL;
goto label_return;
}
- oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
- *(bool *)newp);
+ oldval = prof_thread_active_init_set(*(bool *)newp);
} else
- oldval = prof_thread_active_init_get(tsd_tsdn(tsd));
+ oldval = prof_thread_active_init_get();
READ(oldval, bool);
ret = 0;
@@ -2013,8 +1887,8 @@ label_return:
}
static int
-prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
+prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
{
int ret;
bool oldval;
@@ -2027,9 +1901,9 @@ prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = EINVAL;
goto label_return;
}
- oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp);
+ oldval = prof_active_set(*(bool *)newp);
} else
- oldval = prof_active_get(tsd_tsdn(tsd));
+ oldval = prof_active_get();
READ(oldval, bool);
ret = 0;
@@ -2038,8 +1912,8 @@ label_return:
}
static int
-prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
+prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
{
int ret;
const char *filename = NULL;
@@ -2050,7 +1924,7 @@ prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
WRITEONLY();
WRITE(filename, const char *);
- if (prof_mdump(tsd, filename)) {
+ if (prof_mdump(filename)) {
ret = EFAULT;
goto label_return;
}
@@ -2061,8 +1935,8 @@ label_return:
}
static int
-prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
+prof_gdump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
{
int ret;
bool oldval;
@@ -2075,9 +1949,9 @@ prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = EINVAL;
goto label_return;
}
- oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
+ oldval = prof_gdump_set(*(bool *)newp);
} else
- oldval = prof_gdump_get(tsd_tsdn(tsd));
+ oldval = prof_gdump_get();
READ(oldval, bool);
ret = 0;
@@ -2086,11 +1960,12 @@ label_return:
}
static int
-prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
+prof_reset_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
{
int ret;
size_t lg_sample = lg_prof_sample;
+ tsd_t *tsd;
if (!config_prof)
return (ENOENT);
@@ -2100,6 +1975,8 @@ prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
if (lg_sample >= (sizeof(uint64_t) << 3))
lg_sample = (sizeof(uint64_t) << 3) - 1;
+ tsd = tsd_fetch();
+
prof_reset(tsd, lg_sample);
ret = 0;
@@ -2118,20 +1995,15 @@ CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t)
CTL_RO_CGEN(config_stats, stats_resident, ctl_stats.resident, size_t)
CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
-CTL_RO_CGEN(config_stats, stats_retained, ctl_stats.retained, size_t)
CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
CTL_RO_GEN(stats_arenas_i_lg_dirty_mult, ctl_stats.arenas[mib[2]].lg_dirty_mult,
ssize_t)
-CTL_RO_GEN(stats_arenas_i_decay_time, ctl_stats.arenas[mib[2]].decay_time,
- ssize_t)
CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
ctl_stats.arenas[mib[2]].astats.mapped, size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
- ctl_stats.arenas[mib[2]].astats.retained, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
@@ -2188,8 +2060,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)
static const ctl_named_node_t *
-stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
- size_t j)
+stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j)
{
if (j > NBINS)
@@ -2207,8 +2078,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
static const ctl_named_node_t *
-stats_arenas_i_lruns_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
- size_t j)
+stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
{
if (j > nlclasses)
@@ -2227,8 +2097,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks,
ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t)
static const ctl_named_node_t *
-stats_arenas_i_hchunks_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
- size_t j)
+stats_arenas_i_hchunks_j_index(const size_t *mib, size_t miblen, size_t j)
{
if (j > nhclasses)
@@ -2237,11 +2106,11 @@ stats_arenas_i_hchunks_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
}
static const ctl_named_node_t *
-stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
+stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
{
const ctl_named_node_t * ret;
- malloc_mutex_lock(tsdn, &ctl_mtx);
+ malloc_mutex_lock(&ctl_mtx);
if (i > ctl_stats.narenas || !ctl_stats.arenas[i].initialized) {
ret = NULL;
goto label_return;
@@ -2249,6 +2118,6 @@ stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
ret = super_stats_arenas_i_node;
label_return:
- malloc_mutex_unlock(tsdn, &ctl_mtx);
+ malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
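
[Note on the ctl.c hunks above: most of them undo the 4.4.0 thread-state plumbing. Handlers lose their leading tsd_t */tsdn_t * parameter, mutex calls drop the tsdn argument, and thread state is instead fetched lazily via tsd_fetch() only where a handler needs it. A minimal sketch of the two calling conventions, using invented toy names (toy_tsd_t, toy_tsd_fetch, handler_44, handler_40) rather than jemalloc's real definitions:]

#include <stddef.h>

typedef struct { int unused; } toy_tsd_t;

static toy_tsd_t *
toy_tsd_fetch(void)
{
	static toy_tsd_t tsd;	/* stands in for real per-thread state */
	return (&tsd);
}

/* 4.4.0 shape (being reverted): thread state threaded through every call. */
static int
handler_44(toy_tsd_t *tsd, const size_t *mib, size_t miblen)
{
	(void)tsd; (void)mib; (void)miblen;	/* tsd available up front */
	return (0);
}

/* Restored 4.0.x shape: fetch thread state on demand, inside the handler. */
static int
handler_40(const size_t *mib, size_t miblen)
{
	toy_tsd_t *tsd = toy_tsd_fetch();	/* lazy, local fetch */
	(void)tsd; (void)mib; (void)miblen;
	return (0);
}

[The same pattern recurs throughout the hunks: tsd_fetch() reappears inside thread_arena_ctl, tcache_create_ctl, prof_reset_ctl, and the CTL_TSD_RO_NL_CGEN macro once the handle is no longer passed in.]
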
diff --git a/deps/jemalloc/src/extent.c b/deps/jemalloc/src/extent.c
index 218156c60..13f94411c 100644
--- a/deps/jemalloc/src/extent.c
+++ b/deps/jemalloc/src/extent.c
@@ -3,48 +3,45 @@
/******************************************************************************/
-/*
- * Round down to the nearest chunk size that can actually be requested during
- * normal huge allocation.
- */
JEMALLOC_INLINE_C size_t
extent_quantize(size_t size)
{
- size_t ret;
- szind_t ind;
- assert(size > 0);
-
- ind = size2index(size + 1);
- if (ind == 0) {
- /* Avoid underflow. */
- return (index2size(0));
- }
- ret = index2size(ind - 1);
- assert(ret <= size);
- return (ret);
+ /*
+ * Round down to the nearest chunk size that can actually be requested
+ * during normal huge allocation.
+ */
+ return (index2size(size2index(size + 1) - 1));
}
JEMALLOC_INLINE_C int
-extent_sz_comp(const extent_node_t *a, const extent_node_t *b)
+extent_szad_comp(extent_node_t *a, extent_node_t *b)
{
+ int ret;
size_t a_qsize = extent_quantize(extent_node_size_get(a));
size_t b_qsize = extent_quantize(extent_node_size_get(b));
- return ((a_qsize > b_qsize) - (a_qsize < b_qsize));
-}
+ /*
+ * Compare based on quantized size rather than size, in order to sort
+ * equally useful extents only by address.
+ */
+ ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
+ if (ret == 0) {
+ uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
+ uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
-JEMALLOC_INLINE_C int
-extent_sn_comp(const extent_node_t *a, const extent_node_t *b)
-{
- size_t a_sn = extent_node_sn_get(a);
- size_t b_sn = extent_node_sn_get(b);
+ ret = (a_addr > b_addr) - (a_addr < b_addr);
+ }
- return ((a_sn > b_sn) - (a_sn < b_sn));
+ return (ret);
}
+/* Generate red-black tree functions. */
+rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, szad_link,
+ extent_szad_comp)
+
JEMALLOC_INLINE_C int
-extent_ad_comp(const extent_node_t *a, const extent_node_t *b)
+extent_ad_comp(extent_node_t *a, extent_node_t *b)
{
uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
@@ -52,26 +49,5 @@ extent_ad_comp(const extent_node_t *a, const extent_node_t *b)
return ((a_addr > b_addr) - (a_addr < b_addr));
}
-JEMALLOC_INLINE_C int
-extent_szsnad_comp(const extent_node_t *a, const extent_node_t *b)
-{
- int ret;
-
- ret = extent_sz_comp(a, b);
- if (ret != 0)
- return (ret);
-
- ret = extent_sn_comp(a, b);
- if (ret != 0)
- return (ret);
-
- ret = extent_ad_comp(a, b);
- return (ret);
-}
-
-/* Generate red-black tree functions. */
-rb_gen(, extent_tree_szsnad_, extent_tree_t, extent_node_t, szsnad_link,
- extent_szsnad_comp)
-
/* Generate red-black tree functions. */
rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp)
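
[Note on the extent.c hunks above: the revert collapses the 4.4.0 three-key extent ordering (quantized size, serial number, address) back into the two-key szad ordering (quantized size, address). An illustrative sketch, not jemalloc source; the toy names (toy_node_t, cmp_zu, toy_szsnad_comp, toy_szad_comp) are assumptions, while the key orderings themselves are taken from the diff:]

#include <stddef.h>
#include <stdint.h>

typedef struct {
	size_t		qsize;	/* quantized extent size */
	size_t		sn;	/* serial number (4.4.0 only) */
	uintptr_t	addr;	/* extent base address */
} toy_node_t;

static int
cmp_zu(size_t a, size_t b)
{
	return ((a > b) - (a < b));
}

/* 4.4.0 ordering being reverted: size, then serial number, then address. */
static int
toy_szsnad_comp(const toy_node_t *a, const toy_node_t *b)
{
	int ret = cmp_zu(a->qsize, b->qsize);
	if (ret != 0)
		return (ret);
	ret = cmp_zu(a->sn, b->sn);
	if (ret != 0)
		return (ret);
	return (cmp_zu((size_t)a->addr, (size_t)b->addr));
}

/*
 * Restored 4.0.x ordering: size, then address, so extents that are
 * equally useful after quantization are recycled lowest-address-first.
 */
static int
toy_szad_comp(const toy_node_t *a, const toy_node_t *b)
{
	int ret = cmp_zu(a->qsize, b->qsize);
	return ((ret != 0) ? ret : cmp_zu((size_t)a->addr, (size_t)b->addr));
}
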
diff --git a/deps/jemalloc/src/huge.c b/deps/jemalloc/src/huge.c
index 8abd8c00c..1e9a66512 100644
--- a/deps/jemalloc/src/huge.c
+++ b/deps/jemalloc/src/huge.c
@@ -15,21 +15,12 @@ huge_node_get(const void *ptr)
}
static bool
-huge_node_set(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
+huge_node_set(const void *ptr, extent_node_t *node)
{
assert(extent_node_addr_get(node) == ptr);
assert(!extent_node_achunk_get(node));
- return (chunk_register(tsdn, ptr, node));
-}
-
-static void
-huge_node_reset(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
-{
- bool err;
-
- err = huge_node_set(tsdn, ptr, node);
- assert(!err);
+ return (chunk_register(ptr, node));
}
static void
@@ -40,39 +31,39 @@ huge_node_unset(const void *ptr, const extent_node_t *node)
}
void *
-huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero)
+huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
+ tcache_t *tcache)
{
+ size_t usize;
- assert(usize == s2u(usize));
+ usize = s2u(size);
+ if (usize == 0) {
+ /* size_t overflow. */
+ return (NULL);
+ }
- return (huge_palloc(tsdn, arena, usize, chunksize, zero));
+ return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache));
}
void *
-huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
- bool zero)
+huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
+ bool zero, tcache_t *tcache)
{
void *ret;
- size_t ausize;
- arena_t *iarena;
+ size_t usize;
extent_node_t *node;
- size_t sn;
bool is_zeroed;
/* Allocate one or more contiguous chunks for this request. */
- assert(!tsdn_null(tsdn) || arena != NULL);
-
- ausize = sa2u(usize, alignment);
- if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
+ usize = sa2u(size, alignment);
+ if (unlikely(usize == 0))
return (NULL);
- assert(ausize >= chunksize);
+ assert(usize >= chunksize);
/* Allocate an extent node with which to track the chunk. */
- iarena = (!tsdn_null(tsdn)) ? arena_ichoose(tsdn_tsd(tsdn), NULL) :
- a0get();
- node = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_node_t)),
- CACHELINE, false, NULL, true, iarena);
+ node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
+ CACHELINE, false, tcache, true, arena);
if (node == NULL)
return (NULL);
@@ -81,35 +72,33 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
* it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed = zero;
- if (likely(!tsdn_null(tsdn)))
- arena = arena_choose(tsdn_tsd(tsdn), arena);
- if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn,
- arena, usize, alignment, &sn, &is_zeroed)) == NULL) {
- idalloctm(tsdn, node, NULL, true, true);
+ arena = arena_choose(tsd, arena);
+ if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
+ size, alignment, &is_zeroed)) == NULL) {
+ idalloctm(tsd, node, tcache, true);
return (NULL);
}
- extent_node_init(node, arena, ret, usize, sn, is_zeroed, true);
+ extent_node_init(node, arena, ret, size, is_zeroed, true);
- if (huge_node_set(tsdn, ret, node)) {
- arena_chunk_dalloc_huge(tsdn, arena, ret, usize, sn);
- idalloctm(tsdn, node, NULL, true, true);
+ if (huge_node_set(ret, node)) {
+ arena_chunk_dalloc_huge(arena, ret, size);
+ idalloctm(tsd, node, tcache, true);
return (NULL);
}
/* Insert node into huge. */
- malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ malloc_mutex_lock(&arena->huge_mtx);
ql_elm_new(node, ql_link);
ql_tail_insert(&arena->huge, node, ql_link);
- malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+ malloc_mutex_unlock(&arena->huge_mtx);
if (zero || (config_fill && unlikely(opt_zero))) {
if (!is_zeroed)
- memset(ret, 0, usize);
+ memset(ret, 0, size);
} else if (config_fill && unlikely(opt_junk_alloc))
- memset(ret, JEMALLOC_ALLOC_JUNK, usize);
+ memset(ret, 0xa5, size);
- arena_decay_tick(tsdn, arena);
return (ret);
}
@@ -127,7 +116,7 @@ huge_dalloc_junk(void *ptr, size_t usize)
* unmapped.
*/
if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
- memset(ptr, JEMALLOC_FREE_JUNK, usize);
+ memset(ptr, 0x5a, usize);
}
}
#ifdef JEMALLOC_JET
@@ -137,8 +126,8 @@ huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif
static void
-huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
- size_t usize_min, size_t usize_max, bool zero)
+huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
+ size_t usize_max, bool zero)
{
size_t usize, usize_next;
extent_node_t *node;
@@ -162,28 +151,24 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
if (oldsize > usize) {
size_t sdiff = oldsize - usize;
if (config_fill && unlikely(opt_junk_free)) {
- memset((void *)((uintptr_t)ptr + usize),
- JEMALLOC_FREE_JUNK, sdiff);
+ memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
post_zeroed = false;
} else {
- post_zeroed = !chunk_purge_wrapper(tsdn, arena,
- &chunk_hooks, ptr, CHUNK_CEILING(oldsize), usize,
- sdiff);
+ post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
+ ptr, CHUNK_CEILING(oldsize), usize, sdiff);
}
} else
post_zeroed = pre_zeroed;
- malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ malloc_mutex_lock(&arena->huge_mtx);
/* Update the size of the huge allocation. */
- huge_node_unset(ptr, node);
assert(extent_node_size_get(node) != usize);
extent_node_size_set(node, usize);
- huge_node_reset(tsdn, ptr, node);
/* Update zeroed. */
extent_node_zeroed_set(node, post_zeroed);
- malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+ malloc_mutex_unlock(&arena->huge_mtx);
- arena_chunk_ralloc_huge_similar(tsdn, arena, ptr, oldsize, usize);
+ arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);
/* Fill if necessary (growing). */
if (oldsize < usize) {
@@ -193,15 +178,14 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
usize - oldsize);
}
} else if (config_fill && unlikely(opt_junk_alloc)) {
- memset((void *)((uintptr_t)ptr + oldsize),
- JEMALLOC_ALLOC_JUNK, usize - oldsize);
+ memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
+ oldsize);
}
}
}
static bool
-huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
- size_t usize)
+huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
{
extent_node_t *node;
arena_t *arena;
@@ -212,7 +196,7 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
pre_zeroed = extent_node_zeroed_get(node);
- chunk_hooks = chunk_hooks_get(tsdn, arena);
+ chunk_hooks = chunk_hooks_get(arena);
assert(oldsize > usize);
@@ -229,59 +213,53 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
sdiff);
post_zeroed = false;
} else {
- post_zeroed = !chunk_purge_wrapper(tsdn, arena,
- &chunk_hooks, CHUNK_ADDR2BASE((uintptr_t)ptr +
- usize), CHUNK_CEILING(oldsize),
+ post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
+ CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
+ CHUNK_CEILING(oldsize),
CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
}
} else
post_zeroed = pre_zeroed;
- malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ malloc_mutex_lock(&arena->huge_mtx);
/* Update the size of the huge allocation. */
- huge_node_unset(ptr, node);
extent_node_size_set(node, usize);
- huge_node_reset(tsdn, ptr, node);
/* Update zeroed. */
extent_node_zeroed_set(node, post_zeroed);
- malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+ malloc_mutex_unlock(&arena->huge_mtx);
/* Zap the excess chunks. */
- arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize,
- extent_node_sn_get(node));
+ arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);
return (false);
}
static bool
-huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
- size_t usize, bool zero) {
+huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero) {
extent_node_t *node;
arena_t *arena;
bool is_zeroed_subchunk, is_zeroed_chunk;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
- malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ malloc_mutex_lock(&arena->huge_mtx);
is_zeroed_subchunk = extent_node_zeroed_get(node);
- malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+ malloc_mutex_unlock(&arena->huge_mtx);
/*
- * Use is_zeroed_chunk to detect whether the trailing memory is zeroed,
- * update extent's zeroed field, and zero as necessary.
+ * Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so
+ * that it is possible to make correct junk/zero fill decisions below.
*/
- is_zeroed_chunk = false;
- if (arena_chunk_ralloc_huge_expand(tsdn, arena, ptr, oldsize, usize,
+ is_zeroed_chunk = zero;
+
+ if (arena_chunk_ralloc_huge_expand(arena, ptr, oldsize, usize,
&is_zeroed_chunk))
return (true);
- malloc_mutex_lock(tsdn, &arena->huge_mtx);
- huge_node_unset(ptr, node);
+ malloc_mutex_lock(&arena->huge_mtx);
+ /* Update the size of the huge allocation. */
extent_node_size_set(node, usize);
- extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
- is_zeroed_chunk);
- huge_node_reset(tsdn, ptr, node);
- malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+ malloc_mutex_unlock(&arena->huge_mtx);
if (zero || (config_fill && unlikely(opt_zero))) {
if (!is_zeroed_subchunk) {
@@ -294,21 +272,19 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
CHUNK_CEILING(oldsize));
}
} else if (config_fill && unlikely(opt_junk_alloc)) {
- memset((void *)((uintptr_t)ptr + oldsize), JEMALLOC_ALLOC_JUNK,
- usize - oldsize);
+ memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
+ oldsize);
}
return (false);
}
bool
-huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
+huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
size_t usize_max, bool zero)
{
assert(s2u(oldsize) == oldsize);
- /* The following should have been caught by callers. */
- assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS);
/* Both allocations must be huge to avoid a move. */
if (oldsize < chunksize || usize_max < chunksize)
@@ -316,18 +292,13 @@ huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
/* Attempt to expand the allocation in-place. */
- if (!huge_ralloc_no_move_expand(tsdn, ptr, oldsize, usize_max,
- zero)) {
- arena_decay_tick(tsdn, huge_aalloc(ptr));
+ if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max, zero))
return (false);
- }
/* Try again, this time with usize_min. */
if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
- CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(tsdn,
- ptr, oldsize, usize_min, zero)) {
- arena_decay_tick(tsdn, huge_aalloc(ptr));
+ CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(ptr,
+ oldsize, usize_min, zero))
return (false);
- }
}
/*
@@ -336,46 +307,36 @@ huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
*/
if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
&& CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
- huge_ralloc_no_move_similar(tsdn, ptr, oldsize, usize_min,
- usize_max, zero);
- arena_decay_tick(tsdn, huge_aalloc(ptr));
+ huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max,
+ zero);
return (false);
}
/* Attempt to shrink the allocation in-place. */
- if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) {
- if (!huge_ralloc_no_move_shrink(tsdn, ptr, oldsize,
- usize_max)) {
- arena_decay_tick(tsdn, huge_aalloc(ptr));
- return (false);
- }
- }
+ if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max))
+ return (huge_ralloc_no_move_shrink(ptr, oldsize, usize_max));
return (true);
}
static void *
-huge_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
- size_t alignment, bool zero)
+huge_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
+ size_t alignment, bool zero, tcache_t *tcache)
{
if (alignment <= chunksize)
- return (huge_malloc(tsdn, arena, usize, zero));
- return (huge_palloc(tsdn, arena, usize, alignment, zero));
+ return (huge_malloc(tsd, arena, usize, zero, tcache));
+ return (huge_palloc(tsd, arena, usize, alignment, zero, tcache));
}
void *
-huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
- size_t usize, size_t alignment, bool zero, tcache_t *tcache)
+huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
+ size_t alignment, bool zero, tcache_t *tcache)
{
void *ret;
size_t copysize;
- /* The following should have been caught by callers. */
- assert(usize > 0 && usize <= HUGE_MAXCLASS);
-
/* Try to avoid moving the allocation. */
- if (!huge_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, usize,
- zero))
+ if (!huge_ralloc_no_move(ptr, oldsize, usize, usize, zero))
return (ptr);
/*
@@ -383,19 +344,19 @@ huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
* different size class. In that case, fall back to allocating new
* space and copying.
*/
- ret = huge_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, alignment,
- zero);
+ ret = huge_ralloc_move_helper(tsd, arena, usize, alignment, zero,
+ tcache);
if (ret == NULL)
return (NULL);
copysize = (usize < oldsize) ? usize : oldsize;
memcpy(ret, ptr, copysize);
- isqalloc(tsd, ptr, oldsize, tcache, true);
+ isqalloc(tsd, ptr, oldsize, tcache);
return (ret);
}
void
-huge_dalloc(tsdn_t *tsdn, void *ptr)
+huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
extent_node_t *node;
arena_t *arena;
@@ -403,18 +364,15 @@ huge_dalloc(tsdn_t *tsdn, void *ptr)
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
huge_node_unset(ptr, node);
- malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ malloc_mutex_lock(&arena->huge_mtx);
ql_remove(&arena->huge, node, ql_link);
- malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+ malloc_mutex_unlock(&arena->huge_mtx);
huge_dalloc_junk(extent_node_addr_get(node),
extent_node_size_get(node));
- arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node),
- extent_node_addr_get(node), extent_node_size_get(node),
- extent_node_sn_get(node));
- idalloctm(tsdn, node, NULL, true, true);
-
- arena_decay_tick(tsdn, arena);
+ arena_chunk_dalloc_huge(extent_node_arena_get(node),
+ extent_node_addr_get(node), extent_node_size_get(node));
+ idalloctm(tsd, node, tcache, true);
}
arena_t *
@@ -425,7 +383,7 @@ huge_aalloc(const void *ptr)
}
size_t
-huge_salloc(tsdn_t *tsdn, const void *ptr)
+huge_salloc(const void *ptr)
{
size_t size;
extent_node_t *node;
@@ -433,15 +391,15 @@ huge_salloc(tsdn_t *tsdn, const void *ptr)
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
- malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ malloc_mutex_lock(&arena->huge_mtx);
size = extent_node_size_get(node);
- malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+ malloc_mutex_unlock(&arena->huge_mtx);
return (size);
}
prof_tctx_t *
-huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
+huge_prof_tctx_get(const void *ptr)
{
prof_tctx_t *tctx;
extent_node_t *node;
@@ -449,29 +407,29 @@ huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
- malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ malloc_mutex_lock(&arena->huge_mtx);
tctx = extent_node_prof_tctx_get(node);
- malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+ malloc_mutex_unlock(&arena->huge_mtx);
return (tctx);
}
void
-huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx)
+huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
{
extent_node_t *node;
arena_t *arena;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
- malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ malloc_mutex_lock(&arena->huge_mtx);
extent_node_prof_tctx_set(node, tctx);
- malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+ malloc_mutex_unlock(&arena->huge_mtx);
}
void
-huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr)
+huge_prof_tctx_reset(const void *ptr)
{
- huge_prof_tctx_set(tsdn, ptr, (prof_tctx_t *)(uintptr_t)1U);
+ huge_prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
}
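
huge_prof_tctx_reset() stores the non-address value (prof_tctx_t *)(uintptr_t)1U as a "not sampled" marker instead of keeping a separate flag word. A tiny sketch of that sentinel-pointer idiom; NOT_SAMPLED is an illustrative name, not jemalloc's:

#include <stdint.h>
#include <stdio.h>

/* 1 is never a valid allocation address, so it can tag "not sampled". */
#define NOT_SAMPLED     ((void *)(uintptr_t)1U)

int
main(void)
{
        void *tctx = NOT_SAMPLED;

        if ((uintptr_t)tctx == (uintptr_t)1U)
                printf("allocation was not sampled\n");
        return (0);
}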
diff --git a/deps/jemalloc/src/jemalloc.c b/deps/jemalloc/src/jemalloc.c
index 07389ca2f..fe77c2475 100644
--- a/deps/jemalloc/src/jemalloc.c
+++ b/deps/jemalloc/src/jemalloc.c
@@ -5,11 +5,7 @@
/* Data. */
/* Runtime configuration options. */
-const char *je_malloc_conf
-#ifndef _WIN32
- JEMALLOC_ATTR(weak)
-#endif
- ;
+const char *je_malloc_conf JEMALLOC_ATTR(weak);
bool opt_abort =
#ifdef JEMALLOC_DEBUG
true
@@ -44,14 +40,14 @@ bool opt_redzone = false;
bool opt_utrace = false;
bool opt_xmalloc = false;
bool opt_zero = false;
-unsigned opt_narenas = 0;
+size_t opt_narenas = 0;
/* Initialized to true if the process is running inside Valgrind. */
bool in_valgrind;
unsigned ncpus;
-/* Protects arenas initialization. */
+/* Protects arenas initialization (arenas, narenas_total). */
static malloc_mutex_t arenas_lock;
/*
* Arenas that are used to service external requests. Not all elements of the
@@ -61,10 +57,10 @@ static malloc_mutex_t arenas_lock;
* arenas. arenas[narenas_auto..narenas_total) are only used if the application
* takes some action to create them and allocate from them.
*/
-arena_t **arenas;
-static unsigned narenas_total; /* Use narenas_total_*(). */
+static arena_t **arenas;
+static unsigned narenas_total;
static arena_t *a0; /* arenas[0]; read-only after initialization. */
-unsigned narenas_auto; /* Read-only after initialization. */
+static unsigned narenas_auto; /* Read-only after initialization. */
typedef enum {
malloc_init_uninitialized = 3,
@@ -74,37 +70,9 @@ typedef enum {
} malloc_init_t;
static malloc_init_t malloc_init_state = malloc_init_uninitialized;
-/* False should be the common case. Set to true to trigger initialization. */
-static bool malloc_slow = true;
-
-/* When malloc_slow is true, set the corresponding bits for sanity check. */
-enum {
- flag_opt_junk_alloc = (1U),
- flag_opt_junk_free = (1U << 1),
- flag_opt_quarantine = (1U << 2),
- flag_opt_zero = (1U << 3),
- flag_opt_utrace = (1U << 4),
- flag_in_valgrind = (1U << 5),
- flag_opt_xmalloc = (1U << 6)
-};
-static uint8_t malloc_slow_flags;
-
-JEMALLOC_ALIGNED(CACHELINE)
-const size_t pind2sz_tab[NPSIZES] = {
-#define PSZ_yes(lg_grp, ndelta, lg_delta) \
- (((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))),
-#define PSZ_no(lg_grp, ndelta, lg_delta)
-#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
- PSZ_##psz(lg_grp, ndelta, lg_delta)
- SIZE_CLASSES
-#undef PSZ_yes
-#undef PSZ_no
-#undef SC
-};
-
JEMALLOC_ALIGNED(CACHELINE)
const size_t index2size_tab[NSIZES] = {
-#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
+#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
SIZE_CLASSES
#undef SC
@@ -176,7 +144,7 @@ const uint8_t size2index_tab[] = {
#define S2B_11(i) S2B_10(i) S2B_10(i)
#endif
#define S2B_no(i)
-#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
+#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
S2B_##lg_delta_lookup(index)
SIZE_CLASSES
#undef S2B_3
@@ -227,7 +195,7 @@ _init_init_lock(void)
* really only matters early in the process creation, before any
* separate thread normally starts doing anything. */
if (!init_lock_initialized)
- malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT);
+ malloc_mutex_init(&init_lock);
init_lock_initialized = true;
}
@@ -322,10 +290,18 @@ malloc_init(void)
}
/*
- * The a0*() functions are used instead of i{d,}alloc() in situations that
+ * The a0*() functions are used instead of i[mcd]alloc() in situations that
* cannot tolerate TLS variable access.
*/
+arena_t *
+a0get(void)
+{
+
+ assert(a0 != NULL);
+ return (a0);
+}
+
static void *
a0ialloc(size_t size, bool zero, bool is_metadata)
{
@@ -333,22 +309,14 @@ a0ialloc(size_t size, bool zero, bool is_metadata)
if (unlikely(malloc_init_a0()))
return (NULL);
- return (iallocztm(TSDN_NULL, size, size2index(size), zero, NULL,
- is_metadata, arena_get(TSDN_NULL, 0, true), true));
+ return (iallocztm(NULL, size, zero, false, is_metadata, a0get()));
}
static void
a0idalloc(void *ptr, bool is_metadata)
{
- idalloctm(TSDN_NULL, ptr, false, is_metadata, true);
-}
-
-arena_t *
-a0get(void)
-{
-
- return (a0);
+ idalloctm(NULL, ptr, false, is_metadata);
}
void *
@@ -405,228 +373,224 @@ bootstrap_free(void *ptr)
a0idalloc(ptr, false);
}
-static void
-arena_set(unsigned ind, arena_t *arena)
-{
-
- atomic_write_p((void **)&arenas[ind], arena);
-}
-
-static void
-narenas_total_set(unsigned narenas)
-{
-
- atomic_write_u(&narenas_total, narenas);
-}
-
-static void
-narenas_total_inc(void)
-{
-
- atomic_add_u(&narenas_total, 1);
-}
-
-unsigned
-narenas_total_get(void)
-{
-
- return (atomic_read_u(&narenas_total));
-}
-
/* Create a new arena and insert it into the arenas array at index ind. */
static arena_t *
-arena_init_locked(tsdn_t *tsdn, unsigned ind)
+arena_init_locked(unsigned ind)
{
arena_t *arena;
- assert(ind <= narenas_total_get());
+ /* Expand arenas if necessary. */
+ assert(ind <= narenas_total);
if (ind > MALLOCX_ARENA_MAX)
return (NULL);
- if (ind == narenas_total_get())
- narenas_total_inc();
+ if (ind == narenas_total) {
+ unsigned narenas_new = narenas_total + 1;
+ arena_t **arenas_new =
+ (arena_t **)a0malloc(CACHELINE_CEILING(narenas_new *
+ sizeof(arena_t *)));
+ if (arenas_new == NULL)
+ return (NULL);
+ memcpy(arenas_new, arenas, narenas_total * sizeof(arena_t *));
+ arenas_new[ind] = NULL;
+ /*
+ * Deallocate only if arenas came from a0malloc() (not
+ * base_alloc()).
+ */
+ if (narenas_total != narenas_auto)
+ a0dalloc(arenas);
+ arenas = arenas_new;
+ narenas_total = narenas_new;
+ }
/*
* Another thread may have already initialized arenas[ind] if it's an
* auto arena.
*/
- arena = arena_get(tsdn, ind, false);
+ arena = arenas[ind];
if (arena != NULL) {
assert(ind < narenas_auto);
return (arena);
}
/* Actually initialize the arena. */
- arena = arena_new(tsdn, ind);
- arena_set(ind, arena);
+ arena = arenas[ind] = arena_new(ind);
return (arena);
}
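
Before 4.4.0's scheme of pre-sizing the array to MALLOCX_ARENA_MAX+1 slots, arena_init_locked() grows the arenas array one slot at a time: allocate a larger copy, transplant the old pointers, and free the old block only if it was heap-allocated. A self-contained sketch of that grow-by-copy step, with malloc/free standing in for a0malloc/a0dalloc and the base_alloc() ownership wrinkle elided:

#include <stdlib.h>
#include <string.h>

static void **slots;
static unsigned nslots;

static int
slots_grow_to(unsigned ind)
{
        if (ind == nslots) {
                unsigned nslots_new = nslots + 1;
                void **slots_new = malloc(nslots_new * sizeof(void *));

                if (slots_new == NULL)
                        return (1);
                if (nslots != 0)
                        memcpy(slots_new, slots, nslots * sizeof(void *));
                slots_new[ind] = NULL;
                free(slots);    /* original frees only a0malloc()ed arrays */
                slots = slots_new;
                nslots = nslots_new;
        }
        return (0);
}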
arena_t *
-arena_init(tsdn_t *tsdn, unsigned ind)
+arena_init(unsigned ind)
{
arena_t *arena;
- malloc_mutex_lock(tsdn, &arenas_lock);
- arena = arena_init_locked(tsdn, ind);
- malloc_mutex_unlock(tsdn, &arenas_lock);
+ malloc_mutex_lock(&arenas_lock);
+ arena = arena_init_locked(ind);
+ malloc_mutex_unlock(&arenas_lock);
return (arena);
}
+unsigned
+narenas_total_get(void)
+{
+ unsigned narenas;
+
+ malloc_mutex_lock(&arenas_lock);
+ narenas = narenas_total;
+ malloc_mutex_unlock(&arenas_lock);
+
+ return (narenas);
+}
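
The revert also trades the lock-free atomic_read_u() accessor for a mutex-protected read; both yield a consistent snapshot, the locked form just serializes with writers. The equivalent pthread shape, for reference (counter and lock are illustrative names):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned counter;

static unsigned
counter_get(void)
{
        unsigned n;

        pthread_mutex_lock(&lock);
        n = counter;
        pthread_mutex_unlock(&lock);
        return (n);
}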
+
static void
-arena_bind(tsd_t *tsd, unsigned ind, bool internal)
+arena_bind_locked(tsd_t *tsd, unsigned ind)
{
arena_t *arena;
- if (!tsd_nominal(tsd))
- return;
-
- arena = arena_get(tsd_tsdn(tsd), ind, false);
- arena_nthreads_inc(arena, internal);
+ arena = arenas[ind];
+ arena->nthreads++;
- if (internal)
- tsd_iarena_set(tsd, arena);
- else
+ if (tsd_nominal(tsd))
tsd_arena_set(tsd, arena);
}
+static void
+arena_bind(tsd_t *tsd, unsigned ind)
+{
+
+ malloc_mutex_lock(&arenas_lock);
+ arena_bind_locked(tsd, ind);
+ malloc_mutex_unlock(&arenas_lock);
+}
+
void
arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
{
arena_t *oldarena, *newarena;
- oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
- newarena = arena_get(tsd_tsdn(tsd), newind, false);
- arena_nthreads_dec(oldarena, false);
- arena_nthreads_inc(newarena, false);
+ malloc_mutex_lock(&arenas_lock);
+ oldarena = arenas[oldind];
+ newarena = arenas[newind];
+ oldarena->nthreads--;
+ newarena->nthreads++;
+ malloc_mutex_unlock(&arenas_lock);
tsd_arena_set(tsd, newarena);
}
+unsigned
+arena_nbound(unsigned ind)
+{
+ unsigned nthreads;
+
+ malloc_mutex_lock(&arenas_lock);
+ nthreads = arenas[ind]->nthreads;
+ malloc_mutex_unlock(&arenas_lock);
+ return (nthreads);
+}
+
static void
-arena_unbind(tsd_t *tsd, unsigned ind, bool internal)
+arena_unbind(tsd_t *tsd, unsigned ind)
{
arena_t *arena;
- arena = arena_get(tsd_tsdn(tsd), ind, false);
- arena_nthreads_dec(arena, internal);
- if (internal)
- tsd_iarena_set(tsd, NULL);
- else
- tsd_arena_set(tsd, NULL);
+ malloc_mutex_lock(&arenas_lock);
+ arena = arenas[ind];
+ arena->nthreads--;
+ malloc_mutex_unlock(&arenas_lock);
+ tsd_arena_set(tsd, NULL);
}
-arena_tdata_t *
-arena_tdata_get_hard(tsd_t *tsd, unsigned ind)
+arena_t *
+arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing)
{
- arena_tdata_t *tdata, *arenas_tdata_old;
- arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
- unsigned narenas_tdata_old, i;
- unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
+ arena_t *arena;
+ arena_t **arenas_cache = tsd_arenas_cache_get(tsd);
+ unsigned narenas_cache = tsd_narenas_cache_get(tsd);
unsigned narenas_actual = narenas_total_get();
- /*
- * Dissociate old tdata array (and set up for deallocation upon return)
- * if it's too small.
- */
- if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
- arenas_tdata_old = arenas_tdata;
- narenas_tdata_old = narenas_tdata;
- arenas_tdata = NULL;
- narenas_tdata = 0;
- tsd_arenas_tdata_set(tsd, arenas_tdata);
- tsd_narenas_tdata_set(tsd, narenas_tdata);
- } else {
- arenas_tdata_old = NULL;
- narenas_tdata_old = 0;
- }
-
- /* Allocate tdata array if it's missing. */
- if (arenas_tdata == NULL) {
- bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
- narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;
-
- if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
- *arenas_tdata_bypassp = true;
- arenas_tdata = (arena_tdata_t *)a0malloc(
- sizeof(arena_tdata_t) * narenas_tdata);
- *arenas_tdata_bypassp = false;
+ /* Deallocate old cache if it's too small. */
+ if (arenas_cache != NULL && narenas_cache < narenas_actual) {
+ a0dalloc(arenas_cache);
+ arenas_cache = NULL;
+ narenas_cache = 0;
+ tsd_arenas_cache_set(tsd, arenas_cache);
+ tsd_narenas_cache_set(tsd, narenas_cache);
+ }
+
+ /* Allocate cache if it's missing. */
+ if (arenas_cache == NULL) {
+ bool *arenas_cache_bypassp = tsd_arenas_cache_bypassp_get(tsd);
+ assert(ind < narenas_actual || !init_if_missing);
+ narenas_cache = (ind < narenas_actual) ? narenas_actual : ind+1;
+
+ if (tsd_nominal(tsd) && !*arenas_cache_bypassp) {
+ *arenas_cache_bypassp = true;
+ arenas_cache = (arena_t **)a0malloc(sizeof(arena_t *) *
+ narenas_cache);
+ *arenas_cache_bypassp = false;
}
- if (arenas_tdata == NULL) {
- tdata = NULL;
- goto label_return;
+ if (arenas_cache == NULL) {
+ /*
+ * This function must always tell the truth, even if
+ * it's slow, so don't let OOM, thread cleanup (note
+ * tsd_nominal check), nor recursive allocation
+ * avoidance (note arenas_cache_bypass check) get in the
+ * way.
+ */
+ if (ind >= narenas_actual)
+ return (NULL);
+ malloc_mutex_lock(&arenas_lock);
+ arena = arenas[ind];
+ malloc_mutex_unlock(&arenas_lock);
+ return (arena);
}
- assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
- tsd_arenas_tdata_set(tsd, arenas_tdata);
- tsd_narenas_tdata_set(tsd, narenas_tdata);
+ assert(tsd_nominal(tsd) && !*arenas_cache_bypassp);
+ tsd_arenas_cache_set(tsd, arenas_cache);
+ tsd_narenas_cache_set(tsd, narenas_cache);
}
/*
- * Copy to tdata array. It's possible that the actual number of arenas
- * has increased since narenas_total_get() was called above, but that
- * causes no correctness issues unless two threads concurrently execute
- * the arenas.extend mallctl, which we trust mallctl synchronization to
+ * Copy to cache. It's possible that the actual number of arenas has
+ * increased since narenas_total_get() was called above, but that causes
+ * no correctness issues unless two threads concurrently execute the
+ * arenas.extend mallctl, which we trust mallctl synchronization to
* prevent.
*/
-
- /* Copy/initialize tickers. */
- for (i = 0; i < narenas_actual; i++) {
- if (i < narenas_tdata_old) {
- ticker_copy(&arenas_tdata[i].decay_ticker,
- &arenas_tdata_old[i].decay_ticker);
- } else {
- ticker_init(&arenas_tdata[i].decay_ticker,
- DECAY_NTICKS_PER_UPDATE);
- }
- }
- if (narenas_tdata > narenas_actual) {
- memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
- * (narenas_tdata - narenas_actual));
- }
-
- /* Read the refreshed tdata array. */
- tdata = &arenas_tdata[ind];
-label_return:
- if (arenas_tdata_old != NULL)
- a0dalloc(arenas_tdata_old);
- return (tdata);
+ malloc_mutex_lock(&arenas_lock);
+ memcpy(arenas_cache, arenas, sizeof(arena_t *) * narenas_actual);
+ malloc_mutex_unlock(&arenas_lock);
+ if (narenas_cache > narenas_actual) {
+ memset(&arenas_cache[narenas_actual], 0, sizeof(arena_t *) *
+ (narenas_cache - narenas_actual));
+ }
+
+ /* Read the refreshed cache, and init the arena if necessary. */
+ arena = arenas_cache[ind];
+ if (init_if_missing && arena == NULL)
+ arena = arenas_cache[ind] = arena_init(ind);
+ return (arena);
}
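
The tail of arena_get_hard() refreshes the per-thread cache from the global table and zeroes any slots past the live entries, so stale pointers can never be read back. That copy-then-zero step in isolation (assumed signature, not jemalloc's):

#include <string.h>

static void
cache_refresh(void **cache, unsigned ncache, void *const *global,
    unsigned nactual)
{
        memcpy(cache, global, sizeof(void *) * nactual);
        if (ncache > nactual) {
                /* Zero the tail so unused slots read as "missing". */
                memset(&cache[nactual], 0,
                    sizeof(void *) * (ncache - nactual));
        }
}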
/* Slow path, called only by arena_choose(). */
arena_t *
-arena_choose_hard(tsd_t *tsd, bool internal)
+arena_choose_hard(tsd_t *tsd)
{
- arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);
+ arena_t *ret;
if (narenas_auto > 1) {
- unsigned i, j, choose[2], first_null;
-
- /*
- * Determine binding for both non-internal and internal
- * allocation.
- *
- * choose[0]: For application allocation.
- * choose[1]: For internal metadata allocation.
- */
-
- for (j = 0; j < 2; j++)
- choose[j] = 0;
+ unsigned i, choose, first_null;
+ choose = 0;
first_null = narenas_auto;
- malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
- assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
+ malloc_mutex_lock(&arenas_lock);
+ assert(a0get() != NULL);
for (i = 1; i < narenas_auto; i++) {
- if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
+ if (arenas[i] != NULL) {
/*
* Choose the first arena that has the lowest
* number of threads assigned to it.
*/
- for (j = 0; j < 2; j++) {
- if (arena_nthreads_get(arena_get(
- tsd_tsdn(tsd), i, false), !!j) <
- arena_nthreads_get(arena_get(
- tsd_tsdn(tsd), choose[j], false),
- !!j))
- choose[j] = i;
- }
+ if (arenas[i]->nthreads <
+ arenas[choose]->nthreads)
+ choose = i;
} else if (first_null == narenas_auto) {
/*
* Record the index of the first uninitialized
@@ -641,40 +605,27 @@ arena_choose_hard(tsd_t *tsd, bool internal)
}
}
- for (j = 0; j < 2; j++) {
- if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
- choose[j], false), !!j) == 0 || first_null ==
- narenas_auto) {
- /*
- * Use an unloaded arena, or the least loaded
- * arena if all arenas are already initialized.
- */
- if (!!j == internal) {
- ret = arena_get(tsd_tsdn(tsd),
- choose[j], false);
- }
- } else {
- arena_t *arena;
-
- /* Initialize a new arena. */
- choose[j] = first_null;
- arena = arena_init_locked(tsd_tsdn(tsd),
- choose[j]);
- if (arena == NULL) {
- malloc_mutex_unlock(tsd_tsdn(tsd),
- &arenas_lock);
- return (NULL);
- }
- if (!!j == internal)
- ret = arena;
+ if (arenas[choose]->nthreads == 0
+ || first_null == narenas_auto) {
+ /*
+ * Use an unloaded arena, or the least loaded arena if
+ * all arenas are already initialized.
+ */
+ ret = arenas[choose];
+ } else {
+ /* Initialize a new arena. */
+ choose = first_null;
+ ret = arena_init_locked(choose);
+ if (ret == NULL) {
+ malloc_mutex_unlock(&arenas_lock);
+ return (NULL);
}
- arena_bind(tsd, choose[j], !!j);
}
- malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
+ arena_bind_locked(tsd, choose);
+ malloc_mutex_unlock(&arenas_lock);
} else {
- ret = arena_get(tsd_tsdn(tsd), 0, false);
- arena_bind(tsd, 0, false);
- arena_bind(tsd, 0, true);
+ ret = a0get();
+ arena_bind(tsd, 0);
}
return (ret);
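
The single-choice loop restored here scans the auto arenas once, tracking both the least-loaded initialized arena and the first uninitialized slot; the 4.4.0 code ran the same dance twice, once for application and once for internal arenas. A compact sketch of the selection pass under assumed flat arrays (the real code reads arenas[i]->nthreads under arenas_lock):

static unsigned
choose_least_loaded(const unsigned *nthreads, const int *initialized,
    unsigned narenas, unsigned *first_null)
{
        unsigned i, choose = 0;

        *first_null = narenas;
        for (i = 1; i < narenas; i++) {
                if (initialized[i]) {
                        /* Lowest thread count wins; ties keep the earliest. */
                        if (nthreads[i] < nthreads[choose])
                                choose = i;
                } else if (*first_null == narenas) {
                        /* Remember the first uninitialized slot. */
                        *first_null = i;
                }
        }
        return (choose);
}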
@@ -695,49 +646,36 @@ thread_deallocated_cleanup(tsd_t *tsd)
}
void
-iarena_cleanup(tsd_t *tsd)
-{
- arena_t *iarena;
-
- iarena = tsd_iarena_get(tsd);
- if (iarena != NULL)
- arena_unbind(tsd, iarena->ind, true);
-}
-
-void
arena_cleanup(tsd_t *tsd)
{
arena_t *arena;
arena = tsd_arena_get(tsd);
if (arena != NULL)
- arena_unbind(tsd, arena->ind, false);
+ arena_unbind(tsd, arena->ind);
}
void
-arenas_tdata_cleanup(tsd_t *tsd)
+arenas_cache_cleanup(tsd_t *tsd)
{
- arena_tdata_t *arenas_tdata;
+ arena_t **arenas_cache;
- /* Prevent tsd->arenas_tdata from being (re)created. */
- *tsd_arenas_tdata_bypassp_get(tsd) = true;
-
- arenas_tdata = tsd_arenas_tdata_get(tsd);
- if (arenas_tdata != NULL) {
- tsd_arenas_tdata_set(tsd, NULL);
- a0dalloc(arenas_tdata);
+ arenas_cache = tsd_arenas_cache_get(tsd);
+ if (arenas_cache != NULL) {
+ tsd_arenas_cache_set(tsd, NULL);
+ a0dalloc(arenas_cache);
}
}
void
-narenas_tdata_cleanup(tsd_t *tsd)
+narenas_cache_cleanup(tsd_t *tsd)
{
/* Do nothing. */
}
void
-arenas_tdata_bypass_cleanup(tsd_t *tsd)
+arenas_cache_bypass_cleanup(tsd_t *tsd)
{
/* Do nothing. */
@@ -748,11 +686,8 @@ stats_print_atexit(void)
{
if (config_tcache && config_stats) {
- tsdn_t *tsdn;
unsigned narenas, i;
- tsdn = tsdn_fetch();
-
/*
* Merge stats from extant threads. This is racy, since
* individual threads do not lock when recording tcache stats
@@ -761,7 +696,7 @@ stats_print_atexit(void)
* continue to allocate.
*/
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
- arena_t *arena = arena_get(tsdn, i, false);
+ arena_t *arena = arenas[i];
if (arena != NULL) {
tcache_t *tcache;
@@ -771,11 +706,11 @@ stats_print_atexit(void)
* and bin locks in the opposite order,
* deadlocks may result.
*/
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
ql_foreach(tcache, &arena->tcache_ql, link) {
- tcache_stats_merge(tsdn, tcache, arena);
+ tcache_stats_merge(tcache, arena);
}
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
}
}
}
@@ -812,20 +747,6 @@ malloc_ncpus(void)
SYSTEM_INFO si;
GetSystemInfo(&si);
result = si.dwNumberOfProcessors;
-#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
- /*
- * glibc >= 2.6 has the CPU_COUNT macro.
- *
- * glibc's sysconf() uses isspace(). glibc allocates for the first time
- * *before* setting up the isspace tables. Therefore we need a
- * different method to get the number of CPUs.
- */
- {
- cpu_set_t set;
-
- pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
- result = CPU_COUNT(&set);
- }
#else
result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
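
With the glibc CPU_COUNT branch gone, CPU detection on non-Windows platforms is back to sysconf(_SC_NPROCESSORS_ONLN). A standalone probe showing that fallback, clamping a failed query to one CPU as malloc_ncpus() does for a -1 result:

#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        long n = sysconf(_SC_NPROCESSORS_ONLN);

        printf("online cpus: %ld\n", (n < 1) ? 1L : n);
        return (0);
}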
@@ -918,26 +839,6 @@ malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
}
static void
-malloc_slow_flag_init(void)
-{
- /*
- * Combine the runtime options into malloc_slow for fast path. Called
- * after processing all the options.
- */
- malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
- | (opt_junk_free ? flag_opt_junk_free : 0)
- | (opt_quarantine ? flag_opt_quarantine : 0)
- | (opt_zero ? flag_opt_zero : 0)
- | (opt_utrace ? flag_opt_utrace : 0)
- | (opt_xmalloc ? flag_opt_xmalloc : 0);
-
- if (config_valgrind)
- malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0);
-
- malloc_slow = (malloc_slow_flags != 0);
-}
-
-static void
malloc_conf_init(void)
{
unsigned i;
@@ -963,13 +864,10 @@ malloc_conf_init(void)
opt_tcache = false;
}
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < 3; i++) {
/* Get runtime configuration. */
switch (i) {
case 0:
- opts = config_malloc_conf;
- break;
- case 1:
if (je_malloc_conf != NULL) {
/*
* Use options that were compiled into the
@@ -982,8 +880,8 @@ malloc_conf_init(void)
opts = buf;
}
break;
- case 2: {
- ssize_t linklen = 0;
+ case 1: {
+ int linklen = 0;
#ifndef _WIN32
int saved_errno = errno;
const char *linkname =
@@ -1009,7 +907,7 @@ malloc_conf_init(void)
buf[linklen] = '\0';
opts = buf;
break;
- } case 3: {
+ } case 2: {
const char *envname =
#ifdef JEMALLOC_PREFIX
JEMALLOC_CPREFIX"MALLOC_CONF"
@@ -1056,11 +954,7 @@ malloc_conf_init(void)
if (cont) \
continue; \
}
-#define CONF_MIN_no(um, min) false
-#define CONF_MIN_yes(um, min) ((um) < (min))
-#define CONF_MAX_no(um, max) false
-#define CONF_MAX_yes(um, max) ((um) > (max))
-#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \
+#define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \
if (CONF_MATCH(n)) { \
uintmax_t um; \
char *end; \
@@ -1073,35 +967,24 @@ malloc_conf_init(void)
"Invalid conf value", \
k, klen, v, vlen); \
} else if (clip) { \
- if (CONF_MIN_##check_min(um, \
- (min))) \
- o = (t)(min); \
- else if (CONF_MAX_##check_max( \
- um, (max))) \
- o = (t)(max); \
+ if ((min) != 0 && um < (min)) \
+ o = (min); \
+ else if (um > (max)) \
+ o = (max); \
else \
- o = (t)um; \
+ o = um; \
} else { \
- if (CONF_MIN_##check_min(um, \
- (min)) || \
- CONF_MAX_##check_max(um, \
- (max))) { \
+ if (((min) != 0 && um < (min)) \
+ || um > (max)) { \
malloc_conf_error( \
"Out-of-range " \
"conf value", \
k, klen, v, vlen); \
} else \
- o = (t)um; \
+ o = um; \
} \
continue; \
}
-#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \
- clip) \
- CONF_HANDLE_T_U(unsigned, o, n, min, max, \
- check_min, check_max, clip)
-#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \
- CONF_HANDLE_T_U(size_t, o, n, min, max, \
- check_min, check_max, clip)
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
if (CONF_MATCH(n)) { \
long l; \
@@ -1144,7 +1027,7 @@ malloc_conf_init(void)
*/
CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
- (sizeof(size_t) << 3) - 1, yes, yes, true)
+ (sizeof(size_t) << 3) - 1, true)
if (strncmp("dss", k, klen) == 0) {
int i;
bool match = false;
@@ -1169,47 +1052,17 @@ malloc_conf_init(void)
}
continue;
}
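
Options with enumerated values ("dss" here, and "purge" before this revert removed it) are matched by scanning a table of names with strncmp() over the value's length. That comparison loop on its own; names and limit are illustrative parameters:

#include <string.h>

/* Returns the matching table index, or -1 if no name matches. */
static int
match_name(const char *const *names, int limit, const char *v, size_t vlen)
{
        int i;

        for (i = 0; i < limit; i++) {
                if (strncmp(names[i], v, vlen) == 0)
                        return (i);
        }
        return (-1);
}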
- CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
- UINT_MAX, yes, no, false)
- if (strncmp("purge", k, klen) == 0) {
- int i;
- bool match = false;
- for (i = 0; i < purge_mode_limit; i++) {
- if (strncmp(purge_mode_names[i], v,
- vlen) == 0) {
- opt_purge = (purge_mode_t)i;
- match = true;
- break;
- }
- }
- if (!match) {
- malloc_conf_error("Invalid conf value",
- k, klen, v, vlen);
- }
- continue;
- }
+ CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
+ SIZE_T_MAX, false)
CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
-1, (sizeof(size_t) << 3) - 1)
- CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1,
- NSTIME_SEC_MAX);
CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
if (config_fill) {
if (CONF_MATCH("junk")) {
if (CONF_MATCH_VALUE("true")) {
- if (config_valgrind &&
- unlikely(in_valgrind)) {
- malloc_conf_error(
- "Deallocation-time "
- "junk filling cannot "
- "be enabled while "
- "running inside "
- "Valgrind", k, klen, v,
- vlen);
- } else {
- opt_junk = "true";
- opt_junk_alloc = true;
- opt_junk_free = true;
- }
+ opt_junk = "true";
+ opt_junk_alloc = opt_junk_free =
+ true;
} else if (CONF_MATCH_VALUE("false")) {
opt_junk = "false";
opt_junk_alloc = opt_junk_free =
@@ -1219,20 +1072,9 @@ malloc_conf_init(void)
opt_junk_alloc = true;
opt_junk_free = false;
} else if (CONF_MATCH_VALUE("free")) {
- if (config_valgrind &&
- unlikely(in_valgrind)) {
- malloc_conf_error(
- "Deallocation-time "
- "junk filling cannot "
- "be enabled while "
- "running inside "
- "Valgrind", k, klen, v,
- vlen);
- } else {
- opt_junk = "free";
- opt_junk_alloc = false;
- opt_junk_free = true;
- }
+ opt_junk = "free";
+ opt_junk_alloc = false;
+ opt_junk_free = true;
} else {
malloc_conf_error(
"Invalid conf value", k,
@@ -1241,7 +1083,7 @@ malloc_conf_init(void)
continue;
}
CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
- 0, SIZE_T_MAX, no, no, false)
+ 0, SIZE_T_MAX, false)
CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
CONF_HANDLE_BOOL(opt_zero, "zero", true)
}
@@ -1278,8 +1120,8 @@ malloc_conf_init(void)
CONF_HANDLE_BOOL(opt_prof_thread_active_init,
"prof_thread_active_init", true)
CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
- "lg_prof_sample", 0, (sizeof(uint64_t) << 3)
- - 1, no, yes, true)
+ "lg_prof_sample", 0,
+ (sizeof(uint64_t) << 3) - 1, true)
CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
true)
CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
@@ -1295,14 +1137,7 @@ malloc_conf_init(void)
malloc_conf_error("Invalid conf pair", k, klen, v,
vlen);
#undef CONF_MATCH
-#undef CONF_MATCH_VALUE
#undef CONF_HANDLE_BOOL
-#undef CONF_MIN_no
-#undef CONF_MIN_yes
-#undef CONF_MAX_no
-#undef CONF_MAX_yes
-#undef CONF_HANDLE_T_U
-#undef CONF_HANDLE_UNSIGNED
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
@@ -1310,6 +1145,7 @@ malloc_conf_init(void)
}
}
+/* init_lock must be held. */
static bool
malloc_init_hard_needed(void)
{
@@ -1325,14 +1161,11 @@ malloc_init_hard_needed(void)
}
#ifdef JEMALLOC_THREADED_INIT
if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
- spin_t spinner;
-
/* Busy-wait until the initializing thread completes. */
- spin_init(&spinner);
do {
- malloc_mutex_unlock(TSDN_NULL, &init_lock);
- spin_adaptive(&spinner);
- malloc_mutex_lock(TSDN_NULL, &init_lock);
+ malloc_mutex_unlock(&init_lock);
+ CPU_SPINWAIT;
+ malloc_mutex_lock(&init_lock);
} while (!malloc_initialized());
return (false);
}
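
The threaded-init busy-wait goes back to a bare unlock/pause/relock loop around the initialized check, instead of 4.4.0's adaptive spinner. The same shape in portable pthreads, with sched_yield() standing in for CPU_SPINWAIT:

#include <pthread.h>
#include <sched.h>

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static int initialized; /* set by the initializing thread, under init_lock */

static void
wait_for_init(void)
{
        pthread_mutex_lock(&init_lock);
        while (!initialized) {
                pthread_mutex_unlock(&init_lock);
                sched_yield();  /* give the initializer a chance to run */
                pthread_mutex_lock(&init_lock);
        }
        pthread_mutex_unlock(&init_lock);
}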
@@ -1340,8 +1173,9 @@ malloc_init_hard_needed(void)
return (true);
}
+/* init_lock must be held. */
static bool
-malloc_init_hard_a0_locked()
+malloc_init_hard_a0_locked(void)
{
malloc_initializer = INITIALIZER;
@@ -1357,7 +1191,6 @@ malloc_init_hard_a0_locked()
abort();
}
}
- pages_boot();
if (base_boot())
return (true);
if (chunk_boot())
@@ -1366,28 +1199,26 @@ malloc_init_hard_a0_locked()
return (true);
if (config_prof)
prof_boot1();
- arena_boot();
- if (config_tcache && tcache_boot(TSDN_NULL))
+ if (arena_boot())
+ return (true);
+ if (config_tcache && tcache_boot())
return (true);
- if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS))
+ if (malloc_mutex_init(&arenas_lock))
return (true);
/*
* Create enough scaffolding to allow recursive allocation in
* malloc_ncpus().
*/
- narenas_auto = 1;
- narenas_total_set(narenas_auto);
+ narenas_total = narenas_auto = 1;
arenas = &a0;
memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
/*
* Initialize one arena here. The rest are lazily created in
* arena_choose_hard().
*/
- if (arena_init(TSDN_NULL, 0) == NULL)
+ if (arena_init(0) == NULL)
return (true);
-
malloc_init_state = malloc_init_a0_initialized;
-
return (false);
}
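
Note the bootstrap trick this hunk preserves: before any real allocation works, arenas is pointed at the address of the single static slot a0, so a one-entry "array" exists for arena 0 while malloc_ncpus() and friends may recurse into the allocator. Reduced to its essentials, with illustrative names:

#include <string.h>

static void *a0_slot;   /* stands in for the static a0 arena pointer */
static void **table;    /* stands in for arenas */
static unsigned ntable; /* stands in for narenas_total/narenas_auto */

static void
bootstrap_table(void)
{
        ntable = 1;
        table = &a0_slot;       /* one-entry "array", no allocation needed */
        memset(table, 0, sizeof(void *) * ntable);
}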
@@ -1396,42 +1227,45 @@ malloc_init_hard_a0(void)
{
bool ret;
- malloc_mutex_lock(TSDN_NULL, &init_lock);
+ malloc_mutex_lock(&init_lock);
ret = malloc_init_hard_a0_locked();
- malloc_mutex_unlock(TSDN_NULL, &init_lock);
+ malloc_mutex_unlock(&init_lock);
return (ret);
}
-/* Initialize data structures which may trigger recursive allocation. */
-static bool
+/*
+ * Initialize data structures which may trigger recursive allocation.
+ *
+ * init_lock must be held.
+ */
+static void
malloc_init_hard_recursible(void)
{
malloc_init_state = malloc_init_recursible;
+ malloc_mutex_unlock(&init_lock);
ncpus = malloc_ncpus();
-#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
- && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
- !defined(__native_client__))
- /* LinuxThreads' pthread_atfork() allocates. */
+#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
+ && !defined(_WIN32) && !defined(__native_client__))
+ /* LinuxThreads's pthread_atfork() allocates. */
if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
jemalloc_postfork_child) != 0) {
malloc_write("<jemalloc>: Error in pthread_atfork()\n");
if (opt_abort)
abort();
- return (true);
}
#endif
-
- return (false);
+ malloc_mutex_lock(&init_lock);
}
+/* init_lock must be held. */
static bool
-malloc_init_hard_finish(tsdn_t *tsdn)
+malloc_init_hard_finish(void)
{
- if (malloc_mutex_boot())
+ if (mutex_boot())
return (true);
if (opt_narenas == 0) {
@@ -1446,69 +1280,68 @@ malloc_init_hard_finish(tsdn_t *tsdn)
}
narenas_auto = opt_narenas;
/*
- * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
+ * Make sure that the arenas array can be allocated. In practice, this
+ * limit is enough to allow the allocator to function, but the ctl
+ * machinery will fail to allocate memory at far lower limits.
*/
- if (narenas_auto > MALLOCX_ARENA_MAX) {
- narenas_auto = MALLOCX_ARENA_MAX;
+ if (narenas_auto > chunksize / sizeof(arena_t *)) {
+ narenas_auto = chunksize / sizeof(arena_t *);
malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
narenas_auto);
}
- narenas_total_set(narenas_auto);
+ narenas_total = narenas_auto;
/* Allocate and initialize arenas. */
- arenas = (arena_t **)base_alloc(tsdn, sizeof(arena_t *) *
- (MALLOCX_ARENA_MAX+1));
+ arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
if (arenas == NULL)
return (true);
+ /*
+ * Zero the array. In practice, this should always be pre-zeroed,
+ * since it was just mmap()ed, but let's be sure.
+ */
+ memset(arenas, 0, sizeof(arena_t *) * narenas_total);
/* Copy the pointer to the one arena that was already initialized. */
- arena_set(0, a0);
+ arenas[0] = a0;
malloc_init_state = malloc_init_initialized;
- malloc_slow_flag_init();
-
return (false);
}
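
The restored clamp caps narenas at chunksize / sizeof(arena_t *), i.e. however many arena pointers fit in one chunk, rather than at MALLOCX_ARENA_MAX. A two-line check of the arithmetic, with an assumed 4 MiB chunk since opt_lg_chunk can change the real value:

#include <stddef.h>
#include <stdio.h>

int
main(void)
{
        size_t chunksize = (size_t)1 << 22;     /* assumed 4 MiB chunk */
        size_t limit = chunksize / sizeof(void *);

        printf("narenas cap: %zu\n", limit);    /* 524288 with 8-byte ptrs */
        return (0);
}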
static bool
malloc_init_hard(void)
{
- tsd_t *tsd;
#if defined(_WIN32) && _WIN32_WINNT < 0x0600
_init_init_lock();
#endif
- malloc_mutex_lock(TSDN_NULL, &init_lock);
+ malloc_mutex_lock(&init_lock);
if (!malloc_init_hard_needed()) {
- malloc_mutex_unlock(TSDN_NULL, &init_lock);
+ malloc_mutex_unlock(&init_lock);
return (false);
}
if (malloc_init_state != malloc_init_a0_initialized &&
malloc_init_hard_a0_locked()) {
- malloc_mutex_unlock(TSDN_NULL, &init_lock);
+ malloc_mutex_unlock(&init_lock);
return (true);
}
-
- malloc_mutex_unlock(TSDN_NULL, &init_lock);
- /* Recursive allocation relies on functional tsd. */
- tsd = malloc_tsd_boot0();
- if (tsd == NULL)
+ if (malloc_tsd_boot0()) {
+ malloc_mutex_unlock(&init_lock);
return (true);
- if (malloc_init_hard_recursible())
- return (true);
- malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
-
- if (config_prof && prof_boot2(tsd)) {
- malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
+ }
+ if (config_prof && prof_boot2()) {
+ malloc_mutex_unlock(&init_lock);
return (true);
}
- if (malloc_init_hard_finish(tsd_tsdn(tsd))) {
- malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
+ malloc_init_hard_recursible();
+
+ if (malloc_init_hard_finish()) {
+ malloc_mutex_unlock(&init_lock);
return (true);
}
- malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
+ malloc_mutex_unlock(&init_lock);
malloc_tsd_boot1();
return (false);
}
@@ -1522,104 +1355,61 @@ malloc_init_hard(void)
*/
static void *
-ialloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, bool zero,
- prof_tctx_t *tctx, bool slow_path)
+imalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
{
void *p;
if (tctx == NULL)
return (NULL);
if (usize <= SMALL_MAXCLASS) {
- szind_t ind_large = size2index(LARGE_MINCLASS);
- p = ialloc(tsd, LARGE_MINCLASS, ind_large, zero, slow_path);
+ p = imalloc(tsd, LARGE_MINCLASS);
if (p == NULL)
return (NULL);
- arena_prof_promoted(tsd_tsdn(tsd), p, usize);
+ arena_prof_promoted(p, usize);
} else
- p = ialloc(tsd, usize, ind, zero, slow_path);
+ p = imalloc(tsd, usize);
return (p);
}
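
imalloc_prof_sample() shows how heap profiling identifies sampled objects: a sampled request that would land in a small bin is served from LARGE_MINCLASS instead and re-labeled via arena_prof_promoted(), so free-time code can recognize it. A size-only sketch; the class boundaries below are assumed for illustration, the real values come from the size-class configuration:

#include <stddef.h>

#define SMALL_MAXCLASS_EXAMPLE  ((size_t)14336)
#define LARGE_MINCLASS_EXAMPLE  ((size_t)16384)

static size_t
sampled_usize(size_t usize)
{
        /* Sampled small requests are promoted to the smallest large class. */
        return ((usize <= SMALL_MAXCLASS_EXAMPLE) ?
            LARGE_MINCLASS_EXAMPLE : usize);
}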
JEMALLOC_ALWAYS_INLINE_C void *
-ialloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool zero, bool slow_path)
+imalloc_prof(tsd_t *tsd, size_t usize)
{
void *p;
prof_tctx_t *tctx;
tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
- p = ialloc_prof_sample(tsd, usize, ind, zero, tctx, slow_path);
+ p = imalloc_prof_sample(tsd, usize, tctx);
else
- p = ialloc(tsd, usize, ind, zero, slow_path);
+ p = imalloc(tsd, usize);
if (unlikely(p == NULL)) {
prof_alloc_rollback(tsd, tctx, true);
return (NULL);
}
- prof_malloc(tsd_tsdn(tsd), p, usize, tctx);
+ prof_malloc(p, usize, tctx);
return (p);
}
-/*
- * ialloc_body() is inlined so that fast and slow paths are generated separately
- * with statically known slow_path.
- *
- * This function guarantees that *tsdn is non-NULL on success.
- */
JEMALLOC_ALWAYS_INLINE_C void *
-ialloc_body(size_t size, bool zero, tsdn_t **tsdn, size_t *usize,
- bool slow_path)
+imalloc_body(size_t size, tsd_t **tsd, size_t *usize)
{
- tsd_t *tsd;
- szind_t ind;
-
- if (slow_path && unlikely(malloc_init())) {
- *tsdn = NULL;
- return (NULL);
- }
- tsd = tsd_fetch();
- *tsdn = tsd_tsdn(tsd);
- witness_assert_lockless(tsd_tsdn(tsd));
-
- ind = size2index(size);
- if (unlikely(ind >= NSIZES))
+ if (unlikely(malloc_init()))
return (NULL);
+ *tsd = tsd_fetch();
- if (config_stats || (config_prof && opt_prof) || (slow_path &&
- config_valgrind && unlikely(in_valgrind))) {
- *usize = index2size(ind);
- assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
+ if (config_prof && opt_prof) {
+ *usize = s2u(size);
+ if (unlikely(*usize == 0))
+ return (NULL);
+ return (imalloc_prof(*tsd, *usize));
}
- if (config_prof && opt_prof)
- return (ialloc_prof(tsd, *usize, ind, zero, slow_path));
-
- return (ialloc(tsd, size, ind, zero, slow_path));
-}
-
-JEMALLOC_ALWAYS_INLINE_C void
-ialloc_post_check(void *ret, tsdn_t *tsdn, size_t usize, const char *func,
- bool update_errno, bool slow_path)
-{
-
- assert(!tsdn_null(tsdn) || ret == NULL);
-
- if (unlikely(ret == NULL)) {
- if (slow_path && config_xmalloc && unlikely(opt_xmalloc)) {
- malloc_printf("<jemalloc>: Error in %s(): out of "
- "memory\n", func);
- abort();
- }
- if (update_errno)
- set_errno(ENOMEM);
- }
- if (config_stats && likely(ret != NULL)) {
- assert(usize == isalloc(tsdn, ret, config_prof));
- *tsd_thread_allocatedp_get(tsdn_tsd(tsdn)) += usize;
- }
- witness_assert_lockless(tsdn);
+ if (config_stats || (config_valgrind && unlikely(in_valgrind)))
+ *usize = s2u(size);
+ return (imalloc(*tsd, size));
}
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
@@ -1628,22 +1418,27 @@ JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_malloc(size_t size)
{
void *ret;
- tsdn_t *tsdn;
+ tsd_t *tsd;
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
if (size == 0)
size = 1;
- if (likely(!malloc_slow)) {
- ret = ialloc_body(size, false, &tsdn, &usize, false);
- ialloc_post_check(ret, tsdn, usize, "malloc", true, false);
- } else {
- ret = ialloc_body(size, false, &tsdn, &usize, true);
- ialloc_post_check(ret, tsdn, usize, "malloc", true, true);
- UTRACE(0, size, ret);
- JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false);
+ ret = imalloc_body(size, &tsd, &usize);
+ if (unlikely(ret == NULL)) {
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
+ malloc_write("<jemalloc>: Error in malloc(): "
+ "out of memory\n");
+ abort();
+ }
+ set_errno(ENOMEM);
}
-
+ if (config_stats && likely(ret != NULL)) {
+ assert(usize == isalloc(ret, config_prof));
+ *tsd_thread_allocatedp_get(tsd) += usize;
+ }
+ UTRACE(0, size, ret);
+ JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
return (ret);
}
@@ -1660,7 +1455,7 @@ imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
if (p == NULL)
return (NULL);
- arena_prof_promoted(tsd_tsdn(tsd), p, usize);
+ arena_prof_promoted(p, usize);
} else
p = ipalloc(tsd, usize, alignment, false);
@@ -1682,7 +1477,7 @@ imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
prof_alloc_rollback(tsd, tctx, true);
return (NULL);
}
- prof_malloc(tsd_tsdn(tsd), p, usize, tctx);
+ prof_malloc(p, usize, tctx);
return (p);
}
@@ -1699,12 +1494,10 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
assert(min_alignment != 0);
if (unlikely(malloc_init())) {
- tsd = NULL;
result = NULL;
goto label_oom;
}
tsd = tsd_fetch();
- witness_assert_lockless(tsd_tsdn(tsd));
if (size == 0)
size = 1;
@@ -1722,7 +1515,7 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
}
usize = sa2u(size, alignment);
- if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
+ if (unlikely(usize == 0)) {
result = NULL;
goto label_oom;
}
@@ -1739,13 +1532,10 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
ret = 0;
label_return:
if (config_stats && likely(result != NULL)) {
- assert(usize == isalloc(tsd_tsdn(tsd), result, config_prof));
+ assert(usize == isalloc(result, config_prof));
*tsd_thread_allocatedp_get(tsd) += usize;
}
UTRACE(0, size, result);
- JEMALLOC_VALGRIND_MALLOC(result != NULL, tsd_tsdn(tsd), result, usize,
- false);
- witness_assert_lockless(tsd_tsdn(tsd));
return (ret);
label_oom:
assert(result == NULL);
@@ -1755,7 +1545,6 @@ label_oom:
abort();
}
ret = ENOMEM;
- witness_assert_lockless(tsd_tsdn(tsd));
goto label_return;
}
@@ -1763,10 +1552,9 @@ JEMALLOC_EXPORT int JEMALLOC_NOTHROW
JEMALLOC_ATTR(nonnull(1))
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
- int ret;
-
- ret = imemalign(memptr, alignment, size, sizeof(void *));
-
+ int ret = imemalign(memptr, alignment, size, sizeof(void *));
+ JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
+ config_prof), false);
return (ret);
}
@@ -1782,45 +1570,114 @@ je_aligned_alloc(size_t alignment, size_t size)
ret = NULL;
set_errno(err);
}
-
+ JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
+ false);
return (ret);
}
+static void *
+icalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
+{
+ void *p;
+
+ if (tctx == NULL)
+ return (NULL);
+ if (usize <= SMALL_MAXCLASS) {
+ p = icalloc(tsd, LARGE_MINCLASS);
+ if (p == NULL)
+ return (NULL);
+ arena_prof_promoted(p, usize);
+ } else
+ p = icalloc(tsd, usize);
+
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+icalloc_prof(tsd_t *tsd, size_t usize)
+{
+ void *p;
+ prof_tctx_t *tctx;
+
+ tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
+ if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
+ p = icalloc_prof_sample(tsd, usize, tctx);
+ else
+ p = icalloc(tsd, usize);
+ if (unlikely(p == NULL)) {
+ prof_alloc_rollback(tsd, tctx, true);
+ return (NULL);
+ }
+ prof_malloc(p, usize, tctx);
+
+ return (p);
+}
+
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
je_calloc(size_t num, size_t size)
{
void *ret;
- tsdn_t *tsdn;
+ tsd_t *tsd;
size_t num_size;
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
+ if (unlikely(malloc_init())) {
+ num_size = 0;
+ ret = NULL;
+ goto label_return;
+ }
+ tsd = tsd_fetch();
+
num_size = num * size;
if (unlikely(num_size == 0)) {
if (num == 0 || size == 0)
num_size = 1;
- else
- num_size = HUGE_MAXCLASS + 1; /* Trigger OOM. */
+ else {
+ ret = NULL;
+ goto label_return;
+ }
/*
* Try to avoid division here. We know that it isn't possible to
* overflow during multiplication if neither operand uses any of the
* most significant half of the bits in a size_t.
*/
} else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
- 2))) && (num_size / size != num)))
- num_size = HUGE_MAXCLASS + 1; /* size_t overflow. */
+ 2))) && (num_size / size != num))) {
+ /* size_t overflow. */
+ ret = NULL;
+ goto label_return;
+ }
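
The overflow test above is worth unpacking: num * size cannot wrap if both operands fit in the low half of size_t's bits, so the division check runs only when (num | size) touches the high half. As a standalone predicate, with an LP64 example call:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static int
mul_overflows(size_t num, size_t size, size_t *num_size)
{
        *num_size = num * size;
        /* Pay for the division only when some operand uses the high half. */
        if (((num | size) & ((size_t)-1 << (sizeof(size_t) << 2))) != 0 &&
            size != 0 && *num_size / size != num)
                return (1);
        return (0);
}

int
main(void)
{
        size_t ns;

        /* LP64: 2^33 * 2^33 wraps, and the predicate catches it. */
        printf("%d\n", mul_overflows((size_t)1 << 33, (size_t)1 << 33, &ns));
        return (0);
}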
- if (likely(!malloc_slow)) {
- ret = ialloc_body(num_size, true, &tsdn, &usize, false);
- ialloc_post_check(ret, tsdn, usize, "calloc", true, false);
+ if (config_prof && opt_prof) {
+ usize = s2u(num_size);
+ if (unlikely(usize == 0)) {
+ ret = NULL;
+ goto label_return;
+ }
+ ret = icalloc_prof(tsd, usize);
} else {
- ret = ialloc_body(num_size, true, &tsdn, &usize, true);
- ialloc_post_check(ret, tsdn, usize, "calloc", true, true);
- UTRACE(0, num_size, ret);
- JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, true);
+ if (config_stats || (config_valgrind && unlikely(in_valgrind)))
+ usize = s2u(num_size);
+ ret = icalloc(tsd, num_size);
}
+label_return:
+ if (unlikely(ret == NULL)) {
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
+ malloc_write("<jemalloc>: Error in calloc(): out of "
+ "memory\n");
+ abort();
+ }
+ set_errno(ENOMEM);
+ }
+ if (config_stats && likely(ret != NULL)) {
+ assert(usize == isalloc(ret, config_prof));
+ *tsd_thread_allocatedp_get(tsd) += usize;
+ }
+ UTRACE(0, num_size, ret);
+ JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
return (ret);
}
@@ -1836,7 +1693,7 @@ irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
if (p == NULL)
return (NULL);
- arena_prof_promoted(tsd_tsdn(tsd), p, usize);
+ arena_prof_promoted(p, usize);
} else
p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
@@ -1851,7 +1708,7 @@ irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
prof_tctx_t *old_tctx, *tctx;
prof_active = prof_active_get_unlocked();
- old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
+ old_tctx = prof_tctx_get(old_ptr);
tctx = prof_alloc_prep(tsd, usize, prof_active, true);
if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
@@ -1868,41 +1725,32 @@ irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
}
JEMALLOC_INLINE_C void
-ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
+ifree(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
size_t usize;
UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
- witness_assert_lockless(tsd_tsdn(tsd));
-
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
if (config_prof && opt_prof) {
- usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
+ usize = isalloc(ptr, config_prof);
prof_free(tsd, ptr, usize);
} else if (config_stats || config_valgrind)
- usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
+ usize = isalloc(ptr, config_prof);
if (config_stats)
*tsd_thread_deallocatedp_get(tsd) += usize;
-
- if (likely(!slow_path))
- iqalloc(tsd, ptr, tcache, false);
- else {
- if (config_valgrind && unlikely(in_valgrind))
- rzsize = p2rz(tsd_tsdn(tsd), ptr);
- iqalloc(tsd, ptr, tcache, true);
- JEMALLOC_VALGRIND_FREE(ptr, rzsize);
- }
+ if (config_valgrind && unlikely(in_valgrind))
+ rzsize = p2rz(ptr);
+ iqalloc(tsd, ptr, tcache);
+ JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}
JEMALLOC_INLINE_C void
-isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path)
+isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache)
{
UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
- witness_assert_lockless(tsd_tsdn(tsd));
-
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
@@ -1911,8 +1759,8 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path)
if (config_stats)
*tsd_thread_deallocatedp_get(tsd) += usize;
if (config_valgrind && unlikely(in_valgrind))
- rzsize = p2rz(tsd_tsdn(tsd), ptr);
- isqalloc(tsd, ptr, usize, tcache, slow_path);
+ rzsize = p2rz(ptr);
+ isqalloc(tsd, ptr, usize, tcache);
JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}
@@ -1922,57 +1770,44 @@ JEMALLOC_ALLOC_SIZE(2)
je_realloc(void *ptr, size_t size)
{
void *ret;
- tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
+ tsd_t *tsd JEMALLOC_CC_SILENCE_INIT(NULL);
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
size_t old_usize = 0;
UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
if (unlikely(size == 0)) {
if (ptr != NULL) {
- tsd_t *tsd;
-
/* realloc(ptr, 0) is equivalent to free(ptr). */
UTRACE(ptr, 0, 0);
tsd = tsd_fetch();
- ifree(tsd, ptr, tcache_get(tsd, false), true);
+ ifree(tsd, ptr, tcache_get(tsd, false));
return (NULL);
}
size = 1;
}
if (likely(ptr != NULL)) {
- tsd_t *tsd;
-
assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
tsd = tsd_fetch();
- witness_assert_lockless(tsd_tsdn(tsd));
-
- old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
- if (config_valgrind && unlikely(in_valgrind)) {
- old_rzsize = config_prof ? p2rz(tsd_tsdn(tsd), ptr) :
- u2rz(old_usize);
- }
+ old_usize = isalloc(ptr, config_prof);
+ if (config_valgrind && unlikely(in_valgrind))
+ old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);
if (config_prof && opt_prof) {
usize = s2u(size);
- ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
- NULL : irealloc_prof(tsd, ptr, old_usize, usize);
+ ret = unlikely(usize == 0) ? NULL : irealloc_prof(tsd,
+ ptr, old_usize, usize);
} else {
if (config_stats || (config_valgrind &&
unlikely(in_valgrind)))
usize = s2u(size);
ret = iralloc(tsd, ptr, old_usize, size, 0, false);
}
- tsdn = tsd_tsdn(tsd);
} else {
/* realloc(NULL, size) is equivalent to malloc(size). */
- if (likely(!malloc_slow))
- ret = ialloc_body(size, false, &tsdn, &usize, false);
- else
- ret = ialloc_body(size, false, &tsdn, &usize, true);
- assert(!tsdn_null(tsdn) || ret == NULL);
+ ret = imalloc_body(size, &tsd, &usize);
}
if (unlikely(ret == NULL)) {
@@ -1984,17 +1819,13 @@ je_realloc(void *ptr, size_t size)
set_errno(ENOMEM);
}
if (config_stats && likely(ret != NULL)) {
- tsd_t *tsd;
-
- assert(usize == isalloc(tsdn, ret, config_prof));
- tsd = tsdn_tsd(tsdn);
+ assert(usize == isalloc(ret, config_prof));
*tsd_thread_allocatedp_get(tsd) += usize;
*tsd_thread_deallocatedp_get(tsd) += old_usize;
}
UTRACE(ptr, size, ret);
- JEMALLOC_VALGRIND_REALLOC(maybe, tsdn, ret, usize, maybe, ptr,
- old_usize, old_rzsize, maybe, false);
- witness_assert_lockless(tsdn);
+ JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize,
+ old_rzsize, true, false);
return (ret);
}
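
je_realloc() keeps the two libc edge cases inline: realloc(ptr, 0) frees and returns NULL, and realloc(NULL, size) degrades to malloc. The same contract expressed over plain libc, as a reference sketch:

#include <stdlib.h>

void *
realloc_edges(void *ptr, size_t size)
{
        if (size == 0) {
                if (ptr != NULL) {
                        /* realloc(ptr, 0) is equivalent to free(ptr). */
                        free(ptr);
                        return (NULL);
                }
                size = 1;       /* keep the result uniquely addressable */
        }
        if (ptr == NULL) {
                /* realloc(NULL, size) is equivalent to malloc(size). */
                return (malloc(size));
        }
        return (realloc(ptr, size));
}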
@@ -2005,12 +1836,7 @@ je_free(void *ptr)
UTRACE(ptr, 0, 0);
if (likely(ptr != NULL)) {
tsd_t *tsd = tsd_fetch();
- witness_assert_lockless(tsd_tsdn(tsd));
- if (likely(!malloc_slow))
- ifree(tsd, ptr, tcache_get(tsd, false), false);
- else
- ifree(tsd, ptr, tcache_get(tsd, false), true);
- witness_assert_lockless(tsd_tsdn(tsd));
+ ifree(tsd, ptr, tcache_get(tsd, false));
}
}
@@ -2031,6 +1857,7 @@ je_memalign(size_t alignment, size_t size)
void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
if (unlikely(imemalign(&ret, alignment, size, 1) != 0))
ret = NULL;
+ JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
return (ret);
}
#endif
@@ -2044,6 +1871,7 @@ je_valloc(size_t size)
void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
if (unlikely(imemalign(&ret, PAGE, size, 1) != 0))
ret = NULL;
+ JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
return (ret);
}
#endif
@@ -2073,29 +1901,6 @@ JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
je_memalign;
# endif
-
-#ifdef CPU_COUNT
-/*
- * To enable static linking with glibc, the libc specific malloc interface must
- * be implemented also, so none of glibc's malloc.o functions are added to the
- * link.
- */
-#define ALIAS(je_fn) __attribute__((alias (#je_fn), used))
-/* To force macro expansion of je_ prefix before stringification. */
-#define PREALIAS(je_fn) ALIAS(je_fn)
-void *__libc_malloc(size_t size) PREALIAS(je_malloc);
-void __libc_free(void* ptr) PREALIAS(je_free);
-void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
-void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
-void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
-void *__libc_valloc(size_t size) PREALIAS(je_valloc);
-int __posix_memalign(void** r, size_t a, size_t s)
- PREALIAS(je_posix_memalign);
-#undef PREALIAS
-#undef ALIAS
-
-#endif
-
#endif
/*
@@ -2107,7 +1912,7 @@ int __posix_memalign(void** r, size_t a, size_t s)
*/
JEMALLOC_ALWAYS_INLINE_C bool
-imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
+imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize,
size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
{
@@ -2118,8 +1923,7 @@ imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
*alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
*usize = sa2u(size, *alignment);
}
- if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
- return (true);
+ assert(*usize != 0);
*zero = MALLOCX_ZERO_GET(flags);
if ((flags & MALLOCX_TCACHE_MASK) != 0) {
if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
@@ -2130,7 +1934,7 @@ imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
*tcache = tcache_get(tsd, true);
if ((flags & MALLOCX_ARENA_MASK) != 0) {
unsigned arena_ind = MALLOCX_ARENA_GET(flags);
- *arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
+ *arena = arena_get(tsd, arena_ind, true, true);
if (unlikely(*arena == NULL))
return (true);
} else
@@ -2138,44 +1942,59 @@ imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
return (false);
}
+JEMALLOC_ALWAYS_INLINE_C bool
+imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
+ size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
+{
+
+ if (likely(flags == 0)) {
+ *usize = s2u(size);
+ assert(*usize != 0);
+ *alignment = 0;
+ *zero = false;
+ *tcache = tcache_get(tsd, true);
+ *arena = NULL;
+ return (false);
+ } else {
+ return (imallocx_flags_decode_hard(tsd, size, flags, usize,
+ alignment, zero, tcache, arena));
+ }
+}
+
JEMALLOC_ALWAYS_INLINE_C void *
-imallocx_flags(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
- tcache_t *tcache, arena_t *arena, bool slow_path)
+imallocx_flags(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
+ tcache_t *tcache, arena_t *arena)
{
- szind_t ind;
if (unlikely(alignment != 0))
- return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
- ind = size2index(usize);
- assert(ind < NSIZES);
- return (iallocztm(tsdn, usize, ind, zero, tcache, false, arena,
- slow_path));
+ return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
+ if (unlikely(zero))
+ return (icalloct(tsd, usize, tcache, arena));
+ return (imalloct(tsd, usize, tcache, arena));
}
static void *
-imallocx_prof_sample(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
- tcache_t *tcache, arena_t *arena, bool slow_path)
+imallocx_prof_sample(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
+ tcache_t *tcache, arena_t *arena)
{
void *p;
if (usize <= SMALL_MAXCLASS) {
assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
- p = imallocx_flags(tsdn, LARGE_MINCLASS, alignment, zero,
- tcache, arena, slow_path);
+ p = imallocx_flags(tsd, LARGE_MINCLASS, alignment, zero, tcache,
+ arena);
if (p == NULL)
return (NULL);
- arena_prof_promoted(tsdn, p, usize);
- } else {
- p = imallocx_flags(tsdn, usize, alignment, zero, tcache, arena,
- slow_path);
- }
+ arena_prof_promoted(p, usize);
+ } else
+ p = imallocx_flags(tsd, usize, alignment, zero, tcache, arena);
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
-imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, bool slow_path)
+imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
{
void *p;
size_t alignment;
@@ -2188,27 +2007,25 @@ imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, bool slow_path)
&zero, &tcache, &arena)))
return (NULL);
tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true);
- if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
- p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero,
- tcache, arena, slow_path);
- } else if ((uintptr_t)tctx > (uintptr_t)1U) {
- p = imallocx_prof_sample(tsd_tsdn(tsd), *usize, alignment, zero,
- tcache, arena, slow_path);
+ if (likely((uintptr_t)tctx == (uintptr_t)1U))
+ p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
+ else if ((uintptr_t)tctx > (uintptr_t)1U) {
+ p = imallocx_prof_sample(tsd, *usize, alignment, zero, tcache,
+ arena);
} else
p = NULL;
if (unlikely(p == NULL)) {
prof_alloc_rollback(tsd, tctx, true);
return (NULL);
}
- prof_malloc(tsd_tsdn(tsd), p, *usize, tctx);
+ prof_malloc(p, *usize, tctx);
assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
return (p);
}
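
The closing assertion relies on alignment being zero or a power of two: a pointer is aligned exactly when its low log2(alignment) bits are clear, which the mask alignment - 1 tests without any division. In isolation:

#include <stddef.h>
#include <stdint.h>

/* alignment must be a nonzero power of two for the mask trick to hold. */
static int
is_aligned(const void *p, size_t alignment)
{
        return (((uintptr_t)p & (alignment - 1)) == 0);
}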
JEMALLOC_ALWAYS_INLINE_C void *
-imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize,
- bool slow_path)
+imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
{
void *p;
size_t alignment;
@@ -2216,53 +2033,18 @@ imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize,
tcache_t *tcache;
arena_t *arena;
- if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
- &zero, &tcache, &arena)))
- return (NULL);
- p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero, tcache,
- arena, slow_path);
- assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
- return (p);
-}
-
-/* This function guarantees that *tsdn is non-NULL on success. */
-JEMALLOC_ALWAYS_INLINE_C void *
-imallocx_body(size_t size, int flags, tsdn_t **tsdn, size_t *usize,
- bool slow_path)
-{
- tsd_t *tsd;
-
- if (slow_path && unlikely(malloc_init())) {
- *tsdn = NULL;
- return (NULL);
- }
-
- tsd = tsd_fetch();
- *tsdn = tsd_tsdn(tsd);
- witness_assert_lockless(tsd_tsdn(tsd));
-
if (likely(flags == 0)) {
- szind_t ind = size2index(size);
- if (unlikely(ind >= NSIZES))
- return (NULL);
- if (config_stats || (config_prof && opt_prof) || (slow_path &&
- config_valgrind && unlikely(in_valgrind))) {
- *usize = index2size(ind);
- assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
- }
-
- if (config_prof && opt_prof) {
- return (ialloc_prof(tsd, *usize, ind, false,
- slow_path));
- }
-
- return (ialloc(tsd, size, ind, false, slow_path));
+ if (config_stats || (config_valgrind && unlikely(in_valgrind)))
+ *usize = s2u(size);
+ return (imalloc(tsd, size));
}
- if (config_prof && opt_prof)
- return (imallocx_prof(tsd, size, flags, usize, slow_path));
-
- return (imallocx_no_prof(tsd, size, flags, usize, slow_path));
+ if (unlikely(imallocx_flags_decode_hard(tsd, size, flags, usize,
+ &alignment, &zero, &tcache, &arena)))
+ return (NULL);
+ p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
+ assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
+ return (p);
}
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
@@ -2270,24 +2052,37 @@ void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_mallocx(size_t size, int flags)
{
- tsdn_t *tsdn;
+ tsd_t *tsd;
void *p;
size_t usize;
assert(size != 0);
- if (likely(!malloc_slow)) {
- p = imallocx_body(size, flags, &tsdn, &usize, false);
- ialloc_post_check(p, tsdn, usize, "mallocx", false, false);
- } else {
- p = imallocx_body(size, flags, &tsdn, &usize, true);
- ialloc_post_check(p, tsdn, usize, "mallocx", false, true);
- UTRACE(0, size, p);
- JEMALLOC_VALGRIND_MALLOC(p != NULL, tsdn, p, usize,
- MALLOCX_ZERO_GET(flags));
- }
+ if (unlikely(malloc_init()))
+ goto label_oom;
+ tsd = tsd_fetch();
+ if (config_prof && opt_prof)
+ p = imallocx_prof(tsd, size, flags, &usize);
+ else
+ p = imallocx_no_prof(tsd, size, flags, &usize);
+ if (unlikely(p == NULL))
+ goto label_oom;
+
+ if (config_stats) {
+ assert(usize == isalloc(p, config_prof));
+ *tsd_thread_allocatedp_get(tsd) += usize;
+ }
+ UTRACE(0, size, p);
+ JEMALLOC_VALGRIND_MALLOC(true, p, usize, MALLOCX_ZERO_GET(flags));
return (p);
+label_oom:
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
+ malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
+ abort();
+ }
+ UTRACE(0, size, 0);
+ return (NULL);
}
static void *
@@ -2304,7 +2099,7 @@ irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize,
zero, tcache, arena);
if (p == NULL)
return (NULL);
- arena_prof_promoted(tsd_tsdn(tsd), p, usize);
+ arena_prof_promoted(p, usize);
} else {
p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero,
tcache, arena);
@@ -2323,8 +2118,8 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
prof_tctx_t *old_tctx, *tctx;
prof_active = prof_active_get_unlocked();
- old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
- tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
+ old_tctx = prof_tctx_get(old_ptr);
+ tctx = prof_alloc_prep(tsd, *usize, prof_active, true);
if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
alignment, zero, tcache, arena, tctx);
@@ -2333,7 +2128,7 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
tcache, arena);
}
if (unlikely(p == NULL)) {
- prof_alloc_rollback(tsd, tctx, false);
+ prof_alloc_rollback(tsd, tctx, true);
return (NULL);
}
@@ -2346,9 +2141,9 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
* be the same as the current usize because of in-place large
* reallocation. Therefore, query the actual value of usize.
*/
- *usize = isalloc(tsd_tsdn(tsd), p, config_prof);
+ *usize = isalloc(p, config_prof);
}
- prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
+ prof_realloc(tsd, p, *usize, tctx, prof_active, true, old_ptr,
old_usize, old_tctx);
return (p);
@@ -2374,11 +2169,10 @@ je_rallocx(void *ptr, size_t size, int flags)
assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
tsd = tsd_fetch();
- witness_assert_lockless(tsd_tsdn(tsd));
if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
unsigned arena_ind = MALLOCX_ARENA_GET(flags);
- arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
+ arena = arena_get(tsd, arena_ind, true, true);
if (unlikely(arena == NULL))
goto label_oom;
} else
@@ -2392,14 +2186,13 @@ je_rallocx(void *ptr, size_t size, int flags)
} else
tcache = tcache_get(tsd, true);
- old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
+ old_usize = isalloc(ptr, config_prof);
if (config_valgrind && unlikely(in_valgrind))
old_rzsize = u2rz(old_usize);
if (config_prof && opt_prof) {
usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
- if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
- goto label_oom;
+ assert(usize != 0);
p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
zero, tcache, arena);
if (unlikely(p == NULL))
@@ -2410,7 +2203,7 @@ je_rallocx(void *ptr, size_t size, int flags)
if (unlikely(p == NULL))
goto label_oom;
if (config_stats || (config_valgrind && unlikely(in_valgrind)))
- usize = isalloc(tsd_tsdn(tsd), p, config_prof);
+ usize = isalloc(p, config_prof);
}
assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
@@ -2419,9 +2212,8 @@ je_rallocx(void *ptr, size_t size, int flags)
*tsd_thread_deallocatedp_get(tsd) += old_usize;
}
UTRACE(ptr, size, p);
- JEMALLOC_VALGRIND_REALLOC(maybe, tsd_tsdn(tsd), p, usize, no, ptr,
- old_usize, old_rzsize, no, zero);
- witness_assert_lockless(tsd_tsdn(tsd));
+ JEMALLOC_VALGRIND_REALLOC(true, p, usize, false, ptr, old_usize,
+ old_rzsize, false, zero);
return (p);
label_oom:
if (config_xmalloc && unlikely(opt_xmalloc)) {
@@ -2429,33 +2221,31 @@ label_oom:
abort();
}
UTRACE(ptr, size, 0);
- witness_assert_lockless(tsd_tsdn(tsd));
return (NULL);
}
JEMALLOC_ALWAYS_INLINE_C size_t
-ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
- size_t extra, size_t alignment, bool zero)
+ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
+ size_t alignment, bool zero)
{
size_t usize;
- if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero))
+ if (ixalloc(ptr, old_usize, size, extra, alignment, zero))
return (old_usize);
- usize = isalloc(tsdn, ptr, config_prof);
+ usize = isalloc(ptr, config_prof);
return (usize);
}
static size_t
-ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
- size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx)
+ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
+ size_t alignment, bool zero, prof_tctx_t *tctx)
{
size_t usize;
if (tctx == NULL)
return (old_usize);
- usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
- zero);
+ usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, zero);
return (usize);
}
@@ -2469,36 +2259,23 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
prof_tctx_t *old_tctx, *tctx;
prof_active = prof_active_get_unlocked();
- old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);
+ old_tctx = prof_tctx_get(ptr);
/*
* usize isn't knowable before ixalloc() returns when extra is non-zero.
* Therefore, compute its maximum possible value and use that in
* prof_alloc_prep() to decide whether to capture a backtrace.
* prof_realloc() will use the actual usize to decide whether to sample.
*/
- if (alignment == 0) {
- usize_max = s2u(size+extra);
- assert(usize_max > 0 && usize_max <= HUGE_MAXCLASS);
- } else {
- usize_max = sa2u(size+extra, alignment);
- if (unlikely(usize_max == 0 || usize_max > HUGE_MAXCLASS)) {
- /*
- * usize_max is out of range, and chances are that
- * allocation will fail, but use the maximum possible
- * value and carry on with prof_alloc_prep(), just in
- * case allocation succeeds.
- */
- usize_max = HUGE_MAXCLASS;
- }
- }
+ usize_max = (alignment == 0) ? s2u(size+extra) : sa2u(size+extra,
+ alignment);
+ assert(usize_max != 0);
tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
-
if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
- usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
- size, extra, alignment, zero, tctx);
+ usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
+ alignment, zero, tctx);
} else {
- usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
- extra, alignment, zero);
+ usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
+ zero);
}
if (usize == old_usize) {
prof_alloc_rollback(tsd, tctx, false);
@@ -2525,25 +2302,18 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
tsd = tsd_fetch();
- witness_assert_lockless(tsd_tsdn(tsd));
- old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
+ old_usize = isalloc(ptr, config_prof);
- /*
- * The API explicitly absolves itself of protecting against (size +
- * extra) numerical overflow, but we may need to clamp extra to avoid
- * exceeding HUGE_MAXCLASS.
- *
- * Ordinarily, size limit checking is handled deeper down, but here we
- * have to check as part of (size + extra) clamping, since we need the
- * clamped value in the above helper functions.
- */
- if (unlikely(size > HUGE_MAXCLASS)) {
- usize = old_usize;
- goto label_not_resized;
- }
- if (unlikely(HUGE_MAXCLASS - size < extra))
+ /* Clamp extra if necessary to avoid (size + extra) overflow. */
+ if (unlikely(size + extra > HUGE_MAXCLASS)) {
+ /* Check for size overflow. */
+ if (unlikely(size > HUGE_MAXCLASS)) {
+ usize = old_usize;
+ goto label_not_resized;
+ }
extra = HUGE_MAXCLASS - size;
+ }
if (config_valgrind && unlikely(in_valgrind))
old_rzsize = u2rz(old_usize);
@@ -2552,8 +2322,8 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
alignment, zero);
} else {
- usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
- extra, alignment, zero);
+ usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
+ zero);
}
if (unlikely(usize == old_usize))
goto label_not_resized;
@@ -2562,11 +2332,10 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
*tsd_thread_allocatedp_get(tsd) += usize;
*tsd_thread_deallocatedp_get(tsd) += old_usize;
}
- JEMALLOC_VALGRIND_REALLOC(no, tsd_tsdn(tsd), ptr, usize, no, ptr,
- old_usize, old_rzsize, no, zero);
+ JEMALLOC_VALGRIND_REALLOC(false, ptr, usize, false, ptr, old_usize,
+ old_rzsize, false, zero);
label_not_resized:
UTRACE(ptr, size, ptr);
- witness_assert_lockless(tsd_tsdn(tsd));
return (usize);
}
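
Both sides of the hunk above clamp extra so that the resize target stays within the largest huge size class; the removed ('-') side does it without ever forming size + extra, so the sum cannot wrap. A standalone sketch of that overflow-safe formulation, with maxclass standing in for HUGE_MAXCLASS:

#include <stdbool.h>
#include <stddef.h>

/* Overflow-safe clamp mirroring the removed ('-') side: size + extra is
 * never computed directly, so no wraparound is possible. Returns true
 * when size alone already exceeds the largest class. */
static bool
clamp_extra(size_t size, size_t *extra, size_t maxclass)
{
	if (size > maxclass)
		return true;		/* not resizable at all */
	if (maxclass - size < *extra)
		*extra = maxclass - size;
	return false;
}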
@@ -2575,20 +2344,15 @@ JEMALLOC_ATTR(pure)
je_sallocx(const void *ptr, int flags)
{
size_t usize;
- tsdn_t *tsdn;
assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
- tsdn = tsdn_fetch();
- witness_assert_lockless(tsdn);
-
if (config_ivsalloc)
- usize = ivsalloc(tsdn, ptr, config_prof);
+ usize = ivsalloc(ptr, config_prof);
else
- usize = isalloc(tsdn, ptr, config_prof);
+ usize = isalloc(ptr, config_prof);
- witness_assert_lockless(tsdn);
return (usize);
}
@@ -2602,7 +2366,6 @@ je_dallocx(void *ptr, int flags)
assert(malloc_initialized() || IS_INITIALIZER);
tsd = tsd_fetch();
- witness_assert_lockless(tsd_tsdn(tsd));
if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
tcache = NULL;
@@ -2612,25 +2375,19 @@ je_dallocx(void *ptr, int flags)
tcache = tcache_get(tsd, false);
UTRACE(ptr, 0, 0);
- if (likely(!malloc_slow))
- ifree(tsd, ptr, tcache, false);
- else
- ifree(tsd, ptr, tcache, true);
- witness_assert_lockless(tsd_tsdn(tsd));
+ ifree(tsd_fetch(), ptr, tcache);
}
JEMALLOC_ALWAYS_INLINE_C size_t
-inallocx(tsdn_t *tsdn, size_t size, int flags)
+inallocx(size_t size, int flags)
{
size_t usize;
- witness_assert_lockless(tsdn);
-
if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
usize = s2u(size);
else
usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
- witness_assert_lockless(tsdn);
+ assert(usize != 0);
return (usize);
}
@@ -2643,11 +2400,10 @@ je_sdallocx(void *ptr, size_t size, int flags)
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
- tsd = tsd_fetch();
- usize = inallocx(tsd_tsdn(tsd), size, flags);
- assert(usize == isalloc(tsd_tsdn(tsd), ptr, config_prof));
+ usize = inallocx(size, flags);
+ assert(usize == isalloc(ptr, config_prof));
- witness_assert_lockless(tsd_tsdn(tsd));
+ tsd = tsd_fetch();
if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
tcache = NULL;
@@ -2657,116 +2413,75 @@ je_sdallocx(void *ptr, size_t size, int flags)
tcache = tcache_get(tsd, false);
UTRACE(ptr, 0, 0);
- if (likely(!malloc_slow))
- isfree(tsd, ptr, usize, tcache, false);
- else
- isfree(tsd, ptr, usize, tcache, true);
- witness_assert_lockless(tsd_tsdn(tsd));
+ isfree(tsd, ptr, usize, tcache);
}
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)
je_nallocx(size_t size, int flags)
{
- size_t usize;
- tsdn_t *tsdn;
assert(size != 0);
if (unlikely(malloc_init()))
return (0);
- tsdn = tsdn_fetch();
- witness_assert_lockless(tsdn);
-
- usize = inallocx(tsdn, size, flags);
- if (unlikely(usize > HUGE_MAXCLASS))
- return (0);
-
- witness_assert_lockless(tsdn);
- return (usize);
+ return (inallocx(size, flags));
}
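
The behavioral difference in this hunk: the removed side returns 0 for requests that exceed HUGE_MAXCLASS, while the restored side asserts instead; the public intent of nallocx() is unchanged. A minimal usage sketch of that public API:

#include <jemalloc/jemalloc.h>
#include <stdio.h>

int
main(void)
{
	/* Size class a 100-byte request would occupy; nothing is
	 * allocated. Under the default size classes this prints 112. */
	printf("%zu\n", nallocx(100, 0));
	return 0;
}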
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
size_t newlen)
{
- int ret;
- tsd_t *tsd;
if (unlikely(malloc_init()))
return (EAGAIN);
- tsd = tsd_fetch();
- witness_assert_lockless(tsd_tsdn(tsd));
- ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
- witness_assert_lockless(tsd_tsdn(tsd));
- return (ret);
+ return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{
- int ret;
- tsdn_t *tsdn;
if (unlikely(malloc_init()))
return (EAGAIN);
- tsdn = tsdn_fetch();
- witness_assert_lockless(tsdn);
- ret = ctl_nametomib(tsdn, name, mibp, miblenp);
- witness_assert_lockless(tsdn);
- return (ret);
+ return (ctl_nametomib(name, mibp, miblenp));
}
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
- int ret;
- tsd_t *tsd;
if (unlikely(malloc_init()))
return (EAGAIN);
- tsd = tsd_fetch();
- witness_assert_lockless(tsd_tsdn(tsd));
- ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
- witness_assert_lockless(tsd_tsdn(tsd));
- return (ret);
+ return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}
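
These three hunks only strip the tsd/witness plumbing; the public mallctl contract is unchanged. For orientation, a minimal caller-side sketch against the standard jemalloc API:

#include <jemalloc/jemalloc.h>
#include <stdio.h>

/* Read the "stats.allocated" node; the value reflects the last stats
 * epoch refresh, not necessarily this instant. */
static void
print_allocated(void)
{
	size_t allocated, sz = sizeof(allocated);

	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
		printf("allocated: %zu bytes\n", allocated);
}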
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
const char *opts)
{
- tsdn_t *tsdn;
- tsdn = tsdn_fetch();
- witness_assert_lockless(tsdn);
stats_print(write_cb, cbopaque, opts);
- witness_assert_lockless(tsdn);
}
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{
size_t ret;
- tsdn_t *tsdn;
assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
- tsdn = tsdn_fetch();
- witness_assert_lockless(tsdn);
-
if (config_ivsalloc)
- ret = ivsalloc(tsdn, ptr, config_prof);
+ ret = ivsalloc(ptr, config_prof);
else
- ret = (ptr == NULL) ? 0 : isalloc(tsdn, ptr, config_prof);
+ ret = (ptr == NULL) ? 0 : isalloc(ptr, config_prof);
- witness_assert_lockless(tsdn);
return (ret);
}
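
Caller-visible behavior is again unchanged by this hunk. A glibc-flavored sketch of the contract (malloc_usable_size() is declared in <malloc.h> on Linux; the 112-byte figure assumes jemalloc's default size classes):

#include <assert.h>
#include <malloc.h>
#include <stdlib.h>

int
main(void)
{
	void *p = malloc(100);

	/* Usable size is at least the request; under jemalloc's default
	 * size classes a 100-byte request lands in the 112-byte bin. */
	assert(p != NULL && malloc_usable_size(p) >= 100);
	free(p);
	return 0;
}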
@@ -2792,7 +2507,6 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
* to trigger the deadlock described above, but doing so would involve forking
* via a library constructor that runs before jemalloc's runs.
*/
-#ifndef JEMALLOC_JET
JEMALLOC_ATTR(constructor)
static void
jemalloc_constructor(void)
@@ -2800,7 +2514,6 @@ jemalloc_constructor(void)
malloc_init();
}
-#endif
#ifndef JEMALLOC_MUTEX_INIT_CB
void
@@ -2810,9 +2523,7 @@ JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
- tsd_t *tsd;
- unsigned i, j, narenas;
- arena_t *arena;
+ unsigned i;
#ifdef JEMALLOC_MUTEX_INIT_CB
if (!malloc_initialized())
@@ -2820,40 +2531,16 @@ _malloc_prefork(void)
#endif
assert(malloc_initialized());
- tsd = tsd_fetch();
-
- narenas = narenas_total_get();
-
- witness_prefork(tsd);
/* Acquire all mutexes in a safe order. */
- ctl_prefork(tsd_tsdn(tsd));
- malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
- prof_prefork0(tsd_tsdn(tsd));
- for (i = 0; i < 3; i++) {
- for (j = 0; j < narenas; j++) {
- if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
- NULL) {
- switch (i) {
- case 0:
- arena_prefork0(tsd_tsdn(tsd), arena);
- break;
- case 1:
- arena_prefork1(tsd_tsdn(tsd), arena);
- break;
- case 2:
- arena_prefork2(tsd_tsdn(tsd), arena);
- break;
- default: not_reached();
- }
- }
- }
- }
- base_prefork(tsd_tsdn(tsd));
- for (i = 0; i < narenas; i++) {
- if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
- arena_prefork3(tsd_tsdn(tsd), arena);
+ ctl_prefork();
+ prof_prefork();
+ malloc_mutex_prefork(&arenas_lock);
+ for (i = 0; i < narenas_total; i++) {
+ if (arenas[i] != NULL)
+ arena_prefork(arenas[i]);
}
- prof_prefork1(tsd_tsdn(tsd));
+ chunk_prefork();
+ base_prefork();
}
#ifndef JEMALLOC_MUTEX_INIT_CB
@@ -2864,8 +2551,7 @@ JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
- tsd_t *tsd;
- unsigned i, narenas;
+ unsigned i;
#ifdef JEMALLOC_MUTEX_INIT_CB
if (!malloc_initialized())
@@ -2873,44 +2559,35 @@ _malloc_postfork(void)
#endif
assert(malloc_initialized());
- tsd = tsd_fetch();
-
- witness_postfork_parent(tsd);
/* Release all mutexes, now that fork() has completed. */
- base_postfork_parent(tsd_tsdn(tsd));
- for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
- arena_t *arena;
-
- if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
- arena_postfork_parent(tsd_tsdn(tsd), arena);
+ base_postfork_parent();
+ chunk_postfork_parent();
+ for (i = 0; i < narenas_total; i++) {
+ if (arenas[i] != NULL)
+ arena_postfork_parent(arenas[i]);
}
- prof_postfork_parent(tsd_tsdn(tsd));
- malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
- ctl_postfork_parent(tsd_tsdn(tsd));
+ malloc_mutex_postfork_parent(&arenas_lock);
+ prof_postfork_parent();
+ ctl_postfork_parent();
}
void
jemalloc_postfork_child(void)
{
- tsd_t *tsd;
- unsigned i, narenas;
+ unsigned i;
assert(malloc_initialized());
- tsd = tsd_fetch();
-
- witness_postfork_child(tsd);
/* Release all mutexes, now that fork() has completed. */
- base_postfork_child(tsd_tsdn(tsd));
- for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
- arena_t *arena;
-
- if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
- arena_postfork_child(tsd_tsdn(tsd), arena);
+ base_postfork_child();
+ chunk_postfork_child();
+ for (i = 0; i < narenas_total; i++) {
+ if (arenas[i] != NULL)
+ arena_postfork_child(arenas[i]);
}
- prof_postfork_child(tsd_tsdn(tsd));
- malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
- ctl_postfork_child(tsd_tsdn(tsd));
+ malloc_mutex_postfork_child(&arenas_lock);
+ prof_postfork_child();
+ ctl_postfork_child();
}
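
Both versions of the fork hooks follow the usual pthread_atfork() discipline: acquire every allocator mutex in a fixed order before fork() so the child never inherits a lock held by a thread that no longer exists, then release afterwards. A generic sketch of the pattern, independent of jemalloc's internals:

#include <pthread.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static void prefork(void) { pthread_mutex_lock(&state_lock); }
static void postfork_parent(void) { pthread_mutex_unlock(&state_lock); }
static void postfork_child(void)
{
	/* Unlock here for simplicity; as malloc_mutex_postfork_child()
	 * shows, jemalloc re-initializes the mutex on platforms where
	 * unlocking an inherited lock is unsafe. */
	pthread_mutex_unlock(&state_lock);
}

/* Registered once at startup, mirroring _malloc_prefork() and friends. */
static void
install_fork_hooks(void)
{
	pthread_atfork(prefork, postfork_parent, postfork_child);
}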
/******************************************************************************/
@@ -2930,10 +2607,9 @@ je_get_defrag_hint(void* ptr, int *bin_util, int *run_util) {
if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) { /* indication that this is not a LARGE alloc */
arena_t *arena = extent_node_arena_get(&chunk->node);
size_t rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
- arena_run_t *run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
+ arena_run_t *run = &arena_miscelm_get(chunk, rpages_ind)->run;
arena_bin_t *bin = &arena->bins[run->binind];
- tsd_t *tsd = tsd_fetch();
- malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ malloc_mutex_lock(&bin->lock);
 		/* Runs in the same chunk as the current run are likely to become the next runcur, so don't flag them. */
if (chunk != (arena_chunk_t *)CHUNK_ADDR2BASE(bin->runcur)) {
arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
@@ -2942,7 +2618,7 @@ je_get_defrag_hint(void* ptr, int *bin_util, int *run_util) {
*run_util = ((bin_info->nregs - run->nfree)<<16) / bin_info->nregs;
defrag = 1;
}
- malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ malloc_mutex_unlock(&bin->lock);
}
}
return defrag;
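
The hint values are 16.16 fixed-point utilization fractions; a standalone sketch of the arithmetic used in je_get_defrag_hint(), with hypothetical run numbers:

#include <assert.h>

int
main(void)
{
	/* Hypothetical run shape: 64 regions, 16 of them free. */
	unsigned nregs = 64, nfree = 16;
	int run_util = (int)(((nregs - nfree) << 16) / nregs);

	assert(run_util == 49152);	/* 49152 / 65536 == 0.75, i.e. 75% full */
	return 0;
}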
diff --git a/deps/jemalloc/src/mutex.c b/deps/jemalloc/src/mutex.c
index 6333e73d6..2d47af976 100644
--- a/deps/jemalloc/src/mutex.c
+++ b/deps/jemalloc/src/mutex.c
@@ -69,7 +69,7 @@ JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
#endif
bool
-malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank)
+malloc_mutex_init(malloc_mutex_t *mutex)
{
#ifdef _WIN32
@@ -80,8 +80,6 @@ malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank)
_CRT_SPINCOUNT))
return (true);
# endif
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
- mutex->lock = OS_UNFAIR_LOCK_INIT;
#elif (defined(JEMALLOC_OSSPIN))
mutex->lock = 0;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
@@ -105,34 +103,31 @@ malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank)
}
pthread_mutexattr_destroy(&attr);
#endif
- if (config_debug)
- witness_init(&mutex->witness, name, rank, NULL);
return (false);
}
void
-malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex)
+malloc_mutex_prefork(malloc_mutex_t *mutex)
{
- malloc_mutex_lock(tsdn, mutex);
+ malloc_mutex_lock(mutex);
}
void
-malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex)
+malloc_mutex_postfork_parent(malloc_mutex_t *mutex)
{
- malloc_mutex_unlock(tsdn, mutex);
+ malloc_mutex_unlock(mutex);
}
void
-malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex)
+malloc_mutex_postfork_child(malloc_mutex_t *mutex)
{
#ifdef JEMALLOC_MUTEX_INIT_CB
- malloc_mutex_unlock(tsdn, mutex);
+ malloc_mutex_unlock(mutex);
#else
- if (malloc_mutex_init(mutex, mutex->witness.name,
- mutex->witness.rank)) {
+ if (malloc_mutex_init(mutex)) {
malloc_printf("<jemalloc>: Error re-initializing mutex in "
"child\n");
if (opt_abort)
@@ -142,7 +137,7 @@ malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex)
}
bool
-malloc_mutex_boot(void)
+mutex_boot(void)
{
#ifdef JEMALLOC_MUTEX_INIT_CB
diff --git a/deps/jemalloc/src/nstime.c b/deps/jemalloc/src/nstime.c
deleted file mode 100644
index 0948e29fa..000000000
--- a/deps/jemalloc/src/nstime.c
+++ /dev/null
@@ -1,194 +0,0 @@
-#include "jemalloc/internal/jemalloc_internal.h"
-
-#define BILLION UINT64_C(1000000000)
-
-void
-nstime_init(nstime_t *time, uint64_t ns)
-{
-
- time->ns = ns;
-}
-
-void
-nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec)
-{
-
- time->ns = sec * BILLION + nsec;
-}
-
-uint64_t
-nstime_ns(const nstime_t *time)
-{
-
- return (time->ns);
-}
-
-uint64_t
-nstime_sec(const nstime_t *time)
-{
-
- return (time->ns / BILLION);
-}
-
-uint64_t
-nstime_nsec(const nstime_t *time)
-{
-
- return (time->ns % BILLION);
-}
-
-void
-nstime_copy(nstime_t *time, const nstime_t *source)
-{
-
- *time = *source;
-}
-
-int
-nstime_compare(const nstime_t *a, const nstime_t *b)
-{
-
- return ((a->ns > b->ns) - (a->ns < b->ns));
-}
-
-void
-nstime_add(nstime_t *time, const nstime_t *addend)
-{
-
- assert(UINT64_MAX - time->ns >= addend->ns);
-
- time->ns += addend->ns;
-}
-
-void
-nstime_subtract(nstime_t *time, const nstime_t *subtrahend)
-{
-
- assert(nstime_compare(time, subtrahend) >= 0);
-
- time->ns -= subtrahend->ns;
-}
-
-void
-nstime_imultiply(nstime_t *time, uint64_t multiplier)
-{
-
- assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) <<
- 2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns));
-
- time->ns *= multiplier;
-}
-
-void
-nstime_idivide(nstime_t *time, uint64_t divisor)
-{
-
- assert(divisor != 0);
-
- time->ns /= divisor;
-}
-
-uint64_t
-nstime_divide(const nstime_t *time, const nstime_t *divisor)
-{
-
- assert(divisor->ns != 0);
-
- return (time->ns / divisor->ns);
-}
-
-#ifdef _WIN32
-# define NSTIME_MONOTONIC true
-static void
-nstime_get(nstime_t *time)
-{
- FILETIME ft;
- uint64_t ticks_100ns;
-
- GetSystemTimeAsFileTime(&ft);
- ticks_100ns = (((uint64_t)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
-
- nstime_init(time, ticks_100ns * 100);
-}
-#elif JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
-# define NSTIME_MONOTONIC true
-static void
-nstime_get(nstime_t *time)
-{
- struct timespec ts;
-
- clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
- nstime_init2(time, ts.tv_sec, ts.tv_nsec);
-}
-#elif JEMALLOC_HAVE_CLOCK_MONOTONIC
-# define NSTIME_MONOTONIC true
-static void
-nstime_get(nstime_t *time)
-{
- struct timespec ts;
-
- clock_gettime(CLOCK_MONOTONIC, &ts);
- nstime_init2(time, ts.tv_sec, ts.tv_nsec);
-}
-#elif JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
-# define NSTIME_MONOTONIC true
-static void
-nstime_get(nstime_t *time)
-{
-
- nstime_init(time, mach_absolute_time());
-}
-#else
-# define NSTIME_MONOTONIC false
-static void
-nstime_get(nstime_t *time)
-{
- struct timeval tv;
-
- gettimeofday(&tv, NULL);
- nstime_init2(time, tv.tv_sec, tv.tv_usec * 1000);
-}
-#endif
-
-#ifdef JEMALLOC_JET
-#undef nstime_monotonic
-#define nstime_monotonic JEMALLOC_N(n_nstime_monotonic)
-#endif
-bool
-nstime_monotonic(void)
-{
-
- return (NSTIME_MONOTONIC);
-#undef NSTIME_MONOTONIC
-}
-#ifdef JEMALLOC_JET
-#undef nstime_monotonic
-#define nstime_monotonic JEMALLOC_N(nstime_monotonic)
-nstime_monotonic_t *nstime_monotonic = JEMALLOC_N(n_nstime_monotonic);
-#endif
-
-#ifdef JEMALLOC_JET
-#undef nstime_update
-#define nstime_update JEMALLOC_N(n_nstime_update)
-#endif
-bool
-nstime_update(nstime_t *time)
-{
- nstime_t old_time;
-
- nstime_copy(&old_time, time);
- nstime_get(time);
-
- /* Handle non-monotonic clocks. */
- if (unlikely(nstime_compare(&old_time, time) > 0)) {
- nstime_copy(time, &old_time);
- return (true);
- }
-
- return (false);
-}
-#ifdef JEMALLOC_JET
-#undef nstime_update
-#define nstime_update JEMALLOC_N(nstime_update)
-nstime_update_t *nstime_update = JEMALLOC_N(n_nstime_update);
-#endif
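
The removed nstime_update() tolerates non-monotonic clocks by clamping to the previous reading and reporting the anomaly. A self-contained sketch of that policy on a bare uint64_t counter:

#include <stdbool.h>
#include <stdint.h>

/* Sketch of the clamp in the removed nstime_update(): if the clock ran
 * backwards, keep the previous reading and return true. */
static bool
ns_update(uint64_t *time_ns, uint64_t now_ns)
{
	if (now_ns < *time_ns)
		return true;	/* non-monotonic; *time_ns unchanged */
	*time_ns = now_ns;
	return false;
}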
diff --git a/deps/jemalloc/src/pages.c b/deps/jemalloc/src/pages.c
index 5f0c9669d..83a167f67 100644
--- a/deps/jemalloc/src/pages.c
+++ b/deps/jemalloc/src/pages.c
@@ -1,49 +1,29 @@
#define JEMALLOC_PAGES_C_
#include "jemalloc/internal/jemalloc_internal.h"
-#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
-#include <sys/sysctl.h>
-#endif
-
-/******************************************************************************/
-/* Data. */
-
-#ifndef _WIN32
-# define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE)
-# define PAGES_PROT_DECOMMIT (PROT_NONE)
-static int mmap_flags;
-#endif
-static bool os_overcommits;
-
/******************************************************************************/
void *
-pages_map(void *addr, size_t size, bool *commit)
+pages_map(void *addr, size_t size)
{
void *ret;
assert(size != 0);
- if (os_overcommits)
- *commit = true;
-
#ifdef _WIN32
/*
* If VirtualAlloc can't allocate at the given address when one is
* given, it fails and returns NULL.
*/
- ret = VirtualAlloc(addr, size, MEM_RESERVE | (*commit ? MEM_COMMIT : 0),
+ ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
PAGE_READWRITE);
#else
/*
* We don't use MAP_FIXED here, because it can cause the *replacement*
* of existing mappings, and we only want to create new mappings.
*/
- {
- int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
-
- ret = mmap(addr, size, prot, mmap_flags, -1, 0);
- }
+ ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
+ -1, 0);
assert(ret != NULL);
if (ret == MAP_FAILED)
@@ -87,8 +67,7 @@ pages_unmap(void *addr, size_t size)
}
void *
-pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
- bool *commit)
+pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
{
void *ret = (void *)((uintptr_t)addr + leadsize);
@@ -98,7 +77,7 @@ pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
void *new_addr;
pages_unmap(addr, alloc_size);
- new_addr = pages_map(ret, size, commit);
+ new_addr = pages_map(ret, size);
if (new_addr == ret)
return (ret);
if (new_addr)
@@ -122,17 +101,17 @@ static bool
pages_commit_impl(void *addr, size_t size, bool commit)
{
- if (os_overcommits)
- return (true);
-
-#ifdef _WIN32
- return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT,
- PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT)));
-#else
- {
- int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
- void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED,
- -1, 0);
+#ifndef _WIN32
+ /*
+ * The following decommit/commit implementation is functional, but
+ * always disabled because it doesn't add value beyond improved
+ * debugging (at the cost of extra system calls) on systems that
+ * overcommit.
+ */
+ if (false) {
+ int prot = commit ? (PROT_READ | PROT_WRITE) : PROT_NONE;
+ void *result = mmap(addr, size, prot, MAP_PRIVATE | MAP_ANON |
+ MAP_FIXED, -1, 0);
if (result == MAP_FAILED)
return (true);
if (result != addr) {
@@ -146,6 +125,7 @@ pages_commit_impl(void *addr, size_t size, bool commit)
return (false);
}
#endif
+ return (true);
}
bool
@@ -170,16 +150,15 @@ pages_purge(void *addr, size_t size)
#ifdef _WIN32
VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
unzeroed = true;
-#elif (defined(JEMALLOC_PURGE_MADVISE_FREE) || \
- defined(JEMALLOC_PURGE_MADVISE_DONTNEED))
-# if defined(JEMALLOC_PURGE_MADVISE_FREE)
-# define JEMALLOC_MADV_PURGE MADV_FREE
-# define JEMALLOC_MADV_ZEROS false
-# elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED)
+#elif defined(JEMALLOC_HAVE_MADVISE)
+# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
# define JEMALLOC_MADV_PURGE MADV_DONTNEED
# define JEMALLOC_MADV_ZEROS true
+# elif defined(JEMALLOC_PURGE_MADVISE_FREE)
+# define JEMALLOC_MADV_PURGE MADV_FREE
+# define JEMALLOC_MADV_ZEROS false
# else
-# error No madvise(2) flag defined for purging unused dirty pages
+# error "No madvise(2) flag defined for purging unused dirty pages."
# endif
int err = madvise(addr, size, JEMALLOC_MADV_PURGE);
unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0);
@@ -192,111 +171,3 @@ pages_purge(void *addr, size_t size)
return (unzeroed);
}
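
The flag ordering above encodes a semantic difference: on Linux anonymous mappings, MADV_DONTNEED guarantees the next touch reads back as zeroes, whereas MADV_FREE (where available) may return the stale contents — hence the JEMALLOC_MADV_ZEROS distinction. A minimal sketch of the purge call itself:

#include <stddef.h>
#include <sys/mman.h>

/* Purge a dirty range as pages_purge() does on Linux; pages in [addr,
 * addr+len) are reclaimable and will read as zero if touched again. */
static int
purge_range(void *addr, size_t len)
{
	return madvise(addr, len, MADV_DONTNEED);
}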
-bool
-pages_huge(void *addr, size_t size)
-{
-
- assert(PAGE_ADDR2BASE(addr) == addr);
- assert(PAGE_CEILING(size) == size);
-
-#ifdef JEMALLOC_THP
- return (madvise(addr, size, MADV_HUGEPAGE) != 0);
-#else
- return (false);
-#endif
-}
-
-bool
-pages_nohuge(void *addr, size_t size)
-{
-
- assert(PAGE_ADDR2BASE(addr) == addr);
- assert(PAGE_CEILING(size) == size);
-
-#ifdef JEMALLOC_THP
- return (madvise(addr, size, MADV_NOHUGEPAGE) != 0);
-#else
- return (false);
-#endif
-}
-
-#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
-static bool
-os_overcommits_sysctl(void)
-{
- int vm_overcommit;
- size_t sz;
-
- sz = sizeof(vm_overcommit);
- if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0)
- return (false); /* Error. */
-
- return ((vm_overcommit & 0x3) == 0);
-}
-#endif
-
-#ifdef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
-/*
- * Use syscall(2) rather than {open,read,close}(2) when possible to avoid
- * reentry during bootstrapping if another library has interposed system call
- * wrappers.
- */
-static bool
-os_overcommits_proc(void)
-{
- int fd;
- char buf[1];
- ssize_t nread;
-
-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
- fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY);
-#else
- fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
-#endif
- if (fd == -1)
- return (false); /* Error. */
-
-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
- nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf));
-#else
- nread = read(fd, &buf, sizeof(buf));
-#endif
-
-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
- syscall(SYS_close, fd);
-#else
- close(fd);
-#endif
-
- if (nread < 1)
- return (false); /* Error. */
- /*
- * /proc/sys/vm/overcommit_memory meanings:
- * 0: Heuristic overcommit.
- * 1: Always overcommit.
- * 2: Never overcommit.
- */
- return (buf[0] == '0' || buf[0] == '1');
-}
-#endif
-
-void
-pages_boot(void)
-{
-
-#ifndef _WIN32
- mmap_flags = MAP_PRIVATE | MAP_ANON;
-#endif
-
-#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
- os_overcommits = os_overcommits_sysctl();
-#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY)
- os_overcommits = os_overcommits_proc();
-# ifdef MAP_NORESERVE
- if (os_overcommits)
- mmap_flags |= MAP_NORESERVE;
-# endif
-#else
- os_overcommits = false;
-#endif
-}
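
The removed probe keys off a single character of /proc/sys/vm/overcommit_memory, per the meanings documented in the deleted comment. A self-contained sketch of the same check using stdio (the deleted code uses raw syscalls instead, to stay reentrancy-safe if another library has interposed the file-I/O wrappers during bootstrap):

#include <stdbool.h>
#include <stdio.h>

/* True if the kernel overcommits (mode 0: heuristic, or 1: always). */
static bool
linux_overcommits(void)
{
	FILE *f = fopen("/proc/sys/vm/overcommit_memory", "r");
	int c;

	if (f == NULL)
		return false;	/* error: conservatively assume no overcommit */
	c = fgetc(f);
	fclose(f);
	return (c == '0' || c == '1');
}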
diff --git a/deps/jemalloc/src/prng.c b/deps/jemalloc/src/prng.c
deleted file mode 100644
index 76646a2a4..000000000
--- a/deps/jemalloc/src/prng.c
+++ /dev/null
@@ -1,2 +0,0 @@
-#define JEMALLOC_PRNG_C_
-#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/deps/jemalloc/src/prof.c b/deps/jemalloc/src/prof.c
index c89dade1f..5d2b9598f 100644
--- a/deps/jemalloc/src/prof.c
+++ b/deps/jemalloc/src/prof.c
@@ -109,7 +109,7 @@ static char prof_dump_buf[
1
#endif
];
-static size_t prof_dump_buf_end;
+static unsigned prof_dump_buf_end;
static int prof_dump_fd;
/* Do not dump any profiles until bootstrapping is complete. */
@@ -121,13 +121,13 @@ static bool prof_booted = false;
* definition.
*/
-static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx);
+static bool prof_tctx_should_destroy(prof_tctx_t *tctx);
static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
-static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
+static bool prof_tdata_should_destroy(prof_tdata_t *tdata,
bool even_if_attached);
static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata,
bool even_if_attached);
-static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name);
+static char *prof_thread_name_alloc(tsd_t *tsd, const char *thread_name);
/******************************************************************************/
/* Red-black trees. */
@@ -213,23 +213,22 @@ prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated)
}
if ((uintptr_t)tctx > (uintptr_t)1U) {
- malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
+ malloc_mutex_lock(tctx->tdata->lock);
tctx->prepared = false;
- if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx))
+ if (prof_tctx_should_destroy(tctx))
prof_tctx_destroy(tsd, tctx);
else
- malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
+ malloc_mutex_unlock(tctx->tdata->lock);
}
}
void
-prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
- prof_tctx_t *tctx)
+prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx)
{
- prof_tctx_set(tsdn, ptr, usize, tctx);
+ prof_tctx_set(ptr, usize, tctx);
- malloc_mutex_lock(tsdn, tctx->tdata->lock);
+ malloc_mutex_lock(tctx->tdata->lock);
tctx->cnts.curobjs++;
tctx->cnts.curbytes += usize;
if (opt_prof_accum) {
@@ -237,23 +236,23 @@ prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
tctx->cnts.accumbytes += usize;
}
tctx->prepared = false;
- malloc_mutex_unlock(tsdn, tctx->tdata->lock);
+ malloc_mutex_unlock(tctx->tdata->lock);
}
void
prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
{
- malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
+ malloc_mutex_lock(tctx->tdata->lock);
assert(tctx->cnts.curobjs > 0);
assert(tctx->cnts.curbytes >= usize);
tctx->cnts.curobjs--;
tctx->cnts.curbytes -= usize;
- if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx))
+ if (prof_tctx_should_destroy(tctx))
prof_tctx_destroy(tsd, tctx);
else
- malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
+ malloc_mutex_unlock(tctx->tdata->lock);
}
void
@@ -278,7 +277,7 @@ prof_enter(tsd_t *tsd, prof_tdata_t *tdata)
tdata->enq = true;
}
- malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
+ malloc_mutex_lock(&bt2gctx_mtx);
}
JEMALLOC_INLINE_C void
@@ -288,7 +287,7 @@ prof_leave(tsd_t *tsd, prof_tdata_t *tdata)
cassert(config_prof);
assert(tdata == prof_tdata_get(tsd, false));
- malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
+ malloc_mutex_unlock(&bt2gctx_mtx);
if (tdata != NULL) {
bool idump, gdump;
@@ -301,9 +300,9 @@ prof_leave(tsd_t *tsd, prof_tdata_t *tdata)
tdata->enq_gdump = false;
if (idump)
- prof_idump(tsd_tsdn(tsd));
+ prof_idump();
if (gdump)
- prof_gdump(tsd_tsdn(tsd));
+ prof_gdump();
}
}
@@ -547,15 +546,14 @@ prof_tdata_mutex_choose(uint64_t thr_uid)
}
static prof_gctx_t *
-prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt)
+prof_gctx_create(tsd_t *tsd, prof_bt_t *bt)
{
/*
* Create a single allocation that has space for vec of length bt->len.
*/
- size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
- prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size,
- size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
- true);
+ prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsd, offsetof(prof_gctx_t,
+ vec) + (bt->len * sizeof(void *)), false, tcache_get(tsd, true),
+ true, NULL);
if (gctx == NULL)
return (NULL);
gctx->lock = prof_gctx_mutex_choose();
@@ -587,7 +585,7 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
* into this function.
*/
prof_enter(tsd, tdata_self);
- malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ malloc_mutex_lock(gctx->lock);
assert(gctx->nlimbo != 0);
if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
/* Remove gctx from bt2gctx. */
@@ -595,25 +593,24 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
not_reached();
prof_leave(tsd, tdata_self);
/* Destroy gctx. */
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
- idalloctm(tsd_tsdn(tsd), gctx, NULL, true, true);
+ malloc_mutex_unlock(gctx->lock);
+ idalloctm(tsd, gctx, tcache_get(tsd, false), true);
} else {
/*
* Compensate for increment in prof_tctx_destroy() or
* prof_lookup().
*/
gctx->nlimbo--;
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ malloc_mutex_unlock(gctx->lock);
prof_leave(tsd, tdata_self);
}
}
+/* tctx->tdata->lock must be held. */
static bool
-prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx)
+prof_tctx_should_destroy(prof_tctx_t *tctx)
{
- malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
-
if (opt_prof_accum)
return (false);
if (tctx->cnts.curobjs != 0)
@@ -636,6 +633,7 @@ prof_gctx_should_destroy(prof_gctx_t *gctx)
return (true);
}
+/* tctx->tdata->lock is held upon entry, and released before return. */
static void
prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
{
@@ -643,8 +641,6 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
prof_gctx_t *gctx = tctx->gctx;
bool destroy_tdata, destroy_tctx, destroy_gctx;
- malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
-
assert(tctx->cnts.curobjs == 0);
assert(tctx->cnts.curbytes == 0);
assert(!opt_prof_accum);
@@ -652,10 +648,10 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
assert(tctx->cnts.accumbytes == 0);
ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
- destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false);
- malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+ destroy_tdata = prof_tdata_should_destroy(tdata, false);
+ malloc_mutex_unlock(tdata->lock);
- malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ malloc_mutex_lock(gctx->lock);
switch (tctx->state) {
case prof_tctx_state_nominal:
tctx_tree_remove(&gctx->tctxs, tctx);
@@ -695,19 +691,17 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
destroy_tctx = false;
destroy_gctx = false;
}
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ malloc_mutex_unlock(gctx->lock);
if (destroy_gctx) {
prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
tdata);
}
- malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock);
-
if (destroy_tdata)
prof_tdata_destroy(tsd, tdata, false);
if (destroy_tctx)
- idalloctm(tsd_tsdn(tsd), tctx, NULL, true, true);
+ idalloctm(tsd, tctx, tcache_get(tsd, false), true);
}
static bool
@@ -727,7 +721,7 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
prof_enter(tsd, tdata);
if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
/* bt has never been seen before. Insert it. */
- gctx.p = prof_gctx_create(tsd_tsdn(tsd), bt);
+ gctx.p = prof_gctx_create(tsd, bt);
if (gctx.v == NULL) {
prof_leave(tsd, tdata);
return (true);
@@ -736,7 +730,7 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
/* OOM. */
prof_leave(tsd, tdata);
- idalloctm(tsd_tsdn(tsd), gctx.v, NULL, true, true);
+ idalloctm(tsd, gctx.v, tcache_get(tsd, false), true);
return (true);
}
new_gctx = true;
@@ -745,9 +739,9 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
* Increment nlimbo, in order to avoid a race condition with
* prof_tctx_destroy()/prof_gctx_try_destroy().
*/
- malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock);
+ malloc_mutex_lock(gctx.p->lock);
gctx.p->nlimbo++;
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock);
+ malloc_mutex_unlock(gctx.p->lock);
new_gctx = false;
}
prof_leave(tsd, tdata);
@@ -774,12 +768,13 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
if (tdata == NULL)
return (NULL);
- malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
+ malloc_mutex_lock(tdata->lock);
not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
if (!not_found) /* Note double negative! */
ret.p->prepared = true;
- malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+ malloc_mutex_unlock(tdata->lock);
if (not_found) {
+ tcache_t *tcache;
void *btkey;
prof_gctx_t *gctx;
bool new_gctx, error;
@@ -793,9 +788,9 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
return (NULL);
/* Link a prof_tctx_t into gctx for this thread. */
- ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t),
- size2index(sizeof(prof_tctx_t)), false, NULL, true,
- arena_ichoose(tsd, NULL), true);
+ tcache = tcache_get(tsd, true);
+ ret.v = iallocztm(tsd, sizeof(prof_tctx_t), false, tcache, true,
+ NULL);
if (ret.p == NULL) {
if (new_gctx)
prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
@@ -809,41 +804,41 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
ret.p->tctx_uid = tdata->tctx_uid_next++;
ret.p->prepared = true;
ret.p->state = prof_tctx_state_initializing;
- malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
+ malloc_mutex_lock(tdata->lock);
error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
- malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+ malloc_mutex_unlock(tdata->lock);
if (error) {
if (new_gctx)
prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
- idalloctm(tsd_tsdn(tsd), ret.v, NULL, true, true);
+ idalloctm(tsd, ret.v, tcache, true);
return (NULL);
}
- malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ malloc_mutex_lock(gctx->lock);
ret.p->state = prof_tctx_state_nominal;
tctx_tree_insert(&gctx->tctxs, ret.p);
gctx->nlimbo--;
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ malloc_mutex_unlock(gctx->lock);
}
return (ret.p);
}
-/*
- * The bodies of this function and prof_leakcheck() are compiled out unless heap
- * profiling is enabled, so that it is possible to compile jemalloc with
- * floating point support completely disabled. Avoiding floating point code is
- * important on memory-constrained systems, but it also enables a workaround for
- * versions of glibc that don't properly save/restore floating point registers
- * during dynamic lazy symbol loading (which internally calls into whatever
- * malloc implementation happens to be integrated into the application). Note
- * that some compilers (e.g. gcc 4.8) may use floating point registers for fast
- * memory moves, so jemalloc must be compiled with such optimizations disabled
- * (e.g. -mno-sse) in order for the workaround to be complete.
- */
void
prof_sample_threshold_update(prof_tdata_t *tdata)
{
+ /*
+ * The body of this function is compiled out unless heap profiling is
+ * enabled, so that it is possible to compile jemalloc with floating
+ * point support completely disabled. Avoiding floating point code is
+ * important on memory-constrained systems, but it also enables a
+ * workaround for versions of glibc that don't properly save/restore
+ * floating point registers during dynamic lazy symbol loading (which
+ * internally calls into whatever malloc implementation happens to be
+ * integrated into the application). Note that some compilers (e.g.
+ * gcc 4.8) may use floating point registers for fast memory moves, so
+ * jemalloc must be compiled with such optimizations disabled (e.g.
+ * -mno-sse) in order for the workaround to be complete.
+ */
#ifdef JEMALLOC_PROF
uint64_t r;
double u;
@@ -874,7 +869,8 @@ prof_sample_threshold_update(prof_tdata_t *tdata)
* pp 500
* (http://luc.devroye.org/rnbookindex.html)
*/
- r = prng_lg_range_u64(&tdata->prng_state, 53);
+ prng64(r, 53, tdata->prng_state, UINT64_C(6364136223846793005),
+ UINT64_C(1442695040888963407));
u = (double)r * (1.0/9007199254740992.0L);
tdata->bytes_until_sample = (uint64_t)(log(u) /
log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
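
The cited inversion method draws bytes_until_sample as an exponential deviate with mean 2^lg_prof_sample. A standalone numeric sketch of the same draw, using rand() as a stand-in for jemalloc's PRNG and 19 (the documented default lg_prof_sample, one sample per ~512 KiB) as the rate parameter:

#include <math.h>
#include <stdint.h>
#include <stdlib.h>

/* Exponential threshold draw via inversion, as in
 * prof_sample_threshold_update() above. */
static uint64_t
draw_threshold(unsigned lg_sample)
{
	double u = (rand() + 1.0) / ((double)RAND_MAX + 2.0);	/* u in (0,1) */

	return (uint64_t)(log(u) /
	    log(1.0 - (1.0 / (double)((uint64_t)1U << lg_sample)))) +
	    (uint64_t)1U;
}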
@@ -897,13 +893,11 @@ size_t
prof_tdata_count(void)
{
size_t tdata_count = 0;
- tsdn_t *tsdn;
- tsdn = tsdn_fetch();
- malloc_mutex_lock(tsdn, &tdatas_mtx);
+ malloc_mutex_lock(&tdatas_mtx);
tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
(void *)&tdata_count);
- malloc_mutex_unlock(tsdn, &tdatas_mtx);
+ malloc_mutex_unlock(&tdatas_mtx);
return (tdata_count);
}
@@ -922,9 +916,9 @@ prof_bt_count(void)
if (tdata == NULL)
return (0);
- malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
+ malloc_mutex_lock(&bt2gctx_mtx);
bt_count = ckh_count(&bt2gctx);
- malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
+ malloc_mutex_unlock(&bt2gctx_mtx);
return (bt_count);
}
@@ -994,7 +988,7 @@ prof_dump_close(bool propagate_err)
static bool
prof_dump_write(bool propagate_err, const char *s)
{
- size_t i, slen, n;
+ unsigned i, slen, n;
cassert(config_prof);
@@ -1037,21 +1031,20 @@ prof_dump_printf(bool propagate_err, const char *format, ...)
return (ret);
}
+/* tctx->tdata->lock is held. */
static void
-prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata)
+prof_tctx_merge_tdata(prof_tctx_t *tctx, prof_tdata_t *tdata)
{
- malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
-
- malloc_mutex_lock(tsdn, tctx->gctx->lock);
+ malloc_mutex_lock(tctx->gctx->lock);
switch (tctx->state) {
case prof_tctx_state_initializing:
- malloc_mutex_unlock(tsdn, tctx->gctx->lock);
+ malloc_mutex_unlock(tctx->gctx->lock);
return;
case prof_tctx_state_nominal:
tctx->state = prof_tctx_state_dumping;
- malloc_mutex_unlock(tsdn, tctx->gctx->lock);
+ malloc_mutex_unlock(tctx->gctx->lock);
memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
@@ -1070,12 +1063,11 @@ prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata)
}
}
+/* gctx->lock is held. */
static void
-prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx)
+prof_tctx_merge_gctx(prof_tctx_t *tctx, prof_gctx_t *gctx)
{
- malloc_mutex_assert_owner(tsdn, gctx->lock);
-
gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
if (opt_prof_accum) {
@@ -1084,12 +1076,10 @@ prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx)
}
}
+/* tctx->gctx->lock is held. */
static prof_tctx_t *
prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
{
- tsdn_t *tsdn = (tsdn_t *)arg;
-
- malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
switch (tctx->state) {
case prof_tctx_state_nominal:
@@ -1097,7 +1087,7 @@ prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
break;
case prof_tctx_state_dumping:
case prof_tctx_state_purgatory:
- prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx);
+ prof_tctx_merge_gctx(tctx, tctx->gctx);
break;
default:
not_reached();
@@ -1106,18 +1096,11 @@ prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
return (NULL);
}
-struct prof_tctx_dump_iter_arg_s {
- tsdn_t *tsdn;
- bool propagate_err;
-};
-
+/* gctx->lock is held. */
static prof_tctx_t *
-prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque)
+prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
{
- struct prof_tctx_dump_iter_arg_s *arg =
- (struct prof_tctx_dump_iter_arg_s *)opaque;
-
- malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock);
+ bool propagate_err = *(bool *)arg;
switch (tctx->state) {
case prof_tctx_state_initializing:
@@ -1126,7 +1109,7 @@ prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque)
break;
case prof_tctx_state_dumping:
case prof_tctx_state_purgatory:
- if (prof_dump_printf(arg->propagate_err,
+ if (prof_dump_printf(propagate_err,
" t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": "
"%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
@@ -1139,14 +1122,12 @@ prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque)
return (NULL);
}
+/* tctx->gctx->lock is held. */
static prof_tctx_t *
prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
{
- tsdn_t *tsdn = (tsdn_t *)arg;
prof_tctx_t *ret;
- malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
-
switch (tctx->state) {
case prof_tctx_state_nominal:
/* New since dumping started; ignore. */
@@ -1167,12 +1148,12 @@ label_return:
}
static void
-prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
+prof_dump_gctx_prep(prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
{
cassert(config_prof);
- malloc_mutex_lock(tsdn, gctx->lock);
+ malloc_mutex_lock(gctx->lock);
/*
* Increment nlimbo so that gctx won't go away before dump.
@@ -1184,26 +1165,19 @@ prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));
- malloc_mutex_unlock(tsdn, gctx->lock);
+ malloc_mutex_unlock(gctx->lock);
}
-struct prof_gctx_merge_iter_arg_s {
- tsdn_t *tsdn;
- size_t leak_ngctx;
-};
-
static prof_gctx_t *
-prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque)
+prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
{
- struct prof_gctx_merge_iter_arg_s *arg =
- (struct prof_gctx_merge_iter_arg_s *)opaque;
+ size_t *leak_ngctx = (size_t *)arg;
- malloc_mutex_lock(arg->tsdn, gctx->lock);
- tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter,
- (void *)arg->tsdn);
+ malloc_mutex_lock(gctx->lock);
+ tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, NULL);
if (gctx->cnt_summed.curobjs != 0)
- arg->leak_ngctx++;
- malloc_mutex_unlock(arg->tsdn, gctx->lock);
+ (*leak_ngctx)++;
+ malloc_mutex_unlock(gctx->lock);
return (NULL);
}
@@ -1222,7 +1196,7 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
*/
while ((gctx = gctx_tree_first(gctxs)) != NULL) {
gctx_tree_remove(gctxs, gctx);
- malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ malloc_mutex_lock(gctx->lock);
{
prof_tctx_t *next;
@@ -1230,15 +1204,14 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
do {
prof_tctx_t *to_destroy =
tctx_tree_iter(&gctx->tctxs, next,
- prof_tctx_finish_iter,
- (void *)tsd_tsdn(tsd));
+ prof_tctx_finish_iter, NULL);
if (to_destroy != NULL) {
next = tctx_tree_next(&gctx->tctxs,
to_destroy);
tctx_tree_remove(&gctx->tctxs,
to_destroy);
- idalloctm(tsd_tsdn(tsd), to_destroy,
- NULL, true, true);
+ idalloctm(tsd, to_destroy,
+ tcache_get(tsd, false), true);
} else
next = NULL;
} while (next != NULL);
@@ -1246,26 +1219,19 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
gctx->nlimbo--;
if (prof_gctx_should_destroy(gctx)) {
gctx->nlimbo++;
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ malloc_mutex_unlock(gctx->lock);
prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
} else
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ malloc_mutex_unlock(gctx->lock);
}
}
-struct prof_tdata_merge_iter_arg_s {
- tsdn_t *tsdn;
- prof_cnt_t cnt_all;
-};
-
static prof_tdata_t *
-prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
- void *opaque)
+prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
{
- struct prof_tdata_merge_iter_arg_s *arg =
- (struct prof_tdata_merge_iter_arg_s *)opaque;
+ prof_cnt_t *cnt_all = (prof_cnt_t *)arg;
- malloc_mutex_lock(arg->tsdn, tdata->lock);
+ malloc_mutex_lock(tdata->lock);
if (!tdata->expired) {
size_t tabind;
union {
@@ -1277,17 +1243,17 @@ prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
&tctx.v);)
- prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata);
+ prof_tctx_merge_tdata(tctx.p, tdata);
- arg->cnt_all.curobjs += tdata->cnt_summed.curobjs;
- arg->cnt_all.curbytes += tdata->cnt_summed.curbytes;
+ cnt_all->curobjs += tdata->cnt_summed.curobjs;
+ cnt_all->curbytes += tdata->cnt_summed.curbytes;
if (opt_prof_accum) {
- arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs;
- arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes;
+ cnt_all->accumobjs += tdata->cnt_summed.accumobjs;
+ cnt_all->accumbytes += tdata->cnt_summed.accumbytes;
}
} else
tdata->dumping = false;
- malloc_mutex_unlock(arg->tsdn, tdata->lock);
+ malloc_mutex_unlock(tdata->lock);
return (NULL);
}
@@ -1316,7 +1282,7 @@ prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
#define prof_dump_header JEMALLOC_N(prof_dump_header_impl)
#endif
static bool
-prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all)
+prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all)
{
bool ret;
@@ -1327,10 +1293,10 @@ prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all)
cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes))
return (true);
- malloc_mutex_lock(tsdn, &tdatas_mtx);
+ malloc_mutex_lock(&tdatas_mtx);
ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
(void *)&propagate_err) != NULL);
- malloc_mutex_unlock(tsdn, &tdatas_mtx);
+ malloc_mutex_unlock(&tdatas_mtx);
return (ret);
}
#ifdef JEMALLOC_JET
@@ -1339,16 +1305,15 @@ prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all)
prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl);
#endif
+/* gctx->lock is held. */
static bool
-prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx,
- const prof_bt_t *bt, prof_gctx_tree_t *gctxs)
+prof_dump_gctx(bool propagate_err, prof_gctx_t *gctx, const prof_bt_t *bt,
+ prof_gctx_tree_t *gctxs)
{
bool ret;
unsigned i;
- struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg;
cassert(config_prof);
- malloc_mutex_assert_owner(tsdn, gctx->lock);
/* Avoid dumping such gctx's that have no useful data. */
if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
@@ -1382,10 +1347,8 @@ prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx,
goto label_return;
}
- prof_tctx_dump_iter_arg.tsdn = tsdn;
- prof_tctx_dump_iter_arg.propagate_err = propagate_err;
if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter,
- (void *)&prof_tctx_dump_iter_arg) != NULL) {
+ (void *)&propagate_err) != NULL) {
ret = true;
goto label_return;
}
@@ -1395,7 +1358,6 @@ label_return:
return (ret);
}
-#ifndef _WIN32
JEMALLOC_FORMAT_PRINTF(1, 2)
static int
prof_open_maps(const char *format, ...)
@@ -1411,18 +1373,6 @@ prof_open_maps(const char *format, ...)
return (mfd);
}
-#endif
-
-static int
-prof_getpid(void)
-{
-
-#ifdef _WIN32
- return (GetCurrentProcessId());
-#else
- return (getpid());
-#endif
-}
static bool
prof_dump_maps(bool propagate_err)
@@ -1433,11 +1383,9 @@ prof_dump_maps(bool propagate_err)
cassert(config_prof);
#ifdef __FreeBSD__
mfd = prof_open_maps("/proc/curproc/map");
-#elif defined(_WIN32)
- mfd = -1; // Not implemented
#else
{
- int pid = prof_getpid();
+ int pid = getpid();
mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
if (mfd == -1)
@@ -1478,66 +1426,39 @@ label_return:
return (ret);
}
-/*
- * See prof_sample_threshold_update() comment for why the body of this function
- * is conditionally compiled.
- */
static void
prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
const char *filename)
{
-#ifdef JEMALLOC_PROF
- /*
- * Scaling is equivalent to AdjustSamples() in jeprof, but the result may
- * differ slightly from what jeprof reports, because here we scale the
- * summary values, whereas jeprof scales each context individually and
- * reports the sums of the scaled values.
- */
if (cnt_all->curbytes != 0) {
- double sample_period = (double)((uint64_t)1 << lg_prof_sample);
- double ratio = (((double)cnt_all->curbytes) /
- (double)cnt_all->curobjs) / sample_period;
- double scale_factor = 1.0 / (1.0 - exp(-ratio));
- uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes)
- * scale_factor);
- uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) *
- scale_factor);
-
- malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64
- " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n",
- curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs !=
- 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : "");
+ malloc_printf("<jemalloc>: Leak summary: %"FMTu64" byte%s, %"
+ FMTu64" object%s, %zu context%s\n",
+ cnt_all->curbytes, (cnt_all->curbytes != 1) ? "s" : "",
+ cnt_all->curobjs, (cnt_all->curobjs != 1) ? "s" : "",
+ leak_ngctx, (leak_ngctx != 1) ? "s" : "");
malloc_printf(
"<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
filename);
}
-#endif
}
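
The deleted lines implement the unbiasing the comment describes: an object of mean size m sampled with period p stands for roughly 1/(1 - e^(-m/p)) real objects. A numeric sketch with hypothetical counters (1000 sampled objects totalling 8 MiB, 512 KiB sample period):

#include <math.h>
#include <stdint.h>

/* Sketch of the removed AdjustSamples-style scaling of leak summaries. */
static uint64_t
scaled_leak_objs(void)
{
	uint64_t curobjs = 1000, curbytes = (uint64_t)8 << 20;
	double sample_period = (double)((uint64_t)1 << 19);
	double ratio = ((double)curbytes / (double)curobjs) / sample_period;
	double scale = 1.0 / (1.0 - exp(-ratio));

	/* ratio ~= 0.016, scale ~= 63: each sample represents ~63 objects. */
	return (uint64_t)round((double)curobjs * scale);
}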
-struct prof_gctx_dump_iter_arg_s {
- tsdn_t *tsdn;
- bool propagate_err;
-};
-
static prof_gctx_t *
-prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque)
+prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
{
prof_gctx_t *ret;
- struct prof_gctx_dump_iter_arg_s *arg =
- (struct prof_gctx_dump_iter_arg_s *)opaque;
+ bool propagate_err = *(bool *)arg;
- malloc_mutex_lock(arg->tsdn, gctx->lock);
+ malloc_mutex_lock(gctx->lock);
- if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt,
- gctxs)) {
+ if (prof_dump_gctx(propagate_err, gctx, &gctx->bt, gctxs)) {
ret = gctx;
goto label_return;
}
ret = NULL;
label_return:
- malloc_mutex_unlock(arg->tsdn, gctx->lock);
+ malloc_mutex_unlock(gctx->lock);
return (ret);
}
@@ -1545,14 +1466,13 @@ static bool
prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
{
prof_tdata_t *tdata;
- struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
+ prof_cnt_t cnt_all;
size_t tabind;
union {
prof_gctx_t *p;
void *v;
} gctx;
- struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
- struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg;
+ size_t leak_ngctx;
prof_gctx_tree_t gctxs;
cassert(config_prof);
@@ -1561,7 +1481,7 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
if (tdata == NULL)
return (true);
- malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
+ malloc_mutex_lock(&prof_dump_mtx);
prof_enter(tsd, tdata);
/*
@@ -1570,24 +1490,20 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
*/
gctx_tree_new(&gctxs);
for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);)
- prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, &gctxs);
+ prof_dump_gctx_prep(gctx.p, &gctxs);
/*
* Iterate over tdatas, and for the non-expired ones snapshot their tctx
* stats and merge them into the associated gctx's.
*/
- prof_tdata_merge_iter_arg.tsdn = tsd_tsdn(tsd);
- memset(&prof_tdata_merge_iter_arg.cnt_all, 0, sizeof(prof_cnt_t));
- malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
- tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter,
- (void *)&prof_tdata_merge_iter_arg);
- malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
+ memset(&cnt_all, 0, sizeof(prof_cnt_t));
+ malloc_mutex_lock(&tdatas_mtx);
+ tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, (void *)&cnt_all);
+ malloc_mutex_unlock(&tdatas_mtx);
/* Merge tctx stats into gctx's. */
- prof_gctx_merge_iter_arg.tsdn = tsd_tsdn(tsd);
- prof_gctx_merge_iter_arg.leak_ngctx = 0;
- gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter,
- (void *)&prof_gctx_merge_iter_arg);
+ leak_ngctx = 0;
+ gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter, (void *)&leak_ngctx);
prof_leave(tsd, tdata);
@@ -1596,15 +1512,12 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
goto label_open_close_error;
/* Dump profile header. */
- if (prof_dump_header(tsd_tsdn(tsd), propagate_err,
- &prof_tdata_merge_iter_arg.cnt_all))
+ if (prof_dump_header(propagate_err, &cnt_all))
goto label_write_error;
/* Dump per gctx profile stats. */
- prof_gctx_dump_iter_arg.tsdn = tsd_tsdn(tsd);
- prof_gctx_dump_iter_arg.propagate_err = propagate_err;
if (gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter,
- (void *)&prof_gctx_dump_iter_arg) != NULL)
+ (void *)&propagate_err) != NULL)
goto label_write_error;
/* Dump /proc/<pid>/maps if possible. */
@@ -1615,18 +1528,17 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
goto label_open_close_error;
prof_gctx_finish(tsd, &gctxs);
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
+ malloc_mutex_unlock(&prof_dump_mtx);
+
+ if (leakcheck)
+ prof_leakcheck(&cnt_all, leak_ngctx, filename);
- if (leakcheck) {
- prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all,
- prof_gctx_merge_iter_arg.leak_ngctx, filename);
- }
return (false);
label_write_error:
prof_dump_close(propagate_err);
label_open_close_error:
prof_gctx_finish(tsd, &gctxs);
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
+ malloc_mutex_unlock(&prof_dump_mtx);
return (true);
}
@@ -1642,12 +1554,12 @@ prof_dump_filename(char *filename, char v, uint64_t vseq)
/* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
"%s.%d.%"FMTu64".%c%"FMTu64".heap",
- opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq);
+ opt_prof_prefix, (int)getpid(), prof_dump_seq, v, vseq);
} else {
/* "<prefix>.<pid>.<seq>.<v>.heap" */
malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
"%s.%d.%"FMTu64".%c.heap",
- opt_prof_prefix, prof_getpid(), prof_dump_seq, v);
+ opt_prof_prefix, (int)getpid(), prof_dump_seq, v);
}
prof_dump_seq++;
}
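
The filename scheme above is "<prefix>.<pid>.<seq>.<v><vseq>.heap", with a per-process sequence number and a one-letter dump-type tag ('f' final, 'i' interval, 'm' manual, 'u' gdump). A standalone sketch of one branch, assuming snprintf/getpid as stand-ins for malloc_snprintf() and the prof_getpid()/getpid() call the hunk toggles between:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define DUMP_FILENAME_BUFSIZE 256

    static uint64_t prof_dump_seq;	/* Guarded by prof_dump_seq_mtx in the real code. */

    static void
    dump_filename(char *buf, const char *prefix, char v, uint64_t vseq)
    {
    	/* E.g. "jeprof.1234.0.i5.heap". */
    	snprintf(buf, DUMP_FILENAME_BUFSIZE,
    	    "%s.%d.%" PRIu64 ".%c%" PRIu64 ".heap",
    	    prefix, (int)getpid(), prof_dump_seq, v, vseq);
    	prof_dump_seq++;
    }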
@@ -1666,23 +1578,23 @@ prof_fdump(void)
return;
tsd = tsd_fetch();
- malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename, 'f', VSEQ_INVALID);
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ malloc_mutex_unlock(&prof_dump_seq_mtx);
prof_dump(tsd, false, filename, opt_prof_leak);
}
void
-prof_idump(tsdn_t *tsdn)
+prof_idump(void)
{
tsd_t *tsd;
prof_tdata_t *tdata;
cassert(config_prof);
- if (!prof_booted || tsdn_null(tsdn))
+ if (!prof_booted)
return;
- tsd = tsdn_tsd(tsdn);
+ tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, false);
if (tdata == NULL)
return;
@@ -1693,48 +1605,50 @@ prof_idump(tsdn_t *tsdn)
if (opt_prof_prefix[0] != '\0') {
char filename[PATH_MAX + 1];
- malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename, 'i', prof_dump_iseq);
prof_dump_iseq++;
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ malloc_mutex_unlock(&prof_dump_seq_mtx);
prof_dump(tsd, false, filename, false);
}
}
bool
-prof_mdump(tsd_t *tsd, const char *filename)
+prof_mdump(const char *filename)
{
+ tsd_t *tsd;
char filename_buf[DUMP_FILENAME_BUFSIZE];
cassert(config_prof);
if (!opt_prof || !prof_booted)
return (true);
+ tsd = tsd_fetch();
if (filename == NULL) {
/* No filename specified, so automatically generate one. */
if (opt_prof_prefix[0] == '\0')
return (true);
- malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
prof_dump_mseq++;
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ malloc_mutex_unlock(&prof_dump_seq_mtx);
filename = filename_buf;
}
return (prof_dump(tsd, true, filename, false));
}
void
-prof_gdump(tsdn_t *tsdn)
+prof_gdump(void)
{
tsd_t *tsd;
prof_tdata_t *tdata;
cassert(config_prof);
- if (!prof_booted || tsdn_null(tsdn))
+ if (!prof_booted)
return;
- tsd = tsdn_tsd(tsdn);
+ tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, false);
if (tdata == NULL)
return;
@@ -1745,10 +1659,10 @@ prof_gdump(tsdn_t *tsdn)
if (opt_prof_prefix[0] != '\0') {
char filename[DUMP_FILENAME_BUFSIZE];
- malloc_mutex_lock(tsdn, &prof_dump_seq_mtx);
+ malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename, 'u', prof_dump_useq);
prof_dump_useq++;
- malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx);
+ malloc_mutex_unlock(&prof_dump_seq_mtx);
prof_dump(tsd, false, filename, false);
}
}
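
Both prof_idump() and prof_gdump() revert to the same trigger shape: fetch thread state on demand with tsd_fetch() and bail out early when profiling is unavailable, dropping 4.4's extra tsdn_null() guard for callers without thread state. A compilable sketch of that control flow (the typedefs and stubs are hypothetical stand-ins, only there to make the shape self-contained):

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct { int dummy; } tsd_t;
    typedef struct { int dummy; } prof_tdata_t;

    static bool prof_booted = false;
    static tsd_t tsd_storage;

    static tsd_t *tsd_fetch(void) { return (&tsd_storage); }
    static prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create)
    { (void)tsd; (void)create; return (NULL); }

    static void
    prof_trigger_dump(void)
    {
    	tsd_t *tsd;
    	prof_tdata_t *tdata;

    	if (!prof_booted)	/* Profiling never initialized. */
    		return;
    	tsd = tsd_fetch();	/* Reverted code fetches tsd on demand... */
    	tdata = prof_tdata_get(tsd, false);
    	if (tdata == NULL)	/* ...and bails without per-thread data. */
    		return;
    	/* Take prof_dump_seq_mtx, build a filename, call prof_dump(). */
    }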
@@ -1777,14 +1691,14 @@ prof_bt_keycomp(const void *k1, const void *k2)
}
JEMALLOC_INLINE_C uint64_t
-prof_thr_uid_alloc(tsdn_t *tsdn)
+prof_thr_uid_alloc(void)
{
uint64_t thr_uid;
- malloc_mutex_lock(tsdn, &next_thr_uid_mtx);
+ malloc_mutex_lock(&next_thr_uid_mtx);
thr_uid = next_thr_uid;
next_thr_uid++;
- malloc_mutex_unlock(tsdn, &next_thr_uid_mtx);
+ malloc_mutex_unlock(&next_thr_uid_mtx);
return (thr_uid);
}
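
prof_thr_uid_alloc() is a mutex-protected monotonic counter: thread UIDs are never reused, even across detach/reinit, so profiles can distinguish reincarnations of a thread. The same pattern with POSIX primitives (pthread_mutex_t standing in for malloc_mutex_t):

    #include <pthread.h>
    #include <stdint.h>

    static uint64_t next_uid;
    static pthread_mutex_t next_uid_mtx = PTHREAD_MUTEX_INITIALIZER;

    static uint64_t
    uid_alloc(void)
    {
    	uint64_t uid;

    	pthread_mutex_lock(&next_uid_mtx);
    	uid = next_uid++;	/* Monotonic; never reused. */
    	pthread_mutex_unlock(&next_uid_mtx);
    	return (uid);
    }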
@@ -1794,13 +1708,14 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
char *thread_name, bool active)
{
prof_tdata_t *tdata;
+ tcache_t *tcache;
cassert(config_prof);
/* Initialize an empty cache for this thread. */
- tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t),
- size2index(sizeof(prof_tdata_t)), false, NULL, true,
- arena_get(TSDN_NULL, 0, true), true);
+ tcache = tcache_get(tsd, true);
+ tdata = (prof_tdata_t *)iallocztm(tsd, sizeof(prof_tdata_t), false,
+ tcache, true, NULL);
if (tdata == NULL)
return (NULL);
@@ -1812,9 +1727,9 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
tdata->expired = false;
tdata->tctx_uid_next = 0;
- if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash,
- prof_bt_keycomp)) {
- idalloctm(tsd_tsdn(tsd), tdata, NULL, true, true);
+ if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS,
+ prof_bt_hash, prof_bt_keycomp)) {
+ idalloctm(tsd, tdata, tcache, true);
return (NULL);
}
@@ -1828,9 +1743,9 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
tdata->dumping = false;
tdata->active = active;
- malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
+ malloc_mutex_lock(&tdatas_mtx);
tdata_tree_insert(&tdatas, tdata);
- malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
+ malloc_mutex_unlock(&tdatas_mtx);
return (tdata);
}
@@ -1839,12 +1754,13 @@ prof_tdata_t *
prof_tdata_init(tsd_t *tsd)
{
- return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0,
- NULL, prof_thread_active_init_get(tsd_tsdn(tsd))));
+ return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(), 0, NULL,
+ prof_thread_active_init_get()));
}
+/* tdata->lock must be held. */
static bool
-prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached)
+prof_tdata_should_destroy(prof_tdata_t *tdata, bool even_if_attached)
{
if (tdata->attached && !even_if_attached)
@@ -1854,40 +1770,32 @@ prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached)
return (true);
}
-static bool
-prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
- bool even_if_attached)
-{
-
- malloc_mutex_assert_owner(tsdn, tdata->lock);
-
- return (prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
-}
-
+/* tdatas_mtx must be held. */
static void
prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
bool even_if_attached)
{
+ tcache_t *tcache;
- malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx);
+ assert(prof_tdata_should_destroy(tdata, even_if_attached));
+ assert(tsd_prof_tdata_get(tsd) != tdata);
tdata_tree_remove(&tdatas, tdata);
- assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
-
+ tcache = tcache_get(tsd, false);
if (tdata->thread_name != NULL)
- idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, true, true);
+ idalloctm(tsd, tdata->thread_name, tcache, true);
ckh_delete(tsd, &tdata->bt2tctx);
- idalloctm(tsd_tsdn(tsd), tdata, NULL, true, true);
+ idalloctm(tsd, tdata, tcache, true);
}
static void
prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached)
{
- malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
+ malloc_mutex_lock(&tdatas_mtx);
prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
- malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
+ malloc_mutex_unlock(&tdatas_mtx);
}
static void
@@ -1895,10 +1803,9 @@ prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
{
bool destroy_tdata;
- malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
+ malloc_mutex_lock(tdata->lock);
if (tdata->attached) {
- destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata,
- true);
+ destroy_tdata = prof_tdata_should_destroy(tdata, true);
/*
* Only detach if !destroy_tdata, because detaching would allow
* another thread to win the race to destroy tdata.
@@ -1908,7 +1815,7 @@ prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
tsd_prof_tdata_set(tsd, NULL);
} else
destroy_tdata = false;
- malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+ malloc_mutex_unlock(tdata->lock);
if (destroy_tdata)
prof_tdata_destroy(tsd, tdata, true);
}
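
The hunks above trade 4.4's runtime ownership checks (malloc_mutex_assert_owner()) for the older comment convention ("tdata->lock must be held.", "tdatas_mtx must be held."). A sketch of how such an assertion can be built on plain pthreads; the owner bookkeeping here is hypothetical, and real implementations typically compile it in only for debug builds:

    #include <assert.h>
    #include <pthread.h>
    #include <stdbool.h>

    typedef struct {
    	pthread_mutex_t mtx;
    	pthread_t owner;
    	bool owned;
    } dbg_mutex_t;

    static void
    dbg_mutex_lock(dbg_mutex_t *m)
    {
    	pthread_mutex_lock(&m->mtx);
    	m->owner = pthread_self();
    	m->owned = true;
    }

    static void
    dbg_mutex_unlock(dbg_mutex_t *m)
    {
    	m->owned = false;
    	pthread_mutex_unlock(&m->mtx);
    }

    static void
    dbg_mutex_assert_owner(dbg_mutex_t *m)
    {
    	assert(m->owned && pthread_equal(m->owner, pthread_self()));
    }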
@@ -1919,7 +1826,7 @@ prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
uint64_t thr_uid = tdata->thr_uid;
uint64_t thr_discrim = tdata->thr_discrim + 1;
char *thread_name = (tdata->thread_name != NULL) ?
- prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL;
+ prof_thread_name_alloc(tsd, tdata->thread_name) : NULL;
bool active = tdata->active;
prof_tdata_detach(tsd, tdata);
@@ -1928,18 +1835,18 @@ prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
}
static bool
-prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata)
+prof_tdata_expire(prof_tdata_t *tdata)
{
bool destroy_tdata;
- malloc_mutex_lock(tsdn, tdata->lock);
+ malloc_mutex_lock(tdata->lock);
if (!tdata->expired) {
tdata->expired = true;
destroy_tdata = tdata->attached ? false :
- prof_tdata_should_destroy(tsdn, tdata, false);
+ prof_tdata_should_destroy(tdata, false);
} else
destroy_tdata = false;
- malloc_mutex_unlock(tsdn, tdata->lock);
+ malloc_mutex_unlock(tdata->lock);
return (destroy_tdata);
}
@@ -1947,9 +1854,8 @@ prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata)
static prof_tdata_t *
prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
{
- tsdn_t *tsdn = (tsdn_t *)arg;
- return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL);
+ return (prof_tdata_expire(tdata) ? tdata : NULL);
}
void
@@ -1959,15 +1865,15 @@ prof_reset(tsd_t *tsd, size_t lg_sample)
assert(lg_sample < (sizeof(uint64_t) << 3));
- malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
- malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
+ malloc_mutex_lock(&prof_dump_mtx);
+ malloc_mutex_lock(&tdatas_mtx);
lg_prof_sample = lg_sample;
next = NULL;
do {
prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
- prof_tdata_reset_iter, (void *)tsd);
+ prof_tdata_reset_iter, NULL);
if (to_destroy != NULL) {
next = tdata_tree_next(&tdatas, to_destroy);
prof_tdata_destroy_locked(tsd, to_destroy, false);
@@ -1975,8 +1881,8 @@ prof_reset(tsd_t *tsd, size_t lg_sample)
next = NULL;
} while (next != NULL);
- malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
+ malloc_mutex_unlock(&tdatas_mtx);
+ malloc_mutex_unlock(&prof_dump_mtx);
}
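
prof_reset() destroys tdatas while iterating, which the tree iterator itself cannot do safely; the loop therefore captures the successor before destroying a node and restarts iteration from it. The same idea over a sorted linked list (item_t and the helpers are illustrative):

    #include <stdbool.h>
    #include <stdlib.h>

    typedef struct item_s item_t;
    struct item_s {
    	bool expired;
    	item_t *next;
    };

    static item_t *
    first_expired(item_t *it)
    {
    	for (; it != NULL; it = it->next) {
    		if (it->expired)
    			return (it);
    	}
    	return (NULL);
    }

    static void
    reset_all(item_t **headp)
    {
    	item_t *next = *headp;

    	do {
    		item_t *to_destroy = first_expired(next);

    		if (to_destroy != NULL) {
    			item_t **pp;

    			next = to_destroy->next;	/* Capture before freeing. */
    			for (pp = headp; *pp != to_destroy; pp = &(*pp)->next)
    				;			/* Unlink. */
    			*pp = to_destroy->next;
    			free(to_destroy);
    		} else
    			next = NULL;
    	} while (next != NULL);
    }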
void
@@ -1993,33 +1899,35 @@ prof_tdata_cleanup(tsd_t *tsd)
}
bool
-prof_active_get(tsdn_t *tsdn)
+prof_active_get(void)
{
bool prof_active_current;
- malloc_mutex_lock(tsdn, &prof_active_mtx);
+ malloc_mutex_lock(&prof_active_mtx);
prof_active_current = prof_active;
- malloc_mutex_unlock(tsdn, &prof_active_mtx);
+ malloc_mutex_unlock(&prof_active_mtx);
return (prof_active_current);
}
bool
-prof_active_set(tsdn_t *tsdn, bool active)
+prof_active_set(bool active)
{
bool prof_active_old;
- malloc_mutex_lock(tsdn, &prof_active_mtx);
+ malloc_mutex_lock(&prof_active_mtx);
prof_active_old = prof_active;
prof_active = active;
- malloc_mutex_unlock(tsdn, &prof_active_mtx);
+ malloc_mutex_unlock(&prof_active_mtx);
return (prof_active_old);
}
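
prof_active_get()/prof_active_set() follow a small recurring pattern: all reads and writes of a shared flag funnel through one mutex, and the setter returns the previous value so callers can restore it. The same shape with POSIX primitives (names here are stand-ins, not the jemalloc globals):

    #include <pthread.h>
    #include <stdbool.h>

    static bool active_flag;
    static pthread_mutex_t active_mtx = PTHREAD_MUTEX_INITIALIZER;

    static bool
    active_get(void)
    {
    	bool cur;

    	pthread_mutex_lock(&active_mtx);
    	cur = active_flag;
    	pthread_mutex_unlock(&active_mtx);
    	return (cur);
    }

    static bool
    active_set(bool active)
    {
    	bool old;

    	pthread_mutex_lock(&active_mtx);
    	old = active_flag;
    	active_flag = active;
    	pthread_mutex_unlock(&active_mtx);
    	return (old);
    }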
const char *
-prof_thread_name_get(tsd_t *tsd)
+prof_thread_name_get(void)
{
+ tsd_t *tsd;
prof_tdata_t *tdata;
+ tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, true);
if (tdata == NULL)
return ("");
@@ -2027,7 +1935,7 @@ prof_thread_name_get(tsd_t *tsd)
}
static char *
-prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name)
+prof_thread_name_alloc(tsd_t *tsd, const char *thread_name)
{
char *ret;
size_t size;
@@ -2039,8 +1947,7 @@ prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name)
if (size == 1)
return ("");
- ret = iallocztm(tsdn, size, size2index(size), false, NULL, true,
- arena_get(TSDN_NULL, 0, true), true);
+ ret = iallocztm(tsd, size, false, tcache_get(tsd, true), true, NULL);
if (ret == NULL)
return (NULL);
memcpy(ret, thread_name, size);
@@ -2067,12 +1974,13 @@ prof_thread_name_set(tsd_t *tsd, const char *thread_name)
return (EFAULT);
}
- s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name);
+ s = prof_thread_name_alloc(tsd, thread_name);
if (s == NULL)
return (EAGAIN);
if (tdata->thread_name != NULL) {
- idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, true, true);
+ idalloctm(tsd, tdata->thread_name, tcache_get(tsd, false),
+ true);
tdata->thread_name = NULL;
}
if (strlen(s) > 0)
@@ -2081,10 +1989,12 @@ prof_thread_name_set(tsd_t *tsd, const char *thread_name)
}
bool
-prof_thread_active_get(tsd_t *tsd)
+prof_thread_active_get(void)
{
+ tsd_t *tsd;
prof_tdata_t *tdata;
+ tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, true);
if (tdata == NULL)
return (false);
@@ -2092,10 +2002,12 @@ prof_thread_active_get(tsd_t *tsd)
}
bool
-prof_thread_active_set(tsd_t *tsd, bool active)
+prof_thread_active_set(bool active)
{
+ tsd_t *tsd;
prof_tdata_t *tdata;
+ tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, true);
if (tdata == NULL)
return (true);
@@ -2104,48 +2016,48 @@ prof_thread_active_set(tsd_t *tsd, bool active)
}
bool
-prof_thread_active_init_get(tsdn_t *tsdn)
+prof_thread_active_init_get(void)
{
bool active_init;
- malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
+ malloc_mutex_lock(&prof_thread_active_init_mtx);
active_init = prof_thread_active_init;
- malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
+ malloc_mutex_unlock(&prof_thread_active_init_mtx);
return (active_init);
}
bool
-prof_thread_active_init_set(tsdn_t *tsdn, bool active_init)
+prof_thread_active_init_set(bool active_init)
{
bool active_init_old;
- malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
+ malloc_mutex_lock(&prof_thread_active_init_mtx);
active_init_old = prof_thread_active_init;
prof_thread_active_init = active_init;
- malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
+ malloc_mutex_unlock(&prof_thread_active_init_mtx);
return (active_init_old);
}
bool
-prof_gdump_get(tsdn_t *tsdn)
+prof_gdump_get(void)
{
bool prof_gdump_current;
- malloc_mutex_lock(tsdn, &prof_gdump_mtx);
+ malloc_mutex_lock(&prof_gdump_mtx);
prof_gdump_current = prof_gdump_val;
- malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
+ malloc_mutex_unlock(&prof_gdump_mtx);
return (prof_gdump_current);
}
bool
-prof_gdump_set(tsdn_t *tsdn, bool gdump)
+prof_gdump_set(bool gdump)
{
bool prof_gdump_old;
- malloc_mutex_lock(tsdn, &prof_gdump_mtx);
+ malloc_mutex_lock(&prof_gdump_mtx);
prof_gdump_old = prof_gdump_val;
prof_gdump_val = gdump;
- malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
+ malloc_mutex_unlock(&prof_gdump_mtx);
return (prof_gdump_old);
}
@@ -2186,54 +2098,47 @@ prof_boot1(void)
}
bool
-prof_boot2(tsd_t *tsd)
+prof_boot2(void)
{
cassert(config_prof);
if (opt_prof) {
+ tsd_t *tsd;
unsigned i;
lg_prof_sample = opt_lg_prof_sample;
prof_active = opt_prof_active;
- if (malloc_mutex_init(&prof_active_mtx, "prof_active",
- WITNESS_RANK_PROF_ACTIVE))
+ if (malloc_mutex_init(&prof_active_mtx))
return (true);
prof_gdump_val = opt_prof_gdump;
- if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
- WITNESS_RANK_PROF_GDUMP))
+ if (malloc_mutex_init(&prof_gdump_mtx))
return (true);
prof_thread_active_init = opt_prof_thread_active_init;
- if (malloc_mutex_init(&prof_thread_active_init_mtx,
- "prof_thread_active_init",
- WITNESS_RANK_PROF_THREAD_ACTIVE_INIT))
+ if (malloc_mutex_init(&prof_thread_active_init_mtx))
return (true);
+ tsd = tsd_fetch();
if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
prof_bt_keycomp))
return (true);
- if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
- WITNESS_RANK_PROF_BT2GCTX))
+ if (malloc_mutex_init(&bt2gctx_mtx))
return (true);
tdata_tree_new(&tdatas);
- if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
- WITNESS_RANK_PROF_TDATAS))
+ if (malloc_mutex_init(&tdatas_mtx))
return (true);
next_thr_uid = 0;
- if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
- WITNESS_RANK_PROF_NEXT_THR_UID))
+ if (malloc_mutex_init(&next_thr_uid_mtx))
return (true);
- if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq",
- WITNESS_RANK_PROF_DUMP_SEQ))
+ if (malloc_mutex_init(&prof_dump_seq_mtx))
return (true);
- if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
- WITNESS_RANK_PROF_DUMP))
+ if (malloc_mutex_init(&prof_dump_mtx))
return (true);
if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
@@ -2243,23 +2148,21 @@ prof_boot2(tsd_t *tsd)
abort();
}
- gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
- PROF_NCTX_LOCKS * sizeof(malloc_mutex_t));
+ gctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS *
+ sizeof(malloc_mutex_t));
if (gctx_locks == NULL)
return (true);
for (i = 0; i < PROF_NCTX_LOCKS; i++) {
- if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
- WITNESS_RANK_PROF_GCTX))
+ if (malloc_mutex_init(&gctx_locks[i]))
return (true);
}
- tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
- PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t));
+ tdata_locks = (malloc_mutex_t *)base_alloc(PROF_NTDATA_LOCKS *
+ sizeof(malloc_mutex_t));
if (tdata_locks == NULL)
return (true);
for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
- if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
- WITNESS_RANK_PROF_TDATA))
+ if (malloc_mutex_init(&tdata_locks[i]))
return (true);
}
}
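
The prof_boot2() hunks show the malloc_mutex_init() signature change being reverted: 4.4 passed a name and a witness rank for lock-order checking, the restored code takes only the mutex. The gctx/tdata lock arrays are striped (PROF_NCTX_LOCKS/PROF_NTDATA_LOCKS entries) to spread contention, allocated once via base_alloc() and never freed. A minimal sketch of the boot pattern, with pthreads and malloc as stand-ins and NLOCKS illustrative; note jemalloc's inverted-bool convention, where true means failure:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    #define NLOCKS 1024

    static pthread_mutex_t *locks;

    static bool
    locks_boot(void)
    {
    	unsigned i;

    	locks = (pthread_mutex_t *)malloc(NLOCKS * sizeof(pthread_mutex_t));
    	if (locks == NULL)
    		return (true);	/* true == failure, as in prof_boot2(). */
    	for (i = 0; i < NLOCKS; i++) {
    		if (pthread_mutex_init(&locks[i], NULL) != 0)
    			return (true);
    	}
    	return (false);
    }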
@@ -2278,77 +2181,56 @@ prof_boot2(tsd_t *tsd)
}
void
-prof_prefork0(tsdn_t *tsdn)
+prof_prefork(void)
{
if (opt_prof) {
unsigned i;
- malloc_mutex_prefork(tsdn, &prof_dump_mtx);
- malloc_mutex_prefork(tsdn, &bt2gctx_mtx);
- malloc_mutex_prefork(tsdn, &tdatas_mtx);
- for (i = 0; i < PROF_NTDATA_LOCKS; i++)
- malloc_mutex_prefork(tsdn, &tdata_locks[i]);
+ malloc_mutex_prefork(&tdatas_mtx);
+ malloc_mutex_prefork(&bt2gctx_mtx);
+ malloc_mutex_prefork(&next_thr_uid_mtx);
+ malloc_mutex_prefork(&prof_dump_seq_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++)
- malloc_mutex_prefork(tsdn, &gctx_locks[i]);
- }
-}
-
-void
-prof_prefork1(tsdn_t *tsdn)
-{
-
- if (opt_prof) {
- malloc_mutex_prefork(tsdn, &prof_active_mtx);
- malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx);
- malloc_mutex_prefork(tsdn, &prof_gdump_mtx);
- malloc_mutex_prefork(tsdn, &next_thr_uid_mtx);
- malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx);
+ malloc_mutex_prefork(&gctx_locks[i]);
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++)
+ malloc_mutex_prefork(&tdata_locks[i]);
}
}
void
-prof_postfork_parent(tsdn_t *tsdn)
+prof_postfork_parent(void)
{
if (opt_prof) {
unsigned i;
- malloc_mutex_postfork_parent(tsdn,
- &prof_thread_active_init_mtx);
- malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx);
- malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx);
- malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx);
- malloc_mutex_postfork_parent(tsdn, &prof_active_mtx);
- for (i = 0; i < PROF_NCTX_LOCKS; i++)
- malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]);
for (i = 0; i < PROF_NTDATA_LOCKS; i++)
- malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]);
- malloc_mutex_postfork_parent(tsdn, &tdatas_mtx);
- malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx);
- malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx);
+ malloc_mutex_postfork_parent(&tdata_locks[i]);
+ for (i = 0; i < PROF_NCTX_LOCKS; i++)
+ malloc_mutex_postfork_parent(&gctx_locks[i]);
+ malloc_mutex_postfork_parent(&prof_dump_seq_mtx);
+ malloc_mutex_postfork_parent(&next_thr_uid_mtx);
+ malloc_mutex_postfork_parent(&bt2gctx_mtx);
+ malloc_mutex_postfork_parent(&tdatas_mtx);
}
}
void
-prof_postfork_child(tsdn_t *tsdn)
+prof_postfork_child(void)
{
if (opt_prof) {
unsigned i;
- malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx);
- malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx);
- malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx);
- malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx);
- malloc_mutex_postfork_child(tsdn, &prof_active_mtx);
- for (i = 0; i < PROF_NCTX_LOCKS; i++)
- malloc_mutex_postfork_child(tsdn, &gctx_locks[i]);
for (i = 0; i < PROF_NTDATA_LOCKS; i++)
- malloc_mutex_postfork_child(tsdn, &tdata_locks[i]);
- malloc_mutex_postfork_child(tsdn, &tdatas_mtx);
- malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx);
- malloc_mutex_postfork_child(tsdn, &prof_dump_mtx);
+ malloc_mutex_postfork_child(&tdata_locks[i]);
+ for (i = 0; i < PROF_NCTX_LOCKS; i++)
+ malloc_mutex_postfork_child(&gctx_locks[i]);
+ malloc_mutex_postfork_child(&prof_dump_seq_mtx);
+ malloc_mutex_postfork_child(&next_thr_uid_mtx);
+ malloc_mutex_postfork_child(&bt2gctx_mtx);
+ malloc_mutex_postfork_child(&tdatas_mtx);
}
}
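
Both versions of the fork handlers follow the standard pthread_atfork() discipline: prefork acquires every profiling mutex in a canonical order so no lock can be held mid-operation at fork() time, postfork_parent releases them in reverse order, and the child reinitializes them since any other holders no longer exist there. The revert also changes which locks participate: 4.4 took prof_dump_mtx first and handled prof_active/gdump/thread_active_init in a second prefork1 phase, all of which disappears here. A minimal two-lock sketch of the discipline (error handling omitted; reinitializing in the child is the simplest, though not strictly portable, way to drop inherited state):

    #include <pthread.h>

    static pthread_mutex_t mtx_a = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t mtx_b = PTHREAD_MUTEX_INITIALIZER;

    static void
    prefork(void)
    {
    	/* Acquire in the canonical order so fork() never races a holder. */
    	pthread_mutex_lock(&mtx_a);
    	pthread_mutex_lock(&mtx_b);
    }

    static void
    postfork_parent(void)
    {
    	/* Release in reverse acquisition order. */
    	pthread_mutex_unlock(&mtx_b);
    	pthread_mutex_unlock(&mtx_a);
    }

    static void
    postfork_child(void)
    {
    	/* The child is single-threaded; reset the locks it inherited. */
    	pthread_mutex_init(&mtx_b, NULL);
    	pthread_mutex_init(&mtx_a, NULL);
    }

    static void
    install_handlers(void)
    {
    	pthread_atfork(prefork, postfork_parent, postfork_child);
    }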
diff --git a/deps/jemalloc/src/quarantine.c b/deps/jemalloc/src/quarantine.c
index 18903fb5c..6c43dfcaa 100644
--- a/deps/jemalloc/src/quarantine.c
+++ b/deps/jemalloc/src/quarantine.c
@@ -13,22 +13,22 @@
/* Function prototypes for non-inline static functions. */
static quarantine_t *quarantine_grow(tsd_t *tsd, quarantine_t *quarantine);
-static void quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine);
-static void quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine,
+static void quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine);
+static void quarantine_drain(tsd_t *tsd, quarantine_t *quarantine,
size_t upper_bound);
/******************************************************************************/
static quarantine_t *
-quarantine_init(tsdn_t *tsdn, size_t lg_maxobjs)
+quarantine_init(tsd_t *tsd, size_t lg_maxobjs)
{
quarantine_t *quarantine;
- size_t size;
- size = offsetof(quarantine_t, objs) + ((ZU(1) << lg_maxobjs) *
- sizeof(quarantine_obj_t));
- quarantine = (quarantine_t *)iallocztm(tsdn, size, size2index(size),
- false, NULL, true, arena_get(TSDN_NULL, 0, true), true);
+ assert(tsd_nominal(tsd));
+
+ quarantine = (quarantine_t *)iallocztm(tsd, offsetof(quarantine_t, objs)
+ + ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)), false,
+ tcache_get(tsd, true), true, NULL);
if (quarantine == NULL)
return (NULL);
quarantine->curbytes = 0;
@@ -47,7 +47,7 @@ quarantine_alloc_hook_work(tsd_t *tsd)
if (!tsd_nominal(tsd))
return;
- quarantine = quarantine_init(tsd_tsdn(tsd), LG_MAXOBJS_INIT);
+ quarantine = quarantine_init(tsd, LG_MAXOBJS_INIT);
/*
* Check again whether quarantine has been initialized, because
* quarantine_init() may have triggered recursive initialization.
@@ -55,7 +55,7 @@ quarantine_alloc_hook_work(tsd_t *tsd)
if (tsd_quarantine_get(tsd) == NULL)
tsd_quarantine_set(tsd, quarantine);
else
- idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
+ idalloctm(tsd, quarantine, tcache_get(tsd, false), true);
}
static quarantine_t *
@@ -63,9 +63,9 @@ quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
{
quarantine_t *ret;
- ret = quarantine_init(tsd_tsdn(tsd), quarantine->lg_maxobjs + 1);
+ ret = quarantine_init(tsd, quarantine->lg_maxobjs + 1);
if (ret == NULL) {
- quarantine_drain_one(tsd_tsdn(tsd), quarantine);
+ quarantine_drain_one(tsd, quarantine);
return (quarantine);
}
@@ -87,18 +87,18 @@ quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
sizeof(quarantine_obj_t));
}
- idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
+ idalloctm(tsd, quarantine, tcache_get(tsd, false), true);
tsd_quarantine_set(tsd, ret);
return (ret);
}
static void
-quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine)
+quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine)
{
quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
- assert(obj->usize == isalloc(tsdn, obj->ptr, config_prof));
- idalloctm(tsdn, obj->ptr, NULL, false, true);
+ assert(obj->usize == isalloc(obj->ptr, config_prof));
+ idalloctm(tsd, obj->ptr, NULL, false);
quarantine->curbytes -= obj->usize;
quarantine->curobjs--;
quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
@@ -106,24 +106,24 @@ quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine)
}
static void
-quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine, size_t upper_bound)
+quarantine_drain(tsd_t *tsd, quarantine_t *quarantine, size_t upper_bound)
{
while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0)
- quarantine_drain_one(tsdn, quarantine);
+ quarantine_drain_one(tsd, quarantine);
}
void
quarantine(tsd_t *tsd, void *ptr)
{
quarantine_t *quarantine;
- size_t usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
+ size_t usize = isalloc(ptr, config_prof);
cassert(config_fill);
assert(opt_quarantine);
if ((quarantine = tsd_quarantine_get(tsd)) == NULL) {
- idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true);
+ idalloctm(tsd, ptr, NULL, false);
return;
}
/*
@@ -133,7 +133,7 @@ quarantine(tsd_t *tsd, void *ptr)
if (quarantine->curbytes + usize > opt_quarantine) {
size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
- usize : 0;
- quarantine_drain(tsd_tsdn(tsd), quarantine, upper_bound);
+ quarantine_drain(tsd, quarantine, upper_bound);
}
/* Grow the quarantine ring buffer if it's full. */
if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
@@ -158,11 +158,11 @@ quarantine(tsd_t *tsd, void *ptr)
&& usize <= SMALL_MAXCLASS)
arena_quarantine_junk_small(ptr, usize);
else
- memset(ptr, JEMALLOC_FREE_JUNK, usize);
+ memset(ptr, 0x5a, usize);
}
} else {
assert(quarantine->curbytes == 0);
- idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true);
+ idalloctm(tsd, ptr, NULL, false);
}
}
@@ -176,8 +176,8 @@ quarantine_cleanup(tsd_t *tsd)
quarantine = tsd_quarantine_get(tsd);
if (quarantine != NULL) {
- quarantine_drain(tsd_tsdn(tsd), quarantine, 0);
- idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
+ quarantine_drain(tsd, quarantine, 0);
+ idalloctm(tsd, quarantine, tcache_get(tsd, false), true);
tsd_quarantine_set(tsd, NULL);
}
}
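
The quarantine is a power-of-two ring buffer of deferred frees: quarantine_init() sizes the header plus slot array with offsetof(), and draining pops the oldest entry, wrapping the index with a mask rather than a modulo. A standalone sketch of that layout (qobj_t/quarantine_t here are simplified stand-ins; the real code frees through idalloctm(), not free()):

    #include <stddef.h>
    #include <stdlib.h>

    typedef struct {
    	void *ptr;
    	size_t usize;
    } qobj_t;

    typedef struct {
    	size_t curbytes;
    	size_t curobjs;
    	size_t first;		/* Index of the oldest object. */
    	size_t lg_maxobjs;	/* Capacity is 1 << lg_maxobjs. */
    	qobj_t objs[1];		/* C89-style flexible array. */
    } quarantine_t;

    static quarantine_t *
    q_init(size_t lg_maxobjs)
    {
    	size_t size = offsetof(quarantine_t, objs) +
    	    (((size_t)1 << lg_maxobjs) * sizeof(qobj_t));
    	quarantine_t *q = (quarantine_t *)calloc(1, size);

    	if (q != NULL)
    		q->lg_maxobjs = lg_maxobjs;
    	return (q);
    }

    static void
    q_pop_oldest(quarantine_t *q)
    {
    	qobj_t *obj = &q->objs[q->first];

    	free(obj->ptr);		/* Real code defers via idalloctm(). */
    	q->curbytes -= obj->usize;
    	q->curobjs--;
    	q->first = (q->first + 1) & (((size_t)1 << q->lg_maxobjs) - 1);
    }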
diff --git a/deps/jemalloc/src/rtree.c b/deps/jemalloc/src/rtree.c
index f2e2997d5..af0d97e75 100644
--- a/deps/jemalloc/src/rtree.c
+++ b/deps/jemalloc/src/rtree.c
@@ -15,8 +15,6 @@ rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
{
unsigned bits_in_leaf, height, i;
- assert(RTREE_HEIGHT_MAX == ((ZU(1) << (LG_SIZEOF_PTR+3)) /
- RTREE_BITS_PER_LEVEL));
assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3));
bits_in_leaf = (bits % RTREE_BITS_PER_LEVEL) == 0 ? RTREE_BITS_PER_LEVEL
@@ -96,15 +94,12 @@ rtree_node_init(rtree_t *rtree, unsigned level, rtree_node_elm_t **elmp)
rtree_node_elm_t *node;
if (atomic_cas_p((void **)elmp, NULL, RTREE_NODE_INITIALIZING)) {
- spin_t spinner;
-
/*
* Another thread is already in the process of initializing.
* Spin-wait until initialization is complete.
*/
- spin_init(&spinner);
do {
- spin_adaptive(&spinner);
+ CPU_SPINWAIT;
node = atomic_read_p((void **)elmp);
} while (node == RTREE_NODE_INITIALIZING);
} else {
@@ -128,5 +123,5 @@ rtree_node_elm_t *
rtree_child_read_hard(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level)
{
- return (rtree_node_init(rtree, level+1, &elm->child));
+ return (rtree_node_init(rtree, level, &elm->child));
}
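
rtree_node_init() resolves a racy lazy initialization: one thread wins a CAS that stores an INITIALIZING sentinel into the slot, builds the node, and publishes it; losers busy-wait until the sentinel is replaced. The revert swaps 4.4's adaptive backoff (spin_adaptive()) for a plain CPU_SPINWAIT pause per iteration. The same race restated with C11 atomics (SENTINEL stands in for RTREE_NODE_INITIALIZING; error handling on make() is omitted):

    #include <stdatomic.h>
    #include <stdlib.h>

    #define SENTINEL ((void *)1)

    static void *
    init_once(_Atomic(void *) *slotp, void *(*make)(void))
    {
    	void *expected = NULL;
    	void *node;

    	if (atomic_compare_exchange_strong(slotp, &expected, SENTINEL)) {
    		node = make();			/* We won; build it. */
    		atomic_store(slotp, node);	/* Publish. */
    	} else {
    		do {				/* Lost; spin until published. */
    			node = atomic_load(slotp);
    		} while (node == SENTINEL);
    	}
    	return (node);
    }

Note the restored rtree_child_read_hard() also passes level rather than level+1, matching the 4.0-era indexing convention this revert reinstates.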
diff --git a/deps/jemalloc/src/spin.c b/deps/jemalloc/src/spin.c
deleted file mode 100644
index 5242d95aa..000000000
--- a/deps/jemalloc/src/spin.c
+++ /dev/null
@@ -1,2 +0,0 @@
-#define JEMALLOC_SPIN_C_
-#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/deps/jemalloc/src/stats.c b/deps/jemalloc/src/stats.c
old mode 100755
new mode 100644
index 1360f3bd0..154c3e74c
--- a/deps/jemalloc/src/stats.c
+++ b/deps/jemalloc/src/stats.c
@@ -3,7 +3,7 @@
#define CTL_GET(n, v, t) do { \
size_t sz = sizeof(t); \
- xmallctl(n, (void *)v, &sz, NULL, 0); \
+ xmallctl(n, v, &sz, NULL, 0); \
} while (0)
#define CTL_M2_GET(n, i, v, t) do { \
@@ -12,7 +12,7 @@
size_t sz = sizeof(t); \
xmallctlnametomib(n, mib, &miblen); \
mib[2] = (i); \
- xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \
+ xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)
#define CTL_M2_M4_GET(n, i, j, v, t) do { \
@@ -22,7 +22,7 @@
xmallctlnametomib(n, mib, &miblen); \
mib[2] = (i); \
mib[4] = (j); \
- xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \
+ xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)
/******************************************************************************/
@@ -33,106 +33,85 @@ bool opt_stats_print = false;
size_t stats_cactive = 0;
/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static void stats_arena_bins_print(void (*write_cb)(void *, const char *),
+ void *cbopaque, unsigned i);
+static void stats_arena_lruns_print(void (*write_cb)(void *, const char *),
+ void *cbopaque, unsigned i);
+static void stats_arena_hchunks_print(
+ void (*write_cb)(void *, const char *), void *cbopaque, unsigned i);
+static void stats_arena_print(void (*write_cb)(void *, const char *),
+ void *cbopaque, unsigned i, bool bins, bool large, bool huge);
+
+/******************************************************************************/
static void
stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
- bool json, bool large, bool huge, unsigned i)
+ unsigned i)
{
size_t page;
- bool config_tcache, in_gap, in_gap_prev;
+ bool config_tcache, in_gap;
unsigned nbins, j;
CTL_GET("arenas.page", &page, size_t);
- CTL_GET("arenas.nbins", &nbins, unsigned);
- if (json) {
+ CTL_GET("config.tcache", &config_tcache, bool);
+ if (config_tcache) {
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"bins\": [\n");
+ "bins: size ind allocated nmalloc"
+ " ndalloc nrequests curregs curruns regs"
+ " pgs util nfills nflushes newruns"
+ " reruns\n");
} else {
- CTL_GET("config.tcache", &config_tcache, bool);
- if (config_tcache) {
- malloc_cprintf(write_cb, cbopaque,
- "bins: size ind allocated nmalloc"
- " ndalloc nrequests curregs"
- " curruns regs pgs util nfills"
- " nflushes newruns reruns\n");
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "bins: size ind allocated nmalloc"
- " ndalloc nrequests curregs"
- " curruns regs pgs util newruns"
- " reruns\n");
- }
+ malloc_cprintf(write_cb, cbopaque,
+ "bins: size ind allocated nmalloc"
+ " ndalloc nrequests curregs curruns regs"
+ " pgs util newruns reruns\n");
}
+ CTL_GET("arenas.nbins", &nbins, unsigned);
for (j = 0, in_gap = false; j < nbins; j++) {
uint64_t nruns;
- size_t reg_size, run_size, curregs;
- size_t curruns;
- uint32_t nregs;
- uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
- uint64_t nreruns;
CTL_M2_M4_GET("stats.arenas.0.bins.0.nruns", i, j, &nruns,
uint64_t);
- in_gap_prev = in_gap;
- in_gap = (nruns == 0);
-
- if (!json && in_gap_prev && !in_gap) {
- malloc_cprintf(write_cb, cbopaque,
- " ---\n");
- }
-
- CTL_M2_GET("arenas.bin.0.size", j, &reg_size, size_t);
- CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t);
- CTL_M2_GET("arenas.bin.0.run_size", j, &run_size, size_t);
-
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, &nmalloc,
- uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, &ndalloc,
- uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, &curregs,
- size_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j,
- &nrequests, uint64_t);
- if (config_tcache) {
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j,
- &nfills, uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j,
- &nflushes, uint64_t);
- }
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nreruns", i, j, &nreruns,
- uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.curruns", i, j, &curruns,
- size_t);
+ if (nruns == 0)
+ in_gap = true;
+ else {
+ size_t reg_size, run_size, curregs, availregs, milli;
+ size_t curruns;
+ uint32_t nregs;
+ uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
+ uint64_t reruns;
+ char util[6]; /* "x.yyy". */
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t{\n"
- "\t\t\t\t\t\t\"nmalloc\": %"FMTu64",\n"
- "\t\t\t\t\t\t\"ndalloc\": %"FMTu64",\n"
- "\t\t\t\t\t\t\"curregs\": %zu,\n"
- "\t\t\t\t\t\t\"nrequests\": %"FMTu64",\n",
- nmalloc,
- ndalloc,
- curregs,
- nrequests);
- if (config_tcache) {
+ if (in_gap) {
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\t\"nfills\": %"FMTu64",\n"
- "\t\t\t\t\t\t\"nflushes\": %"FMTu64",\n",
- nfills,
- nflushes);
+ " ---\n");
+ in_gap = false;
}
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\t\"nreruns\": %"FMTu64",\n"
- "\t\t\t\t\t\t\"curruns\": %zu\n"
- "\t\t\t\t\t}%s\n",
- nreruns,
- curruns,
- (j + 1 < nbins) ? "," : "");
- } else if (!in_gap) {
- size_t availregs, milli;
- char util[6]; /* "x.yyy". */
+ CTL_M2_GET("arenas.bin.0.size", j, &reg_size, size_t);
+ CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t);
+ CTL_M2_GET("arenas.bin.0.run_size", j, &run_size,
+ size_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j,
+ &nmalloc, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j,
+ &ndalloc, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j,
+ &curregs, size_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j,
+ &nrequests, uint64_t);
+ if (config_tcache) {
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i,
+ j, &nfills, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes",
+ i, j, &nflushes, uint64_t);
+ }
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nreruns", i, j,
+ &reruns, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.curruns", i, j,
+ &curruns, size_t);
availregs = nregs * curruns;
milli = (availregs != 0) ? (1000 * curregs) / availregs
@@ -159,7 +138,7 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
reg_size, j, curregs * reg_size, nmalloc,
ndalloc, nrequests, curregs, curruns, nregs,
run_size / page, util, nfills, nflushes,
- nruns, nreruns);
+ nruns, reruns);
} else {
malloc_cprintf(write_cb, cbopaque,
"%20zu %3u %12zu %12"FMTu64
@@ -168,38 +147,28 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
" %12"FMTu64"\n",
reg_size, j, curregs * reg_size, nmalloc,
ndalloc, nrequests, curregs, curruns, nregs,
- run_size / page, util, nruns, nreruns);
+ run_size / page, util, nruns, reruns);
}
}
}
- if (json) {
+ if (in_gap) {
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t]%s\n", (large || huge) ? "," : "");
- } else {
- if (in_gap) {
- malloc_cprintf(write_cb, cbopaque,
- " ---\n");
- }
+ " ---\n");
}
}
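
The bin utilization column is computed without floating point, which the allocator avoids: the ratio curregs/availregs is scaled to thousandths (milli) and split into whole and fractional digits to fill the 6-byte "x.yyy" buffer. An illustrative version of that formatting (the exact rounding of the real code may differ slightly):

    #include <stddef.h>
    #include <stdio.h>

    static void
    util_str(char util[6], size_t curregs, size_t availregs)
    {
    	size_t milli = (availregs != 0) ? (1000 * curregs) / availregs : 1000;

    	if (milli >= 1000)
    		snprintf(util, 6, "1");		/* Fully utilized. */
    	else if (milli < 10)
    		snprintf(util, 6, "0.00%zu", milli);
    	else if (milli < 100)
    		snprintf(util, 6, "0.0%zu", milli);
    	else
    		snprintf(util, 6, "0.%zu", milli);
    }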
static void
stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
- bool json, bool huge, unsigned i)
+ unsigned i)
{
unsigned nbins, nlruns, j;
- bool in_gap, in_gap_prev;
+ bool in_gap;
+ malloc_cprintf(write_cb, cbopaque,
+ "large: size ind allocated nmalloc ndalloc"
+ " nrequests curruns\n");
CTL_GET("arenas.nbins", &nbins, unsigned);
CTL_GET("arenas.nlruns", &nlruns, unsigned);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"lruns\": [\n");
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "large: size ind allocated nmalloc"
- " ndalloc nrequests curruns\n");
- }
for (j = 0, in_gap = false; j < nlruns; j++) {
uint64_t nmalloc, ndalloc, nrequests;
size_t run_size, curruns;
@@ -210,25 +179,17 @@ stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
uint64_t);
CTL_M2_M4_GET("stats.arenas.0.lruns.0.nrequests", i, j,
&nrequests, uint64_t);
- in_gap_prev = in_gap;
- in_gap = (nrequests == 0);
-
- if (!json && in_gap_prev && !in_gap) {
- malloc_cprintf(write_cb, cbopaque,
- " ---\n");
- }
-
- CTL_M2_GET("arenas.lrun.0.size", j, &run_size, size_t);
- CTL_M2_M4_GET("stats.arenas.0.lruns.0.curruns", i, j, &curruns,
- size_t);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t{\n"
- "\t\t\t\t\t\t\"curruns\": %zu\n"
- "\t\t\t\t\t}%s\n",
- curruns,
- (j + 1 < nlruns) ? "," : "");
- } else if (!in_gap) {
+ if (nrequests == 0)
+ in_gap = true;
+ else {
+ CTL_M2_GET("arenas.lrun.0.size", j, &run_size, size_t);
+ CTL_M2_M4_GET("stats.arenas.0.lruns.0.curruns", i, j,
+ &curruns, size_t);
+ if (in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ " ---\n");
+ in_gap = false;
+ }
malloc_cprintf(write_cb, cbopaque,
"%20zu %3u %12zu %12"FMTu64" %12"FMTu64
" %12"FMTu64" %12zu\n",
@@ -236,35 +197,25 @@ stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
ndalloc, nrequests, curruns);
}
}
- if (json) {
+ if (in_gap) {
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t]%s\n", huge ? "," : "");
- } else {
- if (in_gap) {
- malloc_cprintf(write_cb, cbopaque,
- " ---\n");
- }
+ " ---\n");
}
}
static void
stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
- void *cbopaque, bool json, unsigned i)
+ void *cbopaque, unsigned i)
{
unsigned nbins, nlruns, nhchunks, j;
- bool in_gap, in_gap_prev;
+ bool in_gap;
+ malloc_cprintf(write_cb, cbopaque,
+ "huge: size ind allocated nmalloc ndalloc"
+ " nrequests curhchunks\n");
CTL_GET("arenas.nbins", &nbins, unsigned);
CTL_GET("arenas.nlruns", &nlruns, unsigned);
CTL_GET("arenas.nhchunks", &nhchunks, unsigned);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"hchunks\": [\n");
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "huge: size ind allocated nmalloc"
- " ndalloc nrequests curhchunks\n");
- }
for (j = 0, in_gap = false; j < nhchunks; j++) {
uint64_t nmalloc, ndalloc, nrequests;
size_t hchunk_size, curhchunks;
@@ -275,25 +226,18 @@ stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
&ndalloc, uint64_t);
CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nrequests", i, j,
&nrequests, uint64_t);
- in_gap_prev = in_gap;
- in_gap = (nrequests == 0);
-
- if (!json && in_gap_prev && !in_gap) {
- malloc_cprintf(write_cb, cbopaque,
- " ---\n");
- }
-
- CTL_M2_GET("arenas.hchunk.0.size", j, &hchunk_size, size_t);
- CTL_M2_M4_GET("stats.arenas.0.hchunks.0.curhchunks", i, j,
- &curhchunks, size_t);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t{\n"
- "\t\t\t\t\t\t\"curhchunks\": %zu\n"
- "\t\t\t\t\t}%s\n",
- curhchunks,
- (j + 1 < nhchunks) ? "," : "");
- } else if (!in_gap) {
+ if (nrequests == 0)
+ in_gap = true;
+ else {
+ CTL_M2_GET("arenas.hchunk.0.size", j, &hchunk_size,
+ size_t);
+ CTL_M2_M4_GET("stats.arenas.0.hchunks.0.curhchunks", i,
+ j, &curhchunks, size_t);
+ if (in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ " ---\n");
+ in_gap = false;
+ }
malloc_cprintf(write_cb, cbopaque,
"%20zu %3u %12zu %12"FMTu64" %12"FMTu64
" %12"FMTu64" %12zu\n",
@@ -302,25 +246,20 @@ stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
nrequests, curhchunks);
}
}
- if (json) {
+ if (in_gap) {
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t]\n");
- } else {
- if (in_gap) {
- malloc_cprintf(write_cb, cbopaque,
- " ---\n");
- }
+ " ---\n");
}
}
static void
stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
- bool json, unsigned i, bool bins, bool large, bool huge)
+ unsigned i, bool bins, bool large, bool huge)
{
unsigned nthreads;
const char *dss;
- ssize_t lg_dirty_mult, decay_time;
- size_t page, pactive, pdirty, mapped, retained;
+ ssize_t lg_dirty_mult;
+ size_t page, pactive, pdirty, mapped;
size_t metadata_mapped, metadata_allocated;
uint64_t npurge, nmadvise, purged;
size_t small_allocated;
@@ -333,435 +272,240 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
CTL_GET("arenas.page", &page, size_t);
CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"nthreads\": %u,\n", nthreads);
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "assigned threads: %u\n", nthreads);
- }
-
+ malloc_cprintf(write_cb, cbopaque,
+ "assigned threads: %u\n", nthreads);
CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"dss\": \"%s\",\n", dss);
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "dss allocation precedence: %s\n", dss);
- }
-
+ malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n",
+ dss);
CTL_M2_GET("stats.arenas.0.lg_dirty_mult", i, &lg_dirty_mult, ssize_t);
- if (json) {
+ if (lg_dirty_mult >= 0) {
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"lg_dirty_mult\": %zd,\n", lg_dirty_mult);
+ "min active:dirty page ratio: %u:1\n",
+ (1U << lg_dirty_mult));
} else {
- if (opt_purge == purge_mode_ratio) {
- if (lg_dirty_mult >= 0) {
- malloc_cprintf(write_cb, cbopaque,
- "min active:dirty page ratio: %u:1\n",
- (1U << lg_dirty_mult));
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "min active:dirty page ratio: N/A\n");
- }
- }
- }
-
- CTL_M2_GET("stats.arenas.0.decay_time", i, &decay_time, ssize_t);
- if (json) {
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"decay_time\": %zd,\n", decay_time);
- } else {
- if (opt_purge == purge_mode_decay) {
- if (decay_time >= 0) {
- malloc_cprintf(write_cb, cbopaque,
- "decay time: %zd\n", decay_time);
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "decay time: N/A\n");
- }
- }
+ "min active:dirty page ratio: N/A\n");
}
-
CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t);
CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t);
CTL_M2_GET("stats.arenas.0.npurge", i, &npurge, uint64_t);
CTL_M2_GET("stats.arenas.0.nmadvise", i, &nmadvise, uint64_t);
CTL_M2_GET("stats.arenas.0.purged", i, &purged, uint64_t);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"pactive\": %zu,\n", pactive);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"pdirty\": %zu,\n", pdirty);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"npurge\": %"FMTu64",\n", npurge);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"nmadvise\": %"FMTu64",\n", nmadvise);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"purged\": %"FMTu64",\n", purged);
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "purging: dirty: %zu, sweeps: %"FMTu64", madvises: %"FMTu64
- ", purged: %"FMTu64"\n", pdirty, npurge, nmadvise, purged);
- }
-
+ malloc_cprintf(write_cb, cbopaque,
+ "dirty pages: %zu:%zu active:dirty, %"FMTu64" sweep%s, %"FMTu64
+ " madvise%s, %"FMTu64" purged\n", pactive, pdirty, npurge, npurge ==
+ 1 ? "" : "s", nmadvise, nmadvise == 1 ? "" : "s", purged);
+
+ malloc_cprintf(write_cb, cbopaque,
+ " allocated nmalloc ndalloc"
+ " nrequests\n");
CTL_M2_GET("stats.arenas.0.small.allocated", i, &small_allocated,
size_t);
CTL_M2_GET("stats.arenas.0.small.nmalloc", i, &small_nmalloc, uint64_t);
CTL_M2_GET("stats.arenas.0.small.ndalloc", i, &small_ndalloc, uint64_t);
CTL_M2_GET("stats.arenas.0.small.nrequests", i, &small_nrequests,
uint64_t);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"small\": {\n");
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"allocated\": %zu,\n", small_allocated);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", small_nmalloc);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", small_ndalloc);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", small_nrequests);
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t},\n");
- } else {
- malloc_cprintf(write_cb, cbopaque,
- " allocated nmalloc"
- " ndalloc nrequests\n");
- malloc_cprintf(write_cb, cbopaque,
- "small: %12zu %12"FMTu64" %12"FMTu64
- " %12"FMTu64"\n",
- small_allocated, small_nmalloc, small_ndalloc,
- small_nrequests);
- }
-
+ malloc_cprintf(write_cb, cbopaque,
+ "small: %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64"\n",
+ small_allocated, small_nmalloc, small_ndalloc, small_nrequests);
CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated,
size_t);
CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t);
CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t);
CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests,
uint64_t);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"large\": {\n");
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"allocated\": %zu,\n", large_allocated);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", large_nmalloc);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", large_ndalloc);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", large_nrequests);
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t},\n");
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "large: %12zu %12"FMTu64" %12"FMTu64
- " %12"FMTu64"\n",
- large_allocated, large_nmalloc, large_ndalloc,
- large_nrequests);
- }
-
+ malloc_cprintf(write_cb, cbopaque,
+ "large: %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64"\n",
+ large_allocated, large_nmalloc, large_ndalloc, large_nrequests);
CTL_M2_GET("stats.arenas.0.huge.allocated", i, &huge_allocated, size_t);
CTL_M2_GET("stats.arenas.0.huge.nmalloc", i, &huge_nmalloc, uint64_t);
CTL_M2_GET("stats.arenas.0.huge.ndalloc", i, &huge_ndalloc, uint64_t);
CTL_M2_GET("stats.arenas.0.huge.nrequests", i, &huge_nrequests,
uint64_t);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"huge\": {\n");
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"allocated\": %zu,\n", huge_allocated);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", huge_nmalloc);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", huge_ndalloc);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", huge_nrequests);
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t},\n");
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "huge: %12zu %12"FMTu64" %12"FMTu64
- " %12"FMTu64"\n",
- huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests);
- malloc_cprintf(write_cb, cbopaque,
- "total: %12zu %12"FMTu64" %12"FMTu64
- " %12"FMTu64"\n",
- small_allocated + large_allocated + huge_allocated,
- small_nmalloc + large_nmalloc + huge_nmalloc,
- small_ndalloc + large_ndalloc + huge_ndalloc,
- small_nrequests + large_nrequests + huge_nrequests);
- }
- if (!json) {
- malloc_cprintf(write_cb, cbopaque,
- "active: %12zu\n", pactive * page);
- }
-
+ malloc_cprintf(write_cb, cbopaque,
+ "huge: %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64"\n",
+ huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests);
+ malloc_cprintf(write_cb, cbopaque,
+ "total: %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64"\n",
+ small_allocated + large_allocated + huge_allocated,
+ small_nmalloc + large_nmalloc + huge_nmalloc,
+ small_ndalloc + large_ndalloc + huge_ndalloc,
+ small_nrequests + large_nrequests + huge_nrequests);
+ malloc_cprintf(write_cb, cbopaque,
+ "active: %12zu\n", pactive * page);
CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"mapped\": %zu,\n", mapped);
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "mapped: %12zu\n", mapped);
- }
-
- CTL_M2_GET("stats.arenas.0.retained", i, &retained, size_t);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"retained\": %zu,\n", retained);
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "retained: %12zu\n", retained);
- }
-
+ malloc_cprintf(write_cb, cbopaque,
+ "mapped: %12zu\n", mapped);
CTL_M2_GET("stats.arenas.0.metadata.mapped", i, &metadata_mapped,
size_t);
CTL_M2_GET("stats.arenas.0.metadata.allocated", i, &metadata_allocated,
size_t);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"metadata\": {\n");
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"mapped\": %zu,\n", metadata_mapped);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"allocated\": %zu\n", metadata_allocated);
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t},\n");
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "metadata: mapped: %zu, allocated: %zu\n",
- metadata_mapped, metadata_allocated);
- }
+ malloc_cprintf(write_cb, cbopaque,
+ "metadata: mapped: %zu, allocated: %zu\n",
+ metadata_mapped, metadata_allocated);
- if (bins) {
- stats_arena_bins_print(write_cb, cbopaque, json, large, huge,
- i);
- }
+ if (bins)
+ stats_arena_bins_print(write_cb, cbopaque, i);
if (large)
- stats_arena_lruns_print(write_cb, cbopaque, json, huge, i);
+ stats_arena_lruns_print(write_cb, cbopaque, i);
if (huge)
- stats_arena_hchunks_print(write_cb, cbopaque, json, i);
+ stats_arena_hchunks_print(write_cb, cbopaque, i);
}
-static void
-stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
- bool json, bool merged, bool unmerged)
+void
+stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *opts)
{
- const char *cpv;
- bool bv;
- unsigned uv;
- uint32_t u32v;
- uint64_t u64v;
- ssize_t ssv;
- size_t sv, bsz, usz, ssz, sssz, cpsz;
+ int err;
+ uint64_t epoch;
+ size_t u64sz;
+ bool general = true;
+ bool merged = true;
+ bool unmerged = true;
+ bool bins = true;
+ bool large = true;
+ bool huge = true;
- bsz = sizeof(bool);
- usz = sizeof(unsigned);
- ssz = sizeof(size_t);
- sssz = sizeof(ssize_t);
- cpsz = sizeof(const char *);
+ /*
+ * Refresh stats, in case mallctl() was called by the application.
+ *
+ * Check for OOM here, since refreshing the ctl cache can trigger
+ * allocation. In practice, none of the subsequent mallctl()-related
+ * calls in this function will cause OOM if this one succeeds.
+	 */
+ epoch = 1;
+ u64sz = sizeof(uint64_t);
+ err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t));
+ if (err != 0) {
+ if (err == EAGAIN) {
+ malloc_write("<jemalloc>: Memory allocation failure in "
+ "mallctl(\"epoch\", ...)\n");
+ return;
+ }
+ malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
+ "...)\n");
+ abort();
+ }
- CTL_GET("version", &cpv, const char *);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\"version\": \"%s\",\n", cpv);
- } else
- malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv);
+ if (opts != NULL) {
+ unsigned i;
- /* config. */
-#define CONFIG_WRITE_BOOL_JSON(n, c) \
- if (json) { \
- CTL_GET("config."#n, &bv, bool); \
- malloc_cprintf(write_cb, cbopaque, \
- "\t\t\t\""#n"\": %s%s\n", bv ? "true" : "false", \
- (c)); \
+ for (i = 0; opts[i] != '\0'; i++) {
+ switch (opts[i]) {
+ case 'g':
+ general = false;
+ break;
+ case 'm':
+ merged = false;
+ break;
+ case 'a':
+ unmerged = false;
+ break;
+ case 'b':
+ bins = false;
+ break;
+ case 'l':
+ large = false;
+ break;
+ case 'h':
+ huge = false;
+ break;
+ default:;
+ }
+ }
}
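
The restored stats_print() parses its opts string as a set of opt-out flags: everything prints by default, and each recognized letter ('g' general, 'm' merged, 'a' per-arena, 'b' bins, 'l' large, 'h' huge) suppresses one section; unknown letters are silently ignored. A standalone version of that parsing (stats_opts_t is an illustrative wrapper, not a jemalloc type):

    #include <stdbool.h>

    typedef struct {
    	bool general, merged, unmerged, bins, large, huge;
    } stats_opts_t;

    static void
    parse_stats_opts(const char *opts, stats_opts_t *o)
    {
    	unsigned i;

    	o->general = o->merged = o->unmerged = true;
    	o->bins = o->large = o->huge = true;
    	if (opts == NULL)
    		return;
    	for (i = 0; opts[i] != '\0'; i++) {
    		switch (opts[i]) {
    		case 'g': o->general = false; break;
    		case 'm': o->merged = false; break;
    		case 'a': o->unmerged = false; break;
    		case 'b': o->bins = false; break;
    		case 'l': o->large = false; break;
    		case 'h': o->huge = false; break;
    		default:;	/* Ignore unrecognized characters. */
    		}
    	}
    }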
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\"config\": {\n");
- }
+ malloc_cprintf(write_cb, cbopaque,
+ "___ Begin jemalloc statistics ___\n");
+ if (general) {
+ const char *cpv;
+ bool bv;
+ unsigned uv;
+ ssize_t ssv;
+ size_t sv, bsz, ssz, sssz, cpsz;
- CONFIG_WRITE_BOOL_JSON(cache_oblivious, ",")
+ bsz = sizeof(bool);
+ ssz = sizeof(size_t);
+ sssz = sizeof(ssize_t);
+ cpsz = sizeof(const char *);
- CTL_GET("config.debug", &bv, bool);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"debug\": %s,\n", bv ? "true" : "false");
- } else {
+ CTL_GET("version", &cpv, const char *);
+ malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv);
+ CTL_GET("config.debug", &bv, bool);
malloc_cprintf(write_cb, cbopaque, "Assertions %s\n",
bv ? "enabled" : "disabled");
- }
-
- CONFIG_WRITE_BOOL_JSON(fill, ",")
- CONFIG_WRITE_BOOL_JSON(lazy_lock, ",")
-
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"malloc_conf\": \"%s\",\n",
- config_malloc_conf);
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "config.malloc_conf: \"%s\"\n", config_malloc_conf);
- }
- CONFIG_WRITE_BOOL_JSON(munmap, ",")
- CONFIG_WRITE_BOOL_JSON(prof, ",")
- CONFIG_WRITE_BOOL_JSON(prof_libgcc, ",")
- CONFIG_WRITE_BOOL_JSON(prof_libunwind, ",")
- CONFIG_WRITE_BOOL_JSON(stats, ",")
- CONFIG_WRITE_BOOL_JSON(tcache, ",")
- CONFIG_WRITE_BOOL_JSON(tls, ",")
- CONFIG_WRITE_BOOL_JSON(utrace, ",")
- CONFIG_WRITE_BOOL_JSON(valgrind, ",")
- CONFIG_WRITE_BOOL_JSON(xmalloc, "")
-
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t},\n");
- }
-#undef CONFIG_WRITE_BOOL_JSON
-
- /* opt. */
-#define OPT_WRITE_BOOL(n, c) \
- if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0) { \
- if (json) { \
- malloc_cprintf(write_cb, cbopaque, \
- "\t\t\t\""#n"\": %s%s\n", bv ? "true" : \
- "false", (c)); \
- } else { \
+#define OPT_WRITE_BOOL(n) \
+ if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %s\n", bv ? "true" : "false"); \
- } \
- }
-#define OPT_WRITE_BOOL_MUTABLE(n, m, c) { \
- bool bv2; \
- if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0 && \
- je_mallctl(#m, &bv2, (void *)&bsz, NULL, 0) == 0) { \
- if (json) { \
- malloc_cprintf(write_cb, cbopaque, \
- "\t\t\t\""#n"\": %s%s\n", bv ? "true" : \
- "false", (c)); \
- } else { \
+ }
+#define OPT_WRITE_BOOL_MUTABLE(n, m) { \
+ bool bv2; \
+ if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0 && \
+ je_mallctl(#m, &bv2, &bsz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %s ("#m": %s)\n", bv ? "true" \
: "false", bv2 ? "true" : "false"); \
} \
- } \
}
-#define OPT_WRITE_UNSIGNED(n, c) \
- if (je_mallctl("opt."#n, (void *)&uv, &usz, NULL, 0) == 0) { \
- if (json) { \
- malloc_cprintf(write_cb, cbopaque, \
- "\t\t\t\""#n"\": %u%s\n", uv, (c)); \
- } else { \
- malloc_cprintf(write_cb, cbopaque, \
- " opt."#n": %u\n", uv); \
- } \
- }
-#define OPT_WRITE_SIZE_T(n, c) \
- if (je_mallctl("opt."#n, (void *)&sv, &ssz, NULL, 0) == 0) { \
- if (json) { \
- malloc_cprintf(write_cb, cbopaque, \
- "\t\t\t\""#n"\": %zu%s\n", sv, (c)); \
- } else { \
+#define OPT_WRITE_SIZE_T(n) \
+ if (je_mallctl("opt."#n, &sv, &ssz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zu\n", sv); \
- } \
- }
-#define OPT_WRITE_SSIZE_T(n, c) \
- if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0) { \
- if (json) { \
- malloc_cprintf(write_cb, cbopaque, \
- "\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \
- } else { \
+ }
+#define OPT_WRITE_SSIZE_T(n) \
+ if (je_mallctl("opt."#n, &ssv, &sssz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zd\n", ssv); \
- } \
- }
-#define OPT_WRITE_SSIZE_T_MUTABLE(n, m, c) { \
- ssize_t ssv2; \
- if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0 && \
- je_mallctl(#m, (void *)&ssv2, &sssz, NULL, 0) == 0) { \
- if (json) { \
- malloc_cprintf(write_cb, cbopaque, \
- "\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \
- } else { \
+ }
+#define OPT_WRITE_SSIZE_T_MUTABLE(n, m) { \
+ ssize_t ssv2; \
+ if (je_mallctl("opt."#n, &ssv, &sssz, NULL, 0) == 0 && \
+ je_mallctl(#m, &ssv2, &sssz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zd ("#m": %zd)\n", \
ssv, ssv2); \
} \
- } \
}
-#define OPT_WRITE_CHAR_P(n, c) \
- if (je_mallctl("opt."#n, (void *)&cpv, &cpsz, NULL, 0) == 0) { \
- if (json) { \
- malloc_cprintf(write_cb, cbopaque, \
- "\t\t\t\""#n"\": \"%s\"%s\n", cpv, (c)); \
- } else { \
+#define OPT_WRITE_CHAR_P(n) \
+ if (je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": \"%s\"\n", cpv); \
- } \
- }
+ }
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\"opt\": {\n");
- } else {
malloc_cprintf(write_cb, cbopaque,
"Run-time option settings:\n");
- }
- OPT_WRITE_BOOL(abort, ",")
- OPT_WRITE_SIZE_T(lg_chunk, ",")
- OPT_WRITE_CHAR_P(dss, ",")
- OPT_WRITE_UNSIGNED(narenas, ",")
- OPT_WRITE_CHAR_P(purge, ",")
- if (json || opt_purge == purge_mode_ratio) {
- OPT_WRITE_SSIZE_T_MUTABLE(lg_dirty_mult,
- arenas.lg_dirty_mult, ",")
- }
- if (json || opt_purge == purge_mode_decay) {
- OPT_WRITE_SSIZE_T_MUTABLE(decay_time, arenas.decay_time, ",")
- }
- OPT_WRITE_CHAR_P(junk, ",")
- OPT_WRITE_SIZE_T(quarantine, ",")
- OPT_WRITE_BOOL(redzone, ",")
- OPT_WRITE_BOOL(zero, ",")
- OPT_WRITE_BOOL(utrace, ",")
- OPT_WRITE_BOOL(xmalloc, ",")
- OPT_WRITE_BOOL(tcache, ",")
- OPT_WRITE_SSIZE_T(lg_tcache_max, ",")
- OPT_WRITE_BOOL(prof, ",")
- OPT_WRITE_CHAR_P(prof_prefix, ",")
- OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active, ",")
- OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init, prof.thread_active_init,
- ",")
- OPT_WRITE_SSIZE_T_MUTABLE(lg_prof_sample, prof.lg_sample, ",")
- OPT_WRITE_BOOL(prof_accum, ",")
- OPT_WRITE_SSIZE_T(lg_prof_interval, ",")
- OPT_WRITE_BOOL(prof_gdump, ",")
- OPT_WRITE_BOOL(prof_final, ",")
- OPT_WRITE_BOOL(prof_leak, ",")
- /*
- * stats_print is always emitted, so as long as stats_print comes last
- * it's safe to unconditionally omit the comma here (rather than having
- * to conditionally omit it elsewhere depending on configuration).
- */
- OPT_WRITE_BOOL(stats_print, "")
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t},\n");
- }
+ OPT_WRITE_BOOL(abort)
+ OPT_WRITE_SIZE_T(lg_chunk)
+ OPT_WRITE_CHAR_P(dss)
+ OPT_WRITE_SIZE_T(narenas)
+ OPT_WRITE_SSIZE_T_MUTABLE(lg_dirty_mult, arenas.lg_dirty_mult)
+ OPT_WRITE_BOOL(stats_print)
+ OPT_WRITE_CHAR_P(junk)
+ OPT_WRITE_SIZE_T(quarantine)
+ OPT_WRITE_BOOL(redzone)
+ OPT_WRITE_BOOL(zero)
+ OPT_WRITE_BOOL(utrace)
+ OPT_WRITE_BOOL(valgrind)
+ OPT_WRITE_BOOL(xmalloc)
+ OPT_WRITE_BOOL(tcache)
+ OPT_WRITE_SSIZE_T(lg_tcache_max)
+ OPT_WRITE_BOOL(prof)
+ OPT_WRITE_CHAR_P(prof_prefix)
+ OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active)
+ OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init,
+ prof.thread_active_init)
+ OPT_WRITE_SSIZE_T(lg_prof_sample)
+ OPT_WRITE_BOOL(prof_accum)
+ OPT_WRITE_SSIZE_T(lg_prof_interval)
+ OPT_WRITE_BOOL(prof_gdump)
+ OPT_WRITE_BOOL(prof_final)
+ OPT_WRITE_BOOL(prof_leak)
#undef OPT_WRITE_BOOL
#undef OPT_WRITE_BOOL_MUTABLE
@@ -769,386 +513,128 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
#undef OPT_WRITE_SSIZE_T
#undef OPT_WRITE_CHAR_P
- /* arenas. */
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\"arenas\": {\n");
- }
+ malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus);
- CTL_GET("arenas.narenas", &uv, unsigned);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"narenas\": %u,\n", uv);
- } else
+ CTL_GET("arenas.narenas", &uv, unsigned);
malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv);
- CTL_GET("arenas.lg_dirty_mult", &ssv, ssize_t);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"lg_dirty_mult\": %zd,\n", ssv);
- } else if (opt_purge == purge_mode_ratio) {
- if (ssv >= 0) {
- malloc_cprintf(write_cb, cbopaque,
- "Min active:dirty page ratio per arena: "
- "%u:1\n", (1U << ssv));
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "Min active:dirty page ratio per arena: "
- "N/A\n");
- }
- }
- CTL_GET("arenas.decay_time", &ssv, ssize_t);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"decay_time\": %zd,\n", ssv);
- } else if (opt_purge == purge_mode_decay) {
- malloc_cprintf(write_cb, cbopaque,
- "Unused dirty page decay time: %zd%s\n",
- ssv, (ssv < 0) ? " (no decay)" : "");
- }
+ malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n",
+ sizeof(void *));
- CTL_GET("arenas.quantum", &sv, size_t);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"quantum\": %zu,\n", sv);
- } else
- malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv);
+ CTL_GET("arenas.quantum", &sv, size_t);
+ malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n",
+ sv);
- CTL_GET("arenas.page", &sv, size_t);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"page\": %zu,\n", sv);
- } else
+ CTL_GET("arenas.page", &sv, size_t);
malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);
- if (je_mallctl("arenas.tcache_max", (void *)&sv, &ssz, NULL, 0) == 0) {
- if (json) {
+ CTL_GET("arenas.lg_dirty_mult", &ssv, ssize_t);
+ if (ssv >= 0) {
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"tcache_max\": %zu,\n", sv);
+ "Min active:dirty page ratio per arena: %u:1\n",
+ (1U << ssv));
} else {
malloc_cprintf(write_cb, cbopaque,
- "Maximum thread-cached size class: %zu\n", sv);
+ "Min active:dirty page ratio per arena: N/A\n");
}
- }
-
- if (json) {
- unsigned nbins, nlruns, nhchunks, i;
-
- CTL_GET("arenas.nbins", &nbins, unsigned);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"nbins\": %u,\n", nbins);
-
- CTL_GET("arenas.nhbins", &uv, unsigned);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"nhbins\": %u,\n", uv);
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"bin\": [\n");
- for (i = 0; i < nbins; i++) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t{\n");
-
- CTL_M2_GET("arenas.bin.0.size", i, &sv, size_t);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"size\": %zu,\n", sv);
-
- CTL_M2_GET("arenas.bin.0.nregs", i, &u32v, uint32_t);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"nregs\": %"FMTu32",\n", u32v);
-
- CTL_M2_GET("arenas.bin.0.run_size", i, &sv, size_t);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"run_size\": %zu\n", sv);
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t}%s\n", (i + 1 < nbins) ? "," : "");
- }
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t],\n");
-
- CTL_GET("arenas.nlruns", &nlruns, unsigned);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"nlruns\": %u,\n", nlruns);
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"lrun\": [\n");
- for (i = 0; i < nlruns; i++) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t{\n");
-
- CTL_M2_GET("arenas.lrun.0.size", i, &sv, size_t);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"size\": %zu\n", sv);
-
+ if (je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0) == 0) {
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t}%s\n", (i + 1 < nlruns) ? "," : "");
+ "Maximum thread-cached size class: %zu\n", sv);
}
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t],\n");
-
- CTL_GET("arenas.nhchunks", &nhchunks, unsigned);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"nhchunks\": %u,\n", nhchunks);
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"hchunk\": [\n");
- for (i = 0; i < nhchunks; i++) {
+ if (je_mallctl("opt.prof", &bv, &bsz, NULL, 0) == 0 && bv) {
+ CTL_GET("prof.lg_sample", &sv, size_t);
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t{\n");
+ "Average profile sample interval: %"FMTu64
+ " (2^%zu)\n", (((uint64_t)1U) << sv), sv);
- CTL_M2_GET("arenas.hchunk.0.size", i, &sv, size_t);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"size\": %zu\n", sv);
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t}%s\n", (i + 1 < nhchunks) ? "," : "");
+ CTL_GET("opt.lg_prof_interval", &ssv, ssize_t);
+ if (ssv >= 0) {
+ malloc_cprintf(write_cb, cbopaque,
+ "Average profile dump interval: %"FMTu64
+ " (2^%zd)\n",
+ (((uint64_t)1U) << ssv), ssv);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "Average profile dump interval: N/A\n");
+ }
}
+ CTL_GET("opt.lg_chunk", &sv, size_t);
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t]\n");
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t},\n");
+ "Chunk size: %zu (2^%zu)\n", (ZU(1) << sv), sv);
}
- /* prof. */
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\"prof\": {\n");
-
- CTL_GET("prof.thread_active_init", &bv, bool);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"thread_active_init\": %s,\n", bv ? "true" :
- "false");
-
- CTL_GET("prof.active", &bv, bool);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"active\": %s,\n", bv ? "true" : "false");
-
- CTL_GET("prof.gdump", &bv, bool);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"gdump\": %s,\n", bv ? "true" : "false");
-
- CTL_GET("prof.interval", &u64v, uint64_t);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"interval\": %"FMTu64",\n", u64v);
-
- CTL_GET("prof.lg_sample", &ssv, ssize_t);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"lg_sample\": %zd\n", ssv);
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t}%s\n", (config_stats || merged || unmerged) ? "," :
- "");
- }
-}
-
-static void
-stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque,
- bool json, bool merged, bool unmerged, bool bins, bool large, bool huge)
-{
- size_t *cactive;
- size_t allocated, active, metadata, resident, mapped, retained;
-
- CTL_GET("stats.cactive", &cactive, size_t *);
- CTL_GET("stats.allocated", &allocated, size_t);
- CTL_GET("stats.active", &active, size_t);
- CTL_GET("stats.metadata", &metadata, size_t);
- CTL_GET("stats.resident", &resident, size_t);
- CTL_GET("stats.mapped", &mapped, size_t);
- CTL_GET("stats.retained", &retained, size_t);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\"stats\": {\n");
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"cactive\": %zu,\n", atomic_read_z(cactive));
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"allocated\": %zu,\n", allocated);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"active\": %zu,\n", active);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"metadata\": %zu,\n", metadata);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"resident\": %zu,\n", resident);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"mapped\": %zu,\n", mapped);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"retained\": %zu\n", retained);
+ if (config_stats) {
+ size_t *cactive;
+ size_t allocated, active, metadata, resident, mapped;
- malloc_cprintf(write_cb, cbopaque,
- "\t\t}%s\n", (merged || unmerged) ? "," : "");
- } else {
+ CTL_GET("stats.cactive", &cactive, size_t *);
+ CTL_GET("stats.allocated", &allocated, size_t);
+ CTL_GET("stats.active", &active, size_t);
+ CTL_GET("stats.metadata", &metadata, size_t);
+ CTL_GET("stats.resident", &resident, size_t);
+ CTL_GET("stats.mapped", &mapped, size_t);
malloc_cprintf(write_cb, cbopaque,
"Allocated: %zu, active: %zu, metadata: %zu,"
- " resident: %zu, mapped: %zu, retained: %zu\n",
- allocated, active, metadata, resident, mapped, retained);
+ " resident: %zu, mapped: %zu\n",
+ allocated, active, metadata, resident, mapped);
malloc_cprintf(write_cb, cbopaque,
"Current active ceiling: %zu\n",
atomic_read_z(cactive));
- }
- if (merged || unmerged) {
- unsigned narenas;
-
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\"stats.arenas\": {\n");
- }
-
- CTL_GET("arenas.narenas", &narenas, unsigned);
- {
- VARIABLE_ARRAY(bool, initialized, narenas);
- size_t isz;
- unsigned i, j, ninitialized;
-
- isz = sizeof(bool) * narenas;
- xmallctl("arenas.initialized", (void *)initialized,
- &isz, NULL, 0);
- for (i = ninitialized = 0; i < narenas; i++) {
- if (initialized[i])
- ninitialized++;
- }
+ if (merged) {
+ unsigned narenas;
+
+ CTL_GET("arenas.narenas", &narenas, unsigned);
+ {
+ VARIABLE_ARRAY(bool, initialized, narenas);
+ size_t isz;
+ unsigned i, ninitialized;
+
+ isz = sizeof(bool) * narenas;
+ xmallctl("arenas.initialized", initialized,
+ &isz, NULL, 0);
+ for (i = ninitialized = 0; i < narenas; i++) {
+ if (initialized[i])
+ ninitialized++;
+ }
- /* Merged stats. */
- if (merged && (ninitialized > 1 || !unmerged)) {
- /* Print merged arena stats. */
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"merged\": {\n");
- } else {
+ if (ninitialized > 1 || !unmerged) {
+ /* Print merged arena stats. */
malloc_cprintf(write_cb, cbopaque,
"\nMerged arenas stats:\n");
- }
- stats_arena_print(write_cb, cbopaque, json,
- narenas, bins, large, huge);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t}%s\n", (ninitialized > 1) ?
- "," : "");
- }
- }
-
- /* Unmerged stats. */
- for (i = j = 0; i < narenas; i++) {
- if (initialized[i]) {
- if (json) {
- j++;
- malloc_cprintf(write_cb,
- cbopaque,
- "\t\t\t\"%u\": {\n", i);
- } else {
- malloc_cprintf(write_cb,
- cbopaque, "\narenas[%u]:\n",
- i);
- }
stats_arena_print(write_cb, cbopaque,
- json, i, bins, large, huge);
- if (json) {
- malloc_cprintf(write_cb,
- cbopaque,
- "\t\t\t}%s\n", (j <
- ninitialized) ? "," : "");
- }
+ narenas, bins, large, huge);
}
}
}
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t}\n");
- }
- }
-}
+ if (unmerged) {
+ unsigned narenas;
-void
-stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *opts)
-{
- int err;
- uint64_t epoch;
- size_t u64sz;
- bool json = false;
- bool general = true;
- bool merged = true;
- bool unmerged = true;
- bool bins = true;
- bool large = true;
- bool huge = true;
+ /* Print stats for each arena. */
- /*
- * Refresh stats, in case mallctl() was called by the application.
- *
- * Check for OOM here, since refreshing the ctl cache can trigger
- * allocation. In practice, none of the subsequent mallctl()-related
- * calls in this function will cause OOM if this one succeeds.
- * */
- epoch = 1;
- u64sz = sizeof(uint64_t);
- err = je_mallctl("epoch", (void *)&epoch, &u64sz, (void *)&epoch,
- sizeof(uint64_t));
- if (err != 0) {
- if (err == EAGAIN) {
- malloc_write("<jemalloc>: Memory allocation failure in "
- "mallctl(\"epoch\", ...)\n");
- return;
- }
- malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
- "...)\n");
- abort();
- }
+ CTL_GET("arenas.narenas", &narenas, unsigned);
+ {
+ VARIABLE_ARRAY(bool, initialized, narenas);
+ size_t isz;
+ unsigned i;
- if (opts != NULL) {
- unsigned i;
+ isz = sizeof(bool) * narenas;
+ xmallctl("arenas.initialized", initialized,
+ &isz, NULL, 0);
- for (i = 0; opts[i] != '\0'; i++) {
- switch (opts[i]) {
- case 'J':
- json = true;
- break;
- case 'g':
- general = false;
- break;
- case 'm':
- merged = false;
- break;
- case 'a':
- unmerged = false;
- break;
- case 'b':
- bins = false;
- break;
- case 'l':
- large = false;
- break;
- case 'h':
- huge = false;
- break;
- default:;
+ for (i = 0; i < narenas; i++) {
+ if (initialized[i]) {
+ malloc_cprintf(write_cb,
+ cbopaque,
+ "\narenas[%u]:\n", i);
+ stats_arena_print(write_cb,
+ cbopaque, i, bins, large,
+ huge);
+ }
+ }
}
}
}
-
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "{\n"
- "\t\"jemalloc\": {\n");
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "___ Begin jemalloc statistics ___\n");
- }
-
- if (general)
- stats_general_print(write_cb, cbopaque, json, merged, unmerged);
- if (config_stats) {
- stats_print_helper(write_cb, cbopaque, json, merged, unmerged,
- bins, large, huge);
- }
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t}\n"
- "}\n");
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "--- End jemalloc statistics ---\n");
- }
+ malloc_cprintf(write_cb, cbopaque, "--- End jemalloc statistics ---\n");
}
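
For context on the "epoch" handshake that the removed stats_print() performs before reading counters: the sketch below shows the same refresh done from application code, using only the public mallctl() interface. It is illustrative and not part of this patch; the toy_ prefix marks hypothetical names.

#include <stdio.h>
#include <stdint.h>
#include <jemalloc/jemalloc.h>

/* Refresh jemalloc's cached statistics, then read two counters. */
static void
toy_read_stats(void)
{
	uint64_t epoch = 1;
	size_t esz = sizeof(epoch);
	size_t allocated = 0, active = 0, sz = sizeof(size_t);

	/* Writing "epoch" makes the subsequent stats.* reads coherent. */
	if (mallctl("epoch", &epoch, &esz, &epoch, sizeof(epoch)) != 0)
		return;
	mallctl("stats.allocated", &allocated, &sz, NULL, 0);
	sz = sizeof(size_t);
	mallctl("stats.active", &active, &sz, NULL, 0);
	printf("allocated: %zu, active: %zu\n", allocated, active);
}
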
diff --git a/deps/jemalloc/src/tcache.c b/deps/jemalloc/src/tcache.c
old mode 100755
new mode 100644
index 21540ff46..fdafd0c62
--- a/deps/jemalloc/src/tcache.c
+++ b/deps/jemalloc/src/tcache.c
@@ -10,7 +10,7 @@ ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
tcache_bin_info_t *tcache_bin_info;
static unsigned stack_nelms; /* Total stack elms per tcache. */
-unsigned nhbins;
+size_t nhbins;
size_t tcache_maxclass;
tcaches_t *tcaches;
@@ -23,11 +23,10 @@ static tcaches_t *tcaches_avail;
/******************************************************************************/
-size_t
-tcache_salloc(tsdn_t *tsdn, const void *ptr)
+size_t tcache_salloc(const void *ptr)
{
- return (arena_salloc(tsdn, ptr, false));
+ return (arena_salloc(ptr, false));
}
void
@@ -68,19 +67,20 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
tcache->next_gc_bin++;
if (tcache->next_gc_bin == nhbins)
tcache->next_gc_bin = 0;
+ tcache->ev_cnt = 0;
}
void *
-tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
- tcache_bin_t *tbin, szind_t binind, bool *tcache_success)
+tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+ tcache_bin_t *tbin, szind_t binind)
{
void *ret;
- arena_tcache_fill_small(tsdn, arena, tbin, binind, config_prof ?
+ arena_tcache_fill_small(arena, tbin, binind, config_prof ?
tcache->prof_accumbytes : 0);
if (config_prof)
tcache->prof_accumbytes = 0;
- ret = tcache_alloc_easy(tbin, tcache_success);
+ ret = tcache_alloc_easy(tbin);
return (ret);
}
@@ -102,18 +102,17 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
/* Lock the arena bin associated with the first object. */
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
- *(tbin->avail - 1));
+ tbin->avail[0]);
arena_t *bin_arena = extent_node_arena_get(&chunk->node);
arena_bin_t *bin = &bin_arena->bins[binind];
if (config_prof && bin_arena == arena) {
- if (arena_prof_accum(tsd_tsdn(tsd), arena,
- tcache->prof_accumbytes))
- prof_idump(tsd_tsdn(tsd));
+ if (arena_prof_accum(arena, tcache->prof_accumbytes))
+ prof_idump();
tcache->prof_accumbytes = 0;
}
- malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ malloc_mutex_lock(&bin->lock);
if (config_stats && bin_arena == arena) {
assert(!merged_stats);
merged_stats = true;
@@ -123,16 +122,16 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
}
ndeferred = 0;
for (i = 0; i < nflush; i++) {
- ptr = *(tbin->avail - 1 - i);
+ ptr = tbin->avail[i];
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (extent_node_arena_get(&chunk->node) == bin_arena) {
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_bits_t *bitselm =
- arena_bitselm_get_mutable(chunk, pageind);
- arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
- bin_arena, chunk, ptr, bitselm);
+ arena_bitselm_get(chunk, pageind);
+ arena_dalloc_bin_junked_locked(bin_arena, chunk,
+ ptr, bitselm);
} else {
/*
* This object was allocated via a different
@@ -140,12 +139,11 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
* locked. Stash the object, so that it can be
* handled in a future pass.
*/
- *(tbin->avail - 1 - ndeferred) = ptr;
+ tbin->avail[ndeferred] = ptr;
ndeferred++;
}
}
- malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
- arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
+ malloc_mutex_unlock(&bin->lock);
}
if (config_stats && !merged_stats) {
/*
@@ -153,15 +151,15 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
* arena, so the stats didn't get merged. Manually do so now.
*/
arena_bin_t *bin = &arena->bins[binind];
- malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ malloc_mutex_lock(&bin->lock);
bin->stats.nflushes++;
bin->stats.nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
- malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ malloc_mutex_unlock(&bin->lock);
}
- memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
- sizeof(void *));
+ memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
+ rem * sizeof(void *));
tbin->ncached = rem;
if ((int)tbin->ncached < tbin->low_water)
tbin->low_water = tbin->ncached;
@@ -184,13 +182,13 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
/* Lock the arena associated with the first object. */
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
- *(tbin->avail - 1));
+ tbin->avail[0]);
arena_t *locked_arena = extent_node_arena_get(&chunk->node);
UNUSED bool idump;
if (config_prof)
idump = false;
- malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->lock);
+ malloc_mutex_lock(&locked_arena->lock);
if ((config_prof || config_stats) && locked_arena == arena) {
if (config_prof) {
idump = arena_prof_accum_locked(arena,
@@ -208,13 +206,13 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
}
ndeferred = 0;
for (i = 0; i < nflush; i++) {
- ptr = *(tbin->avail - 1 - i);
+ ptr = tbin->avail[i];
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (extent_node_arena_get(&chunk->node) ==
locked_arena) {
- arena_dalloc_large_junked_locked(tsd_tsdn(tsd),
- locked_arena, chunk, ptr);
+ arena_dalloc_large_junked_locked(locked_arena,
+ chunk, ptr);
} else {
/*
* This object was allocated via a different
@@ -222,56 +220,62 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
* Stash the object, so that it can be handled
* in a future pass.
*/
- *(tbin->avail - 1 - ndeferred) = ptr;
+ tbin->avail[ndeferred] = ptr;
ndeferred++;
}
}
- malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->lock);
+ malloc_mutex_unlock(&locked_arena->lock);
if (config_prof && idump)
- prof_idump(tsd_tsdn(tsd));
- arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
- ndeferred);
+ prof_idump();
}
if (config_stats && !merged_stats) {
/*
* The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now.
*/
- malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
+ malloc_mutex_lock(&arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[binind - NBINS].nrequests +=
tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
- malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
}
- memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
- sizeof(void *));
+ memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
+ rem * sizeof(void *));
tbin->ncached = rem;
if ((int)tbin->ncached < tbin->low_water)
tbin->low_water = tbin->ncached;
}
-static void
-tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
+void
+tcache_arena_associate(tcache_t *tcache, arena_t *arena)
{
if (config_stats) {
/* Link into list of extant tcaches. */
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
ql_elm_new(tcache, link);
ql_tail_insert(&arena->tcache_ql, tcache, link);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
}
}
-static void
-tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
+void
+tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena, arena_t *newarena)
+{
+
+ tcache_arena_dissociate(tcache, oldarena);
+ tcache_arena_associate(tcache, newarena);
+}
+
+void
+tcache_arena_dissociate(tcache_t *tcache, arena_t *arena)
{
if (config_stats) {
/* Unlink from list of extant tcaches. */
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
if (config_debug) {
bool in_ql = false;
tcache_t *iter;
@@ -284,20 +288,11 @@ tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
assert(in_ql);
}
ql_remove(&arena->tcache_ql, tcache, link);
- tcache_stats_merge(tsdn, tcache, arena);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ tcache_stats_merge(tcache, arena);
+ malloc_mutex_unlock(&arena->lock);
}
}
-void
-tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *oldarena,
- arena_t *newarena)
-{
-
- tcache_arena_dissociate(tsdn, tcache, oldarena);
- tcache_arena_associate(tsdn, tcache, newarena);
-}
-
tcache_t *
tcache_get_hard(tsd_t *tsd)
{
@@ -311,11 +306,11 @@ tcache_get_hard(tsd_t *tsd)
arena = arena_choose(tsd, NULL);
if (unlikely(arena == NULL))
return (NULL);
- return (tcache_create(tsd_tsdn(tsd), arena));
+ return (tcache_create(tsd, arena));
}
tcache_t *
-tcache_create(tsdn_t *tsdn, arena_t *arena)
+tcache_create(tsd_t *tsd, arena_t *arena)
{
tcache_t *tcache;
size_t size, stack_offset;
@@ -329,26 +324,18 @@ tcache_create(tsdn_t *tsdn, arena_t *arena)
/* Avoid false cacheline sharing. */
size = sa2u(size, CACHELINE);
- tcache = ipallocztm(tsdn, size, CACHELINE, true, NULL, true,
- arena_get(TSDN_NULL, 0, true));
+ tcache = ipallocztm(tsd, size, CACHELINE, true, false, true, a0get());
if (tcache == NULL)
return (NULL);
- tcache_arena_associate(tsdn, tcache, arena);
-
- ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);
+ tcache_arena_associate(tcache, arena);
assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
for (i = 0; i < nhbins; i++) {
tcache->tbins[i].lg_fill_div = 1;
- stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
- /*
- * avail points past the available space. Allocations will
- * access the slots toward higher addresses (for the benefit of
- * prefetch).
- */
tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
(uintptr_t)stack_offset);
+ stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
}
return (tcache);
@@ -361,7 +348,7 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
unsigned i;
arena = arena_choose(tsd, NULL);
- tcache_arena_dissociate(tsd_tsdn(tsd), tcache, arena);
+ tcache_arena_dissociate(tcache, arena);
for (i = 0; i < NBINS; i++) {
tcache_bin_t *tbin = &tcache->tbins[i];
@@ -369,9 +356,9 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
if (config_stats && tbin->tstats.nrequests != 0) {
arena_bin_t *bin = &arena->bins[i];
- malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ malloc_mutex_lock(&bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
- malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ malloc_mutex_unlock(&bin->lock);
}
}
@@ -380,19 +367,19 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
if (config_stats && tbin->tstats.nrequests != 0) {
- malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
+ malloc_mutex_lock(&arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[i - NBINS].nrequests +=
tbin->tstats.nrequests;
- malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
}
}
if (config_prof && tcache->prof_accumbytes > 0 &&
- arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes))
- prof_idump(tsd_tsdn(tsd));
+ arena_prof_accum(arena, tcache->prof_accumbytes))
+ prof_idump();
- idalloctm(tsd_tsdn(tsd), tcache, NULL, true, true);
+ idalloctm(tsd, tcache, false, true);
}
void
@@ -416,22 +403,21 @@ tcache_enabled_cleanup(tsd_t *tsd)
/* Do nothing. */
}
+/* Caller must own arena->lock. */
void
-tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
+tcache_stats_merge(tcache_t *tcache, arena_t *arena)
{
unsigned i;
cassert(config_stats);
- malloc_mutex_assert_owner(tsdn, &arena->lock);
-
/* Merge and reset tcache stats. */
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
tcache_bin_t *tbin = &tcache->tbins[i];
- malloc_mutex_lock(tsdn, &bin->lock);
+ malloc_mutex_lock(&bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
- malloc_mutex_unlock(tsdn, &bin->lock);
+ malloc_mutex_unlock(&bin->lock);
tbin->tstats.nrequests = 0;
}
@@ -447,12 +433,11 @@ tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
bool
tcaches_create(tsd_t *tsd, unsigned *r_ind)
{
- arena_t *arena;
tcache_t *tcache;
tcaches_t *elm;
if (tcaches == NULL) {
- tcaches = base_alloc(tsd_tsdn(tsd), sizeof(tcache_t *) *
+ tcaches = base_alloc(sizeof(tcache_t *) *
(MALLOCX_TCACHE_MAX+1));
if (tcaches == NULL)
return (true);
@@ -460,10 +445,7 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind)
if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX)
return (true);
- arena = arena_ichoose(tsd, NULL);
- if (unlikely(arena == NULL))
- return (true);
- tcache = tcache_create(tsd_tsdn(tsd), arena);
+ tcache = tcache_create(tsd, a0get());
if (tcache == NULL)
return (true);
@@ -471,7 +453,7 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind)
elm = tcaches_avail;
tcaches_avail = tcaches_avail->next;
elm->tcache = tcache;
- *r_ind = (unsigned)(elm - tcaches);
+ *r_ind = elm - tcaches;
} else {
elm = &tcaches[tcaches_past];
elm->tcache = tcache;
@@ -509,7 +491,7 @@ tcaches_destroy(tsd_t *tsd, unsigned ind)
}
bool
-tcache_boot(tsdn_t *tsdn)
+tcache_boot(void)
{
unsigned i;
@@ -517,17 +499,17 @@ tcache_boot(tsdn_t *tsdn)
* If necessary, clamp opt_lg_tcache_max, now that large_maxclass is
* known.
*/
- if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) < SMALL_MAXCLASS)
+ if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
tcache_maxclass = SMALL_MAXCLASS;
- else if ((ZU(1) << opt_lg_tcache_max) > large_maxclass)
+ else if ((1U << opt_lg_tcache_max) > large_maxclass)
tcache_maxclass = large_maxclass;
else
- tcache_maxclass = (ZU(1) << opt_lg_tcache_max);
+ tcache_maxclass = (1U << opt_lg_tcache_max);
nhbins = size2index(tcache_maxclass) + 1;
/* Initialize tcache_bin_info. */
- tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, nhbins *
+ tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
sizeof(tcache_bin_info_t));
if (tcache_bin_info == NULL)
return (true);
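
The tcache.c hunks above flip every tbin->avail access between two layouts: in 4.4.0, avail points one element past the stack and entries grow toward lower addresses (hence *(tbin->avail - 1 - i)), while the reverted code treats avail as an ordinary array indexed from 0. A minimal sketch of the downward-growing layout follows; it illustrates only the pointer arithmetic, not jemalloc's exact fill/flush policy, and the toy_ names are hypothetical.

#include <stdbool.h>
#include <stddef.h>

typedef struct {
	void		**avail;	/* One past the highest usable slot. */
	unsigned	ncached;
} toy_tbin_t;

/* LIFO push: entries occupy avail[-1], avail[-2], ... */
static bool
toy_push(toy_tbin_t *tbin, void *ptr, unsigned ncached_max)
{
	if (tbin->ncached == ncached_max)
		return (false);
	*(tbin->avail - 1 - tbin->ncached) = ptr;
	tbin->ncached++;
	return (true);
}

static void *
toy_pop(toy_tbin_t *tbin)
{
	if (tbin->ncached == 0)
		return (NULL);
	tbin->ncached--;
	return (*(tbin->avail - 1 - tbin->ncached));
}
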
diff --git a/deps/jemalloc/src/ticker.c b/deps/jemalloc/src/ticker.c
deleted file mode 100644
index db0902404..000000000
--- a/deps/jemalloc/src/ticker.c
+++ /dev/null
@@ -1,2 +0,0 @@
-#define JEMALLOC_TICKER_C_
-#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/deps/jemalloc/src/tsd.c b/deps/jemalloc/src/tsd.c
index ec69a51c3..9ffe9afef 100644
--- a/deps/jemalloc/src/tsd.c
+++ b/deps/jemalloc/src/tsd.c
@@ -77,7 +77,7 @@ tsd_cleanup(void *arg)
/* Do nothing. */
break;
case tsd_state_nominal:
-#define O(n, t) \
+#define O(n, t) \
n##_cleanup(tsd);
MALLOC_TSD
#undef O
@@ -106,17 +106,15 @@ MALLOC_TSD
}
}
-tsd_t *
+bool
malloc_tsd_boot0(void)
{
- tsd_t *tsd;
ncleanups = 0;
if (tsd_boot0())
- return (NULL);
- tsd = tsd_fetch();
- *tsd_arenas_tdata_bypassp_get(tsd) = true;
- return (tsd);
+ return (true);
+ *tsd_arenas_cache_bypassp_get(tsd_fetch()) = true;
+ return (false);
}
void
@@ -124,7 +122,7 @@ malloc_tsd_boot1(void)
{
tsd_boot1();
- *tsd_arenas_tdata_bypassp_get(tsd_fetch()) = false;
+ *tsd_arenas_cache_bypassp_get(tsd_fetch()) = false;
}
#ifdef _WIN32
@@ -150,15 +148,13 @@ _tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
#ifdef _MSC_VER
# ifdef _M_IX86
# pragma comment(linker, "/INCLUDE:__tls_used")
-# pragma comment(linker, "/INCLUDE:_tls_callback")
# else
# pragma comment(linker, "/INCLUDE:_tls_used")
-# pragma comment(linker, "/INCLUDE:tls_callback")
# endif
# pragma section(".CRT$XLY",long,read)
#endif
JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used)
-BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL,
+static BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL,
DWORD fdwReason, LPVOID lpvReserved) = _tls_callback;
#endif
@@ -171,10 +167,10 @@ tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
tsd_init_block_t *iter;
/* Check whether this thread has already inserted into the list. */
- malloc_mutex_lock(TSDN_NULL, &head->lock);
+ malloc_mutex_lock(&head->lock);
ql_foreach(iter, &head->blocks, link) {
if (iter->thread == self) {
- malloc_mutex_unlock(TSDN_NULL, &head->lock);
+ malloc_mutex_unlock(&head->lock);
return (iter->data);
}
}
@@ -182,7 +178,7 @@ tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
ql_elm_new(block, link);
block->thread = self;
ql_tail_insert(&head->blocks, block, link);
- malloc_mutex_unlock(TSDN_NULL, &head->lock);
+ malloc_mutex_unlock(&head->lock);
return (NULL);
}
@@ -190,8 +186,8 @@ void
tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block)
{
- malloc_mutex_lock(TSDN_NULL, &head->lock);
+ malloc_mutex_lock(&head->lock);
ql_remove(&head->blocks, block, link);
- malloc_mutex_unlock(TSDN_NULL, &head->lock);
+ malloc_mutex_unlock(&head->lock);
}
#endif
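
tsd.c builds per-thread state on top of the platform's thread-local storage; the _WIN32 hunk above shows the TLS-callback half, and on POSIX systems the cleanup hook is a pthread key destructor. A minimal sketch of that POSIX pattern, assuming only the standard pthreads API (toy_ names are hypothetical):

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

static pthread_key_t toy_tsd_key;

/* Runs once per thread at exit, the analogue of tsd_cleanup(). */
static void
toy_tsd_cleanup(void *arg)
{
	free(arg);
}

/* Returns true on failure, matching the boot functions above. */
static bool
toy_tsd_boot(void)
{
	return (pthread_key_create(&toy_tsd_key, toy_tsd_cleanup) != 0);
}
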
diff --git a/deps/jemalloc/src/util.c b/deps/jemalloc/src/util.c
old mode 100755
new mode 100644
index dd8c23630..4cb0d6c1e
--- a/deps/jemalloc/src/util.c
+++ b/deps/jemalloc/src/util.c
@@ -1,7 +1,3 @@
-/*
- * Define simple versions of assertion macros that won't recurse in case
- * of assertion failures in malloc_*printf().
- */
#define assert(e) do { \
if (config_debug && !(e)) { \
malloc_write("<jemalloc>: Failed assertion\n"); \
@@ -14,7 +10,6 @@
malloc_write("<jemalloc>: Unreachable code reached\n"); \
abort(); \
} \
- unreachable(); \
} while (0)
#define not_implemented() do { \
@@ -49,19 +44,15 @@ static void
wrtmessage(void *cbopaque, const char *s)
{
-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write)
+#ifdef SYS_write
/*
* Use syscall(2) rather than write(2) when possible in order to avoid
* the possibility of memory allocation within libc. This is necessary
* on FreeBSD; most operating systems do not have this problem though.
- *
- * syscall() returns long or int, depending on platform, so capture the
- * unused result in the widest plausible type to avoid compiler
- * warnings.
*/
- UNUSED long result = syscall(SYS_write, STDERR_FILENO, s, strlen(s));
+ UNUSED int result = syscall(SYS_write, STDERR_FILENO, s, strlen(s));
#else
- UNUSED ssize_t result = write(STDERR_FILENO, s, strlen(s));
+ UNUSED int result = write(STDERR_FILENO, s, strlen(s));
#endif
}
@@ -91,7 +82,7 @@ buferror(int err, char *buf, size_t buflen)
#ifdef _WIN32
FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0,
- (LPSTR)buf, (DWORD)buflen, NULL);
+ (LPSTR)buf, buflen, NULL);
return (0);
#elif defined(__GLIBC__) && defined(_GNU_SOURCE)
char *b = strerror_r(err, buf, buflen);
@@ -200,7 +191,7 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base)
p++;
}
if (neg)
- ret = (uintmax_t)(-((intmax_t)ret));
+ ret = -ret;
if (p == ns) {
/* No conversion performed. */
@@ -315,9 +306,10 @@ x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p)
return (s);
}
-size_t
+int
malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
{
+ int ret;
size_t i;
const char *f;
@@ -408,8 +400,6 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
int prec = -1;
int width = -1;
unsigned char len = '?';
- char *s;
- size_t slen;
f++;
/* Flags. */
@@ -500,6 +490,8 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
}
/* Conversion specifier. */
switch (*f) {
+ char *s;
+ size_t slen;
case '%':
/* %% */
APPEND_C(*f);
@@ -585,19 +577,20 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
str[i] = '\0';
else
str[size - 1] = '\0';
+ ret = i;
#undef APPEND_C
#undef APPEND_S
#undef APPEND_PADDED_S
#undef GET_ARG_NUMERIC
- return (i);
+ return (ret);
}
JEMALLOC_FORMAT_PRINTF(3, 4)
-size_t
+int
malloc_snprintf(char *str, size_t size, const char *format, ...)
{
- size_t ret;
+ int ret;
va_list ap;
va_start(ap, format);
@@ -655,12 +648,3 @@ malloc_printf(const char *format, ...)
malloc_vcprintf(NULL, NULL, format, ap);
va_end(ap);
}
-
-/*
- * Restore normal assertion macros, in order to make it possible to compile all
- * C files as a single concatenation.
- */
-#undef assert
-#undef not_reached
-#undef not_implemented
-#include "jemalloc/internal/assert.h"
diff --git a/deps/jemalloc/src/witness.c b/deps/jemalloc/src/witness.c
deleted file mode 100644
index 23753f246..000000000
--- a/deps/jemalloc/src/witness.c
+++ /dev/null
@@ -1,136 +0,0 @@
-#define JEMALLOC_WITNESS_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-void
-witness_init(witness_t *witness, const char *name, witness_rank_t rank,
- witness_comp_t *comp)
-{
-
- witness->name = name;
- witness->rank = rank;
- witness->comp = comp;
-}
-
-#ifdef JEMALLOC_JET
-#undef witness_lock_error
-#define witness_lock_error JEMALLOC_N(n_witness_lock_error)
-#endif
-void
-witness_lock_error(const witness_list_t *witnesses, const witness_t *witness)
-{
- witness_t *w;
-
- malloc_printf("<jemalloc>: Lock rank order reversal:");
- ql_foreach(w, witnesses, link) {
- malloc_printf(" %s(%u)", w->name, w->rank);
- }
- malloc_printf(" %s(%u)\n", witness->name, witness->rank);
- abort();
-}
-#ifdef JEMALLOC_JET
-#undef witness_lock_error
-#define witness_lock_error JEMALLOC_N(witness_lock_error)
-witness_lock_error_t *witness_lock_error = JEMALLOC_N(n_witness_lock_error);
-#endif
-
-#ifdef JEMALLOC_JET
-#undef witness_owner_error
-#define witness_owner_error JEMALLOC_N(n_witness_owner_error)
-#endif
-void
-witness_owner_error(const witness_t *witness)
-{
-
- malloc_printf("<jemalloc>: Should own %s(%u)\n", witness->name,
- witness->rank);
- abort();
-}
-#ifdef JEMALLOC_JET
-#undef witness_owner_error
-#define witness_owner_error JEMALLOC_N(witness_owner_error)
-witness_owner_error_t *witness_owner_error = JEMALLOC_N(n_witness_owner_error);
-#endif
-
-#ifdef JEMALLOC_JET
-#undef witness_not_owner_error
-#define witness_not_owner_error JEMALLOC_N(n_witness_not_owner_error)
-#endif
-void
-witness_not_owner_error(const witness_t *witness)
-{
-
- malloc_printf("<jemalloc>: Should not own %s(%u)\n", witness->name,
- witness->rank);
- abort();
-}
-#ifdef JEMALLOC_JET
-#undef witness_not_owner_error
-#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error)
-witness_not_owner_error_t *witness_not_owner_error =
- JEMALLOC_N(n_witness_not_owner_error);
-#endif
-
-#ifdef JEMALLOC_JET
-#undef witness_lockless_error
-#define witness_lockless_error JEMALLOC_N(n_witness_lockless_error)
-#endif
-void
-witness_lockless_error(const witness_list_t *witnesses)
-{
- witness_t *w;
-
- malloc_printf("<jemalloc>: Should not own any locks:");
- ql_foreach(w, witnesses, link) {
- malloc_printf(" %s(%u)", w->name, w->rank);
- }
- malloc_printf("\n");
- abort();
-}
-#ifdef JEMALLOC_JET
-#undef witness_lockless_error
-#define witness_lockless_error JEMALLOC_N(witness_lockless_error)
-witness_lockless_error_t *witness_lockless_error =
- JEMALLOC_N(n_witness_lockless_error);
-#endif
-
-void
-witnesses_cleanup(tsd_t *tsd)
-{
-
- witness_assert_lockless(tsd_tsdn(tsd));
-
- /* Do nothing. */
-}
-
-void
-witness_fork_cleanup(tsd_t *tsd)
-{
-
- /* Do nothing. */
-}
-
-void
-witness_prefork(tsd_t *tsd)
-{
-
- tsd_witness_fork_set(tsd, true);
-}
-
-void
-witness_postfork_parent(tsd_t *tsd)
-{
-
- tsd_witness_fork_set(tsd, false);
-}
-
-void
-witness_postfork_child(tsd_t *tsd)
-{
-#ifndef JEMALLOC_MUTEX_INIT_CB
- witness_list_t *witnesses;
-
- witnesses = tsd_witnessesp_get(tsd);
- ql_new(witnesses);
-#endif
- tsd_witness_fork_set(tsd, false);
-}
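
The deleted witness module enforces a lock-rank discipline: every mutex carries a rank, and a thread may only acquire locks in increasing rank order, so a reversal flags a potential deadlock before it happens. The sketch below captures just that invariant with strictly increasing ranks; the real module also keeps a per-thread witness list (see the ql_foreach in witness_lock_error above) and supports comparators for equal ranks. It assumes the GCC/Clang __thread extension, and the toy_ names are hypothetical.

#include <assert.h>

typedef struct {
	const char	*name;
	unsigned	rank;
} toy_witness_t;

/* Highest rank currently held by this thread (0 = none held). */
static __thread unsigned toy_max_held_rank;

static void
toy_witness_lock(const toy_witness_t *w)
{
	/* An out-of-order acquisition is the deadlock signature. */
	assert(w->rank > toy_max_held_rank);
	toy_max_held_rank = w->rank;
}
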
diff --git a/deps/jemalloc/src/zone.c b/deps/jemalloc/src/zone.c
index 0571920e4..12e1734a9 100644
--- a/deps/jemalloc/src/zone.c
+++ b/deps/jemalloc/src/zone.c
@@ -4,7 +4,7 @@
#endif
/*
- * The malloc_default_purgeable_zone() function is only available on >= 10.6.
+ * The malloc_default_purgeable_zone function is only available on >= 10.6.
* We need to check whether it is present at runtime, thus the weak_import.
*/
extern malloc_zone_t *malloc_default_purgeable_zone(void)
@@ -13,9 +13,8 @@ JEMALLOC_ATTR(weak_import);
/******************************************************************************/
/* Data. */
-static malloc_zone_t *default_zone, *purgeable_zone;
-static malloc_zone_t jemalloc_zone;
-static struct malloc_introspection_t jemalloc_zone_introspect;
+static malloc_zone_t zone;
+static struct malloc_introspection_t zone_introspect;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
@@ -57,7 +56,7 @@ zone_size(malloc_zone_t *zone, void *ptr)
* not work in practice, we must check all pointers to assure that they
* reside within a mapped chunk before determining size.
*/
- return (ivsalloc(tsdn_fetch(), ptr, config_prof));
+ return (ivsalloc(ptr, config_prof));
}
static void *
@@ -88,7 +87,7 @@ static void
zone_free(malloc_zone_t *zone, void *ptr)
{
- if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0) {
+ if (ivsalloc(ptr, config_prof) != 0) {
je_free(ptr);
return;
}
@@ -100,7 +99,7 @@ static void *
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
{
- if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0)
+ if (ivsalloc(ptr, config_prof) != 0)
return (je_realloc(ptr, size));
return (realloc(ptr, size));
@@ -122,11 +121,9 @@ zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
static void
zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
{
- size_t alloc_size;
- alloc_size = ivsalloc(tsdn_fetch(), ptr, config_prof);
- if (alloc_size != 0) {
- assert(alloc_size == size);
+ if (ivsalloc(ptr, config_prof) != 0) {
+ assert(ivsalloc(ptr, config_prof) == size);
je_free(ptr);
return;
}
@@ -165,103 +162,89 @@ static void
zone_force_unlock(malloc_zone_t *zone)
{
- /*
- * Call jemalloc_postfork_child() rather than
- * jemalloc_postfork_parent(), because this function is executed by both
- * parent and child. The parent can tolerate having state
- * reinitialized, but the child cannot unlock mutexes that were locked
- * by the parent.
- */
if (isthreaded)
- jemalloc_postfork_child();
+ jemalloc_postfork_parent();
}
-static void
-zone_init(void)
+JEMALLOC_ATTR(constructor)
+void
+register_zone(void)
{
- jemalloc_zone.size = (void *)zone_size;
- jemalloc_zone.malloc = (void *)zone_malloc;
- jemalloc_zone.calloc = (void *)zone_calloc;
- jemalloc_zone.valloc = (void *)zone_valloc;
- jemalloc_zone.free = (void *)zone_free;
- jemalloc_zone.realloc = (void *)zone_realloc;
- jemalloc_zone.destroy = (void *)zone_destroy;
- jemalloc_zone.zone_name = "jemalloc_zone";
- jemalloc_zone.batch_malloc = NULL;
- jemalloc_zone.batch_free = NULL;
- jemalloc_zone.introspect = &jemalloc_zone_introspect;
- jemalloc_zone.version = JEMALLOC_ZONE_VERSION;
+ /*
+ * If something else replaced the system default zone allocator, don't
+ * register jemalloc's.
+ */
+ malloc_zone_t *default_zone = malloc_default_zone();
+ malloc_zone_t *purgeable_zone = NULL;
+ if (!default_zone->zone_name ||
+ strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) {
+ return;
+ }
+
+ zone.size = (void *)zone_size;
+ zone.malloc = (void *)zone_malloc;
+ zone.calloc = (void *)zone_calloc;
+ zone.valloc = (void *)zone_valloc;
+ zone.free = (void *)zone_free;
+ zone.realloc = (void *)zone_realloc;
+ zone.destroy = (void *)zone_destroy;
+ zone.zone_name = "jemalloc_zone";
+ zone.batch_malloc = NULL;
+ zone.batch_free = NULL;
+ zone.introspect = &zone_introspect;
+ zone.version = JEMALLOC_ZONE_VERSION;
#if (JEMALLOC_ZONE_VERSION >= 5)
- jemalloc_zone.memalign = zone_memalign;
+ zone.memalign = zone_memalign;
#endif
#if (JEMALLOC_ZONE_VERSION >= 6)
- jemalloc_zone.free_definite_size = zone_free_definite_size;
+ zone.free_definite_size = zone_free_definite_size;
#endif
#if (JEMALLOC_ZONE_VERSION >= 8)
- jemalloc_zone.pressure_relief = NULL;
+ zone.pressure_relief = NULL;
#endif
- jemalloc_zone_introspect.enumerator = NULL;
- jemalloc_zone_introspect.good_size = (void *)zone_good_size;
- jemalloc_zone_introspect.check = NULL;
- jemalloc_zone_introspect.print = NULL;
- jemalloc_zone_introspect.log = NULL;
- jemalloc_zone_introspect.force_lock = (void *)zone_force_lock;
- jemalloc_zone_introspect.force_unlock = (void *)zone_force_unlock;
- jemalloc_zone_introspect.statistics = NULL;
+ zone_introspect.enumerator = NULL;
+ zone_introspect.good_size = (void *)zone_good_size;
+ zone_introspect.check = NULL;
+ zone_introspect.print = NULL;
+ zone_introspect.log = NULL;
+ zone_introspect.force_lock = (void *)zone_force_lock;
+ zone_introspect.force_unlock = (void *)zone_force_unlock;
+ zone_introspect.statistics = NULL;
#if (JEMALLOC_ZONE_VERSION >= 6)
- jemalloc_zone_introspect.zone_locked = NULL;
+ zone_introspect.zone_locked = NULL;
#endif
#if (JEMALLOC_ZONE_VERSION >= 7)
- jemalloc_zone_introspect.enable_discharge_checking = NULL;
- jemalloc_zone_introspect.disable_discharge_checking = NULL;
- jemalloc_zone_introspect.discharge = NULL;
-# ifdef __BLOCKS__
- jemalloc_zone_introspect.enumerate_discharged_pointers = NULL;
-# else
- jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL;
-# endif
+ zone_introspect.enable_discharge_checking = NULL;
+ zone_introspect.disable_discharge_checking = NULL;
+ zone_introspect.discharge = NULL;
+#ifdef __BLOCKS__
+ zone_introspect.enumerate_discharged_pointers = NULL;
+#else
+ zone_introspect.enumerate_unavailable_without_blocks = NULL;
+#endif
#endif
-}
-
-static malloc_zone_t *
-zone_default_get(void)
-{
- malloc_zone_t **zones = NULL;
- unsigned int num_zones = 0;
/*
- * On OSX 10.12, malloc_default_zone returns a special zone that is not
- * present in the list of registered zones. That zone uses a "lite zone"
- * if one is present (apparently enabled when malloc stack logging is
- * enabled), or the first registered zone otherwise. In practice this
- * means unless malloc stack logging is enabled, the first registered
- * zone is the default. So get the list of zones to get the first one,
- * instead of relying on malloc_default_zone.
+ * The default purgeable zone is created lazily by OSX's libc. It uses
+ * the default zone when it is created for "small" allocations
+ * (< 15 KiB), but assumes the default zone is a scalable_zone. This
+ * obviously fails when the default zone is the jemalloc zone, so
+ * malloc_default_purgeable_zone is called beforehand so that the
+ * default purgeable zone is created when the default zone is still
+ * a scalable_zone. As purgeable zones only exist on >= 10.6, we need
+ * to check for the existence of malloc_default_purgeable_zone() at
+ * run time.
*/
- if (KERN_SUCCESS != malloc_get_all_zones(0, NULL,
- (vm_address_t**)&zones, &num_zones)) {
- /*
- * Reset the value in case the failure happened after it was
- * set.
- */
- num_zones = 0;
- }
-
- if (num_zones)
- return (zones[0]);
-
- return (malloc_default_zone());
-}
+ if (malloc_default_purgeable_zone != NULL)
+ purgeable_zone = malloc_default_purgeable_zone();
-/* As written, this function can only promote jemalloc_zone. */
-static void
-zone_promote(void)
-{
- malloc_zone_t *zone;
+ /* Register the custom zone. At this point it won't be the default. */
+ malloc_zone_register(&zone);
do {
+ default_zone = malloc_default_zone();
/*
* Unregister and reregister the default zone. On OSX >= 10.6,
* unregistering takes the last registered zone and places it
@@ -272,7 +255,6 @@ zone_promote(void)
*/
malloc_zone_unregister(default_zone);
malloc_zone_register(default_zone);
-
/*
* On OSX 10.6, having the default purgeable zone appear before
* the default zone makes some things crash because it thinks it
@@ -284,47 +266,9 @@ zone_promote(void)
* above, i.e. the default zone. Registering it again then puts
* it at the end, obviously after the default zone.
*/
- if (purgeable_zone != NULL) {
+ if (purgeable_zone) {
malloc_zone_unregister(purgeable_zone);
malloc_zone_register(purgeable_zone);
}
-
- zone = zone_default_get();
- } while (zone != &jemalloc_zone);
-}
-
-JEMALLOC_ATTR(constructor)
-void
-zone_register(void)
-{
-
- /*
- * If something else replaced the system default zone allocator, don't
- * register jemalloc's.
- */
- default_zone = zone_default_get();
- if (!default_zone->zone_name || strcmp(default_zone->zone_name,
- "DefaultMallocZone") != 0)
- return;
-
- /*
- * The default purgeable zone is created lazily by OSX's libc. It uses
- * the default zone when it is created for "small" allocations
- * (< 15 KiB), but assumes the default zone is a scalable_zone. This
- * obviously fails when the default zone is the jemalloc zone, so
- * malloc_default_purgeable_zone() is called beforehand so that the
- * default purgeable zone is created when the default zone is still
- * a scalable_zone. As purgeable zones only exist on >= 10.6, we need
- * to check for the existence of malloc_default_purgeable_zone() at
- * run time.
- */
- purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL :
- malloc_default_purgeable_zone();
-
- /* Register the custom zone. At this point it won't be the default. */
- zone_init();
- malloc_zone_register(&jemalloc_zone);
-
- /* Promote the custom zone to be default. */
- zone_promote();
+ } while (malloc_default_zone() != &zone);
}
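
The do/while at the end of register_zone() is the subtle part of this file: on OS X, unregistering a zone moves the last registered zone into the vacated slot, so repeatedly unregistering and re-registering the current default rotates jemalloc's zone toward slot 0 until it becomes the default. A condensed, illustrative restatement using the public macOS zone API (macOS only; the toy_ name is hypothetical):

#include <malloc/malloc.h>

static void
toy_zone_promote(malloc_zone_t *mine)
{
	/* At this point "mine" is registered last, hence not default. */
	malloc_zone_register(mine);
	do {
		malloc_zone_t *dflt = malloc_default_zone();
		/*
		 * Unregistering swaps the last registered zone into the
		 * default slot; re-registering appends the old default,
		 * so each pass moves "mine" closer to the front.
		 */
		malloc_zone_unregister(dflt);
		malloc_zone_register(dflt);
	} while (malloc_default_zone() != mine);
}
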