author    Oran Agra <oran@redislabs.com>  2023-05-01 15:38:08 +0300
committer Oran Agra <oran@redislabs.com>  2023-05-01 15:38:08 +0300
commit    b8beda3cf8e5c8218fbe84539d6b5117b3f909d9 (patch)
tree      66934c98c93ac48f6ca912ad547e651221e525d2 /deps/jemalloc/src
parent    d659c734569be4ed32a270bac2527ccf35418c43 (diff)
parent    6d23d3ac3b3f9d13ad2ce99029e69a9ea9f2e517 (diff)
Merge commit jemalloc 5.3.0
Diffstat (limited to 'deps/jemalloc/src')
-rw-r--r--  deps/jemalloc/src/arena.c | 2063
-rw-r--r--  deps/jemalloc/src/background_thread.c | 335
-rw-r--r--  deps/jemalloc/src/base.c | 193
-rw-r--r--  deps/jemalloc/src/bin.c | 30
-rw-r--r--  deps/jemalloc/src/bin_info.c | 30
-rw-r--r--  deps/jemalloc/src/bitmap.c | 1
-rw-r--r--  deps/jemalloc/src/buf_writer.c | 144
-rw-r--r--  deps/jemalloc/src/cache_bin.c | 99
-rw-r--r--  deps/jemalloc/src/ckh.c | 7
-rw-r--r--  deps/jemalloc/src/counter.c | 30
-rw-r--r--  deps/jemalloc/src/ctl.c | 1687
-rw-r--r--  deps/jemalloc/src/decay.c | 295
-rw-r--r--  deps/jemalloc/src/ecache.c | 35
-rw-r--r--  deps/jemalloc/src/edata.c | 6
-rw-r--r--  deps/jemalloc/src/edata_cache.c | 154
-rw-r--r--  deps/jemalloc/src/ehooks.c | 275
-rw-r--r--  deps/jemalloc/src/emap.c | 386
-rw-r--r--  deps/jemalloc/src/eset.c | 282
-rw-r--r--  deps/jemalloc/src/exp_grow.c | 8
-rw-r--r--  deps/jemalloc/src/extent.c | 2481
-rw-r--r--  deps/jemalloc/src/extent_dss.c | 42
-rw-r--r--  deps/jemalloc/src/extent_mmap.c | 1
-rw-r--r--  deps/jemalloc/src/fxp.c | 124
-rw-r--r--  deps/jemalloc/src/hash.c | 3
-rw-r--r--  deps/jemalloc/src/hook.c | 6
-rw-r--r--  deps/jemalloc/src/hpa.c | 1044
-rw-r--r--  deps/jemalloc/src/hpa_hooks.c | 63
-rw-r--r--  deps/jemalloc/src/hpdata.c | 325
-rw-r--r--  deps/jemalloc/src/inspect.c | 77
-rw-r--r--  deps/jemalloc/src/jemalloc.c | 2174
-rw-r--r--  deps/jemalloc/src/jemalloc_cpp.cpp | 131
-rw-r--r--  deps/jemalloc/src/large.c | 299
-rw-r--r--  deps/jemalloc/src/malloc_io.c | 46
-rw-r--r--  deps/jemalloc/src/mutex.c | 19
-rw-r--r--  deps/jemalloc/src/mutex_pool.c | 18
-rw-r--r--  deps/jemalloc/src/nstime.c | 127
-rw-r--r--  deps/jemalloc/src/pa.c | 277
-rw-r--r--  deps/jemalloc/src/pa_extra.c | 191
-rw-r--r--  deps/jemalloc/src/pac.c | 587
-rw-r--r--  deps/jemalloc/src/pages.c | 209
-rw-r--r--  deps/jemalloc/src/pai.c | 31
-rw-r--r--  deps/jemalloc/src/peak_event.c | 82
-rw-r--r--  deps/jemalloc/src/prng.c | 3
-rw-r--r--  deps/jemalloc/src/prof.c | 2923
-rw-r--r--  deps/jemalloc/src/prof_data.c | 1447
-rw-r--r--  deps/jemalloc/src/prof_log.c | 717
-rw-r--r--  deps/jemalloc/src/prof_recent.c | 600
-rw-r--r--  deps/jemalloc/src/prof_stats.c | 57
-rw-r--r--  deps/jemalloc/src/prof_sys.c | 669
-rw-r--r--  deps/jemalloc/src/psset.c | 385
-rw-r--r--  deps/jemalloc/src/rtree.c | 75
-rw-r--r--  deps/jemalloc/src/safety_check.c | 16
-rw-r--r--  deps/jemalloc/src/san.c | 208
-rw-r--r--  deps/jemalloc/src/san_bump.c | 104
-rw-r--r--  deps/jemalloc/src/sc.c | 17
-rw-r--r--  deps/jemalloc/src/sec.c | 422
-rw-r--r--  deps/jemalloc/src/stats.c | 794
-rw-r--r--  deps/jemalloc/src/sz.c | 52
-rw-r--r--  deps/jemalloc/src/tcache.c | 1137
-rw-r--r--  deps/jemalloc/src/thread_event.c | 343
-rw-r--r--  deps/jemalloc/src/ticker.c | 31
-rwxr-xr-x  deps/jemalloc/src/ticker.py | 15
-rw-r--r--  deps/jemalloc/src/tsd.c | 75
-rw-r--r--  deps/jemalloc/src/witness.c | 44
64 files changed, 16416 insertions, 8135 deletions
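
A quick way to confirm the result of this merge is to ask the vendored allocator for its version string at runtime. The following is a minimal sketch, not part of the patch: it assumes the je_ symbol prefix that Redis normally configures when building its bundled jemalloc, and uses the standard mallctl "version" control name; after this merge the reported string should begin with 5.3.0.

/*
 * Minimal sketch: query the bundled jemalloc for its version string.
 * Assumes the je_ symbol prefix used by Redis' vendored jemalloc build;
 * adjust the prefix (or drop it) if jemalloc was built differently.
 */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    const char *version;
    size_t len = sizeof(version);

    /* "version" is a standard jemalloc mallctl name returning a const char *. */
    if (je_mallctl("version", &version, &len, NULL, 0) == 0) {
        /* Expected to begin with "5.3.0" once this merge is in place. */
        printf("jemalloc version: %s\n", version);
        return 0;
    }
    return 1;
}
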
diff --git a/deps/jemalloc/src/arena.c b/deps/jemalloc/src/arena.c
index ba50e4103..857b27c52 100644
--- a/deps/jemalloc/src/arena.c
+++ b/deps/jemalloc/src/arena.c
@@ -1,11 +1,12 @@
-#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
-#include "jemalloc/internal/div.h"
+#include "jemalloc/internal/decay.h"
+#include "jemalloc/internal/ehooks.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/san.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/safety_check.h"
@@ -35,34 +36,37 @@ ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;
static atomic_zd_t dirty_decay_ms_default;
static atomic_zd_t muzzy_decay_ms_default;
-const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
-#define STEP(step, h, x, y) \
- h,
- SMOOTHSTEP
-#undef STEP
-};
+emap_t arena_emap_global;
+pa_central_t arena_pa_central_global;
-static div_info_t arena_binind_div_info[SC_NBINS];
+div_info_t arena_binind_div_info[SC_NBINS];
size_t opt_oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT;
size_t oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT;
+
+uint32_t arena_bin_offsets[SC_NBINS];
+static unsigned nbins_total;
+
static unsigned huge_arena_ind;
+const arena_config_t arena_config_default = {
+ /* .extent_hooks = */ (extent_hooks_t *)&ehooks_default_extent_hooks,
+ /* .metadata_use_hooks = */ true,
+};
+
/******************************************************************************/
/*
* Function prototypes for static functions that are referenced prior to
* definition.
*/
-static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
- arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit,
- size_t npages_decay_max, bool is_background_thread);
static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
bool is_background_thread, bool all);
-static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
- bin_t *bin);
-static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
bin_t *bin);
+static void
+arena_maybe_do_deferred_work(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
+ size_t npages_new);
/******************************************************************************/
@@ -72,19 +76,17 @@ arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
*nthreads += arena_nthreads_get(arena, false);
*dss = dss_prec_names[arena_dss_prec_get(arena)];
- *dirty_decay_ms = arena_dirty_decay_ms_get(arena);
- *muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
- *nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED);
- *ndirty += extents_npages_get(&arena->extents_dirty);
- *nmuzzy += extents_npages_get(&arena->extents_muzzy);
+ *dirty_decay_ms = arena_decay_ms_get(arena, extent_state_dirty);
+ *muzzy_decay_ms = arena_decay_ms_get(arena, extent_state_muzzy);
+ pa_shard_basic_stats_merge(&arena->pa_shard, nactive, ndirty, nmuzzy);
}
void
arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
- bin_stats_t *bstats, arena_stats_large_t *lstats,
- arena_stats_extents_t *estats) {
+ bin_stats_data_t *bstats, arena_stats_large_t *lstats,
+ pac_estats_t *estats, hpa_shard_stats_t *hpastats, sec_stats_t *secstats) {
cassert(config_stats);
arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
@@ -93,122 +95,74 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
size_t base_allocated, base_resident, base_mapped, metadata_thp;
base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
&base_mapped, &metadata_thp);
+ size_t pac_mapped_sz = pac_mapped(&arena->pa_shard.pac);
+ astats->mapped += base_mapped + pac_mapped_sz;
+ astats->resident += base_resident;
- arena_stats_lock(tsdn, &arena->stats);
+ LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
- arena_stats_accum_zu(&astats->mapped, base_mapped
- + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped));
- arena_stats_accum_zu(&astats->retained,
- extents_npages_get(&arena->extents_retained) << LG_PAGE);
-
- atomic_store_zu(&astats->extent_avail,
- atomic_load_zu(&arena->extent_avail_cnt, ATOMIC_RELAXED),
- ATOMIC_RELAXED);
-
- arena_stats_accum_u64(&astats->decay_dirty.npurge,
- arena_stats_read_u64(tsdn, &arena->stats,
- &arena->stats.decay_dirty.npurge));
- arena_stats_accum_u64(&astats->decay_dirty.nmadvise,
- arena_stats_read_u64(tsdn, &arena->stats,
- &arena->stats.decay_dirty.nmadvise));
- arena_stats_accum_u64(&astats->decay_dirty.purged,
- arena_stats_read_u64(tsdn, &arena->stats,
- &arena->stats.decay_dirty.purged));
-
- arena_stats_accum_u64(&astats->decay_muzzy.npurge,
- arena_stats_read_u64(tsdn, &arena->stats,
- &arena->stats.decay_muzzy.npurge));
- arena_stats_accum_u64(&astats->decay_muzzy.nmadvise,
- arena_stats_read_u64(tsdn, &arena->stats,
- &arena->stats.decay_muzzy.nmadvise));
- arena_stats_accum_u64(&astats->decay_muzzy.purged,
- arena_stats_read_u64(tsdn, &arena->stats,
- &arena->stats.decay_muzzy.purged));
-
- arena_stats_accum_zu(&astats->base, base_allocated);
- arena_stats_accum_zu(&astats->internal, arena_internal_get(arena));
- arena_stats_accum_zu(&astats->metadata_thp, metadata_thp);
- arena_stats_accum_zu(&astats->resident, base_resident +
- (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
- extents_npages_get(&arena->extents_dirty) +
- extents_npages_get(&arena->extents_muzzy)) << LG_PAGE)));
- arena_stats_accum_zu(&astats->abandoned_vm, atomic_load_zu(
- &arena->stats.abandoned_vm, ATOMIC_RELAXED));
+ astats->base += base_allocated;
+ atomic_load_add_store_zu(&astats->internal, arena_internal_get(arena));
+ astats->metadata_thp += metadata_thp;
for (szind_t i = 0; i < SC_NSIZES - SC_NBINS; i++) {
- uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats,
+ uint64_t nmalloc = locked_read_u64(tsdn,
+ LOCKEDINT_MTX(arena->stats.mtx),
&arena->stats.lstats[i].nmalloc);
- arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc);
- arena_stats_accum_u64(&astats->nmalloc_large, nmalloc);
+ locked_inc_u64_unsynchronized(&lstats[i].nmalloc, nmalloc);
+ astats->nmalloc_large += nmalloc;
- uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats,
+ uint64_t ndalloc = locked_read_u64(tsdn,
+ LOCKEDINT_MTX(arena->stats.mtx),
&arena->stats.lstats[i].ndalloc);
- arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc);
- arena_stats_accum_u64(&astats->ndalloc_large, ndalloc);
+ locked_inc_u64_unsynchronized(&lstats[i].ndalloc, ndalloc);
+ astats->ndalloc_large += ndalloc;
- uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats,
+ uint64_t nrequests = locked_read_u64(tsdn,
+ LOCKEDINT_MTX(arena->stats.mtx),
&arena->stats.lstats[i].nrequests);
- arena_stats_accum_u64(&lstats[i].nrequests,
- nmalloc + nrequests);
- arena_stats_accum_u64(&astats->nrequests_large,
+ locked_inc_u64_unsynchronized(&lstats[i].nrequests,
nmalloc + nrequests);
+ astats->nrequests_large += nmalloc + nrequests;
/* nfill == nmalloc for large currently. */
- arena_stats_accum_u64(&lstats[i].nfills, nmalloc);
- arena_stats_accum_u64(&astats->nfills_large, nmalloc);
+ locked_inc_u64_unsynchronized(&lstats[i].nfills, nmalloc);
+ astats->nfills_large += nmalloc;
- uint64_t nflush = arena_stats_read_u64(tsdn, &arena->stats,
+ uint64_t nflush = locked_read_u64(tsdn,
+ LOCKEDINT_MTX(arena->stats.mtx),
&arena->stats.lstats[i].nflushes);
- arena_stats_accum_u64(&lstats[i].nflushes, nflush);
- arena_stats_accum_u64(&astats->nflushes_large, nflush);
+ locked_inc_u64_unsynchronized(&lstats[i].nflushes, nflush);
+ astats->nflushes_large += nflush;
assert(nmalloc >= ndalloc);
assert(nmalloc - ndalloc <= SIZE_T_MAX);
size_t curlextents = (size_t)(nmalloc - ndalloc);
lstats[i].curlextents += curlextents;
- arena_stats_accum_zu(&astats->allocated_large,
- curlextents * sz_index2size(SC_NBINS + i));
- }
-
- for (pszind_t i = 0; i < SC_NPSIZES; i++) {
- size_t dirty, muzzy, retained, dirty_bytes, muzzy_bytes,
- retained_bytes;
- dirty = extents_nextents_get(&arena->extents_dirty, i);
- muzzy = extents_nextents_get(&arena->extents_muzzy, i);
- retained = extents_nextents_get(&arena->extents_retained, i);
- dirty_bytes = extents_nbytes_get(&arena->extents_dirty, i);
- muzzy_bytes = extents_nbytes_get(&arena->extents_muzzy, i);
- retained_bytes =
- extents_nbytes_get(&arena->extents_retained, i);
-
- atomic_store_zu(&estats[i].ndirty, dirty, ATOMIC_RELAXED);
- atomic_store_zu(&estats[i].nmuzzy, muzzy, ATOMIC_RELAXED);
- atomic_store_zu(&estats[i].nretained, retained, ATOMIC_RELAXED);
- atomic_store_zu(&estats[i].dirty_bytes, dirty_bytes,
- ATOMIC_RELAXED);
- atomic_store_zu(&estats[i].muzzy_bytes, muzzy_bytes,
- ATOMIC_RELAXED);
- atomic_store_zu(&estats[i].retained_bytes, retained_bytes,
- ATOMIC_RELAXED);
- }
-
- arena_stats_unlock(tsdn, &arena->stats);
-
- /* tcache_bytes counts currently cached bytes. */
- atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED);
+ astats->allocated_large +=
+ curlextents * sz_index2size(SC_NBINS + i);
+ }
+
+ pa_shard_stats_merge(tsdn, &arena->pa_shard, &astats->pa_shard_stats,
+ estats, hpastats, secstats, &astats->resident);
+
+ LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
+
+ /* Currently cached bytes and sanitizer-stashed bytes in tcache. */
+ astats->tcache_bytes = 0;
+ astats->tcache_stashed_bytes = 0;
malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
cache_bin_array_descriptor_t *descriptor;
ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) {
- szind_t i = 0;
- for (; i < SC_NBINS; i++) {
- cache_bin_t *tbin = &descriptor->bins_small[i];
- arena_stats_accum_zu(&astats->tcache_bytes,
- tbin->ncached * sz_index2size(i));
- }
- for (; i < nhbins; i++) {
- cache_bin_t *tbin = &descriptor->bins_large[i];
- arena_stats_accum_zu(&astats->tcache_bytes,
- tbin->ncached * sz_index2size(i));
+ for (szind_t i = 0; i < nhbins; i++) {
+ cache_bin_t *cache_bin = &descriptor->bins[i];
+ cache_bin_sz_t ncached, nstashed;
+ cache_bin_nitems_get_remote(cache_bin,
+ &tcache_bin_info[i], &ncached, &nstashed);
+
+ astats->tcache_bytes += ncached * sz_index2size(i);
+ astats->tcache_stashed_bytes += nstashed *
+ sz_index2size(i);
}
}
malloc_mutex_prof_read(tsdn,
@@ -224,21 +178,11 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
/* Gather per arena mutex profiling data. */
READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
- READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx,
- arena_prof_mutex_extent_avail)
- READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx,
- arena_prof_mutex_extents_dirty)
- READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx,
- arena_prof_mutex_extents_muzzy)
- READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx,
- arena_prof_mutex_extents_retained)
- READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx,
- arena_prof_mutex_decay_dirty)
- READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx,
- arena_prof_mutex_decay_muzzy)
READ_ARENA_MUTEX_PROF_DATA(base->mtx,
- arena_prof_mutex_base)
+ arena_prof_mutex_base);
#undef READ_ARENA_MUTEX_PROF_DATA
+ pa_shard_mtx_stats_read(tsdn, &arena->pa_shard,
+ astats->mutex_prof_data);
nstime_copy(&astats->uptime, &arena->create_time);
nstime_update(&astats->uptime);
@@ -247,55 +191,67 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
for (szind_t i = 0; i < SC_NBINS; i++) {
for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
bin_stats_merge(tsdn, &bstats[i],
- &arena->bins[i].bin_shards[j]);
+ arena_get_bin(arena, i, j));
}
}
}
-void
-arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent) {
+static void
+arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena,
+ bool is_background_thread) {
+ if (!background_thread_enabled() || is_background_thread) {
+ return;
+ }
+ background_thread_info_t *info =
+ arena_background_thread_info_get(arena);
+ if (background_thread_indefinite_sleep(info)) {
+ arena_maybe_do_deferred_work(tsdn, arena,
+ &arena->pa_shard.pac.decay_dirty, 0);
+ }
+}
+
+/*
+ * React to deferred work generated by a PAI function.
+ */
+void arena_handle_deferred_work(tsdn_t *tsdn, arena_t *arena) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
- extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty,
- extent);
- if (arena_dirty_decay_ms_get(arena) == 0) {
+ if (decay_immediately(&arena->pa_shard.pac.decay_dirty)) {
arena_decay_dirty(tsdn, arena, false, true);
- } else {
- arena_background_thread_inactivity_check(tsdn, arena, false);
}
+ arena_background_thread_inactivity_check(tsdn, arena, false);
}
static void *
-arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) {
+arena_slab_reg_alloc(edata_t *slab, const bin_info_t *bin_info) {
void *ret;
- arena_slab_data_t *slab_data = extent_slab_data_get(slab);
+ slab_data_t *slab_data = edata_slab_data_get(slab);
size_t regind;
- assert(extent_nfree_get(slab) > 0);
+ assert(edata_nfree_get(slab) > 0);
assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
- ret = (void *)((uintptr_t)extent_addr_get(slab) +
+ ret = (void *)((uintptr_t)edata_addr_get(slab) +
(uintptr_t)(bin_info->reg_size * regind));
- extent_nfree_dec(slab);
+ edata_nfree_dec(slab);
return ret;
}
static void
-arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
+arena_slab_reg_alloc_batch(edata_t *slab, const bin_info_t *bin_info,
unsigned cnt, void** ptrs) {
- arena_slab_data_t *slab_data = extent_slab_data_get(slab);
+ slab_data_t *slab_data = edata_slab_data_get(slab);
- assert(extent_nfree_get(slab) >= cnt);
+ assert(edata_nfree_get(slab) >= cnt);
assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
#if (! defined JEMALLOC_INTERNAL_POPCOUNTL) || (defined BITMAP_USE_TREE)
for (unsigned i = 0; i < cnt; i++) {
size_t regind = bitmap_sfu(slab_data->bitmap,
&bin_info->bitmap_info);
- *(ptrs + i) = (void *)((uintptr_t)extent_addr_get(slab) +
+ *(ptrs + i) = (void *)((uintptr_t)edata_addr_get(slab) +
(uintptr_t)(bin_info->reg_size * regind));
}
#else
@@ -316,7 +272,7 @@ arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
* Load from memory locations only once, outside the
* hot loop below.
*/
- uintptr_t base = (uintptr_t)extent_addr_get(slab);
+ uintptr_t base = (uintptr_t)edata_addr_get(slab);
uintptr_t regsize = (uintptr_t)bin_info->reg_size;
while (pop--) {
size_t bit = cfs_lu(&g);
@@ -328,56 +284,7 @@ arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
slab_data->bitmap[group] = g;
}
#endif
- extent_nfree_sub(slab, cnt);
-}
-
-#ifndef JEMALLOC_JET
-static
-#endif
-size_t
-arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
- size_t diff, regind;
-
- /* Freeing a pointer outside the slab can cause assertion failure. */
- assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
- assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
- /* Freeing an interior pointer can cause assertion failure. */
- assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
- (uintptr_t)bin_infos[binind].reg_size == 0);
-
- diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));
-
- /* Avoid doing division with a variable divisor. */
- regind = div_compute(&arena_binind_div_info[binind], diff);
-
- assert(regind < bin_infos[binind].nregs);
-
- return regind;
-}
-
-static void
-arena_slab_reg_dalloc(extent_t *slab, arena_slab_data_t *slab_data, void *ptr) {
- szind_t binind = extent_szind_get(slab);
- const bin_info_t *bin_info = &bin_infos[binind];
- size_t regind = arena_slab_regind(slab, binind, ptr);
-
- assert(extent_nfree_get(slab) < bin_info->nregs);
- /* Freeing an unallocated pointer can cause assertion failure. */
- assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));
-
- bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
- extent_nfree_inc(slab);
-}
-
-static void
-arena_nactive_add(arena_t *arena, size_t add_pages) {
- atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED);
-}
-
-static void
-arena_nactive_sub(arena_t *arena, size_t sub_pages) {
- assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages);
- atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED);
+ edata_nfree_sub(slab, cnt);
}
static void
@@ -392,7 +299,7 @@ arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
index = sz_size2index(usize);
hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;
- arena_stats_add_u64(tsdn, &arena->stats,
+ locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
&arena->stats.lstats[hindex].nmalloc, 1);
}
@@ -408,551 +315,118 @@ arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
index = sz_size2index(usize);
hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;
- arena_stats_add_u64(tsdn, &arena->stats,
+ locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
&arena->stats.lstats[hindex].ndalloc, 1);
}
static void
arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
size_t usize) {
- arena_large_dalloc_stats_update(tsdn, arena, oldusize);
arena_large_malloc_stats_update(tsdn, arena, usize);
+ arena_large_dalloc_stats_update(tsdn, arena, oldusize);
}
-static bool
-arena_may_have_muzzy(arena_t *arena) {
- return (pages_can_purge_lazy && (arena_muzzy_decay_ms_get(arena) != 0));
-}
-
-extent_t *
+edata_t *
arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
- size_t alignment, bool *zero) {
- extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
+ size_t alignment, bool zero) {
+ bool deferred_work_generated = false;
+ szind_t szind = sz_size2index(usize);
+ size_t esize = usize + sz_large_pad;
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
+ bool guarded = san_large_extent_decide_guard(tsdn,
+ arena_get_ehooks(arena), esize, alignment);
+ edata_t *edata = pa_alloc(tsdn, &arena->pa_shard, esize, alignment,
+ /* slab */ false, szind, zero, guarded, &deferred_work_generated);
+ assert(deferred_work_generated == false);
- szind_t szind = sz_size2index(usize);
- size_t mapped_add;
- bool commit = true;
- extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks,
- &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false,
- szind, zero, &commit);
- if (extent == NULL && arena_may_have_muzzy(arena)) {
- extent = extents_alloc(tsdn, arena, &extent_hooks,
- &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment,
- false, szind, zero, &commit);
- }
- size_t size = usize + sz_large_pad;
- if (extent == NULL) {
- extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL,
- usize, sz_large_pad, alignment, false, szind, zero,
- &commit);
+ if (edata != NULL) {
if (config_stats) {
- /*
- * extent may be NULL on OOM, but in that case
- * mapped_add isn't used below, so there's no need to
- * conditionlly set it to 0 here.
- */
- mapped_add = size;
+ LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
+ arena_large_malloc_stats_update(tsdn, arena, usize);
+ LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
}
- } else if (config_stats) {
- mapped_add = 0;
}
- if (extent != NULL) {
- if (config_stats) {
- arena_stats_lock(tsdn, &arena->stats);
- arena_large_malloc_stats_update(tsdn, arena, usize);
- if (mapped_add != 0) {
- arena_stats_add_zu(tsdn, &arena->stats,
- &arena->stats.mapped, mapped_add);
- }
- arena_stats_unlock(tsdn, &arena->stats);
- }
- arena_nactive_add(arena, size >> LG_PAGE);
+ if (edata != NULL && sz_large_pad != 0) {
+ arena_cache_oblivious_randomize(tsdn, arena, edata, alignment);
}
- return extent;
+ return edata;
}
void
-arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
+arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
if (config_stats) {
- arena_stats_lock(tsdn, &arena->stats);
+ LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
arena_large_dalloc_stats_update(tsdn, arena,
- extent_usize_get(extent));
- arena_stats_unlock(tsdn, &arena->stats);
+ edata_usize_get(edata));
+ LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
}
- arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
}
void
-arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
size_t oldusize) {
- size_t usize = extent_usize_get(extent);
- size_t udiff = oldusize - usize;
+ size_t usize = edata_usize_get(edata);
if (config_stats) {
- arena_stats_lock(tsdn, &arena->stats);
+ LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
- arena_stats_unlock(tsdn, &arena->stats);
+ LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
}
- arena_nactive_sub(arena, udiff >> LG_PAGE);
}
void
-arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
size_t oldusize) {
- size_t usize = extent_usize_get(extent);
- size_t udiff = usize - oldusize;
+ size_t usize = edata_usize_get(edata);
if (config_stats) {
- arena_stats_lock(tsdn, &arena->stats);
+ LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
- arena_stats_unlock(tsdn, &arena->stats);
- }
- arena_nactive_add(arena, udiff >> LG_PAGE);
-}
-
-static ssize_t
-arena_decay_ms_read(arena_decay_t *decay) {
- return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
-}
-
-static void
-arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) {
- atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
-}
-
-static void
-arena_decay_deadline_init(arena_decay_t *decay) {
- /*
- * Generate a new deadline that is uniformly random within the next
- * epoch after the current one.
- */
- nstime_copy(&decay->deadline, &decay->epoch);
- nstime_add(&decay->deadline, &decay->interval);
- if (arena_decay_ms_read(decay) > 0) {
- nstime_t jitter;
-
- nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
- nstime_ns(&decay->interval)));
- nstime_add(&decay->deadline, &jitter);
- }
-}
-
-static bool
-arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) {
- return (nstime_compare(&decay->deadline, time) <= 0);
-}
-
-static size_t
-arena_decay_backlog_npages_limit(const arena_decay_t *decay) {
- uint64_t sum;
- size_t npages_limit_backlog;
- unsigned i;
-
- /*
- * For each element of decay_backlog, multiply by the corresponding
- * fixed-point smoothstep decay factor. Sum the products, then divide
- * to round down to the nearest whole number of pages.
- */
- sum = 0;
- for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
- sum += decay->backlog[i] * h_steps[i];
- }
- npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
-
- return npages_limit_backlog;
-}
-
-static void
-arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) {
- size_t npages_delta = (current_npages > decay->nunpurged) ?
- current_npages - decay->nunpurged : 0;
- decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta;
-
- if (config_debug) {
- if (current_npages > decay->ceil_npages) {
- decay->ceil_npages = current_npages;
- }
- size_t npages_limit = arena_decay_backlog_npages_limit(decay);
- assert(decay->ceil_npages >= npages_limit);
- if (decay->ceil_npages > npages_limit) {
- decay->ceil_npages = npages_limit;
- }
+ LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
}
}
-static void
-arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64,
- size_t current_npages) {
- if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
- memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
- sizeof(size_t));
- } else {
- size_t nadvance_z = (size_t)nadvance_u64;
-
- assert((uint64_t)nadvance_z == nadvance_u64);
-
- memmove(decay->backlog, &decay->backlog[nadvance_z],
- (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
- if (nadvance_z > 1) {
- memset(&decay->backlog[SMOOTHSTEP_NSTEPS -
- nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
- }
- }
-
- arena_decay_backlog_update_last(decay, current_npages);
-}
-
-static void
-arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
- extents_t *extents, size_t current_npages, size_t npages_limit,
- bool is_background_thread) {
- if (current_npages > npages_limit) {
- arena_decay_to_limit(tsdn, arena, decay, extents, false,
- npages_limit, current_npages - npages_limit,
- is_background_thread);
- }
-}
-
-static void
-arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time,
- size_t current_npages) {
- assert(arena_decay_deadline_reached(decay, time));
-
- nstime_t delta;
- nstime_copy(&delta, time);
- nstime_subtract(&delta, &decay->epoch);
-
- uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
- assert(nadvance_u64 > 0);
-
- /* Add nadvance_u64 decay intervals to epoch. */
- nstime_copy(&delta, &decay->interval);
- nstime_imultiply(&delta, nadvance_u64);
- nstime_add(&decay->epoch, &delta);
-
- /* Set a new deadline. */
- arena_decay_deadline_init(decay);
-
- /* Update the backlog. */
- arena_decay_backlog_update(decay, nadvance_u64, current_npages);
-}
-
-static void
-arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
- extents_t *extents, const nstime_t *time, bool is_background_thread) {
- size_t current_npages = extents_npages_get(extents);
- arena_decay_epoch_advance_helper(decay, time, current_npages);
-
- size_t npages_limit = arena_decay_backlog_npages_limit(decay);
- /* We may unlock decay->mtx when try_purge(). Finish logging first. */
- decay->nunpurged = (npages_limit > current_npages) ? npages_limit :
- current_npages;
-
- if (!background_thread_enabled() || is_background_thread) {
- arena_decay_try_purge(tsdn, arena, decay, extents,
- current_npages, npages_limit, is_background_thread);
- }
-}
-
-static void
-arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) {
- arena_decay_ms_write(decay, decay_ms);
- if (decay_ms > 0) {
- nstime_init(&decay->interval, (uint64_t)decay_ms *
- KQU(1000000));
- nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
- }
-
- nstime_init(&decay->epoch, 0);
- nstime_update(&decay->epoch);
- decay->jitter_state = (uint64_t)(uintptr_t)decay;
- arena_decay_deadline_init(decay);
- decay->nunpurged = 0;
- memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
-}
-
-static bool
-arena_decay_init(arena_decay_t *decay, ssize_t decay_ms,
- arena_stats_decay_t *stats) {
- if (config_debug) {
- for (size_t i = 0; i < sizeof(arena_decay_t); i++) {
- assert(((char *)decay)[i] == 0);
- }
- decay->ceil_npages = 0;
- }
- if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
- malloc_mutex_rank_exclusive)) {
- return true;
- }
- decay->purging = false;
- arena_decay_reinit(decay, decay_ms);
- /* Memory is zeroed, so there is no need to clear stats. */
- if (config_stats) {
- decay->stats = stats;
- }
- return false;
-}
-
-static bool
-arena_decay_ms_valid(ssize_t decay_ms) {
- if (decay_ms < -1) {
- return false;
- }
- if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
- KQU(1000)) {
- return true;
- }
- return false;
-}
-
-static bool
-arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
- extents_t *extents, bool is_background_thread) {
- malloc_mutex_assert_owner(tsdn, &decay->mtx);
-
- /* Purge all or nothing if the option is disabled. */
- ssize_t decay_ms = arena_decay_ms_read(decay);
- if (decay_ms <= 0) {
- if (decay_ms == 0) {
- arena_decay_to_limit(tsdn, arena, decay, extents, false,
- 0, extents_npages_get(extents),
- is_background_thread);
- }
- return false;
- }
-
- nstime_t time;
- nstime_init(&time, 0);
- nstime_update(&time);
- if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time)
- > 0)) {
- /*
- * Time went backwards. Move the epoch back in time and
- * generate a new deadline, with the expectation that time
- * typically flows forward for long enough periods of time that
- * epochs complete. Unfortunately, this strategy is susceptible
- * to clock jitter triggering premature epoch advances, but
- * clock jitter estimation and compensation isn't feasible here
- * because calls into this code are event-driven.
- */
- nstime_copy(&decay->epoch, &time);
- arena_decay_deadline_init(decay);
+/*
+ * In situations where we're not forcing a decay (i.e. because the user
+ * specifically requested it), should we purge ourselves, or wait for the
+ * background thread to get to it.
+ */
+static pac_purge_eagerness_t
+arena_decide_unforced_purge_eagerness(bool is_background_thread) {
+ if (is_background_thread) {
+ return PAC_PURGE_ALWAYS;
+ } else if (!is_background_thread && background_thread_enabled()) {
+ return PAC_PURGE_NEVER;
} else {
- /* Verify that time does not go backwards. */
- assert(nstime_compare(&decay->epoch, &time) <= 0);
+ return PAC_PURGE_ON_EPOCH_ADVANCE;
}
-
- /*
- * If the deadline has been reached, advance to the current epoch and
- * purge to the new limit if necessary. Note that dirty pages created
- * during the current epoch are not subject to purge until a future
- * epoch, so as a result purging only happens during epoch advances, or
- * being triggered by background threads (scheduled event).
- */
- bool advance_epoch = arena_decay_deadline_reached(decay, &time);
- if (advance_epoch) {
- arena_decay_epoch_advance(tsdn, arena, decay, extents, &time,
- is_background_thread);
- } else if (is_background_thread) {
- arena_decay_try_purge(tsdn, arena, decay, extents,
- extents_npages_get(extents),
- arena_decay_backlog_npages_limit(decay),
- is_background_thread);
- }
-
- return advance_epoch;
-}
-
-static ssize_t
-arena_decay_ms_get(arena_decay_t *decay) {
- return arena_decay_ms_read(decay);
-}
-
-ssize_t
-arena_dirty_decay_ms_get(arena_t *arena) {
- return arena_decay_ms_get(&arena->decay_dirty);
-}
-
-ssize_t
-arena_muzzy_decay_ms_get(arena_t *arena) {
- return arena_decay_ms_get(&arena->decay_muzzy);
-}
-
-static bool
-arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
- extents_t *extents, ssize_t decay_ms) {
- if (!arena_decay_ms_valid(decay_ms)) {
- return true;
- }
-
- malloc_mutex_lock(tsdn, &decay->mtx);
- /*
- * Restart decay backlog from scratch, which may cause many dirty pages
- * to be immediately purged. It would conceptually be possible to map
- * the old backlog onto the new backlog, but there is no justification
- * for such complexity since decay_ms changes are intended to be
- * infrequent, either between the {-1, 0, >0} states, or a one-time
- * arbitrary change during initial arena configuration.
- */
- arena_decay_reinit(decay, decay_ms);
- arena_maybe_decay(tsdn, arena, decay, extents, false);
- malloc_mutex_unlock(tsdn, &decay->mtx);
-
- return false;
-}
-
-bool
-arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
- ssize_t decay_ms) {
- return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty,
- &arena->extents_dirty, decay_ms);
}
bool
-arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
+arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state,
ssize_t decay_ms) {
- return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy,
- &arena->extents_muzzy, decay_ms);
+ pac_purge_eagerness_t eagerness = arena_decide_unforced_purge_eagerness(
+ /* is_background_thread */ false);
+ return pa_decay_ms_set(tsdn, &arena->pa_shard, state, decay_ms,
+ eagerness);
}
-static size_t
-arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit,
- size_t npages_decay_max, extent_list_t *decay_extents) {
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
-
- /* Stash extents according to npages_limit. */
- size_t nstashed = 0;
- extent_t *extent;
- while (nstashed < npages_decay_max &&
- (extent = extents_evict(tsdn, arena, r_extent_hooks, extents,
- npages_limit)) != NULL) {
- extent_list_append(decay_extents, extent);
- nstashed += extent_size_get(extent) >> LG_PAGE;
- }
- return nstashed;
-}
-
-static size_t
-arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents,
- bool all, extent_list_t *decay_extents, bool is_background_thread) {
- size_t nmadvise, nunmapped;
- size_t npurged;
-
- if (config_stats) {
- nmadvise = 0;
- nunmapped = 0;
- }
- npurged = 0;
-
- ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
- for (extent_t *extent = extent_list_first(decay_extents); extent !=
- NULL; extent = extent_list_first(decay_extents)) {
- if (config_stats) {
- nmadvise++;
- }
- size_t npages = extent_size_get(extent) >> LG_PAGE;
- npurged += npages;
- extent_list_remove(decay_extents, extent);
- switch (extents_state_get(extents)) {
- case extent_state_active:
- not_reached();
- case extent_state_dirty:
- if (!all && muzzy_decay_ms != 0 &&
- !extent_purge_lazy_wrapper(tsdn, arena,
- r_extent_hooks, extent, 0,
- extent_size_get(extent))) {
- extents_dalloc(tsdn, arena, r_extent_hooks,
- &arena->extents_muzzy, extent);
- arena_background_thread_inactivity_check(tsdn,
- arena, is_background_thread);
- break;
- }
- /* Fall through. */
- case extent_state_muzzy:
- extent_dalloc_wrapper(tsdn, arena, r_extent_hooks,
- extent);
- if (config_stats) {
- nunmapped += npages;
- }
- break;
- case extent_state_retained:
- default:
- not_reached();
- }
- }
-
- if (config_stats) {
- arena_stats_lock(tsdn, &arena->stats);
- arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge,
- 1);
- arena_stats_add_u64(tsdn, &arena->stats,
- &decay->stats->nmadvise, nmadvise);
- arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged,
- npurged);
- arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
- nunmapped << LG_PAGE);
- arena_stats_unlock(tsdn, &arena->stats);
- }
-
- return npurged;
-}
-
-/*
- * npages_limit: Decay at most npages_decay_max pages without violating the
- * invariant: (extents_npages_get(extents) >= npages_limit). We need an upper
- * bound on number of pages in order to prevent unbounded growth (namely in
- * stashed), otherwise unbounded new pages could be added to extents during the
- * current decay run, so that the purging thread never finishes.
- */
-static void
-arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
- extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max,
- bool is_background_thread) {
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 1);
- malloc_mutex_assert_owner(tsdn, &decay->mtx);
-
- if (decay->purging) {
- return;
- }
- decay->purging = true;
- malloc_mutex_unlock(tsdn, &decay->mtx);
-
- extent_hooks_t *extent_hooks = extent_hooks_get(arena);
-
- extent_list_t decay_extents;
- extent_list_init(&decay_extents);
-
- size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents,
- npages_limit, npages_decay_max, &decay_extents);
- if (npurge != 0) {
- size_t npurged = arena_decay_stashed(tsdn, arena,
- &extent_hooks, decay, extents, all, &decay_extents,
- is_background_thread);
- assert(npurged == npurge);
- }
-
- malloc_mutex_lock(tsdn, &decay->mtx);
- decay->purging = false;
+ssize_t
+arena_decay_ms_get(arena_t *arena, extent_state_t state) {
+ return pa_decay_ms_get(&arena->pa_shard, state);
}
static bool
-arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
- extents_t *extents, bool is_background_thread, bool all) {
+arena_decay_impl(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
+ pac_decay_stats_t *decay_stats, ecache_t *ecache,
+ bool is_background_thread, bool all) {
if (all) {
malloc_mutex_lock(tsdn, &decay->mtx);
- arena_decay_to_limit(tsdn, arena, decay, extents, all, 0,
- extents_npages_get(extents), is_background_thread);
+ pac_decay_all(tsdn, &arena->pa_shard.pac, decay, decay_stats,
+ ecache, /* fully_decay */ all);
malloc_mutex_unlock(tsdn, &decay->mtx);
-
return false;
}
@@ -960,20 +434,20 @@ arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
/* No need to wait if another thread is in progress. */
return true;
}
-
- bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents,
- is_background_thread);
+ pac_purge_eagerness_t eagerness =
+ arena_decide_unforced_purge_eagerness(is_background_thread);
+ bool epoch_advanced = pac_maybe_decay_purge(tsdn, &arena->pa_shard.pac,
+ decay, decay_stats, ecache, eagerness);
size_t npages_new;
if (epoch_advanced) {
/* Backlog is updated on epoch advance. */
- npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1];
+ npages_new = decay_epoch_npages_delta(decay);
}
malloc_mutex_unlock(tsdn, &decay->mtx);
if (have_background_thread && background_thread_enabled() &&
epoch_advanced && !is_background_thread) {
- background_thread_interval_check(tsdn, arena, decay,
- npages_new);
+ arena_maybe_do_deferred_work(tsdn, arena, decay, npages_new);
}
return false;
@@ -982,53 +456,143 @@ arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
static bool
arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
bool all) {
- return arena_decay_impl(tsdn, arena, &arena->decay_dirty,
- &arena->extents_dirty, is_background_thread, all);
+ return arena_decay_impl(tsdn, arena, &arena->pa_shard.pac.decay_dirty,
+ &arena->pa_shard.pac.stats->decay_dirty,
+ &arena->pa_shard.pac.ecache_dirty, is_background_thread, all);
}
static bool
arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
bool all) {
- return arena_decay_impl(tsdn, arena, &arena->decay_muzzy,
- &arena->extents_muzzy, is_background_thread, all);
+ if (pa_shard_dont_decay_muzzy(&arena->pa_shard)) {
+ return false;
+ }
+ return arena_decay_impl(tsdn, arena, &arena->pa_shard.pac.decay_muzzy,
+ &arena->pa_shard.pac.stats->decay_muzzy,
+ &arena->pa_shard.pac.ecache_muzzy, is_background_thread, all);
}
void
arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
+ if (all) {
+ /*
+ * We should take a purge of "all" to mean "save as much memory
+ * as possible", including flushing any caches (for situations
+ * like thread death, or manual purge calls).
+ */
+ sec_flush(tsdn, &arena->pa_shard.hpa_sec);
+ }
if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) {
return;
}
arena_decay_muzzy(tsdn, arena, is_background_thread, all);
}
+static bool
+arena_should_decay_early(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
+ background_thread_info_t *info, nstime_t *remaining_sleep,
+ size_t npages_new) {
+ malloc_mutex_assert_owner(tsdn, &info->mtx);
+
+ if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
+ return false;
+ }
+
+ if (!decay_gradually(decay)) {
+ malloc_mutex_unlock(tsdn, &decay->mtx);
+ return false;
+ }
+
+ nstime_init(remaining_sleep, background_thread_wakeup_time_get(info));
+ if (nstime_compare(remaining_sleep, &decay->epoch) <= 0) {
+ malloc_mutex_unlock(tsdn, &decay->mtx);
+ return false;
+ }
+ nstime_subtract(remaining_sleep, &decay->epoch);
+ if (npages_new > 0) {
+ uint64_t npurge_new = decay_npages_purge_in(decay,
+ remaining_sleep, npages_new);
+ info->npages_to_purge_new += npurge_new;
+ }
+ malloc_mutex_unlock(tsdn, &decay->mtx);
+ return info->npages_to_purge_new >
+ ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD;
+}
+
+/*
+ * Check if deferred work needs to be done sooner than planned.
+ * For decay we might want to wake up earlier because of an influx of dirty
+ * pages. Rather than waiting for previously estimated time, we proactively
+ * purge those pages.
+ * If background thread sleeps indefinitely, always wake up because some
+ * deferred work has been generated.
+ */
static void
-arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
- arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);
+arena_maybe_do_deferred_work(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
+ size_t npages_new) {
+ background_thread_info_t *info = arena_background_thread_info_get(
+ arena);
+ if (malloc_mutex_trylock(tsdn, &info->mtx)) {
+ /*
+ * Background thread may hold the mutex for a long period of
+ * time. We'd like to avoid the variance on application
+ * threads. So keep this non-blocking, and leave the work to a
+ * future epoch.
+ */
+ return;
+ }
+ if (!background_thread_is_started(info)) {
+ goto label_done;
+ }
+
+ nstime_t remaining_sleep;
+ if (background_thread_indefinite_sleep(info)) {
+ background_thread_wakeup_early(info, NULL);
+ } else if (arena_should_decay_early(tsdn, arena, decay, info,
+ &remaining_sleep, npages_new)) {
+ info->npages_to_purge_new = 0;
+ background_thread_wakeup_early(info, &remaining_sleep);
+ }
+label_done:
+ malloc_mutex_unlock(tsdn, &info->mtx);
+}
+
+/* Called from background threads. */
+void
+arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena) {
+ arena_decay(tsdn, arena, true, false);
+ pa_shard_do_deferred_work(tsdn, &arena->pa_shard);
+}
- extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
- arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab);
+void
+arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab) {
+ bool deferred_work_generated = false;
+ pa_dalloc(tsdn, &arena->pa_shard, slab, &deferred_work_generated);
+ if (deferred_work_generated) {
+ arena_handle_deferred_work(tsdn, arena);
+ }
}
static void
-arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) {
- assert(extent_nfree_get(slab) > 0);
- extent_heap_insert(&bin->slabs_nonfull, slab);
+arena_bin_slabs_nonfull_insert(bin_t *bin, edata_t *slab) {
+ assert(edata_nfree_get(slab) > 0);
+ edata_heap_insert(&bin->slabs_nonfull, slab);
if (config_stats) {
bin->stats.nonfull_slabs++;
}
}
static void
-arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) {
- extent_heap_remove(&bin->slabs_nonfull, slab);
+arena_bin_slabs_nonfull_remove(bin_t *bin, edata_t *slab) {
+ edata_heap_remove(&bin->slabs_nonfull, slab);
if (config_stats) {
bin->stats.nonfull_slabs--;
}
}
-static extent_t *
+static edata_t *
arena_bin_slabs_nonfull_tryget(bin_t *bin) {
- extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
+ edata_t *slab = edata_heap_remove_first(&bin->slabs_nonfull);
if (slab == NULL) {
return NULL;
}
@@ -1040,30 +604,30 @@ arena_bin_slabs_nonfull_tryget(bin_t *bin) {
}
static void
-arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) {
- assert(extent_nfree_get(slab) == 0);
+arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, edata_t *slab) {
+ assert(edata_nfree_get(slab) == 0);
/*
* Tracking extents is required by arena_reset, which is not allowed
- * for auto arenas. Bypass this step to avoid touching the extent
+ * for auto arenas. Bypass this step to avoid touching the edata
* linkage (often results in cache misses) for auto arenas.
*/
if (arena_is_auto(arena)) {
return;
}
- extent_list_append(&bin->slabs_full, slab);
+ edata_list_active_append(&bin->slabs_full, slab);
}
static void
-arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) {
+arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, edata_t *slab) {
if (arena_is_auto(arena)) {
return;
}
- extent_list_remove(&bin->slabs_full, slab);
+ edata_list_active_remove(&bin->slabs_full, slab);
}
static void
arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) {
- extent_t *slab;
+ edata_t *slab;
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
if (bin->slabcur != NULL) {
@@ -1073,13 +637,13 @@ arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) {
arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
}
- while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) != NULL) {
+ while ((slab = edata_heap_remove_first(&bin->slabs_nonfull)) != NULL) {
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
}
- for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
- slab = extent_list_first(&bin->slabs_full)) {
+ for (slab = edata_list_active_first(&bin->slabs_full); slab != NULL;
+ slab = edata_list_active_first(&bin->slabs_full)) {
arena_bin_slabs_full_remove(arena, bin, slab);
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
@@ -1111,16 +675,15 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
/* Large allocations. */
malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
- for (extent_t *extent = extent_list_first(&arena->large); extent !=
- NULL; extent = extent_list_first(&arena->large)) {
- void *ptr = extent_base_get(extent);
+ for (edata_t *edata = edata_list_active_first(&arena->large);
+ edata != NULL; edata = edata_list_active_first(&arena->large)) {
+ void *ptr = edata_base_get(edata);
size_t usize;
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
- alloc_ctx_t alloc_ctx;
- rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
- rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
+ emap_alloc_ctx_t alloc_ctx;
+ emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
+ &alloc_ctx);
assert(alloc_ctx.szind != SC_NSIZES);
if (config_stats || (config_prof && opt_prof)) {
@@ -1131,7 +694,7 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
if (config_prof && opt_prof) {
prof_free(tsd, ptr, usize, &alloc_ctx);
}
- large_dalloc(tsd_tsdn(tsd), extent);
+ large_dalloc(tsd_tsdn(tsd), edata);
malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
}
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
@@ -1139,32 +702,95 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
/* Bins. */
for (unsigned i = 0; i < SC_NBINS; i++) {
for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
- arena_bin_reset(tsd, arena,
- &arena->bins[i].bin_shards[j]);
+ arena_bin_reset(tsd, arena, arena_get_bin(arena, i, j));
}
}
+ pa_shard_reset(tsd_tsdn(tsd), &arena->pa_shard);
+}
+
+static void
+arena_prepare_base_deletion_sync_finish(tsd_t *tsd, malloc_mutex_t **mutexes,
+ unsigned n_mtx) {
+ for (unsigned i = 0; i < n_mtx; i++) {
+ malloc_mutex_lock(tsd_tsdn(tsd), mutexes[i]);
+ malloc_mutex_unlock(tsd_tsdn(tsd), mutexes[i]);
+ }
+}
+
+#define ARENA_DESTROY_MAX_DELAYED_MTX 32
+static void
+arena_prepare_base_deletion_sync(tsd_t *tsd, malloc_mutex_t *mtx,
+ malloc_mutex_t **delayed_mtx, unsigned *n_delayed) {
+ if (!malloc_mutex_trylock(tsd_tsdn(tsd), mtx)) {
+ /* No contention. */
+ malloc_mutex_unlock(tsd_tsdn(tsd), mtx);
+ return;
+ }
+ unsigned n = *n_delayed;
+ assert(n < ARENA_DESTROY_MAX_DELAYED_MTX);
+ /* Add another to the batch. */
+ delayed_mtx[n++] = mtx;
- atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
+ if (n == ARENA_DESTROY_MAX_DELAYED_MTX) {
+ arena_prepare_base_deletion_sync_finish(tsd, delayed_mtx, n);
+ n = 0;
+ }
+ *n_delayed = n;
}
static void
-arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
+arena_prepare_base_deletion(tsd_t *tsd, base_t *base_to_destroy) {
/*
- * Iterate over the retained extents and destroy them. This gives the
- * extent allocator underlying the extent hooks an opportunity to unmap
- * all retained memory without having to keep its own metadata
- * structures. In practice, virtual memory for dss-allocated extents is
- * leaked here, so best practice is to avoid dss for arenas to be
- * destroyed, or provide custom extent hooks that track retained
- * dss-based extents for later reuse.
+ * In order to coalesce, emap_try_acquire_edata_neighbor will attempt to
+ * check neighbor edata's state to determine eligibility. This means
+ * under certain conditions, the metadata from an arena can be accessed
+ * w/o holding any locks from that arena. In order to guarantee safe
+ * memory access, the metadata and the underlying base allocator needs
+ * to be kept alive, until all pending accesses are done.
+ *
+ * 1) with opt_retain, the arena boundary implies the is_head state
+ * (tracked in the rtree leaf), and the coalesce flow will stop at the
+ * head state branch. Therefore no cross arena metadata access
+ * possible.
+ *
+ * 2) w/o opt_retain, the arena id needs to be read from the edata_t,
+ * meaning read only cross-arena metadata access is possible. The
+ * coalesce attempt will stop at the arena_id mismatch, and is always
+ * under one of the ecache locks. To allow safe passthrough of such
+ * metadata accesses, the loop below will iterate through all manual
+ * arenas' ecache locks. As all the metadata from this base allocator
+ * have been unlinked from the rtree, after going through all the
+ * relevant ecache locks, it's safe to say that a) pending accesses are
+ * all finished, and b) no new access will be generated.
*/
- extent_hooks_t *extent_hooks = extent_hooks_get(arena);
- extent_t *extent;
- while ((extent = extents_evict(tsdn, arena, &extent_hooks,
- &arena->extents_retained, 0)) != NULL) {
- extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent);
+ if (opt_retain) {
+ return;
}
+ unsigned destroy_ind = base_ind_get(base_to_destroy);
+ assert(destroy_ind >= manual_arena_base);
+
+ tsdn_t *tsdn = tsd_tsdn(tsd);
+ malloc_mutex_t *delayed_mtx[ARENA_DESTROY_MAX_DELAYED_MTX];
+ unsigned n_delayed = 0, total = narenas_total_get();
+ for (unsigned i = 0; i < total; i++) {
+ if (i == destroy_ind) {
+ continue;
+ }
+ arena_t *arena = arena_get(tsdn, i, false);
+ if (arena == NULL) {
+ continue;
+ }
+ pac_t *pac = &arena->pa_shard.pac;
+ arena_prepare_base_deletion_sync(tsd, &pac->ecache_dirty.mtx,
+ delayed_mtx, &n_delayed);
+ arena_prepare_base_deletion_sync(tsd, &pac->ecache_muzzy.mtx,
+ delayed_mtx, &n_delayed);
+ arena_prepare_base_deletion_sync(tsd, &pac->ecache_retained.mtx,
+ delayed_mtx, &n_delayed);
+ }
+ arena_prepare_base_deletion_sync_finish(tsd, delayed_mtx, n_delayed);
}
+#undef ARENA_DESTROY_MAX_DELAYED_MTX
void
arena_destroy(tsd_t *tsd, arena_t *arena) {
@@ -1175,13 +801,10 @@ arena_destroy(tsd_t *tsd, arena_t *arena) {
/*
* No allocations have occurred since arena_reset() was called.
* Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
- * extents, so only retained extents may remain.
+ * extents, so only retained extents may remain and it's safe to call
+ * pa_shard_destroy_retained.
*/
- assert(extents_npages_get(&arena->extents_dirty) == 0);
- assert(extents_npages_get(&arena->extents_muzzy) == 0);
-
- /* Deallocate retained memory. */
- arena_destroy_retained(tsd_tsdn(tsd), arena);
+ pa_shard_destroy(tsd_tsdn(tsd), &arena->pa_shard);
/*
* Remove the arena pointer from the arenas array. We rely on the fact
@@ -1197,316 +820,370 @@ arena_destroy(tsd_t *tsd, arena_t *arena) {
/*
* Destroy the base allocator, which manages all metadata ever mapped by
- * this arena.
+ * this arena. The prepare function will make sure no pending access to
+ * the metadata in this base anymore.
*/
+ arena_prepare_base_deletion(tsd, arena->base);
base_delete(tsd_tsdn(tsd), arena->base);
}
-static extent_t *
-arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, const bin_info_t *bin_info,
- szind_t szind) {
- extent_t *slab;
- bool zero, commit;
-
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
-
- zero = false;
- commit = true;
- slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL,
- bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit);
-
- if (config_stats && slab != NULL) {
- arena_stats_mapped_add(tsdn, &arena->stats,
- bin_info->slab_size);
- }
-
- return slab;
-}
-
-static extent_t *
+static edata_t *
arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard,
const bin_info_t *bin_info) {
+ bool deferred_work_generated = false;
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
- extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
- szind_t szind = sz_size2index(bin_info->reg_size);
- bool zero = false;
- bool commit = true;
- extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks,
- &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true,
- binind, &zero, &commit);
- if (slab == NULL && arena_may_have_muzzy(arena)) {
- slab = extents_alloc(tsdn, arena, &extent_hooks,
- &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE,
- true, binind, &zero, &commit);
+ bool guarded = san_slab_extent_decide_guard(tsdn,
+ arena_get_ehooks(arena));
+ edata_t *slab = pa_alloc(tsdn, &arena->pa_shard, bin_info->slab_size,
+ /* alignment */ PAGE, /* slab */ true, /* szind */ binind,
+ /* zero */ false, guarded, &deferred_work_generated);
+
+ if (deferred_work_generated) {
+ arena_handle_deferred_work(tsdn, arena);
}
+
if (slab == NULL) {
- slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks,
- bin_info, szind);
- if (slab == NULL) {
- return NULL;
- }
+ return NULL;
}
- assert(extent_slab_get(slab));
+ assert(edata_slab_get(slab));
/* Initialize slab internals. */
- arena_slab_data_t *slab_data = extent_slab_data_get(slab);
- extent_nfree_binshard_set(slab, bin_info->nregs, binshard);
+ slab_data_t *slab_data = edata_slab_data_get(slab);
+ edata_nfree_binshard_set(slab, bin_info->nregs, binshard);
bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);
- arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);
-
return slab;
}
-static extent_t *
-arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
- szind_t binind, unsigned binshard) {
- extent_t *slab;
- const bin_info_t *bin_info;
-
- /* Look for a usable slab. */
- slab = arena_bin_slabs_nonfull_tryget(bin);
- if (slab != NULL) {
- return slab;
- }
- /* No existing slabs have any space available. */
-
- bin_info = &bin_infos[binind];
-
- /* Allocate a new slab. */
- malloc_mutex_unlock(tsdn, &bin->lock);
- /******************************/
- slab = arena_slab_alloc(tsdn, arena, binind, binshard, bin_info);
- /********************************/
- malloc_mutex_lock(tsdn, &bin->lock);
- if (slab != NULL) {
- if (config_stats) {
- bin->stats.nslabs++;
- bin->stats.curslabs++;
- }
- return slab;
+/*
+ * Before attempting the _with_fresh_slab approaches below, the _no_fresh_slab
+ * variants (i.e. through slabcur and nonfull) must be tried first.
+ */
+static void
+arena_bin_refill_slabcur_with_fresh_slab(tsdn_t *tsdn, arena_t *arena,
+ bin_t *bin, szind_t binind, edata_t *fresh_slab) {
+ malloc_mutex_assert_owner(tsdn, &bin->lock);
+ /* Only called after slabcur and nonfull both failed. */
+ assert(bin->slabcur == NULL);
+ assert(edata_heap_first(&bin->slabs_nonfull) == NULL);
+ assert(fresh_slab != NULL);
+
+ /* A new slab from arena_slab_alloc() */
+ assert(edata_nfree_get(fresh_slab) == bin_infos[binind].nregs);
+ if (config_stats) {
+ bin->stats.nslabs++;
+ bin->stats.curslabs++;
}
+ bin->slabcur = fresh_slab;
+}
- /*
- * arena_slab_alloc() failed, but another thread may have made
- * sufficient memory available while this one dropped bin->lock above,
- * so search one more time.
- */
- slab = arena_bin_slabs_nonfull_tryget(bin);
- if (slab != NULL) {
- return slab;
- }
+/* Refill slabcur and then alloc using the fresh slab */
+static void *
+arena_bin_malloc_with_fresh_slab(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
+ szind_t binind, edata_t *fresh_slab) {
+ malloc_mutex_assert_owner(tsdn, &bin->lock);
+ arena_bin_refill_slabcur_with_fresh_slab(tsdn, arena, bin, binind,
+ fresh_slab);
- return NULL;
+ return arena_slab_reg_alloc(bin->slabcur, &bin_infos[binind]);
}
-/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
-static void *
-arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
- szind_t binind, unsigned binshard) {
- const bin_info_t *bin_info;
- extent_t *slab;
+static bool
+arena_bin_refill_slabcur_no_fresh_slab(tsdn_t *tsdn, arena_t *arena,
+ bin_t *bin) {
+ malloc_mutex_assert_owner(tsdn, &bin->lock);
+ /* Only called after arena_slab_reg_alloc[_batch] failed. */
+ assert(bin->slabcur == NULL || edata_nfree_get(bin->slabcur) == 0);
- bin_info = &bin_infos[binind];
- if (!arena_is_auto(arena) && bin->slabcur != NULL) {
- arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
- bin->slabcur = NULL;
- }
- slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind, binshard);
if (bin->slabcur != NULL) {
- /*
- * Another thread updated slabcur while this one ran without the
- * bin lock in arena_bin_nonfull_slab_get().
- */
- if (extent_nfree_get(bin->slabcur) > 0) {
- void *ret = arena_slab_reg_alloc(bin->slabcur,
- bin_info);
- if (slab != NULL) {
- /*
- * arena_slab_alloc() may have allocated slab,
- * or it may have been pulled from
- * slabs_nonfull. Therefore it is unsafe to
- * make any assumptions about how slab has
- * previously been used, and
- * arena_bin_lower_slab() must be called, as if
- * a region were just deallocated from the slab.
- */
- if (extent_nfree_get(slab) == bin_info->nregs) {
- arena_dalloc_bin_slab(tsdn, arena, slab,
- bin);
- } else {
- arena_bin_lower_slab(tsdn, arena, slab,
- bin);
- }
- }
- return ret;
- }
-
arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
- bin->slabcur = NULL;
- }
-
- if (slab == NULL) {
- return NULL;
}
- bin->slabcur = slab;
- assert(extent_nfree_get(bin->slabcur) > 0);
+ /* Look for a usable slab. */
+ bin->slabcur = arena_bin_slabs_nonfull_tryget(bin);
+ assert(bin->slabcur == NULL || edata_nfree_get(bin->slabcur) > 0);
- return arena_slab_reg_alloc(slab, bin_info);
+ return (bin->slabcur == NULL);
}
-/* Choose a bin shard and return the locked bin. */
bin_t *
-arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind,
- unsigned *binshard) {
- bin_t *bin;
+arena_bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind,
+ unsigned *binshard_p) {
+ unsigned binshard;
if (tsdn_null(tsdn) || tsd_arena_get(tsdn_tsd(tsdn)) == NULL) {
- *binshard = 0;
+ binshard = 0;
} else {
- *binshard = tsd_binshardsp_get(tsdn_tsd(tsdn))->binshard[binind];
+ binshard = tsd_binshardsp_get(tsdn_tsd(tsdn))->binshard[binind];
}
- assert(*binshard < bin_infos[binind].n_shards);
- bin = &arena->bins[binind].bin_shards[*binshard];
- malloc_mutex_lock(tsdn, &bin->lock);
-
- return bin;
+ assert(binshard < bin_infos[binind].n_shards);
+ if (binshard_p != NULL) {
+ *binshard_p = binshard;
+ }
+ return arena_get_bin(arena, binind, binshard);
}
void
-arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
- cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) {
- unsigned i, nfill, cnt;
+arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena,
+ cache_bin_t *cache_bin, cache_bin_info_t *cache_bin_info, szind_t binind,
+ const unsigned nfill) {
+ assert(cache_bin_ncached_get_local(cache_bin, cache_bin_info) == 0);
+
+ const bin_info_t *bin_info = &bin_infos[binind];
+
+ CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nfill);
+ cache_bin_init_ptr_array_for_fill(cache_bin, cache_bin_info, &ptrs,
+ nfill);
+ /*
+ * Bin-local resources are used first: 1) bin->slabcur, and 2) nonfull
+ * slabs. After both are exhausted, new slabs will be allocated through
+ * arena_slab_alloc().
+ *
+ * Bin lock is only taken / released right before / after the while(...)
+ * refill loop, with new slab allocation (which has its own locking)
+ * kept outside of the loop. This setup facilitates flat combining, at
+ * the cost of the nested loop (through goto label_refill).
+ *
+ * To optimize for cases with contention and limited resources
+ * (e.g. hugepage-backed or non-overcommit arenas), each fill-iteration
+ * gets one chance of slab_alloc, and a retry of bin local resources
+	 * after the slab allocation (regardless of whether slab_alloc failed,
+	 * because the bin lock is dropped during the slab allocation).
+ *
+ * In other words, new slab allocation is allowed, as long as there was
+ * progress since the previous slab_alloc. This is tracked with
+ * made_progress below, initialized to true to jump start the first
+ * iteration.
+ *
+ * In other words (again), the loop will only terminate early (i.e. stop
+ * with filled < nfill) after going through the three steps: a) bin
+ * local exhausted, b) unlock and slab_alloc returns null, c) re-lock
+ * and bin local fails again.
+ */
+ bool made_progress = true;
+ edata_t *fresh_slab = NULL;
+ bool alloc_and_retry = false;
+ unsigned filled = 0;
+ unsigned binshard;
+ bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard);
+
+label_refill:
+ malloc_mutex_lock(tsdn, &bin->lock);
- assert(tbin->ncached == 0);
+ while (filled < nfill) {
+ /* Try batch-fill from slabcur first. */
+ edata_t *slabcur = bin->slabcur;
+ if (slabcur != NULL && edata_nfree_get(slabcur) > 0) {
+ unsigned tofill = nfill - filled;
+ unsigned nfree = edata_nfree_get(slabcur);
+ unsigned cnt = tofill < nfree ? tofill : nfree;
+
+ arena_slab_reg_alloc_batch(slabcur, bin_info, cnt,
+ &ptrs.ptr[filled]);
+ made_progress = true;
+ filled += cnt;
+ continue;
+ }
+ /* Next try refilling slabcur from nonfull slabs. */
+ if (!arena_bin_refill_slabcur_no_fresh_slab(tsdn, arena, bin)) {
+ assert(bin->slabcur != NULL);
+ continue;
+ }
+
+ /* Then see if a new slab was reserved already. */
+ if (fresh_slab != NULL) {
+ arena_bin_refill_slabcur_with_fresh_slab(tsdn, arena,
+ bin, binind, fresh_slab);
+ assert(bin->slabcur != NULL);
+ fresh_slab = NULL;
+ continue;
+ }
+
+ /* Try slab_alloc if made progress (or never did slab_alloc). */
+ if (made_progress) {
+ assert(bin->slabcur == NULL);
+ assert(fresh_slab == NULL);
+ alloc_and_retry = true;
+ /* Alloc a new slab then come back. */
+ break;
+ }
+
+ /* OOM. */
+
+ assert(fresh_slab == NULL);
+ assert(!alloc_and_retry);
+ break;
+ } /* while (filled < nfill) loop. */
- if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) {
- prof_idump(tsdn);
+ if (config_stats && !alloc_and_retry) {
+ bin->stats.nmalloc += filled;
+ bin->stats.nrequests += cache_bin->tstats.nrequests;
+ bin->stats.curregs += filled;
+ bin->stats.nfills++;
+ cache_bin->tstats.nrequests = 0;
+ }
+
+ malloc_mutex_unlock(tsdn, &bin->lock);
+
+ if (alloc_and_retry) {
+ assert(fresh_slab == NULL);
+ assert(filled < nfill);
+ assert(made_progress);
+
+ fresh_slab = arena_slab_alloc(tsdn, arena, binind, binshard,
+ bin_info);
+		/* fresh_slab NULL case handled in the while loop. */
+
+ alloc_and_retry = false;
+ made_progress = false;
+ goto label_refill;
}
+ assert(filled == nfill || (fresh_slab == NULL && !made_progress));
+ /* Release if allocated but not used. */
+ if (fresh_slab != NULL) {
+ assert(edata_nfree_get(fresh_slab) == bin_info->nregs);
+ arena_slab_dalloc(tsdn, arena, fresh_slab);
+ fresh_slab = NULL;
+ }
+
+ cache_bin_finish_fill(cache_bin, cache_bin_info, &ptrs, filled);
+ arena_decay_tick(tsdn, arena);
+}
+
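
For reference, the control flow described by the long comment in arena_cache_bin_fill_small() above (bin-local resources first, then a previously reserved fresh batch, then at most one unlocked allocation per pass, giving up only after a pass makes no progress) is easier to follow in isolation. The sketch below is illustrative only: the toy_* names and the constants 4 and 8 are invented for the example and are not jemalloc APIs or values.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

static pthread_mutex_t toy_lock = PTHREAD_MUTEX_INITIALIZER;
static size_t toy_local_avail = 4;	/* stand-in for slabcur + nonfull slabs */

/* Stand-in for arena_slab_alloc(); returns false on "OOM". */
static bool
toy_reserve_batch(size_t *out_batch) {
	*out_batch = 8;
	return true;
}

static size_t
toy_refill(size_t nfill) {
	bool made_progress = true;
	bool alloc_and_retry = false;
	size_t reserved = 0;	/* a "fresh slab" worth of items */
	size_t filled = 0;

label_refill:
	pthread_mutex_lock(&toy_lock);
	while (filled < nfill) {
		if (toy_local_avail > 0) {	/* 1) bin-local resources */
			size_t want = nfill - filled;
			size_t cnt = want < toy_local_avail ? want :
			    toy_local_avail;
			toy_local_avail -= cnt;
			filled += cnt;
			made_progress = true;
			continue;
		}
		if (reserved > 0) {		/* 2) reserved fresh batch */
			toy_local_avail = reserved;
			reserved = 0;
			continue;
		}
		if (made_progress) {		/* 3) one unlocked alloc per pass */
			alloc_and_retry = true;
			break;
		}
		break;				/* OOM: give up early */
	}
	pthread_mutex_unlock(&toy_lock);

	if (alloc_and_retry) {
		if (!toy_reserve_batch(&reserved)) {
			reserved = 0;		/* the NULL-slab case */
		}
		alloc_and_retry = false;
		made_progress = false;
		goto label_refill;
	}
	/* A reserved batch that went unused would be released here. */
	return filled;
}

As in the real code, the expensive reservation happens with the lock dropped, and an unused reservation is released after the loop rather than being held under the lock.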
+size_t
+arena_fill_small_fresh(tsdn_t *tsdn, arena_t *arena, szind_t binind,
+ void **ptrs, size_t nfill, bool zero) {
+ assert(binind < SC_NBINS);
+ const bin_info_t *bin_info = &bin_infos[binind];
+ const size_t nregs = bin_info->nregs;
+ assert(nregs > 0);
+ const size_t usize = bin_info->reg_size;
+
+ const bool manual_arena = !arena_is_auto(arena);
unsigned binshard;
- bin_t *bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard);
-
- for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
- tcache->lg_fill_div[binind]); i < nfill; i += cnt) {
- extent_t *slab;
- if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) >
- 0) {
- unsigned tofill = nfill - i;
- cnt = tofill < extent_nfree_get(slab) ?
- tofill : extent_nfree_get(slab);
- arena_slab_reg_alloc_batch(
- slab, &bin_infos[binind], cnt,
- tbin->avail - nfill + i);
- } else {
- cnt = 1;
- void *ptr = arena_bin_malloc_hard(tsdn, arena, bin,
- binind, binshard);
- /*
- * OOM. tbin->avail isn't yet filled down to its first
- * element, so the successful allocations (if any) must
- * be moved just before tbin->avail before bailing out.
- */
- if (ptr == NULL) {
- if (i > 0) {
- memmove(tbin->avail - i,
- tbin->avail - nfill,
- i * sizeof(void *));
- }
- break;
- }
- /* Insert such that low regions get used first. */
- *(tbin->avail - nfill + i) = ptr;
+ bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard);
+
+ size_t nslab = 0;
+ size_t filled = 0;
+ edata_t *slab = NULL;
+ edata_list_active_t fulls;
+ edata_list_active_init(&fulls);
+
+ while (filled < nfill && (slab = arena_slab_alloc(tsdn, arena, binind,
+ binshard, bin_info)) != NULL) {
+ assert((size_t)edata_nfree_get(slab) == nregs);
+ ++nslab;
+ size_t batch = nfill - filled;
+ if (batch > nregs) {
+ batch = nregs;
+ }
+ assert(batch > 0);
+ arena_slab_reg_alloc_batch(slab, bin_info, (unsigned)batch,
+ &ptrs[filled]);
+ assert(edata_addr_get(slab) == ptrs[filled]);
+ if (zero) {
+ memset(ptrs[filled], 0, batch * usize);
}
- if (config_fill && unlikely(opt_junk_alloc)) {
- for (unsigned j = 0; j < cnt; j++) {
- void* ptr = *(tbin->avail - nfill + i + j);
- arena_alloc_junk_small(ptr, &bin_infos[binind],
- true);
+ filled += batch;
+ if (batch == nregs) {
+ if (manual_arena) {
+ edata_list_active_append(&fulls, slab);
}
+ slab = NULL;
}
}
+
+ malloc_mutex_lock(tsdn, &bin->lock);
+ /*
+ * Only the last slab can be non-empty, and the last slab is non-empty
+ * iff slab != NULL.
+ */
+ if (slab != NULL) {
+ arena_bin_lower_slab(tsdn, arena, slab, bin);
+ }
+ if (manual_arena) {
+ edata_list_active_concat(&bin->slabs_full, &fulls);
+ }
+ assert(edata_list_active_empty(&fulls));
if (config_stats) {
- bin->stats.nmalloc += i;
- bin->stats.nrequests += tbin->tstats.nrequests;
- bin->stats.curregs += i;
- bin->stats.nfills++;
- tbin->tstats.nrequests = 0;
+ bin->stats.nslabs += nslab;
+ bin->stats.curslabs += nslab;
+ bin->stats.nmalloc += filled;
+ bin->stats.nrequests += filled;
+ bin->stats.curregs += filled;
}
malloc_mutex_unlock(tsdn, &bin->lock);
- tbin->ncached = i;
+
arena_decay_tick(tsdn, arena);
+ return filled;
}
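
The carving done by arena_fill_small_fresh() above — whole fresh slabs, each batch clamped to min(remaining, nregs), optional zeroing of the regions just handed out — can be sketched without any jemalloc machinery. This is an illustrative approximation only: toy_fill_fresh and its parameters are invented, and the real code additionally files full slabs and the partially used last slab back into the bin.

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

/*
 * Fill up to nfill pointers by carving fixed-size regions out of freshly
 * allocated chunks of nregs regions each; returns how many were filled.
 */
static size_t
toy_fill_fresh(void **ptrs, size_t nfill, size_t nregs, size_t usize,
    bool zero) {
	size_t filled = 0;
	while (filled < nfill) {
		char *chunk = malloc(nregs * usize);	/* a "fresh slab" */
		if (chunk == NULL) {
			break;			/* partial fill on OOM */
		}
		size_t batch = nfill - filled;
		if (batch > nregs) {
			batch = nregs;		/* clamp the last batch */
		}
		for (size_t i = 0; i < batch; i++) {
			ptrs[filled + i] = chunk + i * usize;
		}
		if (zero) {
			/* Regions are contiguous from the chunk start. */
			memset(ptrs[filled], 0, batch * usize);
		}
		filled += batch;
		/*
		 * Unlike the real code, the partially used last chunk is
		 * simply kept; jemalloc lowers it into the bin instead.
		 */
	}
	return filled;
}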
-void
-arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) {
- if (!zero) {
- memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size);
+/*
+ * Without allocating a new slab, try arena_slab_reg_alloc() and re-fill
+ * bin->slabcur if necessary.
+ */
+static void *
+arena_bin_malloc_no_fresh_slab(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
+ szind_t binind) {
+ malloc_mutex_assert_owner(tsdn, &bin->lock);
+ if (bin->slabcur == NULL || edata_nfree_get(bin->slabcur) == 0) {
+ if (arena_bin_refill_slabcur_no_fresh_slab(tsdn, arena, bin)) {
+ return NULL;
+ }
}
-}
-static void
-arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) {
- memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
+ assert(bin->slabcur != NULL && edata_nfree_get(bin->slabcur) > 0);
+ return arena_slab_reg_alloc(bin->slabcur, &bin_infos[binind]);
}
-arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small =
- arena_dalloc_junk_small_impl;
static void *
arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
- void *ret;
- bin_t *bin;
- size_t usize;
- extent_t *slab;
-
assert(binind < SC_NBINS);
- usize = sz_index2size(binind);
+ const bin_info_t *bin_info = &bin_infos[binind];
+ size_t usize = sz_index2size(binind);
unsigned binshard;
- bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard);
-
- if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
- ret = arena_slab_reg_alloc(slab, &bin_infos[binind]);
- } else {
- ret = arena_bin_malloc_hard(tsdn, arena, bin, binind, binshard);
- }
+ bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard);
+ malloc_mutex_lock(tsdn, &bin->lock);
+ edata_t *fresh_slab = NULL;
+ void *ret = arena_bin_malloc_no_fresh_slab(tsdn, arena, bin, binind);
if (ret == NULL) {
malloc_mutex_unlock(tsdn, &bin->lock);
- return NULL;
+ /******************************/
+ fresh_slab = arena_slab_alloc(tsdn, arena, binind, binshard,
+ bin_info);
+ /********************************/
+ malloc_mutex_lock(tsdn, &bin->lock);
+ /* Retry since the lock was dropped. */
+ ret = arena_bin_malloc_no_fresh_slab(tsdn, arena, bin, binind);
+ if (ret == NULL) {
+ if (fresh_slab == NULL) {
+ /* OOM */
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ return NULL;
+ }
+ ret = arena_bin_malloc_with_fresh_slab(tsdn, arena, bin,
+ binind, fresh_slab);
+ fresh_slab = NULL;
+ }
}
-
if (config_stats) {
bin->stats.nmalloc++;
bin->stats.nrequests++;
bin->stats.curregs++;
}
malloc_mutex_unlock(tsdn, &bin->lock);
- if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
- prof_idump(tsdn);
- }
- if (!zero) {
- if (config_fill) {
- if (unlikely(opt_junk_alloc)) {
- arena_alloc_junk_small(ret,
- &bin_infos[binind], false);
- } else if (unlikely(opt_zero)) {
- memset(ret, 0, usize);
- }
- }
- } else {
- if (config_fill && unlikely(opt_junk_alloc)) {
- arena_alloc_junk_small(ret, &bin_infos[binind],
- true);
- }
+ if (fresh_slab != NULL) {
+ arena_slab_dalloc(tsdn, arena, fresh_slab);
+ }
+ if (zero) {
memset(ret, 0, usize);
}
-
arena_decay_tick(tsdn, arena);
+
return ret;
}
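
arena_malloc_small() above is the single-object version of the drop-lock-allocate-retry idiom: try bin-local resources under the lock, drop the lock for the expensive slab allocation, retake the lock and retry in case another thread refilled, and discard the fresh allocation if it went unused. A minimal sketch of that idiom over a generic pool follows; every name here is invented and nothing is a jemalloc API.

#include <pthread.h>
#include <stddef.h>
#include <stdlib.h>

typedef struct toy_pool_s {
	pthread_mutex_t lock;
	void *cached;	/* stand-in for slabcur / nonfull slabs */
} toy_pool_t;

/* Caller must hold pool->lock. */
static void *
toy_pool_take_local(toy_pool_t *pool) {
	void *ret = pool->cached;
	pool->cached = NULL;
	return ret;
}

static void *
toy_pool_get(toy_pool_t *pool, size_t size) {
	pthread_mutex_lock(&pool->lock);
	void *ret = toy_pool_take_local(pool);
	void *fresh = NULL;
	if (ret == NULL) {
		/* Drop the lock around the expensive allocation. */
		pthread_mutex_unlock(&pool->lock);
		fresh = malloc(size);
		pthread_mutex_lock(&pool->lock);
		/* Retry: another thread may have refilled meanwhile. */
		ret = toy_pool_take_local(pool);
		if (ret == NULL) {
			ret = fresh;
			fresh = NULL;
		}
	}
	pthread_mutex_unlock(&pool->lock);
	free(fresh);	/* release the fresh allocation if it went unused */
	return ret;
}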
@@ -1533,10 +1210,17 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
bool zero, tcache_t *tcache) {
void *ret;
- if (usize <= SC_SMALL_MAXCLASS
- && (alignment < PAGE
- || (alignment == PAGE && (usize & PAGE_MASK) == 0))) {
+ if (usize <= SC_SMALL_MAXCLASS) {
/* Small; alignment doesn't require special slab placement. */
+
+ /* usize should be a result of sz_sa2u() */
+ assert((usize & (alignment - 1)) == 0);
+
+ /*
+ * Small usize can't come from an alignment larger than a page.
+ */
+ assert(alignment <= PAGE);
+
ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
zero, tcache, true);
} else {
@@ -1560,33 +1244,22 @@ arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) {
safety_check_set_redzone(ptr, usize, SC_LARGE_MINCLASS);
}
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
- extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true);
- arena_t *arena = extent_arena_get(extent);
+ edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
szind_t szind = sz_size2index(usize);
- extent_szind_set(extent, szind);
- rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
- szind, false);
-
- prof_accum_cancel(tsdn, &arena->prof_accum, usize);
+ edata_szind_set(edata, szind);
+ emap_remap(tsdn, &arena_emap_global, edata, szind, /* slab */ false);
assert(isalloc(tsdn, ptr) == usize);
}
static size_t
-arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
+arena_prof_demote(tsdn_t *tsdn, edata_t *edata, const void *ptr) {
cassert(config_prof);
assert(ptr != NULL);
- extent_szind_set(extent, SC_NBINS);
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
- rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
- SC_NBINS, false);
+ edata_szind_set(edata, SC_NBINS);
+ emap_remap(tsdn, &arena_emap_global, edata, SC_NBINS, /* slab */ false);
assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS);
@@ -1599,9 +1272,9 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
cassert(config_prof);
assert(opt_prof);
- extent_t *extent = iealloc(tsdn, ptr);
- size_t usize = extent_usize_get(extent);
- size_t bumped_usize = arena_prof_demote(tsdn, extent, ptr);
+ edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
+ size_t usize = edata_usize_get(edata);
+ size_t bumped_usize = arena_prof_demote(tsdn, edata, ptr);
if (config_opt_safety_checks && usize < SC_LARGE_MINCLASS) {
/*
* Currently, we only do redzoning for small sampled
@@ -1614,17 +1287,17 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
sz_size2index(bumped_usize), slow_path);
} else {
- large_dalloc(tsdn, extent);
+ large_dalloc(tsdn, edata);
}
}
static void
-arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
+arena_dissociate_bin_slab(arena_t *arena, edata_t *slab, bin_t *bin) {
/* Dissociate slab from bin. */
if (slab == bin->slabcur) {
bin->slabcur = NULL;
} else {
- szind_t binind = extent_szind_get(slab);
+ szind_t binind = edata_szind_get(slab);
const bin_info_t *bin_info = &bin_infos[binind];
/*
@@ -1641,24 +1314,9 @@ arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
}
static void
-arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
bin_t *bin) {
- assert(slab != bin->slabcur);
-
- malloc_mutex_unlock(tsdn, &bin->lock);
- /******************************/
- arena_slab_dalloc(tsdn, arena, slab);
- /****************************/
- malloc_mutex_lock(tsdn, &bin->lock);
- if (config_stats) {
- bin->stats.curslabs--;
- }
-}
-
-static void
-arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
- bin_t *bin) {
- assert(extent_nfree_get(slab) > 0);
+ assert(edata_nfree_get(slab) > 0);
/*
* Make sure that if bin->slabcur is non-NULL, it refers to the
@@ -1666,9 +1324,9 @@ arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
* than proactively keeping it pointing at the oldest/lowest non-full
* slab.
*/
- if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) {
+ if (bin->slabcur != NULL && edata_snad_comp(bin->slabcur, slab) > 0) {
/* Switch slabcur. */
- if (extent_nfree_get(bin->slabcur) > 0) {
+ if (edata_nfree_get(bin->slabcur) > 0) {
arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
} else {
arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
@@ -1683,56 +1341,54 @@ arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
}
static void
-arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
- szind_t binind, extent_t *slab, void *ptr, bool junked) {
- arena_slab_data_t *slab_data = extent_slab_data_get(slab);
- const bin_info_t *bin_info = &bin_infos[binind];
-
- if (!junked && config_fill && unlikely(opt_junk_free)) {
- arena_dalloc_junk_small(ptr, bin_info);
- }
-
- arena_slab_reg_dalloc(slab, slab_data, ptr);
- unsigned nfree = extent_nfree_get(slab);
- if (nfree == bin_info->nregs) {
- arena_dissociate_bin_slab(arena, slab, bin);
- arena_dalloc_bin_slab(tsdn, arena, slab, bin);
- } else if (nfree == 1 && slab != bin->slabcur) {
- arena_bin_slabs_full_remove(arena, bin, slab);
- arena_bin_lower_slab(tsdn, arena, slab, bin);
- }
+arena_dalloc_bin_slab_prepare(tsdn_t *tsdn, edata_t *slab, bin_t *bin) {
+ malloc_mutex_assert_owner(tsdn, &bin->lock);
+ assert(slab != bin->slabcur);
if (config_stats) {
- bin->stats.ndalloc++;
- bin->stats.curregs--;
+ bin->stats.curslabs--;
}
}
void
-arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
- szind_t binind, extent_t *extent, void *ptr) {
- arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr,
- true);
+arena_dalloc_bin_locked_handle_newly_empty(tsdn_t *tsdn, arena_t *arena,
+ edata_t *slab, bin_t *bin) {
+ arena_dissociate_bin_slab(arena, slab, bin);
+ arena_dalloc_bin_slab_prepare(tsdn, slab, bin);
+}
+
+void
+arena_dalloc_bin_locked_handle_newly_nonempty(tsdn_t *tsdn, arena_t *arena,
+ edata_t *slab, bin_t *bin) {
+ arena_bin_slabs_full_remove(arena, bin, slab);
+ arena_bin_lower_slab(tsdn, arena, slab, bin);
}
static void
-arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
- szind_t binind = extent_szind_get(extent);
- unsigned binshard = extent_binshard_get(extent);
- bin_t *bin = &arena->bins[binind].bin_shards[binshard];
+arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, edata_t *edata, void *ptr) {
+ szind_t binind = edata_szind_get(edata);
+ unsigned binshard = edata_binshard_get(edata);
+ bin_t *bin = arena_get_bin(arena, binind, binshard);
malloc_mutex_lock(tsdn, &bin->lock);
- arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr,
- false);
+ arena_dalloc_bin_locked_info_t info;
+ arena_dalloc_bin_locked_begin(&info, binind);
+ bool ret = arena_dalloc_bin_locked_step(tsdn, arena, bin,
+ &info, binind, edata, ptr);
+ arena_dalloc_bin_locked_finish(tsdn, arena, bin, &info);
malloc_mutex_unlock(tsdn, &bin->lock);
+
+ if (ret) {
+ arena_slab_dalloc(tsdn, arena, edata);
+ }
}
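
The rewritten arena_dalloc_bin() above records under the bin lock whether the slab became empty (via the begin/step/finish helpers) and performs the actual slab deallocation only after dropping the lock, instead of the old unlock/relock dance inside the helper. A stripped-down sketch of that shape, with invented toy_* names standing in for the jemalloc internals:

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

static pthread_mutex_t toy_bin_lock = PTHREAD_MUTEX_INITIALIZER;

/* Bookkeeping under the lock; returns whether the slab became empty. */
static bool
toy_record_free_locked(void *slab) {
	(void)slab;	/* real code would update nfree and the slab lists */
	return true;
}

static void
toy_dalloc(void *slab) {
	pthread_mutex_lock(&toy_bin_lock);
	bool now_empty = toy_record_free_locked(slab);
	pthread_mutex_unlock(&toy_bin_lock);

	if (now_empty) {
		free(slab);	/* the expensive step runs outside the lock */
	}
}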
void
arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
- extent_t *extent = iealloc(tsdn, ptr);
- arena_t *arena = extent_arena_get(extent);
+ edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
+ arena_t *arena = arena_get_from_edata(edata);
- arena_dalloc_bin(tsdn, arena, extent, ptr);
+ arena_dalloc_bin(tsdn, arena, edata, ptr);
arena_decay_tick(tsdn, arena);
}
@@ -1743,7 +1399,7 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
/* Calls with non-zero extra had to clamp extra. */
assert(extra == 0 || size + extra <= SC_LARGE_MAXCLASS);
- extent_t *extent = iealloc(tsdn, ptr);
+ edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
if (unlikely(size > SC_LARGE_MAXCLASS)) {
ret = true;
goto done;
@@ -1766,18 +1422,19 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
goto done;
}
- arena_decay_tick(tsdn, extent_arena_get(extent));
+ arena_t *arena = arena_get_from_edata(edata);
+ arena_decay_tick(tsdn, arena);
ret = false;
} else if (oldsize >= SC_LARGE_MINCLASS
&& usize_max >= SC_LARGE_MINCLASS) {
- ret = large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
+ ret = large_ralloc_no_move(tsdn, edata, usize_min, usize_max,
zero);
} else {
ret = true;
}
done:
- assert(extent == iealloc(tsdn, ptr));
- *newsize = extent_usize_get(extent);
+ assert(edata == emap_edata_lookup(tsdn, &arena_emap_global, ptr));
+ *newsize = edata_usize_get(edata);
return ret;
}
@@ -1800,7 +1457,7 @@ void *
arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
size_t size, size_t alignment, bool zero, tcache_t *tcache,
hook_ralloc_args_t *hook_args) {
- size_t usize = sz_s2u(size);
+ size_t usize = alignment == 0 ? sz_s2u(size) : sz_sa2u(size, alignment);
if (unlikely(usize == 0 || size > SC_LARGE_MAXCLASS)) {
return NULL;
}
@@ -1850,6 +1507,29 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
return ret;
}
+ehooks_t *
+arena_get_ehooks(arena_t *arena) {
+ return base_ehooks_get(arena->base);
+}
+
+extent_hooks_t *
+arena_set_extent_hooks(tsd_t *tsd, arena_t *arena,
+ extent_hooks_t *extent_hooks) {
+ background_thread_info_t *info;
+ if (have_background_thread) {
+ info = arena_background_thread_info_get(arena);
+ malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
+ }
+	/* Not using the HPA now that we have the custom hooks. */
+ pa_shard_disable_hpa(tsd_tsdn(tsd), &arena->pa_shard);
+ extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
+ if (have_background_thread) {
+ malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+ }
+
+ return ret;
+}
+
dss_prec_t
arena_dss_prec_get(arena_t *arena) {
return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE);
@@ -1871,7 +1551,7 @@ arena_dirty_decay_ms_default_get(void) {
bool
arena_dirty_decay_ms_default_set(ssize_t decay_ms) {
- if (!arena_decay_ms_valid(decay_ms)) {
+ if (!decay_ms_valid(decay_ms)) {
return true;
}
atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED);
@@ -1885,7 +1565,7 @@ arena_muzzy_decay_ms_default_get(void) {
bool
arena_muzzy_decay_ms_default_set(ssize_t decay_ms) {
- if (!arena_decay_ms_valid(decay_ms)) {
+ if (!decay_ms_valid(decay_ms)) {
return true;
}
atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED);
@@ -1896,26 +1576,8 @@ bool
arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit,
size_t *new_limit) {
assert(opt_retain);
-
- pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0);
- if (new_limit != NULL) {
- size_t limit = *new_limit;
- /* Grow no more than the new limit. */
- if ((new_ind = sz_psz2ind(limit + 1) - 1) >= SC_NPSIZES) {
- return true;
- }
- }
-
- malloc_mutex_lock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
- if (old_limit != NULL) {
- *old_limit = sz_pind2sz(arena->retain_grow_limit);
- }
- if (new_limit != NULL) {
- arena->retain_grow_limit = new_ind;
- }
- malloc_mutex_unlock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
-
- return false;
+ return pac_retain_grow_limit_get_set(tsd_tsdn(tsd),
+ &arena->pa_shard.pac, old_limit, new_limit);
}
unsigned
@@ -1933,13 +1595,8 @@ arena_nthreads_dec(arena_t *arena, bool internal) {
atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
}
-size_t
-arena_extent_sn_next(arena_t *arena) {
- return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED);
-}
-
arena_t *
-arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
+arena_new(tsdn_t *tsdn, unsigned ind, const arena_config_t *config) {
arena_t *arena;
base_t *base;
unsigned i;
@@ -1947,16 +1604,13 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
if (ind == 0) {
base = b0get();
} else {
- base = base_new(tsdn, ind, extent_hooks);
+ base = base_new(tsdn, ind, config->extent_hooks,
+ config->metadata_use_hooks);
if (base == NULL) {
return NULL;
}
}
- unsigned nbins_total = 0;
- for (i = 0; i < SC_NBINS; i++) {
- nbins_total += bin_infos[i].n_shards;
- }
size_t arena_size = sizeof(arena_t) + sizeof(bin_t) * nbins_total;
arena = (arena_t *)base_alloc(tsdn, base, arena_size, CACHELINE);
if (arena == NULL) {
@@ -1980,110 +1634,56 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
}
}
- if (config_prof) {
- if (prof_accum_init(tsdn, &arena->prof_accum)) {
- goto label_error;
- }
- }
-
- if (config_cache_oblivious) {
- /*
- * A nondeterministic seed based on the address of arena reduces
- * the likelihood of lockstep non-uniform cache index
- * utilization among identical concurrent processes, but at the
- * cost of test repeatability. For debug builds, instead use a
- * deterministic seed.
- */
- atomic_store_zu(&arena->offset_state, config_debug ? ind :
- (size_t)(uintptr_t)arena, ATOMIC_RELAXED);
- }
-
- atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED);
-
atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
ATOMIC_RELAXED);
- atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
-
- extent_list_init(&arena->large);
+ edata_list_active_init(&arena->large);
if (malloc_mutex_init(&arena->large_mtx, "arena_large",
WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
goto label_error;
}
- /*
- * Delay coalescing for dirty extents despite the disruptive effect on
- * memory layout for best-fit extent allocation, since cached extents
- * are likely to be reused soon after deallocation, and the cost of
- * merging/splitting extents is non-trivial.
- */
- if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty,
- true)) {
- goto label_error;
- }
- /*
- * Coalesce muzzy extents immediately, because operations on them are in
- * the critical path much less often than for dirty extents.
- */
- if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy,
- false)) {
- goto label_error;
- }
- /*
- * Coalesce retained extents immediately, in part because they will
- * never be evicted (and therefore there's no opportunity for delayed
- * coalescing), but also because operations on retained extents are not
- * in the critical path.
- */
- if (extents_init(tsdn, &arena->extents_retained, extent_state_retained,
- false)) {
- goto label_error;
- }
-
- if (arena_decay_init(&arena->decay_dirty,
- arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) {
- goto label_error;
- }
- if (arena_decay_init(&arena->decay_muzzy,
- arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) {
- goto label_error;
- }
-
- arena->extent_grow_next = sz_psz2ind(HUGEPAGE);
- arena->retain_grow_limit = sz_psz2ind(SC_LARGE_MAXCLASS);
- if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow",
- WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
- goto label_error;
- }
-
- extent_avail_new(&arena->extent_avail);
- if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail",
- WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) {
+ nstime_t cur_time;
+ nstime_init_update(&cur_time);
+ if (pa_shard_init(tsdn, &arena->pa_shard, &arena_pa_central_global,
+ &arena_emap_global, base, ind, &arena->stats.pa_shard_stats,
+ LOCKEDINT_MTX(arena->stats.mtx), &cur_time, oversize_threshold,
+ arena_dirty_decay_ms_default_get(),
+ arena_muzzy_decay_ms_default_get())) {
goto label_error;
}
/* Initialize bins. */
- uintptr_t bin_addr = (uintptr_t)arena + sizeof(arena_t);
atomic_store_u(&arena->binshard_next, 0, ATOMIC_RELEASE);
- for (i = 0; i < SC_NBINS; i++) {
- unsigned nshards = bin_infos[i].n_shards;
- arena->bins[i].bin_shards = (bin_t *)bin_addr;
- bin_addr += nshards * sizeof(bin_t);
- for (unsigned j = 0; j < nshards; j++) {
- bool err = bin_init(&arena->bins[i].bin_shards[j]);
- if (err) {
- goto label_error;
- }
+ for (i = 0; i < nbins_total; i++) {
+ bool err = bin_init(&arena->bins[i]);
+ if (err) {
+ goto label_error;
}
}
- assert(bin_addr == (uintptr_t)arena + arena_size);
arena->base = base;
/* Set arena before creating background threads. */
arena_set(ind, arena);
+ arena->ind = ind;
- nstime_init(&arena->create_time, 0);
- nstime_update(&arena->create_time);
+ nstime_init_update(&arena->create_time);
+
+ /*
+	 * We turn on the HPA if opt_hpa is set. There are two exceptions:
+ * - Custom extent hooks (we should only return memory allocated from
+ * them in that case).
+ * - Arena 0 initialization. In this case, we're mid-bootstrapping, and
+ * so arena_hpa_global is not yet initialized.
+ */
+ if (opt_hpa && ehooks_are_default(base_ehooks_get(base)) && ind != 0) {
+ hpa_shard_opts_t hpa_shard_opts = opt_hpa_opts;
+ hpa_shard_opts.deferral_allowed = background_thread_enabled();
+ if (pa_shard_enable_hpa(tsdn, &arena->pa_shard,
+ &hpa_shard_opts, &opt_hpa_sec_opts)) {
+ goto label_error;
+ }
+ }
/* We don't support reentrancy for arena 0 bootstrapping. */
if (ind != 0) {
@@ -2129,10 +1729,12 @@ arena_choose_huge(tsd_t *tsd) {
* expected for huge allocations.
*/
if (arena_dirty_decay_ms_default_get() > 0) {
- arena_dirty_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0);
+ arena_decay_ms_set(tsd_tsdn(tsd), huge_arena,
+ extent_state_dirty, 0);
}
if (arena_muzzy_decay_ms_default_get() > 0) {
- arena_muzzy_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0);
+ arena_decay_ms_set(tsd_tsdn(tsd), huge_arena,
+ extent_state_muzzy, 0);
}
}
@@ -2167,8 +1769,8 @@ arena_is_huge(unsigned arena_ind) {
return (arena_ind == huge_arena_ind);
}
-void
-arena_boot(sc_data_t *sc_data) {
+bool
+arena_boot(sc_data_t *sc_data, base_t *base, bool hpa) {
arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
for (unsigned i = 0; i < SC_NBINS; i++) {
@@ -2176,12 +1778,20 @@ arena_boot(sc_data_t *sc_data) {
div_init(&arena_binind_div_info[i],
(1U << sc->lg_base) + (sc->ndelta << sc->lg_delta));
}
+
+ uint32_t cur_offset = (uint32_t)offsetof(arena_t, bins);
+ for (szind_t i = 0; i < SC_NBINS; i++) {
+ arena_bin_offsets[i] = cur_offset;
+ nbins_total += bin_infos[i].n_shards;
+ cur_offset += (uint32_t)(bin_infos[i].n_shards * sizeof(bin_t));
+ }
+ return pa_central_init(&arena_pa_central_global, base, hpa,
+ &hpa_hooks_default);
}
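
The new arena_boot() above flattens all bin shards into one trailing array and records a per-size-class byte offset into it. A standalone sketch of the same layout trick follows; the lookup helper at the end is an assumed counterpart of arena_get_bin(), whose definition is not part of this diff, and all toy_* names and sizes are invented.

#include <stddef.h>
#include <stdint.h>

#define TOY_NBINS 3

typedef struct toy_bin_s { uint64_t stats[4]; } toy_bin_t;
typedef struct toy_arena_s {
	int ind;
	toy_bin_t bins[];	/* flat array: all shards of all bins */
} toy_arena_t;

static uint32_t toy_bin_offsets[TOY_NBINS];
static unsigned toy_nbins_total;

static void
toy_boot(const unsigned *n_shards) {
	uint32_t cur_offset = (uint32_t)offsetof(toy_arena_t, bins);
	for (unsigned i = 0; i < TOY_NBINS; i++) {
		toy_bin_offsets[i] = cur_offset;
		toy_nbins_total += n_shards[i];
		cur_offset += (uint32_t)(n_shards[i] * sizeof(toy_bin_t));
	}
}

/* Shard binshard of bin binind, found via the offset table. */
static toy_bin_t *
toy_get_bin(toy_arena_t *arena, unsigned binind, unsigned binshard) {
	return (toy_bin_t *)((uintptr_t)arena + toy_bin_offsets[binind] +
	    binshard * sizeof(toy_bin_t));
}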
void
arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
- malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx);
- malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx);
+ pa_shard_prefork0(tsdn, &arena->pa_shard);
}
void
@@ -2193,59 +1803,50 @@ arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
void
arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
- malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx);
+ pa_shard_prefork2(tsdn, &arena->pa_shard);
}
void
arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
- extents_prefork(tsdn, &arena->extents_dirty);
- extents_prefork(tsdn, &arena->extents_muzzy);
- extents_prefork(tsdn, &arena->extents_retained);
+ pa_shard_prefork3(tsdn, &arena->pa_shard);
}
void
arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
- malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx);
+ pa_shard_prefork4(tsdn, &arena->pa_shard);
}
void
arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
- base_prefork(tsdn, arena->base);
+ pa_shard_prefork5(tsdn, &arena->pa_shard);
}
void
arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
- malloc_mutex_prefork(tsdn, &arena->large_mtx);
+ base_prefork(tsdn, arena->base);
}
void
arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
- for (unsigned i = 0; i < SC_NBINS; i++) {
- for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
- bin_prefork(tsdn, &arena->bins[i].bin_shards[j]);
- }
+ malloc_mutex_prefork(tsdn, &arena->large_mtx);
+}
+
+void
+arena_prefork8(tsdn_t *tsdn, arena_t *arena) {
+ for (unsigned i = 0; i < nbins_total; i++) {
+ bin_prefork(tsdn, &arena->bins[i]);
}
}
void
arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
- unsigned i;
-
- for (i = 0; i < SC_NBINS; i++) {
- for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
- bin_postfork_parent(tsdn,
- &arena->bins[i].bin_shards[j]);
- }
+ for (unsigned i = 0; i < nbins_total; i++) {
+ bin_postfork_parent(tsdn, &arena->bins[i]);
}
+
malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
base_postfork_parent(tsdn, arena->base);
- malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx);
- extents_postfork_parent(tsdn, &arena->extents_dirty);
- extents_postfork_parent(tsdn, &arena->extents_muzzy);
- extents_postfork_parent(tsdn, &arena->extents_retained);
- malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx);
- malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
- malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
+ pa_shard_postfork_parent(tsdn, &arena->pa_shard);
if (config_stats) {
malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
}
@@ -2253,8 +1854,6 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
void
arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
- unsigned i;
-
atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) {
@@ -2266,32 +1865,26 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
if (config_stats) {
ql_new(&arena->tcache_ql);
ql_new(&arena->cache_bin_array_descriptor_ql);
- tcache_t *tcache = tcache_get(tsdn_tsd(tsdn));
- if (tcache != NULL && tcache->arena == arena) {
- ql_elm_new(tcache, link);
- ql_tail_insert(&arena->tcache_ql, tcache, link);
+ tcache_slow_t *tcache_slow = tcache_slow_get(tsdn_tsd(tsdn));
+ if (tcache_slow != NULL && tcache_slow->arena == arena) {
+ tcache_t *tcache = tcache_slow->tcache;
+ ql_elm_new(tcache_slow, link);
+ ql_tail_insert(&arena->tcache_ql, tcache_slow, link);
cache_bin_array_descriptor_init(
- &tcache->cache_bin_array_descriptor,
- tcache->bins_small, tcache->bins_large);
+ &tcache_slow->cache_bin_array_descriptor,
+ tcache->bins);
ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
- &tcache->cache_bin_array_descriptor, link);
+ &tcache_slow->cache_bin_array_descriptor, link);
}
}
- for (i = 0; i < SC_NBINS; i++) {
- for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
- bin_postfork_child(tsdn, &arena->bins[i].bin_shards[j]);
- }
+ for (unsigned i = 0; i < nbins_total; i++) {
+ bin_postfork_child(tsdn, &arena->bins[i]);
}
+
malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
base_postfork_child(tsdn, arena->base);
- malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx);
- extents_postfork_child(tsdn, &arena->extents_dirty);
- extents_postfork_child(tsdn, &arena->extents_muzzy);
- extents_postfork_child(tsdn, &arena->extents_retained);
- malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx);
- malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
- malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);
+ pa_shard_postfork_child(tsdn, &arena->pa_shard);
if (config_stats) {
malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
}
diff --git a/deps/jemalloc/src/background_thread.c b/deps/jemalloc/src/background_thread.c
index 57b9b256b..3bb8d26cd 100644
--- a/deps/jemalloc/src/background_thread.c
+++ b/deps/jemalloc/src/background_thread.c
@@ -1,4 +1,3 @@
-#define JEMALLOC_BACKGROUND_THREAD_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
@@ -54,8 +53,9 @@ pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr,
bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED
bool background_threads_enable(tsd_t *tsd) NOT_REACHED
bool background_threads_disable(tsd_t *tsd) NOT_REACHED
-void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
- arena_decay_t *decay, size_t npages_new) NOT_REACHED
+bool background_thread_is_started(background_thread_info_t *info) NOT_REACHED
+void background_thread_wakeup_early(background_thread_info_t *info,
+ nstime_t *remaining_sleep) NOT_REACHED
void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED
void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED
void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED
@@ -74,7 +74,7 @@ background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) {
info->npages_to_purge_new = 0;
if (config_stats) {
info->tot_n_runs = 0;
- nstime_init(&info->tot_sleep_time, 0);
+ nstime_init_zero(&info->tot_sleep_time);
}
}
@@ -82,136 +82,40 @@ static inline bool
set_current_thread_affinity(int cpu) {
#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
cpu_set_t cpuset;
+#else
+# ifndef __NetBSD__
+ cpuset_t cpuset;
+# else
+ cpuset_t *cpuset;
+# endif
+#endif
+
+#ifndef __NetBSD__
CPU_ZERO(&cpuset);
CPU_SET(cpu, &cpuset);
- int ret = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);
+#else
+ cpuset = cpuset_create();
+#endif
- return (ret != 0);
+#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
+ return (sched_setaffinity(0, sizeof(cpu_set_t), &cpuset) != 0);
#else
- return false;
+# ifndef __NetBSD__
+ int ret = pthread_setaffinity_np(pthread_self(), sizeof(cpuset_t),
+ &cpuset);
+# else
+ int ret = pthread_setaffinity_np(pthread_self(), cpuset_size(cpuset),
+ cpuset);
+ cpuset_destroy(cpuset);
+# endif
+ return ret != 0;
#endif
}
-/* Threshold for determining when to wake up the background thread. */
-#define BACKGROUND_THREAD_NPAGES_THRESHOLD UINT64_C(1024)
#define BILLION UINT64_C(1000000000)
/* Minimal sleep interval 100 ms. */
#define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10)
-static inline size_t
-decay_npurge_after_interval(arena_decay_t *decay, size_t interval) {
- size_t i;
- uint64_t sum = 0;
- for (i = 0; i < interval; i++) {
- sum += decay->backlog[i] * h_steps[i];
- }
- for (; i < SMOOTHSTEP_NSTEPS; i++) {
- sum += decay->backlog[i] * (h_steps[i] - h_steps[i - interval]);
- }
-
- return (size_t)(sum >> SMOOTHSTEP_BFP);
-}
-
-static uint64_t
-arena_decay_compute_purge_interval_impl(tsdn_t *tsdn, arena_decay_t *decay,
- extents_t *extents) {
- if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
- /* Use minimal interval if decay is contended. */
- return BACKGROUND_THREAD_MIN_INTERVAL_NS;
- }
-
- uint64_t interval;
- ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
- if (decay_time <= 0) {
- /* Purging is eagerly done or disabled currently. */
- interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
- goto label_done;
- }
-
- uint64_t decay_interval_ns = nstime_ns(&decay->interval);
- assert(decay_interval_ns > 0);
- size_t npages = extents_npages_get(extents);
- if (npages == 0) {
- unsigned i;
- for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
- if (decay->backlog[i] > 0) {
- break;
- }
- }
- if (i == SMOOTHSTEP_NSTEPS) {
- /* No dirty pages recorded. Sleep indefinitely. */
- interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
- goto label_done;
- }
- }
- if (npages <= BACKGROUND_THREAD_NPAGES_THRESHOLD) {
- /* Use max interval. */
- interval = decay_interval_ns * SMOOTHSTEP_NSTEPS;
- goto label_done;
- }
-
- size_t lb = BACKGROUND_THREAD_MIN_INTERVAL_NS / decay_interval_ns;
- size_t ub = SMOOTHSTEP_NSTEPS;
- /* Minimal 2 intervals to ensure reaching next epoch deadline. */
- lb = (lb < 2) ? 2 : lb;
- if ((decay_interval_ns * ub <= BACKGROUND_THREAD_MIN_INTERVAL_NS) ||
- (lb + 2 > ub)) {
- interval = BACKGROUND_THREAD_MIN_INTERVAL_NS;
- goto label_done;
- }
-
- assert(lb + 2 <= ub);
- size_t npurge_lb, npurge_ub;
- npurge_lb = decay_npurge_after_interval(decay, lb);
- if (npurge_lb > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
- interval = decay_interval_ns * lb;
- goto label_done;
- }
- npurge_ub = decay_npurge_after_interval(decay, ub);
- if (npurge_ub < BACKGROUND_THREAD_NPAGES_THRESHOLD) {
- interval = decay_interval_ns * ub;
- goto label_done;
- }
-
- unsigned n_search = 0;
- size_t target, npurge;
- while ((npurge_lb + BACKGROUND_THREAD_NPAGES_THRESHOLD < npurge_ub)
- && (lb + 2 < ub)) {
- target = (lb + ub) / 2;
- npurge = decay_npurge_after_interval(decay, target);
- if (npurge > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
- ub = target;
- npurge_ub = npurge;
- } else {
- lb = target;
- npurge_lb = npurge;
- }
- assert(n_search++ < lg_floor(SMOOTHSTEP_NSTEPS) + 1);
- }
- interval = decay_interval_ns * (ub + lb) / 2;
-label_done:
- interval = (interval < BACKGROUND_THREAD_MIN_INTERVAL_NS) ?
- BACKGROUND_THREAD_MIN_INTERVAL_NS : interval;
- malloc_mutex_unlock(tsdn, &decay->mtx);
-
- return interval;
-}
-
-/* Compute purge interval for background threads. */
-static uint64_t
-arena_decay_compute_purge_interval(tsdn_t *tsdn, arena_t *arena) {
- uint64_t i1, i2;
- i1 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_dirty,
- &arena->extents_dirty);
- if (i1 == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
- return i1;
- }
- i2 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_muzzy,
- &arena->extents_muzzy);
-
- return i1 < i2 ? i1 : i2;
-}
-
static void
background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
uint64_t interval) {
@@ -228,7 +132,8 @@ background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
int ret;
if (interval == BACKGROUND_THREAD_INDEFINITE_SLEEP) {
- assert(background_thread_indefinite_sleep(info));
+ background_thread_wakeup_time_set(tsdn, info,
+ BACKGROUND_THREAD_INDEFINITE_SLEEP);
ret = pthread_cond_wait(&info->cond, &info->mtx.lock);
assert(ret == 0);
} else {
@@ -236,8 +141,7 @@ background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP);
/* We need malloc clock (can be different from tv). */
nstime_t next_wakeup;
- nstime_init(&next_wakeup, 0);
- nstime_update(&next_wakeup);
+ nstime_init_update(&next_wakeup);
nstime_iadd(&next_wakeup, interval);
assert(nstime_ns(&next_wakeup) <
BACKGROUND_THREAD_INDEFINITE_SLEEP);
@@ -254,8 +158,6 @@ background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
assert(!background_thread_indefinite_sleep(info));
ret = pthread_cond_timedwait(&info->cond, &info->mtx.lock, &ts);
assert(ret == ETIMEDOUT || ret == 0);
- background_thread_wakeup_time_set(tsdn, info,
- BACKGROUND_THREAD_INDEFINITE_SLEEP);
}
if (config_stats) {
gettimeofday(&tv, NULL);
@@ -283,28 +185,48 @@ background_thread_pause_check(tsdn_t *tsdn, background_thread_info_t *info) {
}
static inline void
-background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info, unsigned ind) {
- uint64_t min_interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
+background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info,
+ unsigned ind) {
+ uint64_t ns_until_deferred = BACKGROUND_THREAD_DEFERRED_MAX;
unsigned narenas = narenas_total_get();
+ bool slept_indefinitely = background_thread_indefinite_sleep(info);
for (unsigned i = ind; i < narenas; i += max_background_threads) {
arena_t *arena = arena_get(tsdn, i, false);
if (!arena) {
continue;
}
- arena_decay(tsdn, arena, true, false);
- if (min_interval == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
+ /*
+		 * If the thread was woken up from the indefinite sleep, don't
+		 * do the work instantly, but rather check when the deferred
+		 * work that caused this thread to wake up is scheduled.
+ */
+ if (!slept_indefinitely) {
+ arena_do_deferred_work(tsdn, arena);
+ }
+ if (ns_until_deferred <= BACKGROUND_THREAD_MIN_INTERVAL_NS) {
/* Min interval will be used. */
continue;
}
- uint64_t interval = arena_decay_compute_purge_interval(tsdn,
- arena);
- assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS);
- if (min_interval > interval) {
- min_interval = interval;
+ uint64_t ns_arena_deferred = pa_shard_time_until_deferred_work(
+ tsdn, &arena->pa_shard);
+ if (ns_arena_deferred < ns_until_deferred) {
+ ns_until_deferred = ns_arena_deferred;
}
}
- background_thread_sleep(tsdn, info, min_interval);
+
+ uint64_t sleep_ns;
+ if (ns_until_deferred == BACKGROUND_THREAD_DEFERRED_MAX) {
+ sleep_ns = BACKGROUND_THREAD_INDEFINITE_SLEEP;
+ } else {
+ sleep_ns =
+ (ns_until_deferred < BACKGROUND_THREAD_MIN_INTERVAL_NS)
+ ? BACKGROUND_THREAD_MIN_INTERVAL_NS
+ : ns_until_deferred;
+
+ }
+
+ background_thread_sleep(tsdn, info, sleep_ns);
}
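
The sleep computation above reduces to: take the minimum "time until deferred work" across arenas, treat "no deferred work anywhere" as an indefinite sleep, and otherwise clamp to the minimum wakeup interval. A sketch under those assumptions follows; the TOY_* constants stand in for BACKGROUND_THREAD_DEFERRED_MAX and BACKGROUND_THREAD_INDEFINITE_SLEEP, whose actual values are not shown in this diff, and only the 100 ms minimum matches a value defined above.

#include <stddef.h>
#include <stdint.h>

#define TOY_DEFERRED_MAX	UINT64_MAX
#define TOY_SLEEP_INDEFINITE	UINT64_MAX
#define TOY_MIN_INTERVAL_NS	(UINT64_C(1000000000) / 10)	/* 100 ms */

/* Pick a sleep time from per-arena "time until deferred work" estimates. */
static uint64_t
toy_sleep_ns(const uint64_t *ns_until_deferred, size_t narenas) {
	uint64_t min_ns = TOY_DEFERRED_MAX;
	for (size_t i = 0; i < narenas; i++) {
		if (ns_until_deferred[i] < min_ns) {
			min_ns = ns_until_deferred[i];
		}
	}
	if (min_ns == TOY_DEFERRED_MAX) {
		return TOY_SLEEP_INDEFINITE;	/* nothing scheduled anywhere */
	}
	return min_ns < TOY_MIN_INTERVAL_NS ? TOY_MIN_INTERVAL_NS : min_ns;
}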
static bool
@@ -508,7 +430,7 @@ background_thread_entry(void *ind_arg) {
assert(thread_ind < max_background_threads);
#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
pthread_setname_np(pthread_self(), "jemalloc_bg_thd");
-#elif defined(__FreeBSD__)
+#elif defined(__FreeBSD__) || defined(__DragonFly__)
pthread_set_name_np(pthread_self(), "jemalloc_bg_thd");
#endif
if (opt_percpu_arena != percpu_arena_disabled) {
@@ -608,16 +530,16 @@ background_threads_enable(tsd_t *tsd) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
VARIABLE_ARRAY(bool, marked, max_background_threads);
- unsigned i, nmarked;
- for (i = 0; i < max_background_threads; i++) {
+ unsigned nmarked;
+ for (unsigned i = 0; i < max_background_threads; i++) {
marked[i] = false;
}
nmarked = 0;
/* Thread 0 is required and created at the end. */
marked[0] = true;
/* Mark the threads we need to create for thread 0. */
- unsigned n = narenas_total_get();
- for (i = 1; i < n; i++) {
+ unsigned narenas = narenas_total_get();
+ for (unsigned i = 1; i < narenas; i++) {
if (marked[i % max_background_threads] ||
arena_get(tsd_tsdn(tsd), i, false) == NULL) {
continue;
@@ -634,7 +556,18 @@ background_threads_enable(tsd_t *tsd) {
}
}
- return background_thread_create_locked(tsd, 0);
+ bool err = background_thread_create_locked(tsd, 0);
+ if (err) {
+ return true;
+ }
+ for (unsigned i = 0; i < narenas; i++) {
+ arena_t *arena = arena_get(tsd_tsdn(tsd), i, false);
+ if (arena != NULL) {
+ pa_shard_set_deferral_allowed(tsd_tsdn(tsd),
+ &arena->pa_shard, true);
+ }
+ }
+ return false;
}
bool
@@ -648,92 +581,36 @@ background_threads_disable(tsd_t *tsd) {
return true;
}
assert(n_background_threads == 0);
+ unsigned narenas = narenas_total_get();
+ for (unsigned i = 0; i < narenas; i++) {
+ arena_t *arena = arena_get(tsd_tsdn(tsd), i, false);
+ if (arena != NULL) {
+ pa_shard_set_deferral_allowed(tsd_tsdn(tsd),
+ &arena->pa_shard, false);
+ }
+ }
return false;
}
-/* Check if we need to signal the background thread early. */
+bool
+background_thread_is_started(background_thread_info_t *info) {
+ return info->state == background_thread_started;
+}
+
void
-background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
- arena_decay_t *decay, size_t npages_new) {
- background_thread_info_t *info = arena_background_thread_info_get(
- arena);
- if (malloc_mutex_trylock(tsdn, &info->mtx)) {
- /*
- * Background thread may hold the mutex for a long period of
- * time. We'd like to avoid the variance on application
- * threads. So keep this non-blocking, and leave the work to a
- * future epoch.
- */
+background_thread_wakeup_early(background_thread_info_t *info,
+ nstime_t *remaining_sleep) {
+ /*
+ * This is an optimization to increase batching. At this point
+	 * we know that the background thread will wake up soon, so the time
+	 * to cache the just-freed memory is bounded and low.
+ */
+ if (remaining_sleep != NULL && nstime_ns(remaining_sleep) <
+ BACKGROUND_THREAD_MIN_INTERVAL_NS) {
return;
}
-
- if (info->state != background_thread_started) {
- goto label_done;
- }
- if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
- goto label_done;
- }
-
- ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
- if (decay_time <= 0) {
- /* Purging is eagerly done or disabled currently. */
- goto label_done_unlock2;
- }
- uint64_t decay_interval_ns = nstime_ns(&decay->interval);
- assert(decay_interval_ns > 0);
-
- nstime_t diff;
- nstime_init(&diff, background_thread_wakeup_time_get(info));
- if (nstime_compare(&diff, &decay->epoch) <= 0) {
- goto label_done_unlock2;
- }
- nstime_subtract(&diff, &decay->epoch);
- if (nstime_ns(&diff) < BACKGROUND_THREAD_MIN_INTERVAL_NS) {
- goto label_done_unlock2;
- }
-
- if (npages_new > 0) {
- size_t n_epoch = (size_t)(nstime_ns(&diff) / decay_interval_ns);
- /*
- * Compute how many new pages we would need to purge by the next
- * wakeup, which is used to determine if we should signal the
- * background thread.
- */
- uint64_t npurge_new;
- if (n_epoch >= SMOOTHSTEP_NSTEPS) {
- npurge_new = npages_new;
- } else {
- uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1];
- assert(h_steps_max >=
- h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
- npurge_new = npages_new * (h_steps_max -
- h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
- npurge_new >>= SMOOTHSTEP_BFP;
- }
- info->npages_to_purge_new += npurge_new;
- }
-
- bool should_signal;
- if (info->npages_to_purge_new > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
- should_signal = true;
- } else if (unlikely(background_thread_indefinite_sleep(info)) &&
- (extents_npages_get(&arena->extents_dirty) > 0 ||
- extents_npages_get(&arena->extents_muzzy) > 0 ||
- info->npages_to_purge_new > 0)) {
- should_signal = true;
- } else {
- should_signal = false;
- }
-
- if (should_signal) {
- info->npages_to_purge_new = 0;
- pthread_cond_signal(&info->cond);
- }
-label_done_unlock2:
- malloc_mutex_unlock(tsdn, &decay->mtx);
-label_done:
- malloc_mutex_unlock(tsdn, &info->mtx);
+ pthread_cond_signal(&info->cond);
}
void
@@ -794,9 +671,11 @@ background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
return true;
}
- stats->num_threads = n_background_threads;
+ nstime_init_zero(&stats->run_interval);
+ memset(&stats->max_counter_per_bg_thd, 0, sizeof(mutex_prof_data_t));
+
uint64_t num_runs = 0;
- nstime_init(&stats->run_interval, 0);
+ stats->num_threads = n_background_threads;
for (unsigned i = 0; i < max_background_threads; i++) {
background_thread_info_t *info = &background_thread_info[i];
if (malloc_mutex_trylock(tsdn, &info->mtx)) {
@@ -809,6 +688,8 @@ background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
if (info->state != background_thread_stopped) {
num_runs += info->tot_n_runs;
nstime_add(&stats->run_interval, &info->tot_sleep_time);
+ malloc_mutex_prof_max_update(tsdn,
+ &stats->max_counter_per_bg_thd, &info->mtx);
}
malloc_mutex_unlock(tsdn, &info->mtx);
}
@@ -892,7 +773,7 @@ background_thread_boot0(void) {
}
bool
-background_thread_boot1(tsdn_t *tsdn) {
+background_thread_boot1(tsdn_t *tsdn, base_t *base) {
#ifdef JEMALLOC_BACKGROUND_THREAD
assert(have_background_thread);
assert(narenas_total_get() > 0);
@@ -911,7 +792,7 @@ background_thread_boot1(tsdn_t *tsdn) {
}
background_thread_info = (background_thread_info_t *)base_alloc(tsdn,
- b0get(), opt_max_background_threads *
+ base, opt_max_background_threads *
sizeof(background_thread_info_t), CACHELINE);
if (background_thread_info == NULL) {
return true;
diff --git a/deps/jemalloc/src/base.c b/deps/jemalloc/src/base.c
index f3c61661a..7f4d67564 100644
--- a/deps/jemalloc/src/base.c
+++ b/deps/jemalloc/src/base.c
@@ -1,4 +1,3 @@
-#define JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
@@ -7,6 +6,15 @@
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sz.h"
+/*
+ * In auto mode, arenas switch to huge pages for the base allocator on the
+ * second base block. a0 switches to thp on the 5th block (after 20 megabytes
+ * of metadata), since more metadata (e.g. rtree nodes) come from a0's base.
+ */
+
+#define BASE_AUTO_THP_THRESHOLD 2
+#define BASE_AUTO_THP_THRESHOLD_A0 5
+
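
The thresholds above choose when a base switches its metadata allocations to transparent huge pages, with a higher threshold for a0. A tiny sketch of how such a per-base threshold would be consulted; the consuming code, base_auto_thp_switch(), is only partially visible in this diff, so the exact comparison here is an assumption, and the TOY_* names are invented.

#include <stdbool.h>

/* Example thresholds matching the defines above. */
#define TOY_THP_THRESHOLD	2
#define TOY_THP_THRESHOLD_A0	5

static bool
toy_should_use_thp(unsigned base_ind, unsigned n_blocks) {
	unsigned threshold = (base_ind == 0) ? TOY_THP_THRESHOLD_A0 :
	    TOY_THP_THRESHOLD;
	return n_blocks >= threshold;
}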
/******************************************************************************/
/* Data. */
@@ -29,7 +37,7 @@ metadata_thp_madvise(void) {
}
static void *
-base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) {
+base_map(tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, size_t size) {
void *addr;
bool zero = true;
bool commit = true;
@@ -37,22 +45,21 @@ base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size)
/* Use huge page sizes and alignment regardless of opt_metadata_thp. */
assert(size == HUGEPAGE_CEILING(size));
size_t alignment = HUGEPAGE;
- if (extent_hooks == &extent_hooks_default) {
+ if (ehooks_are_default(ehooks)) {
addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit);
+ if (have_madvise_huge && addr) {
+ pages_set_thp_state(addr, size);
+ }
} else {
- /* No arena context as we are creating new arenas. */
- tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
- pre_reentrancy(tsd, NULL);
- addr = extent_hooks->alloc(extent_hooks, NULL, size, alignment,
- &zero, &commit, ind);
- post_reentrancy(tsd);
+ addr = ehooks_alloc(tsdn, ehooks, NULL, size, alignment, &zero,
+ &commit);
}
return addr;
}
static void
-base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
+base_unmap(tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, void *addr,
size_t size) {
/*
* Cascade through dalloc, decommit, purge_forced, and purge_lazy,
@@ -64,7 +71,7 @@ base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
* may in fact want the end state of all associated virtual memory to be
* in some consistent-but-allocated state.
*/
- if (extent_hooks == &extent_hooks_default) {
+ if (ehooks_are_default(ehooks)) {
if (!extent_dalloc_mmap(addr, size)) {
goto label_done;
}
@@ -80,31 +87,19 @@ base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
/* Nothing worked. This should never happen. */
not_reached();
} else {
- tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
- pre_reentrancy(tsd, NULL);
- if (extent_hooks->dalloc != NULL &&
- !extent_hooks->dalloc(extent_hooks, addr, size, true,
- ind)) {
- goto label_post_reentrancy;
+ if (!ehooks_dalloc(tsdn, ehooks, addr, size, true)) {
+ goto label_done;
}
- if (extent_hooks->decommit != NULL &&
- !extent_hooks->decommit(extent_hooks, addr, size, 0, size,
- ind)) {
- goto label_post_reentrancy;
+ if (!ehooks_decommit(tsdn, ehooks, addr, size, 0, size)) {
+ goto label_done;
}
- if (extent_hooks->purge_forced != NULL &&
- !extent_hooks->purge_forced(extent_hooks, addr, size, 0,
- size, ind)) {
- goto label_post_reentrancy;
+ if (!ehooks_purge_forced(tsdn, ehooks, addr, size, 0, size)) {
+ goto label_done;
}
- if (extent_hooks->purge_lazy != NULL &&
- !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size,
- ind)) {
- goto label_post_reentrancy;
+ if (!ehooks_purge_lazy(tsdn, ehooks, addr, size, 0, size)) {
+ goto label_done;
}
/* Nothing worked. That's the application's problem. */
- label_post_reentrancy:
- post_reentrancy(tsd);
}
label_done:
if (metadata_thp_madvise()) {
@@ -116,14 +111,14 @@ label_done:
}
static void
-base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
+base_edata_init(size_t *extent_sn_next, edata_t *edata, void *addr,
size_t size) {
size_t sn;
sn = *extent_sn_next;
(*extent_sn_next)++;
- extent_binit(extent, addr, size, sn);
+ edata_binit(edata, addr, size, sn);
}
static size_t
@@ -169,7 +164,7 @@ base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
pages_huge(block, block->size);
if (config_stats) {
base->n_thp += HUGEPAGE_CEILING(block->size -
- extent_bsize_get(&block->extent)) >> LG_HUGEPAGE;
+ edata_bsize_get(&block->edata)) >> LG_HUGEPAGE;
}
block = block->next;
assert(block == NULL || (base_ind_get(base) == 0));
@@ -177,34 +172,34 @@ base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
}
static void *
-base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
+base_extent_bump_alloc_helper(edata_t *edata, size_t *gap_size, size_t size,
size_t alignment) {
void *ret;
assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
assert(size == ALIGNMENT_CEILING(size, alignment));
- *gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
- alignment) - (uintptr_t)extent_addr_get(extent);
- ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
- assert(extent_bsize_get(extent) >= *gap_size + size);
- extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) +
- *gap_size + size), extent_bsize_get(extent) - *gap_size - size,
- extent_sn_get(extent));
+ *gap_size = ALIGNMENT_CEILING((uintptr_t)edata_addr_get(edata),
+ alignment) - (uintptr_t)edata_addr_get(edata);
+ ret = (void *)((uintptr_t)edata_addr_get(edata) + *gap_size);
+ assert(edata_bsize_get(edata) >= *gap_size + size);
+ edata_binit(edata, (void *)((uintptr_t)edata_addr_get(edata) +
+ *gap_size + size), edata_bsize_get(edata) - *gap_size - size,
+ edata_sn_get(edata));
return ret;
}
static void
-base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
+base_extent_bump_alloc_post(base_t *base, edata_t *edata, size_t gap_size,
void *addr, size_t size) {
- if (extent_bsize_get(extent) > 0) {
+ if (edata_bsize_get(edata) > 0) {
/*
* Compute the index for the largest size class that does not
* exceed extent's size.
*/
szind_t index_floor =
- sz_size2index(extent_bsize_get(extent) + 1) - 1;
- extent_heap_insert(&base->avail[index_floor], extent);
+ sz_size2index(edata_bsize_get(edata) + 1) - 1;
+ edata_heap_insert(&base->avail[index_floor], edata);
}
if (config_stats) {
@@ -229,13 +224,13 @@ base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
}
static void *
-base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size,
+base_extent_bump_alloc(base_t *base, edata_t *edata, size_t size,
size_t alignment) {
void *ret;
size_t gap_size;
- ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
- base_extent_bump_alloc_post(base, extent, gap_size, ret, size);
+ ret = base_extent_bump_alloc_helper(edata, &gap_size, size, alignment);
+ base_extent_bump_alloc_post(base, edata, gap_size, ret, size);
return ret;
}
@@ -245,8 +240,8 @@ base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size,
* On success a pointer to the initialized base_block_t header is returned.
*/
static base_block_t *
-base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
- unsigned ind, pszind_t *pind_last, size_t *extent_sn_next, size_t size,
+base_block_alloc(tsdn_t *tsdn, base_t *base, ehooks_t *ehooks, unsigned ind,
+ pszind_t *pind_last, size_t *extent_sn_next, size_t size,
size_t alignment) {
alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
size_t usize = ALIGNMENT_CEILING(size, alignment);
@@ -267,7 +262,7 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
size_t block_size = (min_block_size > next_block_size) ? min_block_size
: next_block_size;
- base_block_t *block = (base_block_t *)base_map(tsdn, extent_hooks, ind,
+ base_block_t *block = (base_block_t *)base_map(tsdn, ehooks, ind,
block_size);
if (block == NULL) {
return NULL;
@@ -295,7 +290,7 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
block->size = block_size;
block->next = NULL;
assert(block_size >= header_size);
- base_extent_init(extent_sn_next, &block->extent,
+ base_edata_init(extent_sn_next, &block->edata,
(void *)((uintptr_t)block + header_size), block_size - header_size);
return block;
}
@@ -304,17 +299,17 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
* Allocate an extent that is at least as large as specified size, with
* specified alignment.
*/
-static extent_t *
+static edata_t *
base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
malloc_mutex_assert_owner(tsdn, &base->mtx);
- extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
+ ehooks_t *ehooks = base_ehooks_get_for_metadata(base);
/*
* Drop mutex during base_block_alloc(), because an extent hook will be
* called.
*/
malloc_mutex_unlock(tsdn, &base->mtx);
- base_block_t *block = base_block_alloc(tsdn, base, extent_hooks,
+ base_block_t *block = base_block_alloc(tsdn, base, ehooks,
base_ind_get(base), &base->pind_last, &base->extent_sn_next, size,
alignment);
malloc_mutex_lock(tsdn, &base->mtx);
@@ -338,7 +333,7 @@ base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
assert(base->resident <= base->mapped);
assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
}
- return &block->extent;
+ return &block->edata;
}
base_t *
@@ -347,10 +342,22 @@ b0get(void) {
}
base_t *
-base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
+base_new(tsdn_t *tsdn, unsigned ind, const extent_hooks_t *extent_hooks,
+ bool metadata_use_hooks) {
pszind_t pind_last = 0;
size_t extent_sn_next = 0;
- base_block_t *block = base_block_alloc(tsdn, NULL, extent_hooks, ind,
+
+ /*
+ * The base will contain the ehooks eventually, but it itself is
+ * allocated using them. So we use some stack ehooks to bootstrap its
+ * memory, and then initialize the ehooks within the base_t.
+ */
+ ehooks_t fake_ehooks;
+ ehooks_init(&fake_ehooks, metadata_use_hooks ?
+ (extent_hooks_t *)extent_hooks :
+ (extent_hooks_t *)&ehooks_default_extent_hooks, ind);
+
+ base_block_t *block = base_block_alloc(tsdn, NULL, &fake_ehooks, ind,
&pind_last, &extent_sn_next, sizeof(base_t), QUANTUM);
if (block == NULL) {
return NULL;
@@ -359,13 +366,15 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
size_t gap_size;
size_t base_alignment = CACHELINE;
size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
- base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent,
+ base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->edata,
&gap_size, base_size, base_alignment);
- base->ind = ind;
- atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED);
+ ehooks_init(&base->ehooks, (extent_hooks_t *)extent_hooks, ind);
+ ehooks_init(&base->ehooks_base, metadata_use_hooks ?
+ (extent_hooks_t *)extent_hooks :
+ (extent_hooks_t *)&ehooks_default_extent_hooks, ind);
if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE,
malloc_mutex_rank_exclusive)) {
- base_unmap(tsdn, extent_hooks, ind, block, block->size);
+ base_unmap(tsdn, &fake_ehooks, ind, block, block->size);
return NULL;
}
base->pind_last = pind_last;
@@ -373,7 +382,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
base->blocks = block;
base->auto_thp_switched = false;
for (szind_t i = 0; i < SC_NSIZES; i++) {
- extent_heap_new(&base->avail[i]);
+ edata_heap_new(&base->avail[i]);
}
if (config_stats) {
base->allocated = sizeof(base_block_t);
@@ -386,7 +395,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
assert(base->resident <= base->mapped);
assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
}
- base_extent_bump_alloc_post(base, &block->extent, gap_size, base,
+ base_extent_bump_alloc_post(base, &block->edata, gap_size, base,
base_size);
return base;
@@ -394,26 +403,31 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
void
base_delete(tsdn_t *tsdn, base_t *base) {
- extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
+ ehooks_t *ehooks = base_ehooks_get_for_metadata(base);
base_block_t *next = base->blocks;
do {
base_block_t *block = next;
next = block->next;
- base_unmap(tsdn, extent_hooks, base_ind_get(base), block,
+ base_unmap(tsdn, ehooks, base_ind_get(base), block,
block->size);
} while (next != NULL);
}
-extent_hooks_t *
-base_extent_hooks_get(base_t *base) {
- return (extent_hooks_t *)atomic_load_p(&base->extent_hooks,
- ATOMIC_ACQUIRE);
+ehooks_t *
+base_ehooks_get(base_t *base) {
+ return &base->ehooks;
+}
+
+ehooks_t *
+base_ehooks_get_for_metadata(base_t *base) {
+ return &base->ehooks_base;
}
extent_hooks_t *
base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
- extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base);
- atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELEASE);
+ extent_hooks_t *old_extent_hooks =
+ ehooks_get_extent_hooks_ptr(&base->ehooks);
+ ehooks_init(&base->ehooks, extent_hooks, ehooks_ind_get(&base->ehooks));
return old_extent_hooks;
}
@@ -424,28 +438,28 @@ base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
size_t usize = ALIGNMENT_CEILING(size, alignment);
size_t asize = usize + alignment - QUANTUM;
- extent_t *extent = NULL;
+ edata_t *edata = NULL;
malloc_mutex_lock(tsdn, &base->mtx);
for (szind_t i = sz_size2index(asize); i < SC_NSIZES; i++) {
- extent = extent_heap_remove_first(&base->avail[i]);
- if (extent != NULL) {
+ edata = edata_heap_remove_first(&base->avail[i]);
+ if (edata != NULL) {
/* Use existing space. */
break;
}
}
- if (extent == NULL) {
+ if (edata == NULL) {
/* Try to allocate more space. */
- extent = base_extent_alloc(tsdn, base, usize, alignment);
+ edata = base_extent_alloc(tsdn, base, usize, alignment);
}
void *ret;
- if (extent == NULL) {
+ if (edata == NULL) {
ret = NULL;
goto label_return;
}
- ret = base_extent_bump_alloc(base, extent, usize, alignment);
+ ret = base_extent_bump_alloc(base, edata, usize, alignment);
if (esn != NULL) {
- *esn = extent_sn_get(extent);
+ *esn = (size_t)edata_sn_get(edata);
}
label_return:
malloc_mutex_unlock(tsdn, &base->mtx);
@@ -465,16 +479,16 @@ base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
return base_alloc_impl(tsdn, base, size, alignment, NULL);
}
-extent_t *
-base_alloc_extent(tsdn_t *tsdn, base_t *base) {
+edata_t *
+base_alloc_edata(tsdn_t *tsdn, base_t *base) {
size_t esn;
- extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t),
- CACHELINE, &esn);
- if (extent == NULL) {
+ edata_t *edata = base_alloc_impl(tsdn, base, sizeof(edata_t),
+ EDATA_ALIGNMENT, &esn);
+ if (edata == NULL) {
return NULL;
}
- extent_esn_set(extent, esn);
- return extent;
+ edata_esn_set(edata, esn);
+ return edata;
}
void
@@ -509,6 +523,7 @@ base_postfork_child(tsdn_t *tsdn, base_t *base) {
bool
base_boot(tsdn_t *tsdn) {
- b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default);
+ b0 = base_new(tsdn, 0, (extent_hooks_t *)&ehooks_default_extent_hooks,
+ /* metadata_use_hooks */ true);
return (b0 == NULL);
}
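
For context on the base.c hunks above: base_new() now wraps the caller's extent_hooks_t in an ehooks_t, and the new metadata_use_hooks flag decides whether the base allocator's own metadata goes through those hooks or through ehooks_default_extent_hooks. The extent_hooks_t interface that ultimately backs ehooks_alloc()/ehooks_dalloc() is jemalloc's long-standing public one; below is a minimal sketch of a user-supplied hook pair of the kind that would flow through these paths. The hook names, the mmap strategy, and the partial initializer are illustrative assumptions, not part of this patch.

    /* Illustrative custom extent hooks; hypothetical, not from this patch. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>
    #include <jemalloc/jemalloc.h>

    static void *
    sketch_alloc(extent_hooks_t *hooks, void *new_addr, size_t size,
        size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
        if (new_addr != NULL) {
            return NULL;    /* Placement requests are not supported here. */
        }
        /* Over-allocate, then trim to the requested power-of-two alignment;
         * sizes and alignments are page multiples in this interface. */
        void *p = mmap(NULL, size + alignment, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            return NULL;
        }
        uintptr_t a = (uintptr_t)alignment;
        uintptr_t ret = ((uintptr_t)p + (a - 1)) & ~(a - 1);
        size_t lead = (size_t)(ret - (uintptr_t)p);
        if (lead != 0) {
            munmap(p, lead);
        }
        munmap((void *)(ret + size), alignment - lead);
        *zero = true;       /* Fresh anonymous mappings are zero-filled. */
        *commit = true;
        return (void *)ret;
    }

    static bool
    sketch_dalloc(extent_hooks_t *hooks, void *addr, size_t size,
        bool committed, unsigned arena_ind) {
        return munmap(addr, size) != 0;     /* false means success */
    }

    static extent_hooks_t sketch_hooks = {
        sketch_alloc,
        sketch_dalloc,
        /* Remaining members left NULL so jemalloc falls back to defaults. */
    };

A table like sketch_hooks is normally installed through the documented arena.<i>.extent_hooks mallctl; with metadata_use_hooks set false, base metadata would keep using the default hooks even then.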
diff --git a/deps/jemalloc/src/bin.c b/deps/jemalloc/src/bin.c
index bca6b12c3..fa2045870 100644
--- a/deps/jemalloc/src/bin.c
+++ b/deps/jemalloc/src/bin.c
@@ -6,26 +6,6 @@
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/witness.h"
-bin_info_t bin_infos[SC_NBINS];
-
-static void
-bin_infos_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
- bin_info_t bin_infos[SC_NBINS]) {
- for (unsigned i = 0; i < SC_NBINS; i++) {
- bin_info_t *bin_info = &bin_infos[i];
- sc_t *sc = &sc_data->sc[i];
- bin_info->reg_size = ((size_t)1U << sc->lg_base)
- + ((size_t)sc->ndelta << sc->lg_delta);
- bin_info->slab_size = (sc->pgs << LG_PAGE);
- bin_info->nregs =
- (uint32_t)(bin_info->slab_size / bin_info->reg_size);
- bin_info->n_shards = bin_shard_sizes[i];
- bitmap_info_t bitmap_info = BITMAP_INFO_INITIALIZER(
- bin_info->nregs);
- bin_info->bitmap_info = bitmap_info;
- }
-}
-
bool
bin_update_shard_size(unsigned bin_shard_sizes[SC_NBINS], size_t start_size,
size_t end_size, size_t nshards) {
@@ -58,12 +38,6 @@ bin_shard_sizes_boot(unsigned bin_shard_sizes[SC_NBINS]) {
}
}
-void
-bin_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) {
- assert(sc_data->initialized);
- bin_infos_init(sc_data, bin_shard_sizes, bin_infos);
-}
-
bool
bin_init(bin_t *bin) {
if (malloc_mutex_init(&bin->lock, "bin", WITNESS_RANK_BIN,
@@ -71,8 +45,8 @@ bin_init(bin_t *bin) {
return true;
}
bin->slabcur = NULL;
- extent_heap_new(&bin->slabs_nonfull);
- extent_list_init(&bin->slabs_full);
+ edata_heap_new(&bin->slabs_nonfull);
+ edata_list_active_init(&bin->slabs_full);
if (config_stats) {
memset(&bin->stats, 0, sizeof(bin_stats_t));
}
diff --git a/deps/jemalloc/src/bin_info.c b/deps/jemalloc/src/bin_info.c
new file mode 100644
index 000000000..8629ef881
--- /dev/null
+++ b/deps/jemalloc/src/bin_info.c
@@ -0,0 +1,30 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/bin_info.h"
+
+bin_info_t bin_infos[SC_NBINS];
+
+static void
+bin_infos_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
+ bin_info_t infos[SC_NBINS]) {
+ for (unsigned i = 0; i < SC_NBINS; i++) {
+ bin_info_t *bin_info = &infos[i];
+ sc_t *sc = &sc_data->sc[i];
+ bin_info->reg_size = ((size_t)1U << sc->lg_base)
+ + ((size_t)sc->ndelta << sc->lg_delta);
+ bin_info->slab_size = (sc->pgs << LG_PAGE);
+ bin_info->nregs =
+ (uint32_t)(bin_info->slab_size / bin_info->reg_size);
+ bin_info->n_shards = bin_shard_sizes[i];
+ bitmap_info_t bitmap_info = BITMAP_INFO_INITIALIZER(
+ bin_info->nregs);
+ bin_info->bitmap_info = bitmap_info;
+ }
+}
+
+void
+bin_info_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) {
+ assert(sc_data->initialized);
+ bin_infos_init(sc_data, bin_shard_sizes, bin_infos);
+}
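
The new bin_info.c above only relocates bin_infos_init() out of bin.c; the geometry it computes is unchanged. Below is a worked example of that arithmetic using illustrative size-class parameters chosen so the slab divides evenly, not values claimed to match any particular build.

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        /* Hypothetical size class: lg_base=6, ndelta=1, lg_delta=4. */
        size_t reg_size = ((size_t)1 << 6) + ((size_t)1 << 4);   /* 80 bytes */
        /* Hypothetical slab of pgs=5 pages with LG_PAGE=12 (4 KiB pages). */
        size_t slab_size = (size_t)5 << 12;                      /* 20480 bytes */
        uint32_t nregs = (uint32_t)(slab_size / reg_size);       /* 256 regions */

        printf("reg_size=%zu slab_size=%zu nregs=%u\n",
            reg_size, slab_size, nregs);
        return 0;
    }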
diff --git a/deps/jemalloc/src/bitmap.c b/deps/jemalloc/src/bitmap.c
index 468b3178e..0ccedc5db 100644
--- a/deps/jemalloc/src/bitmap.c
+++ b/deps/jemalloc/src/bitmap.c
@@ -1,4 +1,3 @@
-#define JEMALLOC_BITMAP_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
diff --git a/deps/jemalloc/src/buf_writer.c b/deps/jemalloc/src/buf_writer.c
new file mode 100644
index 000000000..7c6f79403
--- /dev/null
+++ b/deps/jemalloc/src/buf_writer.c
@@ -0,0 +1,144 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/buf_writer.h"
+#include "jemalloc/internal/malloc_io.h"
+
+static void *
+buf_writer_allocate_internal_buf(tsdn_t *tsdn, size_t buf_len) {
+#ifdef JEMALLOC_JET
+ if (buf_len > SC_LARGE_MAXCLASS) {
+ return NULL;
+ }
+#else
+ assert(buf_len <= SC_LARGE_MAXCLASS);
+#endif
+ return iallocztm(tsdn, buf_len, sz_size2index(buf_len), false, NULL,
+ true, arena_get(tsdn, 0, false), true);
+}
+
+static void
+buf_writer_free_internal_buf(tsdn_t *tsdn, void *buf) {
+ if (buf != NULL) {
+ idalloctm(tsdn, buf, NULL, NULL, true, true);
+ }
+}
+
+static void
+buf_writer_assert(buf_writer_t *buf_writer) {
+ assert(buf_writer != NULL);
+ assert(buf_writer->write_cb != NULL);
+ if (buf_writer->buf != NULL) {
+ assert(buf_writer->buf_size > 0);
+ } else {
+ assert(buf_writer->buf_size == 0);
+ assert(buf_writer->internal_buf);
+ }
+ assert(buf_writer->buf_end <= buf_writer->buf_size);
+}
+
+bool
+buf_writer_init(tsdn_t *tsdn, buf_writer_t *buf_writer, write_cb_t *write_cb,
+ void *cbopaque, char *buf, size_t buf_len) {
+ if (write_cb != NULL) {
+ buf_writer->write_cb = write_cb;
+ } else {
+ buf_writer->write_cb = je_malloc_message != NULL ?
+ je_malloc_message : wrtmessage;
+ }
+ buf_writer->cbopaque = cbopaque;
+ assert(buf_len >= 2);
+ if (buf != NULL) {
+ buf_writer->buf = buf;
+ buf_writer->internal_buf = false;
+ } else {
+ buf_writer->buf = buf_writer_allocate_internal_buf(tsdn,
+ buf_len);
+ buf_writer->internal_buf = true;
+ }
+ if (buf_writer->buf != NULL) {
+ buf_writer->buf_size = buf_len - 1; /* Allowing for '\0'. */
+ } else {
+ buf_writer->buf_size = 0;
+ }
+ buf_writer->buf_end = 0;
+ buf_writer_assert(buf_writer);
+ return buf_writer->buf == NULL;
+}
+
+void
+buf_writer_flush(buf_writer_t *buf_writer) {
+ buf_writer_assert(buf_writer);
+ if (buf_writer->buf == NULL) {
+ return;
+ }
+ buf_writer->buf[buf_writer->buf_end] = '\0';
+ buf_writer->write_cb(buf_writer->cbopaque, buf_writer->buf);
+ buf_writer->buf_end = 0;
+ buf_writer_assert(buf_writer);
+}
+
+void
+buf_writer_cb(void *buf_writer_arg, const char *s) {
+ buf_writer_t *buf_writer = (buf_writer_t *)buf_writer_arg;
+ buf_writer_assert(buf_writer);
+ if (buf_writer->buf == NULL) {
+ buf_writer->write_cb(buf_writer->cbopaque, s);
+ return;
+ }
+ size_t i, slen, n;
+ for (i = 0, slen = strlen(s); i < slen; i += n) {
+ if (buf_writer->buf_end == buf_writer->buf_size) {
+ buf_writer_flush(buf_writer);
+ }
+ size_t s_remain = slen - i;
+ size_t buf_remain = buf_writer->buf_size - buf_writer->buf_end;
+ n = s_remain < buf_remain ? s_remain : buf_remain;
+ memcpy(buf_writer->buf + buf_writer->buf_end, s + i, n);
+ buf_writer->buf_end += n;
+ buf_writer_assert(buf_writer);
+ }
+ assert(i == slen);
+}
+
+void
+buf_writer_terminate(tsdn_t *tsdn, buf_writer_t *buf_writer) {
+ buf_writer_assert(buf_writer);
+ buf_writer_flush(buf_writer);
+ if (buf_writer->internal_buf) {
+ buf_writer_free_internal_buf(tsdn, buf_writer->buf);
+ }
+}
+
+void
+buf_writer_pipe(buf_writer_t *buf_writer, read_cb_t *read_cb,
+ void *read_cbopaque) {
+ /*
+ * A tiny local buffer in case the buffered writer failed to allocate
+ * at init.
+ */
+ static char backup_buf[16];
+ static buf_writer_t backup_buf_writer;
+
+ buf_writer_assert(buf_writer);
+ assert(read_cb != NULL);
+ if (buf_writer->buf == NULL) {
+ buf_writer_init(TSDN_NULL, &backup_buf_writer,
+ buf_writer->write_cb, buf_writer->cbopaque, backup_buf,
+ sizeof(backup_buf));
+ buf_writer = &backup_buf_writer;
+ }
+ assert(buf_writer->buf != NULL);
+ ssize_t nread = 0;
+ do {
+ buf_writer->buf_end += nread;
+ buf_writer_assert(buf_writer);
+ if (buf_writer->buf_end == buf_writer->buf_size) {
+ buf_writer_flush(buf_writer);
+ }
+ nread = read_cb(read_cbopaque,
+ buf_writer->buf + buf_writer->buf_end,
+ buf_writer->buf_size - buf_writer->buf_end);
+ } while (nread > 0);
+ buf_writer_flush(buf_writer);
+}
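
buf_writer above batches many small strings into fewer write_cb invocations and degrades to direct writes when it has no buffer. A hedged sketch of the intended call pattern follows; the emit_report() wrapper and the 4 KiB stack buffer are hypothetical, and only the buf_writer_* calls and the tsdn_t/write_cb_t types come from the file above (internal headers required).

    static void
    emit_report(tsdn_t *tsdn, write_cb_t *write_cb, void *cbopaque) {
        buf_writer_t bw;
        char stack_buf[4096];

        /* With a caller-provided buffer no allocation is attempted; passing
         * buf=NULL would make buf_writer try an internal buffer and fall
         * back to unbuffered writes if that allocation fails. */
        buf_writer_init(tsdn, &bw, write_cb, cbopaque, stack_buf,
            sizeof(stack_buf));

        for (int i = 0; i < 1000; i++) {
            /* Appended to the buffer; write_cb fires only when it fills
             * (see buf_writer_cb/buf_writer_flush above). */
            buf_writer_cb(&bw, "one short line of output\n");
        }

        /* Flushes the remainder and frees an internal buffer, if any. */
        buf_writer_terminate(tsdn, &bw);
    }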
diff --git a/deps/jemalloc/src/cache_bin.c b/deps/jemalloc/src/cache_bin.c
new file mode 100644
index 000000000..9ae072a0e
--- /dev/null
+++ b/deps/jemalloc/src/cache_bin.c
@@ -0,0 +1,99 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/bit_util.h"
+#include "jemalloc/internal/cache_bin.h"
+#include "jemalloc/internal/safety_check.h"
+
+void
+cache_bin_info_init(cache_bin_info_t *info,
+ cache_bin_sz_t ncached_max) {
+ assert(ncached_max <= CACHE_BIN_NCACHED_MAX);
+ size_t stack_size = (size_t)ncached_max * sizeof(void *);
+ assert(stack_size < ((size_t)1 << (sizeof(cache_bin_sz_t) * 8)));
+ info->ncached_max = (cache_bin_sz_t)ncached_max;
+}
+
+void
+cache_bin_info_compute_alloc(cache_bin_info_t *infos, szind_t ninfos,
+ size_t *size, size_t *alignment) {
+ /* For the total bin stack region (per tcache), reserve 2 more slots so
+ * that
+ * 1) the empty position can be safely read on the fast path before
+ * checking "is_empty"; and
+ * 2) the cur_ptr can go beyond the empty position by 1 step safely on
+ * the fast path (i.e. no overflow).
+ */
+ *size = sizeof(void *) * 2;
+ for (szind_t i = 0; i < ninfos; i++) {
+ assert(infos[i].ncached_max > 0);
+ *size += infos[i].ncached_max * sizeof(void *);
+ }
+
+ /*
+ * Align to at least PAGE, to minimize the # of TLBs needed by the
+ * smaller sizes; also helps if the larger sizes don't get used at all.
+ */
+ *alignment = PAGE;
+}
+
+void
+cache_bin_preincrement(cache_bin_info_t *infos, szind_t ninfos, void *alloc,
+ size_t *cur_offset) {
+ if (config_debug) {
+ size_t computed_size;
+ size_t computed_alignment;
+
+ /* Pointer should be as aligned as we asked for. */
+ cache_bin_info_compute_alloc(infos, ninfos, &computed_size,
+ &computed_alignment);
+ assert(((uintptr_t)alloc & (computed_alignment - 1)) == 0);
+ }
+
+ *(uintptr_t *)((uintptr_t)alloc + *cur_offset) =
+ cache_bin_preceding_junk;
+ *cur_offset += sizeof(void *);
+}
+
+void
+cache_bin_postincrement(cache_bin_info_t *infos, szind_t ninfos, void *alloc,
+ size_t *cur_offset) {
+ *(uintptr_t *)((uintptr_t)alloc + *cur_offset) =
+ cache_bin_trailing_junk;
+ *cur_offset += sizeof(void *);
+}
+
+void
+cache_bin_init(cache_bin_t *bin, cache_bin_info_t *info, void *alloc,
+ size_t *cur_offset) {
+ /*
+ * The full_position points to the lowest available space. Allocations
+ * will access the slots toward higher addresses (for the benefit of
+ * adjacent prefetch).
+ */
+ void *stack_cur = (void *)((uintptr_t)alloc + *cur_offset);
+ void *full_position = stack_cur;
+ uint16_t bin_stack_size = info->ncached_max * sizeof(void *);
+
+ *cur_offset += bin_stack_size;
+ void *empty_position = (void *)((uintptr_t)alloc + *cur_offset);
+
+ /* Init to the empty position. */
+ bin->stack_head = (void **)empty_position;
+ bin->low_bits_low_water = (uint16_t)(uintptr_t)bin->stack_head;
+ bin->low_bits_full = (uint16_t)(uintptr_t)full_position;
+ bin->low_bits_empty = (uint16_t)(uintptr_t)empty_position;
+ cache_bin_sz_t free_spots = cache_bin_diff(bin,
+ bin->low_bits_full, (uint16_t)(uintptr_t)bin->stack_head,
+ /* racy */ false);
+ assert(free_spots == bin_stack_size);
+ assert(cache_bin_ncached_get_local(bin, info) == 0);
+ assert(cache_bin_empty_position_get(bin) == empty_position);
+
+ assert(bin_stack_size > 0 || empty_position == full_position);
+}
+
+bool
+cache_bin_still_zero_initialized(cache_bin_t *bin) {
+ return bin->stack_head == NULL;
+}
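
cache_bin_init() above keeps only the low 16 bits of the full and empty positions; cache_bin_info_init() asserts that a bin's stack is smaller than 2^16 bytes, so differences taken modulo 2^16 are unambiguous. The standalone sketch below mirrors that bookkeeping with made-up addresses; it is the same arithmetic, not jemalloc's cache_bin_diff() itself.

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        /* A bin with room for 8 pointers; pretend the stack's low 16
         * address bits start at 0x1000 (the full position). */
        uint16_t low_full  = 0x1000;
        uint16_t low_empty = (uint16_t)(0x1000 + 8 * sizeof(void *));
        /* stack_head moves down from empty as items are cached; here 3
         * items are currently cached. */
        uint16_t low_head  = (uint16_t)(low_empty - 3 * sizeof(void *));

        unsigned ncached = (unsigned)((uint16_t)(low_empty - low_head)
            / sizeof(void *));
        unsigned room    = (unsigned)((uint16_t)(low_head - low_full)
            / sizeof(void *));
        printf("cached=%u free_slots=%u\n", ncached, room);   /* 3 and 5 */
        return 0;
    }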
diff --git a/deps/jemalloc/src/ckh.c b/deps/jemalloc/src/ckh.c
index 1bf6df5a1..8db4319c5 100644
--- a/deps/jemalloc/src/ckh.c
+++ b/deps/jemalloc/src/ckh.c
@@ -34,7 +34,6 @@
* respectively.
*
******************************************************************************/
-#define JEMALLOC_CKH_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/ckh.h"
@@ -357,14 +356,14 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
}
bool
-ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
+ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *ckh_hash,
ckh_keycomp_t *keycomp) {
bool ret;
size_t mincells, usize;
unsigned lg_mincells;
assert(minitems > 0);
- assert(hash != NULL);
+ assert(ckh_hash != NULL);
assert(keycomp != NULL);
#ifdef CKH_COUNT
@@ -393,7 +392,7 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
}
ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
- ckh->hash = hash;
+ ckh->hash = ckh_hash;
ckh->keycomp = keycomp;
usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
diff --git a/deps/jemalloc/src/counter.c b/deps/jemalloc/src/counter.c
new file mode 100644
index 000000000..8f1ae3af4
--- /dev/null
+++ b/deps/jemalloc/src/counter.c
@@ -0,0 +1,30 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/counter.h"
+
+bool
+counter_accum_init(counter_accum_t *counter, uint64_t interval) {
+ if (LOCKEDINT_MTX_INIT(counter->mtx, "counter_accum",
+ WITNESS_RANK_COUNTER_ACCUM, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ locked_init_u64_unsynchronized(&counter->accumbytes, 0);
+ counter->interval = interval;
+ return false;
+}
+
+void
+counter_prefork(tsdn_t *tsdn, counter_accum_t *counter) {
+ LOCKEDINT_MTX_PREFORK(tsdn, counter->mtx);
+}
+
+void
+counter_postfork_parent(tsdn_t *tsdn, counter_accum_t *counter) {
+ LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, counter->mtx);
+}
+
+void
+counter_postfork_child(tsdn_t *tsdn, counter_accum_t *counter) {
+ LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, counter->mtx);
+}
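
counter_accum_init() above pairs a locked 64-bit accumulator with a trigger interval, which jemalloc uses for periodic events such as the new stats_interval option. The accumulate-and-trigger idiom it supports looks roughly like the standalone sketch below; this is the general pattern only, not jemalloc's actual counter_accum() implementation.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        uint64_t accumbytes;
        uint64_t interval;
    } sketch_counter_t;

    /* Returns true when the running total crosses the interval, keeping the
     * leftover bytes so they count toward the next trigger. */
    static bool
    sketch_counter_accum(sketch_counter_t *c, uint64_t bytes) {
        c->accumbytes += bytes;
        if (c->accumbytes < c->interval) {
            return false;
        }
        c->accumbytes -= c->interval;
        return true;
    }

    int main(void) {
        sketch_counter_t c = {0, 1 << 20};      /* trigger every 1 MiB */
        for (int i = 0; i < 5; i++) {
            if (sketch_counter_accum(&c, 300 * 1024)) {
                printf("interval event after step %d\n", i);
            }
        }
        return 0;
    }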
diff --git a/deps/jemalloc/src/ctl.c b/deps/jemalloc/src/ctl.c
index 48afaa61f..135271baf 100644
--- a/deps/jemalloc/src/ctl.c
+++ b/deps/jemalloc/src/ctl.c
@@ -1,4 +1,3 @@
-#define JEMALLOC_CTL_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
@@ -6,8 +5,16 @@
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/inspect.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
+#include "jemalloc/internal/peak_event.h"
+#include "jemalloc/internal/prof_data.h"
+#include "jemalloc/internal/prof_log.h"
+#include "jemalloc/internal/prof_recent.h"
+#include "jemalloc/internal/prof_stats.h"
+#include "jemalloc/internal/prof_sys.h"
+#include "jemalloc/internal/safety_check.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/util.h"
@@ -60,6 +67,8 @@ CTL_PROTO(background_thread)
CTL_PROTO(max_background_threads)
CTL_PROTO(thread_tcache_enabled)
CTL_PROTO(thread_tcache_flush)
+CTL_PROTO(thread_peak_read)
+CTL_PROTO(thread_peak_reset)
CTL_PROTO(thread_prof_name)
CTL_PROTO(thread_prof_active)
CTL_PROTO(thread_arena)
@@ -67,6 +76,7 @@ CTL_PROTO(thread_allocated)
CTL_PROTO(thread_allocatedp)
CTL_PROTO(thread_deallocated)
CTL_PROTO(thread_deallocatedp)
+CTL_PROTO(thread_idle)
CTL_PROTO(config_cache_oblivious)
CTL_PROTO(config_debug)
CTL_PROTO(config_fill)
@@ -81,7 +91,20 @@ CTL_PROTO(config_utrace)
CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort)
CTL_PROTO(opt_abort_conf)
+CTL_PROTO(opt_cache_oblivious)
+CTL_PROTO(opt_trust_madvise)
CTL_PROTO(opt_confirm_conf)
+CTL_PROTO(opt_hpa)
+CTL_PROTO(opt_hpa_slab_max_alloc)
+CTL_PROTO(opt_hpa_hugification_threshold)
+CTL_PROTO(opt_hpa_hugify_delay_ms)
+CTL_PROTO(opt_hpa_min_purge_interval_ms)
+CTL_PROTO(opt_hpa_dirty_mult)
+CTL_PROTO(opt_hpa_sec_nshards)
+CTL_PROTO(opt_hpa_sec_max_alloc)
+CTL_PROTO(opt_hpa_sec_max_bytes)
+CTL_PROTO(opt_hpa_sec_bytes_after_flush)
+CTL_PROTO(opt_hpa_sec_batch_fill_extra)
CTL_PROTO(opt_metadata_thp)
CTL_PROTO(opt_retain)
CTL_PROTO(opt_dss)
@@ -89,19 +112,31 @@ CTL_PROTO(opt_narenas)
CTL_PROTO(opt_percpu_arena)
CTL_PROTO(opt_oversize_threshold)
CTL_PROTO(opt_background_thread)
+CTL_PROTO(opt_mutex_max_spin)
CTL_PROTO(opt_max_background_threads)
CTL_PROTO(opt_dirty_decay_ms)
CTL_PROTO(opt_muzzy_decay_ms)
CTL_PROTO(opt_stats_print)
CTL_PROTO(opt_stats_print_opts)
+CTL_PROTO(opt_stats_interval)
+CTL_PROTO(opt_stats_interval_opts)
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
CTL_PROTO(opt_utrace)
CTL_PROTO(opt_xmalloc)
+CTL_PROTO(opt_experimental_infallible_new)
CTL_PROTO(opt_tcache)
+CTL_PROTO(opt_tcache_max)
+CTL_PROTO(opt_tcache_nslots_small_min)
+CTL_PROTO(opt_tcache_nslots_small_max)
+CTL_PROTO(opt_tcache_nslots_large)
+CTL_PROTO(opt_lg_tcache_nslots_mul)
+CTL_PROTO(opt_tcache_gc_incr_bytes)
+CTL_PROTO(opt_tcache_gc_delay_bytes)
+CTL_PROTO(opt_lg_tcache_flush_small_div)
+CTL_PROTO(opt_lg_tcache_flush_large_div)
CTL_PROTO(opt_thp)
CTL_PROTO(opt_lg_extent_max_active_fit)
-CTL_PROTO(opt_lg_tcache_max)
CTL_PROTO(opt_prof)
CTL_PROTO(opt_prof_prefix)
CTL_PROTO(opt_prof_active)
@@ -111,7 +146,14 @@ CTL_PROTO(opt_lg_prof_interval)
CTL_PROTO(opt_prof_gdump)
CTL_PROTO(opt_prof_final)
CTL_PROTO(opt_prof_leak)
+CTL_PROTO(opt_prof_leak_error)
CTL_PROTO(opt_prof_accum)
+CTL_PROTO(opt_prof_recent_alloc_max)
+CTL_PROTO(opt_prof_stats)
+CTL_PROTO(opt_prof_sys_thread_name)
+CTL_PROTO(opt_prof_time_res)
+CTL_PROTO(opt_lg_san_uaf_align)
+CTL_PROTO(opt_zero_realloc)
CTL_PROTO(tcache_create)
CTL_PROTO(tcache_flush)
CTL_PROTO(tcache_destroy)
@@ -121,6 +163,7 @@ CTL_PROTO(arena_i_purge)
CTL_PROTO(arena_i_reset)
CTL_PROTO(arena_i_destroy)
CTL_PROTO(arena_i_dss)
+CTL_PROTO(arena_i_oversize_threshold)
CTL_PROTO(arena_i_dirty_decay_ms)
CTL_PROTO(arena_i_muzzy_decay_ms)
CTL_PROTO(arena_i_extent_hooks)
@@ -148,11 +191,18 @@ CTL_PROTO(prof_thread_active_init)
CTL_PROTO(prof_active)
CTL_PROTO(prof_dump)
CTL_PROTO(prof_gdump)
+CTL_PROTO(prof_prefix)
CTL_PROTO(prof_reset)
CTL_PROTO(prof_interval)
CTL_PROTO(lg_prof_sample)
CTL_PROTO(prof_log_start)
CTL_PROTO(prof_log_stop)
+CTL_PROTO(prof_stats_bins_i_live)
+CTL_PROTO(prof_stats_bins_i_accum)
+INDEX_PROTO(prof_stats_bins_i)
+CTL_PROTO(prof_stats_lextents_i_live)
+CTL_PROTO(prof_stats_lextents_i_accum)
+INDEX_PROTO(prof_stats_lextents_i)
CTL_PROTO(stats_arenas_i_small_allocated)
CTL_PROTO(stats_arenas_i_small_nmalloc)
CTL_PROTO(stats_arenas_i_small_ndalloc)
@@ -188,6 +238,39 @@ CTL_PROTO(stats_arenas_i_extents_j_dirty_bytes)
CTL_PROTO(stats_arenas_i_extents_j_muzzy_bytes)
CTL_PROTO(stats_arenas_i_extents_j_retained_bytes)
INDEX_PROTO(stats_arenas_i_extents_j)
+CTL_PROTO(stats_arenas_i_hpa_shard_npurge_passes)
+CTL_PROTO(stats_arenas_i_hpa_shard_npurges)
+CTL_PROTO(stats_arenas_i_hpa_shard_nhugifies)
+CTL_PROTO(stats_arenas_i_hpa_shard_ndehugifies)
+
+/* We have a set of stats for full slabs. */
+CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge)
+CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_nactive_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_nactive_huge)
+CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_ndirty_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_ndirty_huge)
+
+/* A parallel set for the empty slabs. */
+CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_huge)
+CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_nactive_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_nactive_huge)
+CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_ndirty_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_ndirty_huge)
+
+/*
+ * And one for the slabs that are neither empty nor full, but indexed by how
+ * full they are.
+ */
+CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_huge)
+CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_huge)
+CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge)
+
+INDEX_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j)
CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_uptime)
CTL_PROTO(stats_arenas_i_dss)
@@ -209,8 +292,10 @@ CTL_PROTO(stats_arenas_i_base)
CTL_PROTO(stats_arenas_i_internal)
CTL_PROTO(stats_arenas_i_metadata_thp)
CTL_PROTO(stats_arenas_i_tcache_bytes)
+CTL_PROTO(stats_arenas_i_tcache_stashed_bytes)
CTL_PROTO(stats_arenas_i_resident)
CTL_PROTO(stats_arenas_i_abandoned_vm)
+CTL_PROTO(stats_arenas_i_hpa_sec_bytes)
INDEX_PROTO(stats_arenas_i)
CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active)
@@ -222,12 +307,21 @@ CTL_PROTO(stats_metadata_thp)
CTL_PROTO(stats_resident)
CTL_PROTO(stats_mapped)
CTL_PROTO(stats_retained)
+CTL_PROTO(stats_zero_reallocs)
CTL_PROTO(experimental_hooks_install)
CTL_PROTO(experimental_hooks_remove)
+CTL_PROTO(experimental_hooks_prof_backtrace)
+CTL_PROTO(experimental_hooks_prof_dump)
+CTL_PROTO(experimental_hooks_safety_check_abort)
+CTL_PROTO(experimental_thread_activity_callback)
CTL_PROTO(experimental_utilization_query)
CTL_PROTO(experimental_utilization_batch_query)
CTL_PROTO(experimental_arenas_i_pactivep)
INDEX_PROTO(experimental_arenas_i)
+CTL_PROTO(experimental_prof_recent_alloc_max)
+CTL_PROTO(experimental_prof_recent_alloc_dump)
+CTL_PROTO(experimental_batch_alloc)
+CTL_PROTO(experimental_arenas_create_ext)
#define MUTEX_STATS_CTL_PROTO_GEN(n) \
CTL_PROTO(stats_##n##_num_ops) \
@@ -275,6 +369,11 @@ static const ctl_named_node_t thread_tcache_node[] = {
{NAME("flush"), CTL(thread_tcache_flush)}
};
+static const ctl_named_node_t thread_peak_node[] = {
+ {NAME("read"), CTL(thread_peak_read)},
+ {NAME("reset"), CTL(thread_peak_reset)},
+};
+
static const ctl_named_node_t thread_prof_node[] = {
{NAME("name"), CTL(thread_prof_name)},
{NAME("active"), CTL(thread_prof_active)}
@@ -287,7 +386,9 @@ static const ctl_named_node_t thread_node[] = {
{NAME("deallocated"), CTL(thread_deallocated)},
{NAME("deallocatedp"), CTL(thread_deallocatedp)},
{NAME("tcache"), CHILD(named, thread_tcache)},
- {NAME("prof"), CHILD(named, thread_prof)}
+ {NAME("peak"), CHILD(named, thread_peak)},
+ {NAME("prof"), CHILD(named, thread_prof)},
+ {NAME("idle"), CTL(thread_idle)}
};
static const ctl_named_node_t config_node[] = {
@@ -308,27 +409,60 @@ static const ctl_named_node_t config_node[] = {
static const ctl_named_node_t opt_node[] = {
{NAME("abort"), CTL(opt_abort)},
{NAME("abort_conf"), CTL(opt_abort_conf)},
+ {NAME("cache_oblivious"), CTL(opt_cache_oblivious)},
+ {NAME("trust_madvise"), CTL(opt_trust_madvise)},
{NAME("confirm_conf"), CTL(opt_confirm_conf)},
+ {NAME("hpa"), CTL(opt_hpa)},
+ {NAME("hpa_slab_max_alloc"), CTL(opt_hpa_slab_max_alloc)},
+ {NAME("hpa_hugification_threshold"),
+ CTL(opt_hpa_hugification_threshold)},
+ {NAME("hpa_hugify_delay_ms"), CTL(opt_hpa_hugify_delay_ms)},
+ {NAME("hpa_min_purge_interval_ms"), CTL(opt_hpa_min_purge_interval_ms)},
+ {NAME("hpa_dirty_mult"), CTL(opt_hpa_dirty_mult)},
+ {NAME("hpa_sec_nshards"), CTL(opt_hpa_sec_nshards)},
+ {NAME("hpa_sec_max_alloc"), CTL(opt_hpa_sec_max_alloc)},
+ {NAME("hpa_sec_max_bytes"), CTL(opt_hpa_sec_max_bytes)},
+ {NAME("hpa_sec_bytes_after_flush"),
+ CTL(opt_hpa_sec_bytes_after_flush)},
+ {NAME("hpa_sec_batch_fill_extra"),
+ CTL(opt_hpa_sec_batch_fill_extra)},
{NAME("metadata_thp"), CTL(opt_metadata_thp)},
{NAME("retain"), CTL(opt_retain)},
{NAME("dss"), CTL(opt_dss)},
{NAME("narenas"), CTL(opt_narenas)},
{NAME("percpu_arena"), CTL(opt_percpu_arena)},
{NAME("oversize_threshold"), CTL(opt_oversize_threshold)},
+ {NAME("mutex_max_spin"), CTL(opt_mutex_max_spin)},
{NAME("background_thread"), CTL(opt_background_thread)},
{NAME("max_background_threads"), CTL(opt_max_background_threads)},
{NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)},
{NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)},
{NAME("stats_print"), CTL(opt_stats_print)},
{NAME("stats_print_opts"), CTL(opt_stats_print_opts)},
+ {NAME("stats_interval"), CTL(opt_stats_interval)},
+ {NAME("stats_interval_opts"), CTL(opt_stats_interval_opts)},
{NAME("junk"), CTL(opt_junk)},
{NAME("zero"), CTL(opt_zero)},
{NAME("utrace"), CTL(opt_utrace)},
{NAME("xmalloc"), CTL(opt_xmalloc)},
+ {NAME("experimental_infallible_new"),
+ CTL(opt_experimental_infallible_new)},
{NAME("tcache"), CTL(opt_tcache)},
+ {NAME("tcache_max"), CTL(opt_tcache_max)},
+ {NAME("tcache_nslots_small_min"),
+ CTL(opt_tcache_nslots_small_min)},
+ {NAME("tcache_nslots_small_max"),
+ CTL(opt_tcache_nslots_small_max)},
+ {NAME("tcache_nslots_large"), CTL(opt_tcache_nslots_large)},
+ {NAME("lg_tcache_nslots_mul"), CTL(opt_lg_tcache_nslots_mul)},
+ {NAME("tcache_gc_incr_bytes"), CTL(opt_tcache_gc_incr_bytes)},
+ {NAME("tcache_gc_delay_bytes"), CTL(opt_tcache_gc_delay_bytes)},
+ {NAME("lg_tcache_flush_small_div"),
+ CTL(opt_lg_tcache_flush_small_div)},
+ {NAME("lg_tcache_flush_large_div"),
+ CTL(opt_lg_tcache_flush_large_div)},
{NAME("thp"), CTL(opt_thp)},
{NAME("lg_extent_max_active_fit"), CTL(opt_lg_extent_max_active_fit)},
- {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)},
{NAME("prof"), CTL(opt_prof)},
{NAME("prof_prefix"), CTL(opt_prof_prefix)},
{NAME("prof_active"), CTL(opt_prof_active)},
@@ -338,7 +472,14 @@ static const ctl_named_node_t opt_node[] = {
{NAME("prof_gdump"), CTL(opt_prof_gdump)},
{NAME("prof_final"), CTL(opt_prof_final)},
{NAME("prof_leak"), CTL(opt_prof_leak)},
- {NAME("prof_accum"), CTL(opt_prof_accum)}
+ {NAME("prof_leak_error"), CTL(opt_prof_leak_error)},
+ {NAME("prof_accum"), CTL(opt_prof_accum)},
+ {NAME("prof_recent_alloc_max"), CTL(opt_prof_recent_alloc_max)},
+ {NAME("prof_stats"), CTL(opt_prof_stats)},
+ {NAME("prof_sys_thread_name"), CTL(opt_prof_sys_thread_name)},
+ {NAME("prof_time_resolution"), CTL(opt_prof_time_res)},
+ {NAME("lg_san_uaf_align"), CTL(opt_lg_san_uaf_align)},
+ {NAME("zero_realloc"), CTL(opt_zero_realloc)}
};
static const ctl_named_node_t tcache_node[] = {
@@ -354,6 +495,11 @@ static const ctl_named_node_t arena_i_node[] = {
{NAME("reset"), CTL(arena_i_reset)},
{NAME("destroy"), CTL(arena_i_destroy)},
{NAME("dss"), CTL(arena_i_dss)},
+ /*
+ * Undocumented for now, since we anticipate an arena API in flux after
+ * we cut the last 5-series release.
+ */
+ {NAME("oversize_threshold"), CTL(arena_i_oversize_threshold)},
{NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)},
{NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)},
{NAME("extent_hooks"), CTL(arena_i_extent_hooks)},
@@ -408,17 +554,51 @@ static const ctl_named_node_t arenas_node[] = {
{NAME("lookup"), CTL(arenas_lookup)}
};
+static const ctl_named_node_t prof_stats_bins_i_node[] = {
+ {NAME("live"), CTL(prof_stats_bins_i_live)},
+ {NAME("accum"), CTL(prof_stats_bins_i_accum)}
+};
+
+static const ctl_named_node_t super_prof_stats_bins_i_node[] = {
+ {NAME(""), CHILD(named, prof_stats_bins_i)}
+};
+
+static const ctl_indexed_node_t prof_stats_bins_node[] = {
+ {INDEX(prof_stats_bins_i)}
+};
+
+static const ctl_named_node_t prof_stats_lextents_i_node[] = {
+ {NAME("live"), CTL(prof_stats_lextents_i_live)},
+ {NAME("accum"), CTL(prof_stats_lextents_i_accum)}
+};
+
+static const ctl_named_node_t super_prof_stats_lextents_i_node[] = {
+ {NAME(""), CHILD(named, prof_stats_lextents_i)}
+};
+
+static const ctl_indexed_node_t prof_stats_lextents_node[] = {
+ {INDEX(prof_stats_lextents_i)}
+};
+
+static const ctl_named_node_t prof_stats_node[] = {
+ {NAME("bins"), CHILD(indexed, prof_stats_bins)},
+ {NAME("lextents"), CHILD(indexed, prof_stats_lextents)},
+};
+
static const ctl_named_node_t prof_node[] = {
{NAME("thread_active_init"), CTL(prof_thread_active_init)},
{NAME("active"), CTL(prof_active)},
{NAME("dump"), CTL(prof_dump)},
{NAME("gdump"), CTL(prof_gdump)},
+ {NAME("prefix"), CTL(prof_prefix)},
{NAME("reset"), CTL(prof_reset)},
{NAME("interval"), CTL(prof_interval)},
{NAME("lg_sample"), CTL(lg_prof_sample)},
{NAME("log_start"), CTL(prof_log_start)},
- {NAME("log_stop"), CTL(prof_log_stop)}
+ {NAME("log_stop"), CTL(prof_log_stop)},
+ {NAME("stats"), CHILD(named, prof_stats)}
};
+
static const ctl_named_node_t stats_arenas_i_small_node[] = {
{NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
{NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
@@ -521,6 +701,75 @@ MUTEX_PROF_ARENA_MUTEXES
#undef OP
};
+static const ctl_named_node_t stats_arenas_i_hpa_shard_full_slabs_node[] = {
+ {NAME("npageslabs_nonhuge"),
+ CTL(stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge)},
+ {NAME("npageslabs_huge"),
+ CTL(stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge)},
+ {NAME("nactive_nonhuge"),
+ CTL(stats_arenas_i_hpa_shard_full_slabs_nactive_nonhuge)},
+ {NAME("nactive_huge"),
+ CTL(stats_arenas_i_hpa_shard_full_slabs_nactive_huge)},
+ {NAME("ndirty_nonhuge"),
+ CTL(stats_arenas_i_hpa_shard_full_slabs_ndirty_nonhuge)},
+ {NAME("ndirty_huge"),
+ CTL(stats_arenas_i_hpa_shard_full_slabs_ndirty_huge)}
+};
+
+static const ctl_named_node_t stats_arenas_i_hpa_shard_empty_slabs_node[] = {
+ {NAME("npageslabs_nonhuge"),
+ CTL(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_nonhuge)},
+ {NAME("npageslabs_huge"),
+ CTL(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_huge)},
+ {NAME("nactive_nonhuge"),
+ CTL(stats_arenas_i_hpa_shard_empty_slabs_nactive_nonhuge)},
+ {NAME("nactive_huge"),
+ CTL(stats_arenas_i_hpa_shard_empty_slabs_nactive_huge)},
+ {NAME("ndirty_nonhuge"),
+ CTL(stats_arenas_i_hpa_shard_empty_slabs_ndirty_nonhuge)},
+ {NAME("ndirty_huge"),
+ CTL(stats_arenas_i_hpa_shard_empty_slabs_ndirty_huge)}
+};
+
+static const ctl_named_node_t stats_arenas_i_hpa_shard_nonfull_slabs_j_node[] = {
+ {NAME("npageslabs_nonhuge"),
+ CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_nonhuge)},
+ {NAME("npageslabs_huge"),
+ CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_huge)},
+ {NAME("nactive_nonhuge"),
+ CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_nonhuge)},
+ {NAME("nactive_huge"),
+ CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_huge)},
+ {NAME("ndirty_nonhuge"),
+ CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge)},
+ {NAME("ndirty_huge"),
+ CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge)}
+};
+
+static const ctl_named_node_t super_stats_arenas_i_hpa_shard_nonfull_slabs_j_node[] = {
+ {NAME(""),
+ CHILD(named, stats_arenas_i_hpa_shard_nonfull_slabs_j)}
+};
+
+static const ctl_indexed_node_t stats_arenas_i_hpa_shard_nonfull_slabs_node[] =
+{
+ {INDEX(stats_arenas_i_hpa_shard_nonfull_slabs_j)}
+};
+
+static const ctl_named_node_t stats_arenas_i_hpa_shard_node[] = {
+ {NAME("full_slabs"), CHILD(named,
+ stats_arenas_i_hpa_shard_full_slabs)},
+ {NAME("empty_slabs"), CHILD(named,
+ stats_arenas_i_hpa_shard_empty_slabs)},
+ {NAME("nonfull_slabs"), CHILD(indexed,
+ stats_arenas_i_hpa_shard_nonfull_slabs)},
+
+ {NAME("npurge_passes"), CTL(stats_arenas_i_hpa_shard_npurge_passes)},
+ {NAME("npurges"), CTL(stats_arenas_i_hpa_shard_npurges)},
+ {NAME("nhugifies"), CTL(stats_arenas_i_hpa_shard_nhugifies)},
+ {NAME("ndehugifies"), CTL(stats_arenas_i_hpa_shard_ndehugifies)}
+};
+
static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
{NAME("uptime"), CTL(stats_arenas_i_uptime)},
@@ -543,14 +792,18 @@ static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("internal"), CTL(stats_arenas_i_internal)},
{NAME("metadata_thp"), CTL(stats_arenas_i_metadata_thp)},
{NAME("tcache_bytes"), CTL(stats_arenas_i_tcache_bytes)},
+ {NAME("tcache_stashed_bytes"),
+ CTL(stats_arenas_i_tcache_stashed_bytes)},
{NAME("resident"), CTL(stats_arenas_i_resident)},
{NAME("abandoned_vm"), CTL(stats_arenas_i_abandoned_vm)},
+ {NAME("hpa_sec_bytes"), CTL(stats_arenas_i_hpa_sec_bytes)},
{NAME("small"), CHILD(named, stats_arenas_i_small)},
{NAME("large"), CHILD(named, stats_arenas_i_large)},
{NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
{NAME("lextents"), CHILD(indexed, stats_arenas_i_lextents)},
{NAME("extents"), CHILD(indexed, stats_arenas_i_extents)},
- {NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)}
+ {NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)},
+ {NAME("hpa_shard"), CHILD(named, stats_arenas_i_hpa_shard)}
};
static const ctl_named_node_t super_stats_arenas_i_node[] = {
{NAME(""), CHILD(named, stats_arenas_i)}
@@ -589,12 +842,21 @@ static const ctl_named_node_t stats_node[] = {
{NAME("background_thread"),
CHILD(named, stats_background_thread)},
{NAME("mutexes"), CHILD(named, stats_mutexes)},
- {NAME("arenas"), CHILD(indexed, stats_arenas)}
+ {NAME("arenas"), CHILD(indexed, stats_arenas)},
+ {NAME("zero_reallocs"), CTL(stats_zero_reallocs)},
};
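
The node tables in this file back the public mallctl() namespace, so each name added in this diff (for example "thread.peak.read" or "stats.arenas.<i>.hpa_shard.npurges") becomes reachable through the usual dotted-name or MIB interfaces. The hedged sketch below uses only the documented public API; depending on the configured prefix the symbols may be je_mallctl()/je_mallctlnametomib(), and it stops short of reading the HPA counter because that value's type is not shown in this hunk.

    #include <stdint.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        /* Refresh the stats snapshot (documented "epoch" idiom). */
        uint64_t epoch = 1;
        size_t sz = sizeof(epoch);
        mallctl("epoch", &epoch, &sz, &epoch, sz);

        /* A dotted-name read walks the named/indexed node tables. */
        size_t allocated;
        sz = sizeof(allocated);
        if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0) {
            printf("stats.allocated = %zu\n", allocated);
        }

        /* Names can be translated to a MIB once and reused; mib[2] is the
         * arena index below the indexed stats.arenas node. */
        size_t mib[5];
        size_t miblen = 5;
        if (mallctlnametomib("stats.arenas.0.hpa_shard.npurges", mib,
            &miblen) == 0) {
            mib[2] = 0;     /* then mallctlbymib(mib, miblen, ...) */
        }
        return 0;
    }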
static const ctl_named_node_t experimental_hooks_node[] = {
{NAME("install"), CTL(experimental_hooks_install)},
- {NAME("remove"), CTL(experimental_hooks_remove)}
+ {NAME("remove"), CTL(experimental_hooks_remove)},
+ {NAME("prof_backtrace"), CTL(experimental_hooks_prof_backtrace)},
+ {NAME("prof_dump"), CTL(experimental_hooks_prof_dump)},
+ {NAME("safety_check_abort"), CTL(experimental_hooks_safety_check_abort)},
+};
+
+static const ctl_named_node_t experimental_thread_node[] = {
+ {NAME("activity_callback"),
+ CTL(experimental_thread_activity_callback)}
};
static const ctl_named_node_t experimental_utilization_node[] = {
@@ -613,10 +875,19 @@ static const ctl_indexed_node_t experimental_arenas_node[] = {
{INDEX(experimental_arenas_i)}
};
+static const ctl_named_node_t experimental_prof_recent_node[] = {
+ {NAME("alloc_max"), CTL(experimental_prof_recent_alloc_max)},
+ {NAME("alloc_dump"), CTL(experimental_prof_recent_alloc_dump)},
+};
+
static const ctl_named_node_t experimental_node[] = {
{NAME("hooks"), CHILD(named, experimental_hooks)},
{NAME("utilization"), CHILD(named, experimental_utilization)},
- {NAME("arenas"), CHILD(indexed, experimental_arenas)}
+ {NAME("arenas"), CHILD(indexed, experimental_arenas)},
+ {NAME("arenas_create_ext"), CTL(experimental_arenas_create_ext)},
+ {NAME("prof_recent"), CHILD(named, experimental_prof_recent)},
+ {NAME("batch_alloc"), CTL(experimental_batch_alloc)},
+ {NAME("thread"), CHILD(named, experimental_thread)}
};
static const ctl_named_node_t root_node[] = {
@@ -650,28 +921,13 @@ static const ctl_named_node_t super_root_node[] = {
* synchronized by the ctl mutex.
*/
static void
-ctl_accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) {
-#ifdef JEMALLOC_ATOMIC_U64
- uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
- uint64_t cur_src = atomic_load_u64(src, ATOMIC_RELAXED);
- atomic_store_u64(dst, cur_dst + cur_src, ATOMIC_RELAXED);
-#else
- *dst += *src;
-#endif
-}
-
-/* Likewise: with ctl mutex synchronization, reading is simple. */
-static uint64_t
-ctl_arena_stats_read_u64(arena_stats_u64_t *p) {
-#ifdef JEMALLOC_ATOMIC_U64
- return atomic_load_u64(p, ATOMIC_RELAXED);
-#else
- return *p;
-#endif
+ctl_accum_locked_u64(locked_u64_t *dst, locked_u64_t *src) {
+ locked_inc_u64_unsynchronized(dst,
+ locked_read_u64_unsynchronized(src));
}
static void
-accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) {
+ctl_accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) {
size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
size_t cur_src = atomic_load_zu(src, ATOMIC_RELAXED);
atomic_store_zu(dst, cur_dst + cur_src, ATOMIC_RELAXED);
@@ -783,11 +1039,15 @@ ctl_arena_clear(ctl_arena_t *ctl_arena) {
ctl_arena->astats->nfills_small = 0;
ctl_arena->astats->nflushes_small = 0;
memset(ctl_arena->astats->bstats, 0, SC_NBINS *
- sizeof(bin_stats_t));
+ sizeof(bin_stats_data_t));
memset(ctl_arena->astats->lstats, 0, (SC_NSIZES - SC_NBINS) *
sizeof(arena_stats_large_t));
memset(ctl_arena->astats->estats, 0, SC_NPSIZES *
- sizeof(arena_stats_extents_t));
+ sizeof(pac_estats_t));
+ memset(&ctl_arena->astats->hpastats, 0,
+ sizeof(hpa_shard_stats_t));
+ memset(&ctl_arena->astats->secstats, 0,
+ sizeof(sec_stats_t));
}
}
@@ -801,22 +1061,19 @@ ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) {
&ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
&ctl_arena->pdirty, &ctl_arena->pmuzzy,
&ctl_arena->astats->astats, ctl_arena->astats->bstats,
- ctl_arena->astats->lstats, ctl_arena->astats->estats);
+ ctl_arena->astats->lstats, ctl_arena->astats->estats,
+ &ctl_arena->astats->hpastats, &ctl_arena->astats->secstats);
for (i = 0; i < SC_NBINS; i++) {
- ctl_arena->astats->allocated_small +=
- ctl_arena->astats->bstats[i].curregs *
+ bin_stats_t *bstats =
+ &ctl_arena->astats->bstats[i].stats_data;
+ ctl_arena->astats->allocated_small += bstats->curregs *
sz_index2size(i);
- ctl_arena->astats->nmalloc_small +=
- ctl_arena->astats->bstats[i].nmalloc;
- ctl_arena->astats->ndalloc_small +=
- ctl_arena->astats->bstats[i].ndalloc;
- ctl_arena->astats->nrequests_small +=
- ctl_arena->astats->bstats[i].nrequests;
- ctl_arena->astats->nfills_small +=
- ctl_arena->astats->bstats[i].nfills;
- ctl_arena->astats->nflushes_small +=
- ctl_arena->astats->bstats[i].nflushes;
+ ctl_arena->astats->nmalloc_small += bstats->nmalloc;
+ ctl_arena->astats->ndalloc_small += bstats->ndalloc;
+ ctl_arena->astats->nrequests_small += bstats->nrequests;
+ ctl_arena->astats->nfills_small += bstats->nfills;
+ ctl_arena->astats->nflushes_small += bstats->nflushes;
}
} else {
arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads,
@@ -848,27 +1105,32 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
ctl_arena_stats_t *astats = ctl_arena->astats;
if (!destroyed) {
- accum_atomic_zu(&sdstats->astats.mapped,
- &astats->astats.mapped);
- accum_atomic_zu(&sdstats->astats.retained,
- &astats->astats.retained);
- accum_atomic_zu(&sdstats->astats.extent_avail,
- &astats->astats.extent_avail);
- }
-
- ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge,
- &astats->astats.decay_dirty.npurge);
- ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.nmadvise,
- &astats->astats.decay_dirty.nmadvise);
- ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.purged,
- &astats->astats.decay_dirty.purged);
-
- ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.npurge,
- &astats->astats.decay_muzzy.npurge);
- ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.nmadvise,
- &astats->astats.decay_muzzy.nmadvise);
- ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.purged,
- &astats->astats.decay_muzzy.purged);
+ sdstats->astats.mapped += astats->astats.mapped;
+ sdstats->astats.pa_shard_stats.pac_stats.retained
+ += astats->astats.pa_shard_stats.pac_stats.retained;
+ sdstats->astats.pa_shard_stats.edata_avail
+ += astats->astats.pa_shard_stats.edata_avail;
+ }
+
+ ctl_accum_locked_u64(
+ &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge,
+ &astats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge);
+ ctl_accum_locked_u64(
+ &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise,
+ &astats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise);
+ ctl_accum_locked_u64(
+ &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.purged,
+ &astats->astats.pa_shard_stats.pac_stats.decay_dirty.purged);
+
+ ctl_accum_locked_u64(
+ &sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge,
+ &astats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge);
+ ctl_accum_locked_u64(
+ &sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise,
+ &astats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise);
+ ctl_accum_locked_u64(
+ &sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged,
+ &astats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged);
#define OP(mtx) malloc_mutex_prof_merge( \
&(sdstats->astats.mutex_prof_data[ \
@@ -878,14 +1140,11 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
MUTEX_PROF_ARENA_MUTEXES
#undef OP
if (!destroyed) {
- accum_atomic_zu(&sdstats->astats.base,
- &astats->astats.base);
- accum_atomic_zu(&sdstats->astats.internal,
+ sdstats->astats.base += astats->astats.base;
+ sdstats->astats.resident += astats->astats.resident;
+ sdstats->astats.metadata_thp += astats->astats.metadata_thp;
+ ctl_accum_atomic_zu(&sdstats->astats.internal,
&astats->astats.internal);
- accum_atomic_zu(&sdstats->astats.resident,
- &astats->astats.resident);
- accum_atomic_zu(&sdstats->astats.metadata_thp,
- &astats->astats.metadata_thp);
} else {
assert(atomic_load_zu(
&astats->astats.internal, ATOMIC_RELAXED) == 0);
@@ -903,23 +1162,23 @@ MUTEX_PROF_ARENA_MUTEXES
sdstats->nflushes_small += astats->nflushes_small;
if (!destroyed) {
- accum_atomic_zu(&sdstats->astats.allocated_large,
- &astats->astats.allocated_large);
+ sdstats->astats.allocated_large +=
+ astats->astats.allocated_large;
} else {
- assert(atomic_load_zu(&astats->astats.allocated_large,
- ATOMIC_RELAXED) == 0);
+ assert(astats->astats.allocated_large == 0);
}
- ctl_accum_arena_stats_u64(&sdstats->astats.nmalloc_large,
- &astats->astats.nmalloc_large);
- ctl_accum_arena_stats_u64(&sdstats->astats.ndalloc_large,
- &astats->astats.ndalloc_large);
- ctl_accum_arena_stats_u64(&sdstats->astats.nrequests_large,
- &astats->astats.nrequests_large);
- accum_atomic_zu(&sdstats->astats.abandoned_vm,
- &astats->astats.abandoned_vm);
-
- accum_atomic_zu(&sdstats->astats.tcache_bytes,
- &astats->astats.tcache_bytes);
+ sdstats->astats.nmalloc_large += astats->astats.nmalloc_large;
+ sdstats->astats.ndalloc_large += astats->astats.ndalloc_large;
+ sdstats->astats.nrequests_large
+ += astats->astats.nrequests_large;
+ sdstats->astats.nflushes_large += astats->astats.nflushes_large;
+ ctl_accum_atomic_zu(
+ &sdstats->astats.pa_shard_stats.pac_stats.abandoned_vm,
+ &astats->astats.pa_shard_stats.pac_stats.abandoned_vm);
+
+ sdstats->astats.tcache_bytes += astats->astats.tcache_bytes;
+ sdstats->astats.tcache_stashed_bytes +=
+ astats->astats.tcache_stashed_bytes;
if (ctl_arena->arena_ind == 0) {
sdstats->astats.uptime = astats->astats.uptime;
@@ -927,29 +1186,26 @@ MUTEX_PROF_ARENA_MUTEXES
/* Merge bin stats. */
for (i = 0; i < SC_NBINS; i++) {
- sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
- sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
- sdstats->bstats[i].nrequests +=
- astats->bstats[i].nrequests;
+ bin_stats_t *bstats = &astats->bstats[i].stats_data;
+ bin_stats_t *merged = &sdstats->bstats[i].stats_data;
+ merged->nmalloc += bstats->nmalloc;
+ merged->ndalloc += bstats->ndalloc;
+ merged->nrequests += bstats->nrequests;
if (!destroyed) {
- sdstats->bstats[i].curregs +=
- astats->bstats[i].curregs;
+ merged->curregs += bstats->curregs;
} else {
- assert(astats->bstats[i].curregs == 0);
+ assert(bstats->curregs == 0);
}
- sdstats->bstats[i].nfills += astats->bstats[i].nfills;
- sdstats->bstats[i].nflushes +=
- astats->bstats[i].nflushes;
- sdstats->bstats[i].nslabs += astats->bstats[i].nslabs;
- sdstats->bstats[i].reslabs += astats->bstats[i].reslabs;
+ merged->nfills += bstats->nfills;
+ merged->nflushes += bstats->nflushes;
+ merged->nslabs += bstats->nslabs;
+ merged->reslabs += bstats->reslabs;
if (!destroyed) {
- sdstats->bstats[i].curslabs +=
- astats->bstats[i].curslabs;
- sdstats->bstats[i].nonfull_slabs +=
- astats->bstats[i].nonfull_slabs;
+ merged->curslabs += bstats->curslabs;
+ merged->nonfull_slabs += bstats->nonfull_slabs;
} else {
- assert(astats->bstats[i].curslabs == 0);
- assert(astats->bstats[i].nonfull_slabs == 0);
+ assert(bstats->curslabs == 0);
+ assert(bstats->nonfull_slabs == 0);
}
malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data,
&astats->bstats[i].mutex_data);
@@ -957,11 +1213,11 @@ MUTEX_PROF_ARENA_MUTEXES
/* Merge stats for large allocations. */
for (i = 0; i < SC_NSIZES - SC_NBINS; i++) {
- ctl_accum_arena_stats_u64(&sdstats->lstats[i].nmalloc,
+ ctl_accum_locked_u64(&sdstats->lstats[i].nmalloc,
&astats->lstats[i].nmalloc);
- ctl_accum_arena_stats_u64(&sdstats->lstats[i].ndalloc,
+ ctl_accum_locked_u64(&sdstats->lstats[i].ndalloc,
&astats->lstats[i].ndalloc);
- ctl_accum_arena_stats_u64(&sdstats->lstats[i].nrequests,
+ ctl_accum_locked_u64(&sdstats->lstats[i].nrequests,
&astats->lstats[i].nrequests);
if (!destroyed) {
sdstats->lstats[i].curlextents +=
@@ -973,19 +1229,21 @@ MUTEX_PROF_ARENA_MUTEXES
/* Merge extents stats. */
for (i = 0; i < SC_NPSIZES; i++) {
- accum_atomic_zu(&sdstats->estats[i].ndirty,
- &astats->estats[i].ndirty);
- accum_atomic_zu(&sdstats->estats[i].nmuzzy,
- &astats->estats[i].nmuzzy);
- accum_atomic_zu(&sdstats->estats[i].nretained,
- &astats->estats[i].nretained);
- accum_atomic_zu(&sdstats->estats[i].dirty_bytes,
- &astats->estats[i].dirty_bytes);
- accum_atomic_zu(&sdstats->estats[i].muzzy_bytes,
- &astats->estats[i].muzzy_bytes);
- accum_atomic_zu(&sdstats->estats[i].retained_bytes,
- &astats->estats[i].retained_bytes);
+ sdstats->estats[i].ndirty += astats->estats[i].ndirty;
+ sdstats->estats[i].nmuzzy += astats->estats[i].nmuzzy;
+ sdstats->estats[i].nretained
+ += astats->estats[i].nretained;
+ sdstats->estats[i].dirty_bytes
+ += astats->estats[i].dirty_bytes;
+ sdstats->estats[i].muzzy_bytes
+ += astats->estats[i].muzzy_bytes;
+ sdstats->estats[i].retained_bytes
+ += astats->estats[i].retained_bytes;
}
+
+ /* Merge HPA stats. */
+ hpa_shard_stats_accum(&sdstats->hpastats, &astats->hpastats);
+ sec_stats_accum(&sdstats->secstats, &astats->secstats);
}
}
@@ -1001,7 +1259,7 @@ ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena,
}
static unsigned
-ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) {
+ctl_arena_init(tsd_t *tsd, const arena_config_t *config) {
unsigned arena_ind;
ctl_arena_t *ctl_arena;
@@ -1019,7 +1277,7 @@ ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) {
}
/* Initialize new arena. */
- if (arena_init(tsd_tsdn(tsd), arena_ind, extent_hooks) == NULL) {
+ if (arena_init(tsd_tsdn(tsd), arena_ind, config) == NULL) {
return UINT_MAX;
}
@@ -1036,8 +1294,11 @@ ctl_background_thread_stats_read(tsdn_t *tsdn) {
if (!have_background_thread ||
background_thread_stats_read(tsdn, stats)) {
memset(stats, 0, sizeof(background_thread_stats_t));
- nstime_init(&stats->run_interval, 0);
+ nstime_init_zero(&stats->run_interval);
}
+ malloc_mutex_prof_copy(
+ &ctl_stats->mutex_prof_data[global_prof_mutex_max_per_bg_thd],
+ &stats->max_counter_per_bg_thd);
}
static void
@@ -1069,21 +1330,17 @@ ctl_refresh(tsdn_t *tsdn) {
if (config_stats) {
ctl_stats->allocated = ctl_sarena->astats->allocated_small +
- atomic_load_zu(&ctl_sarena->astats->astats.allocated_large,
- ATOMIC_RELAXED);
+ ctl_sarena->astats->astats.allocated_large;
ctl_stats->active = (ctl_sarena->pactive << LG_PAGE);
- ctl_stats->metadata = atomic_load_zu(
- &ctl_sarena->astats->astats.base, ATOMIC_RELAXED) +
+ ctl_stats->metadata = ctl_sarena->astats->astats.base +
atomic_load_zu(&ctl_sarena->astats->astats.internal,
ATOMIC_RELAXED);
- ctl_stats->metadata_thp = atomic_load_zu(
- &ctl_sarena->astats->astats.metadata_thp, ATOMIC_RELAXED);
- ctl_stats->resident = atomic_load_zu(
- &ctl_sarena->astats->astats.resident, ATOMIC_RELAXED);
- ctl_stats->mapped = atomic_load_zu(
- &ctl_sarena->astats->astats.mapped, ATOMIC_RELAXED);
- ctl_stats->retained = atomic_load_zu(
- &ctl_sarena->astats->astats.retained, ATOMIC_RELAXED);
+ ctl_stats->resident = ctl_sarena->astats->astats.resident;
+ ctl_stats->metadata_thp =
+ ctl_sarena->astats->astats.metadata_thp;
+ ctl_stats->mapped = ctl_sarena->astats->astats.mapped;
+ ctl_stats->retained = ctl_sarena->astats->astats
+ .pa_shard_stats.pac_stats.retained;
ctl_background_thread_stats_read(tsdn);
@@ -1093,8 +1350,20 @@ ctl_refresh(tsdn_t *tsdn) {
malloc_mutex_unlock(tsdn, &mtx);
if (config_prof && opt_prof) {
- READ_GLOBAL_MUTEX_PROF_DATA(global_prof_mutex_prof,
- bt2gctx_mtx);
+ READ_GLOBAL_MUTEX_PROF_DATA(
+ global_prof_mutex_prof, bt2gctx_mtx);
+ READ_GLOBAL_MUTEX_PROF_DATA(
+ global_prof_mutex_prof_thds_data, tdatas_mtx);
+ READ_GLOBAL_MUTEX_PROF_DATA(
+ global_prof_mutex_prof_dump, prof_dump_mtx);
+ READ_GLOBAL_MUTEX_PROF_DATA(
+ global_prof_mutex_prof_recent_alloc,
+ prof_recent_alloc_mtx);
+ READ_GLOBAL_MUTEX_PROF_DATA(
+ global_prof_mutex_prof_recent_dump,
+ prof_recent_dump_mtx);
+ READ_GLOBAL_MUTEX_PROF_DATA(
+ global_prof_mutex_prof_stats, prof_stats_mtx);
}
if (have_background_thread) {
READ_GLOBAL_MUTEX_PROF_DATA(
@@ -1191,8 +1460,9 @@ label_return:
}
static int
-ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
- size_t *mibp, size_t *depthp) {
+ctl_lookup(tsdn_t *tsdn, const ctl_named_node_t *starting_node,
+ const char *name, const ctl_named_node_t **ending_nodep, size_t *mibp,
+ size_t *depthp) {
int ret;
const char *elm, *tdot, *dot;
size_t elen, i, j;
@@ -1206,7 +1476,7 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
ret = ENOENT;
goto label_return;
}
- node = super_root_node;
+ node = starting_node;
for (i = 0; i < *depthp; i++) {
assert(node);
assert(node->nchildren > 0);
@@ -1220,10 +1490,6 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
if (strlen(child->name) == elen &&
strncmp(elm, child->name, elen) == 0) {
node = child;
- if (nodesp != NULL) {
- nodesp[i] =
- (const ctl_node_t *)node;
- }
mibp[i] = j;
break;
}
@@ -1250,13 +1516,11 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
goto label_return;
}
- if (nodesp != NULL) {
- nodesp[i] = (const ctl_node_t *)node;
- }
mibp[i] = (size_t)index;
}
- if (node->ctl != NULL) {
+ /* Reached the end? */
+ if (node->ctl != NULL || *dot == '\0') {
/* Terminal node. */
if (*dot != '\0') {
/*
@@ -1272,16 +1536,14 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
}
/* Update elm. */
- if (*dot == '\0') {
- /* No more elements. */
- ret = ENOENT;
- goto label_return;
- }
elm = &dot[1];
dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
strchr(elm, '\0');
elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
}
+ if (ending_nodep != NULL) {
+ *ending_nodep = node;
+ }
ret = 0;
label_return:
@@ -1293,7 +1555,6 @@ ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
void *newp, size_t newlen) {
int ret;
size_t depth;
- ctl_node_t const *nodes[CTL_MAX_DEPTH];
size_t mib[CTL_MAX_DEPTH];
const ctl_named_node_t *node;
@@ -1303,12 +1564,12 @@ ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
}
depth = CTL_MAX_DEPTH;
- ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth);
+ ret = ctl_lookup(tsd_tsdn(tsd), super_root_node, name, &node, mib,
+ &depth);
if (ret != 0) {
goto label_return;
}
- node = ctl_named_node(nodes[depth-1]);
if (node != NULL && node->ctl) {
ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
} else {
@@ -1329,26 +1590,19 @@ ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) {
goto label_return;
}
- ret = ctl_lookup(tsd_tsdn(tsd), name, NULL, mibp, miblenp);
+ ret = ctl_lookup(tsd_tsdn(tsd), super_root_node, name, NULL, mibp,
+ miblenp);
label_return:
return(ret);
}
-int
-ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen) {
+static int
+ctl_lookupbymib(tsdn_t *tsdn, const ctl_named_node_t **ending_nodep,
+ const size_t *mib, size_t miblen) {
int ret;
- const ctl_named_node_t *node;
- size_t i;
- if (!ctl_initialized && ctl_init(tsd)) {
- ret = EAGAIN;
- goto label_return;
- }
-
- /* Iterate down the tree. */
- node = super_root_node;
- for (i = 0; i < miblen; i++) {
+ const ctl_named_node_t *node = super_root_node;
+ for (size_t i = 0; i < miblen; i++) {
assert(node);
assert(node->nchildren > 0);
if (ctl_named_node(node->children) != NULL) {
@@ -1363,13 +1617,36 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
/* Indexed element. */
inode = ctl_indexed_node(node->children);
- node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]);
+ node = inode->index(tsdn, mib, miblen, mib[i]);
if (node == NULL) {
ret = ENOENT;
goto label_return;
}
}
}
+ assert(ending_nodep != NULL);
+ *ending_nodep = node;
+ ret = 0;
+
+label_return:
+ return(ret);
+}
+
+int
+ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ const ctl_named_node_t *node;
+
+ if (!ctl_initialized && ctl_init(tsd)) {
+ ret = EAGAIN;
+ goto label_return;
+ }
+
+ ret = ctl_lookupbymib(tsd_tsdn(tsd), &node, mib, miblen);
+ if (ret != 0) {
+ goto label_return;
+ }
/* Call the ctl function. */
if (node && node->ctl) {
@@ -1383,6 +1660,81 @@ label_return:
return(ret);
}
+int
+ctl_mibnametomib(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
+ size_t *miblenp) {
+ int ret;
+ const ctl_named_node_t *node;
+
+ if (!ctl_initialized && ctl_init(tsd)) {
+ ret = EAGAIN;
+ goto label_return;
+ }
+
+ ret = ctl_lookupbymib(tsd_tsdn(tsd), &node, mib, miblen);
+ if (ret != 0) {
+ goto label_return;
+ }
+ if (node == NULL || node->ctl != NULL) {
+ ret = ENOENT;
+ goto label_return;
+ }
+
+ assert(miblenp != NULL);
+ assert(*miblenp >= miblen);
+ *miblenp -= miblen;
+ ret = ctl_lookup(tsd_tsdn(tsd), node, name, NULL, mib + miblen,
+ miblenp);
+ *miblenp += miblen;
+label_return:
+ return(ret);
+}
+
+int
+ctl_bymibname(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
+ size_t *miblenp, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ const ctl_named_node_t *node;
+
+ if (!ctl_initialized && ctl_init(tsd)) {
+ ret = EAGAIN;
+ goto label_return;
+ }
+
+ ret = ctl_lookupbymib(tsd_tsdn(tsd), &node, mib, miblen);
+ if (ret != 0) {
+ goto label_return;
+ }
+ if (node == NULL || node->ctl != NULL) {
+ ret = ENOENT;
+ goto label_return;
+ }
+
+ assert(miblenp != NULL);
+ assert(*miblenp >= miblen);
+ *miblenp -= miblen;
+ /*
+ * The same node supplies the starting node and stores the ending node.
+ */
+ ret = ctl_lookup(tsd_tsdn(tsd), node, name, &node, mib + miblen,
+ miblenp);
+ *miblenp += miblen;
+ if (ret != 0) {
+ goto label_return;
+ }
+
+ if (node != NULL && node->ctl) {
+ ret = node->ctl(tsd, mib, *miblenp, oldp, oldlenp, newp,
+ newlen);
+ } else {
+ /* The name refers to a partial path through the ctl tree. */
+ ret = ENOENT;
+ }
+
+label_return:
+ return(ret);
+}
+
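Editor's note: the lookup refactor above (ctl_lookupbymib(), plus the new ctl_mibnametomib()/ctl_bymibname() entry points) only changes how the ctl tree is walked internally; the caller-visible contract is still the documented name/MIB interface. A minimal caller-side sketch using the long-standing public wrappers, with illustrative values and error handling trimmed (in this vendored copy the symbols would carry the je_ prefix):

#include <jemalloc/jemalloc.h>

static size_t
read_arena_pdirty(unsigned arena_ind) {
	/* Refresh the cached stats snapshot first. */
	uint64_t epoch = 1;
	size_t esz = sizeof(epoch);
	mallctl("epoch", &epoch, &esz, &epoch, esz);

	/* Resolve the name once, then reuse the MIB with a patched index. */
	size_t mib[4];
	size_t miblen = sizeof(mib) / sizeof(mib[0]);
	mallctlnametomib("stats.arenas.0.pdirty", mib, &miblen);
	mib[2] = arena_ind;

	size_t pdirty = 0;
	size_t sz = sizeof(pdirty);
	mallctlbymib(mib, miblen, &pdirty, &sz, NULL, 0);
	return pdirty;
}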
bool
ctl_boot(void) {
if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL,
@@ -1410,6 +1762,11 @@ ctl_postfork_child(tsdn_t *tsdn) {
malloc_mutex_postfork_child(tsdn, &ctl_mtx);
}
+void
+ctl_mtx_assert_held(tsdn_t *tsdn) {
+ malloc_mutex_assert_owner(tsdn, &ctl_mtx);
+}
+
/******************************************************************************/
/* *_ctl() functions. */
@@ -1427,6 +1784,7 @@ ctl_postfork_child(tsdn_t *tsdn) {
} \
} while (0)
+/* Can read or write, but not both. */
#define READ_XOR_WRITE() do { \
if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \
newlen != 0)) { \
@@ -1435,12 +1793,31 @@ ctl_postfork_child(tsdn_t *tsdn) {
} \
} while (0)
+/* Can neither read nor write. */
+#define NEITHER_READ_NOR_WRITE() do { \
+ if (oldp != NULL || oldlenp != NULL || newp != NULL || \
+ newlen != 0) { \
+ ret = EPERM; \
+ goto label_return; \
+ } \
+} while (0)
+
+/* Verify that the space provided is enough. */
+#define VERIFY_READ(t) do { \
+ if (oldp == NULL || oldlenp == NULL || *oldlenp != sizeof(t)) { \
+ *oldlenp = 0; \
+ ret = EINVAL; \
+ goto label_return; \
+ } \
+} while (0)
+
#define READ(v, t) do { \
if (oldp != NULL && oldlenp != NULL) { \
if (*oldlenp != sizeof(t)) { \
size_t copylen = (sizeof(t) <= *oldlenp) \
? sizeof(t) : *oldlenp; \
memcpy(oldp, (void *)&(v), copylen); \
+ *oldlenp = copylen; \
ret = EINVAL; \
goto label_return; \
} \
@@ -1458,6 +1835,14 @@ ctl_postfork_child(tsdn_t *tsdn) {
} \
} while (0)
+#define ASSURED_WRITE(v, t) do { \
+ if (newp == NULL || newlen != sizeof(t)) { \
+ ret = EINVAL; \
+ goto label_return; \
+ } \
+ (v) = *(t *)newp; \
+} while (0)
+
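Editor's note: the new NEITHER_READ_NOR_WRITE/VERIFY_READ/ASSURED_WRITE helpers tighten argument validation: a read must supply exactly sizeof(type) of output space and a write exactly sizeof(type) of input, otherwise the handler returns EINVAL (or EPERM for the no-read/no-write case) instead of silently tolerating the mismatch. A caller-side fragment matching the tcache.create/tcache.flush/tcache.destroy handlers further down in this diff (inside a function, <jemalloc/jemalloc.h> and error checks omitted):

	unsigned tcache_ind;
	size_t sz = sizeof(tcache_ind);	/* VERIFY_READ(unsigned): must be exact. */
	mallctl("tcache.create", &tcache_ind, &sz, NULL, 0);

	/* ... allocate with mallocx(size, MALLOCX_TCACHE(tcache_ind)) ... */

	/* ASSURED_WRITE(tcache_ind, unsigned): newlen must be exact, too. */
	mallctl("tcache.flush", NULL, NULL, &tcache_ind, sizeof(tcache_ind));
	mallctl("tcache.destroy", NULL, NULL, &tcache_ind, sizeof(tcache_ind));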
#define MIB_UNSIGNED(v, i) do { \
if (mib[i] > UINT_MAX) { \
ret = EFAULT; \
@@ -1497,8 +1882,8 @@ label_return: \
#define CTL_RO_CGEN(c, n, v, t) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
@@ -1540,8 +1925,8 @@ label_return: \
*/
#define CTL_RO_NL_CGEN(c, n, v, t) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
@@ -1559,8 +1944,8 @@ label_return: \
#define CTL_RO_NL_GEN(n, v, t) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
@@ -1573,29 +1958,10 @@ label_return: \
return ret; \
}
-#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \
-static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) { \
- int ret; \
- t oldval; \
- \
- if (!(c)) { \
- return ENOENT; \
- } \
- READONLY(); \
- oldval = (m(tsd)); \
- READ(oldval, t); \
- \
- ret = 0; \
-label_return: \
- return ret; \
-}
-
#define CTL_RO_CONFIG_GEN(n, t) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
@@ -1761,7 +2127,34 @@ CTL_RO_CONFIG_GEN(config_xmalloc, bool)
CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool)
+CTL_RO_NL_GEN(opt_cache_oblivious, opt_cache_oblivious, bool)
+CTL_RO_NL_GEN(opt_trust_madvise, opt_trust_madvise, bool)
CTL_RO_NL_GEN(opt_confirm_conf, opt_confirm_conf, bool)
+
+/* HPA options. */
+CTL_RO_NL_GEN(opt_hpa, opt_hpa, bool)
+CTL_RO_NL_GEN(opt_hpa_hugification_threshold,
+ opt_hpa_opts.hugification_threshold, size_t)
+CTL_RO_NL_GEN(opt_hpa_hugify_delay_ms, opt_hpa_opts.hugify_delay_ms, uint64_t)
+CTL_RO_NL_GEN(opt_hpa_min_purge_interval_ms, opt_hpa_opts.min_purge_interval_ms,
+ uint64_t)
+
+/*
+ * This will have to change before we publicly document this option; fxp_t and
+ * its representation are internal implementation details.
+ */
+CTL_RO_NL_GEN(opt_hpa_dirty_mult, opt_hpa_opts.dirty_mult, fxp_t)
+CTL_RO_NL_GEN(opt_hpa_slab_max_alloc, opt_hpa_opts.slab_max_alloc, size_t)
+
+/* HPA SEC options */
+CTL_RO_NL_GEN(opt_hpa_sec_nshards, opt_hpa_sec_opts.nshards, size_t)
+CTL_RO_NL_GEN(opt_hpa_sec_max_alloc, opt_hpa_sec_opts.max_alloc, size_t)
+CTL_RO_NL_GEN(opt_hpa_sec_max_bytes, opt_hpa_sec_opts.max_bytes, size_t)
+CTL_RO_NL_GEN(opt_hpa_sec_bytes_after_flush, opt_hpa_sec_opts.bytes_after_flush,
+ size_t)
+CTL_RO_NL_GEN(opt_hpa_sec_batch_fill_extra, opt_hpa_sec_opts.batch_fill_extra,
+ size_t)
+
CTL_RO_NL_GEN(opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp],
const char *)
CTL_RO_NL_GEN(opt_retain, opt_retain, bool)
@@ -1769,6 +2162,7 @@ CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena],
const char *)
+CTL_RO_NL_GEN(opt_mutex_max_spin, opt_mutex_max_spin, int64_t)
CTL_RO_NL_GEN(opt_oversize_threshold, opt_oversize_threshold, size_t)
CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool)
CTL_RO_NL_GEN(opt_max_background_threads, opt_max_background_threads, size_t)
@@ -1776,15 +2170,31 @@ CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t)
CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_GEN(opt_stats_print_opts, opt_stats_print_opts, const char *)
+CTL_RO_NL_GEN(opt_stats_interval, opt_stats_interval, int64_t)
+CTL_RO_NL_GEN(opt_stats_interval_opts, opt_stats_interval_opts, const char *)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
+CTL_RO_NL_CGEN(config_enable_cxx, opt_experimental_infallible_new,
+ opt_experimental_infallible_new, bool)
CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool)
+CTL_RO_NL_GEN(opt_tcache_max, opt_tcache_max, size_t)
+CTL_RO_NL_GEN(opt_tcache_nslots_small_min, opt_tcache_nslots_small_min,
+ unsigned)
+CTL_RO_NL_GEN(opt_tcache_nslots_small_max, opt_tcache_nslots_small_max,
+ unsigned)
+CTL_RO_NL_GEN(opt_tcache_nslots_large, opt_tcache_nslots_large, unsigned)
+CTL_RO_NL_GEN(opt_lg_tcache_nslots_mul, opt_lg_tcache_nslots_mul, ssize_t)
+CTL_RO_NL_GEN(opt_tcache_gc_incr_bytes, opt_tcache_gc_incr_bytes, size_t)
+CTL_RO_NL_GEN(opt_tcache_gc_delay_bytes, opt_tcache_gc_delay_bytes, size_t)
+CTL_RO_NL_GEN(opt_lg_tcache_flush_small_div, opt_lg_tcache_flush_small_div,
+ unsigned)
+CTL_RO_NL_GEN(opt_lg_tcache_flush_large_div, opt_lg_tcache_flush_large_div,
+ unsigned)
CTL_RO_NL_GEN(opt_thp, thp_mode_names[opt_thp], const char *)
CTL_RO_NL_GEN(opt_lg_extent_max_active_fit, opt_lg_extent_max_active_fit,
size_t)
-CTL_RO_NL_GEN(opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
@@ -1796,6 +2206,18 @@ CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_leak_error, opt_prof_leak_error, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_recent_alloc_max,
+ opt_prof_recent_alloc_max, ssize_t)
+CTL_RO_NL_CGEN(config_prof, opt_prof_stats, opt_prof_stats, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_sys_thread_name, opt_prof_sys_thread_name,
+ bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_time_res,
+ prof_time_res_mode_names[opt_prof_time_res], const char *)
+CTL_RO_NL_CGEN(config_uaf_detection, opt_lg_san_uaf_align,
+ opt_lg_san_uaf_align, ssize_t)
+CTL_RO_NL_GEN(opt_zero_realloc,
+ zero_realloc_mode_names[opt_zero_realloc_action], const char *)
/******************************************************************************/
@@ -1843,10 +2265,11 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
goto label_return;
}
/* Set new arena/tcache associations. */
- arena_migrate(tsd, oldind, newind);
+ arena_migrate(tsd, oldarena, newarena);
if (tcache_available(tsd)) {
tcache_arena_reassociate(tsd_tsdn(tsd),
- tsd_tcachep_get(tsd), newarena);
+ tsd_tcache_slowp_get(tsd), tsd_tcachep_get(tsd),
+ newarena);
}
}
@@ -1855,14 +2278,10 @@ label_return:
return ret;
}
-CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get,
- uint64_t)
-CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get,
- uint64_t *)
-CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get,
- uint64_t)
-CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
- tsd_thread_deallocatedp_get, uint64_t *)
+CTL_RO_NL_GEN(thread_allocated, tsd_thread_allocated_get(tsd), uint64_t)
+CTL_RO_NL_GEN(thread_allocatedp, tsd_thread_allocatedp_get(tsd), uint64_t *)
+CTL_RO_NL_GEN(thread_deallocated, tsd_thread_deallocated_get(tsd), uint64_t)
+CTL_RO_NL_GEN(thread_deallocatedp, tsd_thread_deallocatedp_get(tsd), uint64_t *)
static int
thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib,
@@ -1897,8 +2316,7 @@ thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib,
goto label_return;
}
- READONLY();
- WRITEONLY();
+ NEITHER_READ_NOR_WRITE();
tcache_flush(tsd);
@@ -1908,12 +2326,44 @@ label_return:
}
static int
+thread_peak_read_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp,
+ size_t newlen) {
+ int ret;
+ if (!config_stats) {
+ return ENOENT;
+ }
+ READONLY();
+ peak_event_update(tsd);
+ uint64_t result = peak_event_max(tsd);
+ READ(result, uint64_t);
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static int
+thread_peak_reset_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp,
+ size_t newlen) {
+ int ret;
+ if (!config_stats) {
+ return ENOENT;
+ }
+ NEITHER_READ_NOR_WRITE();
+ peak_event_zero(tsd);
+ ret = 0;
+label_return:
+ return ret;
+}
+
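Editor's note: these two handlers appear to back the thread.peak.read and thread.peak.reset mallctls (a per-thread peak-allocation watermark, available only in stats-enabled builds). A minimal usage sketch, assuming those names:

	mallctl("thread.peak.reset", NULL, NULL, NULL, 0);
	/* ... run the workload being measured ... */
	uint64_t peak;
	size_t sz = sizeof(peak);
	mallctl("thread.peak.read", &peak, &sz, NULL, 0);
	/* peak is roughly the high-water mark of net allocated bytes since reset. */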
+static int
thread_prof_name_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret;
- if (!config_prof) {
+ if (!config_prof || !opt_prof) {
return ENOENT;
}
@@ -1950,8 +2400,12 @@ thread_prof_active_ctl(tsd_t *tsd, const size_t *mib,
return ENOENT;
}
- oldval = prof_thread_active_get(tsd);
+ oldval = opt_prof ? prof_thread_active_get(tsd) : false;
if (newp != NULL) {
+ if (!opt_prof) {
+ ret = ENOENT;
+ goto label_return;
+ }
if (newlen != sizeof(bool)) {
ret = EINVAL;
goto label_return;
@@ -1968,6 +2422,39 @@ label_return:
return ret;
}
+static int
+thread_idle_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp,
+ size_t newlen) {
+ int ret;
+
+ NEITHER_READ_NOR_WRITE();
+
+ if (tcache_available(tsd)) {
+ tcache_flush(tsd);
+ }
+ /*
+ * This heuristic is perhaps not the most well-considered. But it
+ * matches the only idling policy we have experience with in the status
+ * quo. Over time we should investigate more principled approaches.
+ */
+ if (opt_narenas > ncpus * 2) {
+ arena_t *arena = arena_choose(tsd, NULL);
+ if (arena != NULL) {
+ arena_decay(tsd_tsdn(tsd), arena, false, true);
+ }
+ /*
+ * The missing arena case is not actually an error; a thread
+ * might be idle before it associates itself to one. This is
+ * unusual, but not wrong.
+ */
+ }
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
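Editor's note: thread.idle takes no input and returns no output (hence NEITHER_READ_NOR_WRITE); it flushes the calling thread's tcache and may trigger decay, so it is intended to be called right before a thread expects to block for a long time. For example:

	/* About to block for a long time; hand cached memory back first. */
	mallctl("thread.idle", NULL, NULL, NULL, 0);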
/******************************************************************************/
static int
@@ -1977,7 +2464,8 @@ tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
unsigned tcache_ind;
READONLY();
- if (tcaches_create(tsd, &tcache_ind)) {
+ VERIFY_READ(unsigned);
+ if (tcaches_create(tsd, b0get(), &tcache_ind)) {
ret = EFAULT;
goto label_return;
}
@@ -1995,12 +2483,7 @@ tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
unsigned tcache_ind;
WRITEONLY();
- tcache_ind = UINT_MAX;
- WRITE(tcache_ind, unsigned);
- if (tcache_ind == UINT_MAX) {
- ret = EFAULT;
- goto label_return;
- }
+ ASSURED_WRITE(tcache_ind, unsigned);
tcaches_flush(tsd, tcache_ind);
ret = 0;
@@ -2015,12 +2498,7 @@ tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
unsigned tcache_ind;
WRITEONLY();
- tcache_ind = UINT_MAX;
- WRITE(tcache_ind, unsigned);
- if (tcache_ind == UINT_MAX) {
- ret = EFAULT;
- goto label_return;
- }
+ ASSURED_WRITE(tcache_ind, unsigned);
tcaches_destroy(tsd, tcache_ind);
ret = 0;
@@ -2105,8 +2583,7 @@ arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
int ret;
unsigned arena_ind;
- READONLY();
- WRITEONLY();
+ NEITHER_READ_NOR_WRITE();
MIB_UNSIGNED(arena_ind, 1);
arena_i_decay(tsd_tsdn(tsd), arena_ind, false);
@@ -2121,8 +2598,7 @@ arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
int ret;
unsigned arena_ind;
- READONLY();
- WRITEONLY();
+ NEITHER_READ_NOR_WRITE();
MIB_UNSIGNED(arena_ind, 1);
arena_i_decay(tsd_tsdn(tsd), arena_ind, true);
@@ -2137,8 +2613,7 @@ arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen,
arena_t **arena) {
int ret;
- READONLY();
- WRITEONLY();
+ NEITHER_READ_NOR_WRITE();
MIB_UNSIGNED(*arena_ind, 1);
*arena = arena_get(tsd_tsdn(tsd), *arena_ind, false);
@@ -2211,6 +2686,8 @@ arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
arena_t *arena;
ctl_arena_t *ctl_darena, *ctl_arena;
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+
ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
newp, newlen, &arena_ind, &arena);
if (ret != 0) {
@@ -2241,6 +2718,8 @@ arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
assert(ret == 0);
label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+
return ret;
}
@@ -2306,6 +2785,38 @@ label_return:
}
static int
+arena_i_oversize_threshold_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+
+ unsigned arena_ind;
+ MIB_UNSIGNED(arena_ind, 1);
+
+ arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
+ if (arena == NULL) {
+ ret = EFAULT;
+ goto label_return;
+ }
+
+ if (oldp != NULL && oldlenp != NULL) {
+ size_t oldval = atomic_load_zu(
+ &arena->pa_shard.pac.oversize_threshold, ATOMIC_RELAXED);
+ READ(oldval, size_t);
+ }
+ if (newp != NULL) {
+ if (newlen != sizeof(size_t)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ atomic_store_zu(&arena->pa_shard.pac.oversize_threshold,
+ *(size_t *)newp, ATOMIC_RELAXED);
+ }
+ ret = 0;
+label_return:
+ return ret;
+}
+
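Editor's note: judging by its position in the tree, this new handler is exposed as arena.<i>.oversize_threshold (name inferred from the handler, not verified against the manual here) and lets the arena's oversize threshold (see opt.oversize_threshold) be read or changed at runtime:

	/* Read the current threshold for arena 0, then raise it (illustrative). */
	size_t thresh;
	size_t sz = sizeof(thresh);
	mallctl("arena.0.oversize_threshold", &thresh, &sz, NULL, 0);

	thresh = (size_t)16 << 20;	/* 16 MiB */
	mallctl("arena.0.oversize_threshold", NULL, NULL, &thresh, sizeof(thresh));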
+static int
arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) {
int ret;
@@ -2318,10 +2829,10 @@ arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
ret = EFAULT;
goto label_return;
}
+ extent_state_t state = dirty ? extent_state_dirty : extent_state_muzzy;
if (oldp != NULL && oldlenp != NULL) {
- size_t oldval = dirty ? arena_dirty_decay_ms_get(arena) :
- arena_muzzy_decay_ms_get(arena);
+ size_t oldval = arena_decay_ms_get(arena, state);
READ(oldval, ssize_t);
}
if (newp != NULL) {
@@ -2340,9 +2851,9 @@ arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
goto label_return;
}
}
- if (dirty ? arena_dirty_decay_ms_set(tsd_tsdn(tsd), arena,
- *(ssize_t *)newp) : arena_muzzy_decay_ms_set(tsd_tsdn(tsd),
- arena, *(ssize_t *)newp)) {
+
+ if (arena_decay_ms_set(tsd_tsdn(tsd), arena, state,
+ *(ssize_t *)newp)) {
ret = EFAULT;
goto label_return;
}
@@ -2385,15 +2896,18 @@ arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
goto label_return;
}
old_extent_hooks =
- (extent_hooks_t *)&extent_hooks_default;
+ (extent_hooks_t *)&ehooks_default_extent_hooks;
READ(old_extent_hooks, extent_hooks_t *);
if (newp != NULL) {
/* Initialize a new arena as a side effect. */
extent_hooks_t *new_extent_hooks
JEMALLOC_CC_SILENCE_INIT(NULL);
WRITE(new_extent_hooks, extent_hooks_t *);
+ arena_config_t config = arena_config_default;
+ config.extent_hooks = new_extent_hooks;
+
arena = arena_init(tsd_tsdn(tsd), arena_ind,
- new_extent_hooks);
+ &config);
if (arena == NULL) {
ret = EFAULT;
goto label_return;
@@ -2404,11 +2918,13 @@ arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
extent_hooks_t *new_extent_hooks
JEMALLOC_CC_SILENCE_INIT(NULL);
WRITE(new_extent_hooks, extent_hooks_t *);
- old_extent_hooks = extent_hooks_set(tsd, arena,
- new_extent_hooks);
+ old_extent_hooks = arena_set_extent_hooks(tsd,
+ arena, new_extent_hooks);
READ(old_extent_hooks, extent_hooks_t *);
} else {
- old_extent_hooks = extent_hooks_get(arena);
+ old_extent_hooks =
+ ehooks_get_extent_hooks_ptr(
+ arena_get_ehooks(arena));
READ(old_extent_hooks, extent_hooks_t *);
}
}
@@ -2493,10 +3009,6 @@ arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
READONLY();
- if (*oldlenp != sizeof(unsigned)) {
- ret = EINVAL;
- goto label_return;
- }
narenas = ctl_arenas->narenas;
READ(narenas, unsigned);
@@ -2582,14 +3094,14 @@ static int
arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
- extent_hooks_t *extent_hooks;
unsigned arena_ind;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
- extent_hooks = (extent_hooks_t *)&extent_hooks_default;
- WRITE(extent_hooks, extent_hooks_t *);
- if ((arena_ind = ctl_arena_init(tsd, extent_hooks)) == UINT_MAX) {
+ VERIFY_READ(unsigned);
+ arena_config_t config = arena_config_default;
+ WRITE(config.extent_hooks, extent_hooks_t *);
+ if ((arena_ind = ctl_arena_init(tsd, &config)) == UINT_MAX) {
ret = EAGAIN;
goto label_return;
}
@@ -2602,26 +3114,52 @@ label_return:
}
static int
+experimental_arenas_create_ext_ctl(tsd_t *tsd,
+ const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ unsigned arena_ind;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+
+ arena_config_t config = arena_config_default;
+ VERIFY_READ(unsigned);
+ WRITE(config, arena_config_t);
+
+ if ((arena_ind = ctl_arena_init(tsd, &config)) == UINT_MAX) {
+ ret = EAGAIN;
+ goto label_return;
+ }
+ READ(arena_ind, unsigned);
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return ret;
+}
+
+static int
arenas_lookup_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret;
unsigned arena_ind;
void *ptr;
- extent_t *extent;
+ edata_t *edata;
arena_t *arena;
ptr = NULL;
ret = EINVAL;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
WRITE(ptr, void *);
- extent = iealloc(tsd_tsdn(tsd), ptr);
- if (extent == NULL)
+ edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr);
+ if (edata == NULL) {
goto label_return;
+ }
- arena = extent_arena_get(extent);
- if (arena == NULL)
+ arena = arena_get_from_edata(edata);
+ if (arena == NULL) {
goto label_return;
+ }
arena_ind = arena_ind_get(arena);
READ(arena_ind, unsigned);
@@ -2646,6 +3184,10 @@ prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib,
}
if (newp != NULL) {
+ if (!opt_prof) {
+ ret = ENOENT;
+ goto label_return;
+ }
if (newlen != sizeof(bool)) {
ret = EINVAL;
goto label_return;
@@ -2653,7 +3195,8 @@ prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib,
oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
*(bool *)newp);
} else {
- oldval = prof_thread_active_init_get(tsd_tsdn(tsd));
+ oldval = opt_prof ? prof_thread_active_init_get(tsd_tsdn(tsd)) :
+ false;
}
READ(oldval, bool);
@@ -2669,7 +3212,8 @@ prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
bool oldval;
if (!config_prof) {
- return ENOENT;
+ ret = ENOENT;
+ goto label_return;
}
if (newp != NULL) {
@@ -2677,9 +3221,20 @@ prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
ret = EINVAL;
goto label_return;
}
- oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp);
+ bool val = *(bool *)newp;
+ if (!opt_prof) {
+ if (val) {
+ ret = ENOENT;
+ goto label_return;
+ } else {
+ /* No change needed (already off). */
+ oldval = false;
+ }
+ } else {
+ oldval = prof_active_set(tsd_tsdn(tsd), val);
+ }
} else {
- oldval = prof_active_get(tsd_tsdn(tsd));
+ oldval = opt_prof ? prof_active_get(tsd_tsdn(tsd)) : false;
}
READ(oldval, bool);
@@ -2694,7 +3249,7 @@ prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
int ret;
const char *filename = NULL;
- if (!config_prof) {
+ if (!config_prof || !opt_prof) {
return ENOENT;
}
@@ -2722,13 +3277,17 @@ prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
}
if (newp != NULL) {
+ if (!opt_prof) {
+ ret = ENOENT;
+ goto label_return;
+ }
if (newlen != sizeof(bool)) {
ret = EINVAL;
goto label_return;
}
oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
} else {
- oldval = prof_gdump_get(tsd_tsdn(tsd));
+ oldval = opt_prof ? prof_gdump_get(tsd_tsdn(tsd)) : false;
}
READ(oldval, bool);
@@ -2738,12 +3297,32 @@ label_return:
}
static int
+prof_prefix_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ const char *prefix = NULL;
+
+ if (!config_prof || !opt_prof) {
+ return ENOENT;
+ }
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ WRITEONLY();
+ WRITE(prefix, const char *);
+
+ ret = prof_prefix_set(tsd_tsdn(tsd), prefix) ? EFAULT : 0;
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return ret;
+}
+
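Editor's note: prof_prefix_ctl is write-only and presumably surfaces as prof.prefix (inferred name), overriding opt.prof_prefix for subsequent profile dumps. A sketch, assuming a profiling-enabled build with opt.prof on:

	const char *prefix = "/tmp/myapp.jeprof";	/* illustrative path */
	mallctl("prof.prefix", NULL, NULL, &prefix, sizeof(prefix));
	mallctl("prof.dump", NULL, NULL, NULL, 0);	/* dump uses the new prefix */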
+static int
prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
size_t lg_sample = lg_prof_sample;
- if (!config_prof) {
+ if (!config_prof || !opt_prof) {
return ENOENT;
}
@@ -2770,7 +3349,7 @@ prof_log_start_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
const char *filename = NULL;
- if (!config_prof) {
+ if (!config_prof || !opt_prof) {
return ENOENT;
}
@@ -2790,7 +3369,7 @@ label_return:
static int
prof_log_stop_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) {
- if (!config_prof) {
+ if (!config_prof || !opt_prof) {
return ENOENT;
}
@@ -2801,6 +3380,87 @@ prof_log_stop_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
return 0;
}
+static int
+experimental_hooks_prof_backtrace_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+
+ if (oldp == NULL && newp == NULL) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ if (oldp != NULL) {
+ prof_backtrace_hook_t old_hook =
+ prof_backtrace_hook_get();
+ READ(old_hook, prof_backtrace_hook_t);
+ }
+ if (newp != NULL) {
+ if (!opt_prof) {
+ ret = ENOENT;
+ goto label_return;
+ }
+ prof_backtrace_hook_t new_hook JEMALLOC_CC_SILENCE_INIT(NULL);
+ WRITE(new_hook, prof_backtrace_hook_t);
+ if (new_hook == NULL) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ prof_backtrace_hook_set(new_hook);
+ }
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static int
+experimental_hooks_prof_dump_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+
+ if (oldp == NULL && newp == NULL) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ if (oldp != NULL) {
+ prof_dump_hook_t old_hook =
+ prof_dump_hook_get();
+ READ(old_hook, prof_dump_hook_t);
+ }
+ if (newp != NULL) {
+ if (!opt_prof) {
+ ret = ENOENT;
+ goto label_return;
+ }
+ prof_dump_hook_t new_hook JEMALLOC_CC_SILENCE_INIT(NULL);
+ WRITE(new_hook, prof_dump_hook_t);
+ prof_dump_hook_set(new_hook);
+ }
+ ret = 0;
+label_return:
+ return ret;
+}
+
+/* For integration test purpose only. No plan to move out of experimental. */
+static int
+experimental_hooks_safety_check_abort_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+
+ WRITEONLY();
+ if (newp != NULL) {
+ if (newlen != sizeof(safety_check_abort_hook_t)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ safety_check_abort_hook_t hook JEMALLOC_CC_SILENCE_INIT(NULL);
+ WRITE(hook, safety_check_abort_hook_t);
+ safety_check_set_abort(hook);
+ }
+ ret = 0;
+label_return:
+ return ret;
+}
+
/******************************************************************************/
CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t)
@@ -2818,6 +3478,9 @@ CTL_RO_CGEN(config_stats, stats_background_thread_num_runs,
CTL_RO_CGEN(config_stats, stats_background_thread_run_interval,
nstime_ns(&ctl_stats->background_thread.run_interval), uint64_t)
+CTL_RO_CGEN(config_stats, stats_zero_reallocs,
+ atomic_load_zu(&zero_realloc_count, ATOMIC_RELAXED), size_t)
+
CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *)
CTL_RO_GEN(stats_arenas_i_dirty_decay_ms, arenas_i(mib[2])->dirty_decay_ms,
ssize_t)
@@ -2830,55 +3493,61 @@ CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t)
CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.mapped, ATOMIC_RELAXED),
- size_t)
+ arenas_i(mib[2])->astats->astats.mapped, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.retained, ATOMIC_RELAXED),
- size_t)
+ arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.retained, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_extent_avail,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.extent_avail,
- ATOMIC_RELAXED),
- size_t)
+ arenas_i(mib[2])->astats->astats.pa_shard_stats.edata_avail, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.decay_dirty.npurge), uint64_t)
+ locked_read_u64_unsynchronized(
+ &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge),
+ uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.decay_dirty.nmadvise), uint64_t)
+ locked_read_u64_unsynchronized(
+ &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise),
+ uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.decay_dirty.purged), uint64_t)
+ locked_read_u64_unsynchronized(
+ &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.purged),
+ uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.decay_muzzy.npurge), uint64_t)
+ locked_read_u64_unsynchronized(
+ &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge),
+ uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.decay_muzzy.nmadvise), uint64_t)
+ locked_read_u64_unsynchronized(
+ &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise),
+ uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.decay_muzzy.purged), uint64_t)
+ locked_read_u64_unsynchronized(
+ &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged),
+ uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_base,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.base, ATOMIC_RELAXED),
+ arenas_i(mib[2])->astats->astats.base,
size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_internal,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED),
size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_thp,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.metadata_thp,
- ATOMIC_RELAXED), size_t)
+ arenas_i(mib[2])->astats->astats.metadata_thp, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.tcache_bytes,
- ATOMIC_RELAXED), size_t)
+ arenas_i(mib[2])->astats->astats.tcache_bytes, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_stashed_bytes,
+ arenas_i(mib[2])->astats->astats.tcache_stashed_bytes, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_resident,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.resident, ATOMIC_RELAXED),
+ arenas_i(mib[2])->astats->astats.resident,
size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_abandoned_vm,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.abandoned_vm,
+ atomic_load_zu(
+ &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.abandoned_vm,
ATOMIC_RELAXED), size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_sec_bytes,
+ arenas_i(mib[2])->astats->secstats.bytes, size_t)
+
CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
arenas_i(mib[2])->astats->allocated_small, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
@@ -2892,27 +3561,21 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_small_nfills,
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nflushes,
arenas_i(mib[2])->astats->nflushes_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.allocated_large,
- ATOMIC_RELAXED), size_t)
+ arenas_i(mib[2])->astats->astats.allocated_large, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t)
+ arenas_i(mib[2])->astats->astats.nmalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.ndalloc_large), uint64_t)
+ arenas_i(mib[2])->astats->astats.ndalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.nrequests_large), uint64_t)
+ arenas_i(mib[2])->astats->astats.nrequests_large, uint64_t)
/*
* Note: "nmalloc_large" here instead of "nfills" in the read. This is
* intentional (large has no batch fill).
*/
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nfills,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t)
+ arenas_i(mib[2])->astats->astats.nmalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nflushes,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.nflushes_large), uint64_t)
+ arenas_i(mib[2])->astats->astats.nflushes_large, uint64_t)
/* Lock profiling related APIs below. */
#define RO_MUTEX_CTL_GEN(n, l) \
@@ -2972,9 +3635,13 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
}
if (config_prof && opt_prof) {
MUTEX_PROF_RESET(bt2gctx_mtx);
+ MUTEX_PROF_RESET(tdatas_mtx);
+ MUTEX_PROF_RESET(prof_dump_mtx);
+ MUTEX_PROF_RESET(prof_recent_alloc_mtx);
+ MUTEX_PROF_RESET(prof_recent_dump_mtx);
+ MUTEX_PROF_RESET(prof_stats_mtx);
}
-
/* Per arena mutexes. */
unsigned n = narenas_total_get();
@@ -2984,18 +3651,18 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
continue;
}
MUTEX_PROF_RESET(arena->large_mtx);
- MUTEX_PROF_RESET(arena->extent_avail_mtx);
- MUTEX_PROF_RESET(arena->extents_dirty.mtx);
- MUTEX_PROF_RESET(arena->extents_muzzy.mtx);
- MUTEX_PROF_RESET(arena->extents_retained.mtx);
- MUTEX_PROF_RESET(arena->decay_dirty.mtx);
- MUTEX_PROF_RESET(arena->decay_muzzy.mtx);
+ MUTEX_PROF_RESET(arena->pa_shard.edata_cache.mtx);
+ MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_dirty.mtx);
+ MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_muzzy.mtx);
+ MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_retained.mtx);
+ MUTEX_PROF_RESET(arena->pa_shard.pac.decay_dirty.mtx);
+ MUTEX_PROF_RESET(arena->pa_shard.pac.decay_muzzy.mtx);
MUTEX_PROF_RESET(arena->tcache_ql_mtx);
MUTEX_PROF_RESET(arena->base->mtx);
- for (szind_t i = 0; i < SC_NBINS; i++) {
- for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
- bin_t *bin = &arena->bins[i].bin_shards[j];
+ for (szind_t j = 0; j < SC_NBINS; j++) {
+ for (unsigned k = 0; k < bin_infos[j].n_shards; k++) {
+ bin_t *bin = arena_get_bin(arena, j, k);
MUTEX_PROF_RESET(bin->lock);
}
}
@@ -3005,25 +3672,25 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
}
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
- arenas_i(mib[2])->astats->bstats[mib[4]].nmalloc, uint64_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
- arenas_i(mib[2])->astats->bstats[mib[4]].ndalloc, uint64_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
- arenas_i(mib[2])->astats->bstats[mib[4]].nrequests, uint64_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nrequests, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
- arenas_i(mib[2])->astats->bstats[mib[4]].curregs, size_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.curregs, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nfills,
- arenas_i(mib[2])->astats->bstats[mib[4]].nfills, uint64_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nfills, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nflushes,
- arenas_i(mib[2])->astats->bstats[mib[4]].nflushes, uint64_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nflushes, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs,
- arenas_i(mib[2])->astats->bstats[mib[4]].nslabs, uint64_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nslabs, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs,
- arenas_i(mib[2])->astats->bstats[mib[4]].reslabs, uint64_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.reslabs, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs,
- arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.curslabs, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nonfull_slabs,
- arenas_i(mib[2])->astats->bstats[mib[4]].nonfull_slabs, size_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nonfull_slabs, size_t)
static const ctl_named_node_t *
stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib,
@@ -3035,13 +3702,13 @@ stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib,
}
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc,
- ctl_arena_stats_read_u64(
+ locked_read_u64_unsynchronized(
&arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc,
- ctl_arena_stats_read_u64(
+ locked_read_u64_unsynchronized(
&arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests,
- ctl_arena_stats_read_u64(
+ locked_read_u64_unsynchronized(
&arenas_i(mib[2])->astats->lstats[mib[4]].nrequests), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t)
@@ -3056,29 +3723,17 @@ stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib,
}
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_ndirty,
- atomic_load_zu(
- &arenas_i(mib[2])->astats->estats[mib[4]].ndirty,
- ATOMIC_RELAXED), size_t);
+ arenas_i(mib[2])->astats->estats[mib[4]].ndirty, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nmuzzy,
- atomic_load_zu(
- &arenas_i(mib[2])->astats->estats[mib[4]].nmuzzy,
- ATOMIC_RELAXED), size_t);
+ arenas_i(mib[2])->astats->estats[mib[4]].nmuzzy, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nretained,
- atomic_load_zu(
- &arenas_i(mib[2])->astats->estats[mib[4]].nretained,
- ATOMIC_RELAXED), size_t);
+ arenas_i(mib[2])->astats->estats[mib[4]].nretained, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_dirty_bytes,
- atomic_load_zu(
- &arenas_i(mib[2])->astats->estats[mib[4]].dirty_bytes,
- ATOMIC_RELAXED), size_t);
+ arenas_i(mib[2])->astats->estats[mib[4]].dirty_bytes, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_muzzy_bytes,
- atomic_load_zu(
- &arenas_i(mib[2])->astats->estats[mib[4]].muzzy_bytes,
- ATOMIC_RELAXED), size_t);
+ arenas_i(mib[2])->astats->estats[mib[4]].muzzy_bytes, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_retained_bytes,
- atomic_load_zu(
- &arenas_i(mib[2])->astats->estats[mib[4]].retained_bytes,
- ATOMIC_RELAXED), size_t);
+ arenas_i(mib[2])->astats->estats[mib[4]].retained_bytes, size_t);
static const ctl_named_node_t *
stats_arenas_i_extents_j_index(tsdn_t *tsdn, const size_t *mib,
@@ -3089,6 +3744,82 @@ stats_arenas_i_extents_j_index(tsdn_t *tsdn, const size_t *mib,
return super_stats_arenas_i_extents_j_node;
}
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_npurge_passes,
+ arenas_i(mib[2])->astats->hpastats.nonderived_stats.npurge_passes, uint64_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_npurges,
+ arenas_i(mib[2])->astats->hpastats.nonderived_stats.npurges, uint64_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nhugifies,
+ arenas_i(mib[2])->astats->hpastats.nonderived_stats.nhugifies, uint64_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_ndehugifies,
+ arenas_i(mib[2])->astats->hpastats.nonderived_stats.ndehugifies, uint64_t);
+
+/* Full, nonhuge */
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].npageslabs,
+ size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_nactive_nonhuge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].nactive, size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_ndirty_nonhuge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].ndirty, size_t);
+
+/* Full, huge */
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].npageslabs,
+ size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_nactive_huge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].nactive, size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_ndirty_huge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].ndirty, size_t);
+
+/* Empty, nonhuge */
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_npageslabs_nonhuge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].npageslabs,
+ size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_nactive_nonhuge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].nactive, size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_ndirty_nonhuge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].ndirty, size_t);
+
+/* Empty, huge */
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_npageslabs_huge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].npageslabs,
+ size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_nactive_huge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].nactive, size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_ndirty_huge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].ndirty, size_t);
+
+/* Nonfull, nonhuge */
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_nonhuge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0].npageslabs,
+ size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_nonhuge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0].nactive,
+ size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0].ndirty,
+ size_t);
+
+/* Nonfull, huge */
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_huge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1].npageslabs,
+ size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_huge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1].nactive,
+ size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1].ndirty,
+ size_t);
+
+static const ctl_named_node_t *
+stats_arenas_i_hpa_shard_nonfull_slabs_j_index(tsdn_t *tsdn, const size_t *mib,
+ size_t miblen, size_t j) {
+ if (j >= PSSET_NPSIZES) {
+ return NULL;
+ }
+ return super_stats_arenas_i_hpa_shard_nonfull_slabs_j_node;
+}
+
static bool
ctl_arenas_i_verify(size_t i) {
size_t a = arenas_i2a_impl(i, true, true);
@@ -3161,6 +3892,32 @@ label_return:
return ret;
}
+static int
+experimental_thread_activity_callback_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+
+ if (!config_stats) {
+ return ENOENT;
+ }
+
+ activity_callback_thunk_t t_old = tsd_activity_callback_thunk_get(tsd);
+ READ(t_old, activity_callback_thunk_t);
+
+ if (newp != NULL) {
+ /*
+ * This initialization is unnecessary. If it's omitted, though,
+ * clang gets confused and warns on the subsequent use of t_new.
+ */
+ activity_callback_thunk_t t_new = {NULL, NULL};
+ WRITE(t_new, activity_callback_thunk_t);
+ tsd_activity_callback_thunk_set(tsd, t_new);
+ }
+ ret = 0;
+label_return:
+ return ret;
+}
+
/*
* Output six memory utilization entries for an input pointer, the first one of
* type (void *) and the remaining five of type size_t, describing the following
@@ -3178,7 +3935,8 @@ label_return:
* otherwise their values are undefined.
*
* This API is mainly intended for small class allocations, where extents are
- * used as slab.
+ * used as slab. Note that if the bin the extent belongs to is completely
+ * full, "(a)" will be NULL.
*
* In case of large class allocations, "(a)" will be NULL, and "(e)" and "(f)"
* will be zero (if stats are enabled; otherwise undefined). The other three
@@ -3232,11 +3990,11 @@ experimental_utilization_query_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
- assert(sizeof(extent_util_stats_verbose_t)
+ assert(sizeof(inspect_extent_util_stats_verbose_t)
== sizeof(void *) + sizeof(size_t) * 5);
if (oldp == NULL || oldlenp == NULL
- || *oldlenp != sizeof(extent_util_stats_verbose_t)
+ || *oldlenp != sizeof(inspect_extent_util_stats_verbose_t)
|| newp == NULL) {
ret = EINVAL;
goto label_return;
@@ -3244,9 +4002,9 @@ experimental_utilization_query_ctl(tsd_t *tsd, const size_t *mib,
void *ptr = NULL;
WRITE(ptr, void *);
- extent_util_stats_verbose_t *util_stats
- = (extent_util_stats_verbose_t *)oldp;
- extent_util_stats_verbose_get(tsd_tsdn(tsd), ptr,
+ inspect_extent_util_stats_verbose_t *util_stats
+ = (inspect_extent_util_stats_verbose_t *)oldp;
+ inspect_extent_util_stats_verbose_get(tsd_tsdn(tsd), ptr,
&util_stats->nfree, &util_stats->nregs, &util_stats->size,
&util_stats->bin_nfree, &util_stats->bin_nregs,
&util_stats->slabcur_addr);
@@ -3357,21 +4115,22 @@ experimental_utilization_batch_query_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
- assert(sizeof(extent_util_stats_t) == sizeof(size_t) * 3);
+ assert(sizeof(inspect_extent_util_stats_t) == sizeof(size_t) * 3);
const size_t len = newlen / sizeof(const void *);
if (oldp == NULL || oldlenp == NULL || newp == NULL || newlen == 0
|| newlen != len * sizeof(const void *)
- || *oldlenp != len * sizeof(extent_util_stats_t)) {
+ || *oldlenp != len * sizeof(inspect_extent_util_stats_t)) {
ret = EINVAL;
goto label_return;
}
void **ptrs = (void **)newp;
- extent_util_stats_t *util_stats = (extent_util_stats_t *)oldp;
+ inspect_extent_util_stats_t *util_stats =
+ (inspect_extent_util_stats_t *)oldp;
size_t i;
for (i = 0; i < len; ++i) {
- extent_util_stats_get(tsd_tsdn(tsd), ptrs[i],
+ inspect_extent_util_stats_get(tsd_tsdn(tsd), ptrs[i],
&util_stats[i].nfree, &util_stats[i].nregs,
&util_stats[i].size);
}
@@ -3420,7 +4179,7 @@ experimental_arenas_i_pactivep_ctl(tsd_t *tsd, const size_t *mib,
#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS) || \
defined(JEMALLOC_GCC_SYNC_ATOMICS) || defined(_MSC_VER)
/* Expose the underlying counter for fast read. */
- pactivep = (size_t *)&(arena->nactive.repr);
+ pactivep = (size_t *)&(arena->pa_shard.nactive.repr);
READ(pactivep, size_t *);
ret = 0;
#else
@@ -3433,3 +4192,223 @@ label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return ret;
}
+
+static int
+experimental_prof_recent_alloc_max_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+
+ if (!(config_prof && opt_prof)) {
+ ret = ENOENT;
+ goto label_return;
+ }
+
+ ssize_t old_max;
+ if (newp != NULL) {
+ if (newlen != sizeof(ssize_t)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ ssize_t max = *(ssize_t *)newp;
+ if (max < -1) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ old_max = prof_recent_alloc_max_ctl_write(tsd, max);
+ } else {
+ old_max = prof_recent_alloc_max_ctl_read();
+ }
+ READ(old_max, ssize_t);
+
+ ret = 0;
+
+label_return:
+ return ret;
+}
+
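Editor's note: this handler sits under the experimental namespace, presumably as experimental.prof_recent.alloc_max; it reads and/or sets the cap on the recent-allocation ring kept by prof_recent (requires config_prof and opt.prof). A sketch under those assumptions:

	ssize_t old_max;
	size_t sz = sizeof(old_max);
	ssize_t new_max = 100;	/* keep the 100 most recent sampled allocations */
	mallctl("experimental.prof_recent.alloc_max", &old_max, &sz,
	    &new_max, sizeof(new_max));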
+typedef struct write_cb_packet_s write_cb_packet_t;
+struct write_cb_packet_s {
+ write_cb_t *write_cb;
+ void *cbopaque;
+};
+
+static int
+experimental_prof_recent_alloc_dump_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+
+ if (!(config_prof && opt_prof)) {
+ ret = ENOENT;
+ goto label_return;
+ }
+
+ assert(sizeof(write_cb_packet_t) == sizeof(void *) * 2);
+
+ WRITEONLY();
+ write_cb_packet_t write_cb_packet;
+ ASSURED_WRITE(write_cb_packet, write_cb_packet_t);
+
+ prof_recent_alloc_dump(tsd, write_cb_packet.write_cb,
+ write_cb_packet.cbopaque);
+
+ ret = 0;
+
+label_return:
+ return ret;
+}
+
+typedef struct batch_alloc_packet_s batch_alloc_packet_t;
+struct batch_alloc_packet_s {
+ void **ptrs;
+ size_t num;
+ size_t size;
+ int flags;
+};
+
+static int
+experimental_batch_alloc_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+
+ VERIFY_READ(size_t);
+
+ batch_alloc_packet_t batch_alloc_packet;
+ ASSURED_WRITE(batch_alloc_packet, batch_alloc_packet_t);
+ size_t filled = batch_alloc(batch_alloc_packet.ptrs,
+ batch_alloc_packet.num, batch_alloc_packet.size,
+ batch_alloc_packet.flags);
+ READ(filled, size_t);
+
+ ret = 0;
+
+label_return:
+ return ret;
+}
+
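Editor's note: the batch-allocation entry point reads its arguments through a packet passed via newp; the caller must lay out a struct identical to batch_alloc_packet_s above and pass exactly sizeof(packet) (ASSURED_WRITE), receiving the number of slots actually filled through oldp (VERIFY_READ(size_t)). A sketch, assuming the node is reachable as experimental.batch_alloc:

	/* Must mirror batch_alloc_packet_s above, field for field. */
	typedef struct {
		void **ptrs;
		size_t num;
		size_t size;
		int flags;
	} my_batch_alloc_packet_t;

	void *ptrs[8];
	my_batch_alloc_packet_t pkt = {ptrs, 8, 128, 0};
	size_t filled;
	size_t sz = sizeof(filled);
	mallctl("experimental.batch_alloc", &filled, &sz, &pkt, sizeof(pkt));
	/* ptrs[0..filled-1] now hold live 128-byte allocations; free() them later. */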
+static int
+prof_stats_bins_i_live_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ unsigned binind;
+ prof_stats_t stats;
+
+ if (!(config_prof && opt_prof && opt_prof_stats)) {
+ ret = ENOENT;
+ goto label_return;
+ }
+
+ READONLY();
+ MIB_UNSIGNED(binind, 3);
+ if (binind >= SC_NBINS) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ prof_stats_get_live(tsd, (szind_t)binind, &stats);
+ READ(stats, prof_stats_t);
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static int
+prof_stats_bins_i_accum_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ unsigned binind;
+ prof_stats_t stats;
+
+ if (!(config_prof && opt_prof && opt_prof_stats)) {
+ ret = ENOENT;
+ goto label_return;
+ }
+
+ READONLY();
+ MIB_UNSIGNED(binind, 3);
+ if (binind >= SC_NBINS) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ prof_stats_get_accum(tsd, (szind_t)binind, &stats);
+ READ(stats, prof_stats_t);
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static const ctl_named_node_t *
+prof_stats_bins_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
+ size_t i) {
+ if (!(config_prof && opt_prof && opt_prof_stats)) {
+ return NULL;
+ }
+ if (i >= SC_NBINS) {
+ return NULL;
+ }
+ return super_prof_stats_bins_i_node;
+}
+
+static int
+prof_stats_lextents_i_live_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ unsigned lextent_ind;
+ prof_stats_t stats;
+
+ if (!(config_prof && opt_prof && opt_prof_stats)) {
+ ret = ENOENT;
+ goto label_return;
+ }
+
+ READONLY();
+ MIB_UNSIGNED(lextent_ind, 3);
+ if (lextent_ind >= SC_NSIZES - SC_NBINS) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ prof_stats_get_live(tsd, (szind_t)(lextent_ind + SC_NBINS), &stats);
+ READ(stats, prof_stats_t);
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static int
+prof_stats_lextents_i_accum_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ unsigned lextent_ind;
+ prof_stats_t stats;
+
+ if (!(config_prof && opt_prof && opt_prof_stats)) {
+ ret = ENOENT;
+ goto label_return;
+ }
+
+ READONLY();
+ MIB_UNSIGNED(lextent_ind, 3);
+ if (lextent_ind >= SC_NSIZES - SC_NBINS) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ prof_stats_get_accum(tsd, (szind_t)(lextent_ind + SC_NBINS), &stats);
+ READ(stats, prof_stats_t);
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static const ctl_named_node_t *
+prof_stats_lextents_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
+ size_t i) {
+ if (!(config_prof && opt_prof && opt_prof_stats)) {
+ return NULL;
+ }
+ if (i >= SC_NSIZES - SC_NBINS) {
+ return NULL;
+ }
+ return super_prof_stats_lextents_i_node;
+}
diff --git a/deps/jemalloc/src/decay.c b/deps/jemalloc/src/decay.c
new file mode 100644
index 000000000..d801b2bc0
--- /dev/null
+++ b/deps/jemalloc/src/decay.c
@@ -0,0 +1,295 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/decay.h"
+
+static const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
+#define STEP(step, h, x, y) \
+ h,
+ SMOOTHSTEP
+#undef STEP
+};
+
+/*
+ * Generate a new deadline that is uniformly random within the next epoch after
+ * the current one.
+ */
+void
+decay_deadline_init(decay_t *decay) {
+ nstime_copy(&decay->deadline, &decay->epoch);
+ nstime_add(&decay->deadline, &decay->interval);
+ if (decay_ms_read(decay) > 0) {
+ nstime_t jitter;
+
+ nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
+ nstime_ns(&decay->interval)));
+ nstime_add(&decay->deadline, &jitter);
+ }
+}
+
+void
+decay_reinit(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms) {
+ atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
+ if (decay_ms > 0) {
+ nstime_init(&decay->interval, (uint64_t)decay_ms *
+ KQU(1000000));
+ nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
+ }
+
+ nstime_copy(&decay->epoch, cur_time);
+ decay->jitter_state = (uint64_t)(uintptr_t)decay;
+ decay_deadline_init(decay);
+ decay->nunpurged = 0;
+ memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
+}
+
+bool
+decay_init(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms) {
+ if (config_debug) {
+ for (size_t i = 0; i < sizeof(decay_t); i++) {
+ assert(((char *)decay)[i] == 0);
+ }
+ decay->ceil_npages = 0;
+ }
+ if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ decay->purging = false;
+ decay_reinit(decay, cur_time, decay_ms);
+ return false;
+}
+
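+/*
+ * A decay_ms of -1 means "never decay"; 0 means "decay immediately"; positive
+ * values are valid as long as they do not overflow when later converted to
+ * nanoseconds (hence the NSTIME_SEC_MAX bound below).
+ */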
+bool
+decay_ms_valid(ssize_t decay_ms) {
+ if (decay_ms < -1) {
+ return false;
+ }
+ if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
+ KQU(1000)) {
+ return true;
+ }
+ return false;
+}
+
+static void
+decay_maybe_update_time(decay_t *decay, nstime_t *new_time) {
+ if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch,
+ new_time) > 0)) {
+ /*
+ * Time went backwards. Move the epoch back in time and
+ * generate a new deadline, with the expectation that time
+ * typically flows forward for long enough periods of time that
+ * epochs complete. Unfortunately, this strategy is susceptible
+ * to clock jitter triggering premature epoch advances, but
+ * clock jitter estimation and compensation isn't feasible here
+ * because calls into this code are event-driven.
+ */
+ nstime_copy(&decay->epoch, new_time);
+ decay_deadline_init(decay);
+ } else {
+ /* Verify that time does not go backwards. */
+ assert(nstime_compare(&decay->epoch, new_time) <= 0);
+ }
+}
+
+static size_t
+decay_backlog_npages_limit(const decay_t *decay) {
+ /*
+ * For each element of decay_backlog, multiply by the corresponding
+ * fixed-point smoothstep decay factor. Sum the products, then divide
+ * to round down to the nearest whole number of pages.
+ */
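+	/*
+	 * Illustration: a single backlog entry of N pages at step i
+	 * contributes (N * h_steps[i]) >> SMOOTHSTEP_BFP pages, i.e. N scaled
+	 * by the smoothstep fraction h_steps[i] / 2^SMOOTHSTEP_BFP.
+	 */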
+ uint64_t sum = 0;
+ for (unsigned i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
+ sum += decay->backlog[i] * h_steps[i];
+ }
+ size_t npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
+
+ return npages_limit_backlog;
+}
+
+/*
+ * Update backlog, assuming that 'nadvance_u64' time intervals have passed.
+ * The oldest 'nadvance_u64' records are dropped and 'current_npages' is
+ * placed as the newest record.
+ */
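+/*
+ * For example (illustrative only), with nadvance_u64 == 2 the two oldest
+ * entries are dropped, the remaining entries shift toward index 0, the
+ * newly-vacated slot before the last is zeroed, and the last slot receives
+ * the number of pages that appeared since the previous epoch advance.
+ */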
+static void
+decay_backlog_update(decay_t *decay, uint64_t nadvance_u64,
+ size_t current_npages) {
+ if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
+ memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
+ sizeof(size_t));
+ } else {
+ size_t nadvance_z = (size_t)nadvance_u64;
+
+ assert((uint64_t)nadvance_z == nadvance_u64);
+
+ memmove(decay->backlog, &decay->backlog[nadvance_z],
+ (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
+ if (nadvance_z > 1) {
+ memset(&decay->backlog[SMOOTHSTEP_NSTEPS -
+ nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
+ }
+ }
+
+ size_t npages_delta = (current_npages > decay->nunpurged) ?
+ current_npages - decay->nunpurged : 0;
+ decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta;
+
+ if (config_debug) {
+ if (current_npages > decay->ceil_npages) {
+ decay->ceil_npages = current_npages;
+ }
+ size_t npages_limit = decay_backlog_npages_limit(decay);
+ assert(decay->ceil_npages >= npages_limit);
+ if (decay->ceil_npages > npages_limit) {
+ decay->ceil_npages = npages_limit;
+ }
+ }
+}
+
+static inline bool
+decay_deadline_reached(const decay_t *decay, const nstime_t *time) {
+ return (nstime_compare(&decay->deadline, time) <= 0);
+}
+
+uint64_t
+decay_npages_purge_in(decay_t *decay, nstime_t *time, size_t npages_new) {
+ uint64_t decay_interval_ns = decay_epoch_duration_ns(decay);
+ size_t n_epoch = (size_t)(nstime_ns(time) / decay_interval_ns);
+
+ uint64_t npages_purge;
+ if (n_epoch >= SMOOTHSTEP_NSTEPS) {
+ npages_purge = npages_new;
+ } else {
+ uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1];
+ assert(h_steps_max >=
+ h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
+ npages_purge = npages_new * (h_steps_max -
+ h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
+ npages_purge >>= SMOOTHSTEP_BFP;
+ }
+ return npages_purge;
+}
+
+bool
+decay_maybe_advance_epoch(decay_t *decay, nstime_t *new_time,
+ size_t npages_current) {
+ /* Handle possible non-monotonicity of time. */
+ decay_maybe_update_time(decay, new_time);
+
+ if (!decay_deadline_reached(decay, new_time)) {
+ return false;
+ }
+ nstime_t delta;
+ nstime_copy(&delta, new_time);
+ nstime_subtract(&delta, &decay->epoch);
+
+ uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
+ assert(nadvance_u64 > 0);
+
+ /* Add nadvance_u64 decay intervals to epoch. */
+ nstime_copy(&delta, &decay->interval);
+ nstime_imultiply(&delta, nadvance_u64);
+ nstime_add(&decay->epoch, &delta);
+
+ /* Set a new deadline. */
+ decay_deadline_init(decay);
+
+ /* Update the backlog. */
+ decay_backlog_update(decay, nadvance_u64, npages_current);
+
+ decay->npages_limit = decay_backlog_npages_limit(decay);
+ decay->nunpurged = (decay->npages_limit > npages_current) ?
+ decay->npages_limit : npages_current;
+
+ return true;
+}
+
+/*
+ * Calculate how many pages should be purged after 'interval'.
+ *
+ * First, calculate how many pages should remain at the moment, then subtract
+ * the number of pages that should remain after 'interval'. The difference is
+ * how many pages should be purged until then.
+ *
+ * The number of pages that should remain at a specific moment is calculated
+ * like this: pages(now) = sum(backlog[i] * h_steps[i]). After 'interval'
+ * passes, the backlog would shift 'interval' positions to the left and the
+ * sigmoid curve would be applied starting with backlog[interval].
+ *
+ * The implementation doesn't directly map to the description, but it's
+ * essentially the same calculation, optimized to avoid iterating over
+ * [interval..SMOOTHSTEP_NSTEPS) twice.
+ */
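+/*
+ * Equivalently (restating the identity the code below computes):
+ *   remaining(now)   = sum over i in [0, NSTEPS) of backlog[i] * h_steps[i]
+ *   remaining(after) = sum over i in [interval, NSTEPS) of
+ *                      backlog[i] * h_steps[i - interval]
+ *   npurge           = remaining(now) - remaining(after)
+ * accumulated in a single pass and scaled down by SMOOTHSTEP_BFP fixed-point
+ * bits.
+ */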
+static inline size_t
+decay_npurge_after_interval(decay_t *decay, size_t interval) {
+ size_t i;
+ uint64_t sum = 0;
+ for (i = 0; i < interval; i++) {
+ sum += decay->backlog[i] * h_steps[i];
+ }
+ for (; i < SMOOTHSTEP_NSTEPS; i++) {
+ sum += decay->backlog[i] *
+ (h_steps[i] - h_steps[i - interval]);
+ }
+
+ return (size_t)(sum >> SMOOTHSTEP_BFP);
+}
+
+uint64_t decay_ns_until_purge(decay_t *decay, size_t npages_current,
+ uint64_t npages_threshold) {
+ if (!decay_gradually(decay)) {
+ return DECAY_UNBOUNDED_TIME_TO_PURGE;
+ }
+ uint64_t decay_interval_ns = decay_epoch_duration_ns(decay);
+ assert(decay_interval_ns > 0);
+ if (npages_current == 0) {
+ unsigned i;
+ for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
+ if (decay->backlog[i] > 0) {
+ break;
+ }
+ }
+ if (i == SMOOTHSTEP_NSTEPS) {
+ /* No dirty pages recorded. Sleep indefinitely. */
+ return DECAY_UNBOUNDED_TIME_TO_PURGE;
+ }
+ }
+ if (npages_current <= npages_threshold) {
+ /* Use max interval. */
+ return decay_interval_ns * SMOOTHSTEP_NSTEPS;
+ }
+
+	/* Need at least 2 intervals to reach the next epoch deadline. */
+ size_t lb = 2;
+ size_t ub = SMOOTHSTEP_NSTEPS;
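+	/*
+	 * Binary search for the point at which the projected purge count
+	 * crosses npages_threshold; the loop below maintains
+	 * npurge(lb) <= npages_threshold <= npurge(ub).
+	 */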
+
+ size_t npurge_lb, npurge_ub;
+ npurge_lb = decay_npurge_after_interval(decay, lb);
+ if (npurge_lb > npages_threshold) {
+ return decay_interval_ns * lb;
+ }
+ npurge_ub = decay_npurge_after_interval(decay, ub);
+ if (npurge_ub < npages_threshold) {
+ return decay_interval_ns * ub;
+ }
+
+ unsigned n_search = 0;
+ size_t target, npurge;
+ while ((npurge_lb + npages_threshold < npurge_ub) && (lb + 2 < ub)) {
+ target = (lb + ub) / 2;
+ npurge = decay_npurge_after_interval(decay, target);
+ if (npurge > npages_threshold) {
+ ub = target;
+ npurge_ub = npurge;
+ } else {
+ lb = target;
+ npurge_lb = npurge;
+ }
+ assert(n_search < lg_floor(SMOOTHSTEP_NSTEPS) + 1);
+ ++n_search;
+ }
+ return decay_interval_ns * (ub + lb) / 2;
+}
diff --git a/deps/jemalloc/src/ecache.c b/deps/jemalloc/src/ecache.c
new file mode 100644
index 000000000..a242227d3
--- /dev/null
+++ b/deps/jemalloc/src/ecache.c
@@ -0,0 +1,35 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/san.h"
+
+bool
+ecache_init(tsdn_t *tsdn, ecache_t *ecache, extent_state_t state, unsigned ind,
+ bool delay_coalesce) {
+ if (malloc_mutex_init(&ecache->mtx, "extents", WITNESS_RANK_EXTENTS,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ ecache->state = state;
+ ecache->ind = ind;
+ ecache->delay_coalesce = delay_coalesce;
+ eset_init(&ecache->eset, state);
+ eset_init(&ecache->guarded_eset, state);
+
+ return false;
+}
+
+void
+ecache_prefork(tsdn_t *tsdn, ecache_t *ecache) {
+ malloc_mutex_prefork(tsdn, &ecache->mtx);
+}
+
+void
+ecache_postfork_parent(tsdn_t *tsdn, ecache_t *ecache) {
+ malloc_mutex_postfork_parent(tsdn, &ecache->mtx);
+}
+
+void
+ecache_postfork_child(tsdn_t *tsdn, ecache_t *ecache) {
+ malloc_mutex_postfork_child(tsdn, &ecache->mtx);
+}
diff --git a/deps/jemalloc/src/edata.c b/deps/jemalloc/src/edata.c
new file mode 100644
index 000000000..82b6f5654
--- /dev/null
+++ b/deps/jemalloc/src/edata.c
@@ -0,0 +1,6 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+ph_gen(, edata_avail, edata_t, avail_link,
+ edata_esnead_comp)
+ph_gen(, edata_heap, edata_t, heap_link, edata_snad_comp)
diff --git a/deps/jemalloc/src/edata_cache.c b/deps/jemalloc/src/edata_cache.c
new file mode 100644
index 000000000..6bc1848cb
--- /dev/null
+++ b/deps/jemalloc/src/edata_cache.c
@@ -0,0 +1,154 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+bool
+edata_cache_init(edata_cache_t *edata_cache, base_t *base) {
+ edata_avail_new(&edata_cache->avail);
+ /*
+ * This is not strictly necessary, since the edata_cache_t is only
+ * created inside an arena, which is zeroed on creation. But this is
+ * handy as a safety measure.
+ */
+ atomic_store_zu(&edata_cache->count, 0, ATOMIC_RELAXED);
+ if (malloc_mutex_init(&edata_cache->mtx, "edata_cache",
+ WITNESS_RANK_EDATA_CACHE, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ edata_cache->base = base;
+ return false;
+}
+
+edata_t *
+edata_cache_get(tsdn_t *tsdn, edata_cache_t *edata_cache) {
+ malloc_mutex_lock(tsdn, &edata_cache->mtx);
+ edata_t *edata = edata_avail_first(&edata_cache->avail);
+ if (edata == NULL) {
+ malloc_mutex_unlock(tsdn, &edata_cache->mtx);
+ return base_alloc_edata(tsdn, edata_cache->base);
+ }
+ edata_avail_remove(&edata_cache->avail, edata);
+ atomic_load_sub_store_zu(&edata_cache->count, 1);
+ malloc_mutex_unlock(tsdn, &edata_cache->mtx);
+ return edata;
+}
+
+void
+edata_cache_put(tsdn_t *tsdn, edata_cache_t *edata_cache, edata_t *edata) {
+ malloc_mutex_lock(tsdn, &edata_cache->mtx);
+ edata_avail_insert(&edata_cache->avail, edata);
+ atomic_load_add_store_zu(&edata_cache->count, 1);
+ malloc_mutex_unlock(tsdn, &edata_cache->mtx);
+}
+
+void
+edata_cache_prefork(tsdn_t *tsdn, edata_cache_t *edata_cache) {
+ malloc_mutex_prefork(tsdn, &edata_cache->mtx);
+}
+
+void
+edata_cache_postfork_parent(tsdn_t *tsdn, edata_cache_t *edata_cache) {
+ malloc_mutex_postfork_parent(tsdn, &edata_cache->mtx);
+}
+
+void
+edata_cache_postfork_child(tsdn_t *tsdn, edata_cache_t *edata_cache) {
+ malloc_mutex_postfork_child(tsdn, &edata_cache->mtx);
+}
+
+void
+edata_cache_fast_init(edata_cache_fast_t *ecs, edata_cache_t *fallback) {
+ edata_list_inactive_init(&ecs->list);
+ ecs->fallback = fallback;
+ ecs->disabled = false;
+}
+
+static void
+edata_cache_fast_try_fill_from_fallback(tsdn_t *tsdn,
+ edata_cache_fast_t *ecs) {
+ edata_t *edata;
+ malloc_mutex_lock(tsdn, &ecs->fallback->mtx);
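+	/*
+	 * Move up to EDATA_CACHE_FAST_FILL edatas from the locked fallback
+	 * cache into the local (unsynchronized) list in a single batch.
+	 */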
+ for (int i = 0; i < EDATA_CACHE_FAST_FILL; i++) {
+ edata = edata_avail_remove_first(&ecs->fallback->avail);
+ if (edata == NULL) {
+ break;
+ }
+ edata_list_inactive_append(&ecs->list, edata);
+ atomic_load_sub_store_zu(&ecs->fallback->count, 1);
+ }
+ malloc_mutex_unlock(tsdn, &ecs->fallback->mtx);
+}
+
+edata_t *
+edata_cache_fast_get(tsdn_t *tsdn, edata_cache_fast_t *ecs) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_EDATA_CACHE, 0);
+
+ if (ecs->disabled) {
+ assert(edata_list_inactive_first(&ecs->list) == NULL);
+ return edata_cache_get(tsdn, ecs->fallback);
+ }
+
+ edata_t *edata = edata_list_inactive_first(&ecs->list);
+ if (edata != NULL) {
+ edata_list_inactive_remove(&ecs->list, edata);
+ return edata;
+ }
+ /* Slow path; requires synchronization. */
+ edata_cache_fast_try_fill_from_fallback(tsdn, ecs);
+ edata = edata_list_inactive_first(&ecs->list);
+ if (edata != NULL) {
+ edata_list_inactive_remove(&ecs->list, edata);
+ } else {
+ /*
+ * Slowest path (fallback was also empty); allocate something
+ * new.
+ */
+ edata = base_alloc_edata(tsdn, ecs->fallback->base);
+ }
+ return edata;
+}
+
+static void
+edata_cache_fast_flush_all(tsdn_t *tsdn, edata_cache_fast_t *ecs) {
+ /*
+ * You could imagine smarter cache management policies (like
+ * only flushing down to some threshold in anticipation of
+ * future get requests). But just flushing everything provides
+ * a good opportunity to defrag too, and lets us share code between the
+ * flush and disable pathways.
+ */
+ edata_t *edata;
+ size_t nflushed = 0;
+ malloc_mutex_lock(tsdn, &ecs->fallback->mtx);
+ while ((edata = edata_list_inactive_first(&ecs->list)) != NULL) {
+ edata_list_inactive_remove(&ecs->list, edata);
+ edata_avail_insert(&ecs->fallback->avail, edata);
+ nflushed++;
+ }
+ atomic_load_add_store_zu(&ecs->fallback->count, nflushed);
+ malloc_mutex_unlock(tsdn, &ecs->fallback->mtx);
+}
+
+void
+edata_cache_fast_put(tsdn_t *tsdn, edata_cache_fast_t *ecs, edata_t *edata) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_EDATA_CACHE, 0);
+
+ if (ecs->disabled) {
+ assert(edata_list_inactive_first(&ecs->list) == NULL);
+ edata_cache_put(tsdn, ecs->fallback, edata);
+ return;
+ }
+
+ /*
+ * Prepend rather than append, to do LIFO ordering in the hopes of some
+ * cache locality.
+ */
+ edata_list_inactive_prepend(&ecs->list, edata);
+}
+
+void
+edata_cache_fast_disable(tsdn_t *tsdn, edata_cache_fast_t *ecs) {
+ edata_cache_fast_flush_all(tsdn, ecs);
+ ecs->disabled = true;
+}
diff --git a/deps/jemalloc/src/ehooks.c b/deps/jemalloc/src/ehooks.c
new file mode 100644
index 000000000..383e9de6a
--- /dev/null
+++ b/deps/jemalloc/src/ehooks.c
@@ -0,0 +1,275 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/ehooks.h"
+#include "jemalloc/internal/extent_mmap.h"
+
+void
+ehooks_init(ehooks_t *ehooks, extent_hooks_t *extent_hooks, unsigned ind) {
+ /* All other hooks are optional; this one is not. */
+ assert(extent_hooks->alloc != NULL);
+ ehooks->ind = ind;
+ ehooks_set_extent_hooks_ptr(ehooks, extent_hooks);
+}
+
+/*
+ * If the caller specifies (!*zero), it is still possible to receive zeroed
+ * memory, in which case *zero is toggled to true. arena_extent_alloc() takes
+ * advantage of this to avoid demanding zeroed extents, while still taking
+ * advantage of them if they are returned.
+ */
+static void *
+extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
+ void *ret;
+
+ assert(size != 0);
+ assert(alignment != 0);
+
+ /* "primary" dss. */
+ if (have_dss && dss_prec == dss_prec_primary && (ret =
+ extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
+ commit)) != NULL) {
+ return ret;
+ }
+ /* mmap. */
+ if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
+ != NULL) {
+ return ret;
+ }
+ /* "secondary" dss. */
+ if (have_dss && dss_prec == dss_prec_secondary && (ret =
+ extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
+ commit)) != NULL) {
+ return ret;
+ }
+
+ /* All strategies for allocation failed. */
+ return NULL;
+}
+
+void *
+ehooks_default_alloc_impl(tsdn_t *tsdn, void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
+ arena_t *arena = arena_get(tsdn, arena_ind, false);
+ /* NULL arena indicates arena_create. */
+ assert(arena != NULL || alignment == HUGEPAGE);
+ dss_prec_t dss = (arena == NULL) ? dss_prec_disabled :
+ (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_RELAXED);
+ void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment,
+ zero, commit, dss);
+ if (have_madvise_huge && ret) {
+ pages_set_thp_state(ret, size);
+ }
+ return ret;
+}
+
+static void *
+ehooks_default_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
+ return ehooks_default_alloc_impl(tsdn_fetch(), new_addr, size,
+ ALIGNMENT_CEILING(alignment, PAGE), zero, commit, arena_ind);
+}
+
+bool
+ehooks_default_dalloc_impl(void *addr, size_t size) {
+ if (!have_dss || !extent_in_dss(addr)) {
+ return extent_dalloc_mmap(addr, size);
+ }
+ return true;
+}
+
+static bool
+ehooks_default_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ bool committed, unsigned arena_ind) {
+ return ehooks_default_dalloc_impl(addr, size);
+}
+
+void
+ehooks_default_destroy_impl(void *addr, size_t size) {
+ if (!have_dss || !extent_in_dss(addr)) {
+ pages_unmap(addr, size);
+ }
+}
+
+static void
+ehooks_default_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ bool committed, unsigned arena_ind) {
+ ehooks_default_destroy_impl(addr, size);
+}
+
+bool
+ehooks_default_commit_impl(void *addr, size_t offset, size_t length) {
+ return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
+ length);
+}
+
+static bool
+ehooks_default_commit(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t offset, size_t length, unsigned arena_ind) {
+ return ehooks_default_commit_impl(addr, offset, length);
+}
+
+bool
+ehooks_default_decommit_impl(void *addr, size_t offset, size_t length) {
+ return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
+ length);
+}
+
+static bool
+ehooks_default_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t offset, size_t length, unsigned arena_ind) {
+ return ehooks_default_decommit_impl(addr, offset, length);
+}
+
+#ifdef PAGES_CAN_PURGE_LAZY
+bool
+ehooks_default_purge_lazy_impl(void *addr, size_t offset, size_t length) {
+ return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
+ length);
+}
+
+static bool
+ehooks_default_purge_lazy(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t offset, size_t length, unsigned arena_ind) {
+ assert(addr != NULL);
+ assert((offset & PAGE_MASK) == 0);
+ assert(length != 0);
+ assert((length & PAGE_MASK) == 0);
+ return ehooks_default_purge_lazy_impl(addr, offset, length);
+}
+#endif
+
+#ifdef PAGES_CAN_PURGE_FORCED
+bool
+ehooks_default_purge_forced_impl(void *addr, size_t offset, size_t length) {
+ return pages_purge_forced((void *)((uintptr_t)addr +
+ (uintptr_t)offset), length);
+}
+
+static bool
+ehooks_default_purge_forced(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, size_t offset, size_t length, unsigned arena_ind) {
+ assert(addr != NULL);
+ assert((offset & PAGE_MASK) == 0);
+ assert(length != 0);
+ assert((length & PAGE_MASK) == 0);
+ return ehooks_default_purge_forced_impl(addr, offset, length);
+}
+#endif
+
+bool
+ehooks_default_split_impl() {
+ if (!maps_coalesce) {
+ /*
+ * Without retain, only whole regions can be purged (required by
+ * MEM_RELEASE on Windows) -- therefore disallow splitting. See
+ * comments in extent_head_no_merge().
+ */
+ return !opt_retain;
+ }
+
+ return false;
+}
+
+static bool
+ehooks_default_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
+ return ehooks_default_split_impl();
+}
+
+bool
+ehooks_default_merge_impl(tsdn_t *tsdn, void *addr_a, void *addr_b) {
+ assert(addr_a < addr_b);
+ /*
+ * For non-DSS cases --
+ * a) W/o maps_coalesce, merge is not always allowed (Windows):
+ * 1) w/o retain, never merge (first branch below).
+ * 2) with retain, only merge extents from the same VirtualAlloc
+ * region (in which case MEM_DECOMMIT is utilized for purging).
+ *
+ * b) With maps_coalesce, it's always possible to merge.
+ * 1) w/o retain, always allow merge (only about dirty / muzzy).
+ * 2) with retain, to preserve the SN / first-fit, merge is still
+ * disallowed if b is a head extent, i.e. no merging across
+ * different mmap regions.
+ *
+ * a2) and b2) are implemented in emap_try_acquire_edata_neighbor, and
+ * sanity checked in the second branch below.
+ */
+ if (!maps_coalesce && !opt_retain) {
+ return true;
+ }
+ if (config_debug) {
+ edata_t *a = emap_edata_lookup(tsdn, &arena_emap_global,
+ addr_a);
+ bool head_a = edata_is_head_get(a);
+ edata_t *b = emap_edata_lookup(tsdn, &arena_emap_global,
+ addr_b);
+ bool head_b = edata_is_head_get(b);
+ emap_assert_mapped(tsdn, &arena_emap_global, a);
+ emap_assert_mapped(tsdn, &arena_emap_global, b);
+ assert(extent_neighbor_head_state_mergeable(head_a, head_b,
+ /* forward */ true));
+ }
+ if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
+ return true;
+ }
+
+ return false;
+}
+
+bool
+ehooks_default_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
+ void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
+ tsdn_t *tsdn = tsdn_fetch();
+
+ return ehooks_default_merge_impl(tsdn, addr_a, addr_b);
+}
+
+void
+ehooks_default_zero_impl(void *addr, size_t size) {
+ /*
+ * By default, we try to zero out memory using OS-provided demand-zeroed
+ * pages. If the user has specifically requested hugepages, though, we
+ * don't want to purge in the middle of a hugepage (which would break it
+ * up), so we act conservatively and use memset.
+ */
+ bool needs_memset = true;
+ if (opt_thp != thp_mode_always) {
+ needs_memset = pages_purge_forced(addr, size);
+ }
+ if (needs_memset) {
+ memset(addr, 0, size);
+ }
+}
+
+void
+ehooks_default_guard_impl(void *guard1, void *guard2) {
+ pages_mark_guards(guard1, guard2);
+}
+
+void
+ehooks_default_unguard_impl(void *guard1, void *guard2) {
+ pages_unmark_guards(guard1, guard2);
+}
+
+const extent_hooks_t ehooks_default_extent_hooks = {
+ ehooks_default_alloc,
+ ehooks_default_dalloc,
+ ehooks_default_destroy,
+ ehooks_default_commit,
+ ehooks_default_decommit,
+#ifdef PAGES_CAN_PURGE_LAZY
+ ehooks_default_purge_lazy,
+#else
+ NULL,
+#endif
+#ifdef PAGES_CAN_PURGE_FORCED
+ ehooks_default_purge_forced,
+#else
+ NULL,
+#endif
+ ehooks_default_split,
+ ehooks_default_merge
+};
diff --git a/deps/jemalloc/src/emap.c b/deps/jemalloc/src/emap.c
new file mode 100644
index 000000000..9cc95a724
--- /dev/null
+++ b/deps/jemalloc/src/emap.c
@@ -0,0 +1,386 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/emap.h"
+
+enum emap_lock_result_e {
+ emap_lock_result_success,
+ emap_lock_result_failure,
+ emap_lock_result_no_extent
+};
+typedef enum emap_lock_result_e emap_lock_result_t;
+
+bool
+emap_init(emap_t *emap, base_t *base, bool zeroed) {
+ return rtree_new(&emap->rtree, base, zeroed);
+}
+
+void
+emap_update_edata_state(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
+ extent_state_t state) {
+ witness_assert_positive_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE);
+
+ edata_state_set(edata, state);
+
+ EMAP_DECLARE_RTREE_CTX;
+ rtree_leaf_elm_t *elm1 = rtree_leaf_elm_lookup(tsdn, &emap->rtree,
+ rtree_ctx, (uintptr_t)edata_base_get(edata), /* dependent */ true,
+ /* init_missing */ false);
+ assert(elm1 != NULL);
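+	/*
+	 * A single-page extent has the same base and last page, so only one
+	 * rtree leaf element needs updating; elm2 stays NULL in that case.
+	 */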
+ rtree_leaf_elm_t *elm2 = edata_size_get(edata) == PAGE ? NULL :
+ rtree_leaf_elm_lookup(tsdn, &emap->rtree, rtree_ctx,
+ (uintptr_t)edata_last_get(edata), /* dependent */ true,
+ /* init_missing */ false);
+
+ rtree_leaf_elm_state_update(tsdn, &emap->rtree, elm1, elm2, state);
+
+ emap_assert_mapped(tsdn, emap, edata);
+}
+
+static inline edata_t *
+emap_try_acquire_edata_neighbor_impl(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
+ extent_pai_t pai, extent_state_t expected_state, bool forward,
+ bool expanding) {
+ witness_assert_positive_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE);
+ assert(!edata_guarded_get(edata));
+ assert(!expanding || forward);
+ assert(!edata_state_in_transition(expected_state));
+ assert(expected_state == extent_state_dirty ||
+ expected_state == extent_state_muzzy ||
+ expected_state == extent_state_retained);
+
+ void *neighbor_addr = forward ? edata_past_get(edata) :
+ edata_before_get(edata);
+ /*
+ * This is subtle; the rtree code asserts that its input pointer is
+ * non-NULL, and this is a useful thing to check. But it's possible
+ * that edata corresponds to an address of (void *)PAGE (in practice,
+ * this has only been observed on FreeBSD when address-space
+ * randomization is on, but it could in principle happen anywhere). In
+ * this case, edata_before_get(edata) is NULL, triggering the assert.
+ */
+ if (neighbor_addr == NULL) {
+ return NULL;
+ }
+
+ EMAP_DECLARE_RTREE_CTX;
+ rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &emap->rtree,
+ rtree_ctx, (uintptr_t)neighbor_addr, /* dependent*/ false,
+ /* init_missing */ false);
+ if (elm == NULL) {
+ return NULL;
+ }
+
+ rtree_contents_t neighbor_contents = rtree_leaf_elm_read(tsdn,
+ &emap->rtree, elm, /* dependent */ true);
+ if (!extent_can_acquire_neighbor(edata, neighbor_contents, pai,
+ expected_state, forward, expanding)) {
+ return NULL;
+ }
+
+ /* From this point, the neighbor edata can be safely acquired. */
+ edata_t *neighbor = neighbor_contents.edata;
+ assert(edata_state_get(neighbor) == expected_state);
+ emap_update_edata_state(tsdn, emap, neighbor, extent_state_merging);
+ if (expanding) {
+ extent_assert_can_expand(edata, neighbor);
+ } else {
+ extent_assert_can_coalesce(edata, neighbor);
+ }
+
+ return neighbor;
+}
+
+edata_t *
+emap_try_acquire_edata_neighbor(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
+ extent_pai_t pai, extent_state_t expected_state, bool forward) {
+ return emap_try_acquire_edata_neighbor_impl(tsdn, emap, edata, pai,
+ expected_state, forward, /* expand */ false);
+}
+
+edata_t *
+emap_try_acquire_edata_neighbor_expand(tsdn_t *tsdn, emap_t *emap,
+ edata_t *edata, extent_pai_t pai, extent_state_t expected_state) {
+ /* Try expanding forward. */
+ return emap_try_acquire_edata_neighbor_impl(tsdn, emap, edata, pai,
+ expected_state, /* forward */ true, /* expand */ true);
+}
+
+void
+emap_release_edata(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
+ extent_state_t new_state) {
+ assert(emap_edata_in_transition(tsdn, emap, edata));
+ assert(emap_edata_is_acquired(tsdn, emap, edata));
+
+ emap_update_edata_state(tsdn, emap, edata, new_state);
+}
+
+static bool
+emap_rtree_leaf_elms_lookup(tsdn_t *tsdn, emap_t *emap, rtree_ctx_t *rtree_ctx,
+ const edata_t *edata, bool dependent, bool init_missing,
+ rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
+ *r_elm_a = rtree_leaf_elm_lookup(tsdn, &emap->rtree, rtree_ctx,
+ (uintptr_t)edata_base_get(edata), dependent, init_missing);
+ if (!dependent && *r_elm_a == NULL) {
+ return true;
+ }
+ assert(*r_elm_a != NULL);
+
+ *r_elm_b = rtree_leaf_elm_lookup(tsdn, &emap->rtree, rtree_ctx,
+ (uintptr_t)edata_last_get(edata), dependent, init_missing);
+ if (!dependent && *r_elm_b == NULL) {
+ return true;
+ }
+ assert(*r_elm_b != NULL);
+
+ return false;
+}
+
+static void
+emap_rtree_write_acquired(tsdn_t *tsdn, emap_t *emap, rtree_leaf_elm_t *elm_a,
+ rtree_leaf_elm_t *elm_b, edata_t *edata, szind_t szind, bool slab) {
+ rtree_contents_t contents;
+ contents.edata = edata;
+ contents.metadata.szind = szind;
+ contents.metadata.slab = slab;
+ contents.metadata.is_head = (edata == NULL) ? false :
+ edata_is_head_get(edata);
+ contents.metadata.state = (edata == NULL) ? 0 : edata_state_get(edata);
+ rtree_leaf_elm_write(tsdn, &emap->rtree, elm_a, contents);
+ if (elm_b != NULL) {
+ rtree_leaf_elm_write(tsdn, &emap->rtree, elm_b, contents);
+ }
+}
+
+bool
+emap_register_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
+ szind_t szind, bool slab) {
+ assert(edata_state_get(edata) == extent_state_active);
+ EMAP_DECLARE_RTREE_CTX;
+
+ rtree_leaf_elm_t *elm_a, *elm_b;
+ bool err = emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, edata,
+ false, true, &elm_a, &elm_b);
+ if (err) {
+ return true;
+ }
+ assert(rtree_leaf_elm_read(tsdn, &emap->rtree, elm_a,
+ /* dependent */ false).edata == NULL);
+ assert(rtree_leaf_elm_read(tsdn, &emap->rtree, elm_b,
+ /* dependent */ false).edata == NULL);
+ emap_rtree_write_acquired(tsdn, emap, elm_a, elm_b, edata, szind, slab);
+ return false;
+}
+
+/* Invoked *after* emap_register_boundary. */
+void
+emap_register_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
+ szind_t szind) {
+ EMAP_DECLARE_RTREE_CTX;
+
+ assert(edata_slab_get(edata));
+ assert(edata_state_get(edata) == extent_state_active);
+
+ if (config_debug) {
+ /* Making sure the boundary is registered already. */
+ rtree_leaf_elm_t *elm_a, *elm_b;
+ bool err = emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx,
+ edata, /* dependent */ true, /* init_missing */ false,
+ &elm_a, &elm_b);
+ assert(!err);
+ rtree_contents_t contents_a, contents_b;
+ contents_a = rtree_leaf_elm_read(tsdn, &emap->rtree, elm_a,
+ /* dependent */ true);
+ contents_b = rtree_leaf_elm_read(tsdn, &emap->rtree, elm_b,
+ /* dependent */ true);
+ assert(contents_a.edata == edata && contents_b.edata == edata);
+ assert(contents_a.metadata.slab && contents_b.metadata.slab);
+ }
+
+ rtree_contents_t contents;
+ contents.edata = edata;
+ contents.metadata.szind = szind;
+ contents.metadata.slab = true;
+ contents.metadata.state = extent_state_active;
+ contents.metadata.is_head = false; /* Not allowed to access. */
+
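+	/*
+	 * The two boundary pages were already written by
+	 * emap_register_boundary(); only the strictly interior pages are
+	 * filled in here, which requires the extent to span more than two
+	 * pages.
+	 */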
+ assert(edata_size_get(edata) > (2 << LG_PAGE));
+ rtree_write_range(tsdn, &emap->rtree, rtree_ctx,
+ (uintptr_t)edata_base_get(edata) + PAGE,
+ (uintptr_t)edata_last_get(edata) - PAGE, contents);
+}
+
+void
+emap_deregister_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
+ /*
+ * The edata must be either in an acquired state, or protected by state
+ * based locks.
+ */
+ if (!emap_edata_is_acquired(tsdn, emap, edata)) {
+ witness_assert_positive_depth_to_rank(
+ tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE);
+ }
+
+ EMAP_DECLARE_RTREE_CTX;
+ rtree_leaf_elm_t *elm_a, *elm_b;
+
+ emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, edata,
+ true, false, &elm_a, &elm_b);
+ emap_rtree_write_acquired(tsdn, emap, elm_a, elm_b, NULL, SC_NSIZES,
+ false);
+}
+
+void
+emap_deregister_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
+ EMAP_DECLARE_RTREE_CTX;
+
+ assert(edata_slab_get(edata));
+ if (edata_size_get(edata) > (2 << LG_PAGE)) {
+ rtree_clear_range(tsdn, &emap->rtree, rtree_ctx,
+ (uintptr_t)edata_base_get(edata) + PAGE,
+ (uintptr_t)edata_last_get(edata) - PAGE);
+ }
+}
+
+void
+emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind,
+ bool slab) {
+ EMAP_DECLARE_RTREE_CTX;
+
+ if (szind != SC_NSIZES) {
+ rtree_contents_t contents;
+ contents.edata = edata;
+ contents.metadata.szind = szind;
+ contents.metadata.slab = slab;
+ contents.metadata.is_head = edata_is_head_get(edata);
+ contents.metadata.state = edata_state_get(edata);
+
+ rtree_write(tsdn, &emap->rtree, rtree_ctx,
+ (uintptr_t)edata_addr_get(edata), contents);
+ /*
+ * Recall that this is called only for active->inactive and
+ * inactive->active transitions (since only active extents have
+ * meaningful values for szind and slab). Active, non-slab
+ * extents only need to handle lookups at their head (on
+ * deallocation), so we don't bother filling in the end
+ * boundary.
+ *
+ * For slab extents, we do the end-mapping change. This still
+ * leaves the interior unmodified; an emap_register_interior
+ * call is coming in those cases, though.
+ */
+ if (slab && edata_size_get(edata) > PAGE) {
+ uintptr_t key = (uintptr_t)edata_past_get(edata)
+ - (uintptr_t)PAGE;
+ rtree_write(tsdn, &emap->rtree, rtree_ctx, key,
+ contents);
+ }
+ }
+}
+
+bool
+emap_split_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
+ edata_t *edata, size_t size_a, edata_t *trail, size_t size_b) {
+ EMAP_DECLARE_RTREE_CTX;
+
+ /*
+ * We use incorrect constants for things like arena ind, zero, ranged,
+ * and commit state, and head status. This is a fake edata_t, used to
+ * facilitate a lookup.
+ */
+ edata_t lead = {0};
+ edata_init(&lead, 0U, edata_addr_get(edata), size_a, false, 0, 0,
+ extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
+
+ emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, &lead, false, true,
+ &prepare->lead_elm_a, &prepare->lead_elm_b);
+ emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, trail, false, true,
+ &prepare->trail_elm_a, &prepare->trail_elm_b);
+
+ if (prepare->lead_elm_a == NULL || prepare->lead_elm_b == NULL
+ || prepare->trail_elm_a == NULL || prepare->trail_elm_b == NULL) {
+ return true;
+ }
+ return false;
+}
+
+void
+emap_split_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
+ edata_t *lead, size_t size_a, edata_t *trail, size_t size_b) {
+ /*
+ * We should think about not writing to the lead leaf element. We can
+ * get into situations where a racing realloc-like call can disagree
+ * with a size lookup request. I think it's fine to declare that these
+ * situations are race bugs, but there's an argument to be made that for
+ * things like xallocx, a size lookup call should return either the old
+ * size or the new size, but not anything else.
+ */
+ emap_rtree_write_acquired(tsdn, emap, prepare->lead_elm_a,
+ prepare->lead_elm_b, lead, SC_NSIZES, /* slab */ false);
+ emap_rtree_write_acquired(tsdn, emap, prepare->trail_elm_a,
+ prepare->trail_elm_b, trail, SC_NSIZES, /* slab */ false);
+}
+
+void
+emap_merge_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
+ edata_t *lead, edata_t *trail) {
+ EMAP_DECLARE_RTREE_CTX;
+ emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, lead, true, false,
+ &prepare->lead_elm_a, &prepare->lead_elm_b);
+ emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, trail, true, false,
+ &prepare->trail_elm_a, &prepare->trail_elm_b);
+}
+
+void
+emap_merge_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
+ edata_t *lead, edata_t *trail) {
+ rtree_contents_t clear_contents;
+ clear_contents.edata = NULL;
+ clear_contents.metadata.szind = SC_NSIZES;
+ clear_contents.metadata.slab = false;
+ clear_contents.metadata.is_head = false;
+ clear_contents.metadata.state = (extent_state_t)0;
+
+ if (prepare->lead_elm_b != NULL) {
+ rtree_leaf_elm_write(tsdn, &emap->rtree,
+ prepare->lead_elm_b, clear_contents);
+ }
+
+ rtree_leaf_elm_t *merged_b;
+ if (prepare->trail_elm_b != NULL) {
+ rtree_leaf_elm_write(tsdn, &emap->rtree,
+ prepare->trail_elm_a, clear_contents);
+ merged_b = prepare->trail_elm_b;
+ } else {
+ merged_b = prepare->trail_elm_a;
+ }
+
+ emap_rtree_write_acquired(tsdn, emap, prepare->lead_elm_a, merged_b,
+ lead, SC_NSIZES, false);
+}
+
+void
+emap_do_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
+ EMAP_DECLARE_RTREE_CTX;
+
+ rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx,
+ (uintptr_t)edata_base_get(edata));
+ assert(contents.edata == edata);
+ assert(contents.metadata.is_head == edata_is_head_get(edata));
+ assert(contents.metadata.state == edata_state_get(edata));
+}
+
+void
+emap_do_assert_not_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
+ emap_full_alloc_ctx_t context1 = {0};
+ emap_full_alloc_ctx_try_lookup(tsdn, emap, edata_base_get(edata),
+ &context1);
+ assert(context1.edata == NULL);
+
+ emap_full_alloc_ctx_t context2 = {0};
+ emap_full_alloc_ctx_try_lookup(tsdn, emap, edata_last_get(edata),
+ &context2);
+ assert(context2.edata == NULL);
+}
diff --git a/deps/jemalloc/src/eset.c b/deps/jemalloc/src/eset.c
new file mode 100644
index 000000000..6f8f335e1
--- /dev/null
+++ b/deps/jemalloc/src/eset.c
@@ -0,0 +1,282 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/eset.h"
+
+#define ESET_NPSIZES (SC_NPSIZES + 1)
+
+static void
+eset_bin_init(eset_bin_t *bin) {
+ edata_heap_new(&bin->heap);
+ /*
+ * heap_min doesn't need initialization; it gets filled in when the bin
+	 * goes from empty to non-empty.
+ */
+}
+
+static void
+eset_bin_stats_init(eset_bin_stats_t *bin_stats) {
+ atomic_store_zu(&bin_stats->nextents, 0, ATOMIC_RELAXED);
+ atomic_store_zu(&bin_stats->nbytes, 0, ATOMIC_RELAXED);
+}
+
+void
+eset_init(eset_t *eset, extent_state_t state) {
+ for (unsigned i = 0; i < ESET_NPSIZES; i++) {
+ eset_bin_init(&eset->bins[i]);
+ eset_bin_stats_init(&eset->bin_stats[i]);
+ }
+ fb_init(eset->bitmap, ESET_NPSIZES);
+ edata_list_inactive_init(&eset->lru);
+ eset->state = state;
+}
+
+size_t
+eset_npages_get(eset_t *eset) {
+ return atomic_load_zu(&eset->npages, ATOMIC_RELAXED);
+}
+
+size_t
+eset_nextents_get(eset_t *eset, pszind_t pind) {
+ return atomic_load_zu(&eset->bin_stats[pind].nextents, ATOMIC_RELAXED);
+}
+
+size_t
+eset_nbytes_get(eset_t *eset, pszind_t pind) {
+ return atomic_load_zu(&eset->bin_stats[pind].nbytes, ATOMIC_RELAXED);
+}
+
+static void
+eset_stats_add(eset_t *eset, pszind_t pind, size_t sz) {
+ size_t cur = atomic_load_zu(&eset->bin_stats[pind].nextents,
+ ATOMIC_RELAXED);
+ atomic_store_zu(&eset->bin_stats[pind].nextents, cur + 1,
+ ATOMIC_RELAXED);
+ cur = atomic_load_zu(&eset->bin_stats[pind].nbytes, ATOMIC_RELAXED);
+ atomic_store_zu(&eset->bin_stats[pind].nbytes, cur + sz,
+ ATOMIC_RELAXED);
+}
+
+static void
+eset_stats_sub(eset_t *eset, pszind_t pind, size_t sz) {
+ size_t cur = atomic_load_zu(&eset->bin_stats[pind].nextents,
+ ATOMIC_RELAXED);
+ atomic_store_zu(&eset->bin_stats[pind].nextents, cur - 1,
+ ATOMIC_RELAXED);
+ cur = atomic_load_zu(&eset->bin_stats[pind].nbytes, ATOMIC_RELAXED);
+ atomic_store_zu(&eset->bin_stats[pind].nbytes, cur - sz,
+ ATOMIC_RELAXED);
+}
+
+void
+eset_insert(eset_t *eset, edata_t *edata) {
+ assert(edata_state_get(edata) == eset->state);
+
+ size_t size = edata_size_get(edata);
+ size_t psz = sz_psz_quantize_floor(size);
+ pszind_t pind = sz_psz2ind(psz);
+
+ edata_cmp_summary_t edata_cmp_summary = edata_cmp_summary_get(edata);
+ if (edata_heap_empty(&eset->bins[pind].heap)) {
+ fb_set(eset->bitmap, ESET_NPSIZES, (size_t)pind);
+ /* Only element is automatically the min element. */
+ eset->bins[pind].heap_min = edata_cmp_summary;
+ } else {
+ /*
+ * There's already a min element; update the summary if we're
+ * about to insert a lower one.
+ */
+ if (edata_cmp_summary_comp(edata_cmp_summary,
+ eset->bins[pind].heap_min) < 0) {
+ eset->bins[pind].heap_min = edata_cmp_summary;
+ }
+ }
+ edata_heap_insert(&eset->bins[pind].heap, edata);
+
+ if (config_stats) {
+ eset_stats_add(eset, pind, size);
+ }
+
+ edata_list_inactive_append(&eset->lru, edata);
+ size_t npages = size >> LG_PAGE;
+ /*
+	 * All modifications to npages are made while the enclosing ecache's
+	 * mutex is held, so we don't need an atomic fetch-add; we can get by
+	 * with a load followed by a store.
+ */
+ size_t cur_eset_npages =
+ atomic_load_zu(&eset->npages, ATOMIC_RELAXED);
+ atomic_store_zu(&eset->npages, cur_eset_npages + npages,
+ ATOMIC_RELAXED);
+}
+
+void
+eset_remove(eset_t *eset, edata_t *edata) {
+ assert(edata_state_get(edata) == eset->state ||
+ edata_state_in_transition(edata_state_get(edata)));
+
+ size_t size = edata_size_get(edata);
+ size_t psz = sz_psz_quantize_floor(size);
+ pszind_t pind = sz_psz2ind(psz);
+ if (config_stats) {
+ eset_stats_sub(eset, pind, size);
+ }
+
+ edata_cmp_summary_t edata_cmp_summary = edata_cmp_summary_get(edata);
+ edata_heap_remove(&eset->bins[pind].heap, edata);
+ if (edata_heap_empty(&eset->bins[pind].heap)) {
+ fb_unset(eset->bitmap, ESET_NPSIZES, (size_t)pind);
+ } else {
+ /*
+		 * This is a little weird; we check whether the summaries are
+		 * equal, rather than whether the edata we removed was the heap
+		 * minimum.  The reason is that getting the heap minimum
+ * can cause a pairing heap merge operation. We can avoid this
+ * if we only update the min if it's changed, in which case the
+ * summaries of the removed element and the min element should
+ * compare equal.
+ */
+ if (edata_cmp_summary_comp(edata_cmp_summary,
+ eset->bins[pind].heap_min) == 0) {
+ eset->bins[pind].heap_min = edata_cmp_summary_get(
+ edata_heap_first(&eset->bins[pind].heap));
+ }
+ }
+ edata_list_inactive_remove(&eset->lru, edata);
+ size_t npages = size >> LG_PAGE;
+ /*
+	 * As in eset_insert, the enclosing ecache's mutex is held, so we don't
+	 * need atomic operations for updating eset->npages.
+ */
+ size_t cur_extents_npages =
+ atomic_load_zu(&eset->npages, ATOMIC_RELAXED);
+ assert(cur_extents_npages >= npages);
+ atomic_store_zu(&eset->npages,
+ cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
+}
+
+/*
+ * Find an extent with size [min_size, max_size) to satisfy the alignment
+ * requirement. For each size, try only the first extent in the heap.
+ */
+static edata_t *
+eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size,
+ size_t alignment) {
+ pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(min_size));
+ pszind_t pind_max = sz_psz2ind(sz_psz_quantize_ceil(max_size));
+
+ for (pszind_t i =
+ (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)pind);
+ i < pind_max;
+ i = (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)i + 1)) {
+ assert(i < SC_NPSIZES);
+ assert(!edata_heap_empty(&eset->bins[i].heap));
+ edata_t *edata = edata_heap_first(&eset->bins[i].heap);
+ uintptr_t base = (uintptr_t)edata_base_get(edata);
+ size_t candidate_size = edata_size_get(edata);
+ assert(candidate_size >= min_size);
+
+ uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
+ PAGE_CEILING(alignment));
+ if (base > next_align || base + candidate_size <= next_align) {
+ /* Overflow or not crossing the next alignment. */
+ continue;
+ }
+
+ size_t leadsize = next_align - base;
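+		/*
+		 * After trimming up to the first aligned address, at least
+		 * min_size usable bytes must remain.
+		 */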
+ if (candidate_size - leadsize >= min_size) {
+ return edata;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * Do first-fit extent selection, i.e. select the oldest/lowest extent that is
+ * large enough.
+ *
+ * lg_max_fit is the (log of the) maximum ratio between the requested size and
+ * the returned size that we'll allow. This can reduce fragmentation by
+ * avoiding reusing and splitting large extents for smaller sizes. In practice,
+ * it's set to opt_lg_extent_max_active_fit for the dirty eset and SC_PTR_BITS
+ * for others.
+ */
+static edata_t *
+eset_first_fit(eset_t *eset, size_t size, bool exact_only,
+ unsigned lg_max_fit) {
+ edata_t *ret = NULL;
+ edata_cmp_summary_t ret_summ JEMALLOC_CC_SILENCE_INIT({0});
+
+ pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(size));
+
+ if (exact_only) {
+ return edata_heap_empty(&eset->bins[pind].heap) ? NULL :
+ edata_heap_first(&eset->bins[pind].heap);
+ }
+
+ for (pszind_t i =
+ (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)pind);
+ i < ESET_NPSIZES;
+ i = (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)i + 1)) {
+ assert(!edata_heap_empty(&eset->bins[i].heap));
+ if (lg_max_fit == SC_PTR_BITS) {
+ /*
+ * We'll shift by this below, and shifting out all the
+ * bits is undefined. Decreasing is safe, since the
+ * page size is larger than 1 byte.
+ */
+ lg_max_fit = SC_PTR_BITS - 1;
+ }
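+		/*
+		 * Stop once the size class exceeds the request by more than a
+		 * factor of 2^lg_max_fit.
+		 */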
+ if ((sz_pind2sz(i) >> lg_max_fit) > size) {
+ break;
+ }
+ if (ret == NULL || edata_cmp_summary_comp(
+ eset->bins[i].heap_min, ret_summ) < 0) {
+ /*
+ * We grab the edata as early as possible, even though
+ * we might change it later. Practically, a large
+ * portion of eset_fit calls succeed at the first valid
+ * index, so this doesn't cost much, and we get the
+ * effect of prefetching the edata as early as possible.
+ */
+ edata_t *edata = edata_heap_first(&eset->bins[i].heap);
+ assert(edata_size_get(edata) >= size);
+ assert(ret == NULL || edata_snad_comp(edata, ret) < 0);
+ assert(ret == NULL || edata_cmp_summary_comp(
+ eset->bins[i].heap_min,
+ edata_cmp_summary_get(edata)) == 0);
+ ret = edata;
+ ret_summ = eset->bins[i].heap_min;
+ }
+ if (i == SC_NPSIZES) {
+ break;
+ }
+ assert(i < SC_NPSIZES);
+ }
+
+ return ret;
+}
+
+edata_t *
+eset_fit(eset_t *eset, size_t esize, size_t alignment, bool exact_only,
+ unsigned lg_max_fit) {
+ size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
+ /* Beware size_t wrap-around. */
+ if (max_size < esize) {
+ return NULL;
+ }
+
+ edata_t *edata = eset_first_fit(eset, max_size, exact_only, lg_max_fit);
+
+ if (alignment > PAGE && edata == NULL) {
+ /*
+ * max_size guarantees the alignment requirement but is rather
+ * pessimistic. Next we try to satisfy the aligned allocation
+ * with sizes in [esize, max_size).
+ */
+ edata = eset_fit_alignment(eset, esize, max_size, alignment);
+ }
+
+ return edata;
+}
diff --git a/deps/jemalloc/src/exp_grow.c b/deps/jemalloc/src/exp_grow.c
new file mode 100644
index 000000000..386471f49
--- /dev/null
+++ b/deps/jemalloc/src/exp_grow.c
@@ -0,0 +1,8 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+void
+exp_grow_init(exp_grow_t *exp_grow) {
+ exp_grow->next = sz_psz2ind(HUGEPAGE);
+ exp_grow->limit = sz_psz2ind(SC_LARGE_MAXCLASS);
+}
diff --git a/deps/jemalloc/src/extent.c b/deps/jemalloc/src/extent.c
index 9237f903d..cf3d1f311 100644
--- a/deps/jemalloc/src/extent.c
+++ b/deps/jemalloc/src/extent.c
@@ -1,93 +1,28 @@
-#define JEMALLOC_EXTENT_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/emap.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/ph.h"
-#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/mutex_pool.h"
/******************************************************************************/
/* Data. */
-rtree_t extents_rtree;
-/* Keyed by the address of the extent_t being protected. */
-mutex_pool_t extent_mutex_pool;
-
size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;
-static const bitmap_info_t extents_bitmap_info =
- BITMAP_INFO_INITIALIZER(SC_NPSIZES+1);
-
-static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
- size_t size, size_t alignment, bool *zero, bool *commit,
- unsigned arena_ind);
-static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr,
- size_t size, bool committed, unsigned arena_ind);
-static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr,
- size_t size, bool committed, unsigned arena_ind);
-static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr,
- size_t size, size_t offset, size_t length, unsigned arena_ind);
-static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length, bool growing_retained);
-static bool extent_decommit_default(extent_hooks_t *extent_hooks,
- void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
-#ifdef PAGES_CAN_PURGE_LAZY
-static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr,
- size_t size, size_t offset, size_t length, unsigned arena_ind);
-#endif
-static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length, bool growing_retained);
-#ifdef PAGES_CAN_PURGE_FORCED
-static bool extent_purge_forced_default(extent_hooks_t *extent_hooks,
- void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
-#endif
-static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length, bool growing_retained);
-static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr,
- size_t size, size_t size_a, size_t size_b, bool committed,
- unsigned arena_ind);
-static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
- szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
- bool growing_retained);
-static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
- size_t size_a, void *addr_b, size_t size_b, bool committed,
- unsigned arena_ind);
-static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
- bool growing_retained);
-
-const extent_hooks_t extent_hooks_default = {
- extent_alloc_default,
- extent_dalloc_default,
- extent_destroy_default,
- extent_commit_default,
- extent_decommit_default
-#ifdef PAGES_CAN_PURGE_LAZY
- ,
- extent_purge_lazy_default
-#else
- ,
- NULL
-#endif
-#ifdef PAGES_CAN_PURGE_FORCED
- ,
- extent_purge_forced_default
-#else
- ,
- NULL
-#endif
- ,
- extent_split_default,
- extent_merge_default
-};
+static bool extent_commit_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ size_t offset, size_t length, bool growing_retained);
+static bool extent_purge_lazy_impl(tsdn_t *tsdn, ehooks_t *ehooks,
+ edata_t *edata, size_t offset, size_t length, bool growing_retained);
+static bool extent_purge_forced_impl(tsdn_t *tsdn, ehooks_t *ehooks,
+ edata_t *edata, size_t offset, size_t length, bool growing_retained);
+static edata_t *extent_split_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ edata_t *edata, size_t size_a, size_t size_b, bool holding_core_locks);
+static bool extent_merge_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ edata_t *a, edata_t *b, bool holding_core_locks);
/* Used exclusively for gdump triggering. */
static atomic_zu_t curpages;
@@ -99,503 +34,158 @@ static atomic_zu_t highpages;
* definition.
*/
-static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
-static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
- size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
- bool *zero, bool *commit, bool growing_retained);
-static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
- extent_t *extent, bool *coalesced, bool growing_retained);
-static void extent_record(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent,
- bool growing_retained);
+static void extent_deregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata);
+static edata_t *extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ ecache_t *ecache, edata_t *expand_edata, size_t usize, size_t alignment,
+ bool zero, bool *commit, bool growing_retained, bool guarded);
+static edata_t *extent_try_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ ecache_t *ecache, edata_t *edata, bool *coalesced);
+static edata_t *extent_alloc_retained(tsdn_t *tsdn, pac_t *pac,
+ ehooks_t *ehooks, edata_t *expand_edata, size_t size, size_t alignment,
+ bool zero, bool *commit, bool guarded);
/******************************************************************************/
-#define ATTR_NONE /* does nothing */
-
-ph_gen(ATTR_NONE, extent_avail_, extent_tree_t, extent_t, ph_link,
- extent_esnead_comp)
-
-#undef ATTR_NONE
-
-typedef enum {
- lock_result_success,
- lock_result_failure,
- lock_result_no_extent
-} lock_result_t;
-
-static lock_result_t
-extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
- extent_t **result, bool inactive_only) {
- extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
- elm, true);
-
- /* Slab implies active extents and should be skipped. */
- if (extent1 == NULL || (inactive_only && rtree_leaf_elm_slab_read(tsdn,
- &extents_rtree, elm, true))) {
- return lock_result_no_extent;
- }
-
- /*
- * It's possible that the extent changed out from under us, and with it
- * the leaf->extent mapping. We have to recheck while holding the lock.
- */
- extent_lock(tsdn, extent1);
- extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn,
- &extents_rtree, elm, true);
-
- if (extent1 == extent2) {
- *result = extent1;
- return lock_result_success;
- } else {
- extent_unlock(tsdn, extent1);
- return lock_result_failure;
- }
-}
-
-/*
- * Returns a pool-locked extent_t * if there's one associated with the given
- * address, and NULL otherwise.
- */
-static extent_t *
-extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr,
- bool inactive_only) {
- extent_t *ret = NULL;
- rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
- rtree_ctx, (uintptr_t)addr, false, false);
- if (elm == NULL) {
- return NULL;
- }
- lock_result_t lock_result;
- do {
- lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret,
- inactive_only);
- } while (lock_result == lock_result_failure);
- return ret;
-}
-
-extent_t *
-extent_alloc(tsdn_t *tsdn, arena_t *arena) {
- malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
- extent_t *extent = extent_avail_first(&arena->extent_avail);
- if (extent == NULL) {
- malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
- return base_alloc_extent(tsdn, arena->base);
- }
- extent_avail_remove(&arena->extent_avail, extent);
- atomic_fetch_sub_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
- malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
- return extent;
-}
-
-void
-extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
- malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
- extent_avail_insert(&arena->extent_avail, extent);
- atomic_fetch_add_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
- malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
-}
-
-extent_hooks_t *
-extent_hooks_get(arena_t *arena) {
- return base_extent_hooks_get(arena->base);
-}
-
-extent_hooks_t *
-extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) {
- background_thread_info_t *info;
- if (have_background_thread) {
- info = arena_background_thread_info_get(arena);
- malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
- }
- extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
- if (have_background_thread) {
- malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
- }
-
- return ret;
-}
-
-static void
-extent_hooks_assure_initialized(arena_t *arena,
- extent_hooks_t **r_extent_hooks) {
- if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) {
- *r_extent_hooks = extent_hooks_get(arena);
- }
-}
-
-#ifndef JEMALLOC_JET
-static
-#endif
size_t
-extent_size_quantize_floor(size_t size) {
- size_t ret;
- pszind_t pind;
-
- assert(size > 0);
- assert((size & PAGE_MASK) == 0);
-
- pind = sz_psz2ind(size - sz_large_pad + 1);
- if (pind == 0) {
- /*
- * Avoid underflow. This short-circuit would also do the right
- * thing for all sizes in the range for which there are
- * PAGE-spaced size classes, but it's simplest to just handle
- * the one case that would cause erroneous results.
- */
- return size;
- }
- ret = sz_pind2sz(pind - 1) + sz_large_pad;
- assert(ret <= size);
- return ret;
+extent_sn_next(pac_t *pac) {
+ return atomic_fetch_add_zu(&pac->extent_sn_next, 1, ATOMIC_RELAXED);
}
-#ifndef JEMALLOC_JET
-static
-#endif
-size_t
-extent_size_quantize_ceil(size_t size) {
- size_t ret;
-
- assert(size > 0);
- assert(size - sz_large_pad <= SC_LARGE_MAXCLASS);
- assert((size & PAGE_MASK) == 0);
-
- ret = extent_size_quantize_floor(size);
- if (ret < size) {
- /*
- * Skip a quantization that may have an adequately large extent,
- * because under-sized extents may be mixed in. This only
- * happens when an unusual size is requested, i.e. for aligned
- * allocation, and is just one of several places where linear
- * search would potentially find sufficiently aligned available
- * memory somewhere lower.
- */
- ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
- sz_large_pad;
- }
- return ret;
+static inline bool
+extent_may_force_decay(pac_t *pac) {
+ return !(pac_decay_ms_get(pac, extent_state_dirty) == -1
+ || pac_decay_ms_get(pac, extent_state_muzzy) == -1);
}
-/* Generate pairing heap functions. */
-ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)
+static bool
+extent_try_delayed_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ ecache_t *ecache, edata_t *edata) {
+ emap_update_edata_state(tsdn, pac->emap, edata, extent_state_active);
-bool
-extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
- bool delay_coalesce) {
- if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS,
- malloc_mutex_rank_exclusive)) {
+ bool coalesced;
+ edata = extent_try_coalesce(tsdn, pac, ehooks, ecache,
+ edata, &coalesced);
+ emap_update_edata_state(tsdn, pac->emap, edata, ecache->state);
+
+ if (!coalesced) {
return true;
}
- for (unsigned i = 0; i < SC_NPSIZES + 1; i++) {
- extent_heap_new(&extents->heaps[i]);
- }
- bitmap_init(extents->bitmap, &extents_bitmap_info, true);
- extent_list_init(&extents->lru);
- atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED);
- extents->state = state;
- extents->delay_coalesce = delay_coalesce;
+ eset_insert(&ecache->eset, edata);
return false;
}
-extent_state_t
-extents_state_get(const extents_t *extents) {
- return extents->state;
-}
-
-size_t
-extents_npages_get(extents_t *extents) {
- return atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
-}
-
-size_t
-extents_nextents_get(extents_t *extents, pszind_t pind) {
- return atomic_load_zu(&extents->nextents[pind], ATOMIC_RELAXED);
-}
-
-size_t
-extents_nbytes_get(extents_t *extents, pszind_t pind) {
- return atomic_load_zu(&extents->nbytes[pind], ATOMIC_RELAXED);
-}
-
-static void
-extents_stats_add(extents_t *extent, pszind_t pind, size_t sz) {
- size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED);
- atomic_store_zu(&extent->nextents[pind], cur + 1, ATOMIC_RELAXED);
- cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED);
- atomic_store_zu(&extent->nbytes[pind], cur + sz, ATOMIC_RELAXED);
-}
-
-static void
-extents_stats_sub(extents_t *extent, pszind_t pind, size_t sz) {
- size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED);
- atomic_store_zu(&extent->nextents[pind], cur - 1, ATOMIC_RELAXED);
- cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED);
- atomic_store_zu(&extent->nbytes[pind], cur - sz, ATOMIC_RELAXED);
-}
-
-static void
-extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
- malloc_mutex_assert_owner(tsdn, &extents->mtx);
- assert(extent_state_get(extent) == extents->state);
-
- size_t size = extent_size_get(extent);
- size_t psz = extent_size_quantize_floor(size);
- pszind_t pind = sz_psz2ind(psz);
- if (extent_heap_empty(&extents->heaps[pind])) {
- bitmap_unset(extents->bitmap, &extents_bitmap_info,
- (size_t)pind);
- }
- extent_heap_insert(&extents->heaps[pind], extent);
-
- if (config_stats) {
- extents_stats_add(extents, pind, size);
- }
-
- extent_list_append(&extents->lru, extent);
- size_t npages = size >> LG_PAGE;
- /*
- * All modifications to npages hold the mutex (as asserted above), so we
- * don't need an atomic fetch-add; we can get by with a load followed by
- * a store.
- */
- size_t cur_extents_npages =
- atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
- atomic_store_zu(&extents->npages, cur_extents_npages + npages,
- ATOMIC_RELAXED);
-}
-
-static void
-extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
- malloc_mutex_assert_owner(tsdn, &extents->mtx);
- assert(extent_state_get(extent) == extents->state);
-
- size_t size = extent_size_get(extent);
- size_t psz = extent_size_quantize_floor(size);
- pszind_t pind = sz_psz2ind(psz);
- extent_heap_remove(&extents->heaps[pind], extent);
-
- if (config_stats) {
- extents_stats_sub(extents, pind, size);
- }
-
- if (extent_heap_empty(&extents->heaps[pind])) {
- bitmap_set(extents->bitmap, &extents_bitmap_info,
- (size_t)pind);
- }
- extent_list_remove(&extents->lru, extent);
- size_t npages = size >> LG_PAGE;
- /*
- * As in extents_insert_locked, we hold extents->mtx and so don't need
- * atomic operations for updating extents->npages.
- */
- size_t cur_extents_npages =
- atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
- assert(cur_extents_npages >= npages);
- atomic_store_zu(&extents->npages,
- cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
-}
-
-/*
- * Find an extent with size [min_size, max_size) to satisfy the alignment
- * requirement. For each size, try only the first extent in the heap.
- */
-static extent_t *
-extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size,
- size_t alignment) {
- pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size));
- pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size));
-
- for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
- &extents_bitmap_info, (size_t)pind); i < pind_max; i =
- (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
- (size_t)i+1)) {
- assert(i < SC_NPSIZES);
- assert(!extent_heap_empty(&extents->heaps[i]));
- extent_t *extent = extent_heap_first(&extents->heaps[i]);
- uintptr_t base = (uintptr_t)extent_base_get(extent);
- size_t candidate_size = extent_size_get(extent);
- assert(candidate_size >= min_size);
-
- uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
- PAGE_CEILING(alignment));
- if (base > next_align || base + candidate_size <= next_align) {
- /* Overflow or not crossing the next alignment. */
- continue;
- }
-
- size_t leadsize = next_align - base;
- if (candidate_size - leadsize >= min_size) {
- return extent;
- }
- }
+edata_t *
+ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
+ edata_t *expand_edata, size_t size, size_t alignment, bool zero,
+ bool guarded) {
+ assert(size != 0);
+ assert(alignment != 0);
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
- return NULL;
+ bool commit = true;
+ edata_t *edata = extent_recycle(tsdn, pac, ehooks, ecache, expand_edata,
+ size, alignment, zero, &commit, false, guarded);
+ assert(edata == NULL || edata_pai_get(edata) == EXTENT_PAI_PAC);
+ assert(edata == NULL || edata_guarded_get(edata) == guarded);
+ return edata;
}
-/*
- * Do first-fit extent selection, i.e. select the oldest/lowest extent that is
- * large enough.
- */
-static extent_t *
-extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
- size_t size) {
- extent_t *ret = NULL;
-
- pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
-
- if (!maps_coalesce && !opt_retain) {
- /*
- * No split / merge allowed (Windows w/o retain). Try exact fit
- * only.
- */
- return extent_heap_empty(&extents->heaps[pind]) ? NULL :
- extent_heap_first(&extents->heaps[pind]);
- }
+edata_t *
+ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
+ edata_t *expand_edata, size_t size, size_t alignment, bool zero,
+ bool guarded) {
+ assert(size != 0);
+ assert(alignment != 0);
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
- for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
- &extents_bitmap_info, (size_t)pind);
- i < SC_NPSIZES + 1;
- i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
- (size_t)i+1)) {
- assert(!extent_heap_empty(&extents->heaps[i]));
- extent_t *extent = extent_heap_first(&extents->heaps[i]);
- assert(extent_size_get(extent) >= size);
- /*
- * In order to reduce fragmentation, avoid reusing and splitting
- * large extents for much smaller sizes.
- *
- * Only do check for dirty extents (delay_coalesce).
- */
- if (extents->delay_coalesce &&
- (sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
- break;
- }
- if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
- ret = extent;
+ bool commit = true;
+ edata_t *edata = extent_alloc_retained(tsdn, pac, ehooks, expand_edata,
+ size, alignment, zero, &commit, guarded);
+ if (edata == NULL) {
+ if (opt_retain && expand_edata != NULL) {
+ /*
+			 * When retain is enabled and we are trying to expand,
+			 * we do not attempt extent_alloc_wrapper: its mmap is
+			 * very unlikely to succeed (unless the new mapping
+			 * happens to land right at the end of the existing
+			 * one).
+ */
+ return NULL;
}
- if (i == SC_NPSIZES) {
- break;
+ if (guarded) {
+ /*
+			 * This means no cached guarded extents are available
+			 * (and no grow_retained was attempted).  The pac_alloc
+			 * flow will allocate regular extents to make new
+			 * guarded ones.
+ */
+ return NULL;
}
- assert(i < SC_NPSIZES);
- }
-
- return ret;
-}
-
-/*
- * Do first-fit extent selection, where the selection policy choice is
- * based on extents->delay_coalesce.
- */
-static extent_t *
-extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
- size_t esize, size_t alignment) {
- malloc_mutex_assert_owner(tsdn, &extents->mtx);
-
- size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
- /* Beware size_t wrap-around. */
- if (max_size < esize) {
- return NULL;
+ void *new_addr = (expand_edata == NULL) ? NULL :
+ edata_past_get(expand_edata);
+ edata = extent_alloc_wrapper(tsdn, pac, ehooks, new_addr,
+ size, alignment, zero, &commit,
+ /* growing_retained */ false);
}
- extent_t *extent =
- extents_first_fit_locked(tsdn, arena, extents, max_size);
-
- if (alignment > PAGE && extent == NULL) {
- /*
- * max_size guarantees the alignment requirement but is rather
- * pessimistic. Next we try to satisfy the aligned allocation
- * with sizes in [esize, max_size).
- */
- extent = extents_fit_alignment(extents, esize, max_size,
- alignment);
- }
-
- return extent;
-}
-
-static bool
-extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
- extent_t *extent) {
- extent_state_set(extent, extent_state_active);
- bool coalesced;
- extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx,
- extents, extent, &coalesced, false);
- extent_state_set(extent, extents_state_get(extents));
-
- if (!coalesced) {
- return true;
- }
- extents_insert_locked(tsdn, extents, extent);
- return false;
-}
-
-extent_t *
-extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
- extents_t *extents, void *new_addr, size_t size, size_t pad,
- size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
- assert(size + pad != 0);
- assert(alignment != 0);
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
-
- extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents,
- new_addr, size, pad, alignment, slab, szind, zero, commit, false);
- assert(extent == NULL || extent_dumpable_get(extent));
- return extent;
+ assert(edata == NULL || edata_pai_get(edata) == EXTENT_PAI_PAC);
+ return edata;
}
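
The grow path above tries the retained cache first, then (when retain is enabled, not expanding, and not guarded) grows the retained mapping, and only then falls back to a fresh mapping, with early exits for in-place expansion under retain and for guarded requests. Below is a minimal standalone sketch of that decision order in plain C; the try_* helpers are hypothetical stand-ins, not jemalloc APIs.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the real steps; malloc()/NULL simulate
 * success/failure so the sketch stays self-contained. */
static void *try_recycle_retained(size_t size) { (void)size; return NULL; }
static void *try_grow_retained(size_t size) { return malloc(size); }
static void *try_fresh_mapping(size_t size) { return malloc(size); }

/* Simplified decision order of ecache_alloc_grow(). */
static void *
alloc_grow_sketch(size_t size, bool expanding, bool retain, bool guarded) {
	/* 1) Reuse (and possibly grow) the retained virtual memory. */
	void *p = try_recycle_retained(size);
	if (p == NULL && retain && !expanding && !guarded) {
		p = try_grow_retained(size);
	}
	if (p != NULL) {
		return p;
	}
	/* 2) Expanding in place under retain: a new mapping at a fixed
	 * address is very unlikely to help, so give up early. */
	if (retain && expanding) {
		return NULL;
	}
	/* 3) Guarded requests never fall through to plain mappings here;
	 * the caller builds new guarded extents out of regular ones. */
	if (guarded) {
		return NULL;
	}
	/* 4) Last resort: a brand-new mapping. */
	return try_fresh_mapping(size);
}

int
main(void) {
	void *p = alloc_grow_sketch(4096, false, true, false);
	printf("grow sketch returned %p\n", p);
	free(p);
	return 0;
}
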
void
-extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
- extents_t *extents, extent_t *extent) {
- assert(extent_base_get(extent) != NULL);
- assert(extent_size_get(extent) != 0);
- assert(extent_dumpable_get(extent));
+ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
+ edata_t *edata) {
+ assert(edata_base_get(edata) != NULL);
+ assert(edata_size_get(edata) != 0);
+ assert(edata_pai_get(edata) == EXTENT_PAI_PAC);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
- extent_addr_set(extent, extent_base_get(extent));
- extent_zeroed_set(extent, false);
+ edata_addr_set(edata, edata_base_get(edata));
+ edata_zeroed_set(edata, false);
- extent_record(tsdn, arena, r_extent_hooks, extents, extent, false);
+ extent_record(tsdn, pac, ehooks, ecache, edata);
}
-extent_t *
-extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
- extents_t *extents, size_t npages_min) {
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
- malloc_mutex_lock(tsdn, &extents->mtx);
+edata_t *
+ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ ecache_t *ecache, size_t npages_min) {
+ malloc_mutex_lock(tsdn, &ecache->mtx);
/*
* Get the LRU coalesced extent, if any. If coalescing was delayed,
* the loop will iterate until the LRU extent is fully coalesced.
*/
- extent_t *extent;
+ edata_t *edata;
while (true) {
/* Get the LRU extent, if any. */
- extent = extent_list_first(&extents->lru);
- if (extent == NULL) {
- goto label_return;
+ eset_t *eset = &ecache->eset;
+ edata = edata_list_inactive_first(&eset->lru);
+ if (edata == NULL) {
+ /*
+			 * Next, check whether there are guarded extents.  They
+			 * are more expensive to purge (since they are not
+			 * mergeable), so we favor caching them longer.
+ */
+ eset = &ecache->guarded_eset;
+ edata = edata_list_inactive_first(&eset->lru);
+ if (edata == NULL) {
+ goto label_return;
+ }
}
/* Check the eviction limit. */
- size_t extents_npages = atomic_load_zu(&extents->npages,
- ATOMIC_RELAXED);
+ size_t extents_npages = ecache_npages_get(ecache);
if (extents_npages <= npages_min) {
- extent = NULL;
+ edata = NULL;
goto label_return;
}
- extents_remove_locked(tsdn, extents, extent);
- if (!extents->delay_coalesce) {
+ eset_remove(eset, edata);
+ if (!ecache->delay_coalesce || edata_guarded_get(edata)) {
break;
}
/* Try to coalesce. */
- if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks,
- rtree_ctx, extents, extent)) {
+ if (extent_try_delayed_coalesce(tsdn, pac, ehooks, ecache,
+ edata)) {
break;
}
/*
@@ -608,23 +198,24 @@ extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
* Either mark the extent active or deregister it to protect against
* concurrent operations.
*/
- switch (extents_state_get(extents)) {
+ switch (ecache->state) {
case extent_state_active:
not_reached();
case extent_state_dirty:
case extent_state_muzzy:
- extent_state_set(extent, extent_state_active);
+ emap_update_edata_state(tsdn, pac->emap, edata,
+ extent_state_active);
break;
case extent_state_retained:
- extent_deregister(tsdn, extent);
+ extent_deregister(tsdn, pac, edata);
break;
default:
not_reached();
}
label_return:
- malloc_mutex_unlock(tsdn, &extents->mtx);
- return extent;
+ malloc_mutex_unlock(tsdn, &ecache->mtx);
+ return edata;
}
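
ecache_evict() above walks the cache in LRU order and keeps pulling extents until the cached page count drops to npages_min, preferring to keep guarded extents around longer. A toy standalone model of that threshold loop follows; the toy_* types are illustrative only, not the real ecache/eset structures.

#include <stddef.h>
#include <stdio.h>

typedef struct toy_extent {
	size_t npages;
	struct toy_extent *next;	/* LRU order: head is oldest. */
} toy_extent_t;

typedef struct {
	toy_extent_t *lru_head;
	size_t npages_cached;
} toy_ecache_t;

/* Pull the oldest cached extent, but never drop below npages_min pages. */
static toy_extent_t *
toy_evict(toy_ecache_t *ec, size_t npages_min) {
	if (ec->lru_head == NULL || ec->npages_cached <= npages_min) {
		return NULL;	/* Nothing cached, or already at the floor. */
	}
	toy_extent_t *victim = ec->lru_head;
	ec->lru_head = victim->next;
	ec->npages_cached -= victim->npages;
	return victim;
}

int
main(void) {
	toy_extent_t c = {1, NULL}, b = {2, &c}, a = {4, &b};
	toy_ecache_t ec = {&a, 7};
	toy_extent_t *e;
	/* Keep evicting until at most 2 pages remain cached. */
	while ((e = toy_evict(&ec, 2)) != NULL) {
		printf("evicted %zu pages, %zu still cached\n", e->npages,
		    ec.npages_cached);
	}
	return 0;
}
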
/*
@@ -632,123 +223,73 @@ label_return:
* indicates OOM), e.g. when trying to split an existing extent.
*/
static void
-extents_abandon_vm(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
- extents_t *extents, extent_t *extent, bool growing_retained) {
- size_t sz = extent_size_get(extent);
+extents_abandon_vm(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
+ edata_t *edata, bool growing_retained) {
+ size_t sz = edata_size_get(edata);
if (config_stats) {
- arena_stats_accum_zu(&arena->stats.abandoned_vm, sz);
+ atomic_fetch_add_zu(&pac->stats->abandoned_vm, sz,
+ ATOMIC_RELAXED);
}
/*
* Leak extent after making sure its pages have already been purged, so
* that this is only a virtual memory leak.
*/
- if (extents_state_get(extents) == extent_state_dirty) {
- if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks,
- extent, 0, sz, growing_retained)) {
- extent_purge_forced_impl(tsdn, arena, r_extent_hooks,
- extent, 0, extent_size_get(extent),
- growing_retained);
+ if (ecache->state == extent_state_dirty) {
+ if (extent_purge_lazy_impl(tsdn, ehooks, edata, 0, sz,
+ growing_retained)) {
+ extent_purge_forced_impl(tsdn, ehooks, edata, 0,
+ edata_size_get(edata), growing_retained);
}
}
- extent_dalloc(tsdn, arena, extent);
-}
-
-void
-extents_prefork(tsdn_t *tsdn, extents_t *extents) {
- malloc_mutex_prefork(tsdn, &extents->mtx);
-}
-
-void
-extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) {
- malloc_mutex_postfork_parent(tsdn, &extents->mtx);
-}
-
-void
-extents_postfork_child(tsdn_t *tsdn, extents_t *extents) {
- malloc_mutex_postfork_child(tsdn, &extents->mtx);
+ edata_cache_put(tsdn, pac->edata_cache, edata);
}
static void
-extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
- extent_t *extent) {
- assert(extent_arena_get(extent) == arena);
- assert(extent_state_get(extent) == extent_state_active);
+extent_deactivate_locked_impl(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
+ edata_t *edata) {
+ malloc_mutex_assert_owner(tsdn, &ecache->mtx);
+ assert(edata_arena_ind_get(edata) == ecache_ind_get(ecache));
- extent_state_set(extent, extents_state_get(extents));
- extents_insert_locked(tsdn, extents, extent);
+ emap_update_edata_state(tsdn, pac->emap, edata, ecache->state);
+ eset_t *eset = edata_guarded_get(edata) ? &ecache->guarded_eset :
+ &ecache->eset;
+ eset_insert(eset, edata);
}
static void
-extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
- extent_t *extent) {
- malloc_mutex_lock(tsdn, &extents->mtx);
- extent_deactivate_locked(tsdn, arena, extents, extent);
- malloc_mutex_unlock(tsdn, &extents->mtx);
+extent_deactivate_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
+ edata_t *edata) {
+ assert(edata_state_get(edata) == extent_state_active);
+ extent_deactivate_locked_impl(tsdn, pac, ecache, edata);
}
static void
-extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
- extent_t *extent) {
- assert(extent_arena_get(extent) == arena);
- assert(extent_state_get(extent) == extents_state_get(extents));
-
- extents_remove_locked(tsdn, extents, extent);
- extent_state_set(extent, extent_state_active);
-}
-
-static bool
-extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
- const extent_t *extent, bool dependent, bool init_missing,
- rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
- *r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)extent_base_get(extent), dependent, init_missing);
- if (!dependent && *r_elm_a == NULL) {
- return true;
- }
- assert(*r_elm_a != NULL);
-
- *r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)extent_last_get(extent), dependent, init_missing);
- if (!dependent && *r_elm_b == NULL) {
- return true;
- }
- assert(*r_elm_b != NULL);
-
- return false;
+extent_deactivate_check_state_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
+ edata_t *edata, extent_state_t expected_state) {
+ assert(edata_state_get(edata) == expected_state);
+ extent_deactivate_locked_impl(tsdn, pac, ecache, edata);
}
static void
-extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
- rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {
- rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab);
- if (elm_b != NULL) {
- rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind,
- slab);
- }
-}
+extent_activate_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache, eset_t *eset,
+ edata_t *edata) {
+ assert(edata_arena_ind_get(edata) == ecache_ind_get(ecache));
+ assert(edata_state_get(edata) == ecache->state ||
+ edata_state_get(edata) == extent_state_merging);
-static void
-extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent,
- szind_t szind) {
- assert(extent_slab_get(extent));
-
- /* Register interior. */
- for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
- rtree_write(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
- LG_PAGE), extent, szind, true);
- }
+ eset_remove(eset, edata);
+ emap_update_edata_state(tsdn, pac->emap, edata, extent_state_active);
}
-static void
-extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
+void
+extent_gdump_add(tsdn_t *tsdn, const edata_t *edata) {
cassert(config_prof);
/* prof_gdump() requirement. */
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
- if (opt_prof && extent_state_get(extent) == extent_state_active) {
- size_t nadd = extent_size_get(extent) >> LG_PAGE;
+ if (opt_prof && edata_state_get(edata) == extent_state_active) {
+ size_t nadd = edata_size_get(edata) >> LG_PAGE;
size_t cur = atomic_fetch_add_zu(&curpages, nadd,
ATOMIC_RELAXED) + nadd;
size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
@@ -767,232 +308,184 @@ extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
}
static void
-extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) {
+extent_gdump_sub(tsdn_t *tsdn, const edata_t *edata) {
cassert(config_prof);
- if (opt_prof && extent_state_get(extent) == extent_state_active) {
- size_t nsub = extent_size_get(extent) >> LG_PAGE;
+ if (opt_prof && edata_state_get(edata) == extent_state_active) {
+ size_t nsub = edata_size_get(edata) >> LG_PAGE;
assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
}
}
static bool
-extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
- rtree_leaf_elm_t *elm_a, *elm_b;
-
+extent_register_impl(tsdn_t *tsdn, pac_t *pac, edata_t *edata, bool gdump_add) {
+ assert(edata_state_get(edata) == extent_state_active);
/*
- * We need to hold the lock to protect against a concurrent coalesce
- * operation that sees us in a partial state.
+ * No locking needed, as the edata must be in active state, which
+ * prevents other threads from accessing the edata.
*/
- extent_lock(tsdn, extent);
-
- if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
- &elm_a, &elm_b)) {
- extent_unlock(tsdn, extent);
+ if (emap_register_boundary(tsdn, pac->emap, edata, SC_NSIZES,
+ /* slab */ false)) {
return true;
}
- szind_t szind = extent_szind_get_maybe_invalid(extent);
- bool slab = extent_slab_get(extent);
- extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
- if (slab) {
- extent_interior_register(tsdn, rtree_ctx, extent, szind);
- }
-
- extent_unlock(tsdn, extent);
-
if (config_prof && gdump_add) {
- extent_gdump_add(tsdn, extent);
+ extent_gdump_add(tsdn, edata);
}
return false;
}
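
Registration now records only the extent's boundary, i.e. its first and last page, in the global map; a neighbor probing just past or just before an adjacent extent can then find its descriptor without interior entries. The sketch below models that with a flat array keyed by page number, purely for illustration; the real emap is an rtree keyed by address, and the names here are hypothetical.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define TOY_NPAGES 64

typedef struct {
	size_t first_page;	/* Inclusive. */
	size_t npages;
} toy_edata_t;

/* Toy "emap": one slot per page; only boundary pages are filled in. */
static toy_edata_t *toy_map[TOY_NPAGES];

static void
toy_register_boundary(toy_edata_t *e) {
	toy_map[e->first_page] = e;
	toy_map[e->first_page + e->npages - 1] = e;
}

/* A forward-neighbor lookup lands on the next extent's first page; a
 * backward lookup lands on the previous extent's last page.  Either way
 * the boundary entries are enough to find the descriptor. */
static toy_edata_t *
toy_lookup(size_t page) {
	return (page < TOY_NPAGES) ? toy_map[page] : NULL;
}

int
main(void) {
	toy_edata_t a = {4, 3};	/* Pages [4, 7). */
	toy_edata_t b = {7, 2};	/* Pages [7, 9). */
	toy_register_boundary(&a);
	toy_register_boundary(&b);
	/* Forward neighbor of a: the page just past it. */
	assert(toy_lookup(a.first_page + a.npages) == &b);
	/* Backward neighbor of b: the page just before it. */
	assert(toy_lookup(b.first_page - 1) == &a);
	puts("boundary lookups ok");
	return 0;
}
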
static bool
-extent_register(tsdn_t *tsdn, extent_t *extent) {
- return extent_register_impl(tsdn, extent, true);
+extent_register(tsdn_t *tsdn, pac_t *pac, edata_t *edata) {
+ return extent_register_impl(tsdn, pac, edata, true);
}
static bool
-extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) {
- return extent_register_impl(tsdn, extent, false);
+extent_register_no_gdump_add(tsdn_t *tsdn, pac_t *pac, edata_t *edata) {
+ return extent_register_impl(tsdn, pac, edata, false);
}
static void
-extent_reregister(tsdn_t *tsdn, extent_t *extent) {
- bool err = extent_register(tsdn, extent);
+extent_reregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata) {
+ bool err = extent_register(tsdn, pac, edata);
assert(!err);
}
/*
- * Removes all pointers to the given extent from the global rtree indices for
- * its interior. This is relevant for slab extents, for which we need to do
- * metadata lookups at places other than the head of the extent. We deregister
- * on the interior, then, when an extent moves from being an active slab to an
- * inactive state.
- */
-static void
-extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
- extent_t *extent) {
- size_t i;
-
- assert(extent_slab_get(extent));
-
- for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
- rtree_clear(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
- LG_PAGE));
- }
-}
-
-/*
* Removes all pointers to the given extent from the global rtree.
*/
static void
-extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) {
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
- rtree_leaf_elm_t *elm_a, *elm_b;
- extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false,
- &elm_a, &elm_b);
-
- extent_lock(tsdn, extent);
-
- extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, SC_NSIZES, false);
- if (extent_slab_get(extent)) {
- extent_interior_deregister(tsdn, rtree_ctx, extent);
- extent_slab_set(extent, false);
- }
-
- extent_unlock(tsdn, extent);
+extent_deregister_impl(tsdn_t *tsdn, pac_t *pac, edata_t *edata,
+ bool gdump) {
+ emap_deregister_boundary(tsdn, pac->emap, edata);
if (config_prof && gdump) {
- extent_gdump_sub(tsdn, extent);
+ extent_gdump_sub(tsdn, edata);
}
}
static void
-extent_deregister(tsdn_t *tsdn, extent_t *extent) {
- extent_deregister_impl(tsdn, extent, true);
+extent_deregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata) {
+ extent_deregister_impl(tsdn, pac, edata, true);
}
static void
-extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) {
- extent_deregister_impl(tsdn, extent, false);
+extent_deregister_no_gdump_sub(tsdn_t *tsdn, pac_t *pac,
+ edata_t *edata) {
+ extent_deregister_impl(tsdn, pac, edata, false);
}
/*
- * Tries to find and remove an extent from extents that can be used for the
+ * Tries to find and remove an extent from ecache that can be used for the
* given allocation request.
*/
-static extent_t *
-extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
- void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
- bool growing_retained) {
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, growing_retained ? 1 : 0);
+static edata_t *
+extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
+ bool guarded) {
+ malloc_mutex_assert_owner(tsdn, &ecache->mtx);
assert(alignment > 0);
- if (config_debug && new_addr != NULL) {
+ if (config_debug && expand_edata != NULL) {
/*
- * Non-NULL new_addr has two use cases:
- *
- * 1) Recycle a known-extant extent, e.g. during purging.
- * 2) Perform in-place expanding reallocation.
- *
- * Regardless of use case, new_addr must either refer to a
- * non-existing extent, or to the base of an extant extent,
- * since only active slabs support interior lookups (which of
- * course cannot be recycled).
+ * Non-NULL expand_edata indicates in-place expanding realloc.
+ * new_addr must either refer to a non-existing extent, or to
+ * the base of an extant extent, since only active slabs support
+ * interior lookups (which of course cannot be recycled).
*/
+ void *new_addr = edata_past_get(expand_edata);
assert(PAGE_ADDR2BASE(new_addr) == new_addr);
- assert(pad == 0);
assert(alignment <= PAGE);
}
- size_t esize = size + pad;
- malloc_mutex_lock(tsdn, &extents->mtx);
- extent_hooks_assure_initialized(arena, r_extent_hooks);
- extent_t *extent;
- if (new_addr != NULL) {
- extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr,
- false);
- if (extent != NULL) {
- /*
- * We might null-out extent to report an error, but we
- * still need to unlock the associated mutex after.
- */
- extent_t *unlock_extent = extent;
- assert(extent_base_get(extent) == new_addr);
- if (extent_arena_get(extent) != arena ||
- extent_size_get(extent) < esize ||
- extent_state_get(extent) !=
- extents_state_get(extents)) {
- extent = NULL;
+ edata_t *edata;
+ eset_t *eset = guarded ? &ecache->guarded_eset : &ecache->eset;
+ if (expand_edata != NULL) {
+ edata = emap_try_acquire_edata_neighbor_expand(tsdn, pac->emap,
+ expand_edata, EXTENT_PAI_PAC, ecache->state);
+ if (edata != NULL) {
+ extent_assert_can_expand(expand_edata, edata);
+ if (edata_size_get(edata) < size) {
+ emap_release_edata(tsdn, pac->emap, edata,
+ ecache->state);
+ edata = NULL;
}
- extent_unlock(tsdn, unlock_extent);
}
} else {
- extent = extents_fit_locked(tsdn, arena, extents, esize,
- alignment);
+ /*
+ * A large extent might be broken up from its original size to
+ * some small size to satisfy a small request. When that small
+ * request is freed, though, it won't merge back with the larger
+ * extent if delayed coalescing is on. The large extent can
+	 * then no longer satisfy a request for its original size.  To
+ * limit this effect, when delayed coalescing is enabled, we
+ * put a cap on how big an extent we can split for a request.
+ */
+ unsigned lg_max_fit = ecache->delay_coalesce
+ ? (unsigned)opt_lg_extent_max_active_fit : SC_PTR_BITS;
+
+ /*
+ * If split and merge are not allowed (Windows w/o retain), try
+ * exact fit only.
+ *
+		 * For simplicity, splitting guarded extents is not supported.
+		 * Hence, we only do exact fit for guarded allocations.
+ */
+ bool exact_only = (!maps_coalesce && !opt_retain) || guarded;
+ edata = eset_fit(eset, size, alignment, exact_only,
+ lg_max_fit);
}
- if (extent == NULL) {
- malloc_mutex_unlock(tsdn, &extents->mtx);
+ if (edata == NULL) {
return NULL;
}
+ assert(!guarded || edata_guarded_get(edata));
+ extent_activate_locked(tsdn, pac, ecache, eset, edata);
- extent_activate_locked(tsdn, arena, extents, extent);
- malloc_mutex_unlock(tsdn, &extents->mtx);
-
- return extent;
+ return edata;
}
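
The comment above caps how large a cached extent may be split for a small request when coalescing is delayed: roughly, the extent may be at most 2^opt_lg_extent_max_active_fit times the request. A small standalone arithmetic sketch, assuming the default value of 6 for that option.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* An extent of size `avail` may be split for a request of size `req` only if
 * avail >> lg_max_fit <= req, i.e. the extent is at most 2^lg_max_fit times
 * larger than the request. */
static bool
fit_allowed(size_t avail, size_t req, unsigned lg_max_fit) {
	return (avail >> lg_max_fit) <= req;
}

int
main(void) {
	unsigned lg_max_fit = 6;	/* Assumed default of
					 * opt_lg_extent_max_active_fit. */
	size_t avail = (size_t)2 << 20;	/* 2 MiB cached extent. */
	printf("64 KiB request: %s\n", fit_allowed(avail, (size_t)64 << 10,
	    lg_max_fit) ? "may split" : "skip, keep looking");
	printf("16 KiB request: %s\n", fit_allowed(avail, (size_t)16 << 10,
	    lg_max_fit) ? "may split" : "skip, keep looking");
	return 0;
}
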
/*
* Given an allocation request and an extent guaranteed to be able to satisfy
- * it, this splits off lead and trail extents, leaving extent pointing to an
+ * it, this splits off lead and trail extents, leaving edata pointing to an
* extent satisfying the allocation.
- * This function doesn't put lead or trail into any extents_t; it's the caller's
+ * This function doesn't put lead or trail into any ecache; it's the caller's
* job to ensure that they can be reused.
*/
typedef enum {
/*
- * Split successfully. lead, extent, and trail, are modified to extents
+ * Split successfully. lead, edata, and trail, are modified to extents
* describing the ranges before, in, and after the given allocation.
*/
extent_split_interior_ok,
/*
* The extent can't satisfy the given allocation request. None of the
- * input extent_t *s are touched.
+ * input edata_t *s are touched.
*/
extent_split_interior_cant_alloc,
/*
* In a potentially invalid state. Must leak (if *to_leak is non-NULL),
* and salvage what's still salvageable (if *to_salvage is non-NULL).
- * None of lead, extent, or trail are valid.
+ * None of lead, edata, or trail are valid.
*/
extent_split_interior_error
} extent_split_interior_result_t;
static extent_split_interior_result_t
-extent_split_interior(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx,
+extent_split_interior(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
/* The result of splitting, in case of success. */
- extent_t **extent, extent_t **lead, extent_t **trail,
+ edata_t **edata, edata_t **lead, edata_t **trail,
/* The mess to clean up, in case of error. */
- extent_t **to_leak, extent_t **to_salvage,
- void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
- szind_t szind, bool growing_retained) {
- size_t esize = size + pad;
- size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent),
- PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent);
- assert(new_addr == NULL || leadsize == 0);
- if (extent_size_get(*extent) < leadsize + esize) {
+ edata_t **to_leak, edata_t **to_salvage,
+ edata_t *expand_edata, size_t size, size_t alignment) {
+ size_t leadsize = ALIGNMENT_CEILING((uintptr_t)edata_base_get(*edata),
+ PAGE_CEILING(alignment)) - (uintptr_t)edata_base_get(*edata);
+ assert(expand_edata == NULL || leadsize == 0);
+ if (edata_size_get(*edata) < leadsize + size) {
return extent_split_interior_cant_alloc;
}
- size_t trailsize = extent_size_get(*extent) - leadsize - esize;
+ size_t trailsize = edata_size_get(*edata) - leadsize - size;
*lead = NULL;
*trail = NULL;
@@ -1001,11 +494,11 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena,
/* Split the lead. */
if (leadsize != 0) {
- *lead = *extent;
- *extent = extent_split_impl(tsdn, arena, r_extent_hooks,
- *lead, leadsize, SC_NSIZES, false, esize + trailsize, szind,
- slab, growing_retained);
- if (*extent == NULL) {
+ assert(!edata_guarded_get(*edata));
+ *lead = *edata;
+ *edata = extent_split_impl(tsdn, pac, ehooks, *lead, leadsize,
+	    size + trailsize, /* holding_core_locks */ true);
+ if (*edata == NULL) {
*to_leak = *lead;
*lead = NULL;
return extent_split_interior_error;
@@ -1014,36 +507,18 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena,
/* Split the trail. */
if (trailsize != 0) {
- *trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent,
- esize, szind, slab, trailsize, SC_NSIZES, false,
- growing_retained);
+ assert(!edata_guarded_get(*edata));
+ *trail = extent_split_impl(tsdn, pac, ehooks, *edata, size,
+ trailsize, /* holding_core_locks */ true);
if (*trail == NULL) {
- *to_leak = *extent;
+ *to_leak = *edata;
*to_salvage = *lead;
*lead = NULL;
- *extent = NULL;
+ *edata = NULL;
return extent_split_interior_error;
}
}
- if (leadsize == 0 && trailsize == 0) {
- /*
- * Splitting causes szind to be set as a side effect, but no
- * splitting occurred.
- */
- extent_szind_set(*extent, szind);
- if (szind != SC_NSIZES) {
- rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)extent_addr_get(*extent), szind, slab);
- if (slab && extent_size_get(*extent) > PAGE) {
- rtree_szind_slab_update(tsdn, &extents_rtree,
- rtree_ctx,
- (uintptr_t)extent_past_get(*extent) -
- (uintptr_t)PAGE, szind, slab);
- }
- }
- }
-
return extent_split_interior_ok;
}
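
extent_split_interior() carves an aligned chunk out of a larger extent: the lead is whatever is needed to reach the next alignment boundary, the trail is whatever is left after the request. A standalone sketch of that arithmetic; ALIGN_UP is a local stand-in for ALIGNMENT_CEILING, and for simplicity the raw alignment is used where the real code first rounds it up to a page multiple.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Round x up to a multiple of align (a power of two). */
#define ALIGN_UP(x, align) (((x) + (align) - 1) & ~((uintptr_t)(align) - 1))

/* Given an extent [base, base + total) and a request of `size` bytes at
 * `align`, compute the lead and trail that the split would produce.
 * Returns 0 on success, -1 if the extent cannot fit the request
 * (extent_split_interior_cant_alloc). */
static int
split_interior_sizes(uintptr_t base, size_t total, size_t size, size_t align,
    size_t *leadsize, size_t *trailsize) {
	*leadsize = ALIGN_UP(base, align) - base;
	if (total < *leadsize + size) {
		return -1;
	}
	*trailsize = total - *leadsize - size;
	return 0;
}

int
main(void) {
	size_t lead, trail;
	/* A 10-page extent at 0x7000, asked for 4 pages aligned to 0x8000. */
	int err = split_interior_sizes(0x7000, 10 * 0x1000, 4 * 0x1000,
	    0x8000, &lead, &trail);
	assert(!err);
	printf("lead=0x%zx trail=0x%zx\n", lead, trail);
	return 0;
}
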
@@ -1051,42 +526,43 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena,
* This fulfills the indicated allocation request out of the given extent (which
* the caller should have ensured was big enough). If there's any unused space
* before or after the resulting allocation, that space is given its own extent
- * and put back into extents.
+ * and put back into ecache.
*/
-static extent_t *
-extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
- void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
- szind_t szind, extent_t *extent, bool growing_retained) {
- extent_t *lead;
- extent_t *trail;
- extent_t *to_leak;
- extent_t *to_salvage;
+static edata_t *
+extent_recycle_split(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
+ edata_t *edata, bool growing_retained) {
+ assert(!edata_guarded_get(edata) || size == edata_size_get(edata));
+ malloc_mutex_assert_owner(tsdn, &ecache->mtx);
+
+ edata_t *lead;
+ edata_t *trail;
+ edata_t *to_leak JEMALLOC_CC_SILENCE_INIT(NULL);
+ edata_t *to_salvage JEMALLOC_CC_SILENCE_INIT(NULL);
extent_split_interior_result_t result = extent_split_interior(
- tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
- &to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind,
- growing_retained);
+ tsdn, pac, ehooks, &edata, &lead, &trail, &to_leak, &to_salvage,
+ expand_edata, size, alignment);
if (!maps_coalesce && result != extent_split_interior_ok
&& !opt_retain) {
/*
* Split isn't supported (implies Windows w/o retain). Avoid
- * leaking the extents.
+ * leaking the extent.
*/
assert(to_leak != NULL && lead == NULL && trail == NULL);
- extent_deactivate(tsdn, arena, extents, to_leak);
+ extent_deactivate_locked(tsdn, pac, ecache, to_leak);
return NULL;
}
if (result == extent_split_interior_ok) {
if (lead != NULL) {
- extent_deactivate(tsdn, arena, extents, lead);
+ extent_deactivate_locked(tsdn, pac, ecache, lead);
}
if (trail != NULL) {
- extent_deactivate(tsdn, arena, extents, trail);
+ extent_deactivate_locked(tsdn, pac, ecache, trail);
}
- return extent;
+ return edata;
} else {
/*
* We should have picked an extent that was large enough to
@@ -1094,294 +570,144 @@ extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
*/
assert(result == extent_split_interior_error);
if (to_salvage != NULL) {
- extent_deregister(tsdn, to_salvage);
+ extent_deregister(tsdn, pac, to_salvage);
}
if (to_leak != NULL) {
- void *leak = extent_base_get(to_leak);
- extent_deregister_no_gdump_sub(tsdn, to_leak);
- extents_abandon_vm(tsdn, arena, r_extent_hooks, extents,
- to_leak, growing_retained);
- assert(extent_lock_from_addr(tsdn, rtree_ctx, leak,
- false) == NULL);
+ extent_deregister_no_gdump_sub(tsdn, pac, to_leak);
+ /*
+ * May go down the purge path (which assume no ecache
+ * locks). Only happens with OOM caused split failures.
+ */
+ malloc_mutex_unlock(tsdn, &ecache->mtx);
+ extents_abandon_vm(tsdn, pac, ehooks, ecache, to_leak,
+ growing_retained);
+ malloc_mutex_lock(tsdn, &ecache->mtx);
}
return NULL;
}
unreachable();
}
-static bool
-extent_need_manual_zero(arena_t *arena) {
- /*
- * Need to manually zero the extent on repopulating if either; 1) non
- * default extent hooks installed (in which case the purge semantics may
- * change); or 2) transparent huge pages enabled.
- */
- return (!arena_has_default_hooks(arena) ||
- (opt_thp == thp_mode_always));
-}
-
/*
* Tries to satisfy the given allocation request by reusing one of the extents
- * in the given extents_t.
+ * in the given ecache_t.
*/
-static extent_t *
-extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
- extents_t *extents, void *new_addr, size_t size, size_t pad,
- size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit,
- bool growing_retained) {
+static edata_t *
+extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
+ edata_t *expand_edata, size_t size, size_t alignment, bool zero,
+ bool *commit, bool growing_retained, bool guarded) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
- assert(new_addr == NULL || !slab);
- assert(pad == 0 || !slab);
- assert(!*zero || !slab);
+ assert(!guarded || expand_edata == NULL);
+ assert(!guarded || alignment <= PAGE);
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+ malloc_mutex_lock(tsdn, &ecache->mtx);
- extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks,
- rtree_ctx, extents, new_addr, size, pad, alignment, slab,
- growing_retained);
- if (extent == NULL) {
+ edata_t *edata = extent_recycle_extract(tsdn, pac, ehooks, ecache,
+ expand_edata, size, alignment, guarded);
+ if (edata == NULL) {
+ malloc_mutex_unlock(tsdn, &ecache->mtx);
return NULL;
}
- extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx,
- extents, new_addr, size, pad, alignment, slab, szind, extent,
- growing_retained);
- if (extent == NULL) {
+ edata = extent_recycle_split(tsdn, pac, ehooks, ecache, expand_edata,
+ size, alignment, edata, growing_retained);
+ malloc_mutex_unlock(tsdn, &ecache->mtx);
+ if (edata == NULL) {
return NULL;
}
- if (*commit && !extent_committed_get(extent)) {
- if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent,
- 0, extent_size_get(extent), growing_retained)) {
- extent_record(tsdn, arena, r_extent_hooks, extents,
- extent, growing_retained);
- return NULL;
- }
- if (!extent_need_manual_zero(arena)) {
- extent_zeroed_set(extent, true);
- }
- }
-
- if (extent_committed_get(extent)) {
- *commit = true;
- }
- if (extent_zeroed_get(extent)) {
- *zero = true;
- }
-
- if (pad != 0) {
- extent_addr_randomize(tsdn, extent, alignment);
- }
- assert(extent_state_get(extent) == extent_state_active);
- if (slab) {
- extent_slab_set(extent, slab);
- extent_interior_register(tsdn, rtree_ctx, extent, szind);
- }
-
- if (*zero) {
- void *addr = extent_base_get(extent);
- if (!extent_zeroed_get(extent)) {
- size_t size = extent_size_get(extent);
- if (extent_need_manual_zero(arena) ||
- pages_purge_forced(addr, size)) {
- memset(addr, 0, size);
- }
- } else if (config_debug) {
- size_t *p = (size_t *)(uintptr_t)addr;
- /* Check the first page only. */
- for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
- assert(p[i] == 0);
- }
- }
- }
- return extent;
-}
-
-/*
- * If the caller specifies (!*zero), it is still possible to receive zeroed
- * memory, in which case *zero is toggled to true. arena_extent_alloc() takes
- * advantage of this to avoid demanding zeroed extents, but taking advantage of
- * them if they are returned.
- */
-static void *
-extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
- size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
- void *ret;
-
- assert(size != 0);
- assert(alignment != 0);
-
- /* "primary" dss. */
- if (have_dss && dss_prec == dss_prec_primary && (ret =
- extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
- commit)) != NULL) {
- return ret;
- }
- /* mmap. */
- if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
- != NULL) {
- return ret;
- }
- /* "secondary" dss. */
- if (have_dss && dss_prec == dss_prec_secondary && (ret =
- extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
- commit)) != NULL) {
- return ret;
- }
-
- /* All strategies for allocation failed. */
- return NULL;
-}
-
-static void *
-extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
- size_t size, size_t alignment, bool *zero, bool *commit) {
- void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
- commit, (dss_prec_t)atomic_load_u(&arena->dss_prec,
- ATOMIC_RELAXED));
- if (have_madvise_huge && ret) {
- pages_set_thp_state(ret, size);
+ assert(edata_state_get(edata) == extent_state_active);
+ if (extent_commit_zero(tsdn, ehooks, edata, *commit, zero,
+ growing_retained)) {
+ extent_record(tsdn, pac, ehooks, ecache, edata);
+ return NULL;
}
- return ret;
-}
-
-static void *
-extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
- size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
- tsdn_t *tsdn;
- arena_t *arena;
-
- tsdn = tsdn_fetch();
- arena = arena_get(tsdn, arena_ind, false);
- /*
- * The arena we're allocating on behalf of must have been initialized
- * already.
- */
- assert(arena != NULL);
-
- return extent_alloc_default_impl(tsdn, arena, new_addr, size,
- ALIGNMENT_CEILING(alignment, PAGE), zero, commit);
-}
-
-static void
-extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) {
- tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
- if (arena == arena_get(tsd_tsdn(tsd), 0, false)) {
+ if (edata_committed_get(edata)) {
/*
- * The only legitimate case of customized extent hooks for a0 is
- * hooks with no allocation activities. One such example is to
- * place metadata on pre-allocated resources such as huge pages.
- * In that case, rely on reentrancy_level checks to catch
- * infinite recursions.
+		 * This reverses the purpose of the variable: above it was an
+		 * input parameter, and from here on it is an output parameter,
+		 * reporting whether the edata has actually been committed.
*/
- pre_reentrancy(tsd, NULL);
- } else {
- pre_reentrancy(tsd, arena);
+ *commit = true;
}
-}
-
-static void
-extent_hook_post_reentrancy(tsdn_t *tsdn) {
- tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
- post_reentrancy(tsd);
+ return edata;
}
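
The comment above describes the in/out convention for *commit: on entry the flag asks for committed memory, on exit it reports whether the returned memory actually is committed. A minimal sketch of that convention follows; backing_alloc() and recycle_sketch() are hypothetical names used only to keep the example self-contained.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Pretend backing store: memory may come back already committed. */
static void *
backing_alloc(size_t size, bool *committed) {
	*committed = true;	/* e.g. reused dirty memory is still mapped. */
	return malloc(size);
}

/* On entry, *commit means "the caller wants committed memory"; on return it
 * means "the memory handed back is committed". */
static void *
recycle_sketch(size_t size, bool *commit) {
	bool committed;
	void *p = backing_alloc(size, &committed);
	if (p == NULL) {
		return NULL;
	}
	if (*commit && !committed) {
		committed = true;	/* Caller asked; commit it here. */
	}
	if (committed) {
		*commit = true;		/* Report the actual state back. */
	}
	return p;
}

int
main(void) {
	bool commit = false;	/* Caller did not insist on committed memory. */
	void *p = recycle_sketch(4096, &commit);
	printf("committed on return: %s\n", commit ? "yes" : "no");
	free(p);
	return 0;
}
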
/*
* If virtual memory is retained, create increasingly larger extents from which
* to split requested extents in order to limit the total number of disjoint
- * virtual memory ranges retained by each arena.
+ * virtual memory ranges retained by each shard.
*/
-static extent_t *
-extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment,
- bool slab, szind_t szind, bool *zero, bool *commit) {
- malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx);
- assert(pad == 0 || !slab);
- assert(!*zero || !slab);
-
- size_t esize = size + pad;
- size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE;
+static edata_t *
+extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ size_t size, size_t alignment, bool zero, bool *commit) {
+ malloc_mutex_assert_owner(tsdn, &pac->grow_mtx);
+
+ size_t alloc_size_min = size + PAGE_CEILING(alignment) - PAGE;
/* Beware size_t wrap-around. */
- if (alloc_size_min < esize) {
+ if (alloc_size_min < size) {
goto label_err;
}
/*
* Find the next extent size in the series that would be large enough to
* satisfy this request.
*/
- pszind_t egn_skip = 0;
- size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
- while (alloc_size < alloc_size_min) {
- egn_skip++;
- if (arena->extent_grow_next + egn_skip >=
- sz_psz2ind(SC_LARGE_MAXCLASS)) {
- /* Outside legal range. */
- goto label_err;
- }
- alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
+ size_t alloc_size;
+ pszind_t exp_grow_skip;
+ bool err = exp_grow_size_prepare(&pac->exp_grow, alloc_size_min,
+ &alloc_size, &exp_grow_skip);
+ if (err) {
+ goto label_err;
}
- extent_t *extent = extent_alloc(tsdn, arena);
- if (extent == NULL) {
+ edata_t *edata = edata_cache_get(tsdn, pac->edata_cache);
+ if (edata == NULL) {
goto label_err;
}
bool zeroed = false;
bool committed = false;
- void *ptr;
- if (*r_extent_hooks == &extent_hooks_default) {
- ptr = extent_alloc_default_impl(tsdn, arena, NULL,
- alloc_size, PAGE, &zeroed, &committed);
- } else {
- extent_hook_pre_reentrancy(tsdn, arena);
- ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL,
- alloc_size, PAGE, &zeroed, &committed,
- arena_ind_get(arena));
- extent_hook_post_reentrancy(tsdn);
- }
+ void *ptr = ehooks_alloc(tsdn, ehooks, NULL, alloc_size, PAGE, &zeroed,
+ &committed);
- extent_init(extent, arena, ptr, alloc_size, false, SC_NSIZES,
- arena_extent_sn_next(arena), extent_state_active, zeroed,
- committed, true, EXTENT_IS_HEAD);
if (ptr == NULL) {
- extent_dalloc(tsdn, arena, extent);
+ edata_cache_put(tsdn, pac->edata_cache, edata);
goto label_err;
}
- if (extent_register_no_gdump_add(tsdn, extent)) {
- extent_dalloc(tsdn, arena, extent);
+ edata_init(edata, ecache_ind_get(&pac->ecache_retained), ptr,
+ alloc_size, false, SC_NSIZES, extent_sn_next(pac),
+ extent_state_active, zeroed, committed, EXTENT_PAI_PAC,
+ EXTENT_IS_HEAD);
+
+ if (extent_register_no_gdump_add(tsdn, pac, edata)) {
+ edata_cache_put(tsdn, pac->edata_cache, edata);
goto label_err;
}
- if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
- *zero = true;
- }
- if (extent_committed_get(extent)) {
+ if (edata_committed_get(edata)) {
*commit = true;
}
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+ edata_t *lead;
+ edata_t *trail;
+ edata_t *to_leak JEMALLOC_CC_SILENCE_INIT(NULL);
+ edata_t *to_salvage JEMALLOC_CC_SILENCE_INIT(NULL);
- extent_t *lead;
- extent_t *trail;
- extent_t *to_leak;
- extent_t *to_salvage;
- extent_split_interior_result_t result = extent_split_interior(
- tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
- &to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind,
- true);
+ extent_split_interior_result_t result = extent_split_interior(tsdn,
+ pac, ehooks, &edata, &lead, &trail, &to_leak, &to_salvage, NULL,
+ size, alignment);
if (result == extent_split_interior_ok) {
if (lead != NULL) {
- extent_record(tsdn, arena, r_extent_hooks,
- &arena->extents_retained, lead, true);
+ extent_record(tsdn, pac, ehooks, &pac->ecache_retained,
+ lead);
}
if (trail != NULL) {
- extent_record(tsdn, arena, r_extent_hooks,
- &arena->extents_retained, trail, true);
+ extent_record(tsdn, pac, ehooks, &pac->ecache_retained,
+ trail);
}
} else {
/*
@@ -1393,26 +719,32 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
if (config_prof) {
extent_gdump_add(tsdn, to_salvage);
}
- extent_record(tsdn, arena, r_extent_hooks,
- &arena->extents_retained, to_salvage, true);
+ extent_record(tsdn, pac, ehooks, &pac->ecache_retained,
+ to_salvage);
}
if (to_leak != NULL) {
- extent_deregister_no_gdump_sub(tsdn, to_leak);
- extents_abandon_vm(tsdn, arena, r_extent_hooks,
- &arena->extents_retained, to_leak, true);
+ extent_deregister_no_gdump_sub(tsdn, pac, to_leak);
+ extents_abandon_vm(tsdn, pac, ehooks,
+ &pac->ecache_retained, to_leak, true);
}
goto label_err;
}
- if (*commit && !extent_committed_get(extent)) {
- if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0,
- extent_size_get(extent), true)) {
- extent_record(tsdn, arena, r_extent_hooks,
- &arena->extents_retained, extent, true);
+ if (*commit && !edata_committed_get(edata)) {
+ if (extent_commit_impl(tsdn, ehooks, edata, 0,
+ edata_size_get(edata), true)) {
+ extent_record(tsdn, pac, ehooks,
+ &pac->ecache_retained, edata);
goto label_err;
}
- if (!extent_need_manual_zero(arena)) {
- extent_zeroed_set(extent, true);
+ /* A successful commit should return zeroed memory. */
+ if (config_debug) {
+ void *addr = edata_addr_get(edata);
+ size_t *p = (size_t *)(uintptr_t)addr;
+ /* Check the first page only. */
+ for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
+ assert(p[i] == 0);
+ }
}
}
@@ -1420,187 +752,74 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
* Increment extent_grow_next if doing so wouldn't exceed the allowed
* range.
*/
- if (arena->extent_grow_next + egn_skip + 1 <=
- arena->retain_grow_limit) {
- arena->extent_grow_next += egn_skip + 1;
- } else {
- arena->extent_grow_next = arena->retain_grow_limit;
- }
/* All opportunities for failure are past. */
- malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
+ exp_grow_size_commit(&pac->exp_grow, exp_grow_skip);
+ malloc_mutex_unlock(tsdn, &pac->grow_mtx);
if (config_prof) {
/* Adjust gdump stats now that extent is final size. */
- extent_gdump_add(tsdn, extent);
- }
- if (pad != 0) {
- extent_addr_randomize(tsdn, extent, alignment);
+ extent_gdump_add(tsdn, edata);
}
- if (slab) {
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
- &rtree_ctx_fallback);
-
- extent_slab_set(extent, true);
- extent_interior_register(tsdn, rtree_ctx, extent, szind);
- }
- if (*zero && !extent_zeroed_get(extent)) {
- void *addr = extent_base_get(extent);
- size_t size = extent_size_get(extent);
- if (extent_need_manual_zero(arena) ||
- pages_purge_forced(addr, size)) {
- memset(addr, 0, size);
- }
+ if (zero && !edata_zeroed_get(edata)) {
+ ehooks_zero(tsdn, ehooks, edata_base_get(edata),
+ edata_size_get(edata));
}
-
- return extent;
+ return edata;
label_err:
- malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
+ malloc_mutex_unlock(tsdn, &pac->grow_mtx);
return NULL;
}
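
extent_grow_retained() first computes the smallest mapping guaranteed to hold an aligned request (size + PAGE_CEILING(alignment) - PAGE), then picks the next size from a geometric series so the number of disjoint retained mappings stays small. A standalone sketch follows; it models the series as plain doubling, whereas the real code walks jemalloc's page-size-class table via exp_grow_size_prepare()/exp_grow_size_commit().

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define TOY_PAGE ((size_t)4096)
#define PAGE_CEIL(s) (((s) + TOY_PAGE - 1) & ~(TOY_PAGE - 1))

/* Minimum mapping that is guaranteed to contain `size` bytes at `alignment`:
 * a page-aligned start can miss the alignment boundary by at most
 * alignment - PAGE, so that much slack is always enough. */
static size_t
grow_alloc_size_min(size_t size, size_t alignment) {
	return size + PAGE_CEIL(alignment) - TOY_PAGE;
}

/* Toy growth series: keep doubling from the last grow size until the request
 * fits, then advance the series one step further for the next growth. */
static size_t
grow_next_size(size_t *grow_next, size_t alloc_size_min) {
	size_t alloc_size = *grow_next;
	while (alloc_size < alloc_size_min) {
		alloc_size *= 2;
	}
	*grow_next = alloc_size * 2;
	return alloc_size;
}

int
main(void) {
	size_t grow_next = 16 * TOY_PAGE;	/* 64 KiB starting point. */
	size_t min = grow_alloc_size_min(300 * 1024, 8 * TOY_PAGE);
	size_t got = grow_next_size(&grow_next, min);
	printf("need >= %zu, mapped %zu, next grow %zu\n", min, got,
	    grow_next);
	assert(got >= min);
	return 0;
}
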
-static extent_t *
-extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
- size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
+static edata_t *
+extent_alloc_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ edata_t *expand_edata, size_t size, size_t alignment, bool zero,
+ bool *commit, bool guarded) {
assert(size != 0);
assert(alignment != 0);
- malloc_mutex_lock(tsdn, &arena->extent_grow_mtx);
+ malloc_mutex_lock(tsdn, &pac->grow_mtx);
- extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks,
- &arena->extents_retained, new_addr, size, pad, alignment, slab,
- szind, zero, commit, true);
- if (extent != NULL) {
- malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
+ edata_t *edata = extent_recycle(tsdn, pac, ehooks,
+ &pac->ecache_retained, expand_edata, size, alignment, zero, commit,
+ /* growing_retained */ true, guarded);
+ if (edata != NULL) {
+ malloc_mutex_unlock(tsdn, &pac->grow_mtx);
if (config_prof) {
- extent_gdump_add(tsdn, extent);
+ extent_gdump_add(tsdn, edata);
}
- } else if (opt_retain && new_addr == NULL) {
- extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size,
- pad, alignment, slab, szind, zero, commit);
- /* extent_grow_retained() always releases extent_grow_mtx. */
+ } else if (opt_retain && expand_edata == NULL && !guarded) {
+ edata = extent_grow_retained(tsdn, pac, ehooks, size,
+ alignment, zero, commit);
+ /* extent_grow_retained() always releases pac->grow_mtx. */
} else {
- malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
- }
- malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx);
-
- return extent;
-}
-
-static extent_t *
-extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
- size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
- size_t esize = size + pad;
- extent_t *extent = extent_alloc(tsdn, arena);
- if (extent == NULL) {
- return NULL;
- }
- void *addr;
- size_t palignment = ALIGNMENT_CEILING(alignment, PAGE);
- if (*r_extent_hooks == &extent_hooks_default) {
- /* Call directly to propagate tsdn. */
- addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize,
- palignment, zero, commit);
- } else {
- extent_hook_pre_reentrancy(tsdn, arena);
- addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr,
- esize, palignment, zero, commit, arena_ind_get(arena));
- extent_hook_post_reentrancy(tsdn);
- }
- if (addr == NULL) {
- extent_dalloc(tsdn, arena, extent);
- return NULL;
- }
- extent_init(extent, arena, addr, esize, slab, szind,
- arena_extent_sn_next(arena), extent_state_active, *zero, *commit,
- true, EXTENT_NOT_HEAD);
- if (pad != 0) {
- extent_addr_randomize(tsdn, extent, alignment);
- }
- if (extent_register(tsdn, extent)) {
- extent_dalloc(tsdn, arena, extent);
- return NULL;
- }
-
- return extent;
-}
-
-extent_t *
-extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
- size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
-
- extent_hooks_assure_initialized(arena, r_extent_hooks);
-
- extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks,
- new_addr, size, pad, alignment, slab, szind, zero, commit);
- if (extent == NULL) {
- if (opt_retain && new_addr != NULL) {
- /*
- * When retain is enabled and new_addr is set, we do not
- * attempt extent_alloc_wrapper_hard which does mmap
- * that is very unlikely to succeed (unless it happens
- * to be at the end).
- */
- return NULL;
- }
- extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
- new_addr, size, pad, alignment, slab, szind, zero, commit);
- }
-
- assert(extent == NULL || extent_dumpable_get(extent));
- return extent;
-}
-
-static bool
-extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner,
- const extent_t *outer) {
- assert(extent_arena_get(inner) == arena);
- if (extent_arena_get(outer) != arena) {
- return false;
- }
-
- assert(extent_state_get(inner) == extent_state_active);
- if (extent_state_get(outer) != extents->state) {
- return false;
- }
-
- if (extent_committed_get(inner) != extent_committed_get(outer)) {
- return false;
+ malloc_mutex_unlock(tsdn, &pac->grow_mtx);
}
+ malloc_mutex_assert_not_owner(tsdn, &pac->grow_mtx);
- return true;
+ return edata;
}
static bool
-extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
- extents_t *extents, extent_t *inner, extent_t *outer, bool forward,
- bool growing_retained) {
- assert(extent_can_coalesce(arena, extents, inner, outer));
-
- extent_activate_locked(tsdn, arena, extents, outer);
-
- malloc_mutex_unlock(tsdn, &extents->mtx);
- bool err = extent_merge_impl(tsdn, arena, r_extent_hooks,
- forward ? inner : outer, forward ? outer : inner, growing_retained);
- malloc_mutex_lock(tsdn, &extents->mtx);
-
+extent_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
+ edata_t *inner, edata_t *outer, bool forward) {
+ extent_assert_can_coalesce(inner, outer);
+ eset_remove(&ecache->eset, outer);
+
+ bool err = extent_merge_impl(tsdn, pac, ehooks,
+ forward ? inner : outer, forward ? outer : inner,
+ /* holding_core_locks */ true);
if (err) {
- extent_deactivate_locked(tsdn, arena, extents, outer);
+ extent_deactivate_check_state_locked(tsdn, pac, ecache, outer,
+ extent_state_merging);
}
return err;
}
-static extent_t *
-extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
- extent_t *extent, bool *coalesced, bool growing_retained,
- bool inactive_only) {
+static edata_t *
+extent_try_coalesce_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ ecache_t *ecache, edata_t *edata, bool *coalesced) {
+ assert(!edata_guarded_get(edata));
/*
* We avoid checking / locking inactive neighbors for large size
* classes, since they are eagerly coalesced on deallocation which can
@@ -1615,467 +834,333 @@ extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena,
again = false;
/* Try to coalesce forward. */
- extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx,
- extent_past_get(extent), inactive_only);
+ edata_t *next = emap_try_acquire_edata_neighbor(tsdn, pac->emap,
+ edata, EXTENT_PAI_PAC, ecache->state, /* forward */ true);
if (next != NULL) {
- /*
- * extents->mtx only protects against races for
- * like-state extents, so call extent_can_coalesce()
- * before releasing next's pool lock.
- */
- bool can_coalesce = extent_can_coalesce(arena, extents,
- extent, next);
-
- extent_unlock(tsdn, next);
-
- if (can_coalesce && !extent_coalesce(tsdn, arena,
- r_extent_hooks, extents, extent, next, true,
- growing_retained)) {
- if (extents->delay_coalesce) {
+ if (!extent_coalesce(tsdn, pac, ehooks, ecache, edata,
+ next, true)) {
+ if (ecache->delay_coalesce) {
/* Do minimal coalescing. */
*coalesced = true;
- return extent;
+ return edata;
}
again = true;
}
}
/* Try to coalesce backward. */
- extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx,
- extent_before_get(extent), inactive_only);
+ edata_t *prev = emap_try_acquire_edata_neighbor(tsdn, pac->emap,
+ edata, EXTENT_PAI_PAC, ecache->state, /* forward */ false);
if (prev != NULL) {
- bool can_coalesce = extent_can_coalesce(arena, extents,
- extent, prev);
- extent_unlock(tsdn, prev);
-
- if (can_coalesce && !extent_coalesce(tsdn, arena,
- r_extent_hooks, extents, extent, prev, false,
- growing_retained)) {
- extent = prev;
- if (extents->delay_coalesce) {
+ if (!extent_coalesce(tsdn, pac, ehooks, ecache, edata,
+ prev, false)) {
+ edata = prev;
+ if (ecache->delay_coalesce) {
/* Do minimal coalescing. */
*coalesced = true;
- return extent;
+ return edata;
}
again = true;
}
}
} while (again);
- if (extents->delay_coalesce) {
+ if (ecache->delay_coalesce) {
*coalesced = false;
}
- return extent;
+ return edata;
}
-static extent_t *
-extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
- extent_t *extent, bool *coalesced, bool growing_retained) {
- return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx,
- extents, extent, coalesced, growing_retained, false);
+static edata_t *
+extent_try_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ ecache_t *ecache, edata_t *edata, bool *coalesced) {
+ return extent_try_coalesce_impl(tsdn, pac, ehooks, ecache, edata,
+ coalesced);
}
-static extent_t *
-extent_try_coalesce_large(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
- extent_t *extent, bool *coalesced, bool growing_retained) {
- return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx,
- extents, extent, coalesced, growing_retained, true);
+static edata_t *
+extent_try_coalesce_large(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ ecache_t *ecache, edata_t *edata, bool *coalesced) {
+ return extent_try_coalesce_impl(tsdn, pac, ehooks, ecache, edata,
+ coalesced);
+}
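As an illustrative aside (a hand-written sketch, not part of this patch): the neighbor handed back by emap_try_acquire_edata_neighbor() is address-adjacent to the extent being coalesced, which is what makes the merge in extent_coalesce() meaningful. The helper below only restates that adjacency condition; its name and existence are hypothetical.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * A forward neighbor is mergeable only if it begins exactly where the inner
 * extent ends; backward coalescing is the mirror case, with the previous
 * extent ending at the inner extent's base.
 */
static bool
is_forward_neighbor(const void *inner_base, size_t inner_size,
    const void *next_base) {
	return (const void *)((uintptr_t)inner_base + inner_size) == next_base;
}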
+
+/* Purge a single extent to retained / unmapped directly. */
+static void
+extent_maximally_purge(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ edata_t *edata) {
+ size_t extent_size = edata_size_get(edata);
+ extent_dalloc_wrapper(tsdn, pac, ehooks, edata);
+ if (config_stats) {
+ /* Update stats accordingly. */
+ LOCKEDINT_MTX_LOCK(tsdn, *pac->stats_mtx);
+ locked_inc_u64(tsdn,
+ LOCKEDINT_MTX(*pac->stats_mtx),
+ &pac->stats->decay_dirty.nmadvise, 1);
+ locked_inc_u64(tsdn,
+ LOCKEDINT_MTX(*pac->stats_mtx),
+ &pac->stats->decay_dirty.purged,
+ extent_size >> LG_PAGE);
+ LOCKEDINT_MTX_UNLOCK(tsdn, *pac->stats_mtx);
+ atomic_fetch_sub_zu(&pac->stats->pac_mapped, extent_size,
+ ATOMIC_RELAXED);
+ }
}
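Worked example (assuming 4 KiB pages, i.e. LG_PAGE == 12): maximally purging a 2 MiB extent increments decay_dirty.nmadvise by 1, adds 2 MiB >> 12 = 512 pages to decay_dirty.purged, and subtracts 2 MiB from pac_mapped.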
/*
 * Does the metadata management portion of putting an unused extent into the
- * given extents_t (coalesces, deregisters slab interiors, the heap operations).
+ * given ecache_t (coalesces and inserts into the eset).
*/
-static void
-extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
- extents_t *extents, extent_t *extent, bool growing_retained) {
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
- assert((extents_state_get(extents) != extent_state_dirty &&
- extents_state_get(extents) != extent_state_muzzy) ||
- !extent_zeroed_get(extent));
-
- malloc_mutex_lock(tsdn, &extents->mtx);
- extent_hooks_assure_initialized(arena, r_extent_hooks);
-
- extent_szind_set(extent, SC_NSIZES);
- if (extent_slab_get(extent)) {
- extent_interior_deregister(tsdn, rtree_ctx, extent);
- extent_slab_set(extent, false);
- }
+void
+extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
+ edata_t *edata) {
+ assert((ecache->state != extent_state_dirty &&
+ ecache->state != extent_state_muzzy) ||
+ !edata_zeroed_get(edata));
+
+ malloc_mutex_lock(tsdn, &ecache->mtx);
- assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)extent_base_get(extent), true) == extent);
+ emap_assert_mapped(tsdn, pac->emap, edata);
- if (!extents->delay_coalesce) {
- extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
- rtree_ctx, extents, extent, NULL, growing_retained);
- } else if (extent_size_get(extent) >= SC_LARGE_MINCLASS) {
- assert(extents == &arena->extents_dirty);
+ if (edata_guarded_get(edata)) {
+ goto label_skip_coalesce;
+ }
+ if (!ecache->delay_coalesce) {
+ edata = extent_try_coalesce(tsdn, pac, ehooks, ecache, edata,
+ NULL);
+ } else if (edata_size_get(edata) >= SC_LARGE_MINCLASS) {
+ assert(ecache == &pac->ecache_dirty);
/* Always coalesce large extents eagerly. */
bool coalesced;
do {
- assert(extent_state_get(extent) == extent_state_active);
- extent = extent_try_coalesce_large(tsdn, arena,
- r_extent_hooks, rtree_ctx, extents, extent,
- &coalesced, growing_retained);
+ assert(edata_state_get(edata) == extent_state_active);
+ edata = extent_try_coalesce_large(tsdn, pac, ehooks,
+ ecache, edata, &coalesced);
} while (coalesced);
- if (extent_size_get(extent) >= oversize_threshold) {
+ if (edata_size_get(edata) >=
+ atomic_load_zu(&pac->oversize_threshold, ATOMIC_RELAXED)
+ && extent_may_force_decay(pac)) {
/* Shortcut to purge the oversize extent eagerly. */
- malloc_mutex_unlock(tsdn, &extents->mtx);
- arena_decay_extent(tsdn, arena, r_extent_hooks, extent);
+ malloc_mutex_unlock(tsdn, &ecache->mtx);
+ extent_maximally_purge(tsdn, pac, ehooks, edata);
return;
}
}
- extent_deactivate_locked(tsdn, arena, extents, extent);
+label_skip_coalesce:
+ extent_deactivate_locked(tsdn, pac, ecache, edata);
- malloc_mutex_unlock(tsdn, &extents->mtx);
+ malloc_mutex_unlock(tsdn, &ecache->mtx);
}
void
-extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
- extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
-
+extent_dalloc_gap(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ edata_t *edata) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
- if (extent_register(tsdn, extent)) {
- extent_dalloc(tsdn, arena, extent);
+ if (extent_register(tsdn, pac, edata)) {
+ edata_cache_put(tsdn, pac->edata_cache, edata);
return;
}
- extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent);
+ extent_dalloc_wrapper(tsdn, pac, ehooks, edata);
}
static bool
-extent_may_dalloc(void) {
- /* With retain enabled, the default dalloc always fails. */
- return !opt_retain;
-}
-
-static bool
-extent_dalloc_default_impl(void *addr, size_t size) {
- if (!have_dss || !extent_in_dss(addr)) {
- return extent_dalloc_mmap(addr, size);
- }
- return true;
-}
-
-static bool
-extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
- bool committed, unsigned arena_ind) {
- return extent_dalloc_default_impl(addr, size);
-}
-
-static bool
-extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent) {
+extent_dalloc_wrapper_try(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ edata_t *edata) {
bool err;
- assert(extent_base_get(extent) != NULL);
- assert(extent_size_get(extent) != 0);
+ assert(edata_base_get(edata) != NULL);
+ assert(edata_size_get(edata) != 0);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
- extent_addr_set(extent, extent_base_get(extent));
+ edata_addr_set(edata, edata_base_get(edata));
- extent_hooks_assure_initialized(arena, r_extent_hooks);
/* Try to deallocate. */
- if (*r_extent_hooks == &extent_hooks_default) {
- /* Call directly to propagate tsdn. */
- err = extent_dalloc_default_impl(extent_base_get(extent),
- extent_size_get(extent));
- } else {
- extent_hook_pre_reentrancy(tsdn, arena);
- err = ((*r_extent_hooks)->dalloc == NULL ||
- (*r_extent_hooks)->dalloc(*r_extent_hooks,
- extent_base_get(extent), extent_size_get(extent),
- extent_committed_get(extent), arena_ind_get(arena)));
- extent_hook_post_reentrancy(tsdn);
- }
+ err = ehooks_dalloc(tsdn, ehooks, edata_base_get(edata),
+ edata_size_get(edata), edata_committed_get(edata));
if (!err) {
- extent_dalloc(tsdn, arena, extent);
+ edata_cache_put(tsdn, pac->edata_cache, edata);
}
return err;
}
+edata_t *
+extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ void *new_addr, size_t size, size_t alignment, bool zero, bool *commit,
+ bool growing_retained) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, growing_retained ? 1 : 0);
+
+ edata_t *edata = edata_cache_get(tsdn, pac->edata_cache);
+ if (edata == NULL) {
+ return NULL;
+ }
+ size_t palignment = ALIGNMENT_CEILING(alignment, PAGE);
+ void *addr = ehooks_alloc(tsdn, ehooks, new_addr, size, palignment,
+ &zero, commit);
+ if (addr == NULL) {
+ edata_cache_put(tsdn, pac->edata_cache, edata);
+ return NULL;
+ }
+ edata_init(edata, ecache_ind_get(&pac->ecache_dirty), addr,
+ size, /* slab */ false, SC_NSIZES, extent_sn_next(pac),
+ extent_state_active, zero, *commit, EXTENT_PAI_PAC,
+ opt_retain ? EXTENT_IS_HEAD : EXTENT_NOT_HEAD);
+ /*
+ * Retained memory is not counted towards gdump. gdump should be updated
+ * only when an extent is allocated as a separate mapping, i.e. when
+ * growing_retained is false.
+ */
+ bool gdump_add = !growing_retained;
+ if (extent_register_impl(tsdn, pac, edata, gdump_add)) {
+ edata_cache_put(tsdn, pac->edata_cache, edata);
+ return NULL;
+ }
+
+ return edata;
+}
+
void
-extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent) {
- assert(extent_dumpable_get(extent));
+extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ edata_t *edata) {
+ assert(edata_pai_get(edata) == EXTENT_PAI_PAC);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
/* Avoid calling the default extent_dalloc unless have to. */
- if (*r_extent_hooks != &extent_hooks_default || extent_may_dalloc()) {
+ if (!ehooks_dalloc_will_fail(ehooks)) {
+ /* Remove guard pages for dalloc / unmap. */
+ if (edata_guarded_get(edata)) {
+ assert(ehooks_are_default(ehooks));
+ san_unguard_pages_two_sided(tsdn, ehooks, edata,
+ pac->emap);
+ }
/*
* Deregister first to avoid a race with other allocating
* threads, and reregister if deallocation fails.
*/
- extent_deregister(tsdn, extent);
- if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks,
- extent)) {
+ extent_deregister(tsdn, pac, edata);
+ if (!extent_dalloc_wrapper_try(tsdn, pac, ehooks, edata)) {
return;
}
- extent_reregister(tsdn, extent);
+ extent_reregister(tsdn, pac, edata);
}
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_pre_reentrancy(tsdn, arena);
- }
/* Try to decommit; purge if that fails. */
bool zeroed;
- if (!extent_committed_get(extent)) {
+ if (!edata_committed_get(edata)) {
zeroed = true;
- } else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent,
- 0, extent_size_get(extent))) {
+ } else if (!extent_decommit_wrapper(tsdn, ehooks, edata, 0,
+ edata_size_get(edata))) {
zeroed = true;
- } else if ((*r_extent_hooks)->purge_forced != NULL &&
- !(*r_extent_hooks)->purge_forced(*r_extent_hooks,
- extent_base_get(extent), extent_size_get(extent), 0,
- extent_size_get(extent), arena_ind_get(arena))) {
+ } else if (!ehooks_purge_forced(tsdn, ehooks, edata_base_get(edata),
+ edata_size_get(edata), 0, edata_size_get(edata))) {
zeroed = true;
- } else if (extent_state_get(extent) == extent_state_muzzy ||
- ((*r_extent_hooks)->purge_lazy != NULL &&
- !(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
- extent_base_get(extent), extent_size_get(extent), 0,
- extent_size_get(extent), arena_ind_get(arena)))) {
+ } else if (edata_state_get(edata) == extent_state_muzzy ||
+ !ehooks_purge_lazy(tsdn, ehooks, edata_base_get(edata),
+ edata_size_get(edata), 0, edata_size_get(edata))) {
zeroed = false;
} else {
zeroed = false;
}
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_post_reentrancy(tsdn);
- }
- extent_zeroed_set(extent, zeroed);
+ edata_zeroed_set(edata, zeroed);
if (config_prof) {
- extent_gdump_sub(tsdn, extent);
+ extent_gdump_sub(tsdn, edata);
}
- extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained,
- extent, false);
-}
-
-static void
-extent_destroy_default_impl(void *addr, size_t size) {
- if (!have_dss || !extent_in_dss(addr)) {
- pages_unmap(addr, size);
- }
-}
-
-static void
-extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
- bool committed, unsigned arena_ind) {
- extent_destroy_default_impl(addr, size);
+ extent_record(tsdn, pac, ehooks, &pac->ecache_retained, edata);
}
void
-extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent) {
- assert(extent_base_get(extent) != NULL);
- assert(extent_size_get(extent) != 0);
+extent_destroy_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ edata_t *edata) {
+ assert(edata_base_get(edata) != NULL);
+ assert(edata_size_get(edata) != 0);
+ extent_state_t state = edata_state_get(edata);
+ assert(state == extent_state_retained || state == extent_state_active);
+ assert(emap_edata_is_acquired(tsdn, pac->emap, edata));
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
- /* Deregister first to avoid a race with other allocating threads. */
- extent_deregister(tsdn, extent);
-
- extent_addr_set(extent, extent_base_get(extent));
-
- extent_hooks_assure_initialized(arena, r_extent_hooks);
- /* Try to destroy; silently fail otherwise. */
- if (*r_extent_hooks == &extent_hooks_default) {
- /* Call directly to propagate tsdn. */
- extent_destroy_default_impl(extent_base_get(extent),
- extent_size_get(extent));
- } else if ((*r_extent_hooks)->destroy != NULL) {
- extent_hook_pre_reentrancy(tsdn, arena);
- (*r_extent_hooks)->destroy(*r_extent_hooks,
- extent_base_get(extent), extent_size_get(extent),
- extent_committed_get(extent), arena_ind_get(arena));
- extent_hook_post_reentrancy(tsdn);
+ if (edata_guarded_get(edata)) {
+ assert(opt_retain);
+ san_unguard_pages_pre_destroy(tsdn, ehooks, edata, pac->emap);
}
+ edata_addr_set(edata, edata_base_get(edata));
- extent_dalloc(tsdn, arena, extent);
-}
+ /* Try to destroy; silently fail otherwise. */
+ ehooks_destroy(tsdn, ehooks, edata_base_get(edata),
+ edata_size_get(edata), edata_committed_get(edata));
-static bool
-extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
- size_t offset, size_t length, unsigned arena_ind) {
- return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
- length);
+ edata_cache_put(tsdn, pac->edata_cache, edata);
}
static bool
-extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length, bool growing_retained) {
+extent_commit_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ size_t offset, size_t length, bool growing_retained) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
-
- extent_hooks_assure_initialized(arena, r_extent_hooks);
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_pre_reentrancy(tsdn, arena);
- }
- bool err = ((*r_extent_hooks)->commit == NULL ||
- (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
- extent_size_get(extent), offset, length, arena_ind_get(arena)));
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_post_reentrancy(tsdn);
- }
- extent_committed_set(extent, extent_committed_get(extent) || !err);
+ bool err = ehooks_commit(tsdn, ehooks, edata_base_get(edata),
+ edata_size_get(edata), offset, length);
+ edata_committed_set(edata, edata_committed_get(edata) || !err);
return err;
}
bool
-extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length) {
- return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset,
- length, false);
-}
-
-static bool
-extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
- size_t offset, size_t length, unsigned arena_ind) {
- return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
- length);
+extent_commit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ size_t offset, size_t length) {
+ return extent_commit_impl(tsdn, ehooks, edata, offset, length,
+ /* growing_retained */ false);
}
bool
-extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length) {
+extent_decommit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ size_t offset, size_t length) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
-
- extent_hooks_assure_initialized(arena, r_extent_hooks);
-
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_pre_reentrancy(tsdn, arena);
- }
- bool err = ((*r_extent_hooks)->decommit == NULL ||
- (*r_extent_hooks)->decommit(*r_extent_hooks,
- extent_base_get(extent), extent_size_get(extent), offset, length,
- arena_ind_get(arena)));
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_post_reentrancy(tsdn);
- }
- extent_committed_set(extent, extent_committed_get(extent) && err);
+ bool err = ehooks_decommit(tsdn, ehooks, edata_base_get(edata),
+ edata_size_get(edata), offset, length);
+ edata_committed_set(edata, edata_committed_get(edata) && err);
return err;
}
-#ifdef PAGES_CAN_PURGE_LAZY
static bool
-extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
- size_t offset, size_t length, unsigned arena_ind) {
- assert(addr != NULL);
- assert((offset & PAGE_MASK) == 0);
- assert(length != 0);
- assert((length & PAGE_MASK) == 0);
-
- return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
- length);
-}
-#endif
-
-static bool
-extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length, bool growing_retained) {
+extent_purge_lazy_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ size_t offset, size_t length, bool growing_retained) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
-
- extent_hooks_assure_initialized(arena, r_extent_hooks);
-
- if ((*r_extent_hooks)->purge_lazy == NULL) {
- return true;
- }
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_pre_reentrancy(tsdn, arena);
- }
- bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks,
- extent_base_get(extent), extent_size_get(extent), offset, length,
- arena_ind_get(arena));
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_post_reentrancy(tsdn);
- }
-
+ bool err = ehooks_purge_lazy(tsdn, ehooks, edata_base_get(edata),
+ edata_size_get(edata), offset, length);
return err;
}
bool
-extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length) {
- return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent,
- offset, length, false);
-}
-
-#ifdef PAGES_CAN_PURGE_FORCED
-static bool
-extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
- size_t size, size_t offset, size_t length, unsigned arena_ind) {
- assert(addr != NULL);
- assert((offset & PAGE_MASK) == 0);
- assert(length != 0);
- assert((length & PAGE_MASK) == 0);
-
- return pages_purge_forced((void *)((uintptr_t)addr +
- (uintptr_t)offset), length);
+extent_purge_lazy_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ size_t offset, size_t length) {
+ return extent_purge_lazy_impl(tsdn, ehooks, edata, offset,
+ length, false);
}
-#endif
static bool
-extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length, bool growing_retained) {
+extent_purge_forced_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ size_t offset, size_t length, bool growing_retained) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
-
- extent_hooks_assure_initialized(arena, r_extent_hooks);
-
- if ((*r_extent_hooks)->purge_forced == NULL) {
- return true;
- }
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_pre_reentrancy(tsdn, arena);
- }
- bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks,
- extent_base_get(extent), extent_size_get(extent), offset, length,
- arena_ind_get(arena));
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_post_reentrancy(tsdn);
- }
+ bool err = ehooks_purge_forced(tsdn, ehooks, edata_base_get(edata),
+ edata_size_get(edata), offset, length);
return err;
}
bool
-extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length) {
- return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent,
- offset, length, false);
-}
-
-static bool
-extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
- size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
- if (!maps_coalesce) {
- /*
- * Without retain, only whole regions can be purged (required by
- * MEM_RELEASE on Windows) -- therefore disallow splitting. See
- * comments in extent_head_no_merge().
- */
- return !opt_retain;
- }
-
- return false;
+extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ size_t offset, size_t length) {
+ return extent_purge_forced_impl(tsdn, ehooks, edata, offset, length,
+ false);
}
/*
@@ -2085,183 +1170,95 @@ extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
* with the trail (the higher addressed portion). This makes 'extent' the lead,
* and returns the trail (except in case of error).
*/
-static extent_t *
-extent_split_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
- szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
- bool growing_retained) {
- assert(extent_size_get(extent) == size_a + size_b);
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, growing_retained ? 1 : 0);
-
- extent_hooks_assure_initialized(arena, r_extent_hooks);
+static edata_t *
+extent_split_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ edata_t *edata, size_t size_a, size_t size_b, bool holding_core_locks) {
+ assert(edata_size_get(edata) == size_a + size_b);
+ /* Only the shrink path may split w/o holding core locks. */
+ if (holding_core_locks) {
+ witness_assert_positive_depth_to_rank(
+ tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE);
+ } else {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+ }
- if ((*r_extent_hooks)->split == NULL) {
+ if (ehooks_split_will_fail(ehooks)) {
return NULL;
}
- extent_t *trail = extent_alloc(tsdn, arena);
+ edata_t *trail = edata_cache_get(tsdn, pac->edata_cache);
if (trail == NULL) {
goto label_error_a;
}
- extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
- size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
- extent_state_get(extent), extent_zeroed_get(extent),
- extent_committed_get(extent), extent_dumpable_get(extent),
- EXTENT_NOT_HEAD);
-
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
- rtree_leaf_elm_t *lead_elm_a, *lead_elm_b;
- {
- extent_t lead;
-
- extent_init(&lead, arena, extent_addr_get(extent), size_a,
- slab_a, szind_a, extent_sn_get(extent),
- extent_state_get(extent), extent_zeroed_get(extent),
- extent_committed_get(extent), extent_dumpable_get(extent),
- EXTENT_NOT_HEAD);
-
- extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
- true, &lead_elm_a, &lead_elm_b);
- }
- rtree_leaf_elm_t *trail_elm_a, *trail_elm_b;
- extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true,
- &trail_elm_a, &trail_elm_b);
-
- if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL
- || trail_elm_b == NULL) {
+ edata_init(trail, edata_arena_ind_get(edata),
+ (void *)((uintptr_t)edata_base_get(edata) + size_a), size_b,
+ /* slab */ false, SC_NSIZES, edata_sn_get(edata),
+ edata_state_get(edata), edata_zeroed_get(edata),
+ edata_committed_get(edata), EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
+ emap_prepare_t prepare;
+ bool err = emap_split_prepare(tsdn, pac->emap, &prepare, edata,
+ size_a, trail, size_b);
+ if (err) {
goto label_error_b;
}
- extent_lock2(tsdn, extent, trail);
+ /*
+ * No need to acquire trail or edata, because: 1) trail was new (just
+ * allocated); and 2) edata is either an active allocation (the shrink
+ * path), or in an acquired state (extracted from the ecache on the
+ * extent_recycle_split path).
+ */
+ assert(emap_edata_is_acquired(tsdn, pac->emap, edata));
+ assert(emap_edata_is_acquired(tsdn, pac->emap, trail));
+
+ err = ehooks_split(tsdn, ehooks, edata_base_get(edata), size_a + size_b,
+ size_a, size_b, edata_committed_get(edata));
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_pre_reentrancy(tsdn, arena);
- }
- bool err = (*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent),
- size_a + size_b, size_a, size_b, extent_committed_get(extent),
- arena_ind_get(arena));
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_post_reentrancy(tsdn);
- }
if (err) {
- goto label_error_c;
+ goto label_error_b;
}
- extent_size_set(extent, size_a);
- extent_szind_set(extent, szind_a);
-
- extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent,
- szind_a, slab_a);
- extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail,
- szind_b, slab_b);
-
- extent_unlock2(tsdn, extent, trail);
+ edata_size_set(edata, size_a);
+ emap_split_commit(tsdn, pac->emap, &prepare, edata, size_a, trail,
+ size_b);
return trail;
-label_error_c:
- extent_unlock2(tsdn, extent, trail);
label_error_b:
- extent_dalloc(tsdn, arena, trail);
+ edata_cache_put(tsdn, pac->edata_cache, trail);
label_error_a:
return NULL;
}
-extent_t *
-extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
- szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) {
- return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a,
- szind_a, slab_a, size_b, szind_b, slab_b, false);
+edata_t *
+extent_split_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata,
+ size_t size_a, size_t size_b, bool holding_core_locks) {
+ return extent_split_impl(tsdn, pac, ehooks, edata, size_a, size_b,
+ holding_core_locks);
}
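Worked example (illustrative, not taken from the patch): splitting a 32 KiB edata with size_a = 12 KiB and size_b = 20 KiB leaves the original edata describing the low 12 KiB and returns a freshly initialized trail edata for the 20 KiB that starts 12 KiB past the base; if the emap preparation or the split hook fails, the trail goes back to the edata cache and NULL is returned.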
static bool
-extent_merge_default_impl(void *addr_a, void *addr_b) {
- if (!maps_coalesce && !opt_retain) {
- return true;
- }
- if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
- return true;
- }
-
- return false;
-}
-
-/*
- * Returns true if the given extents can't be merged because of their head bit
- * settings. Assumes the second extent has the higher address.
- */
-static bool
-extent_head_no_merge(extent_t *a, extent_t *b) {
- assert(extent_base_get(a) < extent_base_get(b));
- /*
- * When coalesce is not always allowed (Windows), only merge extents
- * from the same VirtualAlloc region under opt.retain (in which case
- * MEM_DECOMMIT is utilized for purging).
- */
- if (maps_coalesce) {
- return false;
- }
- if (!opt_retain) {
- return true;
- }
- /* If b is a head extent, disallow the cross-region merge. */
- if (extent_is_head_get(b)) {
- /*
- * Additionally, sn should not overflow with retain; sanity
- * check that different regions have unique sn.
- */
- assert(extent_sn_comp(a, b) != 0);
- return true;
- }
- assert(extent_sn_comp(a, b) == 0);
-
- return false;
-}
-
-static bool
-extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
- void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
- if (!maps_coalesce) {
- tsdn_t *tsdn = tsdn_fetch();
- extent_t *a = iealloc(tsdn, addr_a);
- extent_t *b = iealloc(tsdn, addr_b);
- if (extent_head_no_merge(a, b)) {
- return true;
- }
+extent_merge_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *a,
+ edata_t *b, bool holding_core_locks) {
+ /* Only the expanding path may merge w/o holding ecache locks. */
+ if (holding_core_locks) {
+ witness_assert_positive_depth_to_rank(
+ tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE);
+ } else {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
}
- return extent_merge_default_impl(addr_a, addr_b);
-}
-
-static bool
-extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
- bool growing_retained) {
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, growing_retained ? 1 : 0);
- assert(extent_base_get(a) < extent_base_get(b));
- extent_hooks_assure_initialized(arena, r_extent_hooks);
+ assert(edata_base_get(a) < edata_base_get(b));
+ assert(edata_arena_ind_get(a) == edata_arena_ind_get(b));
+ assert(edata_arena_ind_get(a) == ehooks_ind_get(ehooks));
+ emap_assert_mapped(tsdn, pac->emap, a);
+ emap_assert_mapped(tsdn, pac->emap, b);
- if ((*r_extent_hooks)->merge == NULL || extent_head_no_merge(a, b)) {
- return true;
- }
-
- bool err;
- if (*r_extent_hooks == &extent_hooks_default) {
- /* Call directly to propagate tsdn. */
- err = extent_merge_default_impl(extent_base_get(a),
- extent_base_get(b));
- } else {
- extent_hook_pre_reentrancy(tsdn, arena);
- err = (*r_extent_hooks)->merge(*r_extent_hooks,
- extent_base_get(a), extent_size_get(a), extent_base_get(b),
- extent_size_get(b), extent_committed_get(a),
- arena_ind_get(arena));
- extent_hook_post_reentrancy(tsdn);
- }
+ bool err = ehooks_merge(tsdn, ehooks, edata_base_get(a),
+ edata_size_get(a), edata_base_get(b), edata_size_get(b),
+ edata_committed_get(a));
if (err) {
return true;
@@ -2272,132 +1269,58 @@ extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
* owned, so the following code uses decomposed helper functions rather
* than extent_{,de}register() to do things in the right order.
*/
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
- rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
- extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a,
- &a_elm_b);
- extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a,
- &b_elm_b);
-
- extent_lock2(tsdn, a, b);
-
- if (a_elm_b != NULL) {
- rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
- SC_NSIZES, false);
- }
- if (b_elm_b != NULL) {
- rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL,
- SC_NSIZES, false);
- } else {
- b_elm_b = b_elm_a;
- }
+ emap_prepare_t prepare;
+ emap_merge_prepare(tsdn, pac->emap, &prepare, a, b);
- extent_size_set(a, extent_size_get(a) + extent_size_get(b));
- extent_szind_set(a, SC_NSIZES);
- extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
- extent_sn_get(a) : extent_sn_get(b));
- extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
+ assert(edata_state_get(a) == extent_state_active ||
+ edata_state_get(a) == extent_state_merging);
+ edata_state_set(a, extent_state_active);
+ edata_size_set(a, edata_size_get(a) + edata_size_get(b));
+ edata_sn_set(a, (edata_sn_get(a) < edata_sn_get(b)) ?
+ edata_sn_get(a) : edata_sn_get(b));
+ edata_zeroed_set(a, edata_zeroed_get(a) && edata_zeroed_get(b));
- extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, SC_NSIZES,
- false);
-
- extent_unlock2(tsdn, a, b);
+ emap_merge_commit(tsdn, pac->emap, &prepare, a, b);
- extent_dalloc(tsdn, extent_arena_get(b), b);
+ edata_cache_put(tsdn, pac->edata_cache, b);
return false;
}
bool
-extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) {
- return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false);
+extent_merge_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ edata_t *a, edata_t *b) {
+ return extent_merge_impl(tsdn, pac, ehooks, a, b,
+ /* holding_core_locks */ false);
}
bool
-extent_boot(void) {
- if (rtree_new(&extents_rtree, true)) {
- return true;
- }
+extent_commit_zero(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ bool commit, bool zero, bool growing_retained) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, growing_retained ? 1 : 0);
- if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool",
- WITNESS_RANK_EXTENT_POOL)) {
- return true;
+ if (commit && !edata_committed_get(edata)) {
+ if (extent_commit_impl(tsdn, ehooks, edata, 0,
+ edata_size_get(edata), growing_retained)) {
+ return true;
+ }
}
-
- if (have_dss) {
- extent_dss_boot();
+ if (zero && !edata_zeroed_get(edata)) {
+ void *addr = edata_base_get(edata);
+ size_t size = edata_size_get(edata);
+ ehooks_zero(tsdn, ehooks, addr, size);
}
-
return false;
}
-void
-extent_util_stats_get(tsdn_t *tsdn, const void *ptr,
- size_t *nfree, size_t *nregs, size_t *size) {
- assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL);
-
- const extent_t *extent = iealloc(tsdn, ptr);
- if (unlikely(extent == NULL)) {
- *nfree = *nregs = *size = 0;
- return;
- }
-
- *size = extent_size_get(extent);
- if (!extent_slab_get(extent)) {
- *nfree = 0;
- *nregs = 1;
- } else {
- *nfree = extent_nfree_get(extent);
- *nregs = bin_infos[extent_szind_get(extent)].nregs;
- assert(*nfree <= *nregs);
- assert(*nfree * extent_usize_get(extent) <= *size);
- }
-}
-
-void
-extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
- size_t *nfree, size_t *nregs, size_t *size,
- size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr) {
- assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL
- && bin_nfree != NULL && bin_nregs != NULL && slabcur_addr != NULL);
-
- const extent_t *extent = iealloc(tsdn, ptr);
- if (unlikely(extent == NULL)) {
- *nfree = *nregs = *size = *bin_nfree = *bin_nregs = 0;
- *slabcur_addr = NULL;
- return;
- }
+bool
+extent_boot(void) {
+ assert(sizeof(slab_data_t) >= sizeof(e_prof_info_t));
- *size = extent_size_get(extent);
- if (!extent_slab_get(extent)) {
- *nfree = *bin_nfree = *bin_nregs = 0;
- *nregs = 1;
- *slabcur_addr = NULL;
- return;
+ if (have_dss) {
+ extent_dss_boot();
}
- *nfree = extent_nfree_get(extent);
- const szind_t szind = extent_szind_get(extent);
- *nregs = bin_infos[szind].nregs;
- assert(*nfree <= *nregs);
- assert(*nfree * extent_usize_get(extent) <= *size);
-
- const arena_t *arena = extent_arena_get(extent);
- assert(arena != NULL);
- const unsigned binshard = extent_binshard_get(extent);
- bin_t *bin = &arena->bins[szind].bin_shards[binshard];
-
- malloc_mutex_lock(tsdn, &bin->lock);
- if (config_stats) {
- *bin_nregs = *nregs * bin->stats.curslabs;
- assert(*bin_nregs >= bin->stats.curregs);
- *bin_nfree = *bin_nregs - bin->stats.curregs;
- } else {
- *bin_nfree = *bin_nregs = 0;
- }
- *slabcur_addr = extent_addr_get(bin->slabcur);
- assert(*slabcur_addr != NULL);
- malloc_mutex_unlock(tsdn, &bin->lock);
+ return false;
}
diff --git a/deps/jemalloc/src/extent_dss.c b/deps/jemalloc/src/extent_dss.c
index 858178911..9a35bacfb 100644
--- a/deps/jemalloc/src/extent_dss.c
+++ b/deps/jemalloc/src/extent_dss.c
@@ -1,4 +1,3 @@
-#define JEMALLOC_EXTENT_DSS_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
@@ -109,7 +108,7 @@ extent_dss_max_update(void *new_addr) {
void *
extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit) {
- extent_t *gap;
+ edata_t *gap;
cassert(have_dss);
assert(size > 0);
@@ -123,7 +122,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
return NULL;
}
- gap = extent_alloc(tsdn, arena);
+ gap = edata_cache_get(tsdn, &arena->pa_shard.edata_cache);
if (gap == NULL) {
return NULL;
}
@@ -141,6 +140,8 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
goto label_oom;
}
+ bool head_state = opt_retain ? EXTENT_IS_HEAD :
+ EXTENT_NOT_HEAD;
/*
* Compute how much page-aligned gap space (if any) is
* necessary to satisfy alignment. This space can be
@@ -153,11 +154,12 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
size_t gap_size_page = (uintptr_t)ret -
(uintptr_t)gap_addr_page;
if (gap_size_page != 0) {
- extent_init(gap, arena, gap_addr_page,
- gap_size_page, false, SC_NSIZES,
- arena_extent_sn_next(arena),
- extent_state_active, false, true, true,
- EXTENT_NOT_HEAD);
+ edata_init(gap, arena_ind_get(arena),
+ gap_addr_page, gap_size_page, false,
+ SC_NSIZES, extent_sn_next(
+ &arena->pa_shard.pac),
+ extent_state_active, false, true,
+ EXTENT_PAI_PAC, head_state);
}
/*
* Compute the address just past the end of the desired
@@ -186,25 +188,29 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
extent_dss_extending_finish();
if (gap_size_page != 0) {
- extent_dalloc_gap(tsdn, arena, gap);
+ ehooks_t *ehooks = arena_get_ehooks(
+ arena);
+ extent_dalloc_gap(tsdn,
+ &arena->pa_shard.pac, ehooks, gap);
} else {
- extent_dalloc(tsdn, arena, gap);
+ edata_cache_put(tsdn,
+ &arena->pa_shard.edata_cache, gap);
}
if (!*commit) {
*commit = pages_decommit(ret, size);
}
if (*zero && *commit) {
- extent_hooks_t *extent_hooks =
- EXTENT_HOOKS_INITIALIZER;
- extent_t extent;
+ edata_t edata = {0};
+ ehooks_t *ehooks = arena_get_ehooks(
+ arena);
- extent_init(&extent, arena, ret, size,
+ edata_init(&edata,
+ arena_ind_get(arena), ret, size,
size, false, SC_NSIZES,
extent_state_active, false, true,
- true, EXTENT_NOT_HEAD);
+ EXTENT_PAI_PAC, head_state);
if (extent_purge_forced_wrapper(tsdn,
- arena, &extent_hooks, &extent, 0,
- size)) {
+ ehooks, &edata, 0, size)) {
memset(ret, 0, size);
}
}
@@ -224,7 +230,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
}
label_oom:
extent_dss_extending_finish();
- extent_dalloc(tsdn, arena, gap);
+ edata_cache_put(tsdn, &arena->pa_shard.edata_cache, gap);
return NULL;
}
diff --git a/deps/jemalloc/src/extent_mmap.c b/deps/jemalloc/src/extent_mmap.c
index 17fd1c8f9..5f0ee2d24 100644
--- a/deps/jemalloc/src/extent_mmap.c
+++ b/deps/jemalloc/src/extent_mmap.c
@@ -1,4 +1,3 @@
-#define JEMALLOC_EXTENT_MMAP_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
diff --git a/deps/jemalloc/src/fxp.c b/deps/jemalloc/src/fxp.c
new file mode 100644
index 000000000..96585f0a6
--- /dev/null
+++ b/deps/jemalloc/src/fxp.c
@@ -0,0 +1,124 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/fxp.h"
+
+static bool
+fxp_isdigit(char c) {
+ return '0' <= c && c <= '9';
+}
+
+bool
+fxp_parse(fxp_t *result, const char *str, char **end) {
+ /*
+ * Using malloc_strtoumax in this method isn't as handy as you might
+ * expect (I tried). In the fractional part, significant leading zeros
+ * mean that you still need to do your own parsing, now with trickier
+ * math. In the integer part, the casting (uintmax_t to uint32_t)
+ * forces more reasoning about bounds than just checking for overflow as
+ * we parse.
+ */
+ uint32_t integer_part = 0;
+
+ const char *cur = str;
+
+ /* The string must start with a digit or a decimal point. */
+ if (*cur != '.' && !fxp_isdigit(*cur)) {
+ return true;
+ }
+
+ while ('0' <= *cur && *cur <= '9') {
+ integer_part *= 10;
+ integer_part += *cur - '0';
+ if (integer_part >= (1U << 16)) {
+ return true;
+ }
+ cur++;
+ }
+
+ /*
+ * We've parsed all digits at the beginning of the string, without
+ * overflow. Either we're done, or there's a fractional part.
+ */
+ if (*cur != '.') {
+ *result = (integer_part << 16);
+ if (end != NULL) {
+ *end = (char *)cur;
+ }
+ return false;
+ }
+
+ /* There's a fractional part. */
+ cur++;
+ if (!fxp_isdigit(*cur)) {
+ /* Shouldn't end on the decimal point. */
+ return true;
+ }
+
+ /*
+ * We use a lot of precision for the fractional part, even though we'll
+ * discard most of it; this lets us get exact values for the important
+ * special case where the denominator is a small power of 2 (for
+ * instance, 1/512 == 0.001953125 is exactly representable even with
+ * only 16 bits of fractional precision). We need to left-shift by 16
+ * before dividing, so we pick the number of fractional digits to be
+ * floor(log10(2**48)) = 14; that way fractional_part << 16 still fits in
+ * a uint64_t.
+ */
+ uint64_t fractional_part = 0;
+ uint64_t frac_div = 1;
+ for (int i = 0; i < FXP_FRACTIONAL_PART_DIGITS; i++) {
+ fractional_part *= 10;
+ frac_div *= 10;
+ if (fxp_isdigit(*cur)) {
+ fractional_part += *cur - '0';
+ cur++;
+ }
+ }
+ /*
+ * We only keep the first FXP_FRACTIONAL_PART_DIGITS digits; skip over
+ * any remaining digits so that *end still points past the whole number.
+ */
+ while (fxp_isdigit(*cur)) {
+ cur++;
+ }
+
+ assert(fractional_part < frac_div);
+ uint32_t fractional_repr = (uint32_t)(
+ (fractional_part << 16) / frac_div);
+
+ /* Success! */
+ *result = (integer_part << 16) + fractional_repr;
+ if (end != NULL) {
+ *end = (char *)cur;
+ }
+ return false;
+}
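A minimal usage sketch for the parser above (values worked by hand; it assumes the internal fxp.h header added by this patch and is not itself part of it):

#include <assert.h>
#include <stdbool.h>

#include "jemalloc/internal/fxp.h"

void
fxp_parse_example(void) {
	fxp_t val;
	char *end;
	/* "2.5": integer part 2 in the high 16 bits, 0.5 * 2^16 = 32768 below. */
	bool err = fxp_parse(&val, "2.5", &end);
	assert(!err && val == ((2U << 16) + 32768U) && *end == '\0');
	/* 1/512 == 0.001953125 is exact: (1 << 16) / 512 == 128. */
	err = fxp_parse(&val, "0.001953125", &end);
	assert(!err && val == 128U);
}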
+
+void
+fxp_print(fxp_t a, char buf[FXP_BUF_SIZE]) {
+ uint32_t integer_part = fxp_round_down(a);
+ uint32_t fractional_part = (a & ((1U << 16) - 1));
+
+ int leading_fraction_zeros = 0;
+ uint64_t fraction_digits = fractional_part;
+ for (int i = 0; i < FXP_FRACTIONAL_PART_DIGITS; i++) {
+ if (fraction_digits < (1U << 16)
+ && fraction_digits * 10 >= (1U << 16)) {
+ leading_fraction_zeros = i;
+ }
+ fraction_digits *= 10;
+ }
+ fraction_digits >>= 16;
+ while (fraction_digits > 0 && fraction_digits % 10 == 0) {
+ fraction_digits /= 10;
+ }
+
+ size_t printed = malloc_snprintf(buf, FXP_BUF_SIZE, "%"FMTu32".",
+ integer_part);
+ for (int i = 0; i < leading_fraction_zeros; i++) {
+ buf[printed] = '0';
+ printed++;
+ }
+ malloc_snprintf(&buf[printed], FXP_BUF_SIZE - printed, "%"FMTu64,
+ fraction_digits);
+}
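Hand-worked example of the printer above (not from a test run): for the 1/512 value, fxp 128, the scan finds two leading fractional zeros and the trailing-zero strip leaves 1953125, so the buffer ends up holding "0.001953125"; the 2.5 value (fxp 163840) prints as "2.5".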
diff --git a/deps/jemalloc/src/hash.c b/deps/jemalloc/src/hash.c
deleted file mode 100644
index 7b2bdc2bd..000000000
--- a/deps/jemalloc/src/hash.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#define JEMALLOC_HASH_C_
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
diff --git a/deps/jemalloc/src/hook.c b/deps/jemalloc/src/hook.c
index 9ac703cf9..493edbbe5 100644
--- a/deps/jemalloc/src/hook.c
+++ b/deps/jemalloc/src/hook.c
@@ -130,9 +130,9 @@ hook_reentrantp() {
*/
static bool in_hook_global = true;
tsdn_t *tsdn = tsdn_fetch();
- tcache_t *tcache = tsdn_tcachep_get(tsdn);
- if (tcache != NULL) {
- return &tcache->in_hook;
+ bool *in_hook = tsdn_in_hookp_get(tsdn);
+ if (in_hook != NULL) {
+ return in_hook;
}
return &in_hook_global;
}
diff --git a/deps/jemalloc/src/hpa.c b/deps/jemalloc/src/hpa.c
new file mode 100644
index 000000000..7e2aeba0c
--- /dev/null
+++ b/deps/jemalloc/src/hpa.c
@@ -0,0 +1,1044 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/hpa.h"
+
+#include "jemalloc/internal/fb.h"
+#include "jemalloc/internal/witness.h"
+
+#define HPA_EDEN_SIZE (128 * HUGEPAGE)
+
+static edata_t *hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
+ size_t alignment, bool zero, bool guarded, bool frequent_reuse,
+ bool *deferred_work_generated);
+static size_t hpa_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size,
+ size_t nallocs, edata_list_active_t *results, bool *deferred_work_generated);
+static bool hpa_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated);
+static bool hpa_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ size_t old_size, size_t new_size, bool *deferred_work_generated);
+static void hpa_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ bool *deferred_work_generated);
+static void hpa_dalloc_batch(tsdn_t *tsdn, pai_t *self,
+ edata_list_active_t *list, bool *deferred_work_generated);
+static uint64_t hpa_time_until_deferred_work(tsdn_t *tsdn, pai_t *self);
+
+bool
+hpa_supported() {
+#ifdef _WIN32
+ /*
+ * At least until the API and implementation is somewhat settled, we
+ * don't want to try to debug the VM subsystem on the hardest-to-test
+ * platform.
+ */
+ return false;
+#endif
+ if (!pages_can_hugify) {
+ return false;
+ }
+ /*
+ * We fundamentally rely on an address-space-hungry growth strategy for
+ * hugepages.
+ */
+ if (LG_SIZEOF_PTR != 3) {
+ return false;
+ }
+ /*
+ * If we couldn't detect the value of HUGEPAGE, HUGEPAGE_PAGES becomes
+ * this sentinel value -- see the comment in pages.h.
+ */
+ if (HUGEPAGE_PAGES == 1) {
+ return false;
+ }
+ return true;
+}
+
+static void
+hpa_do_consistency_checks(hpa_shard_t *shard) {
+ assert(shard->base != NULL);
+}
+
+bool
+hpa_central_init(hpa_central_t *central, base_t *base, const hpa_hooks_t *hooks) {
+ /* malloc_conf processing should have filtered out these cases. */
+ assert(hpa_supported());
+ bool err;
+ err = malloc_mutex_init(&central->grow_mtx, "hpa_central_grow",
+ WITNESS_RANK_HPA_CENTRAL_GROW, malloc_mutex_rank_exclusive);
+ if (err) {
+ return true;
+ }
+ err = malloc_mutex_init(&central->mtx, "hpa_central",
+ WITNESS_RANK_HPA_CENTRAL, malloc_mutex_rank_exclusive);
+ if (err) {
+ return true;
+ }
+ central->base = base;
+ central->eden = NULL;
+ central->eden_len = 0;
+ central->age_counter = 0;
+ central->hooks = *hooks;
+ return false;
+}
+
+static hpdata_t *
+hpa_alloc_ps(tsdn_t *tsdn, hpa_central_t *central) {
+ return (hpdata_t *)base_alloc(tsdn, central->base, sizeof(hpdata_t),
+ CACHELINE);
+}
+
+hpdata_t *
+hpa_central_extract(tsdn_t *tsdn, hpa_central_t *central, size_t size,
+ bool *oom) {
+ /* Don't yet support big allocations; these should get filtered out. */
+ assert(size <= HUGEPAGE);
+ /*
+ * Should only try to extract from the central allocator if the local
+ * shard is exhausted. We should hold the grow_mtx on that shard.
+ */
+ witness_assert_positive_depth_to_rank(
+ tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_HPA_SHARD_GROW);
+
+ malloc_mutex_lock(tsdn, &central->grow_mtx);
+ *oom = false;
+
+ hpdata_t *ps = NULL;
+
+ /* Is eden a perfect fit? */
+ if (central->eden != NULL && central->eden_len == HUGEPAGE) {
+ ps = hpa_alloc_ps(tsdn, central);
+ if (ps == NULL) {
+ *oom = true;
+ malloc_mutex_unlock(tsdn, &central->grow_mtx);
+ return NULL;
+ }
+ hpdata_init(ps, central->eden, central->age_counter++);
+ central->eden = NULL;
+ central->eden_len = 0;
+ malloc_mutex_unlock(tsdn, &central->grow_mtx);
+ return ps;
+ }
+
+ /*
+ * We're about to try to allocate from eden by splitting. If eden is
+ * NULL, we have to allocate it too. Otherwise, we just have to
+ * allocate an hpdata_t for the new pageslab.
+ */
+ if (central->eden == NULL) {
+ /*
+ * During development, we're primarily concerned with systems
+ * with overcommit. Eventually, we should be more careful here.
+ */
+ bool commit = true;
+ /* Allocate address space, bailing if we fail. */
+ void *new_eden = pages_map(NULL, HPA_EDEN_SIZE, HUGEPAGE,
+ &commit);
+ if (new_eden == NULL) {
+ *oom = true;
+ malloc_mutex_unlock(tsdn, &central->grow_mtx);
+ return NULL;
+ }
+ ps = hpa_alloc_ps(tsdn, central);
+ if (ps == NULL) {
+ pages_unmap(new_eden, HPA_EDEN_SIZE);
+ *oom = true;
+ malloc_mutex_unlock(tsdn, &central->grow_mtx);
+ return NULL;
+ }
+ central->eden = new_eden;
+ central->eden_len = HPA_EDEN_SIZE;
+ } else {
+ /* Eden is already nonempty; only need an hpdata_t for ps. */
+ ps = hpa_alloc_ps(tsdn, central);
+ if (ps == NULL) {
+ *oom = true;
+ malloc_mutex_unlock(tsdn, &central->grow_mtx);
+ return NULL;
+ }
+ }
+ assert(ps != NULL);
+ assert(central->eden != NULL);
+ assert(central->eden_len > HUGEPAGE);
+ assert(central->eden_len % HUGEPAGE == 0);
+ assert(HUGEPAGE_ADDR2BASE(central->eden) == central->eden);
+
+ hpdata_init(ps, central->eden, central->age_counter++);
+
+ char *eden_char = (char *)central->eden;
+ eden_char += HUGEPAGE;
+ central->eden = (void *)eden_char;
+ central->eden_len -= HUGEPAGE;
+
+ malloc_mutex_unlock(tsdn, &central->grow_mtx);
+
+ return ps;
+}
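For scale (assuming the common 2 MiB hugepage size on x86-64): HPA_EDEN_SIZE is 128 * HUGEPAGE = 256 MiB, so a single pages_map() call stocks eden for 128 one-hugepage extractions: 127 through the carve-off path that advances eden by HUGEPAGE, plus a final "perfect fit" extraction that consumes the last remaining hugepage.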
+
+bool
+hpa_shard_init(hpa_shard_t *shard, hpa_central_t *central, emap_t *emap,
+ base_t *base, edata_cache_t *edata_cache, unsigned ind,
+ const hpa_shard_opts_t *opts) {
+ /* malloc_conf processing should have filtered out these cases. */
+ assert(hpa_supported());
+ bool err;
+ err = malloc_mutex_init(&shard->grow_mtx, "hpa_shard_grow",
+ WITNESS_RANK_HPA_SHARD_GROW, malloc_mutex_rank_exclusive);
+ if (err) {
+ return true;
+ }
+ err = malloc_mutex_init(&shard->mtx, "hpa_shard",
+ WITNESS_RANK_HPA_SHARD, malloc_mutex_rank_exclusive);
+ if (err) {
+ return true;
+ }
+
+ assert(edata_cache != NULL);
+ shard->central = central;
+ shard->base = base;
+ edata_cache_fast_init(&shard->ecf, edata_cache);
+ psset_init(&shard->psset);
+ shard->age_counter = 0;
+ shard->ind = ind;
+ shard->emap = emap;
+
+ shard->opts = *opts;
+
+ shard->npending_purge = 0;
+ nstime_init_zero(&shard->last_purge);
+
+ shard->stats.npurge_passes = 0;
+ shard->stats.npurges = 0;
+ shard->stats.nhugifies = 0;
+ shard->stats.ndehugifies = 0;
+
+ /*
+ * Fill these in last, so that if an hpa_shard gets used despite
+ * initialization failing, we'll at least crash instead of just
+ * operating on corrupted data.
+ */
+ shard->pai.alloc = &hpa_alloc;
+ shard->pai.alloc_batch = &hpa_alloc_batch;
+ shard->pai.expand = &hpa_expand;
+ shard->pai.shrink = &hpa_shrink;
+ shard->pai.dalloc = &hpa_dalloc;
+ shard->pai.dalloc_batch = &hpa_dalloc_batch;
+ shard->pai.time_until_deferred_work = &hpa_time_until_deferred_work;
+
+ hpa_do_consistency_checks(shard);
+
+ return false;
+}
+
+/*
+ * Note that the stats functions here follow the usual stats naming conventions;
+ * "merge" obtains the stats from some live object of instance, while "accum"
+ * only combines the stats from one stats objet to another. Hence the lack of
+ * locking here.
+ */
+static void
+hpa_shard_nonderived_stats_accum(hpa_shard_nonderived_stats_t *dst,
+ hpa_shard_nonderived_stats_t *src) {
+ dst->npurge_passes += src->npurge_passes;
+ dst->npurges += src->npurges;
+ dst->nhugifies += src->nhugifies;
+ dst->ndehugifies += src->ndehugifies;
+}
+
+void
+hpa_shard_stats_accum(hpa_shard_stats_t *dst, hpa_shard_stats_t *src) {
+ psset_stats_accum(&dst->psset_stats, &src->psset_stats);
+ hpa_shard_nonderived_stats_accum(&dst->nonderived_stats,
+ &src->nonderived_stats);
+}
+
+void
+hpa_shard_stats_merge(tsdn_t *tsdn, hpa_shard_t *shard,
+ hpa_shard_stats_t *dst) {
+ hpa_do_consistency_checks(shard);
+
+ malloc_mutex_lock(tsdn, &shard->grow_mtx);
+ malloc_mutex_lock(tsdn, &shard->mtx);
+ psset_stats_accum(&dst->psset_stats, &shard->psset.stats);
+ hpa_shard_nonderived_stats_accum(&dst->nonderived_stats, &shard->stats);
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+ malloc_mutex_unlock(tsdn, &shard->grow_mtx);
+}
+
+static bool
+hpa_good_hugification_candidate(hpa_shard_t *shard, hpdata_t *ps) {
+ /*
+ * Note that this needs to be >= rather than just >, because of the
+ * important special case in which the hugification threshold is exactly
+ * HUGEPAGE.
+ */
+ return hpdata_nactive_get(ps) * PAGE
+ >= shard->opts.hugification_threshold;
+}
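Worked example (assuming 4 KiB pages and 2 MiB hugepages): with hugification_threshold set to exactly HUGEPAGE, a pageslab must have all 512 of its pages active to qualify; the >= (rather than >) is what lets that fully active slab pass, since nactive * PAGE can never exceed HUGEPAGE.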
+
+static size_t
+hpa_adjusted_ndirty(tsdn_t *tsdn, hpa_shard_t *shard) {
+ malloc_mutex_assert_owner(tsdn, &shard->mtx);
+ return psset_ndirty(&shard->psset) - shard->npending_purge;
+}
+
+static size_t
+hpa_ndirty_max(tsdn_t *tsdn, hpa_shard_t *shard) {
+ malloc_mutex_assert_owner(tsdn, &shard->mtx);
+ if (shard->opts.dirty_mult == (fxp_t)-1) {
+ return (size_t)-1;
+ }
+ return fxp_mul_frac(psset_nactive(&shard->psset),
+ shard->opts.dirty_mult);
+}
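Worked example (hypothetical settings): if dirty_mult encodes 0.25 and the psset has 8192 active pages, the shard tolerates up to fxp_mul_frac(8192, 0.25) = 2048 dirty pages before purging pressure kicks in; the (fxp_t)-1 sentinel disables the cap by returning (size_t)-1.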
+
+static bool
+hpa_hugify_blocked_by_ndirty(tsdn_t *tsdn, hpa_shard_t *shard) {
+ malloc_mutex_assert_owner(tsdn, &shard->mtx);
+ hpdata_t *to_hugify = psset_pick_hugify(&shard->psset);
+ if (to_hugify == NULL) {
+ return false;
+ }
+ return hpa_adjusted_ndirty(tsdn, shard)
+ + hpdata_nretained_get(to_hugify) > hpa_ndirty_max(tsdn, shard);
+}
+
+static bool
+hpa_should_purge(tsdn_t *tsdn, hpa_shard_t *shard) {
+ malloc_mutex_assert_owner(tsdn, &shard->mtx);
+ if (hpa_adjusted_ndirty(tsdn, shard) > hpa_ndirty_max(tsdn, shard)) {
+ return true;
+ }
+ if (hpa_hugify_blocked_by_ndirty(tsdn, shard)) {
+ return true;
+ }
+ return false;
+}
+
+static void
+hpa_update_purge_hugify_eligibility(tsdn_t *tsdn, hpa_shard_t *shard,
+ hpdata_t *ps) {
+ malloc_mutex_assert_owner(tsdn, &shard->mtx);
+ if (hpdata_changing_state_get(ps)) {
+ hpdata_purge_allowed_set(ps, false);
+ hpdata_disallow_hugify(ps);
+ return;
+ }
+ /*
+ * Hugepages are distinctly costly to purge, so try to avoid it unless
+ * they're *particularly* full of dirty pages. Eventually, we should
+ * use a smarter / more dynamic heuristic for situations where we have
+ * to manually hugify.
+ *
+ * In situations where we don't manually hugify, this problem is
+ * reduced. The "bad" situation we're trying to avoid is one that's
+ * common in some Linux configurations (where both enabled and defrag
+ * are set to madvise) that can lead to long latency spikes on the first
+ * access after a hugification. The ideal policy in such configurations
+ * is probably time-based for both purging and hugifying; only hugify a
+ * hugepage if it's met the criteria for some extended period of time,
+ * and only dehugify it if it's failed to meet the criteria for an
+ * extended period of time. When background threads are on, we should
+ * try to take this hit on one of them, as well.
+ *
+ * I think the ideal setting is THP always enabled, and defrag set to
+ * deferred; in that case we don't need any explicit calls on the
+ * allocator's end at all; we just try to pack allocations in a
+ * hugepage-friendly manner and let the OS hugify in the background.
+ */
+ hpdata_purge_allowed_set(ps, hpdata_ndirty_get(ps) > 0);
+ if (hpa_good_hugification_candidate(shard, ps)
+ && !hpdata_huge_get(ps)) {
+ nstime_t now;
+ shard->central->hooks.curtime(&now, /* first_reading */ true);
+ hpdata_allow_hugify(ps, now);
+ }
+ /*
+ * Once a hugepage has become eligible for hugification, we don't mark
+ * it as ineligible just because it stops meeting the criteria (this
+ * could lead to situations where a hugepage that spends most of its
+ * time meeting the criteria never quite gets hugified if there are
+ * intervening deallocations). The idea is that the hugification delay
+ * will allow them to get purged, resetting their "hugify-allowed" bit.
+ * If they don't get purged, then the hugification isn't hurting and
+ * might help. As an exception, we don't hugify hugepages that are now
+ * empty; it definitely doesn't help there until the hugepage gets
+ * reused, which is likely not for a while.
+ */
+ if (hpdata_nactive_get(ps) == 0) {
+ hpdata_disallow_hugify(ps);
+ }
+}
+
+static bool
+hpa_shard_has_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard) {
+ malloc_mutex_assert_owner(tsdn, &shard->mtx);
+ hpdata_t *to_hugify = psset_pick_hugify(&shard->psset);
+ return to_hugify != NULL || hpa_should_purge(tsdn, shard);
+}
+
+/* Returns whether or not we purged anything. */
+static bool
+hpa_try_purge(tsdn_t *tsdn, hpa_shard_t *shard) {
+ malloc_mutex_assert_owner(tsdn, &shard->mtx);
+
+ hpdata_t *to_purge = psset_pick_purge(&shard->psset);
+ if (to_purge == NULL) {
+ return false;
+ }
+ assert(hpdata_purge_allowed_get(to_purge));
+ assert(!hpdata_changing_state_get(to_purge));
+
+ /*
+ * Don't let anyone else purge or hugify this page while
+ * we're purging it (allocations and deallocations are
+ * OK).
+ */
+ psset_update_begin(&shard->psset, to_purge);
+ assert(hpdata_alloc_allowed_get(to_purge));
+ hpdata_mid_purge_set(to_purge, true);
+ hpdata_purge_allowed_set(to_purge, false);
+ hpdata_disallow_hugify(to_purge);
+ /*
+ * Unlike with hugification (where concurrent
+ * allocations are allowed), concurrent allocation out
+ * of a hugepage being purged is unsafe; we might hand
+ * out an extent for an allocation and then purge it
+ * (clearing out user data).
+ */
+ hpdata_alloc_allowed_set(to_purge, false);
+ psset_update_end(&shard->psset, to_purge);
+
+ /* Gather all the metadata we'll need during the purge. */
+ bool dehugify = hpdata_huge_get(to_purge);
+ hpdata_purge_state_t purge_state;
+ size_t num_to_purge = hpdata_purge_begin(to_purge, &purge_state);
+
+ shard->npending_purge += num_to_purge;
+
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+
+ /* Actually do the purging, now that the lock is dropped. */
+ if (dehugify) {
+ shard->central->hooks.dehugify(hpdata_addr_get(to_purge),
+ HUGEPAGE);
+ }
+ size_t total_purged = 0;
+ uint64_t purges_this_pass = 0;
+ void *purge_addr;
+ size_t purge_size;
+ while (hpdata_purge_next(to_purge, &purge_state, &purge_addr,
+ &purge_size)) {
+ total_purged += purge_size;
+ assert(total_purged <= HUGEPAGE);
+ purges_this_pass++;
+ shard->central->hooks.purge(purge_addr, purge_size);
+ }
+
+ malloc_mutex_lock(tsdn, &shard->mtx);
+ /* The shard updates */
+ shard->npending_purge -= num_to_purge;
+ shard->stats.npurge_passes++;
+ shard->stats.npurges += purges_this_pass;
+ shard->central->hooks.curtime(&shard->last_purge,
+ /* first_reading */ false);
+ if (dehugify) {
+ shard->stats.ndehugifies++;
+ }
+
+ /* The hpdata updates. */
+ psset_update_begin(&shard->psset, to_purge);
+ if (dehugify) {
+ hpdata_dehugify(to_purge);
+ }
+ hpdata_purge_end(to_purge, &purge_state);
+ hpdata_mid_purge_set(to_purge, false);
+
+ hpdata_alloc_allowed_set(to_purge, true);
+ hpa_update_purge_hugify_eligibility(tsdn, shard, to_purge);
+
+ psset_update_end(&shard->psset, to_purge);
+
+ return true;
+}
+
+/* Returns whether or not we hugified anything. */
+static bool
+hpa_try_hugify(tsdn_t *tsdn, hpa_shard_t *shard) {
+ malloc_mutex_assert_owner(tsdn, &shard->mtx);
+
+ if (hpa_hugify_blocked_by_ndirty(tsdn, shard)) {
+ return false;
+ }
+
+ hpdata_t *to_hugify = psset_pick_hugify(&shard->psset);
+ if (to_hugify == NULL) {
+ return false;
+ }
+ assert(hpdata_hugify_allowed_get(to_hugify));
+ assert(!hpdata_changing_state_get(to_hugify));
+
+ /* Make sure that it's been hugifiable for long enough. */
+ nstime_t time_hugify_allowed = hpdata_time_hugify_allowed(to_hugify);
+ uint64_t millis = shard->central->hooks.ms_since(&time_hugify_allowed);
+ if (millis < shard->opts.hugify_delay_ms) {
+ return false;
+ }
+
+ /*
+ * Don't let anyone else purge or hugify this page while
+ * we're hugifying it (allocations and deallocations are
+ * OK).
+ */
+ psset_update_begin(&shard->psset, to_hugify);
+ hpdata_mid_hugify_set(to_hugify, true);
+ hpdata_purge_allowed_set(to_hugify, false);
+ hpdata_disallow_hugify(to_hugify);
+ assert(hpdata_alloc_allowed_get(to_hugify));
+ psset_update_end(&shard->psset, to_hugify);
+
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+
+ shard->central->hooks.hugify(hpdata_addr_get(to_hugify), HUGEPAGE);
+
+ malloc_mutex_lock(tsdn, &shard->mtx);
+ shard->stats.nhugifies++;
+
+ psset_update_begin(&shard->psset, to_hugify);
+ hpdata_hugify(to_hugify);
+ hpdata_mid_hugify_set(to_hugify, false);
+ hpa_update_purge_hugify_eligibility(tsdn, shard, to_hugify);
+ psset_update_end(&shard->psset, to_hugify);
+
+ return true;
+}
+
+/*
+ * Execution of deferred work is forced if it's triggered by an explicit
+ * hpa_shard_do_deferred_work() call.
+ */
+static void
+hpa_shard_maybe_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard,
+ bool forced) {
+ malloc_mutex_assert_owner(tsdn, &shard->mtx);
+ if (!forced && shard->opts.deferral_allowed) {
+ return;
+ }
+ /*
+ * If we're on a background thread, do work so long as there's work to
+ * be done. Otherwise, bound latency to not be *too* bad by doing at
+ * most a small fixed number of operations.
+ */
+ bool hugified = false;
+ bool purged = false;
+ size_t max_ops = (forced ? (size_t)-1 : 16);
+ size_t nops = 0;
+ do {
+ /*
+ * Always purge before hugifying, to make sure we get some
+ * ability to hit our quiescence targets.
+ */
+ purged = false;
+ while (hpa_should_purge(tsdn, shard) && nops < max_ops) {
+ purged = hpa_try_purge(tsdn, shard);
+ if (purged) {
+ nops++;
+ }
+ }
+ hugified = hpa_try_hugify(tsdn, shard);
+ if (hugified) {
+ nops++;
+ }
+		malloc_mutex_assert_owner(tsdn, &shard->mtx);
+ } while ((hugified || purged) && nops < max_ops);
+}
+
+static edata_t *
+hpa_try_alloc_one_no_grow(tsdn_t *tsdn, hpa_shard_t *shard, size_t size,
+ bool *oom) {
+ bool err;
+ edata_t *edata = edata_cache_fast_get(tsdn, &shard->ecf);
+ if (edata == NULL) {
+ *oom = true;
+ return NULL;
+ }
+
+ hpdata_t *ps = psset_pick_alloc(&shard->psset, size);
+ if (ps == NULL) {
+ edata_cache_fast_put(tsdn, &shard->ecf, edata);
+ return NULL;
+ }
+
+ psset_update_begin(&shard->psset, ps);
+
+ if (hpdata_empty(ps)) {
+ /*
+ * If the pageslab used to be empty, treat it as though it's
+ * brand new for fragmentation-avoidance purposes; what we're
+ * trying to approximate is the age of the allocations *in* that
+ * pageslab, and the allocations in the new pageslab are
+ * definitionally the youngest in this hpa shard.
+ */
+ hpdata_age_set(ps, shard->age_counter++);
+ }
+
+ void *addr = hpdata_reserve_alloc(ps, size);
+ edata_init(edata, shard->ind, addr, size, /* slab */ false,
+ SC_NSIZES, /* sn */ hpdata_age_get(ps), extent_state_active,
+ /* zeroed */ false, /* committed */ true, EXTENT_PAI_HPA,
+ EXTENT_NOT_HEAD);
+ edata_ps_set(edata, ps);
+
+ /*
+ * This could theoretically be moved outside of the critical section,
+ * but that introduces the potential for a race. Without the lock, the
+ * (initially nonempty, since this is the reuse pathway) pageslab we
+ * allocated out of could become otherwise empty while the lock is
+ * dropped. This would force us to deal with a pageslab eviction down
+ * the error pathway, which is a pain.
+ */
+ err = emap_register_boundary(tsdn, shard->emap, edata,
+ SC_NSIZES, /* slab */ false);
+ if (err) {
+ hpdata_unreserve(ps, edata_addr_get(edata),
+ edata_size_get(edata));
+ /*
+ * We should arguably reset dirty state here, but this would
+ * require some sort of prepare + commit functionality that's a
+ * little much to deal with for now.
+ *
+ * We don't have a do_deferred_work down this pathway, on the
+ * principle that we didn't *really* affect shard state (we
+ * tweaked the stats, but our tweaks weren't really accurate).
+ */
+ psset_update_end(&shard->psset, ps);
+ edata_cache_fast_put(tsdn, &shard->ecf, edata);
+ *oom = true;
+ return NULL;
+ }
+
+ hpa_update_purge_hugify_eligibility(tsdn, shard, ps);
+ psset_update_end(&shard->psset, ps);
+ return edata;
+}
+
+static size_t
+hpa_try_alloc_batch_no_grow(tsdn_t *tsdn, hpa_shard_t *shard, size_t size,
+ bool *oom, size_t nallocs, edata_list_active_t *results,
+ bool *deferred_work_generated) {
+ malloc_mutex_lock(tsdn, &shard->mtx);
+ size_t nsuccess = 0;
+ for (; nsuccess < nallocs; nsuccess++) {
+ edata_t *edata = hpa_try_alloc_one_no_grow(tsdn, shard, size,
+ oom);
+ if (edata == NULL) {
+ break;
+ }
+ edata_list_active_append(results, edata);
+ }
+
+ hpa_shard_maybe_do_deferred_work(tsdn, shard, /* forced */ false);
+ *deferred_work_generated = hpa_shard_has_deferred_work(tsdn, shard);
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+ return nsuccess;
+}
+
+static size_t
+hpa_alloc_batch_psset(tsdn_t *tsdn, hpa_shard_t *shard, size_t size,
+ size_t nallocs, edata_list_active_t *results,
+ bool *deferred_work_generated) {
+ assert(size <= shard->opts.slab_max_alloc);
+ bool oom = false;
+
+ size_t nsuccess = hpa_try_alloc_batch_no_grow(tsdn, shard, size, &oom,
+ nallocs, results, deferred_work_generated);
+
+ if (nsuccess == nallocs || oom) {
+ return nsuccess;
+ }
+
+ /*
+ * We didn't OOM, but weren't able to fill everything requested of us;
+ * try to grow.
+ */
+ malloc_mutex_lock(tsdn, &shard->grow_mtx);
+ /*
+ * Check for grow races; maybe some earlier thread expanded the psset
+ * in between when we dropped the main mutex and grabbed the grow mutex.
+ */
+ nsuccess += hpa_try_alloc_batch_no_grow(tsdn, shard, size, &oom,
+ nallocs - nsuccess, results, deferred_work_generated);
+ if (nsuccess == nallocs || oom) {
+ malloc_mutex_unlock(tsdn, &shard->grow_mtx);
+ return nsuccess;
+ }
+
+ /*
+ * Note that we don't hold shard->mtx here (while growing);
+ * deallocations (and allocations of smaller sizes) may still succeed
+ * while we're doing this potentially expensive system call.
+ */
+ hpdata_t *ps = hpa_central_extract(tsdn, shard->central, size, &oom);
+ if (ps == NULL) {
+ malloc_mutex_unlock(tsdn, &shard->grow_mtx);
+ return nsuccess;
+ }
+
+ /*
+ * We got the pageslab; allocate from it. This does an unlock followed
+ * by a lock on the same mutex, and holds the grow mutex while doing
+ * deferred work, but this is an uncommon path; the simplicity is worth
+ * it.
+ */
+ malloc_mutex_lock(tsdn, &shard->mtx);
+ psset_insert(&shard->psset, ps);
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+
+ nsuccess += hpa_try_alloc_batch_no_grow(tsdn, shard, size, &oom,
+ nallocs - nsuccess, results, deferred_work_generated);
+ /*
+ * Drop grow_mtx before doing deferred work; other threads blocked on it
+ * should be allowed to proceed while we're working.
+ */
+ malloc_mutex_unlock(tsdn, &shard->grow_mtx);
+
+ return nsuccess;
+}
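
The grow path above is a double-checked pattern: the bounded allocation is
retried after taking grow_mtx, since another thread may have grown the psset
while the shard mutex was dropped. A minimal standalone sketch of the same
pattern follows; the pool_* names are hypothetical, and pthread mutexes stand
in for the jemalloc mutex/tsdn machinery.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical pool guarded by two locks, mirroring shard->mtx (fast path)
 * and shard->grow_mtx (slow growth path) in hpa_alloc_batch_psset(). */
typedef struct {
	pthread_mutex_t mtx;      /* protects 'available' */
	pthread_mutex_t grow_mtx; /* serializes growth */
	size_t available;
} pool_t;

static bool
pool_try_take(pool_t *pool) {
	pthread_mutex_lock(&pool->mtx);
	bool ok = pool->available > 0;
	if (ok) {
		pool->available--;
	}
	pthread_mutex_unlock(&pool->mtx);
	return ok;
}

/* Stand-in for the expensive hpa_central_extract() call above. */
static void
pool_grow(pool_t *pool, size_t n) {
	pthread_mutex_lock(&pool->mtx);
	pool->available += n;
	pthread_mutex_unlock(&pool->mtx);
}

bool
pool_take(pool_t *pool) {
	if (pool_try_take(pool)) {
		return true;
	}
	pthread_mutex_lock(&pool->grow_mtx);
	/* Re-check: someone else may have grown while we waited for grow_mtx. */
	bool ok = pool_try_take(pool);
	if (!ok) {
		pool_grow(pool, 8);	/* expensive work; fast lock not held */
		ok = pool_try_take(pool);
	}
	pthread_mutex_unlock(&pool->grow_mtx);
	return ok;
}
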
+
+static hpa_shard_t *
+hpa_from_pai(pai_t *self) {
+	assert(self->alloc == &hpa_alloc);
+	assert(self->expand == &hpa_expand);
+	assert(self->shrink == &hpa_shrink);
+	assert(self->dalloc == &hpa_dalloc);
+ return (hpa_shard_t *)self;
+}
+
+static size_t
+hpa_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
+ edata_list_active_t *results, bool *deferred_work_generated) {
+ assert(nallocs > 0);
+ assert((size & PAGE_MASK) == 0);
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+ hpa_shard_t *shard = hpa_from_pai(self);
+
+ if (size > shard->opts.slab_max_alloc) {
+ return 0;
+ }
+
+ size_t nsuccess = hpa_alloc_batch_psset(tsdn, shard, size, nallocs,
+ results, deferred_work_generated);
+
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ /*
+ * Guard the sanity checks with config_debug because the loop cannot be
+ * proven non-circular by the compiler, even if everything within the
+ * loop is optimized away.
+ */
+ if (config_debug) {
+ edata_t *edata;
+ ql_foreach(edata, &results->head, ql_link_active) {
+ emap_assert_mapped(tsdn, shard->emap, edata);
+ assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
+ assert(edata_state_get(edata) == extent_state_active);
+ assert(edata_arena_ind_get(edata) == shard->ind);
+ assert(edata_szind_get_maybe_invalid(edata) ==
+ SC_NSIZES);
+ assert(!edata_slab_get(edata));
+ assert(edata_committed_get(edata));
+ assert(edata_base_get(edata) == edata_addr_get(edata));
+ assert(edata_base_get(edata) != NULL);
+ }
+ }
+ return nsuccess;
+}
+
+static edata_t *
+hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
+ bool guarded, bool frequent_reuse, bool *deferred_work_generated) {
+ assert((size & PAGE_MASK) == 0);
+ assert(!guarded);
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ /* We don't handle alignment or zeroing for now. */
+ if (alignment > PAGE || zero) {
+ return NULL;
+ }
+ /*
+ * An alloc with alignment == PAGE and zero == false is equivalent to a
+ * batch alloc of 1. Just do that, so we can share code.
+ */
+ edata_list_active_t results;
+ edata_list_active_init(&results);
+ size_t nallocs = hpa_alloc_batch(tsdn, self, size, /* nallocs */ 1,
+ &results, deferred_work_generated);
+ assert(nallocs == 0 || nallocs == 1);
+ edata_t *edata = edata_list_active_first(&results);
+ return edata;
+}
+
+static bool
+hpa_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
+ size_t new_size, bool zero, bool *deferred_work_generated) {
+ /* Expand not yet supported. */
+ return true;
+}
+
+static bool
+hpa_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ size_t old_size, size_t new_size, bool *deferred_work_generated) {
+ /* Shrink not yet supported. */
+ return true;
+}
+
+static void
+hpa_dalloc_prepare_unlocked(tsdn_t *tsdn, hpa_shard_t *shard, edata_t *edata) {
+ malloc_mutex_assert_not_owner(tsdn, &shard->mtx);
+
+ assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
+ assert(edata_state_get(edata) == extent_state_active);
+ assert(edata_arena_ind_get(edata) == shard->ind);
+ assert(edata_szind_get_maybe_invalid(edata) == SC_NSIZES);
+ assert(edata_committed_get(edata));
+ assert(edata_base_get(edata) != NULL);
+
+ /*
+ * Another thread shouldn't be trying to touch the metadata of an
+ * allocation being freed. The one exception is a merge attempt from a
+ * lower-addressed PAC extent; in this case we have a nominal race on
+ * the edata metadata bits, but in practice the fact that the PAI bits
+ * are different will prevent any further access. The race is bad, but
+ * benign in practice, and the long term plan is to track enough state
+ * in the rtree to prevent these merge attempts in the first place.
+ */
+ edata_addr_set(edata, edata_base_get(edata));
+ edata_zeroed_set(edata, false);
+ emap_deregister_boundary(tsdn, shard->emap, edata);
+}
+
+static void
+hpa_dalloc_locked(tsdn_t *tsdn, hpa_shard_t *shard, edata_t *edata) {
+ malloc_mutex_assert_owner(tsdn, &shard->mtx);
+
+ /*
+ * Release the metadata early, to avoid having to remember to do it
+ * while we're also doing tricky purging logic. First, we need to grab
+ * a few bits of metadata from it.
+ *
+ * Note that the shard mutex protects ps's metadata too; it wouldn't be
+ * correct to try to read most information out of it without the lock.
+ */
+ hpdata_t *ps = edata_ps_get(edata);
+ /* Currently, all edatas come from pageslabs. */
+ assert(ps != NULL);
+ void *unreserve_addr = edata_addr_get(edata);
+ size_t unreserve_size = edata_size_get(edata);
+ edata_cache_fast_put(tsdn, &shard->ecf, edata);
+
+ psset_update_begin(&shard->psset, ps);
+ hpdata_unreserve(ps, unreserve_addr, unreserve_size);
+ hpa_update_purge_hugify_eligibility(tsdn, shard, ps);
+ psset_update_end(&shard->psset, ps);
+}
+
+static void
+hpa_dalloc_batch(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list,
+ bool *deferred_work_generated) {
+ hpa_shard_t *shard = hpa_from_pai(self);
+
+ edata_t *edata;
+ ql_foreach(edata, &list->head, ql_link_active) {
+ hpa_dalloc_prepare_unlocked(tsdn, shard, edata);
+ }
+
+ malloc_mutex_lock(tsdn, &shard->mtx);
+ /* Now, remove from the list. */
+ while ((edata = edata_list_active_first(list)) != NULL) {
+ edata_list_active_remove(list, edata);
+ hpa_dalloc_locked(tsdn, shard, edata);
+ }
+ hpa_shard_maybe_do_deferred_work(tsdn, shard, /* forced */ false);
+ *deferred_work_generated =
+ hpa_shard_has_deferred_work(tsdn, shard);
+
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+}
+
+static void
+hpa_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ bool *deferred_work_generated) {
+ assert(!edata_guarded_get(edata));
+ /* Just a dalloc_batch of size 1; this lets us share logic. */
+ edata_list_active_t dalloc_list;
+ edata_list_active_init(&dalloc_list);
+ edata_list_active_append(&dalloc_list, edata);
+ hpa_dalloc_batch(tsdn, self, &dalloc_list, deferred_work_generated);
+}
+
+/*
+ * Calculate time until either purging or hugification ought to happen.
+ * Called by background threads.
+ */
+static uint64_t
+hpa_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) {
+ hpa_shard_t *shard = hpa_from_pai(self);
+ uint64_t time_ns = BACKGROUND_THREAD_DEFERRED_MAX;
+
+ malloc_mutex_lock(tsdn, &shard->mtx);
+
+ hpdata_t *to_hugify = psset_pick_hugify(&shard->psset);
+ if (to_hugify != NULL) {
+ nstime_t time_hugify_allowed =
+ hpdata_time_hugify_allowed(to_hugify);
+ uint64_t since_hugify_allowed_ms =
+ shard->central->hooks.ms_since(&time_hugify_allowed);
+ /*
+ * If not enough time has passed since hugification was allowed,
+ * sleep for the rest.
+ */
+ if (since_hugify_allowed_ms < shard->opts.hugify_delay_ms) {
+ time_ns = shard->opts.hugify_delay_ms -
+ since_hugify_allowed_ms;
+ time_ns *= 1000 * 1000;
+ } else {
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+ return BACKGROUND_THREAD_DEFERRED_MIN;
+ }
+ }
+
+ if (hpa_should_purge(tsdn, shard)) {
+ /*
+ * If we haven't purged before, no need to check interval
+ * between purges. Simply purge as soon as possible.
+ */
+ if (shard->stats.npurge_passes == 0) {
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+ return BACKGROUND_THREAD_DEFERRED_MIN;
+ }
+ uint64_t since_last_purge_ms = shard->central->hooks.ms_since(
+ &shard->last_purge);
+
+ if (since_last_purge_ms < shard->opts.min_purge_interval_ms) {
+ uint64_t until_purge_ns;
+ until_purge_ns = shard->opts.min_purge_interval_ms -
+ since_last_purge_ms;
+ until_purge_ns *= 1000 * 1000;
+
+ if (until_purge_ns < time_ns) {
+ time_ns = until_purge_ns;
+ }
+ } else {
+ time_ns = BACKGROUND_THREAD_DEFERRED_MIN;
+ }
+ }
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+ return time_ns;
+}
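
As a worked example of the millisecond-to-nanosecond conversion above (the
10000 ms delay and 4000 ms of elapsed time are assumed values, not necessarily
the library defaults): the shard would ask the background thread to sleep for
the remaining 6000 ms.

#include <stdint.h>

/* Worked example only; both inputs are assumed values. */
static uint64_t
example_sleep_ns(void) {
	uint64_t hugify_delay_ms = 10000;
	uint64_t since_hugify_allowed_ms = 4000;
	return (hugify_delay_ms - since_hugify_allowed_ms) * 1000 * 1000;
	/* == 6000000000 ns, i.e. sleep for the remaining 6 seconds. */
}
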
+
+void
+hpa_shard_disable(tsdn_t *tsdn, hpa_shard_t *shard) {
+ hpa_do_consistency_checks(shard);
+
+ malloc_mutex_lock(tsdn, &shard->mtx);
+ edata_cache_fast_disable(tsdn, &shard->ecf);
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+}
+
+static void
+hpa_shard_assert_stats_empty(psset_bin_stats_t *bin_stats) {
+ assert(bin_stats->npageslabs == 0);
+ assert(bin_stats->nactive == 0);
+}
+
+static void
+hpa_assert_empty(tsdn_t *tsdn, hpa_shard_t *shard, psset_t *psset) {
+ malloc_mutex_assert_owner(tsdn, &shard->mtx);
+ for (int huge = 0; huge <= 1; huge++) {
+ hpa_shard_assert_stats_empty(&psset->stats.full_slabs[huge]);
+ for (pszind_t i = 0; i < PSSET_NPSIZES; i++) {
+ hpa_shard_assert_stats_empty(
+ &psset->stats.nonfull_slabs[i][huge]);
+ }
+ }
+}
+
+void
+hpa_shard_destroy(tsdn_t *tsdn, hpa_shard_t *shard) {
+ hpa_do_consistency_checks(shard);
+ /*
+ * By the time we're here, the arena code should have dalloc'd all the
+ * active extents, which means we should have eventually evicted
+ * everything from the psset, so it shouldn't be able to serve even a
+ * 1-page allocation.
+ */
+ if (config_debug) {
+ malloc_mutex_lock(tsdn, &shard->mtx);
+ hpa_assert_empty(tsdn, shard, &shard->psset);
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+ }
+ hpdata_t *ps;
+ while ((ps = psset_pick_alloc(&shard->psset, PAGE)) != NULL) {
+ /* There should be no allocations anywhere. */
+ assert(hpdata_empty(ps));
+ psset_remove(&shard->psset, ps);
+ shard->central->hooks.unmap(hpdata_addr_get(ps), HUGEPAGE);
+ }
+}
+
+void
+hpa_shard_set_deferral_allowed(tsdn_t *tsdn, hpa_shard_t *shard,
+ bool deferral_allowed) {
+ hpa_do_consistency_checks(shard);
+
+ malloc_mutex_lock(tsdn, &shard->mtx);
+ bool deferral_previously_allowed = shard->opts.deferral_allowed;
+ shard->opts.deferral_allowed = deferral_allowed;
+ if (deferral_previously_allowed && !deferral_allowed) {
+ hpa_shard_maybe_do_deferred_work(tsdn, shard,
+ /* forced */ true);
+ }
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+}
+
+void
+hpa_shard_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard) {
+ hpa_do_consistency_checks(shard);
+
+ malloc_mutex_lock(tsdn, &shard->mtx);
+ hpa_shard_maybe_do_deferred_work(tsdn, shard, /* forced */ true);
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+}
+
+void
+hpa_shard_prefork3(tsdn_t *tsdn, hpa_shard_t *shard) {
+ hpa_do_consistency_checks(shard);
+
+ malloc_mutex_prefork(tsdn, &shard->grow_mtx);
+}
+
+void
+hpa_shard_prefork4(tsdn_t *tsdn, hpa_shard_t *shard) {
+ hpa_do_consistency_checks(shard);
+
+ malloc_mutex_prefork(tsdn, &shard->mtx);
+}
+
+void
+hpa_shard_postfork_parent(tsdn_t *tsdn, hpa_shard_t *shard) {
+ hpa_do_consistency_checks(shard);
+
+ malloc_mutex_postfork_parent(tsdn, &shard->grow_mtx);
+ malloc_mutex_postfork_parent(tsdn, &shard->mtx);
+}
+
+void
+hpa_shard_postfork_child(tsdn_t *tsdn, hpa_shard_t *shard) {
+ hpa_do_consistency_checks(shard);
+
+ malloc_mutex_postfork_child(tsdn, &shard->grow_mtx);
+ malloc_mutex_postfork_child(tsdn, &shard->mtx);
+}
diff --git a/deps/jemalloc/src/hpa_hooks.c b/deps/jemalloc/src/hpa_hooks.c
new file mode 100644
index 000000000..ade581e8d
--- /dev/null
+++ b/deps/jemalloc/src/hpa_hooks.c
@@ -0,0 +1,63 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/hpa_hooks.h"
+
+static void *hpa_hooks_map(size_t size);
+static void hpa_hooks_unmap(void *ptr, size_t size);
+static void hpa_hooks_purge(void *ptr, size_t size);
+static void hpa_hooks_hugify(void *ptr, size_t size);
+static void hpa_hooks_dehugify(void *ptr, size_t size);
+static void hpa_hooks_curtime(nstime_t *r_nstime, bool first_reading);
+static uint64_t hpa_hooks_ms_since(nstime_t *past_nstime);
+
+hpa_hooks_t hpa_hooks_default = {
+ &hpa_hooks_map,
+ &hpa_hooks_unmap,
+ &hpa_hooks_purge,
+ &hpa_hooks_hugify,
+ &hpa_hooks_dehugify,
+ &hpa_hooks_curtime,
+ &hpa_hooks_ms_since
+};
+
+static void *
+hpa_hooks_map(size_t size) {
+ bool commit = true;
+ return pages_map(NULL, size, HUGEPAGE, &commit);
+}
+
+static void
+hpa_hooks_unmap(void *ptr, size_t size) {
+ pages_unmap(ptr, size);
+}
+
+static void
+hpa_hooks_purge(void *ptr, size_t size) {
+ pages_purge_forced(ptr, size);
+}
+
+static void
+hpa_hooks_hugify(void *ptr, size_t size) {
+ bool err = pages_huge(ptr, size);
+ (void)err;
+}
+
+static void
+hpa_hooks_dehugify(void *ptr, size_t size) {
+ bool err = pages_nohuge(ptr, size);
+ (void)err;
+}
+
+static void
+hpa_hooks_curtime(nstime_t *r_nstime, bool first_reading) {
+ if (first_reading) {
+ nstime_init_zero(r_nstime);
+ }
+ nstime_update(r_nstime);
+}
+
+static uint64_t
+hpa_hooks_ms_since(nstime_t *past_nstime) {
+ return nstime_ns_since(past_nstime) / 1000 / 1000;
+}
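
hpa_hooks_default above is the only hooks table defined in this file; a test
or experiment can supply its own hpa_hooks_t with the same positional layout
(map, unmap, purge, hugify, dehugify, curtime, ms_since). A sketch, assuming
it lives alongside the static functions above; the counting wrapper and its
names are hypothetical, and the counter is not thread-safe (fine for a
single-threaded test).

static size_t npurge_calls;

static void
counting_purge(void *ptr, size_t size) {
	npurge_calls++;
	pages_purge_forced(ptr, size);
}

hpa_hooks_t hpa_hooks_counting = {
	&hpa_hooks_map,
	&hpa_hooks_unmap,
	&counting_purge,
	&hpa_hooks_hugify,
	&hpa_hooks_dehugify,
	&hpa_hooks_curtime,
	&hpa_hooks_ms_since
};
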
diff --git a/deps/jemalloc/src/hpdata.c b/deps/jemalloc/src/hpdata.c
new file mode 100644
index 000000000..e7d7294c7
--- /dev/null
+++ b/deps/jemalloc/src/hpdata.c
@@ -0,0 +1,325 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/hpdata.h"
+
+static int
+hpdata_age_comp(const hpdata_t *a, const hpdata_t *b) {
+ uint64_t a_age = hpdata_age_get(a);
+ uint64_t b_age = hpdata_age_get(b);
+ /*
+ * hpdata ages are operation counts in the psset; no two should be the
+ * same.
+ */
+ assert(a_age != b_age);
+ return (a_age > b_age) - (a_age < b_age);
+}
+
+ph_gen(, hpdata_age_heap, hpdata_t, age_link, hpdata_age_comp)
+
+void
+hpdata_init(hpdata_t *hpdata, void *addr, uint64_t age) {
+ hpdata_addr_set(hpdata, addr);
+ hpdata_age_set(hpdata, age);
+ hpdata->h_huge = false;
+ hpdata->h_alloc_allowed = true;
+ hpdata->h_in_psset_alloc_container = false;
+ hpdata->h_purge_allowed = false;
+ hpdata->h_hugify_allowed = false;
+ hpdata->h_in_psset_hugify_container = false;
+ hpdata->h_mid_purge = false;
+ hpdata->h_mid_hugify = false;
+ hpdata->h_updating = false;
+ hpdata->h_in_psset = false;
+ hpdata_longest_free_range_set(hpdata, HUGEPAGE_PAGES);
+ hpdata->h_nactive = 0;
+ fb_init(hpdata->active_pages, HUGEPAGE_PAGES);
+ hpdata->h_ntouched = 0;
+ fb_init(hpdata->touched_pages, HUGEPAGE_PAGES);
+
+ hpdata_assert_consistent(hpdata);
+}
+
+void *
+hpdata_reserve_alloc(hpdata_t *hpdata, size_t sz) {
+ hpdata_assert_consistent(hpdata);
+ /*
+ * This is a metadata change; the hpdata should therefore either not be
+ * in the psset, or should have explicitly marked itself as being
+ * mid-update.
+ */
+ assert(!hpdata->h_in_psset || hpdata->h_updating);
+ assert(hpdata->h_alloc_allowed);
+ assert((sz & PAGE_MASK) == 0);
+ size_t npages = sz >> LG_PAGE;
+ assert(npages <= hpdata_longest_free_range_get(hpdata));
+
+ size_t result;
+
+ size_t start = 0;
+ /*
+	 * These initializations are dead stores, but without them the compiler
+	 * would issue uninitialized-use warnings, since it can't tell
+	 * statically that found is always true below.
+ */
+ size_t begin = 0;
+ size_t len = 0;
+
+ size_t largest_unchosen_range = 0;
+ while (true) {
+ bool found = fb_urange_iter(hpdata->active_pages,
+ HUGEPAGE_PAGES, start, &begin, &len);
+ /*
+ * A precondition to this function is that hpdata must be able
+ * to serve the allocation.
+ */
+ assert(found);
+ assert(len <= hpdata_longest_free_range_get(hpdata));
+ if (len >= npages) {
+ /*
+ * We use first-fit within the page slabs; this gives
+ * bounded worst-case fragmentation within a slab. It's
+ * not necessarily right; we could experiment with
+ * various other options.
+ */
+ break;
+ }
+ if (len > largest_unchosen_range) {
+ largest_unchosen_range = len;
+ }
+ start = begin + len;
+ }
+ /* We found a range; remember it. */
+ result = begin;
+ fb_set_range(hpdata->active_pages, HUGEPAGE_PAGES, begin, npages);
+ hpdata->h_nactive += npages;
+
+ /*
+ * We might be about to dirty some memory for the first time; update our
+ * count if so.
+ */
+ size_t new_dirty = fb_ucount(hpdata->touched_pages, HUGEPAGE_PAGES,
+ result, npages);
+ fb_set_range(hpdata->touched_pages, HUGEPAGE_PAGES, result, npages);
+ hpdata->h_ntouched += new_dirty;
+
+ /*
+ * If we allocated out of a range that was the longest in the hpdata, it
+ * might be the only one of that size and we'll have to adjust the
+ * metadata.
+ */
+ if (len == hpdata_longest_free_range_get(hpdata)) {
+ start = begin + npages;
+ while (start < HUGEPAGE_PAGES) {
+ bool found = fb_urange_iter(hpdata->active_pages,
+ HUGEPAGE_PAGES, start, &begin, &len);
+ if (!found) {
+ break;
+ }
+ assert(len <= hpdata_longest_free_range_get(hpdata));
+ if (len == hpdata_longest_free_range_get(hpdata)) {
+ largest_unchosen_range = len;
+ break;
+ }
+ if (len > largest_unchosen_range) {
+ largest_unchosen_range = len;
+ }
+ start = begin + len;
+ }
+ hpdata_longest_free_range_set(hpdata, largest_unchosen_range);
+ }
+
+ hpdata_assert_consistent(hpdata);
+ return (void *)(
+ (uintptr_t)hpdata_addr_get(hpdata) + (result << LG_PAGE));
+}
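
The reservation above is first-fit over the page bitmap (see the comment about
bounded worst-case fragmentation). A self-contained toy version of the same
idea over a single 64-bit word, independent of the fb_* helpers used here:

#include <stdint.h>
#include <stddef.h>

/* Toy first-fit over a 64-page slab: bit i set means page i is in use.
 * Returns the index of the first run of npages free pages, or -1. */
static int
first_fit(uint64_t used, size_t npages) {
	size_t run = 0;
	for (int i = 0; i < 64; i++) {
		if (used & (UINT64_C(1) << i)) {
			run = 0;
		} else if (++run == npages) {
			return i - (int)npages + 1;
		}
	}
	return -1;
}

/* Example: pages 0, 1, 2 and 5 in use (used == 0x27), so a 2-page request
 * lands at page 3: first_fit(0x27, 2) == 3. */
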
+
+void
+hpdata_unreserve(hpdata_t *hpdata, void *addr, size_t sz) {
+ hpdata_assert_consistent(hpdata);
+ /* See the comment in reserve. */
+ assert(!hpdata->h_in_psset || hpdata->h_updating);
+ assert(((uintptr_t)addr & PAGE_MASK) == 0);
+ assert((sz & PAGE_MASK) == 0);
+ size_t begin = ((uintptr_t)addr - (uintptr_t)hpdata_addr_get(hpdata))
+ >> LG_PAGE;
+ assert(begin < HUGEPAGE_PAGES);
+ size_t npages = sz >> LG_PAGE;
+ size_t old_longest_range = hpdata_longest_free_range_get(hpdata);
+
+ fb_unset_range(hpdata->active_pages, HUGEPAGE_PAGES, begin, npages);
+ /* We might have just created a new, larger range. */
+ size_t new_begin = (fb_fls(hpdata->active_pages, HUGEPAGE_PAGES,
+ begin) + 1);
+ size_t new_end = fb_ffs(hpdata->active_pages, HUGEPAGE_PAGES,
+ begin + npages - 1);
+ size_t new_range_len = new_end - new_begin;
+
+ if (new_range_len > old_longest_range) {
+ hpdata_longest_free_range_set(hpdata, new_range_len);
+ }
+
+ hpdata->h_nactive -= npages;
+
+ hpdata_assert_consistent(hpdata);
+}
+
+size_t
+hpdata_purge_begin(hpdata_t *hpdata, hpdata_purge_state_t *purge_state) {
+ hpdata_assert_consistent(hpdata);
+ /*
+ * See the comment below; we might purge any inactive extent, so it's
+ * unsafe for any other thread to turn any inactive extent active while
+ * we're operating on it.
+ */
+ assert(!hpdata_alloc_allowed_get(hpdata));
+
+ purge_state->npurged = 0;
+ purge_state->next_purge_search_begin = 0;
+
+ /*
+ * Initialize to_purge.
+ *
+ * It's possible to end up in situations where two dirty extents are
+ * separated by a retained extent:
+ * - 1 page allocated.
+ * - 1 page allocated.
+	 * - 1 page allocated.
+ *
+ * If the middle page is freed and purged, and then the first and third
+ * pages are freed, and then another purge pass happens, the hpdata
+ * looks like this:
+ * - 1 page dirty.
+ * - 1 page retained.
+ * - 1 page dirty.
+ *
+ * But it's safe to do a single 3-page purge.
+ *
+ * We do this by first computing the dirty pages, and then filling in
+	 * any gaps by extending each range in the dirty bitmap until
+ * the next active page. This purges more pages, but the expensive part
+ * of purging is the TLB shootdowns, rather than the kernel state
+ * tracking; doing a little bit more of the latter is fine if it saves
+ * us from doing some of the former.
+ */
+
+ /*
+ * The dirty pages are those that are touched but not active. Note that
+ * in a normal-ish case, HUGEPAGE_PAGES is something like 512 and the
+ * fb_group_t is 64 bits, so this is 64 bytes, spread across 8
+ * fb_group_ts.
+ */
+ fb_group_t dirty_pages[FB_NGROUPS(HUGEPAGE_PAGES)];
+ fb_init(dirty_pages, HUGEPAGE_PAGES);
+ fb_bit_not(dirty_pages, hpdata->active_pages, HUGEPAGE_PAGES);
+ fb_bit_and(dirty_pages, dirty_pages, hpdata->touched_pages,
+ HUGEPAGE_PAGES);
+
+ fb_init(purge_state->to_purge, HUGEPAGE_PAGES);
+ size_t next_bit = 0;
+ while (next_bit < HUGEPAGE_PAGES) {
+ size_t next_dirty = fb_ffs(dirty_pages, HUGEPAGE_PAGES,
+ next_bit);
+ /* Recall that fb_ffs returns nbits if no set bit is found. */
+ if (next_dirty == HUGEPAGE_PAGES) {
+ break;
+ }
+ size_t next_active = fb_ffs(hpdata->active_pages,
+ HUGEPAGE_PAGES, next_dirty);
+ /*
+ * Don't purge past the end of the dirty extent, into retained
+ * pages. This helps the kernel a tiny bit, but honestly it's
+ * mostly helpful for testing (where we tend to write test cases
+ * that think in terms of the dirty ranges).
+ */
+ ssize_t last_dirty = fb_fls(dirty_pages, HUGEPAGE_PAGES,
+ next_active - 1);
+ assert(last_dirty >= 0);
+ assert((size_t)last_dirty >= next_dirty);
+ assert((size_t)last_dirty - next_dirty + 1 <= HUGEPAGE_PAGES);
+
+ fb_set_range(purge_state->to_purge, HUGEPAGE_PAGES, next_dirty,
+ last_dirty - next_dirty + 1);
+ next_bit = next_active + 1;
+ }
+
+ /* We should purge, at least, everything dirty. */
+ size_t ndirty = hpdata->h_ntouched - hpdata->h_nactive;
+ purge_state->ndirty_to_purge = ndirty;
+ assert(ndirty <= fb_scount(
+ purge_state->to_purge, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES));
+ assert(ndirty == fb_scount(dirty_pages, HUGEPAGE_PAGES, 0,
+ HUGEPAGE_PAGES));
+
+ hpdata_assert_consistent(hpdata);
+
+ return ndirty;
+}
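
A tiny worked version of the bitmap arithmetic above, using the three-page
scenario from the comment (middle page retained, outer pages dirty); plain
uint64_t masks stand in for the fb_* bitmaps:

#include <assert.h>
#include <stdint.h>

int
main(void) {
	/* Pages 0..2: bit i set in 'touched' means page i has been written;
	 * bit i set in 'active' means it is currently allocated. */
	uint64_t active = 0x0;	/* everything has been freed */
	uint64_t touched = 0x5;	/* pages 0 and 2 dirty; page 1 was purged */

	/* dirty = touched & ~active, as in hpdata_purge_begin(). */
	uint64_t dirty = touched & ~active;
	assert(dirty == 0x5);

	/* Coalescing across the retained gap yields one 3-page purge range
	 * covering pages 0..2, instead of two 1-page ranges. */
	uint64_t to_purge = 0x7;
	assert((to_purge & dirty) == dirty);	/* every dirty page is purged */
	return 0;
}
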
+
+bool
+hpdata_purge_next(hpdata_t *hpdata, hpdata_purge_state_t *purge_state,
+ void **r_purge_addr, size_t *r_purge_size) {
+ /*
+ * Note that we don't have a consistency check here; we're accessing
+ * hpdata without synchronization, and therefore have no right to expect
+ * a consistent state.
+ */
+ assert(!hpdata_alloc_allowed_get(hpdata));
+
+ if (purge_state->next_purge_search_begin == HUGEPAGE_PAGES) {
+ return false;
+ }
+ size_t purge_begin;
+ size_t purge_len;
+ bool found_range = fb_srange_iter(purge_state->to_purge, HUGEPAGE_PAGES,
+ purge_state->next_purge_search_begin, &purge_begin, &purge_len);
+ if (!found_range) {
+ return false;
+ }
+
+ *r_purge_addr = (void *)(
+ (uintptr_t)hpdata_addr_get(hpdata) + purge_begin * PAGE);
+ *r_purge_size = purge_len * PAGE;
+
+ purge_state->next_purge_search_begin = purge_begin + purge_len;
+ purge_state->npurged += purge_len;
+ assert(purge_state->npurged <= HUGEPAGE_PAGES);
+
+ return true;
+}
+
+void
+hpdata_purge_end(hpdata_t *hpdata, hpdata_purge_state_t *purge_state) {
+ assert(!hpdata_alloc_allowed_get(hpdata));
+ hpdata_assert_consistent(hpdata);
+ /* See the comment in reserve. */
+ assert(!hpdata->h_in_psset || hpdata->h_updating);
+
+ assert(purge_state->npurged == fb_scount(purge_state->to_purge,
+ HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES));
+ assert(purge_state->npurged >= purge_state->ndirty_to_purge);
+
+ fb_bit_not(purge_state->to_purge, purge_state->to_purge,
+ HUGEPAGE_PAGES);
+ fb_bit_and(hpdata->touched_pages, hpdata->touched_pages,
+ purge_state->to_purge, HUGEPAGE_PAGES);
+ assert(hpdata->h_ntouched >= purge_state->ndirty_to_purge);
+ hpdata->h_ntouched -= purge_state->ndirty_to_purge;
+
+ hpdata_assert_consistent(hpdata);
+}
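
hpa_try_purge() earlier in this diff is the real caller of the begin/next/end
protocol above. A distilled sketch of the calling convention, with the shard
locking elided; the hypothetical do_purge callback stands in for the purge
hook, and the caller is assumed to have already disallowed allocation on ps:

static void
purge_all_dirty(hpdata_t *ps, void (*do_purge)(void *, size_t)) {
	hpdata_purge_state_t purge_state;
	size_t ndirty = hpdata_purge_begin(ps, &purge_state);
	(void)ndirty;

	void *addr;
	size_t size;
	while (hpdata_purge_next(ps, &purge_state, &addr, &size)) {
		do_purge(addr, size);	/* e.g. madvise(MADV_DONTNEED) */
	}
	hpdata_purge_end(ps, &purge_state);
}
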
+
+void
+hpdata_hugify(hpdata_t *hpdata) {
+ hpdata_assert_consistent(hpdata);
+ hpdata->h_huge = true;
+ fb_set_range(hpdata->touched_pages, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES);
+ hpdata->h_ntouched = HUGEPAGE_PAGES;
+ hpdata_assert_consistent(hpdata);
+}
+
+void
+hpdata_dehugify(hpdata_t *hpdata) {
+ hpdata_assert_consistent(hpdata);
+ hpdata->h_huge = false;
+ hpdata_assert_consistent(hpdata);
+}
diff --git a/deps/jemalloc/src/inspect.c b/deps/jemalloc/src/inspect.c
new file mode 100644
index 000000000..911b5d524
--- /dev/null
+++ b/deps/jemalloc/src/inspect.c
@@ -0,0 +1,77 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+void
+inspect_extent_util_stats_get(tsdn_t *tsdn, const void *ptr, size_t *nfree,
+ size_t *nregs, size_t *size) {
+ assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL);
+
+ const edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
+ if (unlikely(edata == NULL)) {
+ *nfree = *nregs = *size = 0;
+ return;
+ }
+
+ *size = edata_size_get(edata);
+ if (!edata_slab_get(edata)) {
+ *nfree = 0;
+ *nregs = 1;
+ } else {
+ *nfree = edata_nfree_get(edata);
+ *nregs = bin_infos[edata_szind_get(edata)].nregs;
+ assert(*nfree <= *nregs);
+ assert(*nfree * edata_usize_get(edata) <= *size);
+ }
+}
+
+void
+inspect_extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
+ size_t *nfree, size_t *nregs, size_t *size, size_t *bin_nfree,
+ size_t *bin_nregs, void **slabcur_addr) {
+ assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL
+ && bin_nfree != NULL && bin_nregs != NULL && slabcur_addr != NULL);
+
+ const edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
+ if (unlikely(edata == NULL)) {
+ *nfree = *nregs = *size = *bin_nfree = *bin_nregs = 0;
+ *slabcur_addr = NULL;
+ return;
+ }
+
+ *size = edata_size_get(edata);
+ if (!edata_slab_get(edata)) {
+ *nfree = *bin_nfree = *bin_nregs = 0;
+ *nregs = 1;
+ *slabcur_addr = NULL;
+ return;
+ }
+
+ *nfree = edata_nfree_get(edata);
+ const szind_t szind = edata_szind_get(edata);
+ *nregs = bin_infos[szind].nregs;
+ assert(*nfree <= *nregs);
+ assert(*nfree * edata_usize_get(edata) <= *size);
+
+ arena_t *arena = (arena_t *)atomic_load_p(
+ &arenas[edata_arena_ind_get(edata)], ATOMIC_RELAXED);
+ assert(arena != NULL);
+ const unsigned binshard = edata_binshard_get(edata);
+ bin_t *bin = arena_get_bin(arena, szind, binshard);
+
+ malloc_mutex_lock(tsdn, &bin->lock);
+ if (config_stats) {
+ *bin_nregs = *nregs * bin->stats.curslabs;
+ assert(*bin_nregs >= bin->stats.curregs);
+ *bin_nfree = *bin_nregs - bin->stats.curregs;
+ } else {
+ *bin_nfree = *bin_nregs = 0;
+ }
+ edata_t *slab;
+ if (bin->slabcur != NULL) {
+ slab = bin->slabcur;
+ } else {
+ slab = edata_heap_first(&bin->slabs_nonfull);
+ }
+ *slabcur_addr = slab != NULL ? edata_addr_get(slab) : NULL;
+ malloc_mutex_unlock(tsdn, &bin->lock);
+}
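
These two functions back jemalloc's experimental utilization mallctls
("experimental.utilization.query" and "experimental.utilization.batch_query").
A hedged usage sketch of the batch variant for a single pointer; the layout of
each result slot (three size_t values in the order nfree, nregs, size,
matching inspect_extent_util_stats_get() above) is an assumption here, and in
Redis's bundled build the public symbols carry the je_ prefix (je_mallctl,
je_malloc).

#include <stdio.h>
#include <stdlib.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	void *p = malloc(100);

	/* One queried pointer -> one {nfree, nregs, size} triple
	 * (assumed layout; see inspect_extent_util_stats_get above). */
	size_t out[3] = {0, 0, 0};
	size_t out_len = sizeof(out);
	void *ptrs[1] = {p};

	int err = mallctl("experimental.utilization.batch_query",
	    out, &out_len, ptrs, sizeof(ptrs));
	if (err == 0) {
		printf("nfree=%zu nregs=%zu size=%zu\n",
		    out[0], out[1], out[2]);
	}
	free(p);
	return 0;
}
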
diff --git a/deps/jemalloc/src/jemalloc.c b/deps/jemalloc/src/jemalloc.c
index cbc31bbd5..83026093b 100644
--- a/deps/jemalloc/src/jemalloc.c
+++ b/deps/jemalloc/src/jemalloc.c
@@ -4,20 +4,26 @@
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/buf_writer.h"
#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/emap.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/fxp.h"
+#include "jemalloc/internal/san.h"
#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/log.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/safety_check.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
+#include "jemalloc/internal/thread_event.h"
#include "jemalloc/internal/util.h"
/******************************************************************************/
@@ -29,6 +35,29 @@ const char *je_malloc_conf
JEMALLOC_ATTR(weak)
#endif
;
+/*
+ * The usual rule is that the closer to runtime you are, the higher priority
+ * your configuration settings are (so the jemalloc config options get lower
+ * priority than the per-binary setting, which gets lower priority than the /etc
+ * setting, which gets lower priority than the environment settings).
+ *
+ * But it's a fairly common use case in some testing environments for a user to
+ * be able to control the binary, but nothing else (e.g. a performance canary
+ * uses the production OS and environment variables, but can run any binary in
+ * those circumstances). For these use cases, it's handy to have an in-binary
+ * mechanism for overriding environment variable settings, with the idea that if
+ * the results are positive they get promoted to the official settings, and
+ * moved from the binary to the environment variable.
+ *
+ * We don't actually want this to be widespread, so we'll give it a silly name
+ * and not mention it in headers or documentation.
+ */
+const char *je_malloc_conf_2_conf_harder
+#ifndef _WIN32
+ JEMALLOC_ATTR(weak)
+#endif
+ ;
+
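
A sketch of how a test binary might use this override; the option values are
just examples, and the exact exported symbol name depends on the build's
prefix configuration (in an unprefixed build it is expected to mirror
malloc_conf).

/* Compiled into the test binary itself, not into jemalloc. */
const char *malloc_conf_2_conf_harder = "narenas:1,tcache:false";
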
bool opt_abort =
#ifdef JEMALLOC_DEBUG
true
@@ -66,16 +95,73 @@ bool opt_junk_free =
false
#endif
;
+bool opt_trust_madvise =
+#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
+ false
+#else
+ true
+#endif
+ ;
+
+bool opt_cache_oblivious =
+#ifdef JEMALLOC_CACHE_OBLIVIOUS
+ true
+#else
+ false
+#endif
+ ;
+
+zero_realloc_action_t opt_zero_realloc_action =
+#ifdef JEMALLOC_ZERO_REALLOC_DEFAULT_FREE
+ zero_realloc_action_free
+#else
+ zero_realloc_action_alloc
+#endif
+ ;
+
+atomic_zu_t zero_realloc_count = ATOMIC_INIT(0);
+
+const char *zero_realloc_mode_names[] = {
+ "alloc",
+ "free",
+ "abort",
+};
+
+/*
+ * These are the documented values for junk fill debugging facilities -- see the
+ * man page.
+ */
+static const uint8_t junk_alloc_byte = 0xa5;
+static const uint8_t junk_free_byte = 0x5a;
+
+static void default_junk_alloc(void *ptr, size_t usize) {
+ memset(ptr, junk_alloc_byte, usize);
+}
+
+static void default_junk_free(void *ptr, size_t usize) {
+ memset(ptr, junk_free_byte, usize);
+}
+
+void (*junk_alloc_callback)(void *ptr, size_t size) = &default_junk_alloc;
+void (*junk_free_callback)(void *ptr, size_t size) = &default_junk_free;
bool opt_utrace = false;
bool opt_xmalloc = false;
+bool opt_experimental_infallible_new = false;
bool opt_zero = false;
unsigned opt_narenas = 0;
+fxp_t opt_narenas_ratio = FXP_INIT_INT(4);
unsigned ncpus;
/* Protects arenas initialization. */
malloc_mutex_t arenas_lock;
+
+/* The global hpa, and whether it's on. */
+bool opt_hpa = false;
+hpa_shard_opts_t opt_hpa_opts = HPA_SHARD_OPTS_DEFAULT;
+sec_opts_t opt_hpa_sec_opts = SEC_OPTS_DEFAULT;
+
/*
* Arenas that are used to service external requests. Not all elements of the
* arenas array are necessarily used; arenas are created lazily as needed.
@@ -94,13 +180,7 @@ static arena_t *a0; /* arenas[0]. */
unsigned narenas_auto;
unsigned manual_arena_base;
-typedef enum {
- malloc_init_uninitialized = 3,
- malloc_init_a0_initialized = 2,
- malloc_init_recursible = 1,
- malloc_init_initialized = 0 /* Common case --> jnz. */
-} malloc_init_t;
-static malloc_init_t malloc_init_state = malloc_init_uninitialized;
+malloc_init_t malloc_init_state = malloc_init_uninitialized;
/* False should be the common case. Set to true to trigger initialization. */
bool malloc_slow = true;
@@ -180,7 +260,7 @@ typedef struct {
ut.p = (a); \
ut.s = (b); \
ut.r = (c); \
- utrace(&ut, sizeof(ut)); \
+ UTRACE_CALL(&ut, sizeof(ut)); \
errno = utrace_serrno; \
} \
} while (0)
@@ -205,11 +285,6 @@ static bool malloc_init_hard(void);
* Begin miscellaneous support functions.
*/
-bool
-malloc_initialized(void) {
- return (malloc_init_state == malloc_init_initialized);
-}
-
JEMALLOC_ALWAYS_INLINE bool
malloc_init_a0(void) {
if (unlikely(malloc_init_state == malloc_init_uninitialized)) {
@@ -257,7 +332,7 @@ a0dalloc(void *ptr) {
}
/*
- * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-senstive
+ * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
* situations that cannot tolerate TLS variable access (TLS allocation and very
* early internal data structure initialization).
*/
@@ -315,7 +390,7 @@ narenas_total_get(void) {
/* Create a new arena and insert it into the arenas array at index ind. */
static arena_t *
-arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
+arena_init_locked(tsdn_t *tsdn, unsigned ind, const arena_config_t *config) {
arena_t *arena;
assert(ind <= narenas_total_get());
@@ -337,7 +412,7 @@ arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
}
/* Actually initialize the arena. */
- arena = arena_new(tsdn, ind, extent_hooks);
+ arena = arena_new(tsdn, ind, config);
return arena;
}
@@ -361,11 +436,11 @@ arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) {
}
arena_t *
-arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
+arena_init(tsdn_t *tsdn, unsigned ind, const arena_config_t *config) {
arena_t *arena;
malloc_mutex_lock(tsdn, &arenas_lock);
- arena = arena_init_locked(tsdn, ind, extent_hooks);
+ arena = arena_init_locked(tsdn, ind, config);
malloc_mutex_unlock(tsdn, &arenas_lock);
arena_new_create_background_thread(tsdn, ind);
@@ -394,14 +469,19 @@ arena_bind(tsd_t *tsd, unsigned ind, bool internal) {
}
void
-arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) {
- arena_t *oldarena, *newarena;
+arena_migrate(tsd_t *tsd, arena_t *oldarena, arena_t *newarena) {
+ assert(oldarena != NULL);
+ assert(newarena != NULL);
- oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
- newarena = arena_get(tsd_tsdn(tsd), newind, false);
arena_nthreads_dec(oldarena, false);
arena_nthreads_inc(newarena, false);
tsd_arena_set(tsd, newarena);
+
+ if (arena_nthreads_get(oldarena, false) == 0) {
+ /* Purge if the old arena has no associated threads anymore. */
+ arena_decay(tsd_tsdn(tsd), oldarena,
+ /* is_background_thread */ false, /* all */ true);
+ }
}
static void
@@ -418,82 +498,6 @@ arena_unbind(tsd_t *tsd, unsigned ind, bool internal) {
}
}
-arena_tdata_t *
-arena_tdata_get_hard(tsd_t *tsd, unsigned ind) {
- arena_tdata_t *tdata, *arenas_tdata_old;
- arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
- unsigned narenas_tdata_old, i;
- unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
- unsigned narenas_actual = narenas_total_get();
-
- /*
- * Dissociate old tdata array (and set up for deallocation upon return)
- * if it's too small.
- */
- if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
- arenas_tdata_old = arenas_tdata;
- narenas_tdata_old = narenas_tdata;
- arenas_tdata = NULL;
- narenas_tdata = 0;
- tsd_arenas_tdata_set(tsd, arenas_tdata);
- tsd_narenas_tdata_set(tsd, narenas_tdata);
- } else {
- arenas_tdata_old = NULL;
- narenas_tdata_old = 0;
- }
-
- /* Allocate tdata array if it's missing. */
- if (arenas_tdata == NULL) {
- bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
- narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;
-
- if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
- *arenas_tdata_bypassp = true;
- arenas_tdata = (arena_tdata_t *)a0malloc(
- sizeof(arena_tdata_t) * narenas_tdata);
- *arenas_tdata_bypassp = false;
- }
- if (arenas_tdata == NULL) {
- tdata = NULL;
- goto label_return;
- }
- assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
- tsd_arenas_tdata_set(tsd, arenas_tdata);
- tsd_narenas_tdata_set(tsd, narenas_tdata);
- }
-
- /*
- * Copy to tdata array. It's possible that the actual number of arenas
- * has increased since narenas_total_get() was called above, but that
- * causes no correctness issues unless two threads concurrently execute
- * the arenas.create mallctl, which we trust mallctl synchronization to
- * prevent.
- */
-
- /* Copy/initialize tickers. */
- for (i = 0; i < narenas_actual; i++) {
- if (i < narenas_tdata_old) {
- ticker_copy(&arenas_tdata[i].decay_ticker,
- &arenas_tdata_old[i].decay_ticker);
- } else {
- ticker_init(&arenas_tdata[i].decay_ticker,
- DECAY_NTICKS_PER_UPDATE);
- }
- }
- if (narenas_tdata > narenas_actual) {
- memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
- * (narenas_tdata - narenas_actual));
- }
-
- /* Read the refreshed tdata array. */
- tdata = &arenas_tdata[ind];
-label_return:
- if (arenas_tdata_old != NULL) {
- a0dalloc(arenas_tdata_old);
- }
- return tdata;
-}
-
/* Slow path, called only by arena_choose(). */
arena_t *
arena_choose_hard(tsd_t *tsd, bool internal) {
@@ -576,8 +580,7 @@ arena_choose_hard(tsd_t *tsd, bool internal) {
/* Initialize a new arena. */
choose[j] = first_null;
arena = arena_init_locked(tsd_tsdn(tsd),
- choose[j],
- (extent_hooks_t *)&extent_hooks_default);
+ choose[j], &arena_config_default);
if (arena == NULL) {
malloc_mutex_unlock(tsd_tsdn(tsd),
&arenas_lock);
@@ -629,20 +632,6 @@ arena_cleanup(tsd_t *tsd) {
}
}
-void
-arenas_tdata_cleanup(tsd_t *tsd) {
- arena_tdata_t *arenas_tdata;
-
- /* Prevent tsd->arenas_tdata from being (re)created. */
- *tsd_arenas_tdata_bypassp_get(tsd) = true;
-
- arenas_tdata = tsd_arenas_tdata_get(tsd);
- if (arenas_tdata != NULL) {
- tsd_arenas_tdata_set(tsd, NULL);
- a0dalloc(arenas_tdata);
- }
-}
-
static void
stats_print_atexit(void) {
if (config_stats) {
@@ -661,11 +650,13 @@ stats_print_atexit(void) {
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
arena_t *arena = arena_get(tsdn, i, false);
if (arena != NULL) {
- tcache_t *tcache;
+ tcache_slow_t *tcache_slow;
malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
- ql_foreach(tcache, &arena->tcache_ql, link) {
- tcache_stats_merge(tsdn, tcache, arena);
+ ql_foreach(tcache_slow, &arena->tcache_ql,
+ link) {
+ tcache_stats_merge(tsdn,
+ tcache_slow->tcache, arena);
}
malloc_mutex_unlock(tsdn,
&arena->tcache_ql_mtx);
@@ -730,18 +721,28 @@ malloc_ncpus(void) {
SYSTEM_INFO si;
GetSystemInfo(&si);
result = si.dwNumberOfProcessors;
-#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
+#elif defined(CPU_COUNT)
/*
* glibc >= 2.6 has the CPU_COUNT macro.
*
* glibc's sysconf() uses isspace(). glibc allocates for the first time
* *before* setting up the isspace tables. Therefore we need a
* different method to get the number of CPUs.
+ *
+ * The getaffinity approach is also preferred when only a subset of CPUs
+ * is available, to avoid using more arenas than necessary.
*/
{
+# if defined(__FreeBSD__) || defined(__DragonFly__)
+ cpuset_t set;
+# else
cpu_set_t set;
-
+# endif
+# if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
+ sched_getaffinity(0, sizeof(set), &set);
+# else
pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
+# endif
result = CPU_COUNT(&set);
}
#else
@@ -750,9 +751,47 @@ malloc_ncpus(void) {
return ((result == -1) ? 1 : (unsigned)result);
}
+/*
+ * Ensure that the number of CPUs is deterministic, i.e. it is the same based on:
+ * - sched_getaffinity()
+ * - _SC_NPROCESSORS_ONLN
+ * - _SC_NPROCESSORS_CONF
+ * Otherwise tricky things are possible with percpu arenas in use.
+ */
+static bool
+malloc_cpu_count_is_deterministic()
+{
+#ifdef _WIN32
+ return true;
+#else
+ long cpu_onln = sysconf(_SC_NPROCESSORS_ONLN);
+ long cpu_conf = sysconf(_SC_NPROCESSORS_CONF);
+ if (cpu_onln != cpu_conf) {
+ return false;
+ }
+# if defined(CPU_COUNT)
+# if defined(__FreeBSD__) || defined(__DragonFly__)
+ cpuset_t set;
+# else
+ cpu_set_t set;
+# endif /* __FreeBSD__ */
+# if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
+ sched_getaffinity(0, sizeof(set), &set);
+# else /* !JEMALLOC_HAVE_SCHED_SETAFFINITY */
+ pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
+# endif /* JEMALLOC_HAVE_SCHED_SETAFFINITY */
+ long cpu_affinity = CPU_COUNT(&set);
+ if (cpu_affinity != cpu_conf) {
+ return false;
+ }
+# endif /* CPU_COUNT */
+ return true;
+#endif
+}
+
static void
-init_opt_stats_print_opts(const char *v, size_t vlen) {
- size_t opts_len = strlen(opt_stats_print_opts);
+init_opt_stats_opts(const char *v, size_t vlen, char *dest) {
+ size_t opts_len = strlen(dest);
assert(opts_len <= stats_print_tot_num_options);
for (size_t i = 0; i < vlen; i++) {
@@ -763,16 +802,16 @@ init_opt_stats_print_opts(const char *v, size_t vlen) {
default: continue;
}
- if (strchr(opt_stats_print_opts, v[i]) != NULL) {
+ if (strchr(dest, v[i]) != NULL) {
/* Ignore repeated. */
continue;
}
- opt_stats_print_opts[opts_len++] = v[i];
- opt_stats_print_opts[opts_len] = '\0';
+ dest[opts_len++] = v[i];
+ dest[opts_len] = '\0';
assert(opts_len <= stats_print_tot_num_options);
}
- assert(opts_len == strlen(opt_stats_print_opts));
+ assert(opts_len == strlen(dest));
}
/* Reads the next size pair in a multi-sized option. */
@@ -854,10 +893,12 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
if (opts != *opts_p) {
malloc_write("<jemalloc>: Conf string ends "
"with key\n");
+ had_conf_error = true;
}
return true;
default:
malloc_write("<jemalloc>: Malformed conf string\n");
+ had_conf_error = true;
return true;
}
}
@@ -876,6 +917,7 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
if (*opts == '\0') {
malloc_write("<jemalloc>: Conf string ends "
"with comma\n");
+ had_conf_error = true;
}
*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
accept = true;
@@ -932,7 +974,7 @@ malloc_slow_flag_init(void) {
}
/* Number of sources for initializing malloc_conf */
-#define MALLOC_CONF_NSOURCES 4
+#define MALLOC_CONF_NSOURCES 5
static const char *
obtain_malloc_conf(unsigned which_source, char buf[PATH_MAX + 1]) {
@@ -1010,6 +1052,9 @@ obtain_malloc_conf(unsigned which_source, char buf[PATH_MAX + 1]) {
ret = NULL;
}
break;
+ } case 4: {
+ ret = je_malloc_conf_2_conf_harder;
+ break;
} default:
not_reached();
ret = NULL;
@@ -1026,7 +1071,9 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
"string pointed to by the global variable malloc_conf",
("\"name\" of the file referenced by the symbolic link named "
"/etc/malloc.conf"),
- "value of the environment variable MALLOC_CONF"
+ "value of the environment variable MALLOC_CONF",
+ ("string pointed to by the global variable "
+ "malloc_conf_2_conf_harder"),
};
unsigned i;
const char *opts, *k, *v;
@@ -1094,39 +1141,50 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
#define CONF_CHECK_MIN(um, min) ((um) < (min))
#define CONF_DONT_CHECK_MAX(um, max) false
#define CONF_CHECK_MAX(um, max) ((um) > (max))
-#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \
+
+#define CONF_VALUE_READ(max_t, result) \
+ char *end; \
+ set_errno(0); \
+ result = (max_t)malloc_strtoumax(v, &end, 0);
+#define CONF_VALUE_READ_FAIL() \
+ (get_errno() != 0 || (uintptr_t)end - (uintptr_t)v != vlen)
+
+#define CONF_HANDLE_T(t, max_t, o, n, min, max, check_min, check_max, clip) \
if (CONF_MATCH(n)) { \
- uintmax_t um; \
- char *end; \
- \
- set_errno(0); \
- um = malloc_strtoumax(v, &end, 0); \
- if (get_errno() != 0 || (uintptr_t)end -\
- (uintptr_t)v != vlen) { \
+ max_t mv; \
+ CONF_VALUE_READ(max_t, mv) \
+ if (CONF_VALUE_READ_FAIL()) { \
CONF_ERROR("Invalid conf value",\
k, klen, v, vlen); \
} else if (clip) { \
- if (check_min(um, (t)(min))) { \
+ if (check_min(mv, (t)(min))) { \
o = (t)(min); \
} else if ( \
- check_max(um, (t)(max))) { \
+ check_max(mv, (t)(max))) { \
o = (t)(max); \
} else { \
- o = (t)um; \
+ o = (t)mv; \
} \
} else { \
- if (check_min(um, (t)(min)) || \
- check_max(um, (t)(max))) { \
+ if (check_min(mv, (t)(min)) || \
+ check_max(mv, (t)(max))) { \
CONF_ERROR( \
"Out-of-range " \
"conf value", \
k, klen, v, vlen); \
} else { \
- o = (t)um; \
+ o = (t)mv; \
} \
} \
CONF_CONTINUE; \
}
+#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \
+ CONF_HANDLE_T(t, uintmax_t, o, n, min, max, check_min, \
+ check_max, clip)
+#define CONF_HANDLE_T_SIGNED(t, o, n, min, max, check_min, check_max, clip)\
+ CONF_HANDLE_T(t, intmax_t, o, n, min, max, check_min, \
+ check_max, clip)
+
#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \
clip) \
CONF_HANDLE_T_U(unsigned, o, n, min, max, \
@@ -1134,27 +1192,15 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \
CONF_HANDLE_T_U(size_t, o, n, min, max, \
check_min, check_max, clip)
+#define CONF_HANDLE_INT64_T(o, n, min, max, check_min, check_max, clip) \
+ CONF_HANDLE_T_SIGNED(int64_t, o, n, min, max, \
+ check_min, check_max, clip)
+#define CONF_HANDLE_UINT64_T(o, n, min, max, check_min, check_max, clip)\
+ CONF_HANDLE_T_U(uint64_t, o, n, min, max, \
+ check_min, check_max, clip)
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
- if (CONF_MATCH(n)) { \
- long l; \
- char *end; \
- \
- set_errno(0); \
- l = strtol(v, &end, 0); \
- if (get_errno() != 0 || (uintptr_t)end -\
- (uintptr_t)v != vlen) { \
- CONF_ERROR("Invalid conf value",\
- k, klen, v, vlen); \
- } else if (l < (ssize_t)(min) || l > \
- (ssize_t)(max)) { \
- CONF_ERROR( \
- "Out-of-range conf value", \
- k, klen, v, vlen); \
- } else { \
- o = l; \
- } \
- CONF_CONTINUE; \
- }
+ CONF_HANDLE_T_SIGNED(ssize_t, o, n, min, max, \
+ CONF_CHECK_MIN, CONF_CHECK_MAX, false)
#define CONF_HANDLE_CHAR_P(o, n, d) \
if (CONF_MATCH(n)) { \
size_t cpylen = (vlen <= \
@@ -1174,13 +1220,14 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
CONF_HANDLE_BOOL(opt_abort, "abort")
CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf")
+ CONF_HANDLE_BOOL(opt_trust_madvise, "trust_madvise")
if (strncmp("metadata_thp", k, klen) == 0) {
- int i;
+ int m;
bool match = false;
- for (i = 0; i < metadata_thp_mode_limit; i++) {
- if (strncmp(metadata_thp_mode_names[i],
+ for (m = 0; m < metadata_thp_mode_limit; m++) {
+ if (strncmp(metadata_thp_mode_names[m],
v, vlen) == 0) {
- opt_metadata_thp = i;
+ opt_metadata_thp = m;
match = true;
break;
}
@@ -1193,18 +1240,18 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
}
CONF_HANDLE_BOOL(opt_retain, "retain")
if (strncmp("dss", k, klen) == 0) {
- int i;
+ int m;
bool match = false;
- for (i = 0; i < dss_prec_limit; i++) {
- if (strncmp(dss_prec_names[i], v, vlen)
+ for (m = 0; m < dss_prec_limit; m++) {
+ if (strncmp(dss_prec_names[m], v, vlen)
== 0) {
- if (extent_dss_prec_set(i)) {
+ if (extent_dss_prec_set(m)) {
CONF_ERROR(
"Error setting dss",
k, klen, v, vlen);
} else {
opt_dss =
- dss_prec_names[i];
+ dss_prec_names[m];
match = true;
break;
}
@@ -1216,9 +1263,27 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
}
CONF_CONTINUE;
}
- CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
- UINT_MAX, CONF_CHECK_MIN, CONF_DONT_CHECK_MAX,
- false)
+ if (CONF_MATCH("narenas")) {
+ if (CONF_MATCH_VALUE("default")) {
+ opt_narenas = 0;
+ CONF_CONTINUE;
+ } else {
+ CONF_HANDLE_UNSIGNED(opt_narenas,
+ "narenas", 1, UINT_MAX,
+ CONF_CHECK_MIN, CONF_DONT_CHECK_MAX,
+ /* clip */ false)
+ }
+ }
+ if (CONF_MATCH("narenas_ratio")) {
+ char *end;
+ bool err = fxp_parse(&opt_narenas_ratio, v,
+ &end);
+ if (err || (size_t)(end - v) != vlen) {
+ CONF_ERROR("Invalid conf value",
+ k, klen, v, vlen);
+ }
+ CONF_CONTINUE;
+ }
if (CONF_MATCH("bin_shards")) {
const char *bin_shards_segment_cur = v;
size_t vlen_left = vlen;
@@ -1241,6 +1306,9 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
} while (vlen_left > 0);
CONF_CONTINUE;
}
+ CONF_HANDLE_INT64_T(opt_mutex_max_spin,
+ "mutex_max_spin", -1, INT64_MAX, CONF_CHECK_MIN,
+ CONF_DONT_CHECK_MAX, false);
CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms,
"dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
@@ -1251,7 +1319,16 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
SSIZE_MAX);
CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
if (CONF_MATCH("stats_print_opts")) {
- init_opt_stats_print_opts(v, vlen);
+ init_opt_stats_opts(v, vlen,
+ opt_stats_print_opts);
+ CONF_CONTINUE;
+ }
+ CONF_HANDLE_INT64_T(opt_stats_interval,
+ "stats_interval", -1, INT64_MAX,
+ CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, false)
+ if (CONF_MATCH("stats_interval_opts")) {
+ init_opt_stats_opts(v, vlen,
+ opt_stats_interval_opts);
CONF_CONTINUE;
}
if (config_fill) {
@@ -1287,9 +1364,61 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
if (config_xmalloc) {
CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
}
+ if (config_enable_cxx) {
+ CONF_HANDLE_BOOL(
+ opt_experimental_infallible_new,
+ "experimental_infallible_new")
+ }
+
CONF_HANDLE_BOOL(opt_tcache, "tcache")
- CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max",
- -1, (sizeof(size_t) << 3) - 1)
+ CONF_HANDLE_SIZE_T(opt_tcache_max, "tcache_max",
+ 0, TCACHE_MAXCLASS_LIMIT, CONF_DONT_CHECK_MIN,
+ CONF_CHECK_MAX, /* clip */ true)
+ if (CONF_MATCH("lg_tcache_max")) {
+ size_t m;
+ CONF_VALUE_READ(size_t, m)
+ if (CONF_VALUE_READ_FAIL()) {
+ CONF_ERROR("Invalid conf value",
+ k, klen, v, vlen);
+ } else {
+ /* clip if necessary */
+ if (m > TCACHE_LG_MAXCLASS_LIMIT) {
+ m = TCACHE_LG_MAXCLASS_LIMIT;
+ }
+ opt_tcache_max = (size_t)1 << m;
+ }
+ CONF_CONTINUE;
+ }
+ /*
+ * Anyone trying to set a value outside -16 to 16 is
+ * deeply confused.
+ */
+ CONF_HANDLE_SSIZE_T(opt_lg_tcache_nslots_mul,
+ "lg_tcache_nslots_mul", -16, 16)
+ /* Ditto with values past 2048. */
+ CONF_HANDLE_UNSIGNED(opt_tcache_nslots_small_min,
+ "tcache_nslots_small_min", 1, 2048,
+ CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true)
+ CONF_HANDLE_UNSIGNED(opt_tcache_nslots_small_max,
+ "tcache_nslots_small_max", 1, 2048,
+ CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true)
+ CONF_HANDLE_UNSIGNED(opt_tcache_nslots_large,
+ "tcache_nslots_large", 1, 2048,
+ CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true)
+ CONF_HANDLE_SIZE_T(opt_tcache_gc_incr_bytes,
+ "tcache_gc_incr_bytes", 1024, SIZE_T_MAX,
+ CONF_CHECK_MIN, CONF_DONT_CHECK_MAX,
+ /* clip */ true)
+ CONF_HANDLE_SIZE_T(opt_tcache_gc_delay_bytes,
+ "tcache_gc_delay_bytes", 0, SIZE_T_MAX,
+ CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX,
+ /* clip */ false)
+ CONF_HANDLE_UNSIGNED(opt_lg_tcache_flush_small_div,
+ "lg_tcache_flush_small_div", 1, 16,
+ CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true)
+ CONF_HANDLE_UNSIGNED(opt_lg_tcache_flush_large_div,
+ "lg_tcache_flush_large_div", 1, 16,
+ CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true)
/*
* The runtime option of oversize_threshold remains
@@ -1309,16 +1438,16 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
if (strncmp("percpu_arena", k, klen) == 0) {
bool match = false;
- for (int i = percpu_arena_mode_names_base; i <
- percpu_arena_mode_names_limit; i++) {
- if (strncmp(percpu_arena_mode_names[i],
+ for (int m = percpu_arena_mode_names_base; m <
+ percpu_arena_mode_names_limit; m++) {
+ if (strncmp(percpu_arena_mode_names[m],
v, vlen) == 0) {
if (!have_percpu_arena) {
CONF_ERROR(
"No getcpu support",
k, klen, v, vlen);
}
- opt_percpu_arena = i;
+ opt_percpu_arena = m;
match = true;
break;
}
@@ -1336,7 +1465,83 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
opt_max_background_threads,
CONF_CHECK_MIN, CONF_CHECK_MAX,
true);
+ CONF_HANDLE_BOOL(opt_hpa, "hpa")
+ CONF_HANDLE_SIZE_T(opt_hpa_opts.slab_max_alloc,
+ "hpa_slab_max_alloc", PAGE, HUGEPAGE,
+ CONF_CHECK_MIN, CONF_CHECK_MAX, true);
+
+ /*
+ * Accept either a ratio-based or an exact hugification
+ * threshold.
+ */
+ CONF_HANDLE_SIZE_T(opt_hpa_opts.hugification_threshold,
+ "hpa_hugification_threshold", PAGE, HUGEPAGE,
+ CONF_CHECK_MIN, CONF_CHECK_MAX, true);
+ if (CONF_MATCH("hpa_hugification_threshold_ratio")) {
+ fxp_t ratio;
+ char *end;
+ bool err = fxp_parse(&ratio, v,
+ &end);
+ if (err || (size_t)(end - v) != vlen
+ || ratio > FXP_INIT_INT(1)) {
+ CONF_ERROR("Invalid conf value",
+ k, klen, v, vlen);
+ } else {
+ opt_hpa_opts.hugification_threshold =
+ fxp_mul_frac(HUGEPAGE, ratio);
+ }
+ CONF_CONTINUE;
+ }
+
+ CONF_HANDLE_UINT64_T(
+ opt_hpa_opts.hugify_delay_ms, "hpa_hugify_delay_ms",
+ 0, 0, CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX,
+ false);
+
+ CONF_HANDLE_UINT64_T(
+ opt_hpa_opts.min_purge_interval_ms,
+ "hpa_min_purge_interval_ms", 0, 0,
+ CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, false);
+
+ if (CONF_MATCH("hpa_dirty_mult")) {
+ if (CONF_MATCH_VALUE("-1")) {
+ opt_hpa_opts.dirty_mult = (fxp_t)-1;
+ CONF_CONTINUE;
+ }
+ fxp_t ratio;
+ char *end;
+ bool err = fxp_parse(&ratio, v,
+ &end);
+ if (err || (size_t)(end - v) != vlen) {
+ CONF_ERROR("Invalid conf value",
+ k, klen, v, vlen);
+ } else {
+ opt_hpa_opts.dirty_mult = ratio;
+ }
+ CONF_CONTINUE;
+ }
+
+ CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.nshards,
+ "hpa_sec_nshards", 0, 0, CONF_CHECK_MIN,
+ CONF_DONT_CHECK_MAX, true);
+ CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.max_alloc,
+ "hpa_sec_max_alloc", PAGE, 0, CONF_CHECK_MIN,
+ CONF_DONT_CHECK_MAX, true);
+ CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.max_bytes,
+ "hpa_sec_max_bytes", PAGE, 0, CONF_CHECK_MIN,
+ CONF_DONT_CHECK_MAX, true);
+ CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.bytes_after_flush,
+ "hpa_sec_bytes_after_flush", PAGE, 0,
+ CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, true);
+ CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.batch_fill_extra,
+ "hpa_sec_batch_fill_extra", 0, HUGEPAGE_PAGES,
+ CONF_CHECK_MIN, CONF_CHECK_MAX, true);
+
if (CONF_MATCH("slab_sizes")) {
+ if (CONF_MATCH_VALUE("default")) {
+ sc_data_init(sc_data);
+ CONF_CONTINUE;
+ }
bool err;
const char *slab_size_segment_cur = v;
size_t vlen_left = vlen;
@@ -1378,7 +1583,44 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
+ CONF_HANDLE_BOOL(opt_prof_leak_error,
+ "prof_leak_error")
CONF_HANDLE_BOOL(opt_prof_log, "prof_log")
+ CONF_HANDLE_SSIZE_T(opt_prof_recent_alloc_max,
+ "prof_recent_alloc_max", -1, SSIZE_MAX)
+ CONF_HANDLE_BOOL(opt_prof_stats, "prof_stats")
+ CONF_HANDLE_BOOL(opt_prof_sys_thread_name,
+ "prof_sys_thread_name")
+ if (CONF_MATCH("prof_time_resolution")) {
+ if (CONF_MATCH_VALUE("default")) {
+ opt_prof_time_res =
+ prof_time_res_default;
+ } else if (CONF_MATCH_VALUE("high")) {
+ if (!config_high_res_timer) {
+ CONF_ERROR(
+ "No high resolution"
+ " timer support",
+ k, klen, v, vlen);
+ } else {
+ opt_prof_time_res =
+ prof_time_res_high;
+ }
+ } else {
+ CONF_ERROR("Invalid conf value",
+ k, klen, v, vlen);
+ }
+ CONF_CONTINUE;
+ }
+ /*
+ * Undocumented. When set to false, don't
+ * correct for an unbiasing bug in jeprof
+ * attribution. This can be handy if you want
+ * to get consistent numbers from your binary
+ * across different jemalloc versions, even if
+ * those numbers are incorrect. The default is
+ * true.
+ */
+ CONF_HANDLE_BOOL(opt_prof_unbias, "prof_unbias")
}
if (config_log) {
if (CONF_MATCH("log")) {
@@ -1392,15 +1634,15 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
}
if (CONF_MATCH("thp")) {
bool match = false;
- for (int i = 0; i < thp_mode_names_limit; i++) {
- if (strncmp(thp_mode_names[i],v, vlen)
+ for (int m = 0; m < thp_mode_names_limit; m++) {
+ if (strncmp(thp_mode_names[m],v, vlen)
== 0) {
- if (!have_madvise_huge) {
+ if (!have_madvise_huge && !have_memcntl) {
CONF_ERROR(
"No THP support",
k, klen, v, vlen);
}
- opt_thp = i;
+ opt_thp = m;
match = true;
break;
}
@@ -1411,6 +1653,55 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
}
CONF_CONTINUE;
}
+ if (CONF_MATCH("zero_realloc")) {
+ if (CONF_MATCH_VALUE("alloc")) {
+ opt_zero_realloc_action
+ = zero_realloc_action_alloc;
+ } else if (CONF_MATCH_VALUE("free")) {
+ opt_zero_realloc_action
+ = zero_realloc_action_free;
+ } else if (CONF_MATCH_VALUE("abort")) {
+ opt_zero_realloc_action
+ = zero_realloc_action_abort;
+ } else {
+ CONF_ERROR("Invalid conf value",
+ k, klen, v, vlen);
+ }
+ CONF_CONTINUE;
+ }
+ if (config_uaf_detection &&
+ CONF_MATCH("lg_san_uaf_align")) {
+ ssize_t a;
+ CONF_VALUE_READ(ssize_t, a)
+ if (CONF_VALUE_READ_FAIL() || a < -1) {
+ CONF_ERROR("Invalid conf value",
+ k, klen, v, vlen);
+ }
+ if (a == -1) {
+ opt_lg_san_uaf_align = -1;
+ CONF_CONTINUE;
+ }
+
+ /* clip if necessary */
+ ssize_t max_allowed = (sizeof(size_t) << 3) - 1;
+ ssize_t min_allowed = LG_PAGE;
+ if (a > max_allowed) {
+ a = max_allowed;
+ } else if (a < min_allowed) {
+ a = min_allowed;
+ }
+
+ opt_lg_san_uaf_align = a;
+ CONF_CONTINUE;
+ }
+
+ CONF_HANDLE_SIZE_T(opt_san_guard_small,
+ "san_guard_small", 0, SIZE_T_MAX,
+ CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, false)
+ CONF_HANDLE_SIZE_T(opt_san_guard_large,
+ "san_guard_large", 0, SIZE_T_MAX,
+ CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, false)
+
CONF_ERROR("Invalid conf pair", k, klen, v, vlen);
#undef CONF_ERROR
#undef CONF_CONTINUE
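
For reference, a hedged caller-side sketch of the new zero_realloc modes parsed above; the behavior in the comments follows do_realloc_nonnull_zero() further down in this diff, and the helper name here is hypothetical:

#include <stdlib.h>

static void *
realloc_to_zero_demo(void) {
	void *p = malloc(16);
	void *q = realloc(p, 0);
	/* zero_realloc:alloc -> q is a minimal live allocation (p released);
	 * zero_realloc:free  -> q == NULL and p has been freed;
	 * zero_realloc:abort -> the call trips a safety check and aborts. */
	return q;
}
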
@@ -1421,7 +1712,9 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
#undef CONF_CHECK_MIN
#undef CONF_DONT_CHECK_MAX
#undef CONF_CHECK_MAX
+#undef CONF_HANDLE_T
#undef CONF_HANDLE_T_U
+#undef CONF_HANDLE_T_SIGNED
#undef CONF_HANDLE_UNSIGNED
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
@@ -1436,15 +1729,33 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
atomic_store_b(&log_init_done, true, ATOMIC_RELEASE);
}
+static bool
+malloc_conf_init_check_deps(void) {
+ if (opt_prof_leak_error && !opt_prof_final) {
+ malloc_printf("<jemalloc>: prof_leak_error is set w/o "
+ "prof_final.\n");
+ return true;
+ }
+
+ return false;
+}
+
static void
malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) {
- const char *opts_cache[MALLOC_CONF_NSOURCES] = {NULL, NULL, NULL, NULL};
+ const char *opts_cache[MALLOC_CONF_NSOURCES] = {NULL, NULL, NULL, NULL,
+ NULL};
char buf[PATH_MAX + 1];
/* The first call only set the confirm_conf option and opts_cache */
malloc_conf_init_helper(NULL, NULL, true, opts_cache, buf);
malloc_conf_init_helper(sc_data, bin_shard_sizes, false, opts_cache,
NULL);
+ if (malloc_conf_init_check_deps()) {
+ /* check_deps does warning msg only; abort below if needed. */
+ if (opt_abort_conf) {
+ malloc_abort_invalid_conf();
+ }
+ }
}
#undef MALLOC_CONF_NSOURCES
@@ -1488,8 +1799,8 @@ malloc_init_hard_a0_locked() {
* Ordering here is somewhat tricky; we need sc_boot() first, since that
* determines what the size classes will be, and then
* malloc_conf_init(), since any slab size tweaking will need to be done
- * before sz_boot and bin_boot, which assume that the values they read
- * out of sc_data_global are final.
+ * before sz_boot and bin_info_boot, which assume that the values they
+ * read out of sc_data_global are final.
*/
sc_boot(&sc_data);
unsigned bin_shard_sizes[SC_NBINS];
@@ -1503,8 +1814,9 @@ malloc_init_hard_a0_locked() {
prof_boot0();
}
malloc_conf_init(&sc_data, bin_shard_sizes);
- sz_boot(&sc_data);
- bin_boot(&sc_data, bin_shard_sizes);
+ san_init(opt_lg_san_uaf_align);
+ sz_boot(&sc_data, opt_cache_oblivious);
+ bin_info_boot(&sc_data, bin_shard_sizes);
if (opt_stats_print) {
/* Print statistics at exit. */
@@ -1515,12 +1827,20 @@ malloc_init_hard_a0_locked() {
}
}
}
+
+ if (stats_boot()) {
+ return true;
+ }
if (pages_boot()) {
return true;
}
if (base_boot(TSDN_NULL)) {
return true;
}
+ /* emap_global is static, hence zeroed. */
+ if (emap_init(&arena_emap_global, b0get(), /* zeroed */ true)) {
+ return true;
+ }
if (extent_boot()) {
return true;
}
@@ -1530,8 +1850,20 @@ malloc_init_hard_a0_locked() {
if (config_prof) {
prof_boot1();
}
- arena_boot(&sc_data);
- if (tcache_boot(TSDN_NULL)) {
+ if (opt_hpa && !hpa_supported()) {
+ malloc_printf("<jemalloc>: HPA not supported in the current "
+ "configuration; %s.",
+ opt_abort_conf ? "aborting" : "disabling");
+ if (opt_abort_conf) {
+ malloc_abort_invalid_conf();
+ } else {
+ opt_hpa = false;
+ }
+ }
+ if (arena_boot(&sc_data, b0get(), opt_hpa)) {
+ return true;
+ }
+ if (tcache_boot(TSDN_NULL, b0get())) {
return true;
}
if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS,
@@ -1550,11 +1882,29 @@ malloc_init_hard_a0_locked() {
* Initialize one arena here. The rest are lazily created in
* arena_choose_hard().
*/
- if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default)
- == NULL) {
+ if (arena_init(TSDN_NULL, 0, &arena_config_default) == NULL) {
return true;
}
a0 = arena_get(TSDN_NULL, 0, false);
+
+ if (opt_hpa && !hpa_supported()) {
+ malloc_printf("<jemalloc>: HPA not supported in the current "
+ "configuration; %s.",
+ opt_abort_conf ? "aborting" : "disabling");
+ if (opt_abort_conf) {
+ malloc_abort_invalid_conf();
+ } else {
+ opt_hpa = false;
+ }
+ } else if (opt_hpa) {
+ hpa_shard_opts_t hpa_shard_opts = opt_hpa_opts;
+ hpa_shard_opts.deferral_allowed = background_thread_enabled();
+ if (pa_shard_enable_hpa(TSDN_NULL, &a0->pa_shard,
+ &hpa_shard_opts, &opt_hpa_sec_opts)) {
+ return true;
+ }
+ }
+
malloc_init_state = malloc_init_a0_initialized;
return false;
@@ -1576,6 +1926,29 @@ malloc_init_hard_recursible(void) {
malloc_init_state = malloc_init_recursible;
ncpus = malloc_ncpus();
+ if (opt_percpu_arena != percpu_arena_disabled) {
+ bool cpu_count_is_deterministic =
+ malloc_cpu_count_is_deterministic();
+ if (!cpu_count_is_deterministic) {
+ /*
+			 * If the number of CPUs is not deterministic and
+			 * narenas is not specified, disable the per-CPU arena
+			 * since CPU IDs may not be detected properly.
+ */
+ if (opt_narenas == 0) {
+ opt_percpu_arena = percpu_arena_disabled;
+ malloc_write("<jemalloc>: Number of CPUs "
+ "detected is not deterministic. Per-CPU "
+ "arena disabled.\n");
+ if (opt_abort_conf) {
+ malloc_abort_invalid_conf();
+ }
+ if (opt_abort) {
+ abort();
+ }
+ }
+ }
+ }
#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
&& !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
@@ -1606,7 +1979,13 @@ malloc_narenas_default(void) {
* default.
*/
if (ncpus > 1) {
- return ncpus << 2;
+ fxp_t fxp_ncpus = FXP_INIT_INT(ncpus);
+ fxp_t goal = fxp_mul(fxp_ncpus, opt_narenas_ratio);
+ uint32_t int_goal = fxp_round_nearest(goal);
+ if (int_goal == 0) {
+ return 1;
+ }
+ return int_goal;
} else {
return 1;
}
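
Worked example (illustrative; assumes the shipped default narenas_ratio of 4, chosen to preserve the old behavior): on an 8-CPU machine the goal is fxp_round_nearest(fxp_mul(FXP_INIT_INT(8), opt_narenas_ratio)) = 32 arenas, exactly the former "ncpus << 2"; a setting such as narenas_ratio:0.5 would instead round 8 * 0.5 to 4 arenas, and the int_goal == 0 guard keeps the result at a minimum of one arena.
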
@@ -1765,10 +2144,11 @@ malloc_init_hard(void) {
/* Set reentrancy level to 1 during init. */
pre_reentrancy(tsd, NULL);
/* Initialize narenas before prof_boot2 (for allocation). */
- if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) {
+ if (malloc_init_narenas()
+ || background_thread_boot1(tsd_tsdn(tsd), b0get())) {
UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
}
- if (config_prof && prof_boot2(tsd)) {
+ if (config_prof && prof_boot2(tsd, b0get())) {
UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
}
@@ -1907,38 +2287,107 @@ dynamic_opts_init(dynamic_opts_t *dynamic_opts) {
dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC;
}
-/* ind is ignored if dopts->alignment > 0. */
-JEMALLOC_ALWAYS_INLINE void *
-imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
- size_t size, size_t usize, szind_t ind) {
- tcache_t *tcache;
- arena_t *arena;
+/*
+ * The ind parameter is optional and is only checked and filled if alignment
+ * == 0; returns true if the result is out of range.
+ */
+JEMALLOC_ALWAYS_INLINE bool
+aligned_usize_get(size_t size, size_t alignment, size_t *usize, szind_t *ind,
+ bool bump_empty_aligned_alloc) {
+ assert(usize != NULL);
+ if (alignment == 0) {
+ if (ind != NULL) {
+ *ind = sz_size2index(size);
+ if (unlikely(*ind >= SC_NSIZES)) {
+ return true;
+ }
+ *usize = sz_index2size(*ind);
+ assert(*usize > 0 && *usize <= SC_LARGE_MAXCLASS);
+ return false;
+ }
+ *usize = sz_s2u(size);
+ } else {
+ if (bump_empty_aligned_alloc && unlikely(size == 0)) {
+ size = 1;
+ }
+ *usize = sz_sa2u(size, alignment);
+ }
+ if (unlikely(*usize == 0 || *usize > SC_LARGE_MAXCLASS)) {
+ return true;
+ }
+ return false;
+}
- /* Fill in the tcache. */
- if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) {
- if (likely(!sopts->slow)) {
+JEMALLOC_ALWAYS_INLINE bool
+zero_get(bool guarantee, bool slow) {
+ if (config_fill && slow && unlikely(opt_zero)) {
+ return true;
+ } else {
+ return guarantee;
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE tcache_t *
+tcache_get_from_ind(tsd_t *tsd, unsigned tcache_ind, bool slow, bool is_alloc) {
+ tcache_t *tcache;
+ if (tcache_ind == TCACHE_IND_AUTOMATIC) {
+ if (likely(!slow)) {
/* Getting tcache ptr unconditionally. */
tcache = tsd_tcachep_get(tsd);
assert(tcache == tcache_get(tsd));
- } else {
+ } else if (is_alloc ||
+ likely(tsd_reentrancy_level_get(tsd) == 0)) {
tcache = tcache_get(tsd);
+ } else {
+ tcache = NULL;
}
- } else if (dopts->tcache_ind == TCACHE_IND_NONE) {
- tcache = NULL;
} else {
- tcache = tcaches_get(tsd, dopts->tcache_ind);
+ /*
+ * Should not specify tcache on deallocation path when being
+ * reentrant.
+ */
+ assert(is_alloc || tsd_reentrancy_level_get(tsd) == 0 ||
+ tsd_state_nocleanup(tsd));
+ if (tcache_ind == TCACHE_IND_NONE) {
+ tcache = NULL;
+ } else {
+ tcache = tcaches_get(tsd, tcache_ind);
+ }
}
+ return tcache;
+}
- /* Fill in the arena. */
- if (dopts->arena_ind == ARENA_IND_AUTOMATIC) {
+/* Return true if a manual arena is specified and arena_get() OOMs. */
+JEMALLOC_ALWAYS_INLINE bool
+arena_get_from_ind(tsd_t *tsd, unsigned arena_ind, arena_t **arena_p) {
+ if (arena_ind == ARENA_IND_AUTOMATIC) {
/*
* In case of automatic arena management, we defer arena
* computation until as late as we can, hoping to fill the
* allocation out of the tcache.
*/
- arena = NULL;
+ *arena_p = NULL;
} else {
- arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true);
+ *arena_p = arena_get(tsd_tsdn(tsd), arena_ind, true);
+ if (unlikely(*arena_p == NULL) && arena_ind >= narenas_auto) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/* ind is ignored if dopts->alignment > 0. */
+JEMALLOC_ALWAYS_INLINE void *
+imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
+ size_t size, size_t usize, szind_t ind) {
+ /* Fill in the tcache. */
+ tcache_t *tcache = tcache_get_from_ind(tsd, dopts->tcache_ind,
+ sopts->slow, /* is_alloc */ true);
+
+ /* Fill in the arena. */
+ arena_t *arena;
+ if (arena_get_from_ind(tsd, dopts->arena_ind, &arena)) {
+ return NULL;
}
if (unlikely(dopts->alignment != 0)) {
@@ -1962,6 +2411,7 @@ imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
szind_t ind_large;
size_t bumped_usize = usize;
+ dopts->alignment = prof_sample_align(dopts->alignment);
if (usize <= SC_SMALL_MAXCLASS) {
assert(((dopts->alignment == 0) ?
sz_s2u(SC_LARGE_MINCLASS) :
@@ -1978,6 +2428,7 @@ imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
} else {
ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind);
}
+ assert(prof_sample_aligned(ret));
return ret;
}
@@ -2031,16 +2482,14 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
/* Filled in by compute_size_with_overflow below. */
size_t size = 0;
/*
- * For unaligned allocations, we need only ind. For aligned
- * allocations, or in case of stats or profiling we need usize.
- *
- * These are actually dead stores, in that their values are reset before
- * any branch on their value is taken. Sometimes though, it's
- * convenient to pass them as arguments before this point. To avoid
- * undefined behavior then, we initialize them with dummy stores.
+	 * The zero initialization of ind is actually a dead store, in that its
+	 * value is reset before any branch on its value is taken. Sometimes
+	 * though, it's convenient to pass it as an argument before this point.
+	 * To avoid undefined behavior then, we initialize it with a dummy store.
*/
szind_t ind = 0;
- size_t usize = 0;
+ /* usize will always be properly initialized. */
+ size_t usize;
/* Reentrancy is only checked on slow path. */
int8_t reentrancy_level;
@@ -2057,31 +2506,12 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
}
/* This is the beginning of the "core" algorithm. */
-
- if (dopts->alignment == 0) {
- ind = sz_size2index(size);
- if (unlikely(ind >= SC_NSIZES)) {
- goto label_oom;
- }
- if (config_stats || (config_prof && opt_prof) || sopts->usize) {
- usize = sz_index2size(ind);
- dopts->usize = usize;
- assert(usize > 0 && usize
- <= SC_LARGE_MAXCLASS);
- }
- } else {
- if (sopts->bump_empty_aligned_alloc) {
- if (unlikely(size == 0)) {
- size = 1;
- }
- }
- usize = sz_sa2u(size, dopts->alignment);
- dopts->usize = usize;
- if (unlikely(usize == 0
- || usize > SC_LARGE_MAXCLASS)) {
- goto label_oom;
- }
+ dopts->zero = zero_get(dopts->zero, sopts->slow);
+ if (aligned_usize_get(size, dopts->alignment, &usize, &ind,
+ sopts->bump_empty_aligned_alloc)) {
+ goto label_oom;
}
+ dopts->usize = usize;
/* Validate the user input. */
if (sopts->assert_nonempty_alloc) {
assert (size != 0);
@@ -2107,26 +2537,25 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
dopts->arena_ind = 0;
}
+ /*
+ * If dopts->alignment > 0, then ind is still 0, but usize was computed
+ * in the previous if statement. Down the positive alignment path,
+ * imalloc_no_sample and imalloc_sample will ignore ind.
+ */
+
/* If profiling is on, get our profiling context. */
if (config_prof && opt_prof) {
- /*
- * Note that if we're going down this path, usize must have been
- * initialized in the previous if statement.
- */
- prof_tctx_t *tctx = prof_alloc_prep(
- tsd, usize, prof_active_get_unlocked(), true);
+ bool prof_active = prof_active_get_unlocked();
+ bool sample_event = te_prof_sample_event_lookahead(tsd, usize);
+ prof_tctx_t *tctx = prof_alloc_prep(tsd, prof_active,
+ sample_event);
- alloc_ctx_t alloc_ctx;
+ emap_alloc_ctx_t alloc_ctx;
if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
- alloc_ctx.slab = (usize
- <= SC_SMALL_MAXCLASS);
+ alloc_ctx.slab = (usize <= SC_SMALL_MAXCLASS);
allocation = imalloc_no_sample(
sopts, dopts, tsd, usize, usize, ind);
} else if ((uintptr_t)tctx > (uintptr_t)1U) {
- /*
- * Note that ind might still be 0 here. This is fine;
- * imalloc_sample ignores ind if dopts->alignment > 0.
- */
allocation = imalloc_sample(
sopts, dopts, tsd, usize, ind);
alloc_ctx.slab = false;
@@ -2135,17 +2564,12 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
}
if (unlikely(allocation == NULL)) {
- prof_alloc_rollback(tsd, tctx, true);
+ prof_alloc_rollback(tsd, tctx);
goto label_oom;
}
- prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx);
+ prof_malloc(tsd, allocation, size, usize, &alloc_ctx, tctx);
} else {
- /*
- * If dopts->alignment > 0, then ind is still 0, but usize was
- * computed in the previous if statement. Down the positive
- * alignment path, imalloc_no_sample ignores ind and size
- * (relying only on usize).
- */
+ assert(!opt_prof);
allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize,
ind);
if (unlikely(allocation == NULL)) {
@@ -2157,12 +2581,17 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
* Allocation has been done at this point. We still have some
* post-allocation work to do though.
*/
+
+ thread_alloc_event(tsd, usize);
+
assert(dopts->alignment == 0
|| ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0));
- if (config_stats) {
- assert(usize == isalloc(tsd_tsdn(tsd), allocation));
- *tsd_thread_allocatedp_get(tsd) += usize;
+ assert(usize == isalloc(tsd_tsdn(tsd), allocation));
+
+ if (config_fill && sopts->slow && !dopts->zero
+ && unlikely(opt_junk_alloc)) {
+ junk_alloc_callback(allocation, usize);
}
if (sopts->slow) {
@@ -2273,7 +2702,11 @@ malloc_default(size_t size) {
static_opts_t sopts;
dynamic_opts_t dopts;
- LOG("core.malloc.entry", "size: %zu", size);
+ /*
+	 * This variant has a logging hook on exit but not on entry. It's called
+ * only by je_malloc, below, which emits the entry one for us (and, if
+ * it calls us, does so only via tail call).
+ */
static_opts_init(&sopts);
dynamic_opts_init(&dopts);
@@ -2306,86 +2739,11 @@ malloc_default(size_t size) {
* Begin malloc(3)-compatible functions.
*/
-/*
- * malloc() fastpath.
- *
- * Fastpath assumes size <= SC_LOOKUP_MAXCLASS, and that we hit
- * tcache. If either of these is false, we tail-call to the slowpath,
- * malloc_default(). Tail-calling is used to avoid any caller-saved
- * registers.
- *
- * fastpath supports ticker and profiling, both of which will also
- * tail-call to the slowpath if they fire.
- */
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_malloc(size_t size) {
- LOG("core.malloc.entry", "size: %zu", size);
-
- if (tsd_get_allocates() && unlikely(!malloc_initialized())) {
- return malloc_default(size);
- }
-
- tsd_t *tsd = tsd_get(false);
- if (unlikely(!tsd || !tsd_fast(tsd) || (size > SC_LOOKUP_MAXCLASS))) {
- return malloc_default(size);
- }
-
- tcache_t *tcache = tsd_tcachep_get(tsd);
-
- if (unlikely(ticker_trytick(&tcache->gc_ticker))) {
- return malloc_default(size);
- }
-
- szind_t ind = sz_size2index_lookup(size);
- size_t usize;
- if (config_stats || config_prof) {
- usize = sz_index2size(ind);
- }
- /* Fast path relies on size being a bin. I.e. SC_LOOKUP_MAXCLASS < SC_SMALL_MAXCLASS */
- assert(ind < SC_NBINS);
- assert(size <= SC_SMALL_MAXCLASS);
-
- if (config_prof) {
- int64_t bytes_until_sample = tsd_bytes_until_sample_get(tsd);
- bytes_until_sample -= usize;
- tsd_bytes_until_sample_set(tsd, bytes_until_sample);
-
- if (unlikely(bytes_until_sample < 0)) {
- /*
- * Avoid a prof_active check on the fastpath.
- * If prof_active is false, set bytes_until_sample to
- * a large value. If prof_active is set to true,
- * bytes_until_sample will be reset.
- */
- if (!prof_active) {
- tsd_bytes_until_sample_set(tsd, SSIZE_MAX);
- }
- return malloc_default(size);
- }
- }
-
- cache_bin_t *bin = tcache_small_bin_get(tcache, ind);
- bool tcache_success;
- void* ret = cache_bin_alloc_easy(bin, &tcache_success);
-
- if (tcache_success) {
- if (config_stats) {
- *tsd_thread_allocatedp_get(tsd) += usize;
- bin->tstats.nrequests++;
- }
- if (config_prof) {
- tcache->prof_accumbytes += usize;
- }
-
- LOG("core.malloc.exit", "result: %p", ret);
-
- /* Fastpath success */
- return ret;
- }
-
- return malloc_default(size);
+ return imalloc_fastpath(size, &malloc_default);
}
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
@@ -2502,56 +2860,6 @@ je_calloc(size_t num, size_t size) {
return ret;
}
-static void *
-irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
- prof_tctx_t *tctx, hook_ralloc_args_t *hook_args) {
- void *p;
-
- if (tctx == NULL) {
- return NULL;
- }
- if (usize <= SC_SMALL_MAXCLASS) {
- p = iralloc(tsd, old_ptr, old_usize,
- SC_LARGE_MINCLASS, 0, false, hook_args);
- if (p == NULL) {
- return NULL;
- }
- arena_prof_promote(tsd_tsdn(tsd), p, usize);
- } else {
- p = iralloc(tsd, old_ptr, old_usize, usize, 0, false,
- hook_args);
- }
-
- return p;
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
- alloc_ctx_t *alloc_ctx, hook_ralloc_args_t *hook_args) {
- void *p;
- bool prof_active;
- prof_tctx_t *old_tctx, *tctx;
-
- prof_active = prof_active_get_unlocked();
- old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
- tctx = prof_alloc_prep(tsd, usize, prof_active, true);
- if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
- p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx,
- hook_args);
- } else {
- p = iralloc(tsd, old_ptr, old_usize, usize, 0, false,
- hook_args);
- }
- if (unlikely(p == NULL)) {
- prof_alloc_rollback(tsd, tctx, true);
- return NULL;
- }
- prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
- old_tctx);
-
- return p;
-}
-
JEMALLOC_ALWAYS_INLINE void
ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
if (!slow_path) {
@@ -2565,30 +2873,50 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
- alloc_ctx_t alloc_ctx;
- rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
- rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
+ emap_alloc_ctx_t alloc_ctx;
+ emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
+ &alloc_ctx);
assert(alloc_ctx.szind != SC_NSIZES);
- size_t usize;
+ size_t usize = sz_index2size(alloc_ctx.szind);
if (config_prof && opt_prof) {
- usize = sz_index2size(alloc_ctx.szind);
prof_free(tsd, ptr, usize, &alloc_ctx);
- } else if (config_stats) {
- usize = sz_index2size(alloc_ctx.szind);
- }
- if (config_stats) {
- *tsd_thread_deallocatedp_get(tsd) += usize;
}
if (likely(!slow_path)) {
idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
false);
} else {
+ if (config_fill && slow_path && opt_junk_free) {
+ junk_free_callback(ptr, usize);
+ }
idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
true);
}
+ thread_dalloc_event(tsd, usize);
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+maybe_check_alloc_ctx(tsd_t *tsd, void *ptr, emap_alloc_ctx_t *alloc_ctx) {
+ if (config_opt_size_checks) {
+ emap_alloc_ctx_t dbg_ctx;
+ emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
+ &dbg_ctx);
+ if (alloc_ctx->szind != dbg_ctx.szind) {
+ safety_check_fail_sized_dealloc(
+ /* current_dealloc */ true, ptr,
+ /* true_size */ sz_size2index(dbg_ctx.szind),
+ /* input_size */ sz_size2index(alloc_ctx->szind));
+ return true;
+ }
+ if (alloc_ctx->slab != dbg_ctx.slab) {
+ safety_check_fail(
+ "Internal heap corruption detected: "
+ "mismatch in slab bit");
+ return true;
+ }
+ }
+ return false;
}
JEMALLOC_ALWAYS_INLINE void
@@ -2604,166 +2932,63 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
- alloc_ctx_t alloc_ctx, *ctx;
- if (!config_cache_oblivious && ((uintptr_t)ptr & PAGE_MASK) != 0) {
- /*
- * When cache_oblivious is disabled and ptr is not page aligned,
- * the allocation was not sampled -- usize can be used to
- * determine szind directly.
- */
+ emap_alloc_ctx_t alloc_ctx;
+ if (!config_prof) {
alloc_ctx.szind = sz_size2index(usize);
- alloc_ctx.slab = true;
- ctx = &alloc_ctx;
- if (config_debug) {
- alloc_ctx_t dbg_ctx;
- rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
- rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree,
- rtree_ctx, (uintptr_t)ptr, true, &dbg_ctx.szind,
- &dbg_ctx.slab);
- assert(dbg_ctx.szind == alloc_ctx.szind);
- assert(dbg_ctx.slab == alloc_ctx.slab);
- }
- } else if (config_prof && opt_prof) {
- rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
- rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
- assert(alloc_ctx.szind == sz_size2index(usize));
- ctx = &alloc_ctx;
+ alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
} else {
- ctx = NULL;
+ if (likely(!prof_sample_aligned(ptr))) {
+ /*
+ * When the ptr is not page aligned, it was not sampled.
+ * usize can be trusted to determine szind and slab.
+ */
+ alloc_ctx.szind = sz_size2index(usize);
+ alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
+ } else if (opt_prof) {
+ emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global,
+ ptr, &alloc_ctx);
+
+ if (config_opt_safety_checks) {
+ /* Small alloc may have !slab (sampled). */
+ if (unlikely(alloc_ctx.szind !=
+ sz_size2index(usize))) {
+ safety_check_fail_sized_dealloc(
+ /* current_dealloc */ true, ptr,
+ /* true_size */ sz_index2size(
+ alloc_ctx.szind),
+ /* input_size */ usize);
+ }
+ }
+ } else {
+ alloc_ctx.szind = sz_size2index(usize);
+ alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
+ }
+ }
+ bool fail = maybe_check_alloc_ctx(tsd, ptr, &alloc_ctx);
+ if (fail) {
+ /*
+ * This is a heap corruption bug. In real life we'll crash; for
+ * the unit test we just want to avoid breaking anything too
+ * badly to get a test result out. Let's leak instead of trying
+ * to free.
+ */
+ return;
}
if (config_prof && opt_prof) {
- prof_free(tsd, ptr, usize, ctx);
- }
- if (config_stats) {
- *tsd_thread_deallocatedp_get(tsd) += usize;
+ prof_free(tsd, ptr, usize, &alloc_ctx);
}
-
if (likely(!slow_path)) {
- isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false);
+ isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, &alloc_ctx,
+ false);
} else {
- isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true);
- }
-}
-
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-void JEMALLOC_NOTHROW *
-JEMALLOC_ALLOC_SIZE(2)
-je_realloc(void *ptr, size_t arg_size) {
- void *ret;
- tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
- size_t usize JEMALLOC_CC_SILENCE_INIT(0);
- size_t old_usize = 0;
- size_t size = arg_size;
-
- LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size);
-
- if (unlikely(size == 0)) {
- if (ptr != NULL) {
- /* realloc(ptr, 0) is equivalent to free(ptr). */
- UTRACE(ptr, 0, 0);
- tcache_t *tcache;
- tsd_t *tsd = tsd_fetch();
- if (tsd_reentrancy_level_get(tsd) == 0) {
- tcache = tcache_get(tsd);
- } else {
- tcache = NULL;
- }
-
- uintptr_t args[3] = {(uintptr_t)ptr, size};
- hook_invoke_dalloc(hook_dalloc_realloc, ptr, args);
-
- ifree(tsd, ptr, tcache, true);
-
- LOG("core.realloc.exit", "result: %p", NULL);
- return NULL;
+ if (config_fill && slow_path && opt_junk_free) {
+ junk_free_callback(ptr, usize);
}
- size = 1;
- }
-
- if (likely(ptr != NULL)) {
- assert(malloc_initialized() || IS_INITIALIZER);
- tsd_t *tsd = tsd_fetch();
-
- check_entry_exit_locking(tsd_tsdn(tsd));
-
-
- hook_ralloc_args_t hook_args = {true, {(uintptr_t)ptr,
- (uintptr_t)arg_size, 0, 0}};
-
- alloc_ctx_t alloc_ctx;
- rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
- rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
- assert(alloc_ctx.szind != SC_NSIZES);
- old_usize = sz_index2size(alloc_ctx.szind);
- assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
- if (config_prof && opt_prof) {
- usize = sz_s2u(size);
- if (unlikely(usize == 0
- || usize > SC_LARGE_MAXCLASS)) {
- ret = NULL;
- } else {
- ret = irealloc_prof(tsd, ptr, old_usize, usize,
- &alloc_ctx, &hook_args);
- }
- } else {
- if (config_stats) {
- usize = sz_s2u(size);
- }
- ret = iralloc(tsd, ptr, old_usize, size, 0, false,
- &hook_args);
- }
- tsdn = tsd_tsdn(tsd);
- } else {
- /* realloc(NULL, size) is equivalent to malloc(size). */
- static_opts_t sopts;
- dynamic_opts_t dopts;
-
- static_opts_init(&sopts);
- dynamic_opts_init(&dopts);
-
- sopts.null_out_result_on_error = true;
- sopts.set_errno_on_error = true;
- sopts.oom_string =
- "<jemalloc>: Error in realloc(): out of memory\n";
-
- dopts.result = &ret;
- dopts.num_items = 1;
- dopts.item_size = size;
-
- imalloc(&sopts, &dopts);
- if (sopts.slow) {
- uintptr_t args[3] = {(uintptr_t)ptr, arg_size};
- hook_invoke_alloc(hook_alloc_realloc, ret,
- (uintptr_t)ret, args);
- }
-
- return ret;
- }
-
- if (unlikely(ret == NULL)) {
- if (config_xmalloc && unlikely(opt_xmalloc)) {
- malloc_write("<jemalloc>: Error in realloc(): "
- "out of memory\n");
- abort();
- }
- set_errno(ENOMEM);
- }
- if (config_stats && likely(ret != NULL)) {
- tsd_t *tsd;
-
- assert(usize == isalloc(tsdn, ret));
- tsd = tsdn_tsd(tsdn);
- *tsd_thread_allocatedp_get(tsd) += usize;
- *tsd_thread_deallocatedp_get(tsd) += old_usize;
+ isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, &alloc_ctx,
+ true);
}
- UTRACE(ptr, size, ret);
- check_entry_exit_locking(tsdn);
-
- LOG("core.realloc.exit", "result: %p", ret);
- return ret;
+ thread_dalloc_event(tsd, usize);
}
JEMALLOC_NOINLINE
@@ -2782,79 +3007,149 @@ free_default(void *ptr) {
tsd_t *tsd = tsd_fetch_min();
check_entry_exit_locking(tsd_tsdn(tsd));
- tcache_t *tcache;
if (likely(tsd_fast(tsd))) {
- tsd_assert_fast(tsd);
- /* Unconditionally get tcache ptr on fast path. */
- tcache = tsd_tcachep_get(tsd);
- ifree(tsd, ptr, tcache, false);
+ tcache_t *tcache = tcache_get_from_ind(tsd,
+ TCACHE_IND_AUTOMATIC, /* slow */ false,
+ /* is_alloc */ false);
+ ifree(tsd, ptr, tcache, /* slow */ false);
} else {
- if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
- tcache = tcache_get(tsd);
- } else {
- tcache = NULL;
- }
+ tcache_t *tcache = tcache_get_from_ind(tsd,
+ TCACHE_IND_AUTOMATIC, /* slow */ true,
+ /* is_alloc */ false);
uintptr_t args_raw[3] = {(uintptr_t)ptr};
hook_invoke_dalloc(hook_dalloc_free, ptr, args_raw);
- ifree(tsd, ptr, tcache, true);
+ ifree(tsd, ptr, tcache, /* slow */ true);
}
+
check_entry_exit_locking(tsd_tsdn(tsd));
}
}
+JEMALLOC_ALWAYS_INLINE bool
+free_fastpath_nonfast_aligned(void *ptr, bool check_prof) {
+ /*
+	 * free_fastpath does not handle two uncommon cases: 1) sampled profiled
+ * objects and 2) sampled junk & stash for use-after-free detection.
+ * Both have special alignments which are used to escape the fastpath.
+ *
+ * prof_sample is page-aligned, which covers the UAF check when both
+ * are enabled (the assertion below). Avoiding redundant checks since
+ * this is on the fastpath -- at most one runtime branch from this.
+ */
+ if (config_debug && cache_bin_nonfast_aligned(ptr)) {
+ assert(prof_sample_aligned(ptr));
+ }
+
+ if (config_prof && check_prof) {
+ /* When prof is enabled, the prof_sample alignment is enough. */
+ if (prof_sample_aligned(ptr)) {
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ if (config_uaf_detection) {
+ if (cache_bin_nonfast_aligned(ptr)) {
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ return false;
+}
+
+/* Returns whether or not the free attempt was successful. */
JEMALLOC_ALWAYS_INLINE
bool free_fastpath(void *ptr, size_t size, bool size_hint) {
tsd_t *tsd = tsd_get(false);
- if (unlikely(!tsd || !tsd_fast(tsd))) {
+ /* The branch gets optimized away unless tsd_get_allocates(). */
+ if (unlikely(tsd == NULL)) {
return false;
}
-
- tcache_t *tcache = tsd_tcachep_get(tsd);
-
- alloc_ctx_t alloc_ctx;
/*
- * If !config_cache_oblivious, we can check PAGE alignment to
- * detect sampled objects. Otherwise addresses are
- * randomized, and we have to look it up in the rtree anyway.
- * See also isfree().
+ * The tsd_fast() / initialized checks are folded into the branch
+ * testing (deallocated_after >= threshold) later in this function.
+ * The threshold will be set to 0 when !tsd_fast.
*/
- if (!size_hint || config_cache_oblivious) {
- rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
- bool res = rtree_szind_slab_read_fast(tsd_tsdn(tsd), &extents_rtree,
- rtree_ctx, (uintptr_t)ptr,
- &alloc_ctx.szind, &alloc_ctx.slab);
+ assert(tsd_fast(tsd) ||
+ *tsd_thread_deallocated_next_event_fastp_get_unsafe(tsd) == 0);
+
+ emap_alloc_ctx_t alloc_ctx;
+ if (!size_hint) {
+ bool err = emap_alloc_ctx_try_lookup_fast(tsd,
+ &arena_emap_global, ptr, &alloc_ctx);
/* Note: profiled objects will have alloc_ctx.slab set */
- if (!res || !alloc_ctx.slab) {
+ if (unlikely(err || !alloc_ctx.slab ||
+ free_fastpath_nonfast_aligned(ptr,
+ /* check_prof */ false))) {
return false;
}
assert(alloc_ctx.szind != SC_NSIZES);
} else {
/*
- * Check for both sizes that are too large, and for sampled objects.
- * Sampled objects are always page-aligned. The sampled object check
- * will also check for null ptr.
+ * Check for both sizes that are too large, and for sampled /
+ * special aligned objects. The alignment check will also check
+ * for null ptr.
*/
- if (size > SC_LOOKUP_MAXCLASS || (((uintptr_t)ptr & PAGE_MASK) == 0)) {
+ if (unlikely(size > SC_LOOKUP_MAXCLASS ||
+ free_fastpath_nonfast_aligned(ptr,
+ /* check_prof */ true))) {
return false;
}
alloc_ctx.szind = sz_size2index_lookup(size);
+ /* Max lookup class must be small. */
+ assert(alloc_ctx.szind < SC_NBINS);
+ /* This is a dead store, except when opt size checking is on. */
+ alloc_ctx.slab = true;
}
+ /*
+ * Currently the fastpath only handles small sizes. The branch on
+ * SC_LOOKUP_MAXCLASS makes sure of it. This lets us avoid checking
+ * tcache szind upper limit (i.e. tcache_maxclass) as well.
+ */
+ assert(alloc_ctx.slab);
+
+ uint64_t deallocated, threshold;
+ te_free_fastpath_ctx(tsd, &deallocated, &threshold);
- if (unlikely(ticker_trytick(&tcache->gc_ticker))) {
+ size_t usize = sz_index2size(alloc_ctx.szind);
+ uint64_t deallocated_after = deallocated + usize;
+ /*
+ * Check for events and tsd non-nominal (fast_threshold will be set to
+ * 0) in a single branch. Note that this handles the uninitialized case
+	 * as well (TSD init will be triggered on the non-fastpath). Therefore
+	 * anything that depends on a functional TSD (e.g. the alloc_ctx sanity
+	 * check below) needs to come after this branch.
+ */
+ if (unlikely(deallocated_after >= threshold)) {
return false;
}
+ assert(tsd_fast(tsd));
+ bool fail = maybe_check_alloc_ctx(tsd, ptr, &alloc_ctx);
+ if (fail) {
+ /* See the comment in isfree. */
+ return true;
+ }
- cache_bin_t *bin = tcache_small_bin_get(tcache, alloc_ctx.szind);
- cache_bin_info_t *bin_info = &tcache_bin_info[alloc_ctx.szind];
- if (!cache_bin_dalloc_easy(bin, bin_info, ptr)) {
+ tcache_t *tcache = tcache_get_from_ind(tsd, TCACHE_IND_AUTOMATIC,
+ /* slow */ false, /* is_alloc */ false);
+ cache_bin_t *bin = &tcache->bins[alloc_ctx.szind];
+
+ /*
+ * If junking were enabled, this is where we would do it. It's not
+ * though, since we ensured above that we're on the fast path. Assert
+ * that to double-check.
+ */
+ assert(!opt_junk_free);
+
+ if (!cache_bin_dalloc_easy(bin, ptr)) {
return false;
}
- if (config_stats) {
- size_t usize = sz_index2size(alloc_ctx.szind);
- *tsd_thread_deallocatedp_get(tsd) += usize;
- }
+ *tsd_thread_deallocatedp_get(tsd) = deallocated_after;
return true;
}
@@ -2965,6 +3260,8 @@ je_valloc(size_t size) {
* passed an extra argument for the caller return address, which will be
* ignored.
*/
+#include <features.h> // defines __GLIBC__ if we are compiling against glibc
+
JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
@@ -2973,7 +3270,7 @@ JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
je_memalign;
# endif
-# ifdef CPU_COUNT
+# ifdef __GLIBC__
/*
* To enable static linking with glibc, the libc specific malloc interface must
* be implemented also, so none of glibc's malloc.o functions are added to the
@@ -3016,6 +3313,26 @@ int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign);
* Begin non-standard functions.
*/
+JEMALLOC_ALWAYS_INLINE unsigned
+mallocx_tcache_get(int flags) {
+ if (likely((flags & MALLOCX_TCACHE_MASK) == 0)) {
+ return TCACHE_IND_AUTOMATIC;
+ } else if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
+ return TCACHE_IND_NONE;
+ } else {
+ return MALLOCX_TCACHE_GET(flags);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE unsigned
+mallocx_arena_get(int flags) {
+ if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
+ return MALLOCX_ARENA_GET(flags);
+ } else {
+ return ARENA_IND_AUTOMATIC;
+ }
+}
+
#ifdef JEMALLOC_EXPERIMENTAL_SMALLOCX_API
#define JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y) x ## y
@@ -3060,25 +3377,10 @@ JEMALLOC_SMALLOCX_CONCAT_HELPER2(je_smallocx_, JEMALLOC_VERSION_GID_IDENT)
dopts.num_items = 1;
dopts.item_size = size;
if (unlikely(flags != 0)) {
- if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) {
- dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
- }
-
+ dopts.alignment = MALLOCX_ALIGN_GET(flags);
dopts.zero = MALLOCX_ZERO_GET(flags);
-
- if ((flags & MALLOCX_TCACHE_MASK) != 0) {
- if ((flags & MALLOCX_TCACHE_MASK)
- == MALLOCX_TCACHE_NONE) {
- dopts.tcache_ind = TCACHE_IND_NONE;
- } else {
- dopts.tcache_ind = MALLOCX_TCACHE_GET(flags);
- }
- } else {
- dopts.tcache_ind = TCACHE_IND_AUTOMATIC;
- }
-
- if ((flags & MALLOCX_ARENA_MASK) != 0)
- dopts.arena_ind = MALLOCX_ARENA_GET(flags);
+ dopts.tcache_ind = mallocx_tcache_get(flags);
+ dopts.arena_ind = mallocx_arena_get(flags);
}
imalloc(&sopts, &dopts);
@@ -3113,25 +3415,10 @@ je_mallocx(size_t size, int flags) {
dopts.num_items = 1;
dopts.item_size = size;
if (unlikely(flags != 0)) {
- if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) {
- dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
- }
-
+ dopts.alignment = MALLOCX_ALIGN_GET(flags);
dopts.zero = MALLOCX_ZERO_GET(flags);
-
- if ((flags & MALLOCX_TCACHE_MASK) != 0) {
- if ((flags & MALLOCX_TCACHE_MASK)
- == MALLOCX_TCACHE_NONE) {
- dopts.tcache_ind = TCACHE_IND_NONE;
- } else {
- dopts.tcache_ind = MALLOCX_TCACHE_GET(flags);
- }
- } else {
- dopts.tcache_ind = TCACHE_IND_AUTOMATIC;
- }
-
- if ((flags & MALLOCX_ARENA_MASK) != 0)
- dopts.arena_ind = MALLOCX_ARENA_GET(flags);
+ dopts.tcache_ind = mallocx_tcache_get(flags);
+ dopts.arena_ind = mallocx_arena_get(flags);
}
imalloc(&sopts, &dopts);
@@ -3154,6 +3441,8 @@ irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
if (tctx == NULL) {
return NULL;
}
+
+ alignment = prof_sample_align(alignment);
if (usize <= SC_SMALL_MAXCLASS) {
p = iralloct(tsdn, old_ptr, old_usize,
SC_LARGE_MINCLASS, alignment, zero, tcache,
@@ -3166,66 +3455,48 @@ irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero,
tcache, arena, hook_args);
}
+ assert(prof_sample_aligned(p));
return p;
}
JEMALLOC_ALWAYS_INLINE void *
irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
- size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
- arena_t *arena, alloc_ctx_t *alloc_ctx, hook_ralloc_args_t *hook_args) {
+ size_t alignment, size_t usize, bool zero, tcache_t *tcache,
+ arena_t *arena, emap_alloc_ctx_t *alloc_ctx,
+ hook_ralloc_args_t *hook_args) {
+ prof_info_t old_prof_info;
+ prof_info_get_and_reset_recent(tsd, old_ptr, alloc_ctx, &old_prof_info);
+ bool prof_active = prof_active_get_unlocked();
+ bool sample_event = te_prof_sample_event_lookahead(tsd, usize);
+ prof_tctx_t *tctx = prof_alloc_prep(tsd, prof_active, sample_event);
void *p;
- bool prof_active;
- prof_tctx_t *old_tctx, *tctx;
-
- prof_active = prof_active_get_unlocked();
- old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
- tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize,
- *usize, alignment, zero, tcache, arena, tctx, hook_args);
+ usize, alignment, zero, tcache, arena, tctx, hook_args);
} else {
p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment,
zero, tcache, arena, hook_args);
}
if (unlikely(p == NULL)) {
- prof_alloc_rollback(tsd, tctx, false);
+ prof_alloc_rollback(tsd, tctx);
return NULL;
}
-
- if (p == old_ptr && alignment != 0) {
- /*
- * The allocation did not move, so it is possible that the size
- * class is smaller than would guarantee the requested
- * alignment, and that the alignment constraint was
- * serendipitously satisfied. Additionally, old_usize may not
- * be the same as the current usize because of in-place large
- * reallocation. Therefore, query the actual value of usize.
- */
- *usize = isalloc(tsd_tsdn(tsd), p);
- }
- prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
- old_usize, old_tctx);
+ assert(usize == isalloc(tsd_tsdn(tsd), p));
+ prof_realloc(tsd, p, size, usize, tctx, prof_active, old_ptr,
+ old_usize, &old_prof_info, sample_event);
return p;
}
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-void JEMALLOC_NOTHROW *
-JEMALLOC_ALLOC_SIZE(2)
-je_rallocx(void *ptr, size_t size, int flags) {
+static void *
+do_rallocx(void *ptr, size_t size, int flags, bool is_realloc) {
void *p;
tsd_t *tsd;
size_t usize;
size_t old_usize;
size_t alignment = MALLOCX_ALIGN_GET(flags);
- bool zero = flags & MALLOCX_ZERO;
arena_t *arena;
- tcache_t *tcache;
-
- LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
- size, flags);
-
assert(ptr != NULL);
assert(size != 0);
@@ -3233,44 +3504,31 @@ je_rallocx(void *ptr, size_t size, int flags) {
tsd = tsd_fetch();
check_entry_exit_locking(tsd_tsdn(tsd));
- if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
- unsigned arena_ind = MALLOCX_ARENA_GET(flags);
- arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
- if (unlikely(arena == NULL)) {
- goto label_oom;
- }
- } else {
- arena = NULL;
- }
+ bool zero = zero_get(MALLOCX_ZERO_GET(flags), /* slow */ true);
- if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
- if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
- tcache = NULL;
- } else {
- tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
- }
- } else {
- tcache = tcache_get(tsd);
+ unsigned arena_ind = mallocx_arena_get(flags);
+ if (arena_get_from_ind(tsd, arena_ind, &arena)) {
+ goto label_oom;
}
- alloc_ctx_t alloc_ctx;
- rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
- rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
+ unsigned tcache_ind = mallocx_tcache_get(flags);
+ tcache_t *tcache = tcache_get_from_ind(tsd, tcache_ind,
+ /* slow */ true, /* is_alloc */ true);
+
+ emap_alloc_ctx_t alloc_ctx;
+ emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
+ &alloc_ctx);
assert(alloc_ctx.szind != SC_NSIZES);
old_usize = sz_index2size(alloc_ctx.szind);
assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
+ if (aligned_usize_get(size, alignment, &usize, NULL, false)) {
+ goto label_oom;
+ }
- hook_ralloc_args_t hook_args = {false, {(uintptr_t)ptr, size, flags,
- 0}};
+ hook_ralloc_args_t hook_args = {is_realloc, {(uintptr_t)ptr, size,
+ flags, 0}};
if (config_prof && opt_prof) {
- usize = (alignment == 0) ?
- sz_s2u(size) : sz_sa2u(size, alignment);
- if (unlikely(usize == 0
- || usize > SC_LARGE_MAXCLASS)) {
- goto label_oom;
- }
- p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
+ p = irallocx_prof(tsd, ptr, old_usize, size, alignment, usize,
zero, tcache, arena, &alloc_ctx, &hook_args);
if (unlikely(p == NULL)) {
goto label_oom;
@@ -3281,20 +3539,22 @@ je_rallocx(void *ptr, size_t size, int flags) {
if (unlikely(p == NULL)) {
goto label_oom;
}
- if (config_stats) {
- usize = isalloc(tsd_tsdn(tsd), p);
- }
+ assert(usize == isalloc(tsd_tsdn(tsd), p));
}
assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
+ thread_alloc_event(tsd, usize);
+ thread_dalloc_event(tsd, old_usize);
- if (config_stats) {
- *tsd_thread_allocatedp_get(tsd) += usize;
- *tsd_thread_deallocatedp_get(tsd) += old_usize;
- }
UTRACE(ptr, size, p);
check_entry_exit_locking(tsd_tsdn(tsd));
- LOG("core.rallocx.exit", "result: %p", p);
+ if (config_fill && unlikely(opt_junk_alloc) && usize > old_usize
+ && !zero) {
+ size_t excess_len = usize - old_usize;
+ void *excess_start = (void *)((uintptr_t)p + old_usize);
+ junk_alloc_callback(excess_start, excess_len);
+ }
+
return p;
label_oom:
if (config_xmalloc && unlikely(opt_xmalloc)) {
@@ -3304,10 +3564,103 @@ label_oom:
UTRACE(ptr, size, 0);
check_entry_exit_locking(tsd_tsdn(tsd));
- LOG("core.rallocx.exit", "result: %p", NULL);
return NULL;
}
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ALLOC_SIZE(2)
+je_rallocx(void *ptr, size_t size, int flags) {
+ LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
+ size, flags);
+ void *ret = do_rallocx(ptr, size, flags, false);
+ LOG("core.rallocx.exit", "result: %p", ret);
+ return ret;
+}
+
+static void *
+do_realloc_nonnull_zero(void *ptr) {
+ if (config_stats) {
+ atomic_fetch_add_zu(&zero_realloc_count, 1, ATOMIC_RELAXED);
+ }
+ if (opt_zero_realloc_action == zero_realloc_action_alloc) {
+ /*
+ * The user might have gotten an alloc setting while expecting a
+ * free setting. If that's the case, we at least try to
+ * reduce the harm, and turn off the tcache while allocating, so
+ * that we'll get a true first fit.
+ */
+ return do_rallocx(ptr, 1, MALLOCX_TCACHE_NONE, true);
+ } else if (opt_zero_realloc_action == zero_realloc_action_free) {
+ UTRACE(ptr, 0, 0);
+ tsd_t *tsd = tsd_fetch();
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ tcache_t *tcache = tcache_get_from_ind(tsd,
+ TCACHE_IND_AUTOMATIC, /* slow */ true,
+ /* is_alloc */ false);
+ uintptr_t args[3] = {(uintptr_t)ptr, 0};
+ hook_invoke_dalloc(hook_dalloc_realloc, ptr, args);
+ ifree(tsd, ptr, tcache, true);
+
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ return NULL;
+ } else {
+ safety_check_fail("Called realloc(non-null-ptr, 0) with "
+ "zero_realloc:abort set\n");
+ /* In real code, this will never run; the safety check failure
+ * will call abort. In the unit test, we just want to bail out
+ * without corrupting internal state that the test needs to
+ * finish.
+ */
+ return NULL;
+ }
+}
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ALLOC_SIZE(2)
+je_realloc(void *ptr, size_t size) {
+ LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size);
+
+ if (likely(ptr != NULL && size != 0)) {
+ void *ret = do_rallocx(ptr, size, 0, true);
+ LOG("core.realloc.exit", "result: %p", ret);
+ return ret;
+ } else if (ptr != NULL && size == 0) {
+ void *ret = do_realloc_nonnull_zero(ptr);
+ LOG("core.realloc.exit", "result: %p", ret);
+ return ret;
+ } else {
+ /* realloc(NULL, size) is equivalent to malloc(size). */
+ void *ret;
+
+ static_opts_t sopts;
+ dynamic_opts_t dopts;
+
+ static_opts_init(&sopts);
+ dynamic_opts_init(&dopts);
+
+ sopts.null_out_result_on_error = true;
+ sopts.set_errno_on_error = true;
+ sopts.oom_string =
+ "<jemalloc>: Error in realloc(): out of memory\n";
+
+ dopts.result = &ret;
+ dopts.num_items = 1;
+ dopts.item_size = size;
+
+ imalloc(&sopts, &dopts);
+ if (sopts.slow) {
+ uintptr_t args[3] = {(uintptr_t)ptr, size};
+ hook_invoke_alloc(hook_alloc_realloc, ret,
+ (uintptr_t)ret, args);
+ }
+ LOG("core.realloc.exit", "result: %p", ret);
+ return ret;
+ }
+}
+
JEMALLOC_ALWAYS_INLINE size_t
ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
size_t extra, size_t alignment, bool zero) {
@@ -3324,51 +3677,46 @@ ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
static size_t
ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) {
- size_t usize;
-
- if (tctx == NULL) {
+ /* Sampled allocation needs to be page aligned. */
+ if (tctx == NULL || !prof_sample_aligned(ptr)) {
return old_usize;
}
- usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
- zero);
- return usize;
+ return ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
+ zero);
}
JEMALLOC_ALWAYS_INLINE size_t
ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
- size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) {
- size_t usize_max, usize;
- bool prof_active;
- prof_tctx_t *old_tctx, *tctx;
+ size_t extra, size_t alignment, bool zero, emap_alloc_ctx_t *alloc_ctx) {
+ /*
+ * old_prof_info is only used for asserting that the profiling info
+ * isn't changed by the ixalloc() call.
+ */
+ prof_info_t old_prof_info;
+ prof_info_get(tsd, ptr, alloc_ctx, &old_prof_info);
- prof_active = prof_active_get_unlocked();
- old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);
/*
* usize isn't knowable before ixalloc() returns when extra is non-zero.
* Therefore, compute its maximum possible value and use that in
* prof_alloc_prep() to decide whether to capture a backtrace.
* prof_realloc() will use the actual usize to decide whether to sample.
*/
- if (alignment == 0) {
- usize_max = sz_s2u(size+extra);
- assert(usize_max > 0
- && usize_max <= SC_LARGE_MAXCLASS);
- } else {
- usize_max = sz_sa2u(size+extra, alignment);
- if (unlikely(usize_max == 0
- || usize_max > SC_LARGE_MAXCLASS)) {
- /*
- * usize_max is out of range, and chances are that
- * allocation will fail, but use the maximum possible
- * value and carry on with prof_alloc_prep(), just in
- * case allocation succeeds.
- */
- usize_max = SC_LARGE_MAXCLASS;
- }
+ size_t usize_max;
+ if (aligned_usize_get(size + extra, alignment, &usize_max, NULL,
+ false)) {
+ /*
+ * usize_max is out of range, and chances are that allocation
+ * will fail, but use the maximum possible value and carry on
+ * with prof_alloc_prep(), just in case allocation succeeds.
+ */
+ usize_max = SC_LARGE_MAXCLASS;
}
- tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
+ bool prof_active = prof_active_get_unlocked();
+ bool sample_event = te_prof_sample_event_lookahead(tsd, usize_max);
+ prof_tctx_t *tctx = prof_alloc_prep(tsd, prof_active, sample_event);
+ size_t usize;
if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
size, extra, alignment, zero, tctx);
@@ -3376,13 +3724,28 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
extra, alignment, zero);
}
+
+ /*
+ * At this point we can still safely get the original profiling
+ * information associated with the ptr, because (a) the edata_t object
+ * associated with the ptr still lives and (b) the profiling info
+ * fields are not touched. "(a)" is asserted in the outer je_xallocx()
+ * function, and "(b)" is indirectly verified below by checking that
+ * the alloc_tctx field is unchanged.
+ */
+ prof_info_t prof_info;
if (usize == old_usize) {
- prof_alloc_rollback(tsd, tctx, false);
- return usize;
+ prof_info_get(tsd, ptr, alloc_ctx, &prof_info);
+ prof_alloc_rollback(tsd, tctx);
+ } else {
+ prof_info_get_and_reset_recent(tsd, ptr, alloc_ctx, &prof_info);
+ assert(usize <= usize_max);
+ sample_event = te_prof_sample_event_lookahead(tsd, usize);
+ prof_realloc(tsd, ptr, size, usize, tctx, prof_active, ptr,
+ old_usize, &prof_info, sample_event);
}
- prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
- old_tctx);
+ assert(old_prof_info.alloc_tctx == prof_info.alloc_tctx);
return usize;
}
@@ -3391,7 +3754,7 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
tsd_t *tsd;
size_t usize, old_usize;
size_t alignment = MALLOCX_ALIGN_GET(flags);
- bool zero = flags & MALLOCX_ZERO;
+ bool zero = zero_get(MALLOCX_ZERO_GET(flags), /* slow */ true);
LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, "
"flags: %d", ptr, size, extra, flags);
@@ -3403,10 +3766,17 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
tsd = tsd_fetch();
check_entry_exit_locking(tsd_tsdn(tsd));
- alloc_ctx_t alloc_ctx;
- rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
- rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
+ /*
+ * old_edata is only for verifying that xallocx() keeps the edata_t
+ * object associated with the ptr (though the content of the edata_t
+ * object can be changed).
+ */
+ edata_t *old_edata = emap_edata_lookup(tsd_tsdn(tsd),
+ &arena_emap_global, ptr);
+
+ emap_alloc_ctx_t alloc_ctx;
+ emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
+ &alloc_ctx);
assert(alloc_ctx.szind != SC_NSIZES);
old_usize = sz_index2size(alloc_ctx.szind);
assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
@@ -3434,13 +3804,25 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
extra, alignment, zero);
}
+
+ /*
+ * xallocx() should keep using the same edata_t object (though its
+ * content can be changed).
+ */
+ assert(emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr)
+ == old_edata);
+
if (unlikely(usize == old_usize)) {
goto label_not_resized;
}
+ thread_alloc_event(tsd, usize);
+ thread_dalloc_event(tsd, old_usize);
- if (config_stats) {
- *tsd_thread_allocatedp_get(tsd) += usize;
- *tsd_thread_deallocatedp_get(tsd) += old_usize;
+ if (config_fill && unlikely(opt_junk_alloc) && usize > old_usize &&
+ !zero) {
+ size_t excess_len = usize - old_usize;
+ void *excess_start = (void *)((uintptr_t)ptr + old_usize);
+ junk_alloc_callback(excess_start, excess_len);
}
label_not_resized:
if (unlikely(!tsd_fast(tsd))) {
@@ -3490,31 +3872,13 @@ je_dallocx(void *ptr, int flags) {
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
- tsd_t *tsd = tsd_fetch();
+ tsd_t *tsd = tsd_fetch_min();
bool fast = tsd_fast(tsd);
check_entry_exit_locking(tsd_tsdn(tsd));
- tcache_t *tcache;
- if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
- /* Not allowed to be reentrant and specify a custom tcache. */
- assert(tsd_reentrancy_level_get(tsd) == 0);
- if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
- tcache = NULL;
- } else {
- tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
- }
- } else {
- if (likely(fast)) {
- tcache = tsd_tcachep_get(tsd);
- assert(tcache == tcache_get(tsd));
- } else {
- if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
- tcache = tcache_get(tsd);
- } else {
- tcache = NULL;
- }
- }
- }
+ unsigned tcache_ind = mallocx_tcache_get(flags);
+ tcache_t *tcache = tcache_get_from_ind(tsd, tcache_ind, !fast,
+ /* is_alloc */ false);
UTRACE(ptr, 0, 0);
if (likely(fast)) {
@@ -3533,13 +3897,9 @@ je_dallocx(void *ptr, int flags) {
JEMALLOC_ALWAYS_INLINE size_t
inallocx(tsdn_t *tsdn, size_t size, int flags) {
check_entry_exit_locking(tsdn);
-
size_t usize;
- if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) {
- usize = sz_s2u(size);
- } else {
- usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
- }
+ /* In case of out of range, let the user see it rather than fail. */
+ aligned_usize_get(size, MALLOCX_ALIGN_GET(flags), &usize, NULL, false);
check_entry_exit_locking(tsdn);
return usize;
}
@@ -3549,33 +3909,14 @@ sdallocx_default(void *ptr, size_t size, int flags) {
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
- tsd_t *tsd = tsd_fetch();
+ tsd_t *tsd = tsd_fetch_min();
bool fast = tsd_fast(tsd);
size_t usize = inallocx(tsd_tsdn(tsd), size, flags);
- assert(usize == isalloc(tsd_tsdn(tsd), ptr));
check_entry_exit_locking(tsd_tsdn(tsd));
- tcache_t *tcache;
- if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
- /* Not allowed to be reentrant and specify a custom tcache. */
- assert(tsd_reentrancy_level_get(tsd) == 0);
- if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
- tcache = NULL;
- } else {
- tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
- }
- } else {
- if (likely(fast)) {
- tcache = tsd_tcachep_get(tsd);
- assert(tcache == tcache_get(tsd));
- } else {
- if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
- tcache = tcache_get(tsd);
- } else {
- tcache = NULL;
- }
- }
- }
+ unsigned tcache_ind = mallocx_tcache_get(flags);
+ tcache_t *tcache = tcache_get_from_ind(tsd, tcache_ind, !fast,
+ /* is_alloc */ false);
UTRACE(ptr, 0, 0);
if (likely(fast)) {
@@ -3587,7 +3928,6 @@ sdallocx_default(void *ptr, size_t size, int flags) {
isfree(tsd, ptr, usize, tcache, true);
}
check_entry_exit_locking(tsd_tsdn(tsd));
-
}
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
@@ -3595,7 +3935,7 @@ je_sdallocx(void *ptr, size_t size, int flags) {
LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
size, flags);
- if (flags !=0 || !free_fastpath(ptr, size, true)) {
+ if (flags != 0 || !free_fastpath(ptr, size, true)) {
sdallocx_default(ptr, size, flags);
}
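
A short sketch of the dispatch guarded above: flags == 0 keeps the free on free_fastpath(), while any non-zero flag (tcache, arena, alignment) takes sdallocx_default(). Assumes <jemalloc/jemalloc.h>; the sizes are illustrative.

    static void
    sdallocx_example(void) {
        void *p = mallocx(100, 0);
        if (p != NULL) {
            sdallocx(p, 100, 0);                    /* flags == 0: fast-path eligible */
        }
        void *q = mallocx(100, MALLOCX_TCACHE_NONE);
        if (q != NULL) {
            sdallocx(q, 100, MALLOCX_TCACHE_NONE);  /* flags != 0: slow path */
        }
    }
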
@@ -3704,6 +4044,7 @@ je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
return ret;
}
+#define STATS_PRINT_BUFSIZE 65536
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
const char *opts) {
@@ -3713,23 +4054,30 @@ je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
tsdn = tsdn_fetch();
check_entry_exit_locking(tsdn);
- stats_print(write_cb, cbopaque, opts);
+
+ if (config_debug) {
+ stats_print(write_cb, cbopaque, opts);
+ } else {
+ buf_writer_t buf_writer;
+ buf_writer_init(tsdn, &buf_writer, write_cb, cbopaque, NULL,
+ STATS_PRINT_BUFSIZE);
+ stats_print(buf_writer_cb, &buf_writer, opts);
+ buf_writer_terminate(tsdn, &buf_writer);
+ }
+
check_entry_exit_locking(tsdn);
LOG("core.malloc_stats_print.exit", "");
}
+#undef STATS_PRINT_BUFSIZE
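
A hedged sketch of the effect of the buffering above: in non-debug builds a caller-supplied write callback now receives output in chunks of up to STATS_PRINT_BUFSIZE bytes instead of many small writes. The callback and output stream here are illustrative.

    #include <stdio.h>
    #include <string.h>
    #include <jemalloc/jemalloc.h>

    static void
    write_to_stream(void *opaque, const char *s) {
        fwrite(s, 1, strlen(s), (FILE *)opaque);
    }

    int
    main(void) {
        malloc_stats_print(write_to_stream, stderr, NULL);
        return 0;
    }
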
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
-je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
- size_t ret;
- tsdn_t *tsdn;
-
- LOG("core.malloc_usable_size.entry", "ptr: %p", ptr);
-
+JEMALLOC_ALWAYS_INLINE size_t
+je_malloc_usable_size_impl(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
assert(malloc_initialized() || IS_INITIALIZER);
- tsdn = tsdn_fetch();
+ tsdn_t *tsdn = tsdn_fetch();
check_entry_exit_locking(tsdn);
+ size_t ret;
if (unlikely(ptr == NULL)) {
ret = 0;
} else {
@@ -3740,12 +4088,211 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
ret = isalloc(tsdn, ptr);
}
}
-
check_entry_exit_locking(tsdn);
+
+ return ret;
+}
+
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
+je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
+ LOG("core.malloc_usable_size.entry", "ptr: %p", ptr);
+
+ size_t ret = je_malloc_usable_size_impl(ptr);
+
LOG("core.malloc_usable_size.exit", "result: %zu", ret);
return ret;
}
+#ifdef JEMALLOC_HAVE_MALLOC_SIZE
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
+je_malloc_size(const void *ptr) {
+ LOG("core.malloc_size.entry", "ptr: %p", ptr);
+
+ size_t ret = je_malloc_usable_size_impl(ptr);
+
+ LOG("core.malloc_size.exit", "result: %zu", ret);
+ return ret;
+}
+#endif
+
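
A minimal sketch of the shared behavior of the two entry points above, assuming jemalloc is the process allocator; malloc_size() is only exported where the platform defines it (JEMALLOC_HAVE_MALLOC_SIZE, e.g. Darwin).

    #include <stdlib.h>
    #include <jemalloc/jemalloc.h>   /* declares malloc_usable_size() */

    static void
    usable_size_example(void) {
        void *p = malloc(100);
        if (p != NULL) {
            size_t n = malloc_usable_size(p);  /* n >= 100 for a live allocation */
            (void)n;
            free(p);
        }
    }
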
+static void
+batch_alloc_prof_sample_assert(tsd_t *tsd, size_t batch, size_t usize) {
+ assert(config_prof && opt_prof);
+ bool prof_sample_event = te_prof_sample_event_lookahead(tsd,
+ batch * usize);
+ assert(!prof_sample_event);
+ size_t surplus;
+ prof_sample_event = te_prof_sample_event_lookahead_surplus(tsd,
+ (batch + 1) * usize, &surplus);
+ assert(prof_sample_event);
+ assert(surplus < usize);
+}
+
+size_t
+batch_alloc(void **ptrs, size_t num, size_t size, int flags) {
+ LOG("core.batch_alloc.entry",
+ "ptrs: %p, num: %zu, size: %zu, flags: %d", ptrs, num, size, flags);
+
+ tsd_t *tsd = tsd_fetch();
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ size_t filled = 0;
+
+ if (unlikely(tsd == NULL || tsd_reentrancy_level_get(tsd) > 0)) {
+ goto label_done;
+ }
+
+ size_t alignment = MALLOCX_ALIGN_GET(flags);
+ size_t usize;
+ if (aligned_usize_get(size, alignment, &usize, NULL, false)) {
+ goto label_done;
+ }
+ szind_t ind = sz_size2index(usize);
+ bool zero = zero_get(MALLOCX_ZERO_GET(flags), /* slow */ true);
+
+ /*
+ * The cache bin and arena will be lazily initialized; it's hard to
+ * know in advance whether each of them needs to be initialized.
+ */
+ cache_bin_t *bin = NULL;
+ arena_t *arena = NULL;
+
+ size_t nregs = 0;
+ if (likely(ind < SC_NBINS)) {
+ nregs = bin_infos[ind].nregs;
+ assert(nregs > 0);
+ }
+
+ while (filled < num) {
+ size_t batch = num - filled;
+ size_t surplus = SIZE_MAX; /* Dead store. */
+ bool prof_sample_event = config_prof && opt_prof
+ && prof_active_get_unlocked()
+ && te_prof_sample_event_lookahead_surplus(tsd,
+ batch * usize, &surplus);
+
+ if (prof_sample_event) {
+ /*
+ * Adjust so that the batch does not trigger prof
+ * sampling.
+ */
+ batch -= surplus / usize + 1;
+ batch_alloc_prof_sample_assert(tsd, batch, usize);
+ }
+
+ size_t progress = 0;
+
+ if (likely(ind < SC_NBINS) && batch >= nregs) {
+ if (arena == NULL) {
+ unsigned arena_ind = mallocx_arena_get(flags);
+ if (arena_get_from_ind(tsd, arena_ind,
+ &arena)) {
+ goto label_done;
+ }
+ if (arena == NULL) {
+ arena = arena_choose(tsd, NULL);
+ }
+ if (unlikely(arena == NULL)) {
+ goto label_done;
+ }
+ }
+ size_t arena_batch = batch - batch % nregs;
+ size_t n = arena_fill_small_fresh(tsd_tsdn(tsd), arena,
+ ind, ptrs + filled, arena_batch, zero);
+ progress += n;
+ filled += n;
+ }
+
+ if (likely(ind < nhbins) && progress < batch) {
+ if (bin == NULL) {
+ unsigned tcache_ind = mallocx_tcache_get(flags);
+ tcache_t *tcache = tcache_get_from_ind(tsd,
+ tcache_ind, /* slow */ true,
+ /* is_alloc */ true);
+ if (tcache != NULL) {
+ bin = &tcache->bins[ind];
+ }
+ }
+ /*
+ * If we don't have a tcache bin, we don't want to
+ * immediately give up, because there's the possibility
+ * that the user explicitly requested to bypass the
+ * tcache, or that the user explicitly turned off the
+ * tcache; in such cases, we go through the slow path,
+ * i.e. the mallocx() call at the end of the while loop.
+ */
+ if (bin != NULL) {
+ size_t bin_batch = batch - progress;
+ /*
+ * n can be less than bin_batch, meaning that
+ * the cache bin does not have enough memory.
+ * In such cases, we rely on the slow path,
+ * i.e. the mallocx() call at the end of the
+ * while loop, to fill in the cache, and in the
+ * next iteration of the while loop, the tcache
+ * will contain a lot of memory, and we can
+ * harvest them here. Compared to the
+ * alternative approach where we directly go to
+ * the arena bins here, the overhead of our
+ * current approach should usually be minimal,
+ * since we never try to fetch more memory than
+ * what a slab contains via the tcache. An
+ * additional benefit is that the tcache will
+ * not be empty for the next allocation request.
+ */
+ size_t n = cache_bin_alloc_batch(bin, bin_batch,
+ ptrs + filled);
+ if (config_stats) {
+ bin->tstats.nrequests += n;
+ }
+ if (zero) {
+ for (size_t i = 0; i < n; ++i) {
+ memset(ptrs[filled + i], 0,
+ usize);
+ }
+ }
+ if (config_prof && opt_prof
+ && unlikely(ind >= SC_NBINS)) {
+ for (size_t i = 0; i < n; ++i) {
+ prof_tctx_reset_sampled(tsd,
+ ptrs[filled + i]);
+ }
+ }
+ progress += n;
+ filled += n;
+ }
+ }
+
+ /*
+ * For thread events other than prof sampling, trigger them as
+ * if there's a single allocation of size (n * usize). This is
+ * fine because:
+ * (a) these events do not alter the allocation itself, and
+ * (b) it's possible that some event would have been triggered
+ * multiple times, instead of only once, if the allocations
+ * were handled individually, but it would do no harm (or
+ * even be beneficial) to coalesce the triggerings.
+ */
+ thread_alloc_event(tsd, progress * usize);
+
+ if (progress < batch || prof_sample_event) {
+ void *p = je_mallocx(size, flags);
+ if (p == NULL) { /* OOM */
+ break;
+ }
+ if (progress == batch) {
+ assert(prof_sampled(tsd, p));
+ }
+ ptrs[filled++] = p;
+ }
+ }
+
+label_done:
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ LOG("core.batch_alloc.exit", "result: %zu", filled);
+ return filled;
+}
+
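
A hedged usage sketch of batch_alloc() as declared above. This is the internal entry point; how it is surfaced to applications (e.g. through an experimental mallctl) is outside this hunk, and the sizes and dallocx() cleanup are illustrative.

    static void
    batch_alloc_example(void) {
        void *ptrs[16];
        size_t filled = batch_alloc(ptrs, 16, 64, 0);
        /* filled may be < 16 on OOM; slots [0, filled) hold allocations of
         * usable size >= 64 bytes. */
        for (size_t i = 0; i < filled; i++) {
            dallocx(ptrs[i], 0);
        }
    }
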
/*
* End non-standard functions.
*/
@@ -3812,7 +4359,7 @@ _malloc_prefork(void)
background_thread_prefork1(tsd_tsdn(tsd));
}
/* Break arena prefork into stages to preserve lock order. */
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < 9; i++) {
for (j = 0; j < narenas; j++) {
if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
NULL) {
@@ -3841,12 +4388,17 @@ _malloc_prefork(void)
case 7:
arena_prefork7(tsd_tsdn(tsd), arena);
break;
+ case 8:
+ arena_prefork8(tsd_tsdn(tsd), arena);
+ break;
default: not_reached();
}
}
}
+
}
prof_prefork1(tsd_tsdn(tsd));
+ stats_prefork(tsd_tsdn(tsd));
tsd_prefork(tsd);
}
@@ -3874,6 +4426,7 @@ _malloc_postfork(void)
witness_postfork_parent(tsd_witness_tsdp_get(tsd));
/* Release all mutexes, now that fork() has completed. */
+ stats_postfork_parent(tsd_tsdn(tsd));
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
arena_t *arena;
@@ -3903,6 +4456,7 @@ jemalloc_postfork_child(void) {
witness_postfork_child(tsd_witness_tsdp_get(tsd));
/* Release all mutexes, now that fork() has completed. */
+ stats_postfork_child(tsd_tsdn(tsd));
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
arena_t *arena;
diff --git a/deps/jemalloc/src/jemalloc_cpp.cpp b/deps/jemalloc/src/jemalloc_cpp.cpp
index da0441a7c..451655f1b 100644
--- a/deps/jemalloc/src/jemalloc_cpp.cpp
+++ b/deps/jemalloc/src/jemalloc_cpp.cpp
@@ -39,9 +39,29 @@ void operator delete(void *ptr, std::size_t size) noexcept;
void operator delete[](void *ptr, std::size_t size) noexcept;
#endif
+#if __cpp_aligned_new >= 201606
+/* C++17's over-aligned operators. */
+void *operator new(std::size_t size, std::align_val_t);
+void *operator new(std::size_t size, std::align_val_t, const std::nothrow_t &) noexcept;
+void *operator new[](std::size_t size, std::align_val_t);
+void *operator new[](std::size_t size, std::align_val_t, const std::nothrow_t &) noexcept;
+void operator delete(void* ptr, std::align_val_t) noexcept;
+void operator delete(void* ptr, std::align_val_t, const std::nothrow_t &) noexcept;
+void operator delete(void* ptr, std::size_t size, std::align_val_t al) noexcept;
+void operator delete[](void* ptr, std::align_val_t) noexcept;
+void operator delete[](void* ptr, std::align_val_t, const std::nothrow_t &) noexcept;
+void operator delete[](void* ptr, std::size_t size, std::align_val_t al) noexcept;
+#endif
+
JEMALLOC_NOINLINE
static void *
handleOOM(std::size_t size, bool nothrow) {
+ if (opt_experimental_infallible_new) {
+ safety_check_fail("<jemalloc>: Allocation failed and "
+ "opt.experimental_infallible_new is true. Aborting.\n");
+ return nullptr;
+ }
+
void *ptr = nullptr;
while (ptr == nullptr) {
@@ -72,14 +92,21 @@ handleOOM(std::size_t size, bool nothrow) {
}
template <bool IsNoExcept>
+JEMALLOC_NOINLINE
+static void *
+fallback_impl(std::size_t size) noexcept(IsNoExcept) {
+ void *ptr = malloc_default(size);
+ if (likely(ptr != nullptr)) {
+ return ptr;
+ }
+ return handleOOM(size, IsNoExcept);
+}
+
+template <bool IsNoExcept>
JEMALLOC_ALWAYS_INLINE
void *
newImpl(std::size_t size) noexcept(IsNoExcept) {
- void *ptr = je_malloc(size);
- if (likely(ptr != nullptr))
- return ptr;
-
- return handleOOM(size, IsNoExcept);
+ return imalloc_fastpath(size, &fallback_impl<IsNoExcept>);
}
void *
@@ -102,6 +129,42 @@ operator new[](std::size_t size, const std::nothrow_t &) noexcept {
return newImpl<true>(size);
}
+#if __cpp_aligned_new >= 201606
+
+template <bool IsNoExcept>
+JEMALLOC_ALWAYS_INLINE
+void *
+alignedNewImpl(std::size_t size, std::align_val_t alignment) noexcept(IsNoExcept) {
+ void *ptr = je_aligned_alloc(static_cast<std::size_t>(alignment), size);
+ if (likely(ptr != nullptr)) {
+ return ptr;
+ }
+
+ return handleOOM(size, IsNoExcept);
+}
+
+void *
+operator new(std::size_t size, std::align_val_t alignment) {
+ return alignedNewImpl<false>(size, alignment);
+}
+
+void *
+operator new[](std::size_t size, std::align_val_t alignment) {
+ return alignedNewImpl<false>(size, alignment);
+}
+
+void *
+operator new(std::size_t size, std::align_val_t alignment, const std::nothrow_t &) noexcept {
+ return alignedNewImpl<true>(size, alignment);
+}
+
+void *
+operator new[](std::size_t size, std::align_val_t alignment, const std::nothrow_t &) noexcept {
+ return alignedNewImpl<true>(size, alignment);
+}
+
+#endif // __cpp_aligned_new
+
void
operator delete(void *ptr) noexcept {
je_free(ptr);
@@ -123,19 +186,69 @@ void operator delete[](void *ptr, const std::nothrow_t &) noexcept {
#if __cpp_sized_deallocation >= 201309
+JEMALLOC_ALWAYS_INLINE
void
-operator delete(void *ptr, std::size_t size) noexcept {
+sizedDeleteImpl(void* ptr, std::size_t size) noexcept {
if (unlikely(ptr == nullptr)) {
return;
}
je_sdallocx_noflags(ptr, size);
}
-void operator delete[](void *ptr, std::size_t size) noexcept {
+void
+operator delete(void *ptr, std::size_t size) noexcept {
+ sizedDeleteImpl(ptr, size);
+}
+
+void
+operator delete[](void *ptr, std::size_t size) noexcept {
+ sizedDeleteImpl(ptr, size);
+}
+
+#endif // __cpp_sized_deallocation
+
+#if __cpp_aligned_new >= 201606
+
+JEMALLOC_ALWAYS_INLINE
+void
+alignedSizedDeleteImpl(void* ptr, std::size_t size, std::align_val_t alignment) noexcept {
+ if (config_debug) {
+ assert(((size_t)alignment & ((size_t)alignment - 1)) == 0);
+ }
if (unlikely(ptr == nullptr)) {
return;
}
- je_sdallocx_noflags(ptr, size);
+ je_sdallocx(ptr, size, MALLOCX_ALIGN(alignment));
}
-#endif // __cpp_sized_deallocation
+void
+operator delete(void* ptr, std::align_val_t) noexcept {
+ je_free(ptr);
+}
+
+void
+operator delete[](void* ptr, std::align_val_t) noexcept {
+ je_free(ptr);
+}
+
+void
+operator delete(void* ptr, std::align_val_t, const std::nothrow_t&) noexcept {
+ je_free(ptr);
+}
+
+void
+operator delete[](void* ptr, std::align_val_t, const std::nothrow_t&) noexcept {
+ je_free(ptr);
+}
+
+void
+operator delete(void* ptr, std::size_t size, std::align_val_t alignment) noexcept {
+ alignedSizedDeleteImpl(ptr, size, alignment);
+}
+
+void
+operator delete[](void* ptr, std::size_t size, std::align_val_t alignment) noexcept {
+ alignedSizedDeleteImpl(ptr, size, alignment);
+}
+
+#endif // __cpp_aligned_new
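
In C terms, the over-aligned operators above reduce to the aligned allocation entry points they forward to (here assumed to be the C11 aligned_alloc() plus the sized/aligned sdallocx() used in the sized-delete hunk); the constants are illustrative.

    #include <stdlib.h>
    #include <jemalloc/jemalloc.h>

    static void
    aligned_new_equivalent(void) {
        /* operator new(1024, std::align_val_t{64}) */
        void *p = aligned_alloc(64, 1024);
        if (p != NULL) {
            /* operator delete(p, 1024, std::align_val_t{64}) */
            sdallocx(p, 1024, MALLOCX_ALIGN(64));
        }
    }
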
diff --git a/deps/jemalloc/src/large.c b/deps/jemalloc/src/large.c
index 8e7a781d3..5fc4bf584 100644
--- a/deps/jemalloc/src/large.c
+++ b/deps/jemalloc/src/large.c
@@ -1,11 +1,11 @@
-#define JEMALLOC_LARGE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/emap.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/prof_recent.h"
#include "jemalloc/internal/util.h"
/******************************************************************************/
@@ -21,8 +21,7 @@ void *
large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
bool zero) {
size_t ausize;
- extent_t *extent;
- bool is_zeroed;
+ edata_t *edata;
UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
assert(!tsdn_null(tsdn) || arena != NULL);
@@ -32,163 +31,80 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
return NULL;
}
- if (config_fill && unlikely(opt_zero)) {
- zero = true;
- }
- /*
- * Copy zero into is_zeroed and pass the copy when allocating the
- * extent, so that it is possible to make correct junk/zero fill
- * decisions below, even if is_zeroed ends up true when zero is false.
- */
- is_zeroed = zero;
if (likely(!tsdn_null(tsdn))) {
arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, usize);
}
- if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn,
- arena, usize, alignment, &is_zeroed)) == NULL) {
+ if (unlikely(arena == NULL) || (edata = arena_extent_alloc_large(tsdn,
+ arena, usize, alignment, zero)) == NULL) {
return NULL;
}
/* See comments in arena_bin_slabs_full_insert(). */
if (!arena_is_auto(arena)) {
- /* Insert extent into large. */
+ /* Insert edata into large. */
malloc_mutex_lock(tsdn, &arena->large_mtx);
- extent_list_append(&arena->large, extent);
+ edata_list_active_append(&arena->large, edata);
malloc_mutex_unlock(tsdn, &arena->large_mtx);
}
- if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
- prof_idump(tsdn);
- }
-
- if (zero) {
- assert(is_zeroed);
- } else if (config_fill && unlikely(opt_junk_alloc)) {
- memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK,
- extent_usize_get(extent));
- }
arena_decay_tick(tsdn, arena);
- return extent_addr_get(extent);
+ return edata_addr_get(edata);
}
-static void
-large_dalloc_junk_impl(void *ptr, size_t size) {
- memset(ptr, JEMALLOC_FREE_JUNK, size);
-}
-large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk = large_dalloc_junk_impl;
-
-static void
-large_dalloc_maybe_junk_impl(void *ptr, size_t size) {
- if (config_fill && have_dss && unlikely(opt_junk_free)) {
- /*
- * Only bother junk filling if the extent isn't about to be
- * unmapped.
- */
- if (opt_retain || (have_dss && extent_in_dss(ptr))) {
- large_dalloc_junk(ptr, size);
- }
- }
-}
-large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk =
- large_dalloc_maybe_junk_impl;
-
static bool
-large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
- arena_t *arena = extent_arena_get(extent);
- size_t oldusize = extent_usize_get(extent);
- extent_hooks_t *extent_hooks = extent_hooks_get(arena);
- size_t diff = extent_size_get(extent) - (usize + sz_large_pad);
+large_ralloc_no_move_shrink(tsdn_t *tsdn, edata_t *edata, size_t usize) {
+ arena_t *arena = arena_get_from_edata(edata);
+ ehooks_t *ehooks = arena_get_ehooks(arena);
+ size_t old_size = edata_size_get(edata);
+ size_t old_usize = edata_usize_get(edata);
- assert(oldusize > usize);
+ assert(old_usize > usize);
- if (extent_hooks->split == NULL) {
+ if (ehooks_split_will_fail(ehooks)) {
return true;
}
- /* Split excess pages. */
- if (diff != 0) {
- extent_t *trail = extent_split_wrapper(tsdn, arena,
- &extent_hooks, extent, usize + sz_large_pad,
- sz_size2index(usize), false, diff, SC_NSIZES, false);
- if (trail == NULL) {
- return true;
- }
-
- if (config_fill && unlikely(opt_junk_free)) {
- large_dalloc_maybe_junk(extent_addr_get(trail),
- extent_size_get(trail));
- }
-
- arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, trail);
+ bool deferred_work_generated = false;
+ bool err = pa_shrink(tsdn, &arena->pa_shard, edata, old_size,
+ usize + sz_large_pad, sz_size2index(usize),
+ &deferred_work_generated);
+ if (err) {
+ return true;
}
-
- arena_extent_ralloc_large_shrink(tsdn, arena, extent, oldusize);
+ if (deferred_work_generated) {
+ arena_handle_deferred_work(tsdn, arena);
+ }
+ arena_extent_ralloc_large_shrink(tsdn, arena, edata, old_usize);
return false;
}
static bool
-large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
+large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
bool zero) {
- arena_t *arena = extent_arena_get(extent);
- size_t oldusize = extent_usize_get(extent);
- extent_hooks_t *extent_hooks = extent_hooks_get(arena);
- size_t trailsize = usize - oldusize;
+ arena_t *arena = arena_get_from_edata(edata);
- if (extent_hooks->merge == NULL) {
- return true;
- }
+ size_t old_size = edata_size_get(edata);
+ size_t old_usize = edata_usize_get(edata);
+ size_t new_size = usize + sz_large_pad;
- if (config_fill && unlikely(opt_zero)) {
- zero = true;
- }
- /*
- * Copy zero into is_zeroed_trail and pass the copy when allocating the
- * extent, so that it is possible to make correct junk/zero fill
- * decisions below, even if is_zeroed_trail ends up true when zero is
- * false.
- */
- bool is_zeroed_trail = zero;
- bool commit = true;
- extent_t *trail;
- bool new_mapping;
- if ((trail = extents_alloc(tsdn, arena, &extent_hooks,
- &arena->extents_dirty, extent_past_get(extent), trailsize, 0,
- CACHELINE, false, SC_NSIZES, &is_zeroed_trail, &commit)) != NULL
- || (trail = extents_alloc(tsdn, arena, &extent_hooks,
- &arena->extents_muzzy, extent_past_get(extent), trailsize, 0,
- CACHELINE, false, SC_NSIZES, &is_zeroed_trail, &commit)) != NULL) {
- if (config_stats) {
- new_mapping = false;
- }
- } else {
- if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks,
- extent_past_get(extent), trailsize, 0, CACHELINE, false,
- SC_NSIZES, &is_zeroed_trail, &commit)) == NULL) {
- return true;
- }
- if (config_stats) {
- new_mapping = true;
- }
- }
+ szind_t szind = sz_size2index(usize);
- if (extent_merge_wrapper(tsdn, arena, &extent_hooks, extent, trail)) {
- extent_dalloc_wrapper(tsdn, arena, &extent_hooks, trail);
- return true;
+ bool deferred_work_generated = false;
+ bool err = pa_expand(tsdn, &arena->pa_shard, edata, old_size, new_size,
+ szind, zero, &deferred_work_generated);
+
+ if (deferred_work_generated) {
+ arena_handle_deferred_work(tsdn, arena);
}
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
- szind_t szind = sz_size2index(usize);
- extent_szind_set(extent, szind);
- rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)extent_addr_get(extent), szind, false);
- if (config_stats && new_mapping) {
- arena_stats_mapped_add(tsdn, &arena->stats, trailsize);
+ if (err) {
+ return true;
}
if (zero) {
- if (config_cache_oblivious) {
+ if (opt_cache_oblivious) {
+ assert(sz_large_pad == PAGE);
/*
* Zero the trailing bytes of the original allocation's
* last page, since they are in an indeterminate state.
@@ -197,28 +113,23 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
* of CACHELINE in [0 .. PAGE).
*/
void *zbase = (void *)
- ((uintptr_t)extent_addr_get(extent) + oldusize);
+ ((uintptr_t)edata_addr_get(edata) + old_usize);
void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
PAGE));
size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
assert(nzero > 0);
memset(zbase, 0, nzero);
}
- assert(is_zeroed_trail);
- } else if (config_fill && unlikely(opt_junk_alloc)) {
- memset((void *)((uintptr_t)extent_addr_get(extent) + oldusize),
- JEMALLOC_ALLOC_JUNK, usize - oldusize);
}
-
- arena_extent_ralloc_large_expand(tsdn, arena, extent, oldusize);
+ arena_extent_ralloc_large_expand(tsdn, arena, edata, old_usize);
return false;
}
bool
-large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
+large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min,
size_t usize_max, bool zero) {
- size_t oldusize = extent_usize_get(extent);
+ size_t oldusize = edata_usize_get(edata);
/* The following should have been caught by callers. */
assert(usize_min > 0 && usize_max <= SC_LARGE_MAXCLASS);
@@ -228,16 +139,15 @@ large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
if (usize_max > oldusize) {
/* Attempt to expand the allocation in-place. */
- if (!large_ralloc_no_move_expand(tsdn, extent, usize_max,
+ if (!large_ralloc_no_move_expand(tsdn, edata, usize_max,
zero)) {
- arena_decay_tick(tsdn, extent_arena_get(extent));
+ arena_decay_tick(tsdn, arena_get_from_edata(edata));
return false;
}
/* Try again, this time with usize_min. */
if (usize_min < usize_max && usize_min > oldusize &&
- large_ralloc_no_move_expand(tsdn, extent, usize_min,
- zero)) {
- arena_decay_tick(tsdn, extent_arena_get(extent));
+ large_ralloc_no_move_expand(tsdn, edata, usize_min, zero)) {
+ arena_decay_tick(tsdn, arena_get_from_edata(edata));
return false;
}
}
@@ -247,14 +157,14 @@ large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
* the new size.
*/
if (oldusize >= usize_min && oldusize <= usize_max) {
- arena_decay_tick(tsdn, extent_arena_get(extent));
+ arena_decay_tick(tsdn, arena_get_from_edata(edata));
return false;
}
/* Attempt to shrink the allocation in-place. */
if (oldusize > usize_max) {
- if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) {
- arena_decay_tick(tsdn, extent_arena_get(extent));
+ if (!large_ralloc_no_move_shrink(tsdn, edata, usize_max)) {
+ arena_decay_tick(tsdn, arena_get_from_edata(edata));
return false;
}
}
@@ -274,9 +184,9 @@ void *
large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
size_t alignment, bool zero, tcache_t *tcache,
hook_ralloc_args_t *hook_args) {
- extent_t *extent = iealloc(tsdn, ptr);
+ edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
- size_t oldusize = extent_usize_get(extent);
+ size_t oldusize = edata_usize_get(edata);
/* The following should have been caught by callers. */
assert(usize > 0 && usize <= SC_LARGE_MAXCLASS);
/* Both allocation sizes must be large to avoid a move. */
@@ -284,11 +194,11 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
&& usize >= SC_LARGE_MINCLASS);
/* Try to avoid moving the allocation. */
- if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) {
+ if (!large_ralloc_no_move(tsdn, edata, usize, usize, zero)) {
hook_invoke_expand(hook_args->is_realloc
? hook_expand_realloc : hook_expand_rallocx, ptr, oldusize,
usize, (uintptr_t)ptr, hook_args->args);
- return extent_addr_get(extent);
+ return edata_addr_get(edata);
}
/*
@@ -309,87 +219,104 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);
size_t copysize = (usize < oldusize) ? usize : oldusize;
- memcpy(ret, extent_addr_get(extent), copysize);
- isdalloct(tsdn, extent_addr_get(extent), oldusize, tcache, NULL, true);
+ memcpy(ret, edata_addr_get(edata), copysize);
+ isdalloct(tsdn, edata_addr_get(edata), oldusize, tcache, NULL, true);
return ret;
}
/*
- * junked_locked indicates whether the extent's data have been junk-filled, and
- * whether the arena's large_mtx is currently held.
+ * locked indicates whether the arena's large_mtx is currently held.
*/
static void
-large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
- bool junked_locked) {
- if (!junked_locked) {
+large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
+ bool locked) {
+ if (!locked) {
/* See comments in arena_bin_slabs_full_insert(). */
if (!arena_is_auto(arena)) {
malloc_mutex_lock(tsdn, &arena->large_mtx);
- extent_list_remove(&arena->large, extent);
+ edata_list_active_remove(&arena->large, edata);
malloc_mutex_unlock(tsdn, &arena->large_mtx);
}
- large_dalloc_maybe_junk(extent_addr_get(extent),
- extent_usize_get(extent));
} else {
/* Only hold the large_mtx if necessary. */
if (!arena_is_auto(arena)) {
malloc_mutex_assert_owner(tsdn, &arena->large_mtx);
- extent_list_remove(&arena->large, extent);
+ edata_list_active_remove(&arena->large, edata);
}
}
- arena_extent_dalloc_large_prep(tsdn, arena, extent);
+ arena_extent_dalloc_large_prep(tsdn, arena, edata);
}
static void
-large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
- extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
- arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, extent);
+large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
+ bool deferred_work_generated = false;
+ pa_dalloc(tsdn, &arena->pa_shard, edata, &deferred_work_generated);
+ if (deferred_work_generated) {
+ arena_handle_deferred_work(tsdn, arena);
+ }
}
void
-large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent) {
- large_dalloc_prep_impl(tsdn, extent_arena_get(extent), extent, true);
+large_dalloc_prep_locked(tsdn_t *tsdn, edata_t *edata) {
+ large_dalloc_prep_impl(tsdn, arena_get_from_edata(edata), edata, true);
}
void
-large_dalloc_finish(tsdn_t *tsdn, extent_t *extent) {
- large_dalloc_finish_impl(tsdn, extent_arena_get(extent), extent);
+large_dalloc_finish(tsdn_t *tsdn, edata_t *edata) {
+ large_dalloc_finish_impl(tsdn, arena_get_from_edata(edata), edata);
}
void
-large_dalloc(tsdn_t *tsdn, extent_t *extent) {
- arena_t *arena = extent_arena_get(extent);
- large_dalloc_prep_impl(tsdn, arena, extent, false);
- large_dalloc_finish_impl(tsdn, arena, extent);
+large_dalloc(tsdn_t *tsdn, edata_t *edata) {
+ arena_t *arena = arena_get_from_edata(edata);
+ large_dalloc_prep_impl(tsdn, arena, edata, false);
+ large_dalloc_finish_impl(tsdn, arena, edata);
arena_decay_tick(tsdn, arena);
}
size_t
-large_salloc(tsdn_t *tsdn, const extent_t *extent) {
- return extent_usize_get(extent);
-}
-
-prof_tctx_t *
-large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent) {
- return extent_prof_tctx_get(extent);
+large_salloc(tsdn_t *tsdn, const edata_t *edata) {
+ return edata_usize_get(edata);
}
void
-large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx) {
- extent_prof_tctx_set(extent, tctx);
+large_prof_info_get(tsd_t *tsd, edata_t *edata, prof_info_t *prof_info,
+ bool reset_recent) {
+ assert(prof_info != NULL);
+
+ prof_tctx_t *alloc_tctx = edata_prof_tctx_get(edata);
+ prof_info->alloc_tctx = alloc_tctx;
+
+ if ((uintptr_t)alloc_tctx > (uintptr_t)1U) {
+ nstime_copy(&prof_info->alloc_time,
+ edata_prof_alloc_time_get(edata));
+ prof_info->alloc_size = edata_prof_alloc_size_get(edata);
+ if (reset_recent) {
+ /*
+ * Reset the pointer on the recent allocation record,
+ * so that this allocation is recorded as released.
+ */
+ prof_recent_alloc_reset(tsd, edata);
+ }
+ }
}
-void
-large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent) {
- large_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U);
+static void
+large_prof_tctx_set(edata_t *edata, prof_tctx_t *tctx) {
+ edata_prof_tctx_set(edata, tctx);
}
-nstime_t
-large_prof_alloc_time_get(const extent_t *extent) {
- return extent_prof_alloc_time_get(extent);
+void
+large_prof_tctx_reset(edata_t *edata) {
+ large_prof_tctx_set(edata, (prof_tctx_t *)(uintptr_t)1U);
}
void
-large_prof_alloc_time_set(extent_t *extent, nstime_t t) {
- extent_prof_alloc_time_set(extent, t);
+large_prof_info_set(edata_t *edata, prof_tctx_t *tctx, size_t size) {
+ nstime_t t;
+ nstime_prof_init_update(&t);
+ edata_prof_alloc_time_set(edata, &t);
+ edata_prof_alloc_size_set(edata, size);
+ edata_prof_recent_alloc_init(edata);
+ large_prof_tctx_set(edata, tctx);
}
diff --git a/deps/jemalloc/src/malloc_io.c b/deps/jemalloc/src/malloc_io.c
index d7cb0f528..b76885cbb 100644
--- a/deps/jemalloc/src/malloc_io.c
+++ b/deps/jemalloc/src/malloc_io.c
@@ -1,4 +1,3 @@
-#define JEMALLOC_MALLOC_IO_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
@@ -53,7 +52,6 @@
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
-static void wrtmessage(void *cbopaque, const char *s);
#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1)
static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s,
size_t *slen_p);
@@ -68,7 +66,7 @@ static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s,
/******************************************************************************/
/* malloc_message() setup. */
-static void
+void
wrtmessage(void *cbopaque, const char *s) {
malloc_write_fd(STDERR_FILENO, s, strlen(s));
}
@@ -135,10 +133,10 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) {
break;
case '-':
neg = true;
- /* Fall through. */
+ JEMALLOC_FALLTHROUGH;
case '+':
p++;
- /* Fall through. */
+ JEMALLOC_FALLTHROUGH;
default:
goto label_prefix;
}
@@ -289,7 +287,7 @@ d2s(intmax_t x, char sign, char *s, size_t *slen_p) {
if (!neg) {
break;
}
- /* Fall through. */
+ JEMALLOC_FALLTHROUGH;
case ' ':
case '+':
s--;
@@ -323,6 +321,7 @@ x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) {
return s;
}
+JEMALLOC_COLD
size_t
malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
size_t i;
@@ -348,7 +347,11 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
if (!left_justify && pad_len != 0) { \
size_t j; \
for (j = 0; j < pad_len; j++) { \
- APPEND_C(' '); \
+ if (pad_zero) { \
+ APPEND_C('0'); \
+ } else { \
+ APPEND_C(' '); \
+ } \
} \
} \
/* Value. */ \
@@ -420,6 +423,8 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
unsigned char len = '?';
char *s;
size_t slen;
+ bool first_width_digit = true;
+ bool pad_zero = false;
f++;
/* Flags. */
@@ -456,7 +461,12 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
width = -width;
}
break;
- case '0': case '1': case '2': case '3': case '4':
+ case '0':
+ if (first_width_digit) {
+ pad_zero = true;
+ }
+ JEMALLOC_FALLTHROUGH;
+ case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9': {
uintmax_t uwidth;
set_errno(0);
@@ -464,6 +474,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
assert(uwidth != UINTMAX_MAX || get_errno() !=
ERANGE);
width = (int)uwidth;
+ first_width_digit = false;
break;
} default:
break;
@@ -521,6 +532,18 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
intmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[D2S_BUFSIZE];
+ /*
+ * Outputting negative, zero-padded numbers
+ * would require a nontrivial rework of the
+ * interaction between the width and padding
+ * (since 0 padding goes between the '-' and the
+ * number, while ' ' padding goes either before
+ * the - or after the number). Since we
+			 * the - or after the number). Since we
+ * currently don't ever need 0-padded negative
+ * numbers, just don't bother supporting it.
+ */
+ assert(!pad_zero);
+
GET_ARG_NUMERIC(val, len);
s = d2s(val, (plus_plus ? '+' : (plus_space ?
' ' : '-')), buf, &slen);
@@ -620,8 +643,8 @@ malloc_snprintf(char *str, size_t size, const char *format, ...) {
}
void
-malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *format, va_list ap) {
+malloc_vcprintf(write_cb_t *write_cb, void *cbopaque, const char *format,
+ va_list ap) {
char buf[MALLOC_PRINTF_BUFSIZE];
if (write_cb == NULL) {
@@ -644,8 +667,7 @@ malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
*/
JEMALLOC_FORMAT_PRINTF(3, 4)
void
-malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *format, ...) {
+malloc_cprintf(write_cb_t *write_cb, void *cbopaque, const char *format, ...) {
va_list ap;
va_start(ap, format);
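
A small sketch of the new behavior, using the internal malloc_snprintf() wrapper from this file: with the pad_zero handling above, a leading 0 in the width now produces zero padding for unsigned conversions (negative zero-padded values remain unsupported, per the assert).

    static void
    pad_zero_example(void) {
        char buf[16];
        malloc_snprintf(buf, sizeof(buf), "%08zu", (size_t)42);
        /* buf now holds "00000042". */
    }
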
diff --git a/deps/jemalloc/src/mutex.c b/deps/jemalloc/src/mutex.c
index 3f920f5b1..0b3547a87 100644
--- a/deps/jemalloc/src/mutex.c
+++ b/deps/jemalloc/src/mutex.c
@@ -1,4 +1,3 @@
-#define JEMALLOC_MUTEX_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
@@ -10,6 +9,12 @@
#define _CRT_SPINCOUNT 4000
#endif
+/*
+ * Based on benchmark results, a fixed spin with this amount of retries works
+ * well for our critical sections.
+ */
+int64_t opt_mutex_max_spin = 600;
+
/******************************************************************************/
/* Data. */
@@ -46,13 +51,13 @@ JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
void
malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
mutex_prof_data_t *data = &mutex->prof_data;
- nstime_t before = NSTIME_ZERO_INITIALIZER;
+ nstime_t before;
if (ncpus == 1) {
goto label_spin_done;
}
- int cnt = 0, max_cnt = MALLOC_MUTEX_MAX_SPIN;
+ int cnt = 0;
do {
spin_cpu_spinwait();
if (!atomic_load_b(&mutex->locked, ATOMIC_RELAXED)
@@ -60,7 +65,7 @@ malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
data->n_spin_acquired++;
return;
}
- } while (cnt++ < max_cnt);
+ } while (cnt++ < opt_mutex_max_spin || opt_mutex_max_spin == -1);
if (!config_stats) {
/* Only spin is useful when stats is off. */
@@ -68,7 +73,7 @@ malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
return;
}
label_spin_done:
- nstime_update(&before);
+ nstime_init_update(&before);
/* Copy before to after to avoid clock skews. */
nstime_t after;
nstime_copy(&after, &before);
@@ -104,8 +109,8 @@ label_spin_done:
static void
mutex_prof_data_init(mutex_prof_data_t *data) {
memset(data, 0, sizeof(mutex_prof_data_t));
- nstime_init(&data->max_wait_time, 0);
- nstime_init(&data->tot_wait_time, 0);
+ nstime_init_zero(&data->max_wait_time);
+ nstime_init_zero(&data->tot_wait_time);
data->prev_owner = NULL;
}
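
A hedged sketch of reading the new knob at run time, assuming it is surfaced under jemalloc's usual opt.* naming as "mutex_max_spin" (inferred from the opt_ variable above, not confirmed by this hunk); per the loop above, -1 removes the spin cap.

    #include <jemalloc/jemalloc.h>

    static void
    mutex_max_spin_example(void) {
        int64_t max_spin;
        size_t sz = sizeof(max_spin);
        if (mallctl("opt.mutex_max_spin", &max_spin, &sz, NULL, 0) == 0) {
            /* e.g. started with MALLOC_CONF="mutex_max_spin:-1" to spin forever. */
        }
    }
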
diff --git a/deps/jemalloc/src/mutex_pool.c b/deps/jemalloc/src/mutex_pool.c
deleted file mode 100644
index f24d10e44..000000000
--- a/deps/jemalloc/src/mutex_pool.c
+++ /dev/null
@@ -1,18 +0,0 @@
-#define JEMALLOC_MUTEX_POOL_C_
-
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
-
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/mutex_pool.h"
-
-bool
-mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank) {
- for (int i = 0; i < MUTEX_POOL_SIZE; ++i) {
- if (malloc_mutex_init(&pool->mutexes[i], name, rank,
- malloc_mutex_address_ordered)) {
- return true;
- }
- }
- return false;
-}
diff --git a/deps/jemalloc/src/nstime.c b/deps/jemalloc/src/nstime.c
index 71db35396..a1a53777f 100644
--- a/deps/jemalloc/src/nstime.c
+++ b/deps/jemalloc/src/nstime.c
@@ -8,96 +8,169 @@
#define BILLION UINT64_C(1000000000)
#define MILLION UINT64_C(1000000)
+static void
+nstime_set_initialized(nstime_t *time) {
+#ifdef JEMALLOC_DEBUG
+ time->magic = NSTIME_MAGIC;
+#endif
+}
+
+static void
+nstime_assert_initialized(const nstime_t *time) {
+#ifdef JEMALLOC_DEBUG
+ /*
+ * Some parts (e.g. stats) rely on memset to zero initialize. Treat
+ * these as valid initialization.
+ */
+ assert(time->magic == NSTIME_MAGIC ||
+ (time->magic == 0 && time->ns == 0));
+#endif
+}
+
+static void
+nstime_pair_assert_initialized(const nstime_t *t1, const nstime_t *t2) {
+ nstime_assert_initialized(t1);
+ nstime_assert_initialized(t2);
+}
+
+static void
+nstime_initialize_operand(nstime_t *time) {
+ /*
+ * Operations like nstime_add may have the initial operand being zero
+ * initialized (covered by the assert below). Full-initialize needed
+ * before changing it to non-zero.
+ */
+ nstime_assert_initialized(time);
+ nstime_set_initialized(time);
+}
+
void
nstime_init(nstime_t *time, uint64_t ns) {
+ nstime_set_initialized(time);
time->ns = ns;
}
void
nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec) {
+ nstime_set_initialized(time);
time->ns = sec * BILLION + nsec;
}
uint64_t
nstime_ns(const nstime_t *time) {
+ nstime_assert_initialized(time);
return time->ns;
}
uint64_t
nstime_msec(const nstime_t *time) {
+ nstime_assert_initialized(time);
return time->ns / MILLION;
}
uint64_t
nstime_sec(const nstime_t *time) {
+ nstime_assert_initialized(time);
return time->ns / BILLION;
}
uint64_t
nstime_nsec(const nstime_t *time) {
+ nstime_assert_initialized(time);
return time->ns % BILLION;
}
void
nstime_copy(nstime_t *time, const nstime_t *source) {
+ /* Source is required to be initialized. */
+ nstime_assert_initialized(source);
*time = *source;
+ nstime_assert_initialized(time);
}
int
nstime_compare(const nstime_t *a, const nstime_t *b) {
+ nstime_pair_assert_initialized(a, b);
return (a->ns > b->ns) - (a->ns < b->ns);
}
void
nstime_add(nstime_t *time, const nstime_t *addend) {
+ nstime_pair_assert_initialized(time, addend);
assert(UINT64_MAX - time->ns >= addend->ns);
+ nstime_initialize_operand(time);
time->ns += addend->ns;
}
void
nstime_iadd(nstime_t *time, uint64_t addend) {
+ nstime_assert_initialized(time);
assert(UINT64_MAX - time->ns >= addend);
+ nstime_initialize_operand(time);
time->ns += addend;
}
void
nstime_subtract(nstime_t *time, const nstime_t *subtrahend) {
+ nstime_pair_assert_initialized(time, subtrahend);
assert(nstime_compare(time, subtrahend) >= 0);
+ /* No initialize operand -- subtraction must be initialized. */
time->ns -= subtrahend->ns;
}
void
nstime_isubtract(nstime_t *time, uint64_t subtrahend) {
+ nstime_assert_initialized(time);
assert(time->ns >= subtrahend);
+ /* No initialize operand -- subtraction must be initialized. */
time->ns -= subtrahend;
}
void
nstime_imultiply(nstime_t *time, uint64_t multiplier) {
+ nstime_assert_initialized(time);
assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) <<
2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns));
+ nstime_initialize_operand(time);
time->ns *= multiplier;
}
void
nstime_idivide(nstime_t *time, uint64_t divisor) {
+ nstime_assert_initialized(time);
assert(divisor != 0);
+ nstime_initialize_operand(time);
time->ns /= divisor;
}
uint64_t
nstime_divide(const nstime_t *time, const nstime_t *divisor) {
+ nstime_pair_assert_initialized(time, divisor);
assert(divisor->ns != 0);
+ /* No initialize operand -- *time itself remains unchanged. */
return time->ns / divisor->ns;
}
+/* Returns time since *past, w/o updating *past. */
+uint64_t
+nstime_ns_since(const nstime_t *past) {
+ nstime_assert_initialized(past);
+
+ nstime_t now;
+ nstime_copy(&now, past);
+ nstime_update(&now);
+
+ assert(nstime_compare(&now, past) >= 0);
+ return now.ns - past->ns;
+}
+
#ifdef _WIN32
# define NSTIME_MONOTONIC true
static void
@@ -152,7 +225,42 @@ nstime_monotonic_impl(void) {
}
nstime_monotonic_t *JET_MUTABLE nstime_monotonic = nstime_monotonic_impl;
-static bool
+prof_time_res_t opt_prof_time_res =
+ prof_time_res_default;
+
+const char *prof_time_res_mode_names[] = {
+ "default",
+ "high",
+};
+
+
+static void
+nstime_get_realtime(nstime_t *time) {
+#if defined(JEMALLOC_HAVE_CLOCK_REALTIME) && !defined(_WIN32)
+ struct timespec ts;
+
+ clock_gettime(CLOCK_REALTIME, &ts);
+ nstime_init2(time, ts.tv_sec, ts.tv_nsec);
+#else
+ unreachable();
+#endif
+}
+
+static void
+nstime_prof_update_impl(nstime_t *time) {
+ nstime_t old_time;
+
+ nstime_copy(&old_time, time);
+
+ if (opt_prof_time_res == prof_time_res_high) {
+ nstime_get_realtime(time);
+ } else {
+ nstime_get(time);
+ }
+}
+nstime_prof_update_t *JET_MUTABLE nstime_prof_update = nstime_prof_update_impl;
+
+static void
nstime_update_impl(nstime_t *time) {
nstime_t old_time;
@@ -162,9 +270,20 @@ nstime_update_impl(nstime_t *time) {
/* Handle non-monotonic clocks. */
if (unlikely(nstime_compare(&old_time, time) > 0)) {
nstime_copy(time, &old_time);
- return true;
}
-
- return false;
}
nstime_update_t *JET_MUTABLE nstime_update = nstime_update_impl;
+
+void
+nstime_init_update(nstime_t *time) {
+ nstime_init_zero(time);
+ nstime_update(time);
+}
+
+void
+nstime_prof_init_update(nstime_t *time) {
+ nstime_init_zero(time);
+ nstime_prof_update(time);
+}
+
+
diff --git a/deps/jemalloc/src/pa.c b/deps/jemalloc/src/pa.c
new file mode 100644
index 000000000..eb7e4620e
--- /dev/null
+++ b/deps/jemalloc/src/pa.c
@@ -0,0 +1,277 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/san.h"
+#include "jemalloc/internal/hpa.h"
+
+static void
+pa_nactive_add(pa_shard_t *shard, size_t add_pages) {
+ atomic_fetch_add_zu(&shard->nactive, add_pages, ATOMIC_RELAXED);
+}
+
+static void
+pa_nactive_sub(pa_shard_t *shard, size_t sub_pages) {
+ assert(atomic_load_zu(&shard->nactive, ATOMIC_RELAXED) >= sub_pages);
+ atomic_fetch_sub_zu(&shard->nactive, sub_pages, ATOMIC_RELAXED);
+}
+
+bool
+pa_central_init(pa_central_t *central, base_t *base, bool hpa,
+ hpa_hooks_t *hpa_hooks) {
+ bool err;
+ if (hpa) {
+ err = hpa_central_init(&central->hpa, base, hpa_hooks);
+ if (err) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool
+pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, pa_central_t *central,
+ emap_t *emap, base_t *base, unsigned ind, pa_shard_stats_t *stats,
+ malloc_mutex_t *stats_mtx, nstime_t *cur_time,
+ size_t pac_oversize_threshold, ssize_t dirty_decay_ms,
+ ssize_t muzzy_decay_ms) {
+ /* This will change eventually, but for now it should hold. */
+ assert(base_ind_get(base) == ind);
+ if (edata_cache_init(&shard->edata_cache, base)) {
+ return true;
+ }
+
+ if (pac_init(tsdn, &shard->pac, base, emap, &shard->edata_cache,
+ cur_time, pac_oversize_threshold, dirty_decay_ms, muzzy_decay_ms,
+ &stats->pac_stats, stats_mtx)) {
+ return true;
+ }
+
+ shard->ind = ind;
+
+ shard->ever_used_hpa = false;
+ atomic_store_b(&shard->use_hpa, false, ATOMIC_RELAXED);
+
+ atomic_store_zu(&shard->nactive, 0, ATOMIC_RELAXED);
+
+ shard->stats_mtx = stats_mtx;
+ shard->stats = stats;
+ memset(shard->stats, 0, sizeof(*shard->stats));
+
+ shard->central = central;
+ shard->emap = emap;
+ shard->base = base;
+
+ return false;
+}
+
+bool
+pa_shard_enable_hpa(tsdn_t *tsdn, pa_shard_t *shard,
+ const hpa_shard_opts_t *hpa_opts, const sec_opts_t *hpa_sec_opts) {
+ if (hpa_shard_init(&shard->hpa_shard, &shard->central->hpa, shard->emap,
+ shard->base, &shard->edata_cache, shard->ind, hpa_opts)) {
+ return true;
+ }
+ if (sec_init(tsdn, &shard->hpa_sec, shard->base, &shard->hpa_shard.pai,
+ hpa_sec_opts)) {
+ return true;
+ }
+ shard->ever_used_hpa = true;
+ atomic_store_b(&shard->use_hpa, true, ATOMIC_RELAXED);
+
+ return false;
+}
+
+void
+pa_shard_disable_hpa(tsdn_t *tsdn, pa_shard_t *shard) {
+ atomic_store_b(&shard->use_hpa, false, ATOMIC_RELAXED);
+ if (shard->ever_used_hpa) {
+ sec_disable(tsdn, &shard->hpa_sec);
+ hpa_shard_disable(tsdn, &shard->hpa_shard);
+ }
+}
+
+void
+pa_shard_reset(tsdn_t *tsdn, pa_shard_t *shard) {
+ atomic_store_zu(&shard->nactive, 0, ATOMIC_RELAXED);
+ if (shard->ever_used_hpa) {
+ sec_flush(tsdn, &shard->hpa_sec);
+ }
+}
+
+static bool
+pa_shard_uses_hpa(pa_shard_t *shard) {
+ return atomic_load_b(&shard->use_hpa, ATOMIC_RELAXED);
+}
+
+void
+pa_shard_destroy(tsdn_t *tsdn, pa_shard_t *shard) {
+ pac_destroy(tsdn, &shard->pac);
+ if (shard->ever_used_hpa) {
+ sec_flush(tsdn, &shard->hpa_sec);
+ hpa_shard_disable(tsdn, &shard->hpa_shard);
+ }
+}
+
+static pai_t *
+pa_get_pai(pa_shard_t *shard, edata_t *edata) {
+ return (edata_pai_get(edata) == EXTENT_PAI_PAC
+ ? &shard->pac.pai : &shard->hpa_sec.pai);
+}
+
+edata_t *
+pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
+ bool slab, szind_t szind, bool zero, bool guarded,
+ bool *deferred_work_generated) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+ assert(!guarded || alignment <= PAGE);
+
+ edata_t *edata = NULL;
+ if (!guarded && pa_shard_uses_hpa(shard)) {
+ edata = pai_alloc(tsdn, &shard->hpa_sec.pai, size, alignment,
+ zero, /* guarded */ false, slab, deferred_work_generated);
+ }
+ /*
+ * Fall back to the PAC if the HPA is off or couldn't serve the given
+ * allocation request.
+ */
+ if (edata == NULL) {
+ edata = pai_alloc(tsdn, &shard->pac.pai, size, alignment, zero,
+ guarded, slab, deferred_work_generated);
+ }
+ if (edata != NULL) {
+ assert(edata_size_get(edata) == size);
+ pa_nactive_add(shard, size >> LG_PAGE);
+ emap_remap(tsdn, shard->emap, edata, szind, slab);
+ edata_szind_set(edata, szind);
+ edata_slab_set(edata, slab);
+ if (slab && (size > 2 * PAGE)) {
+ emap_register_interior(tsdn, shard->emap, edata, szind);
+ }
+ assert(edata_arena_ind_get(edata) == shard->ind);
+ }
+ return edata;
+}
+
+bool
+pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
+ size_t new_size, szind_t szind, bool zero, bool *deferred_work_generated) {
+ assert(new_size > old_size);
+ assert(edata_size_get(edata) == old_size);
+ assert((new_size & PAGE_MASK) == 0);
+ if (edata_guarded_get(edata)) {
+ return true;
+ }
+ size_t expand_amount = new_size - old_size;
+
+ pai_t *pai = pa_get_pai(shard, edata);
+
+ bool error = pai_expand(tsdn, pai, edata, old_size, new_size, zero,
+ deferred_work_generated);
+ if (error) {
+ return true;
+ }
+
+ pa_nactive_add(shard, expand_amount >> LG_PAGE);
+ edata_szind_set(edata, szind);
+ emap_remap(tsdn, shard->emap, edata, szind, /* slab */ false);
+ return false;
+}
+
+bool
+pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
+ size_t new_size, szind_t szind, bool *deferred_work_generated) {
+ assert(new_size < old_size);
+ assert(edata_size_get(edata) == old_size);
+ assert((new_size & PAGE_MASK) == 0);
+ if (edata_guarded_get(edata)) {
+ return true;
+ }
+ size_t shrink_amount = old_size - new_size;
+
+ pai_t *pai = pa_get_pai(shard, edata);
+ bool error = pai_shrink(tsdn, pai, edata, old_size, new_size,
+ deferred_work_generated);
+ if (error) {
+ return true;
+ }
+ pa_nactive_sub(shard, shrink_amount >> LG_PAGE);
+
+ edata_szind_set(edata, szind);
+ emap_remap(tsdn, shard->emap, edata, szind, /* slab */ false);
+ return false;
+}
+
+void
+pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
+ bool *deferred_work_generated) {
+ emap_remap(tsdn, shard->emap, edata, SC_NSIZES, /* slab */ false);
+ if (edata_slab_get(edata)) {
+ emap_deregister_interior(tsdn, shard->emap, edata);
+ /*
+ * The slab state of the extent isn't cleared. It may be used
+ * by the pai implementation, e.g. to make caching decisions.
+ */
+ }
+ edata_addr_set(edata, edata_base_get(edata));
+ edata_szind_set(edata, SC_NSIZES);
+ pa_nactive_sub(shard, edata_size_get(edata) >> LG_PAGE);
+ pai_t *pai = pa_get_pai(shard, edata);
+ pai_dalloc(tsdn, pai, edata, deferred_work_generated);
+}
+
+bool
+pa_shard_retain_grow_limit_get_set(tsdn_t *tsdn, pa_shard_t *shard,
+ size_t *old_limit, size_t *new_limit) {
+ return pac_retain_grow_limit_get_set(tsdn, &shard->pac, old_limit,
+ new_limit);
+}
+
+bool
+pa_decay_ms_set(tsdn_t *tsdn, pa_shard_t *shard, extent_state_t state,
+ ssize_t decay_ms, pac_purge_eagerness_t eagerness) {
+ return pac_decay_ms_set(tsdn, &shard->pac, state, decay_ms, eagerness);
+}
+
+ssize_t
+pa_decay_ms_get(pa_shard_t *shard, extent_state_t state) {
+ return pac_decay_ms_get(&shard->pac, state);
+}
+
+void
+pa_shard_set_deferral_allowed(tsdn_t *tsdn, pa_shard_t *shard,
+ bool deferral_allowed) {
+ if (pa_shard_uses_hpa(shard)) {
+ hpa_shard_set_deferral_allowed(tsdn, &shard->hpa_shard,
+ deferral_allowed);
+ }
+}
+
+void
+pa_shard_do_deferred_work(tsdn_t *tsdn, pa_shard_t *shard) {
+ if (pa_shard_uses_hpa(shard)) {
+ hpa_shard_do_deferred_work(tsdn, &shard->hpa_shard);
+ }
+}
+
+/*
+ * Get time until next deferred work ought to happen. If there are multiple
+ * things that have been deferred, this function calculates the time until
+ * the soonest of those things.
+ */
+uint64_t
+pa_shard_time_until_deferred_work(tsdn_t *tsdn, pa_shard_t *shard) {
+ uint64_t time = pai_time_until_deferred_work(tsdn, &shard->pac.pai);
+ if (time == BACKGROUND_THREAD_DEFERRED_MIN) {
+ return time;
+ }
+
+ if (pa_shard_uses_hpa(shard)) {
+ uint64_t hpa =
+ pai_time_until_deferred_work(tsdn, &shard->hpa_shard.pai);
+ if (hpa < time) {
+ time = hpa;
+ }
+ }
+ return time;
+}
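
A hedged sketch of the shard-level allocation lifecycle defined above (internal API); tsdn, shard and szind are assumed to come from the caller, and deferred work is reported back through the out-parameter as in the arena code earlier in this diff.

    static void
    pa_roundtrip(tsdn_t *tsdn, pa_shard_t *shard, szind_t szind) {
        bool deferred_work = false;
        edata_t *edata = pa_alloc(tsdn, shard, 8 * PAGE, PAGE, /* slab */ false,
            szind, /* zero */ false, /* guarded */ false, &deferred_work);
        if (edata != NULL) {
            pa_dalloc(tsdn, shard, edata, &deferred_work);
        }
    }
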
diff --git a/deps/jemalloc/src/pa_extra.c b/deps/jemalloc/src/pa_extra.c
new file mode 100644
index 000000000..0f488be69
--- /dev/null
+++ b/deps/jemalloc/src/pa_extra.c
@@ -0,0 +1,191 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+/*
+ * This file is logically part of the PA module. While pa.c contains the core
+ * allocator functionality, this file contains boring integration functionality;
+ * things like the pre- and post- fork handlers, and stats merging for CTL
+ * refreshes.
+ */
+
+void
+pa_shard_prefork0(tsdn_t *tsdn, pa_shard_t *shard) {
+ malloc_mutex_prefork(tsdn, &shard->pac.decay_dirty.mtx);
+ malloc_mutex_prefork(tsdn, &shard->pac.decay_muzzy.mtx);
+}
+
+void
+pa_shard_prefork2(tsdn_t *tsdn, pa_shard_t *shard) {
+ if (shard->ever_used_hpa) {
+ sec_prefork2(tsdn, &shard->hpa_sec);
+ }
+}
+
+void
+pa_shard_prefork3(tsdn_t *tsdn, pa_shard_t *shard) {
+ malloc_mutex_prefork(tsdn, &shard->pac.grow_mtx);
+ if (shard->ever_used_hpa) {
+ hpa_shard_prefork3(tsdn, &shard->hpa_shard);
+ }
+}
+
+void
+pa_shard_prefork4(tsdn_t *tsdn, pa_shard_t *shard) {
+ ecache_prefork(tsdn, &shard->pac.ecache_dirty);
+ ecache_prefork(tsdn, &shard->pac.ecache_muzzy);
+ ecache_prefork(tsdn, &shard->pac.ecache_retained);
+ if (shard->ever_used_hpa) {
+ hpa_shard_prefork4(tsdn, &shard->hpa_shard);
+ }
+}
+
+void
+pa_shard_prefork5(tsdn_t *tsdn, pa_shard_t *shard) {
+ edata_cache_prefork(tsdn, &shard->edata_cache);
+}
+
+void
+pa_shard_postfork_parent(tsdn_t *tsdn, pa_shard_t *shard) {
+ edata_cache_postfork_parent(tsdn, &shard->edata_cache);
+ ecache_postfork_parent(tsdn, &shard->pac.ecache_dirty);
+ ecache_postfork_parent(tsdn, &shard->pac.ecache_muzzy);
+ ecache_postfork_parent(tsdn, &shard->pac.ecache_retained);
+ malloc_mutex_postfork_parent(tsdn, &shard->pac.grow_mtx);
+ malloc_mutex_postfork_parent(tsdn, &shard->pac.decay_dirty.mtx);
+ malloc_mutex_postfork_parent(tsdn, &shard->pac.decay_muzzy.mtx);
+ if (shard->ever_used_hpa) {
+ sec_postfork_parent(tsdn, &shard->hpa_sec);
+ hpa_shard_postfork_parent(tsdn, &shard->hpa_shard);
+ }
+}
+
+void
+pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard) {
+ edata_cache_postfork_child(tsdn, &shard->edata_cache);
+ ecache_postfork_child(tsdn, &shard->pac.ecache_dirty);
+ ecache_postfork_child(tsdn, &shard->pac.ecache_muzzy);
+ ecache_postfork_child(tsdn, &shard->pac.ecache_retained);
+ malloc_mutex_postfork_child(tsdn, &shard->pac.grow_mtx);
+ malloc_mutex_postfork_child(tsdn, &shard->pac.decay_dirty.mtx);
+ malloc_mutex_postfork_child(tsdn, &shard->pac.decay_muzzy.mtx);
+ if (shard->ever_used_hpa) {
+ sec_postfork_child(tsdn, &shard->hpa_sec);
+ hpa_shard_postfork_child(tsdn, &shard->hpa_shard);
+ }
+}
+
+void
+pa_shard_basic_stats_merge(pa_shard_t *shard, size_t *nactive, size_t *ndirty,
+ size_t *nmuzzy) {
+ *nactive += atomic_load_zu(&shard->nactive, ATOMIC_RELAXED);
+ *ndirty += ecache_npages_get(&shard->pac.ecache_dirty);
+ *nmuzzy += ecache_npages_get(&shard->pac.ecache_muzzy);
+}
+
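
A hedged aside, not part of the diff: the basic stats merge above amounts to accumulating relaxed atomic loads from each shard into caller-provided totals. A minimal C11 sketch of that pattern, with a made-up shard_t standing in for pa_shard_t, could look like this:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

typedef struct {
	atomic_size_t nactive; /* pages in active use by this shard */
} shard_t;

/* Accumulate one shard's counter into a caller-provided total. */
static void
shard_stats_merge(const shard_t *shard, size_t *nactive_out) {
	*nactive_out += atomic_load_explicit(&shard->nactive,
	    memory_order_relaxed);
}

int
main(void) {
	shard_t shards[2];
	atomic_init(&shards[0].nactive, 100);
	atomic_init(&shards[1].nactive, 250);

	size_t total = 0;
	for (size_t i = 0; i < 2; i++) {
		shard_stats_merge(&shards[i], &total);
	}
	printf("total active pages: %zu\n", total);
	return 0;
}
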
+void
+pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard,
+ pa_shard_stats_t *pa_shard_stats_out, pac_estats_t *estats_out,
+ hpa_shard_stats_t *hpa_stats_out, sec_stats_t *sec_stats_out,
+ size_t *resident) {
+ cassert(config_stats);
+
+ pa_shard_stats_out->pac_stats.retained +=
+ ecache_npages_get(&shard->pac.ecache_retained) << LG_PAGE;
+ pa_shard_stats_out->edata_avail += atomic_load_zu(
+ &shard->edata_cache.count, ATOMIC_RELAXED);
+
+ size_t resident_pgs = 0;
+ resident_pgs += atomic_load_zu(&shard->nactive, ATOMIC_RELAXED);
+ resident_pgs += ecache_npages_get(&shard->pac.ecache_dirty);
+ *resident += (resident_pgs << LG_PAGE);
+
+ /* Dirty decay stats */
+ locked_inc_u64_unsynchronized(
+ &pa_shard_stats_out->pac_stats.decay_dirty.npurge,
+ locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
+ &shard->pac.stats->decay_dirty.npurge));
+ locked_inc_u64_unsynchronized(
+ &pa_shard_stats_out->pac_stats.decay_dirty.nmadvise,
+ locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
+ &shard->pac.stats->decay_dirty.nmadvise));
+ locked_inc_u64_unsynchronized(
+ &pa_shard_stats_out->pac_stats.decay_dirty.purged,
+ locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
+ &shard->pac.stats->decay_dirty.purged));
+
+ /* Muzzy decay stats */
+ locked_inc_u64_unsynchronized(
+ &pa_shard_stats_out->pac_stats.decay_muzzy.npurge,
+ locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
+ &shard->pac.stats->decay_muzzy.npurge));
+ locked_inc_u64_unsynchronized(
+ &pa_shard_stats_out->pac_stats.decay_muzzy.nmadvise,
+ locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
+ &shard->pac.stats->decay_muzzy.nmadvise));
+ locked_inc_u64_unsynchronized(
+ &pa_shard_stats_out->pac_stats.decay_muzzy.purged,
+ locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
+ &shard->pac.stats->decay_muzzy.purged));
+
+ atomic_load_add_store_zu(&pa_shard_stats_out->pac_stats.abandoned_vm,
+ atomic_load_zu(&shard->pac.stats->abandoned_vm, ATOMIC_RELAXED));
+
+ for (pszind_t i = 0; i < SC_NPSIZES; i++) {
+ size_t dirty, muzzy, retained, dirty_bytes, muzzy_bytes,
+ retained_bytes;
+ dirty = ecache_nextents_get(&shard->pac.ecache_dirty, i);
+ muzzy = ecache_nextents_get(&shard->pac.ecache_muzzy, i);
+ retained = ecache_nextents_get(&shard->pac.ecache_retained, i);
+ dirty_bytes = ecache_nbytes_get(&shard->pac.ecache_dirty, i);
+ muzzy_bytes = ecache_nbytes_get(&shard->pac.ecache_muzzy, i);
+ retained_bytes = ecache_nbytes_get(&shard->pac.ecache_retained,
+ i);
+
+ estats_out[i].ndirty = dirty;
+ estats_out[i].nmuzzy = muzzy;
+ estats_out[i].nretained = retained;
+ estats_out[i].dirty_bytes = dirty_bytes;
+ estats_out[i].muzzy_bytes = muzzy_bytes;
+ estats_out[i].retained_bytes = retained_bytes;
+ }
+
+ if (shard->ever_used_hpa) {
+ hpa_shard_stats_merge(tsdn, &shard->hpa_shard, hpa_stats_out);
+ sec_stats_merge(tsdn, &shard->hpa_sec, sec_stats_out);
+ }
+}
+
+static void
+pa_shard_mtx_stats_read_single(tsdn_t *tsdn, mutex_prof_data_t *mutex_prof_data,
+ malloc_mutex_t *mtx, int ind) {
+ malloc_mutex_lock(tsdn, mtx);
+ malloc_mutex_prof_read(tsdn, &mutex_prof_data[ind], mtx);
+ malloc_mutex_unlock(tsdn, mtx);
+}
+
+void
+pa_shard_mtx_stats_read(tsdn_t *tsdn, pa_shard_t *shard,
+ mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes]) {
+ pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
+ &shard->edata_cache.mtx, arena_prof_mutex_extent_avail);
+ pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
+ &shard->pac.ecache_dirty.mtx, arena_prof_mutex_extents_dirty);
+ pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
+ &shard->pac.ecache_muzzy.mtx, arena_prof_mutex_extents_muzzy);
+ pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
+ &shard->pac.ecache_retained.mtx, arena_prof_mutex_extents_retained);
+ pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
+ &shard->pac.decay_dirty.mtx, arena_prof_mutex_decay_dirty);
+ pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
+ &shard->pac.decay_muzzy.mtx, arena_prof_mutex_decay_muzzy);
+
+ if (shard->ever_used_hpa) {
+ pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
+ &shard->hpa_shard.mtx, arena_prof_mutex_hpa_shard);
+ pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
+ &shard->hpa_shard.grow_mtx,
+ arena_prof_mutex_hpa_shard_grow);
+ sec_mutex_stats_read(tsdn, &shard->hpa_sec,
+ &mutex_prof_data[arena_prof_mutex_hpa_sec]);
+ }
+}
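
For readers unfamiliar with the prefork/postfork dance these handlers implement, here is a small standalone sketch (a simplification, not jemalloc code): locks are acquired before fork() and released in both the parent and the child afterwards, so the child never inherits a mutex held by a thread that does not exist in its address space. jemalloc's real postfork_child handlers typically reinitialize state rather than simply unlocking; the demo_* names below are hypothetical. Compile with -pthread.

#include <pthread.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static pthread_mutex_t demo_mtx = PTHREAD_MUTEX_INITIALIZER;

static void demo_prefork(void) { pthread_mutex_lock(&demo_mtx); }
static void demo_postfork_parent(void) { pthread_mutex_unlock(&demo_mtx); }
static void demo_postfork_child(void) { pthread_mutex_unlock(&demo_mtx); }

int
main(void) {
	/* Register the handlers once; they run around every subsequent fork(). */
	pthread_atfork(demo_prefork, demo_postfork_parent, demo_postfork_child);

	pid_t pid = fork();
	if (pid == 0) {
		/* Child: the mutex is usable because demo_postfork_child released it. */
		pthread_mutex_lock(&demo_mtx);
		puts("child: lock acquired after fork");
		pthread_mutex_unlock(&demo_mtx);
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	pthread_mutex_lock(&demo_mtx);
	puts("parent: lock acquired after fork");
	pthread_mutex_unlock(&demo_mtx);
	return 0;
}
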
diff --git a/deps/jemalloc/src/pac.c b/deps/jemalloc/src/pac.c
new file mode 100644
index 000000000..53e3d8237
--- /dev/null
+++ b/deps/jemalloc/src/pac.c
@@ -0,0 +1,587 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/pac.h"
+#include "jemalloc/internal/san.h"
+
+static edata_t *pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size,
+ size_t alignment, bool zero, bool guarded, bool frequent_reuse,
+ bool *deferred_work_generated);
+static bool pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated);
+static bool pac_shrink_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ size_t old_size, size_t new_size, bool *deferred_work_generated);
+static void pac_dalloc_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ bool *deferred_work_generated);
+static uint64_t pac_time_until_deferred_work(tsdn_t *tsdn, pai_t *self);
+
+static inline void
+pac_decay_data_get(pac_t *pac, extent_state_t state,
+ decay_t **r_decay, pac_decay_stats_t **r_decay_stats, ecache_t **r_ecache) {
+ switch(state) {
+ case extent_state_dirty:
+ *r_decay = &pac->decay_dirty;
+ *r_decay_stats = &pac->stats->decay_dirty;
+ *r_ecache = &pac->ecache_dirty;
+ return;
+ case extent_state_muzzy:
+ *r_decay = &pac->decay_muzzy;
+ *r_decay_stats = &pac->stats->decay_muzzy;
+ *r_ecache = &pac->ecache_muzzy;
+ return;
+ default:
+ unreachable();
+ }
+}
+
+bool
+pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
+ edata_cache_t *edata_cache, nstime_t *cur_time,
+ size_t pac_oversize_threshold, ssize_t dirty_decay_ms,
+ ssize_t muzzy_decay_ms, pac_stats_t *pac_stats, malloc_mutex_t *stats_mtx) {
+ unsigned ind = base_ind_get(base);
+ /*
+ * Delay coalescing for dirty extents despite the disruptive effect on
+ * memory layout for best-fit extent allocation, since cached extents
+ * are likely to be reused soon after deallocation, and the cost of
+ * merging/splitting extents is non-trivial.
+ */
+ if (ecache_init(tsdn, &pac->ecache_dirty, extent_state_dirty, ind,
+ /* delay_coalesce */ true)) {
+ return true;
+ }
+ /*
+ * Coalesce muzzy extents immediately, because operations on them are in
+ * the critical path much less often than for dirty extents.
+ */
+ if (ecache_init(tsdn, &pac->ecache_muzzy, extent_state_muzzy, ind,
+ /* delay_coalesce */ false)) {
+ return true;
+ }
+ /*
+ * Coalesce retained extents immediately, in part because they will
+ * never be evicted (and therefore there's no opportunity for delayed
+ * coalescing), but also because operations on retained extents are not
+ * in the critical path.
+ */
+ if (ecache_init(tsdn, &pac->ecache_retained, extent_state_retained,
+ ind, /* delay_coalesce */ false)) {
+ return true;
+ }
+ exp_grow_init(&pac->exp_grow);
+ if (malloc_mutex_init(&pac->grow_mtx, "extent_grow",
+ WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ atomic_store_zu(&pac->oversize_threshold, pac_oversize_threshold,
+ ATOMIC_RELAXED);
+ if (decay_init(&pac->decay_dirty, cur_time, dirty_decay_ms)) {
+ return true;
+ }
+ if (decay_init(&pac->decay_muzzy, cur_time, muzzy_decay_ms)) {
+ return true;
+ }
+ if (san_bump_alloc_init(&pac->sba)) {
+ return true;
+ }
+
+ pac->base = base;
+ pac->emap = emap;
+ pac->edata_cache = edata_cache;
+ pac->stats = pac_stats;
+ pac->stats_mtx = stats_mtx;
+ atomic_store_zu(&pac->extent_sn_next, 0, ATOMIC_RELAXED);
+
+ pac->pai.alloc = &pac_alloc_impl;
+ pac->pai.alloc_batch = &pai_alloc_batch_default;
+ pac->pai.expand = &pac_expand_impl;
+ pac->pai.shrink = &pac_shrink_impl;
+ pac->pai.dalloc = &pac_dalloc_impl;
+ pac->pai.dalloc_batch = &pai_dalloc_batch_default;
+ pac->pai.time_until_deferred_work = &pac_time_until_deferred_work;
+
+ return false;
+}
+
+static inline bool
+pac_may_have_muzzy(pac_t *pac) {
+ return pac_decay_ms_get(pac, extent_state_muzzy) != 0;
+}
+
+static edata_t *
+pac_alloc_real(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
+ size_t alignment, bool zero, bool guarded) {
+ assert(!guarded || alignment <= PAGE);
+
+ edata_t *edata = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_dirty,
+ NULL, size, alignment, zero, guarded);
+
+ if (edata == NULL && pac_may_have_muzzy(pac)) {
+ edata = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_muzzy,
+ NULL, size, alignment, zero, guarded);
+ }
+ if (edata == NULL) {
+ edata = ecache_alloc_grow(tsdn, pac, ehooks,
+ &pac->ecache_retained, NULL, size, alignment, zero,
+ guarded);
+ if (config_stats && edata != NULL) {
+ atomic_fetch_add_zu(&pac->stats->pac_mapped, size,
+ ATOMIC_RELAXED);
+ }
+ }
+
+ return edata;
+}
+
+static edata_t *
+pac_alloc_new_guarded(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
+ size_t alignment, bool zero, bool frequent_reuse) {
+ assert(alignment <= PAGE);
+
+ edata_t *edata;
+ if (san_bump_enabled() && frequent_reuse) {
+ edata = san_bump_alloc(tsdn, &pac->sba, pac, ehooks, size,
+ zero);
+ } else {
+ size_t size_with_guards = san_two_side_guarded_sz(size);
+ /* Alloc a non-guarded extent first.*/
+ edata = pac_alloc_real(tsdn, pac, ehooks, size_with_guards,
+ /* alignment */ PAGE, zero, /* guarded */ false);
+ if (edata != NULL) {
+ /* Add guards around it. */
+ assert(edata_size_get(edata) == size_with_guards);
+ san_guard_pages_two_sided(tsdn, ehooks, edata,
+ pac->emap, true);
+ }
+ }
+ assert(edata == NULL || (edata_guarded_get(edata) &&
+ edata_size_get(edata) == size));
+
+ return edata;
+}
+
+static edata_t *
+pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
+ bool zero, bool guarded, bool frequent_reuse,
+ bool *deferred_work_generated) {
+ pac_t *pac = (pac_t *)self;
+ ehooks_t *ehooks = pac_ehooks_get(pac);
+
+ edata_t *edata = NULL;
+ /*
+	 * The condition is an optimization: guarded allocations that are not
+	 * frequently reused are never put in the ecache, and pac_alloc_real
+	 * does not grow retained memory for guarded allocations, so
+	 * pac_alloc_real would always return NULL for them.
+	 */
+ if (!guarded || frequent_reuse) {
+ edata = pac_alloc_real(tsdn, pac, ehooks, size, alignment,
+ zero, guarded);
+ }
+ if (edata == NULL && guarded) {
+ /* No cached guarded extents; creating a new one. */
+ edata = pac_alloc_new_guarded(tsdn, pac, ehooks, size,
+ alignment, zero, frequent_reuse);
+ }
+
+ return edata;
+}
+
+static bool
+pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
+ size_t new_size, bool zero, bool *deferred_work_generated) {
+ pac_t *pac = (pac_t *)self;
+ ehooks_t *ehooks = pac_ehooks_get(pac);
+
+ size_t mapped_add = 0;
+ size_t expand_amount = new_size - old_size;
+
+ if (ehooks_merge_will_fail(ehooks)) {
+ return true;
+ }
+ edata_t *trail = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_dirty,
+ edata, expand_amount, PAGE, zero, /* guarded*/ false);
+ if (trail == NULL) {
+ trail = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_muzzy,
+ edata, expand_amount, PAGE, zero, /* guarded*/ false);
+ }
+ if (trail == NULL) {
+ trail = ecache_alloc_grow(tsdn, pac, ehooks,
+ &pac->ecache_retained, edata, expand_amount, PAGE, zero,
+ /* guarded */ false);
+ mapped_add = expand_amount;
+ }
+ if (trail == NULL) {
+ return true;
+ }
+ if (extent_merge_wrapper(tsdn, pac, ehooks, edata, trail)) {
+ extent_dalloc_wrapper(tsdn, pac, ehooks, trail);
+ return true;
+ }
+ if (config_stats && mapped_add > 0) {
+ atomic_fetch_add_zu(&pac->stats->pac_mapped, mapped_add,
+ ATOMIC_RELAXED);
+ }
+ return false;
+}
+
+static bool
+pac_shrink_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
+ size_t new_size, bool *deferred_work_generated) {
+ pac_t *pac = (pac_t *)self;
+ ehooks_t *ehooks = pac_ehooks_get(pac);
+
+ size_t shrink_amount = old_size - new_size;
+
+ if (ehooks_split_will_fail(ehooks)) {
+ return true;
+ }
+
+ edata_t *trail = extent_split_wrapper(tsdn, pac, ehooks, edata,
+ new_size, shrink_amount, /* holding_core_locks */ false);
+ if (trail == NULL) {
+ return true;
+ }
+ ecache_dalloc(tsdn, pac, ehooks, &pac->ecache_dirty, trail);
+ *deferred_work_generated = true;
+ return false;
+}
+
+static void
+pac_dalloc_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ bool *deferred_work_generated) {
+ pac_t *pac = (pac_t *)self;
+ ehooks_t *ehooks = pac_ehooks_get(pac);
+
+ if (edata_guarded_get(edata)) {
+ /*
+ * Because cached guarded extents do exact fit only, large
+ * guarded extents are restored on dalloc eagerly (otherwise
+ * they will not be reused efficiently). Slab sizes have a
+ * limited number of size classes, and tend to cycle faster.
+ *
+ * In the case where coalesce is restrained (VirtualFree on
+ * Windows), guarded extents are also not cached -- otherwise
+ * during arena destroy / reset, the retained extents would not
+ * be whole regions (i.e. they are split between regular and
+ * guarded).
+ */
+ if (!edata_slab_get(edata) || !maps_coalesce) {
+ assert(edata_size_get(edata) >= SC_LARGE_MINCLASS ||
+ !maps_coalesce);
+ san_unguard_pages_two_sided(tsdn, ehooks, edata,
+ pac->emap);
+ }
+ }
+
+ ecache_dalloc(tsdn, pac, ehooks, &pac->ecache_dirty, edata);
+ /* Purging of deallocated pages is deferred */
+ *deferred_work_generated = true;
+}
+
+static inline uint64_t
+pac_ns_until_purge(tsdn_t *tsdn, decay_t *decay, size_t npages) {
+ if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
+ /* Use minimal interval if decay is contended. */
+ return BACKGROUND_THREAD_DEFERRED_MIN;
+ }
+ uint64_t result = decay_ns_until_purge(decay, npages,
+ ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD);
+
+ malloc_mutex_unlock(tsdn, &decay->mtx);
+ return result;
+}
+
+static uint64_t
+pac_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) {
+ uint64_t time;
+ pac_t *pac = (pac_t *)self;
+
+ time = pac_ns_until_purge(tsdn,
+ &pac->decay_dirty,
+ ecache_npages_get(&pac->ecache_dirty));
+ if (time == BACKGROUND_THREAD_DEFERRED_MIN) {
+ return time;
+ }
+
+ uint64_t muzzy = pac_ns_until_purge(tsdn,
+ &pac->decay_muzzy,
+ ecache_npages_get(&pac->ecache_muzzy));
+ if (muzzy < time) {
+ time = muzzy;
+ }
+ return time;
+}
+
+bool
+pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit,
+ size_t *new_limit) {
+ pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0);
+ if (new_limit != NULL) {
+ size_t limit = *new_limit;
+ /* Grow no more than the new limit. */
+ if ((new_ind = sz_psz2ind(limit + 1) - 1) >= SC_NPSIZES) {
+ return true;
+ }
+ }
+
+ malloc_mutex_lock(tsdn, &pac->grow_mtx);
+ if (old_limit != NULL) {
+ *old_limit = sz_pind2sz(pac->exp_grow.limit);
+ }
+ if (new_limit != NULL) {
+ pac->exp_grow.limit = new_ind;
+ }
+ malloc_mutex_unlock(tsdn, &pac->grow_mtx);
+
+ return false;
+}
+
+static size_t
+pac_stash_decayed(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
+ size_t npages_limit, size_t npages_decay_max,
+ edata_list_inactive_t *result) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+ ehooks_t *ehooks = pac_ehooks_get(pac);
+
+ /* Stash extents according to npages_limit. */
+ size_t nstashed = 0;
+ while (nstashed < npages_decay_max) {
+ edata_t *edata = ecache_evict(tsdn, pac, ehooks, ecache,
+ npages_limit);
+ if (edata == NULL) {
+ break;
+ }
+ edata_list_inactive_append(result, edata);
+ nstashed += edata_size_get(edata) >> LG_PAGE;
+ }
+ return nstashed;
+}
+
+static size_t
+pac_decay_stashed(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
+ pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay,
+ edata_list_inactive_t *decay_extents) {
+ bool err;
+
+ size_t nmadvise = 0;
+ size_t nunmapped = 0;
+ size_t npurged = 0;
+
+ ehooks_t *ehooks = pac_ehooks_get(pac);
+
+ bool try_muzzy = !fully_decay
+ && pac_decay_ms_get(pac, extent_state_muzzy) != 0;
+
+ for (edata_t *edata = edata_list_inactive_first(decay_extents); edata !=
+ NULL; edata = edata_list_inactive_first(decay_extents)) {
+ edata_list_inactive_remove(decay_extents, edata);
+
+ size_t size = edata_size_get(edata);
+ size_t npages = size >> LG_PAGE;
+
+ nmadvise++;
+ npurged += npages;
+
+ switch (ecache->state) {
+ case extent_state_active:
+ not_reached();
+ case extent_state_dirty:
+ if (try_muzzy) {
+ err = extent_purge_lazy_wrapper(tsdn, ehooks,
+ edata, /* offset */ 0, size);
+ if (!err) {
+ ecache_dalloc(tsdn, pac, ehooks,
+ &pac->ecache_muzzy, edata);
+ break;
+ }
+ }
+ JEMALLOC_FALLTHROUGH;
+ case extent_state_muzzy:
+ extent_dalloc_wrapper(tsdn, pac, ehooks, edata);
+ nunmapped += npages;
+ break;
+ case extent_state_retained:
+ default:
+ not_reached();
+ }
+ }
+
+ if (config_stats) {
+ LOCKEDINT_MTX_LOCK(tsdn, *pac->stats_mtx);
+ locked_inc_u64(tsdn, LOCKEDINT_MTX(*pac->stats_mtx),
+ &decay_stats->npurge, 1);
+ locked_inc_u64(tsdn, LOCKEDINT_MTX(*pac->stats_mtx),
+ &decay_stats->nmadvise, nmadvise);
+ locked_inc_u64(tsdn, LOCKEDINT_MTX(*pac->stats_mtx),
+ &decay_stats->purged, npurged);
+ LOCKEDINT_MTX_UNLOCK(tsdn, *pac->stats_mtx);
+ atomic_fetch_sub_zu(&pac->stats->pac_mapped,
+ nunmapped << LG_PAGE, ATOMIC_RELAXED);
+ }
+
+ return npurged;
+}
+
+/*
+ * Decay at most npages_decay_max pages without violating the invariant
+ * (ecache_npages_get(ecache) >= npages_limit). An upper bound on the number
+ * of pages to decay is needed to prevent unbounded growth of the stashed
+ * list; otherwise new pages could keep being added to extents during the
+ * current decay run, and the purging thread would never finish.
+ */
+static void
+pac_decay_to_limit(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
+ pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay,
+ size_t npages_limit, size_t npages_decay_max) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 1);
+
+ if (decay->purging || npages_decay_max == 0) {
+ return;
+ }
+ decay->purging = true;
+ malloc_mutex_unlock(tsdn, &decay->mtx);
+
+ edata_list_inactive_t decay_extents;
+ edata_list_inactive_init(&decay_extents);
+ size_t npurge = pac_stash_decayed(tsdn, pac, ecache, npages_limit,
+ npages_decay_max, &decay_extents);
+ if (npurge != 0) {
+ size_t npurged = pac_decay_stashed(tsdn, pac, decay,
+ decay_stats, ecache, fully_decay, &decay_extents);
+ assert(npurged == npurge);
+ }
+
+ malloc_mutex_lock(tsdn, &decay->mtx);
+ decay->purging = false;
+}
+
+void
+pac_decay_all(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
+ pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay) {
+ malloc_mutex_assert_owner(tsdn, &decay->mtx);
+ pac_decay_to_limit(tsdn, pac, decay, decay_stats, ecache, fully_decay,
+ /* npages_limit */ 0, ecache_npages_get(ecache));
+}
+
+static void
+pac_decay_try_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
+ pac_decay_stats_t *decay_stats, ecache_t *ecache,
+ size_t current_npages, size_t npages_limit) {
+ if (current_npages > npages_limit) {
+ pac_decay_to_limit(tsdn, pac, decay, decay_stats, ecache,
+ /* fully_decay */ false, npages_limit,
+ current_npages - npages_limit);
+ }
+}
+
+bool
+pac_maybe_decay_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
+ pac_decay_stats_t *decay_stats, ecache_t *ecache,
+ pac_purge_eagerness_t eagerness) {
+ malloc_mutex_assert_owner(tsdn, &decay->mtx);
+
+ /* Purge all or nothing if the option is disabled. */
+ ssize_t decay_ms = decay_ms_read(decay);
+ if (decay_ms <= 0) {
+ if (decay_ms == 0) {
+ pac_decay_to_limit(tsdn, pac, decay, decay_stats,
+ ecache, /* fully_decay */ false,
+ /* npages_limit */ 0, ecache_npages_get(ecache));
+ }
+ return false;
+ }
+
+ /*
+ * If the deadline has been reached, advance to the current epoch and
+ * purge to the new limit if necessary. Note that dirty pages created
+ * during the current epoch are not subject to purge until a future
+	 * epoch; as a result, purging only happens when the epoch advances or
+	 * when triggered by background threads (as a scheduled event).
+ */
+ nstime_t time;
+ nstime_init_update(&time);
+ size_t npages_current = ecache_npages_get(ecache);
+ bool epoch_advanced = decay_maybe_advance_epoch(decay, &time,
+ npages_current);
+ if (eagerness == PAC_PURGE_ALWAYS
+ || (epoch_advanced && eagerness == PAC_PURGE_ON_EPOCH_ADVANCE)) {
+ size_t npages_limit = decay_npages_limit_get(decay);
+ pac_decay_try_purge(tsdn, pac, decay, decay_stats, ecache,
+ npages_current, npages_limit);
+ }
+
+ return epoch_advanced;
+}
+
+bool
+pac_decay_ms_set(tsdn_t *tsdn, pac_t *pac, extent_state_t state,
+ ssize_t decay_ms, pac_purge_eagerness_t eagerness) {
+ decay_t *decay;
+ pac_decay_stats_t *decay_stats;
+ ecache_t *ecache;
+ pac_decay_data_get(pac, state, &decay, &decay_stats, &ecache);
+
+ if (!decay_ms_valid(decay_ms)) {
+ return true;
+ }
+
+ malloc_mutex_lock(tsdn, &decay->mtx);
+ /*
+ * Restart decay backlog from scratch, which may cause many dirty pages
+ * to be immediately purged. It would conceptually be possible to map
+ * the old backlog onto the new backlog, but there is no justification
+ * for such complexity since decay_ms changes are intended to be
+ * infrequent, either between the {-1, 0, >0} states, or a one-time
+ * arbitrary change during initial arena configuration.
+ */
+ nstime_t cur_time;
+ nstime_init_update(&cur_time);
+ decay_reinit(decay, &cur_time, decay_ms);
+ pac_maybe_decay_purge(tsdn, pac, decay, decay_stats, ecache, eagerness);
+ malloc_mutex_unlock(tsdn, &decay->mtx);
+
+ return false;
+}
+
+ssize_t
+pac_decay_ms_get(pac_t *pac, extent_state_t state) {
+ decay_t *decay;
+ pac_decay_stats_t *decay_stats;
+ ecache_t *ecache;
+ pac_decay_data_get(pac, state, &decay, &decay_stats, &ecache);
+ return decay_ms_read(decay);
+}
+
+void
+pac_reset(tsdn_t *tsdn, pac_t *pac) {
+ /*
+ * No-op for now; purging is still done at the arena-level. It should
+ * get moved in here, though.
+ */
+ (void)tsdn;
+ (void)pac;
+}
+
+void
+pac_destroy(tsdn_t *tsdn, pac_t *pac) {
+ assert(ecache_npages_get(&pac->ecache_dirty) == 0);
+ assert(ecache_npages_get(&pac->ecache_muzzy) == 0);
+ /*
+ * Iterate over the retained extents and destroy them. This gives the
+ * extent allocator underlying the extent hooks an opportunity to unmap
+ * all retained memory without having to keep its own metadata
+ * structures. In practice, virtual memory for dss-allocated extents is
+ * leaked here, so best practice is to avoid dss for arenas to be
+ * destroyed, or provide custom extent hooks that track retained
+ * dss-based extents for later reuse.
+ */
+ ehooks_t *ehooks = pac_ehooks_get(pac);
+ edata_t *edata;
+ while ((edata = ecache_evict(tsdn, pac, ehooks,
+ &pac->ecache_retained, 0)) != NULL) {
+ extent_destroy_wrapper(tsdn, pac, ehooks, edata);
+ }
+}
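
A simplified sketch of the contract enforced by pac_decay_to_limit() above, using a made-up cache_t instead of jemalloc's ecache: purge at most npages_decay_max pages while never dropping below npages_limit cached pages.

#include <stddef.h>
#include <stdio.h>

typedef struct {
	size_t npages; /* pages currently cached (stand-in for an ecache) */
} cache_t;

/*
 * Purge at most npages_decay_max pages while keeping at least npages_limit
 * pages cached; returns the number of pages actually purged.
 */
static size_t
decay_to_limit(cache_t *cache, size_t npages_limit, size_t npages_decay_max) {
	size_t purged = 0;
	while (purged < npages_decay_max && cache->npages > npages_limit) {
		cache->npages--; /* stand-in for evicting and purging one page */
		purged++;
	}
	return purged;
}

int
main(void) {
	cache_t c = { .npages = 100 };
	size_t purged = decay_to_limit(&c, 40, 50);
	printf("purged %zu, %zu pages left\n", purged, c.npages); /* 50, 50 */
	purged = decay_to_limit(&c, 40, 50);
	printf("purged %zu, %zu pages left\n", purged, c.npages); /* 10, 40 */
	return 0;
}
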
diff --git a/deps/jemalloc/src/pages.c b/deps/jemalloc/src/pages.c
index 13de27a00..8c83a7de7 100644
--- a/deps/jemalloc/src/pages.c
+++ b/deps/jemalloc/src/pages.c
@@ -1,4 +1,3 @@
-#define JEMALLOC_PAGES_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/pages.h"
@@ -14,6 +13,14 @@
#include <vm/vm_param.h>
#endif
#endif
+#ifdef __NetBSD__
+#include <sys/bitops.h> /* ilog2 */
+#endif
+#ifdef JEMALLOC_HAVE_VM_MAKE_TAG
+#define PAGES_FD_TAG VM_MAKE_TAG(101U)
+#else
+#define PAGES_FD_TAG -1
+#endif
/******************************************************************************/
/* Data. */
@@ -40,6 +47,57 @@ thp_mode_t init_system_thp_mode;
/* Runtime support for lazy purge. Irrelevant when !pages_can_purge_lazy. */
static bool pages_can_purge_lazy_runtime = true;
+#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
+static int madvise_dont_need_zeros_is_faulty = -1;
+/**
+ * Check that MADV_DONTNEED will actually zero pages on subsequent access.
+ *
+ * qemu does not support this yet [1], and running a program that uses
+ * jemalloc under qemu can trigger a hard-to-diagnose assertion failure:
+ *
+ * <jemalloc>: ../contrib/jemalloc/src/extent.c:1195: Failed assertion: "p[i] == 0"
+ *
+ * [1]: https://patchwork.kernel.org/patch/10576637/
+ */
+static int madvise_MADV_DONTNEED_zeroes_pages()
+{
+ int works = -1;
+ size_t size = PAGE;
+
+ void * addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+
+ if (addr == MAP_FAILED) {
+ malloc_write("<jemalloc>: Cannot allocate memory for "
+ "MADV_DONTNEED check\n");
+ if (opt_abort) {
+ abort();
+ }
+ }
+
+ memset(addr, 'A', size);
+ if (madvise(addr, size, MADV_DONTNEED) == 0) {
+ works = memchr(addr, 'A', size) == NULL;
+ } else {
+ /*
+		 * If madvise() does not support MADV_DONTNEED, it is still
+		 * safe to call it; we simply rely on its return code.
+ */
+ works = 1;
+ }
+
+ if (munmap(addr, size) != 0) {
+ malloc_write("<jemalloc>: Cannot deallocate memory for "
+ "MADV_DONTNEED check\n");
+ if (opt_abort) {
+ abort();
+ }
+ }
+
+ return works;
+}
+#endif
+
/******************************************************************************/
/*
* Function prototypes for static functions that are referenced prior to
@@ -74,9 +132,21 @@ os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
* of existing mappings, and we only want to create new mappings.
*/
{
+#ifdef __NetBSD__
+ /*
+ * On NetBSD PAGE for a platform is defined to the
+ * maximum page size of all machine architectures
+ * for that platform, so that we can use the same
+ * binaries across all machine architectures.
+ */
+ if (alignment > os_page || PAGE > os_page) {
+ unsigned int a = ilog2(MAX(alignment, PAGE));
+ mmap_flags |= MAP_ALIGNED(a);
+ }
+#endif
int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
- ret = mmap(addr, size, prot, mmap_flags, -1, 0);
+ ret = mmap(addr, size, prot, mmap_flags, PAGES_FD_TAG, 0);
}
assert(ret != NULL);
@@ -197,8 +267,8 @@ pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
flags |= MAP_FIXED | MAP_EXCL;
} else {
unsigned alignment_bits = ffs_zu(alignment);
- assert(alignment_bits > 1);
- flags |= MAP_ALIGNED(alignment_bits - 1);
+ assert(alignment_bits > 0);
+ flags |= MAP_ALIGNED(alignment_bits);
}
void *ret = mmap(addr, size, prot, flags, -1, 0);
@@ -246,14 +316,10 @@ pages_unmap(void *addr, size_t size) {
}
static bool
-pages_commit_impl(void *addr, size_t size, bool commit) {
+os_pages_commit(void *addr, size_t size, bool commit) {
assert(PAGE_ADDR2BASE(addr) == addr);
assert(PAGE_CEILING(size) == size);
- if (os_overcommits) {
- return true;
- }
-
#ifdef _WIN32
return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT,
PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT)));
@@ -261,7 +327,7 @@ pages_commit_impl(void *addr, size_t size, bool commit) {
{
int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED,
- -1, 0);
+ PAGES_FD_TAG, 0);
if (result == MAP_FAILED) {
return true;
}
@@ -278,6 +344,15 @@ pages_commit_impl(void *addr, size_t size, bool commit) {
#endif
}
+static bool
+pages_commit_impl(void *addr, size_t size, bool commit) {
+ if (os_overcommits) {
+ return true;
+ }
+
+ return os_pages_commit(addr, size, commit);
+}
+
bool
pages_commit(void *addr, size_t size) {
return pages_commit_impl(addr, size, true);
@@ -288,6 +363,66 @@ pages_decommit(void *addr, size_t size) {
return pages_commit_impl(addr, size, false);
}
+void
+pages_mark_guards(void *head, void *tail) {
+ assert(head != NULL || tail != NULL);
+ assert(head == NULL || tail == NULL ||
+ (uintptr_t)head < (uintptr_t)tail);
+#ifdef JEMALLOC_HAVE_MPROTECT
+ if (head != NULL) {
+ mprotect(head, PAGE, PROT_NONE);
+ }
+ if (tail != NULL) {
+ mprotect(tail, PAGE, PROT_NONE);
+ }
+#else
+ /* Decommit sets to PROT_NONE / MEM_DECOMMIT. */
+ if (head != NULL) {
+ os_pages_commit(head, PAGE, false);
+ }
+ if (tail != NULL) {
+ os_pages_commit(tail, PAGE, false);
+ }
+#endif
+}
+
+void
+pages_unmark_guards(void *head, void *tail) {
+ assert(head != NULL || tail != NULL);
+ assert(head == NULL || tail == NULL ||
+ (uintptr_t)head < (uintptr_t)tail);
+#ifdef JEMALLOC_HAVE_MPROTECT
+ bool head_and_tail = (head != NULL) && (tail != NULL);
+ size_t range = head_and_tail ?
+ (uintptr_t)tail - (uintptr_t)head + PAGE :
+ SIZE_T_MAX;
+ /*
+ * The amount of work that the kernel does in mprotect depends on the
+ * range argument. SC_LARGE_MINCLASS is an arbitrary threshold chosen so
+ * that the extra work the kernel does on a larger range does not outweigh
+ * the savings of performing one fewer system call.
+ */
+ bool ranged_mprotect = head_and_tail && range <= SC_LARGE_MINCLASS;
+ if (ranged_mprotect) {
+ mprotect(head, range, PROT_READ | PROT_WRITE);
+ } else {
+ if (head != NULL) {
+ mprotect(head, PAGE, PROT_READ | PROT_WRITE);
+ }
+ if (tail != NULL) {
+ mprotect(tail, PAGE, PROT_READ | PROT_WRITE);
+ }
+ }
+#else
+ if (head != NULL) {
+ os_pages_commit(head, PAGE, true);
+ }
+ if (tail != NULL) {
+ os_pages_commit(tail, PAGE, true);
+ }
+#endif
+}
+
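The guard marking above boils down to flipping page protections around a usable region. The following standalone sketch (POSIX-only, and not part of the diff) places PROT_NONE guard pages on both sides of a single usable page and then restores them, mirroring pages_mark_guards()/pages_unmark_guards() in spirit.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void) {
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	/* Three pages: guard | usable | guard. */
	unsigned char *base = mmap(NULL, 3 * page, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (base == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* Mark head and tail guards (what pages_mark_guards() does via mprotect). */
	mprotect(base, page, PROT_NONE);
	mprotect(base + 2 * page, page, PROT_NONE);

	memset(base + page, 0xAB, page); /* the middle page is still writable */
	puts("middle page ok; touching a guard page would fault");

	/* Restore access before reuse, mirroring pages_unmark_guards(). */
	mprotect(base, page, PROT_READ | PROT_WRITE);
	mprotect(base + 2 * page, page, PROT_READ | PROT_WRITE);
	munmap(base, 3 * page);
	return 0;
}
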
bool
pages_purge_lazy(void *addr, size_t size) {
assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
@@ -318,6 +453,9 @@ pages_purge_lazy(void *addr, size_t size) {
#elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
!defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
return (madvise(addr, size, MADV_DONTNEED) != 0);
+#elif defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED) && \
+ !defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS)
+ return (posix_madvise(addr, size, POSIX_MADV_DONTNEED) != 0);
#else
not_reached();
#endif
@@ -334,7 +472,12 @@ pages_purge_forced(void *addr, size_t size) {
#if defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
- return (madvise(addr, size, MADV_DONTNEED) != 0);
+ return (unlikely(madvise_dont_need_zeros_is_faulty) ||
+ madvise(addr, size, MADV_DONTNEED) != 0);
+#elif defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED) && \
+ defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS)
+ return (unlikely(madvise_dont_need_zeros_is_faulty) ||
+ posix_madvise(addr, size, POSIX_MADV_DONTNEED) != 0);
#elif defined(JEMALLOC_MAPS_COALESCE)
/* Try to overlay a new demand-zeroed mapping. */
return pages_commit(addr, size);
@@ -349,8 +492,13 @@ pages_huge_impl(void *addr, size_t size, bool aligned) {
assert(HUGEPAGE_ADDR2BASE(addr) == addr);
assert(HUGEPAGE_CEILING(size) == size);
}
-#ifdef JEMALLOC_HAVE_MADVISE_HUGE
+#if defined(JEMALLOC_HAVE_MADVISE_HUGE)
return (madvise(addr, size, MADV_HUGEPAGE) != 0);
+#elif defined(JEMALLOC_HAVE_MEMCNTL)
+ struct memcntl_mha m = {0};
+ m.mha_cmd = MHA_MAPSIZE_VA;
+ m.mha_pagesize = HUGEPAGE;
+ return (memcntl(addr, size, MC_HAT_ADVISE, (caddr_t)&m, 0, 0) == 0);
#else
return true;
#endif
@@ -394,8 +542,10 @@ bool
pages_dontdump(void *addr, size_t size) {
assert(PAGE_ADDR2BASE(addr) == addr);
assert(PAGE_CEILING(size) == size);
-#ifdef JEMALLOC_MADVISE_DONTDUMP
+#if defined(JEMALLOC_MADVISE_DONTDUMP)
return madvise(addr, size, MADV_DONTDUMP) != 0;
+#elif defined(JEMALLOC_MADVISE_NOCORE)
+ return madvise(addr, size, MADV_NOCORE) != 0;
#else
return false;
#endif
@@ -405,8 +555,10 @@ bool
pages_dodump(void *addr, size_t size) {
assert(PAGE_ADDR2BASE(addr) == addr);
assert(PAGE_CEILING(size) == size);
-#ifdef JEMALLOC_MADVISE_DONTDUMP
+#if defined(JEMALLOC_MADVISE_DONTDUMP)
return madvise(addr, size, MADV_DODUMP) != 0;
+#elif defined(JEMALLOC_MADVISE_NOCORE)
+ return madvise(addr, size, MADV_CORE) != 0;
#else
return false;
#endif
@@ -547,14 +699,14 @@ pages_set_thp_state (void *ptr, size_t size) {
static void
init_thp_state(void) {
- if (!have_madvise_huge) {
+ if (!have_madvise_huge && !have_memcntl) {
if (metadata_thp_enabled() && opt_abort) {
malloc_write("<jemalloc>: no MADV_HUGEPAGE support\n");
abort();
}
goto label_error;
}
-
+#if defined(JEMALLOC_HAVE_MADVISE_HUGE)
static const char sys_state_madvise[] = "always [madvise] never\n";
static const char sys_state_always[] = "[always] madvise never\n";
static const char sys_state_never[] = "always madvise [never]\n";
@@ -563,6 +715,9 @@ init_thp_state(void) {
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
int fd = (int)syscall(SYS_open,
"/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
+#elif defined(JEMALLOC_USE_SYSCALL) && defined(SYS_openat)
+ int fd = (int)syscall(SYS_openat,
+ AT_FDCWD, "/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
#else
int fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
#endif
@@ -578,7 +733,7 @@ init_thp_state(void) {
#endif
if (nread < 0) {
- goto label_error;
+ goto label_error;
}
if (strncmp(buf, sys_state_madvise, (size_t)nread) == 0) {
@@ -591,6 +746,10 @@ init_thp_state(void) {
goto label_error;
}
return;
+#elif defined(JEMALLOC_HAVE_MEMCNTL)
+ init_system_thp_mode = thp_mode_default;
+ return;
+#endif
label_error:
opt_thp = init_system_thp_mode = thp_mode_not_supported;
}
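
For reference, the sysfs parsing performed by init_thp_state() can be reproduced in a few lines; the sketch below (Linux-only, standalone, and merely illustrative) reads the transparent_hugepage knob and reports the bracketed active mode.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void) {
	char buf[64];
	int fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
	if (fd == -1) {
		puts("THP sysfs knob not available");
		return 0;
	}
	ssize_t nread = read(fd, buf, sizeof(buf) - 1);
	close(fd);
	if (nread <= 0) {
		return 1;
	}
	buf[nread] = '\0';
	/* The active mode is the bracketed word, e.g. "always [madvise] never". */
	if (strstr(buf, "[always]") != NULL) {
		puts("system THP mode: always");
	} else if (strstr(buf, "[madvise]") != NULL) {
		puts("system THP mode: madvise");
	} else if (strstr(buf, "[never]") != NULL) {
		puts("system THP mode: never");
	} else {
		puts("system THP mode: unrecognized");
	}
	return 0;
}
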
@@ -606,6 +765,20 @@ pages_boot(void) {
return true;
}
+#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
+ if (!opt_trust_madvise) {
+ madvise_dont_need_zeros_is_faulty = !madvise_MADV_DONTNEED_zeroes_pages();
+ if (madvise_dont_need_zeros_is_faulty) {
+ malloc_write("<jemalloc>: MADV_DONTNEED does not work (memset will be used instead)\n");
+ malloc_write("<jemalloc>: (This is the expected behaviour if you are running under QEMU)\n");
+ }
+ } else {
+ /* In case opt_trust_madvise is disable,
+ * do not do runtime check */
+ madvise_dont_need_zeros_is_faulty = 0;
+ }
+#endif
+
#ifndef _WIN32
mmap_flags = MAP_PRIVATE | MAP_ANON;
#endif
@@ -619,6 +792,8 @@ pages_boot(void) {
mmap_flags |= MAP_NORESERVE;
}
# endif
+#elif defined(__NetBSD__)
+ os_overcommits = true;
#else
os_overcommits = false;
#endif
diff --git a/deps/jemalloc/src/pai.c b/deps/jemalloc/src/pai.c
new file mode 100644
index 000000000..45c877292
--- /dev/null
+++ b/deps/jemalloc/src/pai.c
@@ -0,0 +1,31 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+size_t
+pai_alloc_batch_default(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
+ edata_list_active_t *results, bool *deferred_work_generated) {
+ for (size_t i = 0; i < nallocs; i++) {
+ bool deferred_by_alloc = false;
+ edata_t *edata = pai_alloc(tsdn, self, size, PAGE,
+ /* zero */ false, /* guarded */ false,
+ /* frequent_reuse */ false, &deferred_by_alloc);
+ *deferred_work_generated |= deferred_by_alloc;
+ if (edata == NULL) {
+ return i;
+ }
+ edata_list_active_append(results, edata);
+ }
+ return nallocs;
+}
+
+void
+pai_dalloc_batch_default(tsdn_t *tsdn, pai_t *self,
+ edata_list_active_t *list, bool *deferred_work_generated) {
+ edata_t *edata;
+ while ((edata = edata_list_active_first(list)) != NULL) {
+ bool deferred_by_dalloc = false;
+ edata_list_active_remove(list, edata);
+ pai_dalloc(tsdn, self, edata, &deferred_by_dalloc);
+ *deferred_work_generated |= deferred_by_dalloc;
+ }
+}
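
The batch helpers above follow a common contract: attempt up to n operations, stop at the first failure, and report how many succeeded so the caller can clean up exactly that many. A minimal standalone sketch of the allocation side, using plain malloc() instead of a pai_t, is shown below; alloc_batch is a hypothetical name.

#include <stdio.h>
#include <stdlib.h>

/*
 * Allocate up to nallocs blocks, stopping at the first failure; the return
 * value tells the caller how many entries of out[] are valid.
 */
static size_t
alloc_batch(void **out, size_t nallocs, size_t size) {
	for (size_t i = 0; i < nallocs; i++) {
		out[i] = malloc(size);
		if (out[i] == NULL) {
			return i;
		}
	}
	return nallocs;
}

int
main(void) {
	void *blocks[8];
	size_t n = alloc_batch(blocks, 8, 4096);
	printf("allocated %zu of 8 blocks\n", n);
	for (size_t i = 0; i < n; i++) {
		free(blocks[i]);
	}
	return 0;
}
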
diff --git a/deps/jemalloc/src/peak_event.c b/deps/jemalloc/src/peak_event.c
new file mode 100644
index 000000000..4093fbcc6
--- /dev/null
+++ b/deps/jemalloc/src/peak_event.c
@@ -0,0 +1,82 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/peak_event.h"
+
+#include "jemalloc/internal/activity_callback.h"
+#include "jemalloc/internal/peak.h"
+
+/*
+ * Update every 64K by default. We're not exposing this as a configuration
+ * option for now; we don't want to bind ourselves too tightly to any particular
+ * performance requirements for small values, or guarantee that we'll even be
+ * able to provide fine-grained accuracy.
+ */
+#define PEAK_EVENT_WAIT (64 * 1024)
+
+/* Update the peak with current tsd state. */
+void
+peak_event_update(tsd_t *tsd) {
+ uint64_t alloc = tsd_thread_allocated_get(tsd);
+ uint64_t dalloc = tsd_thread_deallocated_get(tsd);
+ peak_t *peak = tsd_peakp_get(tsd);
+ peak_update(peak, alloc, dalloc);
+}
+
+static void
+peak_event_activity_callback(tsd_t *tsd) {
+ activity_callback_thunk_t *thunk = tsd_activity_callback_thunkp_get(
+ tsd);
+ uint64_t alloc = tsd_thread_allocated_get(tsd);
+ uint64_t dalloc = tsd_thread_deallocated_get(tsd);
+ if (thunk->callback != NULL) {
+ thunk->callback(thunk->uctx, alloc, dalloc);
+ }
+}
+
+/* Set current state to zero. */
+void
+peak_event_zero(tsd_t *tsd) {
+ uint64_t alloc = tsd_thread_allocated_get(tsd);
+ uint64_t dalloc = tsd_thread_deallocated_get(tsd);
+ peak_t *peak = tsd_peakp_get(tsd);
+ peak_set_zero(peak, alloc, dalloc);
+}
+
+uint64_t
+peak_event_max(tsd_t *tsd) {
+ peak_t *peak = tsd_peakp_get(tsd);
+ return peak_max(peak);
+}
+
+uint64_t
+peak_alloc_new_event_wait(tsd_t *tsd) {
+ return PEAK_EVENT_WAIT;
+}
+
+uint64_t
+peak_alloc_postponed_event_wait(tsd_t *tsd) {
+ return TE_MIN_START_WAIT;
+}
+
+void
+peak_alloc_event_handler(tsd_t *tsd, uint64_t elapsed) {
+ peak_event_update(tsd);
+ peak_event_activity_callback(tsd);
+}
+
+uint64_t
+peak_dalloc_new_event_wait(tsd_t *tsd) {
+ return PEAK_EVENT_WAIT;
+}
+
+uint64_t
+peak_dalloc_postponed_event_wait(tsd_t *tsd) {
+ return TE_MIN_START_WAIT;
+}
+
+void
+peak_dalloc_event_handler(tsd_t *tsd, uint64_t elapsed) {
+ peak_event_update(tsd);
+ peak_event_activity_callback(tsd);
+}
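
peak_event_update() above ultimately folds the thread's allocated/deallocated byte totals into a running maximum of their difference. The sketch below shows that bookkeeping with a simplified demo_peak_t (the real peak_t lives in jemalloc's internals and is only refreshed roughly every 64 KiB of allocation activity).

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	uint64_t cur_max; /* highest net allocation observed so far */
} demo_peak_t;

/* Fold the current (allocated, deallocated) byte totals into the peak. */
static void
demo_peak_update(demo_peak_t *peak, uint64_t alloc, uint64_t dalloc) {
	uint64_t net = alloc >= dalloc ? alloc - dalloc : 0;
	if (net > peak->cur_max) {
		peak->cur_max = net;
	}
}

int
main(void) {
	demo_peak_t peak = { 0 };
	demo_peak_update(&peak, 10 * 1024, 0);           /* net 10 KiB */
	demo_peak_update(&peak, 300 * 1024, 250 * 1024); /* net 50 KiB */
	demo_peak_update(&peak, 400 * 1024, 390 * 1024); /* net 10 KiB */
	printf("peak net allocation: %" PRIu64 " bytes\n", peak.cur_max);
	return 0;
}
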
diff --git a/deps/jemalloc/src/prng.c b/deps/jemalloc/src/prng.c
deleted file mode 100644
index 83c04bf9b..000000000
--- a/deps/jemalloc/src/prng.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#define JEMALLOC_PRNG_C_
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
diff --git a/deps/jemalloc/src/prof.c b/deps/jemalloc/src/prof.c
index 13334cb4c..7a6d5d569 100644
--- a/deps/jemalloc/src/prof.c
+++ b/deps/jemalloc/src/prof.c
@@ -1,1126 +1,199 @@
-#define JEMALLOC_PROF_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
+#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/assert.h"
-#include "jemalloc/internal/ckh.h"
-#include "jemalloc/internal/hash.h"
-#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/emitter.h"
+#include "jemalloc/internal/counter.h"
+#include "jemalloc/internal/prof_data.h"
+#include "jemalloc/internal/prof_log.h"
+#include "jemalloc/internal/prof_recent.h"
+#include "jemalloc/internal/prof_stats.h"
+#include "jemalloc/internal/prof_sys.h"
+#include "jemalloc/internal/prof_hook.h"
+#include "jemalloc/internal/thread_event.h"
-/******************************************************************************/
-
-#ifdef JEMALLOC_PROF_LIBUNWIND
-#define UNW_LOCAL_ONLY
-#include <libunwind.h>
-#endif
-
-#ifdef JEMALLOC_PROF_LIBGCC
/*
- * We have a circular dependency -- jemalloc_internal.h tells us if we should
- * use libgcc's unwinding functionality, but after we've included that, we've
- * already hooked _Unwind_Backtrace. We'll temporarily disable hooking.
+ * This file implements the profiling "APIs" needed by other parts of jemalloc,
+ * and also manages the relevant "operational" data, mainly options and mutexes;
+ * the core profiling data structures are encapsulated in prof_data.c.
*/
-#undef _Unwind_Backtrace
-#include <unwind.h>
-#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, test_hooks_libc_hook)
-#endif
/******************************************************************************/
+
/* Data. */
-bool opt_prof = false;
-bool opt_prof_active = true;
-bool opt_prof_thread_active_init = true;
-size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
-ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
-bool opt_prof_gdump = false;
-bool opt_prof_final = false;
-bool opt_prof_leak = false;
-bool opt_prof_accum = false;
-bool opt_prof_log = false;
-char opt_prof_prefix[
- /* Minimize memory bloat for non-prof builds. */
-#ifdef JEMALLOC_PROF
- PATH_MAX +
-#endif
- 1];
+bool opt_prof = false;
+bool opt_prof_active = true;
+bool opt_prof_thread_active_init = true;
+size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
+ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
+bool opt_prof_gdump = false;
+bool opt_prof_final = false;
+bool opt_prof_leak = false;
+bool opt_prof_leak_error = false;
+bool opt_prof_accum = false;
+char opt_prof_prefix[PROF_DUMP_FILENAME_LEN];
+bool opt_prof_sys_thread_name = false;
+bool opt_prof_unbias = true;
+
+/* Accessed via prof_sample_event_handler(). */
+static counter_accum_t prof_idump_accumulated;
/*
* Initialized as opt_prof_active, and accessed via
* prof_active_[gs]et{_unlocked,}().
*/
-bool prof_active;
-static malloc_mutex_t prof_active_mtx;
+bool prof_active_state;
+static malloc_mutex_t prof_active_mtx;
/*
* Initialized as opt_prof_thread_active_init, and accessed via
* prof_thread_active_init_[gs]et().
*/
-static bool prof_thread_active_init;
-static malloc_mutex_t prof_thread_active_init_mtx;
+static bool prof_thread_active_init;
+static malloc_mutex_t prof_thread_active_init_mtx;
/*
* Initialized as opt_prof_gdump, and accessed via
* prof_gdump_[gs]et{_unlocked,}().
*/
-bool prof_gdump_val;
-static malloc_mutex_t prof_gdump_mtx;
-
-uint64_t prof_interval = 0;
-
-size_t lg_prof_sample;
-
-typedef enum prof_logging_state_e prof_logging_state_t;
-enum prof_logging_state_e {
- prof_logging_state_stopped,
- prof_logging_state_started,
- prof_logging_state_dumping
-};
-
-/*
- * - stopped: log_start never called, or previous log_stop has completed.
- * - started: log_start called, log_stop not called yet. Allocations are logged.
- * - dumping: log_stop called but not finished; samples are not logged anymore.
- */
-prof_logging_state_t prof_logging_state = prof_logging_state_stopped;
-
-#ifdef JEMALLOC_JET
-static bool prof_log_dummy = false;
-#endif
-
-/* Incremented for every log file that is output. */
-static uint64_t log_seq = 0;
-static char log_filename[
- /* Minimize memory bloat for non-prof builds. */
-#ifdef JEMALLOC_PROF
- PATH_MAX +
-#endif
- 1];
-
-/* Timestamp for most recent call to log_start(). */
-static nstime_t log_start_timestamp = NSTIME_ZERO_INITIALIZER;
-
-/* Increment these when adding to the log_bt and log_thr linked lists. */
-static size_t log_bt_index = 0;
-static size_t log_thr_index = 0;
-
-/* Linked list node definitions. These are only used in prof.c. */
-typedef struct prof_bt_node_s prof_bt_node_t;
-
-struct prof_bt_node_s {
- prof_bt_node_t *next;
- size_t index;
- prof_bt_t bt;
- /* Variable size backtrace vector pointed to by bt. */
- void *vec[1];
-};
-
-typedef struct prof_thr_node_s prof_thr_node_t;
-
-struct prof_thr_node_s {
- prof_thr_node_t *next;
- size_t index;
- uint64_t thr_uid;
- /* Variable size based on thr_name_sz. */
- char name[1];
-};
-
-typedef struct prof_alloc_node_s prof_alloc_node_t;
-
-/* This is output when logging sampled allocations. */
-struct prof_alloc_node_s {
- prof_alloc_node_t *next;
- /* Indices into an array of thread data. */
- size_t alloc_thr_ind;
- size_t free_thr_ind;
+bool prof_gdump_val;
+static malloc_mutex_t prof_gdump_mtx;
- /* Indices into an array of backtraces. */
- size_t alloc_bt_ind;
- size_t free_bt_ind;
+uint64_t prof_interval = 0;
- uint64_t alloc_time_ns;
- uint64_t free_time_ns;
+size_t lg_prof_sample;
- size_t usize;
-};
-
-/*
- * Created on the first call to prof_log_start and deleted on prof_log_stop.
- * These are the backtraces and threads that have already been logged by an
- * allocation.
- */
-static bool log_tables_initialized = false;
-static ckh_t log_bt_node_set;
-static ckh_t log_thr_node_set;
-
-/* Store linked lists for logged data. */
-static prof_bt_node_t *log_bt_first = NULL;
-static prof_bt_node_t *log_bt_last = NULL;
-static prof_thr_node_t *log_thr_first = NULL;
-static prof_thr_node_t *log_thr_last = NULL;
-static prof_alloc_node_t *log_alloc_first = NULL;
-static prof_alloc_node_t *log_alloc_last = NULL;
-
-/* Protects the prof_logging_state and any log_{...} variable. */
-static malloc_mutex_t log_mtx;
-
-/*
- * Table of mutexes that are shared among gctx's. These are leaf locks, so
- * there is no problem with using them for more than one gctx at the same time.
- * The primary motivation for this sharing though is that gctx's are ephemeral,
- * and destroying mutexes causes complications for systems that allocate when
- * creating/destroying mutexes.
- */
-static malloc_mutex_t *gctx_locks;
-static atomic_u_t cum_gctxs; /* Atomic counter. */
-
-/*
- * Table of mutexes that are shared among tdata's. No operations require
- * holding multiple tdata locks, so there is no problem with using them for more
- * than one tdata at the same time, even though a gctx lock may be acquired
- * while holding a tdata lock.
- */
-static malloc_mutex_t *tdata_locks;
-
-/*
- * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data
- * structure that knows about all backtraces currently captured.
- */
-static ckh_t bt2gctx;
-/* Non static to enable profiling. */
-malloc_mutex_t bt2gctx_mtx;
-
-/*
- * Tree of all extant prof_tdata_t structures, regardless of state,
- * {attached,detached,expired}.
- */
-static prof_tdata_tree_t tdatas;
-static malloc_mutex_t tdatas_mtx;
-
-static uint64_t next_thr_uid;
-static malloc_mutex_t next_thr_uid_mtx;
-
-static malloc_mutex_t prof_dump_seq_mtx;
-static uint64_t prof_dump_seq;
-static uint64_t prof_dump_iseq;
-static uint64_t prof_dump_mseq;
-static uint64_t prof_dump_useq;
-
-/*
- * This buffer is rather large for stack allocation, so use a single buffer for
- * all profile dumps.
- */
-static malloc_mutex_t prof_dump_mtx;
-static char prof_dump_buf[
- /* Minimize memory bloat for non-prof builds. */
-#ifdef JEMALLOC_PROF
- PROF_DUMP_BUFSIZE
-#else
- 1
-#endif
-];
-static size_t prof_dump_buf_end;
-static int prof_dump_fd;
+static uint64_t next_thr_uid;
+static malloc_mutex_t next_thr_uid_mtx;
/* Do not dump any profiles until bootstrapping is complete. */
-static bool prof_booted = false;
+bool prof_booted = false;
-/******************************************************************************/
-/*
- * Function prototypes for static functions that are referenced prior to
- * definition.
- */
+/* Logically a prof_backtrace_hook_t. */
+atomic_p_t prof_backtrace_hook;
-static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx);
-static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
-static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
- bool even_if_attached);
-static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata,
- bool even_if_attached);
-static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name);
-
-/* Hashtable functions for log_bt_node_set and log_thr_node_set. */
-static void prof_thr_node_hash(const void *key, size_t r_hash[2]);
-static bool prof_thr_node_keycomp(const void *k1, const void *k2);
-static void prof_bt_node_hash(const void *key, size_t r_hash[2]);
-static bool prof_bt_node_keycomp(const void *k1, const void *k2);
-
-/******************************************************************************/
-/* Red-black trees. */
-
-static int
-prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) {
- uint64_t a_thr_uid = a->thr_uid;
- uint64_t b_thr_uid = b->thr_uid;
- int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
- if (ret == 0) {
- uint64_t a_thr_discrim = a->thr_discrim;
- uint64_t b_thr_discrim = b->thr_discrim;
- ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim <
- b_thr_discrim);
- if (ret == 0) {
- uint64_t a_tctx_uid = a->tctx_uid;
- uint64_t b_tctx_uid = b->tctx_uid;
- ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid <
- b_tctx_uid);
- }
- }
- return ret;
-}
-
-rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
- tctx_link, prof_tctx_comp)
-
-static int
-prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) {
- unsigned a_len = a->bt.len;
- unsigned b_len = b->bt.len;
- unsigned comp_len = (a_len < b_len) ? a_len : b_len;
- int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *));
- if (ret == 0) {
- ret = (a_len > b_len) - (a_len < b_len);
- }
- return ret;
-}
-
-rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
- prof_gctx_comp)
-
-static int
-prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) {
- int ret;
- uint64_t a_uid = a->thr_uid;
- uint64_t b_uid = b->thr_uid;
-
- ret = ((a_uid > b_uid) - (a_uid < b_uid));
- if (ret == 0) {
- uint64_t a_discrim = a->thr_discrim;
- uint64_t b_discrim = b->thr_discrim;
-
- ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim));
- }
- return ret;
-}
-
-rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
- prof_tdata_comp)
+/* Logically a prof_dump_hook_t. */
+atomic_p_t prof_dump_hook;
/******************************************************************************/
void
-prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) {
- prof_tdata_t *tdata;
-
+prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx) {
cassert(config_prof);
- if (updated) {
- /*
- * Compute a new sample threshold. This isn't very important in
- * practice, because this function is rarely executed, so the
- * potential for sample bias is minimal except in contrived
- * programs.
- */
- tdata = prof_tdata_get(tsd, true);
- if (tdata != NULL) {
- prof_sample_threshold_update(tdata);
- }
+ if (tsd_reentrancy_level_get(tsd) > 0) {
+ assert((uintptr_t)tctx == (uintptr_t)1U);
+ return;
}
if ((uintptr_t)tctx > (uintptr_t)1U) {
malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
tctx->prepared = false;
- if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) {
- prof_tctx_destroy(tsd, tctx);
- } else {
- malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
- }
+ prof_tctx_try_destroy(tsd, tctx);
}
}
void
-prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
- prof_tctx_t *tctx) {
- prof_tctx_set(tsdn, ptr, usize, NULL, tctx);
+prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t size,
+ size_t usize, prof_tctx_t *tctx) {
+ cassert(config_prof);
+
+ if (opt_prof_sys_thread_name) {
+ prof_sys_thread_name_fetch(tsd);
+ }
+
+ edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
+ ptr);
+ prof_info_set(tsd, edata, tctx, size);
- /* Get the current time and set this in the extent_t. We'll read this
- * when free() is called. */
- nstime_t t = NSTIME_ZERO_INITIALIZER;
- nstime_update(&t);
- prof_alloc_time_set(tsdn, ptr, NULL, t);
+ szind_t szind = sz_size2index(usize);
- malloc_mutex_lock(tsdn, tctx->tdata->lock);
+ malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
+ /*
+ * We need to do these map lookups while holding the lock, to avoid the
+ * possibility of races with prof_reset calls, which update the map and
+ * then acquire the lock. This actually still leaves a data race on the
+ * contents of the unbias map, but we have not yet gone through and
+ * atomic-ified the prof module, and compilers are not yet causing us
+ * issues. The key thing is to make sure that, if we read garbage data,
+ * the prof_reset call is about to mark our tctx as expired before any
+ * dumping of our corrupted output is attempted.
+ */
+ size_t shifted_unbiased_cnt = prof_shifted_unbiased_cnt[szind];
+ size_t unbiased_bytes = prof_unbiased_sz[szind];
tctx->cnts.curobjs++;
+ tctx->cnts.curobjs_shifted_unbiased += shifted_unbiased_cnt;
tctx->cnts.curbytes += usize;
+ tctx->cnts.curbytes_unbiased += unbiased_bytes;
if (opt_prof_accum) {
tctx->cnts.accumobjs++;
+ tctx->cnts.accumobjs_shifted_unbiased += shifted_unbiased_cnt;
tctx->cnts.accumbytes += usize;
+ tctx->cnts.accumbytes_unbiased += unbiased_bytes;
}
+ bool record_recent = prof_recent_alloc_prepare(tsd, tctx);
tctx->prepared = false;
- malloc_mutex_unlock(tsdn, tctx->tdata->lock);
-}
-
-static size_t
-prof_log_bt_index(tsd_t *tsd, prof_bt_t *bt) {
- assert(prof_logging_state == prof_logging_state_started);
- malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx);
-
- prof_bt_node_t dummy_node;
- dummy_node.bt = *bt;
- prof_bt_node_t *node;
-
- /* See if this backtrace is already cached in the table. */
- if (ckh_search(&log_bt_node_set, (void *)(&dummy_node),
- (void **)(&node), NULL)) {
- size_t sz = offsetof(prof_bt_node_t, vec) +
- (bt->len * sizeof(void *));
- prof_bt_node_t *new_node = (prof_bt_node_t *)
- iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL,
- true, arena_get(TSDN_NULL, 0, true), true);
- if (log_bt_first == NULL) {
- log_bt_first = new_node;
- log_bt_last = new_node;
- } else {
- log_bt_last->next = new_node;
- log_bt_last = new_node;
- }
-
- new_node->next = NULL;
- new_node->index = log_bt_index;
- /*
- * Copy the backtrace: bt is inside a tdata or gctx, which
- * might die before prof_log_stop is called.
- */
- new_node->bt.len = bt->len;
- memcpy(new_node->vec, bt->vec, bt->len * sizeof(void *));
- new_node->bt.vec = new_node->vec;
-
- log_bt_index++;
- ckh_insert(tsd, &log_bt_node_set, (void *)new_node, NULL);
- return new_node->index;
- } else {
- return node->index;
+ malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
+ if (record_recent) {
+ assert(tctx == edata_prof_tctx_get(edata));
+ prof_recent_alloc(tsd, edata, size, usize);
}
-}
-static size_t
-prof_log_thr_index(tsd_t *tsd, uint64_t thr_uid, const char *name) {
- assert(prof_logging_state == prof_logging_state_started);
- malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx);
-
- prof_thr_node_t dummy_node;
- dummy_node.thr_uid = thr_uid;
- prof_thr_node_t *node;
-
- /* See if this thread is already cached in the table. */
- if (ckh_search(&log_thr_node_set, (void *)(&dummy_node),
- (void **)(&node), NULL)) {
- size_t sz = offsetof(prof_thr_node_t, name) + strlen(name) + 1;
- prof_thr_node_t *new_node = (prof_thr_node_t *)
- iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL,
- true, arena_get(TSDN_NULL, 0, true), true);
- if (log_thr_first == NULL) {
- log_thr_first = new_node;
- log_thr_last = new_node;
- } else {
- log_thr_last->next = new_node;
- log_thr_last = new_node;
- }
-
- new_node->next = NULL;
- new_node->index = log_thr_index;
- new_node->thr_uid = thr_uid;
- strcpy(new_node->name, name);
- log_thr_index++;
- ckh_insert(tsd, &log_thr_node_set, (void *)new_node, NULL);
- return new_node->index;
- } else {
- return node->index;
+ if (opt_prof_stats) {
+ prof_stats_inc(tsd, szind, size);
}
}
-static void
-prof_try_log(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx) {
- malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
-
- prof_tdata_t *cons_tdata = prof_tdata_get(tsd, false);
- if (cons_tdata == NULL) {
- /*
- * We decide not to log these allocations. cons_tdata will be
- * NULL only when the current thread is in a weird state (e.g.
- * it's being destroyed).
- */
- return;
- }
-
- malloc_mutex_lock(tsd_tsdn(tsd), &log_mtx);
-
- if (prof_logging_state != prof_logging_state_started) {
- goto label_done;
- }
-
- if (!log_tables_initialized) {
- bool err1 = ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS,
- prof_bt_node_hash, prof_bt_node_keycomp);
- bool err2 = ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS,
- prof_thr_node_hash, prof_thr_node_keycomp);
- if (err1 || err2) {
- goto label_done;
- }
- log_tables_initialized = true;
- }
-
- nstime_t alloc_time = prof_alloc_time_get(tsd_tsdn(tsd), ptr,
- (alloc_ctx_t *)NULL);
- nstime_t free_time = NSTIME_ZERO_INITIALIZER;
- nstime_update(&free_time);
-
- size_t sz = sizeof(prof_alloc_node_t);
- prof_alloc_node_t *new_node = (prof_alloc_node_t *)
- iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, true,
- arena_get(TSDN_NULL, 0, true), true);
-
- const char *prod_thr_name = (tctx->tdata->thread_name == NULL)?
- "" : tctx->tdata->thread_name;
- const char *cons_thr_name = prof_thread_name_get(tsd);
-
- prof_bt_t bt;
- /* Initialize the backtrace, using the buffer in tdata to store it. */
- bt_init(&bt, cons_tdata->vec);
- prof_backtrace(&bt);
- prof_bt_t *cons_bt = &bt;
-
- /* We haven't destroyed tctx yet, so gctx should be good to read. */
- prof_bt_t *prod_bt = &tctx->gctx->bt;
-
- new_node->next = NULL;
- new_node->alloc_thr_ind = prof_log_thr_index(tsd, tctx->tdata->thr_uid,
- prod_thr_name);
- new_node->free_thr_ind = prof_log_thr_index(tsd, cons_tdata->thr_uid,
- cons_thr_name);
- new_node->alloc_bt_ind = prof_log_bt_index(tsd, prod_bt);
- new_node->free_bt_ind = prof_log_bt_index(tsd, cons_bt);
- new_node->alloc_time_ns = nstime_ns(&alloc_time);
- new_node->free_time_ns = nstime_ns(&free_time);
- new_node->usize = usize;
-
- if (log_alloc_first == NULL) {
- log_alloc_first = new_node;
- log_alloc_last = new_node;
- } else {
- log_alloc_last->next = new_node;
- log_alloc_last = new_node;
- }
+void
+prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_info_t *prof_info) {
+ cassert(config_prof);
-label_done:
- malloc_mutex_unlock(tsd_tsdn(tsd), &log_mtx);
-}
+ assert(prof_info != NULL);
+ prof_tctx_t *tctx = prof_info->alloc_tctx;
+ assert((uintptr_t)tctx > (uintptr_t)1U);
-void
-prof_free_sampled_object(tsd_t *tsd, const void *ptr, size_t usize,
- prof_tctx_t *tctx) {
+ szind_t szind = sz_size2index(usize);
malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
assert(tctx->cnts.curobjs > 0);
assert(tctx->cnts.curbytes >= usize);
+ /*
+ * It's not correct to do equivalent asserts for unbiased bytes, because
+ * of the potential for races with prof.reset calls. The map contents
+ * should really be atomic, but we have not atomic-ified the prof module
+ * yet.
+ */
tctx->cnts.curobjs--;
+ tctx->cnts.curobjs_shifted_unbiased -= prof_shifted_unbiased_cnt[szind];
tctx->cnts.curbytes -= usize;
+ tctx->cnts.curbytes_unbiased -= prof_unbiased_sz[szind];
- prof_try_log(tsd, ptr, usize, tctx);
-
- if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) {
- prof_tctx_destroy(tsd, tctx);
- } else {
- malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
- }
-}
-
-void
-bt_init(prof_bt_t *bt, void **vec) {
- cassert(config_prof);
-
- bt->vec = vec;
- bt->len = 0;
-}
-
-static void
-prof_enter(tsd_t *tsd, prof_tdata_t *tdata) {
- cassert(config_prof);
- assert(tdata == prof_tdata_get(tsd, false));
-
- if (tdata != NULL) {
- assert(!tdata->enq);
- tdata->enq = true;
- }
-
- malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
-}
-
-static void
-prof_leave(tsd_t *tsd, prof_tdata_t *tdata) {
- cassert(config_prof);
- assert(tdata == prof_tdata_get(tsd, false));
-
- malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
-
- if (tdata != NULL) {
- bool idump, gdump;
-
- assert(tdata->enq);
- tdata->enq = false;
- idump = tdata->enq_idump;
- tdata->enq_idump = false;
- gdump = tdata->enq_gdump;
- tdata->enq_gdump = false;
-
- if (idump) {
- prof_idump(tsd_tsdn(tsd));
- }
- if (gdump) {
- prof_gdump(tsd_tsdn(tsd));
- }
- }
-}
-
-#ifdef JEMALLOC_PROF_LIBUNWIND
-void
-prof_backtrace(prof_bt_t *bt) {
- int nframes;
-
- cassert(config_prof);
- assert(bt->len == 0);
- assert(bt->vec != NULL);
-
- nframes = unw_backtrace(bt->vec, PROF_BT_MAX);
- if (nframes <= 0) {
- return;
- }
- bt->len = nframes;
-}
-#elif (defined(JEMALLOC_PROF_LIBGCC))
-static _Unwind_Reason_Code
-prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) {
- cassert(config_prof);
-
- return _URC_NO_REASON;
-}
-
-static _Unwind_Reason_Code
-prof_unwind_callback(struct _Unwind_Context *context, void *arg) {
- prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
- void *ip;
-
- cassert(config_prof);
-
- ip = (void *)_Unwind_GetIP(context);
- if (ip == NULL) {
- return _URC_END_OF_STACK;
- }
- data->bt->vec[data->bt->len] = ip;
- data->bt->len++;
- if (data->bt->len == data->max) {
- return _URC_END_OF_STACK;
- }
-
- return _URC_NO_REASON;
-}
+ prof_try_log(tsd, usize, prof_info);
-void
-prof_backtrace(prof_bt_t *bt) {
- prof_unwind_data_t data = {bt, PROF_BT_MAX};
-
- cassert(config_prof);
+ prof_tctx_try_destroy(tsd, tctx);
- _Unwind_Backtrace(prof_unwind_callback, &data);
-}
-#elif (defined(JEMALLOC_PROF_GCC))
-void
-prof_backtrace(prof_bt_t *bt) {
-#define BT_FRAME(i) \
- if ((i) < PROF_BT_MAX) { \
- void *p; \
- if (__builtin_frame_address(i) == 0) { \
- return; \
- } \
- p = __builtin_return_address(i); \
- if (p == NULL) { \
- return; \
- } \
- bt->vec[(i)] = p; \
- bt->len = (i) + 1; \
- } else { \
- return; \
+ if (opt_prof_stats) {
+ prof_stats_dec(tsd, szind, prof_info->alloc_size);
}
-
- cassert(config_prof);
-
- BT_FRAME(0)
- BT_FRAME(1)
- BT_FRAME(2)
- BT_FRAME(3)
- BT_FRAME(4)
- BT_FRAME(5)
- BT_FRAME(6)
- BT_FRAME(7)
- BT_FRAME(8)
- BT_FRAME(9)
-
- BT_FRAME(10)
- BT_FRAME(11)
- BT_FRAME(12)
- BT_FRAME(13)
- BT_FRAME(14)
- BT_FRAME(15)
- BT_FRAME(16)
- BT_FRAME(17)
- BT_FRAME(18)
- BT_FRAME(19)
-
- BT_FRAME(20)
- BT_FRAME(21)
- BT_FRAME(22)
- BT_FRAME(23)
- BT_FRAME(24)
- BT_FRAME(25)
- BT_FRAME(26)
- BT_FRAME(27)
- BT_FRAME(28)
- BT_FRAME(29)
-
- BT_FRAME(30)
- BT_FRAME(31)
- BT_FRAME(32)
- BT_FRAME(33)
- BT_FRAME(34)
- BT_FRAME(35)
- BT_FRAME(36)
- BT_FRAME(37)
- BT_FRAME(38)
- BT_FRAME(39)
-
- BT_FRAME(40)
- BT_FRAME(41)
- BT_FRAME(42)
- BT_FRAME(43)
- BT_FRAME(44)
- BT_FRAME(45)
- BT_FRAME(46)
- BT_FRAME(47)
- BT_FRAME(48)
- BT_FRAME(49)
-
- BT_FRAME(50)
- BT_FRAME(51)
- BT_FRAME(52)
- BT_FRAME(53)
- BT_FRAME(54)
- BT_FRAME(55)
- BT_FRAME(56)
- BT_FRAME(57)
- BT_FRAME(58)
- BT_FRAME(59)
-
- BT_FRAME(60)
- BT_FRAME(61)
- BT_FRAME(62)
- BT_FRAME(63)
- BT_FRAME(64)
- BT_FRAME(65)
- BT_FRAME(66)
- BT_FRAME(67)
- BT_FRAME(68)
- BT_FRAME(69)
-
- BT_FRAME(70)
- BT_FRAME(71)
- BT_FRAME(72)
- BT_FRAME(73)
- BT_FRAME(74)
- BT_FRAME(75)
- BT_FRAME(76)
- BT_FRAME(77)
- BT_FRAME(78)
- BT_FRAME(79)
-
- BT_FRAME(80)
- BT_FRAME(81)
- BT_FRAME(82)
- BT_FRAME(83)
- BT_FRAME(84)
- BT_FRAME(85)
- BT_FRAME(86)
- BT_FRAME(87)
- BT_FRAME(88)
- BT_FRAME(89)
-
- BT_FRAME(90)
- BT_FRAME(91)
- BT_FRAME(92)
- BT_FRAME(93)
- BT_FRAME(94)
- BT_FRAME(95)
- BT_FRAME(96)
- BT_FRAME(97)
- BT_FRAME(98)
- BT_FRAME(99)
-
- BT_FRAME(100)
- BT_FRAME(101)
- BT_FRAME(102)
- BT_FRAME(103)
- BT_FRAME(104)
- BT_FRAME(105)
- BT_FRAME(106)
- BT_FRAME(107)
- BT_FRAME(108)
- BT_FRAME(109)
-
- BT_FRAME(110)
- BT_FRAME(111)
- BT_FRAME(112)
- BT_FRAME(113)
- BT_FRAME(114)
- BT_FRAME(115)
- BT_FRAME(116)
- BT_FRAME(117)
- BT_FRAME(118)
- BT_FRAME(119)
-
- BT_FRAME(120)
- BT_FRAME(121)
- BT_FRAME(122)
- BT_FRAME(123)
- BT_FRAME(124)
- BT_FRAME(125)
- BT_FRAME(126)
- BT_FRAME(127)
-#undef BT_FRAME
-}
-#else
-void
-prof_backtrace(prof_bt_t *bt) {
- cassert(config_prof);
- not_reached();
-}
-#endif
-
-static malloc_mutex_t *
-prof_gctx_mutex_choose(void) {
- unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED);
-
- return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS];
}
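
The replacement prof_free_sampled_object() in the hunk above now decrements two parallel sets of counters: the raw sampled counts (curobjs/curbytes) and per-size-class "unbiased" counts (curobjs_shifted_unbiased/curbytes_unbiased) looked up in prof_shifted_unbiased_cnt[] and prof_unbiased_sz[]. The weighting idea is the same scaling that the removed prof_leakcheck() (further down in this diff) applies at report time: with a geometric sampling interval of mean M bytes, an object of size sz is sampled with probability roughly 1 - exp(-sz/M), so each sampled object stands in for about 1/(1 - exp(-sz/M)) real ones. The sketch below only illustrates how such per-size-class weights can be derived; the example_ names are hypothetical, and this is not the contents of jemalloc's actual tables (prof_boot2() fills those via prof_unbias_map_init(), whose body is not shown in this hunk).

    #include <math.h>
    #include <stddef.h>

    #define EXAMPLE_NSIZES 8 /* illustrative size-class count only */

    static double example_unbias_cnt[EXAMPLE_NSIZES];   /* objects represented per sampled object */
    static double example_unbias_bytes[EXAMPLE_NSIZES]; /* bytes represented per sampled object */

    static void
    example_unbias_table_init(const size_t *index2size, double sample_mean) {
        for (unsigned i = 0; i < EXAMPLE_NSIZES; i++) {
            double sz = (double)index2size[i];
            /* P(an allocation of this size triggers at least one sample). */
            double p_sampled = 1.0 - exp(-sz / sample_mean);
            example_unbias_cnt[i] = 1.0 / p_sampled;
            example_unbias_bytes[i] = sz / p_sampled;
        }
    }

On the free path these weights are simply subtracted again, so the unbiased totals track live objects and bytes the same way the raw sampled counters do.
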
-static malloc_mutex_t *
-prof_tdata_mutex_choose(uint64_t thr_uid) {
- return &tdata_locks[thr_uid % PROF_NTDATA_LOCKS];
-}
-
-static prof_gctx_t *
-prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) {
- /*
- * Create a single allocation that has space for vec of length bt->len.
- */
- size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
- prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size,
- sz_size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
- true);
- if (gctx == NULL) {
+prof_tctx_t *
+prof_tctx_create(tsd_t *tsd) {
+ if (!tsd_nominal(tsd) || tsd_reentrancy_level_get(tsd) > 0) {
return NULL;
}
- gctx->lock = prof_gctx_mutex_choose();
- /*
- * Set nlimbo to 1, in order to avoid a race condition with
- * prof_tctx_destroy()/prof_gctx_try_destroy().
- */
- gctx->nlimbo = 1;
- tctx_tree_new(&gctx->tctxs);
- /* Duplicate bt. */
- memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *));
- gctx->bt.vec = gctx->vec;
- gctx->bt.len = bt->len;
- return gctx;
-}
-
-static void
-prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
- prof_tdata_t *tdata) {
- cassert(config_prof);
-
- /*
- * Check that gctx is still unused by any thread cache before destroying
- * it. prof_lookup() increments gctx->nlimbo in order to avoid a race
- * condition with this function, as does prof_tctx_destroy() in order to
- * avoid a race between the main body of prof_tctx_destroy() and entry
- * into this function.
- */
- prof_enter(tsd, tdata_self);
- malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
- assert(gctx->nlimbo != 0);
- if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
- /* Remove gctx from bt2gctx. */
- if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) {
- not_reached();
- }
- prof_leave(tsd, tdata_self);
- /* Destroy gctx. */
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
- idalloctm(tsd_tsdn(tsd), gctx, NULL, NULL, true, true);
- } else {
- /*
- * Compensate for increment in prof_tctx_destroy() or
- * prof_lookup().
- */
- gctx->nlimbo--;
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
- prof_leave(tsd, tdata_self);
- }
-}
-
-static bool
-prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) {
- malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
-
- if (opt_prof_accum) {
- return false;
- }
- if (tctx->cnts.curobjs != 0) {
- return false;
- }
- if (tctx->prepared) {
- return false;
- }
- return true;
-}
-static bool
-prof_gctx_should_destroy(prof_gctx_t *gctx) {
- if (opt_prof_accum) {
- return false;
- }
- if (!tctx_tree_empty(&gctx->tctxs)) {
- return false;
- }
- if (gctx->nlimbo != 0) {
- return false;
- }
- return true;
-}
-
-static void
-prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) {
- prof_tdata_t *tdata = tctx->tdata;
- prof_gctx_t *gctx = tctx->gctx;
- bool destroy_tdata, destroy_tctx, destroy_gctx;
-
- malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
-
- assert(tctx->cnts.curobjs == 0);
- assert(tctx->cnts.curbytes == 0);
- assert(!opt_prof_accum);
- assert(tctx->cnts.accumobjs == 0);
- assert(tctx->cnts.accumbytes == 0);
-
- ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
- destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false);
- malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
-
- malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
- switch (tctx->state) {
- case prof_tctx_state_nominal:
- tctx_tree_remove(&gctx->tctxs, tctx);
- destroy_tctx = true;
- if (prof_gctx_should_destroy(gctx)) {
- /*
- * Increment gctx->nlimbo in order to keep another
- * thread from winning the race to destroy gctx while
- * this one has gctx->lock dropped. Without this, it
- * would be possible for another thread to:
- *
- * 1) Sample an allocation associated with gctx.
- * 2) Deallocate the sampled object.
- * 3) Successfully prof_gctx_try_destroy(gctx).
- *
- * The result would be that gctx no longer exists by the
- * time this thread accesses it in
- * prof_gctx_try_destroy().
- */
- gctx->nlimbo++;
- destroy_gctx = true;
- } else {
- destroy_gctx = false;
- }
- break;
- case prof_tctx_state_dumping:
- /*
- * A dumping thread needs tctx to remain valid until dumping
- * has finished. Change state such that the dumping thread will
- * complete destruction during a late dump iteration phase.
- */
- tctx->state = prof_tctx_state_purgatory;
- destroy_tctx = false;
- destroy_gctx = false;
- break;
- default:
- not_reached();
- destroy_tctx = false;
- destroy_gctx = false;
- }
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
- if (destroy_gctx) {
- prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
- tdata);
- }
-
- malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock);
-
- if (destroy_tdata) {
- prof_tdata_destroy(tsd, tdata, false);
- }
-
- if (destroy_tctx) {
- idalloctm(tsd_tsdn(tsd), tctx, NULL, NULL, true, true);
- }
-}
-
-static bool
-prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
- void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) {
- union {
- prof_gctx_t *p;
- void *v;
- } gctx, tgctx;
- union {
- prof_bt_t *p;
- void *v;
- } btkey;
- bool new_gctx;
-
- prof_enter(tsd, tdata);
- if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
- /* bt has never been seen before. Insert it. */
- prof_leave(tsd, tdata);
- tgctx.p = prof_gctx_create(tsd_tsdn(tsd), bt);
- if (tgctx.v == NULL) {
- return true;
- }
- prof_enter(tsd, tdata);
- if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
- gctx.p = tgctx.p;
- btkey.p = &gctx.p->bt;
- if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
- /* OOM. */
- prof_leave(tsd, tdata);
- idalloctm(tsd_tsdn(tsd), gctx.v, NULL, NULL,
- true, true);
- return true;
- }
- new_gctx = true;
- } else {
- new_gctx = false;
- }
- } else {
- tgctx.v = NULL;
- new_gctx = false;
- }
-
- if (!new_gctx) {
- /*
- * Increment nlimbo, in order to avoid a race condition with
- * prof_tctx_destroy()/prof_gctx_try_destroy().
- */
- malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock);
- gctx.p->nlimbo++;
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock);
- new_gctx = false;
-
- if (tgctx.v != NULL) {
- /* Lost race to insert. */
- idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true,
- true);
- }
- }
- prof_leave(tsd, tdata);
-
- *p_btkey = btkey.v;
- *p_gctx = gctx.p;
- *p_new_gctx = new_gctx;
- return false;
-}
-
-prof_tctx_t *
-prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
- union {
- prof_tctx_t *p;
- void *v;
- } ret;
- prof_tdata_t *tdata;
- bool not_found;
-
- cassert(config_prof);
-
- tdata = prof_tdata_get(tsd, false);
+ prof_tdata_t *tdata = prof_tdata_get(tsd, true);
if (tdata == NULL) {
return NULL;
}
- malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
- not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
- if (!not_found) { /* Note double negative! */
- ret.p->prepared = true;
- }
- malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
- if (not_found) {
- void *btkey;
- prof_gctx_t *gctx;
- bool new_gctx, error;
-
- /*
- * This thread's cache lacks bt. Look for it in the global
- * cache.
- */
- if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
- &new_gctx)) {
- return NULL;
- }
-
- /* Link a prof_tctx_t into gctx for this thread. */
- ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t),
- sz_size2index(sizeof(prof_tctx_t)), false, NULL, true,
- arena_ichoose(tsd, NULL), true);
- if (ret.p == NULL) {
- if (new_gctx) {
- prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
- }
- return NULL;
- }
- ret.p->tdata = tdata;
- ret.p->thr_uid = tdata->thr_uid;
- ret.p->thr_discrim = tdata->thr_discrim;
- memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
- ret.p->gctx = gctx;
- ret.p->tctx_uid = tdata->tctx_uid_next++;
- ret.p->prepared = true;
- ret.p->state = prof_tctx_state_initializing;
- malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
- error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
- malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
- if (error) {
- if (new_gctx) {
- prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
- }
- idalloctm(tsd_tsdn(tsd), ret.v, NULL, NULL, true, true);
- return NULL;
- }
- malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
- ret.p->state = prof_tctx_state_nominal;
- tctx_tree_insert(&gctx->tctxs, ret.p);
- gctx->nlimbo--;
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
- }
-
- return ret.p;
+ prof_bt_t bt;
+ bt_init(&bt, tdata->vec);
+ prof_backtrace(tsd, &bt);
+ return prof_lookup(tsd, &bt);
}
/*
@@ -1136,27 +209,22 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
* (e.g.
* -mno-sse) in order for the workaround to be complete.
*/
-void
-prof_sample_threshold_update(prof_tdata_t *tdata) {
+uint64_t
+prof_sample_new_event_wait(tsd_t *tsd) {
#ifdef JEMALLOC_PROF
- if (!config_prof) {
- return;
- }
-
if (lg_prof_sample == 0) {
- tsd_bytes_until_sample_set(tsd_fetch(), 0);
- return;
+ return TE_MIN_START_WAIT;
}
/*
* Compute sample interval as a geometrically distributed random
* variable with mean (2^lg_prof_sample).
*
- *                                __        __
- *                                |  log(u)  |                   1
- * tdata->bytes_until_sample = | -------- |, where p = ---------------
- *                                | log(1-p) |             lg_prof_sample
- *                                                    2
+ *                         __        __
+ *                         |  log(u)  |                   1
+ *  bytes_until_sample = | -------- |, where p = ---------------
+ *                         | log(1-p) |             lg_prof_sample
+ *                                             2
*
* For more information on the math, see:
*
@@ -1165,857 +233,56 @@ prof_sample_threshold_update(prof_tdata_t *tdata) {
* Springer-Verlag, New York, 1986
* pp 500
* (http://luc.devroye.org/rnbookindex.html)
+ *
+ * In the actual computation, there's a non-zero probability that our
+ * pseudo random number generator generates an exact 0, and to avoid
+ * log(0), we set u to 1.0 in case r is 0. Therefore u effectively is
+ * uniformly distributed in (0, 1] instead of [0, 1). Further, rather
+ * than taking the ceiling, we take the floor and then add 1, since
+ * otherwise bytes_until_sample would be 0 if u is exactly 1.0.
*/
- uint64_t r = prng_lg_range_u64(&tdata->prng_state, 53);
- double u = (double)r * (1.0/9007199254740992.0L);
- uint64_t bytes_until_sample = (uint64_t)(log(u) /
+ uint64_t r = prng_lg_range_u64(tsd_prng_statep_get(tsd), 53);
+ double u = (r == 0U) ? 1.0 : (double)r * (1.0/9007199254740992.0L);
+ return (uint64_t)(log(u) /
log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
+ (uint64_t)1U;
- if (bytes_until_sample > SSIZE_MAX) {
- bytes_until_sample = SSIZE_MAX;
- }
- tsd_bytes_until_sample_set(tsd_fetch(), bytes_until_sample);
-
-#endif
-}
-
-#ifdef JEMALLOC_JET
-static prof_tdata_t *
-prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
- void *arg) {
- size_t *tdata_count = (size_t *)arg;
-
- (*tdata_count)++;
-
- return NULL;
-}
-
-size_t
-prof_tdata_count(void) {
- size_t tdata_count = 0;
- tsdn_t *tsdn;
-
- tsdn = tsdn_fetch();
- malloc_mutex_lock(tsdn, &tdatas_mtx);
- tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
- (void *)&tdata_count);
- malloc_mutex_unlock(tsdn, &tdatas_mtx);
-
- return tdata_count;
-}
-
-size_t
-prof_bt_count(void) {
- size_t bt_count;
- tsd_t *tsd;
- prof_tdata_t *tdata;
-
- tsd = tsd_fetch();
- tdata = prof_tdata_get(tsd, false);
- if (tdata == NULL) {
- return 0;
- }
-
- malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
- bt_count = ckh_count(&bt2gctx);
- malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
-
- return bt_count;
-}
-#endif
-
-static int
-prof_dump_open_impl(bool propagate_err, const char *filename) {
- int fd;
-
- fd = creat(filename, 0644);
- if (fd == -1 && !propagate_err) {
- malloc_printf("<jemalloc>: creat(\"%s\"), 0644) failed\n",
- filename);
- if (opt_abort) {
- abort();
- }
- }
-
- return fd;
-}
-prof_dump_open_t *JET_MUTABLE prof_dump_open = prof_dump_open_impl;
-
-static bool
-prof_dump_flush(bool propagate_err) {
- bool ret = false;
- ssize_t err;
-
- cassert(config_prof);
-
- err = malloc_write_fd(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
- if (err == -1) {
- if (!propagate_err) {
- malloc_write("<jemalloc>: write() failed during heap "
- "profile flush\n");
- if (opt_abort) {
- abort();
- }
- }
- ret = true;
- }
- prof_dump_buf_end = 0;
-
- return ret;
-}
-
-static bool
-prof_dump_close(bool propagate_err) {
- bool ret;
-
- assert(prof_dump_fd != -1);
- ret = prof_dump_flush(propagate_err);
- close(prof_dump_fd);
- prof_dump_fd = -1;
-
- return ret;
-}
-
-static bool
-prof_dump_write(bool propagate_err, const char *s) {
- size_t i, slen, n;
-
- cassert(config_prof);
-
- i = 0;
- slen = strlen(s);
- while (i < slen) {
- /* Flush the buffer if it is full. */
- if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
- if (prof_dump_flush(propagate_err) && propagate_err) {
- return true;
- }
- }
-
- if (prof_dump_buf_end + slen - i <= PROF_DUMP_BUFSIZE) {
- /* Finish writing. */
- n = slen - i;
- } else {
- /* Write as much of s as will fit. */
- n = PROF_DUMP_BUFSIZE - prof_dump_buf_end;
- }
- memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
- prof_dump_buf_end += n;
- i += n;
- }
- assert(i == slen);
-
- return false;
-}
-
-JEMALLOC_FORMAT_PRINTF(2, 3)
-static bool
-prof_dump_printf(bool propagate_err, const char *format, ...) {
- bool ret;
- va_list ap;
- char buf[PROF_PRINTF_BUFSIZE];
-
- va_start(ap, format);
- malloc_vsnprintf(buf, sizeof(buf), format, ap);
- va_end(ap);
- ret = prof_dump_write(propagate_err, buf);
-
- return ret;
-}
-
-static void
-prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) {
- malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
-
- malloc_mutex_lock(tsdn, tctx->gctx->lock);
-
- switch (tctx->state) {
- case prof_tctx_state_initializing:
- malloc_mutex_unlock(tsdn, tctx->gctx->lock);
- return;
- case prof_tctx_state_nominal:
- tctx->state = prof_tctx_state_dumping;
- malloc_mutex_unlock(tsdn, tctx->gctx->lock);
-
- memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
-
- tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
- tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
- if (opt_prof_accum) {
- tdata->cnt_summed.accumobjs +=
- tctx->dump_cnts.accumobjs;
- tdata->cnt_summed.accumbytes +=
- tctx->dump_cnts.accumbytes;
- }
- break;
- case prof_tctx_state_dumping:
- case prof_tctx_state_purgatory:
- not_reached();
- }
-}
-
-static void
-prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) {
- malloc_mutex_assert_owner(tsdn, gctx->lock);
-
- gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
- gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
- if (opt_prof_accum) {
- gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs;
- gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes;
- }
-}
-
-static prof_tctx_t *
-prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
- tsdn_t *tsdn = (tsdn_t *)arg;
-
- malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
-
- switch (tctx->state) {
- case prof_tctx_state_nominal:
- /* New since dumping started; ignore. */
- break;
- case prof_tctx_state_dumping:
- case prof_tctx_state_purgatory:
- prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx);
- break;
- default:
- not_reached();
- }
-
- return NULL;
-}
-
-struct prof_tctx_dump_iter_arg_s {
- tsdn_t *tsdn;
- bool propagate_err;
-};
-
-static prof_tctx_t *
-prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) {
- struct prof_tctx_dump_iter_arg_s *arg =
- (struct prof_tctx_dump_iter_arg_s *)opaque;
-
- malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock);
-
- switch (tctx->state) {
- case prof_tctx_state_initializing:
- case prof_tctx_state_nominal:
- /* Not captured by this dump. */
- break;
- case prof_tctx_state_dumping:
- case prof_tctx_state_purgatory:
- if (prof_dump_printf(arg->propagate_err,
- " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": "
- "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
- tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
- tctx->dump_cnts.accumbytes)) {
- return tctx;
- }
- break;
- default:
- not_reached();
- }
- return NULL;
-}
-
-static prof_tctx_t *
-prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
- tsdn_t *tsdn = (tsdn_t *)arg;
- prof_tctx_t *ret;
-
- malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
-
- switch (tctx->state) {
- case prof_tctx_state_nominal:
- /* New since dumping started; ignore. */
- break;
- case prof_tctx_state_dumping:
- tctx->state = prof_tctx_state_nominal;
- break;
- case prof_tctx_state_purgatory:
- ret = tctx;
- goto label_return;
- default:
- not_reached();
- }
-
- ret = NULL;
-label_return:
- return ret;
-}
-
-static void
-prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) {
- cassert(config_prof);
-
- malloc_mutex_lock(tsdn, gctx->lock);
-
- /*
- * Increment nlimbo so that gctx won't go away before dump.
- * Additionally, link gctx into the dump list so that it is included in
- * prof_dump()'s second pass.
- */
- gctx->nlimbo++;
- gctx_tree_insert(gctxs, gctx);
-
- memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));
-
- malloc_mutex_unlock(tsdn, gctx->lock);
-}
-
-struct prof_gctx_merge_iter_arg_s {
- tsdn_t *tsdn;
- size_t leak_ngctx;
-};
-
-static prof_gctx_t *
-prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
- struct prof_gctx_merge_iter_arg_s *arg =
- (struct prof_gctx_merge_iter_arg_s *)opaque;
-
- malloc_mutex_lock(arg->tsdn, gctx->lock);
- tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter,
- (void *)arg->tsdn);
- if (gctx->cnt_summed.curobjs != 0) {
- arg->leak_ngctx++;
- }
- malloc_mutex_unlock(arg->tsdn, gctx->lock);
-
- return NULL;
-}
-
-static void
-prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) {
- prof_tdata_t *tdata = prof_tdata_get(tsd, false);
- prof_gctx_t *gctx;
-
- /*
- * Standard tree iteration won't work here, because as soon as we
- * decrement gctx->nlimbo and unlock gctx, another thread can
- * concurrently destroy it, which will corrupt the tree. Therefore,
- * tear down the tree one node at a time during iteration.
- */
- while ((gctx = gctx_tree_first(gctxs)) != NULL) {
- gctx_tree_remove(gctxs, gctx);
- malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
- {
- prof_tctx_t *next;
-
- next = NULL;
- do {
- prof_tctx_t *to_destroy =
- tctx_tree_iter(&gctx->tctxs, next,
- prof_tctx_finish_iter,
- (void *)tsd_tsdn(tsd));
- if (to_destroy != NULL) {
- next = tctx_tree_next(&gctx->tctxs,
- to_destroy);
- tctx_tree_remove(&gctx->tctxs,
- to_destroy);
- idalloctm(tsd_tsdn(tsd), to_destroy,
- NULL, NULL, true, true);
- } else {
- next = NULL;
- }
- } while (next != NULL);
- }
- gctx->nlimbo--;
- if (prof_gctx_should_destroy(gctx)) {
- gctx->nlimbo++;
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
- prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
- } else {
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
- }
- }
-}
-
-struct prof_tdata_merge_iter_arg_s {
- tsdn_t *tsdn;
- prof_cnt_t cnt_all;
-};
-
-static prof_tdata_t *
-prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
- void *opaque) {
- struct prof_tdata_merge_iter_arg_s *arg =
- (struct prof_tdata_merge_iter_arg_s *)opaque;
-
- malloc_mutex_lock(arg->tsdn, tdata->lock);
- if (!tdata->expired) {
- size_t tabind;
- union {
- prof_tctx_t *p;
- void *v;
- } tctx;
-
- tdata->dumping = true;
- memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
- for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
- &tctx.v);) {
- prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata);
- }
-
- arg->cnt_all.curobjs += tdata->cnt_summed.curobjs;
- arg->cnt_all.curbytes += tdata->cnt_summed.curbytes;
- if (opt_prof_accum) {
- arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs;
- arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes;
- }
- } else {
- tdata->dumping = false;
- }
- malloc_mutex_unlock(arg->tsdn, tdata->lock);
-
- return NULL;
-}
-
-static prof_tdata_t *
-prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
- void *arg) {
- bool propagate_err = *(bool *)arg;
-
- if (!tdata->dumping) {
- return NULL;
- }
-
- if (prof_dump_printf(propagate_err,
- " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n",
- tdata->thr_uid, tdata->cnt_summed.curobjs,
- tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs,
- tdata->cnt_summed.accumbytes,
- (tdata->thread_name != NULL) ? " " : "",
- (tdata->thread_name != NULL) ? tdata->thread_name : "")) {
- return tdata;
- }
- return NULL;
-}
-
-static bool
-prof_dump_header_impl(tsdn_t *tsdn, bool propagate_err,
- const prof_cnt_t *cnt_all) {
- bool ret;
-
- if (prof_dump_printf(propagate_err,
- "heap_v2/%"FMTu64"\n"
- " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
- ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs,
- cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) {
- return true;
- }
-
- malloc_mutex_lock(tsdn, &tdatas_mtx);
- ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
- (void *)&propagate_err) != NULL);
- malloc_mutex_unlock(tsdn, &tdatas_mtx);
- return ret;
-}
-prof_dump_header_t *JET_MUTABLE prof_dump_header = prof_dump_header_impl;
-
-static bool
-prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx,
- const prof_bt_t *bt, prof_gctx_tree_t *gctxs) {
- bool ret;
- unsigned i;
- struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg;
-
- cassert(config_prof);
- malloc_mutex_assert_owner(tsdn, gctx->lock);
-
- /* Avoid dumping such gctx's that have no useful data. */
- if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
- (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) {
- assert(gctx->cnt_summed.curobjs == 0);
- assert(gctx->cnt_summed.curbytes == 0);
- assert(gctx->cnt_summed.accumobjs == 0);
- assert(gctx->cnt_summed.accumbytes == 0);
- ret = false;
- goto label_return;
- }
-
- if (prof_dump_printf(propagate_err, "@")) {
- ret = true;
- goto label_return;
- }
- for (i = 0; i < bt->len; i++) {
- if (prof_dump_printf(propagate_err, " %#"FMTxPTR,
- (uintptr_t)bt->vec[i])) {
- ret = true;
- goto label_return;
- }
- }
-
- if (prof_dump_printf(propagate_err,
- "\n"
- " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
- gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes,
- gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) {
- ret = true;
- goto label_return;
- }
-
- prof_tctx_dump_iter_arg.tsdn = tsdn;
- prof_tctx_dump_iter_arg.propagate_err = propagate_err;
- if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter,
- (void *)&prof_tctx_dump_iter_arg) != NULL) {
- ret = true;
- goto label_return;
- }
-
- ret = false;
-label_return:
- return ret;
-}
-
-#ifndef _WIN32
-JEMALLOC_FORMAT_PRINTF(1, 2)
-static int
-prof_open_maps(const char *format, ...) {
- int mfd;
- va_list ap;
- char filename[PATH_MAX + 1];
-
- va_start(ap, format);
- malloc_vsnprintf(filename, sizeof(filename), format, ap);
- va_end(ap);
-
-#if defined(O_CLOEXEC)
- mfd = open(filename, O_RDONLY | O_CLOEXEC);
-#else
- mfd = open(filename, O_RDONLY);
- if (mfd != -1) {
- fcntl(mfd, F_SETFD, fcntl(mfd, F_GETFD) | FD_CLOEXEC);
- }
-#endif
-
- return mfd;
-}
-#endif
-
-static int
-prof_getpid(void) {
-#ifdef _WIN32
- return GetCurrentProcessId();
#else
- return getpid();
-#endif
-}
-
-static bool
-prof_dump_maps(bool propagate_err) {
- bool ret;
- int mfd;
-
- cassert(config_prof);
-#ifdef __FreeBSD__
- mfd = prof_open_maps("/proc/curproc/map");
-#elif defined(_WIN32)
- mfd = -1; // Not implemented
-#else
- {
- int pid = prof_getpid();
-
- mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
- if (mfd == -1) {
- mfd = prof_open_maps("/proc/%d/maps", pid);
- }
- }
-#endif
- if (mfd != -1) {
- ssize_t nread;
-
- if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
- propagate_err) {
- ret = true;
- goto label_return;
- }
- nread = 0;
- do {
- prof_dump_buf_end += nread;
- if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
- /* Make space in prof_dump_buf before read(). */
- if (prof_dump_flush(propagate_err) &&
- propagate_err) {
- ret = true;
- goto label_return;
- }
- }
- nread = malloc_read_fd(mfd,
- &prof_dump_buf[prof_dump_buf_end], PROF_DUMP_BUFSIZE
- - prof_dump_buf_end);
- } while (nread > 0);
- } else {
- ret = true;
- goto label_return;
- }
-
- ret = false;
-label_return:
- if (mfd != -1) {
- close(mfd);
- }
- return ret;
-}
-
-/*
- * See prof_sample_threshold_update() comment for why the body of this function
- * is conditionally compiled.
- */
-static void
-prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
- const char *filename) {
-#ifdef JEMALLOC_PROF
- /*
- * Scaling is equivalent AdjustSamples() in jeprof, but the result may
- * differ slightly from what jeprof reports, because here we scale the
- * summary values, whereas jeprof scales each context individually and
- * reports the sums of the scaled values.
- */
- if (cnt_all->curbytes != 0) {
- double sample_period = (double)((uint64_t)1 << lg_prof_sample);
- double ratio = (((double)cnt_all->curbytes) /
- (double)cnt_all->curobjs) / sample_period;
- double scale_factor = 1.0 / (1.0 - exp(-ratio));
- uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes)
- * scale_factor);
- uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) *
- scale_factor);
-
- malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64
- " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n",
- curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs !=
- 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : "");
- malloc_printf(
- "<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
- filename);
- }
+ not_reached();
+ return TE_MAX_START_WAIT;
#endif
}
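
prof_sample_new_event_wait() above draws the next per-thread wait (in bytes) from the geometric distribution spelled out in the comment, clamping u into (0, 1] so log(u) is defined and adding one so the wait is never zero. Below is a minimal standalone sketch of the same computation, assuming the caller supplies the 53-bit uniform value that the real code obtains from prng_lg_range_u64(); the example_ name is hypothetical.

    #include <math.h>
    #include <stdint.h>

    /* r53 is uniform in [0, 2^53); lg_sample is assumed to be well below 64. */
    static uint64_t
    example_sample_wait(uint64_t r53, unsigned lg_sample) {
        if (lg_sample == 0) {
            return 1; /* minimum wait; the real code returns TE_MIN_START_WAIT */
        }
        /* Map r53 into (0, 1] so that log(u) never sees zero. */
        double u = (r53 == 0) ? 1.0 : (double)r53 * (1.0 / 9007199254740992.0);
        double p = 1.0 / (double)((uint64_t)1 << lg_sample);
        /* Floor of the geometric draw, plus one so u == 1.0 still yields 1. */
        return (uint64_t)(log(u) / log(1.0 - p)) + 1;
    }

The expected value of the returned wait is approximately 2^lg_sample bytes, which is what makes "count allocated bytes until the wait expires" equivalent to sampling roughly one out of every 2^lg_sample allocated bytes.
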
-struct prof_gctx_dump_iter_arg_s {
- tsdn_t *tsdn;
- bool propagate_err;
-};
-
-static prof_gctx_t *
-prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
- prof_gctx_t *ret;
- struct prof_gctx_dump_iter_arg_s *arg =
- (struct prof_gctx_dump_iter_arg_s *)opaque;
-
- malloc_mutex_lock(arg->tsdn, gctx->lock);
-
- if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt,
- gctxs)) {
- ret = gctx;
- goto label_return;
- }
-
- ret = NULL;
-label_return:
- malloc_mutex_unlock(arg->tsdn, gctx->lock);
- return ret;
-}
-
-static void
-prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata,
- struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg,
- struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg,
- prof_gctx_tree_t *gctxs) {
- size_t tabind;
- union {
- prof_gctx_t *p;
- void *v;
- } gctx;
-
- prof_enter(tsd, tdata);
-
+uint64_t
+prof_sample_postponed_event_wait(tsd_t *tsd) {
/*
- * Put gctx's in limbo and clear their counters in preparation for
- * summing.
+ * The postponed wait time for prof sample event is computed as if we
+ * want a new wait time (i.e. as if the event were triggered). If we
+ * instead postpone to the immediate next allocation, like how we're
+ * handling the other events, then we can have sampling bias, if e.g.
+ * the allocation immediately following a reentrancy always comes from
+ * the same stack trace.
*/
- gctx_tree_new(gctxs);
- for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) {
- prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs);
- }
-
- /*
- * Iterate over tdatas, and for the non-expired ones snapshot their tctx
- * stats and merge them into the associated gctx's.
- */
- prof_tdata_merge_iter_arg->tsdn = tsd_tsdn(tsd);
- memset(&prof_tdata_merge_iter_arg->cnt_all, 0, sizeof(prof_cnt_t));
- malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
- tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter,
- (void *)prof_tdata_merge_iter_arg);
- malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
-
- /* Merge tctx stats into gctx's. */
- prof_gctx_merge_iter_arg->tsdn = tsd_tsdn(tsd);
- prof_gctx_merge_iter_arg->leak_ngctx = 0;
- gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter,
- (void *)prof_gctx_merge_iter_arg);
-
- prof_leave(tsd, tdata);
-}
-
-static bool
-prof_dump_file(tsd_t *tsd, bool propagate_err, const char *filename,
- bool leakcheck, prof_tdata_t *tdata,
- struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg,
- struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg,
- struct prof_gctx_dump_iter_arg_s *prof_gctx_dump_iter_arg,
- prof_gctx_tree_t *gctxs) {
- /* Create dump file. */
- if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) {
- return true;
- }
-
- /* Dump profile header. */
- if (prof_dump_header(tsd_tsdn(tsd), propagate_err,
- &prof_tdata_merge_iter_arg->cnt_all)) {
- goto label_write_error;
- }
-
- /* Dump per gctx profile stats. */
- prof_gctx_dump_iter_arg->tsdn = tsd_tsdn(tsd);
- prof_gctx_dump_iter_arg->propagate_err = propagate_err;
- if (gctx_tree_iter(gctxs, NULL, prof_gctx_dump_iter,
- (void *)prof_gctx_dump_iter_arg) != NULL) {
- goto label_write_error;
- }
-
- /* Dump /proc/<pid>/maps if possible. */
- if (prof_dump_maps(propagate_err)) {
- goto label_write_error;
- }
-
- if (prof_dump_close(propagate_err)) {
- return true;
- }
-
- return false;
-label_write_error:
- prof_dump_close(propagate_err);
- return true;
+ return prof_sample_new_event_wait(tsd);
}
-static bool
-prof_dump(tsd_t *tsd, bool propagate_err, const char *filename,
- bool leakcheck) {
- cassert(config_prof);
- assert(tsd_reentrancy_level_get(tsd) == 0);
-
- prof_tdata_t * tdata = prof_tdata_get(tsd, true);
- if (tdata == NULL) {
- return true;
- }
-
- pre_reentrancy(tsd, NULL);
- malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
-
- prof_gctx_tree_t gctxs;
- struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
- struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
- struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg;
- prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg,
- &prof_gctx_merge_iter_arg, &gctxs);
- bool err = prof_dump_file(tsd, propagate_err, filename, leakcheck, tdata,
- &prof_tdata_merge_iter_arg, &prof_gctx_merge_iter_arg,
- &prof_gctx_dump_iter_arg, &gctxs);
- prof_gctx_finish(tsd, &gctxs);
-
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
- post_reentrancy(tsd);
-
- if (err) {
- return true;
- }
-
- if (leakcheck) {
- prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all,
- prof_gctx_merge_iter_arg.leak_ngctx, filename);
- }
- return false;
-}
-
-#ifdef JEMALLOC_JET
void
-prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
- uint64_t *accumbytes) {
- tsd_t *tsd;
- prof_tdata_t *tdata;
- struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
- struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
- prof_gctx_tree_t gctxs;
-
- tsd = tsd_fetch();
- tdata = prof_tdata_get(tsd, false);
- if (tdata == NULL) {
- if (curobjs != NULL) {
- *curobjs = 0;
- }
- if (curbytes != NULL) {
- *curbytes = 0;
- }
- if (accumobjs != NULL) {
- *accumobjs = 0;
- }
- if (accumbytes != NULL) {
- *accumbytes = 0;
- }
+prof_sample_event_handler(tsd_t *tsd, uint64_t elapsed) {
+ cassert(config_prof);
+ assert(elapsed > 0 && elapsed != TE_INVALID_ELAPSED);
+ if (prof_interval == 0 || !prof_active_get_unlocked()) {
return;
}
-
- prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg,
- &prof_gctx_merge_iter_arg, &gctxs);
- prof_gctx_finish(tsd, &gctxs);
-
- if (curobjs != NULL) {
- *curobjs = prof_tdata_merge_iter_arg.cnt_all.curobjs;
- }
- if (curbytes != NULL) {
- *curbytes = prof_tdata_merge_iter_arg.cnt_all.curbytes;
- }
- if (accumobjs != NULL) {
- *accumobjs = prof_tdata_merge_iter_arg.cnt_all.accumobjs;
- }
- if (accumbytes != NULL) {
- *accumbytes = prof_tdata_merge_iter_arg.cnt_all.accumbytes;
- }
-}
-#endif
-
-#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
-#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
-static void
-prof_dump_filename(char *filename, char v, uint64_t vseq) {
- cassert(config_prof);
-
- if (vseq != VSEQ_INVALID) {
- /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
- malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
- "%s.%d.%"FMTu64".%c%"FMTu64".heap",
- opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq);
- } else {
- /* "<prefix>.<pid>.<seq>.<v>.heap" */
- malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
- "%s.%d.%"FMTu64".%c.heap",
- opt_prof_prefix, prof_getpid(), prof_dump_seq, v);
+ if (counter_accum(tsd_tsdn(tsd), &prof_idump_accumulated, elapsed)) {
+ prof_idump(tsd_tsdn(tsd));
}
- prof_dump_seq++;
}
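
prof_sample_event_handler() above no longer builds dump filenames itself; it feeds the bytes elapsed since the last sample event into counter_accum() and triggers prof_idump() once the configured prof_interval has been crossed. The sketch below illustrates the accumulate-and-trigger idea only; it is not the implementation in src/counter.c, the example_ names are hypothetical, and concurrent callers can both observe the boundary crossing here, which the real counter module guards against more carefully.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef struct {
        _Atomic uint64_t accumbytes;
        uint64_t interval; /* trigger threshold in bytes */
    } example_counter_t;

    static bool
    example_counter_accum(example_counter_t *c, uint64_t elapsed_bytes) {
        uint64_t total = atomic_fetch_add_explicit(&c->accumbytes,
            elapsed_bytes, memory_order_relaxed) + elapsed_bytes;
        if (total < c->interval) {
            return false;
        }
        /* Crossed the interval: consume it and ask the caller to dump. */
        atomic_fetch_sub_explicit(&c->accumbytes, c->interval,
            memory_order_relaxed);
        return true;
    }
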
static void
prof_fdump(void) {
tsd_t *tsd;
- char filename[DUMP_FILENAME_BUFSIZE];
cassert(config_prof);
assert(opt_prof_final);
- assert(opt_prof_prefix[0] != '\0');
if (!prof_booted) {
return;
@@ -2023,26 +290,14 @@ prof_fdump(void) {
tsd = tsd_fetch();
assert(tsd_reentrancy_level_get(tsd) == 0);
- malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
- prof_dump_filename(filename, 'f', VSEQ_INVALID);
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
- prof_dump(tsd, false, filename, opt_prof_leak);
+ prof_fdump_impl(tsd);
}
-bool
-prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum) {
+static bool
+prof_idump_accum_init(void) {
cassert(config_prof);
-#ifndef JEMALLOC_ATOMIC_U64
- if (malloc_mutex_init(&prof_accum->mtx, "prof_accum",
- WITNESS_RANK_PROF_ACCUM, malloc_mutex_rank_exclusive)) {
- return true;
- }
- prof_accum->accumbytes = 0;
-#else
- atomic_store_u64(&prof_accum->accumbytes, 0, ATOMIC_RELAXED);
-#endif
- return false;
+ return counter_accum_init(&prof_idump_accumulated, prof_interval);
}
void
@@ -2060,7 +315,7 @@ prof_idump(tsdn_t *tsdn) {
return;
}
- tdata = prof_tdata_get(tsd, false);
+ tdata = prof_tdata_get(tsd, true);
if (tdata == NULL) {
return;
}
@@ -2069,14 +324,7 @@ prof_idump(tsdn_t *tsdn) {
return;
}
- if (opt_prof_prefix[0] != '\0') {
- char filename[PATH_MAX + 1];
- malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
- prof_dump_filename(filename, 'i', prof_dump_iseq);
- prof_dump_iseq++;
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
- prof_dump(tsd, false, filename, false);
- }
+ prof_idump_impl(tsd);
}
bool
@@ -2087,19 +335,8 @@ prof_mdump(tsd_t *tsd, const char *filename) {
if (!opt_prof || !prof_booted) {
return true;
}
- char filename_buf[DUMP_FILENAME_BUFSIZE];
- if (filename == NULL) {
- /* No filename specified, so automatically generate one. */
- if (opt_prof_prefix[0] == '\0') {
- return true;
- }
- malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
- prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
- prof_dump_mseq++;
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
- filename = filename_buf;
- }
- return prof_dump(tsd, true, filename, false);
+
+ return prof_mdump_impl(tsd, filename);
}
void
@@ -2126,63 +363,7 @@ prof_gdump(tsdn_t *tsdn) {
return;
}
- if (opt_prof_prefix[0] != '\0') {
- char filename[DUMP_FILENAME_BUFSIZE];
- malloc_mutex_lock(tsdn, &prof_dump_seq_mtx);
- prof_dump_filename(filename, 'u', prof_dump_useq);
- prof_dump_useq++;
- malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx);
- prof_dump(tsd, false, filename, false);
- }
-}
-
-static void
-prof_bt_hash(const void *key, size_t r_hash[2]) {
- prof_bt_t *bt = (prof_bt_t *)key;
-
- cassert(config_prof);
-
- hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash);
-}
-
-static bool
-prof_bt_keycomp(const void *k1, const void *k2) {
- const prof_bt_t *bt1 = (prof_bt_t *)k1;
- const prof_bt_t *bt2 = (prof_bt_t *)k2;
-
- cassert(config_prof);
-
- if (bt1->len != bt2->len) {
- return false;
- }
- return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
-}
-
-static void
-prof_bt_node_hash(const void *key, size_t r_hash[2]) {
- const prof_bt_node_t *bt_node = (prof_bt_node_t *)key;
- prof_bt_hash((void *)(&bt_node->bt), r_hash);
-}
-
-static bool
-prof_bt_node_keycomp(const void *k1, const void *k2) {
- const prof_bt_node_t *bt_node1 = (prof_bt_node_t *)k1;
- const prof_bt_node_t *bt_node2 = (prof_bt_node_t *)k2;
- return prof_bt_keycomp((void *)(&bt_node1->bt),
- (void *)(&bt_node2->bt));
-}
-
-static void
-prof_thr_node_hash(const void *key, size_t r_hash[2]) {
- const prof_thr_node_t *thr_node = (prof_thr_node_t *)key;
- hash(&thr_node->thr_uid, sizeof(uint64_t), 0x94122f35U, r_hash);
-}
-
-static bool
-prof_thr_node_keycomp(const void *k1, const void *k2) {
- const prof_thr_node_t *thr_node1 = (prof_thr_node_t *)k1;
- const prof_thr_node_t *thr_node2 = (prof_thr_node_t *)k2;
- return thr_node1->thr_uid == thr_node2->thr_uid;
+ prof_gdump_impl(tsd);
}
static uint64_t
@@ -2197,132 +378,18 @@ prof_thr_uid_alloc(tsdn_t *tsdn) {
return thr_uid;
}
-static prof_tdata_t *
-prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
- char *thread_name, bool active) {
- prof_tdata_t *tdata;
-
- cassert(config_prof);
-
- /* Initialize an empty cache for this thread. */
- tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t),
- sz_size2index(sizeof(prof_tdata_t)), false, NULL, true,
- arena_get(TSDN_NULL, 0, true), true);
- if (tdata == NULL) {
- return NULL;
- }
-
- tdata->lock = prof_tdata_mutex_choose(thr_uid);
- tdata->thr_uid = thr_uid;
- tdata->thr_discrim = thr_discrim;
- tdata->thread_name = thread_name;
- tdata->attached = true;
- tdata->expired = false;
- tdata->tctx_uid_next = 0;
-
- if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash,
- prof_bt_keycomp)) {
- idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true);
- return NULL;
- }
-
- tdata->prng_state = (uint64_t)(uintptr_t)tdata;
- prof_sample_threshold_update(tdata);
-
- tdata->enq = false;
- tdata->enq_idump = false;
- tdata->enq_gdump = false;
-
- tdata->dumping = false;
- tdata->active = active;
-
- malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
- tdata_tree_insert(&tdatas, tdata);
- malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
-
- return tdata;
-}
-
prof_tdata_t *
prof_tdata_init(tsd_t *tsd) {
return prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0,
NULL, prof_thread_active_init_get(tsd_tsdn(tsd)));
}
-static bool
-prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) {
- if (tdata->attached && !even_if_attached) {
- return false;
- }
- if (ckh_count(&tdata->bt2tctx) != 0) {
- return false;
- }
- return true;
-}
-
-static bool
-prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
- bool even_if_attached) {
- malloc_mutex_assert_owner(tsdn, tdata->lock);
-
- return prof_tdata_should_destroy_unlocked(tdata, even_if_attached);
-}
-
-static void
-prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
- bool even_if_attached) {
- malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx);
-
- tdata_tree_remove(&tdatas, tdata);
-
- assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
-
- if (tdata->thread_name != NULL) {
- idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true,
- true);
- }
- ckh_delete(tsd, &tdata->bt2tctx);
- idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true);
-}
-
-static void
-prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) {
- malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
- prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
- malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
-}
-
-static void
-prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) {
- bool destroy_tdata;
-
- malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
- if (tdata->attached) {
- destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata,
- true);
- /*
- * Only detach if !destroy_tdata, because detaching would allow
- * another thread to win the race to destroy tdata.
- */
- if (!destroy_tdata) {
- tdata->attached = false;
- }
- tsd_prof_tdata_set(tsd, NULL);
- } else {
- destroy_tdata = false;
- }
- malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
- if (destroy_tdata) {
- prof_tdata_destroy(tsd, tdata, true);
- }
-}
-
prof_tdata_t *
prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) {
uint64_t thr_uid = tdata->thr_uid;
uint64_t thr_discrim = tdata->thr_discrim + 1;
char *thread_name = (tdata->thread_name != NULL) ?
- prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL;
+ prof_thread_name_alloc(tsd, tdata->thread_name) : NULL;
bool active = tdata->active;
prof_tdata_detach(tsd, tdata);
@@ -2330,58 +397,6 @@ prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) {
active);
}
-static bool
-prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) {
- bool destroy_tdata;
-
- malloc_mutex_lock(tsdn, tdata->lock);
- if (!tdata->expired) {
- tdata->expired = true;
- destroy_tdata = tdata->attached ? false :
- prof_tdata_should_destroy(tsdn, tdata, false);
- } else {
- destroy_tdata = false;
- }
- malloc_mutex_unlock(tsdn, tdata->lock);
-
- return destroy_tdata;
-}
-
-static prof_tdata_t *
-prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
- void *arg) {
- tsdn_t *tsdn = (tsdn_t *)arg;
-
- return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL);
-}
-
-void
-prof_reset(tsd_t *tsd, size_t lg_sample) {
- prof_tdata_t *next;
-
- assert(lg_sample < (sizeof(uint64_t) << 3));
-
- malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
- malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
-
- lg_prof_sample = lg_sample;
-
- next = NULL;
- do {
- prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
- prof_tdata_reset_iter, (void *)tsd);
- if (to_destroy != NULL) {
- next = tdata_tree_next(&tdatas, to_destroy);
- prof_tdata_destroy_locked(tsd, to_destroy, false);
- } else {
- next = NULL;
- }
- } while (next != NULL);
-
- malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
-}
-
void
prof_tdata_cleanup(tsd_t *tsd) {
prof_tdata_t *tdata;
@@ -2400,8 +415,9 @@ bool
prof_active_get(tsdn_t *tsdn) {
bool prof_active_current;
+ prof_active_assert();
malloc_mutex_lock(tsdn, &prof_active_mtx);
- prof_active_current = prof_active;
+ prof_active_current = prof_active_state;
malloc_mutex_unlock(tsdn, &prof_active_mtx);
return prof_active_current;
}
@@ -2410,377 +426,19 @@ bool
prof_active_set(tsdn_t *tsdn, bool active) {
bool prof_active_old;
+ prof_active_assert();
malloc_mutex_lock(tsdn, &prof_active_mtx);
- prof_active_old = prof_active;
- prof_active = active;
+ prof_active_old = prof_active_state;
+ prof_active_state = active;
malloc_mutex_unlock(tsdn, &prof_active_mtx);
+ prof_active_assert();
return prof_active_old;
}
-#ifdef JEMALLOC_JET
-size_t
-prof_log_bt_count(void) {
- size_t cnt = 0;
- prof_bt_node_t *node = log_bt_first;
- while (node != NULL) {
- cnt++;
- node = node->next;
- }
- return cnt;
-}
-
-size_t
-prof_log_alloc_count(void) {
- size_t cnt = 0;
- prof_alloc_node_t *node = log_alloc_first;
- while (node != NULL) {
- cnt++;
- node = node->next;
- }
- return cnt;
-}
-
-size_t
-prof_log_thr_count(void) {
- size_t cnt = 0;
- prof_thr_node_t *node = log_thr_first;
- while (node != NULL) {
- cnt++;
- node = node->next;
- }
- return cnt;
-}
-
-bool
-prof_log_is_logging(void) {
- return prof_logging_state == prof_logging_state_started;
-}
-
-bool
-prof_log_rep_check(void) {
- if (prof_logging_state == prof_logging_state_stopped
- && log_tables_initialized) {
- return true;
- }
-
- if (log_bt_last != NULL && log_bt_last->next != NULL) {
- return true;
- }
- if (log_thr_last != NULL && log_thr_last->next != NULL) {
- return true;
- }
- if (log_alloc_last != NULL && log_alloc_last->next != NULL) {
- return true;
- }
-
- size_t bt_count = prof_log_bt_count();
- size_t thr_count = prof_log_thr_count();
- size_t alloc_count = prof_log_alloc_count();
-
-
- if (prof_logging_state == prof_logging_state_stopped) {
- if (bt_count != 0 || thr_count != 0 || alloc_count || 0) {
- return true;
- }
- }
-
- prof_alloc_node_t *node = log_alloc_first;
- while (node != NULL) {
- if (node->alloc_bt_ind >= bt_count) {
- return true;
- }
- if (node->free_bt_ind >= bt_count) {
- return true;
- }
- if (node->alloc_thr_ind >= thr_count) {
- return true;
- }
- if (node->free_thr_ind >= thr_count) {
- return true;
- }
- if (node->alloc_time_ns > node->free_time_ns) {
- return true;
- }
- node = node->next;
- }
-
- return false;
-}
-
-void
-prof_log_dummy_set(bool new_value) {
- prof_log_dummy = new_value;
-}
-#endif
-
-bool
-prof_log_start(tsdn_t *tsdn, const char *filename) {
- if (!opt_prof || !prof_booted) {
- return true;
- }
-
- bool ret = false;
- size_t buf_size = PATH_MAX + 1;
-
- malloc_mutex_lock(tsdn, &log_mtx);
-
- if (prof_logging_state != prof_logging_state_stopped) {
- ret = true;
- } else if (filename == NULL) {
- /* Make default name. */
- malloc_snprintf(log_filename, buf_size, "%s.%d.%"FMTu64".json",
- opt_prof_prefix, prof_getpid(), log_seq);
- log_seq++;
- prof_logging_state = prof_logging_state_started;
- } else if (strlen(filename) >= buf_size) {
- ret = true;
- } else {
- strcpy(log_filename, filename);
- prof_logging_state = prof_logging_state_started;
- }
-
- if (!ret) {
- nstime_update(&log_start_timestamp);
- }
-
- malloc_mutex_unlock(tsdn, &log_mtx);
-
- return ret;
-}
-
-/* Used as an atexit function to stop logging on exit. */
-static void
-prof_log_stop_final(void) {
- tsd_t *tsd = tsd_fetch();
- prof_log_stop(tsd_tsdn(tsd));
-}
-
-struct prof_emitter_cb_arg_s {
- int fd;
- ssize_t ret;
-};
-
-static void
-prof_emitter_write_cb(void *opaque, const char *to_write) {
- struct prof_emitter_cb_arg_s *arg =
- (struct prof_emitter_cb_arg_s *)opaque;
- size_t bytes = strlen(to_write);
-#ifdef JEMALLOC_JET
- if (prof_log_dummy) {
- return;
- }
-#endif
- arg->ret = write(arg->fd, (void *)to_write, bytes);
-}
-
-/*
- * prof_log_emit_{...} goes through the appropriate linked list, emitting each
- * node to the json and deallocating it.
- */
-static void
-prof_log_emit_threads(tsd_t *tsd, emitter_t *emitter) {
- emitter_json_array_kv_begin(emitter, "threads");
- prof_thr_node_t *thr_node = log_thr_first;
- prof_thr_node_t *thr_old_node;
- while (thr_node != NULL) {
- emitter_json_object_begin(emitter);
-
- emitter_json_kv(emitter, "thr_uid", emitter_type_uint64,
- &thr_node->thr_uid);
-
- char *thr_name = thr_node->name;
-
- emitter_json_kv(emitter, "thr_name", emitter_type_string,
- &thr_name);
-
- emitter_json_object_end(emitter);
- thr_old_node = thr_node;
- thr_node = thr_node->next;
- idalloc(tsd, thr_old_node);
- }
- emitter_json_array_end(emitter);
-}
-
-static void
-prof_log_emit_traces(tsd_t *tsd, emitter_t *emitter) {
- emitter_json_array_kv_begin(emitter, "stack_traces");
- prof_bt_node_t *bt_node = log_bt_first;
- prof_bt_node_t *bt_old_node;
- /*
- * Calculate how many hex digits we need: twice number of bytes, two for
- * "0x", and then one more for terminating '\0'.
- */
- char buf[2 * sizeof(intptr_t) + 3];
- size_t buf_sz = sizeof(buf);
- while (bt_node != NULL) {
- emitter_json_array_begin(emitter);
- size_t i;
- for (i = 0; i < bt_node->bt.len; i++) {
- malloc_snprintf(buf, buf_sz, "%p", bt_node->bt.vec[i]);
- char *trace_str = buf;
- emitter_json_value(emitter, emitter_type_string,
- &trace_str);
- }
- emitter_json_array_end(emitter);
-
- bt_old_node = bt_node;
- bt_node = bt_node->next;
- idalloc(tsd, bt_old_node);
- }
- emitter_json_array_end(emitter);
-}
-
-static void
-prof_log_emit_allocs(tsd_t *tsd, emitter_t *emitter) {
- emitter_json_array_kv_begin(emitter, "allocations");
- prof_alloc_node_t *alloc_node = log_alloc_first;
- prof_alloc_node_t *alloc_old_node;
- while (alloc_node != NULL) {
- emitter_json_object_begin(emitter);
-
- emitter_json_kv(emitter, "alloc_thread", emitter_type_size,
- &alloc_node->alloc_thr_ind);
-
- emitter_json_kv(emitter, "free_thread", emitter_type_size,
- &alloc_node->free_thr_ind);
-
- emitter_json_kv(emitter, "alloc_trace", emitter_type_size,
- &alloc_node->alloc_bt_ind);
-
- emitter_json_kv(emitter, "free_trace", emitter_type_size,
- &alloc_node->free_bt_ind);
-
- emitter_json_kv(emitter, "alloc_timestamp",
- emitter_type_uint64, &alloc_node->alloc_time_ns);
-
- emitter_json_kv(emitter, "free_timestamp", emitter_type_uint64,
- &alloc_node->free_time_ns);
-
- emitter_json_kv(emitter, "usize", emitter_type_uint64,
- &alloc_node->usize);
-
- emitter_json_object_end(emitter);
-
- alloc_old_node = alloc_node;
- alloc_node = alloc_node->next;
- idalloc(tsd, alloc_old_node);
- }
- emitter_json_array_end(emitter);
-}
-
-static void
-prof_log_emit_metadata(emitter_t *emitter) {
- emitter_json_object_kv_begin(emitter, "info");
-
- nstime_t now = NSTIME_ZERO_INITIALIZER;
-
- nstime_update(&now);
- uint64_t ns = nstime_ns(&now) - nstime_ns(&log_start_timestamp);
- emitter_json_kv(emitter, "duration", emitter_type_uint64, &ns);
-
- char *vers = JEMALLOC_VERSION;
- emitter_json_kv(emitter, "version",
- emitter_type_string, &vers);
-
- emitter_json_kv(emitter, "lg_sample_rate",
- emitter_type_int, &lg_prof_sample);
-
- int pid = prof_getpid();
- emitter_json_kv(emitter, "pid", emitter_type_int, &pid);
-
- emitter_json_object_end(emitter);
-}
-
-
-bool
-prof_log_stop(tsdn_t *tsdn) {
- if (!opt_prof || !prof_booted) {
- return true;
- }
-
- tsd_t *tsd = tsdn_tsd(tsdn);
- malloc_mutex_lock(tsdn, &log_mtx);
-
- if (prof_logging_state != prof_logging_state_started) {
- malloc_mutex_unlock(tsdn, &log_mtx);
- return true;
- }
-
- /*
- * Set the state to dumping. We'll set it to stopped when we're done.
- * Since other threads won't be able to start/stop/log when the state is
- * dumping, we don't have to hold the lock during the whole method.
- */
- prof_logging_state = prof_logging_state_dumping;
- malloc_mutex_unlock(tsdn, &log_mtx);
-
-
- emitter_t emitter;
-
- /* Create a file. */
-
- int fd;
-#ifdef JEMALLOC_JET
- if (prof_log_dummy) {
- fd = 0;
- } else {
- fd = creat(log_filename, 0644);
- }
-#else
- fd = creat(log_filename, 0644);
-#endif
-
- if (fd == -1) {
- malloc_printf("<jemalloc>: creat() for log file \"%s\" "
- " failed with %d\n", log_filename, errno);
- if (opt_abort) {
- abort();
- }
- return true;
- }
-
- /* Emit to json. */
- struct prof_emitter_cb_arg_s arg;
- arg.fd = fd;
- emitter_init(&emitter, emitter_output_json, &prof_emitter_write_cb,
- (void *)(&arg));
-
- emitter_begin(&emitter);
- prof_log_emit_metadata(&emitter);
- prof_log_emit_threads(tsd, &emitter);
- prof_log_emit_traces(tsd, &emitter);
- prof_log_emit_allocs(tsd, &emitter);
- emitter_end(&emitter);
-
- /* Reset global state. */
- if (log_tables_initialized) {
- ckh_delete(tsd, &log_bt_node_set);
- ckh_delete(tsd, &log_thr_node_set);
- }
- log_tables_initialized = false;
- log_bt_index = 0;
- log_thr_index = 0;
- log_bt_first = NULL;
- log_bt_last = NULL;
- log_thr_first = NULL;
- log_thr_last = NULL;
- log_alloc_first = NULL;
- log_alloc_last = NULL;
-
- malloc_mutex_lock(tsdn, &log_mtx);
- prof_logging_state = prof_logging_state_stopped;
- malloc_mutex_unlock(tsdn, &log_mtx);
-
-#ifdef JEMALLOC_JET
- if (prof_log_dummy) {
- return false;
- }
-#endif
- return close(fd);
-}
-
const char *
prof_thread_name_get(tsd_t *tsd) {
+ assert(tsd_reentrancy_level_get(tsd) == 0);
+
prof_tdata_t *tdata;
tdata = prof_tdata_get(tsd, true);
@@ -2790,69 +448,19 @@ prof_thread_name_get(tsd_t *tsd) {
return (tdata->thread_name != NULL ? tdata->thread_name : "");
}
-static char *
-prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) {
- char *ret;
- size_t size;
-
- if (thread_name == NULL) {
- return NULL;
- }
-
- size = strlen(thread_name) + 1;
- if (size == 1) {
- return "";
- }
-
- ret = iallocztm(tsdn, size, sz_size2index(size), false, NULL, true,
- arena_get(TSDN_NULL, 0, true), true);
- if (ret == NULL) {
- return NULL;
- }
- memcpy(ret, thread_name, size);
- return ret;
-}
-
int
prof_thread_name_set(tsd_t *tsd, const char *thread_name) {
- prof_tdata_t *tdata;
- unsigned i;
- char *s;
-
- tdata = prof_tdata_get(tsd, true);
- if (tdata == NULL) {
- return EAGAIN;
- }
-
- /* Validate input. */
- if (thread_name == NULL) {
- return EFAULT;
- }
- for (i = 0; thread_name[i] != '\0'; i++) {
- char c = thread_name[i];
- if (!isgraph(c) && !isblank(c)) {
- return EFAULT;
- }
- }
-
- s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name);
- if (s == NULL) {
- return EAGAIN;
- }
-
- if (tdata->thread_name != NULL) {
- idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true,
- true);
- tdata->thread_name = NULL;
- }
- if (strlen(s) > 0) {
- tdata->thread_name = s;
+ if (opt_prof_sys_thread_name) {
+ return ENOENT;
+ } else {
+ return prof_thread_name_set_impl(tsd, thread_name);
}
- return 0;
}
bool
prof_thread_active_get(tsd_t *tsd) {
+ assert(tsd_reentrancy_level_get(tsd) == 0);
+
prof_tdata_t *tdata;
tdata = prof_tdata_get(tsd, true);
@@ -2864,6 +472,8 @@ prof_thread_active_get(tsd_t *tsd) {
bool
prof_thread_active_set(tsd_t *tsd, bool active) {
+ assert(tsd_reentrancy_level_get(tsd) == 0);
+
prof_tdata_t *tdata;
tdata = prof_tdata_get(tsd, true);
@@ -2917,6 +527,28 @@ prof_gdump_set(tsdn_t *tsdn, bool gdump) {
}
void
+prof_backtrace_hook_set(prof_backtrace_hook_t hook) {
+ atomic_store_p(&prof_backtrace_hook, hook, ATOMIC_RELEASE);
+}
+
+prof_backtrace_hook_t
+prof_backtrace_hook_get() {
+ return (prof_backtrace_hook_t)atomic_load_p(&prof_backtrace_hook,
+ ATOMIC_ACQUIRE);
+}
+
+void
+prof_dump_hook_set(prof_dump_hook_t hook) {
+ atomic_store_p(&prof_dump_hook, hook, ATOMIC_RELEASE);
+}
+
+prof_dump_hook_t
+prof_dump_hook_get() {
+ return (prof_dump_hook_t)atomic_load_p(&prof_dump_hook,
+ ATOMIC_ACQUIRE);
+}
+
+void
prof_boot0(void) {
cassert(config_prof);
@@ -2932,6 +564,9 @@ prof_boot1(void) {
* opt_prof must be in its final state before any arenas are
* initialized, so this function must be executed early.
*/
+ if (opt_prof_leak_error && !opt_prof_leak) {
+ opt_prof_leak = true;
+ }
if (opt_prof_leak && !opt_prof) {
/*
@@ -2949,61 +584,65 @@ prof_boot1(void) {
}
bool
-prof_boot2(tsd_t *tsd) {
+prof_boot2(tsd_t *tsd, base_t *base) {
cassert(config_prof);
- if (opt_prof) {
- unsigned i;
+ /*
+ * Initialize the global mutexes unconditionally to maintain correct
+ * stats when opt_prof is false.
+ */
+ if (malloc_mutex_init(&prof_active_mtx, "prof_active",
+ WITNESS_RANK_PROF_ACTIVE, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
+ WITNESS_RANK_PROF_GDUMP, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ if (malloc_mutex_init(&prof_thread_active_init_mtx,
+ "prof_thread_active_init", WITNESS_RANK_PROF_THREAD_ACTIVE_INIT,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
+ WITNESS_RANK_PROF_BT2GCTX, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
+ WITNESS_RANK_PROF_TDATAS, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
+ WITNESS_RANK_PROF_NEXT_THR_UID, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ if (malloc_mutex_init(&prof_stats_mtx, "prof_stats",
+ WITNESS_RANK_PROF_STATS, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ if (malloc_mutex_init(&prof_dump_filename_mtx,
+ "prof_dump_filename", WITNESS_RANK_PROF_DUMP_FILENAME,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
+ WITNESS_RANK_PROF_DUMP, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ if (opt_prof) {
lg_prof_sample = opt_lg_prof_sample;
-
- prof_active = opt_prof_active;
- if (malloc_mutex_init(&prof_active_mtx, "prof_active",
- WITNESS_RANK_PROF_ACTIVE, malloc_mutex_rank_exclusive)) {
- return true;
- }
-
+ prof_unbias_map_init();
+ prof_active_state = opt_prof_active;
prof_gdump_val = opt_prof_gdump;
- if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
- WITNESS_RANK_PROF_GDUMP, malloc_mutex_rank_exclusive)) {
- return true;
- }
-
prof_thread_active_init = opt_prof_thread_active_init;
- if (malloc_mutex_init(&prof_thread_active_init_mtx,
- "prof_thread_active_init",
- WITNESS_RANK_PROF_THREAD_ACTIVE_INIT,
- malloc_mutex_rank_exclusive)) {
- return true;
- }
-
- if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
- prof_bt_keycomp)) {
- return true;
- }
- if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
- WITNESS_RANK_PROF_BT2GCTX, malloc_mutex_rank_exclusive)) {
- return true;
- }
- tdata_tree_new(&tdatas);
- if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
- WITNESS_RANK_PROF_TDATAS, malloc_mutex_rank_exclusive)) {
+ if (prof_data_init(tsd)) {
return true;
}
next_thr_uid = 0;
- if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
- WITNESS_RANK_PROF_NEXT_THR_UID, malloc_mutex_rank_exclusive)) {
- return true;
- }
-
- if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq",
- WITNESS_RANK_PROF_DUMP_SEQ, malloc_mutex_rank_exclusive)) {
- return true;
- }
- if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
- WITNESS_RANK_PROF_DUMP, malloc_mutex_rank_exclusive)) {
+ if (prof_idump_accum_init()) {
return true;
}
@@ -3015,42 +654,22 @@ prof_boot2(tsd_t *tsd) {
}
}
- if (opt_prof_log) {
- prof_log_start(tsd_tsdn(tsd), NULL);
- }
-
- if (atexit(prof_log_stop_final) != 0) {
- malloc_write("<jemalloc>: Error in atexit() "
- "for logging\n");
- if (opt_abort) {
- abort();
- }
- }
-
- if (malloc_mutex_init(&log_mtx, "prof_log",
- WITNESS_RANK_PROF_LOG, malloc_mutex_rank_exclusive)) {
- return true;
- }
-
- if (ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS,
- prof_bt_node_hash, prof_bt_node_keycomp)) {
+ if (prof_log_init(tsd)) {
return true;
}
- if (ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS,
- prof_thr_node_hash, prof_thr_node_keycomp)) {
+ if (prof_recent_init()) {
return true;
}
- log_tables_initialized = true;
+ prof_base = base;
- gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
- b0get(), PROF_NCTX_LOCKS * sizeof(malloc_mutex_t),
- CACHELINE);
+ gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), base,
+ PROF_NCTX_LOCKS * sizeof(malloc_mutex_t), CACHELINE);
if (gctx_locks == NULL) {
return true;
}
- for (i = 0; i < PROF_NCTX_LOCKS; i++) {
+ for (unsigned i = 0; i < PROF_NCTX_LOCKS; i++) {
if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
WITNESS_RANK_PROF_GCTX,
malloc_mutex_rank_exclusive)) {
@@ -3058,26 +677,21 @@ prof_boot2(tsd_t *tsd) {
}
}
- tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
- b0get(), PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t),
- CACHELINE);
+ tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), base,
+ PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t), CACHELINE);
if (tdata_locks == NULL) {
return true;
}
- for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
+ for (unsigned i = 0; i < PROF_NTDATA_LOCKS; i++) {
if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
WITNESS_RANK_PROF_TDATA,
malloc_mutex_rank_exclusive)) {
return true;
}
}
-#ifdef JEMALLOC_PROF_LIBGCC
- /*
- * Cause the backtracing machinery to allocate its internal
- * state before enabling profiling.
- */
- _Unwind_Backtrace(prof_unwind_init_callback, NULL);
-#endif
+
+ prof_unwind_init();
+ prof_hooks_init();
}
prof_booted = true;
@@ -3095,18 +709,23 @@ prof_prefork0(tsdn_t *tsdn) {
for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
malloc_mutex_prefork(tsdn, &tdata_locks[i]);
}
+ malloc_mutex_prefork(tsdn, &log_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++) {
malloc_mutex_prefork(tsdn, &gctx_locks[i]);
}
+ malloc_mutex_prefork(tsdn, &prof_recent_dump_mtx);
}
}
void
prof_prefork1(tsdn_t *tsdn) {
if (config_prof && opt_prof) {
+ counter_prefork(tsdn, &prof_idump_accumulated);
malloc_mutex_prefork(tsdn, &prof_active_mtx);
- malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx);
+ malloc_mutex_prefork(tsdn, &prof_dump_filename_mtx);
malloc_mutex_prefork(tsdn, &prof_gdump_mtx);
+ malloc_mutex_prefork(tsdn, &prof_recent_alloc_mtx);
+ malloc_mutex_prefork(tsdn, &prof_stats_mtx);
malloc_mutex_prefork(tsdn, &next_thr_uid_mtx);
malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx);
}
@@ -3120,12 +739,17 @@ prof_postfork_parent(tsdn_t *tsdn) {
malloc_mutex_postfork_parent(tsdn,
&prof_thread_active_init_mtx);
malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx);
+ malloc_mutex_postfork_parent(tsdn, &prof_stats_mtx);
+ malloc_mutex_postfork_parent(tsdn, &prof_recent_alloc_mtx);
malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx);
- malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx);
+ malloc_mutex_postfork_parent(tsdn, &prof_dump_filename_mtx);
malloc_mutex_postfork_parent(tsdn, &prof_active_mtx);
+ counter_postfork_parent(tsdn, &prof_idump_accumulated);
+ malloc_mutex_postfork_parent(tsdn, &prof_recent_dump_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++) {
malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]);
}
+ malloc_mutex_postfork_parent(tsdn, &log_mtx);
for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]);
}
@@ -3142,12 +766,17 @@ prof_postfork_child(tsdn_t *tsdn) {
malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx);
malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx);
+ malloc_mutex_postfork_child(tsdn, &prof_stats_mtx);
+ malloc_mutex_postfork_child(tsdn, &prof_recent_alloc_mtx);
malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx);
- malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx);
+ malloc_mutex_postfork_child(tsdn, &prof_dump_filename_mtx);
malloc_mutex_postfork_child(tsdn, &prof_active_mtx);
+ counter_postfork_child(tsdn, &prof_idump_accumulated);
+ malloc_mutex_postfork_child(tsdn, &prof_recent_dump_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++) {
malloc_mutex_postfork_child(tsdn, &gctx_locks[i]);
}
+ malloc_mutex_postfork_child(tsdn, &log_mtx);
for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
malloc_mutex_postfork_child(tsdn, &tdata_locks[i]);
}
diff --git a/deps/jemalloc/src/prof_data.c b/deps/jemalloc/src/prof_data.c
new file mode 100644
index 000000000..bfa55be1c
--- /dev/null
+++ b/deps/jemalloc/src/prof_data.c
@@ -0,0 +1,1447 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/malloc_io.h"
+#include "jemalloc/internal/prof_data.h"
+
+/*
+ * This file defines and manages the core profiling data structures.
+ *
+ * Conceptually, profiling data can be imagined as a table with three columns:
+ * thread, stack trace, and current allocation size. (When prof_accum is on,
+ * there's one additional column which is the cumulative allocation size.)
+ *
+ * Implementation-wise, each thread maintains a hash recording the stack trace
+ * to allocation size correspondences, which are basically the individual rows
+ * in the table. In addition, two global "indices" are built to make data
+ * aggregation efficient (for dumping): bt2gctx and tdatas, which are basically
+ * the "grouped by stack trace" and "grouped by thread" views of the same table,
+ * respectively. Note that the allocation size is only aggregated to the two
+ * indices at dumping time, so as to optimize for performance.
+ */
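
A minimal standalone sketch of that conceptual "row" (hypothetical names, not
jemalloc code; the real data is spread across the tdata, gctx and tctx
structures) might look like the struct below, with bt2gctx acting as the view
grouped by (stack_vec, stack_len) and tdatas as the view grouped by thr_uid:

#include <stddef.h>
#include <stdint.h>

struct prof_row_sketch {
	uint64_t thr_uid;     /* thread column */
	void **stack_vec;     /* stack trace column */
	size_t stack_len;
	uint64_t cur_bytes;   /* current allocation size column */
	uint64_t accum_bytes; /* cumulative size column (only when prof_accum) */
};
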
+
+/******************************************************************************/
+
+malloc_mutex_t bt2gctx_mtx;
+malloc_mutex_t tdatas_mtx;
+malloc_mutex_t prof_dump_mtx;
+
+/*
+ * Table of mutexes that are shared among gctx's. These are leaf locks, so
+ * there is no problem with using them for more than one gctx at the same time.
+ * The primary motivation for this sharing though is that gctx's are ephemeral,
+ * and destroying mutexes causes complications for systems that allocate when
+ * creating/destroying mutexes.
+ */
+malloc_mutex_t *gctx_locks;
+static atomic_u_t cum_gctxs; /* Atomic counter. */
+
+/*
+ * Table of mutexes that are shared among tdata's. No operations require
+ * holding multiple tdata locks, so there is no problem with using them for more
+ * than one tdata at the same time, even though a gctx lock may be acquired
+ * while holding a tdata lock.
+ */
+malloc_mutex_t *tdata_locks;
+
+/*
+ * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data
+ * structure that knows about all backtraces currently captured.
+ */
+static ckh_t bt2gctx;
+
+/*
+ * Tree of all extant prof_tdata_t structures, regardless of state,
+ * {attached,detached,expired}.
+ */
+static prof_tdata_tree_t tdatas;
+
+size_t prof_unbiased_sz[PROF_SC_NSIZES];
+size_t prof_shifted_unbiased_cnt[PROF_SC_NSIZES];
+
+/******************************************************************************/
+/* Red-black trees. */
+
+static int
+prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) {
+ uint64_t a_thr_uid = a->thr_uid;
+ uint64_t b_thr_uid = b->thr_uid;
+ int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
+ if (ret == 0) {
+ uint64_t a_thr_discrim = a->thr_discrim;
+ uint64_t b_thr_discrim = b->thr_discrim;
+ ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim <
+ b_thr_discrim);
+ if (ret == 0) {
+ uint64_t a_tctx_uid = a->tctx_uid;
+ uint64_t b_tctx_uid = b->tctx_uid;
+ ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid <
+ b_tctx_uid);
+ }
+ }
+ return ret;
+}
+
+rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
+ tctx_link, prof_tctx_comp)
+
+static int
+prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) {
+ unsigned a_len = a->bt.len;
+ unsigned b_len = b->bt.len;
+ unsigned comp_len = (a_len < b_len) ? a_len : b_len;
+ int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *));
+ if (ret == 0) {
+ ret = (a_len > b_len) - (a_len < b_len);
+ }
+ return ret;
+}
+
+rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
+ prof_gctx_comp)
+
+static int
+prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) {
+ int ret;
+ uint64_t a_uid = a->thr_uid;
+ uint64_t b_uid = b->thr_uid;
+
+ ret = ((a_uid > b_uid) - (a_uid < b_uid));
+ if (ret == 0) {
+ uint64_t a_discrim = a->thr_discrim;
+ uint64_t b_discrim = b->thr_discrim;
+
+ ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim));
+ }
+ return ret;
+}
+
+rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
+ prof_tdata_comp)
+
+/******************************************************************************/
+
+static malloc_mutex_t *
+prof_gctx_mutex_choose(void) {
+ unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED);
+
+ return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS];
+}
+
+static malloc_mutex_t *
+prof_tdata_mutex_choose(uint64_t thr_uid) {
+ return &tdata_locks[thr_uid % PROF_NTDATA_LOCKS];
+}
+
+bool
+prof_data_init(tsd_t *tsd) {
+ tdata_tree_new(&tdatas);
+ return ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS,
+ prof_bt_hash, prof_bt_keycomp);
+}
+
+static void
+prof_enter(tsd_t *tsd, prof_tdata_t *tdata) {
+ cassert(config_prof);
+ assert(tdata == prof_tdata_get(tsd, false));
+
+ if (tdata != NULL) {
+ assert(!tdata->enq);
+ tdata->enq = true;
+ }
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
+}
+
+static void
+prof_leave(tsd_t *tsd, prof_tdata_t *tdata) {
+ cassert(config_prof);
+ assert(tdata == prof_tdata_get(tsd, false));
+
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
+
+ if (tdata != NULL) {
+ bool idump, gdump;
+
+ assert(tdata->enq);
+ tdata->enq = false;
+ idump = tdata->enq_idump;
+ tdata->enq_idump = false;
+ gdump = tdata->enq_gdump;
+ tdata->enq_gdump = false;
+
+ if (idump) {
+ prof_idump(tsd_tsdn(tsd));
+ }
+ if (gdump) {
+ prof_gdump(tsd_tsdn(tsd));
+ }
+ }
+}
+
+static prof_gctx_t *
+prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) {
+ /*
+ * Create a single allocation that has space for vec of length bt->len.
+ */
+ size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
+ prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size,
+ sz_size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
+ true);
+ if (gctx == NULL) {
+ return NULL;
+ }
+ gctx->lock = prof_gctx_mutex_choose();
+ /*
+ * Set nlimbo to 1, in order to avoid a race condition with
+ * prof_tctx_destroy()/prof_gctx_try_destroy().
+ */
+ gctx->nlimbo = 1;
+ tctx_tree_new(&gctx->tctxs);
+ /* Duplicate bt. */
+ memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *));
+ gctx->bt.vec = gctx->vec;
+ gctx->bt.len = bt->len;
+ return gctx;
+}
+
+static void
+prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self,
+ prof_gctx_t *gctx) {
+ cassert(config_prof);
+
+ /*
+ * Check that gctx is still unused by any thread cache before destroying
+ * it. prof_lookup() increments gctx->nlimbo in order to avoid a race
+ * condition with this function, as does prof_tctx_destroy() in order to
+ * avoid a race between the main body of prof_tctx_destroy() and entry
+ * into this function.
+ */
+ prof_enter(tsd, tdata_self);
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ assert(gctx->nlimbo != 0);
+ if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
+ /* Remove gctx from bt2gctx. */
+ if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) {
+ not_reached();
+ }
+ prof_leave(tsd, tdata_self);
+ /* Destroy gctx. */
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ idalloctm(tsd_tsdn(tsd), gctx, NULL, NULL, true, true);
+ } else {
+ /*
+ * Compensate for increment in prof_tctx_destroy() or
+ * prof_lookup().
+ */
+ gctx->nlimbo--;
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ prof_leave(tsd, tdata_self);
+ }
+}
+
+static bool
+prof_gctx_should_destroy(prof_gctx_t *gctx) {
+ if (opt_prof_accum) {
+ return false;
+ }
+ if (!tctx_tree_empty(&gctx->tctxs)) {
+ return false;
+ }
+ if (gctx->nlimbo != 0) {
+ return false;
+ }
+ return true;
+}
+
+static bool
+prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
+ void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) {
+ union {
+ prof_gctx_t *p;
+ void *v;
+ } gctx, tgctx;
+ union {
+ prof_bt_t *p;
+ void *v;
+ } btkey;
+ bool new_gctx;
+
+ prof_enter(tsd, tdata);
+ if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
+ /* bt has never been seen before. Insert it. */
+ prof_leave(tsd, tdata);
+ tgctx.p = prof_gctx_create(tsd_tsdn(tsd), bt);
+ if (tgctx.v == NULL) {
+ return true;
+ }
+ prof_enter(tsd, tdata);
+ if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
+ gctx.p = tgctx.p;
+ btkey.p = &gctx.p->bt;
+ if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
+ /* OOM. */
+ prof_leave(tsd, tdata);
+ idalloctm(tsd_tsdn(tsd), gctx.v, NULL, NULL,
+ true, true);
+ return true;
+ }
+ new_gctx = true;
+ } else {
+ new_gctx = false;
+ }
+ } else {
+ tgctx.v = NULL;
+ new_gctx = false;
+ }
+
+ if (!new_gctx) {
+ /*
+ * Increment nlimbo, in order to avoid a race condition with
+ * prof_tctx_destroy()/prof_gctx_try_destroy().
+ */
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock);
+ gctx.p->nlimbo++;
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock);
+ new_gctx = false;
+
+ if (tgctx.v != NULL) {
+ /* Lost race to insert. */
+ idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true,
+ true);
+ }
+ }
+ prof_leave(tsd, tdata);
+
+ *p_btkey = btkey.v;
+ *p_gctx = gctx.p;
+ *p_new_gctx = new_gctx;
+ return false;
+}
+
+prof_tctx_t *
+prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
+ union {
+ prof_tctx_t *p;
+ void *v;
+ } ret;
+ prof_tdata_t *tdata;
+ bool not_found;
+
+ cassert(config_prof);
+
+ tdata = prof_tdata_get(tsd, false);
+ assert(tdata != NULL);
+
+ malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
+ not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
+ if (!not_found) { /* Note double negative! */
+ ret.p->prepared = true;
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+ if (not_found) {
+ void *btkey;
+ prof_gctx_t *gctx;
+ bool new_gctx, error;
+
+ /*
+ * This thread's cache lacks bt. Look for it in the global
+ * cache.
+ */
+ if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
+ &new_gctx)) {
+ return NULL;
+ }
+
+ /* Link a prof_tctx_t into gctx for this thread. */
+ ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t),
+ sz_size2index(sizeof(prof_tctx_t)), false, NULL, true,
+ arena_ichoose(tsd, NULL), true);
+ if (ret.p == NULL) {
+ if (new_gctx) {
+ prof_gctx_try_destroy(tsd, tdata, gctx);
+ }
+ return NULL;
+ }
+ ret.p->tdata = tdata;
+ ret.p->thr_uid = tdata->thr_uid;
+ ret.p->thr_discrim = tdata->thr_discrim;
+ ret.p->recent_count = 0;
+ memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
+ ret.p->gctx = gctx;
+ ret.p->tctx_uid = tdata->tctx_uid_next++;
+ ret.p->prepared = true;
+ ret.p->state = prof_tctx_state_initializing;
+ malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
+ error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
+ malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+ if (error) {
+ if (new_gctx) {
+ prof_gctx_try_destroy(tsd, tdata, gctx);
+ }
+ idalloctm(tsd_tsdn(tsd), ret.v, NULL, NULL, true, true);
+ return NULL;
+ }
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ ret.p->state = prof_tctx_state_nominal;
+ tctx_tree_insert(&gctx->tctxs, ret.p);
+ gctx->nlimbo--;
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ }
+
+ return ret.p;
+}
+
+/* Used in unit tests. */
+static prof_tdata_t *
+prof_tdata_count_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
+ void *arg) {
+ size_t *tdata_count = (size_t *)arg;
+
+ (*tdata_count)++;
+
+ return NULL;
+}
+
+/* Used in unit tests. */
+size_t
+prof_tdata_count(void) {
+ size_t tdata_count = 0;
+ tsdn_t *tsdn;
+
+ tsdn = tsdn_fetch();
+ malloc_mutex_lock(tsdn, &tdatas_mtx);
+ tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
+ (void *)&tdata_count);
+ malloc_mutex_unlock(tsdn, &tdatas_mtx);
+
+ return tdata_count;
+}
+
+/* Used in unit tests. */
+size_t
+prof_bt_count(void) {
+ size_t bt_count;
+ tsd_t *tsd;
+ prof_tdata_t *tdata;
+
+ tsd = tsd_fetch();
+ tdata = prof_tdata_get(tsd, false);
+ if (tdata == NULL) {
+ return 0;
+ }
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
+ bt_count = ckh_count(&bt2gctx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
+
+ return bt_count;
+}
+
+char *
+prof_thread_name_alloc(tsd_t *tsd, const char *thread_name) {
+ char *ret;
+ size_t size;
+
+ if (thread_name == NULL) {
+ return NULL;
+ }
+
+ size = strlen(thread_name) + 1;
+ if (size == 1) {
+ return "";
+ }
+
+ ret = iallocztm(tsd_tsdn(tsd), size, sz_size2index(size), false, NULL,
+ true, arena_get(TSDN_NULL, 0, true), true);
+ if (ret == NULL) {
+ return NULL;
+ }
+ memcpy(ret, thread_name, size);
+ return ret;
+}
+
+int
+prof_thread_name_set_impl(tsd_t *tsd, const char *thread_name) {
+ assert(tsd_reentrancy_level_get(tsd) == 0);
+
+ prof_tdata_t *tdata;
+ unsigned i;
+ char *s;
+
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL) {
+ return EAGAIN;
+ }
+
+ /* Validate input. */
+ if (thread_name == NULL) {
+ return EFAULT;
+ }
+ for (i = 0; thread_name[i] != '\0'; i++) {
+ char c = thread_name[i];
+ if (!isgraph(c) && !isblank(c)) {
+ return EFAULT;
+ }
+ }
+
+ s = prof_thread_name_alloc(tsd, thread_name);
+ if (s == NULL) {
+ return EAGAIN;
+ }
+
+ if (tdata->thread_name != NULL) {
+ idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true,
+ true);
+ tdata->thread_name = NULL;
+ }
+ if (strlen(s) > 0) {
+ tdata->thread_name = s;
+ }
+ return 0;
+}
+
+JEMALLOC_FORMAT_PRINTF(3, 4)
+static void
+prof_dump_printf(write_cb_t *prof_dump_write, void *cbopaque,
+ const char *format, ...) {
+ va_list ap;
+ char buf[PROF_PRINTF_BUFSIZE];
+
+ va_start(ap, format);
+ malloc_vsnprintf(buf, sizeof(buf), format, ap);
+ va_end(ap);
+ prof_dump_write(cbopaque, buf);
+}
+
+/*
+ * Casting a double to a uint64_t may not necessarily be in range; this can be
+ * UB. I don't think this is practically possible with the cur counters, but
+ * plausibly could be with the accum counters.
+ */
+#ifdef JEMALLOC_PROF
+static uint64_t
+prof_double_uint64_cast(double d) {
+ /*
+ * Note: UINT64_MAX + 1 is exactly representable as a double on all
+ * reasonable platforms (certainly those we'll support). Writing this
+ * as !(a < b) instead of (a >= b) means that we're NaN-safe.
+ */
+ double rounded = round(d);
+ if (!(rounded < (double)UINT64_MAX)) {
+ return UINT64_MAX;
+ }
+ return (uint64_t)rounded;
+}
+#endif
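
The clamp above is easy to exercise in isolation; a minimal standalone sketch
(illustrative names, not jemalloc code) showing that out-of-range values and
NaN both saturate to UINT64_MAX:

#include <math.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t
clamp_to_u64(double d) {
	double rounded = round(d);
	/* !(a < b) is also true when a is NaN, so NaN saturates as well. */
	if (!(rounded < (double)UINT64_MAX)) {
		return UINT64_MAX;
	}
	return (uint64_t)rounded;
}

int
main(void) {
	printf("%llu\n", (unsigned long long)clamp_to_u64(3.6));  /* 4 */
	printf("%llu\n", (unsigned long long)clamp_to_u64(1e30)); /* UINT64_MAX */
	printf("%llu\n", (unsigned long long)clamp_to_u64(NAN));  /* UINT64_MAX */
	return 0;
}
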
+
+void prof_unbias_map_init() {
+ /* See the comment in prof_sample_new_event_wait */
+#ifdef JEMALLOC_PROF
+ for (szind_t i = 0; i < SC_NSIZES; i++) {
+ double sz = (double)sz_index2size(i);
+ double rate = (double)(ZU(1) << lg_prof_sample);
+ double div_val = 1.0 - exp(-sz / rate);
+ double unbiased_sz = sz / div_val;
+ /*
+ * The "true" right value for the unbiased count is
+ * 1.0/(1 - exp(-sz/rate)). The problem is, we keep the counts
+ * as integers (for a variety of reasons -- rounding errors
+ * could trigger asserts, and not all libcs can properly handle
+ * floating point arithmetic during malloc calls inside libc).
+ * Rounding to an integer, though, can lead to rounding errors
+ * of over 30% for sizes close to the sampling rate. So
+ * instead, we multiply by a constant, dividing the maximum
+ * possible roundoff error by that constant. To avoid overflow
+ * in summing up size_t values, the largest safe constant we can
+ * pick is the size of the smallest allocation.
+ */
+ double cnt_shift = (double)(ZU(1) << SC_LG_TINY_MIN);
+ double shifted_unbiased_cnt = cnt_shift / div_val;
+ prof_unbiased_sz[i] = (size_t)round(unbiased_sz);
+ prof_shifted_unbiased_cnt[i] = (size_t)round(
+ shifted_unbiased_cnt);
+ }
+#else
+ unreachable();
+#endif
+}
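
For intuition, a standalone sketch of the same arithmetic (not jemalloc code;
the 2^19-byte sampling rate is only an assumption for the example): an
allocation whose size equals the sampling rate is sampled with probability
1 - exp(-1) ~= 0.632, so each such sample stands in for roughly 1.58 real
allocations and roughly 1.58x its size in bytes.

#include <math.h>
#include <stdio.h>

int
main(void) {
	double rate = (double)(1 << 19);  /* assumed sampling rate, in bytes */
	double sz = rate;                 /* allocation size equal to the rate */
	double p = 1.0 - exp(-sz / rate); /* chance such an alloc gets sampled */
	printf("unbiased count per sample: %.3f\n", 1.0 / p); /* ~1.582 */
	printf("unbiased bytes per sample: %.0f\n", sz / p);  /* ~1.582 * sz */
	return 0;
}
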
+
+/*
+ * The unbiasing story is long. The jeprof unbiasing logic was copied from
+ * pprof. Both shared an issue: they unbiased using the average size of the
+ * allocations at a particular stack trace. This can work out OK if allocations
+ * are mostly of the same size given some stack, but not otherwise. We now
+ * internally track what the unbiased results ought to be. We can't just report
+ * them as they are though; they'll still go through the jeprof unbiasing
+ * process. Instead, we figure out what values we can feed *into* jeprof's
+ * unbiasing mechanism that will lead to getting the right values out.
+ *
+ * It'll unbias count and aggregate size as:
+ *
+ *   c_out = c_in * 1/(1-exp(-s_in/c_in/R))
+ *   s_out = s_in * 1/(1-exp(-s_in/c_in/R))
+ *
+ * We want to solve for the values of c_in and s_in that will
+ * give the c_out and s_out that we've computed internally.
+ *
+ * Let's do a change of variables (both to make the math easier and to make it
+ * easier to write):
+ * x = s_in / c_in
+ * y = s_in
+ * k = 1/R.
+ *
+ * Then
+ * c_out = y/x * 1/(1-exp(-k*x))
+ * s_out = y * 1/(1-exp(-k*x))
+ *
+ * The first equation gives:
+ * y = x * c_out * (1-exp(-k*x))
+ * The second gives:
+ * y = s_out * (1-exp(-k*x))
+ * So we have
+ * x = s_out / c_out.
+ * And all the other values fall out from that.
+ *
+ * This is all a fair bit of work. The thing we get out of it is that we don't
+ * break backwards compatibility with jeprof (and the various tools that have
+ * copied its unbiasing logic). Eventually, we anticipate a v3 heap profile
+ * dump format based on JSON, at which point I think much of this logic can get
+ * cleaned up (since we'll be taking a compatibility break there anyways).
+ */
+static void
+prof_do_unbias(uint64_t c_out_shifted_i, uint64_t s_out_i, uint64_t *r_c_in,
+ uint64_t *r_s_in) {
+#ifdef JEMALLOC_PROF
+ if (c_out_shifted_i == 0 || s_out_i == 0) {
+ *r_c_in = 0;
+ *r_s_in = 0;
+ return;
+ }
+ /*
+ * See the note in prof_unbias_map_init() to see why we take c_out in a
+ * shifted form.
+ */
+ double c_out = (double)c_out_shifted_i
+ / (double)(ZU(1) << SC_LG_TINY_MIN);
+ double s_out = (double)s_out_i;
+ double R = (double)(ZU(1) << lg_prof_sample);
+
+ double x = s_out / c_out;
+ double y = s_out * (1.0 - exp(-x / R));
+
+ double c_in = y / x;
+ double s_in = y;
+
+ *r_c_in = prof_double_uint64_cast(c_in);
+ *r_s_in = prof_double_uint64_cast(s_in);
+#else
+ unreachable();
+#endif
+}
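
A quick standalone round-trip check of this inversion (made-up numbers, not
jemalloc code): pick target outputs, derive the inputs exactly as in the
comment above, and push them back through jeprof's formula; the targets are
recovered up to floating-point error.

#include <math.h>
#include <stdio.h>

int
main(void) {
	double R = (double)(1 << 19);      /* assumed sampling rate */
	double c_out = 100.0, s_out = 4e6; /* desired unbiased outputs */

	/* Invert, following the derivation in the comment above. */
	double x = s_out / c_out;
	double y = s_out * (1.0 - exp(-x / R));
	double c_in = y / x, s_in = y;

	/* Feed the derived inputs through jeprof's unbiasing formula. */
	double scale = 1.0 / (1.0 - exp(-(s_in / c_in) / R));
	printf("c_out: want %.1f, got %.1f\n", c_out, c_in * scale);
	printf("s_out: want %.1f, got %.1f\n", s_out, s_in * scale);
	return 0;
}
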
+
+static void
+prof_dump_print_cnts(write_cb_t *prof_dump_write, void *cbopaque,
+ const prof_cnt_t *cnts) {
+ uint64_t curobjs;
+ uint64_t curbytes;
+ uint64_t accumobjs;
+ uint64_t accumbytes;
+ if (opt_prof_unbias) {
+ prof_do_unbias(cnts->curobjs_shifted_unbiased,
+ cnts->curbytes_unbiased, &curobjs, &curbytes);
+ prof_do_unbias(cnts->accumobjs_shifted_unbiased,
+ cnts->accumbytes_unbiased, &accumobjs, &accumbytes);
+ } else {
+ curobjs = cnts->curobjs;
+ curbytes = cnts->curbytes;
+ accumobjs = cnts->accumobjs;
+ accumbytes = cnts->accumbytes;
+ }
+ prof_dump_printf(prof_dump_write, cbopaque,
+ "%"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]",
+ curobjs, curbytes, accumobjs, accumbytes);
+}
+
+static void
+prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) {
+ malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
+
+ malloc_mutex_lock(tsdn, tctx->gctx->lock);
+
+ switch (tctx->state) {
+ case prof_tctx_state_initializing:
+ malloc_mutex_unlock(tsdn, tctx->gctx->lock);
+ return;
+ case prof_tctx_state_nominal:
+ tctx->state = prof_tctx_state_dumping;
+ malloc_mutex_unlock(tsdn, tctx->gctx->lock);
+
+ memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
+
+ tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
+ tdata->cnt_summed.curobjs_shifted_unbiased
+ += tctx->dump_cnts.curobjs_shifted_unbiased;
+ tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
+ tdata->cnt_summed.curbytes_unbiased
+ += tctx->dump_cnts.curbytes_unbiased;
+ if (opt_prof_accum) {
+ tdata->cnt_summed.accumobjs +=
+ tctx->dump_cnts.accumobjs;
+ tdata->cnt_summed.accumobjs_shifted_unbiased +=
+ tctx->dump_cnts.accumobjs_shifted_unbiased;
+ tdata->cnt_summed.accumbytes +=
+ tctx->dump_cnts.accumbytes;
+ tdata->cnt_summed.accumbytes_unbiased +=
+ tctx->dump_cnts.accumbytes_unbiased;
+ }
+ break;
+ case prof_tctx_state_dumping:
+ case prof_tctx_state_purgatory:
+ not_reached();
+ }
+}
+
+static void
+prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) {
+ malloc_mutex_assert_owner(tsdn, gctx->lock);
+
+ gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
+ gctx->cnt_summed.curobjs_shifted_unbiased
+ += tctx->dump_cnts.curobjs_shifted_unbiased;
+ gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
+ gctx->cnt_summed.curbytes_unbiased += tctx->dump_cnts.curbytes_unbiased;
+ if (opt_prof_accum) {
+ gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs;
+ gctx->cnt_summed.accumobjs_shifted_unbiased
+ += tctx->dump_cnts.accumobjs_shifted_unbiased;
+ gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes;
+ gctx->cnt_summed.accumbytes_unbiased
+ += tctx->dump_cnts.accumbytes_unbiased;
+ }
+}
+
+static prof_tctx_t *
+prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
+ tsdn_t *tsdn = (tsdn_t *)arg;
+
+ malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
+
+ switch (tctx->state) {
+ case prof_tctx_state_nominal:
+ /* New since dumping started; ignore. */
+ break;
+ case prof_tctx_state_dumping:
+ case prof_tctx_state_purgatory:
+ prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx);
+ break;
+ default:
+ not_reached();
+ }
+
+ return NULL;
+}
+
+typedef struct prof_dump_iter_arg_s prof_dump_iter_arg_t;
+struct prof_dump_iter_arg_s {
+ tsdn_t *tsdn;
+ write_cb_t *prof_dump_write;
+ void *cbopaque;
+};
+
+static prof_tctx_t *
+prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) {
+ prof_dump_iter_arg_t *arg = (prof_dump_iter_arg_t *)opaque;
+ malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock);
+
+ switch (tctx->state) {
+ case prof_tctx_state_initializing:
+ case prof_tctx_state_nominal:
+ /* Not captured by this dump. */
+ break;
+ case prof_tctx_state_dumping:
+ case prof_tctx_state_purgatory:
+ prof_dump_printf(arg->prof_dump_write, arg->cbopaque,
+ " t%"FMTu64": ", tctx->thr_uid);
+ prof_dump_print_cnts(arg->prof_dump_write, arg->cbopaque,
+ &tctx->dump_cnts);
+ arg->prof_dump_write(arg->cbopaque, "\n");
+ break;
+ default:
+ not_reached();
+ }
+ return NULL;
+}
+
+static prof_tctx_t *
+prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
+ tsdn_t *tsdn = (tsdn_t *)arg;
+ prof_tctx_t *ret;
+
+ malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
+
+ switch (tctx->state) {
+ case prof_tctx_state_nominal:
+ /* New since dumping started; ignore. */
+ break;
+ case prof_tctx_state_dumping:
+ tctx->state = prof_tctx_state_nominal;
+ break;
+ case prof_tctx_state_purgatory:
+ ret = tctx;
+ goto label_return;
+ default:
+ not_reached();
+ }
+
+ ret = NULL;
+label_return:
+ return ret;
+}
+
+static void
+prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) {
+ cassert(config_prof);
+
+ malloc_mutex_lock(tsdn, gctx->lock);
+
+ /*
+ * Increment nlimbo so that gctx won't go away before dump.
+ * Additionally, link gctx into the dump list so that it is included in
+ * prof_dump()'s second pass.
+ */
+ gctx->nlimbo++;
+ gctx_tree_insert(gctxs, gctx);
+
+ memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));
+
+ malloc_mutex_unlock(tsdn, gctx->lock);
+}
+
+typedef struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg_t;
+struct prof_gctx_merge_iter_arg_s {
+ tsdn_t *tsdn;
+ size_t *leak_ngctx;
+};
+
+static prof_gctx_t *
+prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
+ prof_gctx_merge_iter_arg_t *arg = (prof_gctx_merge_iter_arg_t *)opaque;
+
+ malloc_mutex_lock(arg->tsdn, gctx->lock);
+ tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter,
+ (void *)arg->tsdn);
+ if (gctx->cnt_summed.curobjs != 0) {
+ (*arg->leak_ngctx)++;
+ }
+ malloc_mutex_unlock(arg->tsdn, gctx->lock);
+
+ return NULL;
+}
+
+static void
+prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) {
+ prof_tdata_t *tdata = prof_tdata_get(tsd, false);
+ prof_gctx_t *gctx;
+
+ /*
+ * Standard tree iteration won't work here, because as soon as we
+ * decrement gctx->nlimbo and unlock gctx, another thread can
+ * concurrently destroy it, which will corrupt the tree. Therefore,
+ * tear down the tree one node at a time during iteration.
+ */
+ while ((gctx = gctx_tree_first(gctxs)) != NULL) {
+ gctx_tree_remove(gctxs, gctx);
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ {
+ prof_tctx_t *next;
+
+ next = NULL;
+ do {
+ prof_tctx_t *to_destroy =
+ tctx_tree_iter(&gctx->tctxs, next,
+ prof_tctx_finish_iter,
+ (void *)tsd_tsdn(tsd));
+ if (to_destroy != NULL) {
+ next = tctx_tree_next(&gctx->tctxs,
+ to_destroy);
+ tctx_tree_remove(&gctx->tctxs,
+ to_destroy);
+ idalloctm(tsd_tsdn(tsd), to_destroy,
+ NULL, NULL, true, true);
+ } else {
+ next = NULL;
+ }
+ } while (next != NULL);
+ }
+ gctx->nlimbo--;
+ if (prof_gctx_should_destroy(gctx)) {
+ gctx->nlimbo++;
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ prof_gctx_try_destroy(tsd, tdata, gctx);
+ } else {
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ }
+ }
+}
+
+typedef struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg_t;
+struct prof_tdata_merge_iter_arg_s {
+ tsdn_t *tsdn;
+ prof_cnt_t *cnt_all;
+};
+
+static prof_tdata_t *
+prof_tdata_merge_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
+ void *opaque) {
+ prof_tdata_merge_iter_arg_t *arg =
+ (prof_tdata_merge_iter_arg_t *)opaque;
+
+ malloc_mutex_lock(arg->tsdn, tdata->lock);
+ if (!tdata->expired) {
+ size_t tabind;
+ union {
+ prof_tctx_t *p;
+ void *v;
+ } tctx;
+
+ tdata->dumping = true;
+ memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
+ for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
+ &tctx.v);) {
+ prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata);
+ }
+
+ arg->cnt_all->curobjs += tdata->cnt_summed.curobjs;
+ arg->cnt_all->curobjs_shifted_unbiased
+ += tdata->cnt_summed.curobjs_shifted_unbiased;
+ arg->cnt_all->curbytes += tdata->cnt_summed.curbytes;
+ arg->cnt_all->curbytes_unbiased
+ += tdata->cnt_summed.curbytes_unbiased;
+ if (opt_prof_accum) {
+ arg->cnt_all->accumobjs += tdata->cnt_summed.accumobjs;
+ arg->cnt_all->accumobjs_shifted_unbiased
+ += tdata->cnt_summed.accumobjs_shifted_unbiased;
+ arg->cnt_all->accumbytes +=
+ tdata->cnt_summed.accumbytes;
+ arg->cnt_all->accumbytes_unbiased +=
+ tdata->cnt_summed.accumbytes_unbiased;
+ }
+ } else {
+ tdata->dumping = false;
+ }
+ malloc_mutex_unlock(arg->tsdn, tdata->lock);
+
+ return NULL;
+}
+
+static prof_tdata_t *
+prof_tdata_dump_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
+ void *opaque) {
+ if (!tdata->dumping) {
+ return NULL;
+ }
+
+ prof_dump_iter_arg_t *arg = (prof_dump_iter_arg_t *)opaque;
+ prof_dump_printf(arg->prof_dump_write, arg->cbopaque, " t%"FMTu64": ",
+ tdata->thr_uid);
+ prof_dump_print_cnts(arg->prof_dump_write, arg->cbopaque,
+ &tdata->cnt_summed);
+ if (tdata->thread_name != NULL) {
+ arg->prof_dump_write(arg->cbopaque, " ");
+ arg->prof_dump_write(arg->cbopaque, tdata->thread_name);
+ }
+ arg->prof_dump_write(arg->cbopaque, "\n");
+ return NULL;
+}
+
+static void
+prof_dump_header(prof_dump_iter_arg_t *arg, const prof_cnt_t *cnt_all) {
+ prof_dump_printf(arg->prof_dump_write, arg->cbopaque,
+ "heap_v2/%"FMTu64"\n t*: ", ((uint64_t)1U << lg_prof_sample));
+ prof_dump_print_cnts(arg->prof_dump_write, arg->cbopaque, cnt_all);
+ arg->prof_dump_write(arg->cbopaque, "\n");
+
+ malloc_mutex_lock(arg->tsdn, &tdatas_mtx);
+ tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter, arg);
+ malloc_mutex_unlock(arg->tsdn, &tdatas_mtx);
+}
+
+static void
+prof_dump_gctx(prof_dump_iter_arg_t *arg, prof_gctx_t *gctx,
+ const prof_bt_t *bt, prof_gctx_tree_t *gctxs) {
+ cassert(config_prof);
+ malloc_mutex_assert_owner(arg->tsdn, gctx->lock);
+
+ /* Avoid dumping such gctx's that have no useful data. */
+ if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
+ (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) {
+ assert(gctx->cnt_summed.curobjs == 0);
+ assert(gctx->cnt_summed.curbytes == 0);
+ /*
+ * These asserts would not be correct -- see the comment on races
+ * in prof.c
+ * assert(gctx->cnt_summed.curobjs_unbiased == 0);
+ * assert(gctx->cnt_summed.curbytes_unbiased == 0);
+ */
+ assert(gctx->cnt_summed.accumobjs == 0);
+ assert(gctx->cnt_summed.accumobjs_shifted_unbiased == 0);
+ assert(gctx->cnt_summed.accumbytes == 0);
+ assert(gctx->cnt_summed.accumbytes_unbiased == 0);
+ return;
+ }
+
+ arg->prof_dump_write(arg->cbopaque, "@");
+ for (unsigned i = 0; i < bt->len; i++) {
+ prof_dump_printf(arg->prof_dump_write, arg->cbopaque,
+ " %#"FMTxPTR, (uintptr_t)bt->vec[i]);
+ }
+
+ arg->prof_dump_write(arg->cbopaque, "\n t*: ");
+ prof_dump_print_cnts(arg->prof_dump_write, arg->cbopaque,
+ &gctx->cnt_summed);
+ arg->prof_dump_write(arg->cbopaque, "\n");
+
+ tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter, arg);
+}
+
+/*
+ * See prof_sample_new_event_wait() comment for why the body of this function
+ * is conditionally compiled.
+ */
+static void
+prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx) {
+#ifdef JEMALLOC_PROF
+ /*
+	 * Scaling is equivalent to AdjustSamples() in jeprof, but results may
+ * differ slightly from what jeprof reports, because here we scale the
+ * summary values, whereas jeprof scales each context individually and
+ * reports the sums of the scaled values.
+ */
+ if (cnt_all->curbytes != 0) {
+ double sample_period = (double)((uint64_t)1 << lg_prof_sample);
+ double ratio = (((double)cnt_all->curbytes) /
+ (double)cnt_all->curobjs) / sample_period;
+ double scale_factor = 1.0 / (1.0 - exp(-ratio));
+ uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes)
+ * scale_factor);
+ uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) *
+ scale_factor);
+
+ malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64
+ " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n",
+ curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs !=
+ 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : "");
+ malloc_printf(
+ "<jemalloc>: Run jeprof on dump output for leak detail\n");
+ if (opt_prof_leak_error) {
+ malloc_printf(
+ "<jemalloc>: Exiting with error code because memory"
+ " leaks were detected\n");
+ /*
+ * Use _exit() with underscore to avoid calling atexit()
+ * and entering endless cycle.
+ */
+ _exit(1);
+ }
+ }
+#endif
+}
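
A worked standalone example of that scaling (made-up totals, not jemalloc
code): with a 512 KiB sampling period and sampled totals of 100 objects /
6.25 MiB, the ratio is 0.125 and the scale factor about 8.5, so the summary
would report roughly 850 objects and ~53 MiB.

#include <math.h>
#include <stdio.h>

int
main(void) {
	double sample_period = 512.0 * 1024;   /* assumed 2^lg_prof_sample */
	double curobjs = 100.0;
	double curbytes = 100.0 * 64.0 * 1024; /* avg sampled object: 64 KiB */
	double ratio = (curbytes / curobjs) / sample_period;
	double scale_factor = 1.0 / (1.0 - exp(-ratio));
	printf("~%.0f bytes, ~%.0f objects\n",
	    curbytes * scale_factor, curobjs * scale_factor);
	return 0;
}
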
+
+static prof_gctx_t *
+prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
+ prof_dump_iter_arg_t *arg = (prof_dump_iter_arg_t *)opaque;
+ malloc_mutex_lock(arg->tsdn, gctx->lock);
+ prof_dump_gctx(arg, gctx, &gctx->bt, gctxs);
+ malloc_mutex_unlock(arg->tsdn, gctx->lock);
+ return NULL;
+}
+
+static void
+prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata, prof_cnt_t *cnt_all,
+ size_t *leak_ngctx, prof_gctx_tree_t *gctxs) {
+ size_t tabind;
+ union {
+ prof_gctx_t *p;
+ void *v;
+ } gctx;
+
+ prof_enter(tsd, tdata);
+
+ /*
+ * Put gctx's in limbo and clear their counters in preparation for
+ * summing.
+ */
+ gctx_tree_new(gctxs);
+ for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) {
+ prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs);
+ }
+
+ /*
+ * Iterate over tdatas, and for the non-expired ones snapshot their tctx
+ * stats and merge them into the associated gctx's.
+ */
+ memset(cnt_all, 0, sizeof(prof_cnt_t));
+ prof_tdata_merge_iter_arg_t prof_tdata_merge_iter_arg = {tsd_tsdn(tsd),
+ cnt_all};
+ malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
+ tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter,
+ &prof_tdata_merge_iter_arg);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
+
+ /* Merge tctx stats into gctx's. */
+ *leak_ngctx = 0;
+ prof_gctx_merge_iter_arg_t prof_gctx_merge_iter_arg = {tsd_tsdn(tsd),
+ leak_ngctx};
+ gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter,
+ &prof_gctx_merge_iter_arg);
+
+ prof_leave(tsd, tdata);
+}
+
+void
+prof_dump_impl(tsd_t *tsd, write_cb_t *prof_dump_write, void *cbopaque,
+ prof_tdata_t *tdata, bool leakcheck) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_dump_mtx);
+ prof_cnt_t cnt_all;
+ size_t leak_ngctx;
+ prof_gctx_tree_t gctxs;
+ prof_dump_prep(tsd, tdata, &cnt_all, &leak_ngctx, &gctxs);
+ prof_dump_iter_arg_t prof_dump_iter_arg = {tsd_tsdn(tsd),
+ prof_dump_write, cbopaque};
+ prof_dump_header(&prof_dump_iter_arg, &cnt_all);
+ gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter, &prof_dump_iter_arg);
+ prof_gctx_finish(tsd, &gctxs);
+ if (leakcheck) {
+ prof_leakcheck(&cnt_all, leak_ngctx);
+ }
+}
+
+/* Used in unit tests. */
+void
+prof_cnt_all(prof_cnt_t *cnt_all) {
+ tsd_t *tsd = tsd_fetch();
+ prof_tdata_t *tdata = prof_tdata_get(tsd, false);
+ if (tdata == NULL) {
+ memset(cnt_all, 0, sizeof(prof_cnt_t));
+ } else {
+ size_t leak_ngctx;
+ prof_gctx_tree_t gctxs;
+ prof_dump_prep(tsd, tdata, cnt_all, &leak_ngctx, &gctxs);
+ prof_gctx_finish(tsd, &gctxs);
+ }
+}
+
+void
+prof_bt_hash(const void *key, size_t r_hash[2]) {
+ prof_bt_t *bt = (prof_bt_t *)key;
+
+ cassert(config_prof);
+
+ hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash);
+}
+
+bool
+prof_bt_keycomp(const void *k1, const void *k2) {
+ const prof_bt_t *bt1 = (prof_bt_t *)k1;
+ const prof_bt_t *bt2 = (prof_bt_t *)k2;
+
+ cassert(config_prof);
+
+ if (bt1->len != bt2->len) {
+ return false;
+ }
+ return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
+}
+
+prof_tdata_t *
+prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
+ char *thread_name, bool active) {
+ assert(tsd_reentrancy_level_get(tsd) == 0);
+
+ prof_tdata_t *tdata;
+
+ cassert(config_prof);
+
+ /* Initialize an empty cache for this thread. */
+ tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t),
+ sz_size2index(sizeof(prof_tdata_t)), false, NULL, true,
+ arena_get(TSDN_NULL, 0, true), true);
+ if (tdata == NULL) {
+ return NULL;
+ }
+
+ tdata->lock = prof_tdata_mutex_choose(thr_uid);
+ tdata->thr_uid = thr_uid;
+ tdata->thr_discrim = thr_discrim;
+ tdata->thread_name = thread_name;
+ tdata->attached = true;
+ tdata->expired = false;
+ tdata->tctx_uid_next = 0;
+
+ if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash,
+ prof_bt_keycomp)) {
+ idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true);
+ return NULL;
+ }
+
+ tdata->enq = false;
+ tdata->enq_idump = false;
+ tdata->enq_gdump = false;
+
+ tdata->dumping = false;
+ tdata->active = active;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
+ tdata_tree_insert(&tdatas, tdata);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
+
+ return tdata;
+}
+
+static bool
+prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) {
+ if (tdata->attached && !even_if_attached) {
+ return false;
+ }
+ if (ckh_count(&tdata->bt2tctx) != 0) {
+ return false;
+ }
+ return true;
+}
+
+static bool
+prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
+ bool even_if_attached) {
+ malloc_mutex_assert_owner(tsdn, tdata->lock);
+
+ return prof_tdata_should_destroy_unlocked(tdata, even_if_attached);
+}
+
+static void
+prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
+ bool even_if_attached) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx);
+ malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tdata->lock);
+
+ tdata_tree_remove(&tdatas, tdata);
+
+ assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
+
+ if (tdata->thread_name != NULL) {
+ idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true,
+ true);
+ }
+ ckh_delete(tsd, &tdata->bt2tctx);
+ idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true);
+}
+
+static void
+prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) {
+ malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
+ prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
+}
+
+void
+prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) {
+ bool destroy_tdata;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
+ if (tdata->attached) {
+ destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata,
+ true);
+ /*
+ * Only detach if !destroy_tdata, because detaching would allow
+ * another thread to win the race to destroy tdata.
+ */
+ if (!destroy_tdata) {
+ tdata->attached = false;
+ }
+ tsd_prof_tdata_set(tsd, NULL);
+ } else {
+ destroy_tdata = false;
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+ if (destroy_tdata) {
+ prof_tdata_destroy(tsd, tdata, true);
+ }
+}
+
+static bool
+prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) {
+ bool destroy_tdata;
+
+ malloc_mutex_lock(tsdn, tdata->lock);
+ if (!tdata->expired) {
+ tdata->expired = true;
+ destroy_tdata = prof_tdata_should_destroy(tsdn, tdata, false);
+ } else {
+ destroy_tdata = false;
+ }
+ malloc_mutex_unlock(tsdn, tdata->lock);
+
+ return destroy_tdata;
+}
+
+static prof_tdata_t *
+prof_tdata_reset_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
+ void *arg) {
+ tsdn_t *tsdn = (tsdn_t *)arg;
+
+ return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL);
+}
+
+void
+prof_reset(tsd_t *tsd, size_t lg_sample) {
+ prof_tdata_t *next;
+
+ assert(lg_sample < (sizeof(uint64_t) << 3));
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
+
+ lg_prof_sample = lg_sample;
+ prof_unbias_map_init();
+
+ next = NULL;
+ do {
+ prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
+ prof_tdata_reset_iter, (void *)tsd);
+ if (to_destroy != NULL) {
+ next = tdata_tree_next(&tdatas, to_destroy);
+ prof_tdata_destroy_locked(tsd, to_destroy, false);
+ } else {
+ next = NULL;
+ }
+ } while (next != NULL);
+
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
+}
+
+static bool
+prof_tctx_should_destroy(tsd_t *tsd, prof_tctx_t *tctx) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
+
+ if (opt_prof_accum) {
+ return false;
+ }
+ if (tctx->cnts.curobjs != 0) {
+ return false;
+ }
+ if (tctx->prepared) {
+ return false;
+ }
+ if (tctx->recent_count != 0) {
+ return false;
+ }
+ return true;
+}
+
+static void
+prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
+
+ assert(tctx->cnts.curobjs == 0);
+ assert(tctx->cnts.curbytes == 0);
+ /*
+ * These asserts are not correct -- see the comment about races in
+ * prof.c
+ *
+ * assert(tctx->cnts.curobjs_shifted_unbiased == 0);
+ * assert(tctx->cnts.curbytes_unbiased == 0);
+ */
+ assert(!opt_prof_accum);
+ assert(tctx->cnts.accumobjs == 0);
+ assert(tctx->cnts.accumbytes == 0);
+ /*
+	 * These ones are correct, since accum counts never go down.  Either
+ * prof_accum is off (in which case these should never have changed from
+ * their initial value of zero), or it's on (in which case we shouldn't
+ * be destroying this tctx).
+ */
+ assert(tctx->cnts.accumobjs_shifted_unbiased == 0);
+ assert(tctx->cnts.accumbytes_unbiased == 0);
+
+ prof_gctx_t *gctx = tctx->gctx;
+
+ {
+ prof_tdata_t *tdata = tctx->tdata;
+ tctx->tdata = NULL;
+ ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
+ bool destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd),
+ tdata, false);
+ malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+ if (destroy_tdata) {
+ prof_tdata_destroy(tsd, tdata, false);
+ }
+ }
+
+ bool destroy_tctx, destroy_gctx;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ switch (tctx->state) {
+ case prof_tctx_state_nominal:
+ tctx_tree_remove(&gctx->tctxs, tctx);
+ destroy_tctx = true;
+ if (prof_gctx_should_destroy(gctx)) {
+ /*
+ * Increment gctx->nlimbo in order to keep another
+ * thread from winning the race to destroy gctx while
+ * this one has gctx->lock dropped. Without this, it
+ * would be possible for another thread to:
+ *
+ * 1) Sample an allocation associated with gctx.
+ * 2) Deallocate the sampled object.
+ * 3) Successfully prof_gctx_try_destroy(gctx).
+ *
+ * The result would be that gctx no longer exists by the
+ * time this thread accesses it in
+ * prof_gctx_try_destroy().
+ */
+ gctx->nlimbo++;
+ destroy_gctx = true;
+ } else {
+ destroy_gctx = false;
+ }
+ break;
+ case prof_tctx_state_dumping:
+ /*
+ * A dumping thread needs tctx to remain valid until dumping
+ * has finished. Change state such that the dumping thread will
+ * complete destruction during a late dump iteration phase.
+ */
+ tctx->state = prof_tctx_state_purgatory;
+ destroy_tctx = false;
+ destroy_gctx = false;
+ break;
+ default:
+ not_reached();
+ destroy_tctx = false;
+ destroy_gctx = false;
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ if (destroy_gctx) {
+ prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx);
+ }
+ if (destroy_tctx) {
+ idalloctm(tsd_tsdn(tsd), tctx, NULL, NULL, true, true);
+ }
+}
+
+void
+prof_tctx_try_destroy(tsd_t *tsd, prof_tctx_t *tctx) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
+ if (prof_tctx_should_destroy(tsd, tctx)) {
+ /* tctx->tdata->lock will be released in prof_tctx_destroy(). */
+ prof_tctx_destroy(tsd, tctx);
+ } else {
+ malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
+ }
+}
+
+/******************************************************************************/
diff --git a/deps/jemalloc/src/prof_log.c b/deps/jemalloc/src/prof_log.c
new file mode 100644
index 000000000..0632c3b37
--- /dev/null
+++ b/deps/jemalloc/src/prof_log.c
@@ -0,0 +1,717 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/buf_writer.h"
+#include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/emitter.h"
+#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/malloc_io.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/prof_data.h"
+#include "jemalloc/internal/prof_log.h"
+#include "jemalloc/internal/prof_sys.h"
+
+bool opt_prof_log = false;
+typedef enum prof_logging_state_e prof_logging_state_t;
+enum prof_logging_state_e {
+ prof_logging_state_stopped,
+ prof_logging_state_started,
+ prof_logging_state_dumping
+};
+
+/*
+ * - stopped: log_start never called, or previous log_stop has completed.
+ * - started: log_start called, log_stop not called yet. Allocations are logged.
+ * - dumping: log_stop called but not finished; samples are not logged anymore.
+ */
+prof_logging_state_t prof_logging_state = prof_logging_state_stopped;
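
The three states form a one-way cycle; a standalone sketch of the allowed
transitions (illustrative names, not jemalloc code):

#include <stdbool.h>

enum log_state { STOPPED, STARTED, DUMPING };

static bool
log_transition_ok(enum log_state from, enum log_state to) {
	return (from == STOPPED && to == STARTED)  /* prof_log_start() */
	    || (from == STARTED && to == DUMPING)  /* prof_log_stop() begins */
	    || (from == DUMPING && to == STOPPED); /* prof_log_stop() finishes */
}
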
+
+/* Used in unit tests. */
+static bool prof_log_dummy = false;
+
+/* Incremented for every log file that is output. */
+static uint64_t log_seq = 0;
+static char log_filename[
+ /* Minimize memory bloat for non-prof builds. */
+#ifdef JEMALLOC_PROF
+ PATH_MAX +
+#endif
+ 1];
+
+/* Timestamp for most recent call to log_start(). */
+static nstime_t log_start_timestamp;
+
+/* Increment these when adding to the log_bt and log_thr linked lists. */
+static size_t log_bt_index = 0;
+static size_t log_thr_index = 0;
+
+/* Linked list node definitions. These are only used in this file. */
+typedef struct prof_bt_node_s prof_bt_node_t;
+
+struct prof_bt_node_s {
+ prof_bt_node_t *next;
+ size_t index;
+ prof_bt_t bt;
+ /* Variable size backtrace vector pointed to by bt. */
+ void *vec[1];
+};
+
+typedef struct prof_thr_node_s prof_thr_node_t;
+
+struct prof_thr_node_s {
+ prof_thr_node_t *next;
+ size_t index;
+ uint64_t thr_uid;
+ /* Variable size based on thr_name_sz. */
+ char name[1];
+};
+
+typedef struct prof_alloc_node_s prof_alloc_node_t;
+
+/* This is output when logging sampled allocations. */
+struct prof_alloc_node_s {
+ prof_alloc_node_t *next;
+ /* Indices into an array of thread data. */
+ size_t alloc_thr_ind;
+ size_t free_thr_ind;
+
+ /* Indices into an array of backtraces. */
+ size_t alloc_bt_ind;
+ size_t free_bt_ind;
+
+ uint64_t alloc_time_ns;
+ uint64_t free_time_ns;
+
+ size_t usize;
+};
+
+/*
+ * Created on the first call to prof_try_log and deleted on prof_log_stop.
+ * These are the backtraces and threads that have already been logged by an
+ * allocation.
+ */
+static bool log_tables_initialized = false;
+static ckh_t log_bt_node_set;
+static ckh_t log_thr_node_set;
+
+/* Store linked lists for logged data. */
+static prof_bt_node_t *log_bt_first = NULL;
+static prof_bt_node_t *log_bt_last = NULL;
+static prof_thr_node_t *log_thr_first = NULL;
+static prof_thr_node_t *log_thr_last = NULL;
+static prof_alloc_node_t *log_alloc_first = NULL;
+static prof_alloc_node_t *log_alloc_last = NULL;
+
+/* Protects the prof_logging_state and any log_{...} variable. */
+malloc_mutex_t log_mtx;
+
+/******************************************************************************/
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
+
+/* Hashtable functions for log_bt_node_set and log_thr_node_set. */
+static void prof_thr_node_hash(const void *key, size_t r_hash[2]);
+static bool prof_thr_node_keycomp(const void *k1, const void *k2);
+static void prof_bt_node_hash(const void *key, size_t r_hash[2]);
+static bool prof_bt_node_keycomp(const void *k1, const void *k2);
+
+/******************************************************************************/
+
+static size_t
+prof_log_bt_index(tsd_t *tsd, prof_bt_t *bt) {
+ assert(prof_logging_state == prof_logging_state_started);
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx);
+
+ prof_bt_node_t dummy_node;
+ dummy_node.bt = *bt;
+ prof_bt_node_t *node;
+
+ /* See if this backtrace is already cached in the table. */
+ if (ckh_search(&log_bt_node_set, (void *)(&dummy_node),
+ (void **)(&node), NULL)) {
+ size_t sz = offsetof(prof_bt_node_t, vec) +
+ (bt->len * sizeof(void *));
+ prof_bt_node_t *new_node = (prof_bt_node_t *)
+ iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL,
+ true, arena_get(TSDN_NULL, 0, true), true);
+ if (log_bt_first == NULL) {
+ log_bt_first = new_node;
+ log_bt_last = new_node;
+ } else {
+ log_bt_last->next = new_node;
+ log_bt_last = new_node;
+ }
+
+ new_node->next = NULL;
+ new_node->index = log_bt_index;
+ /*
+ * Copy the backtrace: bt is inside a tdata or gctx, which
+ * might die before prof_log_stop is called.
+ */
+ new_node->bt.len = bt->len;
+ memcpy(new_node->vec, bt->vec, bt->len * sizeof(void *));
+ new_node->bt.vec = new_node->vec;
+
+ log_bt_index++;
+ ckh_insert(tsd, &log_bt_node_set, (void *)new_node, NULL);
+ return new_node->index;
+ } else {
+ return node->index;
+ }
+}
+
+static size_t
+prof_log_thr_index(tsd_t *tsd, uint64_t thr_uid, const char *name) {
+ assert(prof_logging_state == prof_logging_state_started);
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx);
+
+ prof_thr_node_t dummy_node;
+ dummy_node.thr_uid = thr_uid;
+ prof_thr_node_t *node;
+
+ /* See if this thread is already cached in the table. */
+ if (ckh_search(&log_thr_node_set, (void *)(&dummy_node),
+ (void **)(&node), NULL)) {
+ size_t sz = offsetof(prof_thr_node_t, name) + strlen(name) + 1;
+ prof_thr_node_t *new_node = (prof_thr_node_t *)
+ iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL,
+ true, arena_get(TSDN_NULL, 0, true), true);
+ if (log_thr_first == NULL) {
+ log_thr_first = new_node;
+ log_thr_last = new_node;
+ } else {
+ log_thr_last->next = new_node;
+ log_thr_last = new_node;
+ }
+
+ new_node->next = NULL;
+ new_node->index = log_thr_index;
+ new_node->thr_uid = thr_uid;
+ strcpy(new_node->name, name);
+
+ log_thr_index++;
+ ckh_insert(tsd, &log_thr_node_set, (void *)new_node, NULL);
+ return new_node->index;
+ } else {
+ return node->index;
+ }
+}
+
+JEMALLOC_COLD
+void
+prof_try_log(tsd_t *tsd, size_t usize, prof_info_t *prof_info) {
+ cassert(config_prof);
+ prof_tctx_t *tctx = prof_info->alloc_tctx;
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
+
+ prof_tdata_t *cons_tdata = prof_tdata_get(tsd, false);
+ if (cons_tdata == NULL) {
+ /*
+ * We decide not to log these allocations. cons_tdata will be
+ * NULL only when the current thread is in a weird state (e.g.
+ * it's being destroyed).
+ */
+ return;
+ }
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &log_mtx);
+
+ if (prof_logging_state != prof_logging_state_started) {
+ goto label_done;
+ }
+
+ if (!log_tables_initialized) {
+ bool err1 = ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS,
+ prof_bt_node_hash, prof_bt_node_keycomp);
+ bool err2 = ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS,
+ prof_thr_node_hash, prof_thr_node_keycomp);
+ if (err1 || err2) {
+ goto label_done;
+ }
+ log_tables_initialized = true;
+ }
+
+ nstime_t alloc_time = prof_info->alloc_time;
+ nstime_t free_time;
+ nstime_prof_init_update(&free_time);
+
+ size_t sz = sizeof(prof_alloc_node_t);
+ prof_alloc_node_t *new_node = (prof_alloc_node_t *)
+ iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, true,
+ arena_get(TSDN_NULL, 0, true), true);
+
+ const char *prod_thr_name = (tctx->tdata->thread_name == NULL) ?
+ "" : tctx->tdata->thread_name;
+ const char *cons_thr_name = prof_thread_name_get(tsd);
+
+ prof_bt_t bt;
+ /* Initialize the backtrace, using the buffer in tdata to store it. */
+ bt_init(&bt, cons_tdata->vec);
+ prof_backtrace(tsd, &bt);
+ prof_bt_t *cons_bt = &bt;
+
+ /* We haven't destroyed tctx yet, so gctx should be good to read. */
+ prof_bt_t *prod_bt = &tctx->gctx->bt;
+
+ new_node->next = NULL;
+ new_node->alloc_thr_ind = prof_log_thr_index(tsd, tctx->tdata->thr_uid,
+ prod_thr_name);
+ new_node->free_thr_ind = prof_log_thr_index(tsd, cons_tdata->thr_uid,
+ cons_thr_name);
+ new_node->alloc_bt_ind = prof_log_bt_index(tsd, prod_bt);
+ new_node->free_bt_ind = prof_log_bt_index(tsd, cons_bt);
+ new_node->alloc_time_ns = nstime_ns(&alloc_time);
+ new_node->free_time_ns = nstime_ns(&free_time);
+ new_node->usize = usize;
+
+ if (log_alloc_first == NULL) {
+ log_alloc_first = new_node;
+ log_alloc_last = new_node;
+ } else {
+ log_alloc_last->next = new_node;
+ log_alloc_last = new_node;
+ }
+
+label_done:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &log_mtx);
+}
+
+static void
+prof_bt_node_hash(const void *key, size_t r_hash[2]) {
+ const prof_bt_node_t *bt_node = (prof_bt_node_t *)key;
+ prof_bt_hash((void *)(&bt_node->bt), r_hash);
+}
+
+static bool
+prof_bt_node_keycomp(const void *k1, const void *k2) {
+ const prof_bt_node_t *bt_node1 = (prof_bt_node_t *)k1;
+ const prof_bt_node_t *bt_node2 = (prof_bt_node_t *)k2;
+ return prof_bt_keycomp((void *)(&bt_node1->bt),
+ (void *)(&bt_node2->bt));
+}
+
+static void
+prof_thr_node_hash(const void *key, size_t r_hash[2]) {
+ const prof_thr_node_t *thr_node = (prof_thr_node_t *)key;
+ hash(&thr_node->thr_uid, sizeof(uint64_t), 0x94122f35U, r_hash);
+}
+
+static bool
+prof_thr_node_keycomp(const void *k1, const void *k2) {
+ const prof_thr_node_t *thr_node1 = (prof_thr_node_t *)k1;
+ const prof_thr_node_t *thr_node2 = (prof_thr_node_t *)k2;
+ return thr_node1->thr_uid == thr_node2->thr_uid;
+}
+
+/* Used in unit tests. */
+size_t
+prof_log_bt_count(void) {
+ cassert(config_prof);
+ size_t cnt = 0;
+ prof_bt_node_t *node = log_bt_first;
+ while (node != NULL) {
+ cnt++;
+ node = node->next;
+ }
+ return cnt;
+}
+
+/* Used in unit tests. */
+size_t
+prof_log_alloc_count(void) {
+ cassert(config_prof);
+ size_t cnt = 0;
+ prof_alloc_node_t *node = log_alloc_first;
+ while (node != NULL) {
+ cnt++;
+ node = node->next;
+ }
+ return cnt;
+}
+
+/* Used in unit tests. */
+size_t
+prof_log_thr_count(void) {
+ cassert(config_prof);
+ size_t cnt = 0;
+ prof_thr_node_t *node = log_thr_first;
+ while (node != NULL) {
+ cnt++;
+ node = node->next;
+ }
+ return cnt;
+}
+
+/* Used in unit tests. */
+bool
+prof_log_is_logging(void) {
+ cassert(config_prof);
+ return prof_logging_state == prof_logging_state_started;
+}
+
+/* Used in unit tests. */
+bool
+prof_log_rep_check(void) {
+ cassert(config_prof);
+ if (prof_logging_state == prof_logging_state_stopped
+ && log_tables_initialized) {
+ return true;
+ }
+
+ if (log_bt_last != NULL && log_bt_last->next != NULL) {
+ return true;
+ }
+ if (log_thr_last != NULL && log_thr_last->next != NULL) {
+ return true;
+ }
+ if (log_alloc_last != NULL && log_alloc_last->next != NULL) {
+ return true;
+ }
+
+ size_t bt_count = prof_log_bt_count();
+ size_t thr_count = prof_log_thr_count();
+ size_t alloc_count = prof_log_alloc_count();
+
+ if (prof_logging_state == prof_logging_state_stopped) {
+ if (bt_count != 0 || thr_count != 0 || alloc_count != 0) {
+ return true;
+ }
+ }
+
+ prof_alloc_node_t *node = log_alloc_first;
+ while (node != NULL) {
+ if (node->alloc_bt_ind >= bt_count) {
+ return true;
+ }
+ if (node->free_bt_ind >= bt_count) {
+ return true;
+ }
+ if (node->alloc_thr_ind >= thr_count) {
+ return true;
+ }
+ if (node->free_thr_ind >= thr_count) {
+ return true;
+ }
+ if (node->alloc_time_ns > node->free_time_ns) {
+ return true;
+ }
+ node = node->next;
+ }
+
+ return false;
+}
+
+/* Used in unit tests. */
+void
+prof_log_dummy_set(bool new_value) {
+ cassert(config_prof);
+ prof_log_dummy = new_value;
+}
+
+/* Used as an atexit function to stop logging on exit. */
+static void
+prof_log_stop_final(void) {
+ tsd_t *tsd = tsd_fetch();
+ prof_log_stop(tsd_tsdn(tsd));
+}
+
+JEMALLOC_COLD
+bool
+prof_log_start(tsdn_t *tsdn, const char *filename) {
+ cassert(config_prof);
+
+ if (!opt_prof) {
+ return true;
+ }
+
+ bool ret = false;
+
+ malloc_mutex_lock(tsdn, &log_mtx);
+
+ static bool prof_log_atexit_called = false;
+ if (!prof_log_atexit_called) {
+ prof_log_atexit_called = true;
+ if (atexit(prof_log_stop_final) != 0) {
+ malloc_write("<jemalloc>: Error in atexit() "
+ "for logging\n");
+ if (opt_abort) {
+ abort();
+ }
+ ret = true;
+ goto label_done;
+ }
+ }
+
+ if (prof_logging_state != prof_logging_state_stopped) {
+ ret = true;
+ } else if (filename == NULL) {
+ /* Make default name. */
+ prof_get_default_filename(tsdn, log_filename, log_seq);
+ log_seq++;
+ prof_logging_state = prof_logging_state_started;
+ } else if (strlen(filename) >= PROF_DUMP_FILENAME_LEN) {
+ ret = true;
+ } else {
+ strcpy(log_filename, filename);
+ prof_logging_state = prof_logging_state_started;
+ }
+
+ if (!ret) {
+ nstime_prof_init_update(&log_start_timestamp);
+ }
+label_done:
+ malloc_mutex_unlock(tsdn, &log_mtx);
+
+ return ret;
+}
+
+struct prof_emitter_cb_arg_s {
+ int fd;
+ ssize_t ret;
+};
+
+static void
+prof_emitter_write_cb(void *opaque, const char *to_write) {
+ struct prof_emitter_cb_arg_s *arg =
+ (struct prof_emitter_cb_arg_s *)opaque;
+ size_t bytes = strlen(to_write);
+ if (prof_log_dummy) {
+ return;
+ }
+ arg->ret = malloc_write_fd(arg->fd, to_write, bytes);
+}
+
+/*
+ * prof_log_emit_{...} goes through the appropriate linked list, emitting each
+ * node to the JSON output and deallocating it.
+ */
+static void
+prof_log_emit_threads(tsd_t *tsd, emitter_t *emitter) {
+ emitter_json_array_kv_begin(emitter, "threads");
+ prof_thr_node_t *thr_node = log_thr_first;
+ prof_thr_node_t *thr_old_node;
+ while (thr_node != NULL) {
+ emitter_json_object_begin(emitter);
+
+ emitter_json_kv(emitter, "thr_uid", emitter_type_uint64,
+ &thr_node->thr_uid);
+
+ char *thr_name = thr_node->name;
+
+ emitter_json_kv(emitter, "thr_name", emitter_type_string,
+ &thr_name);
+
+ emitter_json_object_end(emitter);
+ thr_old_node = thr_node;
+ thr_node = thr_node->next;
+ idalloctm(tsd_tsdn(tsd), thr_old_node, NULL, NULL, true, true);
+ }
+ emitter_json_array_end(emitter);
+}
+
+static void
+prof_log_emit_traces(tsd_t *tsd, emitter_t *emitter) {
+ emitter_json_array_kv_begin(emitter, "stack_traces");
+ prof_bt_node_t *bt_node = log_bt_first;
+ prof_bt_node_t *bt_old_node;
+ /*
+ * Calculate the buffer size we need: two hex digits per pointer byte,
+ * two for the "0x" prefix, and one more for the terminating '\0'.
+ */
+ char buf[2 * sizeof(intptr_t) + 3];
+ size_t buf_sz = sizeof(buf);
+ while (bt_node != NULL) {
+ emitter_json_array_begin(emitter);
+ size_t i;
+ for (i = 0; i < bt_node->bt.len; i++) {
+ malloc_snprintf(buf, buf_sz, "%p", bt_node->bt.vec[i]);
+ char *trace_str = buf;
+ emitter_json_value(emitter, emitter_type_string,
+ &trace_str);
+ }
+ emitter_json_array_end(emitter);
+
+ bt_old_node = bt_node;
+ bt_node = bt_node->next;
+ idalloctm(tsd_tsdn(tsd), bt_old_node, NULL, NULL, true, true);
+ }
+ emitter_json_array_end(emitter);
+}
+
+static void
+prof_log_emit_allocs(tsd_t *tsd, emitter_t *emitter) {
+ emitter_json_array_kv_begin(emitter, "allocations");
+ prof_alloc_node_t *alloc_node = log_alloc_first;
+ prof_alloc_node_t *alloc_old_node;
+ while (alloc_node != NULL) {
+ emitter_json_object_begin(emitter);
+
+ emitter_json_kv(emitter, "alloc_thread", emitter_type_size,
+ &alloc_node->alloc_thr_ind);
+
+ emitter_json_kv(emitter, "free_thread", emitter_type_size,
+ &alloc_node->free_thr_ind);
+
+ emitter_json_kv(emitter, "alloc_trace", emitter_type_size,
+ &alloc_node->alloc_bt_ind);
+
+ emitter_json_kv(emitter, "free_trace", emitter_type_size,
+ &alloc_node->free_bt_ind);
+
+ emitter_json_kv(emitter, "alloc_timestamp",
+ emitter_type_uint64, &alloc_node->alloc_time_ns);
+
+ emitter_json_kv(emitter, "free_timestamp", emitter_type_uint64,
+ &alloc_node->free_time_ns);
+
+ emitter_json_kv(emitter, "usize", emitter_type_uint64,
+ &alloc_node->usize);
+
+ emitter_json_object_end(emitter);
+
+ alloc_old_node = alloc_node;
+ alloc_node = alloc_node->next;
+ idalloctm(tsd_tsdn(tsd), alloc_old_node, NULL, NULL, true,
+ true);
+ }
+ emitter_json_array_end(emitter);
+}
+
+static void
+prof_log_emit_metadata(emitter_t *emitter) {
+ emitter_json_object_kv_begin(emitter, "info");
+
+ nstime_t now;
+
+ nstime_prof_init_update(&now);
+ uint64_t ns = nstime_ns(&now) - nstime_ns(&log_start_timestamp);
+ emitter_json_kv(emitter, "duration", emitter_type_uint64, &ns);
+
+ char *vers = JEMALLOC_VERSION;
+ emitter_json_kv(emitter, "version",
+ emitter_type_string, &vers);
+
+ emitter_json_kv(emitter, "lg_sample_rate",
+ emitter_type_int, &lg_prof_sample);
+
+ const char *res_type = prof_time_res_mode_names[opt_prof_time_res];
+ emitter_json_kv(emitter, "prof_time_resolution", emitter_type_string,
+ &res_type);
+
+ int pid = prof_getpid();
+ emitter_json_kv(emitter, "pid", emitter_type_int, &pid);
+
+ emitter_json_object_end(emitter);
+}
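+
+/*
+ * Putting the emitters above together, the file written by prof_log_stop has
+ * roughly the following (compact) JSON shape, with placeholder values:
+ *
+ *   {"info": {"duration": ..., "version": "...", "lg_sample_rate": ...,
+ *       "prof_time_resolution": "...", "pid": ...},
+ *    "threads": [{"thr_uid": ..., "thr_name": "..."}, ...],
+ *    "stack_traces": [["0x...", "0x..."], ...],
+ *    "allocations": [{"alloc_thread": ..., "free_thread": ...,
+ *       "alloc_trace": ..., "free_trace": ..., "alloc_timestamp": ...,
+ *       "free_timestamp": ..., "usize": ...}, ...]}
+ */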
+
+#define PROF_LOG_STOP_BUFSIZE PROF_DUMP_BUFSIZE
+JEMALLOC_COLD
+bool
+prof_log_stop(tsdn_t *tsdn) {
+ cassert(config_prof);
+ if (!opt_prof || !prof_booted) {
+ return true;
+ }
+
+ tsd_t *tsd = tsdn_tsd(tsdn);
+ malloc_mutex_lock(tsdn, &log_mtx);
+
+ if (prof_logging_state != prof_logging_state_started) {
+ malloc_mutex_unlock(tsdn, &log_mtx);
+ return true;
+ }
+
+ /*
+ * Set the state to dumping. We'll set it to stopped when we're done.
+ * Since other threads won't be able to start/stop/log when the state is
+ * dumping, we don't have to hold the lock during the whole method.
+ */
+ prof_logging_state = prof_logging_state_dumping;
+ malloc_mutex_unlock(tsdn, &log_mtx);
+
+ emitter_t emitter;
+
+ /* Create a file. */
+
+ int fd;
+ if (prof_log_dummy) {
+ fd = 0;
+ } else {
+ fd = creat(log_filename, 0644);
+ }
+
+ if (fd == -1) {
+ malloc_printf("<jemalloc>: creat() for log file \"%s\" "
+ " failed with %d\n", log_filename, errno);
+ if (opt_abort) {
+ abort();
+ }
+ return true;
+ }
+
+ struct prof_emitter_cb_arg_s arg;
+ arg.fd = fd;
+
+ buf_writer_t buf_writer;
+ buf_writer_init(tsdn, &buf_writer, prof_emitter_write_cb, &arg, NULL,
+ PROF_LOG_STOP_BUFSIZE);
+ emitter_init(&emitter, emitter_output_json_compact, buf_writer_cb,
+ &buf_writer);
+
+ emitter_begin(&emitter);
+ prof_log_emit_metadata(&emitter);
+ prof_log_emit_threads(tsd, &emitter);
+ prof_log_emit_traces(tsd, &emitter);
+ prof_log_emit_allocs(tsd, &emitter);
+ emitter_end(&emitter);
+
+ buf_writer_terminate(tsdn, &buf_writer);
+
+ /* Reset global state. */
+ if (log_tables_initialized) {
+ ckh_delete(tsd, &log_bt_node_set);
+ ckh_delete(tsd, &log_thr_node_set);
+ }
+ log_tables_initialized = false;
+ log_bt_index = 0;
+ log_thr_index = 0;
+ log_bt_first = NULL;
+ log_bt_last = NULL;
+ log_thr_first = NULL;
+ log_thr_last = NULL;
+ log_alloc_first = NULL;
+ log_alloc_last = NULL;
+
+ malloc_mutex_lock(tsdn, &log_mtx);
+ prof_logging_state = prof_logging_state_stopped;
+ malloc_mutex_unlock(tsdn, &log_mtx);
+
+ if (prof_log_dummy) {
+ return false;
+ }
+ return close(fd) || arg.ret == -1;
+}
+#undef PROF_LOG_STOP_BUFSIZE
+
+JEMALLOC_COLD
+bool
+prof_log_init(tsd_t *tsd) {
+ cassert(config_prof);
+ if (malloc_mutex_init(&log_mtx, "prof_log",
+ WITNESS_RANK_PROF_LOG, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+
+ if (opt_prof_log) {
+ prof_log_start(tsd_tsdn(tsd), NULL);
+ }
+
+ return false;
+}
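+
+/*
+ * Illustrative usage sketch: logging is normally driven through the
+ * "prof.log_start" / "prof.log_stop" mallctls (part of the ctl interface,
+ * not of this file), which call into prof_log_start() / prof_log_stop()
+ * above.  With "prof.log.json" as a placeholder file name, roughly:
+ *
+ *   const char *log_name = "prof.log.json";
+ *   mallctl("prof.log_start", NULL, NULL, (void *)&log_name,
+ *       sizeof(const char *));
+ *   ...
+ *   mallctl("prof.log_stop", NULL, NULL, NULL, 0);
+ */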
+
+/******************************************************************************/
diff --git a/deps/jemalloc/src/prof_recent.c b/deps/jemalloc/src/prof_recent.c
new file mode 100644
index 000000000..834a9446c
--- /dev/null
+++ b/deps/jemalloc/src/prof_recent.c
@@ -0,0 +1,600 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/buf_writer.h"
+#include "jemalloc/internal/emitter.h"
+#include "jemalloc/internal/prof_data.h"
+#include "jemalloc/internal/prof_recent.h"
+
+ssize_t opt_prof_recent_alloc_max = PROF_RECENT_ALLOC_MAX_DEFAULT;
+malloc_mutex_t prof_recent_alloc_mtx; /* Protects the fields below */
+static atomic_zd_t prof_recent_alloc_max;
+static ssize_t prof_recent_alloc_count = 0;
+prof_recent_list_t prof_recent_alloc_list;
+
+malloc_mutex_t prof_recent_dump_mtx; /* Protects dumping. */
+
+static void
+prof_recent_alloc_max_init() {
+ atomic_store_zd(&prof_recent_alloc_max, opt_prof_recent_alloc_max,
+ ATOMIC_RELAXED);
+}
+
+static inline ssize_t
+prof_recent_alloc_max_get_no_lock() {
+ return atomic_load_zd(&prof_recent_alloc_max, ATOMIC_RELAXED);
+}
+
+static inline ssize_t
+prof_recent_alloc_max_get(tsd_t *tsd) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ return prof_recent_alloc_max_get_no_lock();
+}
+
+static inline ssize_t
+prof_recent_alloc_max_update(tsd_t *tsd, ssize_t max) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ ssize_t old_max = prof_recent_alloc_max_get(tsd);
+ atomic_store_zd(&prof_recent_alloc_max, max, ATOMIC_RELAXED);
+ return old_max;
+}
+
+static prof_recent_t *
+prof_recent_allocate_node(tsdn_t *tsdn) {
+ return (prof_recent_t *)iallocztm(tsdn, sizeof(prof_recent_t),
+ sz_size2index(sizeof(prof_recent_t)), false, NULL, true,
+ arena_get(tsdn, 0, false), true);
+}
+
+static void
+prof_recent_free_node(tsdn_t *tsdn, prof_recent_t *node) {
+ assert(node != NULL);
+ assert(isalloc(tsdn, node) == sz_s2u(sizeof(prof_recent_t)));
+ idalloctm(tsdn, node, NULL, NULL, true, true);
+}
+
+static inline void
+increment_recent_count(tsd_t *tsd, prof_tctx_t *tctx) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
+ ++tctx->recent_count;
+ assert(tctx->recent_count > 0);
+}
+
+bool
+prof_recent_alloc_prepare(tsd_t *tsd, prof_tctx_t *tctx) {
+ cassert(config_prof);
+ assert(opt_prof && prof_booted);
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
+ malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+
+ /*
+ * Check whether last-N mode is turned on without trying to acquire the
+ * lock, so as to optimize for the following two scenarios:
+ * (1) Last-N mode is switched off;
+ * (2) Dumping, during which last-N mode is temporarily turned off so
+ * as not to block sampled allocations.
+ */
+ if (prof_recent_alloc_max_get_no_lock() == 0) {
+ return false;
+ }
+
+ /*
+ * Increment recent_count to hold the tctx so that it won't be gone
+ * even after tctx->tdata->lock is released. This acts as a
+ * "placeholder"; the real recording of the allocation requires a lock
+ * on prof_recent_alloc_mtx and is done in prof_recent_alloc (when
+ * tctx->tdata->lock has been released).
+ */
+ increment_recent_count(tsd, tctx);
+ return true;
+}
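+
+/*
+ * Roughly, the intended calling sequence is: prof_recent_alloc_prepare()
+ * while still holding tctx->tdata->lock, then, once that lock has been
+ * dropped, prof_recent_alloc() below to do the actual recording; the
+ * placeholder count taken in the prepare step is released again on the
+ * rollback paths.
+ */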
+
+static void
+decrement_recent_count(tsd_t *tsd, prof_tctx_t *tctx) {
+ malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ assert(tctx != NULL);
+ malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
+ assert(tctx->recent_count > 0);
+ --tctx->recent_count;
+ prof_tctx_try_destroy(tsd, tctx);
+}
+
+static inline edata_t *
+prof_recent_alloc_edata_get_no_lock(const prof_recent_t *n) {
+ return (edata_t *)atomic_load_p(&n->alloc_edata, ATOMIC_ACQUIRE);
+}
+
+edata_t *
+prof_recent_alloc_edata_get_no_lock_test(const prof_recent_t *n) {
+ cassert(config_prof);
+ return prof_recent_alloc_edata_get_no_lock(n);
+}
+
+static inline edata_t *
+prof_recent_alloc_edata_get(tsd_t *tsd, const prof_recent_t *n) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ return prof_recent_alloc_edata_get_no_lock(n);
+}
+
+static void
+prof_recent_alloc_edata_set(tsd_t *tsd, prof_recent_t *n, edata_t *edata) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ atomic_store_p(&n->alloc_edata, edata, ATOMIC_RELEASE);
+}
+
+void
+edata_prof_recent_alloc_init(edata_t *edata) {
+ cassert(config_prof);
+ edata_prof_recent_alloc_set_dont_call_directly(edata, NULL);
+}
+
+static inline prof_recent_t *
+edata_prof_recent_alloc_get_no_lock(const edata_t *edata) {
+ cassert(config_prof);
+ return edata_prof_recent_alloc_get_dont_call_directly(edata);
+}
+
+prof_recent_t *
+edata_prof_recent_alloc_get_no_lock_test(const edata_t *edata) {
+ cassert(config_prof);
+ return edata_prof_recent_alloc_get_no_lock(edata);
+}
+
+static inline prof_recent_t *
+edata_prof_recent_alloc_get(tsd_t *tsd, const edata_t *edata) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ prof_recent_t *recent_alloc =
+ edata_prof_recent_alloc_get_no_lock(edata);
+ assert(recent_alloc == NULL ||
+ prof_recent_alloc_edata_get(tsd, recent_alloc) == edata);
+ return recent_alloc;
+}
+
+static prof_recent_t *
+edata_prof_recent_alloc_update_internal(tsd_t *tsd, edata_t *edata,
+ prof_recent_t *recent_alloc) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ prof_recent_t *old_recent_alloc =
+ edata_prof_recent_alloc_get(tsd, edata);
+ edata_prof_recent_alloc_set_dont_call_directly(edata, recent_alloc);
+ return old_recent_alloc;
+}
+
+static void
+edata_prof_recent_alloc_set(tsd_t *tsd, edata_t *edata,
+ prof_recent_t *recent_alloc) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ assert(recent_alloc != NULL);
+ prof_recent_t *old_recent_alloc =
+ edata_prof_recent_alloc_update_internal(tsd, edata, recent_alloc);
+ assert(old_recent_alloc == NULL);
+ prof_recent_alloc_edata_set(tsd, recent_alloc, edata);
+}
+
+static void
+edata_prof_recent_alloc_reset(tsd_t *tsd, edata_t *edata,
+ prof_recent_t *recent_alloc) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ assert(recent_alloc != NULL);
+ prof_recent_t *old_recent_alloc =
+ edata_prof_recent_alloc_update_internal(tsd, edata, NULL);
+ assert(old_recent_alloc == recent_alloc);
+ assert(edata == prof_recent_alloc_edata_get(tsd, recent_alloc));
+ prof_recent_alloc_edata_set(tsd, recent_alloc, NULL);
+}
+
+/*
+ * This function should be called right before an allocation is released, so
+ * that the associated recent allocation record can contain the following
+ * information:
+ * (1) The fact that the allocation has been released;
+ * (2) The time of the deallocation; and
+ * (3) The prof_tctx associated with the deallocation.
+ */
+void
+prof_recent_alloc_reset(tsd_t *tsd, edata_t *edata) {
+ cassert(config_prof);
+ /*
+ * Check whether the recent allocation record still exists without
+ * trying to acquire the lock.
+ */
+ if (edata_prof_recent_alloc_get_no_lock(edata) == NULL) {
+ return;
+ }
+
+ prof_tctx_t *dalloc_tctx = prof_tctx_create(tsd);
+ /*
+ * In case dalloc_tctx is NULL, e.g. due to OOM, we will not record the
+ * deallocation time / tctx, which is handled later, after we check
+ * again when holding the lock.
+ */
+
+ if (dalloc_tctx != NULL) {
+ malloc_mutex_lock(tsd_tsdn(tsd), dalloc_tctx->tdata->lock);
+ increment_recent_count(tsd, dalloc_tctx);
+ dalloc_tctx->prepared = false;
+ malloc_mutex_unlock(tsd_tsdn(tsd), dalloc_tctx->tdata->lock);
+ }
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ /* Check again after acquiring the lock. */
+ prof_recent_t *recent = edata_prof_recent_alloc_get(tsd, edata);
+ if (recent != NULL) {
+ assert(nstime_equals_zero(&recent->dalloc_time));
+ assert(recent->dalloc_tctx == NULL);
+ if (dalloc_tctx != NULL) {
+ nstime_prof_update(&recent->dalloc_time);
+ recent->dalloc_tctx = dalloc_tctx;
+ dalloc_tctx = NULL;
+ }
+ edata_prof_recent_alloc_reset(tsd, edata, recent);
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+
+ if (dalloc_tctx != NULL) {
+ /* We lost the race - the allocation record was just gone. */
+ decrement_recent_count(tsd, dalloc_tctx);
+ }
+}
+
+static void
+prof_recent_alloc_evict_edata(tsd_t *tsd, prof_recent_t *recent_alloc) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ edata_t *edata = prof_recent_alloc_edata_get(tsd, recent_alloc);
+ if (edata != NULL) {
+ edata_prof_recent_alloc_reset(tsd, edata, recent_alloc);
+ }
+}
+
+static bool
+prof_recent_alloc_is_empty(tsd_t *tsd) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ if (ql_empty(&prof_recent_alloc_list)) {
+ assert(prof_recent_alloc_count == 0);
+ return true;
+ } else {
+ assert(prof_recent_alloc_count > 0);
+ return false;
+ }
+}
+
+static void
+prof_recent_alloc_assert_count(tsd_t *tsd) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ if (!config_debug) {
+ return;
+ }
+ ssize_t count = 0;
+ prof_recent_t *n;
+ ql_foreach(n, &prof_recent_alloc_list, link) {
+ ++count;
+ }
+ assert(count == prof_recent_alloc_count);
+ assert(prof_recent_alloc_max_get(tsd) == -1 ||
+ count <= prof_recent_alloc_max_get(tsd));
+}
+
+void
+prof_recent_alloc(tsd_t *tsd, edata_t *edata, size_t size, size_t usize) {
+ cassert(config_prof);
+ assert(edata != NULL);
+ prof_tctx_t *tctx = edata_prof_tctx_get(edata);
+
+ malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock);
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ prof_recent_alloc_assert_count(tsd);
+
+ /*
+ * Reserve a new prof_recent_t node if needed. If needed, we release
+ * the prof_recent_alloc_mtx lock and allocate. Then, rather than
+ * immediately checking for OOM, we regain the lock and try to make use
+ * of the reserve node if needed. There are six scenarios:
+ *
+ * \ now | no need | need but OOMed | need and allocated
+ * later \ | | |
+ * ------------------------------------------------------------
+ * no need | (1) | (2) | (3)
+ * ------------------------------------------------------------
+ * need | (4) | (5) | (6)
+ *
+ * First, "(4)" never happens, because we don't release the lock in the
+ * middle if there's no need for a new node; in such cases "(1)" always
+ * takes place, which is trivial.
+ *
+ * Out of the remaining four scenarios, "(6)" is the common case and is
+ * trivial. "(5)" is also trivial, in which case we'll rollback the
+ * effect of prof_recent_alloc_prepare() as expected.
+ *
+ * "(2)" / "(3)" occurs when the need for a new node is gone after we
+ * regain the lock. If the new node is successfully allocated, i.e. in
+ * the case of "(3)", we'll release it in the end; otherwise, i.e. in
+ * the case of "(2)", we do nothing - we're lucky that the OOM ends up
+ * doing no harm at all.
+ *
+ * Therefore, the only performance cost of the "release lock" ->
+ * "allocate" -> "regain lock" design is the "(3)" case, but it happens
+ * very rarely, so the cost is relatively small compared to the gain of
+ * not having to have the lock order of prof_recent_alloc_mtx above all
+ * the allocation locks.
+ */
+ prof_recent_t *reserve = NULL;
+ if (prof_recent_alloc_max_get(tsd) == -1 ||
+ prof_recent_alloc_count < prof_recent_alloc_max_get(tsd)) {
+ assert(prof_recent_alloc_max_get(tsd) != 0);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ reserve = prof_recent_allocate_node(tsd_tsdn(tsd));
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ prof_recent_alloc_assert_count(tsd);
+ }
+
+ if (prof_recent_alloc_max_get(tsd) == 0) {
+ assert(prof_recent_alloc_is_empty(tsd));
+ goto label_rollback;
+ }
+
+ prof_tctx_t *old_alloc_tctx, *old_dalloc_tctx;
+ if (prof_recent_alloc_count == prof_recent_alloc_max_get(tsd)) {
+ /* If upper limit is reached, rotate the head. */
+ assert(prof_recent_alloc_max_get(tsd) != -1);
+ assert(!prof_recent_alloc_is_empty(tsd));
+ prof_recent_t *head = ql_first(&prof_recent_alloc_list);
+ old_alloc_tctx = head->alloc_tctx;
+ assert(old_alloc_tctx != NULL);
+ old_dalloc_tctx = head->dalloc_tctx;
+ prof_recent_alloc_evict_edata(tsd, head);
+ ql_rotate(&prof_recent_alloc_list, link);
+ } else {
+ /* Otherwise make use of the new node. */
+ assert(prof_recent_alloc_max_get(tsd) == -1 ||
+ prof_recent_alloc_count < prof_recent_alloc_max_get(tsd));
+ if (reserve == NULL) {
+ goto label_rollback;
+ }
+ ql_elm_new(reserve, link);
+ ql_tail_insert(&prof_recent_alloc_list, reserve, link);
+ reserve = NULL;
+ old_alloc_tctx = NULL;
+ old_dalloc_tctx = NULL;
+ ++prof_recent_alloc_count;
+ }
+
+ /* Fill content into the tail node. */
+ prof_recent_t *tail = ql_last(&prof_recent_alloc_list, link);
+ assert(tail != NULL);
+ tail->size = size;
+ tail->usize = usize;
+ nstime_copy(&tail->alloc_time, edata_prof_alloc_time_get(edata));
+ tail->alloc_tctx = tctx;
+ nstime_init_zero(&tail->dalloc_time);
+ tail->dalloc_tctx = NULL;
+ edata_prof_recent_alloc_set(tsd, edata, tail);
+
+ assert(!prof_recent_alloc_is_empty(tsd));
+ prof_recent_alloc_assert_count(tsd);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+
+ if (reserve != NULL) {
+ prof_recent_free_node(tsd_tsdn(tsd), reserve);
+ }
+
+ /*
+ * Asynchronously handle the tctx of the old node, so that there's no
+ * simultaneous holdings of prof_recent_alloc_mtx and tdata->lock.
+ * In the worst case this may delay the tctx release but it's better
+ * than holding prof_recent_alloc_mtx for longer.
+ */
+ if (old_alloc_tctx != NULL) {
+ decrement_recent_count(tsd, old_alloc_tctx);
+ }
+ if (old_dalloc_tctx != NULL) {
+ decrement_recent_count(tsd, old_dalloc_tctx);
+ }
+ return;
+
+label_rollback:
+ assert(edata_prof_recent_alloc_get(tsd, edata) == NULL);
+ prof_recent_alloc_assert_count(tsd);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ if (reserve != NULL) {
+ prof_recent_free_node(tsd_tsdn(tsd), reserve);
+ }
+ decrement_recent_count(tsd, tctx);
+}
+
+ssize_t
+prof_recent_alloc_max_ctl_read() {
+ cassert(config_prof);
+ /* Don't bother to acquire the lock. */
+ return prof_recent_alloc_max_get_no_lock();
+}
+
+static void
+prof_recent_alloc_restore_locked(tsd_t *tsd, prof_recent_list_t *to_delete) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ ssize_t max = prof_recent_alloc_max_get(tsd);
+ if (max == -1 || prof_recent_alloc_count <= max) {
+ /* Easy case - no need to alter the list. */
+ ql_new(to_delete);
+ prof_recent_alloc_assert_count(tsd);
+ return;
+ }
+
+ prof_recent_t *node;
+ ql_foreach(node, &prof_recent_alloc_list, link) {
+ if (prof_recent_alloc_count == max) {
+ break;
+ }
+ prof_recent_alloc_evict_edata(tsd, node);
+ --prof_recent_alloc_count;
+ }
+ assert(prof_recent_alloc_count == max);
+
+ ql_move(to_delete, &prof_recent_alloc_list);
+ if (max == 0) {
+ assert(node == NULL);
+ } else {
+ assert(node != NULL);
+ ql_split(to_delete, node, &prof_recent_alloc_list, link);
+ }
+ assert(!ql_empty(to_delete));
+ prof_recent_alloc_assert_count(tsd);
+}
+
+static void
+prof_recent_alloc_async_cleanup(tsd_t *tsd, prof_recent_list_t *to_delete) {
+ malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &prof_recent_dump_mtx);
+ malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ while (!ql_empty(to_delete)) {
+ prof_recent_t *node = ql_first(to_delete);
+ ql_remove(to_delete, node, link);
+ decrement_recent_count(tsd, node->alloc_tctx);
+ if (node->dalloc_tctx != NULL) {
+ decrement_recent_count(tsd, node->dalloc_tctx);
+ }
+ prof_recent_free_node(tsd_tsdn(tsd), node);
+ }
+}
+
+ssize_t
+prof_recent_alloc_max_ctl_write(tsd_t *tsd, ssize_t max) {
+ cassert(config_prof);
+ assert(max >= -1);
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ prof_recent_alloc_assert_count(tsd);
+ const ssize_t old_max = prof_recent_alloc_max_update(tsd, max);
+ prof_recent_list_t to_delete;
+ prof_recent_alloc_restore_locked(tsd, &to_delete);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ prof_recent_alloc_async_cleanup(tsd, &to_delete);
+ return old_max;
+}
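+
+/*
+ * Note on the max semantics used above: -1 means unlimited, 0 effectively
+ * disables recording (prof_recent_alloc_prepare bails out early), and
+ * shrinking the limit evicts the oldest records, whose tctx references are
+ * only dropped after prof_recent_alloc_mtx has been released.
+ */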
+
+static void
+prof_recent_alloc_dump_bt(emitter_t *emitter, prof_tctx_t *tctx) {
+ char bt_buf[2 * sizeof(intptr_t) + 3];
+ char *s = bt_buf;
+ assert(tctx != NULL);
+ prof_bt_t *bt = &tctx->gctx->bt;
+ for (size_t i = 0; i < bt->len; ++i) {
+ malloc_snprintf(bt_buf, sizeof(bt_buf), "%p", bt->vec[i]);
+ emitter_json_value(emitter, emitter_type_string, &s);
+ }
+}
+
+static void
+prof_recent_alloc_dump_node(emitter_t *emitter, prof_recent_t *node) {
+ emitter_json_object_begin(emitter);
+
+ emitter_json_kv(emitter, "size", emitter_type_size, &node->size);
+ emitter_json_kv(emitter, "usize", emitter_type_size, &node->usize);
+ bool released = prof_recent_alloc_edata_get_no_lock(node) == NULL;
+ emitter_json_kv(emitter, "released", emitter_type_bool, &released);
+
+ emitter_json_kv(emitter, "alloc_thread_uid", emitter_type_uint64,
+ &node->alloc_tctx->thr_uid);
+ prof_tdata_t *alloc_tdata = node->alloc_tctx->tdata;
+ assert(alloc_tdata != NULL);
+ if (alloc_tdata->thread_name != NULL) {
+ emitter_json_kv(emitter, "alloc_thread_name",
+ emitter_type_string, &alloc_tdata->thread_name);
+ }
+ uint64_t alloc_time_ns = nstime_ns(&node->alloc_time);
+ emitter_json_kv(emitter, "alloc_time", emitter_type_uint64,
+ &alloc_time_ns);
+ emitter_json_array_kv_begin(emitter, "alloc_trace");
+ prof_recent_alloc_dump_bt(emitter, node->alloc_tctx);
+ emitter_json_array_end(emitter);
+
+ if (released && node->dalloc_tctx != NULL) {
+ emitter_json_kv(emitter, "dalloc_thread_uid",
+ emitter_type_uint64, &node->dalloc_tctx->thr_uid);
+ prof_tdata_t *dalloc_tdata = node->dalloc_tctx->tdata;
+ assert(dalloc_tdata != NULL);
+ if (dalloc_tdata->thread_name != NULL) {
+ emitter_json_kv(emitter, "dalloc_thread_name",
+ emitter_type_string, &dalloc_tdata->thread_name);
+ }
+ assert(!nstime_equals_zero(&node->dalloc_time));
+ uint64_t dalloc_time_ns = nstime_ns(&node->dalloc_time);
+ emitter_json_kv(emitter, "dalloc_time", emitter_type_uint64,
+ &dalloc_time_ns);
+ emitter_json_array_kv_begin(emitter, "dalloc_trace");
+ prof_recent_alloc_dump_bt(emitter, node->dalloc_tctx);
+ emitter_json_array_end(emitter);
+ }
+
+ emitter_json_object_end(emitter);
+}
+
+#define PROF_RECENT_PRINT_BUFSIZE 65536
+JEMALLOC_COLD
+void
+prof_recent_alloc_dump(tsd_t *tsd, write_cb_t *write_cb, void *cbopaque) {
+ cassert(config_prof);
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_dump_mtx);
+ buf_writer_t buf_writer;
+ buf_writer_init(tsd_tsdn(tsd), &buf_writer, write_cb, cbopaque, NULL,
+ PROF_RECENT_PRINT_BUFSIZE);
+ emitter_t emitter;
+ emitter_init(&emitter, emitter_output_json_compact, buf_writer_cb,
+ &buf_writer);
+ prof_recent_list_t temp_list;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ prof_recent_alloc_assert_count(tsd);
+ ssize_t dump_max = prof_recent_alloc_max_get(tsd);
+ ql_move(&temp_list, &prof_recent_alloc_list);
+ ssize_t dump_count = prof_recent_alloc_count;
+ prof_recent_alloc_count = 0;
+ prof_recent_alloc_assert_count(tsd);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+
+ emitter_begin(&emitter);
+ uint64_t sample_interval = (uint64_t)1U << lg_prof_sample;
+ emitter_json_kv(&emitter, "sample_interval", emitter_type_uint64,
+ &sample_interval);
+ emitter_json_kv(&emitter, "recent_alloc_max", emitter_type_ssize,
+ &dump_max);
+ emitter_json_array_kv_begin(&emitter, "recent_alloc");
+ prof_recent_t *node;
+ ql_foreach(node, &temp_list, link) {
+ prof_recent_alloc_dump_node(&emitter, node);
+ }
+ emitter_json_array_end(&emitter);
+ emitter_end(&emitter);
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ prof_recent_alloc_assert_count(tsd);
+ ql_concat(&temp_list, &prof_recent_alloc_list, link);
+ ql_move(&prof_recent_alloc_list, &temp_list);
+ prof_recent_alloc_count += dump_count;
+ prof_recent_alloc_restore_locked(tsd, &temp_list);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+
+ buf_writer_terminate(tsd_tsdn(tsd), &buf_writer);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_dump_mtx);
+
+ prof_recent_alloc_async_cleanup(tsd, &temp_list);
+}
+#undef PROF_RECENT_PRINT_BUFSIZE
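+
+/*
+ * (The dump above is typically reached through the
+ * "experimental.prof_recent.alloc_dump" mallctl; the exact control name is
+ * part of the ctl interface rather than of this file.)
+ */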
+
+bool
+prof_recent_init() {
+ cassert(config_prof);
+ prof_recent_alloc_max_init();
+
+ if (malloc_mutex_init(&prof_recent_alloc_mtx, "prof_recent_alloc",
+ WITNESS_RANK_PROF_RECENT_ALLOC, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+
+ if (malloc_mutex_init(&prof_recent_dump_mtx, "prof_recent_dump",
+ WITNESS_RANK_PROF_RECENT_DUMP, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+
+ ql_new(&prof_recent_alloc_list);
+
+ return false;
+}
diff --git a/deps/jemalloc/src/prof_stats.c b/deps/jemalloc/src/prof_stats.c
new file mode 100644
index 000000000..5d1a506bb
--- /dev/null
+++ b/deps/jemalloc/src/prof_stats.c
@@ -0,0 +1,57 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/prof_stats.h"
+
+bool opt_prof_stats = false;
+malloc_mutex_t prof_stats_mtx;
+static prof_stats_t prof_stats_live[PROF_SC_NSIZES];
+static prof_stats_t prof_stats_accum[PROF_SC_NSIZES];
+
+static void
+prof_stats_enter(tsd_t *tsd, szind_t ind) {
+ assert(opt_prof && opt_prof_stats);
+ assert(ind < SC_NSIZES);
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_stats_mtx);
+}
+
+static void
+prof_stats_leave(tsd_t *tsd) {
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_stats_mtx);
+}
+
+void
+prof_stats_inc(tsd_t *tsd, szind_t ind, size_t size) {
+ cassert(config_prof);
+ prof_stats_enter(tsd, ind);
+ prof_stats_live[ind].req_sum += size;
+ prof_stats_live[ind].count++;
+ prof_stats_accum[ind].req_sum += size;
+ prof_stats_accum[ind].count++;
+ prof_stats_leave(tsd);
+}
+
+void
+prof_stats_dec(tsd_t *tsd, szind_t ind, size_t size) {
+ cassert(config_prof);
+ prof_stats_enter(tsd, ind);
+ prof_stats_live[ind].req_sum -= size;
+ prof_stats_live[ind].count--;
+ prof_stats_leave(tsd);
+}
+
+void
+prof_stats_get_live(tsd_t *tsd, szind_t ind, prof_stats_t *stats) {
+ cassert(config_prof);
+ prof_stats_enter(tsd, ind);
+ memcpy(stats, &prof_stats_live[ind], sizeof(prof_stats_t));
+ prof_stats_leave(tsd);
+}
+
+void
+prof_stats_get_accum(tsd_t *tsd, szind_t ind, prof_stats_t *stats) {
+ cassert(config_prof);
+ prof_stats_enter(tsd, ind);
+ memcpy(stats, &prof_stats_accum[ind], sizeof(prof_stats_t));
+ prof_stats_leave(tsd);
+}
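+
+/*
+ * Note that prof_stats_dec() only touches prof_stats_live: the live stats
+ * track currently-outstanding sampled allocations, while the accum stats are
+ * cumulative and never decrease.
+ */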
diff --git a/deps/jemalloc/src/prof_sys.c b/deps/jemalloc/src/prof_sys.c
new file mode 100644
index 000000000..b5f1f5b22
--- /dev/null
+++ b/deps/jemalloc/src/prof_sys.c
@@ -0,0 +1,669 @@
+#define JEMALLOC_PROF_SYS_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/buf_writer.h"
+#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/prof_data.h"
+#include "jemalloc/internal/prof_sys.h"
+
+#ifdef JEMALLOC_PROF_LIBUNWIND
+#define UNW_LOCAL_ONLY
+#include <libunwind.h>
+#endif
+
+#ifdef JEMALLOC_PROF_LIBGCC
+/*
+ * We have a circular dependency -- jemalloc_internal.h tells us if we should
+ * use libgcc's unwinding functionality, but after we've included that, we've
+ * already hooked _Unwind_Backtrace. We'll temporarily disable hooking.
+ */
+#undef _Unwind_Backtrace
+#include <unwind.h>
+#define _Unwind_Backtrace JEMALLOC_TEST_HOOK(_Unwind_Backtrace, test_hooks_libc_hook)
+#endif
+
+/******************************************************************************/
+
+malloc_mutex_t prof_dump_filename_mtx;
+
+bool prof_do_mock = false;
+
+static uint64_t prof_dump_seq;
+static uint64_t prof_dump_iseq;
+static uint64_t prof_dump_mseq;
+static uint64_t prof_dump_useq;
+
+static char *prof_prefix = NULL;
+
+/* The fallback allocator profiling functionality will use. */
+base_t *prof_base;
+
+void
+bt_init(prof_bt_t *bt, void **vec) {
+ cassert(config_prof);
+
+ bt->vec = vec;
+ bt->len = 0;
+}
+
+#ifdef JEMALLOC_PROF_LIBUNWIND
+static void
+prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) {
+ int nframes;
+
+ cassert(config_prof);
+ assert(*len == 0);
+ assert(vec != NULL);
+ assert(max_len == PROF_BT_MAX);
+
+ nframes = unw_backtrace(vec, PROF_BT_MAX);
+ if (nframes <= 0) {
+ return;
+ }
+ *len = nframes;
+}
+#elif (defined(JEMALLOC_PROF_LIBGCC))
+static _Unwind_Reason_Code
+prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) {
+ cassert(config_prof);
+
+ return _URC_NO_REASON;
+}
+
+static _Unwind_Reason_Code
+prof_unwind_callback(struct _Unwind_Context *context, void *arg) {
+ prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
+ void *ip;
+
+ cassert(config_prof);
+
+ ip = (void *)_Unwind_GetIP(context);
+ if (ip == NULL) {
+ return _URC_END_OF_STACK;
+ }
+ data->vec[*data->len] = ip;
+ (*data->len)++;
+ if (*data->len == data->max) {
+ return _URC_END_OF_STACK;
+ }
+
+ return _URC_NO_REASON;
+}
+
+static void
+prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) {
+ prof_unwind_data_t data = {vec, len, max_len};
+
+ cassert(config_prof);
+ assert(vec != NULL);
+ assert(max_len == PROF_BT_MAX);
+
+ _Unwind_Backtrace(prof_unwind_callback, &data);
+}
+#elif (defined(JEMALLOC_PROF_GCC))
+static void
+prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) {
+#define BT_FRAME(i) \
+ if ((i) < max_len) { \
+ void *p; \
+ if (__builtin_frame_address(i) == 0) { \
+ return; \
+ } \
+ p = __builtin_return_address(i); \
+ if (p == NULL) { \
+ return; \
+ } \
+ vec[(i)] = p; \
+ *len = (i) + 1; \
+ } else { \
+ return; \
+ }
+
+ cassert(config_prof);
+ assert(vec != NULL);
+ assert(max_len == PROF_BT_MAX);
+
+ BT_FRAME(0)
+ BT_FRAME(1)
+ BT_FRAME(2)
+ BT_FRAME(3)
+ BT_FRAME(4)
+ BT_FRAME(5)
+ BT_FRAME(6)
+ BT_FRAME(7)
+ BT_FRAME(8)
+ BT_FRAME(9)
+
+ BT_FRAME(10)
+ BT_FRAME(11)
+ BT_FRAME(12)
+ BT_FRAME(13)
+ BT_FRAME(14)
+ BT_FRAME(15)
+ BT_FRAME(16)
+ BT_FRAME(17)
+ BT_FRAME(18)
+ BT_FRAME(19)
+
+ BT_FRAME(20)
+ BT_FRAME(21)
+ BT_FRAME(22)
+ BT_FRAME(23)
+ BT_FRAME(24)
+ BT_FRAME(25)
+ BT_FRAME(26)
+ BT_FRAME(27)
+ BT_FRAME(28)
+ BT_FRAME(29)
+
+ BT_FRAME(30)
+ BT_FRAME(31)
+ BT_FRAME(32)
+ BT_FRAME(33)
+ BT_FRAME(34)
+ BT_FRAME(35)
+ BT_FRAME(36)
+ BT_FRAME(37)
+ BT_FRAME(38)
+ BT_FRAME(39)
+
+ BT_FRAME(40)
+ BT_FRAME(41)
+ BT_FRAME(42)
+ BT_FRAME(43)
+ BT_FRAME(44)
+ BT_FRAME(45)
+ BT_FRAME(46)
+ BT_FRAME(47)
+ BT_FRAME(48)
+ BT_FRAME(49)
+
+ BT_FRAME(50)
+ BT_FRAME(51)
+ BT_FRAME(52)
+ BT_FRAME(53)
+ BT_FRAME(54)
+ BT_FRAME(55)
+ BT_FRAME(56)
+ BT_FRAME(57)
+ BT_FRAME(58)
+ BT_FRAME(59)
+
+ BT_FRAME(60)
+ BT_FRAME(61)
+ BT_FRAME(62)
+ BT_FRAME(63)
+ BT_FRAME(64)
+ BT_FRAME(65)
+ BT_FRAME(66)
+ BT_FRAME(67)
+ BT_FRAME(68)
+ BT_FRAME(69)
+
+ BT_FRAME(70)
+ BT_FRAME(71)
+ BT_FRAME(72)
+ BT_FRAME(73)
+ BT_FRAME(74)
+ BT_FRAME(75)
+ BT_FRAME(76)
+ BT_FRAME(77)
+ BT_FRAME(78)
+ BT_FRAME(79)
+
+ BT_FRAME(80)
+ BT_FRAME(81)
+ BT_FRAME(82)
+ BT_FRAME(83)
+ BT_FRAME(84)
+ BT_FRAME(85)
+ BT_FRAME(86)
+ BT_FRAME(87)
+ BT_FRAME(88)
+ BT_FRAME(89)
+
+ BT_FRAME(90)
+ BT_FRAME(91)
+ BT_FRAME(92)
+ BT_FRAME(93)
+ BT_FRAME(94)
+ BT_FRAME(95)
+ BT_FRAME(96)
+ BT_FRAME(97)
+ BT_FRAME(98)
+ BT_FRAME(99)
+
+ BT_FRAME(100)
+ BT_FRAME(101)
+ BT_FRAME(102)
+ BT_FRAME(103)
+ BT_FRAME(104)
+ BT_FRAME(105)
+ BT_FRAME(106)
+ BT_FRAME(107)
+ BT_FRAME(108)
+ BT_FRAME(109)
+
+ BT_FRAME(110)
+ BT_FRAME(111)
+ BT_FRAME(112)
+ BT_FRAME(113)
+ BT_FRAME(114)
+ BT_FRAME(115)
+ BT_FRAME(116)
+ BT_FRAME(117)
+ BT_FRAME(118)
+ BT_FRAME(119)
+
+ BT_FRAME(120)
+ BT_FRAME(121)
+ BT_FRAME(122)
+ BT_FRAME(123)
+ BT_FRAME(124)
+ BT_FRAME(125)
+ BT_FRAME(126)
+ BT_FRAME(127)
+#undef BT_FRAME
+}
+#else
+static void
+prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) {
+ cassert(config_prof);
+ not_reached();
+}
+#endif
+
+void
+prof_backtrace(tsd_t *tsd, prof_bt_t *bt) {
+ cassert(config_prof);
+ prof_backtrace_hook_t prof_backtrace_hook = prof_backtrace_hook_get();
+ assert(prof_backtrace_hook != NULL);
+
+ pre_reentrancy(tsd, NULL);
+ prof_backtrace_hook(bt->vec, &bt->len, PROF_BT_MAX);
+ post_reentrancy(tsd);
+}
+
+void
+prof_hooks_init() {
+ prof_backtrace_hook_set(&prof_backtrace_impl);
+ prof_dump_hook_set(NULL);
+}
+
+void
+prof_unwind_init() {
+#ifdef JEMALLOC_PROF_LIBGCC
+ /*
+ * Cause the backtracing machinery to allocate its internal
+ * state before enabling profiling.
+ */
+ _Unwind_Backtrace(prof_unwind_init_callback, NULL);
+#endif
+}
+
+static int
+prof_sys_thread_name_read_impl(char *buf, size_t limit) {
+#if defined(JEMALLOC_HAVE_PTHREAD_GETNAME_NP)
+ return pthread_getname_np(pthread_self(), buf, limit);
+#elif defined(JEMALLOC_HAVE_PTHREAD_GET_NAME_NP)
+ pthread_get_name_np(pthread_self(), buf, limit);
+ return 0;
+#else
+ return ENOSYS;
+#endif
+}
+prof_sys_thread_name_read_t *JET_MUTABLE prof_sys_thread_name_read =
+ prof_sys_thread_name_read_impl;
+
+void
+prof_sys_thread_name_fetch(tsd_t *tsd) {
+#define THREAD_NAME_MAX_LEN 16
+ char buf[THREAD_NAME_MAX_LEN];
+ if (!prof_sys_thread_name_read(buf, THREAD_NAME_MAX_LEN)) {
+ prof_thread_name_set_impl(tsd, buf);
+ }
+#undef THREAD_NAME_MAX_LEN
+}
+
+int
+prof_getpid(void) {
+#ifdef _WIN32
+ return GetCurrentProcessId();
+#else
+ return getpid();
+#endif
+}
+
+/*
+ * This buffer is rather large for stack allocation, so use a single buffer for
+ * all profile dumps; protected by prof_dump_mtx.
+ */
+static char prof_dump_buf[PROF_DUMP_BUFSIZE];
+
+typedef struct prof_dump_arg_s prof_dump_arg_t;
+struct prof_dump_arg_s {
+ /*
+ * Whether errors should be handled locally: if true, then we print out an
+ * error message and abort (if opt_abort is true) when an error occurs,
+ * and we also report the error back to the caller in the end;
+ * if false, then we only report the error back to the caller in the
+ * end.
+ */
+ const bool handle_error_locally;
+ /*
+ * Whether there has been an error in the dumping process, which could
+ * have happened either in file opening or in file writing. When an
+ * error has already occurred, we will stop further writing to the file.
+ */
+ bool error;
+ /* File descriptor of the dump file. */
+ int prof_dump_fd;
+};
+
+static void
+prof_dump_check_possible_error(prof_dump_arg_t *arg, bool err_cond,
+ const char *format, ...) {
+ assert(!arg->error);
+ if (!err_cond) {
+ return;
+ }
+
+ arg->error = true;
+ if (!arg->handle_error_locally) {
+ return;
+ }
+
+ va_list ap;
+ char buf[PROF_PRINTF_BUFSIZE];
+ va_start(ap, format);
+ malloc_vsnprintf(buf, sizeof(buf), format, ap);
+ va_end(ap);
+ malloc_write(buf);
+
+ if (opt_abort) {
+ abort();
+ }
+}
+
+static int
+prof_dump_open_file_impl(const char *filename, int mode) {
+ return creat(filename, mode);
+}
+prof_dump_open_file_t *JET_MUTABLE prof_dump_open_file =
+ prof_dump_open_file_impl;
+
+static void
+prof_dump_open(prof_dump_arg_t *arg, const char *filename) {
+ arg->prof_dump_fd = prof_dump_open_file(filename, 0644);
+ prof_dump_check_possible_error(arg, arg->prof_dump_fd == -1,
+ "<jemalloc>: failed to open \"%s\"\n", filename);
+}
+
+prof_dump_write_file_t *JET_MUTABLE prof_dump_write_file = malloc_write_fd;
+
+static void
+prof_dump_flush(void *opaque, const char *s) {
+ cassert(config_prof);
+ prof_dump_arg_t *arg = (prof_dump_arg_t *)opaque;
+ if (!arg->error) {
+ ssize_t err = prof_dump_write_file(arg->prof_dump_fd, s,
+ strlen(s));
+ prof_dump_check_possible_error(arg, err == -1,
+ "<jemalloc>: failed to write during heap profile flush\n");
+ }
+}
+
+static void
+prof_dump_close(prof_dump_arg_t *arg) {
+ if (arg->prof_dump_fd != -1) {
+ close(arg->prof_dump_fd);
+ }
+}
+
+#ifndef _WIN32
+JEMALLOC_FORMAT_PRINTF(1, 2)
+static int
+prof_open_maps_internal(const char *format, ...) {
+ int mfd;
+ va_list ap;
+ char filename[PATH_MAX + 1];
+
+ va_start(ap, format);
+ malloc_vsnprintf(filename, sizeof(filename), format, ap);
+ va_end(ap);
+
+#if defined(O_CLOEXEC)
+ mfd = open(filename, O_RDONLY | O_CLOEXEC);
+#else
+ mfd = open(filename, O_RDONLY);
+ if (mfd != -1) {
+ fcntl(mfd, F_SETFD, fcntl(mfd, F_GETFD) | FD_CLOEXEC);
+ }
+#endif
+
+ return mfd;
+}
+#endif
+
+static int
+prof_dump_open_maps_impl() {
+ int mfd;
+
+ cassert(config_prof);
+#if defined(__FreeBSD__) || defined(__DragonFly__)
+ mfd = prof_open_maps_internal("/proc/curproc/map");
+#elif defined(_WIN32)
+ mfd = -1; /* Not implemented. */
+#else
+ int pid = prof_getpid();
+
+ mfd = prof_open_maps_internal("/proc/%d/task/%d/maps", pid, pid);
+ if (mfd == -1) {
+ mfd = prof_open_maps_internal("/proc/%d/maps", pid);
+ }
+#endif
+ return mfd;
+}
+prof_dump_open_maps_t *JET_MUTABLE prof_dump_open_maps =
+ prof_dump_open_maps_impl;
+
+static ssize_t
+prof_dump_read_maps_cb(void *read_cbopaque, void *buf, size_t limit) {
+ int mfd = *(int *)read_cbopaque;
+ assert(mfd != -1);
+ return malloc_read_fd(mfd, buf, limit);
+}
+
+static void
+prof_dump_maps(buf_writer_t *buf_writer) {
+ int mfd = prof_dump_open_maps();
+ if (mfd == -1) {
+ return;
+ }
+
+ buf_writer_cb(buf_writer, "\nMAPPED_LIBRARIES:\n");
+ buf_writer_pipe(buf_writer, prof_dump_read_maps_cb, &mfd);
+ close(mfd);
+}
+
+static bool
+prof_dump(tsd_t *tsd, bool propagate_err, const char *filename,
+ bool leakcheck) {
+ cassert(config_prof);
+ assert(tsd_reentrancy_level_get(tsd) == 0);
+
+ prof_tdata_t * tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL) {
+ return true;
+ }
+
+ prof_dump_arg_t arg = {/* handle_error_locally */ !propagate_err,
+ /* error */ false, /* prof_dump_fd */ -1};
+
+ pre_reentrancy(tsd, NULL);
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
+
+ prof_dump_open(&arg, filename);
+ buf_writer_t buf_writer;
+ bool err = buf_writer_init(tsd_tsdn(tsd), &buf_writer, prof_dump_flush,
+ &arg, prof_dump_buf, PROF_DUMP_BUFSIZE);
+ assert(!err);
+ prof_dump_impl(tsd, buf_writer_cb, &buf_writer, tdata, leakcheck);
+ prof_dump_maps(&buf_writer);
+ buf_writer_terminate(tsd_tsdn(tsd), &buf_writer);
+ prof_dump_close(&arg);
+
+ prof_dump_hook_t dump_hook = prof_dump_hook_get();
+ if (dump_hook != NULL) {
+ dump_hook(filename);
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
+ post_reentrancy(tsd);
+
+ return arg.error;
+}
+
+/*
+ * If profiling is off, then PROF_DUMP_FILENAME_LEN is 1, so we'll end up
+ * calling strncpy with a size of 0, which triggers a -Wstringop-truncation
+ * warning (strncpy can never actually be called in this case, since we bail out
+ * much earlier when config_prof is false). This function works around the
+ * warning to let us leave the warning on.
+ */
+static inline void
+prof_strncpy(char *UNUSED dest, const char *UNUSED src, size_t UNUSED size) {
+ cassert(config_prof);
+#ifdef JEMALLOC_PROF
+ strncpy(dest, src, size);
+#endif
+}
+
+static const char *
+prof_prefix_get(tsdn_t* tsdn) {
+ malloc_mutex_assert_owner(tsdn, &prof_dump_filename_mtx);
+
+ return prof_prefix == NULL ? opt_prof_prefix : prof_prefix;
+}
+
+static bool
+prof_prefix_is_empty(tsdn_t *tsdn) {
+ malloc_mutex_lock(tsdn, &prof_dump_filename_mtx);
+ bool ret = (prof_prefix_get(tsdn)[0] == '\0');
+ malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);
+ return ret;
+}
+
+#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
+#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
+static void
+prof_dump_filename(tsd_t *tsd, char *filename, char v, uint64_t vseq) {
+ cassert(config_prof);
+
+ assert(tsd_reentrancy_level_get(tsd) == 0);
+ const char *prefix = prof_prefix_get(tsd_tsdn(tsd));
+
+ if (vseq != VSEQ_INVALID) {
+ /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
+ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
+ "%s.%d.%"FMTu64".%c%"FMTu64".heap", prefix, prof_getpid(),
+ prof_dump_seq, v, vseq);
+ } else {
+ /* "<prefix>.<pid>.<seq>.<v>.heap" */
+ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
+ "%s.%d.%"FMTu64".%c.heap", prefix, prof_getpid(),
+ prof_dump_seq, v);
+ }
+ prof_dump_seq++;
+}
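+
+/*
+ * For example, with the default "jeprof" prefix and pid 1234, the first
+ * interval-triggered dump would come out roughly as "jeprof.1234.0.i0.heap",
+ * and the final dump as "jeprof.1234.<seq>.f.heap" (illustrative values
+ * only).
+ */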
+
+void
+prof_get_default_filename(tsdn_t *tsdn, char *filename, uint64_t ind) {
+ malloc_mutex_lock(tsdn, &prof_dump_filename_mtx);
+ malloc_snprintf(filename, PROF_DUMP_FILENAME_LEN,
+ "%s.%d.%"FMTu64".json", prof_prefix_get(tsdn), prof_getpid(), ind);
+ malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);
+}
+
+void
+prof_fdump_impl(tsd_t *tsd) {
+ char filename[DUMP_FILENAME_BUFSIZE];
+
+ assert(!prof_prefix_is_empty(tsd_tsdn(tsd)));
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
+ prof_dump_filename(tsd, filename, 'f', VSEQ_INVALID);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
+ prof_dump(tsd, false, filename, opt_prof_leak);
+}
+
+bool
+prof_prefix_set(tsdn_t *tsdn, const char *prefix) {
+ cassert(config_prof);
+ ctl_mtx_assert_held(tsdn);
+ malloc_mutex_lock(tsdn, &prof_dump_filename_mtx);
+ if (prof_prefix == NULL) {
+ malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);
+ /* Everything is still guarded by ctl_mtx. */
+ char *buffer = base_alloc(tsdn, prof_base,
+ PROF_DUMP_FILENAME_LEN, QUANTUM);
+ if (buffer == NULL) {
+ return true;
+ }
+ malloc_mutex_lock(tsdn, &prof_dump_filename_mtx);
+ prof_prefix = buffer;
+ }
+ assert(prof_prefix != NULL);
+
+ prof_strncpy(prof_prefix, prefix, PROF_DUMP_FILENAME_LEN - 1);
+ prof_prefix[PROF_DUMP_FILENAME_LEN - 1] = '\0';
+ malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);
+
+ return false;
+}
+
+void
+prof_idump_impl(tsd_t *tsd) {
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
+ if (prof_prefix_get(tsd_tsdn(tsd))[0] == '\0') {
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
+ return;
+ }
+ char filename[PATH_MAX + 1];
+ prof_dump_filename(tsd, filename, 'i', prof_dump_iseq);
+ prof_dump_iseq++;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
+ prof_dump(tsd, false, filename, false);
+}
+
+bool
+prof_mdump_impl(tsd_t *tsd, const char *filename) {
+ char filename_buf[DUMP_FILENAME_BUFSIZE];
+ if (filename == NULL) {
+ /* No filename specified, so automatically generate one. */
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
+ if (prof_prefix_get(tsd_tsdn(tsd))[0] == '\0') {
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
+ return true;
+ }
+ prof_dump_filename(tsd, filename_buf, 'm', prof_dump_mseq);
+ prof_dump_mseq++;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
+ filename = filename_buf;
+ }
+ return prof_dump(tsd, true, filename, false);
+}
+
+void
+prof_gdump_impl(tsd_t *tsd) {
+ tsdn_t *tsdn = tsd_tsdn(tsd);
+ malloc_mutex_lock(tsdn, &prof_dump_filename_mtx);
+ if (prof_prefix_get(tsdn)[0] == '\0') {
+ malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);
+ return;
+ }
+ char filename[DUMP_FILENAME_BUFSIZE];
+ prof_dump_filename(tsd, filename, 'u', prof_dump_useq);
+ prof_dump_useq++;
+ malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);
+ prof_dump(tsd, false, filename, false);
+}
diff --git a/deps/jemalloc/src/psset.c b/deps/jemalloc/src/psset.c
new file mode 100644
index 000000000..9a8f054f1
--- /dev/null
+++ b/deps/jemalloc/src/psset.c
@@ -0,0 +1,385 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/psset.h"
+
+#include "jemalloc/internal/fb.h"
+
+void
+psset_init(psset_t *psset) {
+ for (unsigned i = 0; i < PSSET_NPSIZES; i++) {
+ hpdata_age_heap_new(&psset->pageslabs[i]);
+ }
+ fb_init(psset->pageslab_bitmap, PSSET_NPSIZES);
+ memset(&psset->merged_stats, 0, sizeof(psset->merged_stats));
+ memset(&psset->stats, 0, sizeof(psset->stats));
+ hpdata_empty_list_init(&psset->empty);
+ for (int i = 0; i < PSSET_NPURGE_LISTS; i++) {
+ hpdata_purge_list_init(&psset->to_purge[i]);
+ }
+ fb_init(psset->purge_bitmap, PSSET_NPURGE_LISTS);
+ hpdata_hugify_list_init(&psset->to_hugify);
+}
+
+static void
+psset_bin_stats_accum(psset_bin_stats_t *dst, psset_bin_stats_t *src) {
+ dst->npageslabs += src->npageslabs;
+ dst->nactive += src->nactive;
+ dst->ndirty += src->ndirty;
+}
+
+void
+psset_stats_accum(psset_stats_t *dst, psset_stats_t *src) {
+ psset_bin_stats_accum(&dst->full_slabs[0], &src->full_slabs[0]);
+ psset_bin_stats_accum(&dst->full_slabs[1], &src->full_slabs[1]);
+ psset_bin_stats_accum(&dst->empty_slabs[0], &src->empty_slabs[0]);
+ psset_bin_stats_accum(&dst->empty_slabs[1], &src->empty_slabs[1]);
+ for (pszind_t i = 0; i < PSSET_NPSIZES; i++) {
+ psset_bin_stats_accum(&dst->nonfull_slabs[i][0],
+ &src->nonfull_slabs[i][0]);
+ psset_bin_stats_accum(&dst->nonfull_slabs[i][1],
+ &src->nonfull_slabs[i][1]);
+ }
+}
+
+/*
+ * The stats maintenance strategy is to remove a pageslab's contribution to the
+ * stats when we call psset_update_begin, and re-add it (to a potentially new
+ * bin) when we call psset_update_end.
+ */
+JEMALLOC_ALWAYS_INLINE void
+psset_bin_stats_insert_remove(psset_t *psset, psset_bin_stats_t *binstats,
+ hpdata_t *ps, bool insert) {
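+ /*
+ * A mul of (size_t)-1 turns the additions below into subtractions via
+ * unsigned wraparound, so the insert and remove paths share one body.
+ */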
+ size_t mul = insert ? (size_t)1 : (size_t)-1;
+ size_t huge_idx = (size_t)hpdata_huge_get(ps);
+
+ binstats[huge_idx].npageslabs += mul * 1;
+ binstats[huge_idx].nactive += mul * hpdata_nactive_get(ps);
+ binstats[huge_idx].ndirty += mul * hpdata_ndirty_get(ps);
+
+ psset->merged_stats.npageslabs += mul * 1;
+ psset->merged_stats.nactive += mul * hpdata_nactive_get(ps);
+ psset->merged_stats.ndirty += mul * hpdata_ndirty_get(ps);
+
+ if (config_debug) {
+ psset_bin_stats_t check_stats = {0};
+ for (size_t huge = 0; huge <= 1; huge++) {
+ psset_bin_stats_accum(&check_stats,
+ &psset->stats.full_slabs[huge]);
+ psset_bin_stats_accum(&check_stats,
+ &psset->stats.empty_slabs[huge]);
+ for (pszind_t pind = 0; pind < PSSET_NPSIZES; pind++) {
+ psset_bin_stats_accum(&check_stats,
+ &psset->stats.nonfull_slabs[pind][huge]);
+ }
+ }
+ assert(psset->merged_stats.npageslabs
+ == check_stats.npageslabs);
+ assert(psset->merged_stats.nactive == check_stats.nactive);
+ assert(psset->merged_stats.ndirty == check_stats.ndirty);
+ }
+}
+
+static void
+psset_bin_stats_insert(psset_t *psset, psset_bin_stats_t *binstats,
+ hpdata_t *ps) {
+ psset_bin_stats_insert_remove(psset, binstats, ps, true);
+}
+
+static void
+psset_bin_stats_remove(psset_t *psset, psset_bin_stats_t *binstats,
+ hpdata_t *ps) {
+ psset_bin_stats_insert_remove(psset, binstats, ps, false);
+}
+
+static void
+psset_hpdata_heap_remove(psset_t *psset, pszind_t pind, hpdata_t *ps) {
+ hpdata_age_heap_remove(&psset->pageslabs[pind], ps);
+ if (hpdata_age_heap_empty(&psset->pageslabs[pind])) {
+ fb_unset(psset->pageslab_bitmap, PSSET_NPSIZES, (size_t)pind);
+ }
+}
+
+static void
+psset_hpdata_heap_insert(psset_t *psset, pszind_t pind, hpdata_t *ps) {
+ if (hpdata_age_heap_empty(&psset->pageslabs[pind])) {
+ fb_set(psset->pageslab_bitmap, PSSET_NPSIZES, (size_t)pind);
+ }
+ hpdata_age_heap_insert(&psset->pageslabs[pind], ps);
+}
+
+static void
+psset_stats_insert(psset_t* psset, hpdata_t *ps) {
+ if (hpdata_empty(ps)) {
+ psset_bin_stats_insert(psset, psset->stats.empty_slabs, ps);
+ } else if (hpdata_full(ps)) {
+ psset_bin_stats_insert(psset, psset->stats.full_slabs, ps);
+ } else {
+ size_t longest_free_range = hpdata_longest_free_range_get(ps);
+
+ pszind_t pind = sz_psz2ind(sz_psz_quantize_floor(
+ longest_free_range << LG_PAGE));
+ assert(pind < PSSET_NPSIZES);
+
+ psset_bin_stats_insert(psset, psset->stats.nonfull_slabs[pind],
+ ps);
+ }
+}
+
+static void
+psset_stats_remove(psset_t *psset, hpdata_t *ps) {
+ if (hpdata_empty(ps)) {
+ psset_bin_stats_remove(psset, psset->stats.empty_slabs, ps);
+ } else if (hpdata_full(ps)) {
+ psset_bin_stats_remove(psset, psset->stats.full_slabs, ps);
+ } else {
+ size_t longest_free_range = hpdata_longest_free_range_get(ps);
+
+ pszind_t pind = sz_psz2ind(sz_psz_quantize_floor(
+ longest_free_range << LG_PAGE));
+ assert(pind < PSSET_NPSIZES);
+
+ psset_bin_stats_remove(psset, psset->stats.nonfull_slabs[pind],
+ ps);
+ }
+}
+
+/*
+ * Put ps into some container so that it can be found during future allocation
+ * requests.
+ */
+static void
+psset_alloc_container_insert(psset_t *psset, hpdata_t *ps) {
+ assert(!hpdata_in_psset_alloc_container_get(ps));
+ hpdata_in_psset_alloc_container_set(ps, true);
+ if (hpdata_empty(ps)) {
+ /*
+ * This prepend, paired with popping the head in psset_fit,
+ * means we implement LIFO ordering for the empty slabs set,
+ * which seems reasonable.
+ */
+ hpdata_empty_list_prepend(&psset->empty, ps);
+ } else if (hpdata_full(ps)) {
+ /*
+ * We don't need to keep track of the full slabs; we're never
+ * going to return them from a psset_pick_alloc call.
+ */
+ } else {
+ size_t longest_free_range = hpdata_longest_free_range_get(ps);
+
+ pszind_t pind = sz_psz2ind(sz_psz_quantize_floor(
+ longest_free_range << LG_PAGE));
+ assert(pind < PSSET_NPSIZES);
+
+ psset_hpdata_heap_insert(psset, pind, ps);
+ }
+}
+
+/* Remove ps from those collections. */
+static void
+psset_alloc_container_remove(psset_t *psset, hpdata_t *ps) {
+ assert(hpdata_in_psset_alloc_container_get(ps));
+ hpdata_in_psset_alloc_container_set(ps, false);
+
+ if (hpdata_empty(ps)) {
+ hpdata_empty_list_remove(&psset->empty, ps);
+ } else if (hpdata_full(ps)) {
+ /* Same as above -- do nothing in this case. */
+ } else {
+ size_t longest_free_range = hpdata_longest_free_range_get(ps);
+
+ pszind_t pind = sz_psz2ind(sz_psz_quantize_floor(
+ longest_free_range << LG_PAGE));
+ assert(pind < PSSET_NPSIZES);
+
+ psset_hpdata_heap_remove(psset, pind, ps);
+ }
+}
+
+static size_t
+psset_purge_list_ind(hpdata_t *ps) {
+ size_t ndirty = hpdata_ndirty_get(ps);
+	/* A slab with no dirty pages should never be purgeable. */
+ assert(ndirty > 0);
+ /*
+ * Higher indices correspond to lists we'd like to purge earlier; make
+ * the two highest indices correspond to empty lists, which we attempt
+ * to purge before purging any non-empty list. This has two advantages:
+ * - Empty page slabs are the least likely to get reused (we'll only
+ * pick them for an allocation if we have no other choice).
+ * - Empty page slabs can purge every dirty page they contain in a
+ * single call, which is not usually the case.
+ *
+	 * We purge hugified empty slabs before nonhugified ones, on the basis
+ * that they are fully dirty, while nonhugified slabs might not be, so
+ * we free up more pages more easily.
+ */
+ if (hpdata_nactive_get(ps) == 0) {
+ if (hpdata_huge_get(ps)) {
+ return PSSET_NPURGE_LISTS - 1;
+ } else {
+ return PSSET_NPURGE_LISTS - 2;
+ }
+ }
+
+ pszind_t pind = sz_psz2ind(sz_psz_quantize_floor(ndirty << LG_PAGE));
+ /*
+ * For non-empty slabs, we may reuse them again. Prefer purging
+	 * non-hugified slabs before hugified ones, among pages of
+ * similar dirtiness. We still get some benefit from the hugification.
+ */
+ return (size_t)pind * 2 + (hpdata_huge_get(ps) ? 0 : 1);
+}
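A toy restatement (not jemalloc code) of the index mapping above. TOY_NPSIZES and dirty_ind stand in for PSSET_NPSIZES and the psz index the real code derives via sz_psz2ind(sz_psz_quantize_floor(ndirty << LG_PAGE)); the list-count formula below is an assumption consistent with the indices used here.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define TOY_NPSIZES 4
#define TOY_NPURGE_LISTS (2 * TOY_NPSIZES + 2)	/* assumed relationship */

static size_t
toy_purge_list_ind(size_t nactive, size_t dirty_ind, bool huge) {
	if (nactive == 0) {
		/* Empty slabs land in the two highest (purged-first) lists. */
		return huge ? TOY_NPURGE_LISTS - 1 : TOY_NPURGE_LISTS - 2;
	}
	/* Non-empty: within a dirtiness bucket, non-huge slabs go first. */
	return dirty_ind * 2 + (huge ? 0 : 1);
}

int
main(void) {
	printf("empty, huge       -> list %zu\n", toy_purge_list_ind(0, 0, true));
	printf("empty, non-huge   -> list %zu\n", toy_purge_list_ind(0, 0, false));
	printf("dirty=3, non-huge -> list %zu\n", toy_purge_list_ind(8, 3, false));
	printf("dirty=3, huge     -> list %zu\n", toy_purge_list_ind(8, 3, true));
	return 0;
}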
+
+static void
+psset_maybe_remove_purge_list(psset_t *psset, hpdata_t *ps) {
+ /*
+ * Remove the hpdata from its purge list (if it's in one). Even if it's
+ * going to stay in the same one, by appending it during
+ * psset_update_end, we move it to the end of its queue, so that we
+ * purge LRU within a given dirtiness bucket.
+ */
+ if (hpdata_purge_allowed_get(ps)) {
+ size_t ind = psset_purge_list_ind(ps);
+ hpdata_purge_list_t *purge_list = &psset->to_purge[ind];
+ hpdata_purge_list_remove(purge_list, ps);
+ if (hpdata_purge_list_empty(purge_list)) {
+ fb_unset(psset->purge_bitmap, PSSET_NPURGE_LISTS, ind);
+ }
+ }
+}
+
+static void
+psset_maybe_insert_purge_list(psset_t *psset, hpdata_t *ps) {
+ if (hpdata_purge_allowed_get(ps)) {
+ size_t ind = psset_purge_list_ind(ps);
+ hpdata_purge_list_t *purge_list = &psset->to_purge[ind];
+ if (hpdata_purge_list_empty(purge_list)) {
+ fb_set(psset->purge_bitmap, PSSET_NPURGE_LISTS, ind);
+ }
+ hpdata_purge_list_append(purge_list, ps);
+ }
+
+}
+
+void
+psset_update_begin(psset_t *psset, hpdata_t *ps) {
+ hpdata_assert_consistent(ps);
+ assert(hpdata_in_psset_get(ps));
+ hpdata_updating_set(ps, true);
+ psset_stats_remove(psset, ps);
+ if (hpdata_in_psset_alloc_container_get(ps)) {
+ /*
+ * Some metadata updates can break alloc container invariants
+ * (e.g. the longest free range determines the hpdata_heap_t the
+ * pageslab lives in).
+ */
+ assert(hpdata_alloc_allowed_get(ps));
+ psset_alloc_container_remove(psset, ps);
+ }
+ psset_maybe_remove_purge_list(psset, ps);
+ /*
+ * We don't update presence in the hugify list; we try to keep it FIFO,
+ * even in the presence of other metadata updates. We'll update
+ * presence at the end of the metadata update if necessary.
+ */
+}
+
+void
+psset_update_end(psset_t *psset, hpdata_t *ps) {
+ assert(hpdata_in_psset_get(ps));
+ hpdata_updating_set(ps, false);
+ psset_stats_insert(psset, ps);
+
+ /*
+ * The update begin should have removed ps from whatever alloc container
+ * it was in.
+ */
+ assert(!hpdata_in_psset_alloc_container_get(ps));
+ if (hpdata_alloc_allowed_get(ps)) {
+ psset_alloc_container_insert(psset, ps);
+ }
+ psset_maybe_insert_purge_list(psset, ps);
+
+ if (hpdata_hugify_allowed_get(ps)
+ && !hpdata_in_psset_hugify_container_get(ps)) {
+ hpdata_in_psset_hugify_container_set(ps, true);
+ hpdata_hugify_list_append(&psset->to_hugify, ps);
+ } else if (!hpdata_hugify_allowed_get(ps)
+ && hpdata_in_psset_hugify_container_get(ps)) {
+ hpdata_in_psset_hugify_container_set(ps, false);
+ hpdata_hugify_list_remove(&psset->to_hugify, ps);
+ }
+ hpdata_assert_consistent(ps);
+}
+
+hpdata_t *
+psset_pick_alloc(psset_t *psset, size_t size) {
+ assert((size & PAGE_MASK) == 0);
+ assert(size <= HUGEPAGE);
+
+ pszind_t min_pind = sz_psz2ind(sz_psz_quantize_ceil(size));
+ pszind_t pind = (pszind_t)fb_ffs(psset->pageslab_bitmap, PSSET_NPSIZES,
+ (size_t)min_pind);
+ if (pind == PSSET_NPSIZES) {
+ return hpdata_empty_list_first(&psset->empty);
+ }
+ hpdata_t *ps = hpdata_age_heap_first(&psset->pageslabs[pind]);
+ if (ps == NULL) {
+ return NULL;
+ }
+
+ hpdata_assert_consistent(ps);
+
+ return ps;
+}
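A standalone toy (not jemalloc code) of the first-fit scan above: find the lowest nonempty size bucket at or above the minimum index, with a plain unsigned long standing in for jemalloc's flat bitmap and fb_ffs().

#include <stdio.h>

#define TOY_NSIZES 16

static int
toy_ffs_from(unsigned long nonempty, int min_ind) {
	for (int i = min_ind; i < TOY_NSIZES; i++) {
		if (nonempty & (1UL << i)) {
			return i;
		}
	}
	return TOY_NSIZES;	/* not found; the caller falls back to empty slabs */
}

int
main(void) {
	unsigned long nonempty = (1UL << 3) | (1UL << 9);
	printf("min 2  -> bucket %d\n", toy_ffs_from(nonempty, 2));	/* 3 */
	printf("min 5  -> bucket %d\n", toy_ffs_from(nonempty, 5));	/* 9 */
	printf("min 10 -> bucket %d\n", toy_ffs_from(nonempty, 10));	/* 16 */
	return 0;
}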
+
+hpdata_t *
+psset_pick_purge(psset_t *psset) {
+ ssize_t ind_ssz = fb_fls(psset->purge_bitmap, PSSET_NPURGE_LISTS,
+ PSSET_NPURGE_LISTS - 1);
+ if (ind_ssz < 0) {
+ return NULL;
+ }
+ pszind_t ind = (pszind_t)ind_ssz;
+ assert(ind < PSSET_NPURGE_LISTS);
+ hpdata_t *ps = hpdata_purge_list_first(&psset->to_purge[ind]);
+ assert(ps != NULL);
+ return ps;
+}
+
+hpdata_t *
+psset_pick_hugify(psset_t *psset) {
+ return hpdata_hugify_list_first(&psset->to_hugify);
+}
+
+void
+psset_insert(psset_t *psset, hpdata_t *ps) {
+ hpdata_in_psset_set(ps, true);
+
+ psset_stats_insert(psset, ps);
+ if (hpdata_alloc_allowed_get(ps)) {
+ psset_alloc_container_insert(psset, ps);
+ }
+ psset_maybe_insert_purge_list(psset, ps);
+
+ if (hpdata_hugify_allowed_get(ps)) {
+ hpdata_in_psset_hugify_container_set(ps, true);
+ hpdata_hugify_list_append(&psset->to_hugify, ps);
+ }
+}
+
+void
+psset_remove(psset_t *psset, hpdata_t *ps) {
+ hpdata_in_psset_set(ps, false);
+
+ psset_stats_remove(psset, ps);
+ if (hpdata_in_psset_alloc_container_get(ps)) {
+ psset_alloc_container_remove(psset, ps);
+ }
+ psset_maybe_remove_purge_list(psset, ps);
+ if (hpdata_in_psset_hugify_container_get(ps)) {
+ hpdata_in_psset_hugify_container_set(ps, false);
+ hpdata_hugify_list_remove(&psset->to_hugify, ps);
+ }
+}
diff --git a/deps/jemalloc/src/rtree.c b/deps/jemalloc/src/rtree.c
index 4ae41fe2f..6496b5afd 100644
--- a/deps/jemalloc/src/rtree.c
+++ b/deps/jemalloc/src/rtree.c
@@ -1,4 +1,3 @@
-#define JEMALLOC_RTREE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
@@ -10,7 +9,7 @@
* used.
*/
bool
-rtree_new(rtree_t *rtree, bool zeroed) {
+rtree_new(rtree_t *rtree, base_t *base, bool zeroed) {
#ifdef JEMALLOC_JET
if (!zeroed) {
memset(rtree, 0, sizeof(rtree_t)); /* Clear root. */
@@ -18,6 +17,7 @@ rtree_new(rtree_t *rtree, bool zeroed) {
#else
assert(zeroed);
#endif
+ rtree->base = base;
if (malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE,
malloc_mutex_rank_exclusive)) {
@@ -28,75 +28,16 @@ rtree_new(rtree_t *rtree, bool zeroed) {
}
static rtree_node_elm_t *
-rtree_node_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
- return (rtree_node_elm_t *)base_alloc(tsdn, b0get(), nelms *
- sizeof(rtree_node_elm_t), CACHELINE);
+rtree_node_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
+ return (rtree_node_elm_t *)base_alloc(tsdn, rtree->base,
+ nelms * sizeof(rtree_node_elm_t), CACHELINE);
}
-rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc = rtree_node_alloc_impl;
-
-static void
-rtree_node_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *node) {
- /* Nodes are never deleted during normal operation. */
- not_reached();
-}
-rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc =
- rtree_node_dalloc_impl;
static rtree_leaf_elm_t *
-rtree_leaf_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
- return (rtree_leaf_elm_t *)base_alloc(tsdn, b0get(), nelms *
- sizeof(rtree_leaf_elm_t), CACHELINE);
-}
-rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc = rtree_leaf_alloc_impl;
-
-static void
-rtree_leaf_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *leaf) {
- /* Leaves are never deleted during normal operation. */
- not_reached();
+rtree_leaf_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
+ return (rtree_leaf_elm_t *)base_alloc(tsdn, rtree->base,
+ nelms * sizeof(rtree_leaf_elm_t), CACHELINE);
}
-rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc =
- rtree_leaf_dalloc_impl;
-
-#ifdef JEMALLOC_JET
-# if RTREE_HEIGHT > 1
-static void
-rtree_delete_subtree(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *subtree,
- unsigned level) {
- size_t nchildren = ZU(1) << rtree_levels[level].bits;
- if (level + 2 < RTREE_HEIGHT) {
- for (size_t i = 0; i < nchildren; i++) {
- rtree_node_elm_t *node =
- (rtree_node_elm_t *)atomic_load_p(&subtree[i].child,
- ATOMIC_RELAXED);
- if (node != NULL) {
- rtree_delete_subtree(tsdn, rtree, node, level +
- 1);
- }
- }
- } else {
- for (size_t i = 0; i < nchildren; i++) {
- rtree_leaf_elm_t *leaf =
- (rtree_leaf_elm_t *)atomic_load_p(&subtree[i].child,
- ATOMIC_RELAXED);
- if (leaf != NULL) {
- rtree_leaf_dalloc(tsdn, rtree, leaf);
- }
- }
- }
-
- if (subtree != rtree->root) {
- rtree_node_dalloc(tsdn, rtree, subtree);
- }
-}
-# endif
-
-void
-rtree_delete(tsdn_t *tsdn, rtree_t *rtree) {
-# if RTREE_HEIGHT > 1
- rtree_delete_subtree(tsdn, rtree, rtree->root, 0);
-# endif
-}
-#endif
static rtree_node_elm_t *
rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level,
diff --git a/deps/jemalloc/src/safety_check.c b/deps/jemalloc/src/safety_check.c
index 804155dcf..209fdda92 100644
--- a/deps/jemalloc/src/safety_check.c
+++ b/deps/jemalloc/src/safety_check.c
@@ -1,9 +1,21 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
-static void (*safety_check_abort)(const char *message);
+static safety_check_abort_hook_t safety_check_abort;
-void safety_check_set_abort(void (*abort_fn)(const char *)) {
+void safety_check_fail_sized_dealloc(bool current_dealloc, const void *ptr,
+ size_t true_size, size_t input_size) {
+ char *src = current_dealloc ? "the current pointer being freed" :
+ "in thread cache, possibly from previous deallocations";
+
+ safety_check_fail("<jemalloc>: size mismatch detected (true size %zu "
+ "vs input size %zu), likely caused by application sized "
+ "deallocation bugs (source address: %p, %s). Suggest building with "
+ "--enable-debug or address sanitizer for debugging. Abort.\n",
+ true_size, input_size, ptr, src);
+}
+
+void safety_check_set_abort(safety_check_abort_hook_t abort_fn) {
safety_check_abort = abort_fn;
}
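A standalone toy (not jemalloc code) of the kind of mismatch the new message reports: the size passed to a sized deallocation is compared against the size the allocator actually recorded for the pointer.

#include <stdio.h>
#include <stddef.h>

static void
toy_check_sized_dealloc(size_t true_size, size_t input_size, const void *ptr) {
	if (true_size != input_size) {
		fprintf(stderr,
		    "size mismatch (true %zu vs input %zu) for %p\n",
		    true_size, input_size, ptr);
	}
}

int
main(void) {
	int x;
	toy_check_sized_dealloc(32, 32, &x);	/* sizes agree: silent */
	toy_check_sized_dealloc(32, 48, &x);	/* reports a mismatch */
	return 0;
}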
diff --git a/deps/jemalloc/src/san.c b/deps/jemalloc/src/san.c
new file mode 100644
index 000000000..6e5129113
--- /dev/null
+++ b/deps/jemalloc/src/san.c
@@ -0,0 +1,208 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/ehooks.h"
+#include "jemalloc/internal/san.h"
+#include "jemalloc/internal/tsd.h"
+
+/* The sanitizer options. */
+size_t opt_san_guard_large = SAN_GUARD_LARGE_EVERY_N_EXTENTS_DEFAULT;
+size_t opt_san_guard_small = SAN_GUARD_SMALL_EVERY_N_EXTENTS_DEFAULT;
+
+/* Aligned (-1 is off) ptrs will be junked & stashed on dealloc. */
+ssize_t opt_lg_san_uaf_align = SAN_LG_UAF_ALIGN_DEFAULT;
+
+/*
+ * Initialized in san_init(). When disabled, the mask is set to (uintptr_t)-1
+ * to always fail the nonfast_align check.
+ */
+uintptr_t san_cache_bin_nonfast_mask = SAN_CACHE_BIN_NONFAST_MASK_DEFAULT;
+
+static inline void
+san_find_guarded_addr(edata_t *edata, uintptr_t *guard1, uintptr_t *guard2,
+ uintptr_t *addr, size_t size, bool left, bool right) {
+ assert(!edata_guarded_get(edata));
+ assert(size % PAGE == 0);
+ *addr = (uintptr_t)edata_base_get(edata);
+ if (left) {
+ *guard1 = *addr;
+ *addr += SAN_PAGE_GUARD;
+ } else {
+ *guard1 = 0;
+ }
+
+ if (right) {
+ *guard2 = *addr + size;
+ } else {
+ *guard2 = 0;
+ }
+}
+
+static inline void
+san_find_unguarded_addr(edata_t *edata, uintptr_t *guard1, uintptr_t *guard2,
+ uintptr_t *addr, size_t size, bool left, bool right) {
+ assert(edata_guarded_get(edata));
+ assert(size % PAGE == 0);
+ *addr = (uintptr_t)edata_base_get(edata);
+ if (right) {
+ *guard2 = *addr + size;
+ } else {
+ *guard2 = 0;
+ }
+
+ if (left) {
+ *guard1 = *addr - SAN_PAGE_GUARD;
+ assert(*guard1 != 0);
+ *addr = *guard1;
+ } else {
+ *guard1 = 0;
+ }
+}
+
+void
+san_guard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap,
+ bool left, bool right, bool remap) {
+ assert(left || right);
+ if (remap) {
+ emap_deregister_boundary(tsdn, emap, edata);
+ }
+
+ size_t size_with_guards = edata_size_get(edata);
+ size_t usize = (left && right)
+ ? san_two_side_unguarded_sz(size_with_guards)
+ : san_one_side_unguarded_sz(size_with_guards);
+
+ uintptr_t guard1, guard2, addr;
+ san_find_guarded_addr(edata, &guard1, &guard2, &addr, usize, left,
+ right);
+
+ assert(edata_state_get(edata) == extent_state_active);
+ ehooks_guard(tsdn, ehooks, (void *)guard1, (void *)guard2);
+
+ /* Update the guarded addr and usable size of the edata. */
+ edata_size_set(edata, usize);
+ edata_addr_set(edata, (void *)addr);
+ edata_guarded_set(edata, true);
+
+ if (remap) {
+ emap_register_boundary(tsdn, emap, edata, SC_NSIZES,
+ /* slab */ false);
+ }
+}
+
+static void
+san_unguard_pages_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ emap_t *emap, bool left, bool right, bool remap) {
+ assert(left || right);
+ /* Remove the inner boundary which no longer exists. */
+ if (remap) {
+ assert(edata_state_get(edata) == extent_state_active);
+ emap_deregister_boundary(tsdn, emap, edata);
+ } else {
+ assert(edata_state_get(edata) == extent_state_retained);
+ }
+
+ size_t size = edata_size_get(edata);
+ size_t size_with_guards = (left && right)
+ ? san_two_side_guarded_sz(size)
+ : san_one_side_guarded_sz(size);
+
+ uintptr_t guard1, guard2, addr;
+ san_find_unguarded_addr(edata, &guard1, &guard2, &addr, size, left,
+ right);
+
+ ehooks_unguard(tsdn, ehooks, (void *)guard1, (void *)guard2);
+
+ /* Update the true addr and usable size of the edata. */
+ edata_size_set(edata, size_with_guards);
+ edata_addr_set(edata, (void *)addr);
+ edata_guarded_set(edata, false);
+
+ /*
+ * Then re-register the outer boundary including the guards, if
+ * requested.
+ */
+ if (remap) {
+ emap_register_boundary(tsdn, emap, edata, SC_NSIZES,
+ /* slab */ false);
+ }
+}
+
+void
+san_unguard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ emap_t *emap, bool left, bool right) {
+ san_unguard_pages_impl(tsdn, ehooks, edata, emap, left, right,
+ /* remap */ true);
+}
+
+void
+san_unguard_pages_pre_destroy(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ emap_t *emap) {
+ emap_assert_not_mapped(tsdn, emap, edata);
+ /*
+	 * We don't want to touch the emap of about-to-be-destroyed extents, as
+ * they have been unmapped upon eviction from the retained ecache. Also,
+ * we unguard the extents to the right, because retained extents only
+ * own their right guard page per san_bump_alloc's logic.
+ */
+ san_unguard_pages_impl(tsdn, ehooks, edata, emap, /* left */ false,
+ /* right */ true, /* remap */ false);
+}
+
+static bool
+san_stashed_corrupted(void *ptr, size_t size) {
+ if (san_junk_ptr_should_slow()) {
+ for (size_t i = 0; i < size; i++) {
+ if (((char *)ptr)[i] != (char)uaf_detect_junk) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ void *first, *mid, *last;
+ san_junk_ptr_locations(ptr, size, &first, &mid, &last);
+ if (*(uintptr_t *)first != uaf_detect_junk ||
+ *(uintptr_t *)mid != uaf_detect_junk ||
+ *(uintptr_t *)last != uaf_detect_junk) {
+ return true;
+ }
+
+ return false;
+}
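A standalone toy (not jemalloc code) of the sampled check on the fast path above: instead of scanning every byte, only a first, middle and last word are verified. The junk value and the exact offsets are illustrative; the real locations come from san_junk_ptr_locations().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_JUNK ((uintptr_t)0x5a5a5a5a5a5a5a5aULL)	/* placeholder pattern */

static bool
toy_corrupted(const void *ptr, size_t size) {
	uintptr_t w[3];
	memcpy(&w[0], ptr, sizeof(uintptr_t));
	memcpy(&w[1], (const char *)ptr + size / 2 - sizeof(uintptr_t) / 2,
	    sizeof(uintptr_t));
	memcpy(&w[2], (const char *)ptr + size - sizeof(uintptr_t),
	    sizeof(uintptr_t));
	return w[0] != TOY_JUNK || w[1] != TOY_JUNK || w[2] != TOY_JUNK;
}

int
main(void) {
	unsigned char buf[64];
	memset(buf, 0x5a, sizeof(buf));
	printf("freshly junked:    %d\n", toy_corrupted(buf, sizeof(buf)));
	buf[60] = 0;	/* simulate a write-after-free near the end */
	printf("after stray write: %d\n", toy_corrupted(buf, sizeof(buf)));
	return 0;
}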
+
+void
+san_check_stashed_ptrs(void **ptrs, size_t nstashed, size_t usize) {
+ /*
+	 * Verify that the junk-filled & stashed pointers remain unchanged, to
+ * detect write-after-free.
+ */
+ for (size_t n = 0; n < nstashed; n++) {
+ void *stashed = ptrs[n];
+ assert(stashed != NULL);
+ assert(cache_bin_nonfast_aligned(stashed));
+ if (unlikely(san_stashed_corrupted(stashed, usize))) {
+ safety_check_fail("<jemalloc>: Write-after-free "
+ "detected on deallocated pointer %p (size %zu).\n",
+ stashed, usize);
+ }
+ }
+}
+
+void
+tsd_san_init(tsd_t *tsd) {
+ *tsd_san_extents_until_guard_smallp_get(tsd) = opt_san_guard_small;
+ *tsd_san_extents_until_guard_largep_get(tsd) = opt_san_guard_large;
+}
+
+void
+san_init(ssize_t lg_san_uaf_align) {
+ assert(lg_san_uaf_align == -1 || lg_san_uaf_align >= LG_PAGE);
+ if (lg_san_uaf_align == -1) {
+ san_cache_bin_nonfast_mask = (uintptr_t)-1;
+ return;
+ }
+
+ san_cache_bin_nonfast_mask = ((uintptr_t)1 << lg_san_uaf_align) - 1;
+}
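A standalone toy (not jemalloc code) of the mask set up in san_init() just above: with a hypothetical lg alignment of 4, only pointers whose low 4 bits are zero get routed to the slower junk-and-stash path, assuming the check treats a pointer as non-fast when (ptr & mask) == 0, as the comment on san_cache_bin_nonfast_mask implies.

#include <stdint.h>
#include <stdio.h>

int
main(void) {
	int lg_align = 4;					/* hypothetical option value */
	uintptr_t mask = ((uintptr_t)1 << lg_align) - 1;	/* 0xf */
	uintptr_t p1 = 0x1000, p2 = 0x1008;
	printf("mask = %#lx\n", (unsigned long)mask);
	printf("%#lx slow path: %d\n", (unsigned long)p1, (p1 & mask) == 0);
	printf("%#lx slow path: %d\n", (unsigned long)p2, (p2 & mask) == 0);
	return 0;
}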
diff --git a/deps/jemalloc/src/san_bump.c b/deps/jemalloc/src/san_bump.c
new file mode 100644
index 000000000..888974555
--- /dev/null
+++ b/deps/jemalloc/src/san_bump.c
@@ -0,0 +1,104 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/san_bump.h"
+#include "jemalloc/internal/pac.h"
+#include "jemalloc/internal/san.h"
+#include "jemalloc/internal/ehooks.h"
+#include "jemalloc/internal/edata_cache.h"
+
+static bool
+san_bump_grow_locked(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac,
+ ehooks_t *ehooks, size_t size);
+
+edata_t *
+san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t* sba, pac_t *pac,
+ ehooks_t *ehooks, size_t size, bool zero) {
+ assert(san_bump_enabled());
+
+ edata_t* to_destroy;
+ size_t guarded_size = san_one_side_guarded_sz(size);
+
+ malloc_mutex_lock(tsdn, &sba->mtx);
+
+ if (sba->curr_reg == NULL ||
+ edata_size_get(sba->curr_reg) < guarded_size) {
+ /*
+ * If the current region can't accommodate the allocation,
+		 * try replacing it with a larger one, and destroy the current
+		 * region if the replacement succeeds.
+ */
+ to_destroy = sba->curr_reg;
+ bool err = san_bump_grow_locked(tsdn, sba, pac, ehooks,
+ guarded_size);
+ if (err) {
+ goto label_err;
+ }
+ } else {
+ to_destroy = NULL;
+ }
+ assert(guarded_size <= edata_size_get(sba->curr_reg));
+ size_t trail_size = edata_size_get(sba->curr_reg) - guarded_size;
+
+ edata_t* edata;
+ if (trail_size != 0) {
+ edata_t* curr_reg_trail = extent_split_wrapper(tsdn, pac,
+ ehooks, sba->curr_reg, guarded_size, trail_size,
+ /* holding_core_locks */ true);
+ if (curr_reg_trail == NULL) {
+ goto label_err;
+ }
+ edata = sba->curr_reg;
+ sba->curr_reg = curr_reg_trail;
+ } else {
+ edata = sba->curr_reg;
+ sba->curr_reg = NULL;
+ }
+
+ malloc_mutex_unlock(tsdn, &sba->mtx);
+
+ assert(!edata_guarded_get(edata));
+ assert(sba->curr_reg == NULL || !edata_guarded_get(sba->curr_reg));
+ assert(to_destroy == NULL || !edata_guarded_get(to_destroy));
+
+ if (to_destroy != NULL) {
+ extent_destroy_wrapper(tsdn, pac, ehooks, to_destroy);
+ }
+
+ san_guard_pages(tsdn, ehooks, edata, pac->emap, /* left */ false,
+ /* right */ true, /* remap */ true);
+
+ if (extent_commit_zero(tsdn, ehooks, edata, /* commit */ true, zero,
+ /* growing_retained */ false)) {
+ extent_record(tsdn, pac, ehooks, &pac->ecache_retained,
+ edata);
+ return NULL;
+ }
+
+ if (config_prof) {
+ extent_gdump_add(tsdn, edata);
+ }
+
+ return edata;
+label_err:
+ malloc_mutex_unlock(tsdn, &sba->mtx);
+ return NULL;
+}
+
+static bool
+san_bump_grow_locked(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac,
+ ehooks_t *ehooks, size_t size) {
+ malloc_mutex_assert_owner(tsdn, &sba->mtx);
+
+ bool committed = false, zeroed = false;
+ size_t alloc_size = size > SBA_RETAINED_ALLOC_SIZE ? size :
+ SBA_RETAINED_ALLOC_SIZE;
+ assert((alloc_size & PAGE_MASK) == 0);
+ sba->curr_reg = extent_alloc_wrapper(tsdn, pac, ehooks, NULL,
+ alloc_size, PAGE, zeroed, &committed,
+ /* growing_retained */ true);
+ if (sba->curr_reg == NULL) {
+ return true;
+ }
+ return false;
+}
diff --git a/deps/jemalloc/src/sc.c b/deps/jemalloc/src/sc.c
index 89ddb6ba6..e4a94d89f 100644
--- a/deps/jemalloc/src/sc.c
+++ b/deps/jemalloc/src/sc.c
@@ -13,9 +13,7 @@
* at least the damage is compartmentalized to this file.
*/
-sc_data_t sc_data_global;
-
-static size_t
+size_t
reg_size_compute(int lg_base, int lg_delta, int ndelta) {
return (ZU(1) << lg_base) + (ZU(ndelta) << lg_delta);
}
@@ -64,9 +62,8 @@ size_class(
sc->lg_base = lg_base;
sc->lg_delta = lg_delta;
sc->ndelta = ndelta;
- sc->psz = (reg_size_compute(lg_base, lg_delta, ndelta)
- % (ZU(1) << lg_page) == 0);
- size_t size = (ZU(1) << lg_base) + (ZU(ndelta) << lg_delta);
+ size_t size = reg_size_compute(lg_base, lg_delta, ndelta);
+ sc->psz = (size % (ZU(1) << lg_page) == 0);
if (index == 0) {
assert(!sc->psz);
}
@@ -245,7 +242,7 @@ size_classes(
assert(sc_data->lg_large_minclass == SC_LG_LARGE_MINCLASS);
assert(sc_data->large_maxclass == SC_LARGE_MAXCLASS);
- /*
+ /*
* In the allocation fastpath, we want to assume that we can
* unconditionally subtract the requested allocation size from
* a ssize_t, and detect passing through 0 correctly. This
@@ -257,12 +254,8 @@ size_classes(
void
sc_data_init(sc_data_t *sc_data) {
- assert(!sc_data->initialized);
-
- int lg_max_lookup = 12;
-
size_classes(sc_data, LG_SIZEOF_PTR, LG_QUANTUM, SC_LG_TINY_MIN,
- lg_max_lookup, LG_PAGE, 2);
+ SC_LG_MAX_LOOKUP, LG_PAGE, SC_LG_NGROUP);
sc_data->initialized = true;
}
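A standalone check (not jemalloc code) of the formula reg_size_compute() now shares with the psz computation above: size = 2^lg_base + ndelta * 2^lg_delta, and a class counts as page-sized only when that size is a multiple of the page.

#include <stdio.h>
#include <stddef.h>

static size_t
reg_size(int lg_base, int lg_delta, int ndelta) {
	return ((size_t)1 << lg_base) + ((size_t)ndelta << lg_delta);
}

int
main(void) {
	/* e.g. lg_base = 13, lg_delta = 11, ndelta = 3: 8192 + 3 * 2048 */
	size_t size = reg_size(13, 11, 3);
	size_t page = (size_t)1 << 12;		/* assume 4 KiB pages */
	printf("size = %zu, psz = %d\n", size, size % page == 0);
	return 0;
}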
diff --git a/deps/jemalloc/src/sec.c b/deps/jemalloc/src/sec.c
new file mode 100644
index 000000000..df6755904
--- /dev/null
+++ b/deps/jemalloc/src/sec.c
@@ -0,0 +1,422 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/sec.h"
+
+static edata_t *sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
+ size_t alignment, bool zero, bool guarded, bool frequent_reuse,
+ bool *deferred_work_generated);
+static bool sec_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated);
+static bool sec_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ size_t old_size, size_t new_size, bool *deferred_work_generated);
+static void sec_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ bool *deferred_work_generated);
+
+static void
+sec_bin_init(sec_bin_t *bin) {
+ bin->being_batch_filled = false;
+ bin->bytes_cur = 0;
+ edata_list_active_init(&bin->freelist);
+}
+
+bool
+sec_init(tsdn_t *tsdn, sec_t *sec, base_t *base, pai_t *fallback,
+ const sec_opts_t *opts) {
+ assert(opts->max_alloc >= PAGE);
+
+ size_t max_alloc = PAGE_FLOOR(opts->max_alloc);
+ pszind_t npsizes = sz_psz2ind(max_alloc) + 1;
+
+ size_t sz_shards = opts->nshards * sizeof(sec_shard_t);
+ size_t sz_bins = opts->nshards * (size_t)npsizes * sizeof(sec_bin_t);
+ size_t sz_alloc = sz_shards + sz_bins;
+ void *dynalloc = base_alloc(tsdn, base, sz_alloc, CACHELINE);
+ if (dynalloc == NULL) {
+ return true;
+ }
+ sec_shard_t *shard_cur = (sec_shard_t *)dynalloc;
+ sec->shards = shard_cur;
+ sec_bin_t *bin_cur = (sec_bin_t *)&shard_cur[opts->nshards];
+ /* Just for asserts, below. */
+ sec_bin_t *bin_start = bin_cur;
+
+ for (size_t i = 0; i < opts->nshards; i++) {
+ sec_shard_t *shard = shard_cur;
+ shard_cur++;
+ bool err = malloc_mutex_init(&shard->mtx, "sec_shard",
+ WITNESS_RANK_SEC_SHARD, malloc_mutex_rank_exclusive);
+ if (err) {
+ return true;
+ }
+ shard->enabled = true;
+ shard->bins = bin_cur;
+ for (pszind_t j = 0; j < npsizes; j++) {
+ sec_bin_init(&shard->bins[j]);
+ bin_cur++;
+ }
+ shard->bytes_cur = 0;
+ shard->to_flush_next = 0;
+ }
+ /*
+ * Should have exactly matched the bin_start to the first unused byte
+ * after the shards.
+ */
+ assert((void *)shard_cur == (void *)bin_start);
+ /* And the last bin to use up the last bytes of the allocation. */
+ assert((char *)bin_cur == ((char *)dynalloc + sz_alloc));
+ sec->fallback = fallback;
+
+
+ sec->opts = *opts;
+ sec->npsizes = npsizes;
+
+ /*
+ * Initialize these last so that an improper use of an SEC whose
+ * initialization failed will segfault in an easy-to-spot way.
+ */
+ sec->pai.alloc = &sec_alloc;
+ sec->pai.alloc_batch = &pai_alloc_batch_default;
+ sec->pai.expand = &sec_expand;
+ sec->pai.shrink = &sec_shrink;
+ sec->pai.dalloc = &sec_dalloc;
+ sec->pai.dalloc_batch = &pai_dalloc_batch_default;
+
+ return false;
+}
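A standalone toy (not jemalloc code) of the single-block layout sec_init() builds above: all shard structs first, then every shard's bin array, carved from one allocation (malloc stands in for base_alloc()).

#include <stdio.h>
#include <stdlib.h>

typedef struct { int enabled; } toy_shard_t;
typedef struct { int bytes_cur; } toy_bin_t;

int
main(void) {
	size_t nshards = 4, npsizes = 8;
	size_t sz_shards = nshards * sizeof(toy_shard_t);
	size_t sz_bins = nshards * npsizes * sizeof(toy_bin_t);
	char *block = malloc(sz_shards + sz_bins);
	if (block == NULL) {
		return 1;
	}
	toy_shard_t *shards = (toy_shard_t *)block;
	/* Bins begin at the first byte past the last shard. */
	toy_bin_t *bins = (toy_bin_t *)&shards[nshards];
	for (size_t i = 0; i < nshards; i++) {
		/* Shard i owns bins [i * npsizes, (i + 1) * npsizes). */
		printf("shard %zu bins at offset %zu\n", i,
		    (size_t)((char *)&bins[i * npsizes] - block));
	}
	free(block);
	return 0;
}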
+
+static sec_shard_t *
+sec_shard_pick(tsdn_t *tsdn, sec_t *sec) {
+ /*
+ * Eventually, we should implement affinity, tracking source shard using
+ * the edata_t's newly freed up fields. For now, just randomly
+ * distribute across all shards.
+ */
+ if (tsdn_null(tsdn)) {
+ return &sec->shards[0];
+ }
+ tsd_t *tsd = tsdn_tsd(tsdn);
+ uint8_t *idxp = tsd_sec_shardp_get(tsd);
+ if (*idxp == (uint8_t)-1) {
+ /*
+ * First use; initialize using the trick from Daniel Lemire's
+		 * "A fast alternative to the modulo reduction". Use a 64-bit
+ * number to store 32 bits, since we'll deliberately overflow
+ * when we multiply by the number of shards.
+ */
+ uint64_t rand32 = prng_lg_range_u64(tsd_prng_statep_get(tsd), 32);
+ uint32_t idx =
+ (uint32_t)((rand32 * (uint64_t)sec->opts.nshards) >> 32);
+ assert(idx < (uint32_t)sec->opts.nshards);
+ *idxp = (uint8_t)idx;
+ }
+ return &sec->shards[*idxp];
+}
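The shard-picking code above credits Daniel Lemire's multiply-shift reduction; a standalone sketch (not jemalloc code) of that trick, mapping a 32-bit random value into [0, n) without a modulo:

#include <stdint.h>
#include <stdio.h>

static uint32_t
reduce(uint32_t rand32, uint32_t n) {
	/* The high 32 bits of the 32x32 -> 64 bit product land in [0, n). */
	return (uint32_t)(((uint64_t)rand32 * (uint64_t)n) >> 32);
}

int
main(void) {
	uint32_t nshards = 7;
	for (uint32_t i = 0; i < 5; i++) {
		uint32_t r = i * 0x9e3779b9u;	/* arbitrary sample inputs */
		printf("%#010x -> shard %u\n", r, reduce(r, nshards));
	}
	return 0;
}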
+
+/*
+ * Perhaps surprisingly, this can be called on the alloc pathways; if we hit an
+ * empty cache, we'll try to fill it, which can push the shard over its limit.
+ */
+static void
+sec_flush_some_and_unlock(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard) {
+ malloc_mutex_assert_owner(tsdn, &shard->mtx);
+ edata_list_active_t to_flush;
+ edata_list_active_init(&to_flush);
+ while (shard->bytes_cur > sec->opts.bytes_after_flush) {
+ /* Pick a victim. */
+ sec_bin_t *bin = &shard->bins[shard->to_flush_next];
+
+ /* Update our victim-picking state. */
+ shard->to_flush_next++;
+ if (shard->to_flush_next == sec->npsizes) {
+ shard->to_flush_next = 0;
+ }
+
+ assert(shard->bytes_cur >= bin->bytes_cur);
+ if (bin->bytes_cur != 0) {
+ shard->bytes_cur -= bin->bytes_cur;
+ bin->bytes_cur = 0;
+ edata_list_active_concat(&to_flush, &bin->freelist);
+ }
+ /*
+ * Either bin->bytes_cur was 0, in which case we didn't touch
+ * the bin list but it should be empty anyways (or else we
+ * missed a bytes_cur update on a list modification), or it
+		 * *wasn't* 0 and we emptied it ourselves. Either way, it should
+ * be empty now.
+ */
+ assert(edata_list_active_empty(&bin->freelist));
+ }
+
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+ bool deferred_work_generated = false;
+ pai_dalloc_batch(tsdn, sec->fallback, &to_flush,
+ &deferred_work_generated);
+}
+
+static edata_t *
+sec_shard_alloc_locked(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard,
+ sec_bin_t *bin) {
+ malloc_mutex_assert_owner(tsdn, &shard->mtx);
+ if (!shard->enabled) {
+ return NULL;
+ }
+ edata_t *edata = edata_list_active_first(&bin->freelist);
+ if (edata != NULL) {
+ edata_list_active_remove(&bin->freelist, edata);
+ assert(edata_size_get(edata) <= bin->bytes_cur);
+ bin->bytes_cur -= edata_size_get(edata);
+ assert(edata_size_get(edata) <= shard->bytes_cur);
+ shard->bytes_cur -= edata_size_get(edata);
+ }
+ return edata;
+}
+
+static edata_t *
+sec_batch_fill_and_alloc(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard,
+ sec_bin_t *bin, size_t size) {
+ malloc_mutex_assert_not_owner(tsdn, &shard->mtx);
+
+ edata_list_active_t result;
+ edata_list_active_init(&result);
+ bool deferred_work_generated = false;
+ size_t nalloc = pai_alloc_batch(tsdn, sec->fallback, size,
+ 1 + sec->opts.batch_fill_extra, &result, &deferred_work_generated);
+
+ edata_t *ret = edata_list_active_first(&result);
+ if (ret != NULL) {
+ edata_list_active_remove(&result, ret);
+ }
+
+ malloc_mutex_lock(tsdn, &shard->mtx);
+ bin->being_batch_filled = false;
+ /*
+ * Handle the easy case first: nothing to cache. Note that this can
+ * only happen in case of OOM, since sec_alloc checks the expected
+ * number of allocs, and doesn't bother going down the batch_fill
+ * pathway if there won't be anything left to cache. So to be in this
+ * code path, we must have asked for > 1 alloc, but only gotten 1 back.
+ */
+ if (nalloc <= 1) {
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+ return ret;
+ }
+
+ size_t new_cached_bytes = (nalloc - 1) * size;
+
+ edata_list_active_concat(&bin->freelist, &result);
+ bin->bytes_cur += new_cached_bytes;
+ shard->bytes_cur += new_cached_bytes;
+
+ if (shard->bytes_cur > sec->opts.max_bytes) {
+ sec_flush_some_and_unlock(tsdn, sec, shard);
+ } else {
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+ }
+
+ return ret;
+}
+
+static edata_t *
+sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
+ bool guarded, bool frequent_reuse, bool *deferred_work_generated) {
+ assert((size & PAGE_MASK) == 0);
+ assert(!guarded);
+
+ sec_t *sec = (sec_t *)self;
+
+ if (zero || alignment > PAGE || sec->opts.nshards == 0
+ || size > sec->opts.max_alloc) {
+ return pai_alloc(tsdn, sec->fallback, size, alignment, zero,
+ /* guarded */ false, frequent_reuse,
+ deferred_work_generated);
+ }
+ pszind_t pszind = sz_psz2ind(size);
+ assert(pszind < sec->npsizes);
+
+ sec_shard_t *shard = sec_shard_pick(tsdn, sec);
+ sec_bin_t *bin = &shard->bins[pszind];
+ bool do_batch_fill = false;
+
+ malloc_mutex_lock(tsdn, &shard->mtx);
+ edata_t *edata = sec_shard_alloc_locked(tsdn, sec, shard, bin);
+ if (edata == NULL) {
+ if (!bin->being_batch_filled
+ && sec->opts.batch_fill_extra > 0) {
+ bin->being_batch_filled = true;
+ do_batch_fill = true;
+ }
+ }
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+ if (edata == NULL) {
+ if (do_batch_fill) {
+ edata = sec_batch_fill_and_alloc(tsdn, sec, shard, bin,
+ size);
+ } else {
+ edata = pai_alloc(tsdn, sec->fallback, size, alignment,
+ zero, /* guarded */ false, frequent_reuse,
+ deferred_work_generated);
+ }
+ }
+ return edata;
+}
+
+static bool
+sec_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
+ size_t new_size, bool zero, bool *deferred_work_generated) {
+ sec_t *sec = (sec_t *)self;
+ return pai_expand(tsdn, sec->fallback, edata, old_size, new_size, zero,
+ deferred_work_generated);
+}
+
+static bool
+sec_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
+ size_t new_size, bool *deferred_work_generated) {
+ sec_t *sec = (sec_t *)self;
+ return pai_shrink(tsdn, sec->fallback, edata, old_size, new_size,
+ deferred_work_generated);
+}
+
+static void
+sec_flush_all_locked(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard) {
+ malloc_mutex_assert_owner(tsdn, &shard->mtx);
+ shard->bytes_cur = 0;
+ edata_list_active_t to_flush;
+ edata_list_active_init(&to_flush);
+ for (pszind_t i = 0; i < sec->npsizes; i++) {
+ sec_bin_t *bin = &shard->bins[i];
+ bin->bytes_cur = 0;
+ edata_list_active_concat(&to_flush, &bin->freelist);
+ }
+
+ /*
+ * Ordinarily we would try to avoid doing the batch deallocation while
+ * holding the shard mutex, but the flush_all pathways only happen when
+ * we're disabling the HPA or resetting the arena, both of which are
+ * rare pathways.
+ */
+ bool deferred_work_generated = false;
+ pai_dalloc_batch(tsdn, sec->fallback, &to_flush,
+ &deferred_work_generated);
+}
+
+static void
+sec_shard_dalloc_and_unlock(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard,
+ edata_t *edata) {
+ malloc_mutex_assert_owner(tsdn, &shard->mtx);
+ assert(shard->bytes_cur <= sec->opts.max_bytes);
+ size_t size = edata_size_get(edata);
+ pszind_t pszind = sz_psz2ind(size);
+ assert(pszind < sec->npsizes);
+ /*
+ * Prepending here results in LIFO allocation per bin, which seems
+ * reasonable.
+ */
+ sec_bin_t *bin = &shard->bins[pszind];
+ edata_list_active_prepend(&bin->freelist, edata);
+ bin->bytes_cur += size;
+ shard->bytes_cur += size;
+ if (shard->bytes_cur > sec->opts.max_bytes) {
+ /*
+ * We've exceeded the shard limit. We make two nods in the
+ * direction of fragmentation avoidance: we flush everything in
+ * the shard, rather than one particular bin, and we hold the
+ * lock while flushing (in case one of the extents we flush is
+ * highly preferred from a fragmentation-avoidance perspective
+ * in the backing allocator). This has the extra advantage of
+ * not requiring advanced cache balancing strategies.
+ */
+ sec_flush_some_and_unlock(tsdn, sec, shard);
+ malloc_mutex_assert_not_owner(tsdn, &shard->mtx);
+ } else {
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+ }
+}
+
+static void
+sec_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ bool *deferred_work_generated) {
+ sec_t *sec = (sec_t *)self;
+ if (sec->opts.nshards == 0
+ || edata_size_get(edata) > sec->opts.max_alloc) {
+ pai_dalloc(tsdn, sec->fallback, edata,
+ deferred_work_generated);
+ return;
+ }
+ sec_shard_t *shard = sec_shard_pick(tsdn, sec);
+ malloc_mutex_lock(tsdn, &shard->mtx);
+ if (shard->enabled) {
+ sec_shard_dalloc_and_unlock(tsdn, sec, shard, edata);
+ } else {
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+ pai_dalloc(tsdn, sec->fallback, edata,
+ deferred_work_generated);
+ }
+}
+
+void
+sec_flush(tsdn_t *tsdn, sec_t *sec) {
+ for (size_t i = 0; i < sec->opts.nshards; i++) {
+ malloc_mutex_lock(tsdn, &sec->shards[i].mtx);
+ sec_flush_all_locked(tsdn, sec, &sec->shards[i]);
+ malloc_mutex_unlock(tsdn, &sec->shards[i].mtx);
+ }
+}
+
+void
+sec_disable(tsdn_t *tsdn, sec_t *sec) {
+ for (size_t i = 0; i < sec->opts.nshards; i++) {
+ malloc_mutex_lock(tsdn, &sec->shards[i].mtx);
+ sec->shards[i].enabled = false;
+ sec_flush_all_locked(tsdn, sec, &sec->shards[i]);
+ malloc_mutex_unlock(tsdn, &sec->shards[i].mtx);
+ }
+}
+
+void
+sec_stats_merge(tsdn_t *tsdn, sec_t *sec, sec_stats_t *stats) {
+ size_t sum = 0;
+ for (size_t i = 0; i < sec->opts.nshards; i++) {
+ /*
+ * We could save these lock acquisitions by making bytes_cur
+ * atomic, but stats collection is rare anyways and we expect
+ * the number and type of stats to get more interesting.
+ */
+ malloc_mutex_lock(tsdn, &sec->shards[i].mtx);
+ sum += sec->shards[i].bytes_cur;
+ malloc_mutex_unlock(tsdn, &sec->shards[i].mtx);
+ }
+ stats->bytes += sum;
+}
+
+void
+sec_mutex_stats_read(tsdn_t *tsdn, sec_t *sec,
+ mutex_prof_data_t *mutex_prof_data) {
+ for (size_t i = 0; i < sec->opts.nshards; i++) {
+ malloc_mutex_lock(tsdn, &sec->shards[i].mtx);
+ malloc_mutex_prof_accum(tsdn, mutex_prof_data,
+ &sec->shards[i].mtx);
+ malloc_mutex_unlock(tsdn, &sec->shards[i].mtx);
+ }
+}
+
+void
+sec_prefork2(tsdn_t *tsdn, sec_t *sec) {
+ for (size_t i = 0; i < sec->opts.nshards; i++) {
+ malloc_mutex_prefork(tsdn, &sec->shards[i].mtx);
+ }
+}
+
+void
+sec_postfork_parent(tsdn_t *tsdn, sec_t *sec) {
+ for (size_t i = 0; i < sec->opts.nshards; i++) {
+ malloc_mutex_postfork_parent(tsdn, &sec->shards[i].mtx);
+ }
+}
+
+void
+sec_postfork_child(tsdn_t *tsdn, sec_t *sec) {
+ for (size_t i = 0; i < sec->opts.nshards; i++) {
+ malloc_mutex_postfork_child(tsdn, &sec->shards[i].mtx);
+ }
+}
diff --git a/deps/jemalloc/src/stats.c b/deps/jemalloc/src/stats.c
index 118e05d29..efc70fd3c 100644
--- a/deps/jemalloc/src/stats.c
+++ b/deps/jemalloc/src/stats.c
@@ -1,12 +1,13 @@
-#define JEMALLOC_STATS_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/emitter.h"
+#include "jemalloc/internal/fxp.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_prof.h"
+#include "jemalloc/internal/prof_stats.h"
const char *global_mutex_names[mutex_prof_num_global_mutexes] = {
#define OP(mtx) #mtx,
@@ -25,22 +26,28 @@ const char *arena_mutex_names[mutex_prof_num_arena_mutexes] = {
xmallctl(n, (void *)v, &sz, NULL, 0); \
} while (0)
-#define CTL_M2_GET(n, i, v, t) do { \
- size_t mib[CTL_MAX_DEPTH]; \
- size_t miblen = sizeof(mib) / sizeof(size_t); \
+#define CTL_LEAF_PREPARE(mib, miblen, name) do { \
+ assert(miblen < CTL_MAX_DEPTH); \
+ size_t miblen_new = CTL_MAX_DEPTH; \
+ xmallctlmibnametomib(mib, miblen, name, &miblen_new); \
+ assert(miblen_new > miblen); \
+} while (0)
+
+#define CTL_LEAF(mib, miblen, leaf, v, t) do { \
+ assert(miblen < CTL_MAX_DEPTH); \
+ size_t miblen_new = CTL_MAX_DEPTH; \
size_t sz = sizeof(t); \
- xmallctlnametomib(n, mib, &miblen); \
- mib[2] = (i); \
- xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \
+ xmallctlbymibname(mib, miblen, leaf, &miblen_new, (void *)v, \
+ &sz, NULL, 0); \
+ assert(miblen_new == miblen + 1); \
} while (0)
-#define CTL_M2_M4_GET(n, i, j, v, t) do { \
+#define CTL_M2_GET(n, i, v, t) do { \
size_t mib[CTL_MAX_DEPTH]; \
size_t miblen = sizeof(mib) / sizeof(size_t); \
size_t sz = sizeof(t); \
xmallctlnametomib(n, mib, &miblen); \
mib[2] = (i); \
- mib[4] = (j); \
xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \
} while (0)
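The new CTL_LEAF_PREPARE/CTL_LEAF macros apply a prepare-once, index-per-iteration pattern. A hedged sketch of the same idea through jemalloc's public MIB API (mallctlnametomib()/mallctlbymib()); "arenas.bin.0.size" is just an example node, and the program assumes a jemalloc build without a symbol prefix:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	size_t mib[4];
	size_t miblen = sizeof(mib) / sizeof(size_t);
	/* Translate the ctl name to a MIB once... */
	if (mallctlnametomib("arenas.bin.0.size", mib, &miblen) != 0) {
		return 1;
	}
	/* ...then only the index changes per iteration; no string parsing. */
	for (size_t j = 0; j < 3; j++) {
		size_t reg_size, sz = sizeof(reg_size);
		mib[2] = j;
		if (mallctlbymib(mib, miblen, &reg_size, &sz, NULL, 0) == 0) {
			printf("bin %zu: region size %zu\n", j, reg_size);
		}
	}
	return 0;
}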
@@ -50,6 +57,13 @@ const char *arena_mutex_names[mutex_prof_num_arena_mutexes] = {
bool opt_stats_print = false;
char opt_stats_print_opts[stats_print_tot_num_options+1] = "";
+int64_t opt_stats_interval = STATS_INTERVAL_DEFAULT;
+char opt_stats_interval_opts[stats_print_tot_num_options+1] = "";
+
+static counter_accum_t stats_interval_accumulated;
+/* Per thread batch accum size for stats_interval. */
+static uint64_t stats_interval_accum_batch;
+
/******************************************************************************/
static uint64_t
@@ -91,13 +105,6 @@ get_rate_str(uint64_t dividend, uint64_t divisor, char str[6]) {
return false;
}
-#define MUTEX_CTL_STR_MAX_LENGTH 128
-static void
-gen_mutex_ctl_str(char *str, size_t buf_len, const char *prefix,
- const char *mutex, const char *counter) {
- malloc_snprintf(str, buf_len, "stats.%s.%s.%s", prefix, mutex, counter);
-}
-
static void
mutex_stats_init_cols(emitter_row_t *row, const char *table_name,
emitter_col_t *name,
@@ -118,7 +125,7 @@ mutex_stats_init_cols(emitter_row_t *row, const char *table_name,
#define WIDTH_uint32_t 12
#define WIDTH_uint64_t 16
-#define OP(counter, counter_type, human, derived, base_counter) \
+#define OP(counter, counter_type, human, derived, base_counter) \
col = &col_##counter_type[k_##counter_type]; \
++k_##counter_type; \
emitter_col_init(col, row); \
@@ -134,27 +141,31 @@ mutex_stats_init_cols(emitter_row_t *row, const char *table_name,
}
static void
-mutex_stats_read_global(const char *name, emitter_col_t *col_name,
+mutex_stats_read_global(size_t mib[], size_t miblen, const char *name,
+ emitter_col_t *col_name,
emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters],
emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters],
uint64_t uptime) {
- char cmd[MUTEX_CTL_STR_MAX_LENGTH];
+ CTL_LEAF_PREPARE(mib, miblen, name);
+ size_t miblen_name = miblen + 1;
col_name->str_val = name;
emitter_col_t *dst;
#define EMITTER_TYPE_uint32_t emitter_type_uint32
#define EMITTER_TYPE_uint64_t emitter_type_uint64
-#define OP(counter, counter_type, human, derived, base_counter) \
+#define OP(counter, counter_type, human, derived, base_counter) \
dst = &col_##counter_type[mutex_counter_##counter]; \
dst->type = EMITTER_TYPE_##counter_type; \
if (!derived) { \
- gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \
- "mutexes", name, #counter); \
- CTL_GET(cmd, (counter_type *)&dst->bool_val, counter_type); \
- } else { \
- emitter_col_t *base = &col_##counter_type[mutex_counter_##base_counter]; \
- dst->counter_type##_val = rate_per_second(base->counter_type##_val, uptime); \
+ CTL_LEAF(mib, miblen_name, #counter, \
+ (counter_type *)&dst->bool_val, counter_type); \
+ } else { \
+ emitter_col_t *base = \
+ &col_##counter_type[mutex_counter_##base_counter]; \
+ dst->counter_type##_val = \
+ (counter_type)rate_per_second( \
+ base->counter_type##_val, uptime); \
}
MUTEX_PROF_COUNTERS
#undef OP
@@ -163,28 +174,31 @@ mutex_stats_read_global(const char *name, emitter_col_t *col_name,
}
static void
-mutex_stats_read_arena(unsigned arena_ind, mutex_prof_arena_ind_t mutex_ind,
- const char *name, emitter_col_t *col_name,
+mutex_stats_read_arena(size_t mib[], size_t miblen, const char *name,
+ emitter_col_t *col_name,
emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters],
emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters],
uint64_t uptime) {
- char cmd[MUTEX_CTL_STR_MAX_LENGTH];
+ CTL_LEAF_PREPARE(mib, miblen, name);
+ size_t miblen_name = miblen + 1;
col_name->str_val = name;
emitter_col_t *dst;
#define EMITTER_TYPE_uint32_t emitter_type_uint32
#define EMITTER_TYPE_uint64_t emitter_type_uint64
-#define OP(counter, counter_type, human, derived, base_counter) \
+#define OP(counter, counter_type, human, derived, base_counter) \
dst = &col_##counter_type[mutex_counter_##counter]; \
dst->type = EMITTER_TYPE_##counter_type; \
- if (!derived) { \
- gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \
- "arenas.0.mutexes", arena_mutex_names[mutex_ind], #counter);\
- CTL_M2_GET(cmd, arena_ind, (counter_type *)&dst->bool_val, counter_type); \
- } else { \
- emitter_col_t *base = &col_##counter_type[mutex_counter_##base_counter]; \
- dst->counter_type##_val = rate_per_second(base->counter_type##_val, uptime); \
+ if (!derived) { \
+ CTL_LEAF(mib, miblen_name, #counter, \
+ (counter_type *)&dst->bool_val, counter_type); \
+ } else { \
+ emitter_col_t *base = \
+ &col_##counter_type[mutex_counter_##base_counter]; \
+ dst->counter_type##_val = \
+ (counter_type)rate_per_second( \
+ base->counter_type##_val, uptime); \
}
MUTEX_PROF_COUNTERS
#undef OP
@@ -193,26 +207,29 @@ mutex_stats_read_arena(unsigned arena_ind, mutex_prof_arena_ind_t mutex_ind,
}
static void
-mutex_stats_read_arena_bin(unsigned arena_ind, unsigned bin_ind,
+mutex_stats_read_arena_bin(size_t mib[], size_t miblen,
emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters],
emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters],
uint64_t uptime) {
- char cmd[MUTEX_CTL_STR_MAX_LENGTH];
+ CTL_LEAF_PREPARE(mib, miblen, "mutex");
+ size_t miblen_mutex = miblen + 1;
+
emitter_col_t *dst;
#define EMITTER_TYPE_uint32_t emitter_type_uint32
#define EMITTER_TYPE_uint64_t emitter_type_uint64
-#define OP(counter, counter_type, human, derived, base_counter) \
+#define OP(counter, counter_type, human, derived, base_counter) \
dst = &col_##counter_type[mutex_counter_##counter]; \
dst->type = EMITTER_TYPE_##counter_type; \
- if (!derived) { \
- gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \
- "arenas.0.bins.0","mutex", #counter); \
- CTL_M2_M4_GET(cmd, arena_ind, bin_ind, \
- (counter_type *)&dst->bool_val, counter_type); \
- } else { \
- emitter_col_t *base = &col_##counter_type[mutex_counter_##base_counter]; \
- dst->counter_type##_val = rate_per_second(base->counter_type##_val, uptime); \
+ if (!derived) { \
+ CTL_LEAF(mib, miblen_mutex, #counter, \
+ (counter_type *)&dst->bool_val, counter_type); \
+ } else { \
+ emitter_col_t *base = \
+ &col_##counter_type[mutex_counter_##base_counter]; \
+ dst->counter_type##_val = \
+ (counter_type)rate_per_second( \
+ base->counter_type##_val, uptime); \
}
MUTEX_PROF_COUNTERS
#undef OP
@@ -249,25 +266,42 @@ mutex_stats_emit(emitter_t *emitter, emitter_row_t *row,
#undef EMITTER_TYPE_uint64_t
}
-#define COL(row_name, column_name, left_or_right, col_width, etype) \
- emitter_col_t col_##column_name; \
- emitter_col_init(&col_##column_name, &row_name); \
- col_##column_name.justify = emitter_justify_##left_or_right; \
- col_##column_name.width = col_width; \
+#define COL_DECLARE(column_name) \
+ emitter_col_t col_##column_name;
+
+#define COL_INIT(row_name, column_name, left_or_right, col_width, etype)\
+ emitter_col_init(&col_##column_name, &row_name); \
+ col_##column_name.justify = emitter_justify_##left_or_right; \
+ col_##column_name.width = col_width; \
col_##column_name.type = emitter_type_##etype;
-#define COL_HDR(row_name, column_name, human, left_or_right, col_width, etype) \
- COL(row_name, column_name, left_or_right, col_width, etype) \
- emitter_col_t header_##column_name; \
- emitter_col_init(&header_##column_name, &header_##row_name); \
- header_##column_name.justify = emitter_justify_##left_or_right; \
- header_##column_name.width = col_width; \
- header_##column_name.type = emitter_type_title; \
+#define COL(row_name, column_name, left_or_right, col_width, etype) \
+ COL_DECLARE(column_name); \
+ COL_INIT(row_name, column_name, left_or_right, col_width, etype)
+
+#define COL_HDR_DECLARE(column_name) \
+ COL_DECLARE(column_name); \
+ emitter_col_t header_##column_name;
+
+#define COL_HDR_INIT(row_name, column_name, human, left_or_right, \
+ col_width, etype) \
+ COL_INIT(row_name, column_name, left_or_right, col_width, etype)\
+ emitter_col_init(&header_##column_name, &header_##row_name); \
+ header_##column_name.justify = emitter_justify_##left_or_right; \
+ header_##column_name.width = col_width; \
+ header_##column_name.type = emitter_type_title; \
header_##column_name.str_val = human ? human : #column_name;
+#define COL_HDR(row_name, column_name, human, left_or_right, col_width, \
+ etype) \
+ COL_HDR_DECLARE(column_name) \
+ COL_HDR_INIT(row_name, column_name, human, left_or_right, \
+ col_width, etype)
+JEMALLOC_COLD
static void
-stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t uptime) {
+stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i,
+ uint64_t uptime) {
size_t page;
bool in_gap, in_gap_prev;
unsigned nbins, j;
@@ -282,6 +316,9 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti
emitter_row_t row;
emitter_row_init(&row);
+ bool prof_stats_on = config_prof && opt_prof && opt_prof_stats
+ && i == MALLCTL_ARENAS_ALL;
+
COL_HDR(row, size, NULL, right, 20, size)
COL_HDR(row, ind, NULL, right, 4, unsigned)
COL_HDR(row, allocated, NULL, right, 13, uint64)
@@ -291,6 +328,16 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti
COL_HDR(row, ndalloc_ps, "(#/sec)", right, 8, uint64)
COL_HDR(row, nrequests, NULL, right, 13, uint64)
COL_HDR(row, nrequests_ps, "(#/sec)", right, 10, uint64)
+ COL_HDR_DECLARE(prof_live_requested);
+ COL_HDR_DECLARE(prof_live_count);
+ COL_HDR_DECLARE(prof_accum_requested);
+ COL_HDR_DECLARE(prof_accum_count);
+ if (prof_stats_on) {
+ COL_HDR_INIT(row, prof_live_requested, NULL, right, 21, uint64)
+ COL_HDR_INIT(row, prof_live_count, NULL, right, 17, uint64)
+ COL_HDR_INIT(row, prof_accum_requested, NULL, right, 21, uint64)
+ COL_HDR_INIT(row, prof_accum_count, NULL, right, 17, uint64)
+ }
COL_HDR(row, nshards, NULL, right, 9, unsigned)
COL_HDR(row, curregs, NULL, right, 13, size)
COL_HDR(row, curslabs, NULL, right, 13, size)
@@ -334,6 +381,19 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti
emitter_table_row(emitter, &header_row);
emitter_json_array_kv_begin(emitter, "bins");
+ size_t stats_arenas_mib[CTL_MAX_DEPTH];
+ CTL_LEAF_PREPARE(stats_arenas_mib, 0, "stats.arenas");
+ stats_arenas_mib[2] = i;
+ CTL_LEAF_PREPARE(stats_arenas_mib, 3, "bins");
+
+ size_t arenas_bin_mib[CTL_MAX_DEPTH];
+ CTL_LEAF_PREPARE(arenas_bin_mib, 0, "arenas.bin");
+
+ size_t prof_stats_mib[CTL_MAX_DEPTH];
+ if (prof_stats_on) {
+ CTL_LEAF_PREPARE(prof_stats_mib, 0, "prof.stats.bins");
+ }
+
for (j = 0, in_gap = false; j < nbins; j++) {
uint64_t nslabs;
size_t reg_size, slab_size, curregs;
@@ -342,44 +402,57 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti
uint32_t nregs, nshards;
uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
uint64_t nreslabs;
+ prof_stats_t prof_live;
+ prof_stats_t prof_accum;
+
+ stats_arenas_mib[4] = j;
+ arenas_bin_mib[2] = j;
+
+ CTL_LEAF(stats_arenas_mib, 5, "nslabs", &nslabs, uint64_t);
+
+ if (prof_stats_on) {
+ prof_stats_mib[3] = j;
+ CTL_LEAF(prof_stats_mib, 4, "live", &prof_live,
+ prof_stats_t);
+ CTL_LEAF(prof_stats_mib, 4, "accum", &prof_accum,
+ prof_stats_t);
+ }
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nslabs", i, j, &nslabs,
- uint64_t);
in_gap_prev = in_gap;
- in_gap = (nslabs == 0);
+ if (prof_stats_on) {
+ in_gap = (nslabs == 0 && prof_accum.count == 0);
+ } else {
+ in_gap = (nslabs == 0);
+ }
if (in_gap_prev && !in_gap) {
emitter_table_printf(emitter,
" ---\n");
}
- CTL_M2_GET("arenas.bin.0.size", j, &reg_size, size_t);
- CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t);
- CTL_M2_GET("arenas.bin.0.slab_size", j, &slab_size, size_t);
- CTL_M2_GET("arenas.bin.0.nshards", j, &nshards, uint32_t);
+ if (in_gap && !emitter_outputs_json(emitter)) {
+ continue;
+ }
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, &nmalloc,
- uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, &ndalloc,
- uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, &curregs,
- size_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j,
- &nrequests, uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j, &nfills,
+ CTL_LEAF(arenas_bin_mib, 3, "size", &reg_size, size_t);
+ CTL_LEAF(arenas_bin_mib, 3, "nregs", &nregs, uint32_t);
+ CTL_LEAF(arenas_bin_mib, 3, "slab_size", &slab_size, size_t);
+ CTL_LEAF(arenas_bin_mib, 3, "nshards", &nshards, uint32_t);
+ CTL_LEAF(stats_arenas_mib, 5, "nmalloc", &nmalloc, uint64_t);
+ CTL_LEAF(stats_arenas_mib, 5, "ndalloc", &ndalloc, uint64_t);
+ CTL_LEAF(stats_arenas_mib, 5, "curregs", &curregs, size_t);
+ CTL_LEAF(stats_arenas_mib, 5, "nrequests", &nrequests,
uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j, &nflushes,
- uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nreslabs", i, j, &nreslabs,
- uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.curslabs", i, j, &curslabs,
- size_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nonfull_slabs", i, j, &nonfull_slabs,
+ CTL_LEAF(stats_arenas_mib, 5, "nfills", &nfills, uint64_t);
+ CTL_LEAF(stats_arenas_mib, 5, "nflushes", &nflushes, uint64_t);
+ CTL_LEAF(stats_arenas_mib, 5, "nreslabs", &nreslabs, uint64_t);
+ CTL_LEAF(stats_arenas_mib, 5, "curslabs", &curslabs, size_t);
+ CTL_LEAF(stats_arenas_mib, 5, "nonfull_slabs", &nonfull_slabs,
size_t);
if (mutex) {
- mutex_stats_read_arena_bin(i, j, col_mutex64,
- col_mutex32, uptime);
+ mutex_stats_read_arena_bin(stats_arenas_mib, 5,
+ col_mutex64, col_mutex32, uptime);
}
emitter_json_object_begin(emitter);
@@ -391,6 +464,16 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti
&curregs);
emitter_json_kv(emitter, "nrequests", emitter_type_uint64,
&nrequests);
+ if (prof_stats_on) {
+ emitter_json_kv(emitter, "prof_live_requested",
+ emitter_type_uint64, &prof_live.req_sum);
+ emitter_json_kv(emitter, "prof_live_count",
+ emitter_type_uint64, &prof_live.count);
+ emitter_json_kv(emitter, "prof_accum_requested",
+ emitter_type_uint64, &prof_accum.req_sum);
+ emitter_json_kv(emitter, "prof_accum_count",
+ emitter_type_uint64, &prof_accum.count);
+ }
emitter_json_kv(emitter, "nfills", emitter_type_uint64,
&nfills);
emitter_json_kv(emitter, "nflushes", emitter_type_uint64,
@@ -437,6 +520,13 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti
col_ndalloc_ps.uint64_val = rate_per_second(ndalloc, uptime);
col_nrequests.uint64_val = nrequests;
col_nrequests_ps.uint64_val = rate_per_second(nrequests, uptime);
+ if (prof_stats_on) {
+ col_prof_live_requested.uint64_val = prof_live.req_sum;
+ col_prof_live_count.uint64_val = prof_live.count;
+ col_prof_accum_requested.uint64_val =
+ prof_accum.req_sum;
+ col_prof_accum_count.uint64_val = prof_accum.count;
+ }
col_nshards.unsigned_val = nshards;
col_curregs.size_val = curregs;
col_curslabs.size_val = curslabs;
@@ -466,6 +556,7 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti
}
}
+JEMALLOC_COLD
static void
stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
unsigned nbins, nlextents, j;
@@ -479,6 +570,9 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
emitter_row_t row;
emitter_row_init(&row);
+ bool prof_stats_on = config_prof && opt_prof && opt_prof_stats
+ && i == MALLCTL_ARENAS_ALL;
+
COL_HDR(row, size, NULL, right, 20, size)
COL_HDR(row, ind, NULL, right, 4, unsigned)
COL_HDR(row, allocated, NULL, right, 13, size)
@@ -488,6 +582,16 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
COL_HDR(row, ndalloc_ps, "(#/sec)", right, 8, uint64)
COL_HDR(row, nrequests, NULL, right, 13, uint64)
COL_HDR(row, nrequests_ps, "(#/sec)", right, 8, uint64)
+ COL_HDR_DECLARE(prof_live_requested)
+ COL_HDR_DECLARE(prof_live_count)
+ COL_HDR_DECLARE(prof_accum_requested)
+ COL_HDR_DECLARE(prof_accum_count)
+ if (prof_stats_on) {
+ COL_HDR_INIT(row, prof_live_requested, NULL, right, 21, uint64)
+ COL_HDR_INIT(row, prof_live_count, NULL, right, 17, uint64)
+ COL_HDR_INIT(row, prof_accum_requested, NULL, right, 21, uint64)
+ COL_HDR_INIT(row, prof_accum_count, NULL, right, 17, uint64)
+ }
COL_HDR(row, curlextents, NULL, right, 13, size)
/* As with bins, we label the large extents table. */
@@ -496,16 +600,33 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
emitter_table_row(emitter, &header_row);
emitter_json_array_kv_begin(emitter, "lextents");
+ size_t stats_arenas_mib[CTL_MAX_DEPTH];
+ CTL_LEAF_PREPARE(stats_arenas_mib, 0, "stats.arenas");
+ stats_arenas_mib[2] = i;
+ CTL_LEAF_PREPARE(stats_arenas_mib, 3, "lextents");
+
+ size_t arenas_lextent_mib[CTL_MAX_DEPTH];
+ CTL_LEAF_PREPARE(arenas_lextent_mib, 0, "arenas.lextent");
+
+ size_t prof_stats_mib[CTL_MAX_DEPTH];
+ if (prof_stats_on) {
+ CTL_LEAF_PREPARE(prof_stats_mib, 0, "prof.stats.lextents");
+ }
+
for (j = 0, in_gap = false; j < nlextents; j++) {
uint64_t nmalloc, ndalloc, nrequests;
size_t lextent_size, curlextents;
+ prof_stats_t prof_live;
+ prof_stats_t prof_accum;
+
+ stats_arenas_mib[4] = j;
+ arenas_lextent_mib[2] = j;
+
+ CTL_LEAF(stats_arenas_mib, 5, "nmalloc", &nmalloc, uint64_t);
+ CTL_LEAF(stats_arenas_mib, 5, "ndalloc", &ndalloc, uint64_t);
+ CTL_LEAF(stats_arenas_mib, 5, "nrequests", &nrequests,
+ uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.lextents.0.nmalloc", i, j,
- &nmalloc, uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.lextents.0.ndalloc", i, j,
- &ndalloc, uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.lextents.0.nrequests", i, j,
- &nrequests, uint64_t);
in_gap_prev = in_gap;
in_gap = (nrequests == 0);
@@ -514,11 +635,29 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
" ---\n");
}
- CTL_M2_GET("arenas.lextent.0.size", j, &lextent_size, size_t);
- CTL_M2_M4_GET("stats.arenas.0.lextents.0.curlextents", i, j,
- &curlextents, size_t);
+ CTL_LEAF(arenas_lextent_mib, 3, "size", &lextent_size, size_t);
+ CTL_LEAF(stats_arenas_mib, 5, "curlextents", &curlextents,
+ size_t);
+
+ if (prof_stats_on) {
+ prof_stats_mib[3] = j;
+ CTL_LEAF(prof_stats_mib, 4, "live", &prof_live,
+ prof_stats_t);
+ CTL_LEAF(prof_stats_mib, 4, "accum", &prof_accum,
+ prof_stats_t);
+ }
emitter_json_object_begin(emitter);
+ if (prof_stats_on) {
+ emitter_json_kv(emitter, "prof_live_requested",
+ emitter_type_uint64, &prof_live.req_sum);
+ emitter_json_kv(emitter, "prof_live_count",
+ emitter_type_uint64, &prof_live.count);
+ emitter_json_kv(emitter, "prof_accum_requested",
+ emitter_type_uint64, &prof_accum.req_sum);
+ emitter_json_kv(emitter, "prof_accum_count",
+ emitter_type_uint64, &prof_accum.count);
+ }
emitter_json_kv(emitter, "curlextents", emitter_type_size,
&curlextents);
emitter_json_object_end(emitter);
@@ -532,6 +671,13 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
col_ndalloc_ps.uint64_val = rate_per_second(ndalloc, uptime);
col_nrequests.uint64_val = nrequests;
col_nrequests_ps.uint64_val = rate_per_second(nrequests, uptime);
+ if (prof_stats_on) {
+ col_prof_live_requested.uint64_val = prof_live.req_sum;
+ col_prof_live_count.uint64_val = prof_live.count;
+ col_prof_accum_requested.uint64_val =
+ prof_accum.req_sum;
+ col_prof_accum_count.uint64_val = prof_accum.count;
+ }
col_curlextents.size_val = curlextents;
if (!in_gap) {
@@ -544,6 +690,7 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
}
}
+JEMALLOC_COLD
static void
stats_arena_extents_print(emitter_t *emitter, unsigned i) {
unsigned j;
@@ -570,22 +717,27 @@ stats_arena_extents_print(emitter_t *emitter, unsigned i) {
emitter_table_row(emitter, &header_row);
emitter_json_array_kv_begin(emitter, "extents");
+ size_t stats_arenas_mib[CTL_MAX_DEPTH];
+ CTL_LEAF_PREPARE(stats_arenas_mib, 0, "stats.arenas");
+ stats_arenas_mib[2] = i;
+ CTL_LEAF_PREPARE(stats_arenas_mib, 3, "extents");
+
in_gap = false;
for (j = 0; j < SC_NPSIZES; j++) {
size_t ndirty, nmuzzy, nretained, total, dirty_bytes,
muzzy_bytes, retained_bytes, total_bytes;
- CTL_M2_M4_GET("stats.arenas.0.extents.0.ndirty", i, j,
- &ndirty, size_t);
- CTL_M2_M4_GET("stats.arenas.0.extents.0.nmuzzy", i, j,
- &nmuzzy, size_t);
- CTL_M2_M4_GET("stats.arenas.0.extents.0.nretained", i, j,
- &nretained, size_t);
- CTL_M2_M4_GET("stats.arenas.0.extents.0.dirty_bytes", i, j,
- &dirty_bytes, size_t);
- CTL_M2_M4_GET("stats.arenas.0.extents.0.muzzy_bytes", i, j,
- &muzzy_bytes, size_t);
- CTL_M2_M4_GET("stats.arenas.0.extents.0.retained_bytes", i, j,
+ stats_arenas_mib[4] = j;
+
+ CTL_LEAF(stats_arenas_mib, 5, "ndirty", &ndirty, size_t);
+ CTL_LEAF(stats_arenas_mib, 5, "nmuzzy", &nmuzzy, size_t);
+ CTL_LEAF(stats_arenas_mib, 5, "nretained", &nretained, size_t);
+ CTL_LEAF(stats_arenas_mib, 5, "dirty_bytes", &dirty_bytes,
+ size_t);
+ CTL_LEAF(stats_arenas_mib, 5, "muzzy_bytes", &muzzy_bytes,
+ size_t);
+ CTL_LEAF(stats_arenas_mib, 5, "retained_bytes",
&retained_bytes, size_t);
+
total = ndirty + nmuzzy + nretained;
total_bytes = dirty_bytes + muzzy_bytes + retained_bytes;
@@ -633,6 +785,230 @@ stats_arena_extents_print(emitter_t *emitter, unsigned i) {
}
static void
+stats_arena_hpa_shard_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
+ emitter_row_t header_row;
+ emitter_row_init(&header_row);
+ emitter_row_t row;
+ emitter_row_init(&row);
+
+ uint64_t npurge_passes;
+ uint64_t npurges;
+ uint64_t nhugifies;
+ uint64_t ndehugifies;
+
+ CTL_M2_GET("stats.arenas.0.hpa_shard.npurge_passes",
+ i, &npurge_passes, uint64_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.npurges",
+ i, &npurges, uint64_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.nhugifies",
+ i, &nhugifies, uint64_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.ndehugifies",
+ i, &ndehugifies, uint64_t);
+
+ size_t npageslabs_huge;
+ size_t nactive_huge;
+ size_t ndirty_huge;
+
+ size_t npageslabs_nonhuge;
+ size_t nactive_nonhuge;
+ size_t ndirty_nonhuge;
+ size_t nretained_nonhuge;
+
+ size_t sec_bytes;
+ CTL_M2_GET("stats.arenas.0.hpa_sec_bytes", i, &sec_bytes, size_t);
+ emitter_kv(emitter, "sec_bytes", "Bytes in small extent cache",
+ emitter_type_size, &sec_bytes);
+
+ /* First, global stats. */
+ emitter_table_printf(emitter,
+ "HPA shard stats:\n"
+ " Purge passes: %" FMTu64 " (%" FMTu64 " / sec)\n"
+ " Purges: %" FMTu64 " (%" FMTu64 " / sec)\n"
+ " Hugeifies: %" FMTu64 " (%" FMTu64 " / sec)\n"
+ " Dehugifies: %" FMTu64 " (%" FMTu64 " / sec)\n"
+ "\n",
+ npurge_passes, rate_per_second(npurge_passes, uptime),
+ npurges, rate_per_second(npurges, uptime),
+ nhugifies, rate_per_second(nhugifies, uptime),
+ ndehugifies, rate_per_second(ndehugifies, uptime));
+
+ emitter_json_object_kv_begin(emitter, "hpa_shard");
+ emitter_json_kv(emitter, "npurge_passes", emitter_type_uint64,
+ &npurge_passes);
+ emitter_json_kv(emitter, "npurges", emitter_type_uint64,
+ &npurges);
+ emitter_json_kv(emitter, "nhugifies", emitter_type_uint64,
+ &nhugifies);
+ emitter_json_kv(emitter, "ndehugifies", emitter_type_uint64,
+ &ndehugifies);
+
+ /* Next, full slab stats. */
+ CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.npageslabs_huge",
+ i, &npageslabs_huge, size_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.nactive_huge",
+ i, &nactive_huge, size_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.ndirty_huge",
+ i, &ndirty_huge, size_t);
+
+ CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.npageslabs_nonhuge",
+ i, &npageslabs_nonhuge, size_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.nactive_nonhuge",
+ i, &nactive_nonhuge, size_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.ndirty_nonhuge",
+ i, &ndirty_nonhuge, size_t);
+ nretained_nonhuge = npageslabs_nonhuge * HUGEPAGE_PAGES
+ - nactive_nonhuge - ndirty_nonhuge;
+
+ emitter_table_printf(emitter,
+ " In full slabs:\n"
+ " npageslabs: %zu huge, %zu nonhuge\n"
+ " nactive: %zu huge, %zu nonhuge \n"
+ " ndirty: %zu huge, %zu nonhuge \n"
+ " nretained: 0 huge, %zu nonhuge \n",
+ npageslabs_huge, npageslabs_nonhuge,
+ nactive_huge, nactive_nonhuge,
+ ndirty_huge, ndirty_nonhuge,
+ nretained_nonhuge);
+
+ emitter_json_object_kv_begin(emitter, "full_slabs");
+ emitter_json_kv(emitter, "npageslabs_huge", emitter_type_size,
+ &npageslabs_huge);
+ emitter_json_kv(emitter, "nactive_huge", emitter_type_size,
+ &nactive_huge);
+	emitter_json_kv(emitter, "ndirty_huge", emitter_type_size,
+	    &ndirty_huge);
+ emitter_json_kv(emitter, "npageslabs_nonhuge", emitter_type_size,
+ &npageslabs_nonhuge);
+ emitter_json_kv(emitter, "nactive_nonhuge", emitter_type_size,
+ &nactive_nonhuge);
+ emitter_json_kv(emitter, "ndirty_nonhuge", emitter_type_size,
+ &ndirty_nonhuge);
+ emitter_json_object_end(emitter); /* End "full_slabs" */
+
+ /* Next, empty slab stats. */
+ CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.npageslabs_huge",
+ i, &npageslabs_huge, size_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.nactive_huge",
+ i, &nactive_huge, size_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.ndirty_huge",
+ i, &ndirty_huge, size_t);
+
+ CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.npageslabs_nonhuge",
+ i, &npageslabs_nonhuge, size_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.nactive_nonhuge",
+ i, &nactive_nonhuge, size_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.ndirty_nonhuge",
+ i, &ndirty_nonhuge, size_t);
+ nretained_nonhuge = npageslabs_nonhuge * HUGEPAGE_PAGES
+ - nactive_nonhuge - ndirty_nonhuge;
+
+ emitter_table_printf(emitter,
+ " In empty slabs:\n"
+ " npageslabs: %zu huge, %zu nonhuge\n"
+ " nactive: %zu huge, %zu nonhuge \n"
+ " ndirty: %zu huge, %zu nonhuge \n"
+ " nretained: 0 huge, %zu nonhuge \n"
+ "\n",
+ npageslabs_huge, npageslabs_nonhuge,
+ nactive_huge, nactive_nonhuge,
+ ndirty_huge, ndirty_nonhuge,
+ nretained_nonhuge);
+
+ emitter_json_object_kv_begin(emitter, "empty_slabs");
+ emitter_json_kv(emitter, "npageslabs_huge", emitter_type_size,
+ &npageslabs_huge);
+ emitter_json_kv(emitter, "nactive_huge", emitter_type_size,
+ &nactive_huge);
+	emitter_json_kv(emitter, "ndirty_huge", emitter_type_size,
+	    &ndirty_huge);
+ emitter_json_kv(emitter, "npageslabs_nonhuge", emitter_type_size,
+ &npageslabs_nonhuge);
+ emitter_json_kv(emitter, "nactive_nonhuge", emitter_type_size,
+ &nactive_nonhuge);
+ emitter_json_kv(emitter, "ndirty_nonhuge", emitter_type_size,
+ &ndirty_nonhuge);
+ emitter_json_object_end(emitter); /* End "empty_slabs" */
+
+ COL_HDR(row, size, NULL, right, 20, size)
+ COL_HDR(row, ind, NULL, right, 4, unsigned)
+ COL_HDR(row, npageslabs_huge, NULL, right, 16, size)
+ COL_HDR(row, nactive_huge, NULL, right, 16, size)
+ COL_HDR(row, ndirty_huge, NULL, right, 16, size)
+ COL_HDR(row, npageslabs_nonhuge, NULL, right, 20, size)
+ COL_HDR(row, nactive_nonhuge, NULL, right, 20, size)
+ COL_HDR(row, ndirty_nonhuge, NULL, right, 20, size)
+ COL_HDR(row, nretained_nonhuge, NULL, right, 20, size)
+
+ size_t stats_arenas_mib[CTL_MAX_DEPTH];
+ CTL_LEAF_PREPARE(stats_arenas_mib, 0, "stats.arenas");
+ stats_arenas_mib[2] = i;
+ CTL_LEAF_PREPARE(stats_arenas_mib, 3, "hpa_shard.nonfull_slabs");
+
+ emitter_table_row(emitter, &header_row);
+ emitter_json_array_kv_begin(emitter, "nonfull_slabs");
+ bool in_gap = false;
+ for (pszind_t j = 0; j < PSSET_NPSIZES && j < SC_NPSIZES; j++) {
+ stats_arenas_mib[5] = j;
+
+ CTL_LEAF(stats_arenas_mib, 6, "npageslabs_huge",
+ &npageslabs_huge, size_t);
+ CTL_LEAF(stats_arenas_mib, 6, "nactive_huge",
+ &nactive_huge, size_t);
+ CTL_LEAF(stats_arenas_mib, 6, "ndirty_huge",
+ &ndirty_huge, size_t);
+
+ CTL_LEAF(stats_arenas_mib, 6, "npageslabs_nonhuge",
+ &npageslabs_nonhuge, size_t);
+ CTL_LEAF(stats_arenas_mib, 6, "nactive_nonhuge",
+ &nactive_nonhuge, size_t);
+ CTL_LEAF(stats_arenas_mib, 6, "ndirty_nonhuge",
+ &ndirty_nonhuge, size_t);
+ nretained_nonhuge = npageslabs_nonhuge * HUGEPAGE_PAGES
+ - nactive_nonhuge - ndirty_nonhuge;
+
+ bool in_gap_prev = in_gap;
+ in_gap = (npageslabs_huge == 0 && npageslabs_nonhuge == 0);
+ if (in_gap_prev && !in_gap) {
+ emitter_table_printf(emitter,
+ " ---\n");
+ }
+
+ col_size.size_val = sz_pind2sz(j);
+ col_ind.size_val = j;
+ col_npageslabs_huge.size_val = npageslabs_huge;
+ col_nactive_huge.size_val = nactive_huge;
+ col_ndirty_huge.size_val = ndirty_huge;
+ col_npageslabs_nonhuge.size_val = npageslabs_nonhuge;
+ col_nactive_nonhuge.size_val = nactive_nonhuge;
+ col_ndirty_nonhuge.size_val = ndirty_nonhuge;
+ col_nretained_nonhuge.size_val = nretained_nonhuge;
+ if (!in_gap) {
+ emitter_table_row(emitter, &row);
+ }
+
+ emitter_json_object_begin(emitter);
+ emitter_json_kv(emitter, "npageslabs_huge", emitter_type_size,
+ &npageslabs_huge);
+ emitter_json_kv(emitter, "nactive_huge", emitter_type_size,
+ &nactive_huge);
+ emitter_json_kv(emitter, "ndirty_huge", emitter_type_size,
+ &ndirty_huge);
+ emitter_json_kv(emitter, "npageslabs_nonhuge", emitter_type_size,
+ &npageslabs_nonhuge);
+ emitter_json_kv(emitter, "nactive_nonhuge", emitter_type_size,
+ &nactive_nonhuge);
+ emitter_json_kv(emitter, "ndirty_nonhuge", emitter_type_size,
+ &ndirty_nonhuge);
+ emitter_json_object_end(emitter);
+ }
+ emitter_json_array_end(emitter); /* End "nonfull_slabs" */
+ emitter_json_object_end(emitter); /* End "hpa_shard" */
+ if (in_gap) {
+ emitter_table_printf(emitter, " ---\n");
+ }
+}
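
A worked example of the retained-page accounting used in this function, assuming 4 KiB pages and 2 MiB hugepages so that HUGEPAGE_PAGES == 512 (both are configuration-dependent): with npageslabs_nonhuge = 3, nactive_nonhuge = 1000 and ndirty_nonhuge = 200, the computation gives nretained_nonhuge = 3 * 512 - 1000 - 200 = 336, i.e. the pages in non-huge slabs that are neither active nor dirty and have been returned to the OS but whose address range is still retained.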
+
+static void
stats_arena_mutexes_print(emitter_t *emitter, unsigned arena_ind, uint64_t uptime) {
emitter_row_t row;
emitter_col_t col_name;
@@ -645,21 +1021,27 @@ stats_arena_mutexes_print(emitter_t *emitter, unsigned arena_ind, uint64_t uptim
emitter_json_object_kv_begin(emitter, "mutexes");
emitter_table_row(emitter, &row);
+ size_t stats_arenas_mib[CTL_MAX_DEPTH];
+ CTL_LEAF_PREPARE(stats_arenas_mib, 0, "stats.arenas");
+ stats_arenas_mib[2] = arena_ind;
+ CTL_LEAF_PREPARE(stats_arenas_mib, 3, "mutexes");
+
for (mutex_prof_arena_ind_t i = 0; i < mutex_prof_num_arena_mutexes;
i++) {
const char *name = arena_mutex_names[i];
emitter_json_object_kv_begin(emitter, name);
- mutex_stats_read_arena(arena_ind, i, name, &col_name, col64,
- col32, uptime);
+ mutex_stats_read_arena(stats_arenas_mib, 4, name, &col_name,
+ col64, col32, uptime);
mutex_stats_emit(emitter, &row, col64, col32);
emitter_json_object_end(emitter); /* Close the mutex dict. */
}
emitter_json_object_end(emitter); /* End "mutexes". */
}
+JEMALLOC_COLD
static void
stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large,
- bool mutex, bool extents) {
+ bool mutex, bool extents, bool hpa) {
unsigned nthreads;
const char *dss;
ssize_t dirty_decay_ms, muzzy_decay_ms;
@@ -673,7 +1055,7 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large,
size_t large_allocated;
uint64_t large_nmalloc, large_ndalloc, large_nrequests, large_nfills,
large_nflushes;
- size_t tcache_bytes, abandoned_vm;
+ size_t tcache_bytes, tcache_stashed_bytes, abandoned_vm;
uint64_t uptime;
CTL_GET("arenas.page", &page, size_t);
@@ -817,12 +1199,12 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large,
COL(alloc_count_row, count_nmalloc, right, 16, title);
col_count_nmalloc.str_val = "nmalloc";
- COL(alloc_count_row, count_nmalloc_ps, right, 8, title);
+ COL(alloc_count_row, count_nmalloc_ps, right, 10, title);
col_count_nmalloc_ps.str_val = "(#/sec)";
COL(alloc_count_row, count_ndalloc, right, 16, title);
col_count_ndalloc.str_val = "ndalloc";
- COL(alloc_count_row, count_ndalloc_ps, right, 8, title);
+ COL(alloc_count_row, count_ndalloc_ps, right, 10, title);
col_count_ndalloc_ps.str_val = "(#/sec)";
COL(alloc_count_row, count_nrequests, right, 16, title);
@@ -962,6 +1344,7 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large,
GET_AND_EMIT_MEM_STAT(internal)
GET_AND_EMIT_MEM_STAT(metadata_thp)
GET_AND_EMIT_MEM_STAT(tcache_bytes)
+ GET_AND_EMIT_MEM_STAT(tcache_stashed_bytes)
GET_AND_EMIT_MEM_STAT(resident)
GET_AND_EMIT_MEM_STAT(abandoned_vm)
GET_AND_EMIT_MEM_STAT(extent_avail)
@@ -979,8 +1362,12 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large,
if (extents) {
stats_arena_extents_print(emitter, i);
}
+ if (hpa) {
+ stats_arena_hpa_shard_print(emitter, i, uptime);
+ }
}
+JEMALLOC_COLD
static void
stats_general_print(emitter_t *emitter) {
const char *cpv;
@@ -988,14 +1375,18 @@ stats_general_print(emitter_t *emitter) {
unsigned uv;
uint32_t u32v;
uint64_t u64v;
+ int64_t i64v;
ssize_t ssv, ssv2;
- size_t sv, bsz, usz, ssz, sssz, cpsz;
+ size_t sv, bsz, usz, u32sz, u64sz, i64sz, ssz, sssz, cpsz;
bsz = sizeof(bool);
usz = sizeof(unsigned);
ssz = sizeof(size_t);
sssz = sizeof(ssize_t);
cpsz = sizeof(const char *);
+ u32sz = sizeof(uint32_t);
+ i64sz = sizeof(int64_t);
+ u64sz = sizeof(uint64_t);
CTL_GET("version", &cpv, const char *);
emitter_kv(emitter, "version", "Version", emitter_type_string, &cpv);
@@ -1051,6 +1442,11 @@ stats_general_print(emitter_t *emitter) {
#define OPT_WRITE_UNSIGNED(name) \
OPT_WRITE(name, uv, usz, emitter_type_unsigned)
+#define OPT_WRITE_INT64(name) \
+ OPT_WRITE(name, i64v, i64sz, emitter_type_int64)
+#define OPT_WRITE_UINT64(name) \
+ OPT_WRITE(name, u64v, u64sz, emitter_type_uint64)
+
#define OPT_WRITE_SIZE_T(name) \
OPT_WRITE(name, sv, ssz, emitter_type_size)
#define OPT_WRITE_SSIZE_T(name) \
@@ -1066,13 +1462,43 @@ stats_general_print(emitter_t *emitter) {
OPT_WRITE_BOOL("abort")
OPT_WRITE_BOOL("abort_conf")
+ OPT_WRITE_BOOL("cache_oblivious")
OPT_WRITE_BOOL("confirm_conf")
OPT_WRITE_BOOL("retain")
OPT_WRITE_CHAR_P("dss")
OPT_WRITE_UNSIGNED("narenas")
OPT_WRITE_CHAR_P("percpu_arena")
OPT_WRITE_SIZE_T("oversize_threshold")
+ OPT_WRITE_BOOL("hpa")
+ OPT_WRITE_SIZE_T("hpa_slab_max_alloc")
+ OPT_WRITE_SIZE_T("hpa_hugification_threshold")
+ OPT_WRITE_UINT64("hpa_hugify_delay_ms")
+ OPT_WRITE_UINT64("hpa_min_purge_interval_ms")
+ if (je_mallctl("opt.hpa_dirty_mult", (void *)&u32v, &u32sz, NULL, 0)
+ == 0) {
+ /*
+ * We cheat a little and "know" the secret meaning of this
+ * representation.
+ */
+ if (u32v == (uint32_t)-1) {
+ const char *neg1 = "-1";
+ emitter_kv(emitter, "hpa_dirty_mult",
+ "opt.hpa_dirty_mult", emitter_type_string, &neg1);
+ } else {
+ char buf[FXP_BUF_SIZE];
+ fxp_print(u32v, buf);
+ const char *bufp = buf;
+ emitter_kv(emitter, "hpa_dirty_mult",
+ "opt.hpa_dirty_mult", emitter_type_string, &bufp);
+ }
+ }
+ OPT_WRITE_SIZE_T("hpa_sec_nshards")
+ OPT_WRITE_SIZE_T("hpa_sec_max_alloc")
+ OPT_WRITE_SIZE_T("hpa_sec_max_bytes")
+ OPT_WRITE_SIZE_T("hpa_sec_bytes_after_flush")
+ OPT_WRITE_SIZE_T("hpa_sec_batch_fill_extra")
OPT_WRITE_CHAR_P("metadata_thp")
+ OPT_WRITE_INT64("mutex_max_spin")
OPT_WRITE_BOOL_MUTABLE("background_thread", "background_thread")
OPT_WRITE_SSIZE_T_MUTABLE("dirty_decay_ms", "arenas.dirty_decay_ms")
OPT_WRITE_SSIZE_T_MUTABLE("muzzy_decay_ms", "arenas.muzzy_decay_ms")
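
The opt.hpa_dirty_mult special case earlier in this hunk leans on the fxp encoding; as a worked example, assuming jemalloc's fxp_t is an unsigned 32-bit value with 16 integer and 16 fractional bits (an assumption based on fxp.h, not stated in this hunk): a ratio r is stored as round(r * 2^16), so 0.25 is stored as 0.25 * 65536 = 16384 = 0x4000 and fxp_print() renders it back as "0.25". The all-ones value 0xFFFFFFFF, i.e. (uint32_t)-1, is the sentinel the code above maps to the string "-1", conventionally meaning "no limit".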
@@ -1081,8 +1507,17 @@ stats_general_print(emitter_t *emitter) {
OPT_WRITE_BOOL("zero")
OPT_WRITE_BOOL("utrace")
OPT_WRITE_BOOL("xmalloc")
+ OPT_WRITE_BOOL("experimental_infallible_new")
OPT_WRITE_BOOL("tcache")
- OPT_WRITE_SSIZE_T("lg_tcache_max")
+ OPT_WRITE_SIZE_T("tcache_max")
+ OPT_WRITE_UNSIGNED("tcache_nslots_small_min")
+ OPT_WRITE_UNSIGNED("tcache_nslots_small_max")
+ OPT_WRITE_UNSIGNED("tcache_nslots_large")
+ OPT_WRITE_SSIZE_T("lg_tcache_nslots_mul")
+ OPT_WRITE_SIZE_T("tcache_gc_incr_bytes")
+ OPT_WRITE_SIZE_T("tcache_gc_delay_bytes")
+ OPT_WRITE_UNSIGNED("lg_tcache_flush_small_div")
+ OPT_WRITE_UNSIGNED("lg_tcache_flush_large_div")
OPT_WRITE_CHAR_P("thp")
OPT_WRITE_BOOL("prof")
OPT_WRITE_CHAR_P("prof_prefix")
@@ -1095,8 +1530,14 @@ stats_general_print(emitter_t *emitter) {
OPT_WRITE_BOOL("prof_gdump")
OPT_WRITE_BOOL("prof_final")
OPT_WRITE_BOOL("prof_leak")
+ OPT_WRITE_BOOL("prof_leak_error")
OPT_WRITE_BOOL("stats_print")
OPT_WRITE_CHAR_P("stats_print_opts")
+ OPT_WRITE_INT64("stats_interval")
+ OPT_WRITE_CHAR_P("stats_interval_opts")
+ OPT_WRITE_CHAR_P("zero_realloc")
emitter_dict_end(emitter);
@@ -1167,38 +1608,41 @@ stats_general_print(emitter_t *emitter) {
"Maximum thread-cached size class", emitter_type_size, &sv);
}
- unsigned nbins;
- CTL_GET("arenas.nbins", &nbins, unsigned);
+ unsigned arenas_nbins;
+ CTL_GET("arenas.nbins", &arenas_nbins, unsigned);
emitter_kv(emitter, "nbins", "Number of bin size classes",
- emitter_type_unsigned, &nbins);
+ emitter_type_unsigned, &arenas_nbins);
- unsigned nhbins;
- CTL_GET("arenas.nhbins", &nhbins, unsigned);
+ unsigned arenas_nhbins;
+ CTL_GET("arenas.nhbins", &arenas_nhbins, unsigned);
emitter_kv(emitter, "nhbins", "Number of thread-cache bin size classes",
- emitter_type_unsigned, &nhbins);
+ emitter_type_unsigned, &arenas_nhbins);
/*
* We do enough mallctls in a loop that we actually want to omit them
* (not just omit the printing).
*/
- if (emitter->output == emitter_output_json) {
+ if (emitter_outputs_json(emitter)) {
emitter_json_array_kv_begin(emitter, "bin");
- for (unsigned i = 0; i < nbins; i++) {
+ size_t arenas_bin_mib[CTL_MAX_DEPTH];
+ CTL_LEAF_PREPARE(arenas_bin_mib, 0, "arenas.bin");
+ for (unsigned i = 0; i < arenas_nbins; i++) {
+ arenas_bin_mib[2] = i;
emitter_json_object_begin(emitter);
- CTL_M2_GET("arenas.bin.0.size", i, &sv, size_t);
+ CTL_LEAF(arenas_bin_mib, 3, "size", &sv, size_t);
emitter_json_kv(emitter, "size", emitter_type_size,
&sv);
- CTL_M2_GET("arenas.bin.0.nregs", i, &u32v, uint32_t);
+ CTL_LEAF(arenas_bin_mib, 3, "nregs", &u32v, uint32_t);
emitter_json_kv(emitter, "nregs", emitter_type_uint32,
&u32v);
- CTL_M2_GET("arenas.bin.0.slab_size", i, &sv, size_t);
+ CTL_LEAF(arenas_bin_mib, 3, "slab_size", &sv, size_t);
emitter_json_kv(emitter, "slab_size", emitter_type_size,
&sv);
- CTL_M2_GET("arenas.bin.0.nshards", i, &u32v, uint32_t);
+ CTL_LEAF(arenas_bin_mib, 3, "nshards", &u32v, uint32_t);
emitter_json_kv(emitter, "nshards", emitter_type_uint32,
&u32v);
@@ -1212,12 +1656,15 @@ stats_general_print(emitter_t *emitter) {
emitter_kv(emitter, "nlextents", "Number of large size classes",
emitter_type_unsigned, &nlextents);
- if (emitter->output == emitter_output_json) {
+ if (emitter_outputs_json(emitter)) {
emitter_json_array_kv_begin(emitter, "lextent");
+ size_t arenas_lextent_mib[CTL_MAX_DEPTH];
+ CTL_LEAF_PREPARE(arenas_lextent_mib, 0, "arenas.lextent");
for (unsigned i = 0; i < nlextents; i++) {
+ arenas_lextent_mib[2] = i;
emitter_json_object_begin(emitter);
- CTL_M2_GET("arenas.lextent.0.size", i, &sv, size_t);
+ CTL_LEAF(arenas_lextent_mib, 3, "size", &sv, size_t);
emitter_json_kv(emitter, "size", emitter_type_size,
&sv);
@@ -1229,9 +1676,10 @@ stats_general_print(emitter_t *emitter) {
emitter_json_object_end(emitter); /* Close "arenas" */
}
+JEMALLOC_COLD
static void
stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
- bool unmerged, bool bins, bool large, bool mutex, bool extents) {
+ bool unmerged, bool bins, bool large, bool mutex, bool extents, bool hpa) {
/*
* These should be deleted. We keep them around for a while, to aid in
* the transition to the emitter code.
@@ -1239,6 +1687,7 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
size_t allocated, active, metadata, metadata_thp, resident, mapped,
retained;
size_t num_background_threads;
+ size_t zero_reallocs;
uint64_t background_thread_num_runs, background_thread_run_interval;
CTL_GET("stats.allocated", &allocated, size_t);
@@ -1249,6 +1698,8 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
CTL_GET("stats.mapped", &mapped, size_t);
CTL_GET("stats.retained", &retained, size_t);
+ CTL_GET("stats.zero_reallocs", &zero_reallocs, size_t);
+
if (have_background_thread) {
CTL_GET("stats.background_thread.num_threads",
&num_background_threads, size_t);
@@ -1272,12 +1723,18 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
emitter_json_kv(emitter, "resident", emitter_type_size, &resident);
emitter_json_kv(emitter, "mapped", emitter_type_size, &mapped);
emitter_json_kv(emitter, "retained", emitter_type_size, &retained);
+ emitter_json_kv(emitter, "zero_reallocs", emitter_type_size,
+ &zero_reallocs);
emitter_table_printf(emitter, "Allocated: %zu, active: %zu, "
"metadata: %zu (n_thp %zu), resident: %zu, mapped: %zu, "
"retained: %zu\n", allocated, active, metadata, metadata_thp,
resident, mapped, retained);
+ /* Strange behaviors */
+ emitter_table_printf(emitter,
+ "Count of realloc(non-null-ptr, 0) calls: %zu\n", zero_reallocs);
+
/* Background thread stats. */
emitter_json_object_kv_begin(emitter, "background_thread");
emitter_json_kv(emitter, "num_threads", emitter_type_size,
@@ -1308,9 +1765,11 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
CTL_M2_GET("stats.arenas.0.uptime", 0, &uptime, uint64_t);
+ size_t stats_mutexes_mib[CTL_MAX_DEPTH];
+ CTL_LEAF_PREPARE(stats_mutexes_mib, 0, "stats.mutexes");
for (int i = 0; i < mutex_prof_num_global_mutexes; i++) {
- mutex_stats_read_global(global_mutex_names[i], &name,
- col64, col32, uptime);
+ mutex_stats_read_global(stats_mutexes_mib, 2,
+ global_mutex_names[i], &name, col64, col32, uptime);
emitter_json_object_kv_begin(emitter, global_mutex_names[i]);
mutex_stats_emit(emitter, &row, col64, col32);
emitter_json_object_end(emitter);
@@ -1355,7 +1814,7 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
emitter_table_printf(emitter, "Merged arenas stats:\n");
emitter_json_object_kv_begin(emitter, "merged");
stats_arena_print(emitter, MALLCTL_ARENAS_ALL, bins,
- large, mutex, extents);
+ large, mutex, extents, hpa);
emitter_json_object_end(emitter); /* Close "merged". */
}
@@ -1366,7 +1825,7 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
"Destroyed arenas stats:\n");
emitter_json_object_kv_begin(emitter, "destroyed");
stats_arena_print(emitter, MALLCTL_ARENAS_DESTROYED,
- bins, large, mutex, extents);
+ bins, large, mutex, extents, hpa);
emitter_json_object_end(emitter); /* Close "destroyed". */
}
@@ -1382,7 +1841,7 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
emitter_table_printf(emitter,
"arenas[%s]:\n", arena_ind_str);
stats_arena_print(emitter, i, bins,
- large, mutex, extents);
+ large, mutex, extents, hpa);
/* Close "<arena-ind>". */
emitter_json_object_end(emitter);
}
@@ -1393,8 +1852,7 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
}
void
-stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *opts) {
+stats_print(write_cb_t *write_cb, void *cbopaque, const char *opts) {
int err;
uint64_t epoch;
size_t u64sz;
@@ -1437,8 +1895,8 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
emitter_t emitter;
emitter_init(&emitter,
- json ? emitter_output_json : emitter_output_table, write_cb,
- cbopaque);
+ json ? emitter_output_json_compact : emitter_output_table,
+ write_cb, cbopaque);
emitter_begin(&emitter);
emitter_table_printf(&emitter, "___ Begin jemalloc statistics ___\n");
emitter_json_object_kv_begin(&emitter, "jemalloc");
@@ -1448,10 +1906,68 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
}
if (config_stats) {
stats_print_helper(&emitter, merged, destroyed, unmerged,
- bins, large, mutex, extents);
+ bins, large, mutex, extents, hpa);
}
emitter_json_object_end(&emitter); /* Closes the "jemalloc" dict. */
emitter_table_printf(&emitter, "--- End jemalloc statistics ---\n");
emitter_end(&emitter);
}
+
+uint64_t
+stats_interval_new_event_wait(tsd_t *tsd) {
+ return stats_interval_accum_batch;
+}
+
+uint64_t
+stats_interval_postponed_event_wait(tsd_t *tsd) {
+ return TE_MIN_START_WAIT;
+}
+
+void
+stats_interval_event_handler(tsd_t *tsd, uint64_t elapsed) {
+ assert(elapsed > 0 && elapsed != TE_INVALID_ELAPSED);
+ if (counter_accum(tsd_tsdn(tsd), &stats_interval_accumulated,
+ elapsed)) {
+ je_malloc_stats_print(NULL, NULL, opt_stats_interval_opts);
+ }
+}
+
+bool
+stats_boot(void) {
+ uint64_t stats_interval;
+ if (opt_stats_interval < 0) {
+ assert(opt_stats_interval == -1);
+ stats_interval = 0;
+ stats_interval_accum_batch = 0;
+	} else {
+ /* See comments in stats.h */
+ stats_interval = (opt_stats_interval > 0) ?
+ opt_stats_interval : 1;
+ uint64_t batch = stats_interval >>
+ STATS_INTERVAL_ACCUM_LG_BATCH_SIZE;
+ if (batch > STATS_INTERVAL_ACCUM_BATCH_MAX) {
+ batch = STATS_INTERVAL_ACCUM_BATCH_MAX;
+ } else if (batch == 0) {
+ batch = 1;
+ }
+ stats_interval_accum_batch = batch;
+ }
+
+ return counter_accum_init(&stats_interval_accumulated, stats_interval);
+}
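
As a usage sketch (the interval value below is illustrative, not a default): opt_stats_interval is measured in bytes of allocation activity, and the batch computed above only bounds how often per-thread counters are folded into the global accumulator before being compared against the interval. An application could request periodic dumps via the usual malloc_conf string:

    /*
     * Illustrative only: ask for a stats dump roughly once per GiB of
     * allocation activity (1073741824 bytes).  In builds configured with the
     * default "je_" prefix the symbol is je_malloc_conf.
     */
    const char *malloc_conf = "stats_interval:1073741824";

The same setting can also be supplied at run time through the MALLOC_CONF environment variable.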
+
+void
+stats_prefork(tsdn_t *tsdn) {
+ counter_prefork(tsdn, &stats_interval_accumulated);
+}
+
+void
+stats_postfork_parent(tsdn_t *tsdn) {
+ counter_postfork_parent(tsdn, &stats_interval_accumulated);
+}
+
+void
+stats_postfork_child(tsdn_t *tsdn) {
+ counter_postfork_child(tsdn, &stats_interval_accumulated);
+}
diff --git a/deps/jemalloc/src/sz.c b/deps/jemalloc/src/sz.c
index 8633fb050..d3115dda7 100644
--- a/deps/jemalloc/src/sz.c
+++ b/deps/jemalloc/src/sz.c
@@ -1,8 +1,57 @@
#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/sz.h"
JEMALLOC_ALIGNED(CACHELINE)
size_t sz_pind2sz_tab[SC_NPSIZES+1];
+size_t sz_large_pad;
+
+size_t
+sz_psz_quantize_floor(size_t size) {
+ size_t ret;
+ pszind_t pind;
+
+ assert(size > 0);
+ assert((size & PAGE_MASK) == 0);
+
+ pind = sz_psz2ind(size - sz_large_pad + 1);
+ if (pind == 0) {
+ /*
+ * Avoid underflow. This short-circuit would also do the right
+ * thing for all sizes in the range for which there are
+ * PAGE-spaced size classes, but it's simplest to just handle
+ * the one case that would cause erroneous results.
+ */
+ return size;
+ }
+ ret = sz_pind2sz(pind - 1) + sz_large_pad;
+ assert(ret <= size);
+ return ret;
+}
+
+size_t
+sz_psz_quantize_ceil(size_t size) {
+ size_t ret;
+
+ assert(size > 0);
+ assert(size - sz_large_pad <= SC_LARGE_MAXCLASS);
+ assert((size & PAGE_MASK) == 0);
+
+ ret = sz_psz_quantize_floor(size);
+ if (ret < size) {
+ /*
+ * Skip a quantization that may have an adequately large extent,
+ * because under-sized extents may be mixed in. This only
+ * happens when an unusual size is requested, i.e. for aligned
+ * allocation, and is just one of several places where linear
+ * search would potentially find sufficiently aligned available
+ * memory somewhere lower.
+ */
+ ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
+ sz_large_pad;
+ }
+ return ret;
+}
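
A worked example of the two quantization helpers, assuming 4 KiB pages, sz_large_pad == 0, and the default page-size-class spacing (..., 24K, 28K, 32K, 40K, 48K, ...): for a page-aligned 36 KiB input, sz_psz_quantize_floor(36K) returns 32 KiB (the largest page size class not exceeding it), while sz_psz_quantize_ceil(36K) returns 40 KiB (the smallest one at least as large). For a size that is itself a page size class, e.g. 32 KiB, both helpers return the input unchanged.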
static void
sz_boot_pind2sz_tab(const sc_data_t *sc_data) {
@@ -57,7 +106,8 @@ sz_boot_size2index_tab(const sc_data_t *sc_data) {
}
void
-sz_boot(const sc_data_t *sc_data) {
+sz_boot(const sc_data_t *sc_data, bool cache_oblivious) {
+ sz_large_pad = cache_oblivious ? PAGE : 0;
sz_boot_pind2sz_tab(sc_data);
sz_boot_index2size_tab(sc_data);
sz_boot_size2index_tab(sc_data);
diff --git a/deps/jemalloc/src/tcache.c b/deps/jemalloc/src/tcache.c
index 50099a9f2..fa16732e4 100644
--- a/deps/jemalloc/src/tcache.c
+++ b/deps/jemalloc/src/tcache.c
@@ -1,22 +1,71 @@
-#define JEMALLOC_TCACHE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/safety_check.h"
+#include "jemalloc/internal/san.h"
#include "jemalloc/internal/sc.h"
/******************************************************************************/
/* Data. */
-bool opt_tcache = true;
-ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
+bool opt_tcache = true;
+
+/* tcache_maxclass is set to 32KB by default. */
+size_t opt_tcache_max = ((size_t)1) << 15;
+
+/* Reasonable defaults for min and max values. */
+unsigned opt_tcache_nslots_small_min = 20;
+unsigned opt_tcache_nslots_small_max = 200;
+unsigned opt_tcache_nslots_large = 20;
+
+/*
+ * We attempt to make the number of slots in a tcache bin for a given size class
+ * equal to the number of objects in a slab times some multiplier. By default,
+ * the multiplier is 2 (i.e. we set the maximum number of objects in the tcache
+ * to twice the number of objects in a slab).
+ * This is bounded by some other constraints as well, like the fact that it
+ * must be even, must be less than opt_tcache_nslots_small_max, etc..
+ */
+ssize_t opt_lg_tcache_nslots_mul = 1;
+
+/*
+ * Number of allocation bytes between tcache incremental GCs. Again, this
+ * default just seems to work well; more tuning is possible.
+ */
+size_t opt_tcache_gc_incr_bytes = 65536;
+
+/*
+ * With default settings, we may end up flushing small bins frequently with
+ * small flush amounts. To limit this tendency, we can set a number of bytes to
+ * "delay" by. If we try to flush N M-byte items, we decrease that size-class's
+ * delay by N * M. So, if delay is 1024 and we're looking at the 64-byte size
+ * class, we won't do any flushing until we've been asked to flush 1024/64 == 16
+ * items. This can happen in any configuration (i.e. being asked to flush 16
+ * items once, or 4 items 4 times).
+ *
+ * Practically, this is stored as a count of items in a uint8_t, so the
+ * effective maximum value for a size class is 255 * sz.
+ */
+size_t opt_tcache_gc_delay_bytes = 0;
+
+/*
+ * When a cache bin is flushed because it's full, how much of it do we flush?
+ * By default, we flush half the maximum number of items.
+ */
+unsigned opt_lg_tcache_flush_small_div = 1;
+unsigned opt_lg_tcache_flush_large_div = 1;
cache_bin_info_t *tcache_bin_info;
-static unsigned stack_nelms; /* Total stack elms per tcache. */
+/* Total stack size required (per tcache). Include the padding above. */
+static size_t tcache_bin_alloc_size;
+static size_t tcache_bin_alloc_alignment;
+
+/* Number of cache bins enabled, including both large and small. */
unsigned nhbins;
+/* Max size class to be cached (can be small or large). */
size_t tcache_maxclass;
tcaches_t *tcaches;
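
A worked example of how the slot-count knobs above interact (the slab geometry is illustrative; the real values come from bin_infos): for a 64-byte size class whose slab holds 64 regions, a multiplier of 2^opt_lg_tcache_nslots_mul = 2 suggests 128 tcache slots, which falls inside [opt_tcache_nslots_small_min, opt_tcache_nslots_small_max] = [20, 200] and is used as-is. A 16-byte class with 256 regions per one-page slab would suggest 512 slots and be clamped down to 200.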
@@ -37,358 +86,551 @@ tcache_salloc(tsdn_t *tsdn, const void *ptr) {
return arena_salloc(tsdn, ptr);
}
-void
-tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
- szind_t binind = tcache->next_gc_bin;
+uint64_t
+tcache_gc_new_event_wait(tsd_t *tsd) {
+ return opt_tcache_gc_incr_bytes;
+}
+
+uint64_t
+tcache_gc_postponed_event_wait(tsd_t *tsd) {
+ return TE_MIN_START_WAIT;
+}
+
+uint64_t
+tcache_gc_dalloc_new_event_wait(tsd_t *tsd) {
+ return opt_tcache_gc_incr_bytes;
+}
- cache_bin_t *tbin;
- if (binind < SC_NBINS) {
- tbin = tcache_small_bin_get(tcache, binind);
+uint64_t
+tcache_gc_dalloc_postponed_event_wait(tsd_t *tsd) {
+ return TE_MIN_START_WAIT;
+}
+
+static uint8_t
+tcache_gc_item_delay_compute(szind_t szind) {
+ assert(szind < SC_NBINS);
+ size_t sz = sz_index2size(szind);
+ size_t item_delay = opt_tcache_gc_delay_bytes / sz;
+ size_t delay_max = ZU(1)
+ << (sizeof(((tcache_slow_t *)NULL)->bin_flush_delay_items[0]) * 8);
+ if (item_delay >= delay_max) {
+ item_delay = delay_max - 1;
+ }
+ return (uint8_t)item_delay;
+}
+
+static void
+tcache_gc_small(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache,
+ szind_t szind) {
+ /* Aim to flush 3/4 of items below low-water. */
+ assert(szind < SC_NBINS);
+
+ cache_bin_t *cache_bin = &tcache->bins[szind];
+ cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin,
+ &tcache_bin_info[szind]);
+ cache_bin_sz_t low_water = cache_bin_low_water_get(cache_bin,
+ &tcache_bin_info[szind]);
+ assert(!tcache_slow->bin_refilled[szind]);
+
+ size_t nflush = low_water - (low_water >> 2);
+ if (nflush < tcache_slow->bin_flush_delay_items[szind]) {
+ /* Workaround for a conversion warning. */
+ uint8_t nflush_uint8 = (uint8_t)nflush;
+ assert(sizeof(tcache_slow->bin_flush_delay_items[0]) ==
+ sizeof(nflush_uint8));
+ tcache_slow->bin_flush_delay_items[szind] -= nflush_uint8;
+ return;
} else {
- tbin = tcache_large_bin_get(tcache, binind);
+ tcache_slow->bin_flush_delay_items[szind]
+ = tcache_gc_item_delay_compute(szind);
}
- if (tbin->low_water > 0) {
- /*
- * Flush (ceiling) 3/4 of the objects below the low water mark.
- */
- if (binind < SC_NBINS) {
- tcache_bin_flush_small(tsd, tcache, tbin, binind,
- tbin->ncached - tbin->low_water + (tbin->low_water
- >> 2));
- /*
- * Reduce fill count by 2X. Limit lg_fill_div such that
- * the fill count is always at least 1.
- */
- cache_bin_info_t *tbin_info = &tcache_bin_info[binind];
- if ((tbin_info->ncached_max >>
- (tcache->lg_fill_div[binind] + 1)) >= 1) {
- tcache->lg_fill_div[binind]++;
- }
+
+ tcache_bin_flush_small(tsd, tcache, cache_bin, szind,
+ (unsigned)(ncached - nflush));
+
+ /*
+ * Reduce fill count by 2X. Limit lg_fill_div such that
+ * the fill count is always at least 1.
+ */
+ if ((cache_bin_info_ncached_max(&tcache_bin_info[szind])
+ >> (tcache_slow->lg_fill_div[szind] + 1)) >= 1) {
+ tcache_slow->lg_fill_div[szind]++;
+ }
+}
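
For instance, if a bin's low-water mark since the last GC pass is 16, nflush = 16 - (16 >> 2) = 12, i.e. roughly 3/4 of the items that sat unused are returned to the arena; but if the size class still has more than 12 delayed items outstanding (per opt_tcache_gc_delay_bytes), the flush is skipped and the delay counter is decremented by 12 instead.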
+
+static void
+tcache_gc_large(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache,
+ szind_t szind) {
+ /* Like the small GC; flush 3/4 of untouched items. */
+ assert(szind >= SC_NBINS);
+ cache_bin_t *cache_bin = &tcache->bins[szind];
+ cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin,
+ &tcache_bin_info[szind]);
+ cache_bin_sz_t low_water = cache_bin_low_water_get(cache_bin,
+ &tcache_bin_info[szind]);
+ tcache_bin_flush_large(tsd, tcache, cache_bin, szind,
+ (unsigned)(ncached - low_water + (low_water >> 2)));
+}
+
+static void
+tcache_event(tsd_t *tsd) {
+ tcache_t *tcache = tcache_get(tsd);
+ if (tcache == NULL) {
+ return;
+ }
+
+ tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd);
+ szind_t szind = tcache_slow->next_gc_bin;
+ bool is_small = (szind < SC_NBINS);
+ cache_bin_t *cache_bin = &tcache->bins[szind];
+
+ tcache_bin_flush_stashed(tsd, tcache, cache_bin, szind, is_small);
+
+ cache_bin_sz_t low_water = cache_bin_low_water_get(cache_bin,
+ &tcache_bin_info[szind]);
+ if (low_water > 0) {
+ if (is_small) {
+ tcache_gc_small(tsd, tcache_slow, tcache, szind);
} else {
- tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
- - tbin->low_water + (tbin->low_water >> 2), tcache);
+ tcache_gc_large(tsd, tcache_slow, tcache, szind);
}
- } else if (tbin->low_water < 0) {
+ } else if (is_small && tcache_slow->bin_refilled[szind]) {
+ assert(low_water == 0);
/*
* Increase fill count by 2X for small bins. Make sure
* lg_fill_div stays greater than 0.
*/
- if (binind < SC_NBINS && tcache->lg_fill_div[binind] > 1) {
- tcache->lg_fill_div[binind]--;
+ if (tcache_slow->lg_fill_div[szind] > 1) {
+ tcache_slow->lg_fill_div[szind]--;
}
+ tcache_slow->bin_refilled[szind] = false;
}
- tbin->low_water = tbin->ncached;
+ cache_bin_low_water_set(cache_bin);
- tcache->next_gc_bin++;
- if (tcache->next_gc_bin == nhbins) {
- tcache->next_gc_bin = 0;
+ tcache_slow->next_gc_bin++;
+ if (tcache_slow->next_gc_bin == nhbins) {
+ tcache_slow->next_gc_bin = 0;
}
}
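
Putting the GC pieces together with a rough, illustrative calculation: a GC event fires every opt_tcache_gc_incr_bytes (65536 bytes by default, per the data section above), and each event inspects exactly one bin, advancing next_gc_bin round-robin. With, say, 40 enabled bins (nhbins depends on tcache_max), any given bin is therefore revisited roughly every 40 * 64 KiB = 2.5 MiB of allocation activity.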
+void
+tcache_gc_event_handler(tsd_t *tsd, uint64_t elapsed) {
+ assert(elapsed == TE_INVALID_ELAPSED);
+ tcache_event(tsd);
+}
+
+void
+tcache_gc_dalloc_event_handler(tsd_t *tsd, uint64_t elapsed) {
+ assert(elapsed == TE_INVALID_ELAPSED);
+ tcache_event(tsd);
+}
+
void *
-tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
- cache_bin_t *tbin, szind_t binind, bool *tcache_success) {
+tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena,
+ tcache_t *tcache, cache_bin_t *cache_bin, szind_t binind,
+ bool *tcache_success) {
+ tcache_slow_t *tcache_slow = tcache->tcache_slow;
void *ret;
- assert(tcache->arena != NULL);
- arena_tcache_fill_small(tsdn, arena, tcache, tbin, binind,
- config_prof ? tcache->prof_accumbytes : 0);
- if (config_prof) {
- tcache->prof_accumbytes = 0;
- }
- ret = cache_bin_alloc_easy(tbin, tcache_success);
+ assert(tcache_slow->arena != NULL);
+ unsigned nfill = cache_bin_info_ncached_max(&tcache_bin_info[binind])
+ >> tcache_slow->lg_fill_div[binind];
+ arena_cache_bin_fill_small(tsdn, arena, cache_bin,
+ &tcache_bin_info[binind], binind, nfill);
+ tcache_slow->bin_refilled[binind] = true;
+ ret = cache_bin_alloc(cache_bin, tcache_success);
return ret;
}
-/* Enabled with --enable-extra-size-check. */
+static const void *
+tcache_bin_flush_ptr_getter(void *arr_ctx, size_t ind) {
+ cache_bin_ptr_array_t *arr = (cache_bin_ptr_array_t *)arr_ctx;
+ return arr->ptr[ind];
+}
+
static void
-tbin_extents_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind,
- size_t nflush, extent_t **extents){
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+tcache_bin_flush_metadata_visitor(void *szind_sum_ctx,
+ emap_full_alloc_ctx_t *alloc_ctx) {
+ size_t *szind_sum = (size_t *)szind_sum_ctx;
+ *szind_sum -= alloc_ctx->szind;
+ util_prefetch_write_range(alloc_ctx->edata, sizeof(edata_t));
+}
- /*
- * Verify that the items in the tcache all have the correct size; this
- * is useful for catching sized deallocation bugs, also to fail early
- * instead of corrupting metadata. Since this can be turned on for opt
- * builds, avoid the branch in the loop.
- */
- szind_t szind;
- size_t sz_sum = binind * nflush;
- for (unsigned i = 0 ; i < nflush; i++) {
- rtree_extent_szind_read(tsdn, &extents_rtree,
- rtree_ctx, (uintptr_t)*(tbin->avail - 1 - i), true,
- &extents[i], &szind);
- sz_sum -= szind;
- }
- if (sz_sum != 0) {
- safety_check_fail("<jemalloc>: size mismatch in thread cache "
- "detected, likely caused by sized deallocation bugs by "
- "application. Abort.\n");
- abort();
+JEMALLOC_NOINLINE static void
+tcache_bin_flush_size_check_fail(cache_bin_ptr_array_t *arr, szind_t szind,
+ size_t nptrs, emap_batch_lookup_result_t *edatas) {
+ bool found_mismatch = false;
+ for (size_t i = 0; i < nptrs; i++) {
+ szind_t true_szind = edata_szind_get(edatas[i].edata);
+ if (true_szind != szind) {
+ found_mismatch = true;
+ safety_check_fail_sized_dealloc(
+ /* current_dealloc */ false,
+ /* ptr */ tcache_bin_flush_ptr_getter(arr, i),
+ /* true_size */ sz_index2size(true_szind),
+ /* input_size */ sz_index2size(szind));
+ }
}
+ assert(found_mismatch);
}
-void
-tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
- szind_t binind, unsigned rem) {
- bool merged_stats = false;
-
- assert(binind < SC_NBINS);
- assert((cache_bin_sz_t)rem <= tbin->ncached);
+static void
+tcache_bin_flush_edatas_lookup(tsd_t *tsd, cache_bin_ptr_array_t *arr,
+ szind_t binind, size_t nflush, emap_batch_lookup_result_t *edatas) {
- arena_t *arena = tcache->arena;
- assert(arena != NULL);
- unsigned nflush = tbin->ncached - rem;
- VARIABLE_ARRAY(extent_t *, item_extent, nflush);
+ /*
+ * This gets compiled away when config_opt_safety_checks is false.
+ * Checks for sized deallocation bugs, failing early rather than
+ * corrupting metadata.
+ */
+ size_t szind_sum = binind * nflush;
+ emap_edata_lookup_batch(tsd, &arena_emap_global, nflush,
+ &tcache_bin_flush_ptr_getter, (void *)arr,
+ &tcache_bin_flush_metadata_visitor, (void *)&szind_sum,
+ edatas);
+ if (config_opt_safety_checks && unlikely(szind_sum != 0)) {
+ tcache_bin_flush_size_check_fail(arr, binind, nflush, edatas);
+ }
+}
- /* Look up extent once per item. */
- if (config_opt_safety_checks) {
- tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind,
- nflush, item_extent);
+JEMALLOC_ALWAYS_INLINE bool
+tcache_bin_flush_match(edata_t *edata, unsigned cur_arena_ind,
+ unsigned cur_binshard, bool small) {
+ if (small) {
+ return edata_arena_ind_get(edata) == cur_arena_ind
+ && edata_binshard_get(edata) == cur_binshard;
} else {
- for (unsigned i = 0 ; i < nflush; i++) {
- item_extent[i] = iealloc(tsd_tsdn(tsd),
- *(tbin->avail - 1 - i));
- }
+ return edata_arena_ind_get(edata) == cur_arena_ind;
}
- while (nflush > 0) {
- /* Lock the arena bin associated with the first object. */
- extent_t *extent = item_extent[0];
- unsigned bin_arena_ind = extent_arena_ind_get(extent);
- arena_t *bin_arena = arena_get(tsd_tsdn(tsd), bin_arena_ind,
- false);
- unsigned binshard = extent_binshard_get(extent);
- assert(binshard < bin_infos[binind].n_shards);
- bin_t *bin = &bin_arena->bins[binind].bin_shards[binshard];
-
- if (config_prof && bin_arena == arena) {
- if (arena_prof_accum(tsd_tsdn(tsd), arena,
- tcache->prof_accumbytes)) {
- prof_idump(tsd_tsdn(tsd));
- }
- tcache->prof_accumbytes = 0;
- }
+}
- malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
- if (config_stats && bin_arena == arena && !merged_stats) {
- merged_stats = true;
- bin->stats.nflushes++;
- bin->stats.nrequests += tbin->tstats.nrequests;
- tbin->tstats.nrequests = 0;
- }
- unsigned ndeferred = 0;
- for (unsigned i = 0; i < nflush; i++) {
- void *ptr = *(tbin->avail - 1 - i);
- extent = item_extent[i];
- assert(ptr != NULL && extent != NULL);
-
- if (extent_arena_ind_get(extent) == bin_arena_ind
- && extent_binshard_get(extent) == binshard) {
- arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
- bin_arena, bin, binind, extent, ptr);
- } else {
- /*
- * This object was allocated via a different
- * arena bin than the one that is currently
- * locked. Stash the object, so that it can be
- * handled in a future pass.
- */
- *(tbin->avail - 1 - ndeferred) = ptr;
- item_extent[ndeferred] = extent;
- ndeferred++;
- }
- }
- malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
- arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
- nflush = ndeferred;
- }
- if (config_stats && !merged_stats) {
- /*
- * The flush loop didn't happen to flush to this thread's
- * arena, so the stats didn't get merged. Manually do so now.
- */
- unsigned binshard;
- bin_t *bin = arena_bin_choose_lock(tsd_tsdn(tsd), arena, binind,
- &binshard);
- bin->stats.nflushes++;
- bin->stats.nrequests += tbin->tstats.nrequests;
- tbin->tstats.nrequests = 0;
- malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
- }
+JEMALLOC_ALWAYS_INLINE void
+tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
+ szind_t binind, cache_bin_ptr_array_t *ptrs, unsigned nflush, bool small) {
+ tcache_slow_t *tcache_slow = tcache->tcache_slow;
+ /*
+ * A couple lookup calls take tsdn; declare it once for convenience
+ * instead of calling tsd_tsdn(tsd) all the time.
+ */
+ tsdn_t *tsdn = tsd_tsdn(tsd);
- memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
- sizeof(void *));
- tbin->ncached = rem;
- if (tbin->ncached < tbin->low_water) {
- tbin->low_water = tbin->ncached;
+ if (small) {
+ assert(binind < SC_NBINS);
+ } else {
+ assert(binind < nhbins);
}
-}
+ arena_t *tcache_arena = tcache_slow->arena;
+ assert(tcache_arena != NULL);
-void
-tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind,
- unsigned rem, tcache_t *tcache) {
- bool merged_stats = false;
+ /*
+ * Variable length array must have > 0 length; the last element is never
+ * touched (it's just included to satisfy the no-zero-length rule).
+ */
+ VARIABLE_ARRAY(emap_batch_lookup_result_t, item_edata, nflush + 1);
+ tcache_bin_flush_edatas_lookup(tsd, ptrs, binind, nflush, item_edata);
- assert(binind < nhbins);
- assert((cache_bin_sz_t)rem <= tbin->ncached);
+ /*
+ * The slabs where we freed the last remaining object in the slab (and
+ * so need to free the slab itself).
+ * Used only if small == true.
+ */
+ unsigned dalloc_count = 0;
+ VARIABLE_ARRAY(edata_t *, dalloc_slabs, nflush + 1);
- arena_t *tcache_arena = tcache->arena;
- assert(tcache_arena != NULL);
- unsigned nflush = tbin->ncached - rem;
- VARIABLE_ARRAY(extent_t *, item_extent, nflush);
-
-#ifndef JEMALLOC_EXTRA_SIZE_CHECK
- /* Look up extent once per item. */
- for (unsigned i = 0 ; i < nflush; i++) {
- item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i));
- }
-#else
- tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind, nflush,
- item_extent);
-#endif
+ /*
+ * We're about to grab a bunch of locks. If one of them happens to be
+ * the one guarding the arena-level stats counters we flush our
+ * thread-local ones to, we do so under one critical section.
+ */
+ bool merged_stats = false;
while (nflush > 0) {
- /* Lock the arena associated with the first object. */
- extent_t *extent = item_extent[0];
- unsigned locked_arena_ind = extent_arena_ind_get(extent);
- arena_t *locked_arena = arena_get(tsd_tsdn(tsd),
- locked_arena_ind, false);
- bool idump;
-
- if (config_prof) {
- idump = false;
+ /* Lock the arena, or bin, associated with the first object. */
+ edata_t *edata = item_edata[0].edata;
+ unsigned cur_arena_ind = edata_arena_ind_get(edata);
+ arena_t *cur_arena = arena_get(tsdn, cur_arena_ind, false);
+
+ /*
+ * These assignments are always overwritten when small is true,
+ * and their values are always ignored when small is false, but
+ * to avoid the technical UB when we pass them as parameters, we
+		 * need to initialize them.
+ */
+ unsigned cur_binshard = 0;
+ bin_t *cur_bin = NULL;
+ if (small) {
+ cur_binshard = edata_binshard_get(edata);
+ cur_bin = arena_get_bin(cur_arena, binind,
+ cur_binshard);
+ assert(cur_binshard < bin_infos[binind].n_shards);
+ /*
+ * If you're looking at profiles, you might think this
+ * is a good place to prefetch the bin stats, which are
+ * often a cache miss. This turns out not to be
+ * helpful on the workloads we've looked at, with moving
+ * the bin stats next to the lock seeming to do better.
+ */
}
- bool lock_large = !arena_is_auto(locked_arena);
- if (lock_large) {
- malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->large_mtx);
+ if (small) {
+ malloc_mutex_lock(tsdn, &cur_bin->lock);
}
- for (unsigned i = 0; i < nflush; i++) {
- void *ptr = *(tbin->avail - 1 - i);
- assert(ptr != NULL);
- extent = item_extent[i];
- if (extent_arena_ind_get(extent) == locked_arena_ind) {
- large_dalloc_prep_junked_locked(tsd_tsdn(tsd),
- extent);
- }
+ if (!small && !arena_is_auto(cur_arena)) {
+ malloc_mutex_lock(tsdn, &cur_arena->large_mtx);
}
- if ((config_prof || config_stats) &&
- (locked_arena == tcache_arena)) {
- if (config_prof) {
- idump = arena_prof_accum(tsd_tsdn(tsd),
- tcache_arena, tcache->prof_accumbytes);
- tcache->prof_accumbytes = 0;
+
+ /*
+ * If we acquired the right lock and have some stats to flush,
+ * flush them.
+ */
+ if (config_stats && tcache_arena == cur_arena
+ && !merged_stats) {
+ merged_stats = true;
+ if (small) {
+ cur_bin->stats.nflushes++;
+ cur_bin->stats.nrequests +=
+ cache_bin->tstats.nrequests;
+ cache_bin->tstats.nrequests = 0;
+ } else {
+ arena_stats_large_flush_nrequests_add(tsdn,
+ &tcache_arena->stats, binind,
+ cache_bin->tstats.nrequests);
+ cache_bin->tstats.nrequests = 0;
}
- if (config_stats) {
- merged_stats = true;
- arena_stats_large_flush_nrequests_add(
- tsd_tsdn(tsd), &tcache_arena->stats, binind,
- tbin->tstats.nrequests);
- tbin->tstats.nrequests = 0;
+ }
+
+ /*
+ * Large allocations need special prep done. Afterwards, we can
+ * drop the large lock.
+ */
+ if (!small) {
+ for (unsigned i = 0; i < nflush; i++) {
+ void *ptr = ptrs->ptr[i];
+ edata = item_edata[i].edata;
+ assert(ptr != NULL && edata != NULL);
+
+ if (tcache_bin_flush_match(edata, cur_arena_ind,
+ cur_binshard, small)) {
+ large_dalloc_prep_locked(tsdn,
+ edata);
+ }
}
}
- if (lock_large) {
- malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->large_mtx);
+ if (!small && !arena_is_auto(cur_arena)) {
+ malloc_mutex_unlock(tsdn, &cur_arena->large_mtx);
}
+ /* Deallocate whatever we can. */
unsigned ndeferred = 0;
+ /* Init only to avoid used-uninitialized warning. */
+ arena_dalloc_bin_locked_info_t dalloc_bin_info = {0};
+ if (small) {
+ arena_dalloc_bin_locked_begin(&dalloc_bin_info, binind);
+ }
for (unsigned i = 0; i < nflush; i++) {
- void *ptr = *(tbin->avail - 1 - i);
- extent = item_extent[i];
- assert(ptr != NULL && extent != NULL);
-
- if (extent_arena_ind_get(extent) == locked_arena_ind) {
- large_dalloc_finish(tsd_tsdn(tsd), extent);
- } else {
+ void *ptr = ptrs->ptr[i];
+ edata = item_edata[i].edata;
+ assert(ptr != NULL && edata != NULL);
+ if (!tcache_bin_flush_match(edata, cur_arena_ind,
+ cur_binshard, small)) {
/*
- * This object was allocated via a different
- * arena than the one that is currently locked.
- * Stash the object, so that it can be handled
- * in a future pass.
+ * The object was allocated either via a
+ * different arena, or a different bin in this
+ * arena. Either way, stash the object so that
+ * it can be handled in a future pass.
*/
- *(tbin->avail - 1 - ndeferred) = ptr;
- item_extent[ndeferred] = extent;
+ ptrs->ptr[ndeferred] = ptr;
+ item_edata[ndeferred].edata = edata;
ndeferred++;
+ continue;
+ }
+ if (small) {
+ if (arena_dalloc_bin_locked_step(tsdn,
+ cur_arena, cur_bin, &dalloc_bin_info,
+ binind, edata, ptr)) {
+ dalloc_slabs[dalloc_count] = edata;
+ dalloc_count++;
+ }
+ } else {
+ if (large_dalloc_safety_checks(edata, ptr,
+ binind)) {
+ /* See the comment in isfree. */
+ continue;
+ }
+ large_dalloc_finish(tsdn, edata);
}
}
- if (config_prof && idump) {
- prof_idump(tsd_tsdn(tsd));
+
+ if (small) {
+ arena_dalloc_bin_locked_finish(tsdn, cur_arena, cur_bin,
+ &dalloc_bin_info);
+ malloc_mutex_unlock(tsdn, &cur_bin->lock);
}
- arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
- ndeferred);
+ arena_decay_ticks(tsdn, cur_arena, nflush - ndeferred);
nflush = ndeferred;
}
+
+ /* Handle all deferred slab dalloc. */
+ assert(small || dalloc_count == 0);
+ for (unsigned i = 0; i < dalloc_count; i++) {
+ edata_t *slab = dalloc_slabs[i];
+ arena_slab_dalloc(tsdn, arena_get_from_edata(slab), slab);
+
+ }
+
if (config_stats && !merged_stats) {
- /*
- * The flush loop didn't happen to flush to this thread's
- * arena, so the stats didn't get merged. Manually do so now.
- */
- arena_stats_large_flush_nrequests_add(tsd_tsdn(tsd),
- &tcache_arena->stats, binind, tbin->tstats.nrequests);
- tbin->tstats.nrequests = 0;
+ if (small) {
+ /*
+ * The flush loop didn't happen to flush to this
+ * thread's arena, so the stats didn't get merged.
+ * Manually do so now.
+ */
+ bin_t *bin = arena_bin_choose(tsdn, tcache_arena,
+ binind, NULL);
+ malloc_mutex_lock(tsdn, &bin->lock);
+ bin->stats.nflushes++;
+ bin->stats.nrequests += cache_bin->tstats.nrequests;
+ cache_bin->tstats.nrequests = 0;
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ } else {
+ arena_stats_large_flush_nrequests_add(tsdn,
+ &tcache_arena->stats, binind,
+ cache_bin->tstats.nrequests);
+ cache_bin->tstats.nrequests = 0;
+ }
}
- memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
- sizeof(void *));
- tbin->ncached = rem;
- if (tbin->ncached < tbin->low_water) {
- tbin->low_water = tbin->ncached;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+tcache_bin_flush_bottom(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
+ szind_t binind, unsigned rem, bool small) {
+ tcache_bin_flush_stashed(tsd, tcache, cache_bin, binind, small);
+
+ cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin,
+ &tcache_bin_info[binind]);
+ assert((cache_bin_sz_t)rem <= ncached);
+ unsigned nflush = ncached - rem;
+
+ CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nflush);
+ cache_bin_init_ptr_array_for_flush(cache_bin, &tcache_bin_info[binind],
+ &ptrs, nflush);
+
+ tcache_bin_flush_impl(tsd, tcache, cache_bin, binind, &ptrs, nflush,
+ small);
+
+ cache_bin_finish_flush(cache_bin, &tcache_bin_info[binind], &ptrs,
+ ncached - rem);
+}
+
+void
+tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
+ szind_t binind, unsigned rem) {
+ tcache_bin_flush_bottom(tsd, tcache, cache_bin, binind, rem, true);
+}
+
+void
+tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
+ szind_t binind, unsigned rem) {
+ tcache_bin_flush_bottom(tsd, tcache, cache_bin, binind, rem, false);
+}
+
+/*
+ * Flushing of stashed pointers happens on 1) tcache fill, 2) tcache flush, or
+ * 3) a tcache GC event.  This makes sure that stashed items do not hold memory
+ * for too long, and that new buffers can only be allocated when nothing is
+ * stashed.
+ *
+ * The downside is, the time between stash and flush may be relatively short,
+ * especially when the request rate is high. It lowers the chance of detecting
+ * write-after-free -- however that is a delayed detection anyway, and is less
+ * of a focus than the memory overhead.
+ */
+void
+tcache_bin_flush_stashed(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
+ szind_t binind, bool is_small) {
+ cache_bin_info_t *info = &tcache_bin_info[binind];
+ /*
+ * The two below are for assertion only. The content of original cached
+ * items remain unchanged -- the stashed items reside on the other end
+ * of the stack. Checking the stack head and ncached to verify.
+ */
+ void *head_content = *cache_bin->stack_head;
+ cache_bin_sz_t orig_cached = cache_bin_ncached_get_local(cache_bin,
+ info);
+
+ cache_bin_sz_t nstashed = cache_bin_nstashed_get_local(cache_bin, info);
+ assert(orig_cached + nstashed <= cache_bin_info_ncached_max(info));
+ if (nstashed == 0) {
+ return;
}
+
+ CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nstashed);
+ cache_bin_init_ptr_array_for_stashed(cache_bin, binind, info, &ptrs,
+ nstashed);
+ san_check_stashed_ptrs(ptrs.ptr, nstashed, sz_index2size(binind));
+ tcache_bin_flush_impl(tsd, tcache, cache_bin, binind, &ptrs, nstashed,
+ is_small);
+ cache_bin_finish_flush_stashed(cache_bin, info);
+
+ assert(cache_bin_nstashed_get_local(cache_bin, info) == 0);
+ assert(cache_bin_ncached_get_local(cache_bin, info) == orig_cached);
+ assert(head_content == *cache_bin->stack_head);
}
void
-tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
- assert(tcache->arena == NULL);
- tcache->arena = arena;
+tcache_arena_associate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
+ tcache_t *tcache, arena_t *arena) {
+ assert(tcache_slow->arena == NULL);
+ tcache_slow->arena = arena;
if (config_stats) {
/* Link into list of extant tcaches. */
malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
- ql_elm_new(tcache, link);
- ql_tail_insert(&arena->tcache_ql, tcache, link);
+ ql_elm_new(tcache_slow, link);
+ ql_tail_insert(&arena->tcache_ql, tcache_slow, link);
cache_bin_array_descriptor_init(
- &tcache->cache_bin_array_descriptor, tcache->bins_small,
- tcache->bins_large);
+ &tcache_slow->cache_bin_array_descriptor, tcache->bins);
ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
- &tcache->cache_bin_array_descriptor, link);
+ &tcache_slow->cache_bin_array_descriptor, link);
malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
}
}
static void
-tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache) {
- arena_t *arena = tcache->arena;
+tcache_arena_dissociate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
+ tcache_t *tcache) {
+ arena_t *arena = tcache_slow->arena;
assert(arena != NULL);
if (config_stats) {
/* Unlink from list of extant tcaches. */
malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
if (config_debug) {
bool in_ql = false;
- tcache_t *iter;
+ tcache_slow_t *iter;
ql_foreach(iter, &arena->tcache_ql, link) {
- if (iter == tcache) {
+ if (iter == tcache_slow) {
in_ql = true;
break;
}
}
assert(in_ql);
}
- ql_remove(&arena->tcache_ql, tcache, link);
+ ql_remove(&arena->tcache_ql, tcache_slow, link);
ql_remove(&arena->cache_bin_array_descriptor_ql,
- &tcache->cache_bin_array_descriptor, link);
- tcache_stats_merge(tsdn, tcache, arena);
+ &tcache_slow->cache_bin_array_descriptor, link);
+ tcache_stats_merge(tsdn, tcache_slow->tcache, arena);
malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
}
- tcache->arena = NULL;
+ tcache_slow->arena = NULL;
}
void
-tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
- tcache_arena_dissociate(tsdn, tcache);
- tcache_arena_associate(tsdn, tcache, arena);
+tcache_arena_reassociate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
+ tcache_t *tcache, arena_t *arena) {
+ tcache_arena_dissociate(tsdn, tcache_slow, tcache);
+ tcache_arena_associate(tsdn, tcache_slow, tcache, arena);
}
bool
@@ -405,56 +647,80 @@ tsd_tcache_enabled_data_init(tsd_t *tsd) {
return false;
}
-/* Initialize auto tcache (embedded in TSD). */
static void
-tcache_init(tsd_t *tsd, tcache_t *tcache, void *avail_stack) {
- memset(&tcache->link, 0, sizeof(ql_elm(tcache_t)));
- tcache->prof_accumbytes = 0;
- tcache->next_gc_bin = 0;
- tcache->arena = NULL;
-
- ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);
-
- size_t stack_offset = 0;
- assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
- memset(tcache->bins_small, 0, sizeof(cache_bin_t) * SC_NBINS);
- memset(tcache->bins_large, 0, sizeof(cache_bin_t) * (nhbins - SC_NBINS));
- unsigned i = 0;
- for (; i < SC_NBINS; i++) {
- tcache->lg_fill_div[i] = 1;
- stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
- /*
- * avail points past the available space. Allocations will
- * access the slots toward higher addresses (for the benefit of
- * prefetch).
- */
- tcache_small_bin_get(tcache, i)->avail =
- (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset);
+tcache_init(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache,
+ void *mem) {
+ tcache->tcache_slow = tcache_slow;
+ tcache_slow->tcache = tcache;
+
+ memset(&tcache_slow->link, 0, sizeof(ql_elm(tcache_t)));
+ tcache_slow->next_gc_bin = 0;
+ tcache_slow->arena = NULL;
+ tcache_slow->dyn_alloc = mem;
+
+ /*
+ * We reserve cache bins for all small size classes, even if some may
+ * not get used (i.e. bins higher than nhbins). This allows the fast
+ * and common paths to access cache bin metadata safely w/o worrying
+ * about which ones are disabled.
+ */
+ unsigned n_reserved_bins = nhbins < SC_NBINS ? SC_NBINS : nhbins;
+ memset(tcache->bins, 0, sizeof(cache_bin_t) * n_reserved_bins);
+
+ size_t cur_offset = 0;
+ cache_bin_preincrement(tcache_bin_info, nhbins, mem,
+ &cur_offset);
+ for (unsigned i = 0; i < nhbins; i++) {
+ if (i < SC_NBINS) {
+ tcache_slow->lg_fill_div[i] = 1;
+ tcache_slow->bin_refilled[i] = false;
+ tcache_slow->bin_flush_delay_items[i]
+ = tcache_gc_item_delay_compute(i);
+ }
+ cache_bin_t *cache_bin = &tcache->bins[i];
+ cache_bin_init(cache_bin, &tcache_bin_info[i], mem,
+ &cur_offset);
}
- for (; i < nhbins; i++) {
- stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
- tcache_large_bin_get(tcache, i)->avail =
- (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset);
+ /*
+ * For small size classes beyond tcache_maxclass (i.e. nhbins < SC_NBINS),
+ * their cache bins are initialized to a state that safely and efficiently
+ * fails all fastpath alloc / free, so that no additional check around
+ * nhbins is needed on the fastpath.
+ */
+ for (unsigned i = nhbins; i < SC_NBINS; i++) {
+ /* Disabled small bins. */
+ cache_bin_t *cache_bin = &tcache->bins[i];
+ void *fake_stack = mem;
+ size_t fake_offset = 0;
+
+ cache_bin_init(cache_bin, &tcache_bin_info[i], fake_stack,
+ &fake_offset);
+ assert(tcache_small_bin_disabled(i, cache_bin));
}
- assert(stack_offset == stack_nelms * sizeof(void *));
+
+ cache_bin_postincrement(tcache_bin_info, nhbins, mem,
+ &cur_offset);
+ /* Sanity check that the whole stack is used. */
+ assert(cur_offset == tcache_bin_alloc_size);
}
/* Initialize auto tcache (embedded in TSD). */
bool
tsd_tcache_data_init(tsd_t *tsd) {
+ tcache_slow_t *tcache_slow = tsd_tcache_slowp_get_unsafe(tsd);
tcache_t *tcache = tsd_tcachep_get_unsafe(tsd);
- assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
- size_t size = stack_nelms * sizeof(void *);
- /* Avoid false cacheline sharing. */
- size = sz_sa2u(size, CACHELINE);
-
- void *avail_array = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true,
- NULL, true, arena_get(TSDN_NULL, 0, true));
- if (avail_array == NULL) {
+
+ assert(cache_bin_still_zero_initialized(&tcache->bins[0]));
+ size_t alignment = tcache_bin_alloc_alignment;
+ size_t size = sz_sa2u(tcache_bin_alloc_size, alignment);
+
+ void *mem = ipallocztm(tsd_tsdn(tsd), size, alignment, true, NULL,
+ true, arena_get(TSDN_NULL, 0, true));
+ if (mem == NULL) {
return true;
}
- tcache_init(tsd, tcache, avail_array);
+ tcache_init(tsd, tcache_slow, tcache, mem);
/*
* Initialization is a bit tricky here. After malloc init is done, all
* threads can rely on arena_choose and associate tcache accordingly.
@@ -463,20 +729,22 @@ tsd_tcache_data_init(tsd_t *tsd) {
* associate its tcache to a0 temporarily, and later on
* arena_choose_hard() will re-associate properly.
*/
- tcache->arena = NULL;
+ tcache_slow->arena = NULL;
arena_t *arena;
if (!malloc_initialized()) {
/* If in initialization, assign to a0. */
arena = arena_get(tsd_tsdn(tsd), 0, false);
- tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
+ tcache_arena_associate(tsd_tsdn(tsd), tcache_slow, tcache,
+ arena);
} else {
arena = arena_choose(tsd, NULL);
/* This may happen if thread.tcache.enabled is used. */
- if (tcache->arena == NULL) {
- tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
+ if (tcache_slow->arena == NULL) {
+ tcache_arena_associate(tsd_tsdn(tsd), tcache_slow,
+ tcache, arena);
}
}
- assert(arena == tcache->arena);
+ assert(arena == tcache_slow->arena);
return false;
}
@@ -484,56 +752,49 @@ tsd_tcache_data_init(tsd_t *tsd) {
/* Created manual tcache for tcache.create mallctl. */
tcache_t *
tcache_create_explicit(tsd_t *tsd) {
- tcache_t *tcache;
- size_t size, stack_offset;
-
- size = sizeof(tcache_t);
+ /*
+ * We place the cache bin stacks, then the tcache_t, then a pointer to
+ * the beginning of the whole allocation (for freeing). This makes sure
+ * the cache bins have the requested alignment.
+ */
+ size_t size = tcache_bin_alloc_size + sizeof(tcache_t)
+ + sizeof(tcache_slow_t);
/* Naturally align the pointer stacks. */
size = PTR_CEILING(size);
- stack_offset = size;
- size += stack_nelms * sizeof(void *);
- /* Avoid false cacheline sharing. */
- size = sz_sa2u(size, CACHELINE);
+ size = sz_sa2u(size, tcache_bin_alloc_alignment);
- tcache = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, NULL, true,
- arena_get(TSDN_NULL, 0, true));
- if (tcache == NULL) {
+ void *mem = ipallocztm(tsd_tsdn(tsd), size, tcache_bin_alloc_alignment,
+ true, NULL, true, arena_get(TSDN_NULL, 0, true));
+ if (mem == NULL) {
return NULL;
}
+ tcache_t *tcache = (void *)((uintptr_t)mem + tcache_bin_alloc_size);
+ tcache_slow_t *tcache_slow =
+ (void *)((uintptr_t)mem + tcache_bin_alloc_size + sizeof(tcache_t));
+ tcache_init(tsd, tcache_slow, tcache, mem);
- tcache_init(tsd, tcache,
- (void *)((uintptr_t)tcache + (uintptr_t)stack_offset));
- tcache_arena_associate(tsd_tsdn(tsd), tcache, arena_ichoose(tsd, NULL));
+ tcache_arena_associate(tsd_tsdn(tsd), tcache_slow, tcache,
+ arena_ichoose(tsd, NULL));
return tcache;
}
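
The layout arithmetic in tcache_create_explicit() (bin stacks first, then the tcache_t, then the tcache_slow_t) can be checked with a small sketch. The sizes below are invented; the point is only that because the stacks sit at the start of an allocation aligned to tcache_bin_alloc_alignment, they inherit that alignment, and the two structs are found at fixed offsets past them.

    /* Illustrative offsets for the explicit-tcache allocation layout. */
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void) {
        /* Hypothetical stand-ins for the real globals and struct sizes. */
        size_t bin_alloc_size = 1024;  /* like tcache_bin_alloc_size */
        size_t tcache_size = 256;      /* like sizeof(tcache_t) */
        size_t tcache_slow_size = 512; /* like sizeof(tcache_slow_t) */

        uintptr_t mem = 0x10000;       /* pretend base from ipallocztm() */
        uintptr_t tcache = mem + bin_alloc_size;
        uintptr_t tcache_slow = tcache + tcache_size;

        printf("stacks %#zx..%#zx, tcache_t at %#zx, tcache_slow_t at %#zx\n",
            (size_t)mem, (size_t)(mem + bin_alloc_size), (size_t)tcache,
            (size_t)tcache_slow);
        printf("total footprint before sz_sa2u(): %zu bytes\n",
            bin_alloc_size + tcache_size + tcache_slow_size);
        return 0;
    }
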
static void
tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
- assert(tcache->arena != NULL);
-
- for (unsigned i = 0; i < SC_NBINS; i++) {
- cache_bin_t *tbin = tcache_small_bin_get(tcache, i);
- tcache_bin_flush_small(tsd, tcache, tbin, i, 0);
+ tcache_slow_t *tcache_slow = tcache->tcache_slow;
+ assert(tcache_slow->arena != NULL);
- if (config_stats) {
- assert(tbin->tstats.nrequests == 0);
+ for (unsigned i = 0; i < nhbins; i++) {
+ cache_bin_t *cache_bin = &tcache->bins[i];
+ if (i < SC_NBINS) {
+ tcache_bin_flush_small(tsd, tcache, cache_bin, i, 0);
+ } else {
+ tcache_bin_flush_large(tsd, tcache, cache_bin, i, 0);
}
- }
- for (unsigned i = SC_NBINS; i < nhbins; i++) {
- cache_bin_t *tbin = tcache_large_bin_get(tcache, i);
- tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
-
if (config_stats) {
- assert(tbin->tstats.nrequests == 0);
+ assert(cache_bin->tstats.nrequests == 0);
}
}
-
- if (config_prof && tcache->prof_accumbytes > 0 &&
- arena_prof_accum(tsd_tsdn(tsd), tcache->arena,
- tcache->prof_accumbytes)) {
- prof_idump(tsd_tsdn(tsd));
- }
}
void
@@ -544,20 +805,17 @@ tcache_flush(tsd_t *tsd) {
static void
tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) {
+ tcache_slow_t *tcache_slow = tcache->tcache_slow;
tcache_flush_cache(tsd, tcache);
- arena_t *arena = tcache->arena;
- tcache_arena_dissociate(tsd_tsdn(tsd), tcache);
+ arena_t *arena = tcache_slow->arena;
+ tcache_arena_dissociate(tsd_tsdn(tsd), tcache_slow, tcache);
if (tsd_tcache) {
- /* Release the avail array for the TSD embedded auto tcache. */
- void *avail_array =
- (void *)((uintptr_t)tcache_small_bin_get(tcache, 0)->avail -
- (uintptr_t)tcache_bin_info[0].ncached_max * sizeof(void *));
- idalloctm(tsd_tsdn(tsd), avail_array, NULL, NULL, true, true);
- } else {
- /* Release both the tcache struct and avail array. */
- idalloctm(tsd_tsdn(tsd), tcache, NULL, NULL, true, true);
+ cache_bin_t *cache_bin = &tcache->bins[0];
+ cache_bin_assert_empty(cache_bin, &tcache_bin_info[0]);
}
+ idalloctm(tsd_tsdn(tsd), tcache_slow->dyn_alloc, NULL, NULL, true,
+ true);
/*
* The deallocation and tcache flush above may not trigger decay since
@@ -571,9 +829,11 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) {
if (arena_nthreads_get(arena, false) == 0 &&
!background_thread_enabled()) {
/* Force purging when no threads assigned to the arena anymore. */
- arena_decay(tsd_tsdn(tsd), arena, false, true);
+ arena_decay(tsd_tsdn(tsd), arena,
+ /* is_background_thread */ false, /* all */ true);
} else {
- arena_decay(tsd_tsdn(tsd), arena, false, false);
+ arena_decay(tsd_tsdn(tsd), arena,
+ /* is_background_thread */ false, /* all */ false);
}
}
@@ -583,53 +843,51 @@ tcache_cleanup(tsd_t *tsd) {
tcache_t *tcache = tsd_tcachep_get(tsd);
if (!tcache_available(tsd)) {
assert(tsd_tcache_enabled_get(tsd) == false);
- if (config_debug) {
- assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
- }
+ assert(cache_bin_still_zero_initialized(&tcache->bins[0]));
return;
}
assert(tsd_tcache_enabled_get(tsd));
- assert(tcache_small_bin_get(tcache, 0)->avail != NULL);
+ assert(!cache_bin_still_zero_initialized(&tcache->bins[0]));
tcache_destroy(tsd, tcache, true);
if (config_debug) {
- tcache_small_bin_get(tcache, 0)->avail = NULL;
+ /*
+ * For debug testing only, we want to pretend we're still in the
+ * zero-initialized state.
+ */
+ memset(tcache->bins, 0, sizeof(cache_bin_t) * nhbins);
}
}
void
tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
- unsigned i;
-
cassert(config_stats);
/* Merge and reset tcache stats. */
- for (i = 0; i < SC_NBINS; i++) {
- cache_bin_t *tbin = tcache_small_bin_get(tcache, i);
- unsigned binshard;
- bin_t *bin = arena_bin_choose_lock(tsdn, arena, i, &binshard);
- bin->stats.nrequests += tbin->tstats.nrequests;
- malloc_mutex_unlock(tsdn, &bin->lock);
- tbin->tstats.nrequests = 0;
- }
-
- for (; i < nhbins; i++) {
- cache_bin_t *tbin = tcache_large_bin_get(tcache, i);
- arena_stats_large_flush_nrequests_add(tsdn, &arena->stats, i,
- tbin->tstats.nrequests);
- tbin->tstats.nrequests = 0;
+ for (unsigned i = 0; i < nhbins; i++) {
+ cache_bin_t *cache_bin = &tcache->bins[i];
+ if (i < SC_NBINS) {
+ bin_t *bin = arena_bin_choose(tsdn, arena, i, NULL);
+ malloc_mutex_lock(tsdn, &bin->lock);
+ bin->stats.nrequests += cache_bin->tstats.nrequests;
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ } else {
+ arena_stats_large_flush_nrequests_add(tsdn,
+ &arena->stats, i, cache_bin->tstats.nrequests);
+ }
+ cache_bin->tstats.nrequests = 0;
}
}
static bool
-tcaches_create_prep(tsd_t *tsd) {
+tcaches_create_prep(tsd_t *tsd, base_t *base) {
bool err;
- malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &tcaches_mtx);
if (tcaches == NULL) {
- tcaches = base_alloc(tsd_tsdn(tsd), b0get(), sizeof(tcache_t *)
- * (MALLOCX_TCACHE_MAX+1), CACHELINE);
+ tcaches = base_alloc(tsd_tsdn(tsd), base,
+ sizeof(tcache_t *) * (MALLOCX_TCACHE_MAX+1), CACHELINE);
if (tcaches == NULL) {
err = true;
goto label_return;
@@ -643,17 +901,18 @@ tcaches_create_prep(tsd_t *tsd) {
err = false;
label_return:
- malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
return err;
}
bool
-tcaches_create(tsd_t *tsd, unsigned *r_ind) {
+tcaches_create(tsd_t *tsd, base_t *base, unsigned *r_ind) {
witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0);
bool err;
- if (tcaches_create_prep(tsd)) {
+ malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
+
+ if (tcaches_create_prep(tsd, base)) {
err = true;
goto label_return;
}
@@ -665,7 +924,6 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind) {
}
tcaches_t *elm;
- malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
if (tcaches_avail != NULL) {
elm = tcaches_avail;
tcaches_avail = tcaches_avail->next;
@@ -677,10 +935,10 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind) {
*r_ind = tcaches_past;
tcaches_past++;
}
- malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
err = false;
label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0);
return err;
}
@@ -729,70 +987,115 @@ tcaches_destroy(tsd_t *tsd, unsigned ind) {
}
}
-bool
-tcache_boot(tsdn_t *tsdn) {
- /* If necessary, clamp opt_lg_tcache_max. */
- if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) <
- SC_SMALL_MAXCLASS) {
- tcache_maxclass = SC_SMALL_MAXCLASS;
+static unsigned
+tcache_ncached_max_compute(szind_t szind) {
+ if (szind >= SC_NBINS) {
+ assert(szind < nhbins);
+ return opt_tcache_nslots_large;
+ }
+ unsigned slab_nregs = bin_infos[szind].nregs;
+
+ /* We may modify these values; start with the opt versions. */
+ unsigned nslots_small_min = opt_tcache_nslots_small_min;
+ unsigned nslots_small_max = opt_tcache_nslots_small_max;
+
+ /*
+ * Clamp values to meet our constraints -- even, nonzero, min < max, and
+ * suitable for a cache bin size.
+ */
+ if (opt_tcache_nslots_small_max > CACHE_BIN_NCACHED_MAX) {
+ nslots_small_max = CACHE_BIN_NCACHED_MAX;
+ }
+ if (nslots_small_min % 2 != 0) {
+ nslots_small_min++;
+ }
+ if (nslots_small_max % 2 != 0) {
+ nslots_small_max--;
+ }
+ if (nslots_small_min < 2) {
+ nslots_small_min = 2;
+ }
+ if (nslots_small_max < 2) {
+ nslots_small_max = 2;
+ }
+ if (nslots_small_min > nslots_small_max) {
+ nslots_small_min = nslots_small_max;
+ }
+
+ unsigned candidate;
+ if (opt_lg_tcache_nslots_mul < 0) {
+ candidate = slab_nregs >> (-opt_lg_tcache_nslots_mul);
} else {
- tcache_maxclass = (ZU(1) << opt_lg_tcache_max);
+ candidate = slab_nregs << opt_lg_tcache_nslots_mul;
+ }
+ if (candidate % 2 != 0) {
+ /*
+ * We need the candidate size to be even -- we assume that we
+ * can divide by two and get a positive number (e.g. when
+ * flushing).
+ */
+ ++candidate;
}
+ if (candidate <= nslots_small_min) {
+ return nslots_small_min;
+ } else if (candidate <= nslots_small_max) {
+ return candidate;
+ } else {
+ return nslots_small_max;
+ }
+}
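
As a concrete illustration of the clamping above: with a hypothetical slab_nregs of 512 and a multiplier of -1, the candidate is 512 >> 1 = 256, which then gets clamped into the (also hypothetical) [min, max] window. The standalone sketch below restates the same bounds logic with local parameters in place of the opt_* globals.

    /* Illustrative restatement of the small-bin slot-count clamp. */
    #include <stdio.h>

    static unsigned
    demo_ncached_max(unsigned slab_nregs, int lg_mul, unsigned nmin,
        unsigned nmax, unsigned hard_cap) {
        /* Keep everything even, nonzero, and min <= max, as above. */
        if (nmax > hard_cap) {
            nmax = hard_cap;
        }
        if (nmin % 2 != 0) {
            nmin++;
        }
        if (nmax % 2 != 0) {
            nmax--;
        }
        if (nmin < 2) {
            nmin = 2;
        }
        if (nmax < 2) {
            nmax = 2;
        }
        if (nmin > nmax) {
            nmin = nmax;
        }
        unsigned candidate = (lg_mul < 0) ?
            slab_nregs >> (-lg_mul) : slab_nregs << lg_mul;
        if (candidate % 2 != 0) {
            candidate++; /* flushing assumes an even, halvable capacity */
        }
        if (candidate <= nmin) {
            return nmin;
        } else if (candidate <= nmax) {
            return candidate;
        }
        return nmax;
    }

    int
    main(void) {
        /* Hypothetical inputs; the allocator's defaults may differ. */
        printf("%u\n", demo_ncached_max(512, -1, 20, 200, 512)); /* 200 */
        printf("%u\n", demo_ncached_max(64, -1, 20, 200, 512));  /* 32 */
        return 0;
    }
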
+
+bool
+tcache_boot(tsdn_t *tsdn, base_t *base) {
+ tcache_maxclass = sz_s2u(opt_tcache_max);
+ assert(tcache_maxclass <= TCACHE_MAXCLASS_LIMIT);
+ nhbins = sz_size2index(tcache_maxclass) + 1;
if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES,
malloc_mutex_rank_exclusive)) {
return true;
}
- nhbins = sz_size2index(tcache_maxclass) + 1;
-
- /* Initialize tcache_bin_info. */
- tcache_bin_info = (cache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins
- * sizeof(cache_bin_info_t), CACHELINE);
+ /* Initialize tcache_bin_info. See comments in tcache_init(). */
+ unsigned n_reserved_bins = nhbins < SC_NBINS ? SC_NBINS : nhbins;
+ size_t size = n_reserved_bins * sizeof(cache_bin_info_t);
+ tcache_bin_info = (cache_bin_info_t *)base_alloc(tsdn, base, size,
+ CACHELINE);
if (tcache_bin_info == NULL) {
return true;
}
- stack_nelms = 0;
- unsigned i;
- for (i = 0; i < SC_NBINS; i++) {
- if ((bin_infos[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
- tcache_bin_info[i].ncached_max =
- TCACHE_NSLOTS_SMALL_MIN;
- } else if ((bin_infos[i].nregs << 1) <=
- TCACHE_NSLOTS_SMALL_MAX) {
- tcache_bin_info[i].ncached_max =
- (bin_infos[i].nregs << 1);
- } else {
- tcache_bin_info[i].ncached_max =
- TCACHE_NSLOTS_SMALL_MAX;
- }
- stack_nelms += tcache_bin_info[i].ncached_max;
+
+ for (szind_t i = 0; i < nhbins; i++) {
+ unsigned ncached_max = tcache_ncached_max_compute(i);
+ cache_bin_info_init(&tcache_bin_info[i], ncached_max);
}
- for (; i < nhbins; i++) {
- tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
- stack_nelms += tcache_bin_info[i].ncached_max;
+ for (szind_t i = nhbins; i < SC_NBINS; i++) {
+ /* Disabled small bins. */
+ cache_bin_info_init(&tcache_bin_info[i], 0);
+ assert(tcache_small_bin_disabled(i, NULL));
}
+ cache_bin_info_compute_alloc(tcache_bin_info, nhbins,
+ &tcache_bin_alloc_size, &tcache_bin_alloc_alignment);
+
return false;
}
void
tcache_prefork(tsdn_t *tsdn) {
- if (!config_prof && opt_tcache) {
- malloc_mutex_prefork(tsdn, &tcaches_mtx);
- }
+ malloc_mutex_prefork(tsdn, &tcaches_mtx);
}
void
tcache_postfork_parent(tsdn_t *tsdn) {
- if (!config_prof && opt_tcache) {
- malloc_mutex_postfork_parent(tsdn, &tcaches_mtx);
- }
+ malloc_mutex_postfork_parent(tsdn, &tcaches_mtx);
}
void
tcache_postfork_child(tsdn_t *tsdn) {
- if (!config_prof && opt_tcache) {
- malloc_mutex_postfork_child(tsdn, &tcaches_mtx);
- }
+ malloc_mutex_postfork_child(tsdn, &tcaches_mtx);
+}
+
+void tcache_assert_initialized(tcache_t *tcache) {
+ assert(!cache_bin_still_zero_initialized(&tcache->bins[0]));
}
diff --git a/deps/jemalloc/src/thread_event.c b/deps/jemalloc/src/thread_event.c
new file mode 100644
index 000000000..37eb5827d
--- /dev/null
+++ b/deps/jemalloc/src/thread_event.c
@@ -0,0 +1,343 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/thread_event.h"
+
+/*
+ * Signatures for event-specific functions. These functions should be defined
+ * by the modules owning each event. The signatures here verify that the
+ * definitions follow the right format.
+ *
+ * The first two are functions computing new / postponed event wait time. New
+ * event wait time is the time till the next event if an event is currently
+ * being triggered; postponed event wait time is the time till the next event
+ * if an event should be triggered but needs to be postponed, e.g. when the TSD
+ * is not nominal or during reentrancy.
+ *
+ * The third is the event handler function, which is called whenever an event
+ * is triggered. The parameter is the elapsed time since the last time an
+ * event of the same type was triggered.
+ */
+#define E(event, condition_unused, is_alloc_event_unused) \
+uint64_t event##_new_event_wait(tsd_t *tsd); \
+uint64_t event##_postponed_event_wait(tsd_t *tsd); \
+void event##_event_handler(tsd_t *tsd, uint64_t elapsed);
+
+ITERATE_OVER_ALL_EVENTS
+#undef E
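
thread_event.c relies throughout on the X-macro pattern: ITERATE_OVER_ALL_EVENTS expands one E(...) entry per event, and each use site defines E to emit whatever code it needs before #undef-ing it. A minimal self-contained sketch of the same pattern, with made-up events rather than the real event list, looks like this:

    /* Toy X-macro: one entry per "event", reused to declare and to iterate. */
    #include <stdio.h>

    #define ITERATE_OVER_DEMO_EVENTS	\
    	E(foo, 100)			\
    	E(bar, 250)

    /* First expansion: define one handler per event. */
    #define E(event, interval)					\
    static void							\
    event##_handler(void) {					\
    	printf(#event " fired (interval %d)\n", interval);	\
    }
    ITERATE_OVER_DEMO_EVENTS
    #undef E

    int
    main(void) {
        /* Second expansion: invoke every handler. */
    #define E(event, interval) event##_handler();
        ITERATE_OVER_DEMO_EVENTS
    #undef E
        return 0;
    }
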
+
+/* Signatures for internal functions fetching elapsed time. */
+#define E(event, condition_unused, is_alloc_event_unused) \
+static uint64_t event##_fetch_elapsed(tsd_t *tsd);
+
+ITERATE_OVER_ALL_EVENTS
+#undef E
+
+static uint64_t
+tcache_gc_fetch_elapsed(tsd_t *tsd) {
+ return TE_INVALID_ELAPSED;
+}
+
+static uint64_t
+tcache_gc_dalloc_fetch_elapsed(tsd_t *tsd) {
+ return TE_INVALID_ELAPSED;
+}
+
+static uint64_t
+prof_sample_fetch_elapsed(tsd_t *tsd) {
+ uint64_t last_event = thread_allocated_last_event_get(tsd);
+ uint64_t last_sample_event = prof_sample_last_event_get(tsd);
+ prof_sample_last_event_set(tsd, last_event);
+ return last_event - last_sample_event;
+}
+
+static uint64_t
+stats_interval_fetch_elapsed(tsd_t *tsd) {
+ uint64_t last_event = thread_allocated_last_event_get(tsd);
+ uint64_t last_stats_event = stats_interval_last_event_get(tsd);
+ stats_interval_last_event_set(tsd, last_event);
+ return last_event - last_stats_event;
+}
+
+static uint64_t
+peak_alloc_fetch_elapsed(tsd_t *tsd) {
+ return TE_INVALID_ELAPSED;
+}
+
+static uint64_t
+peak_dalloc_fetch_elapsed(tsd_t *tsd) {
+ return TE_INVALID_ELAPSED;
+}
+
+/* Per event facilities done. */
+
+static bool
+te_ctx_has_active_events(te_ctx_t *ctx) {
+ assert(config_debug);
+#define E(event, condition, alloc_event) \
+ if (condition && alloc_event == ctx->is_alloc) { \
+ return true; \
+ }
+ ITERATE_OVER_ALL_EVENTS
+#undef E
+ return false;
+}
+
+static uint64_t
+te_next_event_compute(tsd_t *tsd, bool is_alloc) {
+ uint64_t wait = TE_MAX_START_WAIT;
+#define E(event, condition, alloc_event) \
+ if (is_alloc == alloc_event && condition) { \
+ uint64_t event_wait = \
+ event##_event_wait_get(tsd); \
+ assert(event_wait <= TE_MAX_START_WAIT); \
+ if (event_wait > 0U && event_wait < wait) { \
+ wait = event_wait; \
+ } \
+ }
+
+ ITERATE_OVER_ALL_EVENTS
+#undef E
+ assert(wait <= TE_MAX_START_WAIT);
+ return wait;
+}
+
+static void
+te_assert_invariants_impl(tsd_t *tsd, te_ctx_t *ctx) {
+ uint64_t current_bytes = te_ctx_current_bytes_get(ctx);
+ uint64_t last_event = te_ctx_last_event_get(ctx);
+ uint64_t next_event = te_ctx_next_event_get(ctx);
+ uint64_t next_event_fast = te_ctx_next_event_fast_get(ctx);
+
+ assert(last_event != next_event);
+ if (next_event > TE_NEXT_EVENT_FAST_MAX || !tsd_fast(tsd)) {
+ assert(next_event_fast == 0U);
+ } else {
+ assert(next_event_fast == next_event);
+ }
+
+ /* The subtraction is intentionally susceptible to underflow. */
+ uint64_t interval = next_event - last_event;
+
+ /* The subtraction is intentionally susceptible to underflow. */
+ assert(current_bytes - last_event < interval);
+ uint64_t min_wait = te_next_event_compute(tsd, te_ctx_is_alloc(ctx));
+ /*
+ * next_event should have been pushed up, except when no event is
+ * active and the TSD is just initialized. The last_event == 0U guard
+ * below is stronger than needed, but having an exactly accurate guard
+ * is more complicated to implement.
+ */
+ assert((!te_ctx_has_active_events(ctx) && last_event == 0U) ||
+ interval == min_wait ||
+ (interval < min_wait && interval == TE_MAX_INTERVAL));
+}
+
+void
+te_assert_invariants_debug(tsd_t *tsd) {
+ te_ctx_t ctx;
+ te_ctx_get(tsd, &ctx, true);
+ te_assert_invariants_impl(tsd, &ctx);
+
+ te_ctx_get(tsd, &ctx, false);
+ te_assert_invariants_impl(tsd, &ctx);
+}
+
+/*
+ * Synchronization around the fast threshold in tsd --
+ * There are two threads to consider in the synchronization here:
+ * - The owner of the tsd being updated by a slow path change
+ * - The remote thread, doing that slow path change.
+ *
+ * As a design constraint, we want to ensure that a slow-path transition cannot
+ * be ignored for arbitrarily long, and that if the remote thread causes a
+ * slow-path transition and then communicates with the owner thread that it has
+ * occurred, then the owner will go down the slow path on the next allocator
+ * operation (i.e. we don't want to just wait until the owner hits its
+ * slow-path reset condition on its own).
+ *
+ * Here's our strategy to do that:
+ *
+ * The remote thread will update the slow-path stores to TSD variables, issue a
+ * SEQ_CST fence, and then update the TSD next_event_fast counter. The owner
+ * thread will update next_event_fast, issue an SEQ_CST fence, and then check
+ * its TSD to see if it's on the slow path.
+ *
+ * This is fairly straightforward when 64-bit atomics are supported. Assume that
+ * the remote fence is sandwiched between two owner fences in the reset pathway.
+ * The case where there is no preceding or trailing owner fence (i.e. because
+ * the owner thread is near the beginning or end of its life) can be analyzed
+ * similarly. The owner store to next_event_fast preceding the earlier owner
+ * fence will be earlier in coherence order than the remote store to it, so that
+ * the owner thread will go down the slow path once the store becomes visible to
+ * it, which is no later than the time of the second fence.
+ *
+ * The case where we don't support 64-bit atomics is trickier, since word
+ * tearing is possible. We'll repeat the same analysis, and look at the two
+ * owner fences sandwiching the remote fence. The next_event_fast stores done
+ * alongside the earlier owner fence cannot overwrite any of the remote stores
+ * (since they precede the earlier owner fence in sb, which precedes the remote
+ * fence in sc, which precedes the remote stores in sb). After the second owner
+ * fence there will be a re-check of the slow-path variables anyways, so the
+ * "owner will notice that it's on the slow path eventually" guarantee is
+ * satisfied. To make sure that the out-of-band-messaging constraint is as well,
+ * note that either the message passing is sequenced before the second owner
+ * fence (in which case the remote stores happen before the second set of owner
+ * stores, so malloc sees a value of zero for next_event_fast and goes down the
+ * slow path), or it is not (in which case the owner sees the tsd slow-path
+ * writes on its previous update). This leaves open the possibility that the
+ * remote thread will (at some arbitrary point in the future) zero out one half
+ * of the owner thread's next_event_fast, but that's always safe (it just sends
+ * it down the slow path earlier).
+ */
+static void
+te_ctx_next_event_fast_update(te_ctx_t *ctx) {
+ uint64_t next_event = te_ctx_next_event_get(ctx);
+ uint64_t next_event_fast = (next_event <= TE_NEXT_EVENT_FAST_MAX) ?
+ next_event : 0U;
+ te_ctx_next_event_fast_set(ctx, next_event_fast);
+}
+
+void
+te_recompute_fast_threshold(tsd_t *tsd) {
+ if (tsd_state_get(tsd) != tsd_state_nominal) {
+ /* Check first because this is also called on purgatory. */
+ te_next_event_fast_set_non_nominal(tsd);
+ return;
+ }
+
+ te_ctx_t ctx;
+ te_ctx_get(tsd, &ctx, true);
+ te_ctx_next_event_fast_update(&ctx);
+ te_ctx_get(tsd, &ctx, false);
+ te_ctx_next_event_fast_update(&ctx);
+
+ atomic_fence(ATOMIC_SEQ_CST);
+ if (tsd_state_get(tsd) != tsd_state_nominal) {
+ te_next_event_fast_set_non_nominal(tsd);
+ }
+}
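
The fence protocol described in the comment above can be reduced to a small C11 sketch. This is not jemalloc's tsd code: state and next_event_fast below are plain globals standing in for the per-thread slow-path indicator and fast-path threshold, and the single-threaded main() only exercises the calls; the interesting case is when the two functions run on different threads.

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint8_t state;            /* 0 = nominal, 1 = recompute */
    static _Atomic uint64_t next_event_fast; /* 0 forces the slow path */

    /* Remote thread: force the owner onto the slow path. */
    static void
    remote_force_slow(void) {
        atomic_store_explicit(&state, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);
        atomic_store_explicit(&next_event_fast, 0, memory_order_relaxed);
    }

    /* Owner thread: publish a new fast threshold, then re-check the state. */
    static void
    owner_recompute(uint64_t next_event) {
        atomic_store_explicit(&next_event_fast, next_event,
            memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);
        if (atomic_load_explicit(&state, memory_order_relaxed) != 0) {
            /* Slow-path transition noticed: keep the threshold at 0. */
            atomic_store_explicit(&next_event_fast, 0,
                memory_order_relaxed);
        }
    }

    int
    main(void) {
        owner_recompute(1000);
        remote_force_slow();
        return (int)atomic_load_explicit(&next_event_fast,
            memory_order_relaxed);
    }
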
+
+static void
+te_adjust_thresholds_helper(tsd_t *tsd, te_ctx_t *ctx,
+ uint64_t wait) {
+ /*
+ * The next threshold based on future events can only be adjusted after
+ * progressing the last_event counter (which is set to current).
+ */
+ assert(te_ctx_current_bytes_get(ctx) == te_ctx_last_event_get(ctx));
+ assert(wait <= TE_MAX_START_WAIT);
+
+ uint64_t next_event = te_ctx_last_event_get(ctx) + (wait <=
+ TE_MAX_INTERVAL ? wait : TE_MAX_INTERVAL);
+ te_ctx_next_event_set(tsd, ctx, next_event);
+}
+
+static uint64_t
+te_clip_event_wait(uint64_t event_wait) {
+ assert(event_wait > 0U);
+ if (TE_MIN_START_WAIT > 1U &&
+ unlikely(event_wait < TE_MIN_START_WAIT)) {
+ event_wait = TE_MIN_START_WAIT;
+ }
+ if (TE_MAX_START_WAIT < UINT64_MAX &&
+ unlikely(event_wait > TE_MAX_START_WAIT)) {
+ event_wait = TE_MAX_START_WAIT;
+ }
+ return event_wait;
+}
+
+void
+te_event_trigger(tsd_t *tsd, te_ctx_t *ctx) {
+ /* usize has already been added to thread_allocated. */
+ uint64_t bytes_after = te_ctx_current_bytes_get(ctx);
+ /* The subtraction is intentionally susceptible to underflow. */
+ uint64_t accumbytes = bytes_after - te_ctx_last_event_get(ctx);
+
+ te_ctx_last_event_set(ctx, bytes_after);
+
+ bool allow_event_trigger = tsd_nominal(tsd) &&
+ tsd_reentrancy_level_get(tsd) == 0;
+ bool is_alloc = ctx->is_alloc;
+ uint64_t wait = TE_MAX_START_WAIT;
+
+#define E(event, condition, alloc_event) \
+ bool is_##event##_triggered = false; \
+ if (is_alloc == alloc_event && condition) { \
+ uint64_t event_wait = event##_event_wait_get(tsd); \
+ assert(event_wait <= TE_MAX_START_WAIT); \
+ if (event_wait > accumbytes) { \
+ event_wait -= accumbytes; \
+ } else if (!allow_event_trigger) { \
+ event_wait = event##_postponed_event_wait(tsd); \
+ } else { \
+ is_##event##_triggered = true; \
+ event_wait = event##_new_event_wait(tsd); \
+ } \
+ event_wait = te_clip_event_wait(event_wait); \
+ event##_event_wait_set(tsd, event_wait); \
+ if (event_wait < wait) { \
+ wait = event_wait; \
+ } \
+ }
+
+ ITERATE_OVER_ALL_EVENTS
+#undef E
+
+ assert(wait <= TE_MAX_START_WAIT);
+ te_adjust_thresholds_helper(tsd, ctx, wait);
+ te_assert_invariants(tsd);
+
+#define E(event, condition, alloc_event) \
+ if (is_alloc == alloc_event && condition && \
+ is_##event##_triggered) { \
+ assert(allow_event_trigger); \
+ uint64_t elapsed = event##_fetch_elapsed(tsd); \
+ event##_event_handler(tsd, elapsed); \
+ }
+
+ ITERATE_OVER_ALL_EVENTS
+#undef E
+
+ te_assert_invariants(tsd);
+}
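
The "intentionally susceptible to underflow" subtractions above rely on unsigned modular arithmetic: even if the 64-bit byte counter wraps between two events, bytes_after - last_event still yields the true number of bytes accumulated. A tiny standalone check, with counters placed near the wraparound point for illustration:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void) {
        uint64_t last_event = UINT64_MAX - 100; /* bytes at the last event */
        uint64_t bytes_after = 50;              /* counter has since wrapped */

        /* Modular subtraction recovers the elapsed byte count. */
        uint64_t accumbytes = bytes_after - last_event;
        printf("%" PRIu64 "\n", accumbytes); /* prints 151 */
        return 0;
    }
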
+
+static void
+te_init(tsd_t *tsd, bool is_alloc) {
+ te_ctx_t ctx;
+ te_ctx_get(tsd, &ctx, is_alloc);
+ /*
+ * Reset the last event to current, which starts the events from a clean
+ * state. This is necessary when re-initializing the tsd event counters.
+ *
+ * The event counters maintain a relationship with the current bytes:
+ * last_event <= current < next_event. When a reinit happens (e.g. for a
+ * reincarnated tsd), the last event needs to be advanced because all
+ * events start fresh from the current bytes.
+ */
+ te_ctx_last_event_set(&ctx, te_ctx_current_bytes_get(&ctx));
+
+ uint64_t wait = TE_MAX_START_WAIT;
+#define E(event, condition, alloc_event) \
+ if (is_alloc == alloc_event && condition) { \
+ uint64_t event_wait = event##_new_event_wait(tsd); \
+ event_wait = te_clip_event_wait(event_wait); \
+ event##_event_wait_set(tsd, event_wait); \
+ if (event_wait < wait) { \
+ wait = event_wait; \
+ } \
+ }
+
+ ITERATE_OVER_ALL_EVENTS
+#undef E
+ te_adjust_thresholds_helper(tsd, &ctx, wait);
+}
+
+void
+tsd_te_init(tsd_t *tsd) {
+ /* Make sure no overflow for the bytes accumulated on event_trigger. */
+ assert(TE_MAX_INTERVAL <= UINT64_MAX - SC_LARGE_MAXCLASS + 1);
+ te_init(tsd, true);
+ te_init(tsd, false);
+ te_assert_invariants(tsd);
+}
diff --git a/deps/jemalloc/src/ticker.c b/deps/jemalloc/src/ticker.c
index d7b8cd26c..790b5c200 100644
--- a/deps/jemalloc/src/ticker.c
+++ b/deps/jemalloc/src/ticker.c
@@ -1,3 +1,32 @@
-#define JEMALLOC_TICKER_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+/*
+ * To avoid using floating point math down core paths (still necessary because
+ * versions of the glibc dynamic loader that did not preserve xmm registers are
+ * still somewhat common, requiring us to be compilable with -mno-sse), and also
+ * to avoid generally expensive library calls, we use a precomputed table of
+ * values. We want to sample U uniformly on [0, 1], and then compute
+ * ceil(log(u)/log(1-1/nticks)). We're mostly interested in the case where
+ * nticks is reasonably big, so 1/log(1-1/nticks) is well-approximated by
+ * -nticks.
+ *
+ * To compute log(u), we sample an integer in [1, 64] and divide, then just look
+ * up results in a table. As a space-compression mechanism, we store these as
+ * uint8_t by dividing the range (255) by the highest-magnitude value the log
+ * can take on, and using that as a multiplier. We then have to divide by that
+ * multiplier at the end of the computation.
+ *
+ * The values here are computed in src/ticker.py
+ */
+
+const uint8_t ticker_geom_table[1 << TICKER_GEOM_NBITS] = {
+ 254, 211, 187, 169, 156, 144, 135, 127,
+ 120, 113, 107, 102, 97, 93, 89, 85,
+ 81, 77, 74, 71, 68, 65, 62, 60,
+ 57, 55, 53, 50, 48, 46, 44, 42,
+ 40, 39, 37, 35, 33, 32, 30, 29,
+ 27, 26, 24, 23, 21, 20, 19, 18,
+ 16, 15, 14, 13, 12, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0
+};
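
One plausible way to consume such a table, sketched below under stated assumptions: draw a uniform 6-bit index r, then scale nticks by table[r] and divide by the multiplier that src/ticker.py prints (61 for a 64-entry table). The actual lookup lives in jemalloc's ticker header, which is not part of this diff, so treat the arithmetic here as an illustration rather than the shipped code.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static const uint8_t demo_geom_table[64] = {
        254, 211, 187, 169, 156, 144, 135, 127,
        120, 113, 107, 102,  97,  93,  89,  85,
         81,  77,  74,  71,  68,  65,  62,  60,
         57,  55,  53,  50,  48,  46,  44,  42,
         40,  39,  37,  35,  33,  32,  30,  29,
         27,  26,  24,  23,  21,  20,  19,  18,
         16,  15,  14,  13,  12,  10,   9,   8,
          7,   6,   5,   4,   3,   2,   1,   0
    };

    /* Approximate Geometric(1/nticks) draw from a 6-bit uniform index. */
    static uint32_t
    demo_geom_draw(uint32_t nticks, unsigned r) {
        return (uint32_t)(((uint64_t)nticks * demo_geom_table[r & 63]) / 61);
    }

    int
    main(void) {
        srand(12345);
        uint64_t sum = 0;
        for (int i = 0; i < 100000; i++) {
            sum += demo_geom_draw(1000, (unsigned)rand());
        }
        /*
         * Mean lands near nticks = 1000 (a few percent low, since u is
         * discretized to 64 values).
         */
        printf("mean wait = %f\n", (double)sum / 100000);
        return 0;
    }
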
diff --git a/deps/jemalloc/src/ticker.py b/deps/jemalloc/src/ticker.py
new file mode 100755
index 000000000..3807740c3
--- /dev/null
+++ b/deps/jemalloc/src/ticker.py
@@ -0,0 +1,15 @@
+#!/usr/bin/env python3
+
+import math
+
+# Must match TICKER_GEOM_NBITS
+lg_table_size = 6
+table_size = 2**lg_table_size
+byte_max = 255
+mul = math.floor(-byte_max/math.log(1 / table_size))
+values = [round(-mul * math.log(i / table_size))
+ for i in range(1, table_size+1)]
+print("mul =", mul)
+print("values:")
+for i in range(table_size // 8):
+ print(", ".join((str(x) for x in values[i*8 : i*8 + 8])))
diff --git a/deps/jemalloc/src/tsd.c b/deps/jemalloc/src/tsd.c
index a31f6b969..e8e4f3a33 100644
--- a/deps/jemalloc/src/tsd.c
+++ b/deps/jemalloc/src/tsd.c
@@ -1,17 +1,14 @@
-#define JEMALLOC_TSD_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/san.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
/******************************************************************************/
/* Data. */
-static unsigned ncleanups;
-static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX];
-
/* TSD_INITIALIZER triggers "-Wmissing-field-initializer" */
JEMALLOC_DIAGNOSTIC_PUSH
JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
@@ -74,7 +71,7 @@ tsd_in_nominal_list(tsd_t *tsd) {
* out of it here.
*/
malloc_mutex_lock(TSDN_NULL, &tsd_nominal_tsds_lock);
- ql_foreach(tsd_list, &tsd_nominal_tsds, TSD_MANGLE(tcache).tsd_link) {
+ ql_foreach(tsd_list, &tsd_nominal_tsds, TSD_MANGLE(tsd_link)) {
if (tsd == tsd_list) {
found = true;
break;
@@ -88,9 +85,9 @@ static void
tsd_add_nominal(tsd_t *tsd) {
assert(!tsd_in_nominal_list(tsd));
assert(tsd_state_get(tsd) <= tsd_state_nominal_max);
- ql_elm_new(tsd, TSD_MANGLE(tcache).tsd_link);
+ ql_elm_new(tsd, TSD_MANGLE(tsd_link));
malloc_mutex_lock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
- ql_tail_insert(&tsd_nominal_tsds, tsd, TSD_MANGLE(tcache).tsd_link);
+ ql_tail_insert(&tsd_nominal_tsds, tsd, TSD_MANGLE(tsd_link));
malloc_mutex_unlock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
}
@@ -99,7 +96,7 @@ tsd_remove_nominal(tsd_t *tsd) {
assert(tsd_in_nominal_list(tsd));
assert(tsd_state_get(tsd) <= tsd_state_nominal_max);
malloc_mutex_lock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
- ql_remove(&tsd_nominal_tsds, tsd, TSD_MANGLE(tcache).tsd_link);
+ ql_remove(&tsd_nominal_tsds, tsd, TSD_MANGLE(tsd_link));
malloc_mutex_unlock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
}
@@ -112,11 +109,14 @@ tsd_force_recompute(tsdn_t *tsdn) {
atomic_fence(ATOMIC_RELEASE);
malloc_mutex_lock(tsdn, &tsd_nominal_tsds_lock);
tsd_t *remote_tsd;
- ql_foreach(remote_tsd, &tsd_nominal_tsds, TSD_MANGLE(tcache).tsd_link) {
+ ql_foreach(remote_tsd, &tsd_nominal_tsds, TSD_MANGLE(tsd_link)) {
assert(tsd_atomic_load(&remote_tsd->state, ATOMIC_RELAXED)
<= tsd_state_nominal_max);
- tsd_atomic_store(&remote_tsd->state, tsd_state_nominal_recompute,
- ATOMIC_RELAXED);
+ tsd_atomic_store(&remote_tsd->state,
+ tsd_state_nominal_recompute, ATOMIC_RELAXED);
+ /* See comments in te_recompute_fast_threshold(). */
+ atomic_fence(ATOMIC_SEQ_CST);
+ te_next_event_fast_set_non_nominal(remote_tsd);
}
malloc_mutex_unlock(tsdn, &tsd_nominal_tsds_lock);
}
@@ -175,6 +175,8 @@ tsd_slow_update(tsd_t *tsd) {
old_state = tsd_atomic_exchange(&tsd->state, new_state,
ATOMIC_ACQUIRE);
} while (old_state == tsd_state_nominal_recompute);
+
+ te_recompute_fast_threshold(tsd);
}
void
@@ -207,22 +209,17 @@ tsd_state_set(tsd_t *tsd, uint8_t new_state) {
/*
* This is the tricky case. We're transitioning from
* one nominal state to another. The caller can't know
- * about any races that are occuring at the same time,
+ * about any races that are occurring at the same time,
* so we always have to recompute no matter what.
*/
tsd_slow_update(tsd);
}
}
+ te_recompute_fast_threshold(tsd);
}
-static bool
-tsd_data_init(tsd_t *tsd) {
- /*
- * We initialize the rtree context first (before the tcache), since the
- * tcache initialization depends on it.
- */
- rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd));
-
+static void
+tsd_prng_state_init(tsd_t *tsd) {
/*
* A nondeterministic seed based on the address of tsd reduces
* the likelihood of lockstep non-uniform cache index
@@ -230,9 +227,20 @@ tsd_data_init(tsd_t *tsd) {
* cost of test repeatability. For debug builds, instead use a
* deterministic seed.
*/
- *tsd_offset_statep_get(tsd) = config_debug ? 0 :
+ *tsd_prng_statep_get(tsd) = config_debug ? 0 :
(uint64_t)(uintptr_t)tsd;
+}
+static bool
+tsd_data_init(tsd_t *tsd) {
+ /*
+ * We initialize the rtree context first (before the tcache), since the
+ * tcache initialization depends on it.
+ */
+ rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd));
+ tsd_prng_state_init(tsd);
+ tsd_te_init(tsd); /* event_init may use the prng state above. */
+ tsd_san_init(tsd);
return tsd_tcache_enabled_data_init(tsd);
}
@@ -242,8 +250,6 @@ assert_tsd_data_cleanup_done(tsd_t *tsd) {
assert(!tsd_in_nominal_list(tsd));
assert(*tsd_arenap_get_unsafe(tsd) == NULL);
assert(*tsd_iarenap_get_unsafe(tsd) == NULL);
- assert(*tsd_arenas_tdata_bypassp_get_unsafe(tsd) == true);
- assert(*tsd_arenas_tdatap_get_unsafe(tsd) == NULL);
assert(*tsd_tcache_enabledp_get_unsafe(tsd) == false);
assert(*tsd_prof_tdatap_get_unsafe(tsd) == NULL);
}
@@ -258,9 +264,11 @@ tsd_data_init_nocleanup(tsd_t *tsd) {
* We set up tsd in a way that no cleanup is needed.
*/
rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd));
- *tsd_arenas_tdata_bypassp_get(tsd) = true;
*tsd_tcache_enabledp_get_unsafe(tsd) = false;
*tsd_reentrancy_levelp_get(tsd) = 1;
+ tsd_prng_state_init(tsd);
+ tsd_te_init(tsd); /* event_init may use the prng state above. */
+ tsd_san_init(tsd);
assert_tsd_data_cleanup_done(tsd);
return false;
@@ -326,6 +334,9 @@ malloc_tsd_dalloc(void *wrapper) {
}
#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32)
+static unsigned ncleanups;
+static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX];
+
#ifndef _WIN32
JEMALLOC_EXPORT
#endif
@@ -350,23 +361,27 @@ _malloc_thread_cleanup(void) {
}
} while (again);
}
-#endif
+#ifndef _WIN32
+JEMALLOC_EXPORT
+#endif
void
-malloc_tsd_cleanup_register(bool (*f)(void)) {
+_malloc_tsd_cleanup_register(bool (*f)(void)) {
assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX);
cleanups[ncleanups] = f;
ncleanups++;
}
+#endif
+
static void
tsd_do_data_cleanup(tsd_t *tsd) {
prof_tdata_cleanup(tsd);
iarena_cleanup(tsd);
arena_cleanup(tsd);
- arenas_tdata_cleanup(tsd);
tcache_cleanup(tsd);
witnesses_cleanup(tsd_witness_tsdp_get_unsafe(tsd));
+ *tsd_reentrancy_levelp_get(tsd) = 1;
}
void
@@ -387,7 +402,7 @@ tsd_cleanup(void *arg) {
* is still called for testing and completeness.
*/
assert_tsd_data_cleanup_done(tsd);
- /* Fall through. */
+ JEMALLOC_FALLTHROUGH;
case tsd_state_nominal:
case tsd_state_nominal_slow:
tsd_do_data_cleanup(tsd);
@@ -418,7 +433,9 @@ tsd_t *
malloc_tsd_boot0(void) {
tsd_t *tsd;
+#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32)
ncleanups = 0;
+#endif
if (malloc_mutex_init(&tsd_nominal_tsds_lock, "tsd_nominal_tsds_lock",
WITNESS_RANK_OMIT, malloc_mutex_rank_exclusive)) {
return NULL;
@@ -427,7 +444,6 @@ malloc_tsd_boot0(void) {
return NULL;
}
tsd = tsd_fetch();
- *tsd_arenas_tdata_bypassp_get(tsd) = true;
return tsd;
}
@@ -437,7 +453,6 @@ malloc_tsd_boot1(void) {
tsd_t *tsd = tsd_fetch();
/* malloc_slow has been set properly. Update tsd_slow. */
tsd_slow_update(tsd);
- *tsd_arenas_tdata_bypassp_get(tsd) = false;
}
#ifdef _WIN32
diff --git a/deps/jemalloc/src/witness.c b/deps/jemalloc/src/witness.c
index f42b72ad1..4474af04c 100644
--- a/deps/jemalloc/src/witness.c
+++ b/deps/jemalloc/src/witness.c
@@ -1,4 +1,3 @@
-#define JEMALLOC_WITNESS_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
@@ -15,14 +14,41 @@ witness_init(witness_t *witness, const char *name, witness_rank_t rank,
}
static void
-witness_lock_error_impl(const witness_list_t *witnesses,
- const witness_t *witness) {
- witness_t *w;
+witness_print_witness(witness_t *w, unsigned n) {
+ assert(n > 0);
+ if (n == 1) {
+ malloc_printf(" %s(%u)", w->name, w->rank);
+ } else {
+ malloc_printf(" %s(%u)X%u", w->name, w->rank, n);
+ }
+}
- malloc_printf("<jemalloc>: Lock rank order reversal:");
+static void
+witness_print_witnesses(const witness_list_t *witnesses) {
+ witness_t *w, *last = NULL;
+ unsigned n = 0;
ql_foreach(w, witnesses, link) {
- malloc_printf(" %s(%u)", w->name, w->rank);
+ if (last != NULL && w->rank > last->rank) {
+ assert(w->name != last->name);
+ witness_print_witness(last, n);
+ n = 0;
+ } else if (last != NULL) {
+ assert(w->rank == last->rank);
+ assert(w->name == last->name);
+ }
+ last = w;
+ ++n;
}
+ if (last != NULL) {
+ witness_print_witness(last, n);
+ }
+}
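
With this grouping, runs of identical witnesses are coalesced in the error output: using hypothetical lock names, three held witnesses named bin at rank 20 print as " bin(20)X3" instead of " bin(20) bin(20) bin(20)", while a lone witness keeps the old " name(rank)" form.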
+
+static void
+witness_lock_error_impl(const witness_list_t *witnesses,
+ const witness_t *witness) {
+ malloc_printf("<jemalloc>: Lock rank order reversal:");
+ witness_print_witnesses(witnesses);
malloc_printf(" %s(%u)\n", witness->name, witness->rank);
abort();
}
@@ -49,13 +75,9 @@ witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error =
static void
witness_depth_error_impl(const witness_list_t *witnesses,
witness_rank_t rank_inclusive, unsigned depth) {
- witness_t *w;
-
malloc_printf("<jemalloc>: Should own %u lock%s of rank >= %u:", depth,
(depth != 1) ? "s" : "", rank_inclusive);
- ql_foreach(w, witnesses, link) {
- malloc_printf(" %s(%u)", w->name, w->rank);
- }
+ witness_print_witnesses(witnesses);
malloc_printf("\n");
abort();
}