path: root/deps/jemalloc/src/arena.c
Diffstat (limited to 'deps/jemalloc/src/arena.c')
-rw-r--r--	deps/jemalloc/src/arena.c	| 3742
1 file changed, 1604 insertions(+), 2138 deletions(-)
diff --git a/deps/jemalloc/src/arena.c b/deps/jemalloc/src/arena.c
index dad707b63..5d55bf1a0 100644
--- a/deps/jemalloc/src/arena.c
+++ b/deps/jemalloc/src/arena.c
@@ -1,2577 +1,2043 @@
-#define JEMALLOC_ARENA_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_ARENA_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/div.h"
+#include "jemalloc/internal/extent_dss.h"
+#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/util.h"
/******************************************************************************/
/* Data. */
-ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
-arena_bin_info_t arena_bin_info[NBINS];
-
-JEMALLOC_ALIGNED(CACHELINE)
-const uint8_t small_size2bin[] = {
-#define S2B_8(i) i,
-#define S2B_16(i) S2B_8(i) S2B_8(i)
-#define S2B_32(i) S2B_16(i) S2B_16(i)
-#define S2B_64(i) S2B_32(i) S2B_32(i)
-#define S2B_128(i) S2B_64(i) S2B_64(i)
-#define S2B_256(i) S2B_128(i) S2B_128(i)
-#define S2B_512(i) S2B_256(i) S2B_256(i)
-#define S2B_1024(i) S2B_512(i) S2B_512(i)
-#define S2B_2048(i) S2B_1024(i) S2B_1024(i)
-#define S2B_4096(i) S2B_2048(i) S2B_2048(i)
-#define S2B_8192(i) S2B_4096(i) S2B_4096(i)
-#define SIZE_CLASS(bin, delta, size) \
- S2B_##delta(bin)
- SIZE_CLASSES
-#undef S2B_8
-#undef S2B_16
-#undef S2B_32
-#undef S2B_64
-#undef S2B_128
-#undef S2B_256
-#undef S2B_512
-#undef S2B_1024
-#undef S2B_2048
-#undef S2B_4096
-#undef S2B_8192
-#undef SIZE_CLASS
+/*
+ * Define names for both uninitialized and initialized phases, so that
+ * options and mallctl processing are straightforward.
+ */
+const char *percpu_arena_mode_names[] = {
+ "percpu",
+ "phycpu",
+ "disabled",
+ "percpu",
+ "phycpu"
+};
+percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT;
+
+ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT;
+ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;
+
+static atomic_zd_t dirty_decay_ms_default;
+static atomic_zd_t muzzy_decay_ms_default;
+
+const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
+#define STEP(step, h, x, y) \
+ h,
+ SMOOTHSTEP
+#undef STEP
};
+static div_info_t arena_binind_div_info[NBINS];
+
/******************************************************************************/
/*
* Function prototypes for static functions that are referenced prior to
* definition.
*/
-static void arena_purge(arena_t *arena, bool all);
-static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
- bool cleaned);
-static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
- arena_run_t *run, arena_bin_t *bin);
-static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
- arena_run_t *run, arena_bin_t *bin);
+static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
+ arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit,
+ size_t npages_decay_max, bool is_background_thread);
+static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
+ bool is_background_thread, bool all);
+static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+ bin_t *bin);
+static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+ bin_t *bin);
/******************************************************************************/
-static inline int
-arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
-{
- uintptr_t a_mapelm = (uintptr_t)a;
- uintptr_t b_mapelm = (uintptr_t)b;
-
- assert(a != NULL);
- assert(b != NULL);
-
- return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
+void
+arena_basic_stats_merge(UNUSED tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
+ const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
+ size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
+ *nthreads += arena_nthreads_get(arena, false);
+ *dss = dss_prec_names[arena_dss_prec_get(arena)];
+ *dirty_decay_ms = arena_dirty_decay_ms_get(arena);
+ *muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
+ *nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED);
+ *ndirty += extents_npages_get(&arena->extents_dirty);
+ *nmuzzy += extents_npages_get(&arena->extents_muzzy);
}
-/* Generate red-black tree functions. */
-rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t,
- u.rb_link, arena_run_comp)
-
-static inline int
-arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
-{
- int ret;
- size_t a_size = a->bits & ~PAGE_MASK;
- size_t b_size = b->bits & ~PAGE_MASK;
-
- ret = (a_size > b_size) - (a_size < b_size);
- if (ret == 0) {
- uintptr_t a_mapelm, b_mapelm;
-
- if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY)
- a_mapelm = (uintptr_t)a;
- else {
- /*
- * Treat keys as though they are lower than anything
- * else.
- */
- a_mapelm = 0;
+void
+arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
+ const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
+ size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
+ bin_stats_t *bstats, arena_stats_large_t *lstats) {
+ cassert(config_stats);
+
+ arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
+ muzzy_decay_ms, nactive, ndirty, nmuzzy);
+
+ size_t base_allocated, base_resident, base_mapped, metadata_thp;
+ base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
+ &base_mapped, &metadata_thp);
+
+ arena_stats_lock(tsdn, &arena->stats);
+
+ arena_stats_accum_zu(&astats->mapped, base_mapped
+ + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped));
+ arena_stats_accum_zu(&astats->retained,
+ extents_npages_get(&arena->extents_retained) << LG_PAGE);
+
+ arena_stats_accum_u64(&astats->decay_dirty.npurge,
+ arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.decay_dirty.npurge));
+ arena_stats_accum_u64(&astats->decay_dirty.nmadvise,
+ arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.decay_dirty.nmadvise));
+ arena_stats_accum_u64(&astats->decay_dirty.purged,
+ arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.decay_dirty.purged));
+
+ arena_stats_accum_u64(&astats->decay_muzzy.npurge,
+ arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.decay_muzzy.npurge));
+ arena_stats_accum_u64(&astats->decay_muzzy.nmadvise,
+ arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.decay_muzzy.nmadvise));
+ arena_stats_accum_u64(&astats->decay_muzzy.purged,
+ arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.decay_muzzy.purged));
+
+ arena_stats_accum_zu(&astats->base, base_allocated);
+ arena_stats_accum_zu(&astats->internal, arena_internal_get(arena));
+ arena_stats_accum_zu(&astats->metadata_thp, metadata_thp);
+ arena_stats_accum_zu(&astats->resident, base_resident +
+ (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
+ extents_npages_get(&arena->extents_dirty) +
+ extents_npages_get(&arena->extents_muzzy)) << LG_PAGE)));
+
+ for (szind_t i = 0; i < NSIZES - NBINS; i++) {
+ uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.lstats[i].nmalloc);
+ arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc);
+ arena_stats_accum_u64(&astats->nmalloc_large, nmalloc);
+
+ uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.lstats[i].ndalloc);
+ arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc);
+ arena_stats_accum_u64(&astats->ndalloc_large, ndalloc);
+
+ uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.lstats[i].nrequests);
+ arena_stats_accum_u64(&lstats[i].nrequests,
+ nmalloc + nrequests);
+ arena_stats_accum_u64(&astats->nrequests_large,
+ nmalloc + nrequests);
+
+ assert(nmalloc >= ndalloc);
+ assert(nmalloc - ndalloc <= SIZE_T_MAX);
+ size_t curlextents = (size_t)(nmalloc - ndalloc);
+ lstats[i].curlextents += curlextents;
+ arena_stats_accum_zu(&astats->allocated_large,
+ curlextents * sz_index2size(NBINS + i));
+ }
+
+ arena_stats_unlock(tsdn, &arena->stats);
+
+ /* tcache_bytes counts currently cached bytes. */
+ atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED);
+ malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
+ cache_bin_array_descriptor_t *descriptor;
+ ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) {
+ szind_t i = 0;
+ for (; i < NBINS; i++) {
+ cache_bin_t *tbin = &descriptor->bins_small[i];
+ arena_stats_accum_zu(&astats->tcache_bytes,
+ tbin->ncached * sz_index2size(i));
+ }
+ for (; i < nhbins; i++) {
+ cache_bin_t *tbin = &descriptor->bins_large[i];
+ arena_stats_accum_zu(&astats->tcache_bytes,
+ tbin->ncached * sz_index2size(i));
}
- b_mapelm = (uintptr_t)b;
-
- ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
}
+ malloc_mutex_prof_read(tsdn,
+ &astats->mutex_prof_data[arena_prof_mutex_tcache_list],
+ &arena->tcache_ql_mtx);
+ malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
+
+#define READ_ARENA_MUTEX_PROF_DATA(mtx, ind) \
+ malloc_mutex_lock(tsdn, &arena->mtx); \
+ malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind], \
+ &arena->mtx); \
+ malloc_mutex_unlock(tsdn, &arena->mtx);
+
+ /* Gather per arena mutex profiling data. */
+ READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
+ READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx,
+ arena_prof_mutex_extent_avail)
+ READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx,
+ arena_prof_mutex_extents_dirty)
+ READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx,
+ arena_prof_mutex_extents_muzzy)
+ READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx,
+ arena_prof_mutex_extents_retained)
+ READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx,
+ arena_prof_mutex_decay_dirty)
+ READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx,
+ arena_prof_mutex_decay_muzzy)
+ READ_ARENA_MUTEX_PROF_DATA(base->mtx,
+ arena_prof_mutex_base)
+#undef READ_ARENA_MUTEX_PROF_DATA
+
+ nstime_copy(&astats->uptime, &arena->create_time);
+ nstime_update(&astats->uptime);
+ nstime_subtract(&astats->uptime, &arena->create_time);
+
+ for (szind_t i = 0; i < NBINS; i++) {
+ bin_stats_merge(tsdn, &bstats[i], &arena->bins[i]);
+ }
+}
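/*
 * Editorial note (illustration, not part of the upstream diff): tcache_bytes
 * is recomputed from scratch on every merge as ncached * sz_index2size(i) per
 * cache bin; e.g. a bin for the 32-byte size class holding 40 cached objects
 * contributes 40 * 32 = 1280 bytes.
 */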
- return (ret);
+void
+arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty,
+ extent);
+ if (arena_dirty_decay_ms_get(arena) == 0) {
+ arena_decay_dirty(tsdn, arena, false, true);
+ } else {
+ arena_background_thread_inactivity_check(tsdn, arena, false);
+ }
}
-/* Generate red-black tree functions. */
-rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
- u.rb_link, arena_avail_comp)
+static void *
+arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) {
+ void *ret;
+ arena_slab_data_t *slab_data = extent_slab_data_get(slab);
+ size_t regind;
-static inline int
-arena_chunk_dirty_comp(arena_chunk_t *a, arena_chunk_t *b)
-{
+ assert(extent_nfree_get(slab) > 0);
+ assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
- assert(a != NULL);
- assert(b != NULL);
+ regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
+ ret = (void *)((uintptr_t)extent_addr_get(slab) +
+ (uintptr_t)(bin_info->reg_size * regind));
+ extent_nfree_dec(slab);
+ return ret;
+}
- /*
- * Short-circuit for self comparison. The following comparison code
- * would come to the same result, but at the cost of executing the slow
- * path.
- */
- if (a == b)
- return (0);
+#ifndef JEMALLOC_JET
+static
+#endif
+size_t
+arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
+ size_t diff, regind;
- /*
- * Order such that chunks with higher fragmentation are "less than"
- * those with lower fragmentation -- purging order is from "least" to
- * "greatest". Fragmentation is measured as:
- *
- * mean current avail run size
- * --------------------------------
- * mean defragmented avail run size
- *
- * navail
- * -----------
- * nruns_avail nruns_avail-nruns_adjac
- * = ========================= = -----------------------
- * navail nruns_avail
- * -----------------------
- * nruns_avail-nruns_adjac
- *
- * The following code multiplies away the denominator prior to
- * comparison, in order to avoid division.
- *
- */
- {
- size_t a_val = (a->nruns_avail - a->nruns_adjac) *
- b->nruns_avail;
- size_t b_val = (b->nruns_avail - b->nruns_adjac) *
- a->nruns_avail;
+ /* Freeing a pointer outside the slab can cause assertion failure. */
+ assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
+ assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
+ /* Freeing an interior pointer can cause assertion failure. */
+ assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
+ (uintptr_t)bin_infos[binind].reg_size == 0);
- if (a_val < b_val)
- return (1);
- if (a_val > b_val)
- return (-1);
- }
- /*
- * Break ties by chunk address. For fragmented chunks, report lower
- * addresses as "lower", so that fragmentation reduction happens first
- * at lower addresses. However, use the opposite ordering for
- * unfragmented chunks, in order to increase the chances of
- * re-allocating dirty runs.
- */
- {
- uintptr_t a_chunk = (uintptr_t)a;
- uintptr_t b_chunk = (uintptr_t)b;
- int ret = ((a_chunk > b_chunk) - (a_chunk < b_chunk));
- if (a->nruns_adjac == 0) {
- assert(b->nruns_adjac == 0);
- ret = -ret;
- }
- return (ret);
- }
-}
+ diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));
-/* Generate red-black tree functions. */
-rb_gen(static UNUSED, arena_chunk_dirty_, arena_chunk_tree_t, arena_chunk_t,
- dirty_link, arena_chunk_dirty_comp)
+ /* Avoid doing division with a variable divisor. */
+ regind = div_compute(&arena_binind_div_info[binind], diff);
-static inline bool
-arena_avail_adjac_pred(arena_chunk_t *chunk, size_t pageind)
-{
- bool ret;
+ assert(regind < bin_infos[binind].nregs);
- if (pageind-1 < map_bias)
- ret = false;
- else {
- ret = (arena_mapbits_allocated_get(chunk, pageind-1) == 0);
- assert(ret == false || arena_mapbits_dirty_get(chunk,
- pageind-1) != arena_mapbits_dirty_get(chunk, pageind));
- }
- return (ret);
+ return regind;
}
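/*
 * Editorial note (worked example with assumed values, not part of the
 * upstream diff): for a bin with reg_size 192 and a pointer 576 bytes past
 * the slab start, diff is 576 and div_compute() yields regind 3 (576 / 192),
 * using the constants precomputed in arena_binind_div_info rather than a
 * runtime division instruction.
 */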
-static inline bool
-arena_avail_adjac_succ(arena_chunk_t *chunk, size_t pageind, size_t npages)
-{
- bool ret;
-
- if (pageind+npages == chunk_npages)
- ret = false;
- else {
- assert(pageind+npages < chunk_npages);
- ret = (arena_mapbits_allocated_get(chunk, pageind+npages) == 0);
- assert(ret == false || arena_mapbits_dirty_get(chunk, pageind)
- != arena_mapbits_dirty_get(chunk, pageind+npages));
- }
- return (ret);
-}
+static void
+arena_slab_reg_dalloc(extent_t *slab, arena_slab_data_t *slab_data, void *ptr) {
+ szind_t binind = extent_szind_get(slab);
+ const bin_info_t *bin_info = &bin_infos[binind];
+ size_t regind = arena_slab_regind(slab, binind, ptr);
-static inline bool
-arena_avail_adjac(arena_chunk_t *chunk, size_t pageind, size_t npages)
-{
+ assert(extent_nfree_get(slab) < bin_info->nregs);
+ /* Freeing an unallocated pointer can cause assertion failure. */
+ assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));
- return (arena_avail_adjac_pred(chunk, pageind) ||
- arena_avail_adjac_succ(chunk, pageind, npages));
+ bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
+ extent_nfree_inc(slab);
}
static void
-arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
- size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
-{
+arena_nactive_add(arena_t *arena, size_t add_pages) {
+ atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED);
+}
- assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
- LG_PAGE));
+static void
+arena_nactive_sub(arena_t *arena, size_t sub_pages) {
+ assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages);
+ atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED);
+}
- /*
- * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
- * removed and reinserted even if the run to be inserted is clean.
- */
- if (chunk->ndirty != 0)
- arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);
+static void
+arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
+ szind_t index, hindex;
- if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
- chunk->nruns_adjac++;
- if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
- chunk->nruns_adjac++;
- chunk->nruns_avail++;
- assert(chunk->nruns_avail > chunk->nruns_adjac);
+ cassert(config_stats);
- if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
- arena->ndirty += npages;
- chunk->ndirty += npages;
+ if (usize < LARGE_MINCLASS) {
+ usize = LARGE_MINCLASS;
}
- if (chunk->ndirty != 0)
- arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);
+ index = sz_size2index(usize);
+ hindex = (index >= NBINS) ? index - NBINS : 0;
- arena_avail_tree_insert(&arena->runs_avail, arena_mapp_get(chunk,
- pageind));
+ arena_stats_add_u64(tsdn, &arena->stats,
+ &arena->stats.lstats[hindex].nmalloc, 1);
}
static void
-arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
- size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
-{
-
- assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
- LG_PAGE));
-
- /*
- * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
- * removed and reinserted even if the run to be removed is clean.
- */
- if (chunk->ndirty != 0)
- arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);
+arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
+ szind_t index, hindex;
- if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
- chunk->nruns_adjac--;
- if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
- chunk->nruns_adjac--;
- chunk->nruns_avail--;
- assert(chunk->nruns_avail > chunk->nruns_adjac || (chunk->nruns_avail
- == 0 && chunk->nruns_adjac == 0));
+ cassert(config_stats);
- if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
- arena->ndirty -= npages;
- chunk->ndirty -= npages;
+ if (usize < LARGE_MINCLASS) {
+ usize = LARGE_MINCLASS;
}
- if (chunk->ndirty != 0)
- arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);
+ index = sz_size2index(usize);
+ hindex = (index >= NBINS) ? index - NBINS : 0;
- arena_avail_tree_remove(&arena->runs_avail, arena_mapp_get(chunk,
- pageind));
+ arena_stats_add_u64(tsdn, &arena->stats,
+ &arena->stats.lstats[hindex].ndalloc, 1);
}
-static inline void *
-arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
-{
- void *ret;
- unsigned regind;
- bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
- (uintptr_t)bin_info->bitmap_offset);
-
- assert(run->nfree > 0);
- assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false);
-
- regind = bitmap_sfu(bitmap, &bin_info->bitmap_info);
- ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
- (uintptr_t)(bin_info->reg_interval * regind));
- run->nfree--;
- if (regind == run->nextind)
- run->nextind++;
- assert(regind < run->nextind);
- return (ret);
-}
-
-static inline void
-arena_run_reg_dalloc(arena_run_t *run, void *ptr)
-{
- arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- size_t mapbits = arena_mapbits_get(chunk, pageind);
- size_t binind = arena_ptr_small_binind_get(ptr, mapbits);
- arena_bin_info_t *bin_info = &arena_bin_info[binind];
- unsigned regind = arena_run_regind(run, bin_info, ptr);
- bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
- (uintptr_t)bin_info->bitmap_offset);
-
- assert(run->nfree < bin_info->nregs);
- /* Freeing an interior pointer can cause assertion failure. */
- assert(((uintptr_t)ptr - ((uintptr_t)run +
- (uintptr_t)bin_info->reg0_offset)) %
- (uintptr_t)bin_info->reg_interval == 0);
- assert((uintptr_t)ptr >= (uintptr_t)run +
- (uintptr_t)bin_info->reg0_offset);
- /* Freeing an unallocated pointer can cause assertion failure. */
- assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind));
-
- bitmap_unset(bitmap, &bin_info->bitmap_info, regind);
- run->nfree++;
-}
+static void
+arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
+ size_t usize) {
+ arena_large_dalloc_stats_update(tsdn, arena, oldusize);
+ arena_large_malloc_stats_update(tsdn, arena, usize);
+}
+
+extent_t *
+arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
+ size_t alignment, bool *zero) {
+ extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
+
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ szind_t szind = sz_size2index(usize);
+ size_t mapped_add;
+ bool commit = true;
+ extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks,
+ &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false,
+ szind, zero, &commit);
+ if (extent == NULL) {
+ extent = extents_alloc(tsdn, arena, &extent_hooks,
+ &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment,
+ false, szind, zero, &commit);
+ }
+ size_t size = usize + sz_large_pad;
+ if (extent == NULL) {
+ extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL,
+ usize, sz_large_pad, alignment, false, szind, zero,
+ &commit);
+ if (config_stats) {
+ /*
+ * extent may be NULL on OOM, but in that case
+ * mapped_add isn't used below, so there's no need to
+			 * conditionally set it to 0 here.
+ */
+ mapped_add = size;
+ }
+ } else if (config_stats) {
+ mapped_add = 0;
+ }
-static inline void
-arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
-{
+ if (extent != NULL) {
+ if (config_stats) {
+ arena_stats_lock(tsdn, &arena->stats);
+ arena_large_malloc_stats_update(tsdn, arena, usize);
+ if (mapped_add != 0) {
+ arena_stats_add_zu(tsdn, &arena->stats,
+ &arena->stats.mapped, mapped_add);
+ }
+ arena_stats_unlock(tsdn, &arena->stats);
+ }
+ arena_nactive_add(arena, size >> LG_PAGE);
+ }
- VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
- LG_PAGE)), (npages << LG_PAGE));
- memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
- (npages << LG_PAGE));
+ return extent;
}
-static inline void
-arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
-{
-
- VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind <<
- LG_PAGE)), PAGE);
+void
+arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
+ if (config_stats) {
+ arena_stats_lock(tsdn, &arena->stats);
+ arena_large_dalloc_stats_update(tsdn, arena,
+ extent_usize_get(extent));
+ arena_stats_unlock(tsdn, &arena->stats);
+ }
+ arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
}
-static inline void
-arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
-{
- size_t i;
- UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
+void
+arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+ size_t oldusize) {
+ size_t usize = extent_usize_get(extent);
+ size_t udiff = oldusize - usize;
- arena_run_page_mark_zeroed(chunk, run_ind);
- for (i = 0; i < PAGE / sizeof(size_t); i++)
- assert(p[i] == 0);
+ if (config_stats) {
+ arena_stats_lock(tsdn, &arena->stats);
+ arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
+ arena_stats_unlock(tsdn, &arena->stats);
+ }
+ arena_nactive_sub(arena, udiff >> LG_PAGE);
}
-static void
-arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages)
-{
+void
+arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+ size_t oldusize) {
+ size_t usize = extent_usize_get(extent);
+ size_t udiff = usize - oldusize;
if (config_stats) {
- ssize_t cactive_diff = CHUNK_CEILING((arena->nactive +
- add_pages) << LG_PAGE) - CHUNK_CEILING((arena->nactive -
- sub_pages) << LG_PAGE);
- if (cactive_diff != 0)
- stats_cactive_add(cactive_diff);
+ arena_stats_lock(tsdn, &arena->stats);
+ arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
+ arena_stats_unlock(tsdn, &arena->stats);
}
+ arena_nactive_add(arena, udiff >> LG_PAGE);
}
-static void
-arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
- size_t flag_dirty, size_t need_pages)
-{
- size_t total_pages, rem_pages;
-
- total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
- LG_PAGE;
- assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
- flag_dirty);
- assert(need_pages <= total_pages);
- rem_pages = total_pages - need_pages;
-
- arena_avail_remove(arena, chunk, run_ind, total_pages, true, true);
- arena_cactive_update(arena, need_pages, 0);
- arena->nactive += need_pages;
-
- /* Keep track of trailing unused pages for later use. */
- if (rem_pages > 0) {
- if (flag_dirty != 0) {
- arena_mapbits_unallocated_set(chunk,
- run_ind+need_pages, (rem_pages << LG_PAGE),
- flag_dirty);
- arena_mapbits_unallocated_set(chunk,
- run_ind+total_pages-1, (rem_pages << LG_PAGE),
- flag_dirty);
- } else {
- arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
- (rem_pages << LG_PAGE),
- arena_mapbits_unzeroed_get(chunk,
- run_ind+need_pages));
- arena_mapbits_unallocated_set(chunk,
- run_ind+total_pages-1, (rem_pages << LG_PAGE),
- arena_mapbits_unzeroed_get(chunk,
- run_ind+total_pages-1));
- }
- arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages,
- false, true);
- }
+static ssize_t
+arena_decay_ms_read(arena_decay_t *decay) {
+ return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
}
static void
-arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
- bool remove, bool zero)
-{
- arena_chunk_t *chunk;
- size_t flag_dirty, run_ind, need_pages, i;
+arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) {
+ atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
+}
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
- flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
- need_pages = (size >> LG_PAGE);
- assert(need_pages > 0);
+static void
+arena_decay_deadline_init(arena_decay_t *decay) {
+ /*
+ * Generate a new deadline that is uniformly random within the next
+ * epoch after the current one.
+ */
+ nstime_copy(&decay->deadline, &decay->epoch);
+ nstime_add(&decay->deadline, &decay->interval);
+ if (arena_decay_ms_read(decay) > 0) {
+ nstime_t jitter;
- if (remove) {
- arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
- need_pages);
+ nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
+ nstime_ns(&decay->interval)));
+ nstime_add(&decay->deadline, &jitter);
}
+}
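/*
 * Editorial sketch (not part of the upstream diff): with a decay interval of
 * I nanoseconds, the new deadline is
 *     deadline = epoch + I + jitter, jitter uniform in [0, I),
 * so arenas sharing the same decay setting do not all hit their deadlines at
 * the same instant.
 */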
- if (zero) {
- if (flag_dirty == 0) {
- /*
- * The run is clean, so some pages may be zeroed (i.e.
- * never before touched).
- */
- for (i = 0; i < need_pages; i++) {
- if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
- != 0)
- arena_run_zero(chunk, run_ind+i, 1);
- else if (config_debug) {
- arena_run_page_validate_zeroed(chunk,
- run_ind+i);
- } else {
- arena_run_page_mark_zeroed(chunk,
- run_ind+i);
- }
- }
- } else {
- /* The run is dirty, so all pages must be zeroed. */
- arena_run_zero(chunk, run_ind, need_pages);
- }
- } else {
- VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
- (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
- }
+static bool
+arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) {
+ return (nstime_compare(&decay->deadline, time) <= 0);
+}
+
+static size_t
+arena_decay_backlog_npages_limit(const arena_decay_t *decay) {
+ uint64_t sum;
+ size_t npages_limit_backlog;
+ unsigned i;
/*
- * Set the last element first, in case the run only contains one page
- * (i.e. both statements set the same element).
+ * For each element of decay_backlog, multiply by the corresponding
+ * fixed-point smoothstep decay factor. Sum the products, then divide
+ * to round down to the nearest whole number of pages.
*/
- arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty);
- arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
-}
-
-static void
-arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
-{
+ sum = 0;
+ for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
+ sum += decay->backlog[i] * h_steps[i];
+ }
+ npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
- arena_run_split_large_helper(arena, run, size, true, zero);
+ return npages_limit_backlog;
}
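/*
 * Editorial sketch (not part of the upstream diff): the limit is a
 * fixed-point weighted sum,
 *     limit = (sum_i backlog[i] * h_steps[i]) >> SMOOTHSTEP_BFP,
 * where each backlog entry counts pages that became unused during one of the
 * past SMOOTHSTEP_NSTEPS epochs and h_steps[] holds smoothstep weights scaled
 * to fixed point; recently unused pages are retained almost in full, and
 * their weight decays smoothly toward zero as the entries age.
 */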
static void
-arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
-{
+arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) {
+ size_t npages_delta = (current_npages > decay->nunpurged) ?
+ current_npages - decay->nunpurged : 0;
+ decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta;
- arena_run_split_large_helper(arena, run, size, false, zero);
+ if (config_debug) {
+ if (current_npages > decay->ceil_npages) {
+ decay->ceil_npages = current_npages;
+ }
+ size_t npages_limit = arena_decay_backlog_npages_limit(decay);
+ assert(decay->ceil_npages >= npages_limit);
+ if (decay->ceil_npages > npages_limit) {
+ decay->ceil_npages = npages_limit;
+ }
+ }
}
static void
-arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
- size_t binind)
-{
- arena_chunk_t *chunk;
- size_t flag_dirty, run_ind, need_pages, i;
-
- assert(binind != BININD_INVALID);
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
- flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
- need_pages = (size >> LG_PAGE);
- assert(need_pages > 0);
-
- arena_run_split_remove(arena, chunk, run_ind, flag_dirty, need_pages);
-
- /*
- * Propagate the dirty and unzeroed flags to the allocated small run,
- * so that arena_dalloc_bin_run() has the ability to conditionally trim
- * clean pages.
- */
- arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty);
- /*
- * The first page will always be dirtied during small run
- * initialization, so a validation failure here would not actually
- * cause an observable failure.
- */
- if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk,
- run_ind) == 0)
- arena_run_page_validate_zeroed(chunk, run_ind);
- for (i = 1; i < need_pages - 1; i++) {
- arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
- if (config_debug && flag_dirty == 0 &&
- arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0)
- arena_run_page_validate_zeroed(chunk, run_ind+i);
- }
- arena_mapbits_small_set(chunk, run_ind+need_pages-1, need_pages-1,
- binind, flag_dirty);
- if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk,
- run_ind+need_pages-1) == 0)
- arena_run_page_validate_zeroed(chunk, run_ind+need_pages-1);
- VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
- (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
-}
-
-static arena_chunk_t *
-arena_chunk_init_spare(arena_t *arena)
-{
- arena_chunk_t *chunk;
-
- assert(arena->spare != NULL);
-
- chunk = arena->spare;
- arena->spare = NULL;
-
- assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
- assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
- assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
- arena_maxclass);
- assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
- arena_maxclass);
- assert(arena_mapbits_dirty_get(chunk, map_bias) ==
- arena_mapbits_dirty_get(chunk, chunk_npages-1));
-
- return (chunk);
-}
-
-static arena_chunk_t *
-arena_chunk_init_hard(arena_t *arena)
-{
- arena_chunk_t *chunk;
- bool zero;
- size_t unzeroed, i;
-
- assert(arena->spare == NULL);
-
- zero = false;
- malloc_mutex_unlock(&arena->lock);
- chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize, false,
- &zero, arena->dss_prec);
- malloc_mutex_lock(&arena->lock);
- if (chunk == NULL)
- return (NULL);
- if (config_stats)
- arena->stats.mapped += chunksize;
-
- chunk->arena = arena;
-
- /*
- * Claim that no pages are in use, since the header is merely overhead.
- */
- chunk->ndirty = 0;
+arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64,
+ size_t current_npages) {
+ if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
+ memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
+ sizeof(size_t));
+ } else {
+ size_t nadvance_z = (size_t)nadvance_u64;
- chunk->nruns_avail = 0;
- chunk->nruns_adjac = 0;
+ assert((uint64_t)nadvance_z == nadvance_u64);
- /*
- * Initialize the map to contain one maximal free untouched run. Mark
- * the pages as zeroed iff chunk_alloc() returned a zeroed chunk.
- */
- unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
- arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass,
- unzeroed);
- /*
- * There is no need to initialize the internal page map entries unless
- * the chunk is not zeroed.
- */
- if (zero == false) {
- VALGRIND_MAKE_MEM_UNDEFINED((void *)arena_mapp_get(chunk,
- map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk,
- chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk,
- map_bias+1)));
- for (i = map_bias+1; i < chunk_npages-1; i++)
- arena_mapbits_unzeroed_set(chunk, i, unzeroed);
- } else {
- VALGRIND_MAKE_MEM_DEFINED((void *)arena_mapp_get(chunk,
- map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk,
- chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk,
- map_bias+1)));
- if (config_debug) {
- for (i = map_bias+1; i < chunk_npages-1; i++) {
- assert(arena_mapbits_unzeroed_get(chunk, i) ==
- unzeroed);
- }
+ memmove(decay->backlog, &decay->backlog[nadvance_z],
+ (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
+ if (nadvance_z > 1) {
+ memset(&decay->backlog[SMOOTHSTEP_NSTEPS -
+ nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
}
}
- arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxclass,
- unzeroed);
- return (chunk);
+ arena_decay_backlog_update_last(decay, current_npages);
}
-static arena_chunk_t *
-arena_chunk_alloc(arena_t *arena)
-{
- arena_chunk_t *chunk;
-
- if (arena->spare != NULL)
- chunk = arena_chunk_init_spare(arena);
- else {
- chunk = arena_chunk_init_hard(arena);
- if (chunk == NULL)
- return (NULL);
+static void
+arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
+ extents_t *extents, size_t current_npages, size_t npages_limit,
+ bool is_background_thread) {
+ if (current_npages > npages_limit) {
+ arena_decay_to_limit(tsdn, arena, decay, extents, false,
+ npages_limit, current_npages - npages_limit,
+ is_background_thread);
}
-
- /* Insert the run into the runs_avail tree. */
- arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias,
- false, false);
-
- return (chunk);
}
static void
-arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
-{
- assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
- assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
- assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
- arena_maxclass);
- assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
- arena_maxclass);
- assert(arena_mapbits_dirty_get(chunk, map_bias) ==
- arena_mapbits_dirty_get(chunk, chunk_npages-1));
+arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time,
+ size_t current_npages) {
+ assert(arena_decay_deadline_reached(decay, time));
- /*
- * Remove run from the runs_avail tree, so that the arena does not use
- * it.
- */
- arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias,
- false, false);
+ nstime_t delta;
+ nstime_copy(&delta, time);
+ nstime_subtract(&delta, &decay->epoch);
- if (arena->spare != NULL) {
- arena_chunk_t *spare = arena->spare;
-
- arena->spare = chunk;
- malloc_mutex_unlock(&arena->lock);
- chunk_dealloc((void *)spare, chunksize, true);
- malloc_mutex_lock(&arena->lock);
- if (config_stats)
- arena->stats.mapped -= chunksize;
- } else
- arena->spare = chunk;
-}
+ uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
+ assert(nadvance_u64 > 0);
-static arena_run_t *
-arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
-{
- arena_run_t *run;
- arena_chunk_map_t *mapelm, key;
+ /* Add nadvance_u64 decay intervals to epoch. */
+ nstime_copy(&delta, &decay->interval);
+ nstime_imultiply(&delta, nadvance_u64);
+ nstime_add(&decay->epoch, &delta);
- key.bits = size | CHUNK_MAP_KEY;
- mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
- if (mapelm != NULL) {
- arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
- size_t pageind = (((uintptr_t)mapelm -
- (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
- + map_bias;
+ /* Set a new deadline. */
+ arena_decay_deadline_init(decay);
- run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
- LG_PAGE));
- arena_run_split_large(arena, run, size, zero);
- return (run);
- }
-
- return (NULL);
+ /* Update the backlog. */
+ arena_decay_backlog_update(decay, nadvance_u64, current_npages);
}
-static arena_run_t *
-arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
-{
- arena_chunk_t *chunk;
- arena_run_t *run;
-
- assert(size <= arena_maxclass);
- assert((size & PAGE_MASK) == 0);
+static void
+arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
+ extents_t *extents, const nstime_t *time, bool is_background_thread) {
+ size_t current_npages = extents_npages_get(extents);
+ arena_decay_epoch_advance_helper(decay, time, current_npages);
- /* Search the arena's chunks for the lowest best fit. */
- run = arena_run_alloc_large_helper(arena, size, zero);
- if (run != NULL)
- return (run);
+ size_t npages_limit = arena_decay_backlog_npages_limit(decay);
+	/* arena_decay_try_purge() may drop decay->mtx; finish logging first. */
+ decay->nunpurged = (npages_limit > current_npages) ? npages_limit :
+ current_npages;
- /*
- * No usable runs. Create a new chunk from which to allocate the run.
- */
- chunk = arena_chunk_alloc(arena);
- if (chunk != NULL) {
- run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
- arena_run_split_large(arena, run, size, zero);
- return (run);
+ if (!background_thread_enabled() || is_background_thread) {
+ arena_decay_try_purge(tsdn, arena, decay, extents,
+ current_npages, npages_limit, is_background_thread);
}
-
- /*
- * arena_chunk_alloc() failed, but another thread may have made
- * sufficient memory available while this one dropped arena->lock in
- * arena_chunk_alloc(), so search one more time.
- */
- return (arena_run_alloc_large_helper(arena, size, zero));
}
-static arena_run_t *
-arena_run_alloc_small_helper(arena_t *arena, size_t size, size_t binind)
-{
- arena_run_t *run;
- arena_chunk_map_t *mapelm, key;
+static void
+arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) {
+ arena_decay_ms_write(decay, decay_ms);
+ if (decay_ms > 0) {
+ nstime_init(&decay->interval, (uint64_t)decay_ms *
+ KQU(1000000));
+ nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
+ }
- key.bits = size | CHUNK_MAP_KEY;
- mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
- if (mapelm != NULL) {
- arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
- size_t pageind = (((uintptr_t)mapelm -
- (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
- + map_bias;
+ nstime_init(&decay->epoch, 0);
+ nstime_update(&decay->epoch);
+ decay->jitter_state = (uint64_t)(uintptr_t)decay;
+ arena_decay_deadline_init(decay);
+ decay->nunpurged = 0;
+ memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
+}
- run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
- LG_PAGE));
- arena_run_split_small(arena, run, size, binind);
- return (run);
+static bool
+arena_decay_init(arena_decay_t *decay, ssize_t decay_ms,
+ arena_stats_decay_t *stats) {
+ if (config_debug) {
+ for (size_t i = 0; i < sizeof(arena_decay_t); i++) {
+ assert(((char *)decay)[i] == 0);
+ }
+ decay->ceil_npages = 0;
}
-
- return (NULL);
+ if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ decay->purging = false;
+ arena_decay_reinit(decay, decay_ms);
+ /* Memory is zeroed, so there is no need to clear stats. */
+ if (config_stats) {
+ decay->stats = stats;
+ }
+ return false;
}
-static arena_run_t *
-arena_run_alloc_small(arena_t *arena, size_t size, size_t binind)
-{
- arena_chunk_t *chunk;
- arena_run_t *run;
+static bool
+arena_decay_ms_valid(ssize_t decay_ms) {
+ if (decay_ms < -1) {
+ return false;
+ }
+ if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
+ KQU(1000)) {
+ return true;
+ }
+ return false;
+}
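/*
 * Editorial note (illustration, not part of the upstream diff): a decay_ms of
 * -1 disables time-based purging entirely, 0 purges unused pages immediately,
 * and a positive value such as 10000 spreads the purge of a page over roughly
 * ten seconds after it becomes unused.
 */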
- assert(size <= arena_maxclass);
- assert((size & PAGE_MASK) == 0);
- assert(binind != BININD_INVALID);
+static bool
+arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
+ extents_t *extents, bool is_background_thread) {
+ malloc_mutex_assert_owner(tsdn, &decay->mtx);
+
+ /* Purge all or nothing if the option is disabled. */
+ ssize_t decay_ms = arena_decay_ms_read(decay);
+ if (decay_ms <= 0) {
+ if (decay_ms == 0) {
+ arena_decay_to_limit(tsdn, arena, decay, extents, false,
+ 0, extents_npages_get(extents),
+ is_background_thread);
+ }
+ return false;
+ }
- /* Search the arena's chunks for the lowest best fit. */
- run = arena_run_alloc_small_helper(arena, size, binind);
- if (run != NULL)
- return (run);
+ nstime_t time;
+ nstime_init(&time, 0);
+ nstime_update(&time);
+ if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time)
+ > 0)) {
+ /*
+ * Time went backwards. Move the epoch back in time and
+ * generate a new deadline, with the expectation that time
+ * typically flows forward for long enough periods of time that
+ * epochs complete. Unfortunately, this strategy is susceptible
+ * to clock jitter triggering premature epoch advances, but
+ * clock jitter estimation and compensation isn't feasible here
+ * because calls into this code are event-driven.
+ */
+ nstime_copy(&decay->epoch, &time);
+ arena_decay_deadline_init(decay);
+ } else {
+ /* Verify that time does not go backwards. */
+ assert(nstime_compare(&decay->epoch, &time) <= 0);
+ }
/*
- * No usable runs. Create a new chunk from which to allocate the run.
+ * If the deadline has been reached, advance to the current epoch and
+ * purge to the new limit if necessary. Note that dirty pages created
+ * during the current epoch are not subject to purge until a future
+	 * epoch; as a result, purging only happens on epoch advance, or when
+	 * triggered by background threads (as a scheduled event).
*/
- chunk = arena_chunk_alloc(arena);
- if (chunk != NULL) {
- run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
- arena_run_split_small(arena, run, size, binind);
- return (run);
+ bool advance_epoch = arena_decay_deadline_reached(decay, &time);
+ if (advance_epoch) {
+ arena_decay_epoch_advance(tsdn, arena, decay, extents, &time,
+ is_background_thread);
+ } else if (is_background_thread) {
+ arena_decay_try_purge(tsdn, arena, decay, extents,
+ extents_npages_get(extents),
+ arena_decay_backlog_npages_limit(decay),
+ is_background_thread);
}
- /*
- * arena_chunk_alloc() failed, but another thread may have made
- * sufficient memory available while this one dropped arena->lock in
- * arena_chunk_alloc(), so search one more time.
- */
- return (arena_run_alloc_small_helper(arena, size, binind));
+ return advance_epoch;
}
-static inline void
-arena_maybe_purge(arena_t *arena)
-{
- size_t npurgeable, threshold;
-
- /* Don't purge if the option is disabled. */
- if (opt_lg_dirty_mult < 0)
- return;
- /* Don't purge if all dirty pages are already being purged. */
- if (arena->ndirty <= arena->npurgatory)
- return;
- npurgeable = arena->ndirty - arena->npurgatory;
- threshold = (arena->nactive >> opt_lg_dirty_mult);
- /*
- * Don't purge unless the number of purgeable pages exceeds the
- * threshold.
- */
- if (npurgeable <= threshold)
- return;
-
- arena_purge(arena, false);
+static ssize_t
+arena_decay_ms_get(arena_decay_t *decay) {
+ return arena_decay_ms_read(decay);
}
-static arena_chunk_t *
-chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg)
-{
- size_t *ndirty = (size_t *)arg;
+ssize_t
+arena_dirty_decay_ms_get(arena_t *arena) {
+ return arena_decay_ms_get(&arena->decay_dirty);
+}
- assert(chunk->ndirty != 0);
- *ndirty += chunk->ndirty;
- return (NULL);
+ssize_t
+arena_muzzy_decay_ms_get(arena_t *arena) {
+ return arena_decay_ms_get(&arena->decay_muzzy);
}
-static size_t
-arena_compute_npurgatory(arena_t *arena, bool all)
-{
- size_t npurgatory, npurgeable;
+static bool
+arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
+ extents_t *extents, ssize_t decay_ms) {
+ if (!arena_decay_ms_valid(decay_ms)) {
+ return true;
+ }
+ malloc_mutex_lock(tsdn, &decay->mtx);
/*
- * Compute the minimum number of pages that this thread should try to
- * purge.
+ * Restart decay backlog from scratch, which may cause many dirty pages
+ * to be immediately purged. It would conceptually be possible to map
+ * the old backlog onto the new backlog, but there is no justification
+ * for such complexity since decay_ms changes are intended to be
+ * infrequent, either between the {-1, 0, >0} states, or a one-time
+ * arbitrary change during initial arena configuration.
*/
- npurgeable = arena->ndirty - arena->npurgatory;
+ arena_decay_reinit(decay, decay_ms);
+ arena_maybe_decay(tsdn, arena, decay, extents, false);
+ malloc_mutex_unlock(tsdn, &decay->mtx);
- if (all == false) {
- size_t threshold = (arena->nactive >> opt_lg_dirty_mult);
+ return false;
+}
- npurgatory = npurgeable - threshold;
- } else
- npurgatory = npurgeable;
+bool
+arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
+ ssize_t decay_ms) {
+ return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty,
+ &arena->extents_dirty, decay_ms);
+}
- return (npurgatory);
+bool
+arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
+ ssize_t decay_ms) {
+ return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy,
+ &arena->extents_muzzy, decay_ms);
}
-static void
-arena_chunk_stash_dirty(arena_t *arena, arena_chunk_t *chunk, bool all,
- arena_chunk_mapelms_t *mapelms)
-{
- size_t pageind, npages;
+static size_t
+arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit,
+ size_t npages_decay_max, extent_list_t *decay_extents) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
- /*
- * Temporarily allocate free dirty runs within chunk. If all is false,
- * only operate on dirty runs that are fragments; otherwise operate on
- * all dirty runs.
- */
- for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
- arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
- if (arena_mapbits_allocated_get(chunk, pageind) == 0) {
- size_t run_size =
- arena_mapbits_unallocated_size_get(chunk, pageind);
-
- npages = run_size >> LG_PAGE;
- assert(pageind + npages <= chunk_npages);
- assert(arena_mapbits_dirty_get(chunk, pageind) ==
- arena_mapbits_dirty_get(chunk, pageind+npages-1));
-
- if (arena_mapbits_dirty_get(chunk, pageind) != 0 &&
- (all || arena_avail_adjac(chunk, pageind,
- npages))) {
- arena_run_t *run = (arena_run_t *)((uintptr_t)
- chunk + (uintptr_t)(pageind << LG_PAGE));
-
- arena_run_split_large(arena, run, run_size,
- false);
- /* Append to list for later processing. */
- ql_elm_new(mapelm, u.ql_link);
- ql_tail_insert(mapelms, mapelm, u.ql_link);
- }
- } else {
- /* Skip run. */
- if (arena_mapbits_large_get(chunk, pageind) != 0) {
- npages = arena_mapbits_large_size_get(chunk,
- pageind) >> LG_PAGE;
- } else {
- size_t binind;
- arena_bin_info_t *bin_info;
- arena_run_t *run = (arena_run_t *)((uintptr_t)
- chunk + (uintptr_t)(pageind << LG_PAGE));
-
- assert(arena_mapbits_small_runind_get(chunk,
- pageind) == 0);
- binind = arena_bin_index(arena, run->bin);
- bin_info = &arena_bin_info[binind];
- npages = bin_info->run_size >> LG_PAGE;
- }
- }
+ /* Stash extents according to npages_limit. */
+ size_t nstashed = 0;
+ extent_t *extent;
+ while (nstashed < npages_decay_max &&
+ (extent = extents_evict(tsdn, arena, r_extent_hooks, extents,
+ npages_limit)) != NULL) {
+ extent_list_append(decay_extents, extent);
+ nstashed += extent_size_get(extent) >> LG_PAGE;
}
- assert(pageind == chunk_npages);
- assert(chunk->ndirty == 0 || all == false);
- assert(chunk->nruns_adjac == 0);
+ return nstashed;
}
static size_t
-arena_chunk_purge_stashed(arena_t *arena, arena_chunk_t *chunk,
- arena_chunk_mapelms_t *mapelms)
-{
- size_t npurged, pageind, npages, nmadvise;
- arena_chunk_map_t *mapelm;
-
- malloc_mutex_unlock(&arena->lock);
- if (config_stats)
+arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents,
+ bool all, extent_list_t *decay_extents, bool is_background_thread) {
+ UNUSED size_t nmadvise, nunmapped;
+ size_t npurged;
+
+ if (config_stats) {
nmadvise = 0;
+ nunmapped = 0;
+ }
npurged = 0;
- ql_foreach(mapelm, mapelms, u.ql_link) {
- bool unzeroed;
- size_t flag_unzeroed, i;
-
- pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
- sizeof(arena_chunk_map_t)) + map_bias;
- npages = arena_mapbits_large_size_get(chunk, pageind) >>
- LG_PAGE;
- assert(pageind + npages <= chunk_npages);
- unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind <<
- LG_PAGE)), (npages << LG_PAGE));
- flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;
- /*
- * Set the unzeroed flag for all pages, now that pages_purge()
- * has returned whether the pages were zeroed as a side effect
- * of purging. This chunk map modification is safe even though
- * the arena mutex isn't currently owned by this thread,
- * because the run is marked as allocated, thus protecting it
- * from being modified by any other thread. As long as these
- * writes don't perturb the first and last elements'
- * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
- */
- for (i = 0; i < npages; i++) {
- arena_mapbits_unzeroed_set(chunk, pageind+i,
- flag_unzeroed);
+
+ ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
+ for (extent_t *extent = extent_list_first(decay_extents); extent !=
+ NULL; extent = extent_list_first(decay_extents)) {
+ if (config_stats) {
+ nmadvise++;
}
+ size_t npages = extent_size_get(extent) >> LG_PAGE;
npurged += npages;
- if (config_stats)
- nmadvise++;
+ extent_list_remove(decay_extents, extent);
+ switch (extents_state_get(extents)) {
+ case extent_state_active:
+ not_reached();
+ case extent_state_dirty:
+ if (!all && muzzy_decay_ms != 0 &&
+ !extent_purge_lazy_wrapper(tsdn, arena,
+ r_extent_hooks, extent, 0,
+ extent_size_get(extent))) {
+ extents_dalloc(tsdn, arena, r_extent_hooks,
+ &arena->extents_muzzy, extent);
+ arena_background_thread_inactivity_check(tsdn,
+ arena, is_background_thread);
+ break;
+ }
+ /* Fall through. */
+ case extent_state_muzzy:
+ extent_dalloc_wrapper(tsdn, arena, r_extent_hooks,
+ extent);
+ if (config_stats) {
+ nunmapped += npages;
+ }
+ break;
+ case extent_state_retained:
+ default:
+ not_reached();
+ }
+ }
+
+ if (config_stats) {
+ arena_stats_lock(tsdn, &arena->stats);
+ arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge,
+ 1);
+ arena_stats_add_u64(tsdn, &arena->stats,
+ &decay->stats->nmadvise, nmadvise);
+ arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged,
+ npurged);
+ arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
+ nunmapped << LG_PAGE);
+ arena_stats_unlock(tsdn, &arena->stats);
}
- malloc_mutex_lock(&arena->lock);
- if (config_stats)
- arena->stats.nmadvise += nmadvise;
- return (npurged);
+ return npurged;
}
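/*
 * Editorial note (illustration, not part of the upstream diff): a stashed
 * dirty extent is normally demoted by a lazy purge (e.g. MADV_FREE) into
 * extents_muzzy; it only reaches extent_dalloc_wrapper() -- and the nunmapped
 * accounting -- on the muzzy pass, when muzzy decay is disabled, when the
 * lazy purge fails, or during a forced full purge.
 */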
+/*
+ * npages_limit: Decay at most npages_decay_max pages without violating the
+ * invariant: (extents_npages_get(extents) >= npages_limit). We need an upper
+ * bound on number of pages in order to prevent unbounded growth (namely in
+ * stashed), otherwise unbounded new pages could be added to extents during the
+ * current decay run, so that the purging thread never finishes.
+ */
static void
-arena_chunk_unstash_purged(arena_t *arena, arena_chunk_t *chunk,
- arena_chunk_mapelms_t *mapelms)
-{
- arena_chunk_map_t *mapelm;
- size_t pageind;
-
- /* Deallocate runs. */
- for (mapelm = ql_first(mapelms); mapelm != NULL;
- mapelm = ql_first(mapelms)) {
- arena_run_t *run;
-
- pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
- sizeof(arena_chunk_map_t)) + map_bias;
- run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind <<
- LG_PAGE));
- ql_remove(mapelms, mapelm, u.ql_link);
- arena_run_dalloc(arena, run, false, true);
+arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
+ extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max,
+ bool is_background_thread) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 1);
+ malloc_mutex_assert_owner(tsdn, &decay->mtx);
+
+ if (decay->purging) {
+ return;
}
-}
+ decay->purging = true;
+ malloc_mutex_unlock(tsdn, &decay->mtx);
-static inline size_t
-arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
-{
- size_t npurged;
- arena_chunk_mapelms_t mapelms;
+ extent_hooks_t *extent_hooks = extent_hooks_get(arena);
- ql_new(&mapelms);
+ extent_list_t decay_extents;
+ extent_list_init(&decay_extents);
- /*
- * If chunk is the spare, temporarily re-allocate it, 1) so that its
- * run is reinserted into runs_avail, and 2) so that it cannot be
- * completely discarded by another thread while arena->lock is dropped
- * by this thread. Note that the arena_run_dalloc() call will
- * implicitly deallocate the chunk, so no explicit action is required
- * in this function to deallocate the chunk.
- *
- * Note that once a chunk contains dirty pages, it cannot again contain
- * a single run unless 1) it is a dirty run, or 2) this function purges
- * dirty pages and causes the transition to a single clean run. Thus
- * (chunk == arena->spare) is possible, but it is not possible for
- * this function to be called on the spare unless it contains a dirty
- * run.
- */
- if (chunk == arena->spare) {
- assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
- assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0);
-
- arena_chunk_alloc(arena);
+ size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents,
+ npages_limit, npages_decay_max, &decay_extents);
+ if (npurge != 0) {
+ UNUSED size_t npurged = arena_decay_stashed(tsdn, arena,
+ &extent_hooks, decay, extents, all, &decay_extents,
+ is_background_thread);
+ assert(npurged == npurge);
}
- if (config_stats)
- arena->stats.purged += chunk->ndirty;
-
- /*
- * Operate on all dirty runs if there is no clean/dirty run
- * fragmentation.
- */
- if (chunk->nruns_adjac == 0)
- all = true;
-
- arena_chunk_stash_dirty(arena, chunk, all, &mapelms);
- npurged = arena_chunk_purge_stashed(arena, chunk, &mapelms);
- arena_chunk_unstash_purged(arena, chunk, &mapelms);
-
- return (npurged);
+ malloc_mutex_lock(tsdn, &decay->mtx);
+ decay->purging = false;
}
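/*
 * Editorial note (illustration, not part of the upstream diff): decay->mtx is
 * dropped for the duration of the stash/purge work above; the decay->purging
 * flag is what keeps a second thread from starting an overlapping purge pass
 * while the mutex is released.
 */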
-static void
-arena_purge(arena_t *arena, bool all)
-{
- arena_chunk_t *chunk;
- size_t npurgatory;
- if (config_debug) {
- size_t ndirty = 0;
+static bool
+arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
+ extents_t *extents, bool is_background_thread, bool all) {
+ if (all) {
+ malloc_mutex_lock(tsdn, &decay->mtx);
+ arena_decay_to_limit(tsdn, arena, decay, extents, all, 0,
+ extents_npages_get(extents), is_background_thread);
+ malloc_mutex_unlock(tsdn, &decay->mtx);
- arena_chunk_dirty_iter(&arena->chunks_dirty, NULL,
- chunks_dirty_iter_cb, (void *)&ndirty);
- assert(ndirty == arena->ndirty);
+ return false;
}
- assert(arena->ndirty > arena->npurgatory || all);
- assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
- arena->npurgatory) || all);
- if (config_stats)
- arena->stats.npurge++;
+ if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
+ /* No need to wait if another thread is in progress. */
+ return true;
+ }
- /*
- * Add the minimum number of pages this thread should try to purge to
- * arena->npurgatory. This will keep multiple threads from racing to
- * reduce ndirty below the threshold.
- */
- npurgatory = arena_compute_npurgatory(arena, all);
- arena->npurgatory += npurgatory;
+ bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents,
+ is_background_thread);
+ UNUSED size_t npages_new;
+ if (epoch_advanced) {
+ /* Backlog is updated on epoch advance. */
+ npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1];
+ }
+ malloc_mutex_unlock(tsdn, &decay->mtx);
- while (npurgatory > 0) {
- size_t npurgeable, npurged, nunpurged;
+ if (have_background_thread && background_thread_enabled() &&
+ epoch_advanced && !is_background_thread) {
+ background_thread_interval_check(tsdn, arena, decay,
+ npages_new);
+ }
- /* Get next chunk with dirty pages. */
- chunk = arena_chunk_dirty_first(&arena->chunks_dirty);
- if (chunk == NULL) {
- /*
- * This thread was unable to purge as many pages as
- * originally intended, due to races with other threads
- * that either did some of the purging work, or re-used
- * dirty pages.
- */
- arena->npurgatory -= npurgatory;
- return;
- }
- npurgeable = chunk->ndirty;
- assert(npurgeable != 0);
+ return false;
+}
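/*
 * When background threads are enabled, arena_decay_impl() also runs off the
 * application's allocation path: jemalloc-internal threads wake up on the
 * interval derived from npages_new and perform the purging themselves.  A
 * minimal sketch of enabling that at runtime, assuming the standard
 * unprefixed <jemalloc/jemalloc.h> API (a prefixed build exposes
 * je_mallctl(), etc.).
 */
#include <assert.h>
#include <stdbool.h>
#include <jemalloc/jemalloc.h>

static void
enable_background_purging(void) {
	bool enable = true;

	/* Spawns jemalloc-internal threads that drive the decay logic. */
	assert(mallctl("background_thread", NULL, NULL, &enable,
	    sizeof(enable)) == 0);
}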
- if (npurgeable > npurgatory && chunk->nruns_adjac == 0) {
- /*
- * This thread will purge all the dirty pages in chunk,
- * so set npurgatory to reflect this thread's intent to
- * purge the pages. This tends to reduce the chances
- * of the following scenario:
- *
- * 1) This thread sets arena->npurgatory such that
- * (arena->ndirty - arena->npurgatory) is at the
- * threshold.
- * 2) This thread drops arena->lock.
- * 3) Another thread causes one or more pages to be
- * dirtied, and immediately determines that it must
- * purge dirty pages.
- *
- * If this scenario *does* play out, that's okay,
- * because all of the purging work being done really
- * needs to happen.
- */
- arena->npurgatory += npurgeable - npurgatory;
- npurgatory = npurgeable;
- }
+static bool
+arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
+ bool all) {
+ return arena_decay_impl(tsdn, arena, &arena->decay_dirty,
+ &arena->extents_dirty, is_background_thread, all);
+}
- /*
- * Keep track of how many pages are purgeable, versus how many
- * actually get purged, and adjust counters accordingly.
- */
- arena->npurgatory -= npurgeable;
- npurgatory -= npurgeable;
- npurged = arena_chunk_purge(arena, chunk, all);
- nunpurged = npurgeable - npurged;
- arena->npurgatory += nunpurged;
- npurgatory += nunpurged;
- }
+static bool
+arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
+ bool all) {
+ return arena_decay_impl(tsdn, arena, &arena->decay_muzzy,
+ &arena->extents_muzzy, is_background_thread, all);
}
void
-arena_purge_all(arena_t *arena)
-{
-
- malloc_mutex_lock(&arena->lock);
- arena_purge(arena, true);
- malloc_mutex_unlock(&arena->lock);
+arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
+ if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) {
+ return;
+ }
+ arena_decay_muzzy(tsdn, arena, is_background_thread, all);
}
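/*
 * From the application side, the dirty/muzzy decay implemented above is
 * reachable through the mallctl namespace: "arena.<i>.decay" corresponds to
 * the all == false path and "arena.<i>.purge" to all == true, and the decay
 * times themselves are runtime-tunable.  A minimal sketch, assuming the
 * standard unprefixed <jemalloc/jemalloc.h> API.
 */
#include <assert.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
force_decay_example(void) {
	char cmd[64];
	ssize_t dirty_decay_ms = 1000;	/* Decay dirty pages over ~1 second. */

	/* Tune the dirty decay time for every arena. */
	snprintf(cmd, sizeof(cmd), "arena.%d.dirty_decay_ms",
	    MALLCTL_ARENAS_ALL);
	assert(mallctl(cmd, NULL, NULL, &dirty_decay_ms,
	    sizeof(dirty_decay_ms)) == 0);

	/* Apply time-based decay now (all == false). */
	snprintf(cmd, sizeof(cmd), "arena.%d.decay", MALLCTL_ARENAS_ALL);
	assert(mallctl(cmd, NULL, NULL, NULL, 0) == 0);

	/* Purge all unused dirty/muzzy pages immediately (all == true). */
	snprintf(cmd, sizeof(cmd), "arena.%d.purge", MALLCTL_ARENAS_ALL);
	assert(mallctl(cmd, NULL, NULL, NULL, 0) == 0);
}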
static void
-arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
- size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty)
-{
- size_t size = *p_size;
- size_t run_ind = *p_run_ind;
- size_t run_pages = *p_run_pages;
-
- /* Try to coalesce forward. */
- if (run_ind + run_pages < chunk_npages &&
- arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
- arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) {
- size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
- run_ind+run_pages);
- size_t nrun_pages = nrun_size >> LG_PAGE;
+arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
+ arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);
- /*
- * Remove successor from runs_avail; the coalesced run is
- * inserted later.
- */
- assert(arena_mapbits_unallocated_size_get(chunk,
- run_ind+run_pages+nrun_pages-1) == nrun_size);
- assert(arena_mapbits_dirty_get(chunk,
- run_ind+run_pages+nrun_pages-1) == flag_dirty);
- arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages,
- false, true);
-
- size += nrun_size;
- run_pages += nrun_pages;
-
- arena_mapbits_unallocated_size_set(chunk, run_ind, size);
- arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
- size);
- }
-
- /* Try to coalesce backward. */
- if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
- run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
- flag_dirty) {
- size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
- run_ind-1);
- size_t prun_pages = prun_size >> LG_PAGE;
-
- run_ind -= prun_pages;
+ extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
+ arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab);
+}
- /*
- * Remove predecessor from runs_avail; the coalesced run is
- * inserted later.
- */
- assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
- prun_size);
- assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
- arena_avail_remove(arena, chunk, run_ind, prun_pages, true,
- false);
+static void
+arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) {
+ assert(extent_nfree_get(slab) > 0);
+ extent_heap_insert(&bin->slabs_nonfull, slab);
+}
- size += prun_size;
- run_pages += prun_pages;
+static void
+arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) {
+ extent_heap_remove(&bin->slabs_nonfull, slab);
+}
- arena_mapbits_unallocated_size_set(chunk, run_ind, size);
- arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
- size);
+static extent_t *
+arena_bin_slabs_nonfull_tryget(bin_t *bin) {
+ extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
+ if (slab == NULL) {
+ return NULL;
}
-
- *p_size = size;
- *p_run_ind = run_ind;
- *p_run_pages = run_pages;
+ if (config_stats) {
+ bin->stats.reslabs++;
+ }
+ return slab;
}
static void
-arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
-{
- arena_chunk_t *chunk;
- size_t size, run_ind, run_pages, flag_dirty;
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
- assert(run_ind >= map_bias);
- assert(run_ind < chunk_npages);
- if (arena_mapbits_large_get(chunk, run_ind) != 0) {
- size = arena_mapbits_large_size_get(chunk, run_ind);
- assert(size == PAGE ||
- arena_mapbits_large_size_get(chunk,
- run_ind+(size>>LG_PAGE)-1) == 0);
- } else {
- size_t binind = arena_bin_index(arena, run->bin);
- arena_bin_info_t *bin_info = &arena_bin_info[binind];
- size = bin_info->run_size;
- }
- run_pages = (size >> LG_PAGE);
- arena_cactive_update(arena, 0, run_pages);
- arena->nactive -= run_pages;
-
+arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) {
+ assert(extent_nfree_get(slab) == 0);
/*
- * The run is dirty if the caller claims to have dirtied it, as well as
- * if it was already dirty before being allocated and the caller
- * doesn't claim to have cleaned it.
+ * Tracking extents is required by arena_reset, which is not allowed
+ * for auto arenas. Bypass this step to avoid touching the extent
+ * linkage (often results in cache misses) for auto arenas.
*/
- assert(arena_mapbits_dirty_get(chunk, run_ind) ==
- arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
- if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0)
- dirty = true;
- flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
-
- /* Mark pages as unallocated in the chunk map. */
- if (dirty) {
- arena_mapbits_unallocated_set(chunk, run_ind, size,
- CHUNK_MAP_DIRTY);
- arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
- CHUNK_MAP_DIRTY);
- } else {
- arena_mapbits_unallocated_set(chunk, run_ind, size,
- arena_mapbits_unzeroed_get(chunk, run_ind));
- arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
- arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
+ if (arena_is_auto(arena)) {
+ return;
}
+ extent_list_append(&bin->slabs_full, slab);
+}
- arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
- flag_dirty);
-
- /* Insert into runs_avail, now that coalescing is complete. */
- assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
- arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
- assert(arena_mapbits_dirty_get(chunk, run_ind) ==
- arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
- arena_avail_insert(arena, chunk, run_ind, run_pages, true, true);
-
- /* Deallocate chunk if it is now completely unused. */
- if (size == arena_maxclass) {
- assert(run_ind == map_bias);
- assert(run_pages == (arena_maxclass >> LG_PAGE));
- arena_chunk_dealloc(arena, chunk);
+static void
+arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) {
+ if (arena_is_auto(arena)) {
+ return;
}
+ extent_list_remove(&bin->slabs_full, slab);
+}
+void
+arena_reset(tsd_t *tsd, arena_t *arena) {
/*
- * It is okay to do dirty page processing here even if the chunk was
- * deallocated above, since in that case it is the spare. Waiting
- * until after possible chunk deallocation to do dirty processing
- * allows for an old spare to be fully deallocated, thus decreasing the
- * chances of spuriously crossing the dirty page purging threshold.
+ * Locking in this function is unintuitive. The caller guarantees that
+ * no concurrent operations are happening in this arena, but there are
+ * still reasons that some locking is necessary:
+ *
+ * - Some of the functions in the transitive closure of calls assume
+ * appropriate locks are held, and in some cases these locks are
+ * temporarily dropped to avoid lock order reversal or deadlock due to
+ * reentry.
+ * - mallctl("epoch", ...) may concurrently refresh stats. While
+ * strictly speaking this is a "concurrent operation", disallowing
+ * stats refreshes would impose an inconvenient burden.
*/
- if (dirty)
- arena_maybe_purge(arena);
-}
-static void
-arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
- size_t oldsize, size_t newsize)
-{
- size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
- size_t head_npages = (oldsize - newsize) >> LG_PAGE;
- size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
+ /* Large allocations. */
+ malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
- assert(oldsize > newsize);
+ for (extent_t *extent = extent_list_first(&arena->large); extent !=
+ NULL; extent = extent_list_first(&arena->large)) {
+ void *ptr = extent_base_get(extent);
+ size_t usize;
- /*
- * Update the chunk map so that arena_run_dalloc() can treat the
- * leading run as separately allocated. Set the last element of each
- * run first, in case of single-page runs.
- */
- assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
- arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
- arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
+ alloc_ctx_t alloc_ctx;
+ rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
+ rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
+ assert(alloc_ctx.szind != NSIZES);
- if (config_debug) {
- UNUSED size_t tail_npages = newsize >> LG_PAGE;
- assert(arena_mapbits_large_size_get(chunk,
- pageind+head_npages+tail_npages-1) == 0);
- assert(arena_mapbits_dirty_get(chunk,
- pageind+head_npages+tail_npages-1) == flag_dirty);
+ if (config_stats || (config_prof && opt_prof)) {
+ usize = sz_index2size(alloc_ctx.szind);
+ assert(usize == isalloc(tsd_tsdn(tsd), ptr));
+ }
+ /* Remove large allocation from prof sample set. */
+ if (config_prof && opt_prof) {
+ prof_free(tsd, ptr, usize, &alloc_ctx);
+ }
+ large_dalloc(tsd_tsdn(tsd), extent);
+ malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
+
+ /* Bins. */
+ for (unsigned i = 0; i < NBINS; i++) {
+ extent_t *slab;
+ bin_t *bin = &arena->bins[i];
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ if (bin->slabcur != NULL) {
+ slab = bin->slabcur;
+ bin->slabcur = NULL;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ }
+ while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) !=
+ NULL) {
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ }
+ for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
+ slab = extent_list_first(&bin->slabs_full)) {
+ arena_bin_slabs_full_remove(arena, bin, slab);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ }
+ if (config_stats) {
+ bin->stats.curregs = 0;
+ bin->stats.curslabs = 0;
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
}
- arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
- flag_dirty);
- arena_run_dalloc(arena, run, false, false);
+ atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
}
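/*
 * arena_reset() is exposed as the "arena.<i>.reset" mallctl and is only
 * permitted for explicitly created arenas; the slabs_full tracking it walks
 * above is deliberately skipped for auto arenas.  A minimal sketch, assuming
 * the standard unprefixed <jemalloc/jemalloc.h> API.
 */
#include <assert.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
arena_reset_example(void) {
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);
	char cmd[64];

	/* Create a manual arena and allocate from it, bypassing the tcache. */
	assert(mallctl("arenas.create", &arena_ind, &sz, NULL, 0) == 0);
	void *p = mallocx(4096, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
	assert(p != NULL);

	/* Discard every extant allocation in the arena in one call. */
	snprintf(cmd, sizeof(cmd), "arena.%u.reset", arena_ind);
	assert(mallctl(cmd, NULL, NULL, NULL, 0) == 0);
	/* p is now invalid, but the arena itself remains usable. */
	(void)p;
}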
static void
-arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
- size_t oldsize, size_t newsize, bool dirty)
-{
- size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
- size_t head_npages = newsize >> LG_PAGE;
- size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
-
- assert(oldsize > newsize);
-
+arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
/*
- * Update the chunk map so that arena_run_dalloc() can treat the
- * trailing run as separately allocated. Set the last element of each
- * run first, in case of single-page runs.
+ * Iterate over the retained extents and destroy them. This gives the
+ * extent allocator underlying the extent hooks an opportunity to unmap
+ * all retained memory without having to keep its own metadata
+ * structures. In practice, virtual memory for dss-allocated extents is
+ * leaked here, so best practice is to avoid dss for arenas to be
+ * destroyed, or provide custom extent hooks that track retained
+ * dss-based extents for later reuse.
*/
- assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
- arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
- arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty);
-
- if (config_debug) {
- UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
- assert(arena_mapbits_large_size_get(chunk,
- pageind+head_npages+tail_npages-1) == 0);
- assert(arena_mapbits_dirty_get(chunk,
- pageind+head_npages+tail_npages-1) == flag_dirty);
+ extent_hooks_t *extent_hooks = extent_hooks_get(arena);
+ extent_t *extent;
+ while ((extent = extents_evict(tsdn, arena, &extent_hooks,
+ &arena->extents_retained, 0)) != NULL) {
+ extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent);
}
- arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
- flag_dirty);
-
- arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
- dirty, false);
}
-static arena_run_t *
-arena_bin_runs_first(arena_bin_t *bin)
-{
- arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs);
- if (mapelm != NULL) {
- arena_chunk_t *chunk;
- size_t pageind;
- arena_run_t *run;
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
- pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
- sizeof(arena_chunk_map_t))) + map_bias;
- run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
- arena_mapbits_small_runind_get(chunk, pageind)) <<
- LG_PAGE));
- return (run);
- }
+void
+arena_destroy(tsd_t *tsd, arena_t *arena) {
+ assert(base_ind_get(arena->base) >= narenas_auto);
+ assert(arena_nthreads_get(arena, false) == 0);
+ assert(arena_nthreads_get(arena, true) == 0);
- return (NULL);
-}
+ /*
+ * No allocations have occurred since arena_reset() was called.
+ * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
+ * extents, so only retained extents may remain.
+ */
+ assert(extents_npages_get(&arena->extents_dirty) == 0);
+ assert(extents_npages_get(&arena->extents_muzzy) == 0);
-static void
-arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
-{
- arena_chunk_t *chunk = CHUNK_ADDR2BASE(run);
- size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
- arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
+ /* Deallocate retained memory. */
+ arena_destroy_retained(tsd_tsdn(tsd), arena);
- assert(arena_run_tree_search(&bin->runs, mapelm) == NULL);
+ /*
+ * Remove the arena pointer from the arenas array. We rely on the fact
+ * that there is no way for the application to get a dirty read from the
+ * arenas array unless there is an inherent race in the application
+ * involving access of an arena being concurrently destroyed. The
+ * application must synchronize knowledge of the arena's validity, so as
+ * long as we use an atomic write to update the arenas array, the
+ * application will get a clean read any time after it synchronizes
+ * knowledge that the arena is no longer valid.
+ */
+ arena_set(base_ind_get(arena->base), NULL);
- arena_run_tree_insert(&bin->runs, mapelm);
+ /*
+ * Destroy the base allocator, which manages all metadata ever mapped by
+ * this arena.
+ */
+ base_delete(tsd_tsdn(tsd), arena->base);
}
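/*
 * The full lifecycle that ends in arena_destroy() is driven through mallctl:
 * "arenas.create" hands out an index, and "arena.<i>.destroy" resets the
 * arena, purges its cached extents, and then runs the teardown above.  A
 * minimal sketch, assuming the standard unprefixed <jemalloc/jemalloc.h> API.
 */
#include <assert.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
arena_lifecycle_example(void) {
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);
	char cmd[64];

	assert(mallctl("arenas.create", &arena_ind, &sz, NULL, 0) == 0);

	void *p = mallocx(1 << 20,
	    MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
	assert(p != NULL);
	dallocx(p, MALLOCX_TCACHE_NONE);

	/* Retire the arena; the index may be recycled by a later create. */
	snprintf(cmd, sizeof(cmd), "arena.%u.destroy", arena_ind);
	assert(mallctl(cmd, NULL, NULL, NULL, 0) == 0);
}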
-static void
-arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
-{
- arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
- arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
-
- assert(arena_run_tree_search(&bin->runs, mapelm) != NULL);
+static extent_t *
+arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, const bin_info_t *bin_info,
+ szind_t szind) {
+ extent_t *slab;
+ bool zero, commit;
- arena_run_tree_remove(&bin->runs, mapelm);
-}
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
-static arena_run_t *
-arena_bin_nonfull_run_tryget(arena_bin_t *bin)
-{
- arena_run_t *run = arena_bin_runs_first(bin);
- if (run != NULL) {
- arena_bin_runs_remove(bin, run);
- if (config_stats)
- bin->stats.reruns++;
+ zero = false;
+ commit = true;
+ slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL,
+ bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit);
+
+ if (config_stats && slab != NULL) {
+ arena_stats_mapped_add(tsdn, &arena->stats,
+ bin_info->slab_size);
+ }
+
+ return slab;
+}
+
+static extent_t *
+arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
+ const bin_info_t *bin_info) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
+ szind_t szind = sz_size2index(bin_info->reg_size);
+ bool zero = false;
+ bool commit = true;
+ extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks,
+ &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true,
+ binind, &zero, &commit);
+ if (slab == NULL) {
+ slab = extents_alloc(tsdn, arena, &extent_hooks,
+ &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE,
+ true, binind, &zero, &commit);
+ }
+ if (slab == NULL) {
+ slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks,
+ bin_info, szind);
+ if (slab == NULL) {
+ return NULL;
+ }
}
- return (run);
+ assert(extent_slab_get(slab));
+
+ /* Initialize slab internals. */
+ arena_slab_data_t *slab_data = extent_slab_data_get(slab);
+ extent_nfree_set(slab, bin_info->nregs);
+ bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);
+
+ arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);
+
+ return slab;
}
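/*
 * A freshly allocated slab is carved into bin_info->nregs regions of
 * bin_info->reg_size bytes, and the bitmap initialized above records which
 * regions are free; allocation hands out the lowest free region, and freeing
 * sets its bit again (the real code also avoids the division in the
 * region-index computation via the precomputed arena_binind_div_info).  A
 * simplified, self-contained sketch of that bookkeeping with toy types, not
 * jemalloc's bitmap_t/extent_t:
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
	void		*base;		/* Address of region 0. */
	size_t		reg_size;	/* Bytes per region. */
	unsigned	nregs;		/* Regions per slab (<= 64 here). */
	uint64_t	free_bits;	/* Bit i set => region i is free. */
} toy_slab_t;

static void
toy_slab_init(toy_slab_t *slab, void *base, size_t reg_size, unsigned nregs) {
	assert(nregs > 0 && nregs <= 64);
	slab->base = base;
	slab->reg_size = reg_size;
	slab->nregs = nregs;
	slab->free_bits = (nregs == 64) ? ~0ULL : ((1ULL << nregs) - 1);
}

static void *
toy_slab_reg_alloc(toy_slab_t *slab) {
	if (slab->free_bits == 0) {
		return NULL;			/* Slab is full. */
	}
	unsigned regind = (unsigned)__builtin_ctzll(slab->free_bits);
	slab->free_bits &= slab->free_bits - 1;	/* Clear lowest set bit. */
	return (char *)slab->base + (size_t)regind * slab->reg_size;
}

static void
toy_slab_reg_dalloc(toy_slab_t *slab, void *ptr) {
	size_t offset = (size_t)((char *)ptr - (char *)slab->base);
	unsigned regind = (unsigned)(offset / slab->reg_size);

	assert(regind < slab->nregs);
	slab->free_bits |= 1ULL << regind;
}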
-static arena_run_t *
-arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
-{
- arena_run_t *run;
- size_t binind;
- arena_bin_info_t *bin_info;
+static extent_t *
+arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
+ szind_t binind) {
+ extent_t *slab;
+ const bin_info_t *bin_info;
- /* Look for a usable run. */
- run = arena_bin_nonfull_run_tryget(bin);
- if (run != NULL)
- return (run);
- /* No existing runs have any space available. */
+ /* Look for a usable slab. */
+ slab = arena_bin_slabs_nonfull_tryget(bin);
+ if (slab != NULL) {
+ return slab;
+ }
+ /* No existing slabs have any space available. */
- binind = arena_bin_index(arena, bin);
- bin_info = &arena_bin_info[binind];
+ bin_info = &bin_infos[binind];
- /* Allocate a new run. */
- malloc_mutex_unlock(&bin->lock);
+ /* Allocate a new slab. */
+ malloc_mutex_unlock(tsdn, &bin->lock);
/******************************/
- malloc_mutex_lock(&arena->lock);
- run = arena_run_alloc_small(arena, bin_info->run_size, binind);
- if (run != NULL) {
- bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
- (uintptr_t)bin_info->bitmap_offset);
-
- /* Initialize run internals. */
- run->bin = bin;
- run->nextind = 0;
- run->nfree = bin_info->nregs;
- bitmap_init(bitmap, &bin_info->bitmap_info);
- }
- malloc_mutex_unlock(&arena->lock);
+ slab = arena_slab_alloc(tsdn, arena, binind, bin_info);
/********************************/
- malloc_mutex_lock(&bin->lock);
- if (run != NULL) {
+ malloc_mutex_lock(tsdn, &bin->lock);
+ if (slab != NULL) {
if (config_stats) {
- bin->stats.nruns++;
- bin->stats.curruns++;
+ bin->stats.nslabs++;
+ bin->stats.curslabs++;
}
- return (run);
+ return slab;
}
/*
- * arena_run_alloc_small() failed, but another thread may have made
+ * arena_slab_alloc() failed, but another thread may have made
* sufficient memory available while this one dropped bin->lock above,
* so search one more time.
*/
- run = arena_bin_nonfull_run_tryget(bin);
- if (run != NULL)
- return (run);
+ slab = arena_bin_slabs_nonfull_tryget(bin);
+ if (slab != NULL) {
+ return slab;
+ }
- return (NULL);
+ return NULL;
}
-/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
+/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
static void *
-arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
-{
- void *ret;
- size_t binind;
- arena_bin_info_t *bin_info;
- arena_run_t *run;
-
- binind = arena_bin_index(arena, bin);
- bin_info = &arena_bin_info[binind];
- bin->runcur = NULL;
- run = arena_bin_nonfull_run_get(arena, bin);
- if (bin->runcur != NULL && bin->runcur->nfree > 0) {
+arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
+ szind_t binind) {
+ const bin_info_t *bin_info;
+ extent_t *slab;
+
+ bin_info = &bin_infos[binind];
+ if (!arena_is_auto(arena) && bin->slabcur != NULL) {
+ arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
+ bin->slabcur = NULL;
+ }
+ slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind);
+ if (bin->slabcur != NULL) {
/*
- * Another thread updated runcur while this one ran without the
- * bin lock in arena_bin_nonfull_run_get().
+ * Another thread updated slabcur while this one ran without the
+ * bin lock in arena_bin_nonfull_slab_get().
*/
- assert(bin->runcur->nfree > 0);
- ret = arena_run_reg_alloc(bin->runcur, bin_info);
- if (run != NULL) {
- arena_chunk_t *chunk;
-
- /*
- * arena_run_alloc_small() may have allocated run, or
- * it may have pulled run from the bin's run tree.
- * Therefore it is unsafe to make any assumptions about
- * how run has previously been used, and
- * arena_bin_lower_run() must be called, as if a region
- * were just deallocated from the run.
- */
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- if (run->nfree == bin_info->nregs)
- arena_dalloc_bin_run(arena, chunk, run, bin);
- else
- arena_bin_lower_run(arena, chunk, run, bin);
+ if (extent_nfree_get(bin->slabcur) > 0) {
+ void *ret = arena_slab_reg_alloc(bin->slabcur,
+ bin_info);
+ if (slab != NULL) {
+ /*
+ * arena_slab_alloc() may have allocated slab,
+ * or it may have been pulled from
+ * slabs_nonfull. Therefore it is unsafe to
+ * make any assumptions about how slab has
+ * previously been used, and
+ * arena_bin_lower_slab() must be called, as if
+ * a region were just deallocated from the slab.
+ */
+ if (extent_nfree_get(slab) == bin_info->nregs) {
+ arena_dalloc_bin_slab(tsdn, arena, slab,
+ bin);
+ } else {
+ arena_bin_lower_slab(tsdn, arena, slab,
+ bin);
+ }
+ }
+ return ret;
}
- return (ret);
- }
- if (run == NULL)
- return (NULL);
+ arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
+ bin->slabcur = NULL;
+ }
- bin->runcur = run;
+ if (slab == NULL) {
+ return NULL;
+ }
+ bin->slabcur = slab;
- assert(bin->runcur->nfree > 0);
+ assert(extent_nfree_get(bin->slabcur) > 0);
- return (arena_run_reg_alloc(bin->runcur, bin_info));
+ return arena_slab_reg_alloc(slab, bin_info);
}
void
-arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
- uint64_t prof_accumbytes)
-{
+arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
+ cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) {
unsigned i, nfill;
- arena_bin_t *bin;
- arena_run_t *run;
- void *ptr;
+ bin_t *bin;
assert(tbin->ncached == 0);
- if (config_prof && arena_prof_accum(arena, prof_accumbytes))
- prof_idump();
+ if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) {
+ prof_idump(tsdn);
+ }
bin = &arena->bins[binind];
- malloc_mutex_lock(&bin->lock);
+ malloc_mutex_lock(tsdn, &bin->lock);
for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
- tbin->lg_fill_div); i < nfill; i++) {
- if ((run = bin->runcur) != NULL && run->nfree > 0)
- ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
- else
- ptr = arena_bin_malloc_hard(arena, bin);
- if (ptr == NULL)
+ tcache->lg_fill_div[binind]); i < nfill; i++) {
+ extent_t *slab;
+ void *ptr;
+ if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) >
+ 0) {
+ ptr = arena_slab_reg_alloc(slab, &bin_infos[binind]);
+ } else {
+ ptr = arena_bin_malloc_hard(tsdn, arena, bin, binind);
+ }
+ if (ptr == NULL) {
+ /*
+ * OOM. tbin->avail isn't yet filled down to its first
+ * element, so the successful allocations (if any) must
+ * be moved just before tbin->avail before bailing out.
+ */
+ if (i > 0) {
+ memmove(tbin->avail - i, tbin->avail - nfill,
+ i * sizeof(void *));
+ }
break;
- if (config_fill && opt_junk) {
- arena_alloc_junk_small(ptr, &arena_bin_info[binind],
- true);
+ }
+ if (config_fill && unlikely(opt_junk_alloc)) {
+ arena_alloc_junk_small(ptr, &bin_infos[binind], true);
}
/* Insert such that low regions get used first. */
- tbin->avail[nfill - 1 - i] = ptr;
+ *(tbin->avail - nfill + i) = ptr;
}
if (config_stats) {
- bin->stats.allocated += i * arena_bin_info[binind].reg_size;
bin->stats.nmalloc += i;
bin->stats.nrequests += tbin->tstats.nrequests;
+ bin->stats.curregs += i;
bin->stats.nfills++;
tbin->tstats.nrequests = 0;
}
- malloc_mutex_unlock(&bin->lock);
+ malloc_mutex_unlock(tsdn, &bin->lock);
tbin->ncached = i;
+ arena_decay_tick(tsdn, arena);
}
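/*
 * This fill path runs when a thread's cache bin for a size class is empty;
 * the application can bypass or drain that cache explicitly.  A minimal
 * sketch, assuming the standard unprefixed <jemalloc/jemalloc.h> API.
 */
#include <assert.h>
#include <jemalloc/jemalloc.h>

static void
tcache_interaction_example(void) {
	/* Served from the thread cache; may trigger a fill like the above. */
	void *a = mallocx(64, 0);

	/* Skip the thread cache and allocate straight from the arena bin. */
	void *b = mallocx(64, MALLOCX_TCACHE_NONE);

	dallocx(a, 0);
	dallocx(b, MALLOCX_TCACHE_NONE);

	/* Return everything cached by this thread to its arena bins. */
	assert(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0) == 0);
}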
void
-arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
-{
-
- if (zero) {
- size_t redzone_size = bin_info->redzone_size;
- memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
- redzone_size);
- memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
- redzone_size);
- } else {
- memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
- bin_info->reg_interval);
+arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) {
+ if (!zero) {
+ memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size);
}
}
-#ifdef JEMALLOC_JET
-#undef arena_redzone_corruption
-#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl)
-#endif
-static void
-arena_redzone_corruption(void *ptr, size_t usize, bool after,
- size_t offset, uint8_t byte)
-{
-
- malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
- "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
- after ? "after" : "before", ptr, usize, byte);
-}
-#ifdef JEMALLOC_JET
-#undef arena_redzone_corruption
-#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
-arena_redzone_corruption_t *arena_redzone_corruption =
- JEMALLOC_N(arena_redzone_corruption_impl);
-#endif
-
static void
-arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
-{
- size_t size = bin_info->reg_size;
- size_t redzone_size = bin_info->redzone_size;
- size_t i;
- bool error = false;
-
- for (i = 1; i <= redzone_size; i++) {
- uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
- if (*byte != 0xa5) {
- error = true;
- arena_redzone_corruption(ptr, size, false, i, *byte);
- if (reset)
- *byte = 0xa5;
- }
- }
- for (i = 0; i < redzone_size; i++) {
- uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
- if (*byte != 0xa5) {
- error = true;
- arena_redzone_corruption(ptr, size, true, i, *byte);
- if (reset)
- *byte = 0xa5;
- }
- }
- if (opt_abort && error)
- abort();
-}
-
-#ifdef JEMALLOC_JET
-#undef arena_dalloc_junk_small
-#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl)
-#endif
-void
-arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
-{
- size_t redzone_size = bin_info->redzone_size;
-
- arena_redzones_validate(ptr, bin_info, false);
- memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
- bin_info->reg_interval);
-}
-#ifdef JEMALLOC_JET
-#undef arena_dalloc_junk_small
-#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
-arena_dalloc_junk_small_t *arena_dalloc_junk_small =
- JEMALLOC_N(arena_dalloc_junk_small_impl);
-#endif
-
-void
-arena_quarantine_junk_small(void *ptr, size_t usize)
-{
- size_t binind;
- arena_bin_info_t *bin_info;
- cassert(config_fill);
- assert(opt_junk);
- assert(opt_quarantine);
- assert(usize <= SMALL_MAXCLASS);
-
- binind = SMALL_SIZE2BIN(usize);
- bin_info = &arena_bin_info[binind];
- arena_redzones_validate(ptr, bin_info, true);
+arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) {
+ memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
}
+arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small =
+ arena_dalloc_junk_small_impl;
-void *
-arena_malloc_small(arena_t *arena, size_t size, bool zero)
-{
+static void *
+arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
void *ret;
- arena_bin_t *bin;
- arena_run_t *run;
- size_t binind;
+ bin_t *bin;
+ size_t usize;
+ extent_t *slab;
- binind = SMALL_SIZE2BIN(size);
assert(binind < NBINS);
bin = &arena->bins[binind];
- size = arena_bin_info[binind].reg_size;
+ usize = sz_index2size(binind);
- malloc_mutex_lock(&bin->lock);
- if ((run = bin->runcur) != NULL && run->nfree > 0)
- ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
- else
- ret = arena_bin_malloc_hard(arena, bin);
+ malloc_mutex_lock(tsdn, &bin->lock);
+ if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
+ ret = arena_slab_reg_alloc(slab, &bin_infos[binind]);
+ } else {
+ ret = arena_bin_malloc_hard(tsdn, arena, bin, binind);
+ }
if (ret == NULL) {
- malloc_mutex_unlock(&bin->lock);
- return (NULL);
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ return NULL;
}
if (config_stats) {
- bin->stats.allocated += size;
bin->stats.nmalloc++;
bin->stats.nrequests++;
+ bin->stats.curregs++;
+ }
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
+ prof_idump(tsdn);
}
- malloc_mutex_unlock(&bin->lock);
- if (config_prof && isthreaded == false && arena_prof_accum(arena, size))
- prof_idump();
- if (zero == false) {
+ if (!zero) {
if (config_fill) {
- if (opt_junk) {
+ if (unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret,
- &arena_bin_info[binind], false);
- } else if (opt_zero)
- memset(ret, 0, size);
+ &bin_infos[binind], false);
+ } else if (unlikely(opt_zero)) {
+ memset(ret, 0, usize);
+ }
}
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
} else {
- if (config_fill && opt_junk) {
- arena_alloc_junk_small(ret, &arena_bin_info[binind],
+ if (config_fill && unlikely(opt_junk_alloc)) {
+ arena_alloc_junk_small(ret, &bin_infos[binind],
true);
}
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
- memset(ret, 0, size);
+ memset(ret, 0, usize);
}
- return (ret);
+ arena_decay_tick(tsdn, arena);
+ return ret;
}
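/*
 * A small request is served from the bin for its size class, so the usable
 * size is the bin's region size rather than the requested size; nallocx()
 * reports that rounding without allocating.  A minimal sketch, assuming the
 * standard unprefixed <jemalloc/jemalloc.h> API and the default size classes.
 */
#include <assert.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
size_class_example(void) {
	size_t req = 100;
	size_t usize = nallocx(req, 0);		/* 112 with default classes. */

	void *p = mallocx(req, 0);
	assert(p != NULL);
	assert(malloc_usable_size(p) == usize);
	printf("request %zu -> size class %zu\n", req, usize);
	dallocx(p, 0);
}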
void *
-arena_malloc_large(arena_t *arena, size_t size, bool zero)
-{
- void *ret;
- UNUSED bool idump;
+arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
+ bool zero) {
+ assert(!tsdn_null(tsdn) || arena != NULL);
- /* Large allocation. */
- size = PAGE_CEILING(size);
- malloc_mutex_lock(&arena->lock);
- ret = (void *)arena_run_alloc_large(arena, size, zero);
- if (ret == NULL) {
- malloc_mutex_unlock(&arena->lock);
- return (NULL);
+ if (likely(!tsdn_null(tsdn))) {
+ arena = arena_choose(tsdn_tsd(tsdn), arena);
}
- if (config_stats) {
- arena->stats.nmalloc_large++;
- arena->stats.nrequests_large++;
- arena->stats.allocated_large += size;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
- }
- if (config_prof)
- idump = arena_prof_accum_locked(arena, size);
- malloc_mutex_unlock(&arena->lock);
- if (config_prof && idump)
- prof_idump();
-
- if (zero == false) {
- if (config_fill) {
- if (opt_junk)
- memset(ret, 0xa5, size);
- else if (opt_zero)
- memset(ret, 0, size);
- }
+ if (unlikely(arena == NULL)) {
+ return NULL;
}
- return (ret);
+ if (likely(size <= SMALL_MAXCLASS)) {
+ return arena_malloc_small(tsdn, arena, ind, zero);
+ }
+ return large_malloc(tsdn, arena, sz_index2size(ind), zero);
}
-/* Only handles large allocations that require more than page alignment. */
void *
-arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
-{
+arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
+ bool zero, tcache_t *tcache) {
void *ret;
- size_t alloc_size, leadsize, trailsize;
- arena_run_t *run;
- arena_chunk_t *chunk;
- assert((size & PAGE_MASK) == 0);
+ if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
+ && (usize & PAGE_MASK) == 0))) {
+ /* Small; alignment doesn't require special slab placement. */
+ ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
+ zero, tcache, true);
+ } else {
+ if (likely(alignment <= CACHELINE)) {
+ ret = large_malloc(tsdn, arena, usize, zero);
+ } else {
+ ret = large_palloc(tsdn, arena, usize, alignment, zero);
+ }
+ }
+ return ret;
+}
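/*
 * Alignment requests reach arena_palloc() via flags such as MALLOCX_ALIGN()
 * (or the posix_memalign()/aligned_alloc() wrappers): small sizes whose
 * natural slab placement already satisfies the alignment stay on the small
 * path, while stricter alignments fall through to large_palloc().  A minimal
 * sketch, assuming the standard unprefixed <jemalloc/jemalloc.h> API.
 */
#include <assert.h>
#include <stdint.h>
#include <jemalloc/jemalloc.h>

static void
aligned_alloc_example(void) {
	/* Sub-page alignment on a small size. */
	void *a = mallocx(200, MALLOCX_ALIGN(64));
	assert(a != NULL && ((uintptr_t)a & 63) == 0);

	/* Alignment larger than a page typically takes the large path. */
	void *b = mallocx(200, MALLOCX_ALIGN(8192));
	assert(b != NULL && ((uintptr_t)b & 8191) == 0);

	dallocx(a, 0);
	dallocx(b, 0);
}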
+
+void
+arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize) {
+ cassert(config_prof);
+ assert(ptr != NULL);
+ assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);
+ assert(usize <= SMALL_MAXCLASS);
- alignment = PAGE_CEILING(alignment);
- alloc_size = size + alignment - PAGE;
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
- malloc_mutex_lock(&arena->lock);
- run = arena_run_alloc_large(arena, alloc_size, false);
- if (run == NULL) {
- malloc_mutex_unlock(&arena->lock);
- return (NULL);
- }
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true);
+ arena_t *arena = extent_arena_get(extent);
- leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) -
- (uintptr_t)run;
- assert(alloc_size >= leadsize + size);
- trailsize = alloc_size - leadsize - size;
- ret = (void *)((uintptr_t)run + leadsize);
- if (leadsize != 0) {
- arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size -
- leadsize);
- }
- if (trailsize != 0) {
- arena_run_trim_tail(arena, chunk, ret, size + trailsize, size,
- false);
- }
- arena_run_init_large(arena, (arena_run_t *)ret, size, zero);
+ szind_t szind = sz_size2index(usize);
+ extent_szind_set(extent, szind);
+ rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
+ szind, false);
- if (config_stats) {
- arena->stats.nmalloc_large++;
- arena->stats.nrequests_large++;
- arena->stats.allocated_large += size;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
- }
- malloc_mutex_unlock(&arena->lock);
+ prof_accum_cancel(tsdn, &arena->prof_accum, usize);
- if (config_fill && zero == false) {
- if (opt_junk)
- memset(ret, 0xa5, size);
- else if (opt_zero)
- memset(ret, 0, size);
- }
- return (ret);
+ assert(isalloc(tsdn, ptr) == usize);
}
-void
-arena_prof_promoted(const void *ptr, size_t size)
-{
- arena_chunk_t *chunk;
- size_t pageind, binind;
-
+static size_t
+arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
cassert(config_prof);
assert(ptr != NULL);
- assert(CHUNK_ADDR2BASE(ptr) != ptr);
- assert(isalloc(ptr, false) == PAGE);
- assert(isalloc(ptr, true) == PAGE);
- assert(size <= SMALL_MAXCLASS);
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- binind = SMALL_SIZE2BIN(size);
- assert(binind < NBINS);
- arena_mapbits_large_binind_set(chunk, pageind, binind);
- assert(isalloc(ptr, false) == PAGE);
- assert(isalloc(ptr, true) == size);
+ extent_szind_set(extent, NBINS);
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+ rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
+ NBINS, false);
+
+ assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);
+
+ return LARGE_MINCLASS;
+}
+
+void
+arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
+ bool slow_path) {
+ cassert(config_prof);
+ assert(opt_prof);
+
+ extent_t *extent = iealloc(tsdn, ptr);
+ size_t usize = arena_prof_demote(tsdn, extent, ptr);
+ if (usize <= tcache_maxclass) {
+ tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
+ sz_size2index(usize), slow_path);
+ } else {
+ large_dalloc(tsdn, extent);
+ }
}
static void
-arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
- arena_bin_t *bin)
-{
-
- /* Dissociate run from bin. */
- if (run == bin->runcur)
- bin->runcur = NULL;
- else {
- size_t binind = arena_bin_index(chunk->arena, bin);
- arena_bin_info_t *bin_info = &arena_bin_info[binind];
-
- if (bin_info->nregs != 1) {
- /*
- * This block's conditional is necessary because if the
- * run only contains one region, then it never gets
- * inserted into the non-full runs tree.
- */
- arena_bin_runs_remove(bin, run);
+arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
+ /* Dissociate slab from bin. */
+ if (slab == bin->slabcur) {
+ bin->slabcur = NULL;
+ } else {
+ szind_t binind = extent_szind_get(slab);
+ const bin_info_t *bin_info = &bin_infos[binind];
+
+ /*
+ * The following block's conditional is necessary because if the
+ * slab only contains one region, then it never gets inserted
+ * into the non-full slabs heap.
+ */
+ if (bin_info->nregs == 1) {
+ arena_bin_slabs_full_remove(arena, bin, slab);
+ } else {
+ arena_bin_slabs_nonfull_remove(bin, slab);
}
}
}
static void
-arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
- arena_bin_t *bin)
-{
- size_t binind;
- arena_bin_info_t *bin_info;
- size_t npages, run_ind, past;
-
- assert(run != bin->runcur);
- assert(arena_run_tree_search(&bin->runs,
- arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE))
- == NULL);
-
- binind = arena_bin_index(chunk->arena, run->bin);
- bin_info = &arena_bin_info[binind];
-
- malloc_mutex_unlock(&bin->lock);
- /******************************/
- npages = bin_info->run_size >> LG_PAGE;
- run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
- past = (size_t)(PAGE_CEILING((uintptr_t)run +
- (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
- bin_info->reg_interval - bin_info->redzone_size) -
- (uintptr_t)chunk) >> LG_PAGE);
- malloc_mutex_lock(&arena->lock);
+arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+ bin_t *bin) {
+ assert(slab != bin->slabcur);
- /*
- * If the run was originally clean, and some pages were never touched,
- * trim the clean pages before deallocating the dirty portion of the
- * run.
- */
- assert(arena_mapbits_dirty_get(chunk, run_ind) ==
- arena_mapbits_dirty_get(chunk, run_ind+npages-1));
- if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind <
- npages) {
- /* Trim clean pages. Convert to large run beforehand. */
- assert(npages > 0);
- arena_mapbits_large_set(chunk, run_ind, bin_info->run_size, 0);
- arena_mapbits_large_set(chunk, run_ind+npages-1, 0, 0);
- arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
- ((past - run_ind) << LG_PAGE), false);
- /* npages = past - run_ind; */
- }
- arena_run_dalloc(arena, run, true, false);
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ /******************************/
+ arena_slab_dalloc(tsdn, arena, slab);
/****************************/
- malloc_mutex_lock(&bin->lock);
- if (config_stats)
- bin->stats.curruns--;
+ malloc_mutex_lock(tsdn, &bin->lock);
+ if (config_stats) {
+ bin->stats.curslabs--;
+ }
}
static void
-arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
- arena_bin_t *bin)
-{
+arena_bin_lower_slab(UNUSED tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+ bin_t *bin) {
+ assert(extent_nfree_get(slab) > 0);
/*
- * Make sure that if bin->runcur is non-NULL, it refers to the lowest
- * non-full run. It is okay to NULL runcur out rather than proactively
- * keeping it pointing at the lowest non-full run.
+ * Make sure that if bin->slabcur is non-NULL, it refers to the
+ * oldest/lowest non-full slab. It is okay to NULL slabcur out rather
+ * than proactively keeping it pointing at the oldest/lowest non-full
+ * slab.
*/
- if ((uintptr_t)run < (uintptr_t)bin->runcur) {
- /* Switch runcur. */
- if (bin->runcur->nfree > 0)
- arena_bin_runs_insert(bin, bin->runcur);
- bin->runcur = run;
- if (config_stats)
- bin->stats.reruns++;
- } else
- arena_bin_runs_insert(bin, run);
+ if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) {
+ /* Switch slabcur. */
+ if (extent_nfree_get(bin->slabcur) > 0) {
+ arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
+ } else {
+ arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
+ }
+ bin->slabcur = slab;
+ if (config_stats) {
+ bin->stats.reslabs++;
+ }
+ } else {
+ arena_bin_slabs_nonfull_insert(bin, slab);
+ }
}
-void
-arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- arena_chunk_map_t *mapelm)
-{
- size_t pageind;
- arena_run_t *run;
- arena_bin_t *bin;
- arena_bin_info_t *bin_info;
- size_t size, binind;
-
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
- arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
- bin = run->bin;
- binind = arena_ptr_small_binind_get(ptr, mapelm->bits);
- bin_info = &arena_bin_info[binind];
- if (config_fill || config_stats)
- size = bin_info->reg_size;
-
- if (config_fill && opt_junk)
+static void
+arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+ void *ptr, bool junked) {
+ arena_slab_data_t *slab_data = extent_slab_data_get(slab);
+ szind_t binind = extent_szind_get(slab);
+ bin_t *bin = &arena->bins[binind];
+ const bin_info_t *bin_info = &bin_infos[binind];
+
+ if (!junked && config_fill && unlikely(opt_junk_free)) {
arena_dalloc_junk_small(ptr, bin_info);
+ }
- arena_run_reg_dalloc(run, ptr);
- if (run->nfree == bin_info->nregs) {
- arena_dissociate_bin_run(chunk, run, bin);
- arena_dalloc_bin_run(arena, chunk, run, bin);
- } else if (run->nfree == 1 && run != bin->runcur)
- arena_bin_lower_run(arena, chunk, run, bin);
+ arena_slab_reg_dalloc(slab, slab_data, ptr);
+ unsigned nfree = extent_nfree_get(slab);
+ if (nfree == bin_info->nregs) {
+ arena_dissociate_bin_slab(arena, slab, bin);
+ arena_dalloc_bin_slab(tsdn, arena, slab, bin);
+ } else if (nfree == 1 && slab != bin->slabcur) {
+ arena_bin_slabs_full_remove(arena, bin, slab);
+ arena_bin_lower_slab(tsdn, arena, slab, bin);
+ }
if (config_stats) {
- bin->stats.allocated -= size;
bin->stats.ndalloc++;
+ bin->stats.curregs--;
}
}
void
-arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- size_t pageind, arena_chunk_map_t *mapelm)
-{
- arena_run_t *run;
- arena_bin_t *bin;
-
- run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
- arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
- bin = run->bin;
- malloc_mutex_lock(&bin->lock);
- arena_dalloc_bin_locked(arena, chunk, ptr, mapelm);
- malloc_mutex_unlock(&bin->lock);
+arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+ void *ptr) {
+ arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, true);
}
-void
-arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- size_t pageind)
-{
- arena_chunk_map_t *mapelm;
+static void
+arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
+ szind_t binind = extent_szind_get(extent);
+ bin_t *bin = &arena->bins[binind];
- if (config_debug) {
- /* arena_ptr_small_binind_get() does extra sanity checking. */
- assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
- pageind)) != BININD_INVALID);
- }
- mapelm = arena_mapp_get(chunk, pageind);
- arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
+ malloc_mutex_lock(tsdn, &bin->lock);
+ arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, false);
+ malloc_mutex_unlock(tsdn, &bin->lock);
}
-#ifdef JEMALLOC_JET
-#undef arena_dalloc_junk_large
-#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl)
-#endif
-static void
-arena_dalloc_junk_large(void *ptr, size_t usize)
-{
+void
+arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
+ extent_t *extent = iealloc(tsdn, ptr);
+ arena_t *arena = extent_arena_get(extent);
- if (config_fill && opt_junk)
- memset(ptr, 0x5a, usize);
+ arena_dalloc_bin(tsdn, arena, extent, ptr);
+ arena_decay_tick(tsdn, arena);
}
-#ifdef JEMALLOC_JET
-#undef arena_dalloc_junk_large
-#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
-arena_dalloc_junk_large_t *arena_dalloc_junk_large =
- JEMALLOC_N(arena_dalloc_junk_large_impl);
-#endif
-void
-arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
-{
+bool
+arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
+ size_t extra, bool zero) {
+ /* Calls with non-zero extra had to clamp extra. */
+ assert(extra == 0 || size + extra <= LARGE_MAXCLASS);
- if (config_fill || config_stats) {
- size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- size_t usize = arena_mapbits_large_size_get(chunk, pageind);
+ if (unlikely(size > LARGE_MAXCLASS)) {
+ return true;
+ }
- arena_dalloc_junk_large(ptr, usize);
- if (config_stats) {
- arena->stats.ndalloc_large++;
- arena->stats.allocated_large -= usize;
- arena->stats.lstats[(usize >> LG_PAGE) - 1].ndalloc++;
- arena->stats.lstats[(usize >> LG_PAGE) - 1].curruns--;
+ extent_t *extent = iealloc(tsdn, ptr);
+ size_t usize_min = sz_s2u(size);
+ size_t usize_max = sz_s2u(size + extra);
+ if (likely(oldsize <= SMALL_MAXCLASS && usize_min <= SMALL_MAXCLASS)) {
+ /*
+ * Avoid moving the allocation if the size class can be left the
+ * same.
+ */
+ assert(bin_infos[sz_size2index(oldsize)].reg_size ==
+ oldsize);
+ if ((usize_max > SMALL_MAXCLASS || sz_size2index(usize_max) !=
+ sz_size2index(oldsize)) && (size > oldsize || usize_max <
+ oldsize)) {
+ return true;
}
+
+ arena_decay_tick(tsdn, extent_arena_get(extent));
+ return false;
+ } else if (oldsize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS) {
+ return large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
+ zero);
}
- arena_run_dalloc(arena, (arena_run_t *)ptr, true, false);
+ return true;
}
-void
-arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
-{
-
- malloc_mutex_lock(&arena->lock);
- arena_dalloc_large_locked(arena, chunk, ptr);
- malloc_mutex_unlock(&arena->lock);
+static void *
+arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
+ size_t alignment, bool zero, tcache_t *tcache) {
+ if (alignment == 0) {
+ return arena_malloc(tsdn, arena, usize, sz_size2index(usize),
+ zero, tcache, true);
+ }
+ usize = sz_sa2u(usize, alignment);
+ if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
+ return NULL;
+ }
+ return ipalloct(tsdn, usize, alignment, zero, tcache, arena);
}
-static void
-arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- size_t oldsize, size_t size)
-{
+void *
+arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
+ size_t size, size_t alignment, bool zero, tcache_t *tcache) {
+ size_t usize = sz_s2u(size);
+ if (unlikely(usize == 0 || size > LARGE_MAXCLASS)) {
+ return NULL;
+ }
- assert(size < oldsize);
+ if (likely(usize <= SMALL_MAXCLASS)) {
+ /* Try to avoid moving the allocation. */
+ if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero)) {
+ return ptr;
+ }
+ }
+
+ if (oldsize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS) {
+ return large_ralloc(tsdn, arena, iealloc(tsdn, ptr), usize,
+ alignment, zero, tcache);
+ }
/*
- * Shrink the run, and make trailing pages available for other
- * allocations.
+ * size and oldsize are different enough that we need to move the
+ * object. In that case, fall back to allocating new space and copying.
*/
- malloc_mutex_lock(&arena->lock);
- arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
- true);
- if (config_stats) {
- arena->stats.ndalloc_large++;
- arena->stats.allocated_large -= oldsize;
- arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
- arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
-
- arena->stats.nmalloc_large++;
- arena->stats.nrequests_large++;
- arena->stats.allocated_large += size;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
+ void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment,
+ zero, tcache);
+ if (ret == NULL) {
+ return NULL;
}
- malloc_mutex_unlock(&arena->lock);
-}
-static bool
-arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- size_t oldsize, size_t size, size_t extra, bool zero)
-{
- size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- size_t npages = oldsize >> LG_PAGE;
- size_t followsize;
-
- assert(oldsize == arena_mapbits_large_size_get(chunk, pageind));
-
- /* Try to extend the run. */
- assert(size + extra > oldsize);
- malloc_mutex_lock(&arena->lock);
- if (pageind + npages < chunk_npages &&
- arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
- (followsize = arena_mapbits_unallocated_size_get(chunk,
- pageind+npages)) >= size - oldsize) {
- /*
- * The next run is available and sufficiently large. Split the
- * following run, then merge the first part with the existing
- * allocation.
- */
- size_t flag_dirty;
- size_t splitsize = (oldsize + followsize <= size + extra)
- ? followsize : size + extra - oldsize;
- arena_run_split_large(arena, (arena_run_t *)((uintptr_t)chunk +
- ((pageind+npages) << LG_PAGE)), splitsize, zero);
+ /*
+ * Junk/zero-filling were already done by
+ * ipalloc()/arena_malloc().
+ */
- size = oldsize + splitsize;
- npages = size >> LG_PAGE;
+ size_t copysize = (usize < oldsize) ? usize : oldsize;
+ memcpy(ret, ptr, copysize);
+ isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
+ return ret;
+}
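/*
 * When no in-place option exists, arena_ralloc() allocates a new object,
 * copies min(usize, oldsize) bytes, and frees the original; that is the path
 * rallocx() takes when the size classes differ too much.  A minimal usage
 * sketch, assuming the standard unprefixed <jemalloc/jemalloc.h> API.
 */
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static void *
grow_buffer_example(void *buf, size_t new_size) {
	/* May return buf itself (no-move path) or a freshly copied object. */
	void *grown = rallocx(buf, new_size, 0);

	if (grown == NULL) {
		return NULL;		/* buf is still valid on failure. */
	}
	return grown;
}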
- /*
- * Mark the extended run as dirty if either portion of the run
- * was dirty before allocation. This is rather pedantic,
- * because there's not actually any sequence of events that
- * could cause the resulting run to be passed to
- * arena_run_dalloc() with the dirty argument set to false
- * (which is when dirty flag consistency would really matter).
- */
- flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
- arena_mapbits_dirty_get(chunk, pageind+npages-1);
- arena_mapbits_large_set(chunk, pageind, size, flag_dirty);
- arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty);
+dss_prec_t
+arena_dss_prec_get(arena_t *arena) {
+ return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE);
+}
- if (config_stats) {
- arena->stats.ndalloc_large++;
- arena->stats.allocated_large -= oldsize;
- arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
- arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
-
- arena->stats.nmalloc_large++;
- arena->stats.nrequests_large++;
- arena->stats.allocated_large += size;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
- }
- malloc_mutex_unlock(&arena->lock);
- return (false);
+bool
+arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) {
+ if (!have_dss) {
+ return (dss_prec != dss_prec_disabled);
}
- malloc_mutex_unlock(&arena->lock);
-
- return (true);
+ atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE);
+ return false;
}
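/*
 * The precedence stored here backs the per-arena "arena.<i>.dss" mallctl
 * ("disabled", "primary", or "secondary"); on builds without sbrk support,
 * arena_dss_prec_set() accepts only "disabled".  A minimal sketch, assuming
 * the standard unprefixed <jemalloc/jemalloc.h> API.
 */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
dss_prec_example(unsigned arena_ind) {
	char cmd[64];
	const char *prec = "primary";	/* Prefer sbrk over mmap. */

	snprintf(cmd, sizeof(cmd), "arena.%u.dss", arena_ind);
	if (mallctl(cmd, NULL, NULL, &prec, sizeof(prec)) != 0) {
		fprintf(stderr, "dss precedence not supported here\n");
	}
}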
-#ifdef JEMALLOC_JET
-#undef arena_ralloc_junk_large
-#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl)
-#endif
-static void
-arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
-{
+ssize_t
+arena_dirty_decay_ms_default_get(void) {
+ return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED);
+}
- if (config_fill && opt_junk) {
- memset((void *)((uintptr_t)ptr + usize), 0x5a,
- old_usize - usize);
+bool
+arena_dirty_decay_ms_default_set(ssize_t decay_ms) {
+ if (!arena_decay_ms_valid(decay_ms)) {
+ return true;
}
+ atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED);
+ return false;
}
-#ifdef JEMALLOC_JET
-#undef arena_ralloc_junk_large
-#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
-arena_ralloc_junk_large_t *arena_ralloc_junk_large =
- JEMALLOC_N(arena_ralloc_junk_large_impl);
-#endif
-/*
- * Try to resize a large allocation, in order to avoid copying. This will
- * always fail if growing an object, and the following run is already in use.
- */
-static bool
-arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
- bool zero)
-{
- size_t psize;
-
- psize = PAGE_CEILING(size + extra);
- if (psize == oldsize) {
- /* Same size class. */
- return (false);
- } else {
- arena_chunk_t *chunk;
- arena_t *arena;
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- arena = chunk->arena;
-
- if (psize < oldsize) {
-			/* Fill before shrinking in order to avoid a race. */
- arena_ralloc_junk_large(ptr, oldsize, psize);
- arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
- psize);
- return (false);
- } else {
- bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
- oldsize, PAGE_CEILING(size),
- psize - PAGE_CEILING(size), zero);
- if (config_fill && ret == false && zero == false) {
- if (opt_junk) {
- memset((void *)((uintptr_t)ptr +
- oldsize), 0xa5, isalloc(ptr,
- config_prof) - oldsize);
- } else if (opt_zero) {
- memset((void *)((uintptr_t)ptr +
- oldsize), 0, isalloc(ptr,
- config_prof) - oldsize);
- }
- }
- return (ret);
- }
- }
+ssize_t
+arena_muzzy_decay_ms_default_get(void) {
+ return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED);
}
bool
-arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
- bool zero)
-{
-
- /*
- * Avoid moving the allocation if the size class can be left the same.
- */
- if (oldsize <= arena_maxclass) {
- if (oldsize <= SMALL_MAXCLASS) {
- assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size
- == oldsize);
- if ((size + extra <= SMALL_MAXCLASS &&
- SMALL_SIZE2BIN(size + extra) ==
- SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
- size + extra >= oldsize))
- return (false);
- } else {
- assert(size <= arena_maxclass);
- if (size + extra > SMALL_MAXCLASS) {
- if (arena_ralloc_large(ptr, oldsize, size,
- extra, zero) == false)
- return (false);
- }
- }
+arena_muzzy_decay_ms_default_set(ssize_t decay_ms) {
+ if (!arena_decay_ms_valid(decay_ms)) {
+ return true;
}
-
- /* Reallocation would require a move. */
- return (true);
+ atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED);
+ return false;
}
-void *
-arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
- size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
- bool try_tcache_dalloc)
-{
- void *ret;
- size_t copysize;
-
- /* Try to avoid moving the allocation. */
- if (arena_ralloc_no_move(ptr, oldsize, size, extra, zero) == false)
- return (ptr);
-
- /*
- * size and oldsize are different enough that we need to move the
- * object. In that case, fall back to allocating new space and
- * copying.
- */
- if (alignment != 0) {
- size_t usize = sa2u(size + extra, alignment);
- if (usize == 0)
- return (NULL);
- ret = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
- } else
- ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc);
-
- if (ret == NULL) {
- if (extra == 0)
- return (NULL);
- /* Try again, this time without extra. */
- if (alignment != 0) {
- size_t usize = sa2u(size, alignment);
- if (usize == 0)
- return (NULL);
- ret = ipalloct(usize, alignment, zero, try_tcache_alloc,
- arena);
- } else
- ret = arena_malloc(arena, size, zero, try_tcache_alloc);
-
- if (ret == NULL)
- return (NULL);
+bool
+arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit,
+ size_t *new_limit) {
+ assert(opt_retain);
+
+ pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0);
+ if (new_limit != NULL) {
+ size_t limit = *new_limit;
+ /* Grow no more than the new limit. */
+ if ((new_ind = sz_psz2ind(limit + 1) - 1) >
+ EXTENT_GROW_MAX_PIND) {
+ return true;
+ }
}
- /* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */
+ malloc_mutex_lock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
+ if (old_limit != NULL) {
+ *old_limit = sz_pind2sz(arena->retain_grow_limit);
+ }
+ if (new_limit != NULL) {
+ arena->retain_grow_limit = new_ind;
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
- /*
- * Copy at most size bytes (not size+extra), since the caller has no
- * expectation that the extra bytes will be reliably preserved.
- */
- copysize = (size < oldsize) ? size : oldsize;
- VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
- memcpy(ret, ptr, copysize);
- iqalloct(ptr, try_tcache_dalloc);
- return (ret);
+ return false;
}
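arena_retain_grow_limit_get_set() quantizes the requested limit to a page-size class (sz_psz2ind(limit + 1) - 1), so the getter reports the rounded value via sz_pind2sz(), and it asserts opt_retain, so the knob is only meaningful when opt.retain is enabled. A minimal sketch of driving it from application code, assuming the "arena.<i>.retain_grow_limit" mallctl name and an unprefixed build:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
	/* Cap extent growth for arena 0 at 64 MiB and read back the old,
	 * size-class-quantized limit. */
	size_t new_limit = (size_t)64 << 20;
	size_t old_limit;
	size_t old_len = sizeof(old_limit);

	if (mallctl("arena.0.retain_grow_limit", &old_limit, &old_len,
	    &new_limit, sizeof(new_limit)) != 0) {
		fprintf(stderr, "retain_grow_limit not available\n");
		return 1;
	}
	printf("previous limit: %zu bytes\n", old_limit);
	return 0;
}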
-dss_prec_t
-arena_dss_prec_get(arena_t *arena)
-{
- dss_prec_t ret;
+unsigned
+arena_nthreads_get(arena_t *arena, bool internal) {
+ return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED);
+}
- malloc_mutex_lock(&arena->lock);
- ret = arena->dss_prec;
- malloc_mutex_unlock(&arena->lock);
- return (ret);
+void
+arena_nthreads_inc(arena_t *arena, bool internal) {
+ atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
}
void
-arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
-{
+arena_nthreads_dec(arena_t *arena, bool internal) {
+ atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
+}
- malloc_mutex_lock(&arena->lock);
- arena->dss_prec = dss_prec;
- malloc_mutex_unlock(&arena->lock);
+size_t
+arena_extent_sn_next(arena_t *arena) {
+ return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED);
}
-void
-arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
- size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
- malloc_large_stats_t *lstats)
-{
+arena_t *
+arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
+ arena_t *arena;
+ base_t *base;
unsigned i;
- malloc_mutex_lock(&arena->lock);
- *dss = dss_prec_names[arena->dss_prec];
- *nactive += arena->nactive;
- *ndirty += arena->ndirty;
+ if (ind == 0) {
+ base = b0get();
+ } else {
+ base = base_new(tsdn, ind, extent_hooks);
+ if (base == NULL) {
+ return NULL;
+ }
+ }
+
+ arena = (arena_t *)base_alloc(tsdn, base, sizeof(arena_t), CACHELINE);
+ if (arena == NULL) {
+ goto label_error;
+ }
- astats->mapped += arena->stats.mapped;
- astats->npurge += arena->stats.npurge;
- astats->nmadvise += arena->stats.nmadvise;
- astats->purged += arena->stats.purged;
- astats->allocated_large += arena->stats.allocated_large;
- astats->nmalloc_large += arena->stats.nmalloc_large;
- astats->ndalloc_large += arena->stats.ndalloc_large;
- astats->nrequests_large += arena->stats.nrequests_large;
+ atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
+ atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
+ arena->last_thd = NULL;
- for (i = 0; i < nlclasses; i++) {
- lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
- lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
- lstats[i].nrequests += arena->stats.lstats[i].nrequests;
- lstats[i].curruns += arena->stats.lstats[i].curruns;
+ if (config_stats) {
+ if (arena_stats_init(tsdn, &arena->stats)) {
+ goto label_error;
+ }
+
+ ql_new(&arena->tcache_ql);
+ ql_new(&arena->cache_bin_array_descriptor_ql);
+ if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql",
+ WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) {
+ goto label_error;
+ }
}
- malloc_mutex_unlock(&arena->lock);
- for (i = 0; i < NBINS; i++) {
- arena_bin_t *bin = &arena->bins[i];
-
- malloc_mutex_lock(&bin->lock);
- bstats[i].allocated += bin->stats.allocated;
- bstats[i].nmalloc += bin->stats.nmalloc;
- bstats[i].ndalloc += bin->stats.ndalloc;
- bstats[i].nrequests += bin->stats.nrequests;
- if (config_tcache) {
- bstats[i].nfills += bin->stats.nfills;
- bstats[i].nflushes += bin->stats.nflushes;
+ if (config_prof) {
+ if (prof_accum_init(tsdn, &arena->prof_accum)) {
+ goto label_error;
}
- bstats[i].nruns += bin->stats.nruns;
- bstats[i].reruns += bin->stats.reruns;
- bstats[i].curruns += bin->stats.curruns;
- malloc_mutex_unlock(&bin->lock);
}
-}
-bool
-arena_new(arena_t *arena, unsigned ind)
-{
- unsigned i;
- arena_bin_t *bin;
+ if (config_cache_oblivious) {
+ /*
+ * A nondeterministic seed based on the address of arena reduces
+ * the likelihood of lockstep non-uniform cache index
+ * utilization among identical concurrent processes, but at the
+ * cost of test repeatability. For debug builds, instead use a
+ * deterministic seed.
+ */
+ atomic_store_zu(&arena->offset_state, config_debug ? ind :
+ (size_t)(uintptr_t)arena, ATOMIC_RELAXED);
+ }
- arena->ind = ind;
- arena->nthreads = 0;
+ atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED);
- if (malloc_mutex_init(&arena->lock))
- return (true);
+ atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
+ ATOMIC_RELAXED);
- if (config_stats) {
- memset(&arena->stats, 0, sizeof(arena_stats_t));
- arena->stats.lstats =
- (malloc_large_stats_t *)base_alloc(nlclasses *
- sizeof(malloc_large_stats_t));
- if (arena->stats.lstats == NULL)
- return (true);
- memset(arena->stats.lstats, 0, nlclasses *
- sizeof(malloc_large_stats_t));
- if (config_tcache)
- ql_new(&arena->tcache_ql);
- }
+ atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
- if (config_prof)
- arena->prof_accumbytes = 0;
+ extent_list_init(&arena->large);
+ if (malloc_mutex_init(&arena->large_mtx, "arena_large",
+ WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
+ goto label_error;
+ }
- arena->dss_prec = chunk_dss_prec_get();
+ /*
+ * Delay coalescing for dirty extents despite the disruptive effect on
+ * memory layout for best-fit extent allocation, since cached extents
+ * are likely to be reused soon after deallocation, and the cost of
+ * merging/splitting extents is non-trivial.
+ */
+ if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty,
+ true)) {
+ goto label_error;
+ }
+ /*
+ * Coalesce muzzy extents immediately, because operations on them are in
+ * the critical path much less often than for dirty extents.
+ */
+ if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy,
+ false)) {
+ goto label_error;
+ }
+ /*
+ * Coalesce retained extents immediately, in part because they will
+ * never be evicted (and therefore there's no opportunity for delayed
+ * coalescing), but also because operations on retained extents are not
+ * in the critical path.
+ */
+ if (extents_init(tsdn, &arena->extents_retained, extent_state_retained,
+ false)) {
+ goto label_error;
+ }
- /* Initialize chunks. */
- arena_chunk_dirty_new(&arena->chunks_dirty);
- arena->spare = NULL;
+ if (arena_decay_init(&arena->decay_dirty,
+ arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) {
+ goto label_error;
+ }
+ if (arena_decay_init(&arena->decay_muzzy,
+ arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) {
+ goto label_error;
+ }
- arena->nactive = 0;
- arena->ndirty = 0;
- arena->npurgatory = 0;
+ arena->extent_grow_next = sz_psz2ind(HUGEPAGE);
+ arena->retain_grow_limit = EXTENT_GROW_MAX_PIND;
+ if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow",
+ WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
+ goto label_error;
+ }
- arena_avail_tree_new(&arena->runs_avail);
+ extent_avail_new(&arena->extent_avail);
+ if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail",
+ WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) {
+ goto label_error;
+ }
/* Initialize bins. */
for (i = 0; i < NBINS; i++) {
- bin = &arena->bins[i];
- if (malloc_mutex_init(&bin->lock))
- return (true);
- bin->runcur = NULL;
- arena_run_tree_new(&bin->runs);
- if (config_stats)
- memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
+ bool err = bin_init(&arena->bins[i]);
+ if (err) {
+ goto label_error;
+ }
}
- return (false);
-}
-
-/*
- * Calculate bin_info->run_size such that it meets the following constraints:
- *
- * *) bin_info->run_size >= min_run_size
- * *) bin_info->run_size <= arena_maxclass
- * *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
- * *) bin_info->nregs <= RUN_MAXREGS
- *
- * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also
- * calculated here, since these settings are all interdependent.
- */
-static size_t
-bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
-{
- size_t pad_size;
- size_t try_run_size, good_run_size;
- uint32_t try_nregs, good_nregs;
- uint32_t try_hdr_size, good_hdr_size;
- uint32_t try_bitmap_offset, good_bitmap_offset;
- uint32_t try_ctx0_offset, good_ctx0_offset;
- uint32_t try_redzone0_offset, good_redzone0_offset;
-
- assert(min_run_size >= PAGE);
- assert(min_run_size <= arena_maxclass);
+ arena->base = base;
+ /* Set arena before creating background threads. */
+ arena_set(ind, arena);
- /*
- * Determine redzone size based on minimum alignment and minimum
- * redzone size. Add padding to the end of the run if it is needed to
- * align the regions. The padding allows each redzone to be half the
- * minimum alignment; without the padding, each redzone would have to
- * be twice as large in order to maintain alignment.
- */
- if (config_fill && opt_redzone) {
- size_t align_min = ZU(1) << (ffs(bin_info->reg_size) - 1);
- if (align_min <= REDZONE_MINSIZE) {
- bin_info->redzone_size = REDZONE_MINSIZE;
- pad_size = 0;
- } else {
- bin_info->redzone_size = align_min >> 1;
- pad_size = bin_info->redzone_size;
- }
- } else {
- bin_info->redzone_size = 0;
- pad_size = 0;
- }
- bin_info->reg_interval = bin_info->reg_size +
- (bin_info->redzone_size << 1);
+ nstime_init(&arena->create_time, 0);
+ nstime_update(&arena->create_time);
- /*
- * Calculate known-valid settings before entering the run_size
- * expansion loop, so that the first part of the loop always copies
- * valid settings.
- *
- * The do..while loop iteratively reduces the number of regions until
- * the run header and the regions no longer overlap. A closed formula
- * would be quite messy, since there is an interdependency between the
- * header's mask length and the number of regions.
- */
- try_run_size = min_run_size;
- try_nregs = ((try_run_size - sizeof(arena_run_t)) /
- bin_info->reg_interval)
- + 1; /* Counter-act try_nregs-- in loop. */
- if (try_nregs > RUN_MAXREGS) {
- try_nregs = RUN_MAXREGS
- + 1; /* Counter-act try_nregs-- in loop. */
- }
- do {
- try_nregs--;
- try_hdr_size = sizeof(arena_run_t);
- /* Pad to a long boundary. */
- try_hdr_size = LONG_CEILING(try_hdr_size);
- try_bitmap_offset = try_hdr_size;
- /* Add space for bitmap. */
- try_hdr_size += bitmap_size(try_nregs);
- if (config_prof && opt_prof && prof_promote == false) {
- /* Pad to a quantum boundary. */
- try_hdr_size = QUANTUM_CEILING(try_hdr_size);
- try_ctx0_offset = try_hdr_size;
- /* Add space for one (prof_ctx_t *) per region. */
- try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
- } else
- try_ctx0_offset = 0;
- try_redzone0_offset = try_run_size - (try_nregs *
- bin_info->reg_interval) - pad_size;
- } while (try_hdr_size > try_redzone0_offset);
-
- /* run_size expansion loop. */
- do {
+ /* We don't support reentrancy for arena 0 bootstrapping. */
+ if (ind != 0) {
/*
- * Copy valid settings before trying more aggressive settings.
+ * If we're here, then arena 0 already exists, so bootstrapping
+ * is done enough that we should have tsd.
*/
- good_run_size = try_run_size;
- good_nregs = try_nregs;
- good_hdr_size = try_hdr_size;
- good_bitmap_offset = try_bitmap_offset;
- good_ctx0_offset = try_ctx0_offset;
- good_redzone0_offset = try_redzone0_offset;
-
- /* Try more aggressive settings. */
- try_run_size += PAGE;
- try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) /
- bin_info->reg_interval)
- + 1; /* Counter-act try_nregs-- in loop. */
- if (try_nregs > RUN_MAXREGS) {
- try_nregs = RUN_MAXREGS
- + 1; /* Counter-act try_nregs-- in loop. */
+ assert(!tsdn_null(tsdn));
+ pre_reentrancy(tsdn_tsd(tsdn), arena);
+ if (hooks_arena_new_hook) {
+ hooks_arena_new_hook();
}
- do {
- try_nregs--;
- try_hdr_size = sizeof(arena_run_t);
- /* Pad to a long boundary. */
- try_hdr_size = LONG_CEILING(try_hdr_size);
- try_bitmap_offset = try_hdr_size;
- /* Add space for bitmap. */
- try_hdr_size += bitmap_size(try_nregs);
- if (config_prof && opt_prof && prof_promote == false) {
- /* Pad to a quantum boundary. */
- try_hdr_size = QUANTUM_CEILING(try_hdr_size);
- try_ctx0_offset = try_hdr_size;
- /*
- * Add space for one (prof_ctx_t *) per region.
- */
- try_hdr_size += try_nregs *
- sizeof(prof_ctx_t *);
- }
- try_redzone0_offset = try_run_size - (try_nregs *
- bin_info->reg_interval) - pad_size;
- } while (try_hdr_size > try_redzone0_offset);
- } while (try_run_size <= arena_maxclass
- && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) >
- RUN_MAX_OVRHD_RELAX
- && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
- && try_nregs < RUN_MAXREGS);
-
- assert(good_hdr_size <= good_redzone0_offset);
-
- /* Copy final settings. */
- bin_info->run_size = good_run_size;
- bin_info->nregs = good_nregs;
- bin_info->bitmap_offset = good_bitmap_offset;
- bin_info->ctx0_offset = good_ctx0_offset;
- bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size;
-
- assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
- * bin_info->reg_interval) + pad_size == bin_info->run_size);
+ post_reentrancy(tsdn_tsd(tsdn));
+ }
- return (good_run_size);
+ return arena;
+label_error:
+ if (ind != 0) {
+ base_delete(tsdn, base);
+ }
+ return NULL;
}
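arena_new() is normally reached through the "arenas.create" mallctl, which returns the new arena's index; allocations can then be pinned to that arena with MALLOCX_ARENA(). A minimal sketch, again assuming an unprefixed build:

#include <jemalloc/jemalloc.h>

int main(void) {
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);

	/* Creates an arena with the default extent hooks; the index of the
	 * new arena is returned through oldp. */
	if (mallctl("arenas.create", &arena_ind, &sz, NULL, 0) != 0) {
		return 1;
	}

	/* Allocate from that arena, bypassing the thread cache so the
	 * request is served by the arena itself. */
	void *p = mallocx(4096, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
	if (p == NULL) {
		return 1;
	}
	dallocx(p, MALLOCX_TCACHE_NONE);
	return 0;
}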
-static void
-bin_info_init(void)
-{
- arena_bin_info_t *bin_info;
- size_t prev_run_size = PAGE;
-
-#define SIZE_CLASS(bin, delta, size) \
- bin_info = &arena_bin_info[bin]; \
- bin_info->reg_size = size; \
- prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\
- bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
+void
+arena_boot(void) {
+ arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
+ arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
+#define REGIND_bin_yes(index, reg_size) \
+ div_init(&arena_binind_div_info[(index)], (reg_size));
+#define REGIND_bin_no(index, reg_size)
+#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \
+ lg_delta_lookup) \
+ REGIND_bin_##bin(index, (1U<<lg_grp) + (ndelta << lg_delta))
SIZE_CLASSES
-#undef SIZE_CLASS
+#undef REGIND_bin_yes
+#undef REGIND_bin_no
+#undef SC
}
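The REGIND_bin_yes expansion above precomputes a magic-number divisor per small size class so that slab region indices can be derived without an integer divide. A minimal sketch of the intended use, with div_compute() from jemalloc/internal/div.h and names assumed to mirror arena_slab_regind() elsewhere in this file:

static inline size_t
slab_regind_sketch(szind_t binind, void *slab_addr, const void *ptr) {
	/* Byte offset of ptr within the slab... */
	size_t diff = (size_t)((uintptr_t)ptr - (uintptr_t)slab_addr);
	/* ...divided by the bin's region size via the precomputed divisor. */
	return div_compute(&arena_binind_div_info[binind], diff);
}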
void
-arena_boot(void)
-{
- size_t header_size;
- unsigned i;
+arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
+ malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx);
+ malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx);
+}
- /*
- * Compute the header size such that it is large enough to contain the
- * page map. The page map is biased to omit entries for the header
- * itself, so some iteration is necessary to compute the map bias.
- *
- * 1) Compute safe header_size and map_bias values that include enough
- * space for an unbiased page map.
- * 2) Refine map_bias based on (1) to omit the header pages in the page
- * map. The resulting map_bias may be one too small.
- * 3) Refine map_bias based on (2). The result will be >= the result
- * from (2), and will always be correct.
- */
- map_bias = 0;
- for (i = 0; i < 3; i++) {
- header_size = offsetof(arena_chunk_t, map) +
- (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
- map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK)
- != 0);
+void
+arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
+ if (config_stats) {
+ malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx);
}
- assert(map_bias > 0);
+}
- arena_maxclass = chunksize - (map_bias << LG_PAGE);
+void
+arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
+ malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx);
+}
- bin_info_init();
+void
+arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
+ extents_prefork(tsdn, &arena->extents_dirty);
+ extents_prefork(tsdn, &arena->extents_muzzy);
+ extents_prefork(tsdn, &arena->extents_retained);
}
void
-arena_prefork(arena_t *arena)
-{
- unsigned i;
+arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
+ malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx);
+}
- malloc_mutex_prefork(&arena->lock);
- for (i = 0; i < NBINS; i++)
- malloc_mutex_prefork(&arena->bins[i].lock);
+void
+arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
+ base_prefork(tsdn, arena->base);
}
void
-arena_postfork_parent(arena_t *arena)
-{
+arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
+ malloc_mutex_prefork(tsdn, &arena->large_mtx);
+}
+
+void
+arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
+ for (unsigned i = 0; i < NBINS; i++) {
+ bin_prefork(tsdn, &arena->bins[i]);
+ }
+}
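The numbered prefork phases encode a fixed lock-acquisition order (decay, stats, extent_grow, extents, extent_avail, base, large, bins) so that fork() never snapshots an arena mutex held by another thread; the postfork hooks below then release or reinitialize those mutexes. A generic sketch of the same pattern with stand-in locks (not jemalloc's), registered much as jemalloc does on platforms with pthread_atfork():

#include <pthread.h>

static pthread_mutex_t decay_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t bin_lock = PTHREAD_MUTEX_INITIALIZER;

static void
prefork(void) {
	/* Acquire every lock, in the fixed phase order, before fork(). */
	pthread_mutex_lock(&decay_lock);
	pthread_mutex_lock(&bin_lock);
}

static void
postfork_parent(void) {
	pthread_mutex_unlock(&bin_lock);
	pthread_mutex_unlock(&decay_lock);
}

static void
postfork_child(void) {
	/* The child is single-threaded; jemalloc reinitializes its mutexes
	 * here rather than merely unlocking them. */
	pthread_mutex_unlock(&bin_lock);
	pthread_mutex_unlock(&decay_lock);
}

void
install_fork_handlers(void) {	/* call once during process init */
	pthread_atfork(prefork, postfork_parent, postfork_child);
}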
+
+void
+arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
unsigned i;
- for (i = 0; i < NBINS; i++)
- malloc_mutex_postfork_parent(&arena->bins[i].lock);
- malloc_mutex_postfork_parent(&arena->lock);
+ for (i = 0; i < NBINS; i++) {
+ bin_postfork_parent(tsdn, &arena->bins[i]);
+ }
+ malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
+ base_postfork_parent(tsdn, arena->base);
+ malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx);
+ extents_postfork_parent(tsdn, &arena->extents_dirty);
+ extents_postfork_parent(tsdn, &arena->extents_muzzy);
+ extents_postfork_parent(tsdn, &arena->extents_retained);
+ malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx);
+ malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
+ malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
+ if (config_stats) {
+ malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
+ }
}
void
-arena_postfork_child(arena_t *arena)
-{
+arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
unsigned i;
- for (i = 0; i < NBINS; i++)
- malloc_mutex_postfork_child(&arena->bins[i].lock);
- malloc_mutex_postfork_child(&arena->lock);
+ atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
+ atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
+ if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) {
+ arena_nthreads_inc(arena, false);
+ }
+ if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) {
+ arena_nthreads_inc(arena, true);
+ }
+ if (config_stats) {
+ ql_new(&arena->tcache_ql);
+ ql_new(&arena->cache_bin_array_descriptor_ql);
+ tcache_t *tcache = tcache_get(tsdn_tsd(tsdn));
+ if (tcache != NULL && tcache->arena == arena) {
+ ql_elm_new(tcache, link);
+ ql_tail_insert(&arena->tcache_ql, tcache, link);
+ cache_bin_array_descriptor_init(
+ &tcache->cache_bin_array_descriptor,
+ tcache->bins_small, tcache->bins_large);
+ ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
+ &tcache->cache_bin_array_descriptor, link);
+ }
+ }
+
+ for (i = 0; i < NBINS; i++) {
+ bin_postfork_child(tsdn, &arena->bins[i]);
+ }
+ malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
+ base_postfork_child(tsdn, arena->base);
+ malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx);
+ extents_postfork_child(tsdn, &arena->extents_dirty);
+ extents_postfork_child(tsdn, &arena->extents_muzzy);
+ extents_postfork_child(tsdn, &arena->extents_retained);
+ malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx);
+ malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
+ malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);
+ if (config_stats) {
+ malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
+ }
}