author     Oran Agra <oran@redislabs.com>    2023-05-01 15:38:08 +0300
committer  Oran Agra <oran@redislabs.com>    2023-05-01 15:38:08 +0300
commit     b8beda3cf8e5c8218fbe84539d6b5117b3f909d9 (patch)
tree       66934c98c93ac48f6ca912ad547e651221e525d2 /deps/jemalloc/include/jemalloc/internal/arena_inlines_b.h
parent     d659c734569be4ed32a270bac2527ccf35418c43 (diff)
parent     6d23d3ac3b3f9d13ad2ce99029e69a9ea9f2e517 (diff)
download   redis-b8beda3cf8e5c8218fbe84539d6b5117b3f909d9.tar.gz
Merge commit jemalloc 5.3.0
Diffstat (limited to 'deps/jemalloc/include/jemalloc/internal/arena_inlines_b.h')
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/arena_inlines_b.h  489
1 file changed, 306 insertions, 183 deletions
diff --git a/deps/jemalloc/include/jemalloc/internal/arena_inlines_b.h b/deps/jemalloc/include/jemalloc/internal/arena_inlines_b.h
index dd926575f..fa81537c4 100644
--- a/deps/jemalloc/include/jemalloc/internal/arena_inlines_b.h
+++ b/deps/jemalloc/include/jemalloc/internal/arena_inlines_b.h
@@ -1,16 +1,20 @@
#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H
#define JEMALLOC_INTERNAL_ARENA_INLINES_B_H
+#include "jemalloc/internal/div.h"
+#include "jemalloc/internal/emap.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/safety_check.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
-JEMALLOC_ALWAYS_INLINE bool
-arena_has_default_hooks(arena_t *arena) {
- return (extent_hooks_get(arena) == &extent_hooks_default);
+static inline arena_t *
+arena_get_from_edata(edata_t *edata) {
+ return (arena_t *)atomic_load_p(&arenas[edata_arena_ind_get(edata)],
+ ATOMIC_RELAXED);
}
JEMALLOC_ALWAYS_INLINE arena_t *
@@ -34,127 +38,109 @@ arena_choose_maybe_huge(tsd_t *tsd, arena_t *arena, size_t size) {
return arena_choose(tsd, NULL);
}
-JEMALLOC_ALWAYS_INLINE prof_tctx_t *
-arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
+JEMALLOC_ALWAYS_INLINE void
+arena_prof_info_get(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx,
+ prof_info_t *prof_info, bool reset_recent) {
cassert(config_prof);
assert(ptr != NULL);
+ assert(prof_info != NULL);
+
+ edata_t *edata = NULL;
+ bool is_slab;
/* Static check. */
if (alloc_ctx == NULL) {
- const extent_t *extent = iealloc(tsdn, ptr);
- if (unlikely(!extent_slab_get(extent))) {
- return large_prof_tctx_get(tsdn, extent);
- }
+ edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
+ ptr);
+ is_slab = edata_slab_get(edata);
+ } else if (unlikely(!(is_slab = alloc_ctx->slab))) {
+ edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
+ ptr);
+ }
+
+ if (unlikely(!is_slab)) {
+ /* edata must have been initialized at this point. */
+ assert(edata != NULL);
+ large_prof_info_get(tsd, edata, prof_info, reset_recent);
} else {
- if (unlikely(!alloc_ctx->slab)) {
- return large_prof_tctx_get(tsdn, iealloc(tsdn, ptr));
- }
+ prof_info->alloc_tctx = (prof_tctx_t *)(uintptr_t)1U;
+ /*
+ * No need to set other fields in prof_info; they will never be
+ * accessed if (uintptr_t)alloc_tctx == (uintptr_t)1U.
+ */
}
- return (prof_tctx_t *)(uintptr_t)1U;
}
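A sketch of how a consumer of arena_prof_info_get() might honor that sentinel, reading only alloc_tctx before deciding whether the remaining fields are meaningful (the helper name is hypothetical):

static inline bool
prof_info_is_sampled_sketch(const prof_info_t *prof_info) {
	/*
	 * (uintptr_t)1U marks a non-sampled (slab) allocation; anything
	 * larger is a real prof_tctx_t pointer, in which case the other
	 * prof_info fields were filled in by large_prof_info_get().
	 */
	return (uintptr_t)prof_info->alloc_tctx > (uintptr_t)1U;
}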
JEMALLOC_ALWAYS_INLINE void
-arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
- alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
+arena_prof_tctx_reset(tsd_t *tsd, const void *ptr,
+ emap_alloc_ctx_t *alloc_ctx) {
cassert(config_prof);
assert(ptr != NULL);
/* Static check. */
if (alloc_ctx == NULL) {
- extent_t *extent = iealloc(tsdn, ptr);
- if (unlikely(!extent_slab_get(extent))) {
- large_prof_tctx_set(tsdn, extent, tctx);
+ edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd),
+ &arena_emap_global, ptr);
+ if (unlikely(!edata_slab_get(edata))) {
+ large_prof_tctx_reset(edata);
}
} else {
if (unlikely(!alloc_ctx->slab)) {
- large_prof_tctx_set(tsdn, iealloc(tsdn, ptr), tctx);
+ edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd),
+ &arena_emap_global, ptr);
+ large_prof_tctx_reset(edata);
}
}
}
-static inline void
-arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
+JEMALLOC_ALWAYS_INLINE void
+arena_prof_tctx_reset_sampled(tsd_t *tsd, const void *ptr) {
cassert(config_prof);
assert(ptr != NULL);
- extent_t *extent = iealloc(tsdn, ptr);
- assert(!extent_slab_get(extent));
+ edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
+ ptr);
+ assert(!edata_slab_get(edata));
- large_prof_tctx_reset(tsdn, extent);
-}
-
-JEMALLOC_ALWAYS_INLINE nstime_t
-arena_prof_alloc_time_get(tsdn_t *tsdn, const void *ptr,
- alloc_ctx_t *alloc_ctx) {
- cassert(config_prof);
- assert(ptr != NULL);
-
- extent_t *extent = iealloc(tsdn, ptr);
- /*
- * Unlike arena_prof_prof_tctx_{get, set}, we only call this once we're
- * sure we have a sampled allocation.
- */
- assert(!extent_slab_get(extent));
- return large_prof_alloc_time_get(extent);
+ large_prof_tctx_reset(edata);
}
JEMALLOC_ALWAYS_INLINE void
-arena_prof_alloc_time_set(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx,
- nstime_t t) {
+arena_prof_info_set(tsd_t *tsd, edata_t *edata, prof_tctx_t *tctx,
+ size_t size) {
cassert(config_prof);
- assert(ptr != NULL);
- extent_t *extent = iealloc(tsdn, ptr);
- assert(!extent_slab_get(extent));
- large_prof_alloc_time_set(extent, t);
+ assert(!edata_slab_get(edata));
+ large_prof_info_set(edata, tctx, size);
}
JEMALLOC_ALWAYS_INLINE void
arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
- tsd_t *tsd;
- ticker_t *decay_ticker;
-
if (unlikely(tsdn_null(tsdn))) {
return;
}
- tsd = tsdn_tsd(tsdn);
- decay_ticker = decay_ticker_get(tsd, arena_ind_get(arena));
- if (unlikely(decay_ticker == NULL)) {
- return;
- }
- if (unlikely(ticker_ticks(decay_ticker, nticks))) {
+ tsd_t *tsd = tsdn_tsd(tsdn);
+ /*
+ * We use the ticker_geom_t to avoid having per-arena state in the tsd.
+ * Instead of having a countdown-until-decay timer running for every
+ * arena in every thread, we flip a coin once per tick, whose
+ * probability of coming up heads is 1/nticks; this is effectively the
+ * operation of the ticker_geom_t. Each arena has the same chance of a
+ * coinflip coming up heads (1/ARENA_DECAY_NTICKS_PER_UPDATE), so we can
+ * use a single ticker for all of them.
+ */
+ ticker_geom_t *decay_ticker = tsd_arena_decay_tickerp_get(tsd);
+ uint64_t *prng_state = tsd_prng_statep_get(tsd);
+ if (unlikely(ticker_geom_ticks(decay_ticker, prng_state, nticks))) {
arena_decay(tsdn, arena, false, false);
}
}
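The comment above explains the geometric-ticker trick in probabilistic terms. Below is a standalone illustration of the same idea, with its own xorshift PRNG and a made-up NTICKS_PER_UPDATE constant; it is not jemalloc's ticker_geom_t, just a sketch of the coinflip it performs:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NTICKS_PER_UPDATE 1000	/* illustrative stand-in for the real constant */

/* Tiny xorshift64 generator; jemalloc uses its own prng module. */
static uint64_t
prng_next(uint64_t *state) {
	uint64_t x = *state;
	x ^= x << 13;
	x ^= x >> 7;
	x ^= x << 17;
	return *state = x;
}

/*
 * Batch nticks independent 1/NTICKS_PER_UPDATE coinflips into one draw: fire
 * with probability roughly nticks/NTICKS_PER_UPDATE.  One shared generator
 * serves every arena, so no per-arena countdown state is needed.
 */
static bool
geom_tick(uint64_t *prng_state, unsigned nticks) {
	return (prng_next(prng_state) % NTICKS_PER_UPDATE) < nticks;
}

int
main(void) {
	uint64_t state = 0x9e3779b97f4a7c15ULL;
	unsigned fired = 0;
	for (unsigned i = 0; i < 1000000; i++) {
		if (geom_tick(&state, 1)) {
			fired++;
		}
	}
	printf("fired %u of 1000000 ticks (expect ~1000)\n", fired);
	return 0;
}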
JEMALLOC_ALWAYS_INLINE void
arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
- malloc_mutex_assert_not_owner(tsdn, &arena->decay_dirty.mtx);
- malloc_mutex_assert_not_owner(tsdn, &arena->decay_muzzy.mtx);
-
arena_decay_ticks(tsdn, arena, 1);
}
-/* Purge a single extent to retained / unmapped directly. */
-JEMALLOC_ALWAYS_INLINE void
-arena_decay_extent(tsdn_t *tsdn,arena_t *arena, extent_hooks_t **r_extent_hooks,
- extent_t *extent) {
- size_t extent_size = extent_size_get(extent);
- extent_dalloc_wrapper(tsdn, arena,
- r_extent_hooks, extent);
- if (config_stats) {
- /* Update stats accordingly. */
- arena_stats_lock(tsdn, &arena->stats);
- arena_stats_add_u64(tsdn, &arena->stats,
- &arena->decay_dirty.stats->nmadvise, 1);
- arena_stats_add_u64(tsdn, &arena->stats,
- &arena->decay_dirty.stats->purged, extent_size >> LG_PAGE);
- arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
- extent_size);
- arena_stats_unlock(tsdn, &arena->stats);
- }
-}
-
JEMALLOC_ALWAYS_INLINE void *
arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
tcache_t *tcache, bool slow_path) {
@@ -178,21 +164,19 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
JEMALLOC_ALWAYS_INLINE arena_t *
arena_aalloc(tsdn_t *tsdn, const void *ptr) {
- return extent_arena_get(iealloc(tsdn, ptr));
+ edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
+ unsigned arena_ind = edata_arena_ind_get(edata);
+ return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_RELAXED);
}
JEMALLOC_ALWAYS_INLINE size_t
arena_salloc(tsdn_t *tsdn, const void *ptr) {
assert(ptr != NULL);
+ emap_alloc_ctx_t alloc_ctx;
+ emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &alloc_ctx);
+ assert(alloc_ctx.szind != SC_NSIZES);
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
- szind_t szind = rtree_szind_read(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true);
- assert(szind != SC_NSIZES);
-
- return sz_index2size(szind);
+ return sz_index2size(alloc_ctx.szind);
}
JEMALLOC_ALWAYS_INLINE size_t
@@ -206,26 +190,53 @@ arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
* failure.
*/
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
- extent_t *extent;
- szind_t szind;
- if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, false, &extent, &szind)) {
+ emap_full_alloc_ctx_t full_alloc_ctx;
+ bool missing = emap_full_alloc_ctx_try_lookup(tsdn, &arena_emap_global,
+ ptr, &full_alloc_ctx);
+ if (missing) {
return 0;
}
- if (extent == NULL) {
+ if (full_alloc_ctx.edata == NULL) {
return 0;
}
- assert(extent_state_get(extent) == extent_state_active);
+ assert(edata_state_get(full_alloc_ctx.edata) == extent_state_active);
/* Only slab members should be looked up via interior pointers. */
- assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));
+ assert(edata_addr_get(full_alloc_ctx.edata) == ptr
+ || edata_slab_get(full_alloc_ctx.edata));
+
+ assert(full_alloc_ctx.szind != SC_NSIZES);
+
+ return sz_index2size(full_alloc_ctx.szind);
+}
- assert(szind != SC_NSIZES);
+JEMALLOC_ALWAYS_INLINE bool
+large_dalloc_safety_checks(edata_t *edata, void *ptr, szind_t szind) {
+ if (!config_opt_safety_checks) {
+ return false;
+ }
+
+ /*
+ * Eagerly detect double free and sized dealloc bugs for large sizes.
+ * The cost is low enough (as edata will be accessed anyway) to be
+ * enabled all the time.
+ */
+ if (unlikely(edata == NULL ||
+ edata_state_get(edata) != extent_state_active)) {
+ safety_check_fail("Invalid deallocation detected: "
+ "pages being freed (%p) not currently active, "
+ "possibly caused by double free bugs.",
+ (uintptr_t)edata_addr_get(edata));
+ return true;
+ }
+ size_t input_size = sz_index2size(szind);
+ if (unlikely(input_size != edata_usize_get(edata))) {
+ safety_check_fail_sized_dealloc(/* current_dealloc */ true, ptr,
+ /* true_size */ edata_usize_get(edata), input_size);
+ return true;
+ }
- return sz_index2size(szind);
+ return false;
}
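The helper above piggybacks two cheap consistency checks on metadata the free path loads anyway: the extent must still be active (otherwise a likely double free) and the caller-supplied size class must match the recorded usable size. A standalone sketch of those two checks against a plain bookkeeping struct (alloc_record_t and sized_free_check are illustrative names, not jemalloc API):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical record of what an allocator knows about one allocation. */
typedef struct {
	void *addr;
	size_t usize;	/* usable size actually handed out */
	bool active;	/* cleared on free; catches double frees */
} alloc_record_t;

/* Mirrors the shape of large_dalloc_safety_checks(): true means "abort this free". */
static bool
sized_free_check(const alloc_record_t *rec, void *ptr, size_t claimed_size) {
	if (rec == NULL || !rec->active) {
		fprintf(stderr, "invalid free of %p: not an active allocation "
		    "(possible double free)\n", ptr);
		return true;
	}
	if (claimed_size != rec->usize) {
		fprintf(stderr, "sized dealloc mismatch for %p: claimed %zu, "
		    "actual %zu\n", ptr, claimed_size, rec->usize);
		return true;
	}
	return false;
}

int
main(void) {
	int dummy;
	alloc_record_t rec = { &dummy, 32, true };
	(void)sized_free_check(&rec, &dummy, 48);	/* reports a mismatch */
	return 0;
}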
static inline void
@@ -233,8 +244,13 @@ arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind) {
if (config_prof && unlikely(szind < SC_NBINS)) {
arena_dalloc_promoted(tsdn, ptr, NULL, true);
} else {
- extent_t *extent = iealloc(tsdn, ptr);
- large_dalloc(tsdn, extent);
+ edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
+ ptr);
+ if (large_dalloc_safety_checks(edata, ptr, szind)) {
+ /* See the comment in isfree. */
+ return;
+ }
+ large_dalloc(tsdn, edata);
}
}
@@ -242,27 +258,22 @@ static inline void
arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
assert(ptr != NULL);
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
- szind_t szind;
- bool slab;
- rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
- true, &szind, &slab);
+ emap_alloc_ctx_t alloc_ctx;
+ emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &alloc_ctx);
if (config_debug) {
- extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
- rtree_ctx, (uintptr_t)ptr, true);
- assert(szind == extent_szind_get(extent));
- assert(szind < SC_NSIZES);
- assert(slab == extent_slab_get(extent));
+ edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
+ ptr);
+ assert(alloc_ctx.szind == edata_szind_get(edata));
+ assert(alloc_ctx.szind < SC_NSIZES);
+ assert(alloc_ctx.slab == edata_slab_get(edata));
}
- if (likely(slab)) {
+ if (likely(alloc_ctx.slab)) {
/* Small allocation. */
arena_dalloc_small(tsdn, ptr);
} else {
- arena_dalloc_large_no_tcache(tsdn, ptr, szind);
+ arena_dalloc_large_no_tcache(tsdn, ptr, alloc_ctx.szind);
}
}
@@ -277,14 +288,19 @@ arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind,
slow_path);
}
} else {
- extent_t *extent = iealloc(tsdn, ptr);
- large_dalloc(tsdn, extent);
+ edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
+ ptr);
+ if (large_dalloc_safety_checks(edata, ptr, szind)) {
+ /* See the comment in isfree. */
+ return;
+ }
+ large_dalloc(tsdn, edata);
}
}
JEMALLOC_ALWAYS_INLINE void
arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
- alloc_ctx_t *alloc_ctx, bool slow_path) {
+ emap_alloc_ctx_t *caller_alloc_ctx, bool slow_path) {
assert(!tsdn_null(tsdn) || tcache == NULL);
assert(ptr != NULL);
@@ -293,34 +309,30 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
return;
}
- szind_t szind;
- bool slab;
- rtree_ctx_t *rtree_ctx;
- if (alloc_ctx != NULL) {
- szind = alloc_ctx->szind;
- slab = alloc_ctx->slab;
- assert(szind != SC_NSIZES);
+ emap_alloc_ctx_t alloc_ctx;
+ if (caller_alloc_ctx != NULL) {
+ alloc_ctx = *caller_alloc_ctx;
} else {
- rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
- rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true, &szind, &slab);
+ util_assume(!tsdn_null(tsdn));
+ emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr,
+ &alloc_ctx);
}
if (config_debug) {
- rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
- extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
- rtree_ctx, (uintptr_t)ptr, true);
- assert(szind == extent_szind_get(extent));
- assert(szind < SC_NSIZES);
- assert(slab == extent_slab_get(extent));
+ edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
+ ptr);
+ assert(alloc_ctx.szind == edata_szind_get(edata));
+ assert(alloc_ctx.szind < SC_NSIZES);
+ assert(alloc_ctx.slab == edata_slab_get(edata));
}
- if (likely(slab)) {
+ if (likely(alloc_ctx.slab)) {
/* Small allocation. */
- tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
- slow_path);
+ tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
+ alloc_ctx.szind, slow_path);
} else {
- arena_dalloc_large(tsdn, ptr, tcache, szind, slow_path);
+ arena_dalloc_large(tsdn, ptr, tcache, alloc_ctx.szind,
+ slow_path);
}
}
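A caller that has already looked up the allocation context can hand it to arena_dalloc() and skip the emap lookup inside; passing NULL falls back to that lookup. A hypothetical caller sketch using only declarations visible in this header (the wrapper name is made up):

static void
dalloc_with_known_ctx_sketch(tsdn_t *tsdn, tcache_t *tcache, void *ptr) {
	emap_alloc_ctx_t ctx;
	/* One lookup up front... */
	emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &ctx);
	/* ...then arena_dalloc() reuses it instead of looking up again. */
	arena_dalloc(tsdn, ptr, tcache, &ctx, /* slow_path */ false);
}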
@@ -329,47 +341,43 @@ arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
assert(ptr != NULL);
assert(size <= SC_LARGE_MAXCLASS);
- szind_t szind;
- bool slab;
+ emap_alloc_ctx_t alloc_ctx;
if (!config_prof || !opt_prof) {
/*
* There is no risk of being confused by a promoted sampled
* object, so base szind and slab on the given size.
*/
- szind = sz_size2index(size);
- slab = (szind < SC_NBINS);
+ alloc_ctx.szind = sz_size2index(size);
+ alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
}
if ((config_prof && opt_prof) || config_debug) {
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
- &rtree_ctx_fallback);
-
- rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true, &szind, &slab);
+ emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr,
+ &alloc_ctx);
- assert(szind == sz_size2index(size));
- assert((config_prof && opt_prof) || slab == (szind < SC_NBINS));
+ assert(alloc_ctx.szind == sz_size2index(size));
+ assert((config_prof && opt_prof)
+ || alloc_ctx.slab == (alloc_ctx.szind < SC_NBINS));
if (config_debug) {
- extent_t *extent = rtree_extent_read(tsdn,
- &extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
- assert(szind == extent_szind_get(extent));
- assert(slab == extent_slab_get(extent));
+ edata_t *edata = emap_edata_lookup(tsdn,
+ &arena_emap_global, ptr);
+ assert(alloc_ctx.szind == edata_szind_get(edata));
+ assert(alloc_ctx.slab == edata_slab_get(edata));
}
}
- if (likely(slab)) {
+ if (likely(alloc_ctx.slab)) {
/* Small allocation. */
arena_dalloc_small(tsdn, ptr);
} else {
- arena_dalloc_large_no_tcache(tsdn, ptr, szind);
+ arena_dalloc_large_no_tcache(tsdn, ptr, alloc_ctx.szind);
}
}
JEMALLOC_ALWAYS_INLINE void
arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
- alloc_ctx_t *alloc_ctx, bool slow_path) {
+ emap_alloc_ctx_t *caller_alloc_ctx, bool slow_path) {
assert(!tsdn_null(tsdn) || tcache == NULL);
assert(ptr != NULL);
assert(size <= SC_LARGE_MAXCLASS);
@@ -379,49 +387,164 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
return;
}
- szind_t szind;
- bool slab;
- alloc_ctx_t local_ctx;
+ emap_alloc_ctx_t alloc_ctx;
if (config_prof && opt_prof) {
- if (alloc_ctx == NULL) {
+ if (caller_alloc_ctx == NULL) {
/* Uncommon case and should be a static check. */
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
- &rtree_ctx_fallback);
- rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true, &local_ctx.szind,
- &local_ctx.slab);
- assert(local_ctx.szind == sz_size2index(size));
- alloc_ctx = &local_ctx;
+ emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr,
+ &alloc_ctx);
+ assert(alloc_ctx.szind == sz_size2index(size));
+ } else {
+ alloc_ctx = *caller_alloc_ctx;
}
- slab = alloc_ctx->slab;
- szind = alloc_ctx->szind;
} else {
/*
* There is no risk of being confused by a promoted sampled
* object, so base szind and slab on the given size.
*/
- szind = sz_size2index(size);
- slab = (szind < SC_NBINS);
+ alloc_ctx.szind = sz_size2index(size);
+ alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
}
if (config_debug) {
- rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
- rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true, &szind, &slab);
- extent_t *extent = rtree_extent_read(tsdn,
- &extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
- assert(szind == extent_szind_get(extent));
- assert(slab == extent_slab_get(extent));
+ edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
+ ptr);
+ assert(alloc_ctx.szind == edata_szind_get(edata));
+ assert(alloc_ctx.slab == edata_slab_get(edata));
}
- if (likely(slab)) {
+ if (likely(alloc_ctx.slab)) {
/* Small allocation. */
- tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
- slow_path);
+ tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
+ alloc_ctx.szind, slow_path);
} else {
- arena_dalloc_large(tsdn, ptr, tcache, szind, slow_path);
+ arena_dalloc_large(tsdn, ptr, tcache, alloc_ctx.szind,
+ slow_path);
+ }
+}
+
+static inline void
+arena_cache_oblivious_randomize(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
+ size_t alignment) {
+ assert(edata_base_get(edata) == edata_addr_get(edata));
+
+ if (alignment < PAGE) {
+ unsigned lg_range = LG_PAGE -
+ lg_floor(CACHELINE_CEILING(alignment));
+ size_t r;
+ if (!tsdn_null(tsdn)) {
+ tsd_t *tsd = tsdn_tsd(tsdn);
+ r = (size_t)prng_lg_range_u64(
+ tsd_prng_statep_get(tsd), lg_range);
+ } else {
+ uint64_t stack_value = (uint64_t)(uintptr_t)&r;
+ r = (size_t)prng_lg_range_u64(&stack_value, lg_range);
+ }
+ uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
+ lg_range);
+ edata->e_addr = (void *)((uintptr_t)edata->e_addr +
+ random_offset);
+ assert(ALIGNMENT_ADDR2BASE(edata->e_addr, alignment) ==
+ edata->e_addr);
+ }
+}
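The function above shifts a large allocation's start address by a random, alignment-preserving amount below one page, so equal-sized allocations do not all land on the same cache sets. A standalone sketch of the offset computation, assuming 4 KiB pages and 64-byte cache lines (all names and constants here are illustrative):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE ((size_t)4096)	/* assumed page size */
#define CACHELINE ((size_t)64)	/* assumed cache line size */

static size_t
cacheline_ceiling(size_t s) {
	return (s + CACHELINE - 1) & ~(CACHELINE - 1);
}

/*
 * Pick a random multiple of the cacheline-rounded alignment that is strictly
 * below PAGE; adding it to a page-aligned base keeps the requested alignment
 * intact.  alignment must be a power of two smaller than PAGE.
 */
static size_t
random_subpage_offset(uint64_t *prng_state, size_t alignment) {
	size_t step = cacheline_ceiling(alignment);
	size_t nsteps = PAGE / step;
	uint64_t x = *prng_state;	/* xorshift64 stand-in for a PRNG */
	x ^= x << 13; x ^= x >> 7; x ^= x << 17;
	*prng_state = x;
	size_t offset = (size_t)(x % nsteps) * step;
	assert(offset < PAGE && offset % alignment == 0);
	return offset;
}

int
main(void) {
	uint64_t state = 42;
	for (int i = 0; i < 4; i++) {
		printf("offset for 64-byte alignment: %zu\n",
		    random_subpage_offset(&state, 64));
	}
	return 0;
}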
+
+/*
+ * The dalloc bin info contains just the information that the common paths need
+ * during tcache flushes. By force-inlining these paths, and using local copies
+ * of data (so that the compiler knows it's constant), we avoid a whole bunch of
+ * redundant loads and stores by leaving this information in registers.
+ */
+typedef struct arena_dalloc_bin_locked_info_s arena_dalloc_bin_locked_info_t;
+struct arena_dalloc_bin_locked_info_s {
+ div_info_t div_info;
+ uint32_t nregs;
+ uint64_t ndalloc;
+};
+
+JEMALLOC_ALWAYS_INLINE size_t
+arena_slab_regind(arena_dalloc_bin_locked_info_t *info, szind_t binind,
+ edata_t *slab, const void *ptr) {
+ size_t diff, regind;
+
+ /* Freeing a pointer outside the slab can cause assertion failure. */
+ assert((uintptr_t)ptr >= (uintptr_t)edata_addr_get(slab));
+ assert((uintptr_t)ptr < (uintptr_t)edata_past_get(slab));
+ /* Freeing an interior pointer can cause assertion failure. */
+ assert(((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab)) %
+ (uintptr_t)bin_infos[binind].reg_size == 0);
+
+ diff = (size_t)((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab));
+
+ /* Avoid doing division with a variable divisor. */
+ regind = div_compute(&info->div_info, diff);
+
+ assert(regind < bin_infos[binind].nregs);
+
+ return regind;
+}
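div_compute() turns the per-region-size division into a multiply and shift against a precomputed constant. A standalone sketch of that technique, using magic = ceil(2^32 / d) and brute-force checking every offset a slab of the example geometry could produce; it illustrates the idea rather than reproducing jemalloc's div_info_t:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Precompute ceil(2^32 / d) for a fixed divisor d (d > 1). */
static uint32_t
div_magic(uint32_t d) {
	uint64_t two32 = (uint64_t)1 << 32;
	return (uint32_t)(two32 / d) + (two32 % d != 0 ? 1 : 0);
}

/* n / d without a runtime division: one 64-bit multiply and a shift. */
static uint32_t
div_apply(uint32_t n, uint32_t magic) {
	return (uint32_t)(((uint64_t)n * magic) >> 32);
}

int
main(void) {
	uint32_t reg_size = 96;		/* example non-power-of-two region size */
	uint32_t slab_size = 16 * 4096;	/* example slab extent size */
	uint32_t magic = div_magic(reg_size);
	/* Offsets inside a slab are always multiples of reg_size; verify those. */
	for (uint32_t diff = 0; diff < slab_size; diff += reg_size) {
		assert(div_apply(diff, magic) == diff / reg_size);
	}
	printf("magic %u verified for reg_size %u\n", (unsigned)magic,
	    (unsigned)reg_size);
	return 0;
}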
+
+JEMALLOC_ALWAYS_INLINE void
+arena_dalloc_bin_locked_begin(arena_dalloc_bin_locked_info_t *info,
+ szind_t binind) {
+ info->div_info = arena_binind_div_info[binind];
+ info->nregs = bin_infos[binind].nregs;
+ info->ndalloc = 0;
+}
+
+/*
+ * Does the deallocation work associated with freeing a single pointer (a
+ * "step") in between a arena_dalloc_bin_locked begin and end call.
+ *
+ * Returns true if arena_slab_dalloc must be called on slab. Doesn't do
+ * stats updates, which happen during finish (this lets running counts get left
+ * in a register).
+ */
+JEMALLOC_ALWAYS_INLINE bool
+arena_dalloc_bin_locked_step(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
+ arena_dalloc_bin_locked_info_t *info, szind_t binind, edata_t *slab,
+ void *ptr) {
+ const bin_info_t *bin_info = &bin_infos[binind];
+ size_t regind = arena_slab_regind(info, binind, slab, ptr);
+ slab_data_t *slab_data = edata_slab_data_get(slab);
+
+ assert(edata_nfree_get(slab) < bin_info->nregs);
+ /* Freeing an unallocated pointer can cause assertion failure. */
+ assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));
+
+ bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
+ edata_nfree_inc(slab);
+
+ if (config_stats) {
+ info->ndalloc++;
+ }
+
+ unsigned nfree = edata_nfree_get(slab);
+ if (nfree == bin_info->nregs) {
+ arena_dalloc_bin_locked_handle_newly_empty(tsdn, arena, slab,
+ bin);
+ return true;
+ } else if (nfree == 1 && slab != bin->slabcur) {
+ arena_dalloc_bin_locked_handle_newly_nonempty(tsdn, arena, slab,
+ bin);
}
+ return false;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_dalloc_bin_locked_finish(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
+ arena_dalloc_bin_locked_info_t *info) {
+ if (config_stats) {
+ bin->stats.ndalloc += info->ndalloc;
+ assert(bin->stats.curregs >= (size_t)info->ndalloc);
+ bin->stats.curregs -= (size_t)info->ndalloc;
+ }
+}
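A hypothetical sketch of how begin/step/finish compose around a batch of frees; the wrapper name and the lookup_slab callback are assumptions, the bin lock is taken to be already held (matching the _locked naming), and the handling of a newly empty slab is only hinted at:

static void
flush_batch_sketch(tsdn_t *tsdn, arena_t *arena, bin_t *bin, szind_t binind,
    void **ptrs, unsigned nptrs,
    edata_t *(*lookup_slab)(tsdn_t *, void *)) {
	arena_dalloc_bin_locked_info_t info;
	arena_dalloc_bin_locked_begin(&info, binind);
	for (unsigned i = 0; i < nptrs; i++) {
		edata_t *slab = lookup_slab(tsdn, ptrs[i]);
		if (arena_dalloc_bin_locked_step(tsdn, arena, bin, &info,
		    binind, slab, ptrs[i])) {
			/*
			 * The slab became empty; the caller must pass it to
			 * arena_slab_dalloc(), typically after releasing the
			 * bin lock (details omitted here).
			 */
		}
	}
	/* Stats are folded in once, after the loop, per the doc comment above. */
	arena_dalloc_bin_locked_finish(tsdn, arena, bin, &info);
}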
+
+static inline bin_t *
+arena_get_bin(arena_t *arena, szind_t binind, unsigned binshard) {
+ bin_t *shard0 = (bin_t *)((uintptr_t)arena + arena_bin_offsets[binind]);
+ return shard0 + binshard;
}
#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */
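arena_get_bin() above resolves shard 0 of a bin by adding a per-size-class byte offset to the arena pointer and then indexing into the contiguous shard array. A standalone sketch of that offset-based layout with simplified types (everything below is illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { size_t ndalloc; } bin_sketch_t;

typedef struct {
	size_t bin_offsets[3];	/* byte offset of shard 0 for each bin */
	/* bin shards are laid out immediately after this header */
} arena_sketch_t;

static bin_sketch_t *
get_bin_sketch(arena_sketch_t *a, unsigned binind, unsigned binshard) {
	bin_sketch_t *shard0 =
	    (bin_sketch_t *)((uintptr_t)a + a->bin_offsets[binind]);
	return shard0 + binshard;
}

int
main(void) {
	unsigned nbins = 3, nshards = 4;
	size_t total = sizeof(arena_sketch_t) +
	    (size_t)nbins * nshards * sizeof(bin_sketch_t);
	arena_sketch_t *a = calloc(1, total);
	if (a == NULL) {
		return 1;
	}
	for (unsigned i = 0; i < nbins; i++) {
		a->bin_offsets[i] = sizeof(arena_sketch_t) +
		    (size_t)i * nshards * sizeof(bin_sketch_t);
	}
	get_bin_sketch(a, 1, 2)->ndalloc = 7;
	printf("%zu\n", get_bin_sketch(a, 1, 2)->ndalloc);
	free(a);
	return 0;
}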