author		Guy Benoish <guy.benoish@redislabs.com>	2017-05-09 18:42:32 +0300
committer	Guy Benoish <guy.benoish@redislabs.com>	2017-05-09 18:42:32 +0300
commit		89a9e5a9a2dba7bcdb4ef32fa73f105a14923c1a (patch)
tree		4c160d8b97a3792e00a0e2e9ff79ea605371dc1c /deps/jemalloc/include/jemalloc/internal/arena.h
parent		71a8df6a2b6fe37a625636a3902fcc713c7c4919 (diff)
parent		a4c7f34d3ab9a182a26d6a6f59f87d253929100d (diff)
download	redis-89a9e5a9a2dba7bcdb4ef32fa73f105a14923c1a.tar.gz

Merge branch 'unstable' of https://github.com/antirez/redis into unstable

Diffstat (limited to 'deps/jemalloc/include/jemalloc/internal/arena.h')
-rw-r--r--	deps/jemalloc/include/jemalloc/internal/arena.h	582
1 file changed, 201 insertions, 381 deletions

diff --git a/deps/jemalloc/include/jemalloc/internal/arena.h b/deps/jemalloc/include/jemalloc/internal/arena.h
index ce4e6029e..12c617979 100644
--- a/deps/jemalloc/include/jemalloc/internal/arena.h
+++ b/deps/jemalloc/include/jemalloc/internal/arena.h
@@ -23,29 +23,14 @@
  */
 #define	LG_DIRTY_MULT_DEFAULT	3
 
-typedef enum {
-	purge_mode_ratio = 0,
-	purge_mode_decay = 1,
-
-	purge_mode_limit = 2
-} purge_mode_t;
-#define	PURGE_DEFAULT		purge_mode_ratio
-/* Default decay time in seconds. */
-#define	DECAY_TIME_DEFAULT	10
-/* Number of event ticks between time checks. */
-#define	DECAY_NTICKS_PER_UPDATE	1000
-
 typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t;
-typedef struct arena_avail_links_s arena_avail_links_t;
 typedef struct arena_run_s arena_run_t;
 typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t;
 typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t;
 typedef struct arena_chunk_s arena_chunk_t;
 typedef struct arena_bin_info_s arena_bin_info_t;
-typedef struct arena_decay_s arena_decay_t;
 typedef struct arena_bin_s arena_bin_t;
 typedef struct arena_s arena_t;
-typedef struct arena_tdata_s arena_tdata_t;
 
 #endif /* JEMALLOC_H_TYPES */
 /******************************************************************************/
@@ -155,13 +140,13 @@ struct arena_runs_dirty_link_s {
  */
 struct arena_chunk_map_misc_s {
 	/*
-	 * Linkage for run heaps.  There are two disjoint uses:
+	 * Linkage for run trees.  There are two disjoint uses:
 	 *
-	 * 1) arena_t's runs_avail heaps.
+	 * 1) arena_t's runs_avail tree.
 	 * 2) arena_run_t conceptually uses this linkage for in-use non-full
 	 *    runs, rather than directly embedding linkage.
 	 */
-	phn(arena_chunk_map_misc_t)	ph_link;
+	rb_node(arena_chunk_map_misc_t)	rb_link;
 
 	union {
 		/* Linkage for list of dirty runs. */
@@ -169,15 +154,16 @@ struct arena_chunk_map_misc_s {
 
 		/* Profile counters, used for large object runs. */
 		union {
-			void		*prof_tctx_pun;
-			prof_tctx_t	*prof_tctx;
+			void			*prof_tctx_pun;
+			prof_tctx_t		*prof_tctx;
 		};
 
 		/* Small region run metadata. */
 		arena_run_t	run;
 	};
};
-typedef ph(arena_chunk_map_misc_t) arena_run_heap_t;
+typedef rb_tree(arena_chunk_map_misc_t) arena_avail_tree_t;
+typedef rb_tree(arena_chunk_map_misc_t) arena_run_tree_t;
 #endif /* JEMALLOC_ARENA_STRUCTS_A */
 
 #ifdef JEMALLOC_ARENA_STRUCTS_B
@@ -191,14 +177,6 @@ struct arena_chunk_s {
 	extent_node_t		node;
 
 	/*
-	 * True if memory could be backed by transparent huge pages.  This is
-	 * only directly relevant to Linux, since it is the only supported
-	 * platform on which jemalloc interacts with explicit transparent huge
-	 * page controls.
-	 */
-	bool			hugepage;
-
-	/*
 	 * Map of pages within chunk that keeps track of free/large/small.  The
 	 * first map_bias entries are omitted, since the chunk header does not
 	 * need to be tracked in the map.  This omission saves a header page
@@ -242,71 +220,28 @@
  */
 struct arena_bin_info_s {
 	/* Size of regions in a run for this bin's size class. */
-	size_t			reg_size;
+	size_t		reg_size;
 
 	/* Redzone size. */
-	size_t			redzone_size;
+	size_t		redzone_size;
 
 	/* Interval between regions (reg_size + (redzone_size << 1)). */
-	size_t			reg_interval;
+	size_t		reg_interval;
 
 	/* Total size of a run for this bin's size class. */
-	size_t			run_size;
+	size_t		run_size;
 
 	/* Total number of regions in a run for this bin's size class. */
-	uint32_t		nregs;
+	uint32_t	nregs;
 
 	/*
	 * Metadata used to manipulate bitmaps for runs associated with this
	 * bin.
	 */
-	bitmap_info_t		bitmap_info;
+	bitmap_info_t	bitmap_info;
 
 	/* Offset of first region in a run for this bin's size class. */
-	uint32_t		reg0_offset;
-};
-
-struct arena_decay_s {
-	/*
-	 * Approximate time in seconds from the creation of a set of unused
-	 * dirty pages until an equivalent set of unused dirty pages is purged
-	 * and/or reused.
-	 */
-	ssize_t			time;
-	/* time / SMOOTHSTEP_NSTEPS. */
-	nstime_t		interval;
-	/*
-	 * Time at which the current decay interval logically started.  We do
-	 * not actually advance to a new epoch until sometime after it starts
-	 * because of scheduling and computation delays, and it is even possible
-	 * to completely skip epochs.  In all cases, during epoch advancement we
-	 * merge all relevant activity into the most recently recorded epoch.
-	 */
-	nstime_t		epoch;
-	/* Deadline randomness generator. */
-	uint64_t		jitter_state;
-	/*
-	 * Deadline for current epoch.  This is the sum of interval and per
-	 * epoch jitter which is a uniform random variable in [0..interval).
-	 * Epochs always advance by precise multiples of interval, but we
-	 * randomize the deadline to reduce the likelihood of arenas purging in
-	 * lockstep.
-	 */
-	nstime_t		deadline;
-	/*
-	 * Number of dirty pages at beginning of current epoch.  During epoch
-	 * advancement we use the delta between arena->decay.ndirty and
-	 * arena->ndirty to determine how many dirty pages, if any, were
-	 * generated.
-	 */
-	size_t			ndirty;
-	/*
-	 * Trailing log of how many unused dirty pages were generated during
-	 * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
-	 * element is the most recent epoch.  Corresponding epoch times are
-	 * relative to epoch.
-	 */
-	size_t			backlog[SMOOTHSTEP_NSTEPS];
+	uint32_t	reg0_offset;
 };
 
 struct arena_bin_s {
@@ -316,25 +251,25 @@
	 * which may be acquired while holding one or more bin locks, but not
	 * vise versa.
	 */
-	malloc_mutex_t		lock;
+	malloc_mutex_t	lock;
 
 	/*
	 * Current run being used to service allocations of this bin's size
	 * class.
	 */
-	arena_run_t		*runcur;
+	arena_run_t	*runcur;
 
 	/*
-	 * Heap of non-full runs.  This heap is used when looking for an
+	 * Tree of non-full runs.  This tree is used when looking for an
	 * existing run when runcur is no longer usable.  We choose the
	 * non-full run that is lowest in memory; this policy tends to keep
	 * objects packed well, and it can also help reduce the number of
	 * almost-empty chunks.
	 */
-	arena_run_heap_t	runs;
+	arena_run_tree_t	runs;
 
 	/* Bin statistics. */
-	malloc_bin_stats_t	stats;
+	malloc_bin_stats_t	stats;
 };
 
 struct arena_s {
@@ -342,23 +277,15 @@ struct arena_s {
 	unsigned		ind;
 
 	/*
-	 * Number of threads currently assigned to this arena, synchronized via
-	 * atomic operations.  Each thread has two distinct assignments, one for
-	 * application-serving allocation, and the other for internal metadata
-	 * allocation.  Internal metadata must not be allocated from arenas
-	 * created via the arenas.extend mallctl, because the arena.<i>.reset
-	 * mallctl indiscriminately discards all allocations for the affected
-	 * arena.
-	 *
-	 * 0: Application allocation.
-	 * 1: Internal metadata allocation.
+	 * Number of threads currently assigned to this arena.  This field is
+	 * protected by arenas_lock.
	 */
-	unsigned		nthreads[2];
+	unsigned		nthreads;
 
 	/*
	 * There are three classes of arena operations from a locking
	 * perspective:
-	 * 1) Thread assignment (modifies nthreads) is synchronized via atomics.
+	 * 1) Thread assignment (modifies nthreads) is protected by arenas_lock.
	 * 2) Bin-related operations are protected by bin locks.
	 * 3) Chunk- and run-related operations are protected by this mutex.
	 */
@@ -378,16 +305,10 @@
	 * PRNG state for cache index randomization of large allocation base
	 * pointers.
	 */
-	size_t			offset_state;
+	uint64_t		offset_state;
 
 	dss_prec_t		dss_prec;
 
-	/* Extant arena chunks. */
-	ql_head(extent_node_t)	achunks;
-
-	/* Extent serial number generator state. */
-	size_t			extent_sn_next;
-
 	/*
	 * In order to avoid rapid chunk allocation/deallocation when an arena
	 * oscillates right on the cusp of needing a new chunk, cache the most
@@ -403,7 +324,7 @@ struct arena_s {
 	/* Minimum ratio (log base 2) of nactive:ndirty. */
 	ssize_t			lg_dirty_mult;
 
-	/* True if a thread is currently executing arena_purge_to_limit(). */
+	/* True if a thread is currently executing arena_purge(). */
 	bool			purging;
 
 	/* Number of pages in active runs and huge regions. */
@@ -418,6 +339,12 @@ struct arena_s {
 	size_t			ndirty;
 
 	/*
+	 * Size/address-ordered tree of this arena's available runs.  The tree
+	 * is used for first-best-fit run allocation.
+	 */
+	arena_avail_tree_t	runs_avail;
+
+	/*
	 * Unused dirty memory this arena manages.  Dirty memory is conceptually
	 * tracked as an arbitrarily interleaved LRU of dirty runs and cached
	 * chunks, but the list linkage is actually semi-duplicated in order to
@@ -448,9 +375,6 @@ struct arena_s {
 	arena_runs_dirty_link_t	runs_dirty;
 	extent_node_t		chunks_cache;
 
-	/* Decay-based purging state. */
-	arena_decay_t		decay;
-
 	/* Extant huge allocations. */
 	ql_head(extent_node_t)	huge;
 	/* Synchronizes all huge allocation/update/deallocation. */
@@ -463,9 +387,9 @@ struct arena_s {
	 * orderings are needed, which is why there are two trees with the same
	 * contents.
	 */
-	extent_tree_t		chunks_szsnad_cached;
+	extent_tree_t		chunks_szad_cached;
 	extent_tree_t		chunks_ad_cached;
-	extent_tree_t		chunks_szsnad_retained;
+	extent_tree_t		chunks_szad_retained;
 	extent_tree_t		chunks_ad_retained;
 
 	malloc_mutex_t		chunks_mtx;
@@ -478,19 +402,6 @@ struct arena_s {
 
 	/* bins is used to store trees of free regions. */
 	arena_bin_t		bins[NBINS];
-
-	/*
-	 * Size-segregated address-ordered heaps of this arena's available runs,
-	 * used for first-best-fit run allocation.  Runs are quantized, i.e.
-	 * they reside in the last heap which corresponds to a size class less
-	 * than or equal to the run size.
-	 */
-	arena_run_heap_t	runs_avail[NPSIZES];
-};
-
-/* Used in conjunction with tsd for fast arena-related context lookup. */
-struct arena_tdata_s {
-	ticker_t		decay_ticker;
 };
 #endif /* JEMALLOC_ARENA_STRUCTS_B */
 
@@ -506,10 +417,7 @@ static const size_t	large_pad =
 #endif
     ;
 
-extern purge_mode_t	opt_purge;
-extern const char	*purge_mode_names[];
 extern ssize_t		opt_lg_dirty_mult;
-extern ssize_t		opt_decay_time;
 
 extern arena_bin_info_t	arena_bin_info[NBINS];
 
@@ -520,37 +428,27 @@ extern size_t	large_maxclass; /* Max large size class. */
 extern unsigned	nlclasses; /* Number of large size classes. */
 extern unsigned	nhclasses; /* Number of huge size classes. */
 
-#ifdef JEMALLOC_JET
-typedef size_t (run_quantize_t)(size_t);
-extern run_quantize_t *run_quantize_floor;
-extern run_quantize_t *run_quantize_ceil;
-#endif
 void	arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node,
     bool cache);
 void	arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node,
     bool cache);
-extent_node_t *arena_node_alloc(tsdn_t *tsdn, arena_t *arena);
-void	arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node);
-void	*arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
-    size_t alignment, size_t *sn, bool *zero);
-void	arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk,
-    size_t usize, size_t sn);
-void	arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena,
-    void *chunk, size_t oldsize, size_t usize);
-void	arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,
-    void *chunk, size_t oldsize, size_t usize, size_t sn);
-bool	arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
-    void *chunk, size_t oldsize, size_t usize, bool *zero);
-ssize_t	arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena);
-bool	arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena,
-    ssize_t lg_dirty_mult);
-ssize_t	arena_decay_time_get(tsdn_t *tsdn, arena_t *arena);
-bool	arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time);
-void	arena_purge(tsdn_t *tsdn, arena_t *arena, bool all);
-void	arena_maybe_purge(tsdn_t *tsdn, arena_t *arena);
-void	arena_reset(tsd_t *tsd, arena_t *arena);
-void	arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena,
-    tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
+extent_node_t *arena_node_alloc(arena_t *arena);
+void	arena_node_dalloc(arena_t *arena, extent_node_t *node);
+void	*arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
+    bool *zero);
+void	arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize);
+void	arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk,
+    size_t oldsize, size_t usize);
+void	arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk,
+    size_t oldsize, size_t usize);
+bool	arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk,
+    size_t oldsize, size_t usize, bool *zero);
+ssize_t	arena_lg_dirty_mult_get(arena_t *arena);
+bool	arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult);
+void	arena_maybe_purge(arena_t *arena);
+void	arena_purge_all(arena_t *arena);
+void	arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
+    szind_t binind, uint64_t prof_accumbytes);
 void	arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
     bool zero);
 #ifdef JEMALLOC_JET
@@ -563,100 +461,75 @@ extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
 void	arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
 #endif
 void	arena_quarantine_junk_small(void *ptr, size_t usize);
-void	*arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t ind,
-    bool zero);
-void	*arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
-    szind_t ind, bool zero);
-void	*arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
+void	*arena_malloc_small(arena_t *arena, size_t size, bool zero);
+void	*arena_malloc_large(arena_t *arena, size_t size, bool zero);
+void	*arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize,
     size_t alignment, bool zero, tcache_t *tcache);
-void	arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size);
-void	arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
-    arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm);
-void	arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
-    void *ptr, size_t pageind, arena_chunk_map_bits_t *bitselm);
-void	arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
-    void *ptr, size_t pageind);
+void	arena_prof_promoted(const void *ptr, size_t size);
+void	arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk,
+    void *ptr, arena_chunk_map_bits_t *bitselm);
+void	arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+    size_t pageind, arena_chunk_map_bits_t *bitselm);
+void	arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+    size_t pageind);
 #ifdef JEMALLOC_JET
 typedef void (arena_dalloc_junk_large_t)(void *, size_t);
 extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
 #else
 void	arena_dalloc_junk_large(void *ptr, size_t usize);
 #endif
-void	arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
-    arena_chunk_t *chunk, void *ptr);
-void	arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+void	arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
     void *ptr);
+void	arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
 #ifdef JEMALLOC_JET
 typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
 extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
 #endif
-bool	arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
-    size_t size, size_t extra, bool zero);
+bool	arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
+    size_t extra, bool zero);
 void	*arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
     size_t size, size_t alignment, bool zero, tcache_t *tcache);
-dss_prec_t	arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena);
-bool	arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec);
+dss_prec_t	arena_dss_prec_get(arena_t *arena);
+bool	arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
 ssize_t	arena_lg_dirty_mult_default_get(void);
 bool	arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult);
-ssize_t	arena_decay_time_default_get(void);
-bool	arena_decay_time_default_set(ssize_t decay_time);
-void	arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
-    unsigned *nthreads, const char **dss, ssize_t *lg_dirty_mult,
-    ssize_t *decay_time, size_t *nactive, size_t *ndirty);
-void	arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
-    const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
-    size_t *nactive, size_t *ndirty, arena_stats_t *astats,
-    malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
-    malloc_huge_stats_t *hstats);
-unsigned	arena_nthreads_get(arena_t *arena, bool internal);
-void	arena_nthreads_inc(arena_t *arena, bool internal);
-void	arena_nthreads_dec(arena_t *arena, bool internal);
-size_t	arena_extent_sn_next(arena_t *arena);
-arena_t	*arena_new(tsdn_t *tsdn, unsigned ind);
-void	arena_boot(void);
-void	arena_prefork0(tsdn_t *tsdn, arena_t *arena);
-void	arena_prefork1(tsdn_t *tsdn, arena_t *arena);
-void	arena_prefork2(tsdn_t *tsdn, arena_t *arena);
-void	arena_prefork3(tsdn_t *tsdn, arena_t *arena);
-void	arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
-void	arena_postfork_child(tsdn_t *tsdn, arena_t *arena);
+void	arena_stats_merge(arena_t *arena, const char **dss,
+    ssize_t *lg_dirty_mult, size_t *nactive, size_t *ndirty,
+    arena_stats_t *astats, malloc_bin_stats_t *bstats,
+    malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats);
+arena_t	*arena_new(unsigned ind);
+bool	arena_boot(void);
+void	arena_prefork(arena_t *arena);
+void	arena_postfork_parent(arena_t *arena);
+void	arena_postfork_child(arena_t *arena);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
 #ifdef JEMALLOC_H_INLINES
 
 #ifndef JEMALLOC_ENABLE_INLINE
-arena_chunk_map_bits_t	*arena_bitselm_get_mutable(arena_chunk_t *chunk,
+arena_chunk_map_bits_t	*arena_bitselm_get(arena_chunk_t *chunk,
     size_t pageind);
-const arena_chunk_map_bits_t	*arena_bitselm_get_const(
-    const arena_chunk_t *chunk, size_t pageind);
-arena_chunk_map_misc_t	*arena_miscelm_get_mutable(arena_chunk_t *chunk,
+arena_chunk_map_misc_t	*arena_miscelm_get(arena_chunk_t *chunk,
     size_t pageind);
-const arena_chunk_map_misc_t	*arena_miscelm_get_const(
-    const arena_chunk_t *chunk, size_t pageind);
-size_t	arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm);
-void	*arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm);
+size_t	arena_miscelm_to_pageind(arena_chunk_map_misc_t *miscelm);
+void	*arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm);
 arena_chunk_map_misc_t	*arena_rd_to_miscelm(arena_runs_dirty_link_t *rd);
 arena_chunk_map_misc_t	*arena_run_to_miscelm(arena_run_t *run);
-size_t	*arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind);
-const size_t	*arena_mapbitsp_get_const(const arena_chunk_t *chunk,
-    size_t pageind);
-size_t	arena_mapbitsp_read(const size_t *mapbitsp);
-size_t	arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind);
+size_t	*arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
+size_t	arena_mapbitsp_read(size_t *mapbitsp);
+size_t	arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
 size_t	arena_mapbits_size_decode(size_t mapbits);
-size_t	arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk,
-    size_t pageind);
-size_t	arena_mapbits_large_size_get(const arena_chunk_t *chunk,
-    size_t pageind);
-size_t	arena_mapbits_small_runind_get(const arena_chunk_t *chunk,
+size_t	arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
     size_t pageind);
-szind_t	arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind);
-size_t	arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind);
-size_t	arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind);
-size_t	arena_mapbits_decommitted_get(const arena_chunk_t *chunk,
-    size_t pageind);
-size_t	arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind);
-size_t	arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind);
+size_t	arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
+size_t	arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
+szind_t	arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
+size_t	arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
+size_t	arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
+size_t	arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind);
+size_t	arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
+size_t	arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
 void	arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
 size_t	arena_mapbits_size_encode(size_t size);
 void	arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
@@ -676,31 +549,27 @@ void	arena_metadata_allocated_sub(arena_t *arena, size_t size);
 size_t	arena_metadata_allocated_get(arena_t *arena);
 bool	arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
 bool	arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
-bool	arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes);
+bool	arena_prof_accum(arena_t *arena, uint64_t accumbytes);
 szind_t	arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
 szind_t	arena_bin_index(arena_t *arena, arena_bin_t *bin);
-size_t	arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
+unsigned	arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
     const void *ptr);
-prof_tctx_t	*arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
-void	arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
-    prof_tctx_t *tctx);
-void	arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
+prof_tctx_t	*arena_prof_tctx_get(const void *ptr);
+void	arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
+void	arena_prof_tctx_reset(const void *ptr, size_t usize,
     const void *old_ptr, prof_tctx_t *old_tctx);
-void	arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks);
-void	arena_decay_tick(tsdn_t *tsdn, arena_t *arena);
-void	*arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
-    bool zero, tcache_t *tcache, bool slow_path);
+void	*arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
+    tcache_t *tcache);
 arena_t	*arena_aalloc(const void *ptr);
-size_t	arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote);
-void	arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path);
-void	arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
-    bool slow_path);
+size_t	arena_salloc(const void *ptr, bool demote);
+void	arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
+void	arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
 # ifdef JEMALLOC_ARENA_INLINE_A
 JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t *
-arena_bitselm_get_mutable(arena_chunk_t *chunk, size_t pageind)
+arena_bitselm_get(arena_chunk_t *chunk, size_t pageind)
 {
 
 	assert(pageind >= map_bias);
@@ -709,15 +578,8 @@ arena_bitselm_get_mutable(arena_chunk_t *chunk, size_t pageind)
 	return (&chunk->map_bits[pageind-map_bias]);
 }
 
-JEMALLOC_ALWAYS_INLINE const arena_chunk_map_bits_t *
-arena_bitselm_get_const(const arena_chunk_t *chunk, size_t pageind)
-{
-
-	return (arena_bitselm_get_mutable((arena_chunk_t *)chunk, pageind));
-}
-
 JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
-arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind)
+arena_miscelm_get(arena_chunk_t *chunk, size_t pageind)
 {
 
 	assert(pageind >= map_bias);
@@ -727,15 +589,8 @@ arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind)
 	    (uintptr_t)map_misc_offset) + pageind-map_bias);
 }
 
-JEMALLOC_ALWAYS_INLINE const arena_chunk_map_misc_t *
-arena_miscelm_get_const(const arena_chunk_t *chunk, size_t pageind)
-{
-
-	return (arena_miscelm_get_mutable((arena_chunk_t *)chunk, pageind));
-}
-
 JEMALLOC_ALWAYS_INLINE size_t
-arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm)
+arena_miscelm_to_pageind(arena_chunk_map_misc_t *miscelm)
 {
 	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
 	size_t pageind = ((uintptr_t)miscelm - ((uintptr_t)chunk +
@@ -748,7 +603,7 @@ arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm)
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm)
+arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm)
 {
 	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
 	size_t pageind = arena_miscelm_to_pageind(miscelm);
@@ -781,31 +636,24 @@ arena_run_to_miscelm(arena_run_t *run)
 }
 
 JEMALLOC_ALWAYS_INLINE size_t *
-arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind)
+arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
 {
 
-	return (&arena_bitselm_get_mutable(chunk, pageind)->bits);
-}
-
-JEMALLOC_ALWAYS_INLINE const size_t *
-arena_mapbitsp_get_const(const arena_chunk_t *chunk, size_t pageind)
-{
-
-	return (arena_mapbitsp_get_mutable((arena_chunk_t *)chunk, pageind));
+	return (&arena_bitselm_get(chunk, pageind)->bits);
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbitsp_read(const size_t *mapbitsp)
+arena_mapbitsp_read(size_t *mapbitsp)
 {
 
 	return (*mapbitsp);
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
 {
 
-	return (arena_mapbitsp_read(arena_mapbitsp_get_const(chunk, pageind)));
+	return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind)));
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
@@ -825,7 +673,7 @@ arena_mapbits_size_decode(size_t mapbits)
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
 {
 	size_t mapbits;
 
@@ -835,7 +683,7 @@ arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, size_t pageind)
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_large_size_get(const arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
 {
 	size_t mapbits;
 
@@ -846,7 +694,7 @@ arena_mapbits_large_size_get(const arena_chunk_t *chunk, size_t pageind)
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_small_runind_get(const arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
 {
 	size_t mapbits;
 
@@ -857,7 +705,7 @@ arena_mapbits_small_runind_get(const arena_chunk_t *chunk, size_t pageind)
 }
 
 JEMALLOC_ALWAYS_INLINE szind_t
-arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
 {
 	size_t mapbits;
 	szind_t binind;
@@ -869,7 +717,7 @@ arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind)
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
 {
 	size_t mapbits;
 
@@ -880,7 +728,7 @@ arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind)
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
 {
 	size_t mapbits;
 
@@ -891,7 +739,7 @@ arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind)
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_decommitted_get(const arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind)
 {
 	size_t mapbits;
 
@@ -902,7 +750,7 @@ arena_mapbits_decommitted_get(const arena_chunk_t *chunk, size_t pageind)
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
 {
 	size_t mapbits;
 
@@ -911,7 +759,7 @@ arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind)
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
 {
 	size_t mapbits;
 
@@ -947,7 +795,7 @@ JEMALLOC_ALWAYS_INLINE void
 arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
     size_t flags)
 {
-	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
+	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
 
 	assert((size & PAGE_MASK) == 0);
 	assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
@@ -961,7 +809,7 @@ JEMALLOC_ALWAYS_INLINE void
 arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
     size_t size)
 {
-	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
+	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
 	size_t mapbits = arena_mapbitsp_read(mapbitsp);
 
 	assert((size & PAGE_MASK) == 0);
@@ -973,7 +821,7 @@ arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
 JEMALLOC_ALWAYS_INLINE void
 arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t flags)
 {
-	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
+	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
 
 	assert((flags & CHUNK_MAP_UNZEROED) == flags);
 	arena_mapbitsp_write(mapbitsp, flags);
@@ -983,7 +831,7 @@ JEMALLOC_ALWAYS_INLINE void
 arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
     size_t flags)
 {
-	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
+	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
 
 	assert((size & PAGE_MASK) == 0);
 	assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
@@ -998,7 +846,7 @@ JEMALLOC_ALWAYS_INLINE void
 arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
     szind_t binind)
 {
-	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
+	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
 	size_t mapbits = arena_mapbitsp_read(mapbitsp);
 
 	assert(binind <= BININD_INVALID);
@@ -1012,7 +860,7 @@ JEMALLOC_ALWAYS_INLINE void
 arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
     szind_t binind, size_t flags)
 {
-	size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
+	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
 
 	assert(binind < BININD_INVALID);
 	assert(pageind - runind >= map_bias);
@@ -1069,7 +917,7 @@ arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
 }
 
 JEMALLOC_INLINE bool
-arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes)
+arena_prof_accum(arena_t *arena, uint64_t accumbytes)
 {
 
 	cassert(config_prof);
@@ -1080,9 +928,9 @@ arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes)
 	{
 		bool ret;
 
-		malloc_mutex_lock(tsdn, &arena->lock);
+		malloc_mutex_lock(&arena->lock);
 		ret = arena_prof_accum_impl(arena, accumbytes);
-		malloc_mutex_unlock(tsdn, &arena->lock);
+		malloc_mutex_unlock(&arena->lock);
 		return (ret);
 	}
 }
@@ -1100,12 +948,12 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
 	size_t pageind;
 	size_t actual_mapbits;
 	size_t rpages_ind;
-	const arena_run_t *run;
+	arena_run_t *run;
 	arena_bin_t *bin;
 	szind_t run_binind, actual_binind;
 	arena_bin_info_t *bin_info;
-	const arena_chunk_map_misc_t *miscelm;
-	const void *rpages;
+	arena_chunk_map_misc_t *miscelm;
+	void *rpages;
 
 	assert(binind != BININD_INVALID);
 	assert(binind < NBINS);
@@ -1118,11 +966,11 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
 		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
 		rpages_ind = pageind - arena_mapbits_small_runind_get(chunk,
 		    pageind);
-		miscelm = arena_miscelm_get_const(chunk, rpages_ind);
+		miscelm = arena_miscelm_get(chunk, rpages_ind);
 		run = &miscelm->run;
 		run_binind = run->binind;
 		bin = &arena->bins[run_binind];
-		actual_binind = (szind_t)(bin - arena->bins);
+		actual_binind = bin - arena->bins;
 		assert(run_binind == actual_binind);
 		bin_info = &arena_bin_info[actual_binind];
 		rpages = arena_miscelm_to_rpages(miscelm);
@@ -1139,15 +987,16 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
 JEMALLOC_INLINE szind_t
 arena_bin_index(arena_t *arena, arena_bin_t *bin)
 {
-	szind_t binind = (szind_t)(bin - arena->bins);
+	szind_t binind = bin - arena->bins;
 	assert(binind < NBINS);
 	return (binind);
 }
 
-JEMALLOC_INLINE size_t
+JEMALLOC_INLINE unsigned
 arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
 {
-	size_t diff, interval, shift, regind;
+	unsigned shift, diff, regind;
+	size_t interval;
 	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
 	void *rpages = arena_miscelm_to_rpages(miscelm);
 
@@ -1162,12 +1011,12 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
	 * Avoid doing division with a variable divisor if possible.  Using
	 * actual division here can reduce allocator throughput by over 20%!
	 */
-	diff = (size_t)((uintptr_t)ptr - (uintptr_t)rpages -
+	diff = (unsigned)((uintptr_t)ptr - (uintptr_t)rpages -
	    bin_info->reg0_offset);
 
 	/* Rescale (factor powers of 2 out of the numerator and denominator). */
 	interval = bin_info->reg_interval;
-	shift = ffs_zu(interval) - 1;
+	shift = jemalloc_ffs(interval) - 1;
 	diff >>= shift;
 	interval >>= shift;
 
@@ -1189,9 +1038,9 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
		 * divide by 0, and 1 and 2 are both powers of two, which are
		 * handled above.
		 */
-#define	SIZE_INV_SHIFT	((sizeof(size_t) << 3) - LG_RUN_MAXREGS)
-#define	SIZE_INV(s)	(((ZU(1) << SIZE_INV_SHIFT) / (s)) + 1)
-		static const size_t interval_invs[] = {
+#define	SIZE_INV_SHIFT	((sizeof(unsigned) << 3) - LG_RUN_MAXREGS)
+#define	SIZE_INV(s)	(((1U << SIZE_INV_SHIFT) / (s)) + 1)
+		static const unsigned interval_invs[] = {
		    SIZE_INV(3),
		    SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
		    SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
@@ -1202,8 +1051,8 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
		    SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
		};
 
-		if (likely(interval <= ((sizeof(interval_invs) / sizeof(size_t))
-		    + 2))) {
+		if (likely(interval <= ((sizeof(interval_invs) /
+		    sizeof(unsigned)) + 2))) {
			regind = (diff * interval_invs[interval - 3]) >>
			    SIZE_INV_SHIFT;
		} else
@@ -1218,7 +1067,7 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
 }
 
 JEMALLOC_INLINE prof_tctx_t *
-arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
+arena_prof_tctx_get(const void *ptr)
 {
 	prof_tctx_t *ret;
 	arena_chunk_t *chunk;
@@ -1234,19 +1083,18 @@ arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
		if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
			ret = (prof_tctx_t *)(uintptr_t)1U;
		else {
-			arena_chunk_map_misc_t *elm =
-			    arena_miscelm_get_mutable(chunk, pageind);
+			arena_chunk_map_misc_t *elm = arena_miscelm_get(chunk,
+			    pageind);
			ret = atomic_read_p(&elm->prof_tctx_pun);
		}
	} else
-		ret = huge_prof_tctx_get(tsdn, ptr);
+		ret = huge_prof_tctx_get(ptr);
 
	return (ret);
 }
 
 JEMALLOC_INLINE void
-arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
-    prof_tctx_t *tctx)
+arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
 {
 	arena_chunk_t *chunk;
 
@@ -1265,7 +1113,7 @@ arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
 
			assert(arena_mapbits_large_get(chunk, pageind) != 0);
 
-			elm = arena_miscelm_get_mutable(chunk, pageind);
+			elm = arena_miscelm_get(chunk, pageind);
			atomic_write_p(&elm->prof_tctx_pun, tctx);
		} else {
			/*
@@ -1277,12 +1125,12 @@ arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
			assert(arena_mapbits_large_get(chunk, pageind) == 0);
		}
	} else
-		huge_prof_tctx_set(tsdn, ptr, tctx);
+		huge_prof_tctx_set(ptr, tctx);
 }
 
 JEMALLOC_INLINE void
-arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
-    const void *old_ptr, prof_tctx_t *old_tctx)
+arena_prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
+    prof_tctx_t *old_tctx)
 {
 
	cassert(config_prof);
@@ -1301,59 +1149,43 @@ arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
			    0);
			assert(arena_mapbits_large_get(chunk, pageind) != 0);
 
-			elm = arena_miscelm_get_mutable(chunk, pageind);
+			elm = arena_miscelm_get(chunk, pageind);
			atomic_write_p(&elm->prof_tctx_pun,
			    (prof_tctx_t *)(uintptr_t)1U);
		} else
-			huge_prof_tctx_reset(tsdn, ptr);
+			huge_prof_tctx_reset(ptr);
	}
 }
 
-JEMALLOC_ALWAYS_INLINE void
-arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks)
-{
-	tsd_t *tsd;
-	ticker_t *decay_ticker;
-
-	if (unlikely(tsdn_null(tsdn)))
-		return;
-	tsd = tsdn_tsd(tsdn);
-	decay_ticker = decay_ticker_get(tsd, arena->ind);
-	if (unlikely(decay_ticker == NULL))
-		return;
-	if (unlikely(ticker_ticks(decay_ticker, nticks)))
-		arena_purge(tsdn, arena, false);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_decay_tick(tsdn_t *tsdn, arena_t *arena)
-{
-
-	arena_decay_ticks(tsdn, arena, 1);
-}
-
 JEMALLOC_ALWAYS_INLINE void *
-arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
-    tcache_t *tcache, bool slow_path)
+arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
+    tcache_t *tcache)
 {
 
-	assert(!tsdn_null(tsdn) || tcache == NULL);
	assert(size != 0);
 
-	if (likely(tcache != NULL)) {
-		if (likely(size <= SMALL_MAXCLASS)) {
-			return (tcache_alloc_small(tsdn_tsd(tsdn), arena,
-			    tcache, size, ind, zero, slow_path));
-		}
-		if (likely(size <= tcache_maxclass)) {
-			return (tcache_alloc_large(tsdn_tsd(tsdn), arena,
-			    tcache, size, ind, zero, slow_path));
-		}
-		/* (size > tcache_maxclass) case falls through. */
-		assert(size > tcache_maxclass);
-	}
+	arena = arena_choose(tsd, arena);
+	if (unlikely(arena == NULL))
+		return (NULL);
 
-	return (arena_malloc_hard(tsdn, arena, size, ind, zero));
+	if (likely(size <= SMALL_MAXCLASS)) {
+		if (likely(tcache != NULL)) {
+			return (tcache_alloc_small(tsd, arena, tcache, size,
+			    zero));
+		} else
+			return (arena_malloc_small(arena, size, zero));
+	} else if (likely(size <= large_maxclass)) {
+		/*
+		 * Initialize tcache after checking size in order to avoid
+		 * infinite recursion during tcache initialization.
+		 */
+		if (likely(tcache != NULL) && size <= tcache_maxclass) {
+			return (tcache_alloc_large(tsd, arena, tcache, size,
+			    zero));
+		} else
+			return (arena_malloc_large(arena, size, zero));
+	} else
+		return (huge_malloc(tsd, arena, size, zero, tcache));
 }
 
 JEMALLOC_ALWAYS_INLINE arena_t *
@@ -1370,7 +1202,7 @@ arena_aalloc(const void *ptr)
 
 /* Return the size of the allocation pointed to by ptr. */
 JEMALLOC_ALWAYS_INLINE size_t
-arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote)
+arena_salloc(const void *ptr, bool demote)
 {
 	size_t ret;
 	arena_chunk_t *chunk;
@@ -1413,18 +1245,17 @@ arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote)
			ret = index2size(binind);
		}
	} else
-		ret = huge_salloc(tsdn, ptr);
+		ret = huge_salloc(ptr);
 
	return (ret);
 }
 
 JEMALLOC_ALWAYS_INLINE void
-arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path)
+arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
 {
 	arena_chunk_t *chunk;
 	size_t pageind, mapbits;
 
-	assert(!tsdn_null(tsdn) || tcache == NULL);
	assert(ptr != NULL);
 
	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
@@ -1437,12 +1268,10 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path)
			if (likely(tcache != NULL)) {
				szind_t binind = arena_ptr_small_binind_get(ptr,
				    mapbits);
-				tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
-				    binind, slow_path);
+				tcache_dalloc_small(tsd, tcache, ptr, binind);
			} else {
-				arena_dalloc_small(tsdn,
-				    extent_node_arena_get(&chunk->node), chunk,
-				    ptr, pageind);
+				arena_dalloc_small(extent_node_arena_get(
+				    &chunk->node), chunk, ptr, pageind);
			}
		} else {
			size_t size = arena_mapbits_large_size_get(chunk,
@@ -1453,33 +1282,28 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path)
 
			if (likely(tcache != NULL) && size - large_pad <=
			    tcache_maxclass) {
-				tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
-				    size - large_pad, slow_path);
+				tcache_dalloc_large(tsd, tcache, ptr, size -
+				    large_pad);
			} else {
-				arena_dalloc_large(tsdn,
-				    extent_node_arena_get(&chunk->node), chunk,
-				    ptr);
+				arena_dalloc_large(extent_node_arena_get(
+				    &chunk->node), chunk, ptr);
			}
		}
	} else
-		huge_dalloc(tsdn, ptr);
+		huge_dalloc(tsd, ptr, tcache);
 }
 
 JEMALLOC_ALWAYS_INLINE void
-arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
-    bool slow_path)
+arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
 {
 	arena_chunk_t *chunk;
 
-	assert(!tsdn_null(tsdn) || tcache == NULL);
-
	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (likely(chunk != ptr)) {
		if (config_prof && opt_prof) {
			size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
			    LG_PAGE;
-			assert(arena_mapbits_allocated_get(chunk, pageind) !=
-			    0);
+			assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
			if (arena_mapbits_large_get(chunk, pageind) != 0) {
				/*
				 * Make sure to use promoted size, not request
@@ -1489,36 +1313,32 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
				    pageind) - large_pad;
			}
		}
-		assert(s2u(size) == s2u(arena_salloc(tsdn, ptr, false)));
+		assert(s2u(size) == s2u(arena_salloc(ptr, false)));
 
		if (likely(size <= SMALL_MAXCLASS)) {
			/* Small allocation. */
			if (likely(tcache != NULL)) {
				szind_t binind = size2index(size);
-				tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
-				    binind, slow_path);
+				tcache_dalloc_small(tsd, tcache, ptr, binind);
			} else {
				size_t pageind = ((uintptr_t)ptr -
				    (uintptr_t)chunk) >> LG_PAGE;
-				arena_dalloc_small(tsdn,
-				    extent_node_arena_get(&chunk->node), chunk,
-				    ptr, pageind);
+				arena_dalloc_small(extent_node_arena_get(
+				    &chunk->node), chunk, ptr, pageind);
			}
		} else {
			assert(config_cache_oblivious || ((uintptr_t)ptr &
			    PAGE_MASK) == 0);
 
-			if (likely(tcache != NULL) && size <= tcache_maxclass) {
-				tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
-				    size, slow_path);
-			} else {
-				arena_dalloc_large(tsdn,
-				    extent_node_arena_get(&chunk->node), chunk,
-				    ptr);
+			if (likely(tcache != NULL) && size <= tcache_maxclass)
+				tcache_dalloc_large(tsd, tcache, ptr, size);
			else {
+				arena_dalloc_large(extent_node_arena_get(
+				    &chunk->node), chunk, ptr);
			}
		}
	} else
-		huge_dalloc(tsdn, ptr);
+		huge_dalloc(tsd, ptr, tcache);
 }
 # endif /* JEMALLOC_ARENA_INLINE_B */
 #endif
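
Note on the restored runs_avail structure: the comment added by this diff describes a size/address-ordered tree used for first-best-fit run allocation. Below is a minimal sketch of that ordering in C, under stated assumptions: run_avail_node_t, its fields, and run_avail_comp are illustrative stand-ins, since the real tree links arena_chunk_map_misc_t nodes through rb_link and derives each run's size and address from the chunk's page map.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for a runs_avail tree node (not jemalloc's type). */
typedef struct {
	size_t size;	/* usable run size in bytes */
	void *addr;	/* run base address */
} run_avail_node_t;

/*
 * Order primarily by size, secondarily by address.  An in-order search for
 * the first node whose size covers a request then yields the smallest,
 * lowest-addressed run that fits ("first-best-fit").
 */
static int
run_avail_comp(const run_avail_node_t *a, const run_avail_node_t *b)
{
	if (a->size != b->size)
		return ((a->size < b->size) ? -1 : 1);
	/* Ties on size break toward lower addresses, keeping memory packed. */
	if (a->addr != b->addr)
		return (((uintptr_t)a->addr < (uintptr_t)b->addr) ? -1 : 1);
	return (0);
}

int
main(void)
{
	run_avail_node_t small_lo = { 4096, (void *)0x1000 };
	run_avail_node_t small_hi = { 4096, (void *)0x2000 };
	run_avail_node_t big = { 8192, (void *)0x0800 };

	/* Smaller size sorts first; equal sizes order by address. */
	printf("%d %d\n", run_avail_comp(&small_lo, &big),
	    run_avail_comp(&small_lo, &small_hi));	/* prints: -1 -1 */
	return (0);
}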
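Note on arena_run_regind: the hunk above keeps the comment that dividing by a variable divisor can cost over 20% of allocator throughput, so the function multiplies by a precomputed fixed-point inverse instead. A self-contained sketch of that technique follows, with illustrative constants: the shift of 21 stands in for (sizeof(unsigned) << 3) - LG_RUN_MAXREGS, and regind/inv are hypothetical names, not jemalloc's.

#include <assert.h>
#include <stdio.h>

#define	SIZE_INV_SHIFT	21	/* illustrative; supports indices below 2^11 */
#define	SIZE_INV(s)	(((1U << SIZE_INV_SHIFT) / (s)) + 1)

/* Compute diff / interval without a variable-divisor division. */
static unsigned
regind(unsigned diff, unsigned interval)
{
	/* Precomputed inverses for the non-trivial divisors 3..8. */
	static const unsigned inv[] = {
		SIZE_INV(3), SIZE_INV(4), SIZE_INV(5),
		SIZE_INV(6), SIZE_INV(7), SIZE_INV(8),
	};

	if ((interval & (interval - 1)) == 0) {
		/* Power-of-two interval: a shift suffices. */
		return (diff >> (__builtin_ffs((int)interval) - 1));
	}
	/*
	 * (diff * (floor(2^SHIFT / interval) + 1)) >> SHIFT equals
	 * diff / interval as long as the accumulated rounding error stays
	 * below 2^SHIFT, which holds for indices below 2^11 with these
	 * small divisors.
	 */
	return ((diff * inv[interval - 3]) >> SIZE_INV_SHIFT);
}

int
main(void)
{
	/* Region start offsets are exact multiples of the interval. */
	for (unsigned interval = 3; interval <= 8; interval++) {
		for (unsigned i = 0; i < 2048; i++)
			assert(regind(i * interval, interval) == i);
	}
	printf("inverse-multiply division matches exact division\n");
	return (0);
}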
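Note on the chunk != ptr test: arena_dalloc and arena_sdalloc first mask the pointer with CHUNK_ADDR2BASE and treat a pointer equal to its own chunk base as a huge allocation. A hedged sketch of why the mask works, assuming 4 MiB naturally aligned chunks for illustration; the real alignment is jemalloc's configured chunksize.

#include <stdint.h>
#include <stdio.h>

#define	LG_CHUNK	22	/* assumed 4 MiB chunks, illustration only */
#define	CHUNK_ADDR2BASE(p)						\
	((void *)((uintptr_t)(p) & ~((1UL << LG_CHUNK) - 1)))

int
main(void)
{
	/*
	 * Chunks are aligned to their own size, so masking the low bits of
	 * any interior pointer recovers the owning chunk header.
	 */
	char *chunk = (char *)CHUNK_ADDR2BASE((void *)0x40C05010UL);
	void *interior = chunk + 4096;

	printf("base=%p interior maps to %p\n", (void *)chunk,
	    CHUNK_ADDR2BASE(interior));
	/* The base maps to itself, which signals a huge allocation. */
	printf("huge? %d\n", CHUNK_ADDR2BASE(chunk) == (void *)chunk);
	return (0);
}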