author    Guy Benoish <guy.benoish@redislabs.com>  2017-05-09 18:42:32 +0300
committer Guy Benoish <guy.benoish@redislabs.com>  2017-05-09 18:42:32 +0300
commit    89a9e5a9a2dba7bcdb4ef32fa73f105a14923c1a (patch)
tree      4c160d8b97a3792e00a0e2e9ff79ea605371dc1c /deps/jemalloc/include
parent    71a8df6a2b6fe37a625636a3902fcc713c7c4919 (diff)
parent    a4c7f34d3ab9a182a26d6a6f59f87d253929100d (diff)
Merge branch 'unstable' of https://github.com/antirez/redis into unstable
Diffstat (limited to 'deps/jemalloc/include')
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/arena.h | 582
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/assert.h | 45
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/atomic.h | 4
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/base.h | 11
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/bitmap.h | 76
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/chunk.h | 38
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/chunk_dss.h | 10
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/chunk_mmap.h | 4
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/ckh.h | 6
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/ctl.h | 29
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/extent.h | 43
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/hash.h | 33
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/huge.h | 21
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in | 440
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h | 11
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in | 73
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/mb.h | 10
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/mutex.h | 62
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/nstime.h | 48
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/pages.h | 7
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/ph.h | 345
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/private_symbols.txt | 252
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/prng.h | 195
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/prof.h | 86
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/rb.h | 208
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/rtree.h | 160
-rwxr-xr-x  deps/jemalloc/include/jemalloc/internal/size_classes.sh | 50
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/smoothstep.h | 246
-rwxr-xr-x  deps/jemalloc/include/jemalloc/internal/smoothstep.sh | 115
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/spin.h | 51
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/stats.h | 14
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/tcache.h | 159
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/ticker.h | 75
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/tsd.h | 164
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/util.h | 214
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/valgrind.h | 40
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/witness.h | 266
-rw-r--r--  deps/jemalloc/include/jemalloc/jemalloc_defs.h.in | 8
-rw-r--r--  deps/jemalloc/include/jemalloc/jemalloc_macros.h.in | 61
-rw-r--r--  deps/jemalloc/include/msvc_compat/strings.h | 30
-rw-r--r--  deps/jemalloc/include/msvc_compat/windows_extra.h | 22
41 files changed, 971 insertions, 3343 deletions
diff --git a/deps/jemalloc/include/jemalloc/internal/arena.h b/deps/jemalloc/include/jemalloc/internal/arena.h
index ce4e6029e..12c617979 100644
--- a/deps/jemalloc/include/jemalloc/internal/arena.h
+++ b/deps/jemalloc/include/jemalloc/internal/arena.h
@@ -23,29 +23,14 @@
*/
#define LG_DIRTY_MULT_DEFAULT 3
-typedef enum {
- purge_mode_ratio = 0,
- purge_mode_decay = 1,
-
- purge_mode_limit = 2
-} purge_mode_t;
-#define PURGE_DEFAULT purge_mode_ratio
-/* Default decay time in seconds. */
-#define DECAY_TIME_DEFAULT 10
-/* Number of event ticks between time checks. */
-#define DECAY_NTICKS_PER_UPDATE 1000
-
typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t;
-typedef struct arena_avail_links_s arena_avail_links_t;
typedef struct arena_run_s arena_run_t;
typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t;
typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t;
typedef struct arena_chunk_s arena_chunk_t;
typedef struct arena_bin_info_s arena_bin_info_t;
-typedef struct arena_decay_s arena_decay_t;
typedef struct arena_bin_s arena_bin_t;
typedef struct arena_s arena_t;
-typedef struct arena_tdata_s arena_tdata_t;
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
@@ -155,13 +140,13 @@ struct arena_runs_dirty_link_s {
*/
struct arena_chunk_map_misc_s {
/*
- * Linkage for run heaps. There are two disjoint uses:
+ * Linkage for run trees. There are two disjoint uses:
*
- * 1) arena_t's runs_avail heaps.
+ * 1) arena_t's runs_avail tree.
* 2) arena_run_t conceptually uses this linkage for in-use non-full
* runs, rather than directly embedding linkage.
*/
- phn(arena_chunk_map_misc_t) ph_link;
+ rb_node(arena_chunk_map_misc_t) rb_link;
union {
/* Linkage for list of dirty runs. */
@@ -169,15 +154,16 @@ struct arena_chunk_map_misc_s {
/* Profile counters, used for large object runs. */
union {
- void *prof_tctx_pun;
- prof_tctx_t *prof_tctx;
+ void *prof_tctx_pun;
+ prof_tctx_t *prof_tctx;
};
/* Small region run metadata. */
arena_run_t run;
};
};
-typedef ph(arena_chunk_map_misc_t) arena_run_heap_t;
+typedef rb_tree(arena_chunk_map_misc_t) arena_avail_tree_t;
+typedef rb_tree(arena_chunk_map_misc_t) arena_run_tree_t;
#endif /* JEMALLOC_ARENA_STRUCTS_A */
#ifdef JEMALLOC_ARENA_STRUCTS_B
@@ -191,14 +177,6 @@ struct arena_chunk_s {
extent_node_t node;
/*
- * True if memory could be backed by transparent huge pages. This is
- * only directly relevant to Linux, since it is the only supported
- * platform on which jemalloc interacts with explicit transparent huge
- * page controls.
- */
- bool hugepage;
-
- /*
* Map of pages within chunk that keeps track of free/large/small. The
* first map_bias entries are omitted, since the chunk header does not
* need to be tracked in the map. This omission saves a header page
@@ -242,71 +220,28 @@ struct arena_chunk_s {
*/
struct arena_bin_info_s {
/* Size of regions in a run for this bin's size class. */
- size_t reg_size;
+ size_t reg_size;
/* Redzone size. */
- size_t redzone_size;
+ size_t redzone_size;
/* Interval between regions (reg_size + (redzone_size << 1)). */
- size_t reg_interval;
+ size_t reg_interval;
/* Total size of a run for this bin's size class. */
- size_t run_size;
+ size_t run_size;
/* Total number of regions in a run for this bin's size class. */
- uint32_t nregs;
+ uint32_t nregs;
/*
* Metadata used to manipulate bitmaps for runs associated with this
* bin.
*/
- bitmap_info_t bitmap_info;
+ bitmap_info_t bitmap_info;
/* Offset of first region in a run for this bin's size class. */
- uint32_t reg0_offset;
-};
-
-struct arena_decay_s {
- /*
- * Approximate time in seconds from the creation of a set of unused
- * dirty pages until an equivalent set of unused dirty pages is purged
- * and/or reused.
- */
- ssize_t time;
- /* time / SMOOTHSTEP_NSTEPS. */
- nstime_t interval;
- /*
- * Time at which the current decay interval logically started. We do
- * not actually advance to a new epoch until sometime after it starts
- * because of scheduling and computation delays, and it is even possible
- * to completely skip epochs. In all cases, during epoch advancement we
- * merge all relevant activity into the most recently recorded epoch.
- */
- nstime_t epoch;
- /* Deadline randomness generator. */
- uint64_t jitter_state;
- /*
- * Deadline for current epoch. This is the sum of interval and per
- * epoch jitter which is a uniform random variable in [0..interval).
- * Epochs always advance by precise multiples of interval, but we
- * randomize the deadline to reduce the likelihood of arenas purging in
- * lockstep.
- */
- nstime_t deadline;
- /*
- * Number of dirty pages at beginning of current epoch. During epoch
- * advancement we use the delta between arena->decay.ndirty and
- * arena->ndirty to determine how many dirty pages, if any, were
- * generated.
- */
- size_t ndirty;
- /*
- * Trailing log of how many unused dirty pages were generated during
- * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
- * element is the most recent epoch. Corresponding epoch times are
- * relative to epoch.
- */
- size_t backlog[SMOOTHSTEP_NSTEPS];
+ uint32_t reg0_offset;
};
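
The arena_decay_s comments removed above describe decay-based purging bookkeeping: dirty pages generated per epoch are appended to a SMOOTHSTEP_NSTEPS-long backlog, and a smoothstep weight table decides how many dirty pages may still be retained, so recent activity counts almost fully and old activity barely at all. Below is a rough standalone sketch of that idea only; the real weights live in smoothstep.h (listed in the diffstat but not shown here), and NSTEPS plus the w[] values are made-up stand-ins.

#include <stdio.h>

#define NSTEPS	8			/* stand-in for SMOOTHSTEP_NSTEPS */

static size_t backlog[NSTEPS];		/* last element is the most recent epoch */

static void
epoch_advance(size_t new_dirty)		/* dirty pages generated this epoch */
{
	int i;

	for (i = 0; i < NSTEPS - 1; i++)
		backlog[i] = backlog[i + 1];
	backlog[NSTEPS - 1] = new_dirty;
}

static size_t
npages_limit(void)
{
	/* Hypothetical smooth weights in [0,1], oldest epoch first. */
	static const double w[NSTEPS] =
	    {0.03, 0.10, 0.23, 0.39, 0.61, 0.77, 0.90, 0.97};
	double sum = 0.0;
	int i;

	for (i = 0; i < NSTEPS; i++)
		sum += w[i] * (double)backlog[i];
	return ((size_t)sum);		/* pages allowed to remain dirty */
}

int
main(void)
{
	int i;

	epoch_advance(1000);		/* a burst of dirty pages ... */
	for (i = 0; i < NSTEPS; i++) {	/* ... followed by idle epochs */
		printf("epoch +%d: keep at most %zu dirty pages\n", i,
		    npages_limit());
		epoch_advance(0);
	}
	return (0);
}

As the burst ages through the backlog its weight shrinks, so the retained-dirty limit decays smoothly toward zero over the configured decay time.
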
struct arena_bin_s {
@@ -316,25 +251,25 @@ struct arena_bin_s {
* which may be acquired while holding one or more bin locks, but not
* vise versa.
*/
- malloc_mutex_t lock;
+ malloc_mutex_t lock;
/*
* Current run being used to service allocations of this bin's size
* class.
*/
- arena_run_t *runcur;
+ arena_run_t *runcur;
/*
- * Heap of non-full runs. This heap is used when looking for an
+ * Tree of non-full runs. This tree is used when looking for an
* existing run when runcur is no longer usable. We choose the
* non-full run that is lowest in memory; this policy tends to keep
* objects packed well, and it can also help reduce the number of
* almost-empty chunks.
*/
- arena_run_heap_t runs;
+ arena_run_tree_t runs;
/* Bin statistics. */
- malloc_bin_stats_t stats;
+ malloc_bin_stats_t stats;
};
struct arena_s {
@@ -342,23 +277,15 @@ struct arena_s {
unsigned ind;
/*
- * Number of threads currently assigned to this arena, synchronized via
- * atomic operations. Each thread has two distinct assignments, one for
- * application-serving allocation, and the other for internal metadata
- * allocation. Internal metadata must not be allocated from arenas
- * created via the arenas.extend mallctl, because the arena.<i>.reset
- * mallctl indiscriminately discards all allocations for the affected
- * arena.
- *
- * 0: Application allocation.
- * 1: Internal metadata allocation.
+ * Number of threads currently assigned to this arena. This field is
+ * protected by arenas_lock.
*/
- unsigned nthreads[2];
+ unsigned nthreads;
/*
* There are three classes of arena operations from a locking
* perspective:
- * 1) Thread assignment (modifies nthreads) is synchronized via atomics.
+ * 1) Thread assignment (modifies nthreads) is protected by arenas_lock.
* 2) Bin-related operations are protected by bin locks.
* 3) Chunk- and run-related operations are protected by this mutex.
*/
@@ -378,16 +305,10 @@ struct arena_s {
* PRNG state for cache index randomization of large allocation base
* pointers.
*/
- size_t offset_state;
+ uint64_t offset_state;
dss_prec_t dss_prec;
- /* Extant arena chunks. */
- ql_head(extent_node_t) achunks;
-
- /* Extent serial number generator state. */
- size_t extent_sn_next;
-
/*
* In order to avoid rapid chunk allocation/deallocation when an arena
* oscillates right on the cusp of needing a new chunk, cache the most
@@ -403,7 +324,7 @@ struct arena_s {
/* Minimum ratio (log base 2) of nactive:ndirty. */
ssize_t lg_dirty_mult;
- /* True if a thread is currently executing arena_purge_to_limit(). */
+ /* True if a thread is currently executing arena_purge(). */
bool purging;
/* Number of pages in active runs and huge regions. */
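
lg_dirty_mult above is the minimum log2 ratio of nactive to ndirty, so LG_DIRTY_MULT_DEFAULT of 3 tolerates at most roughly one dirty page per eight active pages. A simplified sketch of that trigger follows; should_purge() is a made-up name, and the real check in arena.c also applies minimums not shown here.

#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>	/* ssize_t */

static bool
should_purge(size_t nactive, size_t ndirty, ssize_t lg_dirty_mult)
{
	if (lg_dirty_mult < 0)
		return (false);	/* ratio-based purging disabled */
	return (ndirty > (nactive >> lg_dirty_mult));
}

int
main(void)
{
	/* With lg_dirty_mult == 3, the threshold is nactive/8 == 1000. */
	printf("%d\n", should_purge(8000, 900, 3));	/* 0: 900 <= 1000 */
	printf("%d\n", should_purge(8000, 1100, 3));	/* 1: 1100 > 1000 */
	return (0);
}
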
@@ -418,6 +339,12 @@ struct arena_s {
size_t ndirty;
/*
+ * Size/address-ordered tree of this arena's available runs. The tree
+ * is used for first-best-fit run allocation.
+ */
+ arena_avail_tree_t runs_avail;
+
+ /*
* Unused dirty memory this arena manages. Dirty memory is conceptually
* tracked as an arbitrarily interleaved LRU of dirty runs and cached
* chunks, but the list linkage is actually semi-duplicated in order to
@@ -448,9 +375,6 @@ struct arena_s {
arena_runs_dirty_link_t runs_dirty;
extent_node_t chunks_cache;
- /* Decay-based purging state. */
- arena_decay_t decay;
-
/* Extant huge allocations. */
ql_head(extent_node_t) huge;
/* Synchronizes all huge allocation/update/deallocation. */
@@ -463,9 +387,9 @@ struct arena_s {
* orderings are needed, which is why there are two trees with the same
* contents.
*/
- extent_tree_t chunks_szsnad_cached;
+ extent_tree_t chunks_szad_cached;
extent_tree_t chunks_ad_cached;
- extent_tree_t chunks_szsnad_retained;
+ extent_tree_t chunks_szad_retained;
extent_tree_t chunks_ad_retained;
malloc_mutex_t chunks_mtx;
@@ -478,19 +402,6 @@ struct arena_s {
/* bins is used to store trees of free regions. */
arena_bin_t bins[NBINS];
-
- /*
- * Size-segregated address-ordered heaps of this arena's available runs,
- * used for first-best-fit run allocation. Runs are quantized, i.e.
- * they reside in the last heap which corresponds to a size class less
- * than or equal to the run size.
- */
- arena_run_heap_t runs_avail[NPSIZES];
-};
-
-/* Used in conjunction with tsd for fast arena-related context lookup. */
-struct arena_tdata_s {
- ticker_t decay_ticker;
};
#endif /* JEMALLOC_ARENA_STRUCTS_B */
@@ -506,10 +417,7 @@ static const size_t large_pad =
#endif
;
-extern purge_mode_t opt_purge;
-extern const char *purge_mode_names[];
extern ssize_t opt_lg_dirty_mult;
-extern ssize_t opt_decay_time;
extern arena_bin_info_t arena_bin_info[NBINS];
@@ -520,37 +428,27 @@ extern size_t large_maxclass; /* Max large size class. */
extern unsigned nlclasses; /* Number of large size classes. */
extern unsigned nhclasses; /* Number of huge size classes. */
-#ifdef JEMALLOC_JET
-typedef size_t (run_quantize_t)(size_t);
-extern run_quantize_t *run_quantize_floor;
-extern run_quantize_t *run_quantize_ceil;
-#endif
void arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node,
bool cache);
void arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node,
bool cache);
-extent_node_t *arena_node_alloc(tsdn_t *tsdn, arena_t *arena);
-void arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node);
-void *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
- size_t alignment, size_t *sn, bool *zero);
-void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk,
- size_t usize, size_t sn);
-void arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena,
- void *chunk, size_t oldsize, size_t usize);
-void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,
- void *chunk, size_t oldsize, size_t usize, size_t sn);
-bool arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
- void *chunk, size_t oldsize, size_t usize, bool *zero);
-ssize_t arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena);
-bool arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena,
- ssize_t lg_dirty_mult);
-ssize_t arena_decay_time_get(tsdn_t *tsdn, arena_t *arena);
-bool arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time);
-void arena_purge(tsdn_t *tsdn, arena_t *arena, bool all);
-void arena_maybe_purge(tsdn_t *tsdn, arena_t *arena);
-void arena_reset(tsd_t *tsd, arena_t *arena);
-void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena,
- tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
+extent_node_t *arena_node_alloc(arena_t *arena);
+void arena_node_dalloc(arena_t *arena, extent_node_t *node);
+void *arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
+ bool *zero);
+void arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize);
+void arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk,
+ size_t oldsize, size_t usize);
+void arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk,
+ size_t oldsize, size_t usize);
+bool arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk,
+ size_t oldsize, size_t usize, bool *zero);
+ssize_t arena_lg_dirty_mult_get(arena_t *arena);
+bool arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult);
+void arena_maybe_purge(arena_t *arena);
+void arena_purge_all(arena_t *arena);
+void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
+ szind_t binind, uint64_t prof_accumbytes);
void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
bool zero);
#ifdef JEMALLOC_JET
@@ -563,100 +461,75 @@ extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
#endif
void arena_quarantine_junk_small(void *ptr, size_t usize);
-void *arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t ind,
- bool zero);
-void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
- szind_t ind, bool zero);
-void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
+void *arena_malloc_small(arena_t *arena, size_t size, bool zero);
+void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
+void *arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize,
size_t alignment, bool zero, tcache_t *tcache);
-void arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size);
-void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
- arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm);
-void arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- void *ptr, size_t pageind, arena_chunk_map_bits_t *bitselm);
-void arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- void *ptr, size_t pageind);
+void arena_prof_promoted(const void *ptr, size_t size);
+void arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, arena_chunk_map_bits_t *bitselm);
+void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ size_t pageind, arena_chunk_map_bits_t *bitselm);
+void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ size_t pageind);
#ifdef JEMALLOC_JET
typedef void (arena_dalloc_junk_large_t)(void *, size_t);
extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
#else
void arena_dalloc_junk_large(void *ptr, size_t usize);
#endif
-void arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
- arena_chunk_t *chunk, void *ptr);
-void arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+void arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
void *ptr);
+void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
#ifdef JEMALLOC_JET
typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
#endif
-bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
- size_t size, size_t extra, bool zero);
+bool arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
+ size_t extra, bool zero);
void *arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
size_t size, size_t alignment, bool zero, tcache_t *tcache);
-dss_prec_t arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena);
-bool arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec);
+dss_prec_t arena_dss_prec_get(arena_t *arena);
+bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
ssize_t arena_lg_dirty_mult_default_get(void);
bool arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult);
-ssize_t arena_decay_time_default_get(void);
-bool arena_decay_time_default_set(ssize_t decay_time);
-void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
- unsigned *nthreads, const char **dss, ssize_t *lg_dirty_mult,
- ssize_t *decay_time, size_t *nactive, size_t *ndirty);
-void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
- const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
- size_t *nactive, size_t *ndirty, arena_stats_t *astats,
- malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
- malloc_huge_stats_t *hstats);
-unsigned arena_nthreads_get(arena_t *arena, bool internal);
-void arena_nthreads_inc(arena_t *arena, bool internal);
-void arena_nthreads_dec(arena_t *arena, bool internal);
-size_t arena_extent_sn_next(arena_t *arena);
-arena_t *arena_new(tsdn_t *tsdn, unsigned ind);
-void arena_boot(void);
-void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
-void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
-void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
-void arena_prefork3(tsdn_t *tsdn, arena_t *arena);
-void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
-void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);
+void arena_stats_merge(arena_t *arena, const char **dss,
+ ssize_t *lg_dirty_mult, size_t *nactive, size_t *ndirty,
+ arena_stats_t *astats, malloc_bin_stats_t *bstats,
+ malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats);
+arena_t *arena_new(unsigned ind);
+bool arena_boot(void);
+void arena_prefork(arena_t *arena);
+void arena_postfork_parent(arena_t *arena);
+void arena_postfork_child(arena_t *arena);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
-arena_chunk_map_bits_t *arena_bitselm_get_mutable(arena_chunk_t *chunk,
+arena_chunk_map_bits_t *arena_bitselm_get(arena_chunk_t *chunk,
size_t pageind);
-const arena_chunk_map_bits_t *arena_bitselm_get_const(
- const arena_chunk_t *chunk, size_t pageind);
-arena_chunk_map_misc_t *arena_miscelm_get_mutable(arena_chunk_t *chunk,
+arena_chunk_map_misc_t *arena_miscelm_get(arena_chunk_t *chunk,
size_t pageind);
-const arena_chunk_map_misc_t *arena_miscelm_get_const(
- const arena_chunk_t *chunk, size_t pageind);
-size_t arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm);
-void *arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm);
+size_t arena_miscelm_to_pageind(arena_chunk_map_misc_t *miscelm);
+void *arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm);
arena_chunk_map_misc_t *arena_rd_to_miscelm(arena_runs_dirty_link_t *rd);
arena_chunk_map_misc_t *arena_run_to_miscelm(arena_run_t *run);
-size_t *arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind);
-const size_t *arena_mapbitsp_get_const(const arena_chunk_t *chunk,
- size_t pageind);
-size_t arena_mapbitsp_read(const size_t *mapbitsp);
-size_t arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind);
+size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbitsp_read(size_t *mapbitsp);
+size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_size_decode(size_t mapbits);
-size_t arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk,
- size_t pageind);
-size_t arena_mapbits_large_size_get(const arena_chunk_t *chunk,
- size_t pageind);
-size_t arena_mapbits_small_runind_get(const arena_chunk_t *chunk,
+size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
size_t pageind);
-szind_t arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_decommitted_get(const arena_chunk_t *chunk,
- size_t pageind);
-size_t arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
+szind_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
size_t arena_mapbits_size_encode(size_t size);
void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
@@ -676,31 +549,27 @@ void arena_metadata_allocated_sub(arena_t *arena, size_t size);
size_t arena_metadata_allocated_get(arena_t *arena);
bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
-bool arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes);
+bool arena_prof_accum(arena_t *arena, uint64_t accumbytes);
szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
-size_t arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
+unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
const void *ptr);
-prof_tctx_t *arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
-void arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
- prof_tctx_t *tctx);
-void arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
+prof_tctx_t *arena_prof_tctx_get(const void *ptr);
+void arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
+void arena_prof_tctx_reset(const void *ptr, size_t usize,
const void *old_ptr, prof_tctx_t *old_tctx);
-void arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks);
-void arena_decay_tick(tsdn_t *tsdn, arena_t *arena);
-void *arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
- bool zero, tcache_t *tcache, bool slow_path);
+void *arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
+ tcache_t *tcache);
arena_t *arena_aalloc(const void *ptr);
-size_t arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote);
-void arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path);
-void arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
- bool slow_path);
+size_t arena_salloc(const void *ptr, bool demote);
+void arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
+void arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
# ifdef JEMALLOC_ARENA_INLINE_A
JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t *
-arena_bitselm_get_mutable(arena_chunk_t *chunk, size_t pageind)
+arena_bitselm_get(arena_chunk_t *chunk, size_t pageind)
{
assert(pageind >= map_bias);
@@ -709,15 +578,8 @@ arena_bitselm_get_mutable(arena_chunk_t *chunk, size_t pageind)
return (&chunk->map_bits[pageind-map_bias]);
}
-JEMALLOC_ALWAYS_INLINE const arena_chunk_map_bits_t *
-arena_bitselm_get_const(const arena_chunk_t *chunk, size_t pageind)
-{
-
- return (arena_bitselm_get_mutable((arena_chunk_t *)chunk, pageind));
-}
-
JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
-arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind)
+arena_miscelm_get(arena_chunk_t *chunk, size_t pageind)
{
assert(pageind >= map_bias);
@@ -727,15 +589,8 @@ arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind)
(uintptr_t)map_misc_offset) + pageind-map_bias);
}
-JEMALLOC_ALWAYS_INLINE const arena_chunk_map_misc_t *
-arena_miscelm_get_const(const arena_chunk_t *chunk, size_t pageind)
-{
-
- return (arena_miscelm_get_mutable((arena_chunk_t *)chunk, pageind));
-}
-
JEMALLOC_ALWAYS_INLINE size_t
-arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm)
+arena_miscelm_to_pageind(arena_chunk_map_misc_t *miscelm)
{
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
size_t pageind = ((uintptr_t)miscelm - ((uintptr_t)chunk +
@@ -748,7 +603,7 @@ arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm)
}
JEMALLOC_ALWAYS_INLINE void *
-arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm)
+arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm)
{
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
size_t pageind = arena_miscelm_to_pageind(miscelm);
@@ -781,31 +636,24 @@ arena_run_to_miscelm(arena_run_t *run)
}
JEMALLOC_ALWAYS_INLINE size_t *
-arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind)
+arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
{
- return (&arena_bitselm_get_mutable(chunk, pageind)->bits);
-}
-
-JEMALLOC_ALWAYS_INLINE const size_t *
-arena_mapbitsp_get_const(const arena_chunk_t *chunk, size_t pageind)
-{
-
- return (arena_mapbitsp_get_mutable((arena_chunk_t *)chunk, pageind));
+ return (&arena_bitselm_get(chunk, pageind)->bits);
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbitsp_read(const size_t *mapbitsp)
+arena_mapbitsp_read(size_t *mapbitsp)
{
return (*mapbitsp);
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
{
- return (arena_mapbitsp_read(arena_mapbitsp_get_const(chunk, pageind)));
+ return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind)));
}
JEMALLOC_ALWAYS_INLINE size_t
@@ -825,7 +673,7 @@ arena_mapbits_size_decode(size_t mapbits)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -835,7 +683,7 @@ arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_large_size_get(const arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -846,7 +694,7 @@ arena_mapbits_large_size_get(const arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_small_runind_get(const arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -857,7 +705,7 @@ arena_mapbits_small_runind_get(const arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE szind_t
-arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
szind_t binind;
@@ -869,7 +717,7 @@ arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -880,7 +728,7 @@ arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -891,7 +739,7 @@ arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_decommitted_get(const arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -902,7 +750,7 @@ arena_mapbits_decommitted_get(const arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -911,7 +759,7 @@ arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -947,7 +795,7 @@ JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
{
- size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
+ size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
@@ -961,7 +809,7 @@ JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
size_t size)
{
- size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
+ size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
size_t mapbits = arena_mapbitsp_read(mapbitsp);
assert((size & PAGE_MASK) == 0);
@@ -973,7 +821,7 @@ arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t flags)
{
- size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
+ size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((flags & CHUNK_MAP_UNZEROED) == flags);
arena_mapbitsp_write(mapbitsp, flags);
@@ -983,7 +831,7 @@ JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
{
- size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
+ size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
@@ -998,7 +846,7 @@ JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
szind_t binind)
{
- size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
+ size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
size_t mapbits = arena_mapbitsp_read(mapbitsp);
assert(binind <= BININD_INVALID);
@@ -1012,7 +860,7 @@ JEMALLOC_ALWAYS_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
szind_t binind, size_t flags)
{
- size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
+ size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert(binind < BININD_INVALID);
assert(pageind - runind >= map_bias);
@@ -1069,7 +917,7 @@ arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
}
JEMALLOC_INLINE bool
-arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes)
+arena_prof_accum(arena_t *arena, uint64_t accumbytes)
{
cassert(config_prof);
@@ -1080,9 +928,9 @@ arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes)
{
bool ret;
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
ret = arena_prof_accum_impl(arena, accumbytes);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
return (ret);
}
}
@@ -1100,12 +948,12 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
size_t pageind;
size_t actual_mapbits;
size_t rpages_ind;
- const arena_run_t *run;
+ arena_run_t *run;
arena_bin_t *bin;
szind_t run_binind, actual_binind;
arena_bin_info_t *bin_info;
- const arena_chunk_map_misc_t *miscelm;
- const void *rpages;
+ arena_chunk_map_misc_t *miscelm;
+ void *rpages;
assert(binind != BININD_INVALID);
assert(binind < NBINS);
@@ -1118,11 +966,11 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
rpages_ind = pageind - arena_mapbits_small_runind_get(chunk,
pageind);
- miscelm = arena_miscelm_get_const(chunk, rpages_ind);
+ miscelm = arena_miscelm_get(chunk, rpages_ind);
run = &miscelm->run;
run_binind = run->binind;
bin = &arena->bins[run_binind];
- actual_binind = (szind_t)(bin - arena->bins);
+ actual_binind = bin - arena->bins;
assert(run_binind == actual_binind);
bin_info = &arena_bin_info[actual_binind];
rpages = arena_miscelm_to_rpages(miscelm);
@@ -1139,15 +987,16 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
JEMALLOC_INLINE szind_t
arena_bin_index(arena_t *arena, arena_bin_t *bin)
{
- szind_t binind = (szind_t)(bin - arena->bins);
+ szind_t binind = bin - arena->bins;
assert(binind < NBINS);
return (binind);
}
-JEMALLOC_INLINE size_t
+JEMALLOC_INLINE unsigned
arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
{
- size_t diff, interval, shift, regind;
+ unsigned shift, diff, regind;
+ size_t interval;
arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
void *rpages = arena_miscelm_to_rpages(miscelm);
@@ -1162,12 +1011,12 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
* Avoid doing division with a variable divisor if possible. Using
* actual division here can reduce allocator throughput by over 20%!
*/
- diff = (size_t)((uintptr_t)ptr - (uintptr_t)rpages -
+ diff = (unsigned)((uintptr_t)ptr - (uintptr_t)rpages -
bin_info->reg0_offset);
/* Rescale (factor powers of 2 out of the numerator and denominator). */
interval = bin_info->reg_interval;
- shift = ffs_zu(interval) - 1;
+ shift = jemalloc_ffs(interval) - 1;
diff >>= shift;
interval >>= shift;
@@ -1189,9 +1038,9 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
* divide by 0, and 1 and 2 are both powers of two, which are
* handled above.
*/
-#define SIZE_INV_SHIFT ((sizeof(size_t) << 3) - LG_RUN_MAXREGS)
-#define SIZE_INV(s) (((ZU(1) << SIZE_INV_SHIFT) / (s)) + 1)
- static const size_t interval_invs[] = {
+#define SIZE_INV_SHIFT ((sizeof(unsigned) << 3) - LG_RUN_MAXREGS)
+#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s)) + 1)
+ static const unsigned interval_invs[] = {
SIZE_INV(3),
SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
@@ -1202,8 +1051,8 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
};
- if (likely(interval <= ((sizeof(interval_invs) / sizeof(size_t))
- + 2))) {
+ if (likely(interval <= ((sizeof(interval_invs) /
+ sizeof(unsigned)) + 2))) {
regind = (diff * interval_invs[interval - 3]) >>
SIZE_INV_SHIFT;
} else
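
The interval_invs table in the hunk above replaces division by a variable region interval with a multiply by a precomputed fixed-point reciprocal followed by a shift, which the preceding comment notes is worth over 20% of allocator throughput. The standalone check below is not jemalloc code; the shift of 32 - 11 assumes LG_RUN_MAXREGS == 11, and it verifies that the multiply+shift result matches exact division whenever the offset is a multiple of the interval, as it is for region offsets.

#include <assert.h>
#include <stdio.h>

#define SIZE_INV_SHIFT	(32 - 11)	/* assumes LG_RUN_MAXREGS == 11 */
#define SIZE_INV(s)	(((1U << SIZE_INV_SHIFT) / (s)) + 1)

int
main(void)
{
	unsigned interval = 5;		/* odd part of a region interval */
	unsigned inv = SIZE_INV(interval);
	unsigned regind;

	for (regind = 0; regind < (1U << 11); regind++) {
		unsigned diff = regind * interval; /* offset of region regind */

		assert(((diff * inv) >> SIZE_INV_SHIFT) == diff / interval);
	}
	printf("multiply+shift matched division for %u regions\n", 1U << 11);
	return (0);
}
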
@@ -1218,7 +1067,7 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
}
JEMALLOC_INLINE prof_tctx_t *
-arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
+arena_prof_tctx_get(const void *ptr)
{
prof_tctx_t *ret;
arena_chunk_t *chunk;
@@ -1234,19 +1083,18 @@ arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
ret = (prof_tctx_t *)(uintptr_t)1U;
else {
- arena_chunk_map_misc_t *elm =
- arena_miscelm_get_mutable(chunk, pageind);
+ arena_chunk_map_misc_t *elm = arena_miscelm_get(chunk,
+ pageind);
ret = atomic_read_p(&elm->prof_tctx_pun);
}
} else
- ret = huge_prof_tctx_get(tsdn, ptr);
+ ret = huge_prof_tctx_get(ptr);
return (ret);
}
JEMALLOC_INLINE void
-arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
- prof_tctx_t *tctx)
+arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
{
arena_chunk_t *chunk;
@@ -1265,7 +1113,7 @@ arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
assert(arena_mapbits_large_get(chunk, pageind) != 0);
- elm = arena_miscelm_get_mutable(chunk, pageind);
+ elm = arena_miscelm_get(chunk, pageind);
atomic_write_p(&elm->prof_tctx_pun, tctx);
} else {
/*
@@ -1277,12 +1125,12 @@ arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
assert(arena_mapbits_large_get(chunk, pageind) == 0);
}
} else
- huge_prof_tctx_set(tsdn, ptr, tctx);
+ huge_prof_tctx_set(ptr, tctx);
}
JEMALLOC_INLINE void
-arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
- const void *old_ptr, prof_tctx_t *old_tctx)
+arena_prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
+ prof_tctx_t *old_tctx)
{
cassert(config_prof);
@@ -1301,59 +1149,43 @@ arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
0);
assert(arena_mapbits_large_get(chunk, pageind) != 0);
- elm = arena_miscelm_get_mutable(chunk, pageind);
+ elm = arena_miscelm_get(chunk, pageind);
atomic_write_p(&elm->prof_tctx_pun,
(prof_tctx_t *)(uintptr_t)1U);
} else
- huge_prof_tctx_reset(tsdn, ptr);
+ huge_prof_tctx_reset(ptr);
}
}
-JEMALLOC_ALWAYS_INLINE void
-arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks)
-{
- tsd_t *tsd;
- ticker_t *decay_ticker;
-
- if (unlikely(tsdn_null(tsdn)))
- return;
- tsd = tsdn_tsd(tsdn);
- decay_ticker = decay_ticker_get(tsd, arena->ind);
- if (unlikely(decay_ticker == NULL))
- return;
- if (unlikely(ticker_ticks(decay_ticker, nticks)))
- arena_purge(tsdn, arena, false);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_decay_tick(tsdn_t *tsdn, arena_t *arena)
-{
-
- arena_decay_ticks(tsdn, arena, 1);
-}
-
JEMALLOC_ALWAYS_INLINE void *
-arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
- tcache_t *tcache, bool slow_path)
+arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
+ tcache_t *tcache)
{
- assert(!tsdn_null(tsdn) || tcache == NULL);
assert(size != 0);
- if (likely(tcache != NULL)) {
- if (likely(size <= SMALL_MAXCLASS)) {
- return (tcache_alloc_small(tsdn_tsd(tsdn), arena,
- tcache, size, ind, zero, slow_path));
- }
- if (likely(size <= tcache_maxclass)) {
- return (tcache_alloc_large(tsdn_tsd(tsdn), arena,
- tcache, size, ind, zero, slow_path));
- }
- /* (size > tcache_maxclass) case falls through. */
- assert(size > tcache_maxclass);
- }
+ arena = arena_choose(tsd, arena);
+ if (unlikely(arena == NULL))
+ return (NULL);
- return (arena_malloc_hard(tsdn, arena, size, ind, zero));
+ if (likely(size <= SMALL_MAXCLASS)) {
+ if (likely(tcache != NULL)) {
+ return (tcache_alloc_small(tsd, arena, tcache, size,
+ zero));
+ } else
+ return (arena_malloc_small(arena, size, zero));
+ } else if (likely(size <= large_maxclass)) {
+ /*
+ * Initialize tcache after checking size in order to avoid
+ * infinite recursion during tcache initialization.
+ */
+ if (likely(tcache != NULL) && size <= tcache_maxclass) {
+ return (tcache_alloc_large(tsd, arena, tcache, size,
+ zero));
+ } else
+ return (arena_malloc_large(arena, size, zero));
+ } else
+ return (huge_malloc(tsd, arena, size, zero, tcache));
}
JEMALLOC_ALWAYS_INLINE arena_t *
@@ -1370,7 +1202,7 @@ arena_aalloc(const void *ptr)
/* Return the size of the allocation pointed to by ptr. */
JEMALLOC_ALWAYS_INLINE size_t
-arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote)
+arena_salloc(const void *ptr, bool demote)
{
size_t ret;
arena_chunk_t *chunk;
@@ -1413,18 +1245,17 @@ arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote)
ret = index2size(binind);
}
} else
- ret = huge_salloc(tsdn, ptr);
+ ret = huge_salloc(ptr);
return (ret);
}
JEMALLOC_ALWAYS_INLINE void
-arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path)
+arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
arena_chunk_t *chunk;
size_t pageind, mapbits;
- assert(!tsdn_null(tsdn) || tcache == NULL);
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
@@ -1437,12 +1268,10 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path)
if (likely(tcache != NULL)) {
szind_t binind = arena_ptr_small_binind_get(ptr,
mapbits);
- tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
- binind, slow_path);
+ tcache_dalloc_small(tsd, tcache, ptr, binind);
} else {
- arena_dalloc_small(tsdn,
- extent_node_arena_get(&chunk->node), chunk,
- ptr, pageind);
+ arena_dalloc_small(extent_node_arena_get(
+ &chunk->node), chunk, ptr, pageind);
}
} else {
size_t size = arena_mapbits_large_size_get(chunk,
@@ -1453,33 +1282,28 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path)
if (likely(tcache != NULL) && size - large_pad <=
tcache_maxclass) {
- tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
- size - large_pad, slow_path);
+ tcache_dalloc_large(tsd, tcache, ptr, size -
+ large_pad);
} else {
- arena_dalloc_large(tsdn,
- extent_node_arena_get(&chunk->node), chunk,
- ptr);
+ arena_dalloc_large(extent_node_arena_get(
+ &chunk->node), chunk, ptr);
}
}
} else
- huge_dalloc(tsdn, ptr);
+ huge_dalloc(tsd, ptr, tcache);
}
JEMALLOC_ALWAYS_INLINE void
-arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
- bool slow_path)
+arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
{
arena_chunk_t *chunk;
- assert(!tsdn_null(tsdn) || tcache == NULL);
-
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (likely(chunk != ptr)) {
if (config_prof && opt_prof) {
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
LG_PAGE;
- assert(arena_mapbits_allocated_get(chunk, pageind) !=
- 0);
+ assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
if (arena_mapbits_large_get(chunk, pageind) != 0) {
/*
* Make sure to use promoted size, not request
@@ -1489,36 +1313,32 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
pageind) - large_pad;
}
}
- assert(s2u(size) == s2u(arena_salloc(tsdn, ptr, false)));
+ assert(s2u(size) == s2u(arena_salloc(ptr, false)));
if (likely(size <= SMALL_MAXCLASS)) {
/* Small allocation. */
if (likely(tcache != NULL)) {
szind_t binind = size2index(size);
- tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
- binind, slow_path);
+ tcache_dalloc_small(tsd, tcache, ptr, binind);
} else {
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> LG_PAGE;
- arena_dalloc_small(tsdn,
- extent_node_arena_get(&chunk->node), chunk,
- ptr, pageind);
+ arena_dalloc_small(extent_node_arena_get(
+ &chunk->node), chunk, ptr, pageind);
}
} else {
assert(config_cache_oblivious || ((uintptr_t)ptr &
PAGE_MASK) == 0);
- if (likely(tcache != NULL) && size <= tcache_maxclass) {
- tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
- size, slow_path);
- } else {
- arena_dalloc_large(tsdn,
- extent_node_arena_get(&chunk->node), chunk,
- ptr);
+ if (likely(tcache != NULL) && size <= tcache_maxclass)
+ tcache_dalloc_large(tsd, tcache, ptr, size);
+ else {
+ arena_dalloc_large(extent_node_arena_get(
+ &chunk->node), chunk, ptr);
}
}
} else
- huge_dalloc(tsdn, ptr);
+ huge_dalloc(tsd, ptr, tcache);
}
# endif /* JEMALLOC_ARENA_INLINE_B */
#endif
diff --git a/deps/jemalloc/include/jemalloc/internal/assert.h b/deps/jemalloc/include/jemalloc/internal/assert.h
deleted file mode 100644
index 6f8f7eb93..000000000
--- a/deps/jemalloc/include/jemalloc/internal/assert.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Define a custom assert() in order to reduce the chances of deadlock during
- * assertion failure.
- */
-#ifndef assert
-#define assert(e) do { \
- if (unlikely(config_debug && !(e))) { \
- malloc_printf( \
- "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
- __FILE__, __LINE__, #e); \
- abort(); \
- } \
-} while (0)
-#endif
-
-#ifndef not_reached
-#define not_reached() do { \
- if (config_debug) { \
- malloc_printf( \
- "<jemalloc>: %s:%d: Unreachable code reached\n", \
- __FILE__, __LINE__); \
- abort(); \
- } \
- unreachable(); \
-} while (0)
-#endif
-
-#ifndef not_implemented
-#define not_implemented() do { \
- if (config_debug) { \
- malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
- __FILE__, __LINE__); \
- abort(); \
- } \
-} while (0)
-#endif
-
-#ifndef assert_not_implemented
-#define assert_not_implemented(e) do { \
- if (unlikely(config_debug && !(e))) \
- not_implemented(); \
-} while (0)
-#endif
-
-
diff --git a/deps/jemalloc/include/jemalloc/internal/atomic.h b/deps/jemalloc/include/jemalloc/internal/atomic.h
index 3f15ea149..a9aad35d1 100644
--- a/deps/jemalloc/include/jemalloc/internal/atomic.h
+++ b/deps/jemalloc/include/jemalloc/internal/atomic.h
@@ -28,8 +28,8 @@
* callers.
*
* <t> atomic_read_<t>(<t> *p) { return (*p); }
- * <t> atomic_add_<t>(<t> *p, <t> x) { return (*p += x); }
- * <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p -= x); }
+ * <t> atomic_add_<t>(<t> *p, <t> x) { return (*p + x); }
+ * <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p - x); }
* bool atomic_cas_<t>(<t> *p, <t> c, <t> s)
* {
* if (*p != c)
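
The comment block above documents that atomic_add_<t>/atomic_sub_<t> return the post-operation value of *p. A minimal stand-in using C11 atomics to illustrate that return convention; my_atomic_add_z is a made-up name, not jemalloc's API.

#include <stdatomic.h>
#include <stdio.h>

static size_t
my_atomic_add_z(_Atomic size_t *p, size_t x)
{
	/* Atomically add x to *p and return the resulting value. */
	return (atomic_fetch_add(p, x) + x);
}

int
main(void)
{
	_Atomic size_t n = 0;

	printf("%zu\n", my_atomic_add_z(&n, 5));	/* 5 */
	printf("%zu\n", my_atomic_add_z(&n, 3));	/* 8 */
	return (0);
}
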
diff --git a/deps/jemalloc/include/jemalloc/internal/base.h b/deps/jemalloc/include/jemalloc/internal/base.h
index d6b81e162..39e46ee44 100644
--- a/deps/jemalloc/include/jemalloc/internal/base.h
+++ b/deps/jemalloc/include/jemalloc/internal/base.h
@@ -9,13 +9,12 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-void *base_alloc(tsdn_t *tsdn, size_t size);
-void base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
- size_t *mapped);
+void *base_alloc(size_t size);
+void base_stats_get(size_t *allocated, size_t *resident, size_t *mapped);
bool base_boot(void);
-void base_prefork(tsdn_t *tsdn);
-void base_postfork_parent(tsdn_t *tsdn);
-void base_postfork_child(tsdn_t *tsdn);
+void base_prefork(void);
+void base_postfork_parent(void);
+void base_postfork_child(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/deps/jemalloc/include/jemalloc/internal/bitmap.h b/deps/jemalloc/include/jemalloc/internal/bitmap.h
index 36f38b59c..fcc6005c7 100644
--- a/deps/jemalloc/include/jemalloc/internal/bitmap.h
+++ b/deps/jemalloc/include/jemalloc/internal/bitmap.h
@@ -15,15 +15,6 @@ typedef unsigned long bitmap_t;
#define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS)
#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)
-/*
- * Do some analysis on how big the bitmap is before we use a tree. For a brute
- * force linear search, if we would have to call ffs_lu() more than 2^3 times,
- * use a tree instead.
- */
-#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3
-# define USE_TREE
-#endif
-
/* Number of groups required to store a given number of bits. */
#define BITMAP_BITS2GROUPS(nbits) \
((nbits + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)
@@ -57,8 +48,6 @@ typedef unsigned long bitmap_t;
/*
* Maximum number of groups required to support LG_BITMAP_MAXBITS.
*/
-#ifdef USE_TREE
-
#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2
@@ -76,12 +65,6 @@ typedef unsigned long bitmap_t;
(LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
+ !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
-#else /* USE_TREE */
-
-#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS)
-
-#endif /* USE_TREE */
-
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
@@ -95,7 +78,6 @@ struct bitmap_info_s {
/* Logical number of bits in bitmap (stored at bottom level). */
size_t nbits;
-#ifdef USE_TREE
/* Number of levels necessary for nbits. */
unsigned nlevels;
@@ -104,10 +86,6 @@ struct bitmap_info_s {
* bottom to top (e.g. the bottom level is stored in levels[0]).
*/
bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
-#else /* USE_TREE */
- /* Number of groups necessary for nbits. */
- size_t ngroups;
-#endif /* USE_TREE */
};
#endif /* JEMALLOC_H_STRUCTS */
@@ -115,8 +93,9 @@ struct bitmap_info_s {
#ifdef JEMALLOC_H_EXTERNS
void bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
+size_t bitmap_info_ngroups(const bitmap_info_t *binfo);
+size_t bitmap_size(size_t nbits);
void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo);
-size_t bitmap_size(const bitmap_info_t *binfo);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
@@ -134,20 +113,10 @@ void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
JEMALLOC_INLINE bool
bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
-#ifdef USE_TREE
- size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
+ unsigned rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
bitmap_t rg = bitmap[rgoff];
/* The bitmap is full iff the root group is 0. */
return (rg == 0);
-#else
- size_t i;
-
- for (i = 0; i < binfo->ngroups; i++) {
- if (bitmap[i] != 0)
- return (false);
- }
- return (true);
-#endif
}
JEMALLOC_INLINE bool
@@ -159,7 +128,7 @@ bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
assert(bit < binfo->nbits);
goff = bit >> LG_BITMAP_GROUP_NBITS;
g = bitmap[goff];
- return (!(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))));
+ return (!(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))));
}
JEMALLOC_INLINE void
@@ -174,11 +143,10 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
goff = bit >> LG_BITMAP_GROUP_NBITS;
gp = &bitmap[goff];
g = *gp;
- assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
- g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
+ assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
+ g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
assert(bitmap_get(bitmap, binfo, bit));
-#ifdef USE_TREE
/* Propagate group state transitions up the tree. */
if (g == 0) {
unsigned i;
@@ -187,14 +155,13 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
goff = bit >> LG_BITMAP_GROUP_NBITS;
gp = &bitmap[binfo->levels[i].group_offset + goff];
g = *gp;
- assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
- g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
+ assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
+ g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
if (g != 0)
break;
}
}
-#endif
}
/* sfu: set first unset. */
@@ -207,24 +174,15 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
assert(!bitmap_full(bitmap, binfo));
-#ifdef USE_TREE
i = binfo->nlevels - 1;
g = bitmap[binfo->levels[i].group_offset];
- bit = ffs_lu(g) - 1;
+ bit = jemalloc_ffsl(g) - 1;
while (i > 0) {
i--;
g = bitmap[binfo->levels[i].group_offset + bit];
- bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1);
+ bit = (bit << LG_BITMAP_GROUP_NBITS) + (jemalloc_ffsl(g) - 1);
}
-#else
- i = 0;
- g = bitmap[0];
- while ((bit = ffs_lu(g)) == 0) {
- i++;
- g = bitmap[i];
- }
- bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
-#endif
+
bitmap_set(bitmap, binfo, bit);
return (bit);
}
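
bitmap_sfu() above finds the first unset (free) bit by walking a multi-level tree: each level keeps one summary bit per group below it, and jemalloc_ffsl() picks the lowest candidate at every level, so the search costs a few word scans instead of a linear sweep. The two-level miniature below is a standalone illustration of the same idea with made-up names and sizes; like jemalloc's bitmap, it inverts the bit sense (1 means free) so that find-first-set locates the first free slot.

#include <assert.h>
#include <stdio.h>

#define GROUP_NBITS	(sizeof(unsigned long) * 8)
#define NGROUPS		4

static unsigned long leaves[NGROUPS];	/* bit set == slot free */
static unsigned long summary;		/* bit g set == leaves[g] has a free bit */

static void
bits_init(void)
{
	int g;

	for (g = 0; g < NGROUPS; g++)
		leaves[g] = ~0UL;
	summary = (1UL << NGROUPS) - 1;
}

static size_t
bits_sfu(void)	/* "set first unset", cf. bitmap_sfu() */
{
	int g, b;

	assert(summary != 0);
	g = __builtin_ffsl((long)summary) - 1;		/* first non-full leaf */
	b = __builtin_ffsl((long)leaves[g]) - 1;	/* first free bit in it */
	leaves[g] &= ~(1UL << b);
	if (leaves[g] == 0)		/* leaf became full: propagate upward */
		summary &= ~(1UL << g);
	return ((size_t)g * GROUP_NBITS + (size_t)b);
}

int
main(void)
{
	int i;

	bits_init();
	for (i = 0; i < 100; i++)
		assert(bits_sfu() == (size_t)i);	/* slots come out in order */
	printf("next free slot: %zu\n", bits_sfu());	/* 100 */
	return (0);
}
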
@@ -235,7 +193,7 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
size_t goff;
bitmap_t *gp;
bitmap_t g;
- UNUSED bool propagate;
+ bool propagate;
assert(bit < binfo->nbits);
assert(bitmap_get(bitmap, binfo, bit));
@@ -243,11 +201,10 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
gp = &bitmap[goff];
g = *gp;
propagate = (g == 0);
- assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
- g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
+ assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
+ g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
assert(!bitmap_get(bitmap, binfo, bit));
-#ifdef USE_TREE
/* Propagate group state transitions up the tree. */
if (propagate) {
unsigned i;
@@ -257,15 +214,14 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
gp = &bitmap[binfo->levels[i].group_offset + goff];
g = *gp;
propagate = (g == 0);
- assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)))
+ assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)))
== 0);
- g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
+ g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
if (!propagate)
break;
}
}
-#endif /* USE_TREE */
}
#endif
diff --git a/deps/jemalloc/include/jemalloc/internal/chunk.h b/deps/jemalloc/include/jemalloc/internal/chunk.h
index 50b9904b0..5d1938353 100644
--- a/deps/jemalloc/include/jemalloc/internal/chunk.h
+++ b/deps/jemalloc/include/jemalloc/internal/chunk.h
@@ -48,30 +48,32 @@ extern size_t chunk_npages;
extern const chunk_hooks_t chunk_hooks_default;
-chunk_hooks_t chunk_hooks_get(tsdn_t *tsdn, arena_t *arena);
-chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena,
+chunk_hooks_t chunk_hooks_get(arena_t *arena);
+chunk_hooks_t chunk_hooks_set(arena_t *arena,
const chunk_hooks_t *chunk_hooks);
-bool chunk_register(tsdn_t *tsdn, const void *chunk,
- const extent_node_t *node);
+bool chunk_register(const void *chunk, const extent_node_t *node);
void chunk_deregister(const void *chunk, const extent_node_t *node);
void *chunk_alloc_base(size_t size);
-void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
- size_t *sn, bool *zero, bool *commit, bool dalloc_node);
-void *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
- size_t *sn, bool *zero, bool *commit);
-void chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn,
- bool committed);
-void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn,
- bool zeroed, bool committed);
-bool chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset,
+void *chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *new_addr, size_t size, size_t alignment, bool *zero,
+ bool dalloc_node);
+void *chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit);
+void chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *chunk, size_t size, bool committed);
+void chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *chunk, size_t size, bool zeroed, bool committed);
+void chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *chunk, size_t size, bool committed);
+bool chunk_purge_arena(arena_t *arena, void *chunk, size_t offset,
size_t length);
+bool chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *chunk, size_t size, size_t offset, size_t length);
bool chunk_boot(void);
+void chunk_prefork(void);
+void chunk_postfork_parent(void);
+void chunk_postfork_child(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/deps/jemalloc/include/jemalloc/internal/chunk_dss.h b/deps/jemalloc/include/jemalloc/internal/chunk_dss.h
index da8511ba0..388f46be0 100644
--- a/deps/jemalloc/include/jemalloc/internal/chunk_dss.h
+++ b/deps/jemalloc/include/jemalloc/internal/chunk_dss.h
@@ -23,11 +23,13 @@ extern const char *dss_prec_names[];
dss_prec_t chunk_dss_prec_get(void);
bool chunk_dss_prec_set(dss_prec_t dss_prec);
-void *chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
- size_t size, size_t alignment, bool *zero, bool *commit);
+void *chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit);
bool chunk_in_dss(void *chunk);
-bool chunk_dss_mergeable(void *chunk_a, void *chunk_b);
-void chunk_dss_boot(void);
+bool chunk_dss_boot(void);
+void chunk_dss_prefork(void);
+void chunk_dss_postfork_parent(void);
+void chunk_dss_postfork_child(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/deps/jemalloc/include/jemalloc/internal/chunk_mmap.h b/deps/jemalloc/include/jemalloc/internal/chunk_mmap.h
index 6f2d0ac2e..7d8014c58 100644
--- a/deps/jemalloc/include/jemalloc/internal/chunk_mmap.h
+++ b/deps/jemalloc/include/jemalloc/internal/chunk_mmap.h
@@ -9,8 +9,8 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-void *chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment,
- bool *zero, bool *commit);
+void *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero,
+ bool *commit);
bool chunk_dalloc_mmap(void *chunk, size_t size);
#endif /* JEMALLOC_H_EXTERNS */
diff --git a/deps/jemalloc/include/jemalloc/internal/ckh.h b/deps/jemalloc/include/jemalloc/internal/ckh.h
index f75ad90b7..75c1c979f 100644
--- a/deps/jemalloc/include/jemalloc/internal/ckh.h
+++ b/deps/jemalloc/include/jemalloc/internal/ckh.h
@@ -40,7 +40,9 @@ struct ckh_s {
#endif
/* Used for pseudo-random number generation. */
- uint64_t prng_state;
+#define CKH_A 1103515241
+#define CKH_C 12347
+ uint32_t prng_state;
/* Total number of items. */
size_t count;
@@ -72,7 +74,7 @@ bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
void **data);
-bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
+bool ckh_search(ckh_t *ckh, const void *seachkey, void **key, void **data);
void ckh_string_hash(const void *key, size_t r_hash[2]);
bool ckh_string_keycomp(const void *k1, const void *k2);
void ckh_pointer_hash(const void *key, size_t r_hash[2]);
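The CKH_A/CKH_C constants introduced above are characteristic of a 32-bit linear congruential generator. A minimal stand-alone sketch of that scheme follows (illustration only; jemalloc's actual prng macro may differ in detail):

#include <stdint.h>
#include <stdio.h>

static uint32_t lcg_state = 42;

static uint32_t lcg_next(unsigned lg_range) {
	/* Same constants as CKH_A/CKH_C above; the high bits are the output. */
	lcg_state = lcg_state * 1103515241u + 12347u;
	return lcg_state >> (32 - lg_range);
}

int main(void) {
	for (int i = 0; i < 4; i++)
		printf("%u\n", lcg_next(4)); /* pseudo-random values in [0, 15] */
	return 0;
}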
diff --git a/deps/jemalloc/include/jemalloc/internal/ctl.h b/deps/jemalloc/include/jemalloc/internal/ctl.h
index af0f6d7c5..751c14b5b 100644
--- a/deps/jemalloc/include/jemalloc/internal/ctl.h
+++ b/deps/jemalloc/include/jemalloc/internal/ctl.h
@@ -21,14 +21,13 @@ struct ctl_named_node_s {
/* If (nchildren == 0), this is a terminal node. */
unsigned nchildren;
const ctl_node_t *children;
- int (*ctl)(tsd_t *, const size_t *, size_t, void *,
- size_t *, void *, size_t);
+ int (*ctl)(const size_t *, size_t, void *, size_t *,
+ void *, size_t);
};
struct ctl_indexed_node_s {
struct ctl_node_s node;
- const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t,
- size_t);
+ const ctl_named_node_t *(*index)(const size_t *, size_t, size_t);
};
struct ctl_arena_stats_s {
@@ -36,12 +35,8 @@ struct ctl_arena_stats_s {
unsigned nthreads;
const char *dss;
ssize_t lg_dirty_mult;
- ssize_t decay_time;
size_t pactive;
size_t pdirty;
-
- /* The remainder are only populated if config_stats is true. */
-
arena_stats_t astats;
/* Aggregate stats for small size classes, based on bin stats. */
@@ -61,7 +56,6 @@ struct ctl_stats_s {
size_t metadata;
size_t resident;
size_t mapped;
- size_t retained;
unsigned narenas;
ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
};
@@ -70,17 +64,16 @@ struct ctl_stats_s {
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen);
-int ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp,
- size_t *miblenp);
+int ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
+ size_t newlen);
+int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp);
-int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen);
+int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen);
bool ctl_boot(void);
-void ctl_prefork(tsdn_t *tsdn);
-void ctl_postfork_parent(tsdn_t *tsdn);
-void ctl_postfork_child(tsdn_t *tsdn);
+void ctl_prefork(void);
+void ctl_postfork_parent(void);
+void ctl_postfork_child(void);
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
diff --git a/deps/jemalloc/include/jemalloc/internal/extent.h b/deps/jemalloc/include/jemalloc/internal/extent.h
index 168ffe643..386d50ef4 100644
--- a/deps/jemalloc/include/jemalloc/internal/extent.h
+++ b/deps/jemalloc/include/jemalloc/internal/extent.h
@@ -19,20 +19,6 @@ struct extent_node_s {
size_t en_size;
/*
- * Serial number (potentially non-unique).
- *
- * In principle serial numbers can wrap around on 32-bit systems if
- * JEMALLOC_MUNMAP is defined, but as long as comparison functions fall
- * back on address comparison for equal serial numbers, stable (if
- * imperfect) ordering is maintained.
- *
- * Serial numbers may not be unique even in the absence of wrap-around,
- * e.g. when splitting an extent and assigning the same serial number to
- * both resulting adjacent extents.
- */
- size_t en_sn;
-
- /*
* The zeroed flag is used by chunk recycling code to track whether
* memory is zero-filled.
*/
@@ -59,10 +45,10 @@ struct extent_node_s {
qr(extent_node_t) cc_link;
union {
- /* Linkage for the size/sn/address-ordered tree. */
- rb_node(extent_node_t) szsnad_link;
+ /* Linkage for the size/address-ordered tree. */
+ rb_node(extent_node_t) szad_link;
- /* Linkage for arena's achunks, huge, and node_cache lists. */
+ /* Linkage for arena's huge and node_cache lists. */
ql_elm(extent_node_t) ql_link;
};
@@ -75,7 +61,7 @@ typedef rb_tree(extent_node_t) extent_tree_t;
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-rb_proto(, extent_tree_szsnad_, extent_tree_t, extent_node_t)
+rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t)
rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
@@ -87,7 +73,6 @@ rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
arena_t *extent_node_arena_get(const extent_node_t *node);
void *extent_node_addr_get(const extent_node_t *node);
size_t extent_node_size_get(const extent_node_t *node);
-size_t extent_node_sn_get(const extent_node_t *node);
bool extent_node_zeroed_get(const extent_node_t *node);
bool extent_node_committed_get(const extent_node_t *node);
bool extent_node_achunk_get(const extent_node_t *node);
@@ -95,13 +80,12 @@ prof_tctx_t *extent_node_prof_tctx_get(const extent_node_t *node);
void extent_node_arena_set(extent_node_t *node, arena_t *arena);
void extent_node_addr_set(extent_node_t *node, void *addr);
void extent_node_size_set(extent_node_t *node, size_t size);
-void extent_node_sn_set(extent_node_t *node, size_t sn);
void extent_node_zeroed_set(extent_node_t *node, bool zeroed);
void extent_node_committed_set(extent_node_t *node, bool committed);
void extent_node_achunk_set(extent_node_t *node, bool achunk);
void extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
void extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
- size_t size, size_t sn, bool zeroed, bool committed);
+ size_t size, bool zeroed, bool committed);
void extent_node_dirty_linkage_init(extent_node_t *node);
void extent_node_dirty_insert(extent_node_t *node,
arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty);
@@ -130,13 +114,6 @@ extent_node_size_get(const extent_node_t *node)
return (node->en_size);
}
-JEMALLOC_INLINE size_t
-extent_node_sn_get(const extent_node_t *node)
-{
-
- return (node->en_sn);
-}
-
JEMALLOC_INLINE bool
extent_node_zeroed_get(const extent_node_t *node)
{
@@ -188,13 +165,6 @@ extent_node_size_set(extent_node_t *node, size_t size)
}
JEMALLOC_INLINE void
-extent_node_sn_set(extent_node_t *node, size_t sn)
-{
-
- node->en_sn = sn;
-}
-
-JEMALLOC_INLINE void
extent_node_zeroed_set(extent_node_t *node, bool zeroed)
{
@@ -224,13 +194,12 @@ extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx)
JEMALLOC_INLINE void
extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size,
- size_t sn, bool zeroed, bool committed)
+ bool zeroed, bool committed)
{
extent_node_arena_set(node, arena);
extent_node_addr_set(node, addr);
extent_node_size_set(node, size);
- extent_node_sn_set(node, sn);
extent_node_zeroed_set(node, zeroed);
extent_node_committed_set(node, committed);
extent_node_achunk_set(node, false);
diff --git a/deps/jemalloc/include/jemalloc/internal/hash.h b/deps/jemalloc/include/jemalloc/internal/hash.h
index 1ff2d9a05..bcead337a 100644
--- a/deps/jemalloc/include/jemalloc/internal/hash.h
+++ b/deps/jemalloc/include/jemalloc/internal/hash.h
@@ -1,6 +1,6 @@
/*
* The following hash function is based on MurmurHash3, placed into the public
- * domain by Austin Appleby. See https://github.com/aappleby/smhasher for
+ * domain by Austin Appleby. See http://code.google.com/p/smhasher/ for
* details.
*/
/******************************************************************************/
@@ -49,14 +49,6 @@ JEMALLOC_INLINE uint32_t
hash_get_block_32(const uint32_t *p, int i)
{
- /* Handle unaligned read. */
- if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
- uint32_t ret;
-
- memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
- return (ret);
- }
-
return (p[i]);
}
@@ -64,14 +56,6 @@ JEMALLOC_INLINE uint64_t
hash_get_block_64(const uint64_t *p, int i)
{
- /* Handle unaligned read. */
- if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
- uint64_t ret;
-
- memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
- return (ret);
- }
-
return (p[i]);
}
@@ -337,18 +321,13 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
JEMALLOC_INLINE void
hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
{
-
- assert(len <= INT_MAX); /* Unfortunate implementation limitation. */
-
#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
- hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash);
+ hash_x64_128(key, len, seed, (uint64_t *)r_hash);
#else
- {
- uint64_t hashes[2];
- hash_x86_128(key, (int)len, seed, hashes);
- r_hash[0] = (size_t)hashes[0];
- r_hash[1] = (size_t)hashes[1];
- }
+ uint64_t hashes[2];
+ hash_x86_128(key, len, seed, hashes);
+ r_hash[0] = (size_t)hashes[0];
+ r_hash[1] = (size_t)hashes[1];
#endif
}
#endif
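The removed branches in hash_get_block_32()/hash_get_block_64() above used memcpy to perform a safe unaligned load. A generic sketch of that idiom (not jemalloc code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t load_u32(const void *p) {
	uint32_t v;
	memcpy(&v, p, sizeof(v)); /* compilers lower this to a plain (possibly unaligned) load */
	return v;
}

int main(void) {
	unsigned char buf[8] = {1, 0, 0, 0, 2, 0, 0, 0};
	printf("%u %u\n", load_u32(buf), load_u32(buf + 1)); /* buf+1 is an unaligned address */
	return 0;
}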
diff --git a/deps/jemalloc/include/jemalloc/internal/huge.h b/deps/jemalloc/include/jemalloc/internal/huge.h
index 22184d9bb..ece7af980 100644
--- a/deps/jemalloc/include/jemalloc/internal/huge.h
+++ b/deps/jemalloc/include/jemalloc/internal/huge.h
@@ -9,23 +9,24 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-void *huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
-void *huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
- size_t alignment, bool zero);
-bool huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
- size_t usize_min, size_t usize_max, bool zero);
+void *huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
+ tcache_t *tcache);
+void *huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
+ bool zero, tcache_t *tcache);
+bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
+ size_t usize_max, bool zero);
void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
size_t usize, size_t alignment, bool zero, tcache_t *tcache);
#ifdef JEMALLOC_JET
typedef void (huge_dalloc_junk_t)(void *, size_t);
extern huge_dalloc_junk_t *huge_dalloc_junk;
#endif
-void huge_dalloc(tsdn_t *tsdn, void *ptr);
+void huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
arena_t *huge_aalloc(const void *ptr);
-size_t huge_salloc(tsdn_t *tsdn, const void *ptr);
-prof_tctx_t *huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
-void huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx);
-void huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr);
+size_t huge_salloc(const void *ptr);
+prof_tctx_t *huge_prof_tctx_get(const void *ptr);
+void huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
+void huge_prof_tctx_reset(const void *ptr);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
index e7ace7d8c..8536a3eda 100644
--- a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
@@ -49,7 +49,6 @@ static const bool config_lazy_lock =
false
#endif
;
-static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
static const bool config_prof =
#ifdef JEMALLOC_PROF
true
@@ -161,10 +160,7 @@ static const bool config_cache_oblivious =
#include <malloc/malloc.h>
#endif
-#include "jemalloc/internal/ph.h"
-#ifndef __PGI
#define RB_COMPACT
-#endif
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"
@@ -187,9 +183,6 @@ static const bool config_cache_oblivious =
#include "jemalloc/internal/jemalloc_internal_macros.h"
-/* Page size index type. */
-typedef unsigned pszind_t;
-
/* Size class index type. */
typedef unsigned szind_t;
@@ -239,7 +232,7 @@ typedef unsigned szind_t;
# ifdef __alpha__
# define LG_QUANTUM 4
# endif
-# if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__))
+# if (defined(__sparc64__) || defined(__sparcv9))
# define LG_QUANTUM 4
# endif
# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
@@ -263,9 +256,6 @@ typedef unsigned szind_t;
# ifdef __powerpc__
# define LG_QUANTUM 4
# endif
-# ifdef __riscv__
-# define LG_QUANTUM 4
-# endif
# ifdef __s390__
# define LG_QUANTUM 4
# endif
@@ -327,17 +317,13 @@ typedef unsigned szind_t;
#define PAGE ((size_t)(1U << LG_PAGE))
#define PAGE_MASK ((size_t)(PAGE - 1))
-/* Return the page base address for the page containing address a. */
-#define PAGE_ADDR2BASE(a) \
- ((void *)((uintptr_t)(a) & ~PAGE_MASK))
-
/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
(((s) + PAGE_MASK) & ~PAGE_MASK)
/* Return the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2BASE(a, alignment) \
- ((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))
+ ((void *)((uintptr_t)(a) & (-(alignment))))
/* Return the offset between a and the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
@@ -345,7 +331,7 @@ typedef unsigned szind_t;
/* Return the smallest alignment multiple that is >= s. */
#define ALIGNMENT_CEILING(s, alignment) \
- (((s) + (alignment - 1)) & ((~(alignment)) + 1))
+ (((s) + (alignment - 1)) & (-(alignment)))
/* Declare a variable-length array. */
#if __STDC_VERSION__ < 199901L
@@ -365,19 +351,14 @@ typedef unsigned szind_t;
# define VARIABLE_ARRAY(type, name, count) type name[(count)]
#endif
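For the ALIGNMENT_ADDR2BASE/ALIGNMENT_CEILING changes above: for an unsigned power-of-two alignment, (~(alignment)) + 1 and -(alignment) are the same two's-complement mask, so both spellings compute the same result. A quick stand-alone check (illustration only, reproducing the macro locally):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define ALIGNMENT_CEILING(s, alignment) \
	(((s) + (alignment - 1)) & (-(alignment)))

int main(void) {
	size_t a = 64;
	assert((size_t)-a == ~a + 1);           /* the two mask forms agree */
	printf("%zu %zu %zu\n",
	    ALIGNMENT_CEILING((size_t)1, a),    /* 64  */
	    ALIGNMENT_CEILING((size_t)64, a),   /* 64  */
	    ALIGNMENT_CEILING((size_t)65, a));  /* 128 */
	return 0;
}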
-#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/prng.h"
-#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
-#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
-#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
@@ -398,19 +379,14 @@ typedef unsigned szind_t;
/******************************************************************************/
#define JEMALLOC_H_STRUCTS
-#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/prng.h"
-#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
-#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
-#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
@@ -446,28 +422,14 @@ extern bool opt_redzone;
extern bool opt_utrace;
extern bool opt_xmalloc;
extern bool opt_zero;
-extern unsigned opt_narenas;
+extern size_t opt_narenas;
extern bool in_valgrind;
/* Number of CPUs. */
-extern unsigned ncpus;
-
-/* Number of arenas used for automatic multiplexing of threads and arenas. */
-extern unsigned narenas_auto;
+extern unsigned ncpus;
/*
- * Arenas that are used to service external requests. Not all elements of the
- * arenas array are necessarily used; arenas are created lazily as needed.
- */
-extern arena_t **arenas;
-
-/*
- * pind2sz_tab encodes the same information as could be computed by
- * pind2sz_compute().
- */
-extern size_t const pind2sz_tab[NPSIZES];
-/*
* index2size_tab encodes the same information as could be computed (at
* unacceptable cost in some code paths) by index2size_compute().
*/
@@ -485,35 +447,31 @@ void a0dalloc(void *ptr);
void *bootstrap_malloc(size_t size);
void *bootstrap_calloc(size_t num, size_t size);
void bootstrap_free(void *ptr);
+arena_t *arenas_extend(unsigned ind);
+arena_t *arena_init(unsigned ind);
unsigned narenas_total_get(void);
-arena_t *arena_init(tsdn_t *tsdn, unsigned ind);
-arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
-arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
+arena_t *arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing);
+arena_t *arena_choose_hard(tsd_t *tsd);
void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
+unsigned arena_nbound(unsigned ind);
void thread_allocated_cleanup(tsd_t *tsd);
void thread_deallocated_cleanup(tsd_t *tsd);
-void iarena_cleanup(tsd_t *tsd);
void arena_cleanup(tsd_t *tsd);
-void arenas_tdata_cleanup(tsd_t *tsd);
-void narenas_tdata_cleanup(tsd_t *tsd);
-void arenas_tdata_bypass_cleanup(tsd_t *tsd);
+void arenas_cache_cleanup(tsd_t *tsd);
+void narenas_cache_cleanup(tsd_t *tsd);
+void arenas_cache_bypass_cleanup(tsd_t *tsd);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);
-#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/prng.h"
-#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
-#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
-#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
@@ -534,21 +492,16 @@ void jemalloc_postfork_child(void);
/******************************************************************************/
#define JEMALLOC_H_INLINES
-#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/prng.h"
-#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
-#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
-#include "jemalloc/internal/tsd.h"
-#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
@@ -558,11 +511,6 @@ void jemalloc_postfork_child(void);
#include "jemalloc/internal/huge.h"
#ifndef JEMALLOC_ENABLE_INLINE
-pszind_t psz2ind(size_t psz);
-size_t pind2sz_compute(pszind_t pind);
-size_t pind2sz_lookup(pszind_t pind);
-size_t pind2sz(pszind_t pind);
-size_t psz2u(size_t psz);
szind_t size2index_compute(size_t size);
szind_t size2index_lookup(size_t size);
szind_t size2index(size_t size);
@@ -573,121 +521,39 @@ size_t s2u_compute(size_t size);
size_t s2u_lookup(size_t size);
size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment);
-arena_t *arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal);
arena_t *arena_choose(tsd_t *tsd, arena_t *arena);
-arena_t *arena_ichoose(tsd_t *tsd, arena_t *arena);
-arena_tdata_t *arena_tdata_get(tsd_t *tsd, unsigned ind,
+arena_t *arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
bool refresh_if_missing);
-arena_t *arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing);
-ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
-JEMALLOC_INLINE pszind_t
-psz2ind(size_t psz)
-{
-
- if (unlikely(psz > HUGE_MAXCLASS))
- return (NPSIZES);
- {
- pszind_t x = lg_floor((psz<<1)-1);
- pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x -
- (LG_SIZE_CLASS_GROUP + LG_PAGE);
- pszind_t grp = shift << LG_SIZE_CLASS_GROUP;
-
- pszind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
- LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
-
- size_t delta_inverse_mask = ZI(-1) << lg_delta;
- pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) &
- ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
-
- pszind_t ind = grp + mod;
- return (ind);
- }
-}
-
-JEMALLOC_INLINE size_t
-pind2sz_compute(pszind_t pind)
-{
-
- {
- size_t grp = pind >> LG_SIZE_CLASS_GROUP;
- size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
-
- size_t grp_size_mask = ~((!!grp)-1);
- size_t grp_size = ((ZU(1) << (LG_PAGE +
- (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
-
- size_t shift = (grp == 0) ? 1 : grp;
- size_t lg_delta = shift + (LG_PAGE-1);
- size_t mod_size = (mod+1) << lg_delta;
-
- size_t sz = grp_size + mod_size;
- return (sz);
- }
-}
-
-JEMALLOC_INLINE size_t
-pind2sz_lookup(pszind_t pind)
-{
- size_t ret = (size_t)pind2sz_tab[pind];
- assert(ret == pind2sz_compute(pind));
- return (ret);
-}
-
-JEMALLOC_INLINE size_t
-pind2sz(pszind_t pind)
-{
-
- assert(pind < NPSIZES);
- return (pind2sz_lookup(pind));
-}
-
-JEMALLOC_INLINE size_t
-psz2u(size_t psz)
-{
-
- if (unlikely(psz > HUGE_MAXCLASS))
- return (0);
- {
- size_t x = lg_floor((psz<<1)-1);
- size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
- LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
- size_t delta = ZU(1) << lg_delta;
- size_t delta_mask = delta - 1;
- size_t usize = (psz + delta_mask) & ~delta_mask;
- return (usize);
- }
-}
-
JEMALLOC_INLINE szind_t
size2index_compute(size_t size)
{
- if (unlikely(size > HUGE_MAXCLASS))
- return (NSIZES);
#if (NTBINS != 0)
if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
- szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
- szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
+ size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
+ size_t lg_ceil = lg_floor(pow2_ceil(size));
return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
}
#endif
{
- szind_t x = lg_floor((size<<1)-1);
- szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
+ size_t x = unlikely(ZI(size) < 0) ? ((size<<1) ?
+ (ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1))
+ : lg_floor((size<<1)-1);
+ size_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
- szind_t grp = shift << LG_SIZE_CLASS_GROUP;
+ size_t grp = shift << LG_SIZE_CLASS_GROUP;
- szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
+ size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
size_t delta_inverse_mask = ZI(-1) << lg_delta;
- szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
+ size_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
- szind_t index = NTBINS + grp + mod;
+ size_t index = NTBINS + grp + mod;
return (index);
}
}
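The group/shift/delta arithmetic in size2index_compute() above (and in s2u_compute() further down) encodes the usual size-class spacing rule: within each power-of-two group, sizes are spaced by delta = group_size / 2^LG_SIZE_CLASS_GROUP. The following stand-alone sketch rounds a request to its class size under the assumption LG_QUANTUM = 4 and LG_SIZE_CLASS_GROUP = 2 (four classes per doubling); it ignores the tiny-class branch and is not jemalloc code:

#include <stddef.h>
#include <stdio.h>

#define LG_QUANTUM          4
#define LG_SIZE_CLASS_GROUP 2

static unsigned lg_floor(size_t x) {
	unsigned lg = 0;
	while (x >>= 1)
		lg++;
	return lg;
}

/* Round size up to its size class, using the same spacing rule as above. */
static size_t round_to_class(size_t size) {
	unsigned x = lg_floor((size << 1) - 1); /* lg of the group size belongs to */
	unsigned lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) ?
	    LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
	size_t delta = (size_t)1 << lg_delta;   /* spacing inside that group */
	return (size + delta - 1) & ~(delta - 1);
}

int main(void) {
	printf("%zu %zu %zu %zu\n",
	    round_to_class(1),     /* 16: tiny classes are ignored in this sketch */
	    round_to_class(17),    /* 32 */
	    round_to_class(100),   /* 112: the 64..128 group is spaced in steps of 16 */
	    round_to_class(4097)); /* 5120: the 4096..8192 group is spaced in steps of 1024 */
	return 0;
}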
@@ -698,7 +564,8 @@ size2index_lookup(size_t size)
assert(size <= LOOKUP_MAXCLASS);
{
- szind_t ret = (size2index_tab[(size-1) >> LG_TINY_MIN]);
+ size_t ret = ((size_t)(size2index_tab[(size-1) >>
+ LG_TINY_MIN]));
assert(ret == size2index_compute(size));
return (ret);
}
@@ -761,18 +628,18 @@ JEMALLOC_ALWAYS_INLINE size_t
s2u_compute(size_t size)
{
- if (unlikely(size > HUGE_MAXCLASS))
- return (0);
#if (NTBINS > 0)
if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
- size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
+ size_t lg_ceil = lg_floor(pow2_ceil(size));
return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
(ZU(1) << lg_ceil));
}
#endif
{
- size_t x = lg_floor((size<<1)-1);
+ size_t x = unlikely(ZI(size) < 0) ? ((size<<1) ?
+ (ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1))
+ : lg_floor((size<<1)-1);
size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
size_t delta = ZU(1) << lg_delta;
@@ -856,16 +723,17 @@ sa2u(size_t size, size_t alignment)
return (usize);
}
- /* Huge size class. Beware of overflow. */
-
- if (unlikely(alignment > HUGE_MAXCLASS))
- return (0);
+ /* Huge size class. Beware of size_t overflow. */
/*
* We can't achieve subchunk alignment, so round up alignment to the
* minimum that can actually be supported.
*/
alignment = CHUNK_CEILING(alignment);
+ if (alignment == 0) {
+ /* size_t overflow. */
+ return (0);
+ }
/* Make sure result is a huge size class. */
if (size <= chunksize)
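The overflow check discussed above relies on the fact that rounding an alignment near SIZE_MAX up to a chunk boundary wraps to 0 in size_t arithmetic. A small stand-alone demonstration (the macro below only mimics CHUNK_CEILING for illustration; the chunk size is an assumed value):

#include <stdint.h>
#include <stdio.h>

#define CHUNK_CEILING_DEMO(s, chunksize) \
	(((s) + ((chunksize) - 1)) & ~((size_t)(chunksize) - 1))

int main(void) {
	size_t chunksize = (size_t)1 << 21;      /* 2 MiB, for illustration only */
	size_t huge_alignment = SIZE_MAX - 100;
	/* The addition wraps around, so the result masks down to 0. */
	printf("%zu\n", CHUNK_CEILING_DEMO(huge_alignment, chunksize)); /* prints 0 */
	return 0;
}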
@@ -891,84 +759,45 @@ sa2u(size_t size, size_t alignment)
/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
-arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal)
+arena_choose(tsd_t *tsd, arena_t *arena)
{
arena_t *ret;
if (arena != NULL)
return (arena);
- ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
- if (unlikely(ret == NULL))
- ret = arena_choose_hard(tsd, internal);
+ if (unlikely((ret = tsd_arena_get(tsd)) == NULL))
+ ret = arena_choose_hard(tsd);
return (ret);
}
JEMALLOC_INLINE arena_t *
-arena_choose(tsd_t *tsd, arena_t *arena)
+arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
+ bool refresh_if_missing)
{
+ arena_t *arena;
+ arena_t **arenas_cache = tsd_arenas_cache_get(tsd);
- return (arena_choose_impl(tsd, arena, false));
-}
-
-JEMALLOC_INLINE arena_t *
-arena_ichoose(tsd_t *tsd, arena_t *arena)
-{
+ /* init_if_missing requires refresh_if_missing. */
+ assert(!init_if_missing || refresh_if_missing);
- return (arena_choose_impl(tsd, arena, true));
-}
-
-JEMALLOC_INLINE arena_tdata_t *
-arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing)
-{
- arena_tdata_t *tdata;
- arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
-
- if (unlikely(arenas_tdata == NULL)) {
- /* arenas_tdata hasn't been initialized yet. */
- return (arena_tdata_get_hard(tsd, ind));
+ if (unlikely(arenas_cache == NULL)) {
+ /* arenas_cache hasn't been initialized yet. */
+ return (arena_get_hard(tsd, ind, init_if_missing));
}
- if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
+ if (unlikely(ind >= tsd_narenas_cache_get(tsd))) {
/*
- * ind is invalid, cache is old (too small), or tdata to be
+ * ind is invalid, cache is old (too small), or arena to be
* initialized.
*/
- return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) :
- NULL);
- }
-
- tdata = &arenas_tdata[ind];
- if (likely(tdata != NULL) || !refresh_if_missing)
- return (tdata);
- return (arena_tdata_get_hard(tsd, ind));
-}
-
-JEMALLOC_INLINE arena_t *
-arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing)
-{
- arena_t *ret;
-
- assert(ind <= MALLOCX_ARENA_MAX);
-
- ret = arenas[ind];
- if (unlikely(ret == NULL)) {
- ret = atomic_read_p((void *)&arenas[ind]);
- if (init_if_missing && unlikely(ret == NULL))
- ret = arena_init(tsdn, ind);
+ return (refresh_if_missing ? arena_get_hard(tsd, ind,
+ init_if_missing) : NULL);
}
- return (ret);
-}
-
-JEMALLOC_INLINE ticker_t *
-decay_ticker_get(tsd_t *tsd, unsigned ind)
-{
- arena_tdata_t *tdata;
-
- tdata = arena_tdata_get(tsd, ind, true);
- if (unlikely(tdata == NULL))
- return (NULL);
- return (&tdata->decay_ticker);
+ arena = arenas_cache[ind];
+ if (likely(arena != NULL) || !refresh_if_missing)
+ return (arena);
+ return (arena_get_hard(tsd, ind, init_if_missing));
}
#endif
@@ -989,27 +818,27 @@ decay_ticker_get(tsd_t *tsd, unsigned ind)
#ifndef JEMALLOC_ENABLE_INLINE
arena_t *iaalloc(const void *ptr);
-size_t isalloc(tsdn_t *tsdn, const void *ptr, bool demote);
-void *iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero,
- tcache_t *tcache, bool is_metadata, arena_t *arena, bool slow_path);
-void *ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero,
- bool slow_path);
-void *ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
+size_t isalloc(const void *ptr, bool demote);
+void *iallocztm(tsd_t *tsd, size_t size, bool zero, tcache_t *tcache,
+ bool is_metadata, arena_t *arena);
+void *imalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena);
+void *imalloc(tsd_t *tsd, size_t size);
+void *icalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena);
+void *icalloc(tsd_t *tsd, size_t size);
+void *ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, bool is_metadata, arena_t *arena);
-void *ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
+void *ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, arena_t *arena);
void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
-size_t ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote);
+size_t ivsalloc(const void *ptr, bool demote);
size_t u2rz(size_t usize);
-size_t p2rz(tsdn_t *tsdn, const void *ptr);
-void idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata,
- bool slow_path);
+size_t p2rz(const void *ptr);
+void idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata);
+void idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache);
void idalloc(tsd_t *tsd, void *ptr);
-void iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path);
-void isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
- bool slow_path);
-void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache,
- bool slow_path);
+void iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
+void isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
+void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
void *iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero, tcache_t *tcache,
arena_t *arena);
@@ -1017,8 +846,8 @@ void *iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t alignment, bool zero, tcache_t *tcache, arena_t *arena);
void *iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t alignment, bool zero);
-bool ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
- size_t extra, size_t alignment, bool zero);
+bool ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra,
+ size_t alignment, bool zero);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
@@ -1033,85 +862,100 @@ iaalloc(const void *ptr)
/*
* Typical usage:
- * tsdn_t *tsdn = [...]
* void *ptr = [...]
- * size_t sz = isalloc(tsdn, ptr, config_prof);
+ * size_t sz = isalloc(ptr, config_prof);
*/
JEMALLOC_ALWAYS_INLINE size_t
-isalloc(tsdn_t *tsdn, const void *ptr, bool demote)
+isalloc(const void *ptr, bool demote)
{
assert(ptr != NULL);
/* Demotion only makes sense if config_prof is true. */
assert(config_prof || !demote);
- return (arena_salloc(tsdn, ptr, demote));
+ return (arena_salloc(ptr, demote));
}
JEMALLOC_ALWAYS_INLINE void *
-iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
- bool is_metadata, arena_t *arena, bool slow_path)
+iallocztm(tsd_t *tsd, size_t size, bool zero, tcache_t *tcache, bool is_metadata,
+ arena_t *arena)
{
void *ret;
assert(size != 0);
- assert(!is_metadata || tcache == NULL);
- assert(!is_metadata || arena == NULL || arena->ind < narenas_auto);
- ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
+ ret = arena_malloc(tsd, arena, size, zero, tcache);
if (config_stats && is_metadata && likely(ret != NULL)) {
- arena_metadata_allocated_add(iaalloc(ret),
- isalloc(tsdn, ret, config_prof));
+ arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
+ config_prof));
}
return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
-ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path)
+imalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena)
{
- return (iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd, true),
- false, NULL, slow_path));
+ return (iallocztm(tsd, size, false, tcache, false, arena));
}
JEMALLOC_ALWAYS_INLINE void *
-ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
+imalloc(tsd_t *tsd, size_t size)
+{
+
+ return (iallocztm(tsd, size, false, tcache_get(tsd, true), false, NULL));
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+icalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena)
+{
+
+ return (iallocztm(tsd, size, true, tcache, false, arena));
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+icalloc(tsd_t *tsd, size_t size)
+{
+
+ return (iallocztm(tsd, size, true, tcache_get(tsd, true), false, NULL));
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, bool is_metadata, arena_t *arena)
{
void *ret;
assert(usize != 0);
assert(usize == sa2u(usize, alignment));
- assert(!is_metadata || tcache == NULL);
- assert(!is_metadata || arena == NULL || arena->ind < narenas_auto);
- ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
+ ret = arena_palloc(tsd, arena, usize, alignment, zero, tcache);
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
if (config_stats && is_metadata && likely(ret != NULL)) {
- arena_metadata_allocated_add(iaalloc(ret), isalloc(tsdn, ret,
+ arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
config_prof));
}
return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
-ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
+ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, arena_t *arena)
{
- return (ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena));
+ return (ipallocztm(tsd, usize, alignment, zero, tcache, false, arena));
}
JEMALLOC_ALWAYS_INLINE void *
ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
{
- return (ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
- tcache_get(tsd, true), false, NULL));
+ return (ipallocztm(tsd, usize, alignment, zero, tcache_get(tsd,
+ NULL), false, NULL));
}
JEMALLOC_ALWAYS_INLINE size_t
-ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote)
+ivsalloc(const void *ptr, bool demote)
{
extent_node_t *node;
@@ -1123,7 +967,7 @@ ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote)
assert(extent_node_addr_get(node) == ptr ||
extent_node_achunk_get(node));
- return (isalloc(tsdn, ptr, demote));
+ return (isalloc(ptr, demote));
}
JEMALLOC_INLINE size_t
@@ -1141,62 +985,65 @@ u2rz(size_t usize)
}
JEMALLOC_INLINE size_t
-p2rz(tsdn_t *tsdn, const void *ptr)
+p2rz(const void *ptr)
{
- size_t usize = isalloc(tsdn, ptr, false);
+ size_t usize = isalloc(ptr, false);
return (u2rz(usize));
}
JEMALLOC_ALWAYS_INLINE void
-idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata,
- bool slow_path)
+idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata)
{
assert(ptr != NULL);
- assert(!is_metadata || tcache == NULL);
- assert(!is_metadata || iaalloc(ptr)->ind < narenas_auto);
if (config_stats && is_metadata) {
- arena_metadata_allocated_sub(iaalloc(ptr), isalloc(tsdn, ptr,
+ arena_metadata_allocated_sub(iaalloc(ptr), isalloc(ptr,
config_prof));
}
- arena_dalloc(tsdn, ptr, tcache, slow_path);
+ arena_dalloc(tsd, ptr, tcache);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache)
+{
+
+ idalloctm(tsd, ptr, tcache, false);
}
JEMALLOC_ALWAYS_INLINE void
idalloc(tsd_t *tsd, void *ptr)
{
- idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd, false), false, true);
+ idalloctm(tsd, ptr, tcache_get(tsd, false), false);
}
JEMALLOC_ALWAYS_INLINE void
-iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
+iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
- if (slow_path && config_fill && unlikely(opt_quarantine))
+ if (config_fill && unlikely(opt_quarantine))
quarantine(tsd, ptr);
else
- idalloctm(tsd_tsdn(tsd), ptr, tcache, false, slow_path);
+ idalloctm(tsd, ptr, tcache, false);
}
JEMALLOC_ALWAYS_INLINE void
-isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
- bool slow_path)
+isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
{
- arena_sdalloc(tsdn, ptr, size, tcache, slow_path);
+ arena_sdalloc(tsd, ptr, size, tcache);
}
JEMALLOC_ALWAYS_INLINE void
-isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, bool slow_path)
+isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
{
- if (slow_path && config_fill && unlikely(opt_quarantine))
+ if (config_fill && unlikely(opt_quarantine))
quarantine(tsd, ptr);
else
- isdalloct(tsd_tsdn(tsd), ptr, size, tcache, slow_path);
+ isdalloct(tsd, ptr, size, tcache);
}
JEMALLOC_ALWAYS_INLINE void *
@@ -1207,18 +1054,17 @@ iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t usize, copysize;
usize = sa2u(size + extra, alignment);
- if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
+ if (usize == 0)
return (NULL);
- p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache, arena);
+ p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
if (p == NULL) {
if (extra == 0)
return (NULL);
/* Try again, without extra this time. */
usize = sa2u(size, alignment);
- if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
+ if (usize == 0)
return (NULL);
- p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache,
- arena);
+ p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
if (p == NULL)
return (NULL);
}
@@ -1228,7 +1074,7 @@ iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(p, ptr, copysize);
- isqalloc(tsd, ptr, oldsize, tcache, true);
+ isqalloc(tsd, ptr, oldsize, tcache);
return (p);
}
@@ -1264,8 +1110,8 @@ iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
}
JEMALLOC_ALWAYS_INLINE bool
-ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
- size_t alignment, bool zero)
+ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment,
+ bool zero)
{
assert(ptr != NULL);
@@ -1277,7 +1123,7 @@ ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
return (true);
}
- return (arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero));
+ return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
}
#endif
diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
index c907d9109..a601d6ebb 100644
--- a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
+++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
@@ -17,18 +17,7 @@
# include <sys/uio.h>
# endif
# include <pthread.h>
-# ifdef JEMALLOC_OS_UNFAIR_LOCK
-# include <os/lock.h>
-# endif
-# ifdef JEMALLOC_GLIBC_MALLOC_HOOK
-# include <sched.h>
-# endif
# include <errno.h>
-# include <sys/time.h>
-# include <time.h>
-# ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
-# include <mach/mach_time.h>
-# endif
#endif
#include <sys/types.h>
diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in
index def4ba550..b0f8caaf8 100644
--- a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in
+++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in
@@ -56,9 +56,9 @@
#undef JEMALLOC_HAVE_BUILTIN_CLZ
/*
- * Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
+ * Defined if madvise(2) is available.
*/
-#undef JEMALLOC_OS_UNFAIR_LOCK
+#undef JEMALLOC_HAVE_MADVISE
/*
* Defined if OSSpin*() functions are available, as provided by Darwin, and
@@ -66,9 +66,6 @@
*/
#undef JEMALLOC_OSSPIN
-/* Defined if syscall(2) is usable. */
-#undef JEMALLOC_USE_SYSCALL
-
/*
* Defined if secure_getenv(3) is available.
*/
@@ -79,24 +76,6 @@
*/
#undef JEMALLOC_HAVE_ISSETUGID
-/* Defined if pthread_atfork(3) is available. */
-#undef JEMALLOC_HAVE_PTHREAD_ATFORK
-
-/*
- * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
- */
-#undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
-
-/*
- * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
- */
-#undef JEMALLOC_HAVE_CLOCK_MONOTONIC
-
-/*
- * Defined if mach_absolute_time() is available.
- */
-#undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
-
/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc
@@ -210,16 +189,9 @@
#undef JEMALLOC_TLS
/*
- * Used to mark unreachable code to quiet "end of non-void" compiler warnings.
- * Don't use this directly; instead use unreachable() from util.h
+ * ffs()/ffsl() functions to use for bitmapping. Don't use these directly;
+ * instead, use jemalloc_ffs() or jemalloc_ffsl() from util.h.
*/
-#undef JEMALLOC_INTERNAL_UNREACHABLE
-
-/*
- * ffs*() functions to use for bitmapping. Don't use these directly; instead,
- * use ffs_*() from util.h.
- */
-#undef JEMALLOC_INTERNAL_FFSLL
#undef JEMALLOC_INTERNAL_FFSL
#undef JEMALLOC_INTERNAL_FFS
@@ -242,34 +214,17 @@
#undef JEMALLOC_ZONE_VERSION
/*
- * Methods for determining whether the OS overcommits.
- * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
- * /proc/sys/vm.overcommit_memory file.
- * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
- */
-#undef JEMALLOC_SYSCTL_VM_OVERCOMMIT
-#undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
-
-/* Defined if madvise(2) is available. */
-#undef JEMALLOC_HAVE_MADVISE
-
-/*
* Methods for purging unused pages differ between operating systems.
*
- * madvise(..., MADV_FREE) : This marks pages as being unused, such that they
- * will be discarded rather than swapped out.
- * madvise(..., MADV_DONTNEED) : This immediately discards pages, such that
- * new pages will be demand-zeroed if the
- * address region is later touched.
+ * madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages,
+ * such that new pages will be demand-zeroed if
+ * the address region is later touched.
+ * madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being
+ * unused, such that they will be discarded rather
+ * than swapped out.
*/
-#undef JEMALLOC_PURGE_MADVISE_FREE
#undef JEMALLOC_PURGE_MADVISE_DONTNEED
-
-/*
- * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
- * arguments to madvise(2).
- */
-#undef JEMALLOC_THP
+#undef JEMALLOC_PURGE_MADVISE_FREE
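The purge methods described in the comment above both go through madvise(2). A Linux-flavored sketch of the MADV_DONTNEED case follows (illustration only, not jemalloc's pages_purge()): the pages are returned to the OS while the mapping itself stays valid.

#define _DEFAULT_SOURCE 1   /* for MAP_ANONYMOUS/madvise on glibc */
#include <stdio.h>
#include <sys/mman.h>

int main(void) {
	size_t len = 1 << 20;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	((char *)p)[0] = 1;                        /* touch a page so it is resident */
#ifdef MADV_DONTNEED
	if (madvise(p, len, MADV_DONTNEED) != 0)   /* discard contents; next touch demand-zeroes */
		perror("madvise");
#endif
	printf("%d\n", ((char *)p)[0]);            /* 0 after a successful purge on Linux */
	munmap(p, len);
	return 0;
}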
/* Define if operating system has alloca.h header. */
#undef JEMALLOC_HAS_ALLOCA_H
@@ -286,9 +241,6 @@
/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#undef LG_SIZEOF_LONG
-/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
-#undef LG_SIZEOF_LONG_LONG
-
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#undef LG_SIZEOF_INTMAX_T
@@ -307,7 +259,4 @@
*/
#undef JEMALLOC_EXPORT
-/* config.malloc_conf options string. */
-#undef JEMALLOC_CONFIG_MALLOC_CONF
-
#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
diff --git a/deps/jemalloc/include/jemalloc/internal/mb.h b/deps/jemalloc/include/jemalloc/internal/mb.h
index 5384728fd..3cfa78729 100644
--- a/deps/jemalloc/include/jemalloc/internal/mb.h
+++ b/deps/jemalloc/include/jemalloc/internal/mb.h
@@ -42,7 +42,7 @@ mb_write(void)
: /* Inputs. */
: "memory" /* Clobbers. */
);
-# else
+#else
/*
* This is hopefully enough to keep the compiler from reordering
* instructions around this one.
@@ -52,7 +52,7 @@ mb_write(void)
: /* Inputs. */
: "memory" /* Clobbers. */
);
-# endif
+#endif
}
#elif (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE void
@@ -104,9 +104,9 @@ mb_write(void)
{
malloc_mutex_t mtx;
- malloc_mutex_init(&mtx, "mb", WITNESS_RANK_OMIT);
- malloc_mutex_lock(TSDN_NULL, &mtx);
- malloc_mutex_unlock(TSDN_NULL, &mtx);
+ malloc_mutex_init(&mtx);
+ malloc_mutex_lock(&mtx);
+ malloc_mutex_unlock(&mtx);
}
#endif
#endif
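mb_write() above builds a write barrier from inline assembly, with a mutex lock/unlock pair as a last-resort fallback. As a hedged aside (this is not what this jemalloc version does, it predates reliance on <stdatomic.h>), the publish-side ordering such a barrier provides can be expressed portably with C11 release/acquire operations:

#include <stdatomic.h>
#include <stdio.h>

int        data;
atomic_int ready;

void publish(int v) {
	data = v;                                                /* plain store */
	atomic_store_explicit(&ready, 1, memory_order_release); /* orders the store above before the flag */
}

int main(void) {
	publish(42);
	if (atomic_load_explicit(&ready, memory_order_acquire)) /* pairs with the release store */
		printf("%d\n", data);
	return 0;
}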
diff --git a/deps/jemalloc/include/jemalloc/internal/mutex.h b/deps/jemalloc/include/jemalloc/internal/mutex.h
index b442d2d4e..f051f2917 100644
--- a/deps/jemalloc/include/jemalloc/internal/mutex.h
+++ b/deps/jemalloc/include/jemalloc/internal/mutex.h
@@ -5,25 +5,18 @@ typedef struct malloc_mutex_s malloc_mutex_t;
#ifdef _WIN32
# define MALLOC_MUTEX_INITIALIZER
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
-# define MALLOC_MUTEX_INITIALIZER \
- {OS_UNFAIR_LOCK_INIT, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_OSSPIN))
-# define MALLOC_MUTEX_INITIALIZER {0, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
+# define MALLOC_MUTEX_INITIALIZER {0}
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
-# define MALLOC_MUTEX_INITIALIZER \
- {PTHREAD_MUTEX_INITIALIZER, NULL, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
+# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL}
#else
# if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) && \
defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
-# define MALLOC_MUTEX_INITIALIZER \
- {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP, \
- WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
+# define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP}
# else
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
-# define MALLOC_MUTEX_INITIALIZER \
- {PTHREAD_MUTEX_INITIALIZER, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
+# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER}
# endif
#endif
@@ -38,8 +31,6 @@ struct malloc_mutex_s {
# else
CRITICAL_SECTION lock;
# endif
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
- os_unfair_lock lock;
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLock lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
@@ -48,7 +39,6 @@ struct malloc_mutex_s {
#else
pthread_mutex_t lock;
#endif
- witness_t witness;
};
#endif /* JEMALLOC_H_STRUCTS */
@@ -62,62 +52,52 @@ extern bool isthreaded;
# define isthreaded true
#endif
-bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
- witness_rank_t rank);
-void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
-void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
-void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
-bool malloc_mutex_boot(void);
+bool malloc_mutex_init(malloc_mutex_t *mutex);
+void malloc_mutex_prefork(malloc_mutex_t *mutex);
+void malloc_mutex_postfork_parent(malloc_mutex_t *mutex);
+void malloc_mutex_postfork_child(malloc_mutex_t *mutex);
+bool mutex_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
-void malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex);
-void malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex);
-void malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
-void malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
+void malloc_mutex_lock(malloc_mutex_t *mutex);
+void malloc_mutex_unlock(malloc_mutex_t *mutex);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
JEMALLOC_INLINE void
-malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex)
+malloc_mutex_lock(malloc_mutex_t *mutex)
{
if (isthreaded) {
- witness_assert_not_owner(tsdn, &mutex->witness);
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
AcquireSRWLockExclusive(&mutex->lock);
# else
EnterCriticalSection(&mutex->lock);
# endif
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
- os_unfair_lock_lock(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLockLock(&mutex->lock);
#else
pthread_mutex_lock(&mutex->lock);
#endif
- witness_lock(tsdn, &mutex->witness);
}
}
JEMALLOC_INLINE void
-malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex)
+malloc_mutex_unlock(malloc_mutex_t *mutex)
{
if (isthreaded) {
- witness_unlock(tsdn, &mutex->witness);
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
ReleaseSRWLockExclusive(&mutex->lock);
# else
LeaveCriticalSection(&mutex->lock);
# endif
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
- os_unfair_lock_unlock(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLockUnlock(&mutex->lock);
#else
@@ -125,22 +105,6 @@ malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex)
#endif
}
}
-
-JEMALLOC_INLINE void
-malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
-
- if (isthreaded)
- witness_assert_owner(tsdn, &mutex->witness);
-}
-
-JEMALLOC_INLINE void
-malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
-
- if (isthreaded)
- witness_assert_not_owner(tsdn, &mutex->witness);
-}
#endif
#endif /* JEMALLOC_H_INLINES */
diff --git a/deps/jemalloc/include/jemalloc/internal/nstime.h b/deps/jemalloc/include/jemalloc/internal/nstime.h
deleted file mode 100644
index 93b27dc80..000000000
--- a/deps/jemalloc/include/jemalloc/internal/nstime.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-typedef struct nstime_s nstime_t;
-
-/* Maximum supported number of seconds (~584 years). */
-#define NSTIME_SEC_MAX KQU(18446744072)
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-struct nstime_s {
- uint64_t ns;
-};
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-void nstime_init(nstime_t *time, uint64_t ns);
-void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
-uint64_t nstime_ns(const nstime_t *time);
-uint64_t nstime_sec(const nstime_t *time);
-uint64_t nstime_nsec(const nstime_t *time);
-void nstime_copy(nstime_t *time, const nstime_t *source);
-int nstime_compare(const nstime_t *a, const nstime_t *b);
-void nstime_add(nstime_t *time, const nstime_t *addend);
-void nstime_subtract(nstime_t *time, const nstime_t *subtrahend);
-void nstime_imultiply(nstime_t *time, uint64_t multiplier);
-void nstime_idivide(nstime_t *time, uint64_t divisor);
-uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor);
-#ifdef JEMALLOC_JET
-typedef bool (nstime_monotonic_t)(void);
-extern nstime_monotonic_t *nstime_monotonic;
-typedef bool (nstime_update_t)(nstime_t *);
-extern nstime_update_t *nstime_update;
-#else
-bool nstime_monotonic(void);
-bool nstime_update(nstime_t *time);
-#endif
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
diff --git a/deps/jemalloc/include/jemalloc/internal/pages.h b/deps/jemalloc/include/jemalloc/internal/pages.h
index 4ae9f156a..da7eb9686 100644
--- a/deps/jemalloc/include/jemalloc/internal/pages.h
+++ b/deps/jemalloc/include/jemalloc/internal/pages.h
@@ -9,16 +9,13 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-void *pages_map(void *addr, size_t size, bool *commit);
+void *pages_map(void *addr, size_t size);
void pages_unmap(void *addr, size_t size);
void *pages_trim(void *addr, size_t alloc_size, size_t leadsize,
- size_t size, bool *commit);
+ size_t size);
bool pages_commit(void *addr, size_t size);
bool pages_decommit(void *addr, size_t size);
bool pages_purge(void *addr, size_t size);
-bool pages_huge(void *addr, size_t size);
-bool pages_nohuge(void *addr, size_t size);
-void pages_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/deps/jemalloc/include/jemalloc/internal/ph.h b/deps/jemalloc/include/jemalloc/internal/ph.h
deleted file mode 100644
index 4f91c333f..000000000
--- a/deps/jemalloc/include/jemalloc/internal/ph.h
+++ /dev/null
@@ -1,345 +0,0 @@
-/*
- * A Pairing Heap implementation.
- *
- * "The Pairing Heap: A New Form of Self-Adjusting Heap"
- * https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf
- *
- * With auxiliary twopass list, described in a follow on paper.
- *
- * "Pairing Heaps: Experiments and Analysis"
- * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf
- *
- *******************************************************************************
- */
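As context for the removal of this header: the macros it defines are intrusive, i.e. the caller embeds the linkage in its own node type and instantiates the heap operations per type. A hypothetical usage sketch (all names invented; assumes the phn()/ph()/ph_proto()/ph_gen() macros defined further down in this header are in scope):

#include <stdint.h>

typedef struct node_s node_t;
struct node_s {
	uint64_t	key;
	phn(node_t)	link;	/* embedded prev/next/lchild linkage */
};
typedef ph(node_t) node_heap_t;

static int
node_cmp(const node_t *a, const node_t *b)
{
	return ((a->key < b->key) ? -1 : (a->key > b->key));
}

/* Generates node_heap_new(), node_heap_insert(), node_heap_remove_first(), etc. */
ph_proto(static, node_heap_, node_heap_t, node_t)
ph_gen(static, node_heap_, node_heap_t, node_t, link, node_cmp)

static node_t *
pop_min(node_heap_t *heap)
{
	return (node_heap_remove_first(heap));
}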
-
-#ifndef PH_H_
-#define PH_H_
-
-/* Node structure. */
-#define phn(a_type) \
-struct { \
- a_type *phn_prev; \
- a_type *phn_next; \
- a_type *phn_lchild; \
-}
-
-/* Root structure. */
-#define ph(a_type) \
-struct { \
- a_type *ph_root; \
-}
-
-/* Internal utility macros. */
-#define phn_lchild_get(a_type, a_field, a_phn) \
- (a_phn->a_field.phn_lchild)
-#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \
- a_phn->a_field.phn_lchild = a_lchild; \
-} while (0)
-
-#define phn_next_get(a_type, a_field, a_phn) \
- (a_phn->a_field.phn_next)
-#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \
- a_phn->a_field.phn_prev = a_prev; \
-} while (0)
-
-#define phn_prev_get(a_type, a_field, a_phn) \
- (a_phn->a_field.phn_prev)
-#define phn_next_set(a_type, a_field, a_phn, a_next) do { \
- a_phn->a_field.phn_next = a_next; \
-} while (0)
-
-#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \
- a_type *phn0child; \
- \
- assert(a_phn0 != NULL); \
- assert(a_phn1 != NULL); \
- assert(a_cmp(a_phn0, a_phn1) <= 0); \
- \
- phn_prev_set(a_type, a_field, a_phn1, a_phn0); \
- phn0child = phn_lchild_get(a_type, a_field, a_phn0); \
- phn_next_set(a_type, a_field, a_phn1, phn0child); \
- if (phn0child != NULL) \
- phn_prev_set(a_type, a_field, phn0child, a_phn1); \
- phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \
-} while (0)
-
-#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \
- if (a_phn0 == NULL) \
- r_phn = a_phn1; \
- else if (a_phn1 == NULL) \
- r_phn = a_phn0; \
- else if (a_cmp(a_phn0, a_phn1) < 0) { \
- phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \
- a_cmp); \
- r_phn = a_phn0; \
- } else { \
- phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \
- a_cmp); \
- r_phn = a_phn1; \
- } \
-} while (0)
-
-#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \
- a_type *head = NULL; \
- a_type *tail = NULL; \
- a_type *phn0 = a_phn; \
- a_type *phn1 = phn_next_get(a_type, a_field, phn0); \
- \
- /* \
- * Multipass merge, wherein the first two elements of a FIFO \
- * are repeatedly merged, and each result is appended to the \
- * singly linked FIFO, until the FIFO contains only a single \
- * element. We start with a sibling list but no reference to \
- * its tail, so we do a single pass over the sibling list to \
- * populate the FIFO. \
- */ \
- if (phn1 != NULL) { \
- a_type *phnrest = phn_next_get(a_type, a_field, phn1); \
- if (phnrest != NULL) \
- phn_prev_set(a_type, a_field, phnrest, NULL); \
- phn_prev_set(a_type, a_field, phn0, NULL); \
- phn_next_set(a_type, a_field, phn0, NULL); \
- phn_prev_set(a_type, a_field, phn1, NULL); \
- phn_next_set(a_type, a_field, phn1, NULL); \
- phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \
- head = tail = phn0; \
- phn0 = phnrest; \
- while (phn0 != NULL) { \
- phn1 = phn_next_get(a_type, a_field, phn0); \
- if (phn1 != NULL) { \
- phnrest = phn_next_get(a_type, a_field, \
- phn1); \
- if (phnrest != NULL) { \
- phn_prev_set(a_type, a_field, \
- phnrest, NULL); \
- } \
- phn_prev_set(a_type, a_field, phn0, \
- NULL); \
- phn_next_set(a_type, a_field, phn0, \
- NULL); \
- phn_prev_set(a_type, a_field, phn1, \
- NULL); \
- phn_next_set(a_type, a_field, phn1, \
- NULL); \
- phn_merge(a_type, a_field, phn0, phn1, \
- a_cmp, phn0); \
- phn_next_set(a_type, a_field, tail, \
- phn0); \
- tail = phn0; \
- phn0 = phnrest; \
- } else { \
- phn_next_set(a_type, a_field, tail, \
- phn0); \
- tail = phn0; \
- phn0 = NULL; \
- } \
- } \
- phn0 = head; \
- phn1 = phn_next_get(a_type, a_field, phn0); \
- if (phn1 != NULL) { \
- while (true) { \
- head = phn_next_get(a_type, a_field, \
- phn1); \
- assert(phn_prev_get(a_type, a_field, \
- phn0) == NULL); \
- phn_next_set(a_type, a_field, phn0, \
- NULL); \
- assert(phn_prev_get(a_type, a_field, \
- phn1) == NULL); \
- phn_next_set(a_type, a_field, phn1, \
- NULL); \
- phn_merge(a_type, a_field, phn0, phn1, \
- a_cmp, phn0); \
- if (head == NULL) \
- break; \
- phn_next_set(a_type, a_field, tail, \
- phn0); \
- tail = phn0; \
- phn0 = head; \
- phn1 = phn_next_get(a_type, a_field, \
- phn0); \
- } \
- } \
- } \
- r_phn = phn0; \
-} while (0)
-
-#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \
- a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \
- if (phn != NULL) { \
- phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \
- phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \
- phn_prev_set(a_type, a_field, phn, NULL); \
- ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \
- assert(phn_next_get(a_type, a_field, phn) == NULL); \
- phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \
- a_ph->ph_root); \
- } \
-} while (0)
-
-#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \
- a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \
- if (lchild == NULL) \
- r_phn = NULL; \
- else { \
- ph_merge_siblings(a_type, a_field, lchild, a_cmp, \
- r_phn); \
- } \
-} while (0)
-
-/*
- * The ph_proto() macro generates function prototypes that correspond to the
- * functions generated by an equivalently parameterized call to ph_gen().
- */
-#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \
-a_attr void a_prefix##new(a_ph_type *ph); \
-a_attr bool a_prefix##empty(a_ph_type *ph); \
-a_attr a_type *a_prefix##first(a_ph_type *ph); \
-a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \
-a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \
-a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn);
-
-/*
- * The ph_gen() macro generates a type-specific pairing heap implementation,
- * based on the above cpp macros.
- */
-#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \
-a_attr void \
-a_prefix##new(a_ph_type *ph) \
-{ \
- \
- memset(ph, 0, sizeof(ph(a_type))); \
-} \
-a_attr bool \
-a_prefix##empty(a_ph_type *ph) \
-{ \
- \
- return (ph->ph_root == NULL); \
-} \
-a_attr a_type * \
-a_prefix##first(a_ph_type *ph) \
-{ \
- \
- if (ph->ph_root == NULL) \
- return (NULL); \
- ph_merge_aux(a_type, a_field, ph, a_cmp); \
- return (ph->ph_root); \
-} \
-a_attr void \
-a_prefix##insert(a_ph_type *ph, a_type *phn) \
-{ \
- \
- memset(&phn->a_field, 0, sizeof(phn(a_type))); \
- \
- /* \
- * Treat the root as an aux list during insertion, and lazily \
- * merge during a_prefix##remove_first(). For elements that \
- * are inserted, then removed via a_prefix##remove() before the \
- * aux list is ever processed, this makes insert/remove \
- * constant-time, whereas eager merging would make insert \
- * O(log n). \
- */ \
- if (ph->ph_root == NULL) \
- ph->ph_root = phn; \
- else { \
- phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \
- a_field, ph->ph_root)); \
- if (phn_next_get(a_type, a_field, ph->ph_root) != \
- NULL) { \
- phn_prev_set(a_type, a_field, \
- phn_next_get(a_type, a_field, ph->ph_root), \
- phn); \
- } \
- phn_prev_set(a_type, a_field, phn, ph->ph_root); \
- phn_next_set(a_type, a_field, ph->ph_root, phn); \
- } \
-} \
-a_attr a_type * \
-a_prefix##remove_first(a_ph_type *ph) \
-{ \
- a_type *ret; \
- \
- if (ph->ph_root == NULL) \
- return (NULL); \
- ph_merge_aux(a_type, a_field, ph, a_cmp); \
- \
- ret = ph->ph_root; \
- \
- ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
- ph->ph_root); \
- \
- return (ret); \
-} \
-a_attr void \
-a_prefix##remove(a_ph_type *ph, a_type *phn) \
-{ \
- a_type *replace, *parent; \
- \
- /* \
- * We can delete from aux list without merging it, but we need \
- * to merge if we are dealing with the root node. \
- */ \
- if (ph->ph_root == phn) { \
- ph_merge_aux(a_type, a_field, ph, a_cmp); \
- if (ph->ph_root == phn) { \
- ph_merge_children(a_type, a_field, ph->ph_root, \
- a_cmp, ph->ph_root); \
- return; \
- } \
- } \
- \
- /* Get parent (if phn is leftmost child) before mutating. */ \
- if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \
- if (phn_lchild_get(a_type, a_field, parent) != phn) \
- parent = NULL; \
- } \
- /* Find a possible replacement node, and link to parent. */ \
- ph_merge_children(a_type, a_field, phn, a_cmp, replace); \
- /* Set next/prev for sibling linked list. */ \
- if (replace != NULL) { \
- if (parent != NULL) { \
- phn_prev_set(a_type, a_field, replace, parent); \
- phn_lchild_set(a_type, a_field, parent, \
- replace); \
- } else { \
- phn_prev_set(a_type, a_field, replace, \
- phn_prev_get(a_type, a_field, phn)); \
- if (phn_prev_get(a_type, a_field, phn) != \
- NULL) { \
- phn_next_set(a_type, a_field, \
- phn_prev_get(a_type, a_field, phn), \
- replace); \
- } \
- } \
- phn_next_set(a_type, a_field, replace, \
- phn_next_get(a_type, a_field, phn)); \
- if (phn_next_get(a_type, a_field, phn) != NULL) { \
- phn_prev_set(a_type, a_field, \
- phn_next_get(a_type, a_field, phn), \
- replace); \
- } \
- } else { \
- if (parent != NULL) { \
- a_type *next = phn_next_get(a_type, a_field, \
- phn); \
- phn_lchild_set(a_type, a_field, parent, next); \
- if (next != NULL) { \
- phn_prev_set(a_type, a_field, next, \
- parent); \
- } \
- } else { \
- assert(phn_prev_get(a_type, a_field, phn) != \
- NULL); \
- phn_next_set(a_type, a_field, \
- phn_prev_get(a_type, a_field, phn), \
- phn_next_get(a_type, a_field, phn)); \
- } \
- if (phn_next_get(a_type, a_field, phn) != NULL) { \
- phn_prev_set(a_type, a_field, \
- phn_next_get(a_type, a_field, phn), \
- phn_prev_get(a_type, a_field, phn)); \
- } \
- } \
-}
-
-#endif /* PH_H_ */
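[Editor's note, not part of the patch] The removed ph.h above defines a pairing heap purely as cpp macros: phn() embeds the linkage in a node type, ph() declares the root, and ph_gen() emits the functions listed by ph_proto(), with insertion going onto a lazily merged aux list. A minimal instantiation sketch, assuming hypothetical node_t, heap_t and node_cmp names:

#include <assert.h>
#include <stdint.h>
#include <string.h>

typedef struct node_s node_t;
struct node_s {
	uint64_t key;
	phn(node_t) link;	/* embedded pairing-heap linkage */
};
typedef ph(node_t) heap_t;	/* heap root */

static int
node_cmp(const node_t *a, const node_t *b)
{
	return ((a->key > b->key) - (a->key < b->key));
}

/*
 * Emits heap_new(), heap_empty(), heap_first(), heap_insert(),
 * heap_remove_first() and heap_remove(), matching ph_proto() above.
 */
ph_gen(static, heap_, heap_t, node_t, link, node_cmp)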
diff --git a/deps/jemalloc/include/jemalloc/internal/private_symbols.txt b/deps/jemalloc/include/jemalloc/internal/private_symbols.txt
index c1c6c4090..a90021aa6 100644
--- a/deps/jemalloc/include/jemalloc/internal/private_symbols.txt
+++ b/deps/jemalloc/include/jemalloc/internal/private_symbols.txt
@@ -3,15 +3,12 @@ a0get
a0malloc
arena_aalloc
arena_alloc_junk_small
-arena_basic_stats_merge
arena_bin_index
arena_bin_info
-arena_bitselm_get_const
-arena_bitselm_get_mutable
+arena_bitselm_get
arena_boot
arena_choose
arena_choose_hard
-arena_choose_impl
arena_chunk_alloc_huge
arena_chunk_cache_maybe_insert
arena_chunk_cache_maybe_remove
@@ -28,25 +25,18 @@ arena_dalloc_junk_small
arena_dalloc_large
arena_dalloc_large_junked_locked
arena_dalloc_small
-arena_decay_tick
-arena_decay_ticks
-arena_decay_time_default_get
-arena_decay_time_default_set
-arena_decay_time_get
-arena_decay_time_set
arena_dss_prec_get
arena_dss_prec_set
-arena_extent_sn_next
arena_get
-arena_ichoose
+arena_get_hard
arena_init
arena_lg_dirty_mult_default_get
arena_lg_dirty_mult_default_set
arena_lg_dirty_mult_get
arena_lg_dirty_mult_set
arena_malloc
-arena_malloc_hard
arena_malloc_large
+arena_malloc_small
arena_mapbits_allocated_get
arena_mapbits_binind_get
arena_mapbits_decommitted_get
@@ -57,6 +47,9 @@ arena_mapbits_large_binind_set
arena_mapbits_large_get
arena_mapbits_large_set
arena_mapbits_large_size_get
+arena_mapbitsp_get
+arena_mapbitsp_read
+arena_mapbitsp_write
arena_mapbits_size_decode
arena_mapbits_size_encode
arena_mapbits_small_runind_get
@@ -65,33 +58,23 @@ arena_mapbits_unallocated_set
arena_mapbits_unallocated_size_get
arena_mapbits_unallocated_size_set
arena_mapbits_unzeroed_get
-arena_mapbitsp_get_const
-arena_mapbitsp_get_mutable
-arena_mapbitsp_read
-arena_mapbitsp_write
arena_maxrun
arena_maybe_purge
arena_metadata_allocated_add
arena_metadata_allocated_get
arena_metadata_allocated_sub
arena_migrate
-arena_miscelm_get_const
-arena_miscelm_get_mutable
+arena_miscelm_get
arena_miscelm_to_pageind
arena_miscelm_to_rpages
+arena_nbound
arena_new
arena_node_alloc
arena_node_dalloc
-arena_nthreads_dec
-arena_nthreads_get
-arena_nthreads_inc
arena_palloc
arena_postfork_child
arena_postfork_parent
-arena_prefork0
-arena_prefork1
-arena_prefork2
-arena_prefork3
+arena_prefork
arena_prof_accum
arena_prof_accum_impl
arena_prof_accum_locked
@@ -100,25 +83,21 @@ arena_prof_tctx_get
arena_prof_tctx_reset
arena_prof_tctx_set
arena_ptr_small_binind_get
-arena_purge
+arena_purge_all
arena_quarantine_junk_small
arena_ralloc
arena_ralloc_junk_large
arena_ralloc_no_move
arena_rd_to_miscelm
arena_redzone_corruption
-arena_reset
arena_run_regind
arena_run_to_miscelm
arena_salloc
+arenas_cache_bypass_cleanup
+arenas_cache_cleanup
arena_sdalloc
arena_stats_merge
arena_tcache_fill_small
-arena_tdata_get
-arena_tdata_get_hard
-arenas
-arenas_tdata_bypass_cleanup
-arenas_tdata_cleanup
atomic_add_p
atomic_add_u
atomic_add_uint32
@@ -134,11 +113,6 @@ atomic_sub_u
atomic_sub_uint32
atomic_sub_uint64
atomic_sub_z
-atomic_write_p
-atomic_write_u
-atomic_write_uint32
-atomic_write_uint64
-atomic_write_z
base_alloc
base_boot
base_postfork_child
@@ -148,6 +122,7 @@ base_stats_get
bitmap_full
bitmap_get
bitmap_info_init
+bitmap_info_ngroups
bitmap_init
bitmap_set
bitmap_sfu
@@ -164,25 +139,32 @@ chunk_alloc_dss
chunk_alloc_mmap
chunk_alloc_wrapper
chunk_boot
+chunk_dalloc_arena
chunk_dalloc_cache
chunk_dalloc_mmap
chunk_dalloc_wrapper
chunk_deregister
chunk_dss_boot
-chunk_dss_mergeable
+chunk_dss_postfork_child
+chunk_dss_postfork_parent
chunk_dss_prec_get
chunk_dss_prec_set
+chunk_dss_prefork
chunk_hooks_default
chunk_hooks_get
chunk_hooks_set
chunk_in_dss
chunk_lookup
chunk_npages
+chunk_postfork_child
+chunk_postfork_parent
+chunk_prefork
+chunk_purge_arena
chunk_purge_wrapper
chunk_register
-chunks_rtree
chunksize
chunksize_mask
+chunks_rtree
ckh_count
ckh_delete
ckh_insert
@@ -201,7 +183,6 @@ ctl_nametomib
ctl_postfork_child
ctl_postfork_parent
ctl_prefork
-decay_ticker_get
dss_prec_names
extent_node_achunk_get
extent_node_achunk_set
@@ -209,8 +190,6 @@ extent_node_addr_get
extent_node_addr_set
extent_node_arena_get
extent_node_arena_set
-extent_node_committed_get
-extent_node_committed_set
extent_node_dirty_insert
extent_node_dirty_linkage_init
extent_node_dirty_remove
@@ -219,12 +198,8 @@ extent_node_prof_tctx_get
extent_node_prof_tctx_set
extent_node_size_get
extent_node_size_set
-extent_node_sn_get
-extent_node_sn_set
extent_node_zeroed_get
extent_node_zeroed_set
-extent_tree_ad_destroy
-extent_tree_ad_destroy_recurse
extent_tree_ad_empty
extent_tree_ad_first
extent_tree_ad_insert
@@ -242,31 +217,23 @@ extent_tree_ad_reverse_iter
extent_tree_ad_reverse_iter_recurse
extent_tree_ad_reverse_iter_start
extent_tree_ad_search
-extent_tree_szsnad_destroy
-extent_tree_szsnad_destroy_recurse
-extent_tree_szsnad_empty
-extent_tree_szsnad_first
-extent_tree_szsnad_insert
-extent_tree_szsnad_iter
-extent_tree_szsnad_iter_recurse
-extent_tree_szsnad_iter_start
-extent_tree_szsnad_last
-extent_tree_szsnad_new
-extent_tree_szsnad_next
-extent_tree_szsnad_nsearch
-extent_tree_szsnad_prev
-extent_tree_szsnad_psearch
-extent_tree_szsnad_remove
-extent_tree_szsnad_reverse_iter
-extent_tree_szsnad_reverse_iter_recurse
-extent_tree_szsnad_reverse_iter_start
-extent_tree_szsnad_search
-ffs_llu
-ffs_lu
-ffs_u
-ffs_u32
-ffs_u64
-ffs_zu
+extent_tree_szad_empty
+extent_tree_szad_first
+extent_tree_szad_insert
+extent_tree_szad_iter
+extent_tree_szad_iter_recurse
+extent_tree_szad_iter_start
+extent_tree_szad_last
+extent_tree_szad_new
+extent_tree_szad_next
+extent_tree_szad_nsearch
+extent_tree_szad_prev
+extent_tree_szad_psearch
+extent_tree_szad_remove
+extent_tree_szad_reverse_iter
+extent_tree_szad_reverse_iter_recurse
+extent_tree_szad_reverse_iter_start
+extent_tree_szad_search
get_errno
hash
hash_fmix_32
@@ -290,16 +257,19 @@ huge_ralloc
huge_ralloc_no_move
huge_salloc
iaalloc
-ialloc
iallocztm
-iarena_cleanup
+icalloc
+icalloct
idalloc
+idalloct
idalloctm
-in_valgrind
+imalloc
+imalloct
index2size
index2size_compute
index2size_lookup
index2size_tab
+in_valgrind
ipalloc
ipalloct
ipallocztm
@@ -318,11 +288,7 @@ jemalloc_postfork_parent
jemalloc_prefork
large_maxclass
lg_floor
-lg_prof_sample
malloc_cprintf
-malloc_mutex_assert_not_owner
-malloc_mutex_assert_owner
-malloc_mutex_boot
malloc_mutex_init
malloc_mutex_lock
malloc_mutex_postfork_child
@@ -344,29 +310,12 @@ malloc_write
map_bias
map_misc_offset
mb_write
-narenas_auto
-narenas_tdata_cleanup
+mutex_boot
+narenas_cache_cleanup
narenas_total_get
ncpus
nhbins
-nhclasses
-nlclasses
-nstime_add
-nstime_compare
-nstime_copy
-nstime_divide
-nstime_idivide
-nstime_imultiply
-nstime_init
-nstime_init2
-nstime_monotonic
-nstime_ns
-nstime_nsec
-nstime_sec
-nstime_subtract
-nstime_update
opt_abort
-opt_decay_time
opt_dss
opt_junk
opt_junk_alloc
@@ -385,7 +334,6 @@ opt_prof_gdump
opt_prof_leak
opt_prof_prefix
opt_prof_thread_active_init
-opt_purge
opt_quarantine
opt_redzone
opt_stats_print
@@ -394,32 +342,13 @@ opt_utrace
opt_xmalloc
opt_zero
p2rz
-pages_boot
pages_commit
pages_decommit
-pages_huge
pages_map
-pages_nohuge
pages_purge
pages_trim
pages_unmap
-pind2sz
-pind2sz_compute
-pind2sz_lookup
-pind2sz_tab
-pow2_ceil_u32
-pow2_ceil_u64
-pow2_ceil_zu
-prng_lg_range_u32
-prng_lg_range_u64
-prng_lg_range_zu
-prng_range_u32
-prng_range_u64
-prng_range_zu
-prng_state_next_u32
-prng_state_next_u64
-prng_state_next_zu
-prof_active
+pow2_ceil
prof_active_get
prof_active_get_unlocked
prof_active_set
@@ -429,7 +358,6 @@ prof_backtrace
prof_boot0
prof_boot1
prof_boot2
-prof_bt_count
prof_dump_header
prof_dump_open
prof_free
@@ -447,8 +375,7 @@ prof_malloc_sample_object
prof_mdump
prof_postfork_child
prof_postfork_parent
-prof_prefork0
-prof_prefork1
+prof_prefork
prof_realloc
prof_reset
prof_sample_accum_update
@@ -457,7 +384,6 @@ prof_tctx_get
prof_tctx_reset
prof_tctx_set
prof_tdata_cleanup
-prof_tdata_count
prof_tdata_get
prof_tdata_init
prof_tdata_reinit
@@ -467,13 +393,11 @@ prof_thread_active_init_set
prof_thread_active_set
prof_thread_name_get
prof_thread_name_set
-psz2ind
-psz2u
-purge_mode_names
quarantine
quarantine_alloc_hook
quarantine_alloc_hook_work
quarantine_cleanup
+register_zone
rtree_child_read
rtree_child_read_hard
rtree_child_tryread
@@ -489,8 +413,6 @@ rtree_subtree_read_hard
rtree_subtree_tryread
rtree_val_read
rtree_val_write
-run_quantize_ceil
-run_quantize_floor
s2u
s2u_compute
s2u_lookup
@@ -500,8 +422,6 @@ size2index
size2index_compute
size2index_lookup
size2index_tab
-spin_adaptive
-spin_init
stats_cactive
stats_cactive_add
stats_cactive_get
@@ -511,6 +431,8 @@ tcache_alloc_easy
tcache_alloc_large
tcache_alloc_small
tcache_alloc_small_hard
+tcache_arena_associate
+tcache_arena_dissociate
tcache_arena_reassociate
tcache_bin_flush_large
tcache_bin_flush_small
@@ -529,103 +451,49 @@ tcache_flush
tcache_get
tcache_get_hard
tcache_maxclass
-tcache_salloc
-tcache_stats_merge
tcaches
+tcache_salloc
tcaches_create
tcaches_destroy
tcaches_flush
tcaches_get
+tcache_stats_merge
thread_allocated_cleanup
thread_deallocated_cleanup
-ticker_copy
-ticker_init
-ticker_read
-ticker_tick
-ticker_ticks
tsd_arena_get
tsd_arena_set
-tsd_arenap_get
-tsd_arenas_tdata_bypass_get
-tsd_arenas_tdata_bypass_set
-tsd_arenas_tdata_bypassp_get
-tsd_arenas_tdata_get
-tsd_arenas_tdata_set
-tsd_arenas_tdatap_get
tsd_boot
tsd_boot0
tsd_boot1
tsd_booted
-tsd_booted_get
tsd_cleanup
tsd_cleanup_wrapper
tsd_fetch
-tsd_fetch_impl
tsd_get
-tsd_get_allocates
-tsd_iarena_get
-tsd_iarena_set
-tsd_iarenap_get
+tsd_wrapper_get
+tsd_wrapper_set
tsd_initialized
tsd_init_check_recursion
tsd_init_finish
tsd_init_head
-tsd_narenas_tdata_get
-tsd_narenas_tdata_set
-tsd_narenas_tdatap_get
-tsd_wrapper_get
-tsd_wrapper_set
tsd_nominal
-tsd_prof_tdata_get
-tsd_prof_tdata_set
-tsd_prof_tdatap_get
tsd_quarantine_get
tsd_quarantine_set
-tsd_quarantinep_get
tsd_set
tsd_tcache_enabled_get
tsd_tcache_enabled_set
-tsd_tcache_enabledp_get
tsd_tcache_get
tsd_tcache_set
-tsd_tcachep_get
+tsd_tls
+tsd_tsd
+tsd_prof_tdata_get
+tsd_prof_tdata_set
tsd_thread_allocated_get
tsd_thread_allocated_set
-tsd_thread_allocatedp_get
tsd_thread_deallocated_get
tsd_thread_deallocated_set
-tsd_thread_deallocatedp_get
-tsd_tls
-tsd_tsd
-tsd_tsdn
-tsd_witness_fork_get
-tsd_witness_fork_set
-tsd_witness_forkp_get
-tsd_witnesses_get
-tsd_witnesses_set
-tsd_witnessesp_get
-tsdn_fetch
-tsdn_null
-tsdn_tsd
u2rz
valgrind_freelike_block
valgrind_make_mem_defined
valgrind_make_mem_noaccess
valgrind_make_mem_undefined
-witness_assert_lockless
-witness_assert_not_owner
-witness_assert_owner
-witness_fork_cleanup
-witness_init
-witness_lock
-witness_lock_error
-witness_lockless_error
-witness_not_owner_error
-witness_owner
-witness_owner_error
-witness_postfork_child
-witness_postfork_parent
-witness_prefork
-witness_unlock
-witnesses_cleanup
-zone_register
diff --git a/deps/jemalloc/include/jemalloc/internal/prng.h b/deps/jemalloc/include/jemalloc/internal/prng.h
index c2bda19c6..216d0ef47 100644
--- a/deps/jemalloc/include/jemalloc/internal/prng.h
+++ b/deps/jemalloc/include/jemalloc/internal/prng.h
@@ -18,13 +18,31 @@
* proportional to bit position. For example, the lowest bit has a cycle of 2,
* the next has a cycle of 4, etc. For this reason, we prefer to use the upper
* bits.
+ *
+ * Macro parameters:
+ * uint32_t r : Result.
+ * unsigned lg_range : (0..32], number of least significant bits to return.
+ * uint32_t state : Seed value.
+ * const uint32_t a, c : See above discussion.
*/
-
-#define PRNG_A_32 UINT32_C(1103515241)
-#define PRNG_C_32 UINT32_C(12347)
-
-#define PRNG_A_64 UINT64_C(6364136223846793005)
-#define PRNG_C_64 UINT64_C(1442695040888963407)
+#define prng32(r, lg_range, state, a, c) do { \
+ assert((lg_range) > 0); \
+ assert((lg_range) <= 32); \
+ \
+ r = (state * (a)) + (c); \
+ state = r; \
+ r >>= (32 - (lg_range)); \
+} while (false)
+
+/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */
+#define prng64(r, lg_range, state, a, c) do { \
+ assert((lg_range) > 0); \
+ assert((lg_range) <= 64); \
+ \
+ r = (state * (a)) + (c); \
+ state = r; \
+ r >>= (64 - (lg_range)); \
+} while (false)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
@@ -38,170 +56,5 @@
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
-#ifndef JEMALLOC_ENABLE_INLINE
-uint32_t prng_state_next_u32(uint32_t state);
-uint64_t prng_state_next_u64(uint64_t state);
-size_t prng_state_next_zu(size_t state);
-
-uint32_t prng_lg_range_u32(uint32_t *state, unsigned lg_range,
- bool atomic);
-uint64_t prng_lg_range_u64(uint64_t *state, unsigned lg_range);
-size_t prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic);
-
-uint32_t prng_range_u32(uint32_t *state, uint32_t range, bool atomic);
-uint64_t prng_range_u64(uint64_t *state, uint64_t range);
-size_t prng_range_zu(size_t *state, size_t range, bool atomic);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PRNG_C_))
-JEMALLOC_ALWAYS_INLINE uint32_t
-prng_state_next_u32(uint32_t state)
-{
-
- return ((state * PRNG_A_32) + PRNG_C_32);
-}
-
-JEMALLOC_ALWAYS_INLINE uint64_t
-prng_state_next_u64(uint64_t state)
-{
-
- return ((state * PRNG_A_64) + PRNG_C_64);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-prng_state_next_zu(size_t state)
-{
-
-#if LG_SIZEOF_PTR == 2
- return ((state * PRNG_A_32) + PRNG_C_32);
-#elif LG_SIZEOF_PTR == 3
- return ((state * PRNG_A_64) + PRNG_C_64);
-#else
-#error Unsupported pointer size
-#endif
-}
-
-JEMALLOC_ALWAYS_INLINE uint32_t
-prng_lg_range_u32(uint32_t *state, unsigned lg_range, bool atomic)
-{
- uint32_t ret, state1;
-
- assert(lg_range > 0);
- assert(lg_range <= 32);
-
- if (atomic) {
- uint32_t state0;
-
- do {
- state0 = atomic_read_uint32(state);
- state1 = prng_state_next_u32(state0);
- } while (atomic_cas_uint32(state, state0, state1));
- } else {
- state1 = prng_state_next_u32(*state);
- *state = state1;
- }
- ret = state1 >> (32 - lg_range);
-
- return (ret);
-}
-
-/* 64-bit atomic operations cannot be supported on all relevant platforms. */
-JEMALLOC_ALWAYS_INLINE uint64_t
-prng_lg_range_u64(uint64_t *state, unsigned lg_range)
-{
- uint64_t ret, state1;
-
- assert(lg_range > 0);
- assert(lg_range <= 64);
-
- state1 = prng_state_next_u64(*state);
- *state = state1;
- ret = state1 >> (64 - lg_range);
-
- return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic)
-{
- size_t ret, state1;
-
- assert(lg_range > 0);
- assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR));
-
- if (atomic) {
- size_t state0;
-
- do {
- state0 = atomic_read_z(state);
- state1 = prng_state_next_zu(state0);
- } while (atomic_cas_z(state, state0, state1));
- } else {
- state1 = prng_state_next_zu(*state);
- *state = state1;
- }
- ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);
-
- return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE uint32_t
-prng_range_u32(uint32_t *state, uint32_t range, bool atomic)
-{
- uint32_t ret;
- unsigned lg_range;
-
- assert(range > 1);
-
- /* Compute the ceiling of lg(range). */
- lg_range = ffs_u32(pow2_ceil_u32(range)) - 1;
-
- /* Generate a result in [0..range) via repeated trial. */
- do {
- ret = prng_lg_range_u32(state, lg_range, atomic);
- } while (ret >= range);
-
- return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE uint64_t
-prng_range_u64(uint64_t *state, uint64_t range)
-{
- uint64_t ret;
- unsigned lg_range;
-
- assert(range > 1);
-
- /* Compute the ceiling of lg(range). */
- lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
-
- /* Generate a result in [0..range) via repeated trial. */
- do {
- ret = prng_lg_range_u64(state, lg_range);
- } while (ret >= range);
-
- return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-prng_range_zu(size_t *state, size_t range, bool atomic)
-{
- size_t ret;
- unsigned lg_range;
-
- assert(range > 1);
-
- /* Compute the ceiling of lg(range). */
- lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
-
- /* Generate a result in [0..range) via repeated trial. */
- do {
- ret = prng_lg_range_zu(state, lg_range, atomic);
- } while (ret >= range);
-
- return (ret);
-}
-#endif
-
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
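[Editor's note, not part of the patch] prng32() above advances the caller-supplied state in place and leaves the upper lg_range bits of the new LCG output in r. A hedged usage sketch; the seed and the random_in_16_bits() wrapper are hypothetical, while the a/c constants are the PRNG_A_32/PRNG_C_32 values used by the deleted inline functions:

#include <assert.h>
#include <stdint.h>

static uint32_t prng_state = 42;	/* hypothetical seed */

static uint32_t
random_in_16_bits(void)
{
	uint32_t r;

	/* Advance the LCG and keep the top 16 bits of the new state. */
	prng32(r, 16, prng_state, UINT32_C(1103515241), UINT32_C(12347));
	return (r);
}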
diff --git a/deps/jemalloc/include/jemalloc/internal/prof.h b/deps/jemalloc/include/jemalloc/internal/prof.h
index 8293b71ed..e5198c3e8 100644
--- a/deps/jemalloc/include/jemalloc/internal/prof.h
+++ b/deps/jemalloc/include/jemalloc/internal/prof.h
@@ -281,7 +281,7 @@ extern uint64_t prof_interval;
extern size_t lg_prof_sample;
void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
-void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
+void prof_malloc_sample_object(const void *ptr, size_t usize,
prof_tctx_t *tctx);
void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
void bt_init(prof_bt_t *bt, void **vec);
@@ -293,33 +293,32 @@ size_t prof_bt_count(void);
const prof_cnt_t *prof_cnt_all(void);
typedef int (prof_dump_open_t)(bool, const char *);
extern prof_dump_open_t *prof_dump_open;
-typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *);
+typedef bool (prof_dump_header_t)(bool, const prof_cnt_t *);
extern prof_dump_header_t *prof_dump_header;
#endif
-void prof_idump(tsdn_t *tsdn);
-bool prof_mdump(tsd_t *tsd, const char *filename);
-void prof_gdump(tsdn_t *tsdn);
+void prof_idump(void);
+bool prof_mdump(const char *filename);
+void prof_gdump(void);
prof_tdata_t *prof_tdata_init(tsd_t *tsd);
prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
void prof_reset(tsd_t *tsd, size_t lg_sample);
void prof_tdata_cleanup(tsd_t *tsd);
-bool prof_active_get(tsdn_t *tsdn);
-bool prof_active_set(tsdn_t *tsdn, bool active);
-const char *prof_thread_name_get(tsd_t *tsd);
+const char *prof_thread_name_get(void);
+bool prof_active_get(void);
+bool prof_active_set(bool active);
int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
-bool prof_thread_active_get(tsd_t *tsd);
-bool prof_thread_active_set(tsd_t *tsd, bool active);
-bool prof_thread_active_init_get(tsdn_t *tsdn);
-bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
-bool prof_gdump_get(tsdn_t *tsdn);
-bool prof_gdump_set(tsdn_t *tsdn, bool active);
+bool prof_thread_active_get(void);
+bool prof_thread_active_set(bool active);
+bool prof_thread_active_init_get(void);
+bool prof_thread_active_init_set(bool active_init);
+bool prof_gdump_get(void);
+bool prof_gdump_set(bool active);
void prof_boot0(void);
void prof_boot1(void);
-bool prof_boot2(tsd_t *tsd);
-void prof_prefork0(tsdn_t *tsdn);
-void prof_prefork1(tsdn_t *tsdn);
-void prof_postfork_parent(tsdn_t *tsdn);
-void prof_postfork_child(tsdn_t *tsdn);
+bool prof_boot2(void);
+void prof_prefork(void);
+void prof_postfork_parent(void);
+void prof_postfork_child(void);
void prof_sample_threshold_update(prof_tdata_t *tdata);
#endif /* JEMALLOC_H_EXTERNS */
@@ -330,17 +329,17 @@ void prof_sample_threshold_update(prof_tdata_t *tdata);
bool prof_active_get_unlocked(void);
bool prof_gdump_get_unlocked(void);
prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create);
-prof_tctx_t *prof_tctx_get(tsdn_t *tsdn, const void *ptr);
-void prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
- prof_tctx_t *tctx);
-void prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
- const void *old_ptr, prof_tctx_t *tctx);
bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit,
prof_tdata_t **tdata_out);
prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active,
bool update);
-void prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize,
+prof_tctx_t *prof_tctx_get(const void *ptr);
+void prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
+void prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
+ prof_tctx_t *tctx);
+void prof_malloc_sample_object(const void *ptr, size_t usize,
prof_tctx_t *tctx);
+void prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx);
void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize,
prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr,
size_t old_usize, prof_tctx_t *old_tctx);
@@ -398,34 +397,34 @@ prof_tdata_get(tsd_t *tsd, bool create)
}
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
-prof_tctx_get(tsdn_t *tsdn, const void *ptr)
+prof_tctx_get(const void *ptr)
{
cassert(config_prof);
assert(ptr != NULL);
- return (arena_prof_tctx_get(tsdn, ptr));
+ return (arena_prof_tctx_get(ptr));
}
JEMALLOC_ALWAYS_INLINE void
-prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx)
+prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
{
cassert(config_prof);
assert(ptr != NULL);
- arena_prof_tctx_set(tsdn, ptr, usize, tctx);
+ arena_prof_tctx_set(ptr, usize, tctx);
}
JEMALLOC_ALWAYS_INLINE void
-prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, const void *old_ptr,
+prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
prof_tctx_t *old_tctx)
{
cassert(config_prof);
assert(ptr != NULL);
- arena_prof_tctx_reset(tsdn, ptr, usize, old_ptr, old_tctx);
+ arena_prof_tctx_reset(ptr, usize, old_ptr, old_tctx);
}
JEMALLOC_ALWAYS_INLINE bool
@@ -437,16 +436,16 @@ prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
cassert(config_prof);
tdata = prof_tdata_get(tsd, true);
- if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX))
+ if ((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
tdata = NULL;
if (tdata_out != NULL)
*tdata_out = tdata;
- if (unlikely(tdata == NULL))
+ if (tdata == NULL)
return (true);
- if (likely(tdata->bytes_until_sample >= usize)) {
+ if (tdata->bytes_until_sample >= usize) {
if (update)
tdata->bytes_until_sample -= usize;
return (true);
@@ -480,17 +479,17 @@ prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update)
}
JEMALLOC_ALWAYS_INLINE void
-prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx)
+prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx)
{
cassert(config_prof);
assert(ptr != NULL);
- assert(usize == isalloc(tsdn, ptr, true));
+ assert(usize == isalloc(ptr, true));
if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
- prof_malloc_sample_object(tsdn, ptr, usize, tctx);
+ prof_malloc_sample_object(ptr, usize, tctx);
else
- prof_tctx_set(tsdn, ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
+ prof_tctx_set(ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
}
JEMALLOC_ALWAYS_INLINE void
@@ -504,7 +503,7 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
if (prof_active && !updated && ptr != NULL) {
- assert(usize == isalloc(tsd_tsdn(tsd), ptr, true));
+ assert(usize == isalloc(ptr, true));
if (prof_sample_accum_update(tsd, usize, true, NULL)) {
/*
* Don't sample. The usize passed to prof_alloc_prep()
@@ -513,7 +512,6 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
* though its actual usize was insufficient to cross the
* sample threshold.
*/
- prof_alloc_rollback(tsd, tctx, true);
tctx = (prof_tctx_t *)(uintptr_t)1U;
}
}
@@ -522,9 +520,9 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
if (unlikely(sampled))
- prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
+ prof_malloc_sample_object(ptr, usize, tctx);
else
- prof_tctx_reset(tsd_tsdn(tsd), ptr, usize, old_ptr, old_tctx);
+ prof_tctx_reset(ptr, usize, old_ptr, old_tctx);
if (unlikely(old_sampled))
prof_free_sampled_object(tsd, old_usize, old_tctx);
@@ -533,10 +531,10 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize)
{
- prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);
+ prof_tctx_t *tctx = prof_tctx_get(ptr);
cassert(config_prof);
- assert(usize == isalloc(tsd_tsdn(tsd), ptr, true));
+ assert(usize == isalloc(ptr, true));
if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
prof_free_sampled_object(tsd, usize, tctx);
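[Editor's note, not part of the patch] prof_malloc(), prof_realloc() and prof_free() above all rely on the same sentinel convention for tctx. A hedged restatement as a hypothetical helper, not a jemalloc function:

/*
 * (prof_tctx_t *)(uintptr_t)1U marks a non-sampled allocation; any value
 * greater than 1 is a real prof_tctx_t recorded for a sampled allocation.
 */
static inline bool
prof_tctx_is_sampled(const prof_tctx_t *tctx)	/* hypothetical helper */
{
	return ((uintptr_t)tctx > (uintptr_t)1U);
}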
diff --git a/deps/jemalloc/include/jemalloc/internal/rb.h b/deps/jemalloc/include/jemalloc/internal/rb.h
index 3770342f8..2ca8e5933 100644
--- a/deps/jemalloc/include/jemalloc/internal/rb.h
+++ b/deps/jemalloc/include/jemalloc/internal/rb.h
@@ -42,6 +42,7 @@ struct { \
#define rb_tree(a_type) \
struct { \
a_type *rbt_root; \
+ a_type rbt_nil; \
}
/* Left accessors. */
@@ -78,15 +79,6 @@ struct { \
(a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \
(a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \
} while (0)
-
-/* Node initializer. */
-#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
- /* Bookkeeping bit cannot be used by node pointer. */ \
- assert(((uintptr_t)(a_node) & 0x1) == 0); \
- rbtn_left_set(a_type, a_field, (a_node), NULL); \
- rbtn_right_set(a_type, a_field, (a_node), NULL); \
- rbtn_red_set(a_type, a_field, (a_node)); \
-} while (0)
#else
/* Right accessors. */
#define rbtn_right_get(a_type, a_field, a_node) \
@@ -107,26 +99,28 @@ struct { \
#define rbtn_black_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_red = false; \
} while (0)
+#endif
/* Node initializer. */
#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
- rbtn_left_set(a_type, a_field, (a_node), NULL); \
- rbtn_right_set(a_type, a_field, (a_node), NULL); \
+ rbtn_left_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \
+ rbtn_right_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \
rbtn_red_set(a_type, a_field, (a_node)); \
} while (0)
-#endif
/* Tree initializer. */
#define rb_new(a_type, a_field, a_rbt) do { \
- (a_rbt)->rbt_root = NULL; \
+ (a_rbt)->rbt_root = &(a_rbt)->rbt_nil; \
+ rbt_node_new(a_type, a_field, a_rbt, &(a_rbt)->rbt_nil); \
+ rbtn_black_set(a_type, a_field, &(a_rbt)->rbt_nil); \
} while (0)
/* Internal utility macros. */
#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \
(r_node) = (a_root); \
- if ((r_node) != NULL) { \
+ if ((r_node) != &(a_rbt)->rbt_nil) { \
for (; \
- rbtn_left_get(a_type, a_field, (r_node)) != NULL; \
+ rbtn_left_get(a_type, a_field, (r_node)) != &(a_rbt)->rbt_nil;\
(r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \
} \
} \
@@ -134,9 +128,10 @@ struct { \
#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \
(r_node) = (a_root); \
- if ((r_node) != NULL) { \
- for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL; \
- (r_node) = rbtn_right_get(a_type, a_field, (r_node))) { \
+ if ((r_node) != &(a_rbt)->rbt_nil) { \
+ for (; rbtn_right_get(a_type, a_field, (r_node)) != \
+ &(a_rbt)->rbt_nil; (r_node) = rbtn_right_get(a_type, a_field, \
+ (r_node))) { \
} \
} \
} while (0)
@@ -174,11 +169,11 @@ a_prefix##next(a_rbt_type *rbtree, a_type *node); \
a_attr a_type * \
a_prefix##prev(a_rbt_type *rbtree, a_type *node); \
a_attr a_type * \
-a_prefix##search(a_rbt_type *rbtree, const a_type *key); \
+a_prefix##search(a_rbt_type *rbtree, a_type *key); \
a_attr a_type * \
-a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key); \
+a_prefix##nsearch(a_rbt_type *rbtree, a_type *key); \
a_attr a_type * \
-a_prefix##psearch(a_rbt_type *rbtree, const a_type *key); \
+a_prefix##psearch(a_rbt_type *rbtree, a_type *key); \
a_attr void \
a_prefix##insert(a_rbt_type *rbtree, a_type *node); \
a_attr void \
@@ -188,10 +183,7 @@ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
a_rbt_type *, a_type *, void *), void *arg); \
a_attr a_type * \
a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
- a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \
-a_attr void \
-a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
- void *arg);
+ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg);
/*
* The rb_gen() macro generates a type-specific red-black tree implementation,
@@ -262,7 +254,7 @@ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
* last/first.
*
* static ex_node_t *
- * ex_search(ex_t *tree, const ex_node_t *key);
+ * ex_search(ex_t *tree, ex_node_t *key);
* Description: Search for node that matches key.
* Args:
* tree: Pointer to an initialized red-black tree object.
@@ -270,9 +262,9 @@ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
* Ret: Node in tree that matches key, or NULL if no match.
*
* static ex_node_t *
- * ex_nsearch(ex_t *tree, const ex_node_t *key);
+ * ex_nsearch(ex_t *tree, ex_node_t *key);
* static ex_node_t *
- * ex_psearch(ex_t *tree, const ex_node_t *key);
+ * ex_psearch(ex_t *tree, ex_node_t *key);
* Description: Search for node that matches key. If no match is found,
* return what would be key's successor/predecessor, were
* key in tree.
@@ -320,20 +312,6 @@ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
* arg : Opaque pointer passed to cb().
* Ret: NULL if iteration completed, or the non-NULL callback return value
* that caused termination of the iteration.
- *
- * static void
- * ex_destroy(ex_t *tree, void (*cb)(ex_node_t *, void *), void *arg);
- * Description: Iterate over the tree with post-order traversal, remove
- * each node, and run the callback if non-null. This is
- * used for destroying a tree without paying the cost to
- * rebalance it. The tree must not be otherwise altered
- * during traversal.
- * Args:
- * tree: Pointer to an initialized red-black tree object.
- * cb : Callback function, which, if non-null, is called for each node
- * during iteration. There is no way to stop iteration once it
- * has begun.
- * arg : Opaque pointer passed to cb().
*/
#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \
a_attr void \
@@ -342,30 +320,36 @@ a_prefix##new(a_rbt_type *rbtree) { \
} \
a_attr bool \
a_prefix##empty(a_rbt_type *rbtree) { \
- return (rbtree->rbt_root == NULL); \
+ return (rbtree->rbt_root == &rbtree->rbt_nil); \
} \
a_attr a_type * \
a_prefix##first(a_rbt_type *rbtree) { \
a_type *ret; \
rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
+ if (ret == &rbtree->rbt_nil) { \
+ ret = NULL; \
+ } \
return (ret); \
} \
a_attr a_type * \
a_prefix##last(a_rbt_type *rbtree) { \
a_type *ret; \
rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
+ if (ret == &rbtree->rbt_nil) { \
+ ret = NULL; \
+ } \
return (ret); \
} \
a_attr a_type * \
a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
a_type *ret; \
- if (rbtn_right_get(a_type, a_field, node) != NULL) { \
+ if (rbtn_right_get(a_type, a_field, node) != &rbtree->rbt_nil) { \
rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \
a_field, node), ret); \
} else { \
a_type *tnode = rbtree->rbt_root; \
- assert(tnode != NULL); \
- ret = NULL; \
+ assert(tnode != &rbtree->rbt_nil); \
+ ret = &rbtree->rbt_nil; \
while (true) { \
int cmp = (a_cmp)(node, tnode); \
if (cmp < 0) { \
@@ -376,21 +360,24 @@ a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
} else { \
break; \
} \
- assert(tnode != NULL); \
+ assert(tnode != &rbtree->rbt_nil); \
} \
} \
+ if (ret == &rbtree->rbt_nil) { \
+ ret = (NULL); \
+ } \
return (ret); \
} \
a_attr a_type * \
a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
a_type *ret; \
- if (rbtn_left_get(a_type, a_field, node) != NULL) { \
+ if (rbtn_left_get(a_type, a_field, node) != &rbtree->rbt_nil) { \
rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \
a_field, node), ret); \
} else { \
a_type *tnode = rbtree->rbt_root; \
- assert(tnode != NULL); \
- ret = NULL; \
+ assert(tnode != &rbtree->rbt_nil); \
+ ret = &rbtree->rbt_nil; \
while (true) { \
int cmp = (a_cmp)(node, tnode); \
if (cmp < 0) { \
@@ -401,17 +388,20 @@ a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
} else { \
break; \
} \
- assert(tnode != NULL); \
+ assert(tnode != &rbtree->rbt_nil); \
} \
} \
+ if (ret == &rbtree->rbt_nil) { \
+ ret = (NULL); \
+ } \
return (ret); \
} \
a_attr a_type * \
-a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \
+a_prefix##search(a_rbt_type *rbtree, a_type *key) { \
a_type *ret; \
int cmp; \
ret = rbtree->rbt_root; \
- while (ret != NULL \
+ while (ret != &rbtree->rbt_nil \
&& (cmp = (a_cmp)(key, ret)) != 0) { \
if (cmp < 0) { \
ret = rbtn_left_get(a_type, a_field, ret); \
@@ -419,14 +409,17 @@ a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \
ret = rbtn_right_get(a_type, a_field, ret); \
} \
} \
+ if (ret == &rbtree->rbt_nil) { \
+ ret = (NULL); \
+ } \
return (ret); \
} \
a_attr a_type * \
-a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \
+a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) { \
a_type *ret; \
a_type *tnode = rbtree->rbt_root; \
- ret = NULL; \
- while (tnode != NULL) { \
+ ret = &rbtree->rbt_nil; \
+ while (tnode != &rbtree->rbt_nil) { \
int cmp = (a_cmp)(key, tnode); \
if (cmp < 0) { \
ret = tnode; \
@@ -438,14 +431,17 @@ a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \
break; \
} \
} \
+ if (ret == &rbtree->rbt_nil) { \
+ ret = (NULL); \
+ } \
return (ret); \
} \
a_attr a_type * \
-a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \
+a_prefix##psearch(a_rbt_type *rbtree, a_type *key) { \
a_type *ret; \
a_type *tnode = rbtree->rbt_root; \
- ret = NULL; \
- while (tnode != NULL) { \
+ ret = &rbtree->rbt_nil; \
+ while (tnode != &rbtree->rbt_nil) { \
int cmp = (a_cmp)(key, tnode); \
if (cmp < 0) { \
tnode = rbtn_left_get(a_type, a_field, tnode); \
@@ -457,6 +453,9 @@ a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \
break; \
} \
} \
+ if (ret == &rbtree->rbt_nil) { \
+ ret = (NULL); \
+ } \
return (ret); \
} \
a_attr void \
@@ -468,7 +467,7 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
rbt_node_new(a_type, a_field, rbtree, node); \
/* Wind. */ \
path->node = rbtree->rbt_root; \
- for (pathp = path; pathp->node != NULL; pathp++) { \
+ for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \
int cmp = pathp->cmp = a_cmp(node, pathp->node); \
assert(cmp != 0); \
if (cmp < 0) { \
@@ -488,8 +487,7 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
rbtn_left_set(a_type, a_field, cnode, left); \
if (rbtn_red_get(a_type, a_field, left)) { \
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
- if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
- leftleft)) { \
+ if (rbtn_red_get(a_type, a_field, leftleft)) { \
/* Fix up 4-node. */ \
a_type *tnode; \
rbtn_black_set(a_type, a_field, leftleft); \
@@ -504,8 +502,7 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, cnode, right); \
if (rbtn_red_get(a_type, a_field, right)) { \
a_type *left = rbtn_left_get(a_type, a_field, cnode); \
- if (left != NULL && rbtn_red_get(a_type, a_field, \
- left)) { \
+ if (rbtn_red_get(a_type, a_field, left)) { \
/* Split 4-node. */ \
rbtn_black_set(a_type, a_field, left); \
rbtn_black_set(a_type, a_field, right); \
@@ -538,7 +535,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
/* Wind. */ \
nodep = NULL; /* Silence compiler warning. */ \
path->node = rbtree->rbt_root; \
- for (pathp = path; pathp->node != NULL; pathp++) { \
+ for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \
int cmp = pathp->cmp = a_cmp(node, pathp->node); \
if (cmp < 0) { \
pathp[1].node = rbtn_left_get(a_type, a_field, \
@@ -550,7 +547,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
/* Find node's successor, in preparation for swap. */ \
pathp->cmp = 1; \
nodep = pathp; \
- for (pathp++; pathp->node != NULL; \
+ for (pathp++; pathp->node != &rbtree->rbt_nil; \
pathp++) { \
pathp->cmp = -1; \
pathp[1].node = rbtn_left_get(a_type, a_field, \
@@ -593,7 +590,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
} \
} else { \
a_type *left = rbtn_left_get(a_type, a_field, node); \
- if (left != NULL) { \
+ if (left != &rbtree->rbt_nil) { \
/* node has no successor, but it has a left child. */\
/* Splice node out, without losing the left child. */\
assert(!rbtn_red_get(a_type, a_field, node)); \
@@ -613,32 +610,33 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
return; \
} else if (pathp == path) { \
/* The tree only contained one node. */ \
- rbtree->rbt_root = NULL; \
+ rbtree->rbt_root = &rbtree->rbt_nil; \
return; \
} \
} \
if (rbtn_red_get(a_type, a_field, pathp->node)) { \
/* Prune red node, which requires no fixup. */ \
assert(pathp[-1].cmp < 0); \
- rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \
+ rbtn_left_set(a_type, a_field, pathp[-1].node, \
+ &rbtree->rbt_nil); \
return; \
} \
/* The node to be pruned is black, so unwind until balance is */\
/* restored. */\
- pathp->node = NULL; \
+ pathp->node = &rbtree->rbt_nil; \
for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \
assert(pathp->cmp != 0); \
if (pathp->cmp < 0) { \
rbtn_left_set(a_type, a_field, pathp->node, \
pathp[1].node); \
+ assert(!rbtn_red_get(a_type, a_field, pathp[1].node)); \
if (rbtn_red_get(a_type, a_field, pathp->node)) { \
a_type *right = rbtn_right_get(a_type, a_field, \
pathp->node); \
a_type *rightleft = rbtn_left_get(a_type, a_field, \
right); \
a_type *tnode; \
- if (rightleft != NULL && rbtn_red_get(a_type, a_field, \
- rightleft)) { \
+ if (rbtn_red_get(a_type, a_field, rightleft)) { \
/* In the following diagrams, ||, //, and \\ */\
/* indicate the path to the removed node. */\
/* */\
@@ -681,8 +679,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
pathp->node); \
a_type *rightleft = rbtn_left_get(a_type, a_field, \
right); \
- if (rightleft != NULL && rbtn_red_get(a_type, a_field, \
- rightleft)) { \
+ if (rbtn_red_get(a_type, a_field, rightleft)) { \
/* || */\
/* pathp(b) */\
/* // \ */\
@@ -736,8 +733,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
left); \
a_type *leftrightleft = rbtn_left_get(a_type, a_field, \
leftright); \
- if (leftrightleft != NULL && rbtn_red_get(a_type, \
- a_field, leftrightleft)) { \
+ if (rbtn_red_get(a_type, a_field, leftrightleft)) { \
/* || */\
/* pathp(b) */\
/* / \\ */\
@@ -763,7 +759,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
/* (b) */\
/* / */\
/* (b) */\
- assert(leftright != NULL); \
+ assert(leftright != &rbtree->rbt_nil); \
rbtn_red_set(a_type, a_field, leftright); \
rbtn_rotate_right(a_type, a_field, pathp->node, \
tnode); \
@@ -786,8 +782,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
return; \
} else if (rbtn_red_get(a_type, a_field, pathp->node)) { \
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
- if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
- leftleft)) { \
+ if (rbtn_red_get(a_type, a_field, leftleft)) { \
/* || */\
/* pathp(r) */\
/* / \\ */\
@@ -825,8 +820,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
} \
} else { \
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
- if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
- leftleft)) { \
+ if (rbtn_red_get(a_type, a_field, leftleft)) { \
/* || */\
/* pathp(b) */\
/* / \\ */\
@@ -872,13 +866,13 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
a_attr a_type * \
a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
- if (node == NULL) { \
- return (NULL); \
+ if (node == &rbtree->rbt_nil) { \
+ return (&rbtree->rbt_nil); \
} else { \
a_type *ret; \
if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \
- a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node, \
- arg)) != NULL) { \
+ a_field, node), cb, arg)) != &rbtree->rbt_nil \
+ || (ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
} \
return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
@@ -892,8 +886,8 @@ a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \
if (cmp < 0) { \
a_type *ret; \
if ((ret = a_prefix##iter_start(rbtree, start, \
- rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL || \
- (ret = cb(rbtree, node, arg)) != NULL) { \
+ rbtn_left_get(a_type, a_field, node), cb, arg)) != \
+ &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
} \
return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
@@ -920,18 +914,21 @@ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
} else { \
ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\
} \
+ if (ret == &rbtree->rbt_nil) { \
+ ret = NULL; \
+ } \
return (ret); \
} \
a_attr a_type * \
a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
- if (node == NULL) { \
- return (NULL); \
+ if (node == &rbtree->rbt_nil) { \
+ return (&rbtree->rbt_nil); \
} else { \
a_type *ret; \
if ((ret = a_prefix##reverse_iter_recurse(rbtree, \
- rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
- (ret = cb(rbtree, node, arg)) != NULL) { \
+ rbtn_right_get(a_type, a_field, node), cb, arg)) != \
+ &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
} \
return (a_prefix##reverse_iter_recurse(rbtree, \
@@ -946,8 +943,8 @@ a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \
if (cmp > 0) { \
a_type *ret; \
if ((ret = a_prefix##reverse_iter_start(rbtree, start, \
- rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
- (ret = cb(rbtree, node, arg)) != NULL) { \
+ rbtn_right_get(a_type, a_field, node), cb, arg)) != \
+ &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
} \
return (a_prefix##reverse_iter_recurse(rbtree, \
@@ -975,29 +972,10 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \
cb, arg); \
} \
- return (ret); \
-} \
-a_attr void \
-a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)( \
- a_type *, void *), void *arg) { \
- if (node == NULL) { \
- return; \
- } \
- a_prefix##destroy_recurse(rbtree, rbtn_left_get(a_type, a_field, \
- node), cb, arg); \
- rbtn_left_set(a_type, a_field, (node), NULL); \
- a_prefix##destroy_recurse(rbtree, rbtn_right_get(a_type, a_field, \
- node), cb, arg); \
- rbtn_right_set(a_type, a_field, (node), NULL); \
- if (cb) { \
- cb(node, arg); \
+ if (ret == &rbtree->rbt_nil) { \
+ ret = NULL; \
} \
-} \
-a_attr void \
-a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
- void *arg) { \
- a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \
- rbtree->rbt_root = NULL; \
+ return (ret); \
}
#endif /* RB_H_ */
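[Editor's note, not part of the patch] The restored rbt_nil sentinel only changes the generated bodies; the rb_gen() interface documented above (ex_new, ex_search, ex_insert, ...) is unchanged apart from dropping ex_destroy and the const qualifier on search keys. A minimal instantiation sketch using the documentation's ex_* example names; the key field and comparator body are assumptions:

#include <stdint.h>

typedef struct ex_node_s ex_node_t;
struct ex_node_s {
	uint64_t key;
	rb_node(ex_node_t) ex_link;	/* embedded red-black linkage */
};
typedef rb_tree(ex_node_t) ex_t;

static int
ex_cmp(ex_node_t *a, ex_node_t *b)
{
	return ((a->key > b->key) - (a->key < b->key));
}

/*
 * Emits ex_new(), ex_empty(), ex_first(), ex_last(), ex_next(), ex_prev(),
 * ex_search(), ex_nsearch(), ex_psearch(), ex_insert(), ex_remove(),
 * ex_iter() and ex_reverse_iter().
 */
rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp)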
diff --git a/deps/jemalloc/include/jemalloc/internal/rtree.h b/deps/jemalloc/include/jemalloc/internal/rtree.h
index 8d0c584da..28ae9d1dd 100644
--- a/deps/jemalloc/include/jemalloc/internal/rtree.h
+++ b/deps/jemalloc/include/jemalloc/internal/rtree.h
@@ -15,10 +15,9 @@ typedef struct rtree_s rtree_t;
* machine address width.
*/
#define LG_RTREE_BITS_PER_LEVEL 4
-#define RTREE_BITS_PER_LEVEL (1U << LG_RTREE_BITS_PER_LEVEL)
-/* Maximum rtree height. */
+#define RTREE_BITS_PER_LEVEL (ZU(1) << LG_RTREE_BITS_PER_LEVEL)
#define RTREE_HEIGHT_MAX \
- ((1U << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL)
+ ((ZU(1) << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL)
/* Used for two-stage lock-free node initialization. */
#define RTREE_NODE_INITIALIZING ((rtree_node_elm_t *)0x1)
@@ -112,25 +111,22 @@ unsigned rtree_start_level(rtree_t *rtree, uintptr_t key);
uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level);
bool rtree_node_valid(rtree_node_elm_t *node);
-rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm,
- bool dependent);
+rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm);
rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm,
- unsigned level, bool dependent);
+ unsigned level);
extent_node_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm,
bool dependent);
void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm,
const extent_node_t *val);
-rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level,
- bool dependent);
-rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level,
- bool dependent);
+rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level);
+rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level);
extent_node_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent);
bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
-JEMALLOC_ALWAYS_INLINE unsigned
+JEMALLOC_INLINE unsigned
rtree_start_level(rtree_t *rtree, uintptr_t key)
{
unsigned start_level;
@@ -144,7 +140,7 @@ rtree_start_level(rtree_t *rtree, uintptr_t key)
return (start_level);
}
-JEMALLOC_ALWAYS_INLINE uintptr_t
+JEMALLOC_INLINE uintptr_t
rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
{
@@ -153,40 +149,37 @@ rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
rtree->levels[level].bits) - 1));
}
-JEMALLOC_ALWAYS_INLINE bool
+JEMALLOC_INLINE bool
rtree_node_valid(rtree_node_elm_t *node)
{
return ((uintptr_t)node > (uintptr_t)RTREE_NODE_INITIALIZING);
}
-JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
-rtree_child_tryread(rtree_node_elm_t *elm, bool dependent)
+JEMALLOC_INLINE rtree_node_elm_t *
+rtree_child_tryread(rtree_node_elm_t *elm)
{
rtree_node_elm_t *child;
	/* Double-checked read (first read may be stale). */
child = elm->child;
- if (!dependent && !rtree_node_valid(child))
+ if (!rtree_node_valid(child))
child = atomic_read_p(&elm->pun);
- assert(!dependent || child != NULL);
return (child);
}
-JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
-rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level,
- bool dependent)
+JEMALLOC_INLINE rtree_node_elm_t *
+rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level)
{
rtree_node_elm_t *child;
- child = rtree_child_tryread(elm, dependent);
- if (!dependent && unlikely(!rtree_node_valid(child)))
+ child = rtree_child_tryread(elm);
+ if (unlikely(!rtree_node_valid(child)))
child = rtree_child_read_hard(rtree, elm, level);
- assert(!dependent || child != NULL);
return (child);
}
-JEMALLOC_ALWAYS_INLINE extent_node_t *
+JEMALLOC_INLINE extent_node_t *
rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent)
{
@@ -215,119 +208,54 @@ rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val)
atomic_write_p(&elm->pun, val);
}
-JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
-rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent)
+JEMALLOC_INLINE rtree_node_elm_t *
+rtree_subtree_tryread(rtree_t *rtree, unsigned level)
{
rtree_node_elm_t *subtree;
	/* Double-checked read (first read may be stale). */
subtree = rtree->levels[level].subtree;
- if (!dependent && unlikely(!rtree_node_valid(subtree)))
+ if (!rtree_node_valid(subtree))
subtree = atomic_read_p(&rtree->levels[level].subtree_pun);
- assert(!dependent || subtree != NULL);
return (subtree);
}
-JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
-rtree_subtree_read(rtree_t *rtree, unsigned level, bool dependent)
+JEMALLOC_INLINE rtree_node_elm_t *
+rtree_subtree_read(rtree_t *rtree, unsigned level)
{
rtree_node_elm_t *subtree;
- subtree = rtree_subtree_tryread(rtree, level, dependent);
- if (!dependent && unlikely(!rtree_node_valid(subtree)))
+ subtree = rtree_subtree_tryread(rtree, level);
+ if (unlikely(!rtree_node_valid(subtree)))
subtree = rtree_subtree_read_hard(rtree, level);
- assert(!dependent || subtree != NULL);
return (subtree);
}
-JEMALLOC_ALWAYS_INLINE extent_node_t *
+JEMALLOC_INLINE extent_node_t *
rtree_get(rtree_t *rtree, uintptr_t key, bool dependent)
{
uintptr_t subkey;
- unsigned start_level;
- rtree_node_elm_t *node;
+ unsigned i, start_level;
+ rtree_node_elm_t *node, *child;
start_level = rtree_start_level(rtree, key);
- node = rtree_subtree_tryread(rtree, start_level, dependent);
-#define RTREE_GET_BIAS (RTREE_HEIGHT_MAX - rtree->height)
- switch (start_level + RTREE_GET_BIAS) {
-#define RTREE_GET_SUBTREE(level) \
- case level: \
- assert(level < (RTREE_HEIGHT_MAX-1)); \
- if (!dependent && unlikely(!rtree_node_valid(node))) \
- return (NULL); \
- subkey = rtree_subkey(rtree, key, level - \
- RTREE_GET_BIAS); \
- node = rtree_child_tryread(&node[subkey], dependent); \
- /* Fall through. */
-#define RTREE_GET_LEAF(level) \
- case level: \
- assert(level == (RTREE_HEIGHT_MAX-1)); \
- if (!dependent && unlikely(!rtree_node_valid(node))) \
- return (NULL); \
- subkey = rtree_subkey(rtree, key, level - \
- RTREE_GET_BIAS); \
- /* \
- * node is a leaf, so it contains values rather than \
- * child pointers. \
- */ \
- return (rtree_val_read(rtree, &node[subkey], \
- dependent));
-#if RTREE_HEIGHT_MAX > 1
- RTREE_GET_SUBTREE(0)
-#endif
-#if RTREE_HEIGHT_MAX > 2
- RTREE_GET_SUBTREE(1)
-#endif
-#if RTREE_HEIGHT_MAX > 3
- RTREE_GET_SUBTREE(2)
-#endif
-#if RTREE_HEIGHT_MAX > 4
- RTREE_GET_SUBTREE(3)
-#endif
-#if RTREE_HEIGHT_MAX > 5
- RTREE_GET_SUBTREE(4)
-#endif
-#if RTREE_HEIGHT_MAX > 6
- RTREE_GET_SUBTREE(5)
-#endif
-#if RTREE_HEIGHT_MAX > 7
- RTREE_GET_SUBTREE(6)
-#endif
-#if RTREE_HEIGHT_MAX > 8
- RTREE_GET_SUBTREE(7)
-#endif
-#if RTREE_HEIGHT_MAX > 9
- RTREE_GET_SUBTREE(8)
-#endif
-#if RTREE_HEIGHT_MAX > 10
- RTREE_GET_SUBTREE(9)
-#endif
-#if RTREE_HEIGHT_MAX > 11
- RTREE_GET_SUBTREE(10)
-#endif
-#if RTREE_HEIGHT_MAX > 12
- RTREE_GET_SUBTREE(11)
-#endif
-#if RTREE_HEIGHT_MAX > 13
- RTREE_GET_SUBTREE(12)
-#endif
-#if RTREE_HEIGHT_MAX > 14
- RTREE_GET_SUBTREE(13)
-#endif
-#if RTREE_HEIGHT_MAX > 15
- RTREE_GET_SUBTREE(14)
-#endif
-#if RTREE_HEIGHT_MAX > 16
-# error Unsupported RTREE_HEIGHT_MAX
-#endif
- RTREE_GET_LEAF(RTREE_HEIGHT_MAX-1)
-#undef RTREE_GET_SUBTREE
-#undef RTREE_GET_LEAF
- default: not_reached();
+ for (i = start_level, node = rtree_subtree_tryread(rtree, start_level);
+ /**/; i++, node = child) {
+ if (!dependent && unlikely(!rtree_node_valid(node)))
+ return (NULL);
+ subkey = rtree_subkey(rtree, key, i);
+ if (i == rtree->height - 1) {
+ /*
+ * node is a leaf, so it contains values rather than
+ * child pointers.
+ */
+ return (rtree_val_read(rtree, &node[subkey],
+ dependent));
+ }
+ assert(i < rtree->height - 1);
+ child = rtree_child_tryread(&node[subkey]);
}
-#undef RTREE_GET_BIAS
not_reached();
}
@@ -340,7 +268,7 @@ rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val)
start_level = rtree_start_level(rtree, key);
- node = rtree_subtree_read(rtree, start_level, false);
+ node = rtree_subtree_read(rtree, start_level);
if (node == NULL)
return (true);
for (i = start_level; /**/; i++, node = child) {
@@ -354,7 +282,7 @@ rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val)
return (false);
}
assert(i + 1 < rtree->height);
- child = rtree_child_read(rtree, &node[subkey], i, false);
+ child = rtree_child_read(rtree, &node[subkey], i);
if (child == NULL)
return (true);
}
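[Editor's note, not part of the patch] A worked example of the constants changed above, for a 64-bit build (LG_SIZEOF_PTR == 3):

	RTREE_BITS_PER_LEVEL = ZU(1) << LG_RTREE_BITS_PER_LEVEL = 1 << 4  = 16
	RTREE_HEIGHT_MAX     = (ZU(1) << (3 + 3)) / 16           = 64 / 16 = 4

so the loop-based rtree_get()/rtree_set() walk at most four levels, each level indexed by an up-to-16-bit slice of the key (see rtree_subkey()).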
diff --git a/deps/jemalloc/include/jemalloc/internal/size_classes.sh b/deps/jemalloc/include/jemalloc/internal/size_classes.sh
index f6fbce4ef..fc82036d3 100755
--- a/deps/jemalloc/include/jemalloc/internal/size_classes.sh
+++ b/deps/jemalloc/include/jemalloc/internal/size_classes.sh
@@ -48,21 +48,6 @@ size_class() {
lg_p=$5
lg_kmax=$6
- if [ ${lg_delta} -ge ${lg_p} ] ; then
- psz="yes"
- else
- pow2 ${lg_p}; p=${pow2_result}
- pow2 ${lg_grp}; grp=${pow2_result}
- pow2 ${lg_delta}; delta=${pow2_result}
- sz=$((${grp} + ${delta} * ${ndelta}))
- npgs=$((${sz} / ${p}))
- if [ ${sz} -eq $((${npgs} * ${p})) ] ; then
- psz="yes"
- else
- psz="no"
- fi
- fi
-
lg ${ndelta}; lg_ndelta=${lg_result}; pow2 ${lg_ndelta}
if [ ${pow2_result} -lt ${ndelta} ] ; then
rem="yes"
@@ -89,15 +74,14 @@ size_class() {
else
lg_delta_lookup="no"
fi
- printf ' SC(%3d, %6d, %8d, %6d, %3s, %3s, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${psz} ${bin} ${lg_delta_lookup}
+ printf ' SC(%3d, %6d, %8d, %6d, %3s, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${bin} ${lg_delta_lookup}
# Defined upon return:
- # - psz ("yes" or "no")
- # - bin ("yes" or "no")
# - lg_delta_lookup (${lg_delta} or "no")
+ # - bin ("yes" or "no")
}
sep_line() {
- echo " \\"
+ echo " \\"
}
size_classes() {
@@ -111,13 +95,12 @@ size_classes() {
pow2 ${lg_g}; g=${pow2_result}
echo "#define SIZE_CLASSES \\"
- echo " /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \\"
+ echo " /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \\"
ntbins=0
nlbins=0
lg_tiny_maxclass='"NA"'
nbins=0
- npsizes=0
# Tiny size classes.
ndelta=0
@@ -129,9 +112,6 @@ size_classes() {
if [ ${lg_delta_lookup} != "no" ] ; then
nlbins=$((${index} + 1))
fi
- if [ ${psz} = "yes" ] ; then
- npsizes=$((${npsizes} + 1))
- fi
if [ ${bin} != "no" ] ; then
nbins=$((${index} + 1))
fi
@@ -153,25 +133,19 @@ size_classes() {
index=$((${index} + 1))
lg_grp=$((${lg_grp} + 1))
lg_delta=$((${lg_delta} + 1))
- if [ ${psz} = "yes" ] ; then
- npsizes=$((${npsizes} + 1))
- fi
fi
while [ ${ndelta} -lt ${g} ] ; do
size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
index=$((${index} + 1))
ndelta=$((${ndelta} + 1))
- if [ ${psz} = "yes" ] ; then
- npsizes=$((${npsizes} + 1))
- fi
done
# All remaining groups.
lg_grp=$((${lg_grp} + ${lg_g}))
- while [ ${lg_grp} -lt $((${ptr_bits} - 1)) ] ; do
+ while [ ${lg_grp} -lt ${ptr_bits} ] ; do
sep_line
ndelta=1
- if [ ${lg_grp} -eq $((${ptr_bits} - 2)) ] ; then
+ if [ ${lg_grp} -eq $((${ptr_bits} - 1)) ] ; then
ndelta_limit=$((${g} - 1))
else
ndelta_limit=${g}
@@ -183,9 +157,6 @@ size_classes() {
# Final written value is correct:
lookup_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
fi
- if [ ${psz} = "yes" ] ; then
- npsizes=$((${npsizes} + 1))
- fi
if [ ${bin} != "no" ] ; then
nbins=$((${index} + 1))
# Final written value is correct:
@@ -212,7 +183,6 @@ size_classes() {
# - nlbins
# - nbins
# - nsizes
- # - npsizes
# - lg_tiny_maxclass
# - lookup_maxclass
# - small_maxclass
@@ -230,13 +200,13 @@ cat <<EOF
* be defined prior to inclusion, and it in turn defines:
*
* LG_SIZE_CLASS_GROUP: Lg of size class count for each size doubling.
- * SIZE_CLASSES: Complete table of SC(index, lg_grp, lg_delta, ndelta, psz,
- * bin, lg_delta_lookup) tuples.
+ * SIZE_CLASSES: Complete table of
+ * SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup)
+ * tuples.
* index: Size class index.
* lg_grp: Lg group base size (no deltas added).
* lg_delta: Lg delta to previous size class.
* ndelta: Delta multiplier. size == 1<<lg_grp + ndelta<<lg_delta
- * psz: 'yes' if a multiple of the page size, 'no' otherwise.
* bin: 'yes' if a small bin size class, 'no' otherwise.
* lg_delta_lookup: Same as lg_delta if a lookup table size class, 'no'
* otherwise.
@@ -244,7 +214,6 @@ cat <<EOF
* NLBINS: Number of bins supported by the lookup table.
* NBINS: Number of small size class bins.
* NSIZES: Number of size classes.
- * NPSIZES: Number of size classes that are a multiple of (1U << LG_PAGE).
* LG_TINY_MAXCLASS: Lg of maximum tiny size class.
* LOOKUP_MAXCLASS: Maximum size class included in lookup table.
* SMALL_MAXCLASS: Maximum small size class.
@@ -269,7 +238,6 @@ for lg_z in ${lg_zarr} ; do
echo "#define NLBINS ${nlbins}"
echo "#define NBINS ${nbins}"
echo "#define NSIZES ${nsizes}"
- echo "#define NPSIZES ${npsizes}"
echo "#define LG_TINY_MAXCLASS ${lg_tiny_maxclass}"
echo "#define LOOKUP_MAXCLASS ${lookup_maxclass}"
echo "#define SMALL_MAXCLASS ${small_maxclass}"
diff --git a/deps/jemalloc/include/jemalloc/internal/smoothstep.h b/deps/jemalloc/include/jemalloc/internal/smoothstep.h
deleted file mode 100644
index c5333ccad..000000000
--- a/deps/jemalloc/include/jemalloc/internal/smoothstep.h
+++ /dev/null
@@ -1,246 +0,0 @@
-/*
- * This file was generated by the following command:
- * sh smoothstep.sh smoother 200 24 3 15
- */
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-/*
- * This header defines a precomputed table based on the smoothstep family of
- * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
- * to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so
- * that floating point math can be avoided.
- *
- * 3 2
- * smoothstep(x) = -2x + 3x
- *
- * 5 4 3
- * smootherstep(x) = 6x - 15x + 10x
- *
- * 7 6 5 4
- * smootheststep(x) = -20x + 70x - 84x + 35x
- */
-
-#define SMOOTHSTEP_VARIANT "smoother"
-#define SMOOTHSTEP_NSTEPS 200
-#define SMOOTHSTEP_BFP 24
-#define SMOOTHSTEP \
- /* STEP(step, h, x, y) */ \
- STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \
- STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \
- STEP( 3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \
- STEP( 4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \
- STEP( 5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \
- STEP( 6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \
- STEP( 7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \
- STEP( 8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \
- STEP( 9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \
- STEP( 10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \
- STEP( 11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \
- STEP( 12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \
- STEP( 13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \
- STEP( 14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \
- STEP( 15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \
- STEP( 16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \
- STEP( 17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \
- STEP( 18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \
- STEP( 19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \
- STEP( 20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \
- STEP( 21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \
- STEP( 22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \
- STEP( 23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \
- STEP( 24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \
- STEP( 25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \
- STEP( 26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \
- STEP( 27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \
- STEP( 28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \
- STEP( 29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \
- STEP( 30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \
- STEP( 31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \
- STEP( 32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \
- STEP( 33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \
- STEP( 34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \
- STEP( 35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \
- STEP( 36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \
- STEP( 37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \
- STEP( 38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \
- STEP( 39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \
- STEP( 40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \
- STEP( 41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \
- STEP( 42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \
- STEP( 43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \
- STEP( 44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \
- STEP( 45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \
- STEP( 46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \
- STEP( 47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \
- STEP( 48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \
- STEP( 49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \
- STEP( 50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \
- STEP( 51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \
- STEP( 52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \
- STEP( 53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \
- STEP( 54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \
- STEP( 55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \
- STEP( 56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \
- STEP( 57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \
- STEP( 58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \
- STEP( 59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \
- STEP( 60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \
- STEP( 61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \
- STEP( 62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \
- STEP( 63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \
- STEP( 64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \
- STEP( 65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \
- STEP( 66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \
- STEP( 67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \
- STEP( 68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \
- STEP( 69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \
- STEP( 70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \
- STEP( 71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \
- STEP( 72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \
- STEP( 73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \
- STEP( 74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \
- STEP( 75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \
- STEP( 76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \
- STEP( 77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \
- STEP( 78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \
- STEP( 79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \
- STEP( 80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \
- STEP( 81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \
- STEP( 82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \
- STEP( 83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \
- STEP( 84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \
- STEP( 85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \
- STEP( 86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \
- STEP( 87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \
- STEP( 88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \
- STEP( 89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \
- STEP( 90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \
- STEP( 91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \
- STEP( 92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \
- STEP( 93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \
- STEP( 94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \
- STEP( 95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \
- STEP( 96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \
- STEP( 97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \
- STEP( 98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \
- STEP( 99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \
- STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \
- STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \
- STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \
- STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \
- STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \
- STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \
- STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \
- STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \
- STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \
- STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \
- STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \
- STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \
- STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \
- STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \
- STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \
- STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \
- STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \
- STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \
- STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \
- STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \
- STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \
- STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \
- STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \
- STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \
- STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \
- STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \
- STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \
- STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \
- STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \
- STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \
- STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \
- STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \
- STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \
- STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \
- STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \
- STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \
- STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \
- STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \
- STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \
- STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \
- STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \
- STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \
- STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \
- STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \
- STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \
- STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \
- STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \
- STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \
- STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \
- STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \
- STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \
- STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \
- STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \
- STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \
- STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \
- STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \
- STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \
- STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \
- STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \
- STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \
- STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \
- STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \
- STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \
- STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \
- STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \
- STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \
- STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \
- STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \
- STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \
- STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \
- STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \
- STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \
- STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \
- STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \
- STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \
- STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \
- STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \
- STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \
- STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \
- STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \
- STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \
- STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \
- STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \
- STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \
- STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \
- STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \
- STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \
- STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \
- STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \
- STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \
- STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \
- STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \
- STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \
- STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \
- STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \
- STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \
- STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \
- STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \
- STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \
- STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \
- STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
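
The deleted table stores h = smootherstep(step/SMOOTHSTEP_NSTEPS) scaled by 2^SMOOTHSTEP_BFP, so the decay curve can be applied with integer arithmetic only. A short sketch of how an entry relates to the polynomial and how such an entry would be applied; the TOY_* names are illustrative, not part of jemalloc:

#include <stdint.h>

/*
 * Relation between a table entry and the polynomial:
 *     smootherstep(x) = 6x^5 - 15x^4 + 10x^3
 *     h = smootherstep(step / SMOOTHSTEP_NSTEPS) * 2^SMOOTHSTEP_BFP
 * e.g. step 100 of 200: x = 0.5, smootherstep(0.5) = 0.5, and
 * 0.5 * 2^24 = 0x800000, matching STEP(100, ...) above.
 */
#define TOY_BFP	24	/* matches SMOOTHSTEP_BFP in the deleted header */

static uint64_t
toy_smooth_scale(uint64_t x, uint64_t h)
{

	/* x scaled by a sigmoid value in [0, 1]: x * h / 2^BFP. */
	return ((x * h) >> TOY_BFP);
}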
diff --git a/deps/jemalloc/include/jemalloc/internal/smoothstep.sh b/deps/jemalloc/include/jemalloc/internal/smoothstep.sh
deleted file mode 100755
index 8124693f7..000000000
--- a/deps/jemalloc/include/jemalloc/internal/smoothstep.sh
+++ /dev/null
@@ -1,115 +0,0 @@
-#!/bin/sh
-#
-# Generate a discrete lookup table for a sigmoid function in the smoothstep
-# family (https://en.wikipedia.org/wiki/Smoothstep), where the lookup table
-# entries correspond to x in [1/nsteps, 2/nsteps, ..., nsteps/nsteps]. Encode
-# the entries using a binary fixed point representation.
-#
-# Usage: smoothstep.sh <variant> <nsteps> <bfp> <xprec> <yprec>
-#
-# <variant> is in {smooth, smoother, smoothest}.
-# <nsteps> must be greater than zero.
-# <bfp> must be in [0..62]; reasonable values are roughly [10..30].
-# <xprec> is x decimal precision.
-# <yprec> is y decimal precision.
-
-#set -x
-
-cmd="sh smoothstep.sh $*"
-variant=$1
-nsteps=$2
-bfp=$3
-xprec=$4
-yprec=$5
-
-case "${variant}" in
- smooth)
- ;;
- smoother)
- ;;
- smoothest)
- ;;
- *)
- echo "Unsupported variant"
- exit 1
- ;;
-esac
-
-smooth() {
- step=$1
- y=`echo ${yprec} k ${step} ${nsteps} / sx _2 lx 3 ^ '*' 3 lx 2 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
- h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
-}
-
-smoother() {
- step=$1
- y=`echo ${yprec} k ${step} ${nsteps} / sx 6 lx 5 ^ '*' _15 lx 4 ^ '*' + 10 lx 3 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
- h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
-}
-
-smoothest() {
- step=$1
- y=`echo ${yprec} k ${step} ${nsteps} / sx _20 lx 7 ^ '*' 70 lx 6 ^ '*' + _84 lx 5 ^ '*' + 35 lx 4 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
- h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
-}
-
-cat <<EOF
-/*
- * This file was generated by the following command:
- * $cmd
- */
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-/*
- * This header defines a precomputed table based on the smoothstep family of
- * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
- * to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so
- * that floating point math can be avoided.
- *
- * 3 2
- * smoothstep(x) = -2x + 3x
- *
- * 5 4 3
- * smootherstep(x) = 6x - 15x + 10x
- *
- * 7 6 5 4
- * smootheststep(x) = -20x + 70x - 84x + 35x
- */
-
-#define SMOOTHSTEP_VARIANT "${variant}"
-#define SMOOTHSTEP_NSTEPS ${nsteps}
-#define SMOOTHSTEP_BFP ${bfp}
-#define SMOOTHSTEP \\
- /* STEP(step, h, x, y) */ \\
-EOF
-
-s=1
-while [ $s -le $nsteps ] ; do
- $variant ${s}
- x=`echo ${xprec} k ${s} ${nsteps} / p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
- printf ' STEP(%4d, UINT64_C(0x%016x), %s, %s) \\\n' ${s} ${h} ${x} ${y}
-
- s=$((s+1))
-done
-echo
-
-cat <<EOF
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
-EOF
diff --git a/deps/jemalloc/include/jemalloc/internal/spin.h b/deps/jemalloc/include/jemalloc/internal/spin.h
deleted file mode 100644
index 9ef5ceb92..000000000
--- a/deps/jemalloc/include/jemalloc/internal/spin.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-typedef struct spin_s spin_t;
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-struct spin_s {
- unsigned iteration;
-};
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-void spin_init(spin_t *spin);
-void spin_adaptive(spin_t *spin);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_SPIN_C_))
-JEMALLOC_INLINE void
-spin_init(spin_t *spin)
-{
-
- spin->iteration = 0;
-}
-
-JEMALLOC_INLINE void
-spin_adaptive(spin_t *spin)
-{
- volatile uint64_t i;
-
- for (i = 0; i < (KQU(1) << spin->iteration); i++)
- CPU_SPINWAIT;
-
- if (spin->iteration < 63)
- spin->iteration++;
-}
-
-#endif
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
-
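
The deleted spin_adaptive() implements exponential backoff: each call busy-waits 2^iteration times on CPU_SPINWAIT and then bumps the exponent (capped at 63). A hedged usage sketch around a hypothetical trylock primitive; toy_mutex_t and toy_trylock are not jemalloc APIs:

#include <stdbool.h>

typedef struct toy_mutex_s toy_mutex_t;	/* hypothetical lock type */
bool	toy_trylock(toy_mutex_t *m);	/* hypothetical; true on success */

static void
toy_lock(toy_mutex_t *m)
{
	spin_t spinner;

	spin_init(&spinner);		/* iteration = 0 */
	while (!toy_trylock(m))
		spin_adaptive(&spinner);	/* waits 2^iteration, then doubles */
}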
diff --git a/deps/jemalloc/include/jemalloc/internal/stats.h b/deps/jemalloc/include/jemalloc/internal/stats.h
index 04e7dae14..c91dba99d 100644
--- a/deps/jemalloc/include/jemalloc/internal/stats.h
+++ b/deps/jemalloc/include/jemalloc/internal/stats.h
@@ -103,14 +103,6 @@ struct arena_stats_s {
size_t mapped;
/*
- * Number of bytes currently retained as a side effect of munmap() being
- * disabled/bypassed. Retained bytes are technically mapped (though
- * always decommitted or purged), but they are excluded from the mapped
- * statistic (above).
- */
- size_t retained;
-
- /*
* Total number of purge sweeps, total number of madvise calls made,
* and total pages purged in order to keep dirty unused memory under
* control.
@@ -176,9 +168,6 @@ JEMALLOC_INLINE void
stats_cactive_add(size_t size)
{
- assert(size > 0);
- assert((size & chunksize_mask) == 0);
-
atomic_add_z(&stats_cactive, size);
}
@@ -186,9 +175,6 @@ JEMALLOC_INLINE void
stats_cactive_sub(size_t size)
{
- assert(size > 0);
- assert((size & chunksize_mask) == 0);
-
atomic_sub_z(&stats_cactive, size);
}
#endif
diff --git a/deps/jemalloc/include/jemalloc/internal/tcache.h b/deps/jemalloc/include/jemalloc/internal/tcache.h
index 01ba062de..5079cd266 100644
--- a/deps/jemalloc/include/jemalloc/internal/tcache.h
+++ b/deps/jemalloc/include/jemalloc/internal/tcache.h
@@ -70,20 +70,13 @@ struct tcache_bin_s {
int low_water; /* Min # cached since last GC. */
unsigned lg_fill_div; /* Fill (ncached_max >> lg_fill_div). */
unsigned ncached; /* # of cached objects. */
- /*
- * To make use of adjacent cacheline prefetch, the items in the avail
- * stack goes to higher address for newer allocations. avail points
- * just above the available space, which means that
- * avail[-ncached, ... -1] are available items and the lowest item will
- * be allocated first.
- */
void **avail; /* Stack of available objects. */
};
struct tcache_s {
ql_elm(tcache_t) link; /* Used for aggregating stats. */
uint64_t prof_accumbytes;/* Cleared after arena_prof_accum(). */
- ticker_t gc_ticker; /* Drives incremental GC. */
+ unsigned ev_cnt; /* Event count since incremental GC. */
szind_t next_gc_bin; /* Next bin to GC. */
tcache_bin_t tbins[1]; /* Dynamically sized. */
/*
@@ -115,7 +108,7 @@ extern tcache_bin_info_t *tcache_bin_info;
* Number of tcache bins. There are NBINS small-object bins, plus 0 or more
* large-object bins.
*/
-extern unsigned nhbins;
+extern size_t nhbins;
/* Maximum cached size class. */
extern size_t tcache_maxclass;
@@ -130,25 +123,27 @@ extern size_t tcache_maxclass;
*/
extern tcaches_t *tcaches;
-size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
+size_t tcache_salloc(const void *ptr);
void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
-void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
- tcache_bin_t *tbin, szind_t binind, bool *tcache_success);
+void *tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+ tcache_bin_t *tbin, szind_t binind);
void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
szind_t binind, unsigned rem);
void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
unsigned rem, tcache_t *tcache);
-void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache,
- arena_t *oldarena, arena_t *newarena);
+void tcache_arena_associate(tcache_t *tcache, arena_t *arena);
+void tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena,
+ arena_t *newarena);
+void tcache_arena_dissociate(tcache_t *tcache, arena_t *arena);
tcache_t *tcache_get_hard(tsd_t *tsd);
-tcache_t *tcache_create(tsdn_t *tsdn, arena_t *arena);
+tcache_t *tcache_create(tsd_t *tsd, arena_t *arena);
void tcache_cleanup(tsd_t *tsd);
void tcache_enabled_cleanup(tsd_t *tsd);
-void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
+void tcache_stats_merge(tcache_t *tcache, arena_t *arena);
bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
void tcaches_flush(tsd_t *tsd, unsigned ind);
void tcaches_destroy(tsd_t *tsd, unsigned ind);
-bool tcache_boot(tsdn_t *tsdn);
+bool tcache_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
@@ -160,15 +155,15 @@ void tcache_flush(void);
bool tcache_enabled_get(void);
tcache_t *tcache_get(tsd_t *tsd, bool create);
void tcache_enabled_set(bool enabled);
-void *tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success);
+void *tcache_alloc_easy(tcache_bin_t *tbin);
void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
- size_t size, szind_t ind, bool zero, bool slow_path);
+ size_t size, bool zero);
void *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
- size_t size, szind_t ind, bool zero, bool slow_path);
+ size_t size, bool zero);
void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
- szind_t binind, bool slow_path);
+ szind_t binind);
void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
- size_t size, bool slow_path);
+ size_t size);
tcache_t *tcaches_get(tsd_t *tsd, unsigned ind);
#endif
@@ -245,74 +240,51 @@ tcache_event(tsd_t *tsd, tcache_t *tcache)
if (TCACHE_GC_INCR == 0)
return;
- if (unlikely(ticker_tick(&tcache->gc_ticker)))
+ tcache->ev_cnt++;
+ assert(tcache->ev_cnt <= TCACHE_GC_INCR);
+ if (unlikely(tcache->ev_cnt == TCACHE_GC_INCR))
tcache_event_hard(tsd, tcache);
}
JEMALLOC_ALWAYS_INLINE void *
-tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success)
+tcache_alloc_easy(tcache_bin_t *tbin)
{
void *ret;
if (unlikely(tbin->ncached == 0)) {
tbin->low_water = -1;
- *tcache_success = false;
return (NULL);
}
- /*
- * tcache_success (instead of ret) should be checked upon the return of
- * this function. We avoid checking (ret == NULL) because there is
- * never a null stored on the avail stack (which is unknown to the
- * compiler), and eagerly checking ret would cause pipeline stall
- * (waiting for the cacheline).
- */
- *tcache_success = true;
- ret = *(tbin->avail - tbin->ncached);
tbin->ncached--;
-
if (unlikely((int)tbin->ncached < tbin->low_water))
tbin->low_water = tbin->ncached;
-
+ ret = tbin->avail[tbin->ncached];
return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
- szind_t binind, bool zero, bool slow_path)
+ bool zero)
{
void *ret;
+ szind_t binind;
+ size_t usize;
tcache_bin_t *tbin;
- bool tcache_success;
- size_t usize JEMALLOC_CC_SILENCE_INIT(0);
+ binind = size2index(size);
assert(binind < NBINS);
tbin = &tcache->tbins[binind];
- ret = tcache_alloc_easy(tbin, &tcache_success);
- assert(tcache_success == (ret != NULL));
- if (unlikely(!tcache_success)) {
- bool tcache_hard_success;
- arena = arena_choose(tsd, arena);
- if (unlikely(arena == NULL))
- return (NULL);
-
- ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
- tbin, binind, &tcache_hard_success);
- if (tcache_hard_success == false)
+ usize = index2size(binind);
+ ret = tcache_alloc_easy(tbin);
+ if (unlikely(ret == NULL)) {
+ ret = tcache_alloc_small_hard(tsd, arena, tcache, tbin, binind);
+ if (ret == NULL)
return (NULL);
}
-
- assert(ret);
- /*
- * Only compute usize if required. The checks in the following if
- * statement are all static.
- */
- if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
- usize = index2size(binind);
- assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
- }
+ assert(tcache_salloc(ret) == usize);
if (likely(!zero)) {
- if (slow_path && config_fill) {
+ if (config_fill) {
if (unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret,
&arena_bin_info[binind], false);
@@ -320,7 +292,7 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
memset(ret, 0, usize);
}
} else {
- if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
+ if (config_fill && unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret, &arena_bin_info[binind],
true);
}
@@ -337,38 +309,28 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
- szind_t binind, bool zero, bool slow_path)
+ bool zero)
{
void *ret;
+ szind_t binind;
+ size_t usize;
tcache_bin_t *tbin;
- bool tcache_success;
+ binind = size2index(size);
+ usize = index2size(binind);
+ assert(usize <= tcache_maxclass);
assert(binind < nhbins);
tbin = &tcache->tbins[binind];
- ret = tcache_alloc_easy(tbin, &tcache_success);
- assert(tcache_success == (ret != NULL));
- if (unlikely(!tcache_success)) {
+ ret = tcache_alloc_easy(tbin);
+ if (unlikely(ret == NULL)) {
/*
* Only allocate one large object at a time, because it's quite
* expensive to create one and not use it.
*/
- arena = arena_choose(tsd, arena);
- if (unlikely(arena == NULL))
- return (NULL);
-
- ret = arena_malloc_large(tsd_tsdn(tsd), arena, binind, zero);
+ ret = arena_malloc_large(arena, usize, zero);
if (ret == NULL)
return (NULL);
} else {
- size_t usize JEMALLOC_CC_SILENCE_INIT(0);
-
- /* Only compute usize on demand */
- if (config_prof || (slow_path && config_fill) ||
- unlikely(zero)) {
- usize = index2size(binind);
- assert(usize <= tcache_maxclass);
- }
-
if (config_prof && usize == LARGE_MINCLASS) {
arena_chunk_t *chunk =
(arena_chunk_t *)CHUNK_ADDR2BASE(ret);
@@ -378,11 +340,10 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
BININD_INVALID);
}
if (likely(!zero)) {
- if (slow_path && config_fill) {
- if (unlikely(opt_junk_alloc)) {
- memset(ret, JEMALLOC_ALLOC_JUNK,
- usize);
- } else if (unlikely(opt_zero))
+ if (config_fill) {
+ if (unlikely(opt_junk_alloc))
+ memset(ret, 0xa5, usize);
+ else if (unlikely(opt_zero))
memset(ret, 0, usize);
}
} else
@@ -399,15 +360,14 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
}
JEMALLOC_ALWAYS_INLINE void
-tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
- bool slow_path)
+tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind)
{
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;
- assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS);
+ assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);
- if (slow_path && config_fill && unlikely(opt_junk_free))
+ if (config_fill && unlikely(opt_junk_free))
arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
tbin = &tcache->tbins[binind];
@@ -417,27 +377,26 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
(tbin_info->ncached_max >> 1));
}
assert(tbin->ncached < tbin_info->ncached_max);
+ tbin->avail[tbin->ncached] = ptr;
tbin->ncached++;
- *(tbin->avail - tbin->ncached) = ptr;
tcache_event(tsd, tcache);
}
JEMALLOC_ALWAYS_INLINE void
-tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
- bool slow_path)
+tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size)
{
szind_t binind;
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;
assert((size & PAGE_MASK) == 0);
- assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS);
- assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);
+ assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
+ assert(tcache_salloc(ptr) <= tcache_maxclass);
binind = size2index(size);
- if (slow_path && config_fill && unlikely(opt_junk_free))
+ if (config_fill && unlikely(opt_junk_free))
arena_dalloc_junk_large(ptr, size);
tbin = &tcache->tbins[binind];
@@ -447,8 +406,8 @@ tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
(tbin_info->ncached_max >> 1), tcache);
}
assert(tbin->ncached < tbin_info->ncached_max);
+ tbin->avail[tbin->ncached] = ptr;
tbin->ncached++;
- *(tbin->avail - tbin->ncached) = ptr;
tcache_event(tsd, tcache);
}
@@ -457,10 +416,8 @@ JEMALLOC_ALWAYS_INLINE tcache_t *
tcaches_get(tsd_t *tsd, unsigned ind)
{
tcaches_t *elm = &tcaches[ind];
- if (unlikely(elm->tcache == NULL)) {
- elm->tcache = tcache_create(tsd_tsdn(tsd), arena_choose(tsd,
- NULL));
- }
+ if (unlikely(elm->tcache == NULL))
+ elm->tcache = tcache_create(tsd, arena_choose(tsd, NULL));
return (elm->tcache);
}
#endif
diff --git a/deps/jemalloc/include/jemalloc/internal/ticker.h b/deps/jemalloc/include/jemalloc/internal/ticker.h
deleted file mode 100644
index 4696e56d2..000000000
--- a/deps/jemalloc/include/jemalloc/internal/ticker.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-typedef struct ticker_s ticker_t;
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-struct ticker_s {
- int32_t tick;
- int32_t nticks;
-};
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-void ticker_init(ticker_t *ticker, int32_t nticks);
-void ticker_copy(ticker_t *ticker, const ticker_t *other);
-int32_t ticker_read(const ticker_t *ticker);
-bool ticker_ticks(ticker_t *ticker, int32_t nticks);
-bool ticker_tick(ticker_t *ticker);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TICKER_C_))
-JEMALLOC_INLINE void
-ticker_init(ticker_t *ticker, int32_t nticks)
-{
-
- ticker->tick = nticks;
- ticker->nticks = nticks;
-}
-
-JEMALLOC_INLINE void
-ticker_copy(ticker_t *ticker, const ticker_t *other)
-{
-
- *ticker = *other;
-}
-
-JEMALLOC_INLINE int32_t
-ticker_read(const ticker_t *ticker)
-{
-
- return (ticker->tick);
-}
-
-JEMALLOC_INLINE bool
-ticker_ticks(ticker_t *ticker, int32_t nticks)
-{
-
- if (unlikely(ticker->tick < nticks)) {
- ticker->tick = ticker->nticks;
- return (true);
- }
- ticker->tick -= nticks;
- return(false);
-}
-
-JEMALLOC_INLINE bool
-ticker_tick(ticker_t *ticker)
-{
-
- return (ticker_ticks(ticker, 1));
-}
-#endif
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
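
The deleted ticker is a simple countdown used by tcache_event() above to drive incremental GC: ticker_tick() decrements the counter and, once it is exhausted, reloads nticks and returns true. A hedged usage sketch; TOY_NTICKS and toy_gc are illustrative, not jemalloc's actual values:

#define TOY_NTICKS	228	/* illustrative period */

static ticker_t	toy_ticker;	/* ticker_init(&toy_ticker, TOY_NTICKS) at startup */
static void	toy_gc(void);	/* hypothetical slow-path work */

static void
toy_event(void)
{

	/*
	 * ticker_tick() decrements; once the countdown is exhausted it
	 * reloads nticks and returns true, so toy_gc() runs roughly once
	 * every TOY_NTICKS calls.
	 */
	if (unlikely(ticker_tick(&toy_ticker)))
		toy_gc();
}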
diff --git a/deps/jemalloc/include/jemalloc/internal/tsd.h b/deps/jemalloc/include/jemalloc/internal/tsd.h
index 9055acafd..eed7aa013 100644
--- a/deps/jemalloc/include/jemalloc/internal/tsd.h
+++ b/deps/jemalloc/include/jemalloc/internal/tsd.h
@@ -13,9 +13,6 @@ typedef struct tsd_init_head_s tsd_init_head_t;
#endif
typedef struct tsd_s tsd_t;
-typedef struct tsdn_s tsdn_t;
-
-#define TSDN_NULL ((tsdn_t *)0)
typedef enum {
tsd_state_uninitialized,
@@ -47,8 +44,7 @@ typedef enum {
* The result is a set of generated functions, e.g.:
*
* bool example_tsd_boot(void) {...}
- * bool example_tsd_booted_get(void) {...}
- * example_t *example_tsd_get(bool init) {...}
+ * example_t *example_tsd_get() {...}
* void example_tsd_set(example_t *val) {...}
*
* Note that all of the functions deal in terms of (a_type *) rather than
@@ -102,10 +98,8 @@ a_attr void \
a_name##tsd_boot1(void); \
a_attr bool \
a_name##tsd_boot(void); \
-a_attr bool \
-a_name##tsd_booted_get(void); \
a_attr a_type * \
-a_name##tsd_get(bool init); \
+a_name##tsd_get(void); \
a_attr void \
a_name##tsd_set(a_type *val);
@@ -207,21 +201,9 @@ a_name##tsd_boot(void) \
\
return (a_name##tsd_boot0()); \
} \
-a_attr bool \
-a_name##tsd_booted_get(void) \
-{ \
- \
- return (a_name##tsd_booted); \
-} \
-a_attr bool \
-a_name##tsd_get_allocates(void) \
-{ \
- \
- return (false); \
-} \
/* Get/set. */ \
a_attr a_type * \
-a_name##tsd_get(bool init) \
+a_name##tsd_get(void) \
{ \
\
assert(a_name##tsd_booted); \
@@ -264,21 +246,9 @@ a_name##tsd_boot(void) \
\
return (a_name##tsd_boot0()); \
} \
-a_attr bool \
-a_name##tsd_booted_get(void) \
-{ \
- \
- return (a_name##tsd_booted); \
-} \
-a_attr bool \
-a_name##tsd_get_allocates(void) \
-{ \
- \
- return (false); \
-} \
/* Get/set. */ \
a_attr a_type * \
-a_name##tsd_get(bool init) \
+a_name##tsd_get(void) \
{ \
\
assert(a_name##tsd_booted); \
@@ -337,14 +307,14 @@ a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
} \
} \
a_attr a_name##tsd_wrapper_t * \
-a_name##tsd_wrapper_get(bool init) \
+a_name##tsd_wrapper_get(void) \
{ \
DWORD error = GetLastError(); \
a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
TlsGetValue(a_name##tsd_tsd); \
SetLastError(error); \
\
- if (init && unlikely(wrapper == NULL)) { \
+ if (unlikely(wrapper == NULL)) { \
wrapper = (a_name##tsd_wrapper_t *) \
malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
if (wrapper == NULL) { \
@@ -398,28 +368,14 @@ a_name##tsd_boot(void) \
a_name##tsd_boot1(); \
return (false); \
} \
-a_attr bool \
-a_name##tsd_booted_get(void) \
-{ \
- \
- return (a_name##tsd_booted); \
-} \
-a_attr bool \
-a_name##tsd_get_allocates(void) \
-{ \
- \
- return (true); \
-} \
/* Get/set. */ \
a_attr a_type * \
-a_name##tsd_get(bool init) \
+a_name##tsd_get(void) \
{ \
a_name##tsd_wrapper_t *wrapper; \
\
assert(a_name##tsd_booted); \
- wrapper = a_name##tsd_wrapper_get(init); \
- if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \
- return (NULL); \
+ wrapper = a_name##tsd_wrapper_get(); \
return (&wrapper->val); \
} \
a_attr void \
@@ -428,7 +384,7 @@ a_name##tsd_set(a_type *val) \
a_name##tsd_wrapper_t *wrapper; \
\
assert(a_name##tsd_booted); \
- wrapper = a_name##tsd_wrapper_get(true); \
+ wrapper = a_name##tsd_wrapper_get(); \
wrapper->val = *(val); \
if (a_cleanup != malloc_tsd_no_cleanup) \
wrapper->initialized = true; \
@@ -472,12 +428,12 @@ a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
} \
} \
a_attr a_name##tsd_wrapper_t * \
-a_name##tsd_wrapper_get(bool init) \
+a_name##tsd_wrapper_get(void) \
{ \
a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
pthread_getspecific(a_name##tsd_tsd); \
\
- if (init && unlikely(wrapper == NULL)) { \
+ if (unlikely(wrapper == NULL)) { \
tsd_init_block_t block; \
wrapper = tsd_init_check_recursion( \
&a_name##tsd_init_head, &block); \
@@ -534,28 +490,14 @@ a_name##tsd_boot(void) \
a_name##tsd_boot1(); \
return (false); \
} \
-a_attr bool \
-a_name##tsd_booted_get(void) \
-{ \
- \
- return (a_name##tsd_booted); \
-} \
-a_attr bool \
-a_name##tsd_get_allocates(void) \
-{ \
- \
- return (true); \
-} \
/* Get/set. */ \
a_attr a_type * \
-a_name##tsd_get(bool init) \
+a_name##tsd_get(void) \
{ \
a_name##tsd_wrapper_t *wrapper; \
\
assert(a_name##tsd_booted); \
- wrapper = a_name##tsd_wrapper_get(init); \
- if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \
- return (NULL); \
+ wrapper = a_name##tsd_wrapper_get(); \
return (&wrapper->val); \
} \
a_attr void \
@@ -564,7 +506,7 @@ a_name##tsd_set(a_type *val) \
a_name##tsd_wrapper_t *wrapper; \
\
assert(a_name##tsd_booted); \
- wrapper = a_name##tsd_wrapper_get(true); \
+ wrapper = a_name##tsd_wrapper_get(); \
wrapper->val = *(val); \
if (a_cleanup != malloc_tsd_no_cleanup) \
wrapper->initialized = true; \
@@ -594,15 +536,12 @@ struct tsd_init_head_s {
O(thread_allocated, uint64_t) \
O(thread_deallocated, uint64_t) \
O(prof_tdata, prof_tdata_t *) \
- O(iarena, arena_t *) \
O(arena, arena_t *) \
- O(arenas_tdata, arena_tdata_t *) \
- O(narenas_tdata, unsigned) \
- O(arenas_tdata_bypass, bool) \
+ O(arenas_cache, arena_t **) \
+ O(narenas_cache, unsigned) \
+ O(arenas_cache_bypass, bool) \
O(tcache_enabled, tcache_enabled_t) \
O(quarantine, quarantine_t *) \
- O(witnesses, witness_list_t) \
- O(witness_fork, bool) \
#define TSD_INITIALIZER { \
tsd_state_uninitialized, \
@@ -612,13 +551,10 @@ struct tsd_init_head_s {
NULL, \
NULL, \
NULL, \
- NULL, \
0, \
false, \
tcache_enabled_default, \
- NULL, \
- ql_head_initializer(witnesses), \
- false \
+ NULL \
}
struct tsd_s {
@@ -629,15 +565,6 @@ MALLOC_TSD
#undef O
};
-/*
- * Wrapper around tsd_t that makes it possible to avoid implicit conversion
- * between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be
- * explicitly converted to tsd_t, which is non-nullable.
- */
-struct tsdn_s {
- tsd_t tsd;
-};
-
static const tsd_t tsd_initializer = TSD_INITIALIZER;
malloc_tsd_types(, tsd_t)
@@ -650,7 +577,7 @@ void *malloc_tsd_malloc(size_t size);
void malloc_tsd_dalloc(void *wrapper);
void malloc_tsd_no_cleanup(void *arg);
void malloc_tsd_cleanup_register(bool (*f)(void));
-tsd_t *malloc_tsd_boot0(void);
+bool malloc_tsd_boot0(void);
void malloc_tsd_boot1(void);
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
@@ -667,9 +594,7 @@ void tsd_cleanup(void *arg);
#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), , tsd_t)
-tsd_t *tsd_fetch_impl(bool init);
tsd_t *tsd_fetch(void);
-tsdn_t *tsd_tsdn(tsd_t *tsd);
bool tsd_nominal(tsd_t *tsd);
#define O(n, t) \
t *tsd_##n##p_get(tsd_t *tsd); \
@@ -677,9 +602,6 @@ t tsd_##n##_get(tsd_t *tsd); \
void tsd_##n##_set(tsd_t *tsd, t n);
MALLOC_TSD
#undef O
-tsdn_t *tsdn_fetch(void);
-bool tsdn_null(const tsdn_t *tsdn);
-tsd_t *tsdn_tsd(tsdn_t *tsdn);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TSD_C_))
@@ -687,13 +609,9 @@ malloc_tsd_externs(, tsd_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, , tsd_t, tsd_initializer, tsd_cleanup)
JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_fetch_impl(bool init)
+tsd_fetch(void)
{
- tsd_t *tsd = tsd_get(init);
-
- if (!init && tsd_get_allocates() && tsd == NULL)
- return (NULL);
- assert(tsd != NULL);
+ tsd_t *tsd = tsd_get();
if (unlikely(tsd->state != tsd_state_nominal)) {
if (tsd->state == tsd_state_uninitialized) {
@@ -710,20 +628,6 @@ tsd_fetch_impl(bool init)
return (tsd);
}
-JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_fetch(void)
-{
-
- return (tsd_fetch_impl(true));
-}
-
-JEMALLOC_ALWAYS_INLINE tsdn_t *
-tsd_tsdn(tsd_t *tsd)
-{
-
- return ((tsdn_t *)tsd);
-}
-
JEMALLOC_INLINE bool
tsd_nominal(tsd_t *tsd)
{
@@ -755,32 +659,6 @@ tsd_##n##_set(tsd_t *tsd, t n) \
}
MALLOC_TSD
#undef O
-
-JEMALLOC_ALWAYS_INLINE tsdn_t *
-tsdn_fetch(void)
-{
-
- if (!tsd_booted_get())
- return (NULL);
-
- return (tsd_tsdn(tsd_fetch_impl(false)));
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsdn_null(const tsdn_t *tsdn)
-{
-
- return (tsdn == NULL);
-}
-
-JEMALLOC_ALWAYS_INLINE tsd_t *
-tsdn_tsd(tsdn_t *tsdn)
-{
-
- assert(!tsdn_null(tsdn));
-
- return (&tsdn->tsd);
-}
#endif
#endif /* JEMALLOC_H_INLINES */
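
The removed tsdn_t machinery wraps tsd_t so that "no TSD available" can be expressed as a NULL tsdn_t while tsd_t itself stays non-nullable; tsdn_fetch() returns NULL before TSD is booted instead of forcing initialization. A sketch of the calling convention that code supported, using the removed tsdn_null()/tsdn_tsd() helpers; the toy_* functions are illustrative:

static void	toy_do_work(tsd_t *tsd);	/* hypothetical */
static void	toy_fallback(void);		/* hypothetical */

static void
toy_entry_point(tsdn_t *tsdn)
{

	if (tsdn_null(tsdn)) {
		/* No TSD yet (early startup or a dying thread). */
		toy_fallback();
		return;
	}
	toy_do_work(tsdn_tsd(tsdn));	/* tsdn known non-NULL here */
}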
diff --git a/deps/jemalloc/include/jemalloc/internal/util.h b/deps/jemalloc/include/jemalloc/internal/util.h
index 4b56d652e..b2ea740fd 100644
--- a/deps/jemalloc/include/jemalloc/internal/util.h
+++ b/deps/jemalloc/include/jemalloc/internal/util.h
@@ -40,14 +40,6 @@
*/
#define MALLOC_PRINTF_BUFSIZE 4096
-/* Junk fill patterns. */
-#ifndef JEMALLOC_ALLOC_JUNK
-# define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5)
-#endif
-#ifndef JEMALLOC_FREE_JUNK
-# define JEMALLOC_FREE_JUNK ((uint8_t)0x5a)
-#endif
-
/*
* Wrap a cpp argument that contains commas such that it isn't broken up into
* multiple arguments.
@@ -65,21 +57,73 @@
# define JEMALLOC_CC_SILENCE_INIT(v)
#endif
+#define JEMALLOC_GNUC_PREREQ(major, minor) \
+ (!defined(__clang__) && \
+ (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor))))
+#ifndef __has_builtin
+# define __has_builtin(builtin) (0)
+#endif
+#define JEMALLOC_CLANG_HAS_BUILTIN(builtin) \
+ (defined(__clang__) && __has_builtin(builtin))
+
#ifdef __GNUC__
# define likely(x) __builtin_expect(!!(x), 1)
# define unlikely(x) __builtin_expect(!!(x), 0)
+# if JEMALLOC_GNUC_PREREQ(4, 6) || \
+ JEMALLOC_CLANG_HAS_BUILTIN(__builtin_unreachable)
+# define unreachable() __builtin_unreachable()
+# else
+# define unreachable()
+# endif
#else
# define likely(x) !!(x)
# define unlikely(x) !!(x)
+# define unreachable()
#endif
-#if !defined(JEMALLOC_INTERNAL_UNREACHABLE)
-# error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure
+/*
+ * Define a custom assert() in order to reduce the chances of deadlock during
+ * assertion failure.
+ */
+#ifndef assert
+#define assert(e) do { \
+ if (unlikely(config_debug && !(e))) { \
+ malloc_printf( \
+ "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
+ __FILE__, __LINE__, #e); \
+ abort(); \
+ } \
+} while (0)
#endif
-#define unreachable() JEMALLOC_INTERNAL_UNREACHABLE()
+#ifndef not_reached
+#define not_reached() do { \
+ if (config_debug) { \
+ malloc_printf( \
+ "<jemalloc>: %s:%d: Unreachable code reached\n", \
+ __FILE__, __LINE__); \
+ abort(); \
+ } \
+ unreachable(); \
+} while (0)
+#endif
-#include "jemalloc/internal/assert.h"
+#ifndef not_implemented
+#define not_implemented() do { \
+ if (config_debug) { \
+ malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
+ __FILE__, __LINE__); \
+ abort(); \
+ } \
+} while (0)
+#endif
+
+#ifndef assert_not_implemented
+#define assert_not_implemented(e) do { \
+ if (unlikely(config_debug && !(e))) \
+ not_implemented(); \
+} while (0)
+#endif
/* Use to assert a particular configuration, e.g., cassert(config_debug). */
#define cassert(c) do { \
@@ -104,9 +148,9 @@ void malloc_write(const char *s);
* malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
* point math.
*/
-size_t malloc_vsnprintf(char *str, size_t size, const char *format,
+int malloc_vsnprintf(char *str, size_t size, const char *format,
va_list ap);
-size_t malloc_snprintf(char *str, size_t size, const char *format, ...)
+int malloc_snprintf(char *str, size_t size, const char *format, ...)
JEMALLOC_FORMAT_PRINTF(3, 4);
void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
const char *format, va_list ap);
@@ -119,16 +163,10 @@ void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
-unsigned ffs_llu(unsigned long long bitmap);
-unsigned ffs_lu(unsigned long bitmap);
-unsigned ffs_u(unsigned bitmap);
-unsigned ffs_zu(size_t bitmap);
-unsigned ffs_u64(uint64_t bitmap);
-unsigned ffs_u32(uint32_t bitmap);
-uint64_t pow2_ceil_u64(uint64_t x);
-uint32_t pow2_ceil_u32(uint32_t x);
-size_t pow2_ceil_zu(size_t x);
-unsigned lg_floor(size_t x);
+int jemalloc_ffsl(long bitmap);
+int jemalloc_ffs(int bitmap);
+size_t pow2_ceil(size_t x);
+size_t lg_floor(size_t x);
void set_errno(int errnum);
int get_errno(void);
#endif
@@ -136,74 +174,27 @@ int get_errno(void);
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_))
/* Sanity check. */
-#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \
- || !defined(JEMALLOC_INTERNAL_FFS)
-# error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure
+#if !defined(JEMALLOC_INTERNAL_FFSL) || !defined(JEMALLOC_INTERNAL_FFS)
+# error Both JEMALLOC_INTERNAL_FFSL && JEMALLOC_INTERNAL_FFS should have been defined by configure
#endif
-JEMALLOC_ALWAYS_INLINE unsigned
-ffs_llu(unsigned long long bitmap)
-{
-
- return (JEMALLOC_INTERNAL_FFSLL(bitmap));
-}
-
-JEMALLOC_ALWAYS_INLINE unsigned
-ffs_lu(unsigned long bitmap)
+JEMALLOC_ALWAYS_INLINE int
+jemalloc_ffsl(long bitmap)
{
return (JEMALLOC_INTERNAL_FFSL(bitmap));
}
-JEMALLOC_ALWAYS_INLINE unsigned
-ffs_u(unsigned bitmap)
+JEMALLOC_ALWAYS_INLINE int
+jemalloc_ffs(int bitmap)
{
return (JEMALLOC_INTERNAL_FFS(bitmap));
}
-JEMALLOC_ALWAYS_INLINE unsigned
-ffs_zu(size_t bitmap)
-{
-
-#if LG_SIZEOF_PTR == LG_SIZEOF_INT
- return (ffs_u(bitmap));
-#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
- return (ffs_lu(bitmap));
-#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
- return (ffs_llu(bitmap));
-#else
-#error No implementation for size_t ffs()
-#endif
-}
-
-JEMALLOC_ALWAYS_INLINE unsigned
-ffs_u64(uint64_t bitmap)
-{
-
-#if LG_SIZEOF_LONG == 3
- return (ffs_lu(bitmap));
-#elif LG_SIZEOF_LONG_LONG == 3
- return (ffs_llu(bitmap));
-#else
-#error No implementation for 64-bit ffs()
-#endif
-}
-
-JEMALLOC_ALWAYS_INLINE unsigned
-ffs_u32(uint32_t bitmap)
-{
-
-#if LG_SIZEOF_INT == 2
- return (ffs_u(bitmap));
-#else
-#error No implementation for 32-bit ffs()
-#endif
- return (ffs_u(bitmap));
-}
-
-JEMALLOC_INLINE uint64_t
-pow2_ceil_u64(uint64_t x)
+/* Compute the smallest power of 2 that is >= x. */
+JEMALLOC_INLINE size_t
+pow2_ceil(size_t x)
{
x--;
@@ -212,39 +203,15 @@ pow2_ceil_u64(uint64_t x)
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
+#if (LG_SIZEOF_PTR == 3)
x |= x >> 32;
+#endif
x++;
return (x);
}
-JEMALLOC_INLINE uint32_t
-pow2_ceil_u32(uint32_t x)
-{
-
- x--;
- x |= x >> 1;
- x |= x >> 2;
- x |= x >> 4;
- x |= x >> 8;
- x |= x >> 16;
- x++;
- return (x);
-}
-
-/* Compute the smallest power of 2 that is >= x. */
-JEMALLOC_INLINE size_t
-pow2_ceil_zu(size_t x)
-{
-
-#if (LG_SIZEOF_PTR == 3)
- return (pow2_ceil_u64(x));
-#else
- return (pow2_ceil_u32(x));
-#endif
-}
-
#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
-JEMALLOC_INLINE unsigned
+JEMALLOC_INLINE size_t
lg_floor(size_t x)
{
size_t ret;
@@ -255,11 +222,10 @@ lg_floor(size_t x)
: "=r"(ret) // Outputs.
: "r"(x) // Inputs.
);
- assert(ret < UINT_MAX);
- return ((unsigned)ret);
+ return (ret);
}
#elif (defined(_MSC_VER))
-JEMALLOC_INLINE unsigned
+JEMALLOC_INLINE size_t
lg_floor(size_t x)
{
unsigned long ret;
@@ -271,13 +237,12 @@ lg_floor(size_t x)
#elif (LG_SIZEOF_PTR == 2)
_BitScanReverse(&ret, x);
#else
-# error "Unsupported type size for lg_floor()"
+# error "Unsupported type sizes for lg_floor()"
#endif
- assert(ret < UINT_MAX);
- return ((unsigned)ret);
+ return (ret);
}
#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
-JEMALLOC_INLINE unsigned
+JEMALLOC_INLINE size_t
lg_floor(size_t x)
{
@@ -288,11 +253,11 @@ lg_floor(size_t x)
#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG)
return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x));
#else
-# error "Unsupported type size for lg_floor()"
+# error "Unsupported type sizes for lg_floor()"
#endif
}
#else
-JEMALLOC_INLINE unsigned
+JEMALLOC_INLINE size_t
lg_floor(size_t x)
{
@@ -303,13 +268,20 @@ lg_floor(size_t x)
x |= (x >> 4);
x |= (x >> 8);
x |= (x >> 16);
-#if (LG_SIZEOF_PTR == 3)
+#if (LG_SIZEOF_PTR == 3 && LG_SIZEOF_PTR == LG_SIZEOF_LONG)
x |= (x >> 32);
-#endif
- if (x == SIZE_T_MAX)
- return ((8 << LG_SIZEOF_PTR) - 1);
+ if (x == KZU(0xffffffffffffffff))
+ return (63);
x++;
- return (ffs_zu(x) - 2);
+ return (jemalloc_ffsl(x) - 2);
+#elif (LG_SIZEOF_PTR == 2)
+ if (x == KZU(0xffffffff))
+ return (31);
+ x++;
+ return (jemalloc_ffs(x) - 2);
+#else
+# error "Unsupported type sizes for lg_floor()"
+#endif
}
#endif
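
For reference, the bit-smearing in pow2_ceil() on the "+" side copies the highest set bit into every lower position before the final increment. A worked trace plus a couple of sanity checks; toy_pow2_checks is illustrative only:

/*
 * Trace for x = 37:
 *   x--            -> 36 = 0b100100
 *   x |= x >> 1    -> 54 = 0b110110
 *   x |= x >> 2    -> 63 = 0b111111
 *   (the remaining shifts leave 63 unchanged)
 *   x++            -> 64
 */
static void
toy_pow2_checks(void)
{

	assert(pow2_ceil(37) == 64);	/* smallest power of 2 >= 37 */
	assert(lg_floor(37) == 5);	/* 2^5 = 32 <= 37 < 64 */
}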
diff --git a/deps/jemalloc/include/jemalloc/internal/valgrind.h b/deps/jemalloc/include/jemalloc/internal/valgrind.h
index 877a142b6..a3380df92 100644
--- a/deps/jemalloc/include/jemalloc/internal/valgrind.h
+++ b/deps/jemalloc/include/jemalloc/internal/valgrind.h
@@ -30,31 +30,17 @@
* calls must be embedded in macros rather than in functions so that when
* Valgrind reports errors, there are no extra stack frames in the backtraces.
*/
-#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do { \
- if (unlikely(in_valgrind && cond)) { \
- VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(tsdn, ptr), \
- zero); \
- } \
+#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \
+ if (unlikely(in_valgrind && cond)) \
+ VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \
} while (0)
-#define JEMALLOC_VALGRIND_REALLOC_MOVED_no(ptr, old_ptr) \
- (false)
-#define JEMALLOC_VALGRIND_REALLOC_MOVED_maybe(ptr, old_ptr) \
- ((ptr) != (old_ptr))
-#define JEMALLOC_VALGRIND_REALLOC_PTR_NULL_no(ptr) \
- (false)
-#define JEMALLOC_VALGRIND_REALLOC_PTR_NULL_maybe(ptr) \
- (ptr == NULL)
-#define JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_no(old_ptr) \
- (false)
-#define JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_maybe(old_ptr) \
- (old_ptr == NULL)
-#define JEMALLOC_VALGRIND_REALLOC(moved, tsdn, ptr, usize, ptr_null, \
- old_ptr, old_usize, old_rzsize, old_ptr_null, zero) do { \
+#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, ptr, usize, \
+ ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \
+ zero) do { \
if (unlikely(in_valgrind)) { \
- size_t rzsize = p2rz(tsdn, ptr); \
+ size_t rzsize = p2rz(ptr); \
\
- if (!JEMALLOC_VALGRIND_REALLOC_MOVED_##moved(ptr, \
- old_ptr)) { \
+ if (!maybe_moved || ptr == old_ptr) { \
VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
usize, rzsize); \
if (zero && old_usize < usize) { \
@@ -63,13 +49,11 @@
old_usize), usize - old_usize); \
} \
} else { \
- if (!JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_## \
- old_ptr_null(old_ptr)) { \
+ if (!old_ptr_maybe_null || old_ptr != NULL) { \
valgrind_freelike_block(old_ptr, \
old_rzsize); \
} \
- if (!JEMALLOC_VALGRIND_REALLOC_PTR_NULL_## \
- ptr_null(ptr)) { \
+ if (!ptr_maybe_null || ptr != NULL) { \
size_t copy_size = (old_usize < usize) \
? old_usize : usize; \
size_t tail_size = usize - copy_size; \
@@ -97,8 +81,8 @@
#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do {} while (0)
-#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do {} while (0)
-#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize, \
+#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0)
+#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, ptr, usize, \
ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \
zero) do {} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
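
The rewritten JEMALLOC_VALGRIND_REALLOC drops the token-pasting _no/_maybe helper macros in favour of plain boolean arguments that are tested in the expanded code (and folded away by the compiler when the arguments are constants). A tiny standalone sketch of the two styles, using illustrative names only, not jemalloc's macros:

#include <stdbool.h>
#include <stdio.h>

/* Token-pasting style: the suffix selects the test at preprocessing time. */
#define MOVED_no(p, op)		(false)
#define MOVED_maybe(p, op)	((p) != (op))
#define CHECK_PASTE(moved, p, op)	(!MOVED_##moved(p, op))

/* Boolean-argument style: a plain expression the optimizer folds. */
#define CHECK_BOOL(maybe_moved, p, op)	(!(maybe_moved) || (p) == (op))

int
main(void)
{
	int a, b;

	/* "no"/false: treated as not moved regardless of the pointers. */
	printf("%d %d\n", CHECK_PASTE(no, &a, &b), CHECK_BOOL(false, &a, &b));
	/* "maybe"/true with equal pointers: still counts as not moved. */
	printf("%d %d\n", CHECK_PASTE(maybe, &a, &a), CHECK_BOOL(true, &a, &a));
	return (0);
}
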
diff --git a/deps/jemalloc/include/jemalloc/internal/witness.h b/deps/jemalloc/include/jemalloc/internal/witness.h
deleted file mode 100644
index cdf15d797..000000000
--- a/deps/jemalloc/include/jemalloc/internal/witness.h
+++ /dev/null
@@ -1,266 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-typedef struct witness_s witness_t;
-typedef unsigned witness_rank_t;
-typedef ql_head(witness_t) witness_list_t;
-typedef int witness_comp_t (const witness_t *, const witness_t *);
-
-/*
- * Lock ranks. Witnesses with rank WITNESS_RANK_OMIT are completely ignored by
- * the witness machinery.
- */
-#define WITNESS_RANK_OMIT 0U
-
-#define WITNESS_RANK_INIT 1U
-#define WITNESS_RANK_CTL 1U
-#define WITNESS_RANK_ARENAS 2U
-
-#define WITNESS_RANK_PROF_DUMP 3U
-#define WITNESS_RANK_PROF_BT2GCTX 4U
-#define WITNESS_RANK_PROF_TDATAS 5U
-#define WITNESS_RANK_PROF_TDATA 6U
-#define WITNESS_RANK_PROF_GCTX 7U
-
-#define WITNESS_RANK_ARENA 8U
-#define WITNESS_RANK_ARENA_CHUNKS 9U
-#define WITNESS_RANK_ARENA_NODE_CACHE 10
-
-#define WITNESS_RANK_BASE 11U
-
-#define WITNESS_RANK_LEAF 0xffffffffU
-#define WITNESS_RANK_ARENA_BIN WITNESS_RANK_LEAF
-#define WITNESS_RANK_ARENA_HUGE WITNESS_RANK_LEAF
-#define WITNESS_RANK_DSS WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF
-
-#define WITNESS_INITIALIZER(rank) {"initializer", rank, NULL, {NULL, NULL}}
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-struct witness_s {
- /* Name, used for printing lock order reversal messages. */
- const char *name;
-
- /*
- * Witness rank, where 0 is lowest and UINT_MAX is highest. Witnesses
- * must be acquired in order of increasing rank.
- */
- witness_rank_t rank;
-
- /*
-	 * If two witnesses are of equal rank and they have the same comp
- * function pointer, it is called as a last attempt to differentiate
- * between witnesses of equal rank.
- */
- witness_comp_t *comp;
-
- /* Linkage for thread's currently owned locks. */
- ql_elm(witness_t) link;
-};
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-void witness_init(witness_t *witness, const char *name, witness_rank_t rank,
- witness_comp_t *comp);
-#ifdef JEMALLOC_JET
-typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *);
-extern witness_lock_error_t *witness_lock_error;
-#else
-void witness_lock_error(const witness_list_t *witnesses,
- const witness_t *witness);
-#endif
-#ifdef JEMALLOC_JET
-typedef void (witness_owner_error_t)(const witness_t *);
-extern witness_owner_error_t *witness_owner_error;
-#else
-void witness_owner_error(const witness_t *witness);
-#endif
-#ifdef JEMALLOC_JET
-typedef void (witness_not_owner_error_t)(const witness_t *);
-extern witness_not_owner_error_t *witness_not_owner_error;
-#else
-void witness_not_owner_error(const witness_t *witness);
-#endif
-#ifdef JEMALLOC_JET
-typedef void (witness_lockless_error_t)(const witness_list_t *);
-extern witness_lockless_error_t *witness_lockless_error;
-#else
-void witness_lockless_error(const witness_list_t *witnesses);
-#endif
-
-void witnesses_cleanup(tsd_t *tsd);
-void witness_fork_cleanup(tsd_t *tsd);
-void witness_prefork(tsd_t *tsd);
-void witness_postfork_parent(tsd_t *tsd);
-void witness_postfork_child(tsd_t *tsd);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-bool witness_owner(tsd_t *tsd, const witness_t *witness);
-void witness_assert_owner(tsdn_t *tsdn, const witness_t *witness);
-void witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness);
-void witness_assert_lockless(tsdn_t *tsdn);
-void witness_lock(tsdn_t *tsdn, witness_t *witness);
-void witness_unlock(tsdn_t *tsdn, witness_t *witness);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
-JEMALLOC_INLINE bool
-witness_owner(tsd_t *tsd, const witness_t *witness)
-{
- witness_list_t *witnesses;
- witness_t *w;
-
- witnesses = tsd_witnessesp_get(tsd);
- ql_foreach(w, witnesses, link) {
- if (w == witness)
- return (true);
- }
-
- return (false);
-}
-
-JEMALLOC_INLINE void
-witness_assert_owner(tsdn_t *tsdn, const witness_t *witness)
-{
- tsd_t *tsd;
-
- if (!config_debug)
- return;
-
- if (tsdn_null(tsdn))
- return;
- tsd = tsdn_tsd(tsdn);
- if (witness->rank == WITNESS_RANK_OMIT)
- return;
-
- if (witness_owner(tsd, witness))
- return;
- witness_owner_error(witness);
-}
-
-JEMALLOC_INLINE void
-witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness)
-{
- tsd_t *tsd;
- witness_list_t *witnesses;
- witness_t *w;
-
- if (!config_debug)
- return;
-
- if (tsdn_null(tsdn))
- return;
- tsd = tsdn_tsd(tsdn);
- if (witness->rank == WITNESS_RANK_OMIT)
- return;
-
- witnesses = tsd_witnessesp_get(tsd);
- ql_foreach(w, witnesses, link) {
- if (w == witness)
- witness_not_owner_error(witness);
- }
-}
-
-JEMALLOC_INLINE void
-witness_assert_lockless(tsdn_t *tsdn)
-{
- tsd_t *tsd;
- witness_list_t *witnesses;
- witness_t *w;
-
- if (!config_debug)
- return;
-
- if (tsdn_null(tsdn))
- return;
- tsd = tsdn_tsd(tsdn);
-
- witnesses = tsd_witnessesp_get(tsd);
- w = ql_last(witnesses, link);
- if (w != NULL)
- witness_lockless_error(witnesses);
-}
-
-JEMALLOC_INLINE void
-witness_lock(tsdn_t *tsdn, witness_t *witness)
-{
- tsd_t *tsd;
- witness_list_t *witnesses;
- witness_t *w;
-
- if (!config_debug)
- return;
-
- if (tsdn_null(tsdn))
- return;
- tsd = tsdn_tsd(tsdn);
- if (witness->rank == WITNESS_RANK_OMIT)
- return;
-
- witness_assert_not_owner(tsdn, witness);
-
- witnesses = tsd_witnessesp_get(tsd);
- w = ql_last(witnesses, link);
- if (w == NULL) {
- /* No other locks; do nothing. */
- } else if (tsd_witness_fork_get(tsd) && w->rank <= witness->rank) {
- /* Forking, and relaxed ranking satisfied. */
- } else if (w->rank > witness->rank) {
- /* Not forking, rank order reversal. */
- witness_lock_error(witnesses, witness);
- } else if (w->rank == witness->rank && (w->comp == NULL || w->comp !=
- witness->comp || w->comp(w, witness) > 0)) {
- /*
- * Missing/incompatible comparison function, or comparison
- * function indicates rank order reversal.
- */
- witness_lock_error(witnesses, witness);
- }
-
- ql_elm_new(witness, link);
- ql_tail_insert(witnesses, witness, link);
-}
-
-JEMALLOC_INLINE void
-witness_unlock(tsdn_t *tsdn, witness_t *witness)
-{
- tsd_t *tsd;
- witness_list_t *witnesses;
-
- if (!config_debug)
- return;
-
- if (tsdn_null(tsdn))
- return;
- tsd = tsdn_tsd(tsdn);
- if (witness->rank == WITNESS_RANK_OMIT)
- return;
-
- /*
- * Check whether owner before removal, rather than relying on
- * witness_assert_owner() to abort, so that unit tests can test this
- * function's failure mode without causing undefined behavior.
- */
- if (witness_owner(tsd, witness)) {
- witnesses = tsd_witnessesp_get(tsd);
- ql_remove(witnesses, witness, link);
- } else
- witness_assert_owner(tsdn, witness);
-}
-#endif
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
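
The witness.h removed above implements lock-order verification: each mutex carries a rank, a thread-local list records the witnesses currently held, and acquiring a lock whose rank is lower than the most recently acquired one is reported as a lock-order reversal. The following is a compressed, standalone sketch of that rank check with illustrative names; it omits the fork relaxation, equal-rank comparators, and tsd plumbing of the real code.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
	const char *name;
	unsigned rank;	/* Lower ranks must be acquired first. */
} witness_sketch_t;

#define MAX_HELD 16
static witness_sketch_t *held[MAX_HELD];
static int nheld;

static void
witness_sketch_lock(witness_sketch_t *w)
{
	assert(nheld < MAX_HELD);
	/* Reversal: the most recently acquired lock outranks the new one. */
	if (nheld > 0 && held[nheld - 1]->rank > w->rank) {
		fprintf(stderr, "lock order reversal: %s (%u) after %s (%u)\n",
		    w->name, w->rank, held[nheld - 1]->name,
		    held[nheld - 1]->rank);
		abort();
	}
	held[nheld++] = w;
}

int
main(void)
{
	witness_sketch_t arenas = {"arenas", 2};
	witness_sketch_t arena = {"arena", 8};

	witness_sketch_lock(&arenas);	/* OK: rank 2, then rank 8. */
	witness_sketch_lock(&arena);
	/* Acquiring another rank-2 lock here would trip the reversal check. */
	return (0);
}
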
diff --git a/deps/jemalloc/include/jemalloc/jemalloc_defs.h.in b/deps/jemalloc/include/jemalloc/jemalloc_defs.h.in
index 6d89435c2..ab13c3758 100644
--- a/deps/jemalloc/include/jemalloc/jemalloc_defs.h.in
+++ b/deps/jemalloc/include/jemalloc/jemalloc_defs.h.in
@@ -33,13 +33,5 @@
*/
#undef JEMALLOC_USE_CXX_THROW
-#ifdef _MSC_VER
-# ifdef _WIN64
-# define LG_SIZEOF_PTR_WIN 3
-# else
-# define LG_SIZEOF_PTR_WIN 2
-# endif
-#endif
-
/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
#undef LG_SIZEOF_PTR
diff --git a/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in b/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in
index 2956c7bb4..a7028db34 100644
--- a/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in
+++ b/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in
@@ -11,13 +11,12 @@
#define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@
#define JEMALLOC_VERSION_GID "@jemalloc_version_gid@"
-# define MALLOCX_LG_ALIGN(la) ((int)(la))
+# define MALLOCX_LG_ALIGN(la) (la)
# if LG_SIZEOF_PTR == 2
-# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1))
+# define MALLOCX_ALIGN(a) (ffs(a)-1)
# else
# define MALLOCX_ALIGN(a) \
- ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \
- ffs((int)(((size_t)(a))>>32))+31))
+ ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
# endif
# define MALLOCX_ZERO ((int)0x40)
/*
@@ -29,7 +28,7 @@
/*
* Bias arena index bits so that 0 encodes "use an automatically chosen arena".
*/
-# define MALLOCX_ARENA(a) ((((int)(a))+1) << 20)
+# define MALLOCX_ARENA(a) ((int)(((a)+1) << 20))
#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
# define JEMALLOC_CXX_THROW throw()
@@ -37,7 +36,32 @@
# define JEMALLOC_CXX_THROW
#endif
-#if _MSC_VER
+#ifdef JEMALLOC_HAVE_ATTR
+# define JEMALLOC_ATTR(s) __attribute__((s))
+# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
+# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
+# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s))
+# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2))
+# else
+# define JEMALLOC_ALLOC_SIZE(s)
+# define JEMALLOC_ALLOC_SIZE2(s1, s2)
+# endif
+# ifndef JEMALLOC_EXPORT
+# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
+# endif
+# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
+# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
+# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
+# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i))
+# else
+# define JEMALLOC_FORMAT_PRINTF(s, i)
+# endif
+# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
+# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow)
+# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
+# define JEMALLOC_RESTRICT_RETURN
+# define JEMALLOC_ALLOCATOR
+#elif _MSC_VER
# define JEMALLOC_ATTR(s)
# define JEMALLOC_ALIGNED(s) __declspec(align(s))
# define JEMALLOC_ALLOC_SIZE(s)
@@ -63,31 +87,6 @@
# else
# define JEMALLOC_ALLOCATOR
# endif
-#elif defined(JEMALLOC_HAVE_ATTR)
-# define JEMALLOC_ATTR(s) __attribute__((s))
-# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
-# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
-# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s))
-# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2))
-# else
-# define JEMALLOC_ALLOC_SIZE(s)
-# define JEMALLOC_ALLOC_SIZE2(s1, s2)
-# endif
-# ifndef JEMALLOC_EXPORT
-# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
-# endif
-# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
-# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
-# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
-# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i))
-# else
-# define JEMALLOC_FORMAT_PRINTF(s, i)
-# endif
-# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
-# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow)
-# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
-# define JEMALLOC_RESTRICT_RETURN
-# define JEMALLOC_ALLOCATOR
#else
# define JEMALLOC_ATTR(s)
# define JEMALLOC_ALIGNED(s)
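
The MALLOCX_* helpers above pack allocation hints into the mallocx() flags argument: the low six bits carry lg(alignment), bit 0x40 requests zeroed memory, and the arena index is biased by one and shifted into the high bits so that 0 still means "pick an arena automatically". A small demonstration of the encodings, assuming a 64-bit build; the macros are re-expressed locally with explicit casts so the sketch compiles cleanly and does not depend on jemalloc headers.

#include <limits.h>
#include <stdio.h>
#include <strings.h>	/* ffs() */

#define MALLOCX_LG_ALIGN(la)	(la)
#define MALLOCX_ALIGN(a)						\
	(((a) < (size_t)INT_MAX) ? ffs((int)(a))-1 :			\
	    ffs((int)((a)>>32))+31)
#define MALLOCX_ZERO		((int)0x40)
#define MALLOCX_ARENA(a)	((int)(((a)+1) << 20))

int
main(void)
{
	/* lg(64) == 6, whether given as an alignment or directly as lg. */
	printf("MALLOCX_ALIGN(64)   = %d\n", MALLOCX_ALIGN((size_t)64));
	printf("MALLOCX_LG_ALIGN(6) = %d\n", MALLOCX_LG_ALIGN(6));
	/* Arena 0 encodes as 1 << 20, leaving 0 to mean "automatic". */
	printf("MALLOCX_ARENA(0)    = 0x%x\n", (unsigned)MALLOCX_ARENA(0));
	/* Flags compose with bitwise or: zeroed, 4096-byte aligned = 0x4c. */
	printf("zero|align flags    = 0x%x\n",
	    (unsigned)(MALLOCX_ZERO | MALLOCX_ALIGN((size_t)4096)));
	return (0);
}
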
diff --git a/deps/jemalloc/include/msvc_compat/strings.h b/deps/jemalloc/include/msvc_compat/strings.h
index a3ee25063..f01ffdd18 100644
--- a/deps/jemalloc/include/msvc_compat/strings.h
+++ b/deps/jemalloc/include/msvc_compat/strings.h
@@ -21,37 +21,7 @@ static __forceinline int ffs(int x)
return (ffsl(x));
}
-# ifdef _M_X64
-# pragma intrinsic(_BitScanForward64)
-# endif
-
-static __forceinline int ffsll(unsigned __int64 x)
-{
- unsigned long i;
-#ifdef _M_X64
- if (_BitScanForward64(&i, x))
- return (i + 1);
- return (0);
-#else
-// Fallback for 32-bit build where 64-bit version not available
-// assuming little endian
- union {
- unsigned __int64 ll;
- unsigned long l[2];
- } s;
-
- s.ll = x;
-
- if (_BitScanForward(&i, s.l[0]))
- return (i + 1);
- else if(_BitScanForward(&i, s.l[1]))
- return (i + 33);
- return (0);
-#endif
-}
-
#else
-# define ffsll(x) __builtin_ffsll(x)
# define ffsl(x) __builtin_ffsl(x)
# define ffs(x) __builtin_ffs(x)
#endif
diff --git a/deps/jemalloc/include/msvc_compat/windows_extra.h b/deps/jemalloc/include/msvc_compat/windows_extra.h
index 3008faa37..0c5e323ff 100644
--- a/deps/jemalloc/include/msvc_compat/windows_extra.h
+++ b/deps/jemalloc/include/msvc_compat/windows_extra.h
@@ -1,6 +1,26 @@
#ifndef MSVC_COMPAT_WINDOWS_EXTRA_H
#define MSVC_COMPAT_WINDOWS_EXTRA_H
-#include <errno.h>
+#ifndef ENOENT
+# define ENOENT ERROR_PATH_NOT_FOUND
+#endif
+#ifndef EINVAL
+# define EINVAL ERROR_BAD_ARGUMENTS
+#endif
+#ifndef EAGAIN
+# define EAGAIN ERROR_OUTOFMEMORY
+#endif
+#ifndef EPERM
+# define EPERM ERROR_WRITE_FAULT
+#endif
+#ifndef EFAULT
+# define EFAULT ERROR_INVALID_ADDRESS
+#endif
+#ifndef ENOMEM
+# define ENOMEM ERROR_NOT_ENOUGH_MEMORY
+#endif
+#ifndef ERANGE
+# define ERANGE ERROR_INVALID_DATA
+#endif
#endif /* MSVC_COMPAT_WINDOWS_EXTRA_H */