author     antirez <antirez@gmail.com>  2012-11-28 18:39:35 +0100
committer  antirez <antirez@gmail.com>  2012-11-28 18:39:35 +0100
commit     7383c3b12920c6ae20f7c64c5db92f59e2b02aa5 (patch)
tree       b7d32af471dcaa47435006277702c814adaaf7ac /deps/jemalloc/include
parent     dee0b939fcd6650edfd8705f0685cd8430750085 (diff)
download   redis-7383c3b12920c6ae20f7c64c5db92f59e2b02aa5.tar.gz
Jemalloc updated to version 3.2.0.
Diffstat (limited to 'deps/jemalloc/include')
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/arena.h                  68
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/chunk.h                   8
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/chunk_dss.h              14
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/chunk_mmap.h              2
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/ctl.h                     5
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/extent.h                  3
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/huge.h                    2
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in  123
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/private_namespace.h     40
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/prof.h                    5
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/rtree.h                   3
-rw-r--r--  deps/jemalloc/include/jemalloc/jemalloc.h.in                      5
-rw-r--r--  deps/jemalloc/include/jemalloc/jemalloc_defs.h.in                 9
13 files changed, 220 insertions, 67 deletions
diff --git a/deps/jemalloc/include/jemalloc/internal/arena.h b/deps/jemalloc/include/jemalloc/internal/arena.h
index 0b0f640a4..561c9b6ff 100644
--- a/deps/jemalloc/include/jemalloc/internal/arena.h
+++ b/deps/jemalloc/include/jemalloc/internal/arena.h
@@ -38,10 +38,10 @@
*
* (nactive >> opt_lg_dirty_mult) >= ndirty
*
- * So, supposing that opt_lg_dirty_mult is 5, there can be no less than 32
- * times as many active pages as dirty pages.
+ * So, supposing that opt_lg_dirty_mult is 3, there can be no less than 8 times
+ * as many active pages as dirty pages.
*/
-#define LG_DIRTY_MULT_DEFAULT 5
+#define LG_DIRTY_MULT_DEFAULT 3
typedef struct arena_chunk_map_s arena_chunk_map_t;
typedef struct arena_chunk_s arena_chunk_t;
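
[Note: the invariant above caps dirty pages at nactive/2^opt_lg_dirty_mult, so dropping the default from 5 to 3 lets an arena keep up to 1/8 of its active pages dirty (instead of 1/32) before purging kicks in, trading retained memory for fewer madvise(2) calls. A minimal sketch of the threshold test, as a standalone helper rather than the actual arena code:]

#include <stdbool.h>
#include <stddef.h>

#define LG_DIRTY_MULT_DEFAULT	3

/* Hypothetical helper: true once the arena violates the
 * (nactive >> lg_dirty_mult) >= ndirty invariant and should purge.
 * With lg_dirty_mult == 3, nactive == 1024 tolerates ndirty <= 128;
 * the old default of 5 would have allowed only 32. */
static bool
arena_should_purge(size_t nactive, size_t ndirty, size_t lg_dirty_mult)
{

	return ((nactive >> lg_dirty_mult) < ndirty);
}
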
@@ -69,7 +69,7 @@ struct arena_chunk_map_s {
/*
* Linkage for run trees. There are two disjoint uses:
*
- * 1) arena_t's runs_avail_{clean,dirty} trees.
+ * 1) arena_t's runs_avail tree.
* 2) arena_run_t conceptually uses this linkage for in-use
* non-full runs, rather than directly embedding linkage.
*/
@@ -162,20 +162,24 @@ typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;
/* Arena chunk header. */
struct arena_chunk_s {
/* Arena that owns the chunk. */
- arena_t *arena;
+ arena_t *arena;
- /* Linkage for the arena's chunks_dirty list. */
- ql_elm(arena_chunk_t) link_dirty;
-
- /*
- * True if the chunk is currently in the chunks_dirty list, due to
- * having at some point contained one or more dirty pages. Removal
- * from chunks_dirty is lazy, so (dirtied && ndirty == 0) is possible.
- */
- bool dirtied;
+ /* Linkage for tree of arena chunks that contain dirty runs. */
+ rb_node(arena_chunk_t) dirty_link;
/* Number of dirty pages. */
- size_t ndirty;
+ size_t ndirty;
+
+ /* Number of available runs. */
+ size_t nruns_avail;
+
+ /*
+ * Number of available run adjacencies. Clean and dirty available runs
+ * are not coalesced, which causes virtual memory fragmentation. The
+ * ratio of (nruns_avail-nruns_adjac):nruns_adjac is used for tracking
+ * this fragmentation.
+ * */
+ size_t nruns_adjac;
/*
* Map of pages within chunk that keeps track of free/large/small. The
@@ -183,7 +187,7 @@ struct arena_chunk_s {
* need to be tracked in the map. This omission saves a header page
* for common chunk sizes (e.g. 4 MiB).
*/
- arena_chunk_map_t map[1]; /* Dynamically sized. */
+ arena_chunk_map_t map[1]; /* Dynamically sized. */
};
typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;
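
[Note: nruns_avail and nruns_adjac feed the fragmentation measure described in the comment: clean and dirty runs that sit next to each other but cannot coalesce each count as an adjacency. A sketch of how the (nruns_avail-nruns_adjac):nruns_adjac ratio can be compared without division — illustrative only, not the committed comparator:]

#include <stddef.h>

/* Order two chunks by fragmentation ratio, cross-multiplying so that
 *   (a_avail - a_adjac) / a_adjac  <  (b_avail - b_adjac) / b_adjac
 * is decided without division or floating point; overflow is ignored
 * for clarity. */
static int
chunk_frag_comp(size_t a_avail, size_t a_adjac, size_t b_avail,
    size_t b_adjac)
{
	size_t lhs = (a_avail - a_adjac) * b_adjac;
	size_t rhs = (b_avail - b_adjac) * a_adjac;

	if (lhs < rhs)
		return (-1);
	return ((lhs > rhs) ? 1 : 0);
}
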
@@ -331,8 +335,10 @@ struct arena_s {
uint64_t prof_accumbytes;
- /* List of dirty-page-containing chunks this arena manages. */
- ql_head(arena_chunk_t) chunks_dirty;
+ dss_prec_t dss_prec;
+
+ /* Tree of dirty-page-containing chunks this arena manages. */
+ arena_chunk_tree_t chunks_dirty;
/*
* In order to avoid rapid chunk allocation/deallocation when an arena
@@ -367,18 +373,9 @@ struct arena_s {
/*
* Size/address-ordered trees of this arena's available runs. The trees
- * are used for first-best-fit run allocation. The dirty tree contains
- * runs with dirty pages (i.e. very likely to have been touched and
- * therefore have associated physical pages), whereas the clean tree
- * contains runs with pages that either have no associated physical
- * pages, or have pages that the kernel may recycle at any time due to
- * previous madvise(2) calls. The dirty tree is used in preference to
- * the clean tree for allocations, because using dirty pages reduces
- * the amount of dirty purging necessary to keep the active:dirty page
- * ratio below the purge threshold.
+ * are used for first-best-fit run allocation.
*/
- arena_avail_tree_t runs_avail_clean;
- arena_avail_tree_t runs_avail_dirty;
+ arena_avail_tree_t runs_avail;
/* bins is used to store trees of free regions. */
arena_bin_t bins[NBINS];
@@ -422,13 +419,16 @@ void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
void arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk,
void *ptr);
void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
-void arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
- arena_stats_t *astats, malloc_bin_stats_t *bstats,
- malloc_large_stats_t *lstats);
void *arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero);
-void *arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
- size_t alignment, bool zero, bool try_tcache);
+void *arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
+ size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
+ bool try_tcache_dalloc);
+dss_prec_t arena_dss_prec_get(arena_t *arena);
+void arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
+void arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
+ size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
+ malloc_large_stats_t *lstats);
bool arena_new(arena_t *arena, unsigned ind);
void arena_boot(void);
void arena_prefork(arena_t *arena);
diff --git a/deps/jemalloc/include/jemalloc/internal/chunk.h b/deps/jemalloc/include/jemalloc/internal/chunk.h
index 8fb1fe6d1..87d8700da 100644
--- a/deps/jemalloc/include/jemalloc/internal/chunk.h
+++ b/deps/jemalloc/include/jemalloc/internal/chunk.h
@@ -28,6 +28,7 @@
#ifdef JEMALLOC_H_EXTERNS
extern size_t opt_lg_chunk;
+extern const char *opt_dss;
/* Protects stats_chunks; currently not used for any other purpose. */
extern malloc_mutex_t chunks_mtx;
@@ -42,9 +43,14 @@ extern size_t chunk_npages;
extern size_t map_bias; /* Number of arena chunk header pages. */
extern size_t arena_maxclass; /* Max size class for arenas. */
-void *chunk_alloc(size_t size, size_t alignment, bool base, bool *zero);
+void *chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
+ dss_prec_t dss_prec);
+void chunk_unmap(void *chunk, size_t size);
void chunk_dealloc(void *chunk, size_t size, bool unmap);
bool chunk_boot(void);
+void chunk_prefork(void);
+void chunk_postfork_parent(void);
+void chunk_postfork_child(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
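
[Note: chunk_prefork()/chunk_postfork_{parent,child}() follow a pattern this update applies across several modules (ctl, prof, and rtree below): every allocator mutex is acquired before fork(2) and released or reinitialized afterwards, so a child process never inherits a lock held by a thread that no longer exists. A minimal sketch of the idea with a plain pthread mutex; the real code goes through malloc_mutex_t wrappers:]

#include <pthread.h>

static pthread_mutex_t mtx_sketch = PTHREAD_MUTEX_INITIALIZER;

static void
sketch_prefork(void)
{
	/* Hold the lock across fork() so no other thread is mid-update. */
	pthread_mutex_lock(&mtx_sketch);
}

static void
sketch_postfork_parent(void)
{
	/* The parent still owns a valid lock; just release it. */
	pthread_mutex_unlock(&mtx_sketch);
}

static void
sketch_postfork_child(void)
{
	/* The child inherits a locked mutex whose owner is gone;
	 * reset it to a fresh state instead of unlocking. */
	pthread_mutex_t fresh = PTHREAD_MUTEX_INITIALIZER;

	mtx_sketch = fresh;
}

/* Registered once during bootstrap, e.g.:
 * pthread_atfork(sketch_prefork, sketch_postfork_parent,
 *     sketch_postfork_child); */
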
diff --git a/deps/jemalloc/include/jemalloc/internal/chunk_dss.h b/deps/jemalloc/include/jemalloc/internal/chunk_dss.h
index 6e2643b24..6585f071b 100644
--- a/deps/jemalloc/include/jemalloc/internal/chunk_dss.h
+++ b/deps/jemalloc/include/jemalloc/internal/chunk_dss.h
@@ -1,14 +1,28 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
+typedef enum {
+ dss_prec_disabled = 0,
+ dss_prec_primary = 1,
+ dss_prec_secondary = 2,
+
+ dss_prec_limit = 3
+} dss_prec_t ;
+#define DSS_PREC_DEFAULT dss_prec_secondary
+#define DSS_DEFAULT "secondary"
+
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
+extern const char *dss_prec_names[];
+
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
+dss_prec_t chunk_dss_prec_get(void);
+bool chunk_dss_prec_set(dss_prec_t dss_prec);
void *chunk_alloc_dss(size_t size, size_t alignment, bool *zero);
bool chunk_in_dss(void *chunk);
bool chunk_dss_boot(void);
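
[Note: dss_prec_t selects between sbrk(2)-backed (DSS) and mmap(2)-backed chunks: "primary" tries the DSS first, "secondary" (the compiled-in default above) tries mmap first, and "disabled" never touches the DSS. A hedged usage sketch reading the setting back through the mallctl interface, assuming the "opt.dss" name this version exports:]

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	const char *dss;
	size_t sz = sizeof(dss);

	/* Reports one of dss_prec_names[]: "disabled", "primary",
	 * or "secondary". */
	if (je_mallctl("opt.dss", &dss, &sz, NULL, 0) == 0)
		printf("dss precedence: %s\n", dss);
	return (0);
}
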
diff --git a/deps/jemalloc/include/jemalloc/internal/chunk_mmap.h b/deps/jemalloc/include/jemalloc/internal/chunk_mmap.h
index b29f39e9e..f24abac75 100644
--- a/deps/jemalloc/include/jemalloc/internal/chunk_mmap.h
+++ b/deps/jemalloc/include/jemalloc/internal/chunk_mmap.h
@@ -9,7 +9,7 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-void pages_purge(void *addr, size_t length);
+bool pages_purge(void *addr, size_t length);
void *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);
bool chunk_dealloc_mmap(void *chunk, size_t size);
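
[Note: pages_purge() changing from void to bool matters to the chunk-recycling code: the caller needs to know whether purged pages will still read back as zeroes. A sketch under that assumption — this is the inferred contract, not the verbatim implementation:]

#include <stdbool.h>
#include <sys/mman.h>

/* Advise the kernel to reclaim [addr, addr+length) and report whether
 * the contents are now undefined ("unzeroed"). */
static bool
pages_purge_sketch(void *addr, size_t length)
{
	bool unzeroed;

#if defined(MADV_DONTNEED)
	/* Linux: purged anonymous pages refault as zero-filled. */
	madvise(addr, length, MADV_DONTNEED);
	unzeroed = false;
#elif defined(MADV_FREE)
	/* BSD: old contents may reappear until actual reclaim. */
	madvise(addr, length, MADV_FREE);
	unzeroed = true;
#else
	(void)addr;
	(void)length;
	unzeroed = true;
#endif
	return (unzeroed);
}
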
diff --git a/deps/jemalloc/include/jemalloc/internal/ctl.h b/deps/jemalloc/include/jemalloc/internal/ctl.h
index adf3827f0..0ffecc5f2 100644
--- a/deps/jemalloc/include/jemalloc/internal/ctl.h
+++ b/deps/jemalloc/include/jemalloc/internal/ctl.h
@@ -33,6 +33,7 @@ struct ctl_indexed_node_s {
struct ctl_arena_stats_s {
bool initialized;
unsigned nthreads;
+ const char *dss;
size_t pactive;
size_t pdirty;
arena_stats_t astats;
@@ -61,6 +62,7 @@ struct ctl_stats_s {
uint64_t nmalloc; /* huge_nmalloc */
uint64_t ndalloc; /* huge_ndalloc */
} huge;
+ unsigned narenas;
ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
};
@@ -75,6 +77,9 @@ int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp);
int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen);
bool ctl_boot(void);
+void ctl_prefork(void);
+void ctl_postfork_parent(void);
+void ctl_postfork_child(void);
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
diff --git a/deps/jemalloc/include/jemalloc/internal/extent.h b/deps/jemalloc/include/jemalloc/internal/extent.h
index 36af8be89..ba95ca816 100644
--- a/deps/jemalloc/include/jemalloc/internal/extent.h
+++ b/deps/jemalloc/include/jemalloc/internal/extent.h
@@ -23,6 +23,9 @@ struct extent_node_s {
/* Total region size. */
size_t size;
+
+ /* True if zero-filled; used by chunk recycling code. */
+ bool zeroed;
};
typedef rb_tree(extent_node_t) extent_tree_t;
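
[Note: the new zeroed flag lets the recycling code skip a redundant memset(3) when a cached extent is already known to be zero-filled. A simplified fragment of the consuming logic — an assumption drawn from the comment, not the committed code:]

#include <stdbool.h>
#include <string.h>

/* Zero a recycled chunk only if the caller wants zeroed memory and the
 * extent node does not already guarantee it. */
static void
recycle_maybe_zero(void *ret, size_t size, bool want_zero, bool node_zeroed)
{

	if (want_zero && node_zeroed == false)
		memset(ret, 0, size);
}
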
diff --git a/deps/jemalloc/include/jemalloc/internal/huge.h b/deps/jemalloc/include/jemalloc/internal/huge.h
index e8513c933..d987d3707 100644
--- a/deps/jemalloc/include/jemalloc/internal/huge.h
+++ b/deps/jemalloc/include/jemalloc/internal/huge.h
@@ -22,7 +22,7 @@ void *huge_palloc(size_t size, size_t alignment, bool zero);
void *huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra);
void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
- size_t alignment, bool zero);
+ size_t alignment, bool zero, bool try_tcache_dalloc);
void huge_dalloc(void *ptr, bool unmap);
size_t huge_salloc(const void *ptr);
prof_ctx_t *huge_prof_ctx_get(const void *ptr);
diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
index 268cd146f..475821acb 100644
--- a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
@@ -270,6 +270,9 @@ static const bool config_ivsalloc =
# ifdef __arm__
# define LG_QUANTUM 3
# endif
+# ifdef __hppa__
+# define LG_QUANTUM 4
+# endif
# ifdef __mips__
# define LG_QUANTUM 3
# endif
@@ -424,6 +427,7 @@ static const bool config_ivsalloc =
VALGRIND_FREELIKE_BLOCK(ptr, rzsize); \
} while (0)
#else
+#define RUNNING_ON_VALGRIND ((unsigned)0)
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB)
#define VALGRIND_FREELIKE_BLOCK(addr, rzB)
@@ -510,13 +514,19 @@ extern size_t opt_narenas;
/* Number of CPUs. */
extern unsigned ncpus;
-extern malloc_mutex_t arenas_lock; /* Protects arenas initialization. */
+/* Protects arenas initialization (arenas, arenas_total). */
+extern malloc_mutex_t arenas_lock;
/*
* Arenas that are used to service external requests. Not all elements of the
* arenas array are necessarily used; arenas are created lazily as needed.
+ *
+ * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
+ * arenas. arenas[narenas_auto..narenas_total) are only used if the application
+ * takes some action to create them and allocate from them.
*/
extern arena_t **arenas;
-extern unsigned narenas;
+extern unsigned narenas_total;
+extern unsigned narenas_auto; /* Read-only after initialization. */
arena_t *arenas_extend(unsigned ind);
void arenas_cleanup(void *arg);
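
[Note: splitting the old narenas into narenas_total and narenas_auto is what makes application-created arenas possible: indices below narenas_auto are multiplexed across threads automatically, while higher ones exist only if the application asks for them. A hedged sketch of creating such an arena, assuming the "arenas.extend" mallctl this version provides; the returned index can then be fed to ALLOCM_ARENA(), shown further down in jemalloc.h.in:]

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);

	/* Creates an arena in the [narenas_auto, narenas_total) range
	 * and returns its index. */
	if (je_mallctl("arenas.extend", &arena_ind, &sz, NULL, 0) != 0) {
		fprintf(stderr, "arenas.extend failed\n");
		return (1);
	}
	printf("new arena index: %u\n", arena_ind);
	return (0);
}
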
@@ -571,6 +581,7 @@ malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)
size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment);
+unsigned narenas_total_get(void);
arena_t *choose_arena(arena_t *arena);
#endif
@@ -675,6 +686,18 @@ sa2u(size_t size, size_t alignment)
}
}
+JEMALLOC_INLINE unsigned
+narenas_total_get(void)
+{
+ unsigned narenas;
+
+ malloc_mutex_lock(&arenas_lock);
+ narenas = narenas_total;
+ malloc_mutex_unlock(&arenas_lock);
+
+ return (narenas);
+}
+
/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(arena_t *arena)
@@ -710,15 +733,24 @@ choose_arena(arena_t *arena)
#include "jemalloc/internal/quarantine.h"
#ifndef JEMALLOC_ENABLE_INLINE
+void *imallocx(size_t size, bool try_tcache, arena_t *arena);
void *imalloc(size_t size);
+void *icallocx(size_t size, bool try_tcache, arena_t *arena);
void *icalloc(size_t size);
+void *ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
+ arena_t *arena);
void *ipalloc(size_t usize, size_t alignment, bool zero);
size_t isalloc(const void *ptr, bool demote);
size_t ivsalloc(const void *ptr, bool demote);
size_t u2rz(size_t usize);
size_t p2rz(const void *ptr);
+void idallocx(void *ptr, bool try_tcache);
void idalloc(void *ptr);
+void iqallocx(void *ptr, bool try_tcache);
void iqalloc(void *ptr);
+void *irallocx(void *ptr, size_t size, size_t extra, size_t alignment,
+ bool zero, bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc,
+ arena_t *arena);
void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
bool zero, bool no_move);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
@@ -726,29 +758,44 @@ malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE void *
-imalloc(size_t size)
+imallocx(size_t size, bool try_tcache, arena_t *arena)
{
assert(size != 0);
if (size <= arena_maxclass)
- return (arena_malloc(NULL, size, false, true));
+ return (arena_malloc(arena, size, false, try_tcache));
else
return (huge_malloc(size, false));
}
JEMALLOC_INLINE void *
-icalloc(size_t size)
+imalloc(size_t size)
+{
+
+ return (imallocx(size, true, NULL));
+}
+
+JEMALLOC_INLINE void *
+icallocx(size_t size, bool try_tcache, arena_t *arena)
{
if (size <= arena_maxclass)
- return (arena_malloc(NULL, size, true, true));
+ return (arena_malloc(arena, size, true, try_tcache));
else
return (huge_malloc(size, true));
}
JEMALLOC_INLINE void *
-ipalloc(size_t usize, size_t alignment, bool zero)
+icalloc(size_t size)
+{
+
+ return (icallocx(size, true, NULL));
+}
+
+JEMALLOC_INLINE void *
+ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
+ arena_t *arena)
{
void *ret;
@@ -756,11 +803,11 @@ ipalloc(size_t usize, size_t alignment, bool zero)
assert(usize == sa2u(usize, alignment));
if (usize <= arena_maxclass && alignment <= PAGE)
- ret = arena_malloc(NULL, usize, zero, true);
+ ret = arena_malloc(arena, usize, zero, try_tcache);
else {
if (usize <= arena_maxclass) {
- ret = arena_palloc(choose_arena(NULL), usize, alignment,
- zero);
+ ret = arena_palloc(choose_arena(arena), usize,
+ alignment, zero);
} else if (alignment <= chunksize)
ret = huge_malloc(usize, zero);
else
@@ -771,6 +818,13 @@ ipalloc(size_t usize, size_t alignment, bool zero)
return (ret);
}
+JEMALLOC_INLINE void *
+ipalloc(size_t usize, size_t alignment, bool zero)
+{
+
+ return (ipallocx(usize, alignment, zero, true, NULL));
+}
+
/*
* Typical usage:
* void *ptr = [...]
@@ -829,7 +883,7 @@ p2rz(const void *ptr)
}
JEMALLOC_INLINE void
-idalloc(void *ptr)
+idallocx(void *ptr, bool try_tcache)
{
arena_chunk_t *chunk;
@@ -837,24 +891,38 @@ idalloc(void *ptr)
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr)
- arena_dalloc(chunk->arena, chunk, ptr, true);
+ arena_dalloc(chunk->arena, chunk, ptr, try_tcache);
else
huge_dalloc(ptr, true);
}
JEMALLOC_INLINE void
-iqalloc(void *ptr)
+idalloc(void *ptr)
+{
+
+ idallocx(ptr, true);
+}
+
+JEMALLOC_INLINE void
+iqallocx(void *ptr, bool try_tcache)
{
if (config_fill && opt_quarantine)
quarantine(ptr);
else
- idalloc(ptr);
+ idallocx(ptr, try_tcache);
+}
+
+JEMALLOC_INLINE void
+iqalloc(void *ptr)
+{
+
+ iqallocx(ptr, true);
}
JEMALLOC_INLINE void *
-iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
- bool no_move)
+irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
+ bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
{
void *ret;
size_t oldsize;
@@ -877,7 +945,7 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
usize = sa2u(size + extra, alignment);
if (usize == 0)
return (NULL);
- ret = ipalloc(usize, alignment, zero);
+ ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
if (ret == NULL) {
if (extra == 0)
return (NULL);
@@ -885,7 +953,8 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
usize = sa2u(size, alignment);
if (usize == 0)
return (NULL);
- ret = ipalloc(usize, alignment, zero);
+ ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
+ arena);
if (ret == NULL)
return (NULL);
}
@@ -896,7 +965,7 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(ret, ptr, copysize);
- iqalloc(ptr);
+ iqallocx(ptr, try_tcache_dalloc);
return (ret);
}
@@ -910,15 +979,25 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
}
} else {
if (size + extra <= arena_maxclass) {
- return (arena_ralloc(ptr, oldsize, size, extra,
- alignment, zero, true));
+ return (arena_ralloc(arena, ptr, oldsize, size, extra,
+ alignment, zero, try_tcache_alloc,
+ try_tcache_dalloc));
} else {
return (huge_ralloc(ptr, oldsize, size, extra,
- alignment, zero));
+ alignment, zero, try_tcache_dalloc));
}
}
}
+JEMALLOC_INLINE void *
+iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
+ bool no_move)
+{
+
+ return (irallocx(ptr, size, extra, alignment, zero, no_move, true, true,
+ NULL));
+}
+
malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_INLINE, thread_allocated, thread_allocated_t,
THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
diff --git a/deps/jemalloc/include/jemalloc/internal/private_namespace.h b/deps/jemalloc/include/jemalloc/internal/private_namespace.h
index b8166470d..06241cd2f 100644
--- a/deps/jemalloc/include/jemalloc/internal/private_namespace.h
+++ b/deps/jemalloc/include/jemalloc/internal/private_namespace.h
@@ -12,6 +12,8 @@
#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large)
#define arena_dalloc_large_locked JEMALLOC_N(arena_dalloc_large_locked)
#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small)
+#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get)
+#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set)
#define arena_malloc JEMALLOC_N(arena_malloc)
#define arena_malloc_large JEMALLOC_N(arena_malloc_large)
#define arena_malloc_small JEMALLOC_N(arena_malloc_small)
@@ -51,14 +53,13 @@
#define arena_stats_merge JEMALLOC_N(arena_stats_merge)
#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
#define arenas JEMALLOC_N(arenas)
-#define arenas_bin_i_index JEMALLOC_N(arenas_bin_i_index)
#define arenas_booted JEMALLOC_N(arenas_booted)
#define arenas_cleanup JEMALLOC_N(arenas_cleanup)
#define arenas_extend JEMALLOC_N(arenas_extend)
#define arenas_initialized JEMALLOC_N(arenas_initialized)
#define arenas_lock JEMALLOC_N(arenas_lock)
-#define arenas_lrun_i_index JEMALLOC_N(arenas_lrun_i_index)
#define arenas_tls JEMALLOC_N(arenas_tls)
+#define arenas_tsd JEMALLOC_N(arenas_tsd)
#define arenas_tsd_boot JEMALLOC_N(arenas_tsd_boot)
#define arenas_tsd_cleanup_wrapper JEMALLOC_N(arenas_tsd_cleanup_wrapper)
#define arenas_tsd_get JEMALLOC_N(arenas_tsd_get)
@@ -101,9 +102,15 @@
#define chunk_dss_boot JEMALLOC_N(chunk_dss_boot)
#define chunk_dss_postfork_child JEMALLOC_N(chunk_dss_postfork_child)
#define chunk_dss_postfork_parent JEMALLOC_N(chunk_dss_postfork_parent)
+#define chunk_dss_prec_get JEMALLOC_N(chunk_dss_prec_get)
+#define chunk_dss_prec_set JEMALLOC_N(chunk_dss_prec_set)
#define chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork)
#define chunk_in_dss JEMALLOC_N(chunk_in_dss)
#define chunk_npages JEMALLOC_N(chunk_npages)
+#define chunk_postfork_child JEMALLOC_N(chunk_postfork_child)
+#define chunk_postfork_parent JEMALLOC_N(chunk_postfork_parent)
+#define chunk_prefork JEMALLOC_N(chunk_prefork)
+#define chunk_unmap JEMALLOC_N(chunk_unmap)
#define chunks_mtx JEMALLOC_N(chunks_mtx)
#define chunks_rtree JEMALLOC_N(chunks_rtree)
#define chunksize JEMALLOC_N(chunksize)
@@ -129,6 +136,10 @@
#define ctl_bymib JEMALLOC_N(ctl_bymib)
#define ctl_byname JEMALLOC_N(ctl_byname)
#define ctl_nametomib JEMALLOC_N(ctl_nametomib)
+#define ctl_postfork_child JEMALLOC_N(ctl_postfork_child)
+#define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent)
+#define ctl_prefork JEMALLOC_N(ctl_prefork)
+#define dss_prec_names JEMALLOC_N(dss_prec_names)
#define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first)
#define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert)
#define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter)
@@ -161,6 +172,7 @@
#define extent_tree_szad_reverse_iter_recurse JEMALLOC_N(extent_tree_szad_reverse_iter_recurse)
#define extent_tree_szad_reverse_iter_start JEMALLOC_N(extent_tree_szad_reverse_iter_start)
#define extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search)
+#define get_errno JEMALLOC_N(get_errno)
#define hash JEMALLOC_N(hash)
#define huge_allocated JEMALLOC_N(huge_allocated)
#define huge_boot JEMALLOC_N(huge_boot)
@@ -180,11 +192,17 @@
#define huge_salloc JEMALLOC_N(huge_salloc)
#define iallocm JEMALLOC_N(iallocm)
#define icalloc JEMALLOC_N(icalloc)
+#define icallocx JEMALLOC_N(icallocx)
#define idalloc JEMALLOC_N(idalloc)
+#define idallocx JEMALLOC_N(idallocx)
#define imalloc JEMALLOC_N(imalloc)
+#define imallocx JEMALLOC_N(imallocx)
#define ipalloc JEMALLOC_N(ipalloc)
+#define ipallocx JEMALLOC_N(ipallocx)
#define iqalloc JEMALLOC_N(iqalloc)
+#define iqallocx JEMALLOC_N(iqallocx)
#define iralloc JEMALLOC_N(iralloc)
+#define irallocx JEMALLOC_N(irallocx)
#define isalloc JEMALLOC_N(isalloc)
#define isthreaded JEMALLOC_N(isthreaded)
#define ivsalloc JEMALLOC_N(ivsalloc)
@@ -212,7 +230,9 @@
#define map_bias JEMALLOC_N(map_bias)
#define mb_write JEMALLOC_N(mb_write)
#define mutex_boot JEMALLOC_N(mutex_boot)
-#define narenas JEMALLOC_N(narenas)
+#define narenas_auto JEMALLOC_N(narenas_auto)
+#define narenas_total JEMALLOC_N(narenas_total)
+#define narenas_total_get JEMALLOC_N(narenas_total_get)
#define ncpus JEMALLOC_N(ncpus)
#define nhbins JEMALLOC_N(nhbins)
#define opt_abort JEMALLOC_N(opt_abort)
@@ -254,6 +274,9 @@
#define prof_lookup JEMALLOC_N(prof_lookup)
#define prof_malloc JEMALLOC_N(prof_malloc)
#define prof_mdump JEMALLOC_N(prof_mdump)
+#define prof_postfork_child JEMALLOC_N(prof_postfork_child)
+#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent)
+#define prof_prefork JEMALLOC_N(prof_prefork)
#define prof_promote JEMALLOC_N(prof_promote)
#define prof_realloc JEMALLOC_N(prof_realloc)
#define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update)
@@ -264,6 +287,7 @@
#define prof_tdata_init JEMALLOC_N(prof_tdata_init)
#define prof_tdata_initialized JEMALLOC_N(prof_tdata_initialized)
#define prof_tdata_tls JEMALLOC_N(prof_tdata_tls)
+#define prof_tdata_tsd JEMALLOC_N(prof_tdata_tsd)
#define prof_tdata_tsd_boot JEMALLOC_N(prof_tdata_tsd_boot)
#define prof_tdata_tsd_cleanup_wrapper JEMALLOC_N(prof_tdata_tsd_cleanup_wrapper)
#define prof_tdata_tsd_get JEMALLOC_N(prof_tdata_tsd_get)
@@ -278,12 +302,13 @@
#define rtree_get JEMALLOC_N(rtree_get)
#define rtree_get_locked JEMALLOC_N(rtree_get_locked)
#define rtree_new JEMALLOC_N(rtree_new)
+#define rtree_postfork_child JEMALLOC_N(rtree_postfork_child)
+#define rtree_postfork_parent JEMALLOC_N(rtree_postfork_parent)
+#define rtree_prefork JEMALLOC_N(rtree_prefork)
#define rtree_set JEMALLOC_N(rtree_set)
#define s2u JEMALLOC_N(s2u)
#define sa2u JEMALLOC_N(sa2u)
-#define stats_arenas_i_bins_j_index JEMALLOC_N(stats_arenas_i_bins_j_index)
-#define stats_arenas_i_index JEMALLOC_N(stats_arenas_i_index)
-#define stats_arenas_i_lruns_j_index JEMALLOC_N(stats_arenas_i_lruns_j_index)
+#define set_errno JEMALLOC_N(set_errno)
#define stats_cactive JEMALLOC_N(stats_cactive)
#define stats_cactive_add JEMALLOC_N(stats_cactive_add)
#define stats_cactive_get JEMALLOC_N(stats_cactive_get)
@@ -311,6 +336,7 @@
#define tcache_enabled_initialized JEMALLOC_N(tcache_enabled_initialized)
#define tcache_enabled_set JEMALLOC_N(tcache_enabled_set)
#define tcache_enabled_tls JEMALLOC_N(tcache_enabled_tls)
+#define tcache_enabled_tsd JEMALLOC_N(tcache_enabled_tsd)
#define tcache_enabled_tsd_boot JEMALLOC_N(tcache_enabled_tsd_boot)
#define tcache_enabled_tsd_cleanup_wrapper JEMALLOC_N(tcache_enabled_tsd_cleanup_wrapper)
#define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get)
@@ -325,6 +351,7 @@
#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge)
#define tcache_thread_cleanup JEMALLOC_N(tcache_thread_cleanup)
#define tcache_tls JEMALLOC_N(tcache_tls)
+#define tcache_tsd JEMALLOC_N(tcache_tsd)
#define tcache_tsd_boot JEMALLOC_N(tcache_tsd_boot)
#define tcache_tsd_cleanup_wrapper JEMALLOC_N(tcache_tsd_cleanup_wrapper)
#define tcache_tsd_get JEMALLOC_N(tcache_tsd_get)
@@ -332,6 +359,7 @@
#define thread_allocated_booted JEMALLOC_N(thread_allocated_booted)
#define thread_allocated_initialized JEMALLOC_N(thread_allocated_initialized)
#define thread_allocated_tls JEMALLOC_N(thread_allocated_tls)
+#define thread_allocated_tsd JEMALLOC_N(thread_allocated_tsd)
#define thread_allocated_tsd_boot JEMALLOC_N(thread_allocated_tsd_boot)
#define thread_allocated_tsd_cleanup_wrapper JEMALLOC_N(thread_allocated_tsd_cleanup_wrapper)
#define thread_allocated_tsd_get JEMALLOC_N(thread_allocated_tsd_get)
diff --git a/deps/jemalloc/include/jemalloc/internal/prof.h b/deps/jemalloc/include/jemalloc/internal/prof.h
index c3e3f9e4b..47f22ad2d 100644
--- a/deps/jemalloc/include/jemalloc/internal/prof.h
+++ b/deps/jemalloc/include/jemalloc/internal/prof.h
@@ -223,6 +223,9 @@ void prof_tdata_cleanup(void *arg);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(void);
+void prof_prefork(void);
+void prof_postfork_parent(void);
+void prof_postfork_child(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
@@ -506,7 +509,7 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
if ((uintptr_t)cnt > (uintptr_t)1U) {
prof_ctx_set(ptr, cnt->ctx);
cnt->epoch++;
- } else
+ } else if (ptr != NULL)
prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
/*********/
mb_write();
diff --git a/deps/jemalloc/include/jemalloc/internal/rtree.h b/deps/jemalloc/include/jemalloc/internal/rtree.h
index 95d6355a5..9bd98548c 100644
--- a/deps/jemalloc/include/jemalloc/internal/rtree.h
+++ b/deps/jemalloc/include/jemalloc/internal/rtree.h
@@ -36,6 +36,9 @@ struct rtree_s {
#ifdef JEMALLOC_H_EXTERNS
rtree_t *rtree_new(unsigned bits);
+void rtree_prefork(rtree_t *rtree);
+void rtree_postfork_parent(rtree_t *rtree);
+void rtree_postfork_child(rtree_t *rtree);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/deps/jemalloc/include/jemalloc/jemalloc.h.in b/deps/jemalloc/include/jemalloc/jemalloc.h.in
index ad0694858..31b1304a2 100644
--- a/deps/jemalloc/include/jemalloc/jemalloc.h.in
+++ b/deps/jemalloc/include/jemalloc/jemalloc.h.in
@@ -25,6 +25,8 @@ extern "C" {
#endif
#define ALLOCM_ZERO ((int)0x40)
#define ALLOCM_NO_MOVE ((int)0x80)
+/* Bias arena index bits so that 0 encodes "ALLOCM_ARENA() unspecified". */
+#define ALLOCM_ARENA(a) ((int)(((a)+1) << 8))
#define ALLOCM_SUCCESS 0
#define ALLOCM_ERR_OOM 1
@@ -59,7 +61,8 @@ JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size)
JEMALLOC_EXPORT void * je_valloc(size_t size) JEMALLOC_ATTR(malloc);
#endif
-JEMALLOC_EXPORT size_t je_malloc_usable_size(const void *ptr);
+JEMALLOC_EXPORT size_t je_malloc_usable_size(
+ JEMALLOC_USABLE_SIZE_CONST void *ptr);
JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *,
const char *), void *je_cbopaque, const char *opts);
JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp,
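
[Note: ALLOCM_ARENA(a) stores a+1 in bits 8 and up so that a flags word of 0 still means "no explicit arena", which keeps old callers working. A usage sketch, assuming the experimental allocm()/dallocm() entry points are available (jemalloc builds of this era guard them with JEMALLOC_EXPERIMENTAL):]

#define JEMALLOC_EXPERIMENTAL
#include <jemalloc/jemalloc.h>

int
main(void)
{
	void *p;
	unsigned arena_ind = 1;	/* e.g. obtained from "arenas.extend" */

	/* ALLOCM_ARENA(1) encodes (1+1)<<8 == 0x200; passing flags == 0
	 * would leave the arena choice to the usual per-thread logic. */
	if (je_allocm(&p, NULL, 4096, ALLOCM_ARENA(arena_ind)) !=
	    ALLOCM_SUCCESS)
		return (1);
	je_dallocm(p, 0);
	return (0);
}
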
diff --git a/deps/jemalloc/include/jemalloc/jemalloc_defs.h.in b/deps/jemalloc/include/jemalloc/jemalloc_defs.h.in
index c469142a5..1cd60254a 100644
--- a/deps/jemalloc/include/jemalloc/jemalloc_defs.h.in
+++ b/deps/jemalloc/include/jemalloc/jemalloc_defs.h.in
@@ -222,6 +222,15 @@
#undef JEMALLOC_OVERRIDE_VALLOC
/*
+ * At least Linux omits the "const" in:
+ *
+ * size_t malloc_usable_size(const void *ptr);
+ *
+ * Match the operating system's prototype.
+ */
+#undef JEMALLOC_USABLE_SIZE_CONST
+
+/*
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
*/
#undef JEMALLOC_ZONE
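
[Note: JEMALLOC_USABLE_SIZE_CONST is filled in by configure with either const or nothing, so the je_malloc_usable_size() prototype above textually matches whatever the platform's libc declares. An illustration of the Linux expansion, assuming configure defined the macro empty:]

#include <stddef.h>

#define JEMALLOC_USABLE_SIZE_CONST	/* empty on Linux/glibc */

size_t je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr);
/* After preprocessing: size_t je_malloc_usable_size(void *ptr);
 * On other systems the macro expands to "const" and the const-qualified
 * prototype is used instead. */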