Diffstat (limited to 'deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in')
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in  781
1 file changed, 462 insertions(+), 319 deletions(-)
diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
index a44f0978a..268cd146f 100644
--- a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
@@ -1,17 +1,33 @@
-#include <sys/mman.h>
-#include <sys/param.h>
-#include <sys/time.h>
+#ifndef JEMALLOC_INTERNAL_H
+#define JEMALLOC_INTERNAL_H
+#include <math.h>
+#ifdef _WIN32
+# include <windows.h>
+# define ENOENT ERROR_PATH_NOT_FOUND
+# define EINVAL ERROR_BAD_ARGUMENTS
+# define EAGAIN ERROR_OUTOFMEMORY
+# define EPERM ERROR_WRITE_FAULT
+# define EFAULT ERROR_INVALID_ADDRESS
+# define ENOMEM ERROR_NOT_ENOUGH_MEMORY
+# undef ERANGE
+# define ERANGE ERROR_INVALID_DATA
+#else
+# include <sys/param.h>
+# include <sys/mman.h>
+# include <sys/syscall.h>
+# if !defined(SYS_write) && defined(__NR_write)
+# define SYS_write __NR_write
+# endif
+# include <sys/uio.h>
+# include <pthread.h>
+# include <errno.h>
+#endif
#include <sys/types.h>
-#include <sys/sysctl.h>
-#include <sys/uio.h>
-#include <errno.h>
#include <limits.h>
#ifndef SIZE_T_MAX
# define SIZE_T_MAX SIZE_MAX
#endif
-#include <pthread.h>
-#include <sched.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
@@ -25,16 +41,156 @@
#include <string.h>
#include <strings.h>
#include <ctype.h>
-#include <unistd.h>
+#ifdef _MSC_VER
+# include <io.h>
+typedef intptr_t ssize_t;
+# define PATH_MAX 1024
+# define STDERR_FILENO 2
+# define __func__ __FUNCTION__
+/* Disable warnings about deprecated system functions */
+# pragma warning(disable: 4996)
+#else
+# include <unistd.h>
+#endif
#include <fcntl.h>
-#include <pthread.h>
-#include <math.h>
-#define JEMALLOC_MANGLE
+#define JEMALLOC_NO_DEMANGLE
#include "../jemalloc@install_suffix@.h"
+#ifdef JEMALLOC_UTRACE
+#include <sys/ktrace.h>
+#endif
+
+#ifdef JEMALLOC_VALGRIND
+#include <valgrind/valgrind.h>
+#include <valgrind/memcheck.h>
+#endif
+
#include "jemalloc/internal/private_namespace.h"
+#ifdef JEMALLOC_CC_SILENCE
+#define UNUSED JEMALLOC_ATTR(unused)
+#else
+#define UNUSED
+#endif
+
+static const bool config_debug =
+#ifdef JEMALLOC_DEBUG
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_dss =
+#ifdef JEMALLOC_DSS
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_fill =
+#ifdef JEMALLOC_FILL
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_lazy_lock =
+#ifdef JEMALLOC_LAZY_LOCK
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_prof =
+#ifdef JEMALLOC_PROF
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_prof_libgcc =
+#ifdef JEMALLOC_PROF_LIBGCC
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_prof_libunwind =
+#ifdef JEMALLOC_PROF_LIBUNWIND
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_mremap =
+#ifdef JEMALLOC_MREMAP
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_munmap =
+#ifdef JEMALLOC_MUNMAP
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_stats =
+#ifdef JEMALLOC_STATS
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_tcache =
+#ifdef JEMALLOC_TCACHE
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_tls =
+#ifdef JEMALLOC_TLS
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_utrace =
+#ifdef JEMALLOC_UTRACE
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_valgrind =
+#ifdef JEMALLOC_VALGRIND
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_xmalloc =
+#ifdef JEMALLOC_XMALLOC
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_ivsalloc =
+#ifdef JEMALLOC_IVSALLOC
+ true
+#else
+ false
+#endif
+ ;
+
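[Editor's note: the config_* constants added above replace scattered #ifdef JEMALLOC_* blocks at call sites. Because they are compile-time constants, the optimizer removes dead branches, while the guarded code is still parsed and type-checked in every build configuration. A minimal, self-contained sketch of the idiom follows; the function and message are illustrative only and are not part of this diff.]

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the configure-generated flag; assumed unset in this sketch. */
/* #define JEMALLOC_DEBUG */

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;

/* Hypothetical caller: the branch is eliminated when config_debug is false. */
static void
example_check(const void *ptr)
{

	if (config_debug && ptr == NULL)
		fprintf(stderr, "<jemalloc>: unexpected NULL pointer\n");
}

int
main(void)
{

	example_check("ok");
	return (0);
}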
+#ifdef JEMALLOC_ATOMIC9
+#include <machine/atomic.h>
+#endif
+
#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif
@@ -46,48 +202,11 @@
#include <malloc/malloc.h>
#endif
-#ifdef JEMALLOC_LAZY_LOCK
-#include <dlfcn.h>
-#endif
-
#define RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"
-extern void (*JEMALLOC_P(malloc_message))(void *wcbopaque, const char *s);
-
-/*
- * Define a custom assert() in order to reduce the chances of deadlock during
- * assertion failure.
- */
-#ifndef assert
-# ifdef JEMALLOC_DEBUG
-# define assert(e) do { \
- if (!(e)) { \
- char line_buf[UMAX2S_BUFSIZE]; \
- malloc_write("<jemalloc>: "); \
- malloc_write(__FILE__); \
- malloc_write(":"); \
- malloc_write(u2s(__LINE__, 10, line_buf)); \
- malloc_write(": Failed assertion: "); \
- malloc_write("\""); \
- malloc_write(#e); \
- malloc_write("\"\n"); \
- abort(); \
- } \
-} while (0)
-# else
-# define assert(e)
-# endif
-#endif
-
-#ifdef JEMALLOC_DEBUG
-# define dassert(e) assert(e)
-#else
-# define dassert(e)
-#endif
-
/*
* jemalloc can conceptually be broken into components (arena, tcache, etc.),
* but there are circular dependencies that cannot be broken without
@@ -119,38 +238,56 @@ extern void (*JEMALLOC_P(malloc_message))(void *wcbopaque, const char *s);
#else
# define JEMALLOC_ENABLE_INLINE
# define JEMALLOC_INLINE static inline
+# ifdef _MSC_VER
+# define inline _inline
+# endif
#endif
-/* Size of stack-allocated buffer passed to buferror(). */
-#define BUFERROR_BUF 64
+/* Smallest size class to support. */
+#define LG_TINY_MIN 3
+#define TINY_MIN (1U << LG_TINY_MIN)
-/* Minimum alignment of allocations is 2^LG_QUANTUM bytes. */
-#ifdef __i386__
-# define LG_QUANTUM 4
-#endif
-#ifdef __ia64__
-# define LG_QUANTUM 4
-#endif
-#ifdef __alpha__
-# define LG_QUANTUM 4
-#endif
-#ifdef __sparc64__
-# define LG_QUANTUM 4
-#endif
-#if (defined(__amd64__) || defined(__x86_64__))
-# define LG_QUANTUM 4
-#endif
-#ifdef __arm__
-# define LG_QUANTUM 3
-#endif
-#ifdef __mips__
-# define LG_QUANTUM 3
-#endif
-#ifdef __powerpc__
-# define LG_QUANTUM 4
-#endif
-#ifdef __s390x__
-# define LG_QUANTUM 4
+/*
+ * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
+ * classes).
+ */
+#ifndef LG_QUANTUM
+# if (defined(__i386__) || defined(_M_IX86))
+# define LG_QUANTUM 4
+# endif
+# ifdef __ia64__
+# define LG_QUANTUM 4
+# endif
+# ifdef __alpha__
+# define LG_QUANTUM 4
+# endif
+# ifdef __sparc64__
+# define LG_QUANTUM 4
+# endif
+# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
+# define LG_QUANTUM 4
+# endif
+# ifdef __arm__
+# define LG_QUANTUM 3
+# endif
+# ifdef __mips__
+# define LG_QUANTUM 3
+# endif
+# ifdef __powerpc__
+# define LG_QUANTUM 4
+# endif
+# ifdef __s390x__
+# define LG_QUANTUM 4
+# endif
+# ifdef __SH4__
+# define LG_QUANTUM 4
+# endif
+# ifdef __tile__
+# define LG_QUANTUM 4
+# endif
+# ifndef LG_QUANTUM
+# error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
+# endif
#endif
#define QUANTUM ((size_t)(1U << LG_QUANTUM))
@@ -164,67 +301,149 @@ extern void (*JEMALLOC_P(malloc_message))(void *wcbopaque, const char *s);
#define LONG_MASK (LONG - 1)
/* Return the smallest long multiple that is >= a. */
-#define LONG_CEILING(a) \
+#define LONG_CEILING(a) \
(((a) + LONG_MASK) & ~LONG_MASK)
#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
#define PTR_MASK (SIZEOF_PTR - 1)
/* Return the smallest (void *) multiple that is >= a. */
-#define PTR_CEILING(a) \
+#define PTR_CEILING(a) \
(((a) + PTR_MASK) & ~PTR_MASK)
/*
* Maximum size of L1 cache line. This is used to avoid cache line aliasing.
* In addition, this controls the spacing of cacheline-spaced size classes.
+ *
+ * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
+ * only handle raw constants.
*/
#define LG_CACHELINE 6
-#define CACHELINE ((size_t)(1U << LG_CACHELINE))
+#define CACHELINE 64
#define CACHELINE_MASK (CACHELINE - 1)
/* Return the smallest cacheline multiple that is >= s. */
#define CACHELINE_CEILING(s) \
(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
-/*
- * Page size. STATIC_PAGE_SHIFT is determined by the configure script. If
- * DYNAMIC_PAGE_SHIFT is enabled, only use the STATIC_PAGE_* macros where
- * compile-time values are required for the purposes of defining data
- * structures.
- */
-#define STATIC_PAGE_SIZE ((size_t)(1U << STATIC_PAGE_SHIFT))
-#define STATIC_PAGE_MASK ((size_t)(STATIC_PAGE_SIZE - 1))
-
-#ifdef PAGE_SHIFT
-# undef PAGE_SHIFT
-#endif
-#ifdef PAGE_SIZE
-# undef PAGE_SIZE
-#endif
+/* Page size. STATIC_PAGE_SHIFT is determined by the configure script. */
#ifdef PAGE_MASK
# undef PAGE_MASK
#endif
-
-#ifdef DYNAMIC_PAGE_SHIFT
-# define PAGE_SHIFT lg_pagesize
-# define PAGE_SIZE pagesize
-# define PAGE_MASK pagesize_mask
-#else
-# define PAGE_SHIFT STATIC_PAGE_SHIFT
-# define PAGE_SIZE STATIC_PAGE_SIZE
-# define PAGE_MASK STATIC_PAGE_MASK
-#endif
+#define LG_PAGE STATIC_PAGE_SHIFT
+#define PAGE ((size_t)(1U << STATIC_PAGE_SHIFT))
+#define PAGE_MASK ((size_t)(PAGE - 1))
/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
(((s) + PAGE_MASK) & ~PAGE_MASK)
+/* Return the nearest aligned address at or below a. */
+#define ALIGNMENT_ADDR2BASE(a, alignment) \
+ ((void *)((uintptr_t)(a) & (-(alignment))))
+
+/* Return the offset between a and the nearest aligned address at or below a. */
+#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
+ ((size_t)((uintptr_t)(a) & (alignment - 1)))
+
+/* Return the smallest alignment multiple that is >= s. */
+#define ALIGNMENT_CEILING(s, alignment) \
+ (((s) + (alignment - 1)) & (-(alignment)))
+
+/* Declare a variable length array */
+#if __STDC_VERSION__ < 199901L
+# ifdef _MSC_VER
+# include <malloc.h>
+# define alloca _alloca
+# else
+# include <alloca.h>
+# endif
+# define VARIABLE_ARRAY(type, name, count) \
+ type *name = alloca(sizeof(type) * count)
+#else
+# define VARIABLE_ARRAY(type, name, count) type name[count]
+#endif
+
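[Editor's note: the new alignment helpers reduce to simple mask arithmetic and require alignment to be a power of two. A small standalone check follows; the macros are copied from the hunk above, while the numeric values are chosen purely for illustration.]

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define ALIGNMENT_ADDR2BASE(a, alignment)				\
	((void *)((uintptr_t)(a) & (-(alignment))))
#define ALIGNMENT_ADDR2OFFSET(a, alignment)				\
	((size_t)((uintptr_t)(a) & (alignment - 1)))
#define ALIGNMENT_CEILING(s, alignment)					\
	(((s) + (alignment - 1)) & (-(alignment)))

int
main(void)
{
	size_t alignment = 64;	/* Must be a power of two. */

	/* 100 rounded up to the next multiple of 64 is 128. */
	assert(ALIGNMENT_CEILING((size_t)100, alignment) == 128);
	/* 0x1234 is an illustrative address value, not a real pointer. */
	assert(ALIGNMENT_ADDR2OFFSET((void *)(uintptr_t)0x1234, alignment) ==
	    0x34);
	assert(ALIGNMENT_ADDR2BASE((void *)(uintptr_t)0x1234, alignment) ==
	    (void *)(uintptr_t)0x1200);
	return (0);
}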
+#ifdef JEMALLOC_VALGRIND
+/*
+ * The JEMALLOC_VALGRIND_*() macros must be macros rather than functions
+ * so that when Valgrind reports errors, there are no extra stack frames
+ * in the backtraces.
+ *
+ * The size that is reported to valgrind must be consistent through a chain of
+ * malloc..realloc..realloc calls. Request size isn't recorded anywhere in
+ * jemalloc, so it is critical that all callers of these macros provide usize
+ * rather than request size. As a result, buffer overflow detection is
+ * technically weakened for the standard API, though it is generally accepted
+ * practice to consider any extra bytes reported by malloc_usable_size() as
+ * usable space.
+ */
+#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \
+ if (config_valgrind && opt_valgrind && cond) \
+ VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \
+} while (0)
+#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
+ old_rzsize, zero) do { \
+ if (config_valgrind && opt_valgrind) { \
+ size_t rzsize = p2rz(ptr); \
+ \
+ if (ptr == old_ptr) { \
+ VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
+ usize, rzsize); \
+ if (zero && old_usize < usize) { \
+ VALGRIND_MAKE_MEM_DEFINED( \
+ (void *)((uintptr_t)ptr + \
+ old_usize), usize - old_usize); \
+ } \
+ } else { \
+ if (old_ptr != NULL) { \
+ VALGRIND_FREELIKE_BLOCK(old_ptr, \
+ old_rzsize); \
+ } \
+ if (ptr != NULL) { \
+ size_t copy_size = (old_usize < usize) \
+ ? old_usize : usize; \
+ size_t tail_size = usize - copy_size; \
+ VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \
+ rzsize, false); \
+ if (copy_size > 0) { \
+ VALGRIND_MAKE_MEM_DEFINED(ptr, \
+ copy_size); \
+ } \
+ if (zero && tail_size > 0) { \
+ VALGRIND_MAKE_MEM_DEFINED( \
+ (void *)((uintptr_t)ptr + \
+ copy_size), tail_size); \
+ } \
+ } \
+ } \
+ } \
+} while (0)
+#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \
+ if (config_valgrind && opt_valgrind) \
+ VALGRIND_FREELIKE_BLOCK(ptr, rzsize); \
+} while (0)
+#else
+#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
+#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB)
+#define VALGRIND_FREELIKE_BLOCK(addr, rzB)
+#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len)
+#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len)
+#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero)
+#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
+ old_rzsize, zero)
+#define JEMALLOC_VALGRIND_FREE(ptr, rzsize)
+#endif
+
+#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/prn.h"
+#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
@@ -235,21 +454,22 @@ extern void (*JEMALLOC_P(malloc_message))(void *wcbopaque, const char *s);
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
-#ifdef JEMALLOC_ZONE
-#include "jemalloc/internal/zone.h"
-#endif
+#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS
+#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/prn.h"
+#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
@@ -260,66 +480,37 @@ extern void (*JEMALLOC_P(malloc_message))(void *wcbopaque, const char *s);
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
-#ifdef JEMALLOC_ZONE
-#include "jemalloc/internal/zone.h"
-#endif
+#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
-#ifdef JEMALLOC_STATS
typedef struct {
uint64_t allocated;
uint64_t deallocated;
} thread_allocated_t;
-#endif
+/*
+ * The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
+ * argument.
+ */
+#define THREAD_ALLOCATED_INITIALIZER JEMALLOC_CONCAT({0, 0})
#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS
extern bool opt_abort;
-#ifdef JEMALLOC_FILL
extern bool opt_junk;
-#endif
-#ifdef JEMALLOC_SYSV
-extern bool opt_sysv;
-#endif
-#ifdef JEMALLOC_XMALLOC
+extern size_t opt_quarantine;
+extern bool opt_redzone;
+extern bool opt_utrace;
+extern bool opt_valgrind;
extern bool opt_xmalloc;
-#endif
-#ifdef JEMALLOC_FILL
extern bool opt_zero;
-#endif
extern size_t opt_narenas;
-#ifdef DYNAMIC_PAGE_SHIFT
-extern size_t pagesize;
-extern size_t pagesize_mask;
-extern size_t lg_pagesize;
-#endif
-
/* Number of CPUs. */
extern unsigned ncpus;
extern malloc_mutex_t arenas_lock; /* Protects arenas initialization. */
-extern pthread_key_t arenas_tsd;
-#ifndef NO_TLS
-/*
- * Map of pthread_self() --> arenas[???], used for selecting an arena to use
- * for allocations.
- */
-extern __thread arena_t *arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
-# define ARENA_GET() arenas_tls
-# define ARENA_SET(v) do { \
- arenas_tls = (v); \
- pthread_setspecific(arenas_tsd, (void *)(v)); \
-} while (0)
-#else
-# define ARENA_GET() ((arena_t *)pthread_getspecific(arenas_tsd))
-# define ARENA_SET(v) do { \
- pthread_setspecific(arenas_tsd, (void *)(v)); \
-} while (0)
-#endif
-
/*
* Arenas that are used to service external requests. Not all elements of the
* arenas array are necessarily used; arenas are created lazily as needed.
@@ -327,45 +518,22 @@ extern __thread arena_t *arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
extern arena_t **arenas;
extern unsigned narenas;
-#ifdef JEMALLOC_STATS
-# ifndef NO_TLS
-extern __thread thread_allocated_t thread_allocated_tls;
-# define ALLOCATED_GET() (thread_allocated_tls.allocated)
-# define ALLOCATEDP_GET() (&thread_allocated_tls.allocated)
-# define DEALLOCATED_GET() (thread_allocated_tls.deallocated)
-# define DEALLOCATEDP_GET() (&thread_allocated_tls.deallocated)
-# define ALLOCATED_ADD(a, d) do { \
- thread_allocated_tls.allocated += a; \
- thread_allocated_tls.deallocated += d; \
-} while (0)
-# else
-extern pthread_key_t thread_allocated_tsd;
-thread_allocated_t *thread_allocated_get_hard(void);
-
-# define ALLOCATED_GET() (thread_allocated_get()->allocated)
-# define ALLOCATEDP_GET() (&thread_allocated_get()->allocated)
-# define DEALLOCATED_GET() (thread_allocated_get()->deallocated)
-# define DEALLOCATEDP_GET() (&thread_allocated_get()->deallocated)
-# define ALLOCATED_ADD(a, d) do { \
- thread_allocated_t *thread_allocated = thread_allocated_get(); \
- thread_allocated->allocated += (a); \
- thread_allocated->deallocated += (d); \
-} while (0)
-# endif
-#endif
-
arena_t *arenas_extend(unsigned ind);
+void arenas_cleanup(void *arg);
arena_t *choose_arena_hard(void);
-int buferror(int errnum, char *buf, size_t buflen);
void jemalloc_prefork(void);
-void jemalloc_postfork(void);
+void jemalloc_postfork_parent(void);
+void jemalloc_postfork_child(void);
+#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/prn.h"
+#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
@@ -376,21 +544,22 @@ void jemalloc_postfork(void);
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
-#ifdef JEMALLOC_ZONE
-#include "jemalloc/internal/zone.h"
-#endif
+#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES
+#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/prn.h"
+#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
@@ -398,34 +567,20 @@ void jemalloc_postfork(void);
#include "jemalloc/internal/huge.h"
#ifndef JEMALLOC_ENABLE_INLINE
-size_t pow2_ceil(size_t x);
+malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)
+
size_t s2u(size_t size);
-size_t sa2u(size_t size, size_t alignment, size_t *run_size_p);
-void malloc_write(const char *s);
-arena_t *choose_arena(void);
-# if (defined(JEMALLOC_STATS) && defined(NO_TLS))
-thread_allocated_t *thread_allocated_get(void);
-# endif
+size_t sa2u(size_t size, size_t alignment);
+arena_t *choose_arena(arena_t *arena);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
-/* Compute the smallest power of 2 that is >= x. */
-JEMALLOC_INLINE size_t
-pow2_ceil(size_t x)
-{
-
- x--;
- x |= x >> 1;
- x |= x >> 2;
- x |= x >> 4;
- x |= x >> 8;
- x |= x >> 16;
-#if (LG_SIZEOF_PTR == 3)
- x |= x >> 32;
-#endif
- x++;
- return (x);
-}
+/*
+ * Map of pthread_self() --> arenas[???], used for selecting an arena to use
+ * for allocations.
+ */
+malloc_tsd_externs(arenas, arena_t *)
+malloc_tsd_funcs(JEMALLOC_INLINE, arenas, arena_t *, NULL, arenas_cleanup)
/*
* Compute usable size that would result from allocating an object with the
@@ -435,7 +590,7 @@ JEMALLOC_INLINE size_t
s2u(size_t size)
{
- if (size <= small_maxclass)
+ if (size <= SMALL_MAXCLASS)
return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
if (size <= arena_maxclass)
return (PAGE_CEILING(size));
@@ -447,10 +602,12 @@ s2u(size_t size)
* specified size and alignment.
*/
JEMALLOC_INLINE size_t
-sa2u(size_t size, size_t alignment, size_t *run_size_p)
+sa2u(size_t size, size_t alignment)
{
size_t usize;
+ assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
+
/*
* Round size up to the nearest multiple of alignment.
*
@@ -464,12 +621,8 @@ sa2u(size_t size, size_t alignment, size_t *run_size_p)
* 96 | 1100000 | 32
* 144 | 10100000 | 32
* 192 | 11000000 | 64
- *
- * Depending on runtime settings, it is possible that arena_malloc()
- * will further round up to a power of two, but that never causes
- * correctness issues.
*/
- usize = (size + (alignment - 1)) & (-alignment);
+ usize = ALIGNMENT_CEILING(size, alignment);
/*
* (usize < size) protects against the combination of maximal
* alignment and size greater than maximal alignment.
@@ -479,8 +632,8 @@ sa2u(size_t size, size_t alignment, size_t *run_size_p)
return (0);
}
- if (usize <= arena_maxclass && alignment <= PAGE_SIZE) {
- if (usize <= small_maxclass)
+ if (usize <= arena_maxclass && alignment <= PAGE) {
+ if (usize <= SMALL_MAXCLASS)
return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
return (PAGE_CEILING(usize));
} else {
@@ -494,7 +647,7 @@ sa2u(size_t size, size_t alignment, size_t *run_size_p)
usize = PAGE_CEILING(size);
/*
* (usize < size) protects against very large sizes within
- * PAGE_SIZE of SIZE_T_MAX.
+ * PAGE of SIZE_T_MAX.
*
* (usize + alignment < usize) protects against the
* combination of maximal alignment and usize large enough
@@ -512,93 +665,63 @@ sa2u(size_t size, size_t alignment, size_t *run_size_p)
/*
* Calculate the size of the over-size run that arena_palloc()
* would need to allocate in order to guarantee the alignment.
+ * If the run wouldn't fit within a chunk, round up to a huge
+ * allocation size.
*/
- if (usize >= alignment)
- run_size = usize + alignment - PAGE_SIZE;
- else {
- /*
- * It is possible that (alignment << 1) will cause
- * overflow, but it doesn't matter because we also
- * subtract PAGE_SIZE, which in the case of overflow
- * leaves us with a very large run_size. That causes
- * the first conditional below to fail, which means
- * that the bogus run_size value never gets used for
- * anything important.
- */
- run_size = (alignment << 1) - PAGE_SIZE;
- }
- if (run_size_p != NULL)
- *run_size_p = run_size;
-
+ run_size = usize + alignment - PAGE;
if (run_size <= arena_maxclass)
return (PAGE_CEILING(usize));
return (CHUNK_CEILING(usize));
}
}
-/*
- * Wrapper around malloc_message() that avoids the need for
- * JEMALLOC_P(malloc_message)(...) throughout the code.
- */
-JEMALLOC_INLINE void
-malloc_write(const char *s)
-{
-
- JEMALLOC_P(malloc_message)(NULL, s);
-}
-
-/*
- * Choose an arena based on a per-thread value (fast-path code, calls slow-path
- * code if necessary).
- */
+/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
-choose_arena(void)
+choose_arena(arena_t *arena)
{
arena_t *ret;
- ret = ARENA_GET();
- if (ret == NULL) {
+ if (arena != NULL)
+ return (arena);
+
+ if ((ret = *arenas_tsd_get()) == NULL) {
ret = choose_arena_hard();
assert(ret != NULL);
}
return (ret);
}
-
-#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
-JEMALLOC_INLINE thread_allocated_t *
-thread_allocated_get(void)
-{
- thread_allocated_t *thread_allocated = (thread_allocated_t *)
- pthread_getspecific(thread_allocated_tsd);
-
- if (thread_allocated == NULL)
- return (thread_allocated_get_hard());
- return (thread_allocated);
-}
-#endif
#endif
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
+/*
+ * Include arena.h twice in order to resolve circular dependencies with
+ * tcache.h.
+ */
+#define JEMALLOC_ARENA_INLINE_A
+#include "jemalloc/internal/arena.h"
+#undef JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/tcache.h"
+#define JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/arena.h"
+#undef JEMALLOC_ARENA_INLINE_B
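[Editor's note: the repeated includes above work because each internal header is written against the JEMALLOC_H_TYPES/STRUCTS/EXTERNS/INLINES phase macros defined in this file, and arena.h additionally splits its inlines into two groups so tcache.h can sit between them. A skeletal header showing that layout follows; the identifiers are illustrative only and are not copied from arena.h.]

#ifdef JEMALLOC_H_TYPES
typedef struct example_s example_t;
#endif /* JEMALLOC_H_TYPES */

#ifdef JEMALLOC_H_STRUCTS
struct example_s {
	int	field;
};
#endif /* JEMALLOC_H_STRUCTS */

#ifdef JEMALLOC_H_EXTERNS
extern int example_global;
#endif /* JEMALLOC_H_EXTERNS */

#ifdef JEMALLOC_H_INLINES
#ifdef JEMALLOC_ARENA_INLINE_A
/* Inlines needed by tcache.h are emitted on the first inclusion. */
#endif
#ifdef JEMALLOC_ARENA_INLINE_B
/* Inlines that themselves depend on tcache.h are emitted on the second. */
#endif
#endif /* JEMALLOC_H_INLINES */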
#include "jemalloc/internal/hash.h"
-#ifdef JEMALLOC_ZONE
-#include "jemalloc/internal/zone.h"
-#endif
+#include "jemalloc/internal/quarantine.h"
#ifndef JEMALLOC_ENABLE_INLINE
void *imalloc(size_t size);
void *icalloc(size_t size);
void *ipalloc(size_t usize, size_t alignment, bool zero);
-size_t isalloc(const void *ptr);
-# ifdef JEMALLOC_IVSALLOC
-size_t ivsalloc(const void *ptr);
-# endif
+size_t isalloc(const void *ptr, bool demote);
+size_t ivsalloc(const void *ptr, bool demote);
+size_t u2rz(size_t usize);
+size_t p2rz(const void *ptr);
void idalloc(void *ptr);
+void iqalloc(void *ptr);
void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
bool zero, bool no_move);
+malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
@@ -609,7 +732,7 @@ imalloc(size_t size)
assert(size != 0);
if (size <= arena_maxclass)
- return (arena_malloc(size, false));
+ return (arena_malloc(NULL, size, false, true));
else
return (huge_malloc(size, false));
}
@@ -619,7 +742,7 @@ icalloc(size_t size)
{
if (size <= arena_maxclass)
- return (arena_malloc(size, true));
+ return (arena_malloc(NULL, size, true, true));
else
return (huge_malloc(size, true));
}
@@ -630,75 +753,80 @@ ipalloc(size_t usize, size_t alignment, bool zero)
void *ret;
assert(usize != 0);
- assert(usize == sa2u(usize, alignment, NULL));
+ assert(usize == sa2u(usize, alignment));
- if (usize <= arena_maxclass && alignment <= PAGE_SIZE)
- ret = arena_malloc(usize, zero);
+ if (usize <= arena_maxclass && alignment <= PAGE)
+ ret = arena_malloc(NULL, usize, zero, true);
else {
- size_t run_size
-#ifdef JEMALLOC_CC_SILENCE
- = 0
-#endif
- ;
-
- /*
- * Ideally we would only ever call sa2u() once per aligned
- * allocation request, and the caller of this function has
- * already done so once. However, it's rather burdensome to
- * require every caller to pass in run_size, especially given
- * that it's only relevant to large allocations. Therefore,
- * just call it again here in order to get run_size.
- */
- sa2u(usize, alignment, &run_size);
- if (run_size <= arena_maxclass) {
- ret = arena_palloc(choose_arena(), usize, run_size,
- alignment, zero);
+ if (usize <= arena_maxclass) {
+ ret = arena_palloc(choose_arena(NULL), usize, alignment,
+ zero);
} else if (alignment <= chunksize)
ret = huge_malloc(usize, zero);
else
ret = huge_palloc(usize, alignment, zero);
}
- assert(((uintptr_t)ret & (alignment - 1)) == 0);
+ assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
return (ret);
}
+/*
+ * Typical usage:
+ * void *ptr = [...]
+ * size_t sz = isalloc(ptr, config_prof);
+ */
JEMALLOC_INLINE size_t
-isalloc(const void *ptr)
+isalloc(const void *ptr, bool demote)
{
size_t ret;
arena_chunk_t *chunk;
assert(ptr != NULL);
+ /* Demotion only makes sense if config_prof is true. */
+ assert(config_prof || demote == false);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (chunk != ptr) {
- /* Region. */
- dassert(chunk->arena->magic == ARENA_MAGIC);
-
-#ifdef JEMALLOC_PROF
- ret = arena_salloc_demote(ptr);
-#else
- ret = arena_salloc(ptr);
-#endif
- } else
+ if (chunk != ptr)
+ ret = arena_salloc(ptr, demote);
+ else
ret = huge_salloc(ptr);
return (ret);
}
-#ifdef JEMALLOC_IVSALLOC
JEMALLOC_INLINE size_t
-ivsalloc(const void *ptr)
+ivsalloc(const void *ptr, bool demote)
{
/* Return 0 if ptr is not within a chunk managed by jemalloc. */
if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
return (0);
- return (isalloc(ptr));
+ return (isalloc(ptr, demote));
+}
+
+JEMALLOC_INLINE size_t
+u2rz(size_t usize)
+{
+ size_t ret;
+
+ if (usize <= SMALL_MAXCLASS) {
+ size_t binind = SMALL_SIZE2BIN(usize);
+ ret = arena_bin_info[binind].redzone_size;
+ } else
+ ret = 0;
+
+ return (ret);
+}
+
+JEMALLOC_INLINE size_t
+p2rz(const void *ptr)
+{
+ size_t usize = isalloc(ptr, false);
+
+ return (u2rz(usize));
}
-#endif
JEMALLOC_INLINE void
idalloc(void *ptr)
@@ -709,11 +837,21 @@ idalloc(void *ptr)
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr)
- arena_dalloc(chunk->arena, chunk, ptr);
+ arena_dalloc(chunk->arena, chunk, ptr, true);
else
huge_dalloc(ptr, true);
}
+JEMALLOC_INLINE void
+iqalloc(void *ptr)
+{
+
+ if (config_fill && opt_quarantine)
+ quarantine(ptr);
+ else
+ idalloc(ptr);
+}
+
JEMALLOC_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
bool no_move)
@@ -724,19 +862,19 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
assert(ptr != NULL);
assert(size != 0);
- oldsize = isalloc(ptr);
+ oldsize = isalloc(ptr, config_prof);
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) {
size_t usize, copysize;
/*
- * Existing object alignment is inadquate; allocate new space
+ * Existing object alignment is inadequate; allocate new space
* and copy.
*/
if (no_move)
return (NULL);
- usize = sa2u(size + extra, alignment, NULL);
+ usize = sa2u(size + extra, alignment);
if (usize == 0)
return (NULL);
ret = ipalloc(usize, alignment, zero);
@@ -744,7 +882,7 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
if (extra == 0)
return (NULL);
/* Try again, without extra this time. */
- usize = sa2u(size, alignment, NULL);
+ usize = sa2u(size, alignment);
if (usize == 0)
return (NULL);
ret = ipalloc(usize, alignment, zero);
@@ -758,7 +896,7 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(ret, ptr, copysize);
- idalloc(ptr);
+ iqalloc(ptr);
return (ret);
}
@@ -773,16 +911,21 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
} else {
if (size + extra <= arena_maxclass) {
return (arena_ralloc(ptr, oldsize, size, extra,
- alignment, zero));
+ alignment, zero, true));
} else {
return (huge_ralloc(ptr, oldsize, size, extra,
alignment, zero));
}
}
}
+
+malloc_tsd_externs(thread_allocated, thread_allocated_t)
+malloc_tsd_funcs(JEMALLOC_INLINE, thread_allocated, thread_allocated_t,
+ THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif
#include "jemalloc/internal/prof.h"
#undef JEMALLOC_H_INLINES
/******************************************************************************/
+#endif /* JEMALLOC_INTERNAL_H */