author     antirez <antirez@gmail.com>    2014-06-20 14:59:18 +0200
committer  antirez <antirez@gmail.com>    2014-06-20 14:59:20 +0200
commit     fceef8e0ddfca103b2f0328e3ad9ae522093f2c8
tree       63ce1ad7f4c284fe44e9decb63b9710c1fa52035
parent     fe596d67e3c871cb13b4c909cb4ec435dd738cd8
download   redis-fceef8e0ddfca103b2f0328e3ad9ae522093f2c8.tar.gz
Jemalloc updated to 3.6.0.
Not a single bug in about 3 months, and our previous version was too old (3.2.0).
-rw-r--r--  deps/jemalloc/.gitignore | 72
-rw-r--r--  deps/jemalloc/COPYING | 4
-rw-r--r--  deps/jemalloc/ChangeLog | 172
-rw-r--r--  deps/jemalloc/INSTALL | 32
-rw-r--r--  deps/jemalloc/Makefile.in | 287
-rw-r--r--  deps/jemalloc/README | 18
-rw-r--r--  deps/jemalloc/VERSION | 2
-rwxr-xr-x  deps/jemalloc/bin/pprof | 30
-rwxr-xr-x  deps/jemalloc/config.guess | 184
-rwxr-xr-x  deps/jemalloc/config.sub | 110
-rwxr-xr-x  deps/jemalloc/configure | 1384
-rw-r--r--  deps/jemalloc/configure.ac | 364
-rwxr-xr-x  deps/jemalloc/coverage.sh | 16
-rw-r--r--  deps/jemalloc/doc/jemalloc.3 | 278
-rw-r--r--  deps/jemalloc/doc/jemalloc.html | 408
-rw-r--r--  deps/jemalloc/doc/jemalloc.xml.in | 474
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/arena.h | 218
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/chunk_dss.h | 2
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/ckh.h | 10
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/hash.h | 335
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/huge.h | 14
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in | 290
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in | 205
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h | 51
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/private_namespace.h | 367
-rwxr-xr-x  deps/jemalloc/include/jemalloc/internal/private_namespace.sh | 5
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/private_symbols.txt | 413
-rwxr-xr-x  deps/jemalloc/include/jemalloc/internal/private_unnamespace.sh | 5
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/prng.h | 4
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/prof.h | 106
-rwxr-xr-x  deps/jemalloc/include/jemalloc/internal/public_namespace.sh | 6
-rwxr-xr-x  deps/jemalloc/include/jemalloc/internal/public_unnamespace.sh | 6
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/ql.h | 36
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/qr.h | 22
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/quarantine.h | 43
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/rb.h | 4
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/rtree.h | 50
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/tcache.h | 21
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/tsd.h | 39
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/util.h | 22
-rw-r--r--  deps/jemalloc/include/jemalloc/jemalloc.h.in | 157
-rwxr-xr-x  deps/jemalloc/include/jemalloc/jemalloc.sh | 28
-rw-r--r--  deps/jemalloc/include/jemalloc/jemalloc_defs.h.in | 242
-rw-r--r--  deps/jemalloc/include/jemalloc/jemalloc_macros.h.in | 61
-rwxr-xr-x  deps/jemalloc/include/jemalloc/jemalloc_mangle.sh | 45
-rw-r--r--  deps/jemalloc/include/jemalloc/jemalloc_protos.h.in | 58
-rwxr-xr-x  deps/jemalloc/include/jemalloc/jemalloc_rename.sh | 22
-rw-r--r--  deps/jemalloc/src/arena.c | 1023
-rw-r--r--  deps/jemalloc/src/base.c | 3
-rw-r--r--  deps/jemalloc/src/bitmap.c | 2
-rw-r--r--  deps/jemalloc/src/chunk.c | 108
-rw-r--r--  deps/jemalloc/src/chunk_dss.c | 15
-rw-r--r--  deps/jemalloc/src/chunk_mmap.c | 4
-rw-r--r--  deps/jemalloc/src/ckh.c | 98
-rw-r--r--  deps/jemalloc/src/ctl.c | 349
-rw-r--r--  deps/jemalloc/src/huge.c | 82
-rw-r--r--  deps/jemalloc/src/jemalloc.c | 1281
-rw-r--r--  deps/jemalloc/src/mutex.c | 2
-rw-r--r--  deps/jemalloc/src/prof.c | 693
-rw-r--r--  deps/jemalloc/src/quarantine.c | 97
-rw-r--r--  deps/jemalloc/src/rtree.c | 78
-rw-r--r--  deps/jemalloc/src/stats.c | 8
-rw-r--r--  deps/jemalloc/src/tcache.c | 29
-rw-r--r--  deps/jemalloc/src/tsd.c | 36
-rw-r--r--  deps/jemalloc/src/util.c | 85
-rw-r--r--  deps/jemalloc/src/zone.c | 2
-rw-r--r--  deps/jemalloc/test/ALLOCM_ARENA.c | 66
-rw-r--r--  deps/jemalloc/test/ALLOCM_ARENA.exp | 2
-rw-r--r--  deps/jemalloc/test/aligned_alloc.exp | 25
-rw-r--r--  deps/jemalloc/test/allocated.c | 118
-rw-r--r--  deps/jemalloc/test/allocated.exp | 2
-rw-r--r--  deps/jemalloc/test/allocm.c | 194
-rw-r--r--  deps/jemalloc/test/allocm.exp | 25
-rw-r--r--  deps/jemalloc/test/bitmap.exp | 2
-rw-r--r--  deps/jemalloc/test/include/test/SFMT-alti.h | 186
-rw-r--r--  deps/jemalloc/test/include/test/SFMT-params.h | 132
-rw-r--r--  deps/jemalloc/test/include/test/SFMT-params11213.h | 81
-rw-r--r--  deps/jemalloc/test/include/test/SFMT-params1279.h | 81
-rw-r--r--  deps/jemalloc/test/include/test/SFMT-params132049.h | 81
-rw-r--r--  deps/jemalloc/test/include/test/SFMT-params19937.h | 81
-rw-r--r--  deps/jemalloc/test/include/test/SFMT-params216091.h | 81
-rw-r--r--  deps/jemalloc/test/include/test/SFMT-params2281.h | 81
-rw-r--r--  deps/jemalloc/test/include/test/SFMT-params4253.h | 81
-rw-r--r--  deps/jemalloc/test/include/test/SFMT-params44497.h | 81
-rw-r--r--  deps/jemalloc/test/include/test/SFMT-params607.h | 81
-rw-r--r--  deps/jemalloc/test/include/test/SFMT-params86243.h | 81
-rw-r--r--  deps/jemalloc/test/include/test/SFMT-sse2.h | 157
-rw-r--r--  deps/jemalloc/test/include/test/SFMT.h | 171
-rw-r--r--  deps/jemalloc/test/include/test/jemalloc_test.h.in | 141
-rw-r--r--  deps/jemalloc/test/include/test/jemalloc_test_defs.h.in | 5
-rw-r--r--  deps/jemalloc/test/include/test/math.h | 311
-rw-r--r--  deps/jemalloc/test/include/test/mq.h | 110
-rw-r--r--  deps/jemalloc/test/include/test/mtx.h | 21
-rw-r--r--  deps/jemalloc/test/include/test/test.h | 329
-rw-r--r--  deps/jemalloc/test/include/test/thd.h | 9
-rw-r--r--  deps/jemalloc/test/integration/MALLOCX_ARENA.c | 58
-rw-r--r--  deps/jemalloc/test/integration/aligned_alloc.c (renamed from deps/jemalloc/test/aligned_alloc.c) | 100
-rw-r--r--  deps/jemalloc/test/integration/allocated.c | 125
-rw-r--r--  deps/jemalloc/test/integration/allocm.c | 107
-rw-r--r--  deps/jemalloc/test/integration/mallocx.c | 97
-rw-r--r--  deps/jemalloc/test/integration/mremap.c | 45
-rw-r--r--  deps/jemalloc/test/integration/posix_memalign.c | 119
-rw-r--r--  deps/jemalloc/test/integration/rallocm.c | 111
-rw-r--r--  deps/jemalloc/test/integration/rallocx.c | 182
-rw-r--r--  deps/jemalloc/test/integration/thread_arena.c | 79
-rw-r--r--  deps/jemalloc/test/integration/thread_tcache_enabled.c | 113
-rw-r--r--  deps/jemalloc/test/integration/xallocx.c | 59
-rw-r--r--  deps/jemalloc/test/jemalloc_test.h.in | 53
-rw-r--r--  deps/jemalloc/test/mremap.c | 60
-rw-r--r--  deps/jemalloc/test/mremap.exp | 2
-rw-r--r--  deps/jemalloc/test/posix_memalign.c | 115
-rw-r--r--  deps/jemalloc/test/posix_memalign.exp | 25
-rw-r--r--  deps/jemalloc/test/rallocm.c | 127
-rw-r--r--  deps/jemalloc/test/rallocm.exp | 2
-rw-r--r--  deps/jemalloc/test/src/SFMT.c | 719
-rw-r--r--  deps/jemalloc/test/src/math.c | 2
-rw-r--r--  deps/jemalloc/test/src/mtx.c | 62
-rw-r--r--  deps/jemalloc/test/src/test.c | 94
-rw-r--r--  deps/jemalloc/test/src/thd.c | 35
-rw-r--r--  deps/jemalloc/test/test.sh.in | 53
-rw-r--r--  deps/jemalloc/test/thread_arena.c | 80
-rw-r--r--  deps/jemalloc/test/thread_arena.exp | 2
-rw-r--r--  deps/jemalloc/test/thread_tcache_enabled.c | 91
-rw-r--r--  deps/jemalloc/test/thread_tcache_enabled.exp | 2
-rw-r--r--  deps/jemalloc/test/unit/SFMT.c | 1605
-rw-r--r--  deps/jemalloc/test/unit/bitmap.c (renamed from deps/jemalloc/test/bitmap.c) | 84
-rw-r--r--  deps/jemalloc/test/unit/ckh.c | 206
-rw-r--r--  deps/jemalloc/test/unit/hash.c | 171
-rw-r--r--  deps/jemalloc/test/unit/junk.c | 222
-rw-r--r--  deps/jemalloc/test/unit/mallctl.c | 415
-rw-r--r--  deps/jemalloc/test/unit/math.c | 388
-rw-r--r--  deps/jemalloc/test/unit/mq.c | 92
-rw-r--r--  deps/jemalloc/test/unit/mtx.c | 60
-rw-r--r--  deps/jemalloc/test/unit/prof_accum.c | 86
-rw-r--r--  deps/jemalloc/test/unit/prof_accum.h | 35
-rw-r--r--  deps/jemalloc/test/unit/prof_accum_a.c | 3
-rw-r--r--  deps/jemalloc/test/unit/prof_accum_b.c | 3
-rw-r--r--  deps/jemalloc/test/unit/prof_gdump.c | 56
-rw-r--r--  deps/jemalloc/test/unit/prof_idump.c | 51
-rw-r--r--  deps/jemalloc/test/unit/ql.c | 209
-rw-r--r--  deps/jemalloc/test/unit/qr.c | 248
-rw-r--r--  deps/jemalloc/test/unit/quarantine.c | 108
-rw-r--r--  deps/jemalloc/test/unit/rb.c | 333
-rw-r--r--  deps/jemalloc/test/unit/rtree.c | 118
-rw-r--r--  deps/jemalloc/test/unit/stats.c | 380
-rw-r--r--  deps/jemalloc/test/unit/tsd.c | 71
-rw-r--r--  deps/jemalloc/test/unit/util.c | 294
-rw-r--r--  deps/jemalloc/test/unit/zero.c | 78
148 files changed, 16667 insertions, 4901 deletions
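
For anyone pulling this commit into a Redis checkout, a quick sanity check is to confirm that the rebuilt server really links the new bundled allocator. The lines below are only an illustrative sketch: the MALLOC=jemalloc build knob and the mem_allocator field of INFO memory come from general Redis usage, not from this diff, so treat them as assumptions.

    # Bundled jemalloc version after the update (expected: 3.6.0-0-g46c0af68...).
    cat deps/jemalloc/VERSION

    # Rebuild against the bundled allocator; MALLOC=jemalloc is the assumed way
    # to force it where the platform default differs.
    make distclean && make MALLOC=jemalloc

    # With a server running, the allocator and its version are reported by
    # INFO memory (assumed field name: mem_allocator), e.g. mem_allocator:jemalloc-3.6.0.
    ./src/redis-cli INFO memory | grep mem_allocator
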
diff --git a/deps/jemalloc/.gitignore b/deps/jemalloc/.gitignore
index 0406413a0..4c408ec2c 100644
--- a/deps/jemalloc/.gitignore
+++ b/deps/jemalloc/.gitignore
@@ -1,20 +1,72 @@
+/*.gcov.*
+
/autom4te.cache/
+
+/bin/jemalloc.sh
+
/config.stamp
/config.log
/config.status
+/configure
+
/doc/html.xsl
/doc/manpages.xsl
/doc/jemalloc.xml
+/doc/jemalloc.html
+/doc/jemalloc.3
+
/lib/
+
/Makefile
-/include/jemalloc/internal/jemalloc_internal\.h
-/include/jemalloc/internal/size_classes\.h
-/include/jemalloc/jemalloc\.h
-/include/jemalloc/jemalloc_defs\.h
+
+/include/jemalloc/internal/jemalloc_internal.h
+/include/jemalloc/internal/jemalloc_internal_defs.h
+/include/jemalloc/internal/private_namespace.h
+/include/jemalloc/internal/private_unnamespace.h
+/include/jemalloc/internal/public_namespace.h
+/include/jemalloc/internal/public_symbols.txt
+/include/jemalloc/internal/public_unnamespace.h
+/include/jemalloc/internal/size_classes.h
+/include/jemalloc/jemalloc.h
+/include/jemalloc/jemalloc_defs.h
+/include/jemalloc/jemalloc_macros.h
+/include/jemalloc/jemalloc_mangle.h
+/include/jemalloc/jemalloc_mangle_jet.h
+/include/jemalloc/jemalloc_protos.h
+/include/jemalloc/jemalloc_protos_jet.h
+/include/jemalloc/jemalloc_rename.h
+
/src/*.[od]
-/test/*.[od]
-/test/*.out
-/test/[a-zA-Z_]*
-!test/*.c
-!test/*.exp
-/bin/jemalloc.sh
+/src/*.gcda
+/src/*.gcno
+
+/test/test.sh
+test/include/test/jemalloc_test.h
+test/include/test/jemalloc_test_defs.h
+
+/test/integration/[A-Za-z]*
+!/test/integration/[A-Za-z]*.*
+/test/integration/*.[od]
+/test/integration/*.gcda
+/test/integration/*.gcno
+/test/integration/*.out
+
+/test/src/*.[od]
+/test/src/*.gcda
+/test/src/*.gcno
+
+/test/stress/[A-Za-z]*
+!/test/stress/[A-Za-z]*.*
+/test/stress/*.[od]
+/test/stress/*.gcda
+/test/stress/*.gcno
+/test/stress/*.out
+
+/test/unit/[A-Za-z]*
+!/test/unit/[A-Za-z]*.*
+/test/unit/*.[od]
+/test/unit/*.gcda
+/test/unit/*.gcno
+/test/unit/*.out
+
+/VERSION
diff --git a/deps/jemalloc/COPYING b/deps/jemalloc/COPYING
index e27fc4d6c..bdda0feb9 100644
--- a/deps/jemalloc/COPYING
+++ b/deps/jemalloc/COPYING
@@ -1,10 +1,10 @@
Unless otherwise specified, files in the jemalloc source distribution are
subject to the following license:
--------------------------------------------------------------------------------
-Copyright (C) 2002-2012 Jason Evans <jasone@canonware.com>.
+Copyright (C) 2002-2014 Jason Evans <jasone@canonware.com>.
All rights reserved.
Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved.
-Copyright (C) 2009-2012 Facebook, Inc. All rights reserved.
+Copyright (C) 2009-2014 Facebook, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
diff --git a/deps/jemalloc/ChangeLog b/deps/jemalloc/ChangeLog
index ab3476c69..d56ee999e 100644
--- a/deps/jemalloc/ChangeLog
+++ b/deps/jemalloc/ChangeLog
@@ -3,8 +3,176 @@ bug fixes are all mentioned, but internal enhancements are omitted here for
brevity (even though they are more fun to write about). Much more detail can be
found in the git revision history:
- http://www.canonware.com/cgi-bin/gitweb.cgi?p=jemalloc.git
- git://canonware.com/jemalloc.git
+ https://github.com/jemalloc/jemalloc
+
+* 3.6.0 (March 31, 2014)
+
+ This version contains a critical bug fix for a regression present in 3.5.0 and
+ 3.5.1.
+
+ Bug fixes:
+ - Fix a regression in arena_chunk_alloc() that caused crashes during
+ small/large allocation if chunk allocation failed. In the absence of this
+ bug, chunk allocation failure would result in allocation failure, e.g. NULL
+ return from malloc(). This regression was introduced in 3.5.0.
+ - Fix backtracing for gcc intrinsics-based backtracing by specifying
+ -fno-omit-frame-pointer to gcc. Note that the application (and all the
+ libraries it links to) must also be compiled with this option for
+ backtracing to be reliable.
+ - Use dss allocation precedence for huge allocations as well as small/large
+ allocations.
+ - Fix test assertion failure message formatting. This bug did not manifect on
+ x86_64 systems because of implementation subtleties in va_list.
+ - Fix inconsequential test failures for hash and SFMT code.
+
+ New features:
+ - Support heap profiling on FreeBSD. This feature depends on the proc
+ filesystem being mounted during heap profile dumping.
+
+* 3.5.1 (February 25, 2014)
+
+ This version primarily addresses minor bugs in test code.
+
+ Bug fixes:
+ - Configure Solaris/Illumos to use MADV_FREE.
+ - Fix junk filling for mremap(2)-based huge reallocation. This is only
+ relevant if configuring with the --enable-mremap option specified.
+ - Avoid compilation failure if 'restrict' C99 keyword is not supported by the
+ compiler.
+ - Add a configure test for SSE2 rather than assuming it is usable on i686
+ systems. This fixes test compilation errors, especially on 32-bit Linux
+ systems.
+ - Fix mallctl argument size mismatches (size_t vs. uint64_t) in the stats unit
+ test.
+ - Fix/remove flawed alignment-related overflow tests.
+ - Prevent compiler optimizations that could change backtraces in the
+ prof_accum unit test.
+
+* 3.5.0 (January 22, 2014)
+
+ This version focuses on refactoring and automated testing, though it also
+ includes some non-trivial heap profiling optimizations not mentioned below.
+
+ New features:
+ - Add the *allocx() API, which is a successor to the experimental *allocm()
+ API. The *allocx() functions are slightly simpler to use because they have
+ fewer parameters, they directly return the results of primary interest, and
+ mallocx()/rallocx() avoid the strict aliasing pitfall that
+ allocm()/rallocm() share with posix_memalign(). Note that *allocm() is
+ slated for removal in the next non-bugfix release.
+ - Add support for LinuxThreads.
+
+ Bug fixes:
+ - Unless heap profiling is enabled, disable floating point code and don't link
+ with libm. This, in combination with e.g. EXTRA_CFLAGS=-mno-sse on x64
+ systems, makes it possible to completely disable floating point register
+ use. Some versions of glibc neglect to save/restore caller-saved floating
+ point registers during dynamic lazy symbol loading, and the symbol loading
+ code uses whatever malloc the application happens to have linked/loaded
+ with, the result being potential floating point register corruption.
+ - Report ENOMEM rather than EINVAL if an OOM occurs during heap profiling
+ backtrace creation in imemalign(). This bug impacted posix_memalign() and
+ aligned_alloc().
+ - Fix a file descriptor leak in a prof_dump_maps() error path.
+ - Fix prof_dump() to close the dump file descriptor for all relevant error
+ paths.
+ - Fix rallocm() to use the arena specified by the ALLOCM_ARENA(s) flag for
+ allocation, not just deallocation.
+ - Fix a data race for large allocation stats counters.
+ - Fix a potential infinite loop during thread exit. This bug occurred on
+ Solaris, and could affect other platforms with similar pthreads TSD
+ implementations.
+ - Don't junk-fill reallocations unless usable size changes. This fixes a
+ violation of the *allocx()/*allocm() semantics.
+ - Fix growing large reallocation to junk fill new space.
+ - Fix huge deallocation to junk fill when munmap is disabled.
+ - Change the default private namespace prefix from empty to je_, and change
+ --with-private-namespace-prefix so that it prepends an additional prefix
+ rather than replacing je_. This reduces the likelihood of applications
+ which statically link jemalloc experiencing symbol name collisions.
+ - Add missing private namespace mangling (relevant when
+ --with-private-namespace is specified).
+ - Add and use JEMALLOC_INLINE_C so that static inline functions are marked as
+ static even for debug builds.
+ - Add a missing mutex unlock in a malloc_init_hard() error path. In practice
+ this error path is never executed.
+ - Fix numerous bugs in malloc_strotumax() error handling/reporting. These
+ bugs had no impact except for malformed inputs.
+ - Fix numerous bugs in malloc_snprintf(). These bugs were not exercised by
+ existing calls, so they had no impact.
+
+* 3.4.1 (October 20, 2013)
+
+ Bug fixes:
+ - Fix a race in the "arenas.extend" mallctl that could cause memory corruption
+ of internal data structures and subsequent crashes.
+ - Fix Valgrind integration flaws that caused Valgrind warnings about reads of
+ uninitialized memory in:
+ + arena chunk headers
+ + internal zero-initialized data structures (relevant to tcache and prof
+ code)
+ - Preserve errno during the first allocation. A readlink(2) call during
+ initialization fails unless /etc/malloc.conf exists, so errno was typically
+ set during the first allocation prior to this fix.
+ - Fix compilation warnings reported by gcc 4.8.1.
+
+* 3.4.0 (June 2, 2013)
+
+ This version is essentially a small bugfix release, but the addition of
+ aarch64 support requires that the minor version be incremented.
+
+ Bug fixes:
+ - Fix race-triggered deadlocks in chunk_record(). These deadlocks were
+ typically triggered by multiple threads concurrently deallocating huge
+ objects.
+
+ New features:
+ - Add support for the aarch64 architecture.
+
+* 3.3.1 (March 6, 2013)
+
+ This version fixes bugs that are typically encountered only when utilizing
+ custom run-time options.
+
+ Bug fixes:
+ - Fix a locking order bug that could cause deadlock during fork if heap
+ profiling were enabled.
+ - Fix a chunk recycling bug that could cause the allocator to lose track of
+ whether a chunk was zeroed. On FreeBSD, NetBSD, and OS X, it could cause
+ corruption if allocating via sbrk(2) (unlikely unless running with the
+ "dss:primary" option specified). This was completely harmless on Linux
+ unless using mlockall(2) (and unlikely even then, unless the
+ --disable-munmap configure option or the "dss:primary" option was
+ specified). This regression was introduced in 3.1.0 by the
+ mlockall(2)/madvise(2) interaction fix.
+ - Fix TLS-related memory corruption that could occur during thread exit if the
+ thread never allocated memory. Only the quarantine and prof facilities were
+ susceptible.
+ - Fix two quarantine bugs:
+ + Internal reallocation of the quarantined object array leaked the old
+ array.
+ + Reallocation failure for internal reallocation of the quarantined object
+ array (very unlikely) resulted in memory corruption.
+ - Fix Valgrind integration to annotate all internally allocated memory in a
+ way that keeps Valgrind happy about internal data structure access.
+ - Fix building for s390 systems.
+
+* 3.3.0 (January 23, 2013)
+
+ This version includes a few minor performance improvements in addition to the
+ listed new features and bug fixes.
+
+ New features:
+ - Add clipping support to lg_chunk option processing.
+ - Add the --enable-ivsalloc option.
+ - Add the --without-export option.
+ - Add the --disable-zone-allocator option.
+
+ Bug fixes:
+ - Fix "arenas.extend" mallctl to output the number of arenas.
+ - Fix chunk_recycle() to unconditionally inform Valgrind that returned memory
+ is undefined.
+ - Fix build break on FreeBSD related to alloca.h.
* 3.2.0 (November 9, 2012)
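
The 3.6.0 notes above mention heap-profiling support (now also on FreeBSD) and the -fno-omit-frame-pointer fix for intrinsics-based backtracing. A minimal sketch of exercising that feature with the pprof script shipped in bin/ follows; the --enable-prof configure flag, the MALLOC_CONF variable, and the prof option names are standard jemalloc knobs assumed here rather than taken from this diff, and myapp is a hypothetical binary.

    # Build jemalloc with profiling compiled in (assumed flag: --enable-prof).
    cd deps/jemalloc && ./configure --enable-prof && make

    # Run with profiling active; prof_prefix names the dump files
    # (assumed MALLOC_CONF syntax for jemalloc 3.x).
    MALLOC_CONF="prof:true,prof_prefix:jeprof.out" ./myapp

    # Inspect a dump with the bundled pprof script.
    ./bin/pprof --text ./myapp jeprof.out.*
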
diff --git a/deps/jemalloc/INSTALL b/deps/jemalloc/INSTALL
index e40a7eddc..841704d2a 100644
--- a/deps/jemalloc/INSTALL
+++ b/deps/jemalloc/INSTALL
@@ -55,11 +55,16 @@ any of the following arguments (not a definitive list) to 'configure':
jemalloc overlays the default malloc zone, but makes no attempt to actually
replace the "malloc", "calloc", etc. symbols.
+--without-export
+ Don't export public APIs. This can be useful when building jemalloc as a
+ static library, or to avoid exporting public APIs when using the zone
+ allocator on OSX.
+
--with-private-namespace=<prefix>
- Prefix all library-private APIs with <prefix>. For shared libraries,
+ Prefix all library-private APIs with <prefix>je_. For shared libraries,
symbol visibility mechanisms prevent these symbols from being exported, but
for static libraries, naming collisions are a real possibility. By
- default, the prefix is "" (empty string).
+ default, <prefix> is empty, which results in a symbol prefix of je_ .
--with-install-suffix=<suffix>
Append <suffix> to the base name of all installed files, such that multiple
@@ -74,6 +79,25 @@ any of the following arguments (not a definitive list) to 'configure':
--enable-debug
Enable assertions and validation code. This incurs a substantial
performance hit, but is very useful during application development.
+ Implies --enable-ivsalloc.
+
+--enable-code-coverage
+ Enable code coverage support, for use during jemalloc test development.
+ Additional testing targets are available if this option is enabled:
+
+ coverage
+ coverage_unit
+ coverage_integration
+ coverage_stress
+
+ These targets do not clear code coverage results from previous runs, and
+ there are interactions between the various coverage targets, so it is
+ usually advisable to run 'make clean' between repeated code coverage runs.
+
+--enable-ivsalloc
+ Enable validation code, which verifies that pointers reside within
+ jemalloc-owned chunks before dereferencing them. This incurs a substantial
+ performance hit.
--disable-stats
Disable statistics gathering functionality. See the "opt.stats_print"
@@ -136,6 +160,10 @@ any of the following arguments (not a definitive list) to 'configure':
--disable-experimental
Disable support for the experimental API (*allocm()).
+--disable-zone-allocator
+ Disable zone allocator for Darwin. This means jemalloc won't be hooked as
+ the default allocator on OSX/iOS.
+
--enable-utrace
Enable utrace(2)-based allocation tracing. This feature is not broadly
portable (FreeBSD has it, but Linux and OS X do not).
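
The new --enable-code-coverage option described above pairs with the coverage targets this commit adds to the Makefile. A rough sketch of the intended workflow, assuming an in-tree build of deps/jemalloc, might look like this:

    cd deps/jemalloc
    ./configure --enable-code-coverage
    make coverage_unit      # builds and runs the unit tests, then collects gcov data
    make clean              # INSTALL advises cleaning between repeated coverage runs
    make coverage           # full pass over unit, integration and stress suites
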
diff --git a/deps/jemalloc/Makefile.in b/deps/jemalloc/Makefile.in
index 364481890..d6b7d6ea3 100644
--- a/deps/jemalloc/Makefile.in
+++ b/deps/jemalloc/Makefile.in
@@ -47,13 +47,17 @@ cfghdrs_out := @cfghdrs_out@
cfgoutputs_in := @cfgoutputs_in@
cfgoutputs_out := @cfgoutputs_out@
enable_autogen := @enable_autogen@
+enable_code_coverage := @enable_code_coverage@
enable_experimental := @enable_experimental@
+enable_zone_allocator := @enable_zone_allocator@
DSO_LDFLAGS = @DSO_LDFLAGS@
SOREV = @SOREV@
PIC_CFLAGS = @PIC_CFLAGS@
CTARGET = @CTARGET@
LDTARGET = @LDTARGET@
MKLIB = @MKLIB@
+AR = @AR@
+ARFLAGS = @ARFLAGS@
CC_MM = @CC_MM@
ifeq (macho, $(ABI))
@@ -70,18 +74,17 @@ LIBJEMALLOC := $(LIBPREFIX)jemalloc$(install_suffix)
# Lists of files.
BINS := $(srcroot)bin/pprof $(objroot)bin/jemalloc.sh
-CHDRS := $(objroot)include/jemalloc/jemalloc$(install_suffix).h \
- $(objroot)include/jemalloc/jemalloc_defs$(install_suffix).h
-CSRCS := $(srcroot)src/jemalloc.c $(srcroot)src/arena.c $(srcroot)src/atomic.c \
- $(srcroot)src/base.c $(srcroot)src/bitmap.c $(srcroot)src/chunk.c \
- $(srcroot)src/chunk_dss.c $(srcroot)src/chunk_mmap.c \
- $(srcroot)src/ckh.c $(srcroot)src/ctl.c $(srcroot)src/extent.c \
- $(srcroot)src/hash.c $(srcroot)src/huge.c $(srcroot)src/mb.c \
- $(srcroot)src/mutex.c $(srcroot)src/prof.c $(srcroot)src/quarantine.c \
- $(srcroot)src/rtree.c $(srcroot)src/stats.c $(srcroot)src/tcache.c \
- $(srcroot)src/util.c $(srcroot)src/tsd.c
-ifeq (macho, $(ABI))
-CSRCS += $(srcroot)src/zone.c
+C_HDRS := $(objroot)include/jemalloc/jemalloc$(install_suffix).h
+C_SRCS := $(srcroot)src/jemalloc.c $(srcroot)src/arena.c \
+ $(srcroot)src/atomic.c $(srcroot)src/base.c $(srcroot)src/bitmap.c \
+ $(srcroot)src/chunk.c $(srcroot)src/chunk_dss.c \
+ $(srcroot)src/chunk_mmap.c $(srcroot)src/ckh.c $(srcroot)src/ctl.c \
+ $(srcroot)src/extent.c $(srcroot)src/hash.c $(srcroot)src/huge.c \
+ $(srcroot)src/mb.c $(srcroot)src/mutex.c $(srcroot)src/prof.c \
+ $(srcroot)src/quarantine.c $(srcroot)src/rtree.c $(srcroot)src/stats.c \
+ $(srcroot)src/tcache.c $(srcroot)src/util.c $(srcroot)src/tsd.c
+ifeq ($(enable_zone_allocator), 1)
+C_SRCS += $(srcroot)src/zone.c
endif
ifeq ($(IMPORTLIB),$(SO))
STATIC_LIBS := $(objroot)lib/$(LIBJEMALLOC).$(A)
@@ -100,27 +103,74 @@ DOCS_XML := $(objroot)doc/jemalloc$(install_suffix).xml
DOCS_HTML := $(DOCS_XML:$(objroot)%.xml=$(srcroot)%.html)
DOCS_MAN3 := $(DOCS_XML:$(objroot)%.xml=$(srcroot)%.3)
DOCS := $(DOCS_HTML) $(DOCS_MAN3)
-CTESTS := $(srcroot)test/aligned_alloc.c $(srcroot)test/allocated.c \
- $(srcroot)test/ALLOCM_ARENA.c $(srcroot)test/bitmap.c \
- $(srcroot)test/mremap.c $(srcroot)test/posix_memalign.c \
- $(srcroot)test/thread_arena.c $(srcroot)test/thread_tcache_enabled.c
+C_TESTLIB_SRCS := $(srcroot)test/src/math.c $(srcroot)test/src/mtx.c \
+ $(srcroot)test/src/SFMT.c $(srcroot)test/src/test.c \
+ $(srcroot)test/src/thd.c
+C_UTIL_INTEGRATION_SRCS := $(srcroot)src/util.c
+TESTS_UNIT := $(srcroot)test/unit/bitmap.c \
+ $(srcroot)test/unit/ckh.c \
+ $(srcroot)test/unit/hash.c \
+ $(srcroot)test/unit/junk.c \
+ $(srcroot)test/unit/mallctl.c \
+ $(srcroot)test/unit/math.c \
+ $(srcroot)test/unit/mq.c \
+ $(srcroot)test/unit/mtx.c \
+ $(srcroot)test/unit/prof_accum.c \
+ $(srcroot)test/unit/prof_gdump.c \
+ $(srcroot)test/unit/prof_idump.c \
+ $(srcroot)test/unit/ql.c \
+ $(srcroot)test/unit/qr.c \
+ $(srcroot)test/unit/quarantine.c \
+ $(srcroot)test/unit/rb.c \
+ $(srcroot)test/unit/rtree.c \
+ $(srcroot)test/unit/SFMT.c \
+ $(srcroot)test/unit/stats.c \
+ $(srcroot)test/unit/tsd.c \
+ $(srcroot)test/unit/util.c \
+ $(srcroot)test/unit/zero.c
+TESTS_UNIT_AUX := $(srcroot)test/unit/prof_accum_a.c \
+ $(srcroot)test/unit/prof_accum_b.c
+TESTS_INTEGRATION := $(srcroot)test/integration/aligned_alloc.c \
+ $(srcroot)test/integration/allocated.c \
+ $(srcroot)test/integration/mallocx.c \
+ $(srcroot)test/integration/mremap.c \
+ $(srcroot)test/integration/posix_memalign.c \
+ $(srcroot)test/integration/rallocx.c \
+ $(srcroot)test/integration/thread_arena.c \
+ $(srcroot)test/integration/thread_tcache_enabled.c \
+ $(srcroot)test/integration/xallocx.c
ifeq ($(enable_experimental), 1)
-CTESTS += $(srcroot)test/allocm.c $(srcroot)test/rallocm.c
+TESTS_INTEGRATION += $(srcroot)test/integration/allocm.c \
+ $(srcroot)test/integration/MALLOCX_ARENA.c \
+ $(srcroot)test/integration/rallocm.c
endif
-
-COBJS := $(CSRCS:$(srcroot)%.c=$(objroot)%.$(O))
-CPICOBJS := $(CSRCS:$(srcroot)%.c=$(objroot)%.pic.$(O))
-CTESTOBJS := $(CTESTS:$(srcroot)%.c=$(objroot)%.$(O))
-
-.PHONY: all dist doc_html doc_man doc
+TESTS_STRESS :=
+TESTS := $(TESTS_UNIT) $(TESTS_INTEGRATION) $(TESTS_STRESS)
+
+C_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.$(O))
+C_PIC_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.pic.$(O))
+C_JET_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.$(O))
+C_TESTLIB_UNIT_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.unit.$(O))
+C_TESTLIB_INTEGRATION_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.integration.$(O))
+C_UTIL_INTEGRATION_OBJS := $(C_UTIL_INTEGRATION_SRCS:$(srcroot)%.c=$(objroot)%.integration.$(O))
+C_TESTLIB_STRESS_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.stress.$(O))
+C_TESTLIB_OBJS := $(C_TESTLIB_UNIT_OBJS) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(C_TESTLIB_STRESS_OBJS)
+
+TESTS_UNIT_OBJS := $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%.$(O))
+TESTS_UNIT_AUX_OBJS := $(TESTS_UNIT_AUX:$(srcroot)%.c=$(objroot)%.$(O))
+TESTS_INTEGRATION_OBJS := $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%.$(O))
+TESTS_STRESS_OBJS := $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%.$(O))
+TESTS_OBJS := $(TESTS_UNIT_OBJS) $(TESTS_UNIT_AUX_OBJS) $(TESTS_INTEGRATION_OBJS) $(TESTS_STRESS_OBJS)
+
+.PHONY: all dist build_doc_html build_doc_man build_doc
.PHONY: install_bin install_include install_lib
-.PHONY: install_html install_man install_doc install
+.PHONY: install_doc_html install_doc_man install_doc install
.PHONY: tests check clean distclean relclean
-.SECONDARY : $(CTESTOBJS)
+.SECONDARY : $(TESTS_OBJS)
# Default target.
-all: build
+all: build_lib
dist: build_doc
@@ -138,30 +188,51 @@ build_doc: $(DOCS)
# Include generated dependency files.
#
ifdef CC_MM
--include $(COBJS:%.$(O)=%.d)
--include $(CPICOBJS:%.$(O)=%.d)
--include $(CTESTOBJS:%.$(O)=%.d)
+-include $(C_OBJS:%.$(O)=%.d)
+-include $(C_PIC_OBJS:%.$(O)=%.d)
+-include $(C_JET_OBJS:%.$(O)=%.d)
+-include $(C_TESTLIB_OBJS:%.$(O)=%.d)
+-include $(TESTS_OBJS:%.$(O)=%.d)
endif
-$(COBJS): $(objroot)src/%.$(O): $(srcroot)src/%.c
-$(CPICOBJS): $(objroot)src/%.pic.$(O): $(srcroot)src/%.c
-$(CPICOBJS): CFLAGS += $(PIC_CFLAGS)
-$(CTESTOBJS): $(objroot)test/%.$(O): $(srcroot)test/%.c
-$(CTESTOBJS): CPPFLAGS += -I$(objroot)test
+$(C_OBJS): $(objroot)src/%.$(O): $(srcroot)src/%.c
+$(C_PIC_OBJS): $(objroot)src/%.pic.$(O): $(srcroot)src/%.c
+$(C_PIC_OBJS): CFLAGS += $(PIC_CFLAGS)
+$(C_JET_OBJS): $(objroot)src/%.jet.$(O): $(srcroot)src/%.c
+$(C_JET_OBJS): CFLAGS += -DJEMALLOC_JET
+$(C_TESTLIB_UNIT_OBJS): $(objroot)test/src/%.unit.$(O): $(srcroot)test/src/%.c
+$(C_TESTLIB_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST
+$(C_TESTLIB_INTEGRATION_OBJS): $(objroot)test/src/%.integration.$(O): $(srcroot)test/src/%.c
+$(C_TESTLIB_INTEGRATION_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_TEST
+$(C_UTIL_INTEGRATION_OBJS): $(objroot)src/%.integration.$(O): $(srcroot)src/%.c
+$(C_TESTLIB_STRESS_OBJS): $(objroot)test/src/%.stress.$(O): $(srcroot)test/src/%.c
+$(C_TESTLIB_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST -DJEMALLOC_STRESS_TESTLIB
+$(C_TESTLIB_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include
+$(TESTS_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST
+$(TESTS_UNIT_AUX_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST
+define make-unit-link-dep
+$(1): TESTS_UNIT_LINK_OBJS += $(2)
+$(1): $(2)
+endef
+$(foreach test, $(TESTS_UNIT:$(srcroot)test/unit/%.c=$(objroot)test/unit/%$(EXE)), $(eval $(call make-unit-link-dep,$(test),$(filter $(test:%=%_a.$(O)) $(test:%=%_b.$(O)),$(TESTS_UNIT_AUX_OBJS)))))
+$(TESTS_INTEGRATION_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_TEST
+$(TESTS_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST
+$(TESTS_OBJS): $(objroot)test/%.$(O): $(srcroot)test/%.c
+$(TESTS_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include
ifneq ($(IMPORTLIB),$(SO))
-$(COBJS): CPPFLAGS += -DDLLEXPORT
+$(C_OBJS): CPPFLAGS += -DDLLEXPORT
endif
ifndef CC_MM
-# Dependencies
+# Dependencies.
HEADER_DIRS = $(srcroot)include/jemalloc/internal \
$(objroot)include/jemalloc $(objroot)include/jemalloc/internal
HEADERS = $(wildcard $(foreach dir,$(HEADER_DIRS),$(dir)/*.h))
-$(COBJS) $(CPICOBJS) $(CTESTOBJS): $(HEADERS)
-$(CTESTOBJS): $(objroot)test/jemalloc_test.h
+$(C_OBJS) $(C_PIC_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS): $(HEADERS)
+$(TESTS_OBJS): $(objroot)test/unit/jemalloc_test.h
endif
-$(COBJS) $(CPICOBJS) $(CTESTOBJS): %.$(O):
+$(C_OBJS) $(C_PIC_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS): %.$(O):
@mkdir -p $(@D)
$(CC) $(CFLAGS) -c $(CPPFLAGS) $(CTARGET) $<
ifdef CC_MM
@@ -174,27 +245,33 @@ ifneq ($(SOREV),$(SO))
ln -sf $(<F) $@
endif
-$(objroot)lib/$(LIBJEMALLOC).$(SOREV) : $(if $(PIC_CFLAGS),$(CPICOBJS),$(COBJS))
+$(objroot)lib/$(LIBJEMALLOC).$(SOREV) : $(if $(PIC_CFLAGS),$(C_PIC_OBJS),$(C_OBJS))
@mkdir -p $(@D)
$(CC) $(DSO_LDFLAGS) $(call RPATH,$(RPATH_EXTRA)) $(LDTARGET) $+ $(LDFLAGS) $(LIBS) $(EXTRA_LDFLAGS)
-$(objroot)lib/$(LIBJEMALLOC)_pic.$(A) : $(CPICOBJS)
-$(objroot)lib/$(LIBJEMALLOC).$(A) : $(COBJS)
-$(objroot)lib/$(LIBJEMALLOC)_s.$(A) : $(COBJS)
+$(objroot)lib/$(LIBJEMALLOC)_pic.$(A) : $(C_PIC_OBJS)
+$(objroot)lib/$(LIBJEMALLOC).$(A) : $(C_OBJS)
+$(objroot)lib/$(LIBJEMALLOC)_s.$(A) : $(C_OBJS)
$(STATIC_LIBS):
@mkdir -p $(@D)
- $(MKLIB) $+
+ $(AR) $(ARFLAGS)@AROUT@ $+
-$(objroot)test/bitmap$(EXE): $(objroot)src/bitmap.$(O)
+$(objroot)test/unit/%$(EXE): $(objroot)test/unit/%.$(O) $(TESTS_UNIT_LINK_OBJS) $(C_JET_OBJS) $(C_TESTLIB_UNIT_OBJS)
+ @mkdir -p $(@D)
+ $(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LDFLAGS) $(filter-out -lm,$(LIBS)) -lm $(EXTRA_LDFLAGS)
-$(objroot)test/%$(EXE): $(objroot)test/%.$(O) $(objroot)src/util.$(O) $(DSOS)
+$(objroot)test/integration/%$(EXE): $(objroot)test/integration/%.$(O) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
@mkdir -p $(@D)
- $(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(filter -lpthread,$(LIBS)) $(EXTRA_LDFLAGS)
+ $(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(filter -lpthread,$(LIBS))) -lm $(EXTRA_LDFLAGS)
+
+$(objroot)test/stress/%$(EXE): $(objroot)test/stress/%.$(O) $(C_JET_OBJS) $(C_TESTLIB_STRESS_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
+ @mkdir -p $(@D)
+ $(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(LIBS)) -lm $(EXTRA_LDFLAGS)
build_lib_shared: $(DSOS)
build_lib_static: $(STATIC_LIBS)
-build: build_lib_shared build_lib_static
+build_lib: build_lib_shared build_lib_static
install_bin:
install -d $(BINDIR)
@@ -205,7 +282,7 @@ done
install_include:
install -d $(INCLUDEDIR)/jemalloc
- @for h in $(CHDRS); do \
+ @for h in $(C_HDRS); do \
echo "install -m 644 $$h $(INCLUDEDIR)/jemalloc"; \
install -m 644 $$h $(INCLUDEDIR)/jemalloc; \
done
@@ -244,49 +321,87 @@ install_doc: install_doc_html install_doc_man
install: install_bin install_include install_lib install_doc
-tests: $(CTESTS:$(srcroot)%.c=$(objroot)%$(EXE))
-
-check: tests
- @mkdir -p $(objroot)test
- @$(SHELL) -c 'total=0; \
- failures=0; \
- echo "========================================="; \
- for t in $(CTESTS:$(srcroot)%.c=$(objroot)%); do \
- total=`expr $$total + 1`; \
- /bin/echo -n "$${t} ... "; \
- $(TEST_LIBRARY_PATH) $${t}$(EXE) $(abs_srcroot) \
- $(abs_objroot) > $(objroot)$${t}.out 2>&1; \
- if test -e "$(srcroot)$${t}.exp"; then \
- diff -w -u $(srcroot)$${t}.exp \
- $(objroot)$${t}.out >/dev/null 2>&1; \
- fail=$$?; \
- if test "$${fail}" -eq "1" ; then \
- failures=`expr $${failures} + 1`; \
- echo "*** FAIL ***"; \
- else \
- echo "pass"; \
- fi; \
- else \
- echo "*** FAIL *** (.exp file is missing)"; \
- failures=`expr $${failures} + 1`; \
- fi; \
- done; \
- echo "========================================="; \
- echo "Failures: $${failures}/$${total}"'
+tests_unit: $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%$(EXE))
+tests_integration: $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%$(EXE))
+tests_stress: $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%$(EXE))
+tests: tests_unit tests_integration tests_stress
+
+check_unit_dir:
+ @mkdir -p $(objroot)test/unit
+check_integration_dir:
+ @mkdir -p $(objroot)test/integration
+check_stress_dir:
+ @mkdir -p $(objroot)test/stress
+check_dir: check_unit_dir check_integration_dir check_stress_dir
+
+check_unit: tests_unit check_unit_dir
+ $(SHELL) $(objroot)test/test.sh $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%)
+check_integration: tests_integration check_integration_dir
+ $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
+check_stress: tests_stress check_stress_dir
+ $(SHELL) $(objroot)test/test.sh $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%)
+check: tests check_dir
+ $(SHELL) $(objroot)test/test.sh $(TESTS:$(srcroot)%.c=$(objroot)%)
+
+ifeq ($(enable_code_coverage), 1)
+coverage_unit: check_unit
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)src jet $(C_JET_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)test/src unit $(C_TESTLIB_UNIT_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)test/unit unit $(TESTS_UNIT_OBJS)
+
+coverage_integration: check_integration
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)src pic $(C_PIC_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)src integration $(C_UTIL_INTEGRATION_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)test/src integration $(C_TESTLIB_INTEGRATION_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)test/integration integration $(TESTS_INTEGRATION_OBJS)
+
+coverage_stress: check_stress
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)src pic $(C_PIC_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)src jet $(C_JET_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)test/src stress $(C_TESTLIB_STRESS_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)test/stress stress $(TESTS_STRESS_OBJS)
+
+coverage: check
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)src pic $(C_PIC_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)src jet $(C_JET_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)src integration $(C_UTIL_INTEGRATION_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)test/src unit $(C_TESTLIB_UNIT_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)test/src integration $(C_TESTLIB_INTEGRATION_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)test/src stress $(C_TESTLIB_STRESS_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)test/unit unit $(TESTS_UNIT_OBJS) $(TESTS_UNIT_AUX_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)test/integration integration $(TESTS_INTEGRATION_OBJS)
+ $(SHELL) $(srcroot)coverage.sh $(srcroot)test/stress integration $(TESTS_STRESS_OBJS)
+endif
clean:
- rm -f $(COBJS)
- rm -f $(CPICOBJS)
- rm -f $(COBJS:%.$(O)=%.d)
- rm -f $(CPICOBJS:%.$(O)=%.d)
- rm -f $(CTESTOBJS:%.$(O)=%$(EXE))
- rm -f $(CTESTOBJS)
- rm -f $(CTESTOBJS:%.$(O)=%.d)
- rm -f $(CTESTOBJS:%.$(O)=%.out)
+ rm -f $(C_OBJS)
+ rm -f $(C_PIC_OBJS)
+ rm -f $(C_JET_OBJS)
+ rm -f $(C_TESTLIB_OBJS)
+ rm -f $(C_OBJS:%.$(O)=%.d)
+ rm -f $(C_OBJS:%.$(O)=%.gcda)
+ rm -f $(C_OBJS:%.$(O)=%.gcno)
+ rm -f $(C_PIC_OBJS:%.$(O)=%.d)
+ rm -f $(C_PIC_OBJS:%.$(O)=%.gcda)
+ rm -f $(C_PIC_OBJS:%.$(O)=%.gcno)
+ rm -f $(C_JET_OBJS:%.$(O)=%.d)
+ rm -f $(C_JET_OBJS:%.$(O)=%.gcda)
+ rm -f $(C_JET_OBJS:%.$(O)=%.gcno)
+ rm -f $(C_TESTLIB_OBJS:%.$(O)=%.d)
+ rm -f $(C_TESTLIB_OBJS:%.$(O)=%.gcda)
+ rm -f $(C_TESTLIB_OBJS:%.$(O)=%.gcno)
+ rm -f $(TESTS_OBJS:%.$(O)=%$(EXE))
+ rm -f $(TESTS_OBJS)
+ rm -f $(TESTS_OBJS:%.$(O)=%.d)
+ rm -f $(TESTS_OBJS:%.$(O)=%.gcda)
+ rm -f $(TESTS_OBJS:%.$(O)=%.gcno)
+ rm -f $(TESTS_OBJS:%.$(O)=%.out)
rm -f $(DSOS) $(STATIC_LIBS)
+ rm -f $(objroot)*.gcov.*
distclean: clean
rm -rf $(objroot)autom4te.cache
+ rm -f $(objroot)bin/jemalloc.sh
rm -f $(objroot)config.log
rm -f $(objroot)config.status
rm -f $(objroot)config.stamp
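
With the Makefile changes above, the old monolithic test loop is replaced by per-suite targets driven by test/test.sh. A short sketch of the new entry points, taken from the targets defined in this hunk:

    cd deps/jemalloc
    make check_unit          # unit tests only (test/unit)
    make check_integration   # integration tests linked against libjemalloc
    make check_stress        # stress tests (the suite is empty in 3.6.0)
    make check               # all suites via test/test.sh
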
diff --git a/deps/jemalloc/README b/deps/jemalloc/README
index 7661683ba..9b268f422 100644
--- a/deps/jemalloc/README
+++ b/deps/jemalloc/README
@@ -1,10 +1,14 @@
-jemalloc is a general-purpose scalable concurrent malloc(3) implementation.
-This distribution is a "portable" implementation that currently targets
-FreeBSD, Linux, Apple OS X, and MinGW. jemalloc is included as the default
-allocator in the FreeBSD and NetBSD operating systems, and it is used by the
-Mozilla Firefox web browser on Microsoft Windows-related platforms. Depending
-on your needs, one of the other divergent versions may suit your needs better
-than this distribution.
+jemalloc is a general purpose malloc(3) implementation that emphasizes
+fragmentation avoidance and scalable concurrency support. jemalloc first came
+into use as the FreeBSD libc allocator in 2005, and since then it has found its
+way into numerous applications that rely on its predictable behavior. In 2010
+jemalloc development efforts broadened to include developer support features
+such as heap profiling, Valgrind integration, and extensive monitoring/tuning
+hooks. Modern jemalloc releases continue to be integrated back into FreeBSD,
+and therefore versatility remains critical. Ongoing development efforts trend
+toward making jemalloc among the best allocators for a broad range of demanding
+applications, and eliminating/mitigating weaknesses that have practical
+repercussions for real world applications.
The COPYING file contains copyright and licensing information.
diff --git a/deps/jemalloc/VERSION b/deps/jemalloc/VERSION
index 5e64fc9e8..dace31ba7 100644
--- a/deps/jemalloc/VERSION
+++ b/deps/jemalloc/VERSION
@@ -1 +1 @@
-3.2.0-0-g87499f6748ebe4817571e817e9f680ccb5bf54a9
+3.6.0-0-g46c0af68bd248b04df75e4f92d5fb804c3d75340
diff --git a/deps/jemalloc/bin/pprof b/deps/jemalloc/bin/pprof
index 727eb4370..a309943c1 100755
--- a/deps/jemalloc/bin/pprof
+++ b/deps/jemalloc/bin/pprof
@@ -4197,8 +4197,12 @@ sub FindLibrary {
# For libc libraries, the copy in /usr/lib/debug contains debugging symbols
sub DebuggingLibrary {
my $file = shift;
- if ($file =~ m|^/| && -f "/usr/lib/debug$file") {
- return "/usr/lib/debug$file";
+ if ($file =~ m|^/|) {
+ if (-f "/usr/lib/debug$file") {
+ return "/usr/lib/debug$file";
+ } elsif (-f "/usr/lib/debug$file.debug") {
+ return "/usr/lib/debug$file.debug";
+ }
}
return undef;
}
@@ -4360,6 +4364,19 @@ sub ParseLibraries {
$finish = HexExtend($2);
$offset = $zero_offset;
$lib = $3;
+ }
+ # FreeBSD 10.0 virtual memory map /proc/curproc/map as defined in
+ # function procfs_doprocmap (sys/fs/procfs/procfs_map.c)
+ #
+ # Example:
+ # 0x800600000 0x80061a000 26 0 0xfffff800035a0000 r-x 75 33 0x1004 COW NC vnode /libexec/ld-elf.s
+ # o.1 NCH -1
+ elsif ($l =~ /^(0x$h)\s(0x$h)\s\d+\s\d+\s0x$h\sr-x\s\d+\s\d+\s0x\d+\s(COW|NCO)\s(NC|NNC)\svnode\s(\S+\.so(\.\d+)*)/) {
+ $start = HexExtend($1);
+ $finish = HexExtend($2);
+ $offset = $zero_offset;
+ $lib = FindLibrary($5);
+
} else {
next;
}
@@ -4382,6 +4399,7 @@ sub ParseLibraries {
}
}
+ if($main::opt_debug) { printf STDERR "$start:$finish ($offset) $lib\n"; }
push(@{$result}, [$lib, $start, $finish, $offset]);
}
@@ -4589,6 +4607,12 @@ sub ExtractSymbols {
my $finish = $lib->[2];
my $offset = $lib->[3];
+ # Use debug library if it exists
+ my $debug_libname = DebuggingLibrary($libname);
+ if ($debug_libname) {
+ $libname = $debug_libname;
+ }
+
# Get list of pcs that belong in this library.
my $contained = [];
my ($start_pc_index, $finish_pc_index);
@@ -5019,7 +5043,7 @@ sub GetProcedureBoundariesViaNm {
# Tag this routine with the starting address in case the image
# has multiple occurrences of this routine. We use a syntax
- # that resembles template paramters that are automatically
+ # that resembles template parameters that are automatically
# stripped out by ShortFunctionName()
$this_routine .= "<$start_val>";
diff --git a/deps/jemalloc/config.guess b/deps/jemalloc/config.guess
index d622a44e5..b79252d6b 100755
--- a/deps/jemalloc/config.guess
+++ b/deps/jemalloc/config.guess
@@ -1,14 +1,12 @@
#! /bin/sh
# Attempt to guess a canonical system name.
-# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
-# 2011, 2012 Free Software Foundation, Inc.
+# Copyright 1992-2013 Free Software Foundation, Inc.
-timestamp='2012-02-10'
+timestamp='2013-06-10'
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
+# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
@@ -22,19 +20,17 @@ timestamp='2012-02-10'
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
# configuration script generated by Autoconf, you may include it under
-# the same distribution terms that you use for the rest of that program.
-
-
-# Originally written by Per Bothner. Please send patches (context
-# diff format) to <config-patches@gnu.org> and include a ChangeLog
-# entry.
+# the same distribution terms that you use for the rest of that
+# program. This Exception is an additional permission under section 7
+# of the GNU General Public License, version 3 ("GPLv3").
#
-# This script attempts to guess a canonical system name similar to
-# config.sub. If it succeeds, it prints the system name on stdout, and
-# exits with 0. Otherwise, it exits with 1.
+# Originally written by Per Bothner.
#
# You can get the latest version of this script from:
# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
+#
+# Please send patches with a ChangeLog entry to config-patches@gnu.org.
+
me=`echo "$0" | sed -e 's,.*/,,'`
@@ -54,9 +50,7 @@ version="\
GNU config.guess ($timestamp)
Originally written by Per Bothner.
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
-2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
-Free Software Foundation, Inc.
+Copyright 1992-2013 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -138,6 +132,27 @@ UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown
UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown
UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
+case "${UNAME_SYSTEM}" in
+Linux|GNU|GNU/*)
+ # If the system lacks a compiler, then just pick glibc.
+ # We could probably try harder.
+ LIBC=gnu
+
+ eval $set_cc_for_build
+ cat <<-EOF > $dummy.c
+ #include <features.h>
+ #if defined(__UCLIBC__)
+ LIBC=uclibc
+ #elif defined(__dietlibc__)
+ LIBC=dietlibc
+ #else
+ LIBC=gnu
+ #endif
+ EOF
+ eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC'`
+ ;;
+esac
+
# Note: order is significant - the case branches are not exclusive.
case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
@@ -200,6 +215,10 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
echo "${machine}-${os}${release}"
exit ;;
+ *:Bitrig:*:*)
+ UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'`
+ echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE}
+ exit ;;
*:OpenBSD:*:*)
UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'`
echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE}
@@ -302,7 +321,7 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
echo arm-acorn-riscix${UNAME_RELEASE}
exit ;;
- arm:riscos:*:*|arm:RISCOS:*:*)
+ arm*:riscos:*:*|arm*:RISCOS:*:*)
echo arm-unknown-riscos
exit ;;
SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*)
@@ -801,6 +820,9 @@ EOF
i*:CYGWIN*:*)
echo ${UNAME_MACHINE}-pc-cygwin
exit ;;
+ *:MINGW64*:*)
+ echo ${UNAME_MACHINE}-pc-mingw64
+ exit ;;
*:MINGW*:*)
echo ${UNAME_MACHINE}-pc-mingw32
exit ;;
@@ -852,21 +874,21 @@ EOF
exit ;;
*:GNU:*:*)
# the GNU system
- echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
+ echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-${LIBC}`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
exit ;;
*:GNU/*:*:*)
# other systems with GNU libc and userland
- echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu
+ echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC}
exit ;;
i*86:Minix:*:*)
echo ${UNAME_MACHINE}-pc-minix
exit ;;
aarch64:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
aarch64_be:Linux:*:*)
UNAME_MACHINE=aarch64_be
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
alpha:Linux:*:*)
case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
@@ -879,59 +901,54 @@ EOF
EV68*) UNAME_MACHINE=alphaev68 ;;
esac
objdump --private-headers /bin/sh | grep -q ld.so.1
- if test "$?" = 0 ; then LIBC="libc1" ; else LIBC="" ; fi
- echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC}
+ if test "$?" = 0 ; then LIBC="gnulibc1" ; fi
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ arc:Linux:*:* | arceb:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
arm*:Linux:*:*)
eval $set_cc_for_build
if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \
| grep -q __ARM_EABI__
then
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
else
if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \
| grep -q __ARM_PCS_VFP
then
- echo ${UNAME_MACHINE}-unknown-linux-gnueabi
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi
else
- echo ${UNAME_MACHINE}-unknown-linux-gnueabihf
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf
fi
fi
exit ;;
avr32*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
cris:Linux:*:*)
- echo ${UNAME_MACHINE}-axis-linux-gnu
+ echo ${UNAME_MACHINE}-axis-linux-${LIBC}
exit ;;
crisv32:Linux:*:*)
- echo ${UNAME_MACHINE}-axis-linux-gnu
+ echo ${UNAME_MACHINE}-axis-linux-${LIBC}
exit ;;
frv:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
hexagon:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
i*86:Linux:*:*)
- LIBC=gnu
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #ifdef __dietlibc__
- LIBC=dietlibc
- #endif
-EOF
- eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC'`
- echo "${UNAME_MACHINE}-pc-linux-${LIBC}"
+ echo ${UNAME_MACHINE}-pc-linux-${LIBC}
exit ;;
ia64:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
m32r*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
m68*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
mips:Linux:*:* | mips64:Linux:*:*)
eval $set_cc_for_build
@@ -950,54 +967,63 @@ EOF
#endif
EOF
eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'`
- test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; }
+ test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; }
;;
+ or1k:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
or32:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
padre:Linux:*:*)
- echo sparc-unknown-linux-gnu
+ echo sparc-unknown-linux-${LIBC}
exit ;;
parisc64:Linux:*:* | hppa64:Linux:*:*)
- echo hppa64-unknown-linux-gnu
+ echo hppa64-unknown-linux-${LIBC}
exit ;;
parisc:Linux:*:* | hppa:Linux:*:*)
# Look for CPU level
case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
- PA7*) echo hppa1.1-unknown-linux-gnu ;;
- PA8*) echo hppa2.0-unknown-linux-gnu ;;
- *) echo hppa-unknown-linux-gnu ;;
+ PA7*) echo hppa1.1-unknown-linux-${LIBC} ;;
+ PA8*) echo hppa2.0-unknown-linux-${LIBC} ;;
+ *) echo hppa-unknown-linux-${LIBC} ;;
esac
exit ;;
ppc64:Linux:*:*)
- echo powerpc64-unknown-linux-gnu
+ echo powerpc64-unknown-linux-${LIBC}
exit ;;
ppc:Linux:*:*)
- echo powerpc-unknown-linux-gnu
+ echo powerpc-unknown-linux-${LIBC}
+ exit ;;
+ ppc64le:Linux:*:*)
+ echo powerpc64le-unknown-linux-${LIBC}
+ exit ;;
+ ppcle:Linux:*:*)
+ echo powerpcle-unknown-linux-${LIBC}
exit ;;
s390:Linux:*:* | s390x:Linux:*:*)
- echo ${UNAME_MACHINE}-ibm-linux
+ echo ${UNAME_MACHINE}-ibm-linux-${LIBC}
exit ;;
sh64*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
sh*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
sparc:Linux:*:* | sparc64:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
tile*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
vax:Linux:*:*)
- echo ${UNAME_MACHINE}-dec-linux-gnu
+ echo ${UNAME_MACHINE}-dec-linux-${LIBC}
exit ;;
x86_64:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
xtensa*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
i*86:DYNIX/ptx:4*:*)
# ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
@@ -1201,6 +1227,9 @@ EOF
BePC:Haiku:*:*) # Haiku running on Intel PC compatible.
echo i586-pc-haiku
exit ;;
+ x86_64:Haiku:*:*)
+ echo x86_64-unknown-haiku
+ exit ;;
SX-4:SUPER-UX:*:*)
echo sx4-nec-superux${UNAME_RELEASE}
exit ;;
@@ -1227,19 +1256,21 @@ EOF
exit ;;
*:Darwin:*:*)
UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown
- case $UNAME_PROCESSOR in
- i386)
- eval $set_cc_for_build
- if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
- if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \
- (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
- grep IS_64BIT_ARCH >/dev/null
- then
- UNAME_PROCESSOR="x86_64"
- fi
- fi ;;
- unknown) UNAME_PROCESSOR=powerpc ;;
- esac
+ eval $set_cc_for_build
+ if test "$UNAME_PROCESSOR" = unknown ; then
+ UNAME_PROCESSOR=powerpc
+ fi
+ if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
+ if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \
+ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
+ grep IS_64BIT_ARCH >/dev/null
+ then
+ case $UNAME_PROCESSOR in
+ i386) UNAME_PROCESSOR=x86_64 ;;
+ powerpc) UNAME_PROCESSOR=powerpc64 ;;
+ esac
+ fi
+ fi
echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE}
exit ;;
*:procnto*:*:* | *:QNX:[0123456789]*:*)
@@ -1256,7 +1287,7 @@ EOF
NEO-?:NONSTOP_KERNEL:*:*)
echo neo-tandem-nsk${UNAME_RELEASE}
exit ;;
- NSE-?:NONSTOP_KERNEL:*:*)
+ NSE-*:NONSTOP_KERNEL:*:*)
echo nse-tandem-nsk${UNAME_RELEASE}
exit ;;
NSR-?:NONSTOP_KERNEL:*:*)
@@ -1330,9 +1361,6 @@ EOF
exit ;;
esac
-#echo '(No uname command or uname output not recognized.)' 1>&2
-#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2
-
eval $set_cc_for_build
cat >$dummy.c <<EOF
#ifdef _SEQUENT_
diff --git a/deps/jemalloc/config.sub b/deps/jemalloc/config.sub
index c894da455..61cb4bc22 100755
--- a/deps/jemalloc/config.sub
+++ b/deps/jemalloc/config.sub
@@ -1,24 +1,18 @@
#! /bin/sh
# Configuration validation subroutine script.
-# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
-# 2011, 2012 Free Software Foundation, Inc.
+# Copyright 1992-2013 Free Software Foundation, Inc.
-timestamp='2012-02-10'
+timestamp='2013-10-01'
-# This file is (in principle) common to ALL GNU software.
-# The presence of a machine in this file suggests that SOME GNU software
-# can handle that machine. It does not imply ALL GNU software can.
-#
-# This file is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
@@ -26,11 +20,12 @@ timestamp='2012-02-10'
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
# configuration script generated by Autoconf, you may include it under
-# the same distribution terms that you use for the rest of that program.
+# the same distribution terms that you use for the rest of that
+# program. This Exception is an additional permission under section 7
+# of the GNU General Public License, version 3 ("GPLv3").
-# Please send patches to <config-patches@gnu.org>. Submit a context
-# diff and a properly formatted GNU ChangeLog entry.
+# Please send patches with a ChangeLog entry to config-patches@gnu.org.
#
# Configuration subroutine to validate and canonicalize a configuration type.
# Supply the specified configuration type as an argument.
@@ -73,9 +68,7 @@ Report bugs and patches to <config-patches@gnu.org>."
version="\
GNU config.sub ($timestamp)
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
-2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
-Free Software Foundation, Inc.
+Copyright 1992-2013 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -123,7 +116,7 @@ esac
maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
case $maybe_os in
nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \
- linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \
+ linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \
knetbsd*-gnu* | netbsd*-gnu* | \
kopensolaris*-gnu* | \
storm-chaos* | os2-emx* | rtmk-nova*)
@@ -156,7 +149,7 @@ case $os in
-convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
-c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
-harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
- -apple | -axis | -knuth | -cray | -microblaze)
+ -apple | -axis | -knuth | -cray | -microblaze*)
os=
basic_machine=$1
;;
@@ -225,6 +218,12 @@ case $os in
-isc*)
basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
;;
+ -lynx*178)
+ os=-lynxos178
+ ;;
+ -lynx*5)
+ os=-lynxos5
+ ;;
-lynx*)
os=-lynxos
;;
@@ -253,10 +252,12 @@ case $basic_machine in
| alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
| alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
| am33_2.0 \
- | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \
- | be32 | be64 \
+ | arc | arceb \
+ | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \
+ | avr | avr32 \
+ | be32 | be64 \
| bfin \
- | c4x | clipper \
+ | c4x | c8051 | clipper \
| d10v | d30v | dlx | dsp16xx \
| epiphany \
| fido | fr30 | frv \
@@ -264,10 +265,11 @@ case $basic_machine in
| hexagon \
| i370 | i860 | i960 | ia64 \
| ip2k | iq2000 \
+ | k1om \
| le32 | le64 \
| lm32 \
| m32c | m32r | m32rle | m68000 | m68k | m88k \
- | maxq | mb | microblaze | mcore | mep | metag \
+ | maxq | mb | microblaze | microblazeel | mcore | mep | metag \
| mips | mipsbe | mipseb | mipsel | mipsle \
| mips16 \
| mips64 | mips64el \
@@ -285,16 +287,17 @@ case $basic_machine in
| mipsisa64r2 | mipsisa64r2el \
| mipsisa64sb1 | mipsisa64sb1el \
| mipsisa64sr71k | mipsisa64sr71kel \
+ | mipsr5900 | mipsr5900el \
| mipstx39 | mipstx39el \
| mn10200 | mn10300 \
| moxie \
| mt \
| msp430 \
| nds32 | nds32le | nds32be \
- | nios | nios2 \
+ | nios | nios2 | nios2eb | nios2el \
| ns16k | ns32k \
| open8 \
- | or32 \
+ | or1k | or32 \
| pdp10 | pdp11 | pj | pjl \
| powerpc | powerpc64 | powerpc64le | powerpcle \
| pyramid \
@@ -322,7 +325,7 @@ case $basic_machine in
c6x)
basic_machine=tic6x-unknown
;;
- m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | picochip)
+ m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip)
basic_machine=$basic_machine-unknown
os=-none
;;
@@ -364,13 +367,13 @@ case $basic_machine in
| aarch64-* | aarch64_be-* \
| alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
| alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
- | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \
+ | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \
| arm-* | armbe-* | armle-* | armeb-* | armv*-* \
| avr-* | avr32-* \
| be32-* | be64-* \
| bfin-* | bs2000-* \
| c[123]* | c30-* | [cjt]90-* | c4x-* \
- | clipper-* | craynv-* | cydra-* \
+ | c8051-* | clipper-* | craynv-* | cydra-* \
| d10v-* | d30v-* | dlx-* \
| elxsi-* \
| f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \
@@ -379,11 +382,13 @@ case $basic_machine in
| hexagon-* \
| i*86-* | i860-* | i960-* | ia64-* \
| ip2k-* | iq2000-* \
+ | k1om-* \
| le32-* | le64-* \
| lm32-* \
| m32c-* | m32r-* | m32rle-* \
| m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
- | m88110-* | m88k-* | maxq-* | mcore-* | metag-* | microblaze-* \
+ | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \
+ | microblaze-* | microblazeel-* \
| mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
| mips16-* \
| mips64-* | mips64el-* \
@@ -401,12 +406,13 @@ case $basic_machine in
| mipsisa64r2-* | mipsisa64r2el-* \
| mipsisa64sb1-* | mipsisa64sb1el-* \
| mipsisa64sr71k-* | mipsisa64sr71kel-* \
+ | mipsr5900-* | mipsr5900el-* \
| mipstx39-* | mipstx39el-* \
| mmix-* \
| mt-* \
| msp430-* \
| nds32-* | nds32le-* | nds32be-* \
- | nios-* | nios2-* \
+ | nios-* | nios2-* | nios2eb-* | nios2el-* \
| none-* | np1-* | ns16k-* | ns32k-* \
| open8-* \
| orion-* \
@@ -782,11 +788,15 @@ case $basic_machine in
basic_machine=ns32k-utek
os=-sysv
;;
- microblaze)
+ microblaze*)
basic_machine=microblaze-xilinx
;;
+ mingw64)
+ basic_machine=x86_64-pc
+ os=-mingw64
+ ;;
mingw32)
- basic_machine=i386-pc
+ basic_machine=i686-pc
os=-mingw32
;;
mingw32ce)
@@ -822,7 +832,7 @@ case $basic_machine in
basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'`
;;
msys)
- basic_machine=i386-pc
+ basic_machine=i686-pc
os=-msys
;;
mvs)
@@ -1013,7 +1023,11 @@ case $basic_machine in
basic_machine=i586-unknown
os=-pw32
;;
- rdos)
+ rdos | rdos64)
+ basic_machine=x86_64-pc
+ os=-rdos
+ ;;
+ rdos32)
basic_machine=i386-pc
os=-rdos
;;
@@ -1340,21 +1354,21 @@ case $os in
-gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
| -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\
| -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \
- | -sym* | -kopensolaris* \
+ | -sym* | -kopensolaris* | -plan9* \
| -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
| -aos* | -aros* \
| -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
| -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
| -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \
- | -openbsd* | -solidbsd* \
+ | -bitrig* | -openbsd* | -solidbsd* \
| -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \
| -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
| -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
| -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
| -chorusos* | -chorusrdb* | -cegcc* \
| -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
- | -mingw32* | -linux-gnu* | -linux-android* \
- | -linux-newlib* | -linux-uclibc* \
+ | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \
+ | -linux-newlib* | -linux-musl* | -linux-uclibc* \
| -uxpv* | -beos* | -mpeix* | -udk* \
| -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \
| -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
@@ -1486,9 +1500,6 @@ case $os in
-aros*)
os=-aros
;;
- -kaos*)
- os=-kaos
- ;;
-zvmoe)
os=-zvmoe
;;
@@ -1537,6 +1548,12 @@ case $basic_machine in
c4x-* | tic4x-*)
os=-coff
;;
+ c8051-*)
+ os=-elf
+ ;;
+ hexagon-*)
+ os=-elf
+ ;;
tic54x-*)
os=-coff
;;
@@ -1577,6 +1594,9 @@ case $basic_machine in
mips*-*)
os=-elf
;;
+ or1k-*)
+ os=-elf
+ ;;
or32-*)
os=-coff
;;
diff --git a/deps/jemalloc/configure b/deps/jemalloc/configure
index 0d7481291..2e5496bfb 100755
--- a/deps/jemalloc/configure
+++ b/deps/jemalloc/configure
@@ -1,11 +1,9 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.68.
+# Generated by GNU Autoconf 2.69.
#
#
-# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
-# 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software
-# Foundation, Inc.
+# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
#
#
# This configure script is free software; the Free Software Foundation
@@ -134,6 +132,31 @@ export LANGUAGE
# CDPATH.
(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+# Use a proper internal environment variable to ensure we don't fall
+ # into an infinite loop, continuously re-executing ourselves.
+ if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then
+ _as_can_reexec=no; export _as_can_reexec;
+ # We cannot yet assume a decent shell, so we have to provide a
+# neutralization value for shells without unset; and this also
+# works around shells that cannot unset nonexistent variables.
+# Preserve -v and -x to the replacement shell.
+BASH_ENV=/dev/null
+ENV=/dev/null
+(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
+case $- in # ((((
+ *v*x* | *x*v* ) as_opts=-vx ;;
+ *v* ) as_opts=-v ;;
+ *x* ) as_opts=-x ;;
+ * ) as_opts= ;;
+esac
+exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"}
+# Admittedly, this is quite paranoid, since all the known shells bail
+# out after a failed `exec'.
+$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2
+as_fn_exit 255
+ fi
+ # We don't want this to propagate to other subprocesses.
+ { _as_can_reexec=; unset _as_can_reexec;}
if test "x$CONFIG_SHELL" = x; then
as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then :
emulate sh
@@ -167,7 +190,8 @@ if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then :
else
exitcode=1; echo positional parameters were not saved.
fi
-test x\$exitcode = x0 || exit 1"
+test x\$exitcode = x0 || exit 1
+test -x / || exit 1"
as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO
as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO
eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" &&
@@ -212,21 +236,25 @@ IFS=$as_save_IFS
if test "x$CONFIG_SHELL" != x; then :
- # We cannot yet assume a decent shell, so we have to provide a
- # neutralization value for shells without unset; and this also
- # works around shells that cannot unset nonexistent variables.
- # Preserve -v and -x to the replacement shell.
- BASH_ENV=/dev/null
- ENV=/dev/null
- (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
- export CONFIG_SHELL
- case $- in # ((((
- *v*x* | *x*v* ) as_opts=-vx ;;
- *v* ) as_opts=-v ;;
- *x* ) as_opts=-x ;;
- * ) as_opts= ;;
- esac
- exec "$CONFIG_SHELL" $as_opts "$as_myself" ${1+"$@"}
+ export CONFIG_SHELL
+ # We cannot yet assume a decent shell, so we have to provide a
+# neutralization value for shells without unset; and this also
+# works around shells that cannot unset nonexistent variables.
+# Preserve -v and -x to the replacement shell.
+BASH_ENV=/dev/null
+ENV=/dev/null
+(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
+case $- in # ((((
+ *v*x* | *x*v* ) as_opts=-vx ;;
+ *v* ) as_opts=-v ;;
+ *x* ) as_opts=-x ;;
+ * ) as_opts= ;;
+esac
+exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"}
+# Admittedly, this is quite paranoid, since all the known shells bail
+# out after a failed `exec'.
+$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2
+exit 255
fi
if test x$as_have_required = xno; then :
@@ -328,6 +356,14 @@ $as_echo X"$as_dir" |
} # as_fn_mkdir_p
+
+# as_fn_executable_p FILE
+# -----------------------
+# Test if FILE is an executable regular file.
+as_fn_executable_p ()
+{
+ test -f "$1" && test -x "$1"
+} # as_fn_executable_p
# as_fn_append VAR VALUE
# ----------------------
# Append the text in VALUE to the end of the definition contained in VAR. Take
@@ -449,6 +485,10 @@ as_cr_alnum=$as_cr_Letters$as_cr_digits
chmod +x "$as_me.lineno" ||
{ $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; }
+ # If we had to re-execute with $CONFIG_SHELL, we're ensured to have
+ # already done that, so ensure we don't try to do so again and fall
+ # in an infinite loop. This has already happened in practice.
+ _as_can_reexec=no; export _as_can_reexec
# Don't try to exec as it changes $[0], causing all sort of problems
# (the dirname of $[0] is not the place where we might find the
# original and so on. Autoconf is especially sensitive to this).
@@ -483,16 +523,16 @@ if (echo >conf$$.file) 2>/dev/null; then
# ... but there are two gotchas:
# 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
# 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
- # In both cases, we have to default to `cp -p'.
+ # In both cases, we have to default to `cp -pR'.
ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
- as_ln_s='cp -p'
+ as_ln_s='cp -pR'
elif ln conf$$.file conf$$ 2>/dev/null; then
as_ln_s=ln
else
- as_ln_s='cp -p'
+ as_ln_s='cp -pR'
fi
else
- as_ln_s='cp -p'
+ as_ln_s='cp -pR'
fi
rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
rmdir conf$$.dir 2>/dev/null
@@ -504,28 +544,8 @@ else
as_mkdir_p=false
fi
-if test -x / >/dev/null 2>&1; then
- as_test_x='test -x'
-else
- if ls -dL / >/dev/null 2>&1; then
- as_ls_L_option=L
- else
- as_ls_L_option=
- fi
- as_test_x='
- eval sh -c '\''
- if test -d "$1"; then
- test -d "$1/.";
- else
- case $1 in #(
- -*)set "./$1";;
- esac;
- case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #((
- ???[sx]*):;;*)false;;esac;fi
- '\'' sh
- '
-fi
-as_executable_p=$as_test_x
+as_test_x='test -x'
+as_executable_p=as_fn_executable_p
# Sed expression to map a string onto a valid CPP name.
as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
@@ -605,6 +625,7 @@ cfgoutputs_out
cfgoutputs_in
cfghdrs_out
cfghdrs_in
+enable_zone_allocator
enable_tls
enable_lazy_lock
jemalloc_version_gid
@@ -624,11 +645,13 @@ enable_tcache
enable_prof
enable_stats
enable_debug
+je_
install_suffix
+private_namespace
+enable_code_coverage
enable_experimental
AUTOCONF
LD
-AR
RANLIB
INSTALL_DATA
INSTALL_SCRIPT
@@ -636,6 +659,8 @@ INSTALL_PROGRAM
enable_autogen
RPATH_EXTRA
CC_MM
+AROUT
+ARFLAGS
MKLIB
LDTARGET
CTARGET
@@ -652,6 +677,7 @@ so
LD_PRELOAD_VAR
RPATH
abi
+AR
host_os
host_vendor
host_cpu
@@ -728,12 +754,15 @@ with_xslroot
with_rpath
enable_autogen
enable_experimental
+enable_code_coverage
with_mangling
with_jemalloc_prefix
+with_export
with_private_namespace
with_install_suffix
enable_cc_silence
enable_debug
+enable_ivsalloc
enable_stats
enable_prof
enable_prof_libunwind
@@ -750,6 +779,7 @@ enable_valgrind
enable_xmalloc
enable_lazy_lock
enable_tls
+enable_zone_allocator
'
ac_precious_vars='build_alias
host_alias
@@ -1215,8 +1245,6 @@ target=$target_alias
if test "x$host_alias" != x; then
if test "x$build_alias" = x; then
cross_compiling=maybe
- $as_echo "$as_me: WARNING: if you wanted to set the --build type, don't use --host.
- If a cross compiler is detected then cross compile mode will be used" >&2
elif test "x$build_alias" != "x$host_alias"; then
cross_compiling=yes
fi
@@ -1375,8 +1403,10 @@ Optional Features:
--enable-FEATURE[=ARG] include FEATURE [ARG=yes]
--enable-autogen Automatically regenerate configure output
--disable-experimental Disable support for the experimental API
+ --enable-code-coverage Enable code coverage
--enable-cc-silence Silence irrelevant compiler warnings
- --enable-debug Build debugging code
+ --enable-debug Build debugging code (implies --enable-ivsalloc)
+ --enable-ivsalloc Validate pointers passed through the public API
--disable-stats Disable statistics calculation/reporting
--enable-prof Enable allocation profiling
--enable-prof-libunwind Use libunwind for backtracing
@@ -1393,6 +1423,8 @@ Optional Features:
--enable-xmalloc Support xmalloc option
--enable-lazy-lock Enable lazy locking (only lock when multi-threaded)
--disable-tls Disable thread-local storage (__thread keyword)
+ --disable-zone-allocator
+ Disable zone allocator for Darwin
Optional Packages:
--with-PACKAGE[=ARG] use PACKAGE [ARG=yes]
@@ -1402,6 +1434,7 @@ Optional Packages:
--with-mangling=<map> Mangle symbols in <map>
--with-jemalloc-prefix=<prefix>
Prefix to prepend to all public APIs
+ --without-export disable exporting jemalloc public APIs
--with-private-namespace=<prefix>
Prefix to prepend to all library-private APIs
--with-install-suffix=<suffix>
@@ -1487,9 +1520,9 @@ test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
configure
-generated by GNU Autoconf 2.68
+generated by GNU Autoconf 2.69
-Copyright (C) 2010 Free Software Foundation, Inc.
+Copyright (C) 2012 Free Software Foundation, Inc.
This configure script is free software; the Free Software Foundation
gives unlimited permission to copy, distribute and modify it.
_ACEOF
@@ -1617,6 +1650,37 @@ fi
} # ac_fn_c_try_run
+# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES
+# -------------------------------------------------------
+# Tests whether HEADER exists and can be compiled using the include files in
+# INCLUDES, setting the cache variable VAR accordingly.
+ac_fn_c_check_header_compile ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+#include <$2>
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ eval "$3=yes"
+else
+ eval "$3=no"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_header_compile
+
# ac_fn_c_compute_int LINENO EXPR VAR INCLUDES
# --------------------------------------------
# Tries to find the compile-time value of EXPR in a program that includes
@@ -1634,7 +1698,8 @@ int
main ()
{
static int test_array [1 - 2 * !(($2) >= 0)];
-test_array [0] = 0
+test_array [0] = 0;
+return test_array [0];
;
return 0;
@@ -1650,7 +1715,8 @@ int
main ()
{
static int test_array [1 - 2 * !(($2) <= $ac_mid)];
-test_array [0] = 0
+test_array [0] = 0;
+return test_array [0];
;
return 0;
@@ -1676,7 +1742,8 @@ int
main ()
{
static int test_array [1 - 2 * !(($2) < 0)];
-test_array [0] = 0
+test_array [0] = 0;
+return test_array [0];
;
return 0;
@@ -1692,7 +1759,8 @@ int
main ()
{
static int test_array [1 - 2 * !(($2) >= $ac_mid)];
-test_array [0] = 0
+test_array [0] = 0;
+return test_array [0];
;
return 0;
@@ -1726,7 +1794,8 @@ int
main ()
{
static int test_array [1 - 2 * !(($2) <= $ac_mid)];
-test_array [0] = 0
+test_array [0] = 0;
+return test_array [0];
;
return 0;
@@ -1795,37 +1864,6 @@ rm -f conftest.val
} # ac_fn_c_compute_int
-# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES
-# -------------------------------------------------------
-# Tests whether HEADER exists and can be compiled using the include files in
-# INCLUDES, setting the cache variable VAR accordingly.
-ac_fn_c_check_header_compile ()
-{
- as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
-$as_echo_n "checking for $2... " >&6; }
-if eval \${$3+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-$4
-#include <$2>
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
- eval "$3=yes"
-else
- eval "$3=no"
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-eval ac_res=\$$3
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
-$as_echo "$ac_res" >&6; }
- eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
-
-} # ac_fn_c_check_header_compile
-
# ac_fn_c_try_link LINENO
# -----------------------
# Try to link conftest.$ac_ext, and return whether this succeeded.
@@ -1853,7 +1891,7 @@ $as_echo "$ac_try_echo"; } >&5
test ! -s conftest.err
} && test -s conftest$ac_exeext && {
test "$cross_compiling" = yes ||
- $as_test_x conftest$ac_exeext
+ test -x conftest$ac_exeext
}; then :
ac_retval=0
else
@@ -2084,7 +2122,7 @@ This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
It was created by $as_me, which was
-generated by GNU Autoconf 2.68. Invocation command line was
+generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@
@@ -2498,7 +2536,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_path_XSLTPROC="$as_dir/$ac_word$ac_exec_ext"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -2507,6 +2545,7 @@ done
done
IFS=$as_save_IFS
+ test -z "$ac_cv_path_XSLTPROC" && ac_cv_path_XSLTPROC="false"
;;
esac
fi
@@ -2567,7 +2606,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_CC="${ac_tool_prefix}gcc"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -2607,7 +2646,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_ac_ct_CC="gcc"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -2660,7 +2699,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_CC="${ac_tool_prefix}cc"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -2701,7 +2740,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then
ac_prog_rejected=yes
continue
@@ -2759,7 +2798,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_CC="$ac_tool_prefix$ac_prog"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -2803,7 +2842,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_ac_ct_CC="$ac_prog"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -3249,8 +3288,7 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
#include <stdarg.h>
#include <stdio.h>
-#include <sys/types.h>
-#include <sys/stat.h>
+struct stat;
/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */
struct buf { int x; };
FILE * (*rcsopen) (struct buf *, struct stat *, int);
@@ -3395,16 +3433,24 @@ main ()
}
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+ je_cv_cflags_appended=-std=gnu99
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
CFLAGS="${TCFLAGS}"
fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ if test "x$je_cv_cflags_appended" = "x-std=gnu99" ; then
+ cat >>confdefs.h <<_ACEOF
+#define JEMALLOC_HAS_RESTRICT 1
+_ACEOF
+
+ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wall" >&5
$as_echo_n "checking whether compiler supports -Wall... " >&6; }
@@ -3429,10 +3475,12 @@ main ()
}
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+ je_cv_cflags_appended=-Wall
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
CFLAGS="${TCFLAGS}"
@@ -3463,10 +3511,12 @@ main ()
}
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+ je_cv_cflags_appended=-pipe
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
CFLAGS="${TCFLAGS}"
@@ -3497,10 +3547,12 @@ main ()
}
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+ je_cv_cflags_appended=-g3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
CFLAGS="${TCFLAGS}"
@@ -3533,10 +3585,12 @@ main ()
}
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+ je_cv_cflags_appended=-Zi
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
CFLAGS="${TCFLAGS}"
@@ -3567,10 +3621,12 @@ main ()
}
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+ je_cv_cflags_appended=-MT
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
CFLAGS="${TCFLAGS}"
@@ -3601,10 +3657,12 @@ main ()
}
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+ je_cv_cflags_appended=-W3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
CFLAGS="${TCFLAGS}"
@@ -3639,10 +3697,12 @@ main ()
}
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+ je_cv_cflags_appended=$EXTRA_CFLAGS
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
CFLAGS="${TCFLAGS}"
@@ -3805,7 +3865,7 @@ do
for ac_prog in grep ggrep; do
for ac_exec_ext in '' $ac_executable_extensions; do
ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext"
- { test -f "$ac_path_GREP" && $as_test_x "$ac_path_GREP"; } || continue
+ as_fn_executable_p "$ac_path_GREP" || continue
# Check for GNU ac_path_GREP and select it if it is found.
# Check for GNU $ac_path_GREP
case `"$ac_path_GREP" --version 2>&1` in
@@ -3871,7 +3931,7 @@ do
for ac_prog in egrep; do
for ac_exec_ext in '' $ac_executable_extensions; do
ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext"
- { test -f "$ac_path_EGREP" && $as_test_x "$ac_path_EGREP"; } || continue
+ as_fn_executable_p "$ac_path_EGREP" || continue
# Check for GNU ac_path_EGREP and select it if it is found.
# Check for GNU $ac_path_EGREP
case `"$ac_path_EGREP" --version 2>&1` in
@@ -4048,6 +4108,236 @@ fi
done
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether byte ordering is bigendian" >&5
+$as_echo_n "checking whether byte ordering is bigendian... " >&6; }
+if ${ac_cv_c_bigendian+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_cv_c_bigendian=unknown
+ # See if we're dealing with a universal compiler.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#ifndef __APPLE_CC__
+ not a universal capable compiler
+ #endif
+ typedef int dummy;
+
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+
+ # Check for potential -arch flags. It is not universal unless
+ # there are at least two -arch flags with different values.
+ ac_arch=
+ ac_prev=
+ for ac_word in $CC $CFLAGS $CPPFLAGS $LDFLAGS; do
+ if test -n "$ac_prev"; then
+ case $ac_word in
+ i?86 | x86_64 | ppc | ppc64)
+ if test -z "$ac_arch" || test "$ac_arch" = "$ac_word"; then
+ ac_arch=$ac_word
+ else
+ ac_cv_c_bigendian=universal
+ break
+ fi
+ ;;
+ esac
+ ac_prev=
+ elif test "x$ac_word" = "x-arch"; then
+ ac_prev=arch
+ fi
+ done
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ if test $ac_cv_c_bigendian = unknown; then
+ # See if sys/param.h defines the BYTE_ORDER macro.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <sys/types.h>
+ #include <sys/param.h>
+
+int
+main ()
+{
+#if ! (defined BYTE_ORDER && defined BIG_ENDIAN \
+ && defined LITTLE_ENDIAN && BYTE_ORDER && BIG_ENDIAN \
+ && LITTLE_ENDIAN)
+ bogus endian macros
+ #endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ # It does; now see whether it defined to BIG_ENDIAN or not.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <sys/types.h>
+ #include <sys/param.h>
+
+int
+main ()
+{
+#if BYTE_ORDER != BIG_ENDIAN
+ not big endian
+ #endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_c_bigendian=yes
+else
+ ac_cv_c_bigendian=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ fi
+ if test $ac_cv_c_bigendian = unknown; then
+ # See if <limits.h> defines _LITTLE_ENDIAN or _BIG_ENDIAN (e.g., Solaris).
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <limits.h>
+
+int
+main ()
+{
+#if ! (defined _LITTLE_ENDIAN || defined _BIG_ENDIAN)
+ bogus endian macros
+ #endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ # It does; now see whether it defined to _BIG_ENDIAN or not.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <limits.h>
+
+int
+main ()
+{
+#ifndef _BIG_ENDIAN
+ not big endian
+ #endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_c_bigendian=yes
+else
+ ac_cv_c_bigendian=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ fi
+ if test $ac_cv_c_bigendian = unknown; then
+ # Compile a test program.
+ if test "$cross_compiling" = yes; then :
+ # Try to guess by grepping values from an object file.
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+short int ascii_mm[] =
+ { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 };
+ short int ascii_ii[] =
+ { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 };
+ int use_ascii (int i) {
+ return ascii_mm[i] + ascii_ii[i];
+ }
+ short int ebcdic_ii[] =
+ { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 };
+ short int ebcdic_mm[] =
+ { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 };
+ int use_ebcdic (int i) {
+ return ebcdic_mm[i] + ebcdic_ii[i];
+ }
+ extern int foo;
+
+int
+main ()
+{
+return use_ascii (foo) == use_ebcdic (foo);
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ if grep BIGenDianSyS conftest.$ac_objext >/dev/null; then
+ ac_cv_c_bigendian=yes
+ fi
+ if grep LiTTleEnDian conftest.$ac_objext >/dev/null ; then
+ if test "$ac_cv_c_bigendian" = unknown; then
+ ac_cv_c_bigendian=no
+ else
+ # finding both strings is unlikely to happen, but who knows?
+ ac_cv_c_bigendian=unknown
+ fi
+ fi
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$ac_includes_default
+int
+main ()
+{
+
+ /* Are we little or big endian? From Harbison&Steele. */
+ union
+ {
+ long int l;
+ char c[sizeof (long int)];
+ } u;
+ u.l = 1;
+ return u.c[sizeof (long int) - 1] == 1;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_run "$LINENO"; then :
+ ac_cv_c_bigendian=no
+else
+ ac_cv_c_bigendian=yes
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+ conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_bigendian" >&5
+$as_echo "$ac_cv_c_bigendian" >&6; }
+ case $ac_cv_c_bigendian in #(
+ yes)
+ ac_cv_big_endian=1;; #(
+ no)
+ ac_cv_big_endian=0 ;; #(
+ universal)
+
+$as_echo "#define AC_APPLE_UNIVERSAL_BUILD 1" >>confdefs.h
+
+ ;; #(
+ *)
+ as_fn_error $? "unknown endianness
+ presetting ac_cv_c_bigendian=no (or yes) will help" "$LINENO" 5 ;;
+ esac
+
+if test "x${ac_cv_big_endian}" = "x1" ; then
+ cat >>confdefs.h <<_ACEOF
+#define JEMALLOC_BIG_ENDIAN
+_ACEOF
+
+fi
+
# The cast to long int works around a bug in the HP C Compiler
# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
@@ -4334,11 +4624,11 @@ CPU_SPINWAIT=""
case "${host_cpu}" in
i[345]86)
;;
- i686)
+ i686|x86_64)
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether __asm__ is compilable" >&5
-$as_echo_n "checking whether __asm__ is compilable... " >&6; }
-if ${je_cv_asm+:} false; then :
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pause instruction is compilable" >&5
+$as_echo_n "checking whether pause instruction is compilable... " >&6; }
+if ${je_cv_pause+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -4353,53 +4643,62 @@ __asm__ volatile("pause"); return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
- je_cv_asm=yes
+ je_cv_pause=yes
else
- je_cv_asm=no
+ je_cv_pause=no
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_asm" >&5
-$as_echo "$je_cv_asm" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pause" >&5
+$as_echo "$je_cv_pause" >&6; }
- if test "x${je_cv_asm}" = "xyes" ; then
+ if test "x${je_cv_pause}" = "xyes" ; then
CPU_SPINWAIT='__asm__ volatile("pause")'
fi
- ;;
- x86_64)
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether __asm__ syntax is compilable" >&5
-$as_echo_n "checking whether __asm__ syntax is compilable... " >&6; }
-if ${je_cv_asm+:} false; then :
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether SSE2 intrinsics is compilable" >&5
+$as_echo_n "checking whether SSE2 intrinsics is compilable... " >&6; }
+if ${je_cv_sse2+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
+#include <emmintrin.h>
+
int
main ()
{
-__asm__ volatile("pause"); return 0;
+
;
return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
- je_cv_asm=yes
+ je_cv_sse2=yes
else
- je_cv_asm=no
+ je_cv_sse2=no
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_asm" >&5
-$as_echo "$je_cv_asm" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_sse2" >&5
+$as_echo "$je_cv_sse2" >&6; }
+
+ if test "x${je_cv_sse2}" = "xyes" ; then
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_SSE2
+_ACEOF
- if test "x${je_cv_asm}" = "xyes" ; then
- CPU_SPINWAIT='__asm__ volatile("pause")'
fi
;;
+ powerpc)
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_ALTIVEC
+_ACEOF
+
+ ;;
*)
;;
esac
@@ -4422,9 +4721,106 @@ PIC_CFLAGS='-fPIC -DPIC'
CTARGET='-o $@'
LDTARGET='-o $@'
EXTRA_LDFLAGS=
-MKLIB='ar crus $@'
+ARFLAGS='crus'
+AROUT=' $@'
CC_MM=1
+
+
+
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args.
+set dummy ${ac_tool_prefix}ar; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_AR+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$AR"; then
+ ac_cv_prog_AR="$AR" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_AR="${ac_tool_prefix}ar"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+AR=$ac_cv_prog_AR
+if test -n "$AR"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5
+$as_echo "$AR" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_AR"; then
+ ac_ct_AR=$AR
+ # Extract the first word of "ar", so it can be a program name with args.
+set dummy ar; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_AR+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_AR"; then
+ ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_AR="ar"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_AR=$ac_cv_prog_ac_ct_AR
+if test -n "$ac_ct_AR"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5
+$as_echo "$ac_ct_AR" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_AR" = x; then
+ AR=":"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ AR=$ac_ct_AR
+ fi
+else
+ AR="$ac_cv_prog_AR"
+fi
+
+
default_munmap="1"
JEMALLOC_USABLE_SIZE_CONST="const"
case "${host}" in
@@ -4440,6 +4836,7 @@ case "${host}" in
force_tls="0"
DSO_LDFLAGS='-shared -Wl,-dylib_install_name,$(@F)'
SOREV="${rev}.${so}"
+ sbrk_deprecated="1"
;;
*-*-freebsd*)
CFLAGS="$CFLAGS"
@@ -4452,6 +4849,8 @@ case "${host}" in
CFLAGS="$CFLAGS"
CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE"
abi="elf"
+ $as_echo "#define JEMALLOC_HAS_ALLOCA_H 1" >>confdefs.h
+
$as_echo "#define JEMALLOC_PURGE_MADVISE_DONTNEED " >>confdefs.h
$as_echo "#define JEMALLOC_THREADED_INIT " >>confdefs.h
@@ -4492,6 +4891,8 @@ $as_echo "$abi" >&6; }
*-*-solaris2*)
CFLAGS="$CFLAGS"
abi="elf"
+ $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h
+
RPATH='-Wl,-R,$(1)'
CPPFLAGS="$CPPFLAGS -D_POSIX_PTHREAD_SEMANTICS"
LIBS="$LIBS -lposix4 -lsocket -lnsl"
@@ -4515,7 +4916,9 @@ $as_echo "$abi" >&6; }
EXTRA_LDFLAGS="-link -DEBUG"
CTARGET='-Fo$@'
LDTARGET='-Fe$@'
- MKLIB='lib -nologo -out:$@'
+ AR='lib'
+ ARFLAGS='-nologo -out:'
+ AROUT='$@'
CC_MM=
else
importlib="${so}"
@@ -4554,9 +4957,7 @@ _ACEOF
-if test "x$abi" != "xpecoff"; then
- LIBS="$LIBS -lm"
-fi
+
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether __attribute__ syntax is compilable" >&5
@@ -4614,10 +5015,12 @@ main ()
}
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+ je_cv_cflags_appended=-fvisibility=hidden
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
CFLAGS="${TCFLAGS}"
@@ -4651,10 +5054,12 @@ main ()
}
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+ je_cv_cflags_appended=-Werror
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
CFLAGS="${TCFLAGS}"
@@ -4767,7 +5172,7 @@ case $as_dir/ in #((
# by default.
for ac_prog in ginstall scoinst install; do
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then
if test $ac_prog = install &&
grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
# AIX install. It has an incompatible calling convention.
@@ -4840,7 +5245,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -4880,7 +5285,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_ac_ct_RANLIB="ranlib"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -4915,46 +5320,6 @@ else
RANLIB="$ac_cv_prog_RANLIB"
fi
-# Extract the first word of "ar", so it can be a program name with args.
-set dummy ar; ac_word=$2
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
-$as_echo_n "checking for $ac_word... " >&6; }
-if ${ac_cv_path_AR+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- case $AR in
- [\\/]* | ?:[\\/]*)
- ac_cv_path_AR="$AR" # Let the user override the test with a path.
- ;;
- *)
- as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
- IFS=$as_save_IFS
- test -z "$as_dir" && as_dir=.
- for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
- ac_cv_path_AR="$as_dir/$ac_word$ac_exec_ext"
- $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
- break 2
- fi
-done
- done
-IFS=$as_save_IFS
-
- ;;
-esac
-fi
-AR=$ac_cv_path_AR
-if test -n "$AR"; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5
-$as_echo "$AR" >&6; }
-else
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-fi
-
-
# Extract the first word of "ld", so it can be a program name with args.
set dummy ld; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
@@ -4973,7 +5338,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_path_LD="$as_dir/$ac_word$ac_exec_ext"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -4982,6 +5347,7 @@ done
done
IFS=$as_save_IFS
+ test -z "$ac_cv_path_LD" && ac_cv_path_LD="false"
;;
esac
fi
@@ -5013,7 +5379,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_path_AUTOCONF="$as_dir/$ac_word$ac_exec_ext"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -5022,6 +5388,7 @@ done
done
IFS=$as_save_IFS
+ test -z "$ac_cv_path_AUTOCONF" && ac_cv_path_AUTOCONF="false"
;;
esac
fi
@@ -5036,7 +5403,7 @@ fi
-public_syms="malloc_conf malloc_message malloc calloc posix_memalign aligned_alloc realloc free malloc_usable_size malloc_stats_print mallctl mallctlnametomib mallctlbymib"
+public_syms="malloc_conf malloc_message malloc calloc posix_memalign aligned_alloc realloc free mallocx rallocx xallocx sallocx dallocx nallocx mallctl mallctlnametomib mallctlbymib malloc_stats_print malloc_usable_size"
ac_fn_c_check_func "$LINENO" "memalign" "ac_cv_func_memalign"
if test "x$ac_cv_func_memalign" = xyes; then :
@@ -5073,6 +5440,103 @@ if test "x$enable_experimental" = "x1" ; then
fi
+GCOV_FLAGS=
+# Check whether --enable-code-coverage was given.
+if test "${enable_code_coverage+set}" = set; then :
+ enableval=$enable_code_coverage; if test "x$enable_code_coverage" = "xno" ; then
+ enable_code_coverage="0"
+else
+ enable_code_coverage="1"
+fi
+
+else
+ enable_code_coverage="0"
+
+fi
+
+if test "x$enable_code_coverage" = "x1" ; then
+ deoptimize="no"
+ echo "$CFLAGS $EXTRA_CFLAGS" | grep '\-O' >/dev/null || deoptimize="yes"
+ if test "x${deoptimize}" = "xyes" ; then
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O0" >&5
+$as_echo_n "checking whether compiler supports -O0... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-O0"
+else
+ CFLAGS="${CFLAGS} -O0"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-O0
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+ fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -fprofile-arcs -ftest-coverage" >&5
+$as_echo_n "checking whether compiler supports -fprofile-arcs -ftest-coverage... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-fprofile-arcs -ftest-coverage"
+else
+ CFLAGS="${CFLAGS} -fprofile-arcs -ftest-coverage"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-fprofile-arcs -ftest-coverage
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+ EXTRA_LDFLAGS="$EXTRA_LDFLAGS -fprofile-arcs -ftest-coverage"
+ $as_echo "#define JEMALLOC_CODE_COVERAGE " >>confdefs.h
+
+fi
+
+
# Check whether --with-mangling was given.
if test "${with_mangling+set}" = set; then :
@@ -5081,16 +5545,6 @@ else
mangling_map=""
fi
-for nm in `echo ${mangling_map} |tr ',' ' '` ; do
- k="`echo ${nm} |tr ':' ' ' |awk '{print $1}'`"
- n="je_${k}"
- m=`echo ${nm} |tr ':' ' ' |awk '{print $2}'`
- cat >>confdefs.h <<_ACEOF
-#define ${n} ${m}
-_ACEOF
-
- public_syms=`for sym in ${public_syms}; do echo "${sym}"; done |grep -v "^${k}\$" |tr '\n' ' '`
-done
# Check whether --with-jemalloc_prefix was given.
@@ -5116,39 +5570,33 @@ _ACEOF
_ACEOF
fi
-for stem in ${public_syms}; do
- n="je_${stem}"
- m="${JEMALLOC_PREFIX}${stem}"
- cat >>confdefs.h <<_ACEOF
-#define ${n} ${m}
-_ACEOF
-done
+
+# Check whether --with-export was given.
+if test "${with_export+set}" = set; then :
+ withval=$with_export; if test "x$with_export" = "xno"; then
+ $as_echo "#define JEMALLOC_EXPORT /**/" >>confdefs.h
+
+fi
+
+fi
+
# Check whether --with-private_namespace was given.
if test "${with_private_namespace+set}" = set; then :
- withval=$with_private_namespace; JEMALLOC_PRIVATE_NAMESPACE="$with_private_namespace"
+ withval=$with_private_namespace; JEMALLOC_PRIVATE_NAMESPACE="${with_private_namespace}je_"
else
- JEMALLOC_PRIVATE_NAMESPACE=""
+ JEMALLOC_PRIVATE_NAMESPACE="je_"
fi
cat >>confdefs.h <<_ACEOF
-#define JEMALLOC_PRIVATE_NAMESPACE "$JEMALLOC_PRIVATE_NAMESPACE"
-_ACEOF
-
-if test "x$JEMALLOC_PRIVATE_NAMESPACE" != "x" ; then
- cat >>confdefs.h <<_ACEOF
-#define JEMALLOC_N(string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix) ${JEMALLOC_PRIVATE_NAMESPACE}##string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix
+#define JEMALLOC_PRIVATE_NAMESPACE $JEMALLOC_PRIVATE_NAMESPACE
_ACEOF
-else
- cat >>confdefs.h <<_ACEOF
-#define JEMALLOC_N(string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix) string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix
-_ACEOF
+private_namespace="$JEMALLOC_PRIVATE_NAMESPACE"
-fi
# Check whether --with-install_suffix was given.
@@ -5162,37 +5610,70 @@ fi
install_suffix="$INSTALL_SUFFIX"
+je_="je_"
+
+
cfgoutputs_in="${srcroot}Makefile.in"
cfgoutputs_in="${cfgoutputs_in} ${srcroot}doc/html.xsl.in"
cfgoutputs_in="${cfgoutputs_in} ${srcroot}doc/manpages.xsl.in"
cfgoutputs_in="${cfgoutputs_in} ${srcroot}doc/jemalloc.xml.in"
-cfgoutputs_in="${cfgoutputs_in} ${srcroot}include/jemalloc/jemalloc.h.in"
+cfgoutputs_in="${cfgoutputs_in} ${srcroot}include/jemalloc/jemalloc_macros.h.in"
+cfgoutputs_in="${cfgoutputs_in} ${srcroot}include/jemalloc/jemalloc_protos.h.in"
cfgoutputs_in="${cfgoutputs_in} ${srcroot}include/jemalloc/internal/jemalloc_internal.h.in"
-cfgoutputs_in="${cfgoutputs_in} ${srcroot}test/jemalloc_test.h.in"
+cfgoutputs_in="${cfgoutputs_in} ${srcroot}test/test.sh.in"
+cfgoutputs_in="${cfgoutputs_in} ${srcroot}test/include/test/jemalloc_test.h.in"
cfgoutputs_out="Makefile"
cfgoutputs_out="${cfgoutputs_out} doc/html.xsl"
cfgoutputs_out="${cfgoutputs_out} doc/manpages.xsl"
-cfgoutputs_out="${cfgoutputs_out} doc/jemalloc${install_suffix}.xml"
-cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc${install_suffix}.h"
+cfgoutputs_out="${cfgoutputs_out} doc/jemalloc.xml"
+cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_macros.h"
+cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_protos.h"
cfgoutputs_out="${cfgoutputs_out} include/jemalloc/internal/jemalloc_internal.h"
-cfgoutputs_out="${cfgoutputs_out} test/jemalloc_test.h"
+cfgoutputs_out="${cfgoutputs_out} test/test.sh"
+cfgoutputs_out="${cfgoutputs_out} test/include/test/jemalloc_test.h"
cfgoutputs_tup="Makefile"
cfgoutputs_tup="${cfgoutputs_tup} doc/html.xsl:doc/html.xsl.in"
cfgoutputs_tup="${cfgoutputs_tup} doc/manpages.xsl:doc/manpages.xsl.in"
-cfgoutputs_tup="${cfgoutputs_tup} doc/jemalloc${install_suffix}.xml:doc/jemalloc.xml.in"
-cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc${install_suffix}.h:include/jemalloc/jemalloc.h.in"
+cfgoutputs_tup="${cfgoutputs_tup} doc/jemalloc.xml:doc/jemalloc.xml.in"
+cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_macros.h:include/jemalloc/jemalloc_macros.h.in"
+cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_protos.h:include/jemalloc/jemalloc_protos.h.in"
cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/internal/jemalloc_internal.h"
-cfgoutputs_tup="${cfgoutputs_tup} test/jemalloc_test.h:test/jemalloc_test.h.in"
+cfgoutputs_tup="${cfgoutputs_tup} test/test.sh:test/test.sh.in"
+cfgoutputs_tup="${cfgoutputs_tup} test/include/test/jemalloc_test.h:test/include/test/jemalloc_test.h.in"
cfghdrs_in="${srcroot}include/jemalloc/jemalloc_defs.h.in"
+cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/jemalloc_internal_defs.h.in"
+cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/private_namespace.sh"
+cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/private_unnamespace.sh"
+cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/private_symbols.txt"
+cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/public_namespace.sh"
+cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/public_unnamespace.sh"
cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/size_classes.sh"
-
-cfghdrs_out="include/jemalloc/jemalloc_defs${install_suffix}.h"
+cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/jemalloc_rename.sh"
+cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/jemalloc_mangle.sh"
+cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/jemalloc.sh"
+cfghdrs_in="${cfghdrs_in} ${srcroot}test/include/test/jemalloc_test_defs.h.in"
+
+cfghdrs_out="include/jemalloc/jemalloc_defs.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc${install_suffix}.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_namespace.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_unnamespace.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_symbols.txt"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_namespace.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_unnamespace.h"
cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/size_classes.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_protos_jet.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_rename.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle_jet.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/jemalloc_internal_defs.h"
+cfghdrs_out="${cfghdrs_out} test/include/test/jemalloc_test_defs.h"
-cfghdrs_tup="include/jemalloc/jemalloc_defs${install_suffix}.h:include/jemalloc/jemalloc_defs.h.in"
+cfghdrs_tup="include/jemalloc/jemalloc_defs.h:include/jemalloc/jemalloc_defs.h.in"
+cfghdrs_tup="${cfghdrs_tup} include/jemalloc/internal/jemalloc_internal_defs.h:${srcroot}include/jemalloc/internal/jemalloc_internal_defs.h.in"
+cfghdrs_tup="${cfghdrs_tup} test/include/test/jemalloc_test_defs.h:${srcroot}test/include/test/jemalloc_test_defs.h.in"
# Check whether --enable-cc-silence was given.
if test "${enable_cc_silence+set}" = set; then :
@@ -5228,14 +5709,31 @@ fi
if test "x$enable_debug" = "x1" ; then
$as_echo "#define JEMALLOC_DEBUG " >>confdefs.h
- $as_echo "#define JEMALLOC_IVSALLOC " >>confdefs.h
+ enable_ivsalloc="1"
+fi
+
+
+# Check whether --enable-ivsalloc was given.
+if test "${enable_ivsalloc+set}" = set; then :
+ enableval=$enable_ivsalloc; if test "x$enable_ivsalloc" = "xno" ; then
+ enable_ivsalloc="0"
+else
+ enable_ivsalloc="1"
+fi
+
+else
+ enable_ivsalloc="0"
fi
+if test "x$enable_ivsalloc" = "x1" ; then
+ $as_echo "#define JEMALLOC_IVSALLOC " >>confdefs.h
+
+fi
if test "x$enable_debug" = "x0" -a "x$no_CFLAGS" = "xyes" ; then
optimize="no"
- echo "$EXTRA_CFLAGS" | grep "\-O" >/dev/null || optimize="yes"
+ echo "$CFLAGS $EXTRA_CFLAGS" | grep '\-O' >/dev/null || optimize="yes"
if test "x${optimize}" = "xyes" ; then
if test "x$GCC" = "xyes" ; then
@@ -5262,10 +5760,12 @@ main ()
}
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+ je_cv_cflags_appended=-O3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
CFLAGS="${TCFLAGS}"
@@ -5296,10 +5796,12 @@ main ()
}
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+ je_cv_cflags_appended=-funroll-loops
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
CFLAGS="${TCFLAGS}"
@@ -5331,10 +5833,12 @@ main ()
}
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+ je_cv_cflags_appended=-O2
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
CFLAGS="${TCFLAGS}"
@@ -5366,10 +5870,12 @@ main ()
}
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+ je_cv_cflags_appended=-O
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
CFLAGS="${TCFLAGS}"
@@ -5586,24 +6092,6 @@ else
enable_prof_libgcc="0"
fi
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking libgcc-based backtracing reliability on ${host_cpu}" >&5
-$as_echo_n "checking libgcc-based backtracing reliability on ${host_cpu}... " >&6; }
- case "${host_cpu}" in
- i[3456]86)
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: unreliable" >&5
-$as_echo "unreliable" >&6; }
- enable_prof_libgcc="0";
- ;;
- x86_64)
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: reliable" >&5
-$as_echo "reliable" >&6; }
- ;;
- *)
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: unreliable" >&5
-$as_echo "unreliable" >&6; }
- enable_prof_libgcc="0";
- ;;
- esac
if test "x${enable_prof_libgcc}" = "x1" ; then
backtrace_method="libgcc"
$as_echo "#define JEMALLOC_PROF_LIBGCC " >>confdefs.h
@@ -5628,6 +6116,42 @@ fi
if test "x$backtrace_method" = "x" -a "x$enable_prof_gcc" = "x1" \
-a "x$GCC" = "xyes" ; then
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -fno-omit-frame-pointer" >&5
+$as_echo_n "checking whether compiler supports -fno-omit-frame-pointer... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-fno-omit-frame-pointer"
+else
+ CFLAGS="${CFLAGS} -fno-omit-frame-pointer"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-fno-omit-frame-pointer
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
backtrace_method="gcc intrinsics"
$as_echo "#define JEMALLOC_PROF_GCC " >>confdefs.h
@@ -5648,6 +6172,11 @@ if test "x$enable_prof" = "x1" ; then
as_fn_error $? "Heap profiling requires TLS" "$LINENO" 5;
fi
force_tls="1"
+
+ if test "x$abi" != "xpecoff"; then
+ LIBS="$LIBS -lm"
+ fi
+
$as_echo "#define JEMALLOC_PROF " >>confdefs.h
fi
@@ -5695,7 +6224,7 @@ else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
-#define _GNU_SOURCE
+#define _GNU_SOURCE
#include <sys/mman.h>
int
@@ -5769,8 +6298,14 @@ else
fi
if test "x$have_sbrk" = "x1" ; then
- $as_echo "#define JEMALLOC_HAVE_SBRK " >>confdefs.h
+ if test "x$sbrk_deprecated" == "x1" ; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: Disabling dss allocation because sbrk is deprecated" >&5
+$as_echo "Disabling dss allocation because sbrk is deprecated" >&6; }
+ enable_dss="0"
+ else
+ $as_echo "#define JEMALLOC_HAVE_SBRK " >>confdefs.h
+ fi
else
enable_dss="0"
fi
@@ -5962,7 +6497,7 @@ int
main ()
{
- long result;
+ int result;
FILE *f;
#ifdef _WIN32
@@ -5981,7 +6516,7 @@ main ()
if (f == NULL) {
return 1;
}
- fprintf(f, "%u\n", result);
+ fprintf(f, "%d\n", result);
fclose(f);
return 0;
@@ -6336,6 +6871,7 @@ else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
+#include <stdio.h>
#include <strings.h>
#include <string.h>
@@ -6345,6 +6881,7 @@ main ()
{
int rv = ffsl(0x08);
+ printf("%d\n", rv);
}
;
@@ -6601,7 +7138,28 @@ if test "x${je_cv_osspin}" = "xyes" ; then
fi
-if test "x${abi}" = "xmacho" ; then
+# Check whether --enable-zone-allocator was given.
+if test "${enable_zone_allocator+set}" = set; then :
+ enableval=$enable_zone_allocator; if test "x$enable_zone_allocator" = "xno" ; then
+ enable_zone_allocator="0"
+else
+ enable_zone_allocator="1"
+fi
+
+else
+ if test "x${abi}" = "xmacho"; then
+ enable_zone_allocator="1"
+fi
+
+
+fi
+
+
+
+if test "x${enable_zone_allocator}" = "x1" ; then
+ if test "x${abi}" != "xmacho"; then
+ as_fn_error $? "--enable-zone-allocator is only supported on Darwin" "$LINENO" 5
+ fi
$as_echo "#define JEMALLOC_IVSALLOC " >>confdefs.h
$as_echo "#define JEMALLOC_ZONE " >>confdefs.h
@@ -6766,60 +7324,60 @@ else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
-#include <stdbool.h>
-#ifndef bool
- "error: bool is not defined"
-#endif
-#ifndef false
- "error: false is not defined"
-#endif
-#if false
- "error: false is not 0"
-#endif
-#ifndef true
- "error: true is not defined"
-#endif
-#if true != 1
- "error: true is not 1"
-#endif
-#ifndef __bool_true_false_are_defined
- "error: __bool_true_false_are_defined is not defined"
-#endif
-
- struct s { _Bool s: 1; _Bool t; } s;
-
- char a[true == 1 ? 1 : -1];
- char b[false == 0 ? 1 : -1];
- char c[__bool_true_false_are_defined == 1 ? 1 : -1];
- char d[(bool) 0.5 == true ? 1 : -1];
- /* See body of main program for 'e'. */
- char f[(_Bool) 0.0 == false ? 1 : -1];
- char g[true];
- char h[sizeof (_Bool)];
- char i[sizeof s.t];
- enum { j = false, k = true, l = false * true, m = true * 256 };
- /* The following fails for
- HP aC++/ANSI C B3910B A.05.55 [Dec 04 2003]. */
- _Bool n[m];
- char o[sizeof n == m * sizeof n[0] ? 1 : -1];
- char p[-1 - (_Bool) 0 < 0 && -1 - (bool) 0 < 0 ? 1 : -1];
- /* Catch a bug in an HP-UX C compiler. See
- http://gcc.gnu.org/ml/gcc-patches/2003-12/msg02303.html
- http://lists.gnu.org/archive/html/bug-coreutils/2005-11/msg00161.html
- */
- _Bool q = true;
- _Bool *pq = &q;
+ #include <stdbool.h>
+ #ifndef bool
+ "error: bool is not defined"
+ #endif
+ #ifndef false
+ "error: false is not defined"
+ #endif
+ #if false
+ "error: false is not 0"
+ #endif
+ #ifndef true
+ "error: true is not defined"
+ #endif
+ #if true != 1
+ "error: true is not 1"
+ #endif
+ #ifndef __bool_true_false_are_defined
+ "error: __bool_true_false_are_defined is not defined"
+ #endif
+
+ struct s { _Bool s: 1; _Bool t; } s;
+
+ char a[true == 1 ? 1 : -1];
+ char b[false == 0 ? 1 : -1];
+ char c[__bool_true_false_are_defined == 1 ? 1 : -1];
+ char d[(bool) 0.5 == true ? 1 : -1];
+ /* See body of main program for 'e'. */
+ char f[(_Bool) 0.0 == false ? 1 : -1];
+ char g[true];
+ char h[sizeof (_Bool)];
+ char i[sizeof s.t];
+ enum { j = false, k = true, l = false * true, m = true * 256 };
+ /* The following fails for
+ HP aC++/ANSI C B3910B A.05.55 [Dec 04 2003]. */
+ _Bool n[m];
+ char o[sizeof n == m * sizeof n[0] ? 1 : -1];
+ char p[-1 - (_Bool) 0 < 0 && -1 - (bool) 0 < 0 ? 1 : -1];
+ /* Catch a bug in an HP-UX C compiler. See
+ http://gcc.gnu.org/ml/gcc-patches/2003-12/msg02303.html
+ http://lists.gnu.org/archive/html/bug-coreutils/2005-11/msg00161.html
+ */
+ _Bool q = true;
+ _Bool *pq = &q;
int
main ()
{
- bool e = &s;
- *pq |= q;
- *pq |= ! q;
- /* Refer to every declared value, to avoid compiler optimizations. */
- return (!a + !b + !c + !d + !e + !f + !g + !h + !i + !!j + !k + !!l
- + !m + !n + !o + !p + !q + !pq);
+ bool e = &s;
+ *pq |= q;
+ *pq |= ! q;
+ /* Refer to every declared value, to avoid compiler optimizations. */
+ return (!a + !b + !c + !d + !e + !f + !g + !h + !i + !!j + !k + !!l
+ + !m + !n + !o + !p + !q + !pq);
;
return 0;
@@ -6834,7 +7392,7 @@ rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdbool_h" >&5
$as_echo "$ac_cv_header_stdbool_h" >&6; }
-ac_fn_c_check_type "$LINENO" "_Bool" "ac_cv_type__Bool" "$ac_includes_default"
+ ac_fn_c_check_type "$LINENO" "_Bool" "ac_cv_type__Bool" "$ac_includes_default"
if test "x$ac_cv_type__Bool" = xyes; then :
cat >>confdefs.h <<_ACEOF
@@ -6844,6 +7402,7 @@ _ACEOF
fi
+
if test $ac_cv_header_stdbool_h = yes; then
$as_echo "#define HAVE_STDBOOL_H 1" >>confdefs.h
@@ -6851,14 +7410,36 @@ $as_echo "#define HAVE_STDBOOL_H 1" >>confdefs.h
fi
+
+ac_config_commands="$ac_config_commands include/jemalloc/internal/private_namespace.h"
+
+ac_config_commands="$ac_config_commands include/jemalloc/internal/private_unnamespace.h"
+
+ac_config_commands="$ac_config_commands include/jemalloc/internal/public_symbols.txt"
+
+ac_config_commands="$ac_config_commands include/jemalloc/internal/public_namespace.h"
+
+ac_config_commands="$ac_config_commands include/jemalloc/internal/public_unnamespace.h"
+
ac_config_commands="$ac_config_commands include/jemalloc/internal/size_classes.h"
+ac_config_commands="$ac_config_commands include/jemalloc/jemalloc_protos_jet.h"
+
+ac_config_commands="$ac_config_commands include/jemalloc/jemalloc_rename.h"
+
+ac_config_commands="$ac_config_commands include/jemalloc/jemalloc_mangle.h"
+
+ac_config_commands="$ac_config_commands include/jemalloc/jemalloc_mangle_jet.h"
+
+ac_config_commands="$ac_config_commands include/jemalloc/jemalloc.h"
+
ac_config_headers="$ac_config_headers $cfghdrs_tup"
+
ac_config_files="$ac_config_files $cfgoutputs_tup config.stamp bin/jemalloc.sh"
@@ -6973,6 +7554,7 @@ LTLIBOBJS=$ac_ltlibobjs
+
: "${CONFIG_STATUS=./config.status}"
ac_write_fail=0
ac_clean_files_save=$ac_clean_files
@@ -7270,16 +7852,16 @@ if (echo >conf$$.file) 2>/dev/null; then
# ... but there are two gotchas:
# 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
# 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
- # In both cases, we have to default to `cp -p'.
+ # In both cases, we have to default to `cp -pR'.
ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
- as_ln_s='cp -p'
+ as_ln_s='cp -pR'
elif ln conf$$.file conf$$ 2>/dev/null; then
as_ln_s=ln
else
- as_ln_s='cp -p'
+ as_ln_s='cp -pR'
fi
else
- as_ln_s='cp -p'
+ as_ln_s='cp -pR'
fi
rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
rmdir conf$$.dir 2>/dev/null
@@ -7339,28 +7921,16 @@ else
as_mkdir_p=false
fi
-if test -x / >/dev/null 2>&1; then
- as_test_x='test -x'
-else
- if ls -dL / >/dev/null 2>&1; then
- as_ls_L_option=L
- else
- as_ls_L_option=
- fi
- as_test_x='
- eval sh -c '\''
- if test -d "$1"; then
- test -d "$1/.";
- else
- case $1 in #(
- -*)set "./$1";;
- esac;
- case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #((
- ???[sx]*):;;*)false;;esac;fi
- '\'' sh
- '
-fi
-as_executable_p=$as_test_x
+
+# as_fn_executable_p FILE
+# -----------------------
+# Test if FILE is an executable regular file.
+as_fn_executable_p ()
+{
+ test -f "$1" && test -x "$1"
+} # as_fn_executable_p
+as_test_x='test -x'
+as_executable_p=as_fn_executable_p
# Sed expression to map a string onto a valid CPP name.
as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
@@ -7382,7 +7952,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# values after options handling.
ac_log="
This file was extended by $as_me, which was
-generated by GNU Autoconf 2.68. Invocation command line was
+generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
CONFIG_HEADERS = $CONFIG_HEADERS
@@ -7448,10 +8018,10 @@ cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
config.status
-configured by $0, generated by GNU Autoconf 2.68,
+configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\"
-Copyright (C) 2010 Free Software Foundation, Inc.
+Copyright (C) 2012 Free Software Foundation, Inc.
This config.status script is free software; the Free Software Foundation
gives unlimited permission to copy, distribute and modify it."
@@ -7540,7 +8110,7 @@ fi
_ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
if \$ac_cs_recheck; then
- set X '$SHELL' '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
+ set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
shift
\$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6
CONFIG_SHELL='$SHELL'
@@ -7561,6 +8131,58 @@ _ASBOX
_ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+#
+# INIT-COMMANDS
+#
+
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+
+
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+
+
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+ mangling_map="${mangling_map}"
+ public_syms="${public_syms}"
+ JEMALLOC_PREFIX="${JEMALLOC_PREFIX}"
+
+
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+
+
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+
+
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+
+
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+
+
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+
+
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+
+
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+
+
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+ install_suffix="${install_suffix}"
+
+
_ACEOF
cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
@@ -7569,7 +8191,17 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
for ac_config_target in $ac_config_targets
do
case $ac_config_target in
+ "include/jemalloc/internal/private_namespace.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/private_namespace.h" ;;
+ "include/jemalloc/internal/private_unnamespace.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/private_unnamespace.h" ;;
+ "include/jemalloc/internal/public_symbols.txt") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/public_symbols.txt" ;;
+ "include/jemalloc/internal/public_namespace.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/public_namespace.h" ;;
+ "include/jemalloc/internal/public_unnamespace.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/public_unnamespace.h" ;;
"include/jemalloc/internal/size_classes.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/size_classes.h" ;;
+ "include/jemalloc/jemalloc_protos_jet.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_protos_jet.h" ;;
+ "include/jemalloc/jemalloc_rename.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_rename.h" ;;
+ "include/jemalloc/jemalloc_mangle.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_mangle.h" ;;
+ "include/jemalloc/jemalloc_mangle_jet.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_mangle_jet.h" ;;
+ "include/jemalloc/jemalloc.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc.h" ;;
"$cfghdrs_tup") CONFIG_HEADERS="$CONFIG_HEADERS $cfghdrs_tup" ;;
"$cfgoutputs_tup") CONFIG_FILES="$CONFIG_FILES $cfgoutputs_tup" ;;
"config.stamp") CONFIG_FILES="$CONFIG_FILES config.stamp" ;;
@@ -8129,10 +8761,62 @@ $as_echo "$as_me: executing $ac_file commands" >&6;}
case $ac_file$ac_mode in
+ "include/jemalloc/internal/private_namespace.h":C)
+ mkdir -p "${objroot}include/jemalloc/internal"
+ "${srcdir}/include/jemalloc/internal/private_namespace.sh" "${srcdir}/include/jemalloc/internal/private_symbols.txt" > "${objroot}include/jemalloc/internal/private_namespace.h"
+ ;;
+ "include/jemalloc/internal/private_unnamespace.h":C)
+ mkdir -p "${objroot}include/jemalloc/internal"
+ "${srcdir}/include/jemalloc/internal/private_unnamespace.sh" "${srcdir}/include/jemalloc/internal/private_symbols.txt" > "${objroot}include/jemalloc/internal/private_unnamespace.h"
+ ;;
+ "include/jemalloc/internal/public_symbols.txt":C)
+ f="${objroot}include/jemalloc/internal/public_symbols.txt"
+ mkdir -p "${objroot}include/jemalloc/internal"
+ cp /dev/null "${f}"
+ for nm in `echo ${mangling_map} |tr ',' ' '` ; do
+ n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
+ m=`echo ${nm} |tr ':' ' ' |awk '{print $2}'`
+ echo "${n}:${m}" >> "${f}"
+ public_syms=`for sym in ${public_syms}; do echo "${sym}"; done |grep -v "^${n}\$" |tr '\n' ' '`
+ done
+ for sym in ${public_syms} ; do
+ n="${sym}"
+ m="${JEMALLOC_PREFIX}${sym}"
+ echo "${n}:${m}" >> "${f}"
+ done
+ ;;
+ "include/jemalloc/internal/public_namespace.h":C)
+ mkdir -p "${objroot}include/jemalloc/internal"
+ "${srcdir}/include/jemalloc/internal/public_namespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_namespace.h"
+ ;;
+ "include/jemalloc/internal/public_unnamespace.h":C)
+ mkdir -p "${objroot}include/jemalloc/internal"
+ "${srcdir}/include/jemalloc/internal/public_unnamespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_unnamespace.h"
+ ;;
"include/jemalloc/internal/size_classes.h":C)
- mkdir -p "include/jemalloc/internal"
+ mkdir -p "${objroot}include/jemalloc/internal"
"${srcdir}/include/jemalloc/internal/size_classes.sh" > "${objroot}include/jemalloc/internal/size_classes.h"
;;
+ "include/jemalloc/jemalloc_protos_jet.h":C)
+ mkdir -p "${objroot}include/jemalloc"
+ cat "${srcdir}/include/jemalloc/jemalloc_protos.h.in" | sed -e 's/@je_@/jet_/g' > "${objroot}include/jemalloc/jemalloc_protos_jet.h"
+ ;;
+ "include/jemalloc/jemalloc_rename.h":C)
+ mkdir -p "${objroot}include/jemalloc"
+ "${srcdir}/include/jemalloc/jemalloc_rename.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/jemalloc_rename.h"
+ ;;
+ "include/jemalloc/jemalloc_mangle.h":C)
+ mkdir -p "${objroot}include/jemalloc"
+ "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" je_ > "${objroot}include/jemalloc/jemalloc_mangle.h"
+ ;;
+ "include/jemalloc/jemalloc_mangle_jet.h":C)
+ mkdir -p "${objroot}include/jemalloc"
+ "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" jet_ > "${objroot}include/jemalloc/jemalloc_mangle_jet.h"
+ ;;
+ "include/jemalloc/jemalloc.h":C)
+ mkdir -p "${objroot}include/jemalloc"
+ "${srcdir}/include/jemalloc/jemalloc.sh" "${objroot}" > "${objroot}include/jemalloc/jemalloc${install_suffix}.h"
+ ;;
esac
done # for ac_tag
@@ -8188,6 +8872,8 @@ $as_echo "CPPFLAGS : ${CPPFLAGS}" >&6; }
$as_echo "CFLAGS : ${CFLAGS}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: LDFLAGS : ${LDFLAGS}" >&5
$as_echo "LDFLAGS : ${LDFLAGS}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}" >&5
+$as_echo "EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: LIBS : ${LIBS}" >&5
$as_echo "LIBS : ${LIBS}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: RPATH_EXTRA : ${RPATH_EXTRA}" >&5
@@ -8240,6 +8926,8 @@ $as_echo "experimental : ${enable_experimental}" >&6; }
$as_echo "cc-silence : ${enable_cc_silence}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: debug : ${enable_debug}" >&5
$as_echo "debug : ${enable_debug}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: code-coverage : ${enable_code_coverage}" >&5
+$as_echo "code-coverage : ${enable_code_coverage}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: stats : ${enable_stats}" >&5
$as_echo "stats : ${enable_stats}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: prof : ${enable_prof}" >&5
diff --git a/deps/jemalloc/configure.ac b/deps/jemalloc/configure.ac
index 1c52439e0..4de81dc1d 100644
--- a/deps/jemalloc/configure.ac
+++ b/deps/jemalloc/configure.ac
@@ -19,7 +19,9 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
]], [[
return 0;
]])],
+ [je_cv_cflags_appended=$1]
AC_MSG_RESULT([yes]),
+ [je_cv_cflags_appended=]
AC_MSG_RESULT([no])
[CFLAGS="${TCFLAGS}"]
)
@@ -86,7 +88,7 @@ MANDIR=`eval echo $MANDIR`
AC_SUBST([MANDIR])
dnl Support for building documentation.
-AC_PATH_PROG([XSLTPROC], [xsltproc], , [$PATH])
+AC_PATH_PROG([XSLTPROC], [xsltproc], [false], [$PATH])
if test -d "/usr/share/xml/docbook/stylesheet/docbook-xsl" ; then
DEFAULT_XSLROOT="/usr/share/xml/docbook/stylesheet/docbook-xsl"
elif test -d "/usr/share/sgml/docbook/xsl-stylesheets" ; then
@@ -128,6 +130,9 @@ if test "x$CFLAGS" = "x" ; then
no_CFLAGS="yes"
if test "x$GCC" = "xyes" ; then
JE_CFLAGS_APPEND([-std=gnu99])
+ if test "x$je_cv_cflags_appended" = "x-std=gnu99" ; then
+ AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT])
+ fi
JE_CFLAGS_APPEND([-Wall])
JE_CFLAGS_APPEND([-pipe])
JE_CFLAGS_APPEND([-g3])
@@ -145,6 +150,11 @@ if test "x$EXTRA_CFLAGS" != "x" ; then
fi
AC_PROG_CPP
+AC_C_BIGENDIAN([ac_cv_big_endian=1], [ac_cv_big_endian=0])
+if test "x${ac_cv_big_endian}" = "x1" ; then
+ AC_DEFINE_UNQUOTED([JEMALLOC_BIG_ENDIAN], [ ])
+fi
+
AC_CHECK_SIZEOF([void *])
if test "x${ac_cv_sizeof_void_p}" = "x8" ; then
LG_SIZEOF_PTR=3
@@ -193,20 +203,25 @@ CPU_SPINWAIT=""
case "${host_cpu}" in
i[[345]]86)
;;
- i686)
- JE_COMPILABLE([__asm__], [], [[__asm__ volatile("pause"); return 0;]],
- [je_cv_asm])
- if test "x${je_cv_asm}" = "xyes" ; then
+ i686|x86_64)
+ JE_COMPILABLE([pause instruction], [],
+ [[__asm__ volatile("pause"); return 0;]],
+ [je_cv_pause])
+ if test "x${je_cv_pause}" = "xyes" ; then
CPU_SPINWAIT='__asm__ volatile("pause")'
fi
- ;;
- x86_64)
- JE_COMPILABLE([__asm__ syntax], [],
- [[__asm__ volatile("pause"); return 0;]], [je_cv_asm])
- if test "x${je_cv_asm}" = "xyes" ; then
- CPU_SPINWAIT='__asm__ volatile("pause")'
+ dnl emmintrin.h fails to compile unless MMX, SSE, and SSE2 are
+ dnl supported.
+ JE_COMPILABLE([SSE2 intrinsics], [
+#include <emmintrin.h>
+], [], [je_cv_sse2])
+ if test "x${je_cv_sse2}" = "xyes" ; then
+ AC_DEFINE_UNQUOTED([HAVE_SSE2], [ ])
fi
;;
+ powerpc)
+ AC_DEFINE_UNQUOTED([HAVE_ALTIVEC], [ ])
+ ;;
*)
;;
esac
@@ -226,9 +241,15 @@ PIC_CFLAGS='-fPIC -DPIC'
CTARGET='-o $@'
LDTARGET='-o $@'
EXTRA_LDFLAGS=
-MKLIB='ar crus $@'
+ARFLAGS='crus'
+AROUT=' $@'
CC_MM=1
+AN_MAKEVAR([AR], [AC_PROG_AR])
+AN_PROGRAM([ar], [AC_PROG_AR])
+AC_DEFUN([AC_PROG_AR], [AC_CHECK_TOOL(AR, ar, :)])
+AC_PROG_AR
+
dnl Platform-specific settings. abi and RPATH can probably be determined
dnl programmatically, but doing so is error-prone, which makes it generally
dnl not worth the trouble.
@@ -250,6 +271,7 @@ case "${host}" in
force_tls="0"
DSO_LDFLAGS='-shared -Wl,-dylib_install_name,$(@F)'
SOREV="${rev}.${so}"
+ sbrk_deprecated="1"
;;
*-*-freebsd*)
CFLAGS="$CFLAGS"
@@ -261,6 +283,7 @@ case "${host}" in
CFLAGS="$CFLAGS"
CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE"
abi="elf"
+ AC_DEFINE([JEMALLOC_HAS_ALLOCA_H])
AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED], [ ])
AC_DEFINE([JEMALLOC_THREADED_INIT], [ ])
JEMALLOC_USABLE_SIZE_CONST=""
@@ -283,6 +306,7 @@ case "${host}" in
*-*-solaris2*)
CFLAGS="$CFLAGS"
abi="elf"
+ AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
RPATH='-Wl,-R,$(1)'
dnl Solaris needs this for sigwait().
CPPFLAGS="$CPPFLAGS -D_POSIX_PTHREAD_SEMANTICS"
@@ -309,7 +333,9 @@ case "${host}" in
EXTRA_LDFLAGS="-link -DEBUG"
CTARGET='-Fo$@'
LDTARGET='-Fe$@'
- MKLIB='lib -nologo -out:$@'
+ AR='lib'
+ ARFLAGS='-nologo -out:'
+ AROUT='$@'
CC_MM=
else
importlib="${so}"
@@ -342,13 +368,10 @@ AC_SUBST([PIC_CFLAGS])
AC_SUBST([CTARGET])
AC_SUBST([LDTARGET])
AC_SUBST([MKLIB])
+AC_SUBST([ARFLAGS])
+AC_SUBST([AROUT])
AC_SUBST([CC_MM])
-if test "x$abi" != "xpecoff"; then
- dnl Heap profiling uses the log(3) function.
- LIBS="$LIBS -lm"
-fi
-
JE_COMPILABLE([__attribute__ syntax],
[static __attribute__((unused)) void foo(void){}],
[],
@@ -402,11 +425,10 @@ AC_SUBST([enable_autogen])
AC_PROG_INSTALL
AC_PROG_RANLIB
-AC_PATH_PROG([AR], [ar], , [$PATH])
-AC_PATH_PROG([LD], [ld], , [$PATH])
-AC_PATH_PROG([AUTOCONF], [autoconf], , [$PATH])
+AC_PATH_PROG([LD], [ld], [false], [$PATH])
+AC_PATH_PROG([AUTOCONF], [autoconf], [false], [$PATH])
-public_syms="malloc_conf malloc_message malloc calloc posix_memalign aligned_alloc realloc free malloc_usable_size malloc_stats_print mallctl mallctlnametomib mallctlbymib"
+public_syms="malloc_conf malloc_message malloc calloc posix_memalign aligned_alloc realloc free mallocx rallocx xallocx sallocx dallocx nallocx mallctl mallctlnametomib mallctlbymib malloc_stats_print malloc_usable_size"
dnl Check for allocator-related functions that should be wrapped.
AC_CHECK_FUNC([memalign],
@@ -434,18 +456,35 @@ if test "x$enable_experimental" = "x1" ; then
fi
AC_SUBST([enable_experimental])
+dnl Do not compute test code coverage by default.
+GCOV_FLAGS=
+AC_ARG_ENABLE([code-coverage],
+ [AS_HELP_STRING([--enable-code-coverage],
+ [Enable code coverage])],
+[if test "x$enable_code_coverage" = "xno" ; then
+ enable_code_coverage="0"
+else
+ enable_code_coverage="1"
+fi
+],
+[enable_code_coverage="0"]
+)
+if test "x$enable_code_coverage" = "x1" ; then
+ deoptimize="no"
+ echo "$CFLAGS $EXTRA_CFLAGS" | grep '\-O' >/dev/null || deoptimize="yes"
+ if test "x${deoptimize}" = "xyes" ; then
+ JE_CFLAGS_APPEND([-O0])
+ fi
+ JE_CFLAGS_APPEND([-fprofile-arcs -ftest-coverage])
+ EXTRA_LDFLAGS="$EXTRA_LDFLAGS -fprofile-arcs -ftest-coverage"
+ AC_DEFINE([JEMALLOC_CODE_COVERAGE], [ ])
+fi
+AC_SUBST([enable_code_coverage])
+
dnl Perform no name mangling by default.
AC_ARG_WITH([mangling],
[AS_HELP_STRING([--with-mangling=<map>], [Mangle symbols in <map>])],
[mangling_map="$with_mangling"], [mangling_map=""])
-for nm in `echo ${mangling_map} |tr ',' ' '` ; do
- k="`echo ${nm} |tr ':' ' ' |awk '{print $1}'`"
- n="je_${k}"
- m=`echo ${nm} |tr ':' ' ' |awk '{print $2}'`
- AC_DEFINE_UNQUOTED([${n}], [${m}])
- dnl Remove key from public_syms so that it isn't redefined later.
- public_syms=`for sym in ${public_syms}; do echo "${sym}"; done |grep -v "^${k}\$" |tr '\n' ' '`
-done
dnl Do not prefix public APIs by default.
AC_ARG_WITH([jemalloc_prefix],
@@ -462,27 +501,23 @@ if test "x$JEMALLOC_PREFIX" != "x" ; then
AC_DEFINE_UNQUOTED([JEMALLOC_PREFIX], ["$JEMALLOC_PREFIX"])
AC_DEFINE_UNQUOTED([JEMALLOC_CPREFIX], ["$JEMALLOC_CPREFIX"])
fi
-dnl Generate macros to rename public symbols. All public symbols are prefixed
-dnl with je_ in the source code, so these macro definitions are needed even if
-dnl --with-jemalloc-prefix wasn't specified.
-for stem in ${public_syms}; do
- n="je_${stem}"
- m="${JEMALLOC_PREFIX}${stem}"
- AC_DEFINE_UNQUOTED([${n}], [${m}])
-done
-dnl Do not mangle library-private APIs by default.
+AC_ARG_WITH([export],
+ [AS_HELP_STRING([--without-export], [disable exporting jemalloc public APIs])],
+ [if test "x$with_export" = "xno"; then
+ AC_DEFINE([JEMALLOC_EXPORT],[])
+fi]
+)
+
+dnl Mangle library-private APIs.
AC_ARG_WITH([private_namespace],
[AS_HELP_STRING([--with-private-namespace=<prefix>], [Prefix to prepend to all library-private APIs])],
- [JEMALLOC_PRIVATE_NAMESPACE="$with_private_namespace"],
- [JEMALLOC_PRIVATE_NAMESPACE=""]
+ [JEMALLOC_PRIVATE_NAMESPACE="${with_private_namespace}je_"],
+ [JEMALLOC_PRIVATE_NAMESPACE="je_"]
)
-AC_DEFINE_UNQUOTED([JEMALLOC_PRIVATE_NAMESPACE], ["$JEMALLOC_PRIVATE_NAMESPACE"])
-if test "x$JEMALLOC_PRIVATE_NAMESPACE" != "x" ; then
- AC_DEFINE_UNQUOTED([JEMALLOC_N(string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix)], [${JEMALLOC_PRIVATE_NAMESPACE}##string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix])
-else
- AC_DEFINE_UNQUOTED([JEMALLOC_N(string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix)], [string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix])
-fi
+AC_DEFINE_UNQUOTED([JEMALLOC_PRIVATE_NAMESPACE], [$JEMALLOC_PRIVATE_NAMESPACE])
+private_namespace="$JEMALLOC_PRIVATE_NAMESPACE"
+AC_SUBST([private_namespace])
dnl Do not add suffix to installed files by default.
AC_ARG_WITH([install_suffix],
@@ -493,37 +528,72 @@ AC_ARG_WITH([install_suffix],
install_suffix="$INSTALL_SUFFIX"
AC_SUBST([install_suffix])
+dnl Substitute @je_@ in jemalloc_protos.h.in, primarily to make generation of
+dnl jemalloc_protos_jet.h easy.
+je_="je_"
+AC_SUBST([je_])
+
cfgoutputs_in="${srcroot}Makefile.in"
cfgoutputs_in="${cfgoutputs_in} ${srcroot}doc/html.xsl.in"
cfgoutputs_in="${cfgoutputs_in} ${srcroot}doc/manpages.xsl.in"
cfgoutputs_in="${cfgoutputs_in} ${srcroot}doc/jemalloc.xml.in"
-cfgoutputs_in="${cfgoutputs_in} ${srcroot}include/jemalloc/jemalloc.h.in"
+cfgoutputs_in="${cfgoutputs_in} ${srcroot}include/jemalloc/jemalloc_macros.h.in"
+cfgoutputs_in="${cfgoutputs_in} ${srcroot}include/jemalloc/jemalloc_protos.h.in"
cfgoutputs_in="${cfgoutputs_in} ${srcroot}include/jemalloc/internal/jemalloc_internal.h.in"
-cfgoutputs_in="${cfgoutputs_in} ${srcroot}test/jemalloc_test.h.in"
+cfgoutputs_in="${cfgoutputs_in} ${srcroot}test/test.sh.in"
+cfgoutputs_in="${cfgoutputs_in} ${srcroot}test/include/test/jemalloc_test.h.in"
cfgoutputs_out="Makefile"
cfgoutputs_out="${cfgoutputs_out} doc/html.xsl"
cfgoutputs_out="${cfgoutputs_out} doc/manpages.xsl"
-cfgoutputs_out="${cfgoutputs_out} doc/jemalloc${install_suffix}.xml"
-cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc${install_suffix}.h"
+cfgoutputs_out="${cfgoutputs_out} doc/jemalloc.xml"
+cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_macros.h"
+cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_protos.h"
cfgoutputs_out="${cfgoutputs_out} include/jemalloc/internal/jemalloc_internal.h"
-cfgoutputs_out="${cfgoutputs_out} test/jemalloc_test.h"
+cfgoutputs_out="${cfgoutputs_out} test/test.sh"
+cfgoutputs_out="${cfgoutputs_out} test/include/test/jemalloc_test.h"
cfgoutputs_tup="Makefile"
cfgoutputs_tup="${cfgoutputs_tup} doc/html.xsl:doc/html.xsl.in"
cfgoutputs_tup="${cfgoutputs_tup} doc/manpages.xsl:doc/manpages.xsl.in"
-cfgoutputs_tup="${cfgoutputs_tup} doc/jemalloc${install_suffix}.xml:doc/jemalloc.xml.in"
-cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc${install_suffix}.h:include/jemalloc/jemalloc.h.in"
+cfgoutputs_tup="${cfgoutputs_tup} doc/jemalloc.xml:doc/jemalloc.xml.in"
+cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_macros.h:include/jemalloc/jemalloc_macros.h.in"
+cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_protos.h:include/jemalloc/jemalloc_protos.h.in"
cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/internal/jemalloc_internal.h"
-cfgoutputs_tup="${cfgoutputs_tup} test/jemalloc_test.h:test/jemalloc_test.h.in"
+cfgoutputs_tup="${cfgoutputs_tup} test/test.sh:test/test.sh.in"
+cfgoutputs_tup="${cfgoutputs_tup} test/include/test/jemalloc_test.h:test/include/test/jemalloc_test.h.in"
cfghdrs_in="${srcroot}include/jemalloc/jemalloc_defs.h.in"
+cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/jemalloc_internal_defs.h.in"
+cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/private_namespace.sh"
+cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/private_unnamespace.sh"
+cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/private_symbols.txt"
+cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/public_namespace.sh"
+cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/public_unnamespace.sh"
cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/size_classes.sh"
-
-cfghdrs_out="include/jemalloc/jemalloc_defs${install_suffix}.h"
+cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/jemalloc_rename.sh"
+cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/jemalloc_mangle.sh"
+cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/jemalloc.sh"
+cfghdrs_in="${cfghdrs_in} ${srcroot}test/include/test/jemalloc_test_defs.h.in"
+
+cfghdrs_out="include/jemalloc/jemalloc_defs.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc${install_suffix}.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_namespace.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_unnamespace.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_symbols.txt"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_namespace.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_unnamespace.h"
cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/size_classes.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_protos_jet.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_rename.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle_jet.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/jemalloc_internal_defs.h"
+cfghdrs_out="${cfghdrs_out} test/include/test/jemalloc_test_defs.h"
-cfghdrs_tup="include/jemalloc/jemalloc_defs${install_suffix}.h:include/jemalloc/jemalloc_defs.h.in"
+cfghdrs_tup="include/jemalloc/jemalloc_defs.h:include/jemalloc/jemalloc_defs.h.in"
+cfghdrs_tup="${cfghdrs_tup} include/jemalloc/internal/jemalloc_internal_defs.h:${srcroot}include/jemalloc/internal/jemalloc_internal_defs.h.in"
+cfghdrs_tup="${cfghdrs_tup} test/include/test/jemalloc_test_defs.h:${srcroot}test/include/test/jemalloc_test_defs.h.in"
dnl Do not silence irrelevant compiler warnings by default, since enabling this
dnl option incurs a performance penalty.
@@ -544,7 +614,7 @@ fi
dnl Do not compile with debugging by default.
AC_ARG_ENABLE([debug],
- [AS_HELP_STRING([--enable-debug], [Build debugging code])],
+ [AS_HELP_STRING([--enable-debug], [Build debugging code (implies --enable-ivsalloc)])],
[if test "x$enable_debug" = "xno" ; then
enable_debug="0"
else
@@ -555,15 +625,30 @@ fi
)
if test "x$enable_debug" = "x1" ; then
AC_DEFINE([JEMALLOC_DEBUG], [ ])
- AC_DEFINE([JEMALLOC_IVSALLOC], [ ])
+ enable_ivsalloc="1"
fi
AC_SUBST([enable_debug])
+dnl Do not validate pointers by default.
+AC_ARG_ENABLE([ivsalloc],
+ [AS_HELP_STRING([--enable-ivsalloc], [Validate pointers passed through the public API])],
+[if test "x$enable_ivsalloc" = "xno" ; then
+ enable_ivsalloc="0"
+else
+ enable_ivsalloc="1"
+fi
+],
+[enable_ivsalloc="0"]
+)
+if test "x$enable_ivsalloc" = "x1" ; then
+ AC_DEFINE([JEMALLOC_IVSALLOC], [ ])
+fi
+
dnl Only optimize if not debugging.
if test "x$enable_debug" = "x0" -a "x$no_CFLAGS" = "xyes" ; then
dnl Make sure that an optimization flag was not specified in EXTRA_CFLAGS.
optimize="no"
- echo "$EXTRA_CFLAGS" | grep "\-O" >/dev/null || optimize="yes"
+ echo "$CFLAGS $EXTRA_CFLAGS" | grep '\-O' >/dev/null || optimize="yes"
if test "x${optimize}" = "xyes" ; then
if test "x$GCC" = "xyes" ; then
JE_CFLAGS_APPEND([-O3])
@@ -662,22 +747,6 @@ if test "x$backtrace_method" = "x" -a "x$enable_prof_libgcc" = "x1" \
-a "x$GCC" = "xyes" ; then
AC_CHECK_HEADERS([unwind.h], , [enable_prof_libgcc="0"])
AC_CHECK_LIB([gcc], [_Unwind_Backtrace], [LIBS="$LIBS -lgcc"], [enable_prof_libgcc="0"])
- dnl The following is conservative, in that it only has entries for CPUs on
- dnl which jemalloc has been tested.
- AC_MSG_CHECKING([libgcc-based backtracing reliability on ${host_cpu}])
- case "${host_cpu}" in
- i[[3456]]86)
- AC_MSG_RESULT([unreliable])
- enable_prof_libgcc="0";
- ;;
- x86_64)
- AC_MSG_RESULT([reliable])
- ;;
- *)
- AC_MSG_RESULT([unreliable])
- enable_prof_libgcc="0";
- ;;
- esac
if test "x${enable_prof_libgcc}" = "x1" ; then
backtrace_method="libgcc"
AC_DEFINE([JEMALLOC_PROF_LIBGCC], [ ])
@@ -699,6 +768,7 @@ fi
)
if test "x$backtrace_method" = "x" -a "x$enable_prof_gcc" = "x1" \
-a "x$GCC" = "xyes" ; then
+ JE_CFLAGS_APPEND([-fno-omit-frame-pointer])
backtrace_method="gcc intrinsics"
AC_DEFINE([JEMALLOC_PROF_GCC], [ ])
else
@@ -716,6 +786,12 @@ if test "x$enable_prof" = "x1" ; then
AC_MSG_ERROR([Heap profiling requires TLS]);
fi
force_tls="1"
+
+ if test "x$abi" != "xpecoff"; then
+ dnl Heap profiling uses the log(3) function.
+ LIBS="$LIBS -lm"
+ fi
+
AC_DEFINE([JEMALLOC_PROF], [ ])
fi
AC_SUBST([enable_prof])
@@ -749,7 +825,7 @@ fi
)
if test "x$enable_mremap" = "x1" ; then
JE_COMPILABLE([mremap(...MREMAP_FIXED...)], [
-#define _GNU_SOURCE
+#define _GNU_SOURCE
#include <sys/mman.h>
], [
void *p = mremap((void *)0, 0, 0, MREMAP_MAYMOVE|MREMAP_FIXED, (void *)0);
@@ -793,7 +869,12 @@ fi
dnl Check whether the BSD/SUSv1 sbrk() exists. If not, disable DSS support.
AC_CHECK_FUNC([sbrk], [have_sbrk="1"], [have_sbrk="0"])
if test "x$have_sbrk" = "x1" ; then
- AC_DEFINE([JEMALLOC_HAVE_SBRK], [ ])
+ if test "x$sbrk_deprecated" == "x1" ; then
+ AC_MSG_RESULT([Disabling dss allocation because sbrk is deprecated])
+ enable_dss="0"
+ else
+ AC_DEFINE([JEMALLOC_HAVE_SBRK], [ ])
+ fi
else
enable_dss="0"
fi
@@ -906,7 +987,7 @@ AC_CACHE_CHECK([STATIC_PAGE_SHIFT],
#include <stdio.h>
]],
[[
- long result;
+ int result;
FILE *f;
#ifdef _WIN32
@@ -925,7 +1006,7 @@ AC_CACHE_CHECK([STATIC_PAGE_SHIFT],
if (f == NULL) {
return 1;
}
- fprintf(f, "%u\n", result);
+ fprintf(f, "%d\n", result);
fclose(f);
return 0;
@@ -1070,11 +1151,13 @@ dnl Check for ffsl(3), and fail if not found. This function exists on all
dnl platforms that jemalloc currently has a chance of functioning on without
dnl modification.
JE_COMPILABLE([a program using ffsl], [
+#include <stdio.h>
#include <strings.h>
#include <string.h>
], [
{
int rv = ffsl(0x08);
+ printf("%d\n", rv);
}
], [je_cv_function_ffsl])
if test "x${je_cv_function_ffsl}" != "xyes" ; then
@@ -1178,7 +1261,26 @@ fi
dnl ============================================================================
dnl Darwin-related configuration.
-if test "x${abi}" = "xmacho" ; then
+AC_ARG_ENABLE([zone-allocator],
+ [AS_HELP_STRING([--disable-zone-allocator],
+ [Disable zone allocator for Darwin])],
+[if test "x$enable_zone_allocator" = "xno" ; then
+ enable_zone_allocator="0"
+else
+ enable_zone_allocator="1"
+fi
+],
+[if test "x${abi}" = "xmacho"; then
+ enable_zone_allocator="1"
+fi
+]
+)
+AC_SUBST([enable_zone_allocator])
+
+if test "x${enable_zone_allocator}" = "x1" ; then
+ if test "x${abi}" != "xmacho"; then
+ AC_MSG_ERROR([--enable-zone-allocator is only supported on Darwin])
+ fi
AC_DEFINE([JEMALLOC_IVSALLOC], [ ])
AC_DEFINE([JEMALLOC_ZONE], [ ])
@@ -1218,9 +1320,102 @@ dnl ============================================================================
dnl Check for typedefs, structures, and compiler characteristics.
AC_HEADER_STDBOOL
+dnl ============================================================================
+dnl Define commands that generate output files.
+
+AC_CONFIG_COMMANDS([include/jemalloc/internal/private_namespace.h], [
+ mkdir -p "${objroot}include/jemalloc/internal"
+ "${srcdir}/include/jemalloc/internal/private_namespace.sh" "${srcdir}/include/jemalloc/internal/private_symbols.txt" > "${objroot}include/jemalloc/internal/private_namespace.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/internal/private_unnamespace.h], [
+ mkdir -p "${objroot}include/jemalloc/internal"
+ "${srcdir}/include/jemalloc/internal/private_unnamespace.sh" "${srcdir}/include/jemalloc/internal/private_symbols.txt" > "${objroot}include/jemalloc/internal/private_unnamespace.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/internal/public_symbols.txt], [
+ f="${objroot}include/jemalloc/internal/public_symbols.txt"
+ mkdir -p "${objroot}include/jemalloc/internal"
+ cp /dev/null "${f}"
+ for nm in `echo ${mangling_map} |tr ',' ' '` ; do
+ n=`echo ${nm} |tr ':' ' ' |awk '{print $[]1}'`
+ m=`echo ${nm} |tr ':' ' ' |awk '{print $[]2}'`
+ echo "${n}:${m}" >> "${f}"
+ dnl Remove name from public_syms so that it isn't redefined later.
+ public_syms=`for sym in ${public_syms}; do echo "${sym}"; done |grep -v "^${n}\$" |tr '\n' ' '`
+ done
+ for sym in ${public_syms} ; do
+ n="${sym}"
+ m="${JEMALLOC_PREFIX}${sym}"
+ echo "${n}:${m}" >> "${f}"
+ done
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+ mangling_map="${mangling_map}"
+ public_syms="${public_syms}"
+ JEMALLOC_PREFIX="${JEMALLOC_PREFIX}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/internal/public_namespace.h], [
+ mkdir -p "${objroot}include/jemalloc/internal"
+ "${srcdir}/include/jemalloc/internal/public_namespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_namespace.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/internal/public_unnamespace.h], [
+ mkdir -p "${objroot}include/jemalloc/internal"
+ "${srcdir}/include/jemalloc/internal/public_unnamespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_unnamespace.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+])
AC_CONFIG_COMMANDS([include/jemalloc/internal/size_classes.h], [
- mkdir -p "include/jemalloc/internal"
+ mkdir -p "${objroot}include/jemalloc/internal"
"${srcdir}/include/jemalloc/internal/size_classes.sh" > "${objroot}include/jemalloc/internal/size_classes.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_protos_jet.h], [
+ mkdir -p "${objroot}include/jemalloc"
+ cat "${srcdir}/include/jemalloc/jemalloc_protos.h.in" | sed -e 's/@je_@/jet_/g' > "${objroot}include/jemalloc/jemalloc_protos_jet.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_rename.h], [
+ mkdir -p "${objroot}include/jemalloc"
+ "${srcdir}/include/jemalloc/jemalloc_rename.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/jemalloc_rename.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_mangle.h], [
+ mkdir -p "${objroot}include/jemalloc"
+ "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" je_ > "${objroot}include/jemalloc/jemalloc_mangle.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_mangle_jet.h], [
+ mkdir -p "${objroot}include/jemalloc"
+ "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" jet_ > "${objroot}include/jemalloc/jemalloc_mangle_jet.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/jemalloc.h], [
+ mkdir -p "${objroot}include/jemalloc"
+ "${srcdir}/include/jemalloc/jemalloc.sh" "${objroot}" > "${objroot}include/jemalloc/jemalloc${install_suffix}.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+ install_suffix="${install_suffix}"
])
dnl Process .in files.
@@ -1230,6 +1425,7 @@ AC_CONFIG_HEADERS([$cfghdrs_tup])
dnl ============================================================================
dnl Generate outputs.
+
AC_CONFIG_FILES([$cfgoutputs_tup config.stamp bin/jemalloc.sh])
AC_SUBST([cfgoutputs_in])
AC_SUBST([cfgoutputs_out])
@@ -1245,6 +1441,7 @@ AC_MSG_RESULT([CC : ${CC}])
AC_MSG_RESULT([CPPFLAGS : ${CPPFLAGS}])
AC_MSG_RESULT([CFLAGS : ${CFLAGS}])
AC_MSG_RESULT([LDFLAGS : ${LDFLAGS}])
+AC_MSG_RESULT([EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}])
AC_MSG_RESULT([LIBS : ${LIBS}])
AC_MSG_RESULT([RPATH_EXTRA : ${RPATH_EXTRA}])
AC_MSG_RESULT([])
@@ -1271,6 +1468,7 @@ AC_MSG_RESULT([autogen : ${enable_autogen}])
AC_MSG_RESULT([experimental : ${enable_experimental}])
AC_MSG_RESULT([cc-silence : ${enable_cc_silence}])
AC_MSG_RESULT([debug : ${enable_debug}])
+AC_MSG_RESULT([code-coverage : ${enable_code_coverage}])
AC_MSG_RESULT([stats : ${enable_stats}])
AC_MSG_RESULT([prof : ${enable_prof}])
AC_MSG_RESULT([prof-libunwind : ${enable_prof_libunwind}])
diff --git a/deps/jemalloc/coverage.sh b/deps/jemalloc/coverage.sh
new file mode 100755
index 000000000..6d1362a8c
--- /dev/null
+++ b/deps/jemalloc/coverage.sh
@@ -0,0 +1,16 @@
+#!/bin/sh
+
+set -e
+
+objdir=$1
+suffix=$2
+shift 2
+objs=$@
+
+gcov -b -p -f -o "${objdir}" ${objs}
+
+# Move gcov outputs so that subsequent gcov invocations won't clobber results
+# for the same sources with different compilation flags.
+for f in `find . -maxdepth 1 -type f -name '*.gcov'` ; do
+ mv "${f}" "${f}.${suffix}"
+done
diff --git a/deps/jemalloc/doc/jemalloc.3 b/deps/jemalloc/doc/jemalloc.3
index 4c198b68a..d04fbb498 100644
--- a/deps/jemalloc/doc/jemalloc.3
+++ b/deps/jemalloc/doc/jemalloc.3
@@ -1,13 +1,13 @@
'\" t
.\" Title: JEMALLOC
.\" Author: Jason Evans
-.\" Generator: DocBook XSL Stylesheets v1.76.1 <http://docbook.sf.net/>
-.\" Date: 11/09/2012
+.\" Generator: DocBook XSL Stylesheets v1.78.1 <http://docbook.sf.net/>
+.\" Date: 03/31/2014
.\" Manual: User Manual
-.\" Source: jemalloc 3.2.0-0-g87499f6748ebe4817571e817e9f680ccb5bf54a9
+.\" Source: jemalloc 3.6.0-0-g46c0af68bd248b04df75e4f92d5fb804c3d75340
.\" Language: English
.\"
-.TH "JEMALLOC" "3" "11/09/2012" "jemalloc 3.2.0-0-g87499f6748eb" "User Manual"
+.TH "JEMALLOC" "3" "03/31/2014" "jemalloc 3.6.0-0-g46c0af68bd24" "User Manual"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
@@ -31,7 +31,7 @@
jemalloc \- general purpose memory allocation functions
.SH "LIBRARY"
.PP
-This manual describes jemalloc 3\&.2\&.0\-0\-g87499f6748ebe4817571e817e9f680ccb5bf54a9\&. More information can be found at the
+This manual describes jemalloc 3\&.6\&.0\-0\-g46c0af68bd248b04df75e4f92d5fb804c3d75340\&. More information can be found at the
\m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&.
.SH "SYNOPSIS"
.sp
@@ -55,16 +55,28 @@ This manual describes jemalloc 3\&.2\&.0\-0\-g87499f6748ebe4817571e817e9f680ccb5
.HP \w'void\ free('u
.BI "void free(void\ *" "ptr" ");"
.SS "Non\-standard API"
-.HP \w'size_t\ malloc_usable_size('u
-.BI "size_t malloc_usable_size(const\ void\ *" "ptr" ");"
-.HP \w'void\ malloc_stats_print('u
-.BI "void malloc_stats_print(void\ " "(*write_cb)" "\ (void\ *,\ const\ char\ *), void\ *" "cbopaque" ", const\ char\ *" "opts" ");"
+.HP \w'void\ *mallocx('u
+.BI "void *mallocx(size_t\ " "size" ", int\ " "flags" ");"
+.HP \w'void\ *rallocx('u
+.BI "void *rallocx(void\ *" "ptr" ", size_t\ " "size" ", int\ " "flags" ");"
+.HP \w'size_t\ xallocx('u
+.BI "size_t xallocx(void\ *" "ptr" ", size_t\ " "size" ", size_t\ " "extra" ", int\ " "flags" ");"
+.HP \w'size_t\ sallocx('u
+.BI "size_t sallocx(void\ *" "ptr" ", int\ " "flags" ");"
+.HP \w'void\ dallocx('u
+.BI "void dallocx(void\ *" "ptr" ", int\ " "flags" ");"
+.HP \w'size_t\ nallocx('u
+.BI "size_t nallocx(size_t\ " "size" ", int\ " "flags" ");"
.HP \w'int\ mallctl('u
.BI "int mallctl(const\ char\ *" "name" ", void\ *" "oldp" ", size_t\ *" "oldlenp" ", void\ *" "newp" ", size_t\ " "newlen" ");"
.HP \w'int\ mallctlnametomib('u
.BI "int mallctlnametomib(const\ char\ *" "name" ", size_t\ *" "mibp" ", size_t\ *" "miblenp" ");"
.HP \w'int\ mallctlbymib('u
.BI "int mallctlbymib(const\ size_t\ *" "mib" ", size_t\ " "miblen" ", void\ *" "oldp" ", size_t\ *" "oldlenp" ", void\ *" "newp" ", size_t\ " "newlen" ");"
+.HP \w'void\ malloc_stats_print('u
+.BI "void malloc_stats_print(void\ " "(*write_cb)" "\ (void\ *,\ const\ char\ *), void\ *" "cbopaque" ", const\ char\ *" "opts" ");"
+.HP \w'size_t\ malloc_usable_size('u
+.BI "size_t malloc_usable_size(const\ void\ *" "ptr" ");"
.HP \w'void\ (*malloc_message)('u
.BI "void (*malloc_message)(void\ *" "cbopaque" ", const\ char\ *" "s" ");"
.PP
@@ -156,36 +168,105 @@ is
.SS "Non\-standard API"
.PP
The
-\fBmalloc_usable_size\fR\fB\fR
-function returns the usable size of the allocation pointed to by
-\fIptr\fR\&. The return value may be larger than the size that was requested during allocation\&. The
-\fBmalloc_usable_size\fR\fB\fR
-function is not a mechanism for in\-place
-\fBrealloc\fR\fB\fR; rather it is provided solely as a tool for introspection purposes\&. Any discrepancy between the requested allocation size and the size reported by
-\fBmalloc_usable_size\fR\fB\fR
-should not be depended on, since such behavior is entirely implementation\-dependent\&.
+\fBmallocx\fR\fB\fR,
+\fBrallocx\fR\fB\fR,
+\fBxallocx\fR\fB\fR,
+\fBsallocx\fR\fB\fR,
+\fBdallocx\fR\fB\fR, and
+\fBnallocx\fR\fB\fR
+functions all have a
+\fIflags\fR
+argument that can be used to specify options\&. The functions only check the options that are contextually relevant\&. Use bitwise or (|) operations to specify one or more of the following:
+.PP
+\fBMALLOCX_LG_ALIGN(\fR\fB\fIla\fR\fR\fB) \fR
+.RS 4
+Align the memory allocation to start at an address that is a multiple of
+(1 << \fIla\fR)\&. This macro does not validate that
+\fIla\fR
+is within the valid range\&.
+.RE
+.PP
+\fBMALLOCX_ALIGN(\fR\fB\fIa\fR\fR\fB) \fR
+.RS 4
+Align the memory allocation to start at an address that is a multiple of
+\fIa\fR, where
+\fIa\fR
+is a power of two\&. This macro does not validate that
+\fIa\fR
+is a power of 2\&.
+.RE
+.PP
+\fBMALLOCX_ZERO\fR
+.RS 4
+Initialize newly allocated memory to contain zero bytes\&. In the growing reallocation case, the real size prior to reallocation defines the boundary between untouched bytes and those that are initialized to contain zero bytes\&. If this macro is absent, newly allocated memory is uninitialized\&.
+.RE
+.PP
+\fBMALLOCX_ARENA(\fR\fB\fIa\fR\fR\fB) \fR
+.RS 4
+Use the arena specified by the index
+\fIa\fR
+(and by necessity bypass the thread cache)\&. This macro has no effect for huge regions, nor for regions that were allocated via an arena other than the one specified\&. This macro does not validate that
+\fIa\fR
+specifies an arena index in the valid range\&.
+.RE
.PP
The
-\fBmalloc_stats_print\fR\fB\fR
-function writes human\-readable summary statistics via the
-\fIwrite_cb\fR
-callback function pointer and
-\fIcbopaque\fR
-data passed to
-\fIwrite_cb\fR, or
-\fBmalloc_message\fR\fB\fR
-if
-\fIwrite_cb\fR
+\fBmallocx\fR\fB\fR
+function allocates at least
+\fIsize\fR
+bytes of memory, and returns a pointer to the base address of the allocation\&. Behavior is undefined if
+\fIsize\fR
is
-\fBNULL\fR\&. This function can be called repeatedly\&. General information that never changes during execution can be omitted by specifying "g" as a character within the
-\fIopts\fR
-string\&. Note that
-\fBmalloc_message\fR\fB\fR
-uses the
-\fBmallctl*\fR\fB\fR
-functions internally, so inconsistent statistics can be reported if multiple threads use these functions simultaneously\&. If
-\fB\-\-enable\-stats\fR
-is specified during configuration, \(lqm\(rq and \(lqa\(rq can be specified to omit merged arena and per arena statistics, respectively; \(lqb\(rq and \(lql\(rq can be specified to omit per size class statistics for bins and large objects, respectively\&. Unrecognized characters are silently ignored\&. Note that thread caching may prevent some statistics from being completely up to date, since extra locking would be required to merge counters that track thread cache operations\&.
+\fB0\fR, or if request size overflows due to size class and/or alignment constraints\&.
+.PP
+The
+\fBrallocx\fR\fB\fR
+function resizes the allocation at
+\fIptr\fR
+to be at least
+\fIsize\fR
+bytes, and returns a pointer to the base address of the resulting allocation, which may or may not have moved from its original location\&. Behavior is undefined if
+\fIsize\fR
+is
+\fB0\fR, or if request size overflows due to size class and/or alignment constraints\&.
+.PP
+The
+\fBxallocx\fR\fB\fR
+function resizes the allocation at
+\fIptr\fR
+in place to be at least
+\fIsize\fR
+bytes, and returns the real size of the allocation\&. If
+\fIextra\fR
+is non\-zero, an attempt is made to resize the allocation to be at least
+(\fIsize\fR + \fIextra\fR)
+bytes, though inability to allocate the extra byte(s) will not by itself result in failure to resize\&. Behavior is undefined if
+\fIsize\fR
+is
+\fB0\fR, or if
+(\fIsize\fR + \fIextra\fR > \fBSIZE_T_MAX\fR)\&.
+.PP
+The
+\fBsallocx\fR\fB\fR
+function returns the real size of the allocation at
+\fIptr\fR\&.
+.PP
+The
+\fBdallocx\fR\fB\fR
+function causes the memory referenced by
+\fIptr\fR
+to be made available for future allocations\&.
+.PP
+The
+\fBnallocx\fR\fB\fR
+function allocates no memory, but it performs the same size computation as the
+\fBmallocx\fR\fB\fR
+function, and returns the real size of the allocation that would result from the equivalent
+\fBmallocx\fR\fB\fR
+function call\&. Behavior is undefined if
+\fIsize\fR
+is
+\fB0\fR, or if request size overflows due to size class and/or alignment constraints\&.
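
To make the mallocx family documented above concrete, here is a minimal C sketch combining the new functions with the MALLOCX_* flag macros; it assumes a build without --with-jemalloc-prefix (so the functions keep their plain names) and the installed <jemalloc/jemalloc.h> header, and the sizes and the 64-byte alignment are arbitrary example values.

    #include <stddef.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
        /* At least 1000 bytes, 64-byte aligned, zero filled. */
        void *p = mallocx(1000, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
        if (p == NULL)
            return (1);

        /* Real (usable) size of the allocation. */
        size_t real = sallocx(p, 0);

        /* Size an 8192-byte request would actually consume, without allocating. */
        size_t projected = nallocx(8192, MALLOCX_ALIGN(64));

        /* Grow to at least 4096 bytes; the allocation may move. */
        void *q = rallocx(p, 4096, MALLOCX_ALIGN(64));
        if (q == NULL) {
            dallocx(p, 0);
            return (1);
        }
        p = q;

        (void)real;
        (void)projected;
        dallocx(p, 0);
        return (0);
    }
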
.PP
The
\fBmallctl\fR\fB\fR
@@ -229,15 +310,14 @@ that is smaller than the number of period\-separated name components, which resu
.\}
.nf
unsigned nbins, i;
-
-int mib[4];
+size_t mib[4];
size_t len, miblen;
len = sizeof(nbins);
mallctl("arenas\&.nbins", &nbins, &len, NULL, 0);
miblen = 4;
-mallnametomib("arenas\&.bin\&.0\&.size", mib, &miblen);
+mallctlnametomib("arenas\&.bin\&.0\&.size", mib, &miblen);
for (i = 0; i < nbins; i++) {
size_t bin_size;
@@ -250,6 +330,38 @@ for (i = 0; i < nbins; i++) {
.if n \{\
.RE
.\}
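
For reference, a self-contained version of the corrected MIB example above; the loop body (setting mib[2] and reading each bin size via mallctlbymib()) and the printf() are a plausible completion added for illustration, not text taken from the patch.

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
        unsigned nbins, i;
        size_t mib[4];
        size_t len, miblen;

        /* Scalar lookup by name. */
        len = sizeof(nbins);
        mallctl("arenas.nbins", &nbins, &len, NULL, 0);

        /* Translate the name once, then reuse the MIB with a varying index. */
        miblen = 4;
        mallctlnametomib("arenas.bin.0.size", mib, &miblen);
        for (i = 0; i < nbins; i++) {
            size_t bin_size;

            mib[2] = i;
            len = sizeof(bin_size);
            mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
            printf("bin %u: %zu bytes\n", i, bin_size);
        }
        return (0);
    }
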
+.PP
+The
+\fBmalloc_stats_print\fR\fB\fR
+function writes human\-readable summary statistics via the
+\fIwrite_cb\fR
+callback function pointer and
+\fIcbopaque\fR
+data passed to
+\fIwrite_cb\fR, or
+\fBmalloc_message\fR\fB\fR
+if
+\fIwrite_cb\fR
+is
+\fBNULL\fR\&. This function can be called repeatedly\&. General information that never changes during execution can be omitted by specifying "g" as a character within the
+\fIopts\fR
+string\&. Note that
+\fBmalloc_message\fR\fB\fR
+uses the
+\fBmallctl*\fR\fB\fR
+functions internally, so inconsistent statistics can be reported if multiple threads use these functions simultaneously\&. If
+\fB\-\-enable\-stats\fR
+is specified during configuration, \(lqm\(rq and \(lqa\(rq can be specified to omit merged arena and per arena statistics, respectively; \(lqb\(rq and \(lql\(rq can be specified to omit per size class statistics for bins and large objects, respectively\&. Unrecognized characters are silently ignored\&. Note that thread caching may prevent some statistics from being completely up to date, since extra locking would be required to merge counters that track thread cache operations\&.
+.PP
+The
+\fBmalloc_usable_size\fR\fB\fR
+function returns the usable size of the allocation pointed to by
+\fIptr\fR\&. The return value may be larger than the size that was requested during allocation\&. The
+\fBmalloc_usable_size\fR\fB\fR
+function is not a mechanism for in\-place
+\fBrealloc\fR\fB\fR; rather it is provided solely as a tool for introspection purposes\&. Any discrepancy between the requested allocation size and the size reported by
+\fBmalloc_usable_size\fR\fB\fR
+should not be depended on, since such behavior is entirely implementation\-dependent\&.
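
A short sketch of the two introspection calls described above; passing NULL as the callback routes output through malloc_message(), and "g" is the documented opts character for omitting general information (unprefixed build assumed).

    #include <stdio.h>
    #include <stdlib.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
        void *p = malloc(100);
        if (p == NULL)
            return (1);

        /* May report more than the 100 bytes that were requested. */
        printf("usable size: %zu\n", malloc_usable_size(p));
        free(p);

        /* NULL write_cb: output goes through malloc_message(). */
        malloc_stats_print(NULL, NULL, "g");
        return (0);
    }
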
.SS "Experimental API"
.PP
The experimental API is subject to change or removal without regard for backward compatibility\&. If
@@ -286,7 +398,7 @@ is a power of 2\&.
.PP
\fBALLOCM_ZERO\fR
.RS 4
-Initialize newly allocated memory to contain zero bytes\&. In the growing reallocation case, the real size prior to reallocation defines the boundary between untouched bytes and those that are initialized to contain zero bytes\&. If this option is absent, newly allocated memory is uninitialized\&.
+Initialize newly allocated memory to contain zero bytes\&. In the growing reallocation case, the real size prior to reallocation defines the boundary between untouched bytes and those that are initialized to contain zero bytes\&. If this macro is absent, newly allocated memory is uninitialized\&.
.RE
.PP
\fBALLOCM_NO_MOVE\fR
@@ -297,9 +409,10 @@ For reallocation, fail rather than moving the object\&. This constraint can appl
\fBALLOCM_ARENA(\fR\fB\fIa\fR\fR\fB) \fR
.RS 4
Use the arena specified by the index
-\fIa\fR\&. This macro does not validate that
\fIa\fR
-specifies an arena in the valid range\&.
+(and by necessity bypass the thread cache)\&. This macro has no effect for huge regions, nor for regions that were allocated via an arena other than the one specified\&. This macro does not validate that
+\fIa\fR
+specifies an arena index in the valid range\&.
.RE
.PP
The
@@ -316,7 +429,7 @@ is not
\fBNULL\fR\&. Behavior is undefined if
\fIsize\fR
is
-\fB0\fR\&.
+\fB0\fR, or if request size overflows due to size class and/or alignment constraints\&.
.PP
The
\fBrallocm\fR\fB\fR
@@ -334,11 +447,11 @@ is not
\fBNULL\fR\&. If
\fIextra\fR
is non\-zero, an attempt is made to resize the allocation to be at least
-\fIsize\fR + \fIextra\fR)
+(\fIsize\fR + \fIextra\fR)
bytes, though inability to allocate the extra byte(s) will not by itself result in failure\&. Behavior is undefined if
\fIsize\fR
is
-\fB0\fR, or if
+\fB0\fR, if request size overflows due to size class and/or alignment constraints, or if
(\fIsize\fR + \fIextra\fR > \fBSIZE_T_MAX\fR)\&.
.PP
The
@@ -368,7 +481,7 @@ to the real size of the allocation that would result from the equivalent
function call\&. Behavior is undefined if
\fIsize\fR
is
-\fB0\fR\&.
+\fB0\fR, or if request size overflows due to size class and/or alignment constraints\&.
.SH "TUNING"
.PP
Once, when the first call is made to one of the memory allocation routines, the allocator initializes its internals based in part on various options that can be specified at compile\- or run\-time\&.
@@ -376,7 +489,19 @@ Once, when the first call is made to one of the memory allocation routines, the
The string pointed to by the global variable
\fImalloc_conf\fR, the \(lqname\(rq of the file referenced by the symbolic link named
/etc/malloc\&.conf, and the value of the environment variable
-\fBMALLOC_CONF\fR, will be interpreted, in that order, from left to right as options\&.
+\fBMALLOC_CONF\fR, will be interpreted, in that order, from left to right as options\&. Note that
+\fImalloc_conf\fR
+may be read before
+\fBmain\fR\fB\fR
+is entered, so the declaration of
+\fImalloc_conf\fR
+should specify an initializer that contains the final value to be read by jemalloc\&.
+\fImalloc_conf\fR
+is a compile\-time setting, whereas
+/etc/malloc\&.conf
+and
+\fBMALLOC_CONF\fR
+can be safely set any time prior to program invocation\&.
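
To illustrate the compile-time path, a sketch of the malloc_conf definition referred to above, assuming an unprefixed build so the symbol is named malloc_conf; the option names come from the "opt.*" entries later in this page (narenas, lg_chunk) and the particular values are arbitrary.

    /*
     * Read by jemalloc before main() is entered, so the definition
     * must carry the final option string as a static initializer.
     */
    const char *malloc_conf = "narenas:2,lg_chunk:22";

    int
    main(void)
    {
        return (0);
    }
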
.PP
An options string is a comma\-separated list of option:value pairs\&. There is one key corresponding to each
"opt\&.*"
@@ -634,16 +759,18 @@ in these cases\&. This option is disabled by default unless
is specified during configuration, in which case it is enabled by default\&.
.RE
.PP
-"opt\&.lg_chunk" (\fBsize_t\fR) r\-
-.RS 4
-Virtual memory chunk size (log base 2)\&. The default chunk size is 4 MiB (2^22)\&.
-.RE
-.PP
"opt\&.dss" (\fBconst char *\fR) r\-
.RS 4
dss (\fBsbrk\fR(2)) allocation precedence as related to
\fBmmap\fR(2)
-allocation\&. The following settings are supported: \(lqdisabled\(rq, \(lqprimary\(rq, and \(lqsecondary\(rq (default)\&.
+allocation\&. The following settings are supported: \(lqdisabled\(rq, \(lqprimary\(rq, and \(lqsecondary\(rq\&. The default is \(lqsecondary\(rq if
+"config\&.dss"
+is true, \(lqdisabled\(rq otherwise\&.
+.RE
+.PP
+"opt\&.lg_chunk" (\fBsize_t\fR) r\-
+.RS 4
+Virtual memory chunk size (log base 2)\&. If a chunk size outside the supported size range is specified, the size is silently clipped to the minimum/maximum supported size\&. The default chunk size is 4 MiB (2^22)\&.
.RE
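A minimal sketch of reading this option at run time via mallctl (error handling omitted; the option name follows the "opt.*" key described above):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	size_t lg_chunk, len = sizeof(lg_chunk);

	if (mallctl("opt.lg_chunk", &lg_chunk, &len, NULL, 0) == 0)
		printf("opt.lg_chunk: %zu\n", lg_chunk);
	return (0);
}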
.PP
"opt\&.narenas" (\fBsize_t\fR) r\-
@@ -698,7 +825,8 @@ option is enabled, the redzones are checked for corruption during deallocation\&
"opt\&.zero" (\fBbool\fR) r\- [\fB\-\-enable\-fill\fR]
.RS 4
Zero filling enabled/disabled\&. If enabled, each byte of uninitialized allocated memory will be initialized to 0\&. Note that this initialization only happens once for each byte, so
-\fBrealloc\fR\fB\fR
+\fBrealloc\fR\fB\fR,
+\fBrallocx\fR\fB\fR
and
\fBrallocm\fR\fB\fR
calls do not zero memory that was previously allocated\&. This is intended for debugging and will impact performance negatively\&. This option is disabled by default\&.
@@ -776,7 +904,7 @@ Filename prefix for profile dumps\&. If the prefix is set to the empty string, n
jeprof\&.
.RE
.PP
-"opt\&.prof_active" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
+"opt\&.prof_active" (\fBbool\fR) rw [\fB\-\-enable\-prof\fR]
.RS 4
Profiling activated/deactivated\&. This is a secondary control mechanism that makes it possible to start the application with profiling enabled (see the
"opt\&.prof"
@@ -891,7 +1019,7 @@ Purge unused dirty pages for arena <i>, or for all arenas if <i> equals
"arena\&.<i>\&.dss" (\fBconst char *\fR) rw
.RS 4
Set the precedence of dss allocation as related to mmap allocation for arena <i>, or for all arenas if <i> equals
-"arenas\&.narenas"\&. See
+"arenas\&.narenas"\&. Note that even during huge allocation this setting is read from the arena that would be chosen for small or large allocation so that applications can depend on consistent dss versus mmap allocation regardless of allocation size\&. See
"opt\&.dss"
for supported settings\&.
.RE
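A minimal sketch of applying a dss setting to every arena, using the convention above that an index equal to "arenas.narenas" addresses all arenas; error handling is abbreviated:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	unsigned narenas;
	size_t len = sizeof(narenas);
	char name[64];
	const char *dss = "primary";

	if (mallctl("arenas.narenas", &narenas, &len, NULL, 0) != 0)
		return (1);
	/* Index equal to arenas.narenas selects all arenas. */
	snprintf(name, sizeof(name), "arena.%u.dss", narenas);
	if (mallctl(name, NULL, NULL, &dss, sizeof(dss)) != 0)
		return (1);
	return (0);
}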
@@ -1091,7 +1219,7 @@ Number of
or similar calls made to purge dirty pages\&.
.RE
.PP
-"stats\&.arenas\&.<i>\&.npurged" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.purged" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Number of pages purged\&.
.RE
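A minimal sketch of refreshing the statistics epoch and then reading this counter, assuming a build configured with --enable-stats; arena index 0 is arbitrary:

#include <inttypes.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	uint64_t epoch = 1, purged;
	size_t len = sizeof(epoch);

	/* Refresh cached statistics before reading them. */
	mallctl("epoch", &epoch, &len, &epoch, sizeof(epoch));
	len = sizeof(purged);
	if (mallctl("stats.arenas.0.purged", &purged, &len, NULL, 0) == 0)
		printf("arena 0 pages purged: %" PRIu64 "\n", purged);
	return (0);
}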
@@ -1314,11 +1442,32 @@ function returns no value\&.
.SS "Non\-standard API"
.PP
The
-\fBmalloc_usable_size\fR\fB\fR
-function returns the usable size of the allocation pointed to by
+\fBmallocx\fR\fB\fR
+and
+\fBrallocx\fR\fB\fR
+functions return a pointer to the allocated memory if successful; otherwise a
+\fBNULL\fR
+pointer is returned to indicate insufficient contiguous memory was available to service the allocation request\&.
+.PP
+The
+\fBxallocx\fR\fB\fR
+function returns the real size of the resulting resized allocation pointed to by
+\fIptr\fR, which is a value less than
+\fIsize\fR
+if the allocation could not be adequately grown in place\&.
+.PP
+The
+\fBsallocx\fR\fB\fR
+function returns the real size of the allocation pointed to by
\fIptr\fR\&.
.PP
The
+\fBnallocx\fR\fB\fR
+function returns the real size that would result from a successful equivalent
+\fBmallocx\fR\fB\fR
+function call, or zero if insufficient memory is available to perform the size computation\&.
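A minimal sketch exercising the new non-standard functions together; the flag values and sizes are arbitrary illustration:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	size_t usable = nallocx(100, MALLOCX_ALIGN(64));
	void *p, *q;

	if (usable == 0)
		return (1);
	p = mallocx(100, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
	if (p == NULL)
		return (1);
	printf("predicted %zu, actual %zu\n", usable, sallocx(p, 0));

	/* Try to grow in place; fall back to rallocx(), which may move. */
	if (xallocx(p, 200, 0, 0) < 200) {
		q = rallocx(p, 200, 0);
		if (q != NULL)
			p = q;
	}
	dallocx(p, 0);
	return (0);
}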
+.PP
+The
\fBmallctl\fR\fB\fR,
\fBmallctlnametomib\fR\fB\fR, and
\fBmallctlbymib\fR\fB\fR
@@ -1335,12 +1484,6 @@ is too large or too small\&. Alternatively,
is too large or too small; in this case as much data as possible are read despite the error\&.
.RE
.PP
-ENOMEM
-.RS 4
-\fI*oldlenp\fR
-is too short to hold the requested value\&.
-.RE
-.PP
ENOENT
.RS 4
\fIname\fR
@@ -1365,6 +1508,11 @@ An interface with side effects failed in some way not directly related to
\fBmallctl*\fR\fB\fR
read/write processing\&.
.RE
+.PP
+The
+\fBmalloc_usable_size\fR\fB\fR
+function returns the usable size of the allocation pointed to by
+\fIptr\fR\&.
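A minimal sketch of malloc_usable_size used for introspection; the reported value depends on the size class and is implementation-dependent:

#include <stdio.h>
#include <stdlib.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	void *p = malloc(100);

	if (p != NULL) {
		printf("requested 100, usable %zu\n", malloc_usable_size(p));
		free(p);
	}
	return (0);
}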
.SS "Experimental API"
.PP
The
diff --git a/deps/jemalloc/doc/jemalloc.html b/deps/jemalloc/doc/jemalloc.html
index 002356e53..5a9fc7789 100644
--- a/deps/jemalloc/doc/jemalloc.html
+++ b/deps/jemalloc/doc/jemalloc.html
@@ -1,8 +1,8 @@
-<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>JEMALLOC</title><meta name="generator" content="DocBook XSL Stylesheets V1.76.1"></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="refentry" title="JEMALLOC"><a name="id286949159"></a><div class="titlepage"></div><div class="refnamediv"><h2>Name</h2><p>jemalloc &#8212; general purpose memory allocation functions</p></div><div class="refsect1" title="LIBRARY"><a name="library"></a><h2>LIBRARY</h2><p>This manual describes jemalloc 3.2.0-0-g87499f6748ebe4817571e817e9f680ccb5bf54a9. More information
- can be found at the <a class="ulink" href="http://www.canonware.com/jemalloc/" target="_top">jemalloc website</a>.</p></div><div class="refsynopsisdiv" title="SYNOPSIS"><h2>SYNOPSIS</h2><div class="funcsynopsis"><pre class="funcsynopsisinfo">#include &lt;<code class="filename">stdlib.h</code>&gt;
-#include &lt;<code class="filename">jemalloc/jemalloc.h</code>&gt;</pre><div class="refsect2" title="Standard API"><a name="id286901505"></a><h3>Standard API</h3><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">void *<b class="fsfunc">malloc</b>(</code></td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">void *<b class="fsfunc">calloc</b>(</code></td><td>size_t <var class="pdparam">number</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">int <b class="fsfunc">posix_memalign</b>(</code></td><td>void **<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">alignment</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">void *<b class="fsfunc">aligned_alloc</b>(</code></td><td>size_t <var class="pdparam">alignment</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">void *<b class="fsfunc">realloc</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">void <b class="fsfunc">free</b>(</code></td><td>void *<var class="pdparam">ptr</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div><div class="refsect2" title="Non-standard API"><a name="id286900549"></a><h3>Non-standard API</h3><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">size_t <b class="fsfunc">malloc_usable_size</b>(</code></td><td>const void *<var class="pdparam">ptr</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">void <b class="fsfunc">malloc_stats_print</b>(</code></td><td>void <var class="pdparam">(*write_cb)</var>
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>JEMALLOC</title><meta name="generator" content="DocBook XSL Stylesheets V1.78.1"></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="refentry"><a name="idm316394519664"></a><div class="titlepage"></div><div class="refnamediv"><h2>Name</h2><p>jemalloc &#8212; general purpose memory allocation functions</p></div><div class="refsect1"><a name="library"></a><h2>LIBRARY</h2><p>This manual describes jemalloc 3.6.0-0-g46c0af68bd248b04df75e4f92d5fb804c3d75340. More information
+ can be found at the <a class="ulink" href="http://www.canonware.com/jemalloc/" target="_top">jemalloc website</a>.</p></div><div class="refsynopsisdiv"><h2>SYNOPSIS</h2><div class="funcsynopsis"><pre class="funcsynopsisinfo">#include &lt;<code class="filename">stdlib.h</code>&gt;
+#include &lt;<code class="filename">jemalloc/jemalloc.h</code>&gt;</pre><div class="refsect2"><a name="idm316394002288"></a><h3>Standard API</h3><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">malloc</b>(</code></td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">calloc</b>(</code></td><td>size_t <var class="pdparam">number</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">posix_memalign</b>(</code></td><td>void **<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">alignment</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">aligned_alloc</b>(</code></td><td>size_t <var class="pdparam">alignment</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">realloc</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void <b class="fsfunc">free</b>(</code></td><td>void *<var class="pdparam">ptr</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div><div class="refsect2"><a name="idm316393986160"></a><h3>Non-standard API</h3><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">mallocx</b>(</code></td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">rallocx</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">size_t <b class="fsfunc">xallocx</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, 
</td></tr><tr><td> </td><td>size_t <var class="pdparam">extra</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">size_t <b class="fsfunc">sallocx</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void <b class="fsfunc">dallocx</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">size_t <b class="fsfunc">nallocx</b>(</code></td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">mallctl</b>(</code></td><td>const char *<var class="pdparam">name</var>, </td></tr><tr><td> </td><td>void *<var class="pdparam">oldp</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">oldlenp</var>, </td></tr><tr><td> </td><td>void *<var class="pdparam">newp</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">newlen</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">mallctlnametomib</b>(</code></td><td>const char *<var class="pdparam">name</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">mibp</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">miblenp</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">mallctlbymib</b>(</code></td><td>const size_t *<var class="pdparam">mib</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">miblen</var>, </td></tr><tr><td> </td><td>void *<var class="pdparam">oldp</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">oldlenp</var>, </td></tr><tr><td> </td><td>void *<var class="pdparam">newp</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">newlen</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void <b class="fsfunc">malloc_stats_print</b>(</code></td><td>void <var class="pdparam">(*write_cb)</var>
<code>(</code>void *, const char *<code>)</code>
- , </td></tr><tr><td> </td><td>void *<var class="pdparam">cbopaque</var>, </td></tr><tr><td> </td><td>const char *<var class="pdparam">opts</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">int <b class="fsfunc">mallctl</b>(</code></td><td>const char *<var class="pdparam">name</var>, </td></tr><tr><td> </td><td>void *<var class="pdparam">oldp</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">oldlenp</var>, </td></tr><tr><td> </td><td>void *<var class="pdparam">newp</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">newlen</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">int <b class="fsfunc">mallctlnametomib</b>(</code></td><td>const char *<var class="pdparam">name</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">mibp</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">miblenp</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">int <b class="fsfunc">mallctlbymib</b>(</code></td><td>const size_t *<var class="pdparam">mib</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">miblen</var>, </td></tr><tr><td> </td><td>void *<var class="pdparam">oldp</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">oldlenp</var>, </td></tr><tr><td> </td><td>void *<var class="pdparam">newp</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">newlen</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">void <b class="fsfunc">(*malloc_message)</b>(</code></td><td>void *<var class="pdparam">cbopaque</var>, </td></tr><tr><td> </td><td>const char *<var class="pdparam">s</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><p><span class="type">const char *</span><code class="varname">malloc_conf</code>;</p></div><div class="refsect2" title="Experimental API"><a name="id286900756"></a><h3>Experimental API</h3><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">int <b class="fsfunc">allocm</b>(</code></td><td>void **<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">rsize</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">int <b class="fsfunc">rallocm</b>(</code></td><td>void **<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">rsize</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">extra</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> 
</div><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">int <b class="fsfunc">sallocm</b>(</code></td><td>const void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">rsize</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">int <b class="fsfunc">dallocm</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" summary="Function synopsis" cellspacing="0" cellpadding="0" class="funcprototype-table"><tr><td><code class="funcdef">int <b class="fsfunc">nallocm</b>(</code></td><td>size_t *<var class="pdparam">rsize</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div></div></div><div class="refsect1" title="DESCRIPTION"><a name="description"></a><h2>DESCRIPTION</h2><div class="refsect2" title="Standard API"><a name="id286949297"></a><h3>Standard API</h3><p>The <code class="function">malloc</code>(<em class="parameter"><code></code></em>) function allocates
+ , </td></tr><tr><td> </td><td>void *<var class="pdparam">cbopaque</var>, </td></tr><tr><td> </td><td>const char *<var class="pdparam">opts</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">size_t <b class="fsfunc">malloc_usable_size</b>(</code></td><td>const void *<var class="pdparam">ptr</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void <b class="fsfunc">(*malloc_message)</b>(</code></td><td>void *<var class="pdparam">cbopaque</var>, </td></tr><tr><td> </td><td>const char *<var class="pdparam">s</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><p><span class="type">const char *</span><code class="varname">malloc_conf</code>;</p></div><div class="refsect2"><a name="idm316388684112"></a><h3>Experimental API</h3><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">allocm</b>(</code></td><td>void **<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">rsize</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">rallocm</b>(</code></td><td>void **<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">rsize</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">extra</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">sallocm</b>(</code></td><td>const void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">rsize</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">dallocm</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">nallocm</b>(</code></td><td>size_t *<var class="pdparam">rsize</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div></div></div><div class="refsect1"><a name="description"></a><h2>DESCRIPTION</h2><div 
class="refsect2"><a name="idm316388663504"></a><h3>Standard API</h3><p>The <code class="function">malloc</code>(<em class="parameter"><code></code></em>) function allocates
<em class="parameter"><code>size</code></em> bytes of uninitialized memory. The allocated
space is suitably aligned (after possible pointer coercion) for storage
of any type of object.</p><p>The <code class="function">calloc</code>(<em class="parameter"><code></code></em>) function allocates
@@ -38,38 +38,67 @@
<code class="function">malloc</code>(<em class="parameter"><code></code></em>) for the specified size.</p><p>The <code class="function">free</code>(<em class="parameter"><code></code></em>) function causes the
allocated memory referenced by <em class="parameter"><code>ptr</code></em> to be made
available for future allocations. If <em class="parameter"><code>ptr</code></em> is
- <code class="constant">NULL</code>, no action occurs.</p></div><div class="refsect2" title="Non-standard API"><a name="id286949561"></a><h3>Non-standard API</h3><p>The <code class="function">malloc_usable_size</code>(<em class="parameter"><code></code></em>) function
- returns the usable size of the allocation pointed to by
- <em class="parameter"><code>ptr</code></em>. The return value may be larger than the size
- that was requested during allocation. The
- <code class="function">malloc_usable_size</code>(<em class="parameter"><code></code></em>) function is not a
- mechanism for in-place <code class="function">realloc</code>(<em class="parameter"><code></code></em>); rather
- it is provided solely as a tool for introspection purposes. Any
- discrepancy between the requested allocation size and the size reported
- by <code class="function">malloc_usable_size</code>(<em class="parameter"><code></code></em>) should not be
- depended on, since such behavior is entirely implementation-dependent.
- </p><p>The <code class="function">malloc_stats_print</code>(<em class="parameter"><code></code></em>) function
- writes human-readable summary statistics via the
- <em class="parameter"><code>write_cb</code></em> callback function pointer and
- <em class="parameter"><code>cbopaque</code></em> data passed to
- <em class="parameter"><code>write_cb</code></em>, or
- <code class="function">malloc_message</code>(<em class="parameter"><code></code></em>) if
- <em class="parameter"><code>write_cb</code></em> is <code class="constant">NULL</code>. This
- function can be called repeatedly. General information that never
- changes during execution can be omitted by specifying "g" as a character
- within the <em class="parameter"><code>opts</code></em> string. Note that
- <code class="function">malloc_message</code>(<em class="parameter"><code></code></em>) uses the
- <code class="function">mallctl*</code>(<em class="parameter"><code></code></em>) functions internally, so
- inconsistent statistics can be reported if multiple threads use these
- functions simultaneously. If <code class="option">--enable-stats</code> is
- specified during configuration, &#8220;m&#8221; and &#8220;a&#8221; can
- be specified to omit merged arena and per arena statistics, respectively;
- &#8220;b&#8221; and &#8220;l&#8221; can be specified to omit per size
- class statistics for bins and large objects, respectively. Unrecognized
- characters are silently ignored. Note that thread caching may prevent
- some statistics from being completely up to date, since extra locking
- would be required to merge counters that track thread cache operations.
- </p><p>The <code class="function">mallctl</code>(<em class="parameter"><code></code></em>) function provides a
+ <code class="constant">NULL</code>, no action occurs.</p></div><div class="refsect2"><a name="idm316388639904"></a><h3>Non-standard API</h3><p>The <code class="function">mallocx</code>(<em class="parameter"><code></code></em>),
+ <code class="function">rallocx</code>(<em class="parameter"><code></code></em>),
+ <code class="function">xallocx</code>(<em class="parameter"><code></code></em>),
+ <code class="function">sallocx</code>(<em class="parameter"><code></code></em>),
+ <code class="function">dallocx</code>(<em class="parameter"><code></code></em>), and
+ <code class="function">nallocx</code>(<em class="parameter"><code></code></em>) functions all have a
+ <em class="parameter"><code>flags</code></em> argument that can be used to specify
+ options. The functions only check the options that are contextually
+ relevant. Use bitwise or (<code class="code">|</code>) operations to
+ specify one or more of the following:
+ </p><div class="variablelist"><dl class="variablelist"><dt><span class="term"><code class="constant">MALLOCX_LG_ALIGN(<em class="parameter"><code>la</code></em>)
+ </code></span></dt><dd><p>Align the memory allocation to start at an address
+ that is a multiple of <code class="code">(1 &lt;&lt;
+ <em class="parameter"><code>la</code></em>)</code>. This macro does not validate
+ that <em class="parameter"><code>la</code></em> is within the valid
+ range.</p></dd><dt><span class="term"><code class="constant">MALLOCX_ALIGN(<em class="parameter"><code>a</code></em>)
+ </code></span></dt><dd><p>Align the memory allocation to start at an address
+ that is a multiple of <em class="parameter"><code>a</code></em>, where
+ <em class="parameter"><code>a</code></em> is a power of two. This macro does not
+ validate that <em class="parameter"><code>a</code></em> is a power of 2.
+ </p></dd><dt><span class="term"><code class="constant">MALLOCX_ZERO</code></span></dt><dd><p>Initialize newly allocated memory to contain zero
+ bytes. In the growing reallocation case, the real size prior to
+ reallocation defines the boundary between untouched bytes and those
+ that are initialized to contain zero bytes. If this macro is
+ absent, newly allocated memory is uninitialized.</p></dd><dt><span class="term"><code class="constant">MALLOCX_ARENA(<em class="parameter"><code>a</code></em>)
+ </code></span></dt><dd><p>Use the arena specified by the index
+ <em class="parameter"><code>a</code></em> (and by necessity bypass the thread
+ cache). This macro has no effect for huge regions, nor for regions
+ that were allocated via an arena other than the one specified.
+ This macro does not validate that <em class="parameter"><code>a</code></em>
+ specifies an arena index in the valid range.</p></dd></dl></div><p>
+ </p><p>The <code class="function">mallocx</code>(<em class="parameter"><code></code></em>) function allocates at
+ least <em class="parameter"><code>size</code></em> bytes of memory, and returns a pointer
+ to the base address of the allocation. Behavior is undefined if
+ <em class="parameter"><code>size</code></em> is <code class="constant">0</code>, or if request size
+ overflows due to size class and/or alignment constraints.</p><p>The <code class="function">rallocx</code>(<em class="parameter"><code></code></em>) function resizes the
+ allocation at <em class="parameter"><code>ptr</code></em> to be at least
+ <em class="parameter"><code>size</code></em> bytes, and returns a pointer to the base
+ address of the resulting allocation, which may or may not have moved from
+ its original location. Behavior is undefined if
+ <em class="parameter"><code>size</code></em> is <code class="constant">0</code>, or if request size
+ overflows due to size class and/or alignment constraints.</p><p>The <code class="function">xallocx</code>(<em class="parameter"><code></code></em>) function resizes the
+ allocation at <em class="parameter"><code>ptr</code></em> in place to be at least
+ <em class="parameter"><code>size</code></em> bytes, and returns the real size of the
+ allocation. If <em class="parameter"><code>extra</code></em> is non-zero, an attempt is
+ made to resize the allocation to be at least <code class="code">(<em class="parameter"><code>size</code></em> +
+ <em class="parameter"><code>extra</code></em>)</code> bytes, though inability to allocate
+ the extra byte(s) will not by itself result in failure to resize.
+ Behavior is undefined if <em class="parameter"><code>size</code></em> is
+ <code class="constant">0</code>, or if <code class="code">(<em class="parameter"><code>size</code></em> + <em class="parameter"><code>extra</code></em>
+ &gt; <code class="constant">SIZE_T_MAX</code>)</code>.</p><p>The <code class="function">sallocx</code>(<em class="parameter"><code></code></em>) function returns the
+ real size of the allocation at <em class="parameter"><code>ptr</code></em>.</p><p>The <code class="function">dallocx</code>(<em class="parameter"><code></code></em>) function causes the
+ memory referenced by <em class="parameter"><code>ptr</code></em> to be made available for
+ future allocations.</p><p>The <code class="function">nallocx</code>(<em class="parameter"><code></code></em>) function allocates no
+ memory, but it performs the same size computation as the
+ <code class="function">mallocx</code>(<em class="parameter"><code></code></em>) function, and returns the real
+ size of the allocation that would result from the equivalent
+ <code class="function">mallocx</code>(<em class="parameter"><code></code></em>) function call. Behavior is
+ undefined if <em class="parameter"><code>size</code></em> is <code class="constant">0</code>, or if
+ request size overflows due to size class and/or alignment
+ constraints.</p><p>The <code class="function">mallctl</code>(<em class="parameter"><code></code></em>) function provides a
general interface for introspecting the memory allocator, as well as
setting modifiable parameters and triggering actions. The
period-separated <em class="parameter"><code>name</code></em> argument specifies a
@@ -102,15 +131,14 @@
the corresponding MIB component will always be that integer. Therefore,
it is legitimate to construct code like the following: </p><pre class="programlisting">
unsigned nbins, i;
-
-int mib[4];
+size_t mib[4];
size_t len, miblen;
len = sizeof(nbins);
mallctl("arenas.nbins", &amp;nbins, &amp;len, NULL, 0);
miblen = 4;
-mallnametomib("arenas.bin.0.size", mib, &amp;miblen);
+mallctlnametomib("arenas.bin.0.size", mib, &amp;miblen);
for (i = 0; i &lt; nbins; i++) {
size_t bin_size;
@@ -118,7 +146,38 @@ for (i = 0; i &lt; nbins; i++) {
len = sizeof(bin_size);
mallctlbymib(mib, miblen, &amp;bin_size, &amp;len, NULL, 0);
/* Do something with bin_size... */
-}</pre></div><div class="refsect2" title="Experimental API"><a name="id286949870"></a><h3>Experimental API</h3><p>The experimental API is subject to change or removal without regard
+}</pre><p>The <code class="function">malloc_stats_print</code>(<em class="parameter"><code></code></em>) function
+ writes human-readable summary statistics via the
+ <em class="parameter"><code>write_cb</code></em> callback function pointer and
+ <em class="parameter"><code>cbopaque</code></em> data passed to
+ <em class="parameter"><code>write_cb</code></em>, or
+ <code class="function">malloc_message</code>(<em class="parameter"><code></code></em>) if
+ <em class="parameter"><code>write_cb</code></em> is <code class="constant">NULL</code>. This
+ function can be called repeatedly. General information that never
+ changes during execution can be omitted by specifying "g" as a character
+ within the <em class="parameter"><code>opts</code></em> string. Note that
+ <code class="function">malloc_message</code>(<em class="parameter"><code></code></em>) uses the
+ <code class="function">mallctl*</code>(<em class="parameter"><code></code></em>) functions internally, so
+ inconsistent statistics can be reported if multiple threads use these
+ functions simultaneously. If <code class="option">--enable-stats</code> is
+ specified during configuration, &#8220;m&#8221; and &#8220;a&#8221; can
+ be specified to omit merged arena and per arena statistics, respectively;
+ &#8220;b&#8221; and &#8220;l&#8221; can be specified to omit per size
+ class statistics for bins and large objects, respectively. Unrecognized
+ characters are silently ignored. Note that thread caching may prevent
+ some statistics from being completely up to date, since extra locking
+ would be required to merge counters that track thread cache operations.
+ </p><p>The <code class="function">malloc_usable_size</code>(<em class="parameter"><code></code></em>) function
+ returns the usable size of the allocation pointed to by
+ <em class="parameter"><code>ptr</code></em>. The return value may be larger than the size
+ that was requested during allocation. The
+ <code class="function">malloc_usable_size</code>(<em class="parameter"><code></code></em>) function is not a
+ mechanism for in-place <code class="function">realloc</code>(<em class="parameter"><code></code></em>); rather
+ it is provided solely as a tool for introspection purposes. Any
+ discrepancy between the requested allocation size and the size reported
+ by <code class="function">malloc_usable_size</code>(<em class="parameter"><code></code></em>) should not be
+ depended on, since such behavior is entirely implementation-dependent.
+ </p></div><div class="refsect2"><a name="idm316388574208"></a><h3>Experimental API</h3><p>The experimental API is subject to change or removal without regard
for backward compatibility. If <code class="option">--disable-experimental</code>
is specified during configuration, the experimental API is
omitted.</p><p>The <code class="function">allocm</code>(<em class="parameter"><code></code></em>),
@@ -130,7 +189,7 @@ for (i = 0; i &lt; nbins; i++) {
options. The functions only check the options that are contextually
relevant. Use bitwise or (<code class="code">|</code>) operations to
specify one or more of the following:
- </p><div class="variablelist"><dl><dt><span class="term"><code class="constant">ALLOCM_LG_ALIGN(<em class="parameter"><code>la</code></em>)
+ </p><div class="variablelist"><dl class="variablelist"><dt><span class="term"><code class="constant">ALLOCM_LG_ALIGN(<em class="parameter"><code>la</code></em>)
</code></span></dt><dd><p>Align the memory allocation to start at an address
that is a multiple of <code class="code">(1 &lt;&lt;
<em class="parameter"><code>la</code></em>)</code>. This macro does not validate
@@ -143,32 +202,36 @@ for (i = 0; i &lt; nbins; i++) {
</p></dd><dt><span class="term"><code class="constant">ALLOCM_ZERO</code></span></dt><dd><p>Initialize newly allocated memory to contain zero
bytes. In the growing reallocation case, the real size prior to
reallocation defines the boundary between untouched bytes and those
- that are initialized to contain zero bytes. If this option is
+ that are initialized to contain zero bytes. If this macro is
absent, newly allocated memory is uninitialized.</p></dd><dt><span class="term"><code class="constant">ALLOCM_NO_MOVE</code></span></dt><dd><p>For reallocation, fail rather than moving the
object. This constraint can apply to both growth and
shrinkage.</p></dd><dt><span class="term"><code class="constant">ALLOCM_ARENA(<em class="parameter"><code>a</code></em>)
</code></span></dt><dd><p>Use the arena specified by the index
- <em class="parameter"><code>a</code></em>. This macro does not validate that
- <em class="parameter"><code>a</code></em> specifies an arena in the valid
- range.</p></dd></dl></div><p>
+ <em class="parameter"><code>a</code></em> (and by necessity bypass the thread
+ cache). This macro has no effect for huge regions, nor for regions
+ that were allocated via an arena other than the one specified.
+ This macro does not validate that <em class="parameter"><code>a</code></em>
+ specifies an arena index in the valid range.</p></dd></dl></div><p>
</p><p>The <code class="function">allocm</code>(<em class="parameter"><code></code></em>) function allocates at
least <em class="parameter"><code>size</code></em> bytes of memory, sets
<em class="parameter"><code>*ptr</code></em> to the base address of the allocation, and
sets <em class="parameter"><code>*rsize</code></em> to the real size of the allocation if
<em class="parameter"><code>rsize</code></em> is not <code class="constant">NULL</code>. Behavior
- is undefined if <em class="parameter"><code>size</code></em> is
- <code class="constant">0</code>.</p><p>The <code class="function">rallocm</code>(<em class="parameter"><code></code></em>) function resizes the
+ is undefined if <em class="parameter"><code>size</code></em> is <code class="constant">0</code>, or
+ if request size overflows due to size class and/or alignment
+ constraints.</p><p>The <code class="function">rallocm</code>(<em class="parameter"><code></code></em>) function resizes the
allocation at <em class="parameter"><code>*ptr</code></em> to be at least
<em class="parameter"><code>size</code></em> bytes, sets <em class="parameter"><code>*ptr</code></em> to
the base address of the allocation if it moved, and sets
<em class="parameter"><code>*rsize</code></em> to the real size of the allocation if
<em class="parameter"><code>rsize</code></em> is not <code class="constant">NULL</code>. If
<em class="parameter"><code>extra</code></em> is non-zero, an attempt is made to resize
- the allocation to be at least <code class="code"><em class="parameter"><code>size</code></em> +
+ the allocation to be at least <code class="code">(<em class="parameter"><code>size</code></em> +
<em class="parameter"><code>extra</code></em>)</code> bytes, though inability to allocate
the extra byte(s) will not by itself result in failure. Behavior is
- undefined if <em class="parameter"><code>size</code></em> is <code class="constant">0</code>, or if
- <code class="code">(<em class="parameter"><code>size</code></em> +
+ undefined if <em class="parameter"><code>size</code></em> is <code class="constant">0</code>, if
+ request size overflows due to size class and/or alignment constraints, or
+ if <code class="code">(<em class="parameter"><code>size</code></em> +
<em class="parameter"><code>extra</code></em> &gt;
<code class="constant">SIZE_T_MAX</code>)</code>.</p><p>The <code class="function">sallocm</code>(<em class="parameter"><code></code></em>) function sets
<em class="parameter"><code>*rsize</code></em> to the real size of the allocation.</p><p>The <code class="function">dallocm</code>(<em class="parameter"><code></code></em>) function causes the
@@ -179,14 +242,21 @@ for (i = 0; i &lt; nbins; i++) {
<em class="parameter"><code>rsize</code></em> is not <code class="constant">NULL</code> it sets
<em class="parameter"><code>*rsize</code></em> to the real size of the allocation that
would result from the equivalent <code class="function">allocm</code>(<em class="parameter"><code></code></em>)
- function call. Behavior is undefined if
- <em class="parameter"><code>size</code></em> is <code class="constant">0</code>.</p></div></div><div class="refsect1" title="TUNING"><a name="tuning"></a><h2>TUNING</h2><p>Once, when the first call is made to one of the memory allocation
+ function call. Behavior is undefined if <em class="parameter"><code>size</code></em> is
+ <code class="constant">0</code>, or if request size overflows due to size class
+ and/or alignment constraints.</p></div></div><div class="refsect1"><a name="tuning"></a><h2>TUNING</h2><p>Once, when the first call is made to one of the memory allocation
routines, the allocator initializes its internals based in part on various
options that can be specified at compile- or run-time.</p><p>The string pointed to by the global variable
<code class="varname">malloc_conf</code>, the &#8220;name&#8221; of the file
referenced by the symbolic link named <code class="filename">/etc/malloc.conf</code>, and the value of the
environment variable <code class="envar">MALLOC_CONF</code>, will be interpreted, in
- that order, from left to right as options.</p><p>An options string is a comma-separated list of option:value pairs.
+ that order, from left to right as options. Note that
+ <code class="varname">malloc_conf</code> may be read before
+ <code class="function">main</code>(<em class="parameter"><code></code></em>) is entered, so the declaration of
+ <code class="varname">malloc_conf</code> should specify an initializer that contains
+ the final value to be read by jemalloc. <code class="varname">malloc_conf</code> is
+ a compile-time setting, whereas <code class="filename">/etc/malloc.conf</code> and <code class="envar">MALLOC_CONF</code>
+ can be safely set any time prior to program invocation.</p><p>An options string is a comma-separated list of option:value pairs.
There is one key corresponding to each <a class="link" href="#opt.abort">
"<code class="mallctl">opt.*</code>"
</a> mallctl (see the <a class="xref" href="#mallctl_namespace" title="MALLCTL NAMESPACE">MALLCTL NAMESPACE</a> section for options
@@ -198,7 +268,7 @@ for (i = 0; i &lt; nbins; i++) {
</a> options. Some
options have boolean values (true/false), others have integer values (base
8, 10, or 16, depending on prefix), and yet others have raw string
- values.</p></div><div class="refsect1" title="IMPLEMENTATION NOTES"><a name="implementation_notes"></a><h2>IMPLEMENTATION NOTES</h2><p>Traditionally, allocators have used
+ values.</p></div><div class="refsect1"><a name="implementation_notes"></a><h2>IMPLEMENTATION NOTES</h2><p>Traditionally, allocators have used
<span class="citerefentry"><span class="refentrytitle">sbrk</span>(2)</span> to obtain memory, which is
suboptimal for several reasons, including race conditions, increased
fragmentation, and artificial limitations on maximum usable memory. If
@@ -252,7 +322,7 @@ for (i = 0; i &lt; nbins; i++) {
suffer from cacheline sharing, round your allocation requests up to the
nearest multiple of the cacheline size, or specify cacheline alignment when
allocating.</p><p>Assuming 4 MiB chunks, 4 KiB pages, and a 16-byte quantum on a 64-bit
- system, the size classes in each category are as shown in <a class="xref" href="#size_classes" title="Table 1. Size classes">Table 1</a>.</p><div class="table"><a name="size_classes"></a><p class="title"><b>Table 1. Size classes</b></p><div class="table-contents"><table summary="Size classes" border="1"><colgroup><col align="left" class="c1"><col align="right" class="c2"><col align="left" class="c3"></colgroup><thead><tr><th align="left">Category</th><th align="right">Spacing</th><th align="left">Size</th></tr></thead><tbody><tr><td rowspan="7" align="left">Small</td><td align="right">lg</td><td align="left">[8]</td></tr><tr><td align="right">16</td><td align="left">[16, 32, 48, ..., 128]</td></tr><tr><td align="right">32</td><td align="left">[160, 192, 224, 256]</td></tr><tr><td align="right">64</td><td align="left">[320, 384, 448, 512]</td></tr><tr><td align="right">128</td><td align="left">[640, 768, 896, 1024]</td></tr><tr><td align="right">256</td><td align="left">[1280, 1536, 1792, 2048]</td></tr><tr><td align="right">512</td><td align="left">[2560, 3072, 3584]</td></tr><tr><td align="left">Large</td><td align="right">4 KiB</td><td align="left">[4 KiB, 8 KiB, 12 KiB, ..., 4072 KiB]</td></tr><tr><td align="left">Huge</td><td align="right">4 MiB</td><td align="left">[4 MiB, 8 MiB, 12 MiB, ...]</td></tr></tbody></table></div></div><br class="table-break"></div><div class="refsect1" title="MALLCTL NAMESPACE"><a name="mallctl_namespace"></a><h2>MALLCTL NAMESPACE</h2><p>The following names are defined in the namespace accessible via the
+ system, the size classes in each category are as shown in <a class="xref" href="#size_classes" title="Table 1. Size classes">Table 1</a>.</p><div class="table"><a name="size_classes"></a><p class="title"><b>Table 1. Size classes</b></p><div class="table-contents"><table summary="Size classes" border="1"><colgroup><col align="left" class="c1"><col align="right" class="c2"><col align="left" class="c3"></colgroup><thead><tr><th align="left">Category</th><th align="right">Spacing</th><th align="left">Size</th></tr></thead><tbody><tr><td rowspan="7" align="left">Small</td><td align="right">lg</td><td align="left">[8]</td></tr><tr><td align="right">16</td><td align="left">[16, 32, 48, ..., 128]</td></tr><tr><td align="right">32</td><td align="left">[160, 192, 224, 256]</td></tr><tr><td align="right">64</td><td align="left">[320, 384, 448, 512]</td></tr><tr><td align="right">128</td><td align="left">[640, 768, 896, 1024]</td></tr><tr><td align="right">256</td><td align="left">[1280, 1536, 1792, 2048]</td></tr><tr><td align="right">512</td><td align="left">[2560, 3072, 3584]</td></tr><tr><td align="left">Large</td><td align="right">4 KiB</td><td align="left">[4 KiB, 8 KiB, 12 KiB, ..., 4072 KiB]</td></tr><tr><td align="left">Huge</td><td align="right">4 MiB</td><td align="left">[4 MiB, 8 MiB, 12 MiB, ...]</td></tr></tbody></table></div></div><br class="table-break"></div><div class="refsect1"><a name="mallctl_namespace"></a><h2>MALLCTL NAMESPACE</h2><p>The following names are defined in the namespace accessible via the
<code class="function">mallctl*</code>(<em class="parameter"><code></code></em>) functions. Value types are
specified in parentheses, their readable/writable statuses are encoded as
<code class="literal">rw</code>, <code class="literal">r-</code>, <code class="literal">-w</code>, or
@@ -270,7 +340,7 @@ for (i = 0; i &lt; nbins; i++) {
note of the <a class="link" href="#epoch">
"<code class="mallctl">epoch</code>"
</a> mallctl,
- which controls refreshing of cached dynamic statistics.</p><div class="variablelist"><dl><dt><span class="term">
+ which controls refreshing of cached dynamic statistics.</p><div class="variablelist"><dl class="variablelist"><dt><a name="version"></a><span class="term">
"<code class="mallctl">version</code>"
@@ -285,105 +355,105 @@ for (i = 0; i &lt; nbins; i++) {
</span></dt><dd><p>If a value is passed in, refresh the data from which
the <code class="function">mallctl*</code>(<em class="parameter"><code></code></em>) functions report values,
and increment the epoch. Return the current epoch. This is useful for
- detecting whether another thread caused a refresh.</p></dd><dt><span class="term">
+ detecting whether another thread caused a refresh.</p></dd><dt><a name="config.debug"></a><span class="term">
"<code class="mallctl">config.debug</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-debug</code> was specified during
- build configuration.</p></dd><dt><span class="term">
+ build configuration.</p></dd><dt><a name="config.dss"></a><span class="term">
"<code class="mallctl">config.dss</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-dss</code> was specified during
- build configuration.</p></dd><dt><span class="term">
+ build configuration.</p></dd><dt><a name="config.fill"></a><span class="term">
"<code class="mallctl">config.fill</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-fill</code> was specified during
- build configuration.</p></dd><dt><span class="term">
+ build configuration.</p></dd><dt><a name="config.lazy_lock"></a><span class="term">
"<code class="mallctl">config.lazy_lock</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-lazy-lock</code> was specified
- during build configuration.</p></dd><dt><span class="term">
+ during build configuration.</p></dd><dt><a name="config.mremap"></a><span class="term">
"<code class="mallctl">config.mremap</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-mremap</code> was specified during
- build configuration.</p></dd><dt><span class="term">
+ build configuration.</p></dd><dt><a name="config.munmap"></a><span class="term">
"<code class="mallctl">config.munmap</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-munmap</code> was specified during
- build configuration.</p></dd><dt><span class="term">
+ build configuration.</p></dd><dt><a name="config.prof"></a><span class="term">
"<code class="mallctl">config.prof</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-prof</code> was specified during
- build configuration.</p></dd><dt><span class="term">
+ build configuration.</p></dd><dt><a name="config.prof_libgcc"></a><span class="term">
"<code class="mallctl">config.prof_libgcc</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--disable-prof-libgcc</code> was not
- specified during build configuration.</p></dd><dt><span class="term">
+ specified during build configuration.</p></dd><dt><a name="config.prof_libunwind"></a><span class="term">
"<code class="mallctl">config.prof_libunwind</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-prof-libunwind</code> was specified
- during build configuration.</p></dd><dt><span class="term">
+ during build configuration.</p></dd><dt><a name="config.stats"></a><span class="term">
"<code class="mallctl">config.stats</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-stats</code> was specified during
- build configuration.</p></dd><dt><span class="term">
+ build configuration.</p></dd><dt><a name="config.tcache"></a><span class="term">
"<code class="mallctl">config.tcache</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--disable-tcache</code> was not specified
- during build configuration.</p></dd><dt><span class="term">
+ during build configuration.</p></dd><dt><a name="config.tls"></a><span class="term">
"<code class="mallctl">config.tls</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--disable-tls</code> was not specified during
- build configuration.</p></dd><dt><span class="term">
+ build configuration.</p></dd><dt><a name="config.utrace"></a><span class="term">
"<code class="mallctl">config.utrace</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-utrace</code> was specified during
- build configuration.</p></dd><dt><span class="term">
+ build configuration.</p></dd><dt><a name="config.valgrind"></a><span class="term">
"<code class="mallctl">config.valgrind</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-valgrind</code> was specified during
- build configuration.</p></dd><dt><span class="term">
+ build configuration.</p></dd><dt><a name="config.xmalloc"></a><span class="term">
"<code class="mallctl">config.xmalloc</code>"
@@ -401,14 +471,7 @@ for (i = 0; i &lt; nbins; i++) {
<span class="citerefentry"><span class="refentrytitle">abort</span>(3)</span> in these cases. This option is
disabled by default unless <code class="option">--enable-debug</code> is
specified during configuration, in which case it is enabled by default.
- </p></dd><dt><a name="opt.lg_chunk"></a><span class="term">
-
- "<code class="mallctl">opt.lg_chunk</code>"
-
- (<span class="type">size_t</span>)
- <code class="literal">r-</code>
- </span></dt><dd><p>Virtual memory chunk size (log base 2). The default
- chunk size is 4 MiB (2^22).</p></dd><dt><a name="opt.dss"></a><span class="term">
+ </p></dd><dt><a name="opt.dss"></a><span class="term">
"<code class="mallctl">opt.dss</code>"
@@ -417,7 +480,22 @@ for (i = 0; i &lt; nbins; i++) {
</span></dt><dd><p>dss (<span class="citerefentry"><span class="refentrytitle">sbrk</span>(2)</span>) allocation precedence as
related to <span class="citerefentry"><span class="refentrytitle">mmap</span>(2)</span> allocation. The following
settings are supported: &#8220;disabled&#8221;, &#8220;primary&#8221;,
- and &#8220;secondary&#8221; (default).</p></dd><dt><a name="opt.narenas"></a><span class="term">
+ and &#8220;secondary&#8221;. The default is &#8220;secondary&#8221; if
+ <a class="link" href="#config.dss">
+ "<code class="mallctl">config.dss</code>"
+ </a> is
+ true, &#8220;disabled&#8221; otherwise.
+ </p></dd><dt><a name="opt.lg_chunk"></a><span class="term">
+
+ "<code class="mallctl">opt.lg_chunk</code>"
+
+ (<span class="type">size_t</span>)
+ <code class="literal">r-</code>
+ </span></dt><dd><p>Virtual memory chunk size (log base 2). If a chunk
+ size outside the supported size range is specified, the size is
+ silently clipped to the minimum/maximum supported size. The default
+ chunk size is 4 MiB (2^22).
+ </p></dd><dt><a name="opt.narenas"></a><span class="term">
"<code class="mallctl">opt.narenas</code>"
@@ -514,7 +592,8 @@ for (i = 0; i &lt; nbins; i++) {
</span></dt><dd><p>Zero filling enabled/disabled. If enabled, each byte
of uninitialized allocated memory will be initialized to 0. Note that
this initialization only happens once for each byte, so
- <code class="function">realloc</code>(<em class="parameter"><code></code></em>) and
+ <code class="function">realloc</code>(<em class="parameter"><code></code></em>),
+ <code class="function">rallocx</code>(<em class="parameter"><code></code></em>) and
<code class="function">rallocm</code>(<em class="parameter"><code></code></em>) calls do not zero memory that
was previously allocated. This is intended for debugging and will
impact performance negatively. This option is disabled by default.
@@ -625,7 +704,7 @@ malloc_conf = "xmalloc:true";</pre><p>
"<code class="mallctl">opt.prof_active</code>"
(<span class="type">bool</span>)
- <code class="literal">r-</code>
+ <code class="literal">rw</code>
[<code class="option">--enable-prof</code>]
</span></dt><dd><p>Profiling activated/deactivated. This is a secondary
control mechanism that makes it possible to start the application with
@@ -720,7 +799,7 @@ malloc_conf = "xmalloc:true";</pre><p>
"<code class="mallctl">opt.prof</code>"
</a> option for
information on analyzing heap profile output. This option is disabled
- by default.</p></dd><dt><span class="term">
+ by default.</p></dd><dt><a name="thread.arena"></a><span class="term">
"<code class="mallctl">thread.arena</code>"
@@ -742,7 +821,7 @@ malloc_conf = "xmalloc:true";</pre><p>
</span></dt><dd><p>Get the total number of bytes ever allocated by the
calling thread. This counter has the potential to wrap around; it is
up to the application to appropriately interpret the counter in such
- cases.</p></dd><dt><span class="term">
+ cases.</p></dd><dt><a name="thread.allocatedp"></a><span class="term">
"<code class="mallctl">thread.allocatedp</code>"
@@ -764,7 +843,7 @@ malloc_conf = "xmalloc:true";</pre><p>
</span></dt><dd><p>Get the total number of bytes ever deallocated by the
calling thread. This counter has the potential to wrap around; it is
up to the application to appropriately interpret the counter in such
- cases.</p></dd><dt><span class="term">
+ cases.</p></dd><dt><a name="thread.deallocatedp"></a><span class="term">
"<code class="mallctl">thread.deallocatedp</code>"
@@ -776,7 +855,7 @@ malloc_conf = "xmalloc:true";</pre><p>
"<code class="mallctl">thread.deallocated</code>"
</a>
mallctl. This is useful for avoiding the overhead of repeated
- <code class="function">mallctl*</code>(<em class="parameter"><code></code></em>) calls.</p></dd><dt><span class="term">
+ <code class="function">mallctl*</code>(<em class="parameter"><code></code></em>) calls.</p></dd><dt><a name="thread.tcache.enabled"></a><span class="term">
"<code class="mallctl">thread.tcache.enabled</code>"
@@ -788,7 +867,7 @@ malloc_conf = "xmalloc:true";</pre><p>
disabled (see
"<code class="mallctl">thread.tcache.flush</code>"
).
- </p></dd><dt><span class="term">
+ </p></dd><dt><a name="thread.tcache.flush"></a><span class="term">
"<code class="mallctl">thread.tcache.flush</code>"
@@ -823,8 +902,11 @@ malloc_conf = "xmalloc:true";</pre><p>
allocation for arena &lt;i&gt;, or for all arenas if &lt;i&gt; equals
<a class="link" href="#arenas.narenas">
"<code class="mallctl">arenas.narenas</code>"
- </a>. See
- <a class="link" href="#opt.dss">
+ </a>. Note
+ that even during huge allocation this setting is read from the arena
+ that would be chosen for small or large allocation so that applications
+ can depend on consistent dss versus mmap allocation regardless of
+ allocation size. See <a class="link" href="#opt.dss">
"<code class="mallctl">opt.dss</code>"
</a> for supported
settings.
@@ -844,32 +926,32 @@ malloc_conf = "xmalloc:true";</pre><p>
"<code class="mallctl">arenas.narenas</code>"
</a>
booleans. Each boolean indicates whether the corresponding arena is
- initialized.</p></dd><dt><span class="term">
+ initialized.</p></dd><dt><a name="arenas.quantum"></a><span class="term">
"<code class="mallctl">arenas.quantum</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
- </span></dt><dd><p>Quantum size.</p></dd><dt><span class="term">
+ </span></dt><dd><p>Quantum size.</p></dd><dt><a name="arenas.page"></a><span class="term">
"<code class="mallctl">arenas.page</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
- </span></dt><dd><p>Page size.</p></dd><dt><span class="term">
+ </span></dt><dd><p>Page size.</p></dd><dt><a name="arenas.tcache_max"></a><span class="term">
"<code class="mallctl">arenas.tcache_max</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-tcache</code>]
- </span></dt><dd><p>Maximum thread-cached size class.</p></dd><dt><span class="term">
+ </span></dt><dd><p>Maximum thread-cached size class.</p></dd><dt><a name="arenas.nbins"></a><span class="term">
"<code class="mallctl">arenas.nbins</code>"
(<span class="type">unsigned</span>)
<code class="literal">r-</code>
- </span></dt><dd><p>Number of bin size classes.</p></dd><dt><span class="term">
+ </span></dt><dd><p>Number of bin size classes.</p></dd><dt><a name="arenas.nhbins"></a><span class="term">
"<code class="mallctl">arenas.nhbins</code>"
@@ -883,39 +965,39 @@ malloc_conf = "xmalloc:true";</pre><p>
(<span class="type">size_t</span>)
<code class="literal">r-</code>
- </span></dt><dd><p>Maximum size supported by size class.</p></dd><dt><span class="term">
+ </span></dt><dd><p>Maximum size supported by size class.</p></dd><dt><a name="arenas.bin.i.nregs"></a><span class="term">
"<code class="mallctl">arenas.bin.&lt;i&gt;.nregs</code>"
(<span class="type">uint32_t</span>)
<code class="literal">r-</code>
- </span></dt><dd><p>Number of regions per page run.</p></dd><dt><span class="term">
+ </span></dt><dd><p>Number of regions per page run.</p></dd><dt><a name="arenas.bin.i.run_size"></a><span class="term">
"<code class="mallctl">arenas.bin.&lt;i&gt;.run_size</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
- </span></dt><dd><p>Number of bytes per page run.</p></dd><dt><span class="term">
+ </span></dt><dd><p>Number of bytes per page run.</p></dd><dt><a name="arenas.nlruns"></a><span class="term">
"<code class="mallctl">arenas.nlruns</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
- </span></dt><dd><p>Total number of large size classes.</p></dd><dt><span class="term">
+ </span></dt><dd><p>Total number of large size classes.</p></dd><dt><a name="arenas.lrun.i.size"></a><span class="term">
"<code class="mallctl">arenas.lrun.&lt;i&gt;.size</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Maximum size supported by this large size
- class.</p></dd><dt><span class="term">
+ class.</p></dd><dt><a name="arenas.purge"></a><span class="term">
"<code class="mallctl">arenas.purge</code>"
(<span class="type">unsigned</span>)
<code class="literal">-w</code>
</span></dt><dd><p>Purge unused dirty pages for the specified arena, or
- for all arenas if none is specified.</p></dd><dt><span class="term">
+ for all arenas if none is specified.</p></dd><dt><a name="arenas.extend"></a><span class="term">
"<code class="mallctl">arenas.extend</code>"
@@ -934,7 +1016,7 @@ malloc_conf = "xmalloc:true";</pre><p>
"<code class="mallctl">opt.prof_active</code>"
</a>
option for additional information.
- </p></dd><dt><span class="term">
+ </p></dd><dt><a name="prof.dump"></a><span class="term">
"<code class="mallctl">prof.dump</code>"
@@ -948,7 +1030,7 @@ malloc_conf = "xmalloc:true";</pre><p>
<a class="link" href="#opt.prof_prefix">
"<code class="mallctl">opt.prof_prefix</code>"
</a>
- option.</p></dd><dt><span class="term">
+ option.</p></dd><dt><a name="prof.interval"></a><span class="term">
"<code class="mallctl">prof.interval</code>"
@@ -1001,7 +1083,7 @@ malloc_conf = "xmalloc:true";</pre><p>
"<code class="mallctl">stats.arenas.&lt;i&gt;.pdirty</code>"
</a> and pages
- entirely devoted to allocator metadata.</p></dd><dt><span class="term">
+ entirely devoted to allocator metadata.</p></dd><dt><a name="stats.mapped"></a><span class="term">
"<code class="mallctl">stats.mapped</code>"
@@ -1013,7 +1095,7 @@ malloc_conf = "xmalloc:true";</pre><p>
large as <a class="link" href="#stats.active">
"<code class="mallctl">stats.active</code>"
</a>. This
- does not include inactive chunks.</p></dd><dt><span class="term">
+ does not include inactive chunks.</p></dd><dt><a name="stats.chunks.current"></a><span class="term">
"<code class="mallctl">stats.chunks.current</code>"
@@ -1022,14 +1104,14 @@ malloc_conf = "xmalloc:true";</pre><p>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Total number of chunks actively mapped on behalf of the
application. This does not include inactive chunks.
- </p></dd><dt><span class="term">
+ </p></dd><dt><a name="stats.chunks.total"></a><span class="term">
"<code class="mallctl">stats.chunks.total</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
- </span></dt><dd><p>Cumulative number of chunks allocated.</p></dd><dt><span class="term">
+ </span></dt><dd><p>Cumulative number of chunks allocated.</p></dd><dt><a name="stats.chunks.high"></a><span class="term">
"<code class="mallctl">stats.chunks.high</code>"
@@ -1037,7 +1119,7 @@ malloc_conf = "xmalloc:true";</pre><p>
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Maximum number of active chunks at any time thus far.
- </p></dd><dt><span class="term">
+ </p></dd><dt><a name="stats.huge.allocated"></a><span class="term">
"<code class="mallctl">stats.huge.allocated</code>"
@@ -1045,7 +1127,7 @@ malloc_conf = "xmalloc:true";</pre><p>
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Number of bytes currently allocated by huge objects.
- </p></dd><dt><span class="term">
+ </p></dd><dt><a name="stats.huge.nmalloc"></a><span class="term">
"<code class="mallctl">stats.huge.nmalloc</code>"
@@ -1053,7 +1135,7 @@ malloc_conf = "xmalloc:true";</pre><p>
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of huge allocation requests.
- </p></dd><dt><span class="term">
+ </p></dd><dt><a name="stats.huge.ndalloc"></a><span class="term">
"<code class="mallctl">stats.huge.ndalloc</code>"
@@ -1061,7 +1143,7 @@ malloc_conf = "xmalloc:true";</pre><p>
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of huge deallocation requests.
- </p></dd><dt><span class="term">
+ </p></dd><dt><a name="stats.arenas.i.dss"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.dss</code>"
@@ -1071,14 +1153,14 @@ malloc_conf = "xmalloc:true";</pre><p>
related to <span class="citerefentry"><span class="refentrytitle">mmap</span>(2)</span> allocation. See <a class="link" href="#opt.dss">
"<code class="mallctl">opt.dss</code>"
</a> for details.
- </p></dd><dt><span class="term">
+ </p></dd><dt><a name="stats.arenas.i.nthreads"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.nthreads</code>"
(<span class="type">unsigned</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Number of threads currently assigned to
- arena.</p></dd><dt><span class="term">
+ arena.</p></dd><dt><a name="stats.arenas.i.pactive"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.pactive</code>"
@@ -1093,14 +1175,14 @@ malloc_conf = "xmalloc:true";</pre><p>
</span></dt><dd><p>Number of pages within unused runs that are potentially
dirty, and for which <code class="function">madvise</code>(<em class="parameter"><code>...</code></em>,
<em class="parameter"><code><code class="constant">MADV_DONTNEED</code></code></em>) or
- similar has not been called.</p></dd><dt><span class="term">
+ similar has not been called.</p></dd><dt><a name="stats.arenas.i.mapped"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.mapped</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
- </span></dt><dd><p>Number of mapped bytes.</p></dd><dt><span class="term">
+ </span></dt><dd><p>Number of mapped bytes.</p></dd><dt><a name="stats.arenas.i.npurge"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.npurge</code>"
@@ -1108,7 +1190,7 @@ malloc_conf = "xmalloc:true";</pre><p>
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Number of dirty page purge sweeps performed.
- </p></dd><dt><span class="term">
+ </p></dd><dt><a name="stats.arenas.i.nmadvise"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.nmadvise</code>"
@@ -1117,14 +1199,14 @@ malloc_conf = "xmalloc:true";</pre><p>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Number of <code class="function">madvise</code>(<em class="parameter"><code>...</code></em>,
<em class="parameter"><code><code class="constant">MADV_DONTNEED</code></code></em>) or
- similar calls made to purge dirty pages.</p></dd><dt><span class="term">
+ similar calls made to purge dirty pages.</p></dd><dt><a name="stats.arenas.i.purged"></a><span class="term">
- "<code class="mallctl">stats.arenas.&lt;i&gt;.npurged</code>"
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.purged</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
- </span></dt><dd><p>Number of pages purged.</p></dd><dt><span class="term">
+ </span></dt><dd><p>Number of pages purged.</p></dd><dt><a name="stats.arenas.i.small.allocated"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.small.allocated</code>"
@@ -1132,7 +1214,7 @@ malloc_conf = "xmalloc:true";</pre><p>
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Number of bytes currently allocated by small objects.
- </p></dd><dt><span class="term">
+ </p></dd><dt><a name="stats.arenas.i.small.nmalloc"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.small.nmalloc</code>"
@@ -1140,7 +1222,7 @@ malloc_conf = "xmalloc:true";</pre><p>
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of allocation requests served by
- small bins.</p></dd><dt><span class="term">
+ small bins.</p></dd><dt><a name="stats.arenas.i.small.ndalloc"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.small.ndalloc</code>"
@@ -1148,7 +1230,7 @@ malloc_conf = "xmalloc:true";</pre><p>
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of small objects returned to bins.
- </p></dd><dt><span class="term">
+ </p></dd><dt><a name="stats.arenas.i.small.nrequests"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.small.nrequests</code>"
@@ -1156,7 +1238,7 @@ malloc_conf = "xmalloc:true";</pre><p>
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of small allocation requests.
- </p></dd><dt><span class="term">
+ </p></dd><dt><a name="stats.arenas.i.large.allocated"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.large.allocated</code>"
@@ -1164,7 +1246,7 @@ malloc_conf = "xmalloc:true";</pre><p>
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Number of bytes currently allocated by large objects.
- </p></dd><dt><span class="term">
+ </p></dd><dt><a name="stats.arenas.i.large.nmalloc"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.large.nmalloc</code>"
@@ -1172,7 +1254,7 @@ malloc_conf = "xmalloc:true";</pre><p>
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of large allocation requests served
- directly by the arena.</p></dd><dt><span class="term">
+ directly by the arena.</p></dd><dt><a name="stats.arenas.i.large.ndalloc"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.large.ndalloc</code>"
@@ -1180,7 +1262,7 @@ malloc_conf = "xmalloc:true";</pre><p>
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of large deallocation requests served
- directly by the arena.</p></dd><dt><span class="term">
+ directly by the arena.</p></dd><dt><a name="stats.arenas.i.large.nrequests"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.large.nrequests</code>"
@@ -1188,7 +1270,7 @@ malloc_conf = "xmalloc:true";</pre><p>
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of large allocation requests.
- </p></dd><dt><span class="term">
+ </p></dd><dt><a name="stats.arenas.i.bins.j.allocated"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.allocated</code>"
@@ -1196,7 +1278,7 @@ malloc_conf = "xmalloc:true";</pre><p>
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Current number of bytes allocated by
- bin.</p></dd><dt><span class="term">
+ bin.</p></dd><dt><a name="stats.arenas.i.bins.j.nmalloc"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nmalloc</code>"
@@ -1204,7 +1286,7 @@ malloc_conf = "xmalloc:true";</pre><p>
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of allocations served by bin.
- </p></dd><dt><span class="term">
+ </p></dd><dt><a name="stats.arenas.i.bins.j.ndalloc"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.ndalloc</code>"
@@ -1212,7 +1294,7 @@ malloc_conf = "xmalloc:true";</pre><p>
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of allocations returned to bin.
- </p></dd><dt><span class="term">
+ </p></dd><dt><a name="stats.arenas.i.bins.j.nrequests"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nrequests</code>"
@@ -1220,28 +1302,28 @@ malloc_conf = "xmalloc:true";</pre><p>
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of allocation
- requests.</p></dd><dt><span class="term">
+ requests.</p></dd><dt><a name="stats.arenas.i.bins.j.nfills"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nfills</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code> <code class="option">--enable-tcache</code>]
- </span></dt><dd><p>Cumulative number of tcache fills.</p></dd><dt><span class="term">
+ </span></dt><dd><p>Cumulative number of tcache fills.</p></dd><dt><a name="stats.arenas.i.bins.j.nflushes"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nflushes</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code> <code class="option">--enable-tcache</code>]
- </span></dt><dd><p>Cumulative number of tcache flushes.</p></dd><dt><span class="term">
+ </span></dt><dd><p>Cumulative number of tcache flushes.</p></dd><dt><a name="stats.arenas.i.bins.j.nruns"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nruns</code>"
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
- </span></dt><dd><p>Cumulative number of runs created.</p></dd><dt><span class="term">
+ </span></dt><dd><p>Cumulative number of runs created.</p></dd><dt><a name="stats.arenas.i.bins.j.nreruns"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nreruns</code>"
@@ -1249,14 +1331,14 @@ malloc_conf = "xmalloc:true";</pre><p>
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of times the current run from which
- to allocate changed.</p></dd><dt><span class="term">
+ to allocate changed.</p></dd><dt><a name="stats.arenas.i.bins.j.curruns"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.curruns</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
- </span></dt><dd><p>Current number of runs.</p></dd><dt><span class="term">
+ </span></dt><dd><p>Current number of runs.</p></dd><dt><a name="stats.arenas.i.lruns.j.nmalloc"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.nmalloc</code>"
@@ -1264,7 +1346,7 @@ malloc_conf = "xmalloc:true";</pre><p>
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of allocation requests for this size
- class served directly by the arena.</p></dd><dt><span class="term">
+ class served directly by the arena.</p></dd><dt><a name="stats.arenas.i.lruns.j.ndalloc"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.ndalloc</code>"
@@ -1272,7 +1354,7 @@ malloc_conf = "xmalloc:true";</pre><p>
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of deallocation requests for this
- size class served directly by the arena.</p></dd><dt><span class="term">
+ size class served directly by the arena.</p></dd><dt><a name="stats.arenas.i.lruns.j.nrequests"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.nrequests</code>"
@@ -1280,7 +1362,7 @@ malloc_conf = "xmalloc:true";</pre><p>
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of allocation requests for this size
- class.</p></dd><dt><span class="term">
+ class.</p></dd><dt><a name="stats.arenas.i.lruns.j.curruns"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.curruns</code>"
@@ -1288,7 +1370,7 @@ malloc_conf = "xmalloc:true";</pre><p>
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Current number of runs for this size class.
- </p></dd></dl></div></div><div class="refsect1" title="DEBUGGING MALLOC PROBLEMS"><a name="debugging_malloc_problems"></a><h2>DEBUGGING MALLOC PROBLEMS</h2><p>When debugging, it is a good idea to configure/build jemalloc with
+ </p></dd></dl></div></div><div class="refsect1"><a name="debugging_malloc_problems"></a><h2>DEBUGGING MALLOC PROBLEMS</h2><p>When debugging, it is a good idea to configure/build jemalloc with
the <code class="option">--enable-debug</code> and <code class="option">--enable-fill</code>
options, and recompile the program with suitable options and symbols for
debugger support. When so configured, jemalloc incorporates a wide variety
@@ -1308,7 +1390,7 @@ malloc_conf = "xmalloc:true";</pre><p>
it detects, because the performance impact for storing such information
would be prohibitive. However, jemalloc does integrate with the most
excellent <a class="ulink" href="http://valgrind.org/" target="_top">Valgrind</a> tool if the
- <code class="option">--enable-valgrind</code> configuration option is enabled.</p></div><div class="refsect1" title="DIAGNOSTIC MESSAGES"><a name="diagnostic_messages"></a><h2>DIAGNOSTIC MESSAGES</h2><p>If any of the memory allocation/deallocation functions detect an
+ <code class="option">--enable-valgrind</code> configuration option is enabled.</p></div><div class="refsect1"><a name="diagnostic_messages"></a><h2>DIAGNOSTIC MESSAGES</h2><p>If any of the memory allocation/deallocation functions detect an
error or warning condition, a message will be printed to file descriptor
<code class="constant">STDERR_FILENO</code>. Errors will result in the process
dumping core. If the <a class="link" href="#opt.abort">
@@ -1324,7 +1406,7 @@ malloc_conf = "xmalloc:true";</pre><p>
<code class="function">malloc_stats_print</code>(<em class="parameter"><code></code></em>), followed by a string
pointer. Please note that doing anything which tries to allocate memory in
this function is likely to result in a crash or deadlock.</p><p>All messages are prefixed by
- &#8220;<code class="computeroutput">&lt;jemalloc&gt;: </code>&#8221;.</p></div><div class="refsect1" title="RETURN VALUES"><a name="return_values"></a><h2>RETURN VALUES</h2><div class="refsect2" title="Standard API"><a name="id286954473"></a><h3>Standard API</h3><p>The <code class="function">malloc</code>(<em class="parameter"><code></code></em>) and
+ &#8220;<code class="computeroutput">&lt;jemalloc&gt;: </code>&#8221;.</p></div><div class="refsect1"><a name="return_values"></a><h2>RETURN VALUES</h2><div class="refsect2"><a name="idm316388028784"></a><h3>Standard API</h3><p>The <code class="function">malloc</code>(<em class="parameter"><code></code></em>) and
<code class="function">calloc</code>(<em class="parameter"><code></code></em>) functions return a pointer to the
allocated memory if successful; otherwise a <code class="constant">NULL</code>
pointer is returned and <code class="varname">errno</code> is set to
@@ -1332,7 +1414,7 @@ malloc_conf = "xmalloc:true";</pre><p>
returns the value 0 if successful; otherwise it returns an error value.
The <code class="function">posix_memalign</code>(<em class="parameter"><code></code></em>) function will fail
if:
- </p><div class="variablelist"><dl><dt><span class="term"><span class="errorname">EINVAL</span></span></dt><dd><p>The <em class="parameter"><code>alignment</code></em> parameter is
+ </p><div class="variablelist"><dl class="variablelist"><dt><span class="term"><span class="errorname">EINVAL</span></span></dt><dd><p>The <em class="parameter"><code>alignment</code></em> parameter is
not a power of 2 at least as large as
<code class="code">sizeof(<span class="type">void *</span>)</code>.
</p></dd><dt><span class="term"><span class="errorname">ENOMEM</span></span></dt><dd><p>Memory allocation error.</p></dd></dl></div><p>
@@ -1341,7 +1423,7 @@ malloc_conf = "xmalloc:true";</pre><p>
<code class="constant">NULL</code> pointer is returned and
<code class="varname">errno</code> is set. The
<code class="function">aligned_alloc</code>(<em class="parameter"><code></code></em>) function will fail if:
- </p><div class="variablelist"><dl><dt><span class="term"><span class="errorname">EINVAL</span></span></dt><dd><p>The <em class="parameter"><code>alignment</code></em> parameter is
+ </p><div class="variablelist"><dl class="variablelist"><dt><span class="term"><span class="errorname">EINVAL</span></span></dt><dd><p>The <em class="parameter"><code>alignment</code></em> parameter is
not a power of 2.
</p></dd><dt><span class="term"><span class="errorname">ENOMEM</span></span></dt><dd><p>Memory allocation error.</p></dd></dl></div><p>
</p><p>The <code class="function">realloc</code>(<em class="parameter"><code></code></em>) function returns a
@@ -1352,26 +1434,38 @@ malloc_conf = "xmalloc:true";</pre><p>
allocation failure. The <code class="function">realloc</code>(<em class="parameter"><code></code></em>)
function always leaves the original buffer intact when an error occurs.
</p><p>The <code class="function">free</code>(<em class="parameter"><code></code></em>) function returns no
- value.</p></div><div class="refsect2" title="Non-standard API"><a name="id286954690"></a><h3>Non-standard API</h3><p>The <code class="function">malloc_usable_size</code>(<em class="parameter"><code></code></em>) function
- returns the usable size of the allocation pointed to by
- <em class="parameter"><code>ptr</code></em>. </p><p>The <code class="function">mallctl</code>(<em class="parameter"><code></code></em>),
+ value.</p></div><div class="refsect2"><a name="idm316388003104"></a><h3>Non-standard API</h3><p>The <code class="function">mallocx</code>(<em class="parameter"><code></code></em>) and
+ <code class="function">rallocx</code>(<em class="parameter"><code></code></em>) functions return a pointer to
+ the allocated memory if successful; otherwise a <code class="constant">NULL</code>
+ pointer is returned to indicate insufficient contiguous memory was
+ available to service the allocation request. </p><p>The <code class="function">xallocx</code>(<em class="parameter"><code></code></em>) function returns the
+ real size of the resulting resized allocation pointed to by
+ <em class="parameter"><code>ptr</code></em>, which is a value less than
+ <em class="parameter"><code>size</code></em> if the allocation could not be adequately
+ grown in place. </p><p>The <code class="function">sallocx</code>(<em class="parameter"><code></code></em>) function returns the
+ real size of the allocation pointed to by <em class="parameter"><code>ptr</code></em>.
+ </p><p>The <code class="function">nallocx</code>(<em class="parameter"><code></code></em>) returns the real size
+ that would result from a successful equivalent
+ <code class="function">mallocx</code>(<em class="parameter"><code></code></em>) function call, or zero if
+ insufficient memory is available to perform the size computation. </p><p>The <code class="function">mallctl</code>(<em class="parameter"><code></code></em>),
<code class="function">mallctlnametomib</code>(<em class="parameter"><code></code></em>), and
<code class="function">mallctlbymib</code>(<em class="parameter"><code></code></em>) functions return 0 on
success; otherwise they return an error value. The functions will fail
if:
- </p><div class="variablelist"><dl><dt><span class="term"><span class="errorname">EINVAL</span></span></dt><dd><p><em class="parameter"><code>newp</code></em> is not
+ </p><div class="variablelist"><dl class="variablelist"><dt><span class="term"><span class="errorname">EINVAL</span></span></dt><dd><p><em class="parameter"><code>newp</code></em> is not
<code class="constant">NULL</code>, and <em class="parameter"><code>newlen</code></em> is too
large or too small. Alternatively, <em class="parameter"><code>*oldlenp</code></em>
is too large or too small; in this case as much data as possible
- are read despite the error.</p></dd><dt><span class="term"><span class="errorname">ENOMEM</span></span></dt><dd><p><em class="parameter"><code>*oldlenp</code></em> is too short to
- hold the requested value.</p></dd><dt><span class="term"><span class="errorname">ENOENT</span></span></dt><dd><p><em class="parameter"><code>name</code></em> or
+ are read despite the error.</p></dd><dt><span class="term"><span class="errorname">ENOENT</span></span></dt><dd><p><em class="parameter"><code>name</code></em> or
<em class="parameter"><code>mib</code></em> specifies an unknown/invalid
value.</p></dd><dt><span class="term"><span class="errorname">EPERM</span></span></dt><dd><p>Attempt to read or write void value, or attempt to
write read-only value.</p></dd><dt><span class="term"><span class="errorname">EAGAIN</span></span></dt><dd><p>A memory allocation failure
occurred.</p></dd><dt><span class="term"><span class="errorname">EFAULT</span></span></dt><dd><p>An interface with side effects failed in some way
not directly related to <code class="function">mallctl*</code>(<em class="parameter"><code></code></em>)
read/write processing.</p></dd></dl></div><p>
- </p></div><div class="refsect2" title="Experimental API"><a name="id286954842"></a><h3>Experimental API</h3><p>The <code class="function">allocm</code>(<em class="parameter"><code></code></em>),
+ </p><p>The <code class="function">malloc_usable_size</code>(<em class="parameter"><code></code></em>) function
+ returns the usable size of the allocation pointed to by
+ <em class="parameter"><code>ptr</code></em>. </p></div><div class="refsect2"><a name="idm316387973360"></a><h3>Experimental API</h3><p>The <code class="function">allocm</code>(<em class="parameter"><code></code></em>),
<code class="function">rallocm</code>(<em class="parameter"><code></code></em>),
<code class="function">sallocm</code>(<em class="parameter"><code></code></em>),
<code class="function">dallocm</code>(<em class="parameter"><code></code></em>), and
@@ -1380,7 +1474,7 @@ malloc_conf = "xmalloc:true";</pre><p>
error value. The <code class="function">allocm</code>(<em class="parameter"><code></code></em>),
<code class="function">rallocm</code>(<em class="parameter"><code></code></em>), and
<code class="function">nallocm</code>(<em class="parameter"><code></code></em>) functions will fail if:
- </p><div class="variablelist"><dl><dt><span class="term"><span class="errorname">ALLOCM_ERR_OOM</span></span></dt><dd><p>Out of memory. Insufficient contiguous memory was
+ </p><div class="variablelist"><dl class="variablelist"><dt><span class="term"><span class="errorname">ALLOCM_ERR_OOM</span></span></dt><dd><p>Out of memory. Insufficient contiguous memory was
available to service the allocation request. The
<code class="function">allocm</code>(<em class="parameter"><code></code></em>) function additionally sets
<em class="parameter"><code>*ptr</code></em> to <code class="constant">NULL</code>, whereas
@@ -1388,25 +1482,25 @@ malloc_conf = "xmalloc:true";</pre><p>
<code class="constant">*ptr</code> unmodified.</p></dd></dl></div><p>
The <code class="function">rallocm</code>(<em class="parameter"><code></code></em>) function will also
fail if:
- </p><div class="variablelist"><dl><dt><span class="term"><span class="errorname">ALLOCM_ERR_NOT_MOVED</span></span></dt><dd><p><code class="constant">ALLOCM_NO_MOVE</code> was specified,
+ </p><div class="variablelist"><dl class="variablelist"><dt><span class="term"><span class="errorname">ALLOCM_ERR_NOT_MOVED</span></span></dt><dd><p><code class="constant">ALLOCM_NO_MOVE</code> was specified,
but the reallocation request could not be serviced without moving
the object.</p></dd></dl></div><p>
- </p></div></div><div class="refsect1" title="ENVIRONMENT"><a name="environment"></a><h2>ENVIRONMENT</h2><p>The following environment variable affects the execution of the
+ </p></div></div><div class="refsect1"><a name="environment"></a><h2>ENVIRONMENT</h2><p>The following environment variable affects the execution of the
allocation functions:
- </p><div class="variablelist"><dl><dt><span class="term"><code class="envar">MALLOC_CONF</code></span></dt><dd><p>If the environment variable
+ </p><div class="variablelist"><dl class="variablelist"><dt><span class="term"><code class="envar">MALLOC_CONF</code></span></dt><dd><p>If the environment variable
<code class="envar">MALLOC_CONF</code> is set, the characters it contains
will be interpreted as options.</p></dd></dl></div><p>
- </p></div><div class="refsect1" title="EXAMPLES"><a name="examples"></a><h2>EXAMPLES</h2><p>To dump core whenever a problem occurs:
+ </p></div><div class="refsect1"><a name="examples"></a><h2>EXAMPLES</h2><p>To dump core whenever a problem occurs:
</p><pre class="screen">ln -s 'abort:true' /etc/malloc.conf</pre><p>
</p><p>To specify in the source a chunk size that is 16 MiB:
</p><pre class="programlisting">
-malloc_conf = "lg_chunk:24";</pre></div><div class="refsect1" title="SEE ALSO"><a name="see_also"></a><h2>SEE ALSO</h2><p><span class="citerefentry"><span class="refentrytitle">madvise</span>(2)</span>,
+malloc_conf = "lg_chunk:24";</pre></div><div class="refsect1"><a name="see_also"></a><h2>SEE ALSO</h2><p><span class="citerefentry"><span class="refentrytitle">madvise</span>(2)</span>,
<span class="citerefentry"><span class="refentrytitle">mmap</span>(2)</span>,
<span class="citerefentry"><span class="refentrytitle">sbrk</span>(2)</span>,
<span class="citerefentry"><span class="refentrytitle">utrace</span>(2)</span>,
<span class="citerefentry"><span class="refentrytitle">alloca</span>(3)</span>,
<span class="citerefentry"><span class="refentrytitle">atexit</span>(3)</span>,
- <span class="citerefentry"><span class="refentrytitle">getpagesize</span>(3)</span></p></div><div class="refsect1" title="STANDARDS"><a name="standards"></a><h2>STANDARDS</h2><p>The <code class="function">malloc</code>(<em class="parameter"><code></code></em>),
+ <span class="citerefentry"><span class="refentrytitle">getpagesize</span>(3)</span></p></div><div class="refsect1"><a name="standards"></a><h2>STANDARDS</h2><p>The <code class="function">malloc</code>(<em class="parameter"><code></code></em>),
<code class="function">calloc</code>(<em class="parameter"><code></code></em>),
<code class="function">realloc</code>(<em class="parameter"><code></code></em>), and
<code class="function">free</code>(<em class="parameter"><code></code></em>) functions conform to ISO/IEC
diff --git a/deps/jemalloc/doc/jemalloc.xml.in b/deps/jemalloc/doc/jemalloc.xml.in
index 54b87474c..d8e2e711f 100644
--- a/deps/jemalloc/doc/jemalloc.xml.in
+++ b/deps/jemalloc/doc/jemalloc.xml.in
@@ -33,11 +33,17 @@
<refname>aligned_alloc</refname>
<refname>realloc</refname>
<refname>free</refname>
- <refname>malloc_usable_size</refname>
- <refname>malloc_stats_print</refname>
+ <refname>mallocx</refname>
+ <refname>rallocx</refname>
+ <refname>xallocx</refname>
+ <refname>sallocx</refname>
+ <refname>dallocx</refname>
+ <refname>nallocx</refname>
<refname>mallctl</refname>
<refname>mallctlnametomib</refname>
<refname>mallctlbymib</refname>
+ <refname>malloc_stats_print</refname>
+ <refname>malloc_usable_size</refname>
<refname>allocm</refname>
<refname>rallocm</refname>
<refname>sallocm</refname>
@@ -92,16 +98,37 @@
<refsect2>
<title>Non-standard API</title>
<funcprototype>
- <funcdef>size_t <function>malloc_usable_size</function></funcdef>
- <paramdef>const void *<parameter>ptr</parameter></paramdef>
+ <funcdef>void *<function>mallocx</function></funcdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
</funcprototype>
<funcprototype>
- <funcdef>void <function>malloc_stats_print</function></funcdef>
- <paramdef>void <parameter>(*write_cb)</parameter>
- <funcparams>void *, const char *</funcparams>
- </paramdef>
- <paramdef>void *<parameter>cbopaque</parameter></paramdef>
- <paramdef>const char *<parameter>opts</parameter></paramdef>
+ <funcdef>void *<function>rallocx</function></funcdef>
+ <paramdef>void *<parameter>ptr</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>size_t <function>xallocx</function></funcdef>
+ <paramdef>void *<parameter>ptr</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>size_t <parameter>extra</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>size_t <function>sallocx</function></funcdef>
+ <paramdef>void *<parameter>ptr</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>void <function>dallocx</function></funcdef>
+ <paramdef>void *<parameter>ptr</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>size_t <function>nallocx</function></funcdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
</funcprototype>
<funcprototype>
<funcdef>int <function>mallctl</function></funcdef>
@@ -127,6 +154,18 @@
<paramdef>size_t <parameter>newlen</parameter></paramdef>
</funcprototype>
<funcprototype>
+ <funcdef>void <function>malloc_stats_print</function></funcdef>
+ <paramdef>void <parameter>(*write_cb)</parameter>
+ <funcparams>void *, const char *</funcparams>
+ </paramdef>
+ <paramdef>void *<parameter>cbopaque</parameter></paramdef>
+ <paramdef>const char *<parameter>opts</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>size_t <function>malloc_usable_size</function></funcdef>
+ <paramdef>const void *<parameter>ptr</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
<funcdef>void <function>(*malloc_message)</function></funcdef>
<paramdef>void *<parameter>cbopaque</parameter></paramdef>
<paramdef>const char *<parameter>s</parameter></paramdef>
@@ -225,42 +264,103 @@
</refsect2>
<refsect2>
<title>Non-standard API</title>
+ <para>The <function>mallocx<parameter/></function>,
+ <function>rallocx<parameter/></function>,
+ <function>xallocx<parameter/></function>,
+ <function>sallocx<parameter/></function>,
+ <function>dallocx<parameter/></function>, and
+ <function>nallocx<parameter/></function> functions all have a
+ <parameter>flags</parameter> argument that can be used to specify
+ options. The functions only check the options that are contextually
+ relevant. Use bitwise or (<code language="C">|</code>) operations to
+ specify one or more of the following:
+ <variablelist>
+ <varlistentry>
+ <term><constant>MALLOCX_LG_ALIGN(<parameter>la</parameter>)
+ </constant></term>
- <para>The <function>malloc_usable_size<parameter/></function> function
- returns the usable size of the allocation pointed to by
- <parameter>ptr</parameter>. The return value may be larger than the size
- that was requested during allocation. The
- <function>malloc_usable_size<parameter/></function> function is not a
- mechanism for in-place <function>realloc<parameter/></function>; rather
- it is provided solely as a tool for introspection purposes. Any
- discrepancy between the requested allocation size and the size reported
- by <function>malloc_usable_size<parameter/></function> should not be
- depended on, since such behavior is entirely implementation-dependent.
- </para>
+ <listitem><para>Align the memory allocation to start at an address
+ that is a multiple of <code language="C">(1 &lt;&lt;
+ <parameter>la</parameter>)</code>. This macro does not validate
+ that <parameter>la</parameter> is within the valid
+ range.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><constant>MALLOCX_ALIGN(<parameter>a</parameter>)
+ </constant></term>
- <para>The <function>malloc_stats_print<parameter/></function> function
- writes human-readable summary statistics via the
- <parameter>write_cb</parameter> callback function pointer and
- <parameter>cbopaque</parameter> data passed to
- <parameter>write_cb</parameter>, or
- <function>malloc_message<parameter/></function> if
- <parameter>write_cb</parameter> is <constant>NULL</constant>. This
- function can be called repeatedly. General information that never
- changes during execution can be omitted by specifying "g" as a character
- within the <parameter>opts</parameter> string. Note that
- <function>malloc_message<parameter/></function> uses the
- <function>mallctl*<parameter/></function> functions internally, so
- inconsistent statistics can be reported if multiple threads use these
- functions simultaneously. If <option>--enable-stats</option> is
- specified during configuration, &ldquo;m&rdquo; and &ldquo;a&rdquo; can
- be specified to omit merged arena and per arena statistics, respectively;
- &ldquo;b&rdquo; and &ldquo;l&rdquo; can be specified to omit per size
- class statistics for bins and large objects, respectively. Unrecognized
- characters are silently ignored. Note that thread caching may prevent
- some statistics from being completely up to date, since extra locking
- would be required to merge counters that track thread cache operations.
+ <listitem><para>Align the memory allocation to start at an address
+ that is a multiple of <parameter>a</parameter>, where
+ <parameter>a</parameter> is a power of two. This macro does not
+ validate that <parameter>a</parameter> is a power of 2.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><constant>MALLOCX_ZERO</constant></term>
+
+ <listitem><para>Initialize newly allocated memory to contain zero
+ bytes. In the growing reallocation case, the real size prior to
+ reallocation defines the boundary between untouched bytes and those
+ that are initialized to contain zero bytes. If this macro is
+ absent, newly allocated memory is uninitialized.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><constant>MALLOCX_ARENA(<parameter>a</parameter>)
+ </constant></term>
+
+ <listitem><para>Use the arena specified by the index
+ <parameter>a</parameter> (and by necessity bypass the thread
+ cache). This macro has no effect for huge regions, nor for regions
+ that were allocated via an arena other than the one specified.
+ This macro does not validate that <parameter>a</parameter>
+ specifies an arena index in the valid range.</para></listitem>
+ </varlistentry>
+ </variablelist>
</para>
+ <para>The <function>mallocx<parameter/></function> function allocates at
+ least <parameter>size</parameter> bytes of memory, and returns a pointer
+ to the base address of the allocation. Behavior is undefined if
+ <parameter>size</parameter> is <constant>0</constant>, or if request size
+ overflows due to size class and/or alignment constraints.</para>
+
+ <para>The <function>rallocx<parameter/></function> function resizes the
+ allocation at <parameter>ptr</parameter> to be at least
+ <parameter>size</parameter> bytes, and returns a pointer to the base
+ address of the resulting allocation, which may or may not have moved from
+ its original location. Behavior is undefined if
+ <parameter>size</parameter> is <constant>0</constant>, or if request size
+ overflows due to size class and/or alignment constraints.</para>
+
+ <para>The <function>xallocx<parameter/></function> function resizes the
+ allocation at <parameter>ptr</parameter> in place to be at least
+ <parameter>size</parameter> bytes, and returns the real size of the
+ allocation. If <parameter>extra</parameter> is non-zero, an attempt is
+ made to resize the allocation to be at least <code
+ language="C">(<parameter>size</parameter> +
+ <parameter>extra</parameter>)</code> bytes, though inability to allocate
+ the extra byte(s) will not by itself result in failure to resize.
+ Behavior is undefined if <parameter>size</parameter> is
+ <constant>0</constant>, or if <code
+ language="C">(<parameter>size</parameter> + <parameter>extra</parameter>
+ &gt; <constant>SIZE_T_MAX</constant>)</code>.</para>
+
+ <para>The <function>sallocx<parameter/></function> function returns the
+ real size of the allocation at <parameter>ptr</parameter>.</para>
+
+ <para>The <function>dallocx<parameter/></function> function causes the
+ memory referenced by <parameter>ptr</parameter> to be made available for
+ future allocations.</para>
+
+ <para>The <function>nallocx<parameter/></function> function allocates no
+ memory, but it performs the same size computation as the
+ <function>mallocx<parameter/></function> function, and returns the real
+ size of the allocation that would result from the equivalent
+ <function>mallocx<parameter/></function> function call. Behavior is
+ undefined if <parameter>size</parameter> is <constant>0</constant>, or if
+ request size overflows due to size class and/or alignment
+ constraints.</para>
+
<para>The <function>mallctl<parameter/></function> function provides a
general interface for introspecting the memory allocator, as well as
setting modifiable parameters and triggering actions. The
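[Editorial sketch, not part of the patch.] The hunk above says the MALLOCX_* flags are combined with bitwise or and that nallocx() performs the same size computation as mallocx() without allocating. A hedged sketch; the 64-byte alignment and arena index 0 are arbitrary example values:

#include <jemalloc/jemalloc.h>
#include <stddef.h>

void *aligned_zeroed_alloc(size_t size)
{
	/* 64-byte aligned, zero filled, served from arena 0 (bypasses the tcache). */
	int flags = MALLOCX_ALIGN(64) | MALLOCX_ZERO | MALLOCX_ARENA(0);

	/* Same size computation as mallocx(), but no memory is allocated;
	 * zero means the computation could not be performed. */
	if (nallocx(size, flags) == 0)
		return NULL;
	return mallocx(size, flags);
}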
@@ -297,15 +397,14 @@
it is legitimate to construct code like the following: <programlisting
language="C"><![CDATA[
unsigned nbins, i;
-
-int mib[4];
+size_t mib[4];
size_t len, miblen;
len = sizeof(nbins);
mallctl("arenas.nbins", &nbins, &len, NULL, 0);
miblen = 4;
-mallnametomib("arenas.bin.0.size", mib, &miblen);
+mallctlnametomib("arenas.bin.0.size", mib, &miblen);
for (i = 0; i < nbins; i++) {
size_t bin_size;
@@ -314,6 +413,41 @@ for (i = 0; i < nbins; i++) {
mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
/* Do something with bin_size... */
}]]></programlisting></para>
+
+ <para>The <function>malloc_stats_print<parameter/></function> function
+ writes human-readable summary statistics via the
+ <parameter>write_cb</parameter> callback function pointer and
+ <parameter>cbopaque</parameter> data passed to
+ <parameter>write_cb</parameter>, or
+ <function>malloc_message<parameter/></function> if
+ <parameter>write_cb</parameter> is <constant>NULL</constant>. This
+ function can be called repeatedly. General information that never
+ changes during execution can be omitted by specifying "g" as a character
+ within the <parameter>opts</parameter> string. Note that
+ <function>malloc_message<parameter/></function> uses the
+ <function>mallctl*<parameter/></function> functions internally, so
+ inconsistent statistics can be reported if multiple threads use these
+ functions simultaneously. If <option>--enable-stats</option> is
+ specified during configuration, &ldquo;m&rdquo; and &ldquo;a&rdquo; can
+ be specified to omit merged arena and per arena statistics, respectively;
+ &ldquo;b&rdquo; and &ldquo;l&rdquo; can be specified to omit per size
+ class statistics for bins and large objects, respectively. Unrecognized
+ characters are silently ignored. Note that thread caching may prevent
+ some statistics from being completely up to date, since extra locking
+ would be required to merge counters that track thread cache operations.
+ </para>
+
+ <para>The <function>malloc_usable_size<parameter/></function> function
+ returns the usable size of the allocation pointed to by
+ <parameter>ptr</parameter>. The return value may be larger than the size
+ that was requested during allocation. The
+ <function>malloc_usable_size<parameter/></function> function is not a
+ mechanism for in-place <function>realloc<parameter/></function>; rather
+ it is provided solely as a tool for introspection purposes. Any
+ discrepancy between the requested allocation size and the size reported
+ by <function>malloc_usable_size<parameter/></function> should not be
+ depended on, since such behavior is entirely implementation-dependent.
+ </para>
</refsect2>
<refsect2>
<title>Experimental API</title>
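[Editorial sketch, not part of the patch.] A short C illustration of the malloc_stats_print() call relocated in the hunk above, using the opts characters it documents; write_to_log and the FILE * sink are illustrative assumptions:

#include <jemalloc/jemalloc.h>
#include <stdio.h>

static void write_to_log(void *cbopaque, const char *s)
{
	fputs(s, (FILE *)cbopaque);
}

void dump_stats(FILE *log)
{
	/* "g" omits general info; "b" and "l" omit per size class bin/large stats. */
	malloc_stats_print(write_to_log, log, "gbl");

	/* With write_cb == NULL, output goes through malloc_message (stderr). */
	malloc_stats_print(NULL, NULL, NULL);
}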
@@ -358,7 +492,7 @@ for (i = 0; i < nbins; i++) {
<listitem><para>Initialize newly allocated memory to contain zero
bytes. In the growing reallocation case, the real size prior to
reallocation defines the boundary between untouched bytes and those
- that are initialized to contain zero bytes. If this option is
+ that are initialized to contain zero bytes. If this macro is
absent, newly allocated memory is uninitialized.</para></listitem>
</varlistentry>
<varlistentry>
@@ -373,9 +507,11 @@ for (i = 0; i < nbins; i++) {
</constant></term>
<listitem><para>Use the arena specified by the index
- <parameter>a</parameter>. This macro does not validate that
- <parameter>a</parameter> specifies an arena in the valid
- range.</para></listitem>
+ <parameter>a</parameter> (and by necessity bypass the thread
+ cache). This macro has no effect for huge regions, nor for regions
+ that were allocated via an arena other than the one specified.
+ This macro does not validate that <parameter>a</parameter>
+ specifies an arena index in the valid range.</para></listitem>
</varlistentry>
</variablelist>
</para>
@@ -385,8 +521,9 @@ for (i = 0; i < nbins; i++) {
<parameter>*ptr</parameter> to the base address of the allocation, and
sets <parameter>*rsize</parameter> to the real size of the allocation if
<parameter>rsize</parameter> is not <constant>NULL</constant>. Behavior
- is undefined if <parameter>size</parameter> is
- <constant>0</constant>.</para>
+ is undefined if <parameter>size</parameter> is <constant>0</constant>, or
+ if request size overflows due to size class and/or alignment
+ constraints.</para>
<para>The <function>rallocm<parameter/></function> function resizes the
allocation at <parameter>*ptr</parameter> to be at least
@@ -396,11 +533,12 @@ for (i = 0; i < nbins; i++) {
<parameter>rsize</parameter> is not <constant>NULL</constant>. If
<parameter>extra</parameter> is non-zero, an attempt is made to resize
the allocation to be at least <code
- language="C"><parameter>size</parameter> +
+ language="C">(<parameter>size</parameter> +
<parameter>extra</parameter>)</code> bytes, though inability to allocate
the extra byte(s) will not by itself result in failure. Behavior is
- undefined if <parameter>size</parameter> is <constant>0</constant>, or if
- <code language="C">(<parameter>size</parameter> +
+ undefined if <parameter>size</parameter> is <constant>0</constant>, if
+ request size overflows due to size class and/or alignment constraints, or
+ if <code language="C">(<parameter>size</parameter> +
<parameter>extra</parameter> &gt;
<constant>SIZE_T_MAX</constant>)</code>.</para>
@@ -417,8 +555,9 @@ for (i = 0; i < nbins; i++) {
<parameter>rsize</parameter> is not <constant>NULL</constant> it sets
<parameter>*rsize</parameter> to the real size of the allocation that
would result from the equivalent <function>allocm<parameter/></function>
- function call. Behavior is undefined if
- <parameter>size</parameter> is <constant>0</constant>.</para>
+ function call. Behavior is undefined if <parameter>size</parameter> is
+ <constant>0</constant>, or if request size overflows due to size class
+ and/or alignment constraints.</para>
</refsect2>
</refsect1>
<refsect1 id="tuning">
@@ -432,7 +571,14 @@ for (i = 0; i < nbins; i++) {
referenced by the symbolic link named <filename
class="symlink">/etc/malloc.conf</filename>, and the value of the
environment variable <envar>MALLOC_CONF</envar>, will be interpreted, in
- that order, from left to right as options.</para>
+ that order, from left to right as options. Note that
+ <varname>malloc_conf</varname> may be read before
+ <function>main<parameter/></function> is entered, so the declaration of
+ <varname>malloc_conf</varname> should specify an initializer that contains
+ the final value to be read by jemalloc. <varname>malloc_conf</varname> is
+ a compile-time setting, whereas <filename
+ class="symlink">/etc/malloc.conf</filename> and <envar>MALLOC_CONF</envar>
+ can be safely set any time prior to program invocation.</para>
<para>An options string is a comma-separated list of option:value pairs.
There is one key corresponding to each <link
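[Editorial sketch, not part of the patch.] The tuning hunk above notes that malloc_conf may be read before main() is entered, so it should be declared with its final value as a compile-time initializer. A minimal example; the option string itself is arbitrary:

/* Read by jemalloc during early initialization, before main() runs,
 * so it must already hold its final value at compile time. */
const char *malloc_conf = "narenas:4,lg_chunk:24";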
@@ -598,7 +744,7 @@ for (i = 0; i < nbins; i++) {
which controls refreshing of cached dynamic statistics.</para>
<variablelist>
- <varlistentry>
+ <varlistentry id="version">
<term>
<mallctl>version</mallctl>
(<type>const char *</type>)
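[Editorial sketch, not part of the patch.] The context at the top of the hunk above refers to the mallctl that controls refreshing of cached dynamic statistics (the epoch mallctl in jemalloc's terminology). A sketch of the usual refresh-then-read pattern, assuming a build with --enable-stats:

#include <jemalloc/jemalloc.h>
#include <stdint.h>
#include <stdio.h>

void print_allocated(void)
{
	uint64_t epoch = 1;
	size_t len = sizeof(epoch);

	/* Advance the epoch so subsequent stats reads see fresh values. */
	mallctl("epoch", &epoch, &len, &epoch, len);

	size_t allocated;
	len = sizeof(allocated);
	if (mallctl("stats.allocated", &allocated, &len, NULL, 0) == 0)
		printf("stats.allocated: %zu\n", allocated);
}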
@@ -619,7 +765,7 @@ for (i = 0; i < nbins; i++) {
detecting whether another thread caused a refresh.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="config.debug">
<term>
<mallctl>config.debug</mallctl>
(<type>bool</type>)
@@ -629,7 +775,7 @@ for (i = 0; i < nbins; i++) {
build configuration.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="config.dss">
<term>
<mallctl>config.dss</mallctl>
(<type>bool</type>)
@@ -639,7 +785,7 @@ for (i = 0; i < nbins; i++) {
build configuration.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="config.fill">
<term>
<mallctl>config.fill</mallctl>
(<type>bool</type>)
@@ -649,7 +795,7 @@ for (i = 0; i < nbins; i++) {
build configuration.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="config.lazy_lock">
<term>
<mallctl>config.lazy_lock</mallctl>
(<type>bool</type>)
@@ -659,7 +805,7 @@ for (i = 0; i < nbins; i++) {
during build configuration.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="config.mremap">
<term>
<mallctl>config.mremap</mallctl>
(<type>bool</type>)
@@ -669,7 +815,7 @@ for (i = 0; i < nbins; i++) {
build configuration.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="config.munmap">
<term>
<mallctl>config.munmap</mallctl>
(<type>bool</type>)
@@ -679,7 +825,7 @@ for (i = 0; i < nbins; i++) {
build configuration.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="config.prof">
<term>
<mallctl>config.prof</mallctl>
(<type>bool</type>)
@@ -689,7 +835,7 @@ for (i = 0; i < nbins; i++) {
build configuration.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="config.prof_libgcc">
<term>
<mallctl>config.prof_libgcc</mallctl>
(<type>bool</type>)
@@ -699,7 +845,7 @@ for (i = 0; i < nbins; i++) {
specified during build configuration.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="config.prof_libunwind">
<term>
<mallctl>config.prof_libunwind</mallctl>
(<type>bool</type>)
@@ -709,7 +855,7 @@ for (i = 0; i < nbins; i++) {
during build configuration.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="config.stats">
<term>
<mallctl>config.stats</mallctl>
(<type>bool</type>)
@@ -719,7 +865,7 @@ for (i = 0; i < nbins; i++) {
build configuration.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="config.tcache">
<term>
<mallctl>config.tcache</mallctl>
(<type>bool</type>)
@@ -729,7 +875,7 @@ for (i = 0; i < nbins; i++) {
during build configuration.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="config.tls">
<term>
<mallctl>config.tls</mallctl>
(<type>bool</type>)
@@ -739,7 +885,7 @@ for (i = 0; i < nbins; i++) {
build configuration.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="config.utrace">
<term>
<mallctl>config.utrace</mallctl>
(<type>bool</type>)
@@ -749,7 +895,7 @@ for (i = 0; i < nbins; i++) {
build configuration.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="config.valgrind">
<term>
<mallctl>config.valgrind</mallctl>
(<type>bool</type>)
@@ -759,7 +905,7 @@ for (i = 0; i < nbins; i++) {
build configuration.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="config.xmalloc">
<term>
<mallctl>config.xmalloc</mallctl>
(<type>bool</type>)
@@ -784,16 +930,6 @@ for (i = 0; i < nbins; i++) {
</para></listitem>
</varlistentry>
- <varlistentry id="opt.lg_chunk">
- <term>
- <mallctl>opt.lg_chunk</mallctl>
- (<type>size_t</type>)
- <literal>r-</literal>
- </term>
- <listitem><para>Virtual memory chunk size (log base 2). The default
- chunk size is 4 MiB (2^22).</para></listitem>
- </varlistentry>
-
<varlistentry id="opt.dss">
<term>
<mallctl>opt.dss</mallctl>
@@ -805,7 +941,23 @@ for (i = 0; i < nbins; i++) {
related to <citerefentry><refentrytitle>mmap</refentrytitle>
<manvolnum>2</manvolnum></citerefentry> allocation. The following
settings are supported: &ldquo;disabled&rdquo;, &ldquo;primary&rdquo;,
- and &ldquo;secondary&rdquo; (default).</para></listitem>
+ and &ldquo;secondary&rdquo;. The default is &ldquo;secondary&rdquo; if
+ <link linkend="config.dss"><mallctl>config.dss</mallctl></link> is
+ true, &ldquo;disabled&rdquo; otherwise.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.lg_chunk">
+ <term>
+ <mallctl>opt.lg_chunk</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Virtual memory chunk size (log base 2). If a chunk
+ size outside the supported size range is specified, the size is
+ silently clipped to the minimum/maximum supported size. The default
+ chunk size is 4 MiB (2^22).
+ </para></listitem>
</varlistentry>
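Editor's sketch (not part of the patch) of checking what these two options resolved to at run time, using the mallctl() interface documented elsewhere in this page; names may carry a prefix (e.g. je_mallctl) depending on how jemalloc was built, and headers are omitted.

    const char *dss;
    size_t lg_chunk, sz;

    sz = sizeof(dss);
    mallctl("opt.dss", &dss, &sz, NULL, 0);            /* "disabled", "primary" or "secondary" */
    sz = sizeof(lg_chunk);
    mallctl("opt.lg_chunk", &lg_chunk, &sz, NULL, 0);  /* log2 of the chunk size, e.g. 22 */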
<varlistentry id="opt.narenas">
@@ -924,7 +1076,8 @@ for (i = 0; i < nbins; i++) {
<listitem><para>Zero filling enabled/disabled. If enabled, each byte
of uninitialized allocated memory will be initialized to 0. Note that
this initialization only happens once for each byte, so
- <function>realloc<parameter/></function> and
+ <function>realloc<parameter/></function>,
+ <function>rallocx<parameter/></function> and
<function>rallocm<parameter/></function> calls do not zero memory that
was previously allocated. This is intended for debugging and will
impact performance negatively. This option is disabled by default.
@@ -1053,7 +1206,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<term>
<mallctl>opt.prof_active</mallctl>
(<type>bool</type>)
- <literal>r-</literal>
+ <literal>rw</literal>
[<option>--enable-prof</option>]
</term>
<listitem><para>Profiling activated/deactivated. This is a secondary
@@ -1165,7 +1318,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
by default.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="thread.arena">
<term>
<mallctl>thread.arena</mallctl>
(<type>unsigned</type>)
@@ -1192,7 +1345,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
cases.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="thread.allocatedp">
<term>
<mallctl>thread.allocatedp</mallctl>
(<type>uint64_t *</type>)
@@ -1219,7 +1372,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
cases.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="thread.deallocatedp">
<term>
<mallctl>thread.deallocatedp</mallctl>
(<type>uint64_t *</type>)
@@ -1233,7 +1386,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<function>mallctl*<parameter/></function> calls.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="thread.tcache.enabled">
<term>
<mallctl>thread.tcache.enabled</mallctl>
(<type>bool</type>)
@@ -1247,7 +1400,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="thread.tcache.flush">
<term>
<mallctl>thread.tcache.flush</mallctl>
(<type>void</type>)
@@ -1286,8 +1439,12 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<listitem><para>Set the precedence of dss allocation as related to mmap
allocation for arena &lt;i&gt;, or for all arenas if &lt;i&gt; equals
<link
- linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>. See
- <link linkend="opt.dss"><mallctl>opt.dss</mallctl></link> for supported
+ linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>. Note
+ that even during huge allocation this setting is read from the arena
+ that would be chosen for small or large allocation so that applications
+ can depend on consistent dss versus mmap allocation regardless of
+ allocation size. See <link
+ linkend="opt.dss"><mallctl>opt.dss</mallctl></link> for supported
settings.
</para></listitem>
</varlistentry>
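Editor's sketch (not part of the patch) of the write path described above: applying a dss precedence to all arenas by targeting the index returned by arenas.narenas. This is a fragment under the same prefix/header assumptions as the earlier mallctl sketch.

    unsigned narenas;
    size_t sz = sizeof(narenas);
    char name[64];
    const char *dss = "primary";

    mallctl("arenas.narenas", &narenas, &sz, NULL, 0);
    /* arena.<narenas>.dss addresses every arena at once. */
    snprintf(name, sizeof(name), "arena.%u.dss", narenas);
    mallctl(name, NULL, NULL, &dss, sizeof(dss));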
@@ -1313,7 +1470,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
initialized.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="arenas.quantum">
<term>
<mallctl>arenas.quantum</mallctl>
(<type>size_t</type>)
@@ -1322,7 +1479,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<listitem><para>Quantum size.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="arenas.page">
<term>
<mallctl>arenas.page</mallctl>
(<type>size_t</type>)
@@ -1331,7 +1488,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<listitem><para>Page size.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="arenas.tcache_max">
<term>
<mallctl>arenas.tcache_max</mallctl>
(<type>size_t</type>)
@@ -1341,7 +1498,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<listitem><para>Maximum thread-cached size class.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="arenas.nbins">
<term>
<mallctl>arenas.nbins</mallctl>
(<type>unsigned</type>)
@@ -1350,7 +1507,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<listitem><para>Number of bin size classes.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="arenas.nhbins">
<term>
<mallctl>arenas.nhbins</mallctl>
(<type>unsigned</type>)
@@ -1370,7 +1527,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<listitem><para>Maximum size supported by size class.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="arenas.bin.i.nregs">
<term>
<mallctl>arenas.bin.&lt;i&gt;.nregs</mallctl>
(<type>uint32_t</type>)
@@ -1379,7 +1536,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<listitem><para>Number of regions per page run.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="arenas.bin.i.run_size">
<term>
<mallctl>arenas.bin.&lt;i&gt;.run_size</mallctl>
(<type>size_t</type>)
@@ -1388,7 +1545,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<listitem><para>Number of bytes per page run.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="arenas.nlruns">
<term>
<mallctl>arenas.nlruns</mallctl>
(<type>size_t</type>)
@@ -1397,7 +1554,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<listitem><para>Total number of large size classes.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="arenas.lrun.i.size">
<term>
<mallctl>arenas.lrun.&lt;i&gt;.size</mallctl>
(<type>size_t</type>)
@@ -1407,7 +1564,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
class.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="arenas.purge">
<term>
<mallctl>arenas.purge</mallctl>
(<type>unsigned</type>)
@@ -1417,7 +1574,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
for all arenas if none is specified.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="arenas.extend">
<term>
<mallctl>arenas.extend</mallctl>
(<type>unsigned</type>)
@@ -1441,7 +1598,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="prof.dump">
<term>
<mallctl>prof.dump</mallctl>
(<type>const char *</type>)
@@ -1457,7 +1614,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
option.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="prof.interval">
<term>
<mallctl>prof.interval</mallctl>
(<type>uint64_t</type>)
@@ -1517,7 +1674,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
entirely devoted to allocator metadata.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.mapped">
<term>
<mallctl>stats.mapped</mallctl>
(<type>size_t</type>)
@@ -1531,7 +1688,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
does not include inactive chunks.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.chunks.current">
<term>
<mallctl>stats.chunks.current</mallctl>
(<type>size_t</type>)
@@ -1543,7 +1700,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.chunks.total">
<term>
<mallctl>stats.chunks.total</mallctl>
(<type>uint64_t</type>)
@@ -1553,7 +1710,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<listitem><para>Cumulative number of chunks allocated.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.chunks.high">
<term>
<mallctl>stats.chunks.high</mallctl>
(<type>size_t</type>)
@@ -1564,7 +1721,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.huge.allocated">
<term>
<mallctl>stats.huge.allocated</mallctl>
(<type>size_t</type>)
@@ -1575,7 +1732,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.huge.nmalloc">
<term>
<mallctl>stats.huge.nmalloc</mallctl>
(<type>uint64_t</type>)
@@ -1586,7 +1743,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.huge.ndalloc">
<term>
<mallctl>stats.huge.ndalloc</mallctl>
(<type>uint64_t</type>)
@@ -1597,7 +1754,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.arenas.i.dss">
<term>
<mallctl>stats.arenas.&lt;i&gt;.dss</mallctl>
(<type>const char *</type>)
@@ -1611,7 +1768,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.arenas.i.nthreads">
<term>
<mallctl>stats.arenas.&lt;i&gt;.nthreads</mallctl>
(<type>unsigned</type>)
@@ -1621,7 +1778,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
arena.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.arenas.i.pactive">
<term>
<mallctl>stats.arenas.&lt;i&gt;.pactive</mallctl>
(<type>size_t</type>)
@@ -1642,7 +1799,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
similar has not been called.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.arenas.i.mapped">
<term>
<mallctl>stats.arenas.&lt;i&gt;.mapped</mallctl>
(<type>size_t</type>)
@@ -1652,7 +1809,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<listitem><para>Number of mapped bytes.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.arenas.i.npurge">
<term>
<mallctl>stats.arenas.&lt;i&gt;.npurge</mallctl>
(<type>uint64_t</type>)
@@ -1663,7 +1820,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.arenas.i.nmadvise">
<term>
<mallctl>stats.arenas.&lt;i&gt;.nmadvise</mallctl>
(<type>uint64_t</type>)
@@ -1675,9 +1832,9 @@ malloc_conf = "xmalloc:true";]]></programlisting>
similar calls made to purge dirty pages.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.arenas.i.purged">
<term>
- <mallctl>stats.arenas.&lt;i&gt;.npurged</mallctl>
+ <mallctl>stats.arenas.&lt;i&gt;.purged</mallctl>
(<type>uint64_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
@@ -1685,7 +1842,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<listitem><para>Number of pages purged.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.arenas.i.small.allocated">
<term>
<mallctl>stats.arenas.&lt;i&gt;.small.allocated</mallctl>
(<type>size_t</type>)
@@ -1696,7 +1853,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.arenas.i.small.nmalloc">
<term>
<mallctl>stats.arenas.&lt;i&gt;.small.nmalloc</mallctl>
(<type>uint64_t</type>)
@@ -1707,7 +1864,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
small bins.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.arenas.i.small.ndalloc">
<term>
<mallctl>stats.arenas.&lt;i&gt;.small.ndalloc</mallctl>
(<type>uint64_t</type>)
@@ -1718,7 +1875,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.arenas.i.small.nrequests">
<term>
<mallctl>stats.arenas.&lt;i&gt;.small.nrequests</mallctl>
(<type>uint64_t</type>)
@@ -1729,7 +1886,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.arenas.i.large.allocated">
<term>
<mallctl>stats.arenas.&lt;i&gt;.large.allocated</mallctl>
(<type>size_t</type>)
@@ -1740,7 +1897,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.arenas.i.large.nmalloc">
<term>
<mallctl>stats.arenas.&lt;i&gt;.large.nmalloc</mallctl>
(<type>uint64_t</type>)
@@ -1751,7 +1908,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
directly by the arena.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.arenas.i.large.ndalloc">
<term>
<mallctl>stats.arenas.&lt;i&gt;.large.ndalloc</mallctl>
(<type>uint64_t</type>)
@@ -1762,7 +1919,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
directly by the arena.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.arenas.i.large.nrequests">
<term>
<mallctl>stats.arenas.&lt;i&gt;.large.nrequests</mallctl>
(<type>uint64_t</type>)
@@ -1773,7 +1930,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.arenas.i.bins.j.allocated">
<term>
<mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.allocated</mallctl>
(<type>size_t</type>)
@@ -1784,7 +1941,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
bin.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.arenas.i.bins.j.nmalloc">
<term>
<mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nmalloc</mallctl>
(<type>uint64_t</type>)
@@ -1795,7 +1952,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.arenas.i.bins.j.ndalloc">
<term>
<mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.ndalloc</mallctl>
(<type>uint64_t</type>)
@@ -1806,7 +1963,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.arenas.i.bins.j.nrequests">
<term>
<mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nrequests</mallctl>
(<type>uint64_t</type>)
@@ -1817,7 +1974,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
requests.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.arenas.i.bins.j.nfills">
<term>
<mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nfills</mallctl>
(<type>uint64_t</type>)
@@ -1827,7 +1984,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<listitem><para>Cumulative number of tcache fills.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.arenas.i.bins.j.nflushes">
<term>
<mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nflushes</mallctl>
(<type>uint64_t</type>)
@@ -1837,7 +1994,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<listitem><para>Cumulative number of tcache flushes.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.arenas.i.bins.j.nruns">
<term>
<mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nruns</mallctl>
(<type>uint64_t</type>)
@@ -1847,7 +2004,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<listitem><para>Cumulative number of runs created.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.arenas.i.bins.j.nreruns">
<term>
<mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nreruns</mallctl>
(<type>uint64_t</type>)
@@ -1858,7 +2015,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
to allocate changed.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.arenas.i.bins.j.curruns">
<term>
<mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.curruns</mallctl>
(<type>size_t</type>)
@@ -1868,7 +2025,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<listitem><para>Current number of runs.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.arenas.i.lruns.j.nmalloc">
<term>
<mallctl>stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.nmalloc</mallctl>
(<type>uint64_t</type>)
@@ -1879,7 +2036,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
class served directly by the arena.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.arenas.i.lruns.j.ndalloc">
<term>
<mallctl>stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.ndalloc</mallctl>
(<type>uint64_t</type>)
@@ -1890,7 +2047,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
size class served directly by the arena.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.arenas.i.lruns.j.nrequests">
<term>
<mallctl>stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.nrequests</mallctl>
(<type>uint64_t</type>)
@@ -1901,7 +2058,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
class.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="stats.arenas.i.lruns.j.curruns">
<term>
<mallctl>stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.curruns</mallctl>
(<type>size_t</type>)
@@ -2027,9 +2184,26 @@ malloc_conf = "xmalloc:true";]]></programlisting>
</refsect2>
<refsect2>
<title>Non-standard API</title>
- <para>The <function>malloc_usable_size<parameter/></function> function
- returns the usable size of the allocation pointed to by
- <parameter>ptr</parameter>. </para>
+ <para>The <function>mallocx<parameter/></function> and
+ <function>rallocx<parameter/></function> functions return a pointer to
+ the allocated memory if successful; otherwise a <constant>NULL</constant>
+ pointer is returned to indicate insufficient contiguous memory was
+ available to service the allocation request. </para>
+
+ <para>The <function>xallocx<parameter/></function> function returns the
+ real size of the resulting resized allocation pointed to by
+ <parameter>ptr</parameter>, which is a value less than
+ <parameter>size</parameter> if the allocation could not be adequately
+ grown in place. </para>
+
+ <para>The <function>sallocx<parameter/></function> function returns the
+ real size of the allocation pointed to by <parameter>ptr</parameter>.
+ </para>
+
+ <para>The <function>nallocx<parameter/></function> function returns the real size
+ that would result from a successful equivalent
+ <function>mallocx<parameter/></function> function call, or zero if
+ insufficient memory is available to perform the size computation. </para>
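Pulling the four return-value rules above together, an editor's sketch (not part of the patch) of a grow-in-place-then-move pattern, assuming an unprefixed jemalloc installation:

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        size_t want = 1000;
        size_t predicted = nallocx(want, 0);   /* 0 if the size computation would overflow */
        void *p = mallocx(want, 0);            /* NULL if contiguous memory is unavailable */

        if (p == NULL)
            return 1;
        printf("asked for %zu, usable %zu (nallocx predicted %zu)\n",
            want, sallocx(p, 0), predicted);
        if (xallocx(p, 4096, 0, 0) < 4096) {   /* < size: could not be grown in place */
            void *q = rallocx(p, 4096, 0);     /* fall back to a moving reallocation */
            if (q != NULL)
                p = q;
        }
        dallocx(p, 0);
        return 0;
    }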
<para>The <function>mallctl<parameter/></function>,
<function>mallctlnametomib<parameter/></function>, and
@@ -2047,12 +2221,6 @@ malloc_conf = "xmalloc:true";]]></programlisting>
are read despite the error.</para></listitem>
</varlistentry>
<varlistentry>
- <term><errorname>ENOMEM</errorname></term>
-
- <listitem><para><parameter>*oldlenp</parameter> is too short to
- hold the requested value.</para></listitem>
- </varlistentry>
- <varlistentry>
<term><errorname>ENOENT</errorname></term>
<listitem><para><parameter>name</parameter> or
@@ -2080,6 +2248,10 @@ malloc_conf = "xmalloc:true";]]></programlisting>
</varlistentry>
</variablelist>
</para>
+
+ <para>The <function>malloc_usable_size<parameter/></function> function
+ returns the usable size of the allocation pointed to by
+ <parameter>ptr</parameter>. </para>
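And the introspection call above, for completeness (editor's fragment, not part of the patch):

    void *p = malloc(100);
    if (p != NULL) {
        /* >= 100: the entire size class backing the allocation is writable. */
        printf("usable size: %zu\n", malloc_usable_size(p));
        free(p);
    }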
</refsect2>
<refsect2>
<title>Experimental API</title>
diff --git a/deps/jemalloc/include/jemalloc/internal/arena.h b/deps/jemalloc/include/jemalloc/internal/arena.h
index 561c9b6ff..9d000c03d 100644
--- a/deps/jemalloc/include/jemalloc/internal/arena.h
+++ b/deps/jemalloc/include/jemalloc/internal/arena.h
@@ -158,6 +158,7 @@ struct arena_chunk_map_s {
};
typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t;
typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;
+typedef ql_head(arena_chunk_map_t) arena_chunk_mapelms_t;
/* Arena chunk header. */
struct arena_chunk_s {
@@ -174,11 +175,12 @@ struct arena_chunk_s {
size_t nruns_avail;
/*
- * Number of available run adjacencies. Clean and dirty available runs
- * are not coalesced, which causes virtual memory fragmentation. The
- * ratio of (nruns_avail-nruns_adjac):nruns_adjac is used for tracking
- * this fragmentation.
- * */
+ * Number of available run adjacencies that purging could coalesce.
+ * Clean and dirty available runs are not coalesced, which causes
+ * virtual memory fragmentation. The ratio of
+ * (nruns_avail-nruns_adjac):nruns_adjac is used for tracking this
+ * fragmentation.
+ */
size_t nruns_adjac;
/*
@@ -400,12 +402,20 @@ extern arena_bin_info_t arena_bin_info[NBINS];
#define nlclasses (chunk_npages - map_bias)
void arena_purge_all(arena_t *arena);
-void arena_prof_accum(arena_t *arena, uint64_t accumbytes);
void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
size_t binind, uint64_t prof_accumbytes);
void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
bool zero);
+#ifdef JEMALLOC_JET
+typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t,
+ uint8_t);
+extern arena_redzone_corruption_t *arena_redzone_corruption;
+typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *);
+extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
+#else
void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
+#endif
+void arena_quarantine_junk_small(void *ptr, size_t usize);
void *arena_malloc_small(arena_t *arena, size_t size, bool zero);
void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
void *arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero);
@@ -416,10 +426,18 @@ void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind, arena_chunk_map_t *mapelm);
void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind);
+#ifdef JEMALLOC_JET
+typedef void (arena_dalloc_junk_large_t)(void *, size_t);
+extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
+#endif
void arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk,
void *ptr);
void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
-void *arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
+#ifdef JEMALLOC_JET
+typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
+extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
+#endif
+bool arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero);
void *arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
@@ -442,6 +460,7 @@ void arena_postfork_child(arena_t *arena);
#ifndef JEMALLOC_ENABLE_INLINE
arena_chunk_map_t *arena_mapp_get(arena_chunk_t *chunk, size_t pageind);
size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbitsp_read(size_t *mapbitsp);
size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
size_t pageind);
@@ -452,6 +471,7 @@ size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
+void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
size_t size, size_t flags);
void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
@@ -464,12 +484,15 @@ void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
size_t runind, size_t binind, size_t flags);
void arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
size_t unzeroed);
+bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
+bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
+bool arena_prof_accum(arena_t *arena, uint64_t accumbytes);
size_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
size_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
const void *ptr);
prof_ctx_t *arena_prof_ctx_get(const void *ptr);
-void arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
+void arena_prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx);
void *arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache);
size_t arena_salloc(const void *ptr, bool demote);
void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr,
@@ -478,7 +501,7 @@ void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr,
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
# ifdef JEMALLOC_ARENA_INLINE_A
-JEMALLOC_INLINE arena_chunk_map_t *
+JEMALLOC_ALWAYS_INLINE arena_chunk_map_t *
arena_mapp_get(arena_chunk_t *chunk, size_t pageind)
{
@@ -488,21 +511,28 @@ arena_mapp_get(arena_chunk_t *chunk, size_t pageind)
return (&chunk->map[pageind-map_bias]);
}
-JEMALLOC_INLINE size_t *
+JEMALLOC_ALWAYS_INLINE size_t *
arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
{
return (&arena_mapp_get(chunk, pageind)->bits);
}
-JEMALLOC_INLINE size_t
+JEMALLOC_ALWAYS_INLINE size_t
+arena_mapbitsp_read(size_t *mapbitsp)
+{
+
+ return (*mapbitsp);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
{
- return (*arena_mapbitsp_get(chunk, pageind));
+ return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind)));
}
-JEMALLOC_INLINE size_t
+JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -512,7 +542,7 @@ arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
return (mapbits & ~PAGE_MASK);
}
-JEMALLOC_INLINE size_t
+JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -523,7 +553,7 @@ arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
return (mapbits & ~PAGE_MASK);
}
-JEMALLOC_INLINE size_t
+JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -534,7 +564,7 @@ arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
return (mapbits >> LG_PAGE);
}
-JEMALLOC_INLINE size_t
+JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -546,7 +576,7 @@ arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
return (binind);
}
-JEMALLOC_INLINE size_t
+JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -555,7 +585,7 @@ arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
return (mapbits & CHUNK_MAP_DIRTY);
}
-JEMALLOC_INLINE size_t
+JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -564,7 +594,7 @@ arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
return (mapbits & CHUNK_MAP_UNZEROED);
}
-JEMALLOC_INLINE size_t
+JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -573,7 +603,7 @@ arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
return (mapbits & CHUNK_MAP_LARGE);
}
-JEMALLOC_INLINE size_t
+JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -582,86 +612,138 @@ arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
return (mapbits & CHUNK_MAP_ALLOCATED);
}
-JEMALLOC_INLINE void
+JEMALLOC_ALWAYS_INLINE void
+arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits)
+{
+
+ *mapbitsp = mapbits;
+}
+
+JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
{
- size_t *mapbitsp;
+ size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
- mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
assert((flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == flags);
- *mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags;
+ arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags);
}
-JEMALLOC_INLINE void
+JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
size_t size)
{
- size_t *mapbitsp;
+ size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ size_t mapbits = arena_mapbitsp_read(mapbitsp);
- mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
- assert((*mapbitsp & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
- *mapbitsp = size | (*mapbitsp & PAGE_MASK);
+ assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
+ arena_mapbitsp_write(mapbitsp, size | (mapbits & PAGE_MASK));
}
-JEMALLOC_INLINE void
+JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
{
- size_t *mapbitsp;
+ size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ size_t mapbits = arena_mapbitsp_read(mapbitsp);
size_t unzeroed;
- mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert((flags & CHUNK_MAP_DIRTY) == flags);
- unzeroed = *mapbitsp & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
- *mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags | unzeroed |
- CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+ unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
+ arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags
+ | unzeroed | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED);
}
-JEMALLOC_INLINE void
+JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
size_t binind)
{
- size_t *mapbitsp;
+ size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ size_t mapbits = arena_mapbitsp_read(mapbitsp);
assert(binind <= BININD_INVALID);
- mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert(arena_mapbits_large_size_get(chunk, pageind) == PAGE);
- *mapbitsp = (*mapbitsp & ~CHUNK_MAP_BININD_MASK) | (binind <<
- CHUNK_MAP_BININD_SHIFT);
+ arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) |
+ (binind << CHUNK_MAP_BININD_SHIFT));
}
-JEMALLOC_INLINE void
+JEMALLOC_ALWAYS_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
size_t binind, size_t flags)
{
- size_t *mapbitsp;
+ size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ size_t mapbits = arena_mapbitsp_read(mapbitsp);
size_t unzeroed;
assert(binind < BININD_INVALID);
- mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert(pageind - runind >= map_bias);
assert((flags & CHUNK_MAP_DIRTY) == flags);
- unzeroed = *mapbitsp & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
- *mapbitsp = (runind << LG_PAGE) | (binind << CHUNK_MAP_BININD_SHIFT) |
- flags | unzeroed | CHUNK_MAP_ALLOCATED;
+ unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
+ arena_mapbitsp_write(mapbitsp, (runind << LG_PAGE) | (binind <<
+ CHUNK_MAP_BININD_SHIFT) | flags | unzeroed | CHUNK_MAP_ALLOCATED);
}
-JEMALLOC_INLINE void
+JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
size_t unzeroed)
{
- size_t *mapbitsp;
+ size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ size_t mapbits = arena_mapbitsp_read(mapbitsp);
- mapbitsp = arena_mapbitsp_get(chunk, pageind);
- *mapbitsp = (*mapbitsp & ~CHUNK_MAP_UNZEROED) | unzeroed;
+ arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_UNZEROED) |
+ unzeroed);
}
-JEMALLOC_INLINE size_t
+JEMALLOC_INLINE bool
+arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
+{
+
+ cassert(config_prof);
+ assert(prof_interval != 0);
+
+ arena->prof_accumbytes += accumbytes;
+ if (arena->prof_accumbytes >= prof_interval) {
+ arena->prof_accumbytes -= prof_interval;
+ return (true);
+ }
+ return (false);
+}
+
+JEMALLOC_INLINE bool
+arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
+{
+
+ cassert(config_prof);
+
+ if (prof_interval == 0)
+ return (false);
+ return (arena_prof_accum_impl(arena, accumbytes));
+}
+
+JEMALLOC_INLINE bool
+arena_prof_accum(arena_t *arena, uint64_t accumbytes)
+{
+
+ cassert(config_prof);
+
+ if (prof_interval == 0)
+ return (false);
+
+ {
+ bool ret;
+
+ malloc_mutex_lock(&arena->lock);
+ ret = arena_prof_accum_impl(arena, accumbytes);
+ malloc_mutex_unlock(&arena->lock);
+ return (ret);
+ }
+}
+
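Editor's note (not part of the patch): the boolean return value introduced here lets callers trigger an interval-based profile dump outside the arena lock. A minimal caller sketch, assuming arena and the allocation's usize are in scope and the existing prof_idump() entry point:

    /* After accounting a new allocation of usize bytes against the arena: */
    if (arena_prof_accum(arena, usize))
        prof_idump();       /* prof_accumbytes crossed prof_interval */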
+JEMALLOC_ALWAYS_INLINE size_t
arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
{
size_t binind;
@@ -822,10 +904,10 @@ arena_prof_ctx_get(const void *ptr)
}
JEMALLOC_INLINE void
-arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
+arena_prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx)
{
arena_chunk_t *chunk;
- size_t pageind, mapbits;
+ size_t pageind;
cassert(config_prof);
assert(ptr != NULL);
@@ -833,10 +915,17 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- mapbits = arena_mapbits_get(chunk, pageind);
- assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
- if ((mapbits & CHUNK_MAP_LARGE) == 0) {
+ assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+
+ if (usize > SMALL_MAXCLASS || (prof_promote &&
+ ((uintptr_t)ctx != (uintptr_t)1U || arena_mapbits_large_get(chunk,
+ pageind) != 0))) {
+ assert(arena_mapbits_large_get(chunk, pageind) != 0);
+ arena_mapp_get(chunk, pageind)->prof_ctx = ctx;
+ } else {
+ assert(arena_mapbits_large_get(chunk, pageind) == 0);
if (prof_promote == false) {
+ size_t mapbits = arena_mapbits_get(chunk, pageind);
arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
(uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
LG_PAGE));
@@ -848,15 +937,14 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
bin_info = &arena_bin_info[binind];
regind = arena_run_regind(run, bin_info, ptr);
- *((prof_ctx_t **)((uintptr_t)run + bin_info->ctx0_offset
- + (regind * sizeof(prof_ctx_t *)))) = ctx;
- } else
- assert((uintptr_t)ctx == (uintptr_t)1U);
- } else
- arena_mapp_get(chunk, pageind)->prof_ctx = ctx;
+ *((prof_ctx_t **)((uintptr_t)run +
+ bin_info->ctx0_offset + (regind * sizeof(prof_ctx_t
+ *)))) = ctx;
+ }
+ }
}
-JEMALLOC_INLINE void *
+JEMALLOC_ALWAYS_INLINE void *
arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache)
{
tcache_t *tcache;
@@ -887,7 +975,7 @@ arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache)
}
/* Return the size of the allocation pointed to by ptr. */
-JEMALLOC_INLINE size_t
+JEMALLOC_ALWAYS_INLINE size_t
arena_salloc(const void *ptr, bool demote)
{
size_t ret;
@@ -933,7 +1021,7 @@ arena_salloc(const void *ptr, bool demote)
return (ret);
}
-JEMALLOC_INLINE void
+JEMALLOC_ALWAYS_INLINE void
arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr, bool try_tcache)
{
size_t pageind, mapbits;
diff --git a/deps/jemalloc/include/jemalloc/internal/chunk_dss.h b/deps/jemalloc/include/jemalloc/internal/chunk_dss.h
index 6585f071b..4535ce09c 100644
--- a/deps/jemalloc/include/jemalloc/internal/chunk_dss.h
+++ b/deps/jemalloc/include/jemalloc/internal/chunk_dss.h
@@ -7,7 +7,7 @@ typedef enum {
dss_prec_secondary = 2,
dss_prec_limit = 3
-} dss_prec_t ;
+} dss_prec_t;
#define DSS_PREC_DEFAULT dss_prec_secondary
#define DSS_DEFAULT "secondary"
diff --git a/deps/jemalloc/include/jemalloc/internal/ckh.h b/deps/jemalloc/include/jemalloc/internal/ckh.h
index 05d1fc03e..58712a6a7 100644
--- a/deps/jemalloc/include/jemalloc/internal/ckh.h
+++ b/deps/jemalloc/include/jemalloc/internal/ckh.h
@@ -5,7 +5,7 @@ typedef struct ckh_s ckh_t;
typedef struct ckhc_s ckhc_t;
/* Typedefs to allow easy function pointer passing. */
-typedef void ckh_hash_t (const void *, unsigned, size_t *, size_t *);
+typedef void ckh_hash_t (const void *, size_t[2]);
typedef bool ckh_keycomp_t (const void *, const void *);
/* Maintain counters used to get an idea of performance. */
@@ -17,7 +17,7 @@ typedef bool ckh_keycomp_t (const void *, const void *);
* There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit
* one bucket per L1 cache line.
*/
-#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
+#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
@@ -75,11 +75,9 @@ bool ckh_insert(ckh_t *ckh, const void *key, const void *data);
bool ckh_remove(ckh_t *ckh, const void *searchkey, void **key,
void **data);
bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
-void ckh_string_hash(const void *key, unsigned minbits, size_t *hash1,
- size_t *hash2);
+void ckh_string_hash(const void *key, size_t r_hash[2]);
bool ckh_string_keycomp(const void *k1, const void *k2);
-void ckh_pointer_hash(const void *key, unsigned minbits, size_t *hash1,
- size_t *hash2);
+void ckh_pointer_hash(const void *key, size_t r_hash[2]);
bool ckh_pointer_keycomp(const void *k1, const void *k2);
#endif /* JEMALLOC_H_EXTERNS */
diff --git a/deps/jemalloc/include/jemalloc/internal/hash.h b/deps/jemalloc/include/jemalloc/internal/hash.h
index 2f501f5d4..c7183ede8 100644
--- a/deps/jemalloc/include/jemalloc/internal/hash.h
+++ b/deps/jemalloc/include/jemalloc/internal/hash.h
@@ -1,3 +1,8 @@
+/*
+ * The following hash function is based on MurmurHash3, placed into the public
+ * domain by Austin Appleby. See http://code.google.com/p/smhasher/ for
+ * details.
+ */
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
@@ -14,55 +19,315 @@
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
-uint64_t hash(const void *key, size_t len, uint64_t seed);
+uint32_t hash_x86_32(const void *key, int len, uint32_t seed);
+void hash_x86_128(const void *key, const int len, uint32_t seed,
+ uint64_t r_out[2]);
+void hash_x64_128(const void *key, const int len, const uint32_t seed,
+ uint64_t r_out[2]);
+void hash(const void *key, size_t len, const uint32_t seed,
+ size_t r_hash[2]);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_))
-/*
- * The following hash function is based on MurmurHash64A(), placed into the
- * public domain by Austin Appleby. See http://murmurhash.googlepages.com/ for
- * details.
- */
+/******************************************************************************/
+/* Internal implementation. */
+JEMALLOC_INLINE uint32_t
+hash_rotl_32(uint32_t x, int8_t r)
+{
+
+ return (x << r) | (x >> (32 - r));
+}
+
+JEMALLOC_INLINE uint64_t
+hash_rotl_64(uint64_t x, int8_t r)
+{
+ return (x << r) | (x >> (64 - r));
+}
+
+JEMALLOC_INLINE uint32_t
+hash_get_block_32(const uint32_t *p, int i)
+{
+
+ return (p[i]);
+}
+
JEMALLOC_INLINE uint64_t
-hash(const void *key, size_t len, uint64_t seed)
+hash_get_block_64(const uint64_t *p, int i)
{
- const uint64_t m = UINT64_C(0xc6a4a7935bd1e995);
- const int r = 47;
- uint64_t h = seed ^ (len * m);
- const uint64_t *data = (const uint64_t *)key;
- const uint64_t *end = data + (len/8);
- const unsigned char *data2;
- assert(((uintptr_t)key & 0x7) == 0);
+ return (p[i]);
+}
+
+JEMALLOC_INLINE uint32_t
+hash_fmix_32(uint32_t h)
+{
- while(data != end) {
- uint64_t k = *data++;
+ h ^= h >> 16;
+ h *= 0x85ebca6b;
+ h ^= h >> 13;
+ h *= 0xc2b2ae35;
+ h ^= h >> 16;
- k *= m;
- k ^= k >> r;
- k *= m;
+ return (h);
+}
- h ^= k;
- h *= m;
+JEMALLOC_INLINE uint64_t
+hash_fmix_64(uint64_t k)
+{
+
+ k ^= k >> 33;
+ k *= QU(0xff51afd7ed558ccdLLU);
+ k ^= k >> 33;
+ k *= QU(0xc4ceb9fe1a85ec53LLU);
+ k ^= k >> 33;
+
+ return (k);
+}
+
+JEMALLOC_INLINE uint32_t
+hash_x86_32(const void *key, int len, uint32_t seed)
+{
+ const uint8_t *data = (const uint8_t *) key;
+ const int nblocks = len / 4;
+
+ uint32_t h1 = seed;
+
+ const uint32_t c1 = 0xcc9e2d51;
+ const uint32_t c2 = 0x1b873593;
+
+ /* body */
+ {
+ const uint32_t *blocks = (const uint32_t *) (data + nblocks*4);
+ int i;
+
+ for (i = -nblocks; i; i++) {
+ uint32_t k1 = hash_get_block_32(blocks, i);
+
+ k1 *= c1;
+ k1 = hash_rotl_32(k1, 15);
+ k1 *= c2;
+
+ h1 ^= k1;
+ h1 = hash_rotl_32(h1, 13);
+ h1 = h1*5 + 0xe6546b64;
+ }
}
- data2 = (const unsigned char *)data;
- switch(len & 7) {
- case 7: h ^= ((uint64_t)(data2[6])) << 48;
- case 6: h ^= ((uint64_t)(data2[5])) << 40;
- case 5: h ^= ((uint64_t)(data2[4])) << 32;
- case 4: h ^= ((uint64_t)(data2[3])) << 24;
- case 3: h ^= ((uint64_t)(data2[2])) << 16;
- case 2: h ^= ((uint64_t)(data2[1])) << 8;
- case 1: h ^= ((uint64_t)(data2[0]));
- h *= m;
+ /* tail */
+ {
+ const uint8_t *tail = (const uint8_t *) (data + nblocks*4);
+
+ uint32_t k1 = 0;
+
+ switch (len & 3) {
+ case 3: k1 ^= tail[2] << 16;
+ case 2: k1 ^= tail[1] << 8;
+ case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15);
+ k1 *= c2; h1 ^= k1;
+ }
}
- h ^= h >> r;
- h *= m;
- h ^= h >> r;
+ /* finalization */
+ h1 ^= len;
- return (h);
+ h1 = hash_fmix_32(h1);
+
+ return (h1);
+}
+
+UNUSED JEMALLOC_INLINE void
+hash_x86_128(const void *key, const int len, uint32_t seed,
+ uint64_t r_out[2])
+{
+ const uint8_t * data = (const uint8_t *) key;
+ const int nblocks = len / 16;
+
+ uint32_t h1 = seed;
+ uint32_t h2 = seed;
+ uint32_t h3 = seed;
+ uint32_t h4 = seed;
+
+ const uint32_t c1 = 0x239b961b;
+ const uint32_t c2 = 0xab0e9789;
+ const uint32_t c3 = 0x38b34ae5;
+ const uint32_t c4 = 0xa1e38b93;
+
+ /* body */
+ {
+ const uint32_t *blocks = (const uint32_t *) (data + nblocks*16);
+ int i;
+
+ for (i = -nblocks; i; i++) {
+ uint32_t k1 = hash_get_block_32(blocks, i*4 + 0);
+ uint32_t k2 = hash_get_block_32(blocks, i*4 + 1);
+ uint32_t k3 = hash_get_block_32(blocks, i*4 + 2);
+ uint32_t k4 = hash_get_block_32(blocks, i*4 + 3);
+
+ k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
+
+ h1 = hash_rotl_32(h1, 19); h1 += h2;
+ h1 = h1*5 + 0x561ccd1b;
+
+ k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
+
+ h2 = hash_rotl_32(h2, 17); h2 += h3;
+ h2 = h2*5 + 0x0bcaa747;
+
+ k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
+
+ h3 = hash_rotl_32(h3, 15); h3 += h4;
+ h3 = h3*5 + 0x96cd1c35;
+
+ k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
+
+ h4 = hash_rotl_32(h4, 13); h4 += h1;
+ h4 = h4*5 + 0x32ac3b17;
+ }
+ }
+
+ /* tail */
+ {
+ const uint8_t *tail = (const uint8_t *) (data + nblocks*16);
+ uint32_t k1 = 0;
+ uint32_t k2 = 0;
+ uint32_t k3 = 0;
+ uint32_t k4 = 0;
+
+ switch (len & 15) {
+ case 15: k4 ^= tail[14] << 16;
+ case 14: k4 ^= tail[13] << 8;
+ case 13: k4 ^= tail[12] << 0;
+ k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
+
+ case 12: k3 ^= tail[11] << 24;
+ case 11: k3 ^= tail[10] << 16;
+ case 10: k3 ^= tail[ 9] << 8;
+ case 9: k3 ^= tail[ 8] << 0;
+ k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
+
+ case 8: k2 ^= tail[ 7] << 24;
+ case 7: k2 ^= tail[ 6] << 16;
+ case 6: k2 ^= tail[ 5] << 8;
+ case 5: k2 ^= tail[ 4] << 0;
+ k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
+
+ case 4: k1 ^= tail[ 3] << 24;
+ case 3: k1 ^= tail[ 2] << 16;
+ case 2: k1 ^= tail[ 1] << 8;
+ case 1: k1 ^= tail[ 0] << 0;
+ k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
+ }
+ }
+
+ /* finalization */
+ h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;
+
+ h1 += h2; h1 += h3; h1 += h4;
+ h2 += h1; h3 += h1; h4 += h1;
+
+ h1 = hash_fmix_32(h1);
+ h2 = hash_fmix_32(h2);
+ h3 = hash_fmix_32(h3);
+ h4 = hash_fmix_32(h4);
+
+ h1 += h2; h1 += h3; h1 += h4;
+ h2 += h1; h3 += h1; h4 += h1;
+
+ r_out[0] = (((uint64_t) h2) << 32) | h1;
+ r_out[1] = (((uint64_t) h4) << 32) | h3;
+}
+
+UNUSED JEMALLOC_INLINE void
+hash_x64_128(const void *key, const int len, const uint32_t seed,
+ uint64_t r_out[2])
+{
+ const uint8_t *data = (const uint8_t *) key;
+ const int nblocks = len / 16;
+
+ uint64_t h1 = seed;
+ uint64_t h2 = seed;
+
+ const uint64_t c1 = QU(0x87c37b91114253d5LLU);
+ const uint64_t c2 = QU(0x4cf5ad432745937fLLU);
+
+ /* body */
+ {
+ const uint64_t *blocks = (const uint64_t *) (data);
+ int i;
+
+ for (i = 0; i < nblocks; i++) {
+ uint64_t k1 = hash_get_block_64(blocks, i*2 + 0);
+ uint64_t k2 = hash_get_block_64(blocks, i*2 + 1);
+
+ k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
+
+ h1 = hash_rotl_64(h1, 27); h1 += h2;
+ h1 = h1*5 + 0x52dce729;
+
+ k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
+
+ h2 = hash_rotl_64(h2, 31); h2 += h1;
+ h2 = h2*5 + 0x38495ab5;
+ }
+ }
+
+ /* tail */
+ {
+ const uint8_t *tail = (const uint8_t*)(data + nblocks*16);
+ uint64_t k1 = 0;
+ uint64_t k2 = 0;
+
+ switch (len & 15) {
+ case 15: k2 ^= ((uint64_t)(tail[14])) << 48;
+ case 14: k2 ^= ((uint64_t)(tail[13])) << 40;
+ case 13: k2 ^= ((uint64_t)(tail[12])) << 32;
+ case 12: k2 ^= ((uint64_t)(tail[11])) << 24;
+ case 11: k2 ^= ((uint64_t)(tail[10])) << 16;
+ case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8;
+ case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
+ k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
+
+ case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56;
+ case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48;
+ case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40;
+ case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32;
+ case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24;
+ case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16;
+ case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8;
+ case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
+ k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
+ }
+ }
+
+ /* finalization */
+ h1 ^= len; h2 ^= len;
+
+ h1 += h2;
+ h2 += h1;
+
+ h1 = hash_fmix_64(h1);
+ h2 = hash_fmix_64(h2);
+
+ h1 += h2;
+ h2 += h1;
+
+ r_out[0] = h1;
+ r_out[1] = h2;
+}
+
+/******************************************************************************/
+/* API. */
+JEMALLOC_INLINE void
+hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
+{
+#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
+ hash_x64_128(key, len, seed, (uint64_t *)r_hash);
+#else
+ uint64_t hashes[2];
+ hash_x86_128(key, len, seed, hashes);
+ r_hash[0] = (size_t)hashes[0];
+ r_hash[1] = (size_t)hashes[1];
+#endif
}
#endif
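Editor's sketch (not part of the patch) of the wrapper's contract as declared above: callers supply a 32-bit seed and receive two size_t words of MurmurHash3 output regardless of pointer width.

    const char key[] = "redis";
    size_t h[2];

    hash(key, sizeof(key) - 1, 0x9747b28cU, h);   /* seed is arbitrary but fixed per table */
    /* The two words can drive, e.g., the two candidate buckets of a cuckoo hash table. */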
diff --git a/deps/jemalloc/include/jemalloc/internal/huge.h b/deps/jemalloc/include/jemalloc/internal/huge.h
index d987d3707..a2b9c7791 100644
--- a/deps/jemalloc/include/jemalloc/internal/huge.h
+++ b/deps/jemalloc/include/jemalloc/internal/huge.h
@@ -17,14 +17,20 @@ extern size_t huge_allocated;
/* Protects chunk-related data structures. */
extern malloc_mutex_t huge_mtx;
-void *huge_malloc(size_t size, bool zero);
-void *huge_palloc(size_t size, size_t alignment, bool zero);
-void *huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
+void *huge_malloc(size_t size, bool zero, dss_prec_t dss_prec);
+void *huge_palloc(size_t size, size_t alignment, bool zero,
+ dss_prec_t dss_prec);
+bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra);
void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
- size_t alignment, bool zero, bool try_tcache_dalloc);
+ size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec);
+#ifdef JEMALLOC_JET
+typedef void (huge_dalloc_junk_t)(void *, size_t);
+extern huge_dalloc_junk_t *huge_dalloc_junk;
+#endif
void huge_dalloc(void *ptr, bool unmap);
size_t huge_salloc(const void *ptr);
+dss_prec_t huge_dss_prec_get(arena_t *arena);
prof_ctx_t *huge_prof_ctx_get(const void *ptr);
void huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
bool huge_boot(void);
diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
index 475821acb..574bbb141 100644
--- a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
@@ -1,5 +1,5 @@
#ifndef JEMALLOC_INTERNAL_H
-#define JEMALLOC_INTERNAL_H
+#define JEMALLOC_INTERNAL_H
#include <math.h>
#ifdef _WIN32
# include <windows.h>
@@ -54,8 +54,7 @@ typedef intptr_t ssize_t;
#endif
#include <fcntl.h>
-#define JEMALLOC_NO_DEMANGLE
-#include "../jemalloc@install_suffix@.h"
+#include "jemalloc_internal_defs.h"
#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
@@ -66,13 +65,18 @@ typedef intptr_t ssize_t;
#include <valgrind/memcheck.h>
#endif
-#include "jemalloc/internal/private_namespace.h"
-
-#ifdef JEMALLOC_CC_SILENCE
-#define UNUSED JEMALLOC_ATTR(unused)
+#define JEMALLOC_NO_DEMANGLE
+#ifdef JEMALLOC_JET
+# define JEMALLOC_N(n) jet_##n
+# include "jemalloc/internal/public_namespace.h"
+# define JEMALLOC_NO_RENAME
+# include "../jemalloc@install_suffix@.h"
+# undef JEMALLOC_NO_RENAME
#else
-#define UNUSED
+# define JEMALLOC_N(n) @private_namespace@##n
+# include "../jemalloc@install_suffix@.h"
#endif
+#include "jemalloc/internal/private_namespace.h"
static const bool config_debug =
#ifdef JEMALLOC_DEBUG
@@ -221,27 +225,12 @@ static const bool config_ivsalloc =
* JEMALLOC_H_INLINES : Inline functions.
*/
/******************************************************************************/
-#define JEMALLOC_H_TYPES
-
-#define ALLOCM_LG_ALIGN_MASK ((int)0x3f)
+#define JEMALLOC_H_TYPES
-#define ZU(z) ((size_t)z)
+#include "jemalloc/internal/jemalloc_internal_macros.h"
-#ifndef __DECONST
-# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
-#endif
-
-#ifdef JEMALLOC_DEBUG
- /* Disable inlining to make debugging easier. */
-# define JEMALLOC_INLINE
-# define inline
-#else
-# define JEMALLOC_ENABLE_INLINE
-# define JEMALLOC_INLINE static inline
-# ifdef _MSC_VER
-# define inline _inline
-# endif
-#endif
+#define MALLOCX_LG_ALIGN_MASK ((int)0x3f)
+#define ALLOCM_LG_ALIGN_MASK ((int)0x3f)
/* Smallest size class to support. */
#define LG_TINY_MIN 3
@@ -270,6 +259,9 @@ static const bool config_ivsalloc =
# ifdef __arm__
# define LG_QUANTUM 3
# endif
+# ifdef __aarch64__
+# define LG_QUANTUM 4
+# endif
# ifdef __hppa__
# define LG_QUANTUM 4
# endif
@@ -279,7 +271,7 @@ static const bool config_ivsalloc =
# ifdef __powerpc__
# define LG_QUANTUM 4
# endif
-# ifdef __s390x__
+# ifdef __s390__
# define LG_QUANTUM 4
# endif
# ifdef __SH4__
@@ -359,7 +351,11 @@ static const bool config_ivsalloc =
# include <malloc.h>
# define alloca _alloca
# else
-# include <alloca.h>
+# ifdef JEMALLOC_HAS_ALLOCA_H
+# include <alloca.h>
+# else
+# include <stdlib.h>
+# endif
# endif
# define VARIABLE_ARRAY(type, name, count) \
type *name = alloca(sizeof(type) * count)
@@ -428,15 +424,18 @@ static const bool config_ivsalloc =
} while (0)
#else
#define RUNNING_ON_VALGRIND ((unsigned)0)
-#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
-#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB)
-#define VALGRIND_FREELIKE_BLOCK(addr, rzB)
-#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len)
-#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len)
-#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero)
+#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
+ do {} while (0)
+#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \
+ do {} while (0)
+#define VALGRIND_FREELIKE_BLOCK(addr, rzB) do {} while (0)
+#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr, _qzz_len) do {} while (0)
+#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len) do {} while (0)
+#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len) do {} while (0)
+#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0)
#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
- old_rzsize, zero)
-#define JEMALLOC_VALGRIND_FREE(ptr, rzsize)
+ old_rzsize, zero) do {} while (0)
+#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
#endif
#include "jemalloc/internal/util.h"
@@ -463,7 +462,7 @@ static const bool config_ivsalloc =
#undef JEMALLOC_H_TYPES
/******************************************************************************/
-#define JEMALLOC_H_STRUCTS
+#define JEMALLOC_H_STRUCTS
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
@@ -492,14 +491,14 @@ typedef struct {
uint64_t deallocated;
} thread_allocated_t;
/*
- * The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
+ * The JEMALLOC_ARG_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
* argument.
*/
-#define THREAD_ALLOCATED_INITIALIZER JEMALLOC_CONCAT({0, 0})
+#define THREAD_ALLOCATED_INITIALIZER JEMALLOC_ARG_CONCAT({0, 0})
#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
-#define JEMALLOC_H_EXTERNS
+#define JEMALLOC_H_EXTERNS
extern bool opt_abort;
extern bool opt_junk;
@@ -559,7 +558,7 @@ void jemalloc_postfork_child(void);
#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
-#define JEMALLOC_H_INLINES
+#define JEMALLOC_H_INLINES
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
@@ -591,13 +590,14 @@ arena_t *choose_arena(arena_t *arena);
* for allocations.
*/
malloc_tsd_externs(arenas, arena_t *)
-malloc_tsd_funcs(JEMALLOC_INLINE, arenas, arena_t *, NULL, arenas_cleanup)
+malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, arenas, arena_t *, NULL,
+ arenas_cleanup)
/*
* Compute usable size that would result from allocating an object with the
* specified size.
*/
-JEMALLOC_INLINE size_t
+JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size)
{
@@ -612,7 +612,7 @@ s2u(size_t size)
* Compute usable size that would result from allocating an object with the
* specified size and alignment.
*/
-JEMALLOC_INLINE size_t
+JEMALLOC_ALWAYS_INLINE size_t
sa2u(size_t size, size_t alignment)
{
size_t usize;
@@ -733,32 +733,36 @@ choose_arena(arena_t *arena)
#include "jemalloc/internal/quarantine.h"
#ifndef JEMALLOC_ENABLE_INLINE
-void *imallocx(size_t size, bool try_tcache, arena_t *arena);
+void *imalloct(size_t size, bool try_tcache, arena_t *arena);
void *imalloc(size_t size);
-void *icallocx(size_t size, bool try_tcache, arena_t *arena);
+void *icalloct(size_t size, bool try_tcache, arena_t *arena);
void *icalloc(size_t size);
-void *ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
+void *ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
arena_t *arena);
void *ipalloc(size_t usize, size_t alignment, bool zero);
size_t isalloc(const void *ptr, bool demote);
size_t ivsalloc(const void *ptr, bool demote);
size_t u2rz(size_t usize);
size_t p2rz(const void *ptr);
-void idallocx(void *ptr, bool try_tcache);
+void idalloct(void *ptr, bool try_tcache);
void idalloc(void *ptr);
-void iqallocx(void *ptr, bool try_tcache);
+void iqalloct(void *ptr, bool try_tcache);
void iqalloc(void *ptr);
-void *irallocx(void *ptr, size_t size, size_t extra, size_t alignment,
- bool zero, bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc,
+void *iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
+ size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
arena_t *arena);
+void *iralloct(void *ptr, size_t size, size_t extra, size_t alignment,
+ bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena);
void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
- bool zero, bool no_move);
+ bool zero);
+bool ixalloc(void *ptr, size_t size, size_t extra, size_t alignment,
+ bool zero);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
-JEMALLOC_INLINE void *
-imallocx(size_t size, bool try_tcache, arena_t *arena)
+JEMALLOC_ALWAYS_INLINE void *
+imalloct(size_t size, bool try_tcache, arena_t *arena)
{
assert(size != 0);
@@ -766,35 +770,35 @@ imallocx(size_t size, bool try_tcache, arena_t *arena)
if (size <= arena_maxclass)
return (arena_malloc(arena, size, false, try_tcache));
else
- return (huge_malloc(size, false));
+ return (huge_malloc(size, false, huge_dss_prec_get(arena)));
}
-JEMALLOC_INLINE void *
+JEMALLOC_ALWAYS_INLINE void *
imalloc(size_t size)
{
- return (imallocx(size, true, NULL));
+ return (imalloct(size, true, NULL));
}
-JEMALLOC_INLINE void *
-icallocx(size_t size, bool try_tcache, arena_t *arena)
+JEMALLOC_ALWAYS_INLINE void *
+icalloct(size_t size, bool try_tcache, arena_t *arena)
{
if (size <= arena_maxclass)
return (arena_malloc(arena, size, true, try_tcache));
else
- return (huge_malloc(size, true));
+ return (huge_malloc(size, true, huge_dss_prec_get(arena)));
}
-JEMALLOC_INLINE void *
+JEMALLOC_ALWAYS_INLINE void *
icalloc(size_t size)
{
- return (icallocx(size, true, NULL));
+ return (icalloct(size, true, NULL));
}
-JEMALLOC_INLINE void *
-ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
+JEMALLOC_ALWAYS_INLINE void *
+ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
arena_t *arena)
{
void *ret;
@@ -809,20 +813,20 @@ ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
ret = arena_palloc(choose_arena(arena), usize,
alignment, zero);
} else if (alignment <= chunksize)
- ret = huge_malloc(usize, zero);
+ ret = huge_malloc(usize, zero, huge_dss_prec_get(arena));
else
- ret = huge_palloc(usize, alignment, zero);
+ ret = huge_palloc(usize, alignment, zero, huge_dss_prec_get(arena));
}
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
return (ret);
}
-JEMALLOC_INLINE void *
+JEMALLOC_ALWAYS_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{
- return (ipallocx(usize, alignment, zero, true, NULL));
+ return (ipalloct(usize, alignment, zero, true, NULL));
}
/*
@@ -830,7 +834,7 @@ ipalloc(size_t usize, size_t alignment, bool zero)
* void *ptr = [...]
* size_t sz = isalloc(ptr, config_prof);
*/
-JEMALLOC_INLINE size_t
+JEMALLOC_ALWAYS_INLINE size_t
isalloc(const void *ptr, bool demote)
{
size_t ret;
@@ -849,12 +853,12 @@ isalloc(const void *ptr, bool demote)
return (ret);
}
-JEMALLOC_INLINE size_t
+JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{
/* Return 0 if ptr is not within a chunk managed by jemalloc. */
- if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
+ if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == 0)
return (0);
return (isalloc(ptr, demote));
@@ -882,8 +886,8 @@ p2rz(const void *ptr)
return (u2rz(usize));
}
-JEMALLOC_INLINE void
-idallocx(void *ptr, bool try_tcache)
+JEMALLOC_ALWAYS_INLINE void
+idalloct(void *ptr, bool try_tcache)
{
arena_chunk_t *chunk;
@@ -896,35 +900,67 @@ idallocx(void *ptr, bool try_tcache)
huge_dalloc(ptr, true);
}
-JEMALLOC_INLINE void
+JEMALLOC_ALWAYS_INLINE void
idalloc(void *ptr)
{
- idallocx(ptr, true);
+ idalloct(ptr, true);
}
-JEMALLOC_INLINE void
-iqallocx(void *ptr, bool try_tcache)
+JEMALLOC_ALWAYS_INLINE void
+iqalloct(void *ptr, bool try_tcache)
{
if (config_fill && opt_quarantine)
quarantine(ptr);
else
- idallocx(ptr, try_tcache);
+ idalloct(ptr, try_tcache);
}
-JEMALLOC_INLINE void
+JEMALLOC_ALWAYS_INLINE void
iqalloc(void *ptr)
{
- iqallocx(ptr, true);
+ iqalloct(ptr, true);
}
-JEMALLOC_INLINE void *
-irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
- bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
+JEMALLOC_ALWAYS_INLINE void *
+iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
+ size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
+ arena_t *arena)
+{
+ void *p;
+ size_t usize, copysize;
+
+ usize = sa2u(size + extra, alignment);
+ if (usize == 0)
+ return (NULL);
+ p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
+ if (p == NULL) {
+ if (extra == 0)
+ return (NULL);
+ /* Try again, without extra this time. */
+ usize = sa2u(size, alignment);
+ if (usize == 0)
+ return (NULL);
+ p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
+ if (p == NULL)
+ return (NULL);
+ }
+ /*
+ * Copy at most size bytes (not size+extra), since the caller has no
+ * expectation that the extra bytes will be reliably preserved.
+ */
+ copysize = (size < oldsize) ? size : oldsize;
+ memcpy(p, ptr, copysize);
+ iqalloct(ptr, try_tcache_dalloc);
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
+ bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
{
- void *ret;
size_t oldsize;
assert(ptr != NULL);
@@ -934,72 +970,54 @@ irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) {
- size_t usize, copysize;
-
/*
* Existing object alignment is inadequate; allocate new space
* and copy.
*/
- if (no_move)
- return (NULL);
- usize = sa2u(size + extra, alignment);
- if (usize == 0)
- return (NULL);
- ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
- if (ret == NULL) {
- if (extra == 0)
- return (NULL);
- /* Try again, without extra this time. */
- usize = sa2u(size, alignment);
- if (usize == 0)
- return (NULL);
- ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
- arena);
- if (ret == NULL)
- return (NULL);
- }
- /*
- * Copy at most size bytes (not size+extra), since the caller
- * has no expectation that the extra bytes will be reliably
- * preserved.
- */
- copysize = (size < oldsize) ? size : oldsize;
- memcpy(ret, ptr, copysize);
- iqallocx(ptr, try_tcache_dalloc);
- return (ret);
+ return (iralloct_realign(ptr, oldsize, size, extra, alignment,
+ zero, try_tcache_alloc, try_tcache_dalloc, arena));
}
- if (no_move) {
- if (size <= arena_maxclass) {
- return (arena_ralloc_no_move(ptr, oldsize, size,
- extra, zero));
- } else {
- return (huge_ralloc_no_move(ptr, oldsize, size,
- extra));
- }
+ if (size + extra <= arena_maxclass) {
+ return (arena_ralloc(arena, ptr, oldsize, size, extra,
+ alignment, zero, try_tcache_alloc,
+ try_tcache_dalloc));
} else {
- if (size + extra <= arena_maxclass) {
- return (arena_ralloc(arena, ptr, oldsize, size, extra,
- alignment, zero, try_tcache_alloc,
- try_tcache_dalloc));
- } else {
- return (huge_ralloc(ptr, oldsize, size, extra,
- alignment, zero, try_tcache_dalloc));
- }
+ return (huge_ralloc(ptr, oldsize, size, extra,
+ alignment, zero, try_tcache_dalloc, huge_dss_prec_get(arena)));
}
}
-JEMALLOC_INLINE void *
-iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
- bool no_move)
+JEMALLOC_ALWAYS_INLINE void *
+iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
+{
+
+ return (iralloct(ptr, size, extra, alignment, zero, true, true, NULL));
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+ixalloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
{
+ size_t oldsize;
+
+ assert(ptr != NULL);
+ assert(size != 0);
- return (irallocx(ptr, size, extra, alignment, zero, no_move, true, true,
- NULL));
+ oldsize = isalloc(ptr, config_prof);
+ if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
+ != 0) {
+ /* Existing object alignment is inadequate. */
+ return (true);
+ }
+
+ if (size <= arena_maxclass)
+ return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
+ else
+ return (huge_ralloc_no_move(ptr, oldsize, size, extra));
}
malloc_tsd_externs(thread_allocated, thread_allocated_t)
-malloc_tsd_funcs(JEMALLOC_INLINE, thread_allocated, thread_allocated_t,
+malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, thread_allocated, thread_allocated_t,
THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif
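The hunk above splits the old no_move flag out of iralloc(): in-place resizing now goes through the separate ixalloc() predicate, while iralloct()/iralloc() always return a (possibly moved) pointer. A minimal caller-side sketch, assuming ixalloc() returns true on failure to resize in place (as the fallthrough to arena_ralloc_no_move()/huge_ralloc_no_move() above suggests); the grow() wrapper itself is hypothetical and not part of the patch:

static void *
grow(void *ptr, size_t size)
{

	if (ixalloc(ptr, size, 0, 0, false) == false)
		return (ptr);		/* Resized in place. */
	return (iralloc(ptr, size, 0, 0, false));	/* Moving path. */
}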
diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in
new file mode 100644
index 000000000..c166fbd9e
--- /dev/null
+++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in
@@ -0,0 +1,205 @@
+#ifndef JEMALLOC_INTERNAL_DEFS_H_
+#define JEMALLOC_INTERNAL_DEFS_H_
+/*
+ * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
+ * public APIs to be prefixed. This makes it possible, with some care, to use
+ * multiple allocators simultaneously.
+ */
+#undef JEMALLOC_PREFIX
+#undef JEMALLOC_CPREFIX
+
+/*
+ * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
+ * For shared libraries, symbol visibility mechanisms prevent these symbols
+ * from being exported, but for static libraries, naming collisions are a real
+ * possibility.
+ */
+#undef JEMALLOC_PRIVATE_NAMESPACE
+
+/*
+ * Hyper-threaded CPUs may need a special instruction inside spin loops in
+ * order to yield to another virtual CPU.
+ */
+#undef CPU_SPINWAIT
+
+/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */
+#undef JEMALLOC_ATOMIC9
+
+/*
+ * Defined if OSAtomic*() functions are available, as provided by Darwin, and
+ * documented in the atomic(3) manual page.
+ */
+#undef JEMALLOC_OSATOMIC
+
+/*
+ * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
+ * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
+ * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
+ * functions are defined in libgcc instead of being inlines)
+ */
+#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4
+
+/*
+ * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
+ * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
+ * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
+ * functions are defined in libgcc instead of being inlines)
+ */
+#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8
+
+/*
+ * Defined if OSSpin*() functions are available, as provided by Darwin, and
+ * documented in the spinlock(3) manual page.
+ */
+#undef JEMALLOC_OSSPIN
+
+/*
+ * Defined if _malloc_thread_cleanup() exists. At least in the case of
+ * FreeBSD, pthread_key_create() allocates, which if used during malloc
+ * bootstrapping will cause recursion into the pthreads library. Therefore, if
+ * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
+ * malloc_tsd.
+ */
+#undef JEMALLOC_MALLOC_THREAD_CLEANUP
+
+/*
+ * Defined if threaded initialization is known to be safe on this platform.
+ * Among other things, it must be possible to initialize a mutex without
+ * triggering allocation in order for threaded allocation to be safe.
+ */
+#undef JEMALLOC_THREADED_INIT
+
+/*
+ * Defined if the pthreads implementation defines
+ * _pthread_mutex_init_calloc_cb(), in which case the function is used in order
+ * to avoid recursive allocation during mutex initialization.
+ */
+#undef JEMALLOC_MUTEX_INIT_CB
+
+/* Defined if sbrk() is supported. */
+#undef JEMALLOC_HAVE_SBRK
+
+/* Non-empty if the tls_model attribute is supported. */
+#undef JEMALLOC_TLS_MODEL
+
+/* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */
+#undef JEMALLOC_CC_SILENCE
+
+/* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */
+#undef JEMALLOC_CODE_COVERAGE
+
+/*
+ * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
+ * inline functions.
+ */
+#undef JEMALLOC_DEBUG
+
+/* JEMALLOC_STATS enables statistics calculation. */
+#undef JEMALLOC_STATS
+
+/* JEMALLOC_PROF enables allocation profiling. */
+#undef JEMALLOC_PROF
+
+/* Use libunwind for profile backtracing if defined. */
+#undef JEMALLOC_PROF_LIBUNWIND
+
+/* Use libgcc for profile backtracing if defined. */
+#undef JEMALLOC_PROF_LIBGCC
+
+/* Use gcc intrinsics for profile backtracing if defined. */
+#undef JEMALLOC_PROF_GCC
+
+/*
+ * JEMALLOC_TCACHE enables a thread-specific caching layer for small objects.
+ * This makes it possible to allocate/deallocate objects without any locking
+ * when the cache is in the steady state.
+ */
+#undef JEMALLOC_TCACHE
+
+/*
+ * JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage
+ * segment (DSS).
+ */
+#undef JEMALLOC_DSS
+
+/* Support memory filling (junk/zero/quarantine/redzone). */
+#undef JEMALLOC_FILL
+
+/* Support utrace(2)-based tracing. */
+#undef JEMALLOC_UTRACE
+
+/* Support Valgrind. */
+#undef JEMALLOC_VALGRIND
+
+/* Support optional abort() on OOM. */
+#undef JEMALLOC_XMALLOC
+
+/* Support lazy locking (avoid locking unless a second thread is launched). */
+#undef JEMALLOC_LAZY_LOCK
+
+/* One page is 2^STATIC_PAGE_SHIFT bytes. */
+#undef STATIC_PAGE_SHIFT
+
+/*
+ * If defined, use munmap() to unmap freed chunks, rather than storing them for
+ * later reuse. This is disabled by default on Linux because common sequences
+ * of mmap()/munmap() calls will cause virtual memory map holes.
+ */
+#undef JEMALLOC_MUNMAP
+
+/*
+ * If defined, use mremap(...MREMAP_FIXED...) for huge realloc(). This is
+ * disabled by default because it is Linux-specific and it will cause virtual
+ * memory map holes, much like munmap(2) does.
+ */
+#undef JEMALLOC_MREMAP
+
+/* TLS is used to map arenas and magazine caches to threads. */
+#undef JEMALLOC_TLS
+
+/*
+ * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside
+ * within jemalloc-owned chunks before dereferencing them.
+ */
+#undef JEMALLOC_IVSALLOC
+
+/*
+ * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
+ */
+#undef JEMALLOC_ZONE
+#undef JEMALLOC_ZONE_VERSION
+
+/*
+ * Methods for purging unused pages differ between operating systems.
+ *
+ * madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages,
+ * such that new pages will be demand-zeroed if
+ * the address region is later touched.
+ * madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being
+ * unused, such that they will be discarded rather
+ * than swapped out.
+ */
+#undef JEMALLOC_PURGE_MADVISE_DONTNEED
+#undef JEMALLOC_PURGE_MADVISE_FREE
+
+/*
+ * Define if operating system has alloca.h header.
+ */
+#undef JEMALLOC_HAS_ALLOCA_H
+
+/* C99 restrict keyword supported. */
+#undef JEMALLOC_HAS_RESTRICT
+
+/* For use by hash code. */
+#undef JEMALLOC_BIG_ENDIAN
+
+/* sizeof(int) == 2^LG_SIZEOF_INT. */
+#undef LG_SIZEOF_INT
+
+/* sizeof(long) == 2^LG_SIZEOF_LONG. */
+#undef LG_SIZEOF_LONG
+
+/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
+#undef LG_SIZEOF_INTMAX_T
+
+#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
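The two purge defines near the end of this header select between the madvise(2) strategies described in the comment above. A minimal sketch of that selection, using a hypothetical purge_region() helper (the real entry point in this tree is pages_purge(), listed in private_symbols.txt further down):

#include <sys/mman.h>

static void
purge_region(void *addr, size_t length)
{

#if defined(JEMALLOC_PURGE_MADVISE_DONTNEED)
	/* Linux: pages are discarded and demand-zeroed on the next touch. */
	madvise(addr, length, MADV_DONTNEED);
#elif defined(JEMALLOC_PURGE_MADVISE_FREE)
	/* FreeBSD/Darwin: pages are marked unused rather than swapped out. */
	madvise(addr, length, MADV_FREE);
#endif
}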
diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h
new file mode 100644
index 000000000..4e2392302
--- /dev/null
+++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h
@@ -0,0 +1,51 @@
+/*
+ * JEMALLOC_ALWAYS_INLINE and JEMALLOC_INLINE are used within header files for
+ * functions that are static inline functions if inlining is enabled, and
+ * single-definition library-private functions if inlining is disabled.
+ *
+ * JEMALLOC_ALWAYS_INLINE_C and JEMALLOC_INLINE_C are for use in .c files, in
+ * which case the denoted functions are always static, regardless of whether
+ * inlining is enabled.
+ */
+#if defined(JEMALLOC_DEBUG) || defined(JEMALLOC_CODE_COVERAGE)
+ /* Disable inlining to make debugging/profiling easier. */
+# define JEMALLOC_ALWAYS_INLINE
+# define JEMALLOC_ALWAYS_INLINE_C static
+# define JEMALLOC_INLINE
+# define JEMALLOC_INLINE_C static
+# define inline
+#else
+# define JEMALLOC_ENABLE_INLINE
+# ifdef JEMALLOC_HAVE_ATTR
+# define JEMALLOC_ALWAYS_INLINE \
+ static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline)
+# define JEMALLOC_ALWAYS_INLINE_C \
+ static inline JEMALLOC_ATTR(always_inline)
+# else
+# define JEMALLOC_ALWAYS_INLINE static inline
+# define JEMALLOC_ALWAYS_INLINE_C static inline
+# endif
+# define JEMALLOC_INLINE static inline
+# define JEMALLOC_INLINE_C static inline
+# ifdef _MSC_VER
+# define inline _inline
+# endif
+#endif
+
+#ifdef JEMALLOC_CC_SILENCE
+# define UNUSED JEMALLOC_ATTR(unused)
+#else
+# define UNUSED
+#endif
+
+#define ZU(z) ((size_t)z)
+#define QU(q) ((uint64_t)q)
+#define QI(q) ((int64_t)q)
+
+#ifndef __DECONST
+# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
+#endif
+
+#ifndef JEMALLOC_HAS_RESTRICT
+# define restrict
+#endif
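These macros back the header pattern visible in jemalloc_internal.h.in above: each inline function gets a prototype for the non-inline build and a definition guarded by JEMALLOC_ENABLE_INLINE/JEMALLOC_C_. A minimal sketch with a hypothetical foo():

#ifndef JEMALLOC_ENABLE_INLINE
int	foo(int x);	/* Prototype only; one .c file provides the definition. */
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE int
foo(int x)
{

	return (x + 1);
}
#endif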
diff --git a/deps/jemalloc/include/jemalloc/internal/private_namespace.h b/deps/jemalloc/include/jemalloc/internal/private_namespace.h
deleted file mode 100644
index 06241cd2f..000000000
--- a/deps/jemalloc/include/jemalloc/internal/private_namespace.h
+++ /dev/null
@@ -1,367 +0,0 @@
-#define a0calloc JEMALLOC_N(a0calloc)
-#define a0free JEMALLOC_N(a0free)
-#define a0malloc JEMALLOC_N(a0malloc)
-#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small)
-#define arena_bin_index JEMALLOC_N(arena_bin_index)
-#define arena_bin_info JEMALLOC_N(arena_bin_info)
-#define arena_boot JEMALLOC_N(arena_boot)
-#define arena_dalloc JEMALLOC_N(arena_dalloc)
-#define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin)
-#define arena_dalloc_bin_locked JEMALLOC_N(arena_dalloc_bin_locked)
-#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
-#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large)
-#define arena_dalloc_large_locked JEMALLOC_N(arena_dalloc_large_locked)
-#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small)
-#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get)
-#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set)
-#define arena_malloc JEMALLOC_N(arena_malloc)
-#define arena_malloc_large JEMALLOC_N(arena_malloc_large)
-#define arena_malloc_small JEMALLOC_N(arena_malloc_small)
-#define arena_mapbits_allocated_get JEMALLOC_N(arena_mapbits_allocated_get)
-#define arena_mapbits_binind_get JEMALLOC_N(arena_mapbits_binind_get)
-#define arena_mapbits_dirty_get JEMALLOC_N(arena_mapbits_dirty_get)
-#define arena_mapbits_get JEMALLOC_N(arena_mapbits_get)
-#define arena_mapbits_large_binind_set JEMALLOC_N(arena_mapbits_large_binind_set)
-#define arena_mapbits_large_get JEMALLOC_N(arena_mapbits_large_get)
-#define arena_mapbits_large_set JEMALLOC_N(arena_mapbits_large_set)
-#define arena_mapbits_large_size_get JEMALLOC_N(arena_mapbits_large_size_get)
-#define arena_mapbits_small_runind_get JEMALLOC_N(arena_mapbits_small_runind_get)
-#define arena_mapbits_small_set JEMALLOC_N(arena_mapbits_small_set)
-#define arena_mapbits_unallocated_set JEMALLOC_N(arena_mapbits_unallocated_set)
-#define arena_mapbits_unallocated_size_get JEMALLOC_N(arena_mapbits_unallocated_size_get)
-#define arena_mapbits_unallocated_size_set JEMALLOC_N(arena_mapbits_unallocated_size_set)
-#define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get)
-#define arena_mapbits_unzeroed_set JEMALLOC_N(arena_mapbits_unzeroed_set)
-#define arena_mapbitsp_get JEMALLOC_N(arena_mapbitsp_get)
-#define arena_mapp_get JEMALLOC_N(arena_mapp_get)
-#define arena_maxclass JEMALLOC_N(arena_maxclass)
-#define arena_new JEMALLOC_N(arena_new)
-#define arena_palloc JEMALLOC_N(arena_palloc)
-#define arena_postfork_child JEMALLOC_N(arena_postfork_child)
-#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent)
-#define arena_prefork JEMALLOC_N(arena_prefork)
-#define arena_prof_accum JEMALLOC_N(arena_prof_accum)
-#define arena_prof_ctx_get JEMALLOC_N(arena_prof_ctx_get)
-#define arena_prof_ctx_set JEMALLOC_N(arena_prof_ctx_set)
-#define arena_prof_promoted JEMALLOC_N(arena_prof_promoted)
-#define arena_ptr_small_binind_get JEMALLOC_N(arena_ptr_small_binind_get)
-#define arena_purge_all JEMALLOC_N(arena_purge_all)
-#define arena_ralloc JEMALLOC_N(arena_ralloc)
-#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
-#define arena_run_regind JEMALLOC_N(arena_run_regind)
-#define arena_salloc JEMALLOC_N(arena_salloc)
-#define arena_stats_merge JEMALLOC_N(arena_stats_merge)
-#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
-#define arenas JEMALLOC_N(arenas)
-#define arenas_booted JEMALLOC_N(arenas_booted)
-#define arenas_cleanup JEMALLOC_N(arenas_cleanup)
-#define arenas_extend JEMALLOC_N(arenas_extend)
-#define arenas_initialized JEMALLOC_N(arenas_initialized)
-#define arenas_lock JEMALLOC_N(arenas_lock)
-#define arenas_tls JEMALLOC_N(arenas_tls)
-#define arenas_tsd JEMALLOC_N(arenas_tsd)
-#define arenas_tsd_boot JEMALLOC_N(arenas_tsd_boot)
-#define arenas_tsd_cleanup_wrapper JEMALLOC_N(arenas_tsd_cleanup_wrapper)
-#define arenas_tsd_get JEMALLOC_N(arenas_tsd_get)
-#define arenas_tsd_set JEMALLOC_N(arenas_tsd_set)
-#define atomic_add_u JEMALLOC_N(atomic_add_u)
-#define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32)
-#define atomic_add_uint64 JEMALLOC_N(atomic_add_uint64)
-#define atomic_add_z JEMALLOC_N(atomic_add_z)
-#define atomic_sub_u JEMALLOC_N(atomic_sub_u)
-#define atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32)
-#define atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64)
-#define atomic_sub_z JEMALLOC_N(atomic_sub_z)
-#define base_alloc JEMALLOC_N(base_alloc)
-#define base_boot JEMALLOC_N(base_boot)
-#define base_calloc JEMALLOC_N(base_calloc)
-#define base_node_alloc JEMALLOC_N(base_node_alloc)
-#define base_node_dealloc JEMALLOC_N(base_node_dealloc)
-#define base_postfork_child JEMALLOC_N(base_postfork_child)
-#define base_postfork_parent JEMALLOC_N(base_postfork_parent)
-#define base_prefork JEMALLOC_N(base_prefork)
-#define bitmap_full JEMALLOC_N(bitmap_full)
-#define bitmap_get JEMALLOC_N(bitmap_get)
-#define bitmap_info_init JEMALLOC_N(bitmap_info_init)
-#define bitmap_info_ngroups JEMALLOC_N(bitmap_info_ngroups)
-#define bitmap_init JEMALLOC_N(bitmap_init)
-#define bitmap_set JEMALLOC_N(bitmap_set)
-#define bitmap_sfu JEMALLOC_N(bitmap_sfu)
-#define bitmap_size JEMALLOC_N(bitmap_size)
-#define bitmap_unset JEMALLOC_N(bitmap_unset)
-#define bt_init JEMALLOC_N(bt_init)
-#define buferror JEMALLOC_N(buferror)
-#define choose_arena JEMALLOC_N(choose_arena)
-#define choose_arena_hard JEMALLOC_N(choose_arena_hard)
-#define chunk_alloc JEMALLOC_N(chunk_alloc)
-#define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss)
-#define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap)
-#define chunk_boot JEMALLOC_N(chunk_boot)
-#define chunk_dealloc JEMALLOC_N(chunk_dealloc)
-#define chunk_dealloc_mmap JEMALLOC_N(chunk_dealloc_mmap)
-#define chunk_dss_boot JEMALLOC_N(chunk_dss_boot)
-#define chunk_dss_postfork_child JEMALLOC_N(chunk_dss_postfork_child)
-#define chunk_dss_postfork_parent JEMALLOC_N(chunk_dss_postfork_parent)
-#define chunk_dss_prec_get JEMALLOC_N(chunk_dss_prec_get)
-#define chunk_dss_prec_set JEMALLOC_N(chunk_dss_prec_set)
-#define chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork)
-#define chunk_in_dss JEMALLOC_N(chunk_in_dss)
-#define chunk_npages JEMALLOC_N(chunk_npages)
-#define chunk_postfork_child JEMALLOC_N(chunk_postfork_child)
-#define chunk_postfork_parent JEMALLOC_N(chunk_postfork_parent)
-#define chunk_prefork JEMALLOC_N(chunk_prefork)
-#define chunk_unmap JEMALLOC_N(chunk_unmap)
-#define chunks_mtx JEMALLOC_N(chunks_mtx)
-#define chunks_rtree JEMALLOC_N(chunks_rtree)
-#define chunksize JEMALLOC_N(chunksize)
-#define chunksize_mask JEMALLOC_N(chunksize_mask)
-#define ckh_bucket_search JEMALLOC_N(ckh_bucket_search)
-#define ckh_count JEMALLOC_N(ckh_count)
-#define ckh_delete JEMALLOC_N(ckh_delete)
-#define ckh_evict_reloc_insert JEMALLOC_N(ckh_evict_reloc_insert)
-#define ckh_insert JEMALLOC_N(ckh_insert)
-#define ckh_isearch JEMALLOC_N(ckh_isearch)
-#define ckh_iter JEMALLOC_N(ckh_iter)
-#define ckh_new JEMALLOC_N(ckh_new)
-#define ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash)
-#define ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp)
-#define ckh_rebuild JEMALLOC_N(ckh_rebuild)
-#define ckh_remove JEMALLOC_N(ckh_remove)
-#define ckh_search JEMALLOC_N(ckh_search)
-#define ckh_string_hash JEMALLOC_N(ckh_string_hash)
-#define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp)
-#define ckh_try_bucket_insert JEMALLOC_N(ckh_try_bucket_insert)
-#define ckh_try_insert JEMALLOC_N(ckh_try_insert)
-#define ctl_boot JEMALLOC_N(ctl_boot)
-#define ctl_bymib JEMALLOC_N(ctl_bymib)
-#define ctl_byname JEMALLOC_N(ctl_byname)
-#define ctl_nametomib JEMALLOC_N(ctl_nametomib)
-#define ctl_postfork_child JEMALLOC_N(ctl_postfork_child)
-#define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent)
-#define ctl_prefork JEMALLOC_N(ctl_prefork)
-#define dss_prec_names JEMALLOC_N(dss_prec_names)
-#define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first)
-#define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert)
-#define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter)
-#define extent_tree_ad_iter_recurse JEMALLOC_N(extent_tree_ad_iter_recurse)
-#define extent_tree_ad_iter_start JEMALLOC_N(extent_tree_ad_iter_start)
-#define extent_tree_ad_last JEMALLOC_N(extent_tree_ad_last)
-#define extent_tree_ad_new JEMALLOC_N(extent_tree_ad_new)
-#define extent_tree_ad_next JEMALLOC_N(extent_tree_ad_next)
-#define extent_tree_ad_nsearch JEMALLOC_N(extent_tree_ad_nsearch)
-#define extent_tree_ad_prev JEMALLOC_N(extent_tree_ad_prev)
-#define extent_tree_ad_psearch JEMALLOC_N(extent_tree_ad_psearch)
-#define extent_tree_ad_remove JEMALLOC_N(extent_tree_ad_remove)
-#define extent_tree_ad_reverse_iter JEMALLOC_N(extent_tree_ad_reverse_iter)
-#define extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse)
-#define extent_tree_ad_reverse_iter_start JEMALLOC_N(extent_tree_ad_reverse_iter_start)
-#define extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search)
-#define extent_tree_szad_first JEMALLOC_N(extent_tree_szad_first)
-#define extent_tree_szad_insert JEMALLOC_N(extent_tree_szad_insert)
-#define extent_tree_szad_iter JEMALLOC_N(extent_tree_szad_iter)
-#define extent_tree_szad_iter_recurse JEMALLOC_N(extent_tree_szad_iter_recurse)
-#define extent_tree_szad_iter_start JEMALLOC_N(extent_tree_szad_iter_start)
-#define extent_tree_szad_last JEMALLOC_N(extent_tree_szad_last)
-#define extent_tree_szad_new JEMALLOC_N(extent_tree_szad_new)
-#define extent_tree_szad_next JEMALLOC_N(extent_tree_szad_next)
-#define extent_tree_szad_nsearch JEMALLOC_N(extent_tree_szad_nsearch)
-#define extent_tree_szad_prev JEMALLOC_N(extent_tree_szad_prev)
-#define extent_tree_szad_psearch JEMALLOC_N(extent_tree_szad_psearch)
-#define extent_tree_szad_remove JEMALLOC_N(extent_tree_szad_remove)
-#define extent_tree_szad_reverse_iter JEMALLOC_N(extent_tree_szad_reverse_iter)
-#define extent_tree_szad_reverse_iter_recurse JEMALLOC_N(extent_tree_szad_reverse_iter_recurse)
-#define extent_tree_szad_reverse_iter_start JEMALLOC_N(extent_tree_szad_reverse_iter_start)
-#define extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search)
-#define get_errno JEMALLOC_N(get_errno)
-#define hash JEMALLOC_N(hash)
-#define huge_allocated JEMALLOC_N(huge_allocated)
-#define huge_boot JEMALLOC_N(huge_boot)
-#define huge_dalloc JEMALLOC_N(huge_dalloc)
-#define huge_malloc JEMALLOC_N(huge_malloc)
-#define huge_mtx JEMALLOC_N(huge_mtx)
-#define huge_ndalloc JEMALLOC_N(huge_ndalloc)
-#define huge_nmalloc JEMALLOC_N(huge_nmalloc)
-#define huge_palloc JEMALLOC_N(huge_palloc)
-#define huge_postfork_child JEMALLOC_N(huge_postfork_child)
-#define huge_postfork_parent JEMALLOC_N(huge_postfork_parent)
-#define huge_prefork JEMALLOC_N(huge_prefork)
-#define huge_prof_ctx_get JEMALLOC_N(huge_prof_ctx_get)
-#define huge_prof_ctx_set JEMALLOC_N(huge_prof_ctx_set)
-#define huge_ralloc JEMALLOC_N(huge_ralloc)
-#define huge_ralloc_no_move JEMALLOC_N(huge_ralloc_no_move)
-#define huge_salloc JEMALLOC_N(huge_salloc)
-#define iallocm JEMALLOC_N(iallocm)
-#define icalloc JEMALLOC_N(icalloc)
-#define icallocx JEMALLOC_N(icallocx)
-#define idalloc JEMALLOC_N(idalloc)
-#define idallocx JEMALLOC_N(idallocx)
-#define imalloc JEMALLOC_N(imalloc)
-#define imallocx JEMALLOC_N(imallocx)
-#define ipalloc JEMALLOC_N(ipalloc)
-#define ipallocx JEMALLOC_N(ipallocx)
-#define iqalloc JEMALLOC_N(iqalloc)
-#define iqallocx JEMALLOC_N(iqallocx)
-#define iralloc JEMALLOC_N(iralloc)
-#define irallocx JEMALLOC_N(irallocx)
-#define isalloc JEMALLOC_N(isalloc)
-#define isthreaded JEMALLOC_N(isthreaded)
-#define ivsalloc JEMALLOC_N(ivsalloc)
-#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
-#define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent)
-#define jemalloc_prefork JEMALLOC_N(jemalloc_prefork)
-#define malloc_cprintf JEMALLOC_N(malloc_cprintf)
-#define malloc_mutex_init JEMALLOC_N(malloc_mutex_init)
-#define malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock)
-#define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child)
-#define malloc_mutex_postfork_parent JEMALLOC_N(malloc_mutex_postfork_parent)
-#define malloc_mutex_prefork JEMALLOC_N(malloc_mutex_prefork)
-#define malloc_mutex_unlock JEMALLOC_N(malloc_mutex_unlock)
-#define malloc_printf JEMALLOC_N(malloc_printf)
-#define malloc_snprintf JEMALLOC_N(malloc_snprintf)
-#define malloc_strtoumax JEMALLOC_N(malloc_strtoumax)
-#define malloc_tsd_boot JEMALLOC_N(malloc_tsd_boot)
-#define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register)
-#define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc)
-#define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc)
-#define malloc_tsd_no_cleanup JEMALLOC_N(malloc_tsd_no_cleanup)
-#define malloc_vcprintf JEMALLOC_N(malloc_vcprintf)
-#define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf)
-#define malloc_write JEMALLOC_N(malloc_write)
-#define map_bias JEMALLOC_N(map_bias)
-#define mb_write JEMALLOC_N(mb_write)
-#define mutex_boot JEMALLOC_N(mutex_boot)
-#define narenas_auto JEMALLOC_N(narenas_auto)
-#define narenas_total JEMALLOC_N(narenas_total)
-#define narenas_total_get JEMALLOC_N(narenas_total_get)
-#define ncpus JEMALLOC_N(ncpus)
-#define nhbins JEMALLOC_N(nhbins)
-#define opt_abort JEMALLOC_N(opt_abort)
-#define opt_junk JEMALLOC_N(opt_junk)
-#define opt_lg_chunk JEMALLOC_N(opt_lg_chunk)
-#define opt_lg_dirty_mult JEMALLOC_N(opt_lg_dirty_mult)
-#define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval)
-#define opt_lg_prof_sample JEMALLOC_N(opt_lg_prof_sample)
-#define opt_lg_tcache_max JEMALLOC_N(opt_lg_tcache_max)
-#define opt_narenas JEMALLOC_N(opt_narenas)
-#define opt_prof JEMALLOC_N(opt_prof)
-#define opt_prof_accum JEMALLOC_N(opt_prof_accum)
-#define opt_prof_active JEMALLOC_N(opt_prof_active)
-#define opt_prof_final JEMALLOC_N(opt_prof_final)
-#define opt_prof_gdump JEMALLOC_N(opt_prof_gdump)
-#define opt_prof_leak JEMALLOC_N(opt_prof_leak)
-#define opt_prof_prefix JEMALLOC_N(opt_prof_prefix)
-#define opt_quarantine JEMALLOC_N(opt_quarantine)
-#define opt_redzone JEMALLOC_N(opt_redzone)
-#define opt_stats_print JEMALLOC_N(opt_stats_print)
-#define opt_tcache JEMALLOC_N(opt_tcache)
-#define opt_utrace JEMALLOC_N(opt_utrace)
-#define opt_valgrind JEMALLOC_N(opt_valgrind)
-#define opt_xmalloc JEMALLOC_N(opt_xmalloc)
-#define opt_zero JEMALLOC_N(opt_zero)
-#define p2rz JEMALLOC_N(p2rz)
-#define pages_purge JEMALLOC_N(pages_purge)
-#define pow2_ceil JEMALLOC_N(pow2_ceil)
-#define prof_backtrace JEMALLOC_N(prof_backtrace)
-#define prof_boot0 JEMALLOC_N(prof_boot0)
-#define prof_boot1 JEMALLOC_N(prof_boot1)
-#define prof_boot2 JEMALLOC_N(prof_boot2)
-#define prof_ctx_get JEMALLOC_N(prof_ctx_get)
-#define prof_ctx_set JEMALLOC_N(prof_ctx_set)
-#define prof_free JEMALLOC_N(prof_free)
-#define prof_gdump JEMALLOC_N(prof_gdump)
-#define prof_idump JEMALLOC_N(prof_idump)
-#define prof_interval JEMALLOC_N(prof_interval)
-#define prof_lookup JEMALLOC_N(prof_lookup)
-#define prof_malloc JEMALLOC_N(prof_malloc)
-#define prof_mdump JEMALLOC_N(prof_mdump)
-#define prof_postfork_child JEMALLOC_N(prof_postfork_child)
-#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent)
-#define prof_prefork JEMALLOC_N(prof_prefork)
-#define prof_promote JEMALLOC_N(prof_promote)
-#define prof_realloc JEMALLOC_N(prof_realloc)
-#define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update)
-#define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update)
-#define prof_tdata_booted JEMALLOC_N(prof_tdata_booted)
-#define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup)
-#define prof_tdata_get JEMALLOC_N(prof_tdata_get)
-#define prof_tdata_init JEMALLOC_N(prof_tdata_init)
-#define prof_tdata_initialized JEMALLOC_N(prof_tdata_initialized)
-#define prof_tdata_tls JEMALLOC_N(prof_tdata_tls)
-#define prof_tdata_tsd JEMALLOC_N(prof_tdata_tsd)
-#define prof_tdata_tsd_boot JEMALLOC_N(prof_tdata_tsd_boot)
-#define prof_tdata_tsd_cleanup_wrapper JEMALLOC_N(prof_tdata_tsd_cleanup_wrapper)
-#define prof_tdata_tsd_get JEMALLOC_N(prof_tdata_tsd_get)
-#define prof_tdata_tsd_set JEMALLOC_N(prof_tdata_tsd_set)
-#define quarantine JEMALLOC_N(quarantine)
-#define quarantine_boot JEMALLOC_N(quarantine_boot)
-#define quarantine_tsd_boot JEMALLOC_N(quarantine_tsd_boot)
-#define quarantine_tsd_cleanup_wrapper JEMALLOC_N(quarantine_tsd_cleanup_wrapper)
-#define quarantine_tsd_get JEMALLOC_N(quarantine_tsd_get)
-#define quarantine_tsd_set JEMALLOC_N(quarantine_tsd_set)
-#define register_zone JEMALLOC_N(register_zone)
-#define rtree_get JEMALLOC_N(rtree_get)
-#define rtree_get_locked JEMALLOC_N(rtree_get_locked)
-#define rtree_new JEMALLOC_N(rtree_new)
-#define rtree_postfork_child JEMALLOC_N(rtree_postfork_child)
-#define rtree_postfork_parent JEMALLOC_N(rtree_postfork_parent)
-#define rtree_prefork JEMALLOC_N(rtree_prefork)
-#define rtree_set JEMALLOC_N(rtree_set)
-#define s2u JEMALLOC_N(s2u)
-#define sa2u JEMALLOC_N(sa2u)
-#define set_errno JEMALLOC_N(set_errno)
-#define stats_cactive JEMALLOC_N(stats_cactive)
-#define stats_cactive_add JEMALLOC_N(stats_cactive_add)
-#define stats_cactive_get JEMALLOC_N(stats_cactive_get)
-#define stats_cactive_sub JEMALLOC_N(stats_cactive_sub)
-#define stats_chunks JEMALLOC_N(stats_chunks)
-#define stats_print JEMALLOC_N(stats_print)
-#define tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy)
-#define tcache_alloc_large JEMALLOC_N(tcache_alloc_large)
-#define tcache_alloc_small JEMALLOC_N(tcache_alloc_small)
-#define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard)
-#define tcache_arena_associate JEMALLOC_N(tcache_arena_associate)
-#define tcache_arena_dissociate JEMALLOC_N(tcache_arena_dissociate)
-#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large)
-#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small)
-#define tcache_bin_info JEMALLOC_N(tcache_bin_info)
-#define tcache_boot0 JEMALLOC_N(tcache_boot0)
-#define tcache_boot1 JEMALLOC_N(tcache_boot1)
-#define tcache_booted JEMALLOC_N(tcache_booted)
-#define tcache_create JEMALLOC_N(tcache_create)
-#define tcache_dalloc_large JEMALLOC_N(tcache_dalloc_large)
-#define tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small)
-#define tcache_destroy JEMALLOC_N(tcache_destroy)
-#define tcache_enabled_booted JEMALLOC_N(tcache_enabled_booted)
-#define tcache_enabled_get JEMALLOC_N(tcache_enabled_get)
-#define tcache_enabled_initialized JEMALLOC_N(tcache_enabled_initialized)
-#define tcache_enabled_set JEMALLOC_N(tcache_enabled_set)
-#define tcache_enabled_tls JEMALLOC_N(tcache_enabled_tls)
-#define tcache_enabled_tsd JEMALLOC_N(tcache_enabled_tsd)
-#define tcache_enabled_tsd_boot JEMALLOC_N(tcache_enabled_tsd_boot)
-#define tcache_enabled_tsd_cleanup_wrapper JEMALLOC_N(tcache_enabled_tsd_cleanup_wrapper)
-#define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get)
-#define tcache_enabled_tsd_set JEMALLOC_N(tcache_enabled_tsd_set)
-#define tcache_event JEMALLOC_N(tcache_event)
-#define tcache_event_hard JEMALLOC_N(tcache_event_hard)
-#define tcache_flush JEMALLOC_N(tcache_flush)
-#define tcache_get JEMALLOC_N(tcache_get)
-#define tcache_initialized JEMALLOC_N(tcache_initialized)
-#define tcache_maxclass JEMALLOC_N(tcache_maxclass)
-#define tcache_salloc JEMALLOC_N(tcache_salloc)
-#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge)
-#define tcache_thread_cleanup JEMALLOC_N(tcache_thread_cleanup)
-#define tcache_tls JEMALLOC_N(tcache_tls)
-#define tcache_tsd JEMALLOC_N(tcache_tsd)
-#define tcache_tsd_boot JEMALLOC_N(tcache_tsd_boot)
-#define tcache_tsd_cleanup_wrapper JEMALLOC_N(tcache_tsd_cleanup_wrapper)
-#define tcache_tsd_get JEMALLOC_N(tcache_tsd_get)
-#define tcache_tsd_set JEMALLOC_N(tcache_tsd_set)
-#define thread_allocated_booted JEMALLOC_N(thread_allocated_booted)
-#define thread_allocated_initialized JEMALLOC_N(thread_allocated_initialized)
-#define thread_allocated_tls JEMALLOC_N(thread_allocated_tls)
-#define thread_allocated_tsd JEMALLOC_N(thread_allocated_tsd)
-#define thread_allocated_tsd_boot JEMALLOC_N(thread_allocated_tsd_boot)
-#define thread_allocated_tsd_cleanup_wrapper JEMALLOC_N(thread_allocated_tsd_cleanup_wrapper)
-#define thread_allocated_tsd_get JEMALLOC_N(thread_allocated_tsd_get)
-#define thread_allocated_tsd_set JEMALLOC_N(thread_allocated_tsd_set)
-#define u2rz JEMALLOC_N(u2rz)
diff --git a/deps/jemalloc/include/jemalloc/internal/private_namespace.sh b/deps/jemalloc/include/jemalloc/internal/private_namespace.sh
new file mode 100755
index 000000000..cd25eb306
--- /dev/null
+++ b/deps/jemalloc/include/jemalloc/internal/private_namespace.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+for symbol in `cat $1` ; do
+ echo "#define ${symbol} JEMALLOC_N(${symbol})"
+done
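Run against the private_symbols.txt list added below, this script emits one mangling macro per symbol, replacing the hand-maintained private_namespace.h deleted above, e.g.:

#define arena_malloc JEMALLOC_N(arena_malloc)
#define huge_malloc JEMALLOC_N(huge_malloc)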
diff --git a/deps/jemalloc/include/jemalloc/internal/private_symbols.txt b/deps/jemalloc/include/jemalloc/internal/private_symbols.txt
new file mode 100644
index 000000000..93516d242
--- /dev/null
+++ b/deps/jemalloc/include/jemalloc/internal/private_symbols.txt
@@ -0,0 +1,413 @@
+a0calloc
+a0free
+a0malloc
+arena_alloc_junk_small
+arena_bin_index
+arena_bin_info
+arena_boot
+arena_dalloc
+arena_dalloc_bin
+arena_dalloc_bin_locked
+arena_dalloc_junk_large
+arena_dalloc_junk_small
+arena_dalloc_large
+arena_dalloc_large_locked
+arena_dalloc_small
+arena_dss_prec_get
+arena_dss_prec_set
+arena_malloc
+arena_malloc_large
+arena_malloc_small
+arena_mapbits_allocated_get
+arena_mapbits_binind_get
+arena_mapbits_dirty_get
+arena_mapbits_get
+arena_mapbits_large_binind_set
+arena_mapbits_large_get
+arena_mapbits_large_set
+arena_mapbits_large_size_get
+arena_mapbits_small_runind_get
+arena_mapbits_small_set
+arena_mapbits_unallocated_set
+arena_mapbits_unallocated_size_get
+arena_mapbits_unallocated_size_set
+arena_mapbits_unzeroed_get
+arena_mapbits_unzeroed_set
+arena_mapbitsp_get
+arena_mapbitsp_read
+arena_mapbitsp_write
+arena_mapp_get
+arena_maxclass
+arena_new
+arena_palloc
+arena_postfork_child
+arena_postfork_parent
+arena_prefork
+arena_prof_accum
+arena_prof_accum_impl
+arena_prof_accum_locked
+arena_prof_ctx_get
+arena_prof_ctx_set
+arena_prof_promoted
+arena_ptr_small_binind_get
+arena_purge_all
+arena_quarantine_junk_small
+arena_ralloc
+arena_ralloc_junk_large
+arena_ralloc_no_move
+arena_redzone_corruption
+arena_run_regind
+arena_salloc
+arena_stats_merge
+arena_tcache_fill_small
+arenas
+arenas_booted
+arenas_cleanup
+arenas_extend
+arenas_initialized
+arenas_lock
+arenas_tls
+arenas_tsd
+arenas_tsd_boot
+arenas_tsd_cleanup_wrapper
+arenas_tsd_get
+arenas_tsd_get_wrapper
+arenas_tsd_init_head
+arenas_tsd_set
+atomic_add_u
+atomic_add_uint32
+atomic_add_uint64
+atomic_add_z
+atomic_sub_u
+atomic_sub_uint32
+atomic_sub_uint64
+atomic_sub_z
+base_alloc
+base_boot
+base_calloc
+base_node_alloc
+base_node_dealloc
+base_postfork_child
+base_postfork_parent
+base_prefork
+bitmap_full
+bitmap_get
+bitmap_info_init
+bitmap_info_ngroups
+bitmap_init
+bitmap_set
+bitmap_sfu
+bitmap_size
+bitmap_unset
+bt_init
+buferror
+choose_arena
+choose_arena_hard
+chunk_alloc
+chunk_alloc_dss
+chunk_alloc_mmap
+chunk_boot
+chunk_dealloc
+chunk_dealloc_mmap
+chunk_dss_boot
+chunk_dss_postfork_child
+chunk_dss_postfork_parent
+chunk_dss_prec_get
+chunk_dss_prec_set
+chunk_dss_prefork
+chunk_in_dss
+chunk_npages
+chunk_postfork_child
+chunk_postfork_parent
+chunk_prefork
+chunk_unmap
+chunks_mtx
+chunks_rtree
+chunksize
+chunksize_mask
+ckh_bucket_search
+ckh_count
+ckh_delete
+ckh_evict_reloc_insert
+ckh_insert
+ckh_isearch
+ckh_iter
+ckh_new
+ckh_pointer_hash
+ckh_pointer_keycomp
+ckh_rebuild
+ckh_remove
+ckh_search
+ckh_string_hash
+ckh_string_keycomp
+ckh_try_bucket_insert
+ckh_try_insert
+ctl_boot
+ctl_bymib
+ctl_byname
+ctl_nametomib
+ctl_postfork_child
+ctl_postfork_parent
+ctl_prefork
+dss_prec_names
+extent_tree_ad_first
+extent_tree_ad_insert
+extent_tree_ad_iter
+extent_tree_ad_iter_recurse
+extent_tree_ad_iter_start
+extent_tree_ad_last
+extent_tree_ad_new
+extent_tree_ad_next
+extent_tree_ad_nsearch
+extent_tree_ad_prev
+extent_tree_ad_psearch
+extent_tree_ad_remove
+extent_tree_ad_reverse_iter
+extent_tree_ad_reverse_iter_recurse
+extent_tree_ad_reverse_iter_start
+extent_tree_ad_search
+extent_tree_szad_first
+extent_tree_szad_insert
+extent_tree_szad_iter
+extent_tree_szad_iter_recurse
+extent_tree_szad_iter_start
+extent_tree_szad_last
+extent_tree_szad_new
+extent_tree_szad_next
+extent_tree_szad_nsearch
+extent_tree_szad_prev
+extent_tree_szad_psearch
+extent_tree_szad_remove
+extent_tree_szad_reverse_iter
+extent_tree_szad_reverse_iter_recurse
+extent_tree_szad_reverse_iter_start
+extent_tree_szad_search
+get_errno
+hash
+hash_fmix_32
+hash_fmix_64
+hash_get_block_32
+hash_get_block_64
+hash_rotl_32
+hash_rotl_64
+hash_x64_128
+hash_x86_128
+hash_x86_32
+huge_allocated
+huge_boot
+huge_dalloc
+huge_dalloc_junk
+huge_dss_prec_get
+huge_malloc
+huge_mtx
+huge_ndalloc
+huge_nmalloc
+huge_palloc
+huge_postfork_child
+huge_postfork_parent
+huge_prefork
+huge_prof_ctx_get
+huge_prof_ctx_set
+huge_ralloc
+huge_ralloc_no_move
+huge_salloc
+iallocm
+icalloc
+icalloct
+idalloc
+idalloct
+imalloc
+imalloct
+ipalloc
+ipalloct
+iqalloc
+iqalloct
+iralloc
+iralloct
+iralloct_realign
+isalloc
+isthreaded
+ivsalloc
+ixalloc
+jemalloc_postfork_child
+jemalloc_postfork_parent
+jemalloc_prefork
+malloc_cprintf
+malloc_mutex_init
+malloc_mutex_lock
+malloc_mutex_postfork_child
+malloc_mutex_postfork_parent
+malloc_mutex_prefork
+malloc_mutex_unlock
+malloc_printf
+malloc_snprintf
+malloc_strtoumax
+malloc_tsd_boot
+malloc_tsd_cleanup_register
+malloc_tsd_dalloc
+malloc_tsd_malloc
+malloc_tsd_no_cleanup
+malloc_vcprintf
+malloc_vsnprintf
+malloc_write
+map_bias
+mb_write
+mutex_boot
+narenas_auto
+narenas_total
+narenas_total_get
+ncpus
+nhbins
+opt_abort
+opt_dss
+opt_junk
+opt_lg_chunk
+opt_lg_dirty_mult
+opt_lg_prof_interval
+opt_lg_prof_sample
+opt_lg_tcache_max
+opt_narenas
+opt_prof
+opt_prof_accum
+opt_prof_active
+opt_prof_final
+opt_prof_gdump
+opt_prof_leak
+opt_prof_prefix
+opt_quarantine
+opt_redzone
+opt_stats_print
+opt_tcache
+opt_utrace
+opt_valgrind
+opt_xmalloc
+opt_zero
+p2rz
+pages_purge
+pow2_ceil
+prof_backtrace
+prof_boot0
+prof_boot1
+prof_boot2
+prof_bt_count
+prof_ctx_get
+prof_ctx_set
+prof_dump_open
+prof_free
+prof_gdump
+prof_idump
+prof_interval
+prof_lookup
+prof_malloc
+prof_mdump
+prof_postfork_child
+prof_postfork_parent
+prof_prefork
+prof_promote
+prof_realloc
+prof_sample_accum_update
+prof_sample_threshold_update
+prof_tdata_booted
+prof_tdata_cleanup
+prof_tdata_get
+prof_tdata_init
+prof_tdata_initialized
+prof_tdata_tls
+prof_tdata_tsd
+prof_tdata_tsd_boot
+prof_tdata_tsd_cleanup_wrapper
+prof_tdata_tsd_get
+prof_tdata_tsd_get_wrapper
+prof_tdata_tsd_init_head
+prof_tdata_tsd_set
+quarantine
+quarantine_alloc_hook
+quarantine_boot
+quarantine_booted
+quarantine_cleanup
+quarantine_init
+quarantine_tls
+quarantine_tsd
+quarantine_tsd_boot
+quarantine_tsd_cleanup_wrapper
+quarantine_tsd_get
+quarantine_tsd_get_wrapper
+quarantine_tsd_init_head
+quarantine_tsd_set
+register_zone
+rtree_delete
+rtree_get
+rtree_get_locked
+rtree_new
+rtree_postfork_child
+rtree_postfork_parent
+rtree_prefork
+rtree_set
+s2u
+sa2u
+set_errno
+small_size2bin
+stats_cactive
+stats_cactive_add
+stats_cactive_get
+stats_cactive_sub
+stats_chunks
+stats_print
+tcache_alloc_easy
+tcache_alloc_large
+tcache_alloc_small
+tcache_alloc_small_hard
+tcache_arena_associate
+tcache_arena_dissociate
+tcache_bin_flush_large
+tcache_bin_flush_small
+tcache_bin_info
+tcache_boot0
+tcache_boot1
+tcache_booted
+tcache_create
+tcache_dalloc_large
+tcache_dalloc_small
+tcache_destroy
+tcache_enabled_booted
+tcache_enabled_get
+tcache_enabled_initialized
+tcache_enabled_set
+tcache_enabled_tls
+tcache_enabled_tsd
+tcache_enabled_tsd_boot
+tcache_enabled_tsd_cleanup_wrapper
+tcache_enabled_tsd_get
+tcache_enabled_tsd_get_wrapper
+tcache_enabled_tsd_init_head
+tcache_enabled_tsd_set
+tcache_event
+tcache_event_hard
+tcache_flush
+tcache_get
+tcache_initialized
+tcache_maxclass
+tcache_salloc
+tcache_stats_merge
+tcache_thread_cleanup
+tcache_tls
+tcache_tsd
+tcache_tsd_boot
+tcache_tsd_cleanup_wrapper
+tcache_tsd_get
+tcache_tsd_get_wrapper
+tcache_tsd_init_head
+tcache_tsd_set
+thread_allocated_booted
+thread_allocated_initialized
+thread_allocated_tls
+thread_allocated_tsd
+thread_allocated_tsd_boot
+thread_allocated_tsd_cleanup_wrapper
+thread_allocated_tsd_get
+thread_allocated_tsd_get_wrapper
+thread_allocated_tsd_init_head
+thread_allocated_tsd_set
+tsd_init_check_recursion
+tsd_init_finish
+u2rz
diff --git a/deps/jemalloc/include/jemalloc/internal/private_unnamespace.sh b/deps/jemalloc/include/jemalloc/internal/private_unnamespace.sh
new file mode 100755
index 000000000..23fed8e80
--- /dev/null
+++ b/deps/jemalloc/include/jemalloc/internal/private_unnamespace.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+for symbol in `cat $1` ; do
+ echo "#undef ${symbol}"
+done
diff --git a/deps/jemalloc/include/jemalloc/internal/prng.h b/deps/jemalloc/include/jemalloc/internal/prng.h
index 83a5462b4..7b2b06512 100644
--- a/deps/jemalloc/include/jemalloc/internal/prng.h
+++ b/deps/jemalloc/include/jemalloc/internal/prng.h
@@ -25,7 +25,7 @@
* uint32_t state : Seed value.
* const uint32_t a, c : See above discussion.
*/
-#define prng32(r, lg_range, state, a, c) do { \
+#define prng32(r, lg_range, state, a, c) do { \
assert(lg_range > 0); \
assert(lg_range <= 32); \
\
@@ -35,7 +35,7 @@
} while (false)
/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */
-#define prng64(r, lg_range, state, a, c) do { \
+#define prng64(r, lg_range, state, a, c) do { \
assert(lg_range > 0); \
assert(lg_range <= 64); \
\
diff --git a/deps/jemalloc/include/jemalloc/internal/prof.h b/deps/jemalloc/include/jemalloc/internal/prof.h
index 47f22ad2d..6f162d21e 100644
--- a/deps/jemalloc/include/jemalloc/internal/prof.h
+++ b/deps/jemalloc/include/jemalloc/internal/prof.h
@@ -8,7 +8,11 @@ typedef struct prof_ctx_s prof_ctx_t;
typedef struct prof_tdata_s prof_tdata_t;
/* Option defaults. */
-#define PROF_PREFIX_DEFAULT "jeprof"
+#ifdef JEMALLOC_PROF
+# define PROF_PREFIX_DEFAULT "jeprof"
+#else
+# define PROF_PREFIX_DEFAULT ""
+#endif
#define LG_PROF_SAMPLE_DEFAULT 19
#define LG_PROF_INTERVAL_DEFAULT -1
@@ -129,6 +133,7 @@ struct prof_ctx_s {
* limbo due to one of:
* - Initializing per thread counters associated with this ctx.
* - Preparing to destroy this ctx.
+ * - Dumping a heap profile that includes this ctx.
* nlimbo must be 1 (single destroyer) in order to safely destroy the
* ctx.
*/
@@ -145,7 +150,11 @@ struct prof_ctx_s {
* this context.
*/
ql_head(prof_thr_cnt_t) cnts_ql;
+
+ /* Linkage for list of contexts to be dumped. */
+ ql_elm(prof_ctx_t) dump_link;
};
+typedef ql_head(prof_ctx_t) prof_ctx_list_t;
struct prof_tdata_s {
/*
@@ -195,7 +204,12 @@ extern bool opt_prof_gdump; /* High-water memory dumping. */
extern bool opt_prof_final; /* Final profile dumping. */
extern bool opt_prof_leak; /* Dump leak summary at exit. */
extern bool opt_prof_accum; /* Report cumulative bytes. */
-extern char opt_prof_prefix[PATH_MAX + 1];
+extern char opt_prof_prefix[
+ /* Minimize memory bloat for non-prof builds. */
+#ifdef JEMALLOC_PROF
+ PATH_MAX +
+#endif
+ 1];
/*
* Profile dump interval, measured in bytes allocated. Each arena triggers a
@@ -215,6 +229,11 @@ extern bool prof_promote;
void bt_init(prof_bt_t *bt, void **vec);
void prof_backtrace(prof_bt_t *bt, unsigned nignore);
prof_thr_cnt_t *prof_lookup(prof_bt_t *bt);
+#ifdef JEMALLOC_JET
+size_t prof_bt_count(void);
+typedef int (prof_dump_open_t)(bool, const char *);
+extern prof_dump_open_t *prof_dump_open;
+#endif
void prof_idump(void);
bool prof_mdump(const char *filename);
void prof_gdump(void);
@@ -237,7 +256,7 @@ void prof_postfork_child(void);
\
assert(size == s2u(size)); \
\
- prof_tdata = prof_tdata_get(); \
+ prof_tdata = prof_tdata_get(true); \
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) { \
if (prof_tdata != NULL) \
ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
@@ -286,14 +305,14 @@ void prof_postfork_child(void);
#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), prof_tdata, prof_tdata_t *)
-prof_tdata_t *prof_tdata_get(void);
+prof_tdata_t *prof_tdata_get(bool create);
void prof_sample_threshold_update(prof_tdata_t *prof_tdata);
prof_ctx_t *prof_ctx_get(const void *ptr);
-void prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
+void prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx);
bool prof_sample_accum_update(size_t size);
-void prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt);
-void prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
- size_t old_size, prof_ctx_t *old_ctx);
+void prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt);
+void prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt,
+ size_t old_usize, prof_ctx_t *old_ctx);
void prof_free(const void *ptr, size_t size);
#endif
@@ -304,17 +323,15 @@ malloc_tsd_funcs(JEMALLOC_INLINE, prof_tdata, prof_tdata_t *, NULL,
prof_tdata_cleanup)
JEMALLOC_INLINE prof_tdata_t *
-prof_tdata_get(void)
+prof_tdata_get(bool create)
{
prof_tdata_t *prof_tdata;
cassert(config_prof);
prof_tdata = *prof_tdata_tsd_get();
- if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) {
- if (prof_tdata == NULL)
- prof_tdata = prof_tdata_init();
- }
+ if (create && prof_tdata == NULL)
+ prof_tdata = prof_tdata_init();
return (prof_tdata);
}
@@ -322,6 +339,20 @@ prof_tdata_get(void)
JEMALLOC_INLINE void
prof_sample_threshold_update(prof_tdata_t *prof_tdata)
{
+ /*
+ * The body of this function is compiled out unless heap profiling is
+ * enabled, so that it is possible to compile jemalloc with floating
+ * point support completely disabled. Avoiding floating point code is
+ * important on memory-constrained systems, but it also enables a
+ * workaround for versions of glibc that don't properly save/restore
+ * floating point registers during dynamic lazy symbol loading (which
+ * internally calls into whatever malloc implementation happens to be
+ * integrated into the application). Note that some compilers (e.g.
+ * gcc 4.8) may use floating point registers for fast memory moves, so
+ * jemalloc must be compiled with such optimizations disabled (e.g.
+ * -mno-sse) in order for the workaround to be complete.
+ */
+#ifdef JEMALLOC_PROF
uint64_t r;
double u;
@@ -343,7 +374,7 @@ prof_sample_threshold_update(prof_tdata_t *prof_tdata)
* Luc Devroye
* Springer-Verlag, New York, 1986
* pp 500
- * (http://cg.scs.carleton.ca/~luc/rnbookindex.html)
+ * (http://luc.devroye.org/rnbookindex.html)
*/
prng64(r, 53, prof_tdata->prng_state,
UINT64_C(6364136223846793005), UINT64_C(1442695040888963407));
@@ -351,6 +382,7 @@ prof_sample_threshold_update(prof_tdata_t *prof_tdata)
prof_tdata->threshold = (uint64_t)(log(u) /
log(1.0 - (1.0 / (double)((uint64_t)1U << opt_lg_prof_sample))))
+ (uint64_t)1U;
+#endif
}
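For reference, the computation above is inversion sampling from a geometric distribution; restated compactly, assuming u is the 53-bit draw r mapped onto [0, 1) (the line computing u falls outside this hunk):

	p = 1 / 2^opt_lg_prof_sample
	threshold = floor(log(u) / log(1 - p)) + 1

so on average one sample is taken per 2^opt_lg_prof_sample allocated bytes.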
JEMALLOC_INLINE prof_ctx_t *
@@ -373,7 +405,7 @@ prof_ctx_get(const void *ptr)
}
JEMALLOC_INLINE void
-prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
+prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx)
{
arena_chunk_t *chunk;
@@ -383,7 +415,7 @@ prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr) {
/* Region. */
- arena_prof_ctx_set(ptr, ctx);
+ arena_prof_ctx_set(ptr, usize, ctx);
} else
huge_prof_ctx_set(ptr, ctx);
}
@@ -397,7 +429,7 @@ prof_sample_accum_update(size_t size)
/* Sampling logic is unnecessary if the interval is 1. */
assert(opt_lg_prof_sample != 0);
- prof_tdata = *prof_tdata_tsd_get();
+ prof_tdata = prof_tdata_get(false);
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
return (true);
@@ -418,20 +450,20 @@ prof_sample_accum_update(size_t size)
}
JEMALLOC_INLINE void
-prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt)
+prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt)
{
cassert(config_prof);
assert(ptr != NULL);
- assert(size == isalloc(ptr, true));
+ assert(usize == isalloc(ptr, true));
if (opt_lg_prof_sample != 0) {
- if (prof_sample_accum_update(size)) {
+ if (prof_sample_accum_update(usize)) {
/*
* Don't sample. For malloc()-like allocation, it is
* always possible to tell in advance how large an
* object's usable size will be, so there should never
- * be a difference between the size passed to
+ * be a difference between the usize passed to
* PROF_ALLOC_PREP() and prof_malloc().
*/
assert((uintptr_t)cnt == (uintptr_t)1U);
@@ -439,17 +471,17 @@ prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt)
}
if ((uintptr_t)cnt > (uintptr_t)1U) {
- prof_ctx_set(ptr, cnt->ctx);
+ prof_ctx_set(ptr, usize, cnt->ctx);
cnt->epoch++;
/*********/
mb_write();
/*********/
cnt->cnts.curobjs++;
- cnt->cnts.curbytes += size;
+ cnt->cnts.curbytes += usize;
if (opt_prof_accum) {
cnt->cnts.accumobjs++;
- cnt->cnts.accumbytes += size;
+ cnt->cnts.accumbytes += usize;
}
/*********/
mb_write();
@@ -459,12 +491,12 @@ prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt)
mb_write();
/*********/
} else
- prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
+ prof_ctx_set(ptr, usize, (prof_ctx_t *)(uintptr_t)1U);
}
JEMALLOC_INLINE void
-prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
- size_t old_size, prof_ctx_t *old_ctx)
+prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt,
+ size_t old_usize, prof_ctx_t *old_ctx)
{
prof_thr_cnt_t *told_cnt;
@@ -472,15 +504,15 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U);
if (ptr != NULL) {
- assert(size == isalloc(ptr, true));
+ assert(usize == isalloc(ptr, true));
if (opt_lg_prof_sample != 0) {
- if (prof_sample_accum_update(size)) {
+ if (prof_sample_accum_update(usize)) {
/*
- * Don't sample. The size passed to
+ * Don't sample. The usize passed to
* PROF_ALLOC_PREP() was larger than what
* actually got allocated, so a backtrace was
* captured for this allocation, even though
- * its actual size was insufficient to cross
+ * its actual usize was insufficient to cross
* the sample threshold.
*/
cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
@@ -497,7 +529,7 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
*/
malloc_mutex_lock(old_ctx->lock);
old_ctx->cnt_merged.curobjs--;
- old_ctx->cnt_merged.curbytes -= old_size;
+ old_ctx->cnt_merged.curbytes -= old_usize;
malloc_mutex_unlock(old_ctx->lock);
told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
}
@@ -507,23 +539,23 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
if ((uintptr_t)told_cnt > (uintptr_t)1U)
told_cnt->epoch++;
if ((uintptr_t)cnt > (uintptr_t)1U) {
- prof_ctx_set(ptr, cnt->ctx);
+ prof_ctx_set(ptr, usize, cnt->ctx);
cnt->epoch++;
} else if (ptr != NULL)
- prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
+ prof_ctx_set(ptr, usize, (prof_ctx_t *)(uintptr_t)1U);
/*********/
mb_write();
/*********/
if ((uintptr_t)told_cnt > (uintptr_t)1U) {
told_cnt->cnts.curobjs--;
- told_cnt->cnts.curbytes -= old_size;
+ told_cnt->cnts.curbytes -= old_usize;
}
if ((uintptr_t)cnt > (uintptr_t)1U) {
cnt->cnts.curobjs++;
- cnt->cnts.curbytes += size;
+ cnt->cnts.curbytes += usize;
if (opt_prof_accum) {
cnt->cnts.accumobjs++;
- cnt->cnts.accumbytes += size;
+ cnt->cnts.accumbytes += usize;
}
}
/*********/
diff --git a/deps/jemalloc/include/jemalloc/internal/public_namespace.sh b/deps/jemalloc/include/jemalloc/internal/public_namespace.sh
new file mode 100755
index 000000000..362109f71
--- /dev/null
+++ b/deps/jemalloc/include/jemalloc/internal/public_namespace.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+for nm in `cat $1` ; do
+ n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
+ echo "#define je_${n} JEMALLOC_N(${n})"
+done
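Each entry in the public symbol list pairs an unprefixed name with its mangled form; for a hypothetical entry "malloc:je_malloc", the script above emits:

#define je_malloc JEMALLOC_N(malloc)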
diff --git a/deps/jemalloc/include/jemalloc/internal/public_unnamespace.sh b/deps/jemalloc/include/jemalloc/internal/public_unnamespace.sh
new file mode 100755
index 000000000..4239d1775
--- /dev/null
+++ b/deps/jemalloc/include/jemalloc/internal/public_unnamespace.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+for nm in `cat $1` ; do
+ n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
+ echo "#undef je_${n}"
+done
diff --git a/deps/jemalloc/include/jemalloc/internal/ql.h b/deps/jemalloc/include/jemalloc/internal/ql.h
index a9ed2393f..f70c5f6f3 100644
--- a/deps/jemalloc/include/jemalloc/internal/ql.h
+++ b/deps/jemalloc/include/jemalloc/internal/ql.h
@@ -1,61 +1,61 @@
/*
* List definitions.
*/
-#define ql_head(a_type) \
+#define ql_head(a_type) \
struct { \
a_type *qlh_first; \
}
-#define ql_head_initializer(a_head) {NULL}
+#define ql_head_initializer(a_head) {NULL}
-#define ql_elm(a_type) qr(a_type)
+#define ql_elm(a_type) qr(a_type)
/* List functions. */
-#define ql_new(a_head) do { \
+#define ql_new(a_head) do { \
(a_head)->qlh_first = NULL; \
} while (0)
-#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
+#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
-#define ql_first(a_head) ((a_head)->qlh_first)
+#define ql_first(a_head) ((a_head)->qlh_first)
-#define ql_last(a_head, a_field) \
+#define ql_last(a_head, a_field) \
((ql_first(a_head) != NULL) \
? qr_prev(ql_first(a_head), a_field) : NULL)
-#define ql_next(a_head, a_elm, a_field) \
+#define ql_next(a_head, a_elm, a_field) \
((ql_last(a_head, a_field) != (a_elm)) \
? qr_next((a_elm), a_field) : NULL)
-#define ql_prev(a_head, a_elm, a_field) \
+#define ql_prev(a_head, a_elm, a_field) \
((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
: NULL)
-#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
+#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
qr_before_insert((a_qlelm), (a_elm), a_field); \
if (ql_first(a_head) == (a_qlelm)) { \
ql_first(a_head) = (a_elm); \
} \
} while (0)
-#define ql_after_insert(a_qlelm, a_elm, a_field) \
+#define ql_after_insert(a_qlelm, a_elm, a_field) \
qr_after_insert((a_qlelm), (a_elm), a_field)
-#define ql_head_insert(a_head, a_elm, a_field) do { \
+#define ql_head_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = (a_elm); \
} while (0)
-#define ql_tail_insert(a_head, a_elm, a_field) do { \
+#define ql_tail_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = qr_next((a_elm), a_field); \
} while (0)
-#define ql_remove(a_head, a_elm, a_field) do { \
+#define ql_remove(a_head, a_elm, a_field) do { \
if (ql_first(a_head) == (a_elm)) { \
ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
} \
@@ -66,18 +66,18 @@ struct { \
} \
} while (0)
-#define ql_head_remove(a_head, a_type, a_field) do { \
+#define ql_head_remove(a_head, a_type, a_field) do { \
a_type *t = ql_first(a_head); \
ql_remove((a_head), t, a_field); \
} while (0)
-#define ql_tail_remove(a_head, a_type, a_field) do { \
+#define ql_tail_remove(a_head, a_type, a_field) do { \
a_type *t = ql_last(a_head, a_field); \
ql_remove((a_head), t, a_field); \
} while (0)
-#define ql_foreach(a_var, a_head, a_field) \
+#define ql_foreach(a_var, a_head, a_field) \
qr_foreach((a_var), ql_first(a_head), a_field)
-#define ql_reverse_foreach(a_var, a_head, a_field) \
+#define ql_reverse_foreach(a_var, a_head, a_field) \
qr_reverse_foreach((a_var), ql_first(a_head), a_field)
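
The ql_* macros implement an intrusive, circular doubly-linked list on top of the qr_* ring primitives in qr.h below; the change in this file is whitespace only. Not part of the patch: a minimal usage sketch, with widget_t standing in for any element type:

typedef struct widget_s widget_t;
struct widget_s {
        int                     id;
        ql_elm(widget_t)        link;   /* embedded ring linkage */
};

/* A list head is a single pointer to the first element. */
typedef ql_head(widget_t) widget_list_t;

static void
widget_list_demo(widget_t *a, widget_t *b)
{
        widget_list_t list;
        widget_t *w;

        ql_new(&list);                  /* empty list */
        ql_elm_new(a, link);            /* each element starts as a 1-element ring */
        ql_elm_new(b, link);
        ql_tail_insert(&list, a, link);
        ql_tail_insert(&list, b, link);

        ql_foreach(w, &list, link) {
                /* visits a, then b */
        }
        ql_remove(&list, a, link);
}
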
diff --git a/deps/jemalloc/include/jemalloc/internal/qr.h b/deps/jemalloc/include/jemalloc/internal/qr.h
index fe22352fe..602944b9b 100644
--- a/deps/jemalloc/include/jemalloc/internal/qr.h
+++ b/deps/jemalloc/include/jemalloc/internal/qr.h
@@ -1,28 +1,28 @@
/* Ring definitions. */
-#define qr(a_type) \
+#define qr(a_type) \
struct { \
a_type *qre_next; \
a_type *qre_prev; \
}
/* Ring functions. */
-#define qr_new(a_qr, a_field) do { \
+#define qr_new(a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
-#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
+#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
-#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
+#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
-#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
+#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
(a_qr)->a_field.qre_next = (a_qrelm); \
(a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
(a_qrelm)->a_field.qre_prev = (a_qr); \
} while (0)
-#define qr_after_insert(a_qrelm, a_qr, a_field) \
+#define qr_after_insert(a_qrelm, a_qr, a_field) \
do \
{ \
(a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
@@ -31,7 +31,7 @@ struct { \
(a_qrelm)->a_field.qre_next = (a_qr); \
} while (0)
-#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
+#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
void *t; \
(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
@@ -42,10 +42,10 @@ struct { \
/* qr_meld() and qr_split() are functionally equivalent, so there's no need to
* have two copies of the code. */
-#define qr_split(a_qr_a, a_qr_b, a_field) \
+#define qr_split(a_qr_a, a_qr_b, a_field) \
qr_meld((a_qr_a), (a_qr_b), a_field)
-#define qr_remove(a_qr, a_field) do { \
+#define qr_remove(a_qr, a_field) do { \
(a_qr)->a_field.qre_prev->a_field.qre_next \
= (a_qr)->a_field.qre_next; \
(a_qr)->a_field.qre_next->a_field.qre_prev \
@@ -54,13 +54,13 @@ struct { \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
-#define qr_foreach(var, a_qr, a_field) \
+#define qr_foreach(var, a_qr, a_field) \
for ((var) = (a_qr); \
(var) != NULL; \
(var) = (((var)->a_field.qre_next != (a_qr)) \
? (var)->a_field.qre_next : NULL))
-#define qr_reverse_foreach(var, a_qr, a_field) \
+#define qr_reverse_foreach(var, a_qr, a_field) \
for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
(var) != NULL; \
(var) = (((var) != (a_qr)) \
diff --git a/deps/jemalloc/include/jemalloc/internal/quarantine.h b/deps/jemalloc/include/jemalloc/internal/quarantine.h
index 38f3d696e..16f677f73 100644
--- a/deps/jemalloc/include/jemalloc/internal/quarantine.h
+++ b/deps/jemalloc/include/jemalloc/internal/quarantine.h
@@ -1,6 +1,9 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
+typedef struct quarantine_obj_s quarantine_obj_t;
+typedef struct quarantine_s quarantine_t;
+
/* Default per thread quarantine size if valgrind is enabled. */
#define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24)
@@ -8,17 +11,57 @@
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
+struct quarantine_obj_s {
+ void *ptr;
+ size_t usize;
+};
+
+struct quarantine_s {
+ size_t curbytes;
+ size_t curobjs;
+ size_t first;
+#define LG_MAXOBJS_INIT 10
+ size_t lg_maxobjs;
+ quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */
+};
+
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
+quarantine_t *quarantine_init(size_t lg_maxobjs);
void quarantine(void *ptr);
+void quarantine_cleanup(void *arg);
bool quarantine_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
+#ifndef JEMALLOC_ENABLE_INLINE
+malloc_tsd_protos(JEMALLOC_ATTR(unused), quarantine, quarantine_t *)
+
+void quarantine_alloc_hook(void);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_))
+malloc_tsd_externs(quarantine, quarantine_t *)
+malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, quarantine, quarantine_t *, NULL,
+ quarantine_cleanup)
+
+JEMALLOC_ALWAYS_INLINE void
+quarantine_alloc_hook(void)
+{
+ quarantine_t *quarantine;
+
+ assert(config_fill && opt_quarantine);
+
+ quarantine = *quarantine_tsd_get();
+ if (quarantine == NULL)
+ quarantine_init(LG_MAXOBJS_INIT);
+}
+#endif
+
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
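
quarantine_t is now declared in this header as a power-of-two ring buffer of {ptr, usize} pairs: first indexes the oldest entry, curobjs/curbytes track occupancy, and lg_maxobjs sizes the objs[] array. Not part of the patch, and not the actual quarantine() code in src/quarantine.c: a sketch of the indexing discipline this layout implies:

/* Illustrative only: append one freed pointer to the ring buffer. */
static void
quarantine_push(quarantine_t *q, void *ptr, size_t usize)
{
        size_t mask = ((size_t)1 << q->lg_maxobjs) - 1;
        size_t slot = (q->first + q->curobjs) & mask;   /* one past the newest */

        q->objs[slot].ptr = ptr;
        q->objs[slot].usize = usize;
        q->curobjs++;
        q->curbytes += usize;
        /*
         * Draining the oldest entry is the mirror image: free objs[first].ptr,
         * then first = (first + 1) & mask, curobjs--, curbytes -= usize.
         */
}
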
diff --git a/deps/jemalloc/include/jemalloc/internal/rb.h b/deps/jemalloc/include/jemalloc/internal/rb.h
index 7b675f090..423802eb2 100644
--- a/deps/jemalloc/include/jemalloc/internal/rb.h
+++ b/deps/jemalloc/include/jemalloc/internal/rb.h
@@ -22,10 +22,6 @@
#ifndef RB_H_
#define RB_H_
-#if 0
-__FBSDID("$FreeBSD: head/lib/libc/stdlib/rb.h 204493 2010-02-28 22:57:13Z jasone $");
-#endif
-
#ifdef RB_COMPACT
/* Node structure. */
#define rb_node(a_type) \
diff --git a/deps/jemalloc/include/jemalloc/internal/rtree.h b/deps/jemalloc/include/jemalloc/internal/rtree.h
index 9bd98548c..bc74769f5 100644
--- a/deps/jemalloc/include/jemalloc/internal/rtree.h
+++ b/deps/jemalloc/include/jemalloc/internal/rtree.h
@@ -14,17 +14,18 @@ typedef struct rtree_s rtree_t;
* Size of each radix tree node (must be a power of 2). This impacts tree
* depth.
*/
-#if (LG_SIZEOF_PTR == 2)
-# define RTREE_NODESIZE (1U << 14)
-#else
-# define RTREE_NODESIZE CACHELINE
-#endif
+#define RTREE_NODESIZE (1U << 16)
+
+typedef void *(rtree_alloc_t)(size_t);
+typedef void (rtree_dalloc_t)(void *);
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct rtree_s {
+ rtree_alloc_t *alloc;
+ rtree_dalloc_t *dalloc;
malloc_mutex_t mutex;
void **root;
unsigned height;
@@ -35,7 +36,8 @@ struct rtree_s {
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-rtree_t *rtree_new(unsigned bits);
+rtree_t *rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc);
+void rtree_delete(rtree_t *rtree);
void rtree_prefork(rtree_t *rtree);
void rtree_postfork_parent(rtree_t *rtree);
void rtree_postfork_child(rtree_t *rtree);
@@ -45,20 +47,20 @@ void rtree_postfork_child(rtree_t *rtree);
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
-#ifndef JEMALLOC_DEBUG
-void *rtree_get_locked(rtree_t *rtree, uintptr_t key);
+#ifdef JEMALLOC_DEBUG
+uint8_t rtree_get_locked(rtree_t *rtree, uintptr_t key);
#endif
-void *rtree_get(rtree_t *rtree, uintptr_t key);
-bool rtree_set(rtree_t *rtree, uintptr_t key, void *val);
+uint8_t rtree_get(rtree_t *rtree, uintptr_t key);
+bool rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
#define RTREE_GET_GENERATE(f) \
/* The least significant bits of the key are ignored. */ \
-JEMALLOC_INLINE void * \
+JEMALLOC_INLINE uint8_t \
f(rtree_t *rtree, uintptr_t key) \
{ \
- void *ret; \
+ uint8_t ret; \
uintptr_t subkey; \
unsigned i, lshift, height, bits; \
void **node, **child; \
@@ -68,12 +70,12 @@ f(rtree_t *rtree, uintptr_t key) \
i < height - 1; \
i++, lshift += bits, node = child) { \
bits = rtree->level2bits[i]; \
- subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \
+ subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \
3)) - bits); \
child = (void**)node[subkey]; \
if (child == NULL) { \
RTREE_UNLOCK(&rtree->mutex); \
- return (NULL); \
+ return (0); \
} \
} \
\
@@ -84,7 +86,10 @@ f(rtree_t *rtree, uintptr_t key) \
bits = rtree->level2bits[i]; \
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - \
bits); \
- ret = node[subkey]; \
+ { \
+ uint8_t *leaf = (uint8_t *)node; \
+ ret = leaf[subkey]; \
+ } \
RTREE_UNLOCK(&rtree->mutex); \
\
RTREE_GET_VALIDATE \
@@ -123,7 +128,7 @@ RTREE_GET_GENERATE(rtree_get)
#undef RTREE_GET_VALIDATE
JEMALLOC_INLINE bool
-rtree_set(rtree_t *rtree, uintptr_t key, void *val)
+rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val)
{
uintptr_t subkey;
unsigned i, lshift, height, bits;
@@ -138,14 +143,14 @@ rtree_set(rtree_t *rtree, uintptr_t key, void *val)
bits);
child = (void**)node[subkey];
if (child == NULL) {
- child = (void**)base_alloc(sizeof(void *) <<
- rtree->level2bits[i+1]);
+ size_t size = ((i + 1 < height - 1) ? sizeof(void *)
+ : (sizeof(uint8_t))) << rtree->level2bits[i+1];
+ child = (void**)rtree->alloc(size);
if (child == NULL) {
malloc_mutex_unlock(&rtree->mutex);
return (true);
}
- memset(child, 0, sizeof(void *) <<
- rtree->level2bits[i+1]);
+ memset(child, 0, size);
node[subkey] = child;
}
}
@@ -153,7 +158,10 @@ rtree_set(rtree_t *rtree, uintptr_t key, void *val)
/* node is a leaf, so it contains values rather than node pointers. */
bits = rtree->level2bits[i];
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits);
- node[subkey] = val;
+ {
+ uint8_t *leaf = (uint8_t *)node;
+ leaf[subkey] = val;
+ }
malloc_mutex_unlock(&rtree->mutex);
return (false);
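
The radix tree now stores a single byte per key at its leaves (enough for the chunk-membership map consulted by ivsalloc()) instead of a void * value, and takes caller-supplied alloc/dalloc functions so a tree can also be torn down via rtree_delete(). The subkey computation is unchanged: at level i, the next level2bits[i] bits of the key, starting lshift bits from the top, select the child. Not part of the patch: a standalone illustration of that extraction, assuming 64-bit pointers and a made-up two-level split:

#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
        uintptr_t key = (uintptr_t)0x7fffdeadbeef0000ULL;
        unsigned level2bits[2] = {18, 18};      /* hypothetical per-level split */
        unsigned lshift = 0, i;

        for (i = 0; i < 2; i++) {
                unsigned bits = level2bits[i];
                uintptr_t subkey = (key << lshift) >> (64 - bits);

                printf("level %u: subkey = %#" PRIxPTR "\n", i, subkey);
                lshift += bits;
        }
        return (0);
}
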
diff --git a/deps/jemalloc/include/jemalloc/internal/tcache.h b/deps/jemalloc/include/jemalloc/internal/tcache.h
index 38d735c86..c3d4b58d4 100644
--- a/deps/jemalloc/include/jemalloc/internal/tcache.h
+++ b/deps/jemalloc/include/jemalloc/internal/tcache.h
@@ -140,11 +140,11 @@ void tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size);
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
/* Map of thread-specific caches. */
malloc_tsd_externs(tcache, tcache_t *)
-malloc_tsd_funcs(JEMALLOC_INLINE, tcache, tcache_t *, NULL,
+malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache, tcache_t *, NULL,
tcache_thread_cleanup)
/* Per thread flag that allows thread caches to be disabled. */
malloc_tsd_externs(tcache_enabled, tcache_enabled_t)
-malloc_tsd_funcs(JEMALLOC_INLINE, tcache_enabled, tcache_enabled_t,
+malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache_enabled, tcache_enabled_t,
tcache_enabled_default, malloc_tsd_no_cleanup)
JEMALLOC_INLINE void
@@ -206,7 +206,7 @@ tcache_enabled_set(bool enabled)
}
}
-JEMALLOC_INLINE tcache_t *
+JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(bool create)
{
tcache_t *tcache;
@@ -258,7 +258,7 @@ tcache_get(bool create)
return (tcache);
}
-JEMALLOC_INLINE void
+JEMALLOC_ALWAYS_INLINE void
tcache_event(tcache_t *tcache)
{
@@ -271,7 +271,7 @@ tcache_event(tcache_t *tcache)
tcache_event_hard(tcache);
}
-JEMALLOC_INLINE void *
+JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin)
{
void *ret;
@@ -287,7 +287,7 @@ tcache_alloc_easy(tcache_bin_t *tbin)
return (ret);
}
-JEMALLOC_INLINE void *
+JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
{
void *ret;
@@ -297,6 +297,7 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
binind = SMALL_SIZE2BIN(size);
assert(binind < NBINS);
tbin = &tcache->tbins[binind];
+ size = arena_bin_info[binind].reg_size;
ret = tcache_alloc_easy(tbin);
if (ret == NULL) {
ret = tcache_alloc_small_hard(tcache, tbin, binind);
@@ -313,6 +314,7 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
} else if (opt_zero)
memset(ret, 0, size);
}
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
} else {
if (config_fill && opt_junk) {
arena_alloc_junk_small(ret, &arena_bin_info[binind],
@@ -330,7 +332,7 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
return (ret);
}
-JEMALLOC_INLINE void *
+JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
{
void *ret;
@@ -367,6 +369,7 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
else if (opt_zero)
memset(ret, 0, size);
}
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
} else {
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
@@ -382,7 +385,7 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
return (ret);
}
-JEMALLOC_INLINE void
+JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind)
{
tcache_bin_t *tbin;
@@ -406,7 +409,7 @@ tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind)
tcache_event(tcache);
}
-JEMALLOC_INLINE void
+JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
{
size_t binind;
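
Besides promoting the hot-path helpers from JEMALLOC_INLINE to JEMALLOC_ALWAYS_INLINE, tcache_alloc_small() now rounds the request up to the bin's region size (size = arena_bin_info[binind].reg_size) before any junk/zero fill, and both allocation paths mark the returned region undefined for Valgrind. Not part of the patch: the same rounding can be observed from the public API via nallocx(), declared later in this commit; the 13-byte request and 16-byte result assume the default size classes:

#include <stddef.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>  /* assumed install path for the public header */

int
main(void)
{
        size_t req = 13;
        size_t usize = nallocx(req, 0); /* size class the request rounds to */

        printf("request=%zu size class=%zu\n", req, usize);
        return (0);
}
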
diff --git a/deps/jemalloc/include/jemalloc/internal/tsd.h b/deps/jemalloc/include/jemalloc/internal/tsd.h
index 0037cf35e..9fb4a23ec 100644
--- a/deps/jemalloc/include/jemalloc/internal/tsd.h
+++ b/deps/jemalloc/include/jemalloc/internal/tsd.h
@@ -6,6 +6,12 @@
typedef bool (*malloc_tsd_cleanup_t)(void);
+#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
+ !defined(_WIN32))
+typedef struct tsd_init_block_s tsd_init_block_t;
+typedef struct tsd_init_head_s tsd_init_head_t;
+#endif
+
/*
* TLS/TSD-agnostic macro-based implementation of thread-specific data. There
* are four macros that support (at least) three use cases: file-private,
@@ -75,12 +81,13 @@ extern __thread a_type a_name##_tls; \
extern pthread_key_t a_name##_tsd; \
extern bool a_name##_booted;
#elif (defined(_WIN32))
-#define malloc_tsd_externs(a_name, a_type) \
+#define malloc_tsd_externs(a_name, a_type) \
extern DWORD a_name##_tsd; \
extern bool a_name##_booted;
#else
#define malloc_tsd_externs(a_name, a_type) \
extern pthread_key_t a_name##_tsd; \
+extern tsd_init_head_t a_name##_tsd_init_head; \
extern bool a_name##_booted;
#endif
@@ -105,6 +112,10 @@ a_attr bool a_name##_booted = false;
#else
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr pthread_key_t a_name##_tsd; \
+a_attr tsd_init_head_t a_name##_tsd_init_head = { \
+ ql_head_initializer(blocks), \
+ MALLOC_MUTEX_INITIALIZER \
+}; \
a_attr bool a_name##_booted = false;
#endif
@@ -333,8 +344,14 @@ a_name##_tsd_get_wrapper(void) \
pthread_getspecific(a_name##_tsd); \
\
if (wrapper == NULL) { \
+ tsd_init_block_t block; \
+ wrapper = tsd_init_check_recursion( \
+ &a_name##_tsd_init_head, &block); \
+ if (wrapper) \
+ return (wrapper); \
wrapper = (a_name##_tsd_wrapper_t *) \
malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \
+ block.data = wrapper; \
if (wrapper == NULL) { \
malloc_write("<jemalloc>: Error allocating" \
" TSD for "#a_name"\n"); \
@@ -350,6 +367,7 @@ a_name##_tsd_get_wrapper(void) \
" TSD for "#a_name"\n"); \
abort(); \
} \
+ tsd_init_finish(&a_name##_tsd_init_head, &block); \
} \
return (wrapper); \
} \
@@ -379,6 +397,19 @@ a_name##_tsd_set(a_type *val) \
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
+#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
+ !defined(_WIN32))
+struct tsd_init_block_s {
+ ql_elm(tsd_init_block_t) link;
+ pthread_t thread;
+ void *data;
+};
+struct tsd_init_head_s {
+ ql_head(tsd_init_block_t) blocks;
+ malloc_mutex_t lock;
+};
+#endif
+
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
@@ -388,6 +419,12 @@ void malloc_tsd_dalloc(void *wrapper);
void malloc_tsd_no_cleanup(void *);
void malloc_tsd_cleanup_register(bool (*f)(void));
void malloc_tsd_boot(void);
+#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
+ !defined(_WIN32))
+void *tsd_init_check_recursion(tsd_init_head_t *head,
+ tsd_init_block_t *block);
+void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);
+#endif
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
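
On platforms with neither native TLS nor _malloc_thread_cleanup() (the pthread_setspecific() fallback path), the wrapper getter is now guarded against re-entry: pthread_setspecific() may itself allocate, which would recurse into the getter before the wrapper is published. tsd_init_check_recursion() and tsd_init_finish() record an in-flight initialization per thread on the new blocks list. Their implementation lives in src/tsd.c; the following is a plausible shape of that pair, not a copy of it:

void *
tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
{
        pthread_t self = pthread_self();
        tsd_init_block_t *iter;

        /* Re-entry? Return the partially initialized wrapper recorded below. */
        malloc_mutex_lock(&head->lock);
        ql_foreach(iter, &head->blocks, link) {
                if (iter->thread == self) {
                        malloc_mutex_unlock(&head->lock);
                        return (iter->data);
                }
        }
        /* First entry for this thread: register the in-flight block. */
        ql_elm_new(block, link);
        block->thread = self;
        ql_tail_insert(&head->blocks, block, link);
        malloc_mutex_unlock(&head->lock);
        return (NULL);
}

void
tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block)
{
        malloc_mutex_lock(&head->lock);
        ql_remove(&head->blocks, block, link);
        malloc_mutex_unlock(&head->lock);
}
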
diff --git a/deps/jemalloc/include/jemalloc/internal/util.h b/deps/jemalloc/include/jemalloc/internal/util.h
index 847969363..6b938f746 100644
--- a/deps/jemalloc/include/jemalloc/internal/util.h
+++ b/deps/jemalloc/include/jemalloc/internal/util.h
@@ -14,7 +14,7 @@
* Wrap a cpp argument that contains commas such that it isn't broken up into
* multiple arguments.
*/
-#define JEMALLOC_CONCAT(...) __VA_ARGS__
+#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__
/*
* Silence compiler warnings due to uninitialized values. This is used
@@ -42,12 +42,6 @@
} while (0)
#endif
-/* Use to assert a particular configuration, e.g., cassert(config_debug). */
-#define cassert(c) do { \
- if ((c) == false) \
- assert(false); \
-} while (0)
-
#ifndef not_reached
#define not_reached() do { \
if (config_debug) { \
@@ -69,10 +63,18 @@
} while (0)
#endif
+#ifndef assert_not_implemented
#define assert_not_implemented(e) do { \
if (config_debug && !(e)) \
not_implemented(); \
} while (0)
+#endif
+
+/* Use to assert a particular configuration, e.g., cassert(config_debug). */
+#define cassert(c) do { \
+ if ((c) == false) \
+ not_reached(); \
+} while (0)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
@@ -82,8 +84,9 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-int buferror(char *buf, size_t buflen);
-uintmax_t malloc_strtoumax(const char *nptr, char **endptr, int base);
+int buferror(int err, char *buf, size_t buflen);
+uintmax_t malloc_strtoumax(const char *restrict nptr,
+ char **restrict endptr, int base);
void malloc_write(const char *s);
/*
@@ -107,7 +110,6 @@ void malloc_printf(const char *format, ...)
#ifndef JEMALLOC_ENABLE_INLINE
size_t pow2_ceil(size_t x);
-void malloc_write(const char *s);
void set_errno(int errnum);
int get_errno(void);
#endif
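
Three small interface changes in util.h: JEMALLOC_CONCAT is renamed JEMALLOC_ARG_CONCAT, buferror() now takes the error code explicitly instead of reading errno itself, and cassert() reports through not_reached() rather than a bare assert(false). Not part of the patch: illustrative internal call sites for the two function-like changes:

static void
report_error(int err)
{
        char ebuf[128];

        /* New signature: the saved error code is passed in explicitly. */
        buferror(err, ebuf, sizeof(ebuf));
        malloc_printf("<jemalloc>: operation failed: %s\n", ebuf);
}

static void
stats_only_path(void)
{
        /* Configuration gate: reports and aborts (when assertions are
         * compiled in) if the library was built without the stats feature. */
        cassert(config_stats);
}
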
diff --git a/deps/jemalloc/include/jemalloc/jemalloc.h.in b/deps/jemalloc/include/jemalloc/jemalloc.h.in
deleted file mode 100644
index 31b1304a2..000000000
--- a/deps/jemalloc/include/jemalloc/jemalloc.h.in
+++ /dev/null
@@ -1,157 +0,0 @@
-#ifndef JEMALLOC_H_
-#define JEMALLOC_H_
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <limits.h>
-#include <strings.h>
-
-#define JEMALLOC_VERSION "@jemalloc_version@"
-#define JEMALLOC_VERSION_MAJOR @jemalloc_version_major@
-#define JEMALLOC_VERSION_MINOR @jemalloc_version_minor@
-#define JEMALLOC_VERSION_BUGFIX @jemalloc_version_bugfix@
-#define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@
-#define JEMALLOC_VERSION_GID "@jemalloc_version_gid@"
-
-#include "jemalloc_defs@install_suffix@.h"
-
-#ifdef JEMALLOC_EXPERIMENTAL
-#define ALLOCM_LG_ALIGN(la) (la)
-#if LG_SIZEOF_PTR == 2
-#define ALLOCM_ALIGN(a) (ffs(a)-1)
-#else
-#define ALLOCM_ALIGN(a) ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
-#endif
-#define ALLOCM_ZERO ((int)0x40)
-#define ALLOCM_NO_MOVE ((int)0x80)
-/* Bias arena index bits so that 0 encodes "ALLOCM_ARENA() unspecified". */
-#define ALLOCM_ARENA(a) ((int)(((a)+1) << 8))
-
-#define ALLOCM_SUCCESS 0
-#define ALLOCM_ERR_OOM 1
-#define ALLOCM_ERR_NOT_MOVED 2
-#endif
-
-/*
- * The je_ prefix on the following public symbol declarations is an artifact of
- * namespace management, and should be omitted in application code unless
- * JEMALLOC_NO_DEMANGLE is defined (see below).
- */
-extern JEMALLOC_EXPORT const char *je_malloc_conf;
-extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
- const char *s);
-
-JEMALLOC_EXPORT void *je_malloc(size_t size) JEMALLOC_ATTR(malloc);
-JEMALLOC_EXPORT void *je_calloc(size_t num, size_t size)
- JEMALLOC_ATTR(malloc);
-JEMALLOC_EXPORT int je_posix_memalign(void **memptr, size_t alignment,
- size_t size) JEMALLOC_ATTR(nonnull(1));
-JEMALLOC_EXPORT void *je_aligned_alloc(size_t alignment, size_t size)
- JEMALLOC_ATTR(malloc);
-JEMALLOC_EXPORT void *je_realloc(void *ptr, size_t size);
-JEMALLOC_EXPORT void je_free(void *ptr);
-
-#ifdef JEMALLOC_OVERRIDE_MEMALIGN
-JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size)
- JEMALLOC_ATTR(malloc);
-#endif
-
-#ifdef JEMALLOC_OVERRIDE_VALLOC
-JEMALLOC_EXPORT void * je_valloc(size_t size) JEMALLOC_ATTR(malloc);
-#endif
-
-JEMALLOC_EXPORT size_t je_malloc_usable_size(
- JEMALLOC_USABLE_SIZE_CONST void *ptr);
-JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *,
- const char *), void *je_cbopaque, const char *opts);
-JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen);
-JEMALLOC_EXPORT int je_mallctlnametomib(const char *name, size_t *mibp,
- size_t *miblenp);
-JEMALLOC_EXPORT int je_mallctlbymib(const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen);
-
-#ifdef JEMALLOC_EXPERIMENTAL
-JEMALLOC_EXPORT int je_allocm(void **ptr, size_t *rsize, size_t size,
- int flags) JEMALLOC_ATTR(nonnull(1));
-JEMALLOC_EXPORT int je_rallocm(void **ptr, size_t *rsize, size_t size,
- size_t extra, int flags) JEMALLOC_ATTR(nonnull(1));
-JEMALLOC_EXPORT int je_sallocm(const void *ptr, size_t *rsize, int flags)
- JEMALLOC_ATTR(nonnull(1));
-JEMALLOC_EXPORT int je_dallocm(void *ptr, int flags)
- JEMALLOC_ATTR(nonnull(1));
-JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags);
-#endif
-
-/*
- * By default application code must explicitly refer to mangled symbol names,
- * so that it is possible to use jemalloc in conjunction with another allocator
- * in the same application. Define JEMALLOC_MANGLE in order to cause automatic
- * name mangling that matches the API prefixing that happened as a result of
- * --with-mangling and/or --with-jemalloc-prefix configuration settings.
- */
-#ifdef JEMALLOC_MANGLE
-#ifndef JEMALLOC_NO_DEMANGLE
-#define JEMALLOC_NO_DEMANGLE
-#endif
-#define malloc_conf je_malloc_conf
-#define malloc_message je_malloc_message
-#define malloc je_malloc
-#define calloc je_calloc
-#define posix_memalign je_posix_memalign
-#define aligned_alloc je_aligned_alloc
-#define realloc je_realloc
-#define free je_free
-#define malloc_usable_size je_malloc_usable_size
-#define malloc_stats_print je_malloc_stats_print
-#define mallctl je_mallctl
-#define mallctlnametomib je_mallctlnametomib
-#define mallctlbymib je_mallctlbymib
-#define memalign je_memalign
-#define valloc je_valloc
-#ifdef JEMALLOC_EXPERIMENTAL
-#define allocm je_allocm
-#define rallocm je_rallocm
-#define sallocm je_sallocm
-#define dallocm je_dallocm
-#define nallocm je_nallocm
-#endif
-#endif
-
-/*
- * The je_* macros can be used as stable alternative names for the public
- * jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily meant
- * for use in jemalloc itself, but it can be used by application code to
- * provide isolation from the name mangling specified via --with-mangling
- * and/or --with-jemalloc-prefix.
- */
-#ifndef JEMALLOC_NO_DEMANGLE
-#undef je_malloc_conf
-#undef je_malloc_message
-#undef je_malloc
-#undef je_calloc
-#undef je_posix_memalign
-#undef je_aligned_alloc
-#undef je_realloc
-#undef je_free
-#undef je_malloc_usable_size
-#undef je_malloc_stats_print
-#undef je_mallctl
-#undef je_mallctlnametomib
-#undef je_mallctlbymib
-#undef je_memalign
-#undef je_valloc
-#ifdef JEMALLOC_EXPERIMENTAL
-#undef je_allocm
-#undef je_rallocm
-#undef je_sallocm
-#undef je_dallocm
-#undef je_nallocm
-#endif
-#endif
-
-#ifdef __cplusplus
-};
-#endif
-#endif /* JEMALLOC_H_ */
diff --git a/deps/jemalloc/include/jemalloc/jemalloc.sh b/deps/jemalloc/include/jemalloc/jemalloc.sh
new file mode 100755
index 000000000..e4738ebae
--- /dev/null
+++ b/deps/jemalloc/include/jemalloc/jemalloc.sh
@@ -0,0 +1,28 @@
+#!/bin/sh
+
+objroot=$1
+
+cat <<EOF
+#ifndef JEMALLOC_H_
+#define JEMALLOC_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+EOF
+
+for hdr in jemalloc_defs.h jemalloc_rename.h jemalloc_macros.h \
+ jemalloc_protos.h jemalloc_mangle.h ; do
+ cat "${objroot}include/jemalloc/${hdr}" \
+ | grep -v 'Generated from .* by configure\.' \
+ | sed -e 's/^#define /#define /g' \
+ | sed -e 's/ $//g'
+ echo
+done
+
+cat <<EOF
+#ifdef __cplusplus
+};
+#endif
+#endif /* JEMALLOC_H_ */
+EOF
diff --git a/deps/jemalloc/include/jemalloc/jemalloc_defs.h.in b/deps/jemalloc/include/jemalloc/jemalloc_defs.h.in
index 1cd60254a..eb38d7105 100644
--- a/deps/jemalloc/include/jemalloc/jemalloc_defs.h.in
+++ b/deps/jemalloc/include/jemalloc/jemalloc_defs.h.in
@@ -1,222 +1,12 @@
-/*
- * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
- * public APIs to be prefixed. This makes it possible, with some care, to use
- * multiple allocators simultaneously.
- */
-#undef JEMALLOC_PREFIX
-#undef JEMALLOC_CPREFIX
-
-/*
- * Name mangling for public symbols is controlled by --with-mangling and
- * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
- * these macro definitions.
- */
-#undef je_malloc_conf
-#undef je_malloc_message
-#undef je_malloc
-#undef je_calloc
-#undef je_posix_memalign
-#undef je_aligned_alloc
-#undef je_realloc
-#undef je_free
-#undef je_malloc_usable_size
-#undef je_malloc_stats_print
-#undef je_mallctl
-#undef je_mallctlnametomib
-#undef je_mallctlbymib
-#undef je_memalign
-#undef je_valloc
-#undef je_allocm
-#undef je_rallocm
-#undef je_sallocm
-#undef je_dallocm
-#undef je_nallocm
-
-/*
- * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
- * For shared libraries, symbol visibility mechanisms prevent these symbols
- * from being exported, but for static libraries, naming collisions are a real
- * possibility.
- */
-#undef JEMALLOC_PRIVATE_NAMESPACE
-#undef JEMALLOC_N
-
-/*
- * Hyper-threaded CPUs may need a special instruction inside spin loops in
- * order to yield to another virtual CPU.
- */
-#undef CPU_SPINWAIT
-
-/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */
-#undef JEMALLOC_ATOMIC9
-
-/*
- * Defined if OSAtomic*() functions are available, as provided by Darwin, and
- * documented in the atomic(3) manual page.
- */
-#undef JEMALLOC_OSATOMIC
-
-/*
- * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
- * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
- * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
- * functions are defined in libgcc instead of being inlines)
- */
-#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4
-
-/*
- * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
- * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
- * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
- * functions are defined in libgcc instead of being inlines)
- */
-#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8
-
-/*
- * Defined if OSSpin*() functions are available, as provided by Darwin, and
- * documented in the spinlock(3) manual page.
- */
-#undef JEMALLOC_OSSPIN
-
-/*
- * Defined if _malloc_thread_cleanup() exists. At least in the case of
- * FreeBSD, pthread_key_create() allocates, which if used during malloc
- * bootstrapping will cause recursion into the pthreads library. Therefore, if
- * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
- * malloc_tsd.
- */
-#undef JEMALLOC_MALLOC_THREAD_CLEANUP
-
-/*
- * Defined if threaded initialization is known to be safe on this platform.
- * Among other things, it must be possible to initialize a mutex without
- * triggering allocation in order for threaded allocation to be safe.
- */
-#undef JEMALLOC_THREADED_INIT
-
-/*
- * Defined if the pthreads implementation defines
- * _pthread_mutex_init_calloc_cb(), in which case the function is used in order
- * to avoid recursive allocation during mutex initialization.
- */
-#undef JEMALLOC_MUTEX_INIT_CB
-
/* Defined if __attribute__((...)) syntax is supported. */
#undef JEMALLOC_HAVE_ATTR
-#ifdef JEMALLOC_HAVE_ATTR
-# define JEMALLOC_ATTR(s) __attribute__((s))
-# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
-# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
-# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
-# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
-#elif _MSC_VER
-# define JEMALLOC_ATTR(s)
-# ifdef DLLEXPORT
-# define JEMALLOC_EXPORT __declspec(dllexport)
-# else
-# define JEMALLOC_EXPORT __declspec(dllimport)
-# endif
-# define JEMALLOC_ALIGNED(s) __declspec(align(s))
-# define JEMALLOC_SECTION(s) __declspec(allocate(s))
-# define JEMALLOC_NOINLINE __declspec(noinline)
-#else
-# define JEMALLOC_ATTR(s)
-# define JEMALLOC_EXPORT
-# define JEMALLOC_ALIGNED(s)
-# define JEMALLOC_SECTION(s)
-# define JEMALLOC_NOINLINE
-#endif
-
-/* Defined if sbrk() is supported. */
-#undef JEMALLOC_HAVE_SBRK
-
-/* Non-empty if the tls_model attribute is supported. */
-#undef JEMALLOC_TLS_MODEL
-
-/* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */
-#undef JEMALLOC_CC_SILENCE
-
-/*
- * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
- * inline functions.
- */
-#undef JEMALLOC_DEBUG
-
-/* JEMALLOC_STATS enables statistics calculation. */
-#undef JEMALLOC_STATS
-
-/* JEMALLOC_PROF enables allocation profiling. */
-#undef JEMALLOC_PROF
-
-/* Use libunwind for profile backtracing if defined. */
-#undef JEMALLOC_PROF_LIBUNWIND
-
-/* Use libgcc for profile backtracing if defined. */
-#undef JEMALLOC_PROF_LIBGCC
-
-/* Use gcc intrinsics for profile backtracing if defined. */
-#undef JEMALLOC_PROF_GCC
-
-/*
- * JEMALLOC_TCACHE enables a thread-specific caching layer for small objects.
- * This makes it possible to allocate/deallocate objects without any locking
- * when the cache is in the steady state.
- */
-#undef JEMALLOC_TCACHE
-
-/*
- * JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage
- * segment (DSS).
- */
-#undef JEMALLOC_DSS
-
-/* Support memory filling (junk/zero/quarantine/redzone). */
-#undef JEMALLOC_FILL
/* Support the experimental API. */
#undef JEMALLOC_EXPERIMENTAL
-/* Support utrace(2)-based tracing. */
-#undef JEMALLOC_UTRACE
-
-/* Support Valgrind. */
-#undef JEMALLOC_VALGRIND
-
-/* Support optional abort() on OOM. */
-#undef JEMALLOC_XMALLOC
-
-/* Support lazy locking (avoid locking unless a second thread is launched). */
-#undef JEMALLOC_LAZY_LOCK
-
-/* One page is 2^STATIC_PAGE_SHIFT bytes. */
-#undef STATIC_PAGE_SHIFT
-
-/*
- * If defined, use munmap() to unmap freed chunks, rather than storing them for
- * later reuse. This is disabled by default on Linux because common sequences
- * of mmap()/munmap() calls will cause virtual memory map holes.
- */
-#undef JEMALLOC_MUNMAP
-
-/*
- * If defined, use mremap(...MREMAP_FIXED...) for huge realloc(). This is
- * disabled by default because it is Linux-specific and it will cause virtual
- * memory map holes, much like munmap(2) does.
- */
-#undef JEMALLOC_MREMAP
-
-/* TLS is used to map arenas and magazine caches to threads. */
-#undef JEMALLOC_TLS
-
/*
- * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside
- * within jemalloc-owned chunks before dereferencing them.
- */
-#undef JEMALLOC_IVSALLOC
-
-/*
- * Define overrides for non-standard allocator-related functions if they
- * are present on the system.
+ * Define overrides for non-standard allocator-related functions if they are
+ * present on the system.
*/
#undef JEMALLOC_OVERRIDE_MEMALIGN
#undef JEMALLOC_OVERRIDE_VALLOC
@@ -230,33 +20,5 @@
*/
#undef JEMALLOC_USABLE_SIZE_CONST
-/*
- * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
- */
-#undef JEMALLOC_ZONE
-#undef JEMALLOC_ZONE_VERSION
-
-/*
- * Methods for purging unused pages differ between operating systems.
- *
- * madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages,
- * such that new pages will be demand-zeroed if
- * the address region is later touched.
- * madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being
- * unused, such that they will be discarded rather
- * than swapped out.
- */
-#undef JEMALLOC_PURGE_MADVISE_DONTNEED
-#undef JEMALLOC_PURGE_MADVISE_FREE
-
/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
#undef LG_SIZEOF_PTR
-
-/* sizeof(int) == 2^LG_SIZEOF_INT. */
-#undef LG_SIZEOF_INT
-
-/* sizeof(long) == 2^LG_SIZEOF_LONG. */
-#undef LG_SIZEOF_LONG
-
-/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
-#undef LG_SIZEOF_INTMAX_T
diff --git a/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in b/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in
new file mode 100644
index 000000000..13dbdd912
--- /dev/null
+++ b/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in
@@ -0,0 +1,61 @@
+#include <limits.h>
+#include <strings.h>
+
+#define JEMALLOC_VERSION "@jemalloc_version@"
+#define JEMALLOC_VERSION_MAJOR @jemalloc_version_major@
+#define JEMALLOC_VERSION_MINOR @jemalloc_version_minor@
+#define JEMALLOC_VERSION_BUGFIX @jemalloc_version_bugfix@
+#define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@
+#define JEMALLOC_VERSION_GID "@jemalloc_version_gid@"
+
+# define MALLOCX_LG_ALIGN(la) (la)
+# if LG_SIZEOF_PTR == 2
+# define MALLOCX_ALIGN(a) (ffs(a)-1)
+# else
+# define MALLOCX_ALIGN(a) \
+ ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
+# endif
+# define MALLOCX_ZERO ((int)0x40)
+/* Bias arena index bits so that 0 encodes "MALLOCX_ARENA() unspecified". */
+# define MALLOCX_ARENA(a) ((int)(((a)+1) << 8))
+
+#ifdef JEMALLOC_EXPERIMENTAL
+# define ALLOCM_LG_ALIGN(la) (la)
+# if LG_SIZEOF_PTR == 2
+# define ALLOCM_ALIGN(a) (ffs(a)-1)
+# else
+# define ALLOCM_ALIGN(a) \
+ ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
+# endif
+# define ALLOCM_ZERO ((int)0x40)
+# define ALLOCM_NO_MOVE ((int)0x80)
+/* Bias arena index bits so that 0 encodes "ALLOCM_ARENA() unspecified". */
+# define ALLOCM_ARENA(a) ((int)(((a)+1) << 8))
+# define ALLOCM_SUCCESS 0
+# define ALLOCM_ERR_OOM 1
+# define ALLOCM_ERR_NOT_MOVED 2
+#endif
+
+#ifdef JEMALLOC_HAVE_ATTR
+# define JEMALLOC_ATTR(s) __attribute__((s))
+# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
+# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
+# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
+# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
+#elif _MSC_VER
+# define JEMALLOC_ATTR(s)
+# ifdef DLLEXPORT
+# define JEMALLOC_EXPORT __declspec(dllexport)
+# else
+# define JEMALLOC_EXPORT __declspec(dllimport)
+# endif
+# define JEMALLOC_ALIGNED(s) __declspec(align(s))
+# define JEMALLOC_SECTION(s) __declspec(allocate(s))
+# define JEMALLOC_NOINLINE __declspec(noinline)
+#else
+# define JEMALLOC_ATTR(s)
+# define JEMALLOC_EXPORT
+# define JEMALLOC_ALIGNED(s)
+# define JEMALLOC_SECTION(s)
+# define JEMALLOC_NOINLINE
+#endif
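
The new stable MALLOCX_* flags mirror the experimental ALLOCM_* ones: alignment is encoded as its base-2 logarithm in the low bits, MALLOCX_ZERO is bit 0x40, and arena indices are biased by one and placed in bits 8 and up. Not part of the patch: the encoding spelled out, assuming a 64-bit build (where the second definition of MALLOCX_ALIGN applies):

#include <stdio.h>
#include <strings.h>            /* ffs() */

int
main(void)
{
        int align_flag = ffs(64) - 1;           /* MALLOCX_ALIGN(64) == lg(64) == 6 */
        int zero_flag  = 0x40;                  /* MALLOCX_ZERO */
        int arena_flag = (int)((3 + 1) << 8);   /* MALLOCX_ARENA(3) */

        printf("flags = %#x\n", align_flag | zero_flag | arena_flag);
        return (0);
}
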
diff --git a/deps/jemalloc/include/jemalloc/jemalloc_mangle.sh b/deps/jemalloc/include/jemalloc/jemalloc_mangle.sh
new file mode 100755
index 000000000..df328b78d
--- /dev/null
+++ b/deps/jemalloc/include/jemalloc/jemalloc_mangle.sh
@@ -0,0 +1,45 @@
+#!/bin/sh
+
+public_symbols_txt=$1
+symbol_prefix=$2
+
+cat <<EOF
+/*
+ * By default application code must explicitly refer to mangled symbol names,
+ * so that it is possible to use jemalloc in conjunction with another allocator
+ * in the same application. Define JEMALLOC_MANGLE in order to cause automatic
+ * name mangling that matches the API prefixing that happened as a result of
+ * --with-mangling and/or --with-jemalloc-prefix configuration settings.
+ */
+#ifdef JEMALLOC_MANGLE
+# ifndef JEMALLOC_NO_DEMANGLE
+# define JEMALLOC_NO_DEMANGLE
+# endif
+EOF
+
+for nm in `cat ${public_symbols_txt}` ; do
+ n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
+ echo "# define ${n} ${symbol_prefix}${n}"
+done
+
+cat <<EOF
+#endif
+
+/*
+ * The ${symbol_prefix}* macros can be used as stable alternative names for the
+ * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily
+ * meant for use in jemalloc itself, but it can be used by application code to
+ * provide isolation from the name mangling specified via --with-mangling
+ * and/or --with-jemalloc-prefix.
+ */
+#ifndef JEMALLOC_NO_DEMANGLE
+EOF
+
+for nm in `cat ${public_symbols_txt}` ; do
+ n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
+ echo "# undef ${symbol_prefix}${n}"
+done
+
+cat <<EOF
+#endif
+EOF
diff --git a/deps/jemalloc/include/jemalloc/jemalloc_protos.h.in b/deps/jemalloc/include/jemalloc/jemalloc_protos.h.in
new file mode 100644
index 000000000..25446de3d
--- /dev/null
+++ b/deps/jemalloc/include/jemalloc/jemalloc_protos.h.in
@@ -0,0 +1,58 @@
+/*
+ * The @je_@ prefix on the following public symbol declarations is an artifact
+ * of namespace management, and should be omitted in application code unless
+ * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle@install_suffix@.h).
+ */
+extern JEMALLOC_EXPORT const char *@je_@malloc_conf;
+extern JEMALLOC_EXPORT void (*@je_@malloc_message)(void *cbopaque,
+ const char *s);
+
+JEMALLOC_EXPORT void *@je_@malloc(size_t size) JEMALLOC_ATTR(malloc);
+JEMALLOC_EXPORT void *@je_@calloc(size_t num, size_t size)
+ JEMALLOC_ATTR(malloc);
+JEMALLOC_EXPORT int @je_@posix_memalign(void **memptr, size_t alignment,
+ size_t size) JEMALLOC_ATTR(nonnull(1));
+JEMALLOC_EXPORT void *@je_@aligned_alloc(size_t alignment, size_t size)
+ JEMALLOC_ATTR(malloc);
+JEMALLOC_EXPORT void *@je_@realloc(void *ptr, size_t size);
+JEMALLOC_EXPORT void @je_@free(void *ptr);
+
+JEMALLOC_EXPORT void *@je_@mallocx(size_t size, int flags);
+JEMALLOC_EXPORT void *@je_@rallocx(void *ptr, size_t size, int flags);
+JEMALLOC_EXPORT size_t @je_@xallocx(void *ptr, size_t size, size_t extra,
+ int flags);
+JEMALLOC_EXPORT size_t @je_@sallocx(const void *ptr, int flags);
+JEMALLOC_EXPORT void @je_@dallocx(void *ptr, int flags);
+JEMALLOC_EXPORT size_t @je_@nallocx(size_t size, int flags);
+
+JEMALLOC_EXPORT int @je_@mallctl(const char *name, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen);
+JEMALLOC_EXPORT int @je_@mallctlnametomib(const char *name, size_t *mibp,
+ size_t *miblenp);
+JEMALLOC_EXPORT int @je_@mallctlbymib(const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen);
+JEMALLOC_EXPORT void @je_@malloc_stats_print(void (*write_cb)(void *,
+ const char *), void *@je_@cbopaque, const char *opts);
+JEMALLOC_EXPORT size_t @je_@malloc_usable_size(
+ JEMALLOC_USABLE_SIZE_CONST void *ptr);
+
+#ifdef JEMALLOC_OVERRIDE_MEMALIGN
+JEMALLOC_EXPORT void * @je_@memalign(size_t alignment, size_t size)
+ JEMALLOC_ATTR(malloc);
+#endif
+
+#ifdef JEMALLOC_OVERRIDE_VALLOC
+JEMALLOC_EXPORT void * @je_@valloc(size_t size) JEMALLOC_ATTR(malloc);
+#endif
+
+#ifdef JEMALLOC_EXPERIMENTAL
+JEMALLOC_EXPORT int @je_@allocm(void **ptr, size_t *rsize, size_t size,
+ int flags) JEMALLOC_ATTR(nonnull(1));
+JEMALLOC_EXPORT int @je_@rallocm(void **ptr, size_t *rsize, size_t size,
+ size_t extra, int flags) JEMALLOC_ATTR(nonnull(1));
+JEMALLOC_EXPORT int @je_@sallocm(const void *ptr, size_t *rsize, int flags)
+ JEMALLOC_ATTR(nonnull(1));
+JEMALLOC_EXPORT int @je_@dallocm(void *ptr, int flags)
+ JEMALLOC_ATTR(nonnull(1));
+JEMALLOC_EXPORT int @je_@nallocm(size_t *rsize, size_t size, int flags);
+#endif
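
These prototypes introduce the stable *allocx family (mallocx, rallocx, xallocx, sallocx, dallocx, nallocx) alongside the experimental *allocm functions they are meant to replace. Not part of the patch: a minimal usage sketch; the include path and the unprefixed names assume a default build where the je_ prefix is stripped by jemalloc_rename.h:

#include <stddef.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
        void *p, *q;
        size_t usable;

        /* 64-byte aligned, zero-filled allocation. */
        p = mallocx(100, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
        if (p == NULL)
                return (1);
        usable = sallocx(p, 0);         /* actual size class backing p */
        q = rallocx(p, 4096, 0);        /* may move the object */
        if (q != NULL)
                p = q;
        dallocx(p, 0);
        return (usable >= 100 ? 0 : 1);
}
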
diff --git a/deps/jemalloc/include/jemalloc/jemalloc_rename.sh b/deps/jemalloc/include/jemalloc/jemalloc_rename.sh
new file mode 100755
index 000000000..f94389120
--- /dev/null
+++ b/deps/jemalloc/include/jemalloc/jemalloc_rename.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+public_symbols_txt=$1
+
+cat <<EOF
+/*
+ * Name mangling for public symbols is controlled by --with-mangling and
+ * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
+ * these macro definitions.
+ */
+#ifndef JEMALLOC_NO_RENAME
+EOF
+
+for nm in `cat ${public_symbols_txt}` ; do
+ n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
+ m=`echo ${nm} |tr ':' ' ' |awk '{print $2}'`
+ echo "# define je_${n} ${m}"
+done
+
+cat <<EOF
+#endif
+EOF
diff --git a/deps/jemalloc/src/arena.c b/deps/jemalloc/src/arena.c
index 0c53b071b..dad707b63 100644
--- a/deps/jemalloc/src/arena.c
+++ b/deps/jemalloc/src/arena.c
@@ -38,52 +38,18 @@ const uint8_t small_size2bin[] = {
};
/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static void arena_avail_insert(arena_t *arena, arena_chunk_t *chunk,
- size_t pageind, size_t npages, bool maybe_adjac_pred,
- bool maybe_adjac_succ);
-static void arena_avail_remove(arena_t *arena, arena_chunk_t *chunk,
- size_t pageind, size_t npages, bool maybe_adjac_pred,
- bool maybe_adjac_succ);
-static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
- bool large, size_t binind, bool zero);
-static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
-static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
-static arena_run_t *arena_run_alloc_helper(arena_t *arena, size_t size,
- bool large, size_t binind, bool zero);
-static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
- size_t binind, bool zero);
-static arena_chunk_t *chunks_dirty_iter_cb(arena_chunk_tree_t *tree,
- arena_chunk_t *chunk, void *arg);
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
+
static void arena_purge(arena_t *arena, bool all);
static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
bool cleaned);
-static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
- arena_run_t *run, size_t oldsize, size_t newsize);
-static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
- arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
-static arena_run_t *arena_bin_runs_first(arena_bin_t *bin);
-static void arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run);
-static void arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run);
-static arena_run_t *arena_bin_nonfull_run_tryget(arena_bin_t *bin);
-static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
-static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
-static void arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
- arena_bin_t *bin);
static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, arena_bin_t *bin);
-static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
- void *ptr, size_t oldsize, size_t size);
-static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
- void *ptr, size_t oldsize, size_t size, size_t extra, bool zero);
-static bool arena_ralloc_large(void *ptr, size_t oldsize, size_t size,
- size_t extra, bool zero);
-static size_t bin_info_run_size_calc(arena_bin_info_t *bin_info,
- size_t min_run_size);
-static void bin_info_init(void);
/******************************************************************************/
@@ -359,60 +325,73 @@ arena_run_reg_dalloc(arena_run_t *run, void *ptr)
}
static inline void
-arena_chunk_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
+arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
+{
+
+ VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
+ LG_PAGE)), (npages << LG_PAGE));
+ memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
+ (npages << LG_PAGE));
+}
+
+static inline void
+arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
+{
+
+ VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind <<
+ LG_PAGE)), PAGE);
+}
+
+static inline void
+arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
size_t i;
UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
+ arena_run_page_mark_zeroed(chunk, run_ind);
for (i = 0; i < PAGE / sizeof(size_t); i++)
assert(p[i] == 0);
}
static void
-arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
- size_t binind, bool zero)
+arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages)
{
- arena_chunk_t *chunk;
- size_t run_ind, total_pages, need_pages, rem_pages, i;
- size_t flag_dirty;
- assert((large && binind == BININD_INVALID) || (large == false && binind
- != BININD_INVALID));
+ if (config_stats) {
+ ssize_t cactive_diff = CHUNK_CEILING((arena->nactive +
+ add_pages) << LG_PAGE) - CHUNK_CEILING((arena->nactive -
+ sub_pages) << LG_PAGE);
+ if (cactive_diff != 0)
+ stats_cactive_add(cactive_diff);
+ }
+}
+
+static void
+arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
+ size_t flag_dirty, size_t need_pages)
+{
+ size_t total_pages, rem_pages;
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
- flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
LG_PAGE;
assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
flag_dirty);
- need_pages = (size >> LG_PAGE);
- assert(need_pages > 0);
assert(need_pages <= total_pages);
rem_pages = total_pages - need_pages;
arena_avail_remove(arena, chunk, run_ind, total_pages, true, true);
- if (config_stats) {
- /*
- * Update stats_cactive if nactive is crossing a chunk
- * multiple.
- */
- size_t cactive_diff = CHUNK_CEILING((arena->nactive +
- need_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
- LG_PAGE);
- if (cactive_diff != 0)
- stats_cactive_add(cactive_diff);
- }
+ arena_cactive_update(arena, need_pages, 0);
arena->nactive += need_pages;
/* Keep track of trailing unused pages for later use. */
if (rem_pages > 0) {
if (flag_dirty != 0) {
- arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
- (rem_pages << LG_PAGE), CHUNK_MAP_DIRTY);
+ arena_mapbits_unallocated_set(chunk,
+ run_ind+need_pages, (rem_pages << LG_PAGE),
+ flag_dirty);
arena_mapbits_unallocated_set(chunk,
run_ind+total_pages-1, (rem_pages << LG_PAGE),
- CHUNK_MAP_DIRTY);
+ flag_dirty);
} else {
arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
(rem_pages << LG_PAGE),
@@ -426,156 +405,219 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages,
false, true);
}
+}
- /*
- * Update the page map separately for large vs. small runs, since it is
- * possible to avoid iteration for large mallocs.
- */
- if (large) {
- if (zero) {
- if (flag_dirty == 0) {
- /*
- * The run is clean, so some pages may be
- * zeroed (i.e. never before touched).
- */
- for (i = 0; i < need_pages; i++) {
- if (arena_mapbits_unzeroed_get(chunk,
- run_ind+i) != 0) {
- VALGRIND_MAKE_MEM_UNDEFINED(
- (void *)((uintptr_t)
- chunk + ((run_ind+i) <<
- LG_PAGE)), PAGE);
- memset((void *)((uintptr_t)
- chunk + ((run_ind+i) <<
- LG_PAGE)), 0, PAGE);
- } else if (config_debug) {
- VALGRIND_MAKE_MEM_DEFINED(
- (void *)((uintptr_t)
- chunk + ((run_ind+i) <<
- LG_PAGE)), PAGE);
- arena_chunk_validate_zeroed(
- chunk, run_ind+i);
- }
+static void
+arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
+ bool remove, bool zero)
+{
+ arena_chunk_t *chunk;
+ size_t flag_dirty, run_ind, need_pages, i;
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
+ flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
+ need_pages = (size >> LG_PAGE);
+ assert(need_pages > 0);
+
+ if (remove) {
+ arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
+ need_pages);
+ }
+
+ if (zero) {
+ if (flag_dirty == 0) {
+ /*
+ * The run is clean, so some pages may be zeroed (i.e.
+ * never before touched).
+ */
+ for (i = 0; i < need_pages; i++) {
+ if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
+ != 0)
+ arena_run_zero(chunk, run_ind+i, 1);
+ else if (config_debug) {
+ arena_run_page_validate_zeroed(chunk,
+ run_ind+i);
+ } else {
+ arena_run_page_mark_zeroed(chunk,
+ run_ind+i);
}
- } else {
- /*
- * The run is dirty, so all pages must be
- * zeroed.
- */
- VALGRIND_MAKE_MEM_UNDEFINED((void
- *)((uintptr_t)chunk + (run_ind <<
- LG_PAGE)), (need_pages << LG_PAGE));
- memset((void *)((uintptr_t)chunk + (run_ind <<
- LG_PAGE)), 0, (need_pages << LG_PAGE));
}
+ } else {
+ /* The run is dirty, so all pages must be zeroed. */
+ arena_run_zero(chunk, run_ind, need_pages);
}
-
- /*
- * Set the last element first, in case the run only contains one
- * page (i.e. both statements set the same element).
- */
- arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0,
- flag_dirty);
- arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
} else {
- assert(zero == false);
- /*
- * Propagate the dirty and unzeroed flags to the allocated
- * small run, so that arena_dalloc_bin_run() has the ability to
- * conditionally trim clean pages.
- */
- arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty);
- /*
- * The first page will always be dirtied during small run
- * initialization, so a validation failure here would not
- * actually cause an observable failure.
- */
- if (config_debug && flag_dirty == 0 &&
- arena_mapbits_unzeroed_get(chunk, run_ind) == 0)
- arena_chunk_validate_zeroed(chunk, run_ind);
- for (i = 1; i < need_pages - 1; i++) {
- arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
- if (config_debug && flag_dirty == 0 &&
- arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0)
- arena_chunk_validate_zeroed(chunk, run_ind+i);
- }
- arena_mapbits_small_set(chunk, run_ind+need_pages-1,
- need_pages-1, binind, flag_dirty);
- if (config_debug && flag_dirty == 0 &&
- arena_mapbits_unzeroed_get(chunk, run_ind+need_pages-1) ==
- 0) {
- arena_chunk_validate_zeroed(chunk,
- run_ind+need_pages-1);
- }
+ VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
+ (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
}
+
+ /*
+ * Set the last element first, in case the run only contains one page
+ * (i.e. both statements set the same element).
+ */
+ arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty);
+ arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
+}
+
+static void
+arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
+{
+
+ arena_run_split_large_helper(arena, run, size, true, zero);
+}
+
+static void
+arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
+{
+
+ arena_run_split_large_helper(arena, run, size, false, zero);
+}
+
+static void
+arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
+ size_t binind)
+{
+ arena_chunk_t *chunk;
+ size_t flag_dirty, run_ind, need_pages, i;
+
+ assert(binind != BININD_INVALID);
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
+ flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
+ need_pages = (size >> LG_PAGE);
+ assert(need_pages > 0);
+
+ arena_run_split_remove(arena, chunk, run_ind, flag_dirty, need_pages);
+
+ /*
+ * Propagate the dirty and unzeroed flags to the allocated small run,
+ * so that arena_dalloc_bin_run() has the ability to conditionally trim
+ * clean pages.
+ */
+ arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty);
+ /*
+ * The first page will always be dirtied during small run
+ * initialization, so a validation failure here would not actually
+ * cause an observable failure.
+ */
+ if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk,
+ run_ind) == 0)
+ arena_run_page_validate_zeroed(chunk, run_ind);
+ for (i = 1; i < need_pages - 1; i++) {
+ arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
+ if (config_debug && flag_dirty == 0 &&
+ arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0)
+ arena_run_page_validate_zeroed(chunk, run_ind+i);
+ }
+ arena_mapbits_small_set(chunk, run_ind+need_pages-1, need_pages-1,
+ binind, flag_dirty);
+ if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk,
+ run_ind+need_pages-1) == 0)
+ arena_run_page_validate_zeroed(chunk, run_ind+need_pages-1);
+ VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
+ (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
}
static arena_chunk_t *
-arena_chunk_alloc(arena_t *arena)
+arena_chunk_init_spare(arena_t *arena)
{
arena_chunk_t *chunk;
- size_t i;
- if (arena->spare != NULL) {
- chunk = arena->spare;
- arena->spare = NULL;
+ assert(arena->spare != NULL);
- assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
- assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
- assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
- arena_maxclass);
- assert(arena_mapbits_unallocated_size_get(chunk,
- chunk_npages-1) == arena_maxclass);
- assert(arena_mapbits_dirty_get(chunk, map_bias) ==
- arena_mapbits_dirty_get(chunk, chunk_npages-1));
- } else {
- bool zero;
- size_t unzeroed;
+ chunk = arena->spare;
+ arena->spare = NULL;
- zero = false;
- malloc_mutex_unlock(&arena->lock);
- chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize,
- false, &zero, arena->dss_prec);
- malloc_mutex_lock(&arena->lock);
- if (chunk == NULL)
- return (NULL);
- if (config_stats)
- arena->stats.mapped += chunksize;
+ assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
+ assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
+ assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
+ arena_maxclass);
+ assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
+ arena_maxclass);
+ assert(arena_mapbits_dirty_get(chunk, map_bias) ==
+ arena_mapbits_dirty_get(chunk, chunk_npages-1));
- chunk->arena = arena;
+ return (chunk);
+}
- /*
- * Claim that no pages are in use, since the header is merely
- * overhead.
- */
- chunk->ndirty = 0;
+static arena_chunk_t *
+arena_chunk_init_hard(arena_t *arena)
+{
+ arena_chunk_t *chunk;
+ bool zero;
+ size_t unzeroed, i;
- chunk->nruns_avail = 0;
- chunk->nruns_adjac = 0;
+ assert(arena->spare == NULL);
- /*
- * Initialize the map to contain one maximal free untouched run.
- * Mark the pages as zeroed iff chunk_alloc() returned a zeroed
- * chunk.
- */
- unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
- arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass,
- unzeroed);
- /*
- * There is no need to initialize the internal page map entries
- * unless the chunk is not zeroed.
- */
- if (zero == false) {
- for (i = map_bias+1; i < chunk_npages-1; i++)
- arena_mapbits_unzeroed_set(chunk, i, unzeroed);
- } else if (config_debug) {
+ zero = false;
+ malloc_mutex_unlock(&arena->lock);
+ chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize, false,
+ &zero, arena->dss_prec);
+ malloc_mutex_lock(&arena->lock);
+ if (chunk == NULL)
+ return (NULL);
+ if (config_stats)
+ arena->stats.mapped += chunksize;
+
+ chunk->arena = arena;
+
+ /*
+ * Claim that no pages are in use, since the header is merely overhead.
+ */
+ chunk->ndirty = 0;
+
+ chunk->nruns_avail = 0;
+ chunk->nruns_adjac = 0;
+
+ /*
+ * Initialize the map to contain one maximal free untouched run. Mark
+ * the pages as zeroed iff chunk_alloc() returned a zeroed chunk.
+ */
+ unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
+ arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass,
+ unzeroed);
+ /*
+ * There is no need to initialize the internal page map entries unless
+ * the chunk is not zeroed.
+ */
+ if (zero == false) {
+ VALGRIND_MAKE_MEM_UNDEFINED((void *)arena_mapp_get(chunk,
+ map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk,
+ chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk,
+ map_bias+1)));
+ for (i = map_bias+1; i < chunk_npages-1; i++)
+ arena_mapbits_unzeroed_set(chunk, i, unzeroed);
+ } else {
+ VALGRIND_MAKE_MEM_DEFINED((void *)arena_mapp_get(chunk,
+ map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk,
+ chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk,
+ map_bias+1)));
+ if (config_debug) {
for (i = map_bias+1; i < chunk_npages-1; i++) {
assert(arena_mapbits_unzeroed_get(chunk, i) ==
unzeroed);
}
}
- arena_mapbits_unallocated_set(chunk, chunk_npages-1,
- arena_maxclass, unzeroed);
+ }
+ arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxclass,
+ unzeroed);
+
+ return (chunk);
+}
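The split above separates the cheap path, reusing the cached spare chunk, from the expensive path, mapping and initializing a fresh chunk while the arena lock is dropped. A minimal self-contained sketch of the same one-slot spare cache idea; pool_t, slab_alloc() and SLAB_SIZE are invented names for the example, not jemalloc internals:

#include <stdlib.h>
#include <string.h>

#define SLAB_SIZE (1 << 16)

typedef struct {
	void *spare;	/* most recently retired slab, if any */
} pool_t;

/* Expensive path: obtain (here: malloc) and initialize a brand new slab. */
static void *
slab_init_hard(void)
{
	void *slab = malloc(SLAB_SIZE);

	if (slab != NULL)
		memset(slab, 0, SLAB_SIZE);
	return (slab);
}

/* Cheap path: hand back the cached spare, which is already initialized. */
static void *
slab_init_spare(pool_t *pool)
{
	void *slab = pool->spare;

	pool->spare = NULL;
	return (slab);
}

void *
slab_alloc(pool_t *pool)
{

	return (pool->spare != NULL ? slab_init_spare(pool) :
	    slab_init_hard());
}

/* On release, keep one slab around instead of freeing it immediately. */
void
slab_dalloc(pool_t *pool, void *slab)
{

	if (pool->spare != NULL)
		free(pool->spare);
	pool->spare = slab;
}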
+
+static arena_chunk_t *
+arena_chunk_alloc(arena_t *arena)
+{
+ arena_chunk_t *chunk;
+
+ if (arena->spare != NULL)
+ chunk = arena_chunk_init_spare(arena);
+ else {
+ chunk = arena_chunk_init_hard(arena);
+ if (chunk == NULL)
+ return (NULL);
}
/* Insert the run into the runs_avail tree. */
@@ -618,8 +660,7 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
}
static arena_run_t *
-arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind,
- bool zero)
+arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{
arena_run_t *run;
arena_chunk_map_t *mapelm, key;
@@ -634,7 +675,7 @@ arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind,
run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
LG_PAGE));
- arena_run_split(arena, run, size, large, binind, zero);
+ arena_run_split_large(arena, run, size, zero);
return (run);
}
@@ -642,19 +683,72 @@ arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind,
}
static arena_run_t *
-arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
- bool zero)
+arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
+{
+ arena_chunk_t *chunk;
+ arena_run_t *run;
+
+ assert(size <= arena_maxclass);
+ assert((size & PAGE_MASK) == 0);
+
+ /* Search the arena's chunks for the lowest best fit. */
+ run = arena_run_alloc_large_helper(arena, size, zero);
+ if (run != NULL)
+ return (run);
+
+ /*
+ * No usable runs. Create a new chunk from which to allocate the run.
+ */
+ chunk = arena_chunk_alloc(arena);
+ if (chunk != NULL) {
+ run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
+ arena_run_split_large(arena, run, size, zero);
+ return (run);
+ }
+
+ /*
+ * arena_chunk_alloc() failed, but another thread may have made
+ * sufficient memory available while this one dropped arena->lock in
+ * arena_chunk_alloc(), so search one more time.
+ */
+ return (arena_run_alloc_large_helper(arena, size, zero));
+}
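arena_run_alloc_large() (and its small counterpart below) keeps the old three-step shape: search existing chunks, create a new chunk, then search once more because the arena lock is dropped while the chunk is allocated. A hedged, self-contained sketch of that "search, grow, re-search" shape; run_search(), run_grow() and the one-slot cache are stand-ins invented for the example:

#include <stddef.h>
#include <stdlib.h>

static void *cache_slot;

static void *
run_search(size_t size)
{
	void *ret = cache_slot;

	(void)size;
	cache_slot = NULL;
	return (ret);
}

static int
run_grow(size_t size)
{

	/* In jemalloc the arena lock would be dropped around this step. */
	cache_slot = malloc(size);
	return (cache_slot == NULL);
}

void *
run_alloc(size_t size)
{
	void *ret;

	/* 1) Lowest best fit among what is already mapped. */
	if ((ret = run_search(size)) != NULL)
		return (ret);
	/* 2) Grow, then carve the run out of the new space. */
	if (run_grow(size) == 0 && (ret = run_search(size)) != NULL)
		return (ret);
	/*
	 * 3) Even if growing failed, another thread may have freed memory
	 * while the lock was dropped, so search one last time.
	 */
	return (run_search(size));
}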
+
+static arena_run_t *
+arena_run_alloc_small_helper(arena_t *arena, size_t size, size_t binind)
+{
+ arena_run_t *run;
+ arena_chunk_map_t *mapelm, key;
+
+ key.bits = size | CHUNK_MAP_KEY;
+ mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
+ if (mapelm != NULL) {
+ arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
+ size_t pageind = (((uintptr_t)mapelm -
+ (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
+ + map_bias;
+
+ run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
+ LG_PAGE));
+ arena_run_split_small(arena, run, size, binind);
+ return (run);
+ }
+
+ return (NULL);
+}
+
+static arena_run_t *
+arena_run_alloc_small(arena_t *arena, size_t size, size_t binind)
{
arena_chunk_t *chunk;
arena_run_t *run;
assert(size <= arena_maxclass);
assert((size & PAGE_MASK) == 0);
- assert((large && binind == BININD_INVALID) || (large == false && binind
- != BININD_INVALID));
+ assert(binind != BININD_INVALID);
/* Search the arena's chunks for the lowest best fit. */
- run = arena_run_alloc_helper(arena, size, large, binind, zero);
+ run = arena_run_alloc_small_helper(arena, size, binind);
if (run != NULL)
return (run);
@@ -664,7 +758,7 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
chunk = arena_chunk_alloc(arena);
if (chunk != NULL) {
run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
- arena_run_split(arena, run, size, large, binind, zero);
+ arena_run_split_small(arena, run, size, binind);
return (run);
}
@@ -673,7 +767,7 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
* sufficient memory available while this one dropped arena->lock in
* arena_chunk_alloc(), so search one more time.
*/
- return (arena_run_alloc_helper(arena, size, large, binind, zero));
+ return (arena_run_alloc_small_helper(arena, size, binind));
}
static inline void
@@ -699,48 +793,42 @@ arena_maybe_purge(arena_t *arena)
arena_purge(arena, false);
}
-static inline size_t
-arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
+static arena_chunk_t *
+chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg)
{
- size_t npurged;
- ql_head(arena_chunk_map_t) mapelms;
- arena_chunk_map_t *mapelm;
- size_t pageind, npages;
- size_t nmadvise;
+ size_t *ndirty = (size_t *)arg;
- ql_new(&mapelms);
+ assert(chunk->ndirty != 0);
+ *ndirty += chunk->ndirty;
+ return (NULL);
+}
+
+static size_t
+arena_compute_npurgatory(arena_t *arena, bool all)
+{
+ size_t npurgatory, npurgeable;
/*
- * If chunk is the spare, temporarily re-allocate it, 1) so that its
- * run is reinserted into runs_avail, and 2) so that it cannot be
- * completely discarded by another thread while arena->lock is dropped
- * by this thread. Note that the arena_run_dalloc() call will
- * implicitly deallocate the chunk, so no explicit action is required
- * in this function to deallocate the chunk.
- *
- * Note that once a chunk contains dirty pages, it cannot again contain
- * a single run unless 1) it is a dirty run, or 2) this function purges
- * dirty pages and causes the transition to a single clean run. Thus
- * (chunk == arena->spare) is possible, but it is not possible for
- * this function to be called on the spare unless it contains a dirty
- * run.
+ * Compute the minimum number of pages that this thread should try to
+ * purge.
*/
- if (chunk == arena->spare) {
- assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
- assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0);
+ npurgeable = arena->ndirty - arena->npurgatory;
- arena_chunk_alloc(arena);
- }
+ if (all == false) {
+ size_t threshold = (arena->nactive >> opt_lg_dirty_mult);
- if (config_stats)
- arena->stats.purged += chunk->ndirty;
+ npurgatory = npurgeable - threshold;
+ } else
+ npurgatory = npurgeable;
- /*
- * Operate on all dirty runs if there is no clean/dirty run
- * fragmentation.
- */
- if (chunk->nruns_adjac == 0)
- all = true;
+ return (npurgatory);
+}
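arena_compute_npurgatory() simply lifts the arithmetic that used to be inlined in arena_purge(): purge enough of the purgeable dirty pages to get back under the nactive >> lg_dirty_mult threshold, or all of them when a full purge is requested. A self-contained worked example; the helper name and the sample numbers are illustrative, and 3 (an 8:1 active:dirty ratio) is only assumed here as a typical opt_lg_dirty_mult value:

#include <stdio.h>
#include <stddef.h>

/*
 * How many dirty pages should this thread put into "purgatory"?  Other
 * threads may already have claimed some dirty pages (npurgatory), so only
 * the remainder is purgeable by this one.
 */
static size_t
compute_npurgatory(size_t ndirty, size_t npurgatory, size_t nactive,
    unsigned lg_dirty_mult, int all)
{
	size_t npurgeable = ndirty - npurgatory;

	if (all)
		return (npurgeable);
	/* Purge just enough to return to the allowed dirty:active ratio. */
	return (npurgeable - (nactive >> lg_dirty_mult));
}

int
main(void)
{
	/* 1000 active pages, 8:1 ratio => up to 125 dirty pages tolerated. */
	size_t n = compute_npurgatory(200, 0, 1000, 3, 0);

	printf("pages to purge: %zu\n", n);	/* 200 - 125 = 75 */
	return (0);
}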
+
+static void
+arena_chunk_stash_dirty(arena_t *arena, arena_chunk_t *chunk, bool all,
+ arena_chunk_mapelms_t *mapelms)
+{
+ size_t pageind, npages;
/*
* Temporarily allocate free dirty runs within chunk. If all is false,
@@ -748,7 +836,7 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
* all dirty runs.
*/
for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
- mapelm = arena_mapp_get(chunk, pageind);
+ arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
if (arena_mapbits_allocated_get(chunk, pageind) == 0) {
size_t run_size =
arena_mapbits_unallocated_size_get(chunk, pageind);
@@ -764,11 +852,11 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
arena_run_t *run = (arena_run_t *)((uintptr_t)
chunk + (uintptr_t)(pageind << LG_PAGE));
- arena_run_split(arena, run, run_size, true,
- BININD_INVALID, false);
+ arena_run_split_large(arena, run, run_size,
+ false);
/* Append to list for later processing. */
ql_elm_new(mapelm, u.ql_link);
- ql_tail_insert(&mapelms, mapelm, u.ql_link);
+ ql_tail_insert(mapelms, mapelm, u.ql_link);
}
} else {
/* Skip run. */
@@ -792,12 +880,20 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
assert(pageind == chunk_npages);
assert(chunk->ndirty == 0 || all == false);
assert(chunk->nruns_adjac == 0);
+}
+
+static size_t
+arena_chunk_purge_stashed(arena_t *arena, arena_chunk_t *chunk,
+ arena_chunk_mapelms_t *mapelms)
+{
+ size_t npurged, pageind, npages, nmadvise;
+ arena_chunk_map_t *mapelm;
malloc_mutex_unlock(&arena->lock);
if (config_stats)
nmadvise = 0;
npurged = 0;
- ql_foreach(mapelm, &mapelms, u.ql_link) {
+ ql_foreach(mapelm, mapelms, u.ql_link) {
bool unzeroed;
size_t flag_unzeroed, i;
@@ -831,30 +927,75 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
if (config_stats)
arena->stats.nmadvise += nmadvise;
+ return (npurged);
+}
+
+static void
+arena_chunk_unstash_purged(arena_t *arena, arena_chunk_t *chunk,
+ arena_chunk_mapelms_t *mapelms)
+{
+ arena_chunk_map_t *mapelm;
+ size_t pageind;
+
/* Deallocate runs. */
- for (mapelm = ql_first(&mapelms); mapelm != NULL;
- mapelm = ql_first(&mapelms)) {
+ for (mapelm = ql_first(mapelms); mapelm != NULL;
+ mapelm = ql_first(mapelms)) {
arena_run_t *run;
pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
sizeof(arena_chunk_map_t)) + map_bias;
run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind <<
LG_PAGE));
- ql_remove(&mapelms, mapelm, u.ql_link);
+ ql_remove(mapelms, mapelm, u.ql_link);
arena_run_dalloc(arena, run, false, true);
}
-
- return (npurged);
}
-static arena_chunk_t *
-chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg)
+static inline size_t
+arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
{
- size_t *ndirty = (size_t *)arg;
+ size_t npurged;
+ arena_chunk_mapelms_t mapelms;
- assert(chunk->ndirty != 0);
- *ndirty += chunk->ndirty;
- return (NULL);
+ ql_new(&mapelms);
+
+ /*
+ * If chunk is the spare, temporarily re-allocate it, 1) so that its
+ * run is reinserted into runs_avail, and 2) so that it cannot be
+ * completely discarded by another thread while arena->lock is dropped
+ * by this thread. Note that the arena_run_dalloc() call will
+ * implicitly deallocate the chunk, so no explicit action is required
+ * in this function to deallocate the chunk.
+ *
+ * Note that once a chunk contains dirty pages, it cannot again contain
+ * a single run unless 1) it is a dirty run, or 2) this function purges
+ * dirty pages and causes the transition to a single clean run. Thus
+ * (chunk == arena->spare) is possible, but it is not possible for
+ * this function to be called on the spare unless it contains a dirty
+ * run.
+ */
+ if (chunk == arena->spare) {
+ assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
+ assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0);
+
+ arena_chunk_alloc(arena);
+ }
+
+ if (config_stats)
+ arena->stats.purged += chunk->ndirty;
+
+ /*
+ * Operate on all dirty runs if there is no clean/dirty run
+ * fragmentation.
+ */
+ if (chunk->nruns_adjac == 0)
+ all = true;
+
+ arena_chunk_stash_dirty(arena, chunk, all, &mapelms);
+ npurged = arena_chunk_purge_stashed(arena, chunk, &mapelms);
+ arena_chunk_unstash_purged(arena, chunk, &mapelms);
+
+ return (npurged);
}
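The purge path is now three explicit phases: stash dirty runs while holding the arena lock, purge them with the lock dropped (the madvise() calls are the slow part), then hand them back after re-taking the lock. A reduced, self-contained sketch of that shape using a pthread mutex and a fixed-size array as the stash; the run_* helpers are placeholders, not jemalloc functions:

#include <pthread.h>
#include <stddef.h>

#define STASH_MAX 64

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-ins for "mark run busy", "madvise run", "return run to free list". */
static void run_stash(void *run)   { (void)run; }
static void run_purge(void *run)   { (void)run; /* e.g. madvise(DONTNEED) */ }
static void run_unstash(void *run) { (void)run; }

size_t
purge_dirty(void **dirty_runs, size_t ndirty)
{
	void *stash[STASH_MAX];
	size_t i, n = ndirty < STASH_MAX ? ndirty : STASH_MAX;

	/* Phase 1: under the lock, claim the runs so nobody reuses them. */
	pthread_mutex_lock(&pool_lock);
	for (i = 0; i < n; i++) {
		stash[i] = dirty_runs[i];
		run_stash(stash[i]);
	}
	pthread_mutex_unlock(&pool_lock);

	/* Phase 2: the slow system calls happen without the lock held. */
	for (i = 0; i < n; i++)
		run_purge(stash[i]);

	/* Phase 3: re-take the lock and hand the clean runs back. */
	pthread_mutex_lock(&pool_lock);
	for (i = 0; i < n; i++)
		run_unstash(stash[i]);
	pthread_mutex_unlock(&pool_lock);

	return (n);
}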
static void
@@ -877,21 +1018,11 @@ arena_purge(arena_t *arena, bool all)
arena->stats.npurge++;
/*
- * Compute the minimum number of pages that this thread should try to
- * purge, and add the result to arena->npurgatory. This will keep
- * multiple threads from racing to reduce ndirty below the threshold.
+ * Add the minimum number of pages this thread should try to purge to
+ * arena->npurgatory. This will keep multiple threads from racing to
+ * reduce ndirty below the threshold.
*/
- {
- size_t npurgeable = arena->ndirty - arena->npurgatory;
-
- if (all == false) {
- size_t threshold = (arena->nactive >>
- opt_lg_dirty_mult);
-
- npurgatory = npurgeable - threshold;
- } else
- npurgatory = npurgeable;
- }
+ npurgatory = arena_compute_npurgatory(arena, all);
arena->npurgatory += npurgatory;
while (npurgatory > 0) {
@@ -958,61 +1089,12 @@ arena_purge_all(arena_t *arena)
}
static void
-arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
+arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
+ size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty)
{
- arena_chunk_t *chunk;
- size_t size, run_ind, run_pages, flag_dirty;
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
- assert(run_ind >= map_bias);
- assert(run_ind < chunk_npages);
- if (arena_mapbits_large_get(chunk, run_ind) != 0) {
- size = arena_mapbits_large_size_get(chunk, run_ind);
- assert(size == PAGE ||
- arena_mapbits_large_size_get(chunk,
- run_ind+(size>>LG_PAGE)-1) == 0);
- } else {
- size_t binind = arena_bin_index(arena, run->bin);
- arena_bin_info_t *bin_info = &arena_bin_info[binind];
- size = bin_info->run_size;
- }
- run_pages = (size >> LG_PAGE);
- if (config_stats) {
- /*
- * Update stats_cactive if nactive is crossing a chunk
- * multiple.
- */
- size_t cactive_diff = CHUNK_CEILING(arena->nactive << LG_PAGE) -
- CHUNK_CEILING((arena->nactive - run_pages) << LG_PAGE);
- if (cactive_diff != 0)
- stats_cactive_sub(cactive_diff);
- }
- arena->nactive -= run_pages;
-
- /*
- * The run is dirty if the caller claims to have dirtied it, as well as
- * if it was already dirty before being allocated and the caller
- * doesn't claim to have cleaned it.
- */
- assert(arena_mapbits_dirty_get(chunk, run_ind) ==
- arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
- if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0)
- dirty = true;
- flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
-
- /* Mark pages as unallocated in the chunk map. */
- if (dirty) {
- arena_mapbits_unallocated_set(chunk, run_ind, size,
- CHUNK_MAP_DIRTY);
- arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
- CHUNK_MAP_DIRTY);
- } else {
- arena_mapbits_unallocated_set(chunk, run_ind, size,
- arena_mapbits_unzeroed_get(chunk, run_ind));
- arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
- arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
- }
+ size_t size = *p_size;
+ size_t run_ind = *p_run_ind;
+ size_t run_pages = *p_run_pages;
/* Try to coalesce forward. */
if (run_ind + run_pages < chunk_npages &&
@@ -1042,8 +1124,9 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
}
/* Try to coalesce backward. */
- if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, run_ind-1)
- == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == flag_dirty) {
+ if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
+ run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
+ flag_dirty) {
size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
run_ind-1);
size_t prun_pages = prun_size >> LG_PAGE;
@@ -1068,6 +1151,62 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
size);
}
+ *p_size = size;
+ *p_run_ind = run_ind;
+ *p_run_pages = run_pages;
+}
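arena_run_coalesce() extracts the forward/backward merging of a freed run from arena_run_dalloc(): if the pages just after or just before the freed run are also free (and, in the real code, share the same dirty state), fold them into one larger run. A toy, self-contained version over a flat page map, with run lengths recorded at the first and last page of each free run as the chunk map does; it assumes the caller has already marked the freed run's pages free:

#include <stdbool.h>
#include <stddef.h>

#define NPAGES 64

static bool   allocated[NPAGES];
static size_t run_len[NPAGES];	/* valid at first and last page of free runs */

void
run_coalesce(size_t *run_ind, size_t *run_pages)
{
	size_t ind = *run_ind, pages = *run_pages;

	/* Try to coalesce forward. */
	if (ind + pages < NPAGES && allocated[ind + pages] == false) {
		size_t next = run_len[ind + pages];	/* at its first page */

		pages += next;
	}
	/* Try to coalesce backward. */
	if (ind > 0 && allocated[ind - 1] == false) {
		size_t prev = run_len[ind - 1];		/* at its last page */

		ind -= prev;
		pages += prev;
	}
	/* Record the merged run at its first and last page. */
	run_len[ind] = run_len[ind + pages - 1] = pages;
	*run_ind = ind;
	*run_pages = pages;
}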
+
+static void
+arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
+{
+ arena_chunk_t *chunk;
+ size_t size, run_ind, run_pages, flag_dirty;
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
+ assert(run_ind >= map_bias);
+ assert(run_ind < chunk_npages);
+ if (arena_mapbits_large_get(chunk, run_ind) != 0) {
+ size = arena_mapbits_large_size_get(chunk, run_ind);
+ assert(size == PAGE ||
+ arena_mapbits_large_size_get(chunk,
+ run_ind+(size>>LG_PAGE)-1) == 0);
+ } else {
+ size_t binind = arena_bin_index(arena, run->bin);
+ arena_bin_info_t *bin_info = &arena_bin_info[binind];
+ size = bin_info->run_size;
+ }
+ run_pages = (size >> LG_PAGE);
+ arena_cactive_update(arena, 0, run_pages);
+ arena->nactive -= run_pages;
+
+ /*
+ * The run is dirty if the caller claims to have dirtied it, as well as
+ * if it was already dirty before being allocated and the caller
+ * doesn't claim to have cleaned it.
+ */
+ assert(arena_mapbits_dirty_get(chunk, run_ind) ==
+ arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
+ if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0)
+ dirty = true;
+ flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
+
+ /* Mark pages as unallocated in the chunk map. */
+ if (dirty) {
+ arena_mapbits_unallocated_set(chunk, run_ind, size,
+ CHUNK_MAP_DIRTY);
+ arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
+ CHUNK_MAP_DIRTY);
+ } else {
+ arena_mapbits_unallocated_set(chunk, run_ind, size,
+ arena_mapbits_unzeroed_get(chunk, run_ind));
+ arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
+ arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
+ }
+
+ arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
+ flag_dirty);
+
/* Insert into runs_avail, now that coalescing is complete. */
assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
@@ -1235,14 +1374,12 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
malloc_mutex_unlock(&bin->lock);
/******************************/
malloc_mutex_lock(&arena->lock);
- run = arena_run_alloc(arena, bin_info->run_size, false, binind, false);
+ run = arena_run_alloc_small(arena, bin_info->run_size, binind);
if (run != NULL) {
bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
(uintptr_t)bin_info->bitmap_offset);
/* Initialize run internals. */
- VALGRIND_MAKE_MEM_UNDEFINED(run, bin_info->reg0_offset -
- bin_info->redzone_size);
run->bin = bin;
run->nextind = 0;
run->nfree = bin_info->nregs;
@@ -1260,7 +1397,7 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
}
/*
- * arena_run_alloc() failed, but another thread may have made
+ * arena_run_alloc_small() failed, but another thread may have made
* sufficient memory available while this one dropped bin->lock above,
* so search one more time.
*/
@@ -1295,12 +1432,12 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
arena_chunk_t *chunk;
/*
- * arena_run_alloc() may have allocated run, or it may
- * have pulled run from the bin's run tree. Therefore
- * it is unsafe to make any assumptions about how run
- * has previously been used, and arena_bin_lower_run()
- * must be called, as if a region were just deallocated
- * from the run.
+ * arena_run_alloc_small() may have allocated run, or
+ * it may have pulled run from the bin's run tree.
+ * Therefore it is unsafe to make any assumptions about
+ * how run has previously been used, and
+ * arena_bin_lower_run() must be called, as if a region
+ * were just deallocated from the run.
*/
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
if (run->nfree == bin_info->nregs)
@@ -1322,21 +1459,6 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
}
void
-arena_prof_accum(arena_t *arena, uint64_t accumbytes)
-{
-
- cassert(config_prof);
-
- if (config_prof && prof_interval != 0) {
- arena->prof_accumbytes += accumbytes;
- if (arena->prof_accumbytes >= prof_interval) {
- prof_idump();
- arena->prof_accumbytes -= prof_interval;
- }
- }
-}
-
-void
arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
uint64_t prof_accumbytes)
{
@@ -1347,11 +1469,8 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
assert(tbin->ncached == 0);
- if (config_prof) {
- malloc_mutex_lock(&arena->lock);
- arena_prof_accum(arena, prof_accumbytes);
- malloc_mutex_unlock(&arena->lock);
- }
+ if (config_prof && arena_prof_accum(arena, prof_accumbytes))
+ prof_idump();
bin = &arena->bins[binind];
malloc_mutex_lock(&bin->lock);
for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
@@ -1396,8 +1515,28 @@ arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
}
}
-void
-arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
+#ifdef JEMALLOC_JET
+#undef arena_redzone_corruption
+#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl)
+#endif
+static void
+arena_redzone_corruption(void *ptr, size_t usize, bool after,
+ size_t offset, uint8_t byte)
+{
+
+ malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
+ "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
+ after ? "after" : "before", ptr, usize, byte);
+}
+#ifdef JEMALLOC_JET
+#undef arena_redzone_corruption
+#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
+arena_redzone_corruption_t *arena_redzone_corruption =
+ JEMALLOC_N(arena_redzone_corruption_impl);
+#endif
+
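The #undef/#define pairs around arena_redzone_corruption are the JEMALLOC_JET testing-hook pattern: the real logic is compiled as a *_impl function, and the public symbol becomes a writable function pointer that unit tests can temporarily point at a mock. A freestanding sketch of the same pattern, with names invented for the example:

#include <stddef.h>
#include <stdio.h>

/* Default implementation of the hook. */
static void
report_corruption_impl(const void *ptr, size_t offset)
{

	fprintf(stderr, "corruption at %p+%zu\n", ptr, offset);
}

/*
 * The symbol callers actually use.  In a JEMALLOC_JET-style test build, a
 * unit test saves this pointer, installs its own recorder, exercises the
 * code under test, and then restores the original.
 */
typedef void (report_corruption_t)(const void *, size_t);
report_corruption_t *report_corruption = report_corruption_impl;

static int ncorruptions;

static void
counting_hook(const void *ptr, size_t offset)
{

	(void)ptr; (void)offset;
	ncorruptions++;
}

int
main(void)
{
	report_corruption_t *orig = report_corruption;

	report_corruption = counting_hook;	/* as a test would do */
	report_corruption(NULL, 3);
	report_corruption = orig;		/* restore the default */
	printf("recorded %d corruption(s)\n", ncorruptions);
	return (0);
}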
+static void
+arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
{
size_t size = bin_info->reg_size;
size_t redzone_size = bin_info->redzone_size;
@@ -1405,29 +1544,61 @@ arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
bool error = false;
for (i = 1; i <= redzone_size; i++) {
- unsigned byte;
- if ((byte = *(uint8_t *)((uintptr_t)ptr - i)) != 0xa5) {
+ uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
+ if (*byte != 0xa5) {
error = true;
- malloc_printf("<jemalloc>: Corrupt redzone "
- "%zu byte%s before %p (size %zu), byte=%#x\n", i,
- (i == 1) ? "" : "s", ptr, size, byte);
+ arena_redzone_corruption(ptr, size, false, i, *byte);
+ if (reset)
+ *byte = 0xa5;
}
}
for (i = 0; i < redzone_size; i++) {
- unsigned byte;
- if ((byte = *(uint8_t *)((uintptr_t)ptr + size + i)) != 0xa5) {
+ uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
+ if (*byte != 0xa5) {
error = true;
- malloc_printf("<jemalloc>: Corrupt redzone "
- "%zu byte%s after end of %p (size %zu), byte=%#x\n",
- i, (i == 1) ? "" : "s", ptr, size, byte);
+ arena_redzone_corruption(ptr, size, true, i, *byte);
+ if (reset)
+ *byte = 0xa5;
}
}
if (opt_abort && error)
abort();
+}
+#ifdef JEMALLOC_JET
+#undef arena_dalloc_junk_small
+#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl)
+#endif
+void
+arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
+{
+ size_t redzone_size = bin_info->redzone_size;
+
+ arena_redzones_validate(ptr, bin_info, false);
memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
bin_info->reg_interval);
}
+#ifdef JEMALLOC_JET
+#undef arena_dalloc_junk_small
+#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
+arena_dalloc_junk_small_t *arena_dalloc_junk_small =
+ JEMALLOC_N(arena_dalloc_junk_small_impl);
+#endif
+
+void
+arena_quarantine_junk_small(void *ptr, size_t usize)
+{
+ size_t binind;
+ arena_bin_info_t *bin_info;
+ cassert(config_fill);
+ assert(opt_junk);
+ assert(opt_quarantine);
+ assert(usize <= SMALL_MAXCLASS);
+
+ binind = SMALL_SIZE2BIN(usize);
+ bin_info = &arena_bin_info[binind];
+ arena_redzones_validate(ptr, bin_info, true);
+}
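arena_redzones_validate() generalizes the old junk check: every byte of the redzones on both sides of a small region must still be 0xa5, corrupt bytes are reported, and, new for the quarantine path, optionally rewritten back to 0xa5 so a quarantined region does not trip the same error twice. A self-contained sketch of the layout and the check; the sizes are arbitrary example values:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define REDZONE_SIZE 16
#define REG_SIZE     32	/* usable payload bytes */

/* Layout of one region: [redzone][payload][redzone]. */
static uint8_t *
region_alloc(void)
{
	uint8_t *base = malloc(REDZONE_SIZE + REG_SIZE + REDZONE_SIZE);

	if (base == NULL)
		abort();
	memset(base, 0xa5, REDZONE_SIZE);			  /* leading  */
	memset(base + REDZONE_SIZE + REG_SIZE, 0xa5, REDZONE_SIZE); /* trailing */
	return (base + REDZONE_SIZE);	/* pointer handed to the user */
}

/* Returns the number of corrupt redzone bytes; repairs them if reset. */
static size_t
redzones_validate(uint8_t *ptr, int reset)
{
	size_t i, errors = 0;

	for (i = 1; i <= REDZONE_SIZE; i++) {
		uint8_t *byte = ptr - i;
		if (*byte != 0xa5) {
			errors++;
			printf("corrupt byte %zu before %p: %#x\n", i,
			    (void *)ptr, (unsigned)*byte);
			if (reset)
				*byte = 0xa5;
		}
	}
	for (i = 0; i < REDZONE_SIZE; i++) {
		uint8_t *byte = ptr + REG_SIZE + i;
		if (*byte != 0xa5) {
			errors++;
			printf("corrupt byte %zu after %p: %#x\n", i,
			    (void *)ptr, (unsigned)*byte);
			if (reset)
				*byte = 0xa5;
		}
	}
	return (errors);
}

int
main(void)
{
	uint8_t *p = region_alloc();

	p[-1] = 0;			/* simulate a one-byte underflow */
	printf("errors: %zu\n", redzones_validate(p, 1));
	printf("after reset: %zu\n", redzones_validate(p, 0));
	free(p - REDZONE_SIZE);
	return (0);
}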
void *
arena_malloc_small(arena_t *arena, size_t size, bool zero)
@@ -1459,11 +1630,8 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
bin->stats.nrequests++;
}
malloc_mutex_unlock(&bin->lock);
- if (config_prof && isthreaded == false) {
- malloc_mutex_lock(&arena->lock);
- arena_prof_accum(arena, size);
- malloc_mutex_unlock(&arena->lock);
- }
+ if (config_prof && isthreaded == false && arena_prof_accum(arena, size))
+ prof_idump();
if (zero == false) {
if (config_fill) {
@@ -1473,6 +1641,7 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
} else if (opt_zero)
memset(ret, 0, size);
}
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
} else {
if (config_fill && opt_junk) {
arena_alloc_junk_small(ret, &arena_bin_info[binind],
@@ -1489,11 +1658,12 @@ void *
arena_malloc_large(arena_t *arena, size_t size, bool zero)
{
void *ret;
+ UNUSED bool idump;
/* Large allocation. */
size = PAGE_CEILING(size);
malloc_mutex_lock(&arena->lock);
- ret = (void *)arena_run_alloc(arena, size, true, BININD_INVALID, zero);
+ ret = (void *)arena_run_alloc_large(arena, size, zero);
if (ret == NULL) {
malloc_mutex_unlock(&arena->lock);
return (NULL);
@@ -1507,8 +1677,10 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
}
if (config_prof)
- arena_prof_accum(arena, size);
+ idump = arena_prof_accum_locked(arena, size);
malloc_mutex_unlock(&arena->lock);
+ if (config_prof && idump)
+ prof_idump();
if (zero == false) {
if (config_fill) {
@@ -1537,7 +1709,7 @@ arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
alloc_size = size + alignment - PAGE;
malloc_mutex_lock(&arena->lock);
- run = arena_run_alloc(arena, alloc_size, true, BININD_INVALID, zero);
+ run = arena_run_alloc_large(arena, alloc_size, false);
if (run == NULL) {
malloc_mutex_unlock(&arena->lock);
return (NULL);
@@ -1557,6 +1729,7 @@ arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
arena_run_trim_tail(arena, chunk, ret, size + trailsize, size,
false);
}
+ arena_run_init_large(arena, (arena_run_t *)ret, size, zero);
if (config_stats) {
arena->stats.nmalloc_large++;
@@ -1760,21 +1933,38 @@ arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
}
+#ifdef JEMALLOC_JET
+#undef arena_dalloc_junk_large
+#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl)
+#endif
+static void
+arena_dalloc_junk_large(void *ptr, size_t usize)
+{
+
+ if (config_fill && opt_junk)
+ memset(ptr, 0x5a, usize);
+}
+#ifdef JEMALLOC_JET
+#undef arena_dalloc_junk_large
+#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
+arena_dalloc_junk_large_t *arena_dalloc_junk_large =
+ JEMALLOC_N(arena_dalloc_junk_large_impl);
+#endif
+
void
arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
{
if (config_fill || config_stats) {
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- size_t size = arena_mapbits_large_size_get(chunk, pageind);
+ size_t usize = arena_mapbits_large_size_get(chunk, pageind);
- if (config_fill && config_stats && opt_junk)
- memset(ptr, 0x5a, size);
+ arena_dalloc_junk_large(ptr, usize);
if (config_stats) {
arena->stats.ndalloc_large++;
- arena->stats.allocated_large -= size;
- arena->stats.lstats[(size >> LG_PAGE) - 1].ndalloc++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].curruns--;
+ arena->stats.allocated_large -= usize;
+ arena->stats.lstats[(usize >> LG_PAGE) - 1].ndalloc++;
+ arena->stats.lstats[(usize >> LG_PAGE) - 1].curruns--;
}
}
@@ -1845,9 +2035,8 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t flag_dirty;
size_t splitsize = (oldsize + followsize <= size + extra)
? followsize : size + extra - oldsize;
- arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
- ((pageind+npages) << LG_PAGE)), splitsize, true,
- BININD_INVALID, zero);
+ arena_run_split_large(arena, (arena_run_t *)((uintptr_t)chunk +
+ ((pageind+npages) << LG_PAGE)), splitsize, zero);
size = oldsize + splitsize;
npages = size >> LG_PAGE;
@@ -1886,6 +2075,26 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
return (true);
}
+#ifdef JEMALLOC_JET
+#undef arena_ralloc_junk_large
+#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl)
+#endif
+static void
+arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
+{
+
+ if (config_fill && opt_junk) {
+ memset((void *)((uintptr_t)ptr + usize), 0x5a,
+ old_usize - usize);
+ }
+}
+#ifdef JEMALLOC_JET
+#undef arena_ralloc_junk_large
+#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
+arena_ralloc_junk_large_t *arena_ralloc_junk_large =
+ JEMALLOC_N(arena_ralloc_junk_large_impl);
+#endif
+
/*
* Try to resize a large allocation, in order to avoid copying. This will
* always fail if growing an object, and the following run is already in use.
@@ -1899,10 +2108,6 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
psize = PAGE_CEILING(size + extra);
if (psize == oldsize) {
/* Same size class. */
- if (config_fill && opt_junk && size < oldsize) {
- memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize -
- size);
- }
return (false);
} else {
arena_chunk_t *chunk;
@@ -1913,10 +2118,7 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
if (psize < oldsize) {
/* Fill before shrinking in order to avoid a race. */
- if (config_fill && opt_junk) {
- memset((void *)((uintptr_t)ptr + size), 0x5a,
- oldsize - size);
- }
+ arena_ralloc_junk_large(ptr, oldsize, psize);
arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
psize);
return (false);
@@ -1924,17 +2126,23 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
oldsize, PAGE_CEILING(size),
psize - PAGE_CEILING(size), zero);
- if (config_fill && ret == false && zero == false &&
- opt_zero) {
- memset((void *)((uintptr_t)ptr + oldsize), 0,
- size - oldsize);
+ if (config_fill && ret == false && zero == false) {
+ if (opt_junk) {
+ memset((void *)((uintptr_t)ptr +
+ oldsize), 0xa5, isalloc(ptr,
+ config_prof) - oldsize);
+ } else if (opt_zero) {
+ memset((void *)((uintptr_t)ptr +
+ oldsize), 0, isalloc(ptr,
+ config_prof) - oldsize);
+ }
}
return (ret);
}
}
}
-void *
+bool
arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
bool zero)
{
@@ -1949,25 +2157,20 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
if ((size + extra <= SMALL_MAXCLASS &&
SMALL_SIZE2BIN(size + extra) ==
SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
- size + extra >= oldsize)) {
- if (config_fill && opt_junk && size < oldsize) {
- memset((void *)((uintptr_t)ptr + size),
- 0x5a, oldsize - size);
- }
- return (ptr);
- }
+ size + extra >= oldsize))
+ return (false);
} else {
assert(size <= arena_maxclass);
if (size + extra > SMALL_MAXCLASS) {
if (arena_ralloc_large(ptr, oldsize, size,
extra, zero) == false)
- return (ptr);
+ return (false);
}
}
}
/* Reallocation would require a move. */
- return (NULL);
+ return (true);
}
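arena_ralloc_no_move() now answers a yes/no question, "does this reallocation require moving the object?", instead of returning the unchanged pointer, so arena_ralloc() reads as: try in place, and only on true fall back to allocate, copy, free. A hedged standalone sketch of that calling convention; resize_no_move() here is a stand-in whose toy rule is that only shrinking succeeds in place:

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/*
 * Mirror of the patched convention: false means the request was satisfied
 * without moving ptr, true means the caller must move it.
 */
static bool
resize_no_move(void *ptr, size_t oldsize, size_t size)
{

	(void)ptr;
	return (size > oldsize);
}

void *
my_ralloc(void *ptr, size_t oldsize, size_t size)
{
	void *ret;
	size_t copysize;

	if (resize_no_move(ptr, oldsize, size) == false)
		return (ptr);	/* resized in place, same pointer */

	/* A move is required: allocate, copy the overlapping prefix, free. */
	ret = malloc(size);
	if (ret == NULL)
		return (NULL);
	copysize = (size < oldsize) ? size : oldsize;
	memcpy(ret, ptr, copysize);
	free(ptr);
	return (ret);
}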
void *
@@ -1979,9 +2182,8 @@ arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t copysize;
/* Try to avoid moving the allocation. */
- ret = arena_ralloc_no_move(ptr, oldsize, size, extra, zero);
- if (ret != NULL)
- return (ret);
+ if (arena_ralloc_no_move(ptr, oldsize, size, extra, zero) == false)
+ return (ptr);
/*
* size and oldsize are different enough that we need to move the
@@ -1992,7 +2194,7 @@ arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t usize = sa2u(size + extra, alignment);
if (usize == 0)
return (NULL);
- ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
+ ret = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
} else
ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc);
@@ -2004,7 +2206,7 @@ arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t usize = sa2u(size, alignment);
if (usize == 0)
return (NULL);
- ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
+ ret = ipalloct(usize, alignment, zero, try_tcache_alloc,
arena);
} else
ret = arena_malloc(arena, size, zero, try_tcache_alloc);
@@ -2022,7 +2224,7 @@ arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
copysize = (size < oldsize) ? size : oldsize;
VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
memcpy(ret, ptr, copysize);
- iqallocx(ptr, try_tcache_dalloc);
+ iqalloct(ptr, try_tcache_dalloc);
return (ret);
}
@@ -2277,7 +2479,6 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
bin_info->reg_interval) - pad_size;
} while (try_hdr_size > try_redzone0_offset);
} while (try_run_size <= arena_maxclass
- && try_run_size <= arena_maxclass
&& RUN_MAX_OVRHD * (bin_info->reg_interval << 3) >
RUN_MAX_OVRHD_RELAX
&& (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
diff --git a/deps/jemalloc/src/base.c b/deps/jemalloc/src/base.c
index b1a5945ef..4e62e8fa9 100644
--- a/deps/jemalloc/src/base.c
+++ b/deps/jemalloc/src/base.c
@@ -63,6 +63,7 @@ base_alloc(size_t size)
ret = base_next_addr;
base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
malloc_mutex_unlock(&base_mtx);
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, csize);
return (ret);
}
@@ -88,6 +89,7 @@ base_node_alloc(void)
ret = base_nodes;
base_nodes = *(extent_node_t **)ret;
malloc_mutex_unlock(&base_mtx);
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, sizeof(extent_node_t));
} else {
malloc_mutex_unlock(&base_mtx);
ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
@@ -100,6 +102,7 @@ void
base_node_dealloc(extent_node_t *node)
{
+ VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
malloc_mutex_lock(&base_mtx);
*(extent_node_t **)node = base_nodes;
base_nodes = node;
diff --git a/deps/jemalloc/src/bitmap.c b/deps/jemalloc/src/bitmap.c
index b47e26290..e2bd907d5 100644
--- a/deps/jemalloc/src/bitmap.c
+++ b/deps/jemalloc/src/bitmap.c
@@ -1,4 +1,4 @@
-#define JEMALLOC_BITMAP_C_
+#define JEMALLOC_BITMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
diff --git a/deps/jemalloc/src/chunk.c b/deps/jemalloc/src/chunk.c
index 1a3bb4f67..90ab116ae 100644
--- a/deps/jemalloc/src/chunk.c
+++ b/deps/jemalloc/src/chunk.c
@@ -78,6 +78,9 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
assert(node->size >= leadsize + size);
trailsize = node->size - leadsize - size;
ret = (void *)((uintptr_t)node->addr + leadsize);
+ zeroed = node->zeroed;
+ if (zeroed)
+ *zero = true;
/* Remove node from the tree. */
extent_tree_szad_remove(chunks_szad, node);
extent_tree_ad_remove(chunks_ad, node);
@@ -108,23 +111,26 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
}
node->addr = (void *)((uintptr_t)(ret) + size);
node->size = trailsize;
+ node->zeroed = zeroed;
extent_tree_szad_insert(chunks_szad, node);
extent_tree_ad_insert(chunks_ad, node);
node = NULL;
}
malloc_mutex_unlock(&chunks_mtx);
- zeroed = false;
- if (node != NULL) {
- if (node->zeroed) {
- zeroed = true;
- *zero = true;
- }
+ if (node != NULL)
base_node_dealloc(node);
- }
- if (zeroed == false && *zero) {
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
- memset(ret, 0, size);
+ if (*zero) {
+ if (zeroed == false)
+ memset(ret, 0, size);
+ else if (config_debug) {
+ size_t i;
+ size_t *p = (size_t *)(uintptr_t)ret;
+
+ VALGRIND_MAKE_MEM_DEFINED(ret, size);
+ for (i = 0; i < size / sizeof(size_t); i++)
+ assert(p[i] == 0);
+ }
}
return (ret);
}
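The chunk_recycle() change tracks whether a recycled extent is already known to be zeroed: when the caller asked for zeroed memory, memset() is only needed if the extent was not zeroed, and in a debug build an already-zeroed extent is merely asserted to contain zeros. A self-contained sketch of that decision with an invented extent struct; the byte-wise debug scan stands in for the word-wise one above:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

typedef struct {
	void  *addr;
	size_t size;
	bool   zeroed;	/* known to contain only zero bytes */
} extent_t;

static const bool config_debug = true;

/* Hand out a recycled extent, honouring the caller's *zero request. */
void *
extent_recycle(extent_t *node, bool *zero)
{
	bool zeroed = node->zeroed;

	if (zeroed)
		*zero = true;	/* caller gets zeroed memory for free */

	if (*zero) {
		if (zeroed == false)
			memset(node->addr, 0, node->size);
		else if (config_debug) {
			/* Trust, but verify: the extent must really be zero. */
			const unsigned char *p = node->addr;
			size_t i;

			for (i = 0; i < node->size; i++)
				assert(p[i] == 0);
		}
	}
	return (node->addr);
}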
@@ -172,35 +178,32 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
/* All strategies for allocation failed. */
ret = NULL;
label_return:
- if (config_ivsalloc && base == false && ret != NULL) {
- if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
- chunk_dealloc(ret, size, true);
- return (NULL);
+ if (ret != NULL) {
+ if (config_ivsalloc && base == false) {
+ if (rtree_set(chunks_rtree, (uintptr_t)ret, 1)) {
+ chunk_dealloc(ret, size, true);
+ return (NULL);
+ }
}
- }
- if ((config_stats || config_prof) && ret != NULL) {
- bool gdump;
- malloc_mutex_lock(&chunks_mtx);
- if (config_stats)
- stats_chunks.nchunks += (size / chunksize);
- stats_chunks.curchunks += (size / chunksize);
- if (stats_chunks.curchunks > stats_chunks.highchunks) {
- stats_chunks.highchunks = stats_chunks.curchunks;
- if (config_prof)
- gdump = true;
- } else if (config_prof)
- gdump = false;
- malloc_mutex_unlock(&chunks_mtx);
- if (config_prof && opt_prof && opt_prof_gdump && gdump)
- prof_gdump();
- }
- if (config_debug && *zero && ret != NULL) {
- size_t i;
- size_t *p = (size_t *)(uintptr_t)ret;
-
- VALGRIND_MAKE_MEM_DEFINED(ret, size);
- for (i = 0; i < size / sizeof(size_t); i++)
- assert(p[i] == 0);
+ if (config_stats || config_prof) {
+ bool gdump;
+ malloc_mutex_lock(&chunks_mtx);
+ if (config_stats)
+ stats_chunks.nchunks += (size / chunksize);
+ stats_chunks.curchunks += (size / chunksize);
+ if (stats_chunks.curchunks > stats_chunks.highchunks) {
+ stats_chunks.highchunks =
+ stats_chunks.curchunks;
+ if (config_prof)
+ gdump = true;
+ } else if (config_prof)
+ gdump = false;
+ malloc_mutex_unlock(&chunks_mtx);
+ if (config_prof && opt_prof && opt_prof_gdump && gdump)
+ prof_gdump();
+ }
+ if (config_valgrind)
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
}
assert(CHUNK_ADDR2BASE(ret) == ret);
return (ret);
@@ -211,9 +214,10 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
size_t size)
{
bool unzeroed;
- extent_node_t *xnode, *node, *prev, key;
+ extent_node_t *xnode, *node, *prev, *xprev, key;
unzeroed = pages_purge(chunk, size);
+ VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
/*
* Allocate a node before acquiring chunks_mtx even though it might not
@@ -222,6 +226,8 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
* held.
*/
xnode = base_node_alloc();
+ /* Use xprev to implement conditional deferred deallocation of prev. */
+ xprev = NULL;
malloc_mutex_lock(&chunks_mtx);
key.addr = (void *)((uintptr_t)chunk + size);
@@ -238,8 +244,6 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
node->size += size;
node->zeroed = (node->zeroed && (unzeroed == false));
extent_tree_szad_insert(chunks_szad, node);
- if (xnode != NULL)
- base_node_dealloc(xnode);
} else {
/* Coalescing forward failed, so insert a new node. */
if (xnode == NULL) {
@@ -249,10 +253,10 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
* already been purged, so this is only a virtual
* memory leak.
*/
- malloc_mutex_unlock(&chunks_mtx);
- return;
+ goto label_return;
}
node = xnode;
+ xnode = NULL; /* Prevent deallocation below. */
node->addr = chunk;
node->size = size;
node->zeroed = (unzeroed == false);
@@ -278,9 +282,19 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
node->zeroed = (node->zeroed && prev->zeroed);
extent_tree_szad_insert(chunks_szad, node);
- base_node_dealloc(prev);
+ xprev = prev;
}
+
+label_return:
malloc_mutex_unlock(&chunks_mtx);
+ /*
+ * Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
+ * avoid potential deadlock.
+ */
+ if (xnode != NULL)
+ base_node_dealloc(xnode);
+ if (xprev != NULL)
+ base_node_dealloc(xprev);
}
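chunk_record() no longer calls base_node_dealloc() while chunks_mtx is held; nodes that become garbage (the unused xnode, and a coalesced-away prev remembered via xprev) are freed only after the unlock, because base_node_dealloc() takes base_mtx and nesting the two locks in both orders risks deadlock. The shape of that fix, reduced to a self-contained example that defers a single unused node:

#include <pthread.h>
#include <stdlib.h>

typedef struct node_s { struct node_s *next; int key; } node_t;

static pthread_mutex_t list_mtx = PTHREAD_MUTEX_INITIALIZER;
static node_t *list_head;

/*
 * Insert a preallocated spare node unless an equal key already exists.
 * Whatever node ends up unused is freed only after list_mtx is released,
 * so free() (and any allocator lock it takes) never nests inside list_mtx.
 */
void
list_insert(node_t *spare, int key)
{
	node_t *unused = NULL, *n;

	pthread_mutex_lock(&list_mtx);
	for (n = list_head; n != NULL; n = n->next) {
		if (n->key == key) {
			unused = spare;	/* defer, don't free here */
			goto label_return;
		}
	}
	spare->key = key;
	spare->next = list_head;
	list_head = spare;
label_return:
	pthread_mutex_unlock(&list_mtx);
	if (unused != NULL)
		free(unused);
}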
void
@@ -307,7 +321,7 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
assert((size & chunksize_mask) == 0);
if (config_ivsalloc)
- rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
+ rtree_set(chunks_rtree, (uintptr_t)chunk, 0);
if (config_stats || config_prof) {
malloc_mutex_lock(&chunks_mtx);
assert(stats_chunks.curchunks >= (size / chunksize));
@@ -342,7 +356,7 @@ chunk_boot(void)
extent_tree_ad_new(&chunks_ad_dss);
if (config_ivsalloc) {
chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
- opt_lg_chunk);
+ opt_lg_chunk, base_alloc, NULL);
if (chunks_rtree == NULL)
return (true);
}
@@ -354,7 +368,7 @@ void
chunk_prefork(void)
{
- malloc_mutex_lock(&chunks_mtx);
+ malloc_mutex_prefork(&chunks_mtx);
if (config_ivsalloc)
rtree_prefork(chunks_rtree);
chunk_dss_prefork();
diff --git a/deps/jemalloc/src/chunk_dss.c b/deps/jemalloc/src/chunk_dss.c
index 24781cc52..510bb8bee 100644
--- a/deps/jemalloc/src/chunk_dss.c
+++ b/deps/jemalloc/src/chunk_dss.c
@@ -28,16 +28,17 @@ static void *dss_max;
/******************************************************************************/
-#ifndef JEMALLOC_HAVE_SBRK
static void *
-sbrk(intptr_t increment)
+chunk_dss_sbrk(intptr_t increment)
{
+#ifdef JEMALLOC_HAVE_SBRK
+ return (sbrk(increment));
+#else
not_implemented();
-
return (NULL);
-}
#endif
+}
dss_prec_t
chunk_dss_prec_get(void)
@@ -93,7 +94,7 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
*/
do {
/* Get the current end of the DSS. */
- dss_max = sbrk(0);
+ dss_max = chunk_dss_sbrk(0);
/*
* Calculate how much padding is necessary to
* chunk-align the end of the DSS.
@@ -117,7 +118,7 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
return (NULL);
}
incr = gap_size + cpad_size + size;
- dss_prev = sbrk(incr);
+ dss_prev = chunk_dss_sbrk(incr);
if (dss_prev == dss_max) {
/* Success. */
dss_max = dss_next;
@@ -163,7 +164,7 @@ chunk_dss_boot(void)
if (malloc_mutex_init(&dss_mtx))
return (true);
- dss_base = sbrk(0);
+ dss_base = chunk_dss_sbrk(0);
dss_prev = dss_base;
dss_max = dss_base;
diff --git a/deps/jemalloc/src/chunk_mmap.c b/deps/jemalloc/src/chunk_mmap.c
index 8a42e7591..2056d793f 100644
--- a/deps/jemalloc/src/chunk_mmap.c
+++ b/deps/jemalloc/src/chunk_mmap.c
@@ -43,7 +43,7 @@ pages_map(void *addr, size_t size)
if (munmap(ret, size) == -1) {
char buf[BUFERROR_BUF];
- buferror(buf, sizeof(buf));
+ buferror(get_errno(), buf, sizeof(buf));
malloc_printf("<jemalloc: Error in munmap(): %s\n",
buf);
if (opt_abort)
@@ -69,7 +69,7 @@ pages_unmap(void *addr, size_t size)
{
char buf[BUFERROR_BUF];
- buferror(buf, sizeof(buf));
+ buferror(get_errno(), buf, sizeof(buf));
malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
"VirtualFree"
diff --git a/deps/jemalloc/src/ckh.c b/deps/jemalloc/src/ckh.c
index 742a950be..04c529661 100644
--- a/deps/jemalloc/src/ckh.c
+++ b/deps/jemalloc/src/ckh.c
@@ -49,7 +49,7 @@ static void ckh_shrink(ckh_t *ckh);
* Search bucket for key and return the cell number if found; SIZE_T_MAX
* otherwise.
*/
-JEMALLOC_INLINE size_t
+JEMALLOC_INLINE_C size_t
ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
{
ckhc_t *cell;
@@ -67,28 +67,28 @@ ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
/*
* Search table for key and return cell number if found; SIZE_T_MAX otherwise.
*/
-JEMALLOC_INLINE size_t
+JEMALLOC_INLINE_C size_t
ckh_isearch(ckh_t *ckh, const void *key)
{
- size_t hash1, hash2, bucket, cell;
+ size_t hashes[2], bucket, cell;
assert(ckh != NULL);
- ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
+ ckh->hash(key, hashes);
/* Search primary bucket. */
- bucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
cell = ckh_bucket_search(ckh, bucket, key);
if (cell != SIZE_T_MAX)
return (cell);
/* Search secondary bucket. */
- bucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
cell = ckh_bucket_search(ckh, bucket, key);
return (cell);
}
-JEMALLOC_INLINE bool
+JEMALLOC_INLINE_C bool
ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
const void *data)
{
@@ -120,13 +120,13 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
* eviction/relocation procedure until either success or detection of an
* eviction/relocation bucket cycle.
*/
-JEMALLOC_INLINE bool
+JEMALLOC_INLINE_C bool
ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
void const **argdata)
{
const void *key, *data, *tkey, *tdata;
ckhc_t *cell;
- size_t hash1, hash2, bucket, tbucket;
+ size_t hashes[2], bucket, tbucket;
unsigned i;
bucket = argbucket;
@@ -155,10 +155,11 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
#endif
/* Find the alternate bucket for the evicted item. */
- ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
- tbucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ ckh->hash(key, hashes);
+ tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (tbucket == bucket) {
- tbucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets)
+ - 1);
/*
* It may be that (tbucket == bucket) still, if the
* item's hashes both indicate this bucket. However,
@@ -189,22 +190,22 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
}
}
-JEMALLOC_INLINE bool
+JEMALLOC_INLINE_C bool
ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
{
- size_t hash1, hash2, bucket;
+ size_t hashes[2], bucket;
const void *key = *argkey;
const void *data = *argdata;
- ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
+ ckh->hash(key, hashes);
/* Try to insert in primary bucket. */
- bucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
return (false);
/* Try to insert in secondary bucket. */
- bucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
return (false);
@@ -218,7 +219,7 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
* Try to rebuild the hash table from scratch by inserting all items from the
* old table into the new.
*/
-JEMALLOC_INLINE bool
+JEMALLOC_INLINE_C bool
ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
{
size_t count, i, nins;
@@ -417,9 +418,8 @@ ckh_delete(ckh_t *ckh)
#endif
idalloc(ckh->tab);
-#ifdef JEMALLOC_DEBUG
- memset(ckh, 0x5a, sizeof(ckh_t));
-#endif
+ if (config_debug)
+ memset(ckh, 0x5a, sizeof(ckh_t));
}
size_t
@@ -526,31 +526,10 @@ ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
}
void
-ckh_string_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2)
+ckh_string_hash(const void *key, size_t r_hash[2])
{
- size_t ret1, ret2;
- uint64_t h;
-
- assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
- assert(hash1 != NULL);
- assert(hash2 != NULL);
-
- h = hash(key, strlen((const char *)key), UINT64_C(0x94122f335b332aea));
- if (minbits <= 32) {
- /*
- * Avoid doing multiple hashes, since a single hash provides
- * enough bits.
- */
- ret1 = h & ZU(0xffffffffU);
- ret2 = h >> 32;
- } else {
- ret1 = h;
- ret2 = hash(key, strlen((const char *)key),
- UINT64_C(0x8432a476666bbc13));
- }
- *hash1 = ret1;
- *hash2 = ret2;
+ hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
}
bool
@@ -564,41 +543,16 @@ ckh_string_keycomp(const void *k1, const void *k2)
}
void
-ckh_pointer_hash(const void *key, unsigned minbits, size_t *hash1,
- size_t *hash2)
+ckh_pointer_hash(const void *key, size_t r_hash[2])
{
- size_t ret1, ret2;
- uint64_t h;
union {
const void *v;
- uint64_t i;
+ size_t i;
} u;
- assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
- assert(hash1 != NULL);
- assert(hash2 != NULL);
-
assert(sizeof(u.v) == sizeof(u.i));
-#if (LG_SIZEOF_PTR != LG_SIZEOF_INT)
- u.i = 0;
-#endif
u.v = key;
- h = hash(&u.i, sizeof(u.i), UINT64_C(0xd983396e68886082));
- if (minbits <= 32) {
- /*
- * Avoid doing multiple hashes, since a single hash provides
- * enough bits.
- */
- ret1 = h & ZU(0xffffffffU);
- ret2 = h >> 32;
- } else {
- assert(SIZEOF_PTR == 8);
- ret1 = h;
- ret2 = hash(&u.i, sizeof(u.i), UINT64_C(0x5e2be9aff8709a5d));
- }
-
- *hash1 = ret1;
- *hash2 = ret2;
+ hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash);
}
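The cuckoo-hash (ckh) callbacks now return both hash values through a single output array, size_t r_hash[2], instead of separate hash1/hash2 pointers; the primary and secondary bucket indices are just r_hash[0] and r_hash[1] masked down to the table size. A self-contained sketch of the new callback shape; the 0x94122f33U seed is the one used by ckh_string_hash() above, but the 64-bit mixer below is only a stand-in for jemalloc's real hash() so the example stays runnable on its own:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Toy 64-bit mixer (splitmix64-style finalizer); not jemalloc's hash. */
static uint64_t
mix64(uint64_t x)
{

	x += 0x9e3779b97f4a7c15ULL;
	x = (x ^ (x >> 30)) * 0xbf58476d1ce4e5b9ULL;
	x = (x ^ (x >> 27)) * 0x94d049bb133111ebULL;
	return (x ^ (x >> 31));
}

/* New-style callback: both hashes come back through r_hash[2]. */
static void
string_hash(const void *key, size_t r_hash[2])
{
	const char *s = key;
	uint64_t h = 0x94122f33U;	/* seed, as in ckh_string_hash() */

	while (*s != '\0')
		h = mix64(h ^ (uint8_t)*s++);
	r_hash[0] = (size_t)h;
	r_hash[1] = (size_t)mix64(h);	/* second, independent-ish hash */
}

int
main(void)
{
	size_t hashes[2], lg_buckets = 10;

	string_hash("redis", hashes);
	printf("primary bucket   %zu\n", hashes[0] & ((1U << lg_buckets) - 1));
	printf("secondary bucket %zu\n", hashes[1] & ((1U << lg_buckets) - 1));
	return (0);
}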
bool
diff --git a/deps/jemalloc/src/ctl.c b/deps/jemalloc/src/ctl.c
index 6e01b1e27..cc2c5aef5 100644
--- a/deps/jemalloc/src/ctl.c
+++ b/deps/jemalloc/src/ctl.c
@@ -546,43 +546,30 @@ ctl_arena_refresh(arena_t *arena, unsigned i)
static bool
ctl_grow(void)
{
- size_t astats_size;
ctl_arena_stats_t *astats;
arena_t **tarenas;
- /* Extend arena stats and arenas arrays. */
- astats_size = (ctl_stats.narenas + 2) * sizeof(ctl_arena_stats_t);
- if (ctl_stats.narenas == narenas_auto) {
- /* ctl_stats.arenas and arenas came from base_alloc(). */
- astats = (ctl_arena_stats_t *)imalloc(astats_size);
- if (astats == NULL)
- return (true);
- memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
- sizeof(ctl_arena_stats_t));
-
- tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) *
- sizeof(arena_t *));
- if (tarenas == NULL) {
- idalloc(astats);
- return (true);
- }
- memcpy(tarenas, arenas, ctl_stats.narenas * sizeof(arena_t *));
- } else {
- astats = (ctl_arena_stats_t *)iralloc(ctl_stats.arenas,
- astats_size, 0, 0, false, false);
- if (astats == NULL)
- return (true);
-
- tarenas = (arena_t **)iralloc(arenas, (ctl_stats.narenas + 1) *
- sizeof(arena_t *), 0, 0, false, false);
- if (tarenas == NULL)
- return (true);
+ /* Allocate extended arena stats and arenas arrays. */
+ astats = (ctl_arena_stats_t *)imalloc((ctl_stats.narenas + 2) *
+ sizeof(ctl_arena_stats_t));
+ if (astats == NULL)
+ return (true);
+ tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) *
+ sizeof(arena_t *));
+ if (tarenas == NULL) {
+ idalloc(astats);
+ return (true);
}
- /* Initialize the new astats and arenas elements. */
+
+ /* Initialize the new astats element. */
+ memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
+ sizeof(ctl_arena_stats_t));
memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
- if (ctl_arena_init(&astats[ctl_stats.narenas + 1]))
+ if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) {
+ idalloc(tarenas);
+ idalloc(astats);
return (true);
- tarenas[ctl_stats.narenas] = NULL;
+ }
/* Swap merged stats to their new location. */
{
ctl_arena_stats_t tstats;
@@ -593,13 +580,34 @@ ctl_grow(void)
memcpy(&astats[ctl_stats.narenas + 1], &tstats,
sizeof(ctl_arena_stats_t));
}
+ /* Initialize the new arenas element. */
+ tarenas[ctl_stats.narenas] = NULL;
+ {
+ arena_t **arenas_old = arenas;
+ /*
+ * Swap extended arenas array into place. Although ctl_mtx
+ * protects this function from other threads extending the
+ * array, it does not protect from other threads mutating it
+ * (i.e. initializing arenas and setting array elements to
+ * point to them). Therefore, array copying must happen under
+ * the protection of arenas_lock.
+ */
+ malloc_mutex_lock(&arenas_lock);
+ arenas = tarenas;
+ memcpy(arenas, arenas_old, ctl_stats.narenas *
+ sizeof(arena_t *));
+ narenas_total++;
+ arenas_extend(narenas_total - 1);
+ malloc_mutex_unlock(&arenas_lock);
+ /*
+ * Deallocate arenas_old only if it came from imalloc() (not
+ * base_alloc()).
+ */
+ if (ctl_stats.narenas != narenas_auto)
+ idalloc(arenas_old);
+ }
ctl_stats.arenas = astats;
ctl_stats.narenas++;
- malloc_mutex_lock(&arenas_lock);
- arenas = tarenas;
- narenas_total++;
- arenas_extend(narenas_total - 1);
- malloc_mutex_unlock(&arenas_lock);
return (false);
}
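The reworked ctl_grow() allocates the larger arrays up front, but copies the live arenas array and publishes the new one only while holding arenas_lock, since other threads may still be writing elements (initializing arenas) even though ctl_mtx prevents them from also growing it. A reduced, self-contained sketch of that grow-under-two-locks pattern; the table names and helper are invented for the example:

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t grow_mtx  = PTHREAD_MUTEX_INITIALIZER; /* one grower */
static pthread_mutex_t table_mtx = PTHREAD_MUTEX_INITIALIZER; /* writers    */

static void  **table;
static size_t  table_len;

/* Returns nonzero on allocation failure, matching ctl_grow()'s bool. */
int
table_grow(void)
{
	void **new_table, **old_table;
	int ret = 1;

	pthread_mutex_lock(&grow_mtx);
	new_table = malloc((table_len + 1) * sizeof(void *));
	if (new_table == NULL)
		goto label_return;

	/*
	 * Element writes by other threads are protected by table_mtx, so the
	 * copy and the pointer swap must happen under that lock too.
	 */
	pthread_mutex_lock(&table_mtx);
	if (table_len != 0)
		memcpy(new_table, table, table_len * sizeof(void *));
	new_table[table_len] = NULL;
	old_table = table;
	table = new_table;
	table_len++;
	pthread_mutex_unlock(&table_mtx);

	free(old_table);	/* the old array is unreachable now */
	ret = 0;
label_return:
	pthread_mutex_unlock(&grow_mtx);
	return (ret);
}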
@@ -921,7 +929,7 @@ void
ctl_prefork(void)
{
- malloc_mutex_lock(&ctl_mtx);
+ malloc_mutex_prefork(&ctl_mtx);
}
void
@@ -960,11 +968,11 @@ ctl_postfork_child(void)
if (*oldlenp != sizeof(t)) { \
size_t copylen = (sizeof(t) <= *oldlenp) \
? sizeof(t) : *oldlenp; \
- memcpy(oldp, (void *)&v, copylen); \
+ memcpy(oldp, (void *)&(v), copylen); \
ret = EINVAL; \
goto label_return; \
} else \
- *(t *)oldp = v; \
+ *(t *)oldp = (v); \
} \
} while (0)
@@ -974,7 +982,7 @@ ctl_postfork_child(void)
ret = EINVAL; \
goto label_return; \
} \
- v = *(t *)newp; \
+ (v) = *(t *)newp; \
} \
} while (0)
@@ -995,7 +1003,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
if (l) \
malloc_mutex_lock(&ctl_mtx); \
READONLY(); \
- oldval = v; \
+ oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
@@ -1017,7 +1025,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
return (ENOENT); \
malloc_mutex_lock(&ctl_mtx); \
READONLY(); \
- oldval = v; \
+ oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
@@ -1036,7 +1044,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
\
malloc_mutex_lock(&ctl_mtx); \
READONLY(); \
- oldval = v; \
+ oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
@@ -1060,7 +1068,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
if ((c) == false) \
return (ENOENT); \
READONLY(); \
- oldval = v; \
+ oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
@@ -1077,7 +1085,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
t oldval; \
\
READONLY(); \
- oldval = v; \
+ oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
@@ -1102,6 +1110,8 @@ label_return: \
return (ret); \
}
+/******************************************************************************/
+
CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
static int
@@ -1109,7 +1119,7 @@ epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
- uint64_t newval;
+ UNUSED uint64_t newval;
malloc_mutex_lock(&ctl_mtx);
WRITE(newval, uint64_t);
@@ -1123,49 +1133,52 @@ label_return:
return (ret);
}
-static int
-thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
- int ret;
- bool oldval;
-
- if (config_tcache == false)
- return (ENOENT);
-
- oldval = tcache_enabled_get();
- if (newp != NULL) {
- if (newlen != sizeof(bool)) {
- ret = EINVAL;
- goto label_return;
- }
- tcache_enabled_set(*(bool *)newp);
- }
- READ(oldval, bool);
-
- ret = 0;
-label_return:
- return (ret);
-}
-
-static int
-thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
- int ret;
+/******************************************************************************/
- if (config_tcache == false)
- return (ENOENT);
+CTL_RO_BOOL_CONFIG_GEN(config_debug)
+CTL_RO_BOOL_CONFIG_GEN(config_dss)
+CTL_RO_BOOL_CONFIG_GEN(config_fill)
+CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
+CTL_RO_BOOL_CONFIG_GEN(config_mremap)
+CTL_RO_BOOL_CONFIG_GEN(config_munmap)
+CTL_RO_BOOL_CONFIG_GEN(config_prof)
+CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
+CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind)
+CTL_RO_BOOL_CONFIG_GEN(config_stats)
+CTL_RO_BOOL_CONFIG_GEN(config_tcache)
+CTL_RO_BOOL_CONFIG_GEN(config_tls)
+CTL_RO_BOOL_CONFIG_GEN(config_utrace)
+CTL_RO_BOOL_CONFIG_GEN(config_valgrind)
+CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)
- READONLY();
- WRITEONLY();
+/******************************************************************************/
- tcache_flush();
+CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
+CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
+CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
+CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
+CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
+CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
+CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool)
+CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
+CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
+CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
+CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
+CTL_RO_NL_CGEN(config_valgrind, opt_valgrind, opt_valgrind, bool)
+CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
+CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
+CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
+CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
+CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */
+CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
+CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
+CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
+CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
- ret = 0;
-label_return:
- return (ret);
-}
+/******************************************************************************/
static int
thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
@@ -1227,50 +1240,49 @@ CTL_RO_NL_CGEN(config_stats, thread_deallocated,
CTL_RO_NL_CGEN(config_stats, thread_deallocatedp,
&thread_allocated_tsd_get()->deallocated, uint64_t *)
-/******************************************************************************/
+static int
+thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ bool oldval;
-CTL_RO_BOOL_CONFIG_GEN(config_debug)
-CTL_RO_BOOL_CONFIG_GEN(config_dss)
-CTL_RO_BOOL_CONFIG_GEN(config_fill)
-CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
-CTL_RO_BOOL_CONFIG_GEN(config_mremap)
-CTL_RO_BOOL_CONFIG_GEN(config_munmap)
-CTL_RO_BOOL_CONFIG_GEN(config_prof)
-CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
-CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind)
-CTL_RO_BOOL_CONFIG_GEN(config_stats)
-CTL_RO_BOOL_CONFIG_GEN(config_tcache)
-CTL_RO_BOOL_CONFIG_GEN(config_tls)
-CTL_RO_BOOL_CONFIG_GEN(config_utrace)
-CTL_RO_BOOL_CONFIG_GEN(config_valgrind)
-CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)
+ if (config_tcache == false)
+ return (ENOENT);
-/******************************************************************************/
+ oldval = tcache_enabled_get();
+ if (newp != NULL) {
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ tcache_enabled_set(*(bool *)newp);
+ }
+ READ(oldval, bool);
-CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
-CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
-CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
-CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
-CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
-CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
-CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool)
-CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
-CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
-CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
-CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
-CTL_RO_NL_CGEN(config_valgrind, opt_valgrind, opt_valgrind, bool)
-CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
-CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
-CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
-CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
-CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
-CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */
-CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
-CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
-CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
-CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
-CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
-CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+
+ if (config_tcache == false)
+ return (ENOENT);
+
+ READONLY();
+ WRITEONLY();
+
+ tcache_flush();
+
+ ret = 0;
+label_return:
+ return (ret);
+}
/******************************************************************************/
@@ -1382,31 +1394,8 @@ label_return:
return (ret);
}
-
/******************************************************************************/
-CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
-CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
-CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
-static const ctl_named_node_t *
-arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
-{
-
- if (i > NBINS)
- return (NULL);
- return (super_arenas_bin_i_node);
-}
-
-CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t)
-static const ctl_named_node_t *
-arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
-{
-
- if (i > nlclasses)
- return (NULL);
- return (super_arenas_lrun_i_node);
-}
-
static int
arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
@@ -1460,7 +1449,28 @@ CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned)
+CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
+CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
+CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
+static const ctl_named_node_t *
+arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
+{
+
+ if (i > NBINS)
+ return (NULL);
+ return (super_arenas_bin_i_node);
+}
+
CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t)
+CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t)
+static const ctl_named_node_t *
+arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
+{
+
+ if (i > nlclasses)
+ return (NULL);
+ return (super_arenas_lrun_i_node);
+}
static int
arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
@@ -1492,6 +1502,7 @@ arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
+ unsigned narenas;
malloc_mutex_lock(&ctl_mtx);
READONLY();
@@ -1499,7 +1510,8 @@ arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
ret = EAGAIN;
goto label_return;
}
- READ(ctl_stats.narenas - 1, unsigned);
+ narenas = ctl_stats.narenas - 1;
+ READ(narenas, unsigned);
ret = 0;
label_return:
@@ -1565,6 +1577,11 @@ CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
/******************************************************************************/
+CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
+CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
+CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
+CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
+
CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current,
size_t)
CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t)
@@ -1572,6 +1589,20 @@ CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t)
CTL_RO_CGEN(config_stats, stats_huge_allocated, huge_allocated, size_t)
CTL_RO_CGEN(config_stats, stats_huge_nmalloc, huge_nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_huge_ndalloc, huge_ndalloc, uint64_t)
+
+CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
+CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
+CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
+CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
+ ctl_stats.arenas[mib[2]].astats.mapped, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
+ ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
+ ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
+ ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
+
CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
ctl_stats.arenas[mib[2]].allocated_small, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
@@ -1635,19 +1666,6 @@ stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
return (super_stats_arenas_i_lruns_j_node);
}
-CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
-CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
-CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
-CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
- ctl_stats.arenas[mib[2]].astats.mapped, size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
- ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
- ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
- ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
-
static const ctl_named_node_t *
stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
{
@@ -1664,8 +1682,3 @@ label_return:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
-
-CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
-CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
-CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
-CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
diff --git a/deps/jemalloc/src/huge.c b/deps/jemalloc/src/huge.c
index aa08d43d3..d72f21357 100644
--- a/deps/jemalloc/src/huge.c
+++ b/deps/jemalloc/src/huge.c
@@ -16,14 +16,14 @@ malloc_mutex_t huge_mtx;
static extent_tree_t huge;
void *
-huge_malloc(size_t size, bool zero)
+huge_malloc(size_t size, bool zero, dss_prec_t dss_prec)
{
- return (huge_palloc(size, chunksize, zero));
+ return (huge_palloc(size, chunksize, zero, dss_prec));
}
void *
-huge_palloc(size_t size, size_t alignment, bool zero)
+huge_palloc(size_t size, size_t alignment, bool zero, dss_prec_t dss_prec)
{
void *ret;
size_t csize;
@@ -48,8 +48,7 @@ huge_palloc(size_t size, size_t alignment, bool zero)
* it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed = zero;
- ret = chunk_alloc(csize, alignment, false, &is_zeroed,
- chunk_dss_prec_get());
+ ret = chunk_alloc(csize, alignment, false, &is_zeroed, dss_prec);
if (ret == NULL) {
base_node_dealloc(node);
return (NULL);
@@ -78,7 +77,7 @@ huge_palloc(size_t size, size_t alignment, bool zero)
return (ret);
}
-void *
+bool
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{
@@ -89,28 +88,23 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
&& CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
&& CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
assert(CHUNK_CEILING(oldsize) == oldsize);
- if (config_fill && opt_junk && size < oldsize) {
- memset((void *)((uintptr_t)ptr + size), 0x5a,
- oldsize - size);
- }
- return (ptr);
+ return (false);
}
/* Reallocation would require a move. */
- return (NULL);
+ return (true);
}
void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
- size_t alignment, bool zero, bool try_tcache_dalloc)
+ size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec)
{
void *ret;
size_t copysize;
/* Try to avoid moving the allocation. */
- ret = huge_ralloc_no_move(ptr, oldsize, size, extra);
- if (ret != NULL)
- return (ret);
+ if (huge_ralloc_no_move(ptr, oldsize, size, extra) == false)
+ return (ptr);
/*
* size and oldsize are different enough that we need to use a
@@ -118,18 +112,18 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
* space and copying.
*/
if (alignment > chunksize)
- ret = huge_palloc(size + extra, alignment, zero);
+ ret = huge_palloc(size + extra, alignment, zero, dss_prec);
else
- ret = huge_malloc(size + extra, zero);
+ ret = huge_malloc(size + extra, zero, dss_prec);
if (ret == NULL) {
if (extra == 0)
return (NULL);
/* Try again, this time without extra. */
if (alignment > chunksize)
- ret = huge_palloc(size, alignment, zero);
+ ret = huge_palloc(size, alignment, zero, dss_prec);
else
- ret = huge_malloc(size, zero);
+ ret = huge_malloc(size, zero, dss_prec);
if (ret == NULL)
return (NULL);
@@ -169,23 +163,56 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
*/
char buf[BUFERROR_BUF];
- buferror(buf, sizeof(buf));
+ buferror(get_errno(), buf, sizeof(buf));
malloc_printf("<jemalloc>: Error in mremap(): %s\n",
buf);
if (opt_abort)
abort();
memcpy(ret, ptr, copysize);
chunk_dealloc_mmap(ptr, oldsize);
+ } else if (config_fill && zero == false && opt_junk && oldsize
+ < newsize) {
+ /*
+ * mremap(2) clobbers the original mapping, so
+ * junk/zero filling is not preserved. There is no
+ * need to zero fill here, since any trailing
+			 * uninitialized memory is demand-zeroed by the
+ * kernel, but junk filling must be redone.
+ */
+ memset(ret + oldsize, 0xa5, newsize - oldsize);
}
} else
#endif
{
memcpy(ret, ptr, copysize);
- iqallocx(ptr, try_tcache_dalloc);
+ iqalloct(ptr, try_tcache_dalloc);
}
return (ret);
}
+#ifdef JEMALLOC_JET
+#undef huge_dalloc_junk
+#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
+#endif
+static void
+huge_dalloc_junk(void *ptr, size_t usize)
+{
+
+ if (config_fill && config_dss && opt_junk) {
+ /*
+ * Only bother junk filling if the chunk isn't about to be
+ * unmapped.
+ */
+ if (config_munmap == false || (config_dss && chunk_in_dss(ptr)))
+ memset(ptr, 0x5a, usize);
+ }
+}
+#ifdef JEMALLOC_JET
+#undef huge_dalloc_junk
+#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
+huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
+#endif
+
void
huge_dalloc(void *ptr, bool unmap)
{
@@ -208,8 +235,8 @@ huge_dalloc(void *ptr, bool unmap)
malloc_mutex_unlock(&huge_mtx);
- if (unmap && config_fill && config_dss && opt_junk)
- memset(node->addr, 0x5a, node->size);
+ if (unmap)
+ huge_dalloc_junk(node->addr, node->size);
chunk_dealloc(node->addr, node->size, unmap);
@@ -236,6 +263,13 @@ huge_salloc(const void *ptr)
return (ret);
}
+dss_prec_t
+huge_dss_prec_get(arena_t *arena)
+{
+
+ return (arena_dss_prec_get(choose_arena(arena)));
+}
+
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
diff --git a/deps/jemalloc/src/jemalloc.c b/deps/jemalloc/src/jemalloc.c
index 8a667b62e..204778bc8 100644
--- a/deps/jemalloc/src/jemalloc.c
+++ b/deps/jemalloc/src/jemalloc.c
@@ -10,17 +10,20 @@ malloc_tsd_data(, thread_allocated, thread_allocated_t,
/* Runtime configuration options. */
const char *je_malloc_conf;
+bool opt_abort =
#ifdef JEMALLOC_DEBUG
-bool opt_abort = true;
-# ifdef JEMALLOC_FILL
-bool opt_junk = true;
-# else
-bool opt_junk = false;
-# endif
+ true
#else
-bool opt_abort = false;
-bool opt_junk = false;
+ false
#endif
+ ;
+bool opt_junk =
+#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
+ true
+#else
+ false
+#endif
+ ;
size_t opt_quarantine = ZU(0);
bool opt_redzone = false;
bool opt_utrace = false;
@@ -83,11 +86,13 @@ typedef struct {
#ifdef JEMALLOC_UTRACE
# define UTRACE(a, b, c) do { \
if (opt_utrace) { \
+ int utrace_serrno = errno; \
malloc_utrace_t ut; \
ut.p = (a); \
ut.s = (b); \
ut.r = (c); \
utrace(&ut, sizeof(ut)); \
+ errno = utrace_serrno; \
} \
} while (0)
#else
@@ -95,18 +100,12 @@ typedef struct {
#endif
/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static void stats_print_atexit(void);
-static unsigned malloc_ncpus(void);
-static bool malloc_conf_next(char const **opts_p, char const **k_p,
- size_t *klen_p, char const **v_p, size_t *vlen_p);
-static void malloc_conf_error(const char *msg, const char *k, size_t klen,
- const char *v, size_t vlen);
-static void malloc_conf_init(void);
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
+
static bool malloc_init_hard(void);
-static int imemalign(void **memptr, size_t alignment, size_t size,
- size_t min_alignment);
/******************************************************************************/
/*
@@ -247,7 +246,6 @@ stats_print_atexit(void)
static unsigned
malloc_ncpus(void)
{
- unsigned ret;
long result;
#ifdef _WIN32
@@ -257,14 +255,7 @@ malloc_ncpus(void)
#else
result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
- if (result == -1) {
- /* Error. */
- ret = 1;
- } else {
- ret = (unsigned)result;
- }
-
- return (ret);
+ return ((result == -1) ? 1 : (unsigned)result);
}
void
@@ -277,12 +268,30 @@ arenas_cleanup(void *arg)
malloc_mutex_unlock(&arenas_lock);
}
-static inline bool
+JEMALLOC_ALWAYS_INLINE_C void
+malloc_thread_init(void)
+{
+
+ /*
+ * TSD initialization can't be safely done as a side effect of
+ * deallocation, because it is possible for a thread to do nothing but
+ * deallocate its TLS data via free(), in which case writing to TLS
+ * would cause write-after-free memory corruption. The quarantine
+ * facility *only* gets used as a side effect of deallocation, so make
+ * a best effort attempt at initializing its TSD by hooking all
+ * allocation events.
+ */
+ if (config_fill && opt_quarantine)
+ quarantine_alloc_hook();
+}
+
+JEMALLOC_ALWAYS_INLINE_C bool
malloc_init(void)
{
- if (malloc_initialized == false)
- return (malloc_init_hard());
+ if (malloc_initialized == false && malloc_init_hard())
+ return (true);
+ malloc_thread_init();
return (false);
}
@@ -413,8 +422,9 @@ malloc_conf_init(void)
}
break;
case 1: {
+ int linklen = 0;
#ifndef _WIN32
- int linklen;
+ int saved_errno = errno;
const char *linkname =
# ifdef JEMALLOC_PREFIX
"/etc/"JEMALLOC_PREFIX"malloc.conf"
@@ -423,21 +433,20 @@ malloc_conf_init(void)
# endif
;
- if ((linklen = readlink(linkname, buf,
- sizeof(buf) - 1)) != -1) {
- /*
- * Use the contents of the "/etc/malloc.conf"
- * symbolic link's name.
- */
- buf[linklen] = '\0';
- opts = buf;
- } else
-#endif
- {
+ /*
+ * Try to use the contents of the "/etc/malloc.conf"
+ * symbolic link's name.
+ */
+ linklen = readlink(linkname, buf, sizeof(buf) - 1);
+ if (linklen == -1) {
/* No configuration specified. */
- buf[0] = '\0';
- opts = buf;
+ linklen = 0;
+ /* restore errno */
+ set_errno(saved_errno);
}
+#endif
+ buf[linklen] = '\0';
+ opts = buf;
break;
} case 2: {
const char *envname =
@@ -461,15 +470,14 @@ malloc_conf_init(void)
}
break;
} default:
- /* NOTREACHED */
- assert(false);
+ not_reached();
buf[0] = '\0';
opts = buf;
}
while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
&vlen) == false) {
-#define CONF_HANDLE_BOOL_HIT(o, n, hit) \
+#define CONF_HANDLE_BOOL(o, n) \
if (sizeof(n)-1 == klen && strncmp(n, k, \
klen) == 0) { \
if (strncmp("true", v, vlen) == 0 && \
@@ -483,16 +491,9 @@ malloc_conf_init(void)
"Invalid conf value", \
k, klen, v, vlen); \
} \
- hit = true; \
- } else \
- hit = false;
-#define CONF_HANDLE_BOOL(o, n) { \
- bool hit; \
- CONF_HANDLE_BOOL_HIT(o, n, hit); \
- if (hit) \
continue; \
-}
-#define CONF_HANDLE_SIZE_T(o, n, min, max) \
+ }
+#define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \
if (sizeof(n)-1 == klen && strncmp(n, k, \
klen) == 0) { \
uintmax_t um; \
@@ -505,12 +506,23 @@ malloc_conf_init(void)
malloc_conf_error( \
"Invalid conf value", \
k, klen, v, vlen); \
- } else if (um < min || um > max) { \
- malloc_conf_error( \
- "Out-of-range conf value", \
- k, klen, v, vlen); \
- } else \
- o = um; \
+ } else if (clip) { \
+ if (min != 0 && um < min) \
+ o = min; \
+ else if (um > max) \
+ o = max; \
+ else \
+ o = um; \
+ } else { \
+ if ((min != 0 && um < min) || \
+ um > max) { \
+ malloc_conf_error( \
+ "Out-of-range " \
+ "conf value", \
+ k, klen, v, vlen); \
+ } else \
+ o = um; \
+ } \
continue; \
}
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
@@ -555,7 +567,8 @@ malloc_conf_init(void)
* config_fill.
*/
CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
- (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1)
+ (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1,
+ true)
if (strncmp("dss", k, klen) == 0) {
int i;
bool match = false;
@@ -581,14 +594,14 @@ malloc_conf_init(void)
continue;
}
CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
- SIZE_T_MAX)
+ SIZE_T_MAX, false)
CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
-1, (sizeof(size_t) << 3) - 1)
CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
if (config_fill) {
CONF_HANDLE_BOOL(opt_junk, "junk")
CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
- 0, SIZE_T_MAX)
+ 0, SIZE_T_MAX, false)
CONF_HANDLE_BOOL(opt_redzone, "redzone")
CONF_HANDLE_BOOL(opt_zero, "zero")
}
@@ -668,17 +681,6 @@ malloc_init_hard(void)
malloc_conf_init();
-#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
- && !defined(_WIN32))
- /* Register fork handlers. */
- if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
- jemalloc_postfork_child) != 0) {
- malloc_write("<jemalloc>: Error in pthread_atfork()\n");
- if (opt_abort)
- abort();
- }
-#endif
-
if (opt_stats_print) {
/* Print statistics at exit. */
if (atexit(stats_print_atexit) != 0) {
@@ -718,8 +720,10 @@ malloc_init_hard(void)
return (true);
}
- if (malloc_mutex_init(&arenas_lock))
+ if (malloc_mutex_init(&arenas_lock)) {
+ malloc_mutex_unlock(&init_lock);
return (true);
+ }
/*
* Create enough scaffolding to allow recursive allocation in
@@ -765,9 +769,25 @@ malloc_init_hard(void)
return (true);
}
- /* Get number of CPUs. */
malloc_mutex_unlock(&init_lock);
+ /**********************************************************************/
+ /* Recursive allocation may follow. */
+
ncpus = malloc_ncpus();
+
+#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
+ && !defined(_WIN32))
+ /* LinuxThreads's pthread_atfork() allocates. */
+ if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
+ jemalloc_postfork_child) != 0) {
+ malloc_write("<jemalloc>: Error in pthread_atfork()\n");
+ if (opt_abort)
+ abort();
+ }
+#endif
+
+ /* Done recursively allocating. */
+ /**********************************************************************/
malloc_mutex_lock(&init_lock);
if (mutex_boot()) {
@@ -814,6 +834,7 @@ malloc_init_hard(void)
malloc_initialized = true;
malloc_mutex_unlock(&init_lock);
+
return (false);
}
@@ -825,42 +846,88 @@ malloc_init_hard(void)
* Begin malloc(3)-compatible functions.
*/
+static void *
+imalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt)
+{
+ void *p;
+
+ if (cnt == NULL)
+ return (NULL);
+ if (prof_promote && usize <= SMALL_MAXCLASS) {
+ p = imalloc(SMALL_MAXCLASS+1);
+ if (p == NULL)
+ return (NULL);
+ arena_prof_promoted(p, usize);
+ } else
+ p = imalloc(usize);
+
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+imalloc_prof(size_t usize, prof_thr_cnt_t *cnt)
+{
+ void *p;
+
+ if ((uintptr_t)cnt != (uintptr_t)1U)
+ p = imalloc_prof_sample(usize, cnt);
+ else
+ p = imalloc(usize);
+ if (p == NULL)
+ return (NULL);
+ prof_malloc(p, usize, cnt);
+
+ return (p);
+}
+
+/*
+ * MALLOC_BODY() is a macro rather than a function because its contents are in
+ * the fast path, but inlining would cause reliability issues when determining
+ * how many frames to discard from heap profiling backtraces.
+ */
+#define MALLOC_BODY(ret, size, usize) do { \
+ if (malloc_init()) \
+ ret = NULL; \
+ else { \
+ if (config_prof && opt_prof) { \
+ prof_thr_cnt_t *cnt; \
+ \
+ usize = s2u(size); \
+ /* \
+ * Call PROF_ALLOC_PREP() here rather than in \
+ * imalloc_prof() so that imalloc_prof() can be \
+ * inlined without introducing uncertainty \
+ * about the number of backtrace frames to \
+ * ignore. imalloc_prof() is in the fast path \
+ * when heap profiling is enabled, so inlining \
+ * is critical to performance. (For \
+ * consistency all callers of PROF_ALLOC_PREP() \
+ * are structured similarly, even though e.g. \
+ * realloc() isn't called enough for inlining \
+ * to be critical.) \
+ */ \
+ PROF_ALLOC_PREP(1, usize, cnt); \
+ ret = imalloc_prof(usize, cnt); \
+ } else { \
+ if (config_stats || (config_valgrind && \
+ opt_valgrind)) \
+ usize = s2u(size); \
+ ret = imalloc(size); \
+ } \
+ } \
+} while (0)
+
void *
je_malloc(size_t size)
{
void *ret;
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
- prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
-
- if (malloc_init()) {
- ret = NULL;
- goto label_oom;
- }
if (size == 0)
size = 1;
- if (config_prof && opt_prof) {
- usize = s2u(size);
- PROF_ALLOC_PREP(1, usize, cnt);
- if (cnt == NULL) {
- ret = NULL;
- goto label_oom;
- }
- if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
- SMALL_MAXCLASS) {
- ret = imalloc(SMALL_MAXCLASS+1);
- if (ret != NULL)
- arena_prof_promoted(ret, usize);
- } else
- ret = imalloc(size);
- } else {
- if (config_stats || (config_valgrind && opt_valgrind))
- usize = s2u(size);
- ret = imalloc(size);
- }
+ MALLOC_BODY(ret, size, usize);
-label_oom:
if (ret == NULL) {
if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in malloc(): "
@@ -869,8 +936,6 @@ label_oom:
}
set_errno(ENOMEM);
}
- if (config_prof && opt_prof && ret != NULL)
- prof_malloc(ret, usize, cnt);
if (config_stats && ret != NULL) {
assert(usize == isalloc(ret, config_prof));
thread_allocated_tsd_get()->allocated += usize;
@@ -880,28 +945,63 @@ label_oom:
return (ret);
}
+static void *
+imemalign_prof_sample(size_t alignment, size_t usize, prof_thr_cnt_t *cnt)
+{
+ void *p;
+
+ if (cnt == NULL)
+ return (NULL);
+ if (prof_promote && usize <= SMALL_MAXCLASS) {
+ assert(sa2u(SMALL_MAXCLASS+1, alignment) != 0);
+ p = ipalloc(sa2u(SMALL_MAXCLASS+1, alignment), alignment,
+ false);
+ if (p == NULL)
+ return (NULL);
+ arena_prof_promoted(p, usize);
+ } else
+ p = ipalloc(usize, alignment, false);
+
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+imemalign_prof(size_t alignment, size_t usize, prof_thr_cnt_t *cnt)
+{
+ void *p;
+
+ if ((uintptr_t)cnt != (uintptr_t)1U)
+ p = imemalign_prof_sample(alignment, usize, cnt);
+ else
+ p = ipalloc(usize, alignment, false);
+ if (p == NULL)
+ return (NULL);
+ prof_malloc(p, usize, cnt);
+
+ return (p);
+}
+
JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
* Avoid any uncertainty as to how many backtrace frames to ignore in
* PROF_ALLOC_PREP().
*/
-JEMALLOC_ATTR(noinline)
+JEMALLOC_NOINLINE
#endif
static int
-imemalign(void **memptr, size_t alignment, size_t size,
- size_t min_alignment)
+imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
{
int ret;
size_t usize;
void *result;
- prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
assert(min_alignment != 0);
- if (malloc_init())
+ if (malloc_init()) {
result = NULL;
- else {
+ goto label_oom;
+ } else {
if (size == 0)
size = 1;
@@ -921,57 +1021,38 @@ imemalign(void **memptr, size_t alignment, size_t size,
usize = sa2u(size, alignment);
if (usize == 0) {
result = NULL;
- ret = ENOMEM;
- goto label_return;
+ goto label_oom;
}
if (config_prof && opt_prof) {
+ prof_thr_cnt_t *cnt;
+
PROF_ALLOC_PREP(2, usize, cnt);
- if (cnt == NULL) {
- result = NULL;
- ret = EINVAL;
- } else {
- if (prof_promote && (uintptr_t)cnt !=
- (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
- assert(sa2u(SMALL_MAXCLASS+1,
- alignment) != 0);
- result = ipalloc(sa2u(SMALL_MAXCLASS+1,
- alignment), alignment, false);
- if (result != NULL) {
- arena_prof_promoted(result,
- usize);
- }
- } else {
- result = ipalloc(usize, alignment,
- false);
- }
- }
+ result = imemalign_prof(alignment, usize, cnt);
} else
result = ipalloc(usize, alignment, false);
- }
-
- if (result == NULL) {
- if (config_xmalloc && opt_xmalloc) {
- malloc_write("<jemalloc>: Error allocating aligned "
- "memory: out of memory\n");
- abort();
- }
- ret = ENOMEM;
- goto label_return;
+ if (result == NULL)
+ goto label_oom;
}
*memptr = result;
ret = 0;
-
label_return:
if (config_stats && result != NULL) {
assert(usize == isalloc(result, config_prof));
thread_allocated_tsd_get()->allocated += usize;
}
- if (config_prof && opt_prof && result != NULL)
- prof_malloc(result, usize, cnt);
UTRACE(0, size, result);
return (ret);
+label_oom:
+ assert(result == NULL);
+ if (config_xmalloc && opt_xmalloc) {
+ malloc_write("<jemalloc>: Error allocating aligned memory: "
+ "out of memory\n");
+ abort();
+ }
+ ret = ENOMEM;
+ goto label_return;
}
int
@@ -998,13 +1079,46 @@ je_aligned_alloc(size_t alignment, size_t size)
return (ret);
}
+static void *
+icalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt)
+{
+ void *p;
+
+ if (cnt == NULL)
+ return (NULL);
+ if (prof_promote && usize <= SMALL_MAXCLASS) {
+ p = icalloc(SMALL_MAXCLASS+1);
+ if (p == NULL)
+ return (NULL);
+ arena_prof_promoted(p, usize);
+ } else
+ p = icalloc(usize);
+
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+icalloc_prof(size_t usize, prof_thr_cnt_t *cnt)
+{
+ void *p;
+
+ if ((uintptr_t)cnt != (uintptr_t)1U)
+ p = icalloc_prof_sample(usize, cnt);
+ else
+ p = icalloc(usize);
+ if (p == NULL)
+ return (NULL);
+ prof_malloc(p, usize, cnt);
+
+ return (p);
+}
+
void *
je_calloc(size_t num, size_t size)
{
void *ret;
size_t num_size;
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
- prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
if (malloc_init()) {
num_size = 0;
@@ -1033,19 +1147,11 @@ je_calloc(size_t num, size_t size)
}
if (config_prof && opt_prof) {
+ prof_thr_cnt_t *cnt;
+
usize = s2u(num_size);
PROF_ALLOC_PREP(1, usize, cnt);
- if (cnt == NULL) {
- ret = NULL;
- goto label_return;
- }
- if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
- <= SMALL_MAXCLASS) {
- ret = icalloc(SMALL_MAXCLASS+1);
- if (ret != NULL)
- arena_prof_promoted(ret, usize);
- } else
- ret = icalloc(num_size);
+ ret = icalloc_prof(usize, cnt);
} else {
if (config_stats || (config_valgrind && opt_valgrind))
usize = s2u(num_size);
@@ -1061,9 +1167,6 @@ label_return:
}
set_errno(ENOMEM);
}
-
- if (config_prof && opt_prof && ret != NULL)
- prof_malloc(ret, usize, cnt);
if (config_stats && ret != NULL) {
assert(usize == isalloc(ret, config_prof));
thread_allocated_tsd_get()->allocated += usize;
@@ -1073,150 +1176,126 @@ label_return:
return (ret);
}
+static void *
+irealloc_prof_sample(void *oldptr, size_t usize, prof_thr_cnt_t *cnt)
+{
+ void *p;
+
+ if (cnt == NULL)
+ return (NULL);
+ if (prof_promote && usize <= SMALL_MAXCLASS) {
+ p = iralloc(oldptr, SMALL_MAXCLASS+1, 0, 0, false);
+ if (p == NULL)
+ return (NULL);
+ arena_prof_promoted(p, usize);
+ } else
+ p = iralloc(oldptr, usize, 0, 0, false);
+
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+irealloc_prof(void *oldptr, size_t old_usize, size_t usize, prof_thr_cnt_t *cnt)
+{
+ void *p;
+ prof_ctx_t *old_ctx;
+
+ old_ctx = prof_ctx_get(oldptr);
+ if ((uintptr_t)cnt != (uintptr_t)1U)
+ p = irealloc_prof_sample(oldptr, usize, cnt);
+ else
+ p = iralloc(oldptr, usize, 0, 0, false);
+ if (p == NULL)
+ return (NULL);
+ prof_realloc(p, usize, cnt, old_usize, old_ctx);
+
+ return (p);
+}
+
+JEMALLOC_INLINE_C void
+ifree(void *ptr)
+{
+ size_t usize;
+ UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
+
+ assert(ptr != NULL);
+ assert(malloc_initialized || IS_INITIALIZER);
+
+ if (config_prof && opt_prof) {
+ usize = isalloc(ptr, config_prof);
+ prof_free(ptr, usize);
+ } else if (config_stats || config_valgrind)
+ usize = isalloc(ptr, config_prof);
+ if (config_stats)
+ thread_allocated_tsd_get()->deallocated += usize;
+ if (config_valgrind && opt_valgrind)
+ rzsize = p2rz(ptr);
+ iqalloc(ptr);
+ JEMALLOC_VALGRIND_FREE(ptr, rzsize);
+}
+
void *
je_realloc(void *ptr, size_t size)
{
void *ret;
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
- size_t old_size = 0;
- size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
- prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
- prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);
+ size_t old_usize = 0;
+ UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
if (size == 0) {
if (ptr != NULL) {
- /* realloc(ptr, 0) is equivalent to free(p). */
- if (config_prof) {
- old_size = isalloc(ptr, true);
- if (config_valgrind && opt_valgrind)
- old_rzsize = p2rz(ptr);
- } else if (config_stats) {
- old_size = isalloc(ptr, false);
- if (config_valgrind && opt_valgrind)
- old_rzsize = u2rz(old_size);
- } else if (config_valgrind && opt_valgrind) {
- old_size = isalloc(ptr, false);
- old_rzsize = u2rz(old_size);
- }
- if (config_prof && opt_prof) {
- old_ctx = prof_ctx_get(ptr);
- cnt = NULL;
- }
- iqalloc(ptr);
- ret = NULL;
- goto label_return;
- } else
- size = 1;
+ /* realloc(ptr, 0) is equivalent to free(ptr). */
+ UTRACE(ptr, 0, 0);
+ ifree(ptr);
+ return (NULL);
+ }
+ size = 1;
}
if (ptr != NULL) {
assert(malloc_initialized || IS_INITIALIZER);
+ malloc_thread_init();
+
+ if ((config_prof && opt_prof) || config_stats ||
+ (config_valgrind && opt_valgrind))
+ old_usize = isalloc(ptr, config_prof);
+ if (config_valgrind && opt_valgrind)
+ old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);
- if (config_prof) {
- old_size = isalloc(ptr, true);
- if (config_valgrind && opt_valgrind)
- old_rzsize = p2rz(ptr);
- } else if (config_stats) {
- old_size = isalloc(ptr, false);
- if (config_valgrind && opt_valgrind)
- old_rzsize = u2rz(old_size);
- } else if (config_valgrind && opt_valgrind) {
- old_size = isalloc(ptr, false);
- old_rzsize = u2rz(old_size);
- }
if (config_prof && opt_prof) {
+ prof_thr_cnt_t *cnt;
+
usize = s2u(size);
- old_ctx = prof_ctx_get(ptr);
PROF_ALLOC_PREP(1, usize, cnt);
- if (cnt == NULL) {
- old_ctx = NULL;
- ret = NULL;
- goto label_oom;
- }
- if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
- usize <= SMALL_MAXCLASS) {
- ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
- false, false);
- if (ret != NULL)
- arena_prof_promoted(ret, usize);
- else
- old_ctx = NULL;
- } else {
- ret = iralloc(ptr, size, 0, 0, false, false);
- if (ret == NULL)
- old_ctx = NULL;
- }
+ ret = irealloc_prof(ptr, old_usize, usize, cnt);
} else {
if (config_stats || (config_valgrind && opt_valgrind))
usize = s2u(size);
- ret = iralloc(ptr, size, 0, 0, false, false);
- }
-
-label_oom:
- if (ret == NULL) {
- if (config_xmalloc && opt_xmalloc) {
- malloc_write("<jemalloc>: Error in realloc(): "
- "out of memory\n");
- abort();
- }
- set_errno(ENOMEM);
+ ret = iralloc(ptr, size, 0, 0, false);
}
} else {
/* realloc(NULL, size) is equivalent to malloc(size). */
- if (config_prof && opt_prof)
- old_ctx = NULL;
- if (malloc_init()) {
- if (config_prof && opt_prof)
- cnt = NULL;
- ret = NULL;
- } else {
- if (config_prof && opt_prof) {
- usize = s2u(size);
- PROF_ALLOC_PREP(1, usize, cnt);
- if (cnt == NULL)
- ret = NULL;
- else {
- if (prof_promote && (uintptr_t)cnt !=
- (uintptr_t)1U && usize <=
- SMALL_MAXCLASS) {
- ret = imalloc(SMALL_MAXCLASS+1);
- if (ret != NULL) {
- arena_prof_promoted(ret,
- usize);
- }
- } else
- ret = imalloc(size);
- }
- } else {
- if (config_stats || (config_valgrind &&
- opt_valgrind))
- usize = s2u(size);
- ret = imalloc(size);
- }
- }
+ MALLOC_BODY(ret, size, usize);
+ }
- if (ret == NULL) {
- if (config_xmalloc && opt_xmalloc) {
- malloc_write("<jemalloc>: Error in realloc(): "
- "out of memory\n");
- abort();
- }
- set_errno(ENOMEM);
+ if (ret == NULL) {
+ if (config_xmalloc && opt_xmalloc) {
+ malloc_write("<jemalloc>: Error in realloc(): "
+ "out of memory\n");
+ abort();
}
+ set_errno(ENOMEM);
}
-
-label_return:
- if (config_prof && opt_prof)
- prof_realloc(ret, usize, cnt, old_size, old_ctx);
if (config_stats && ret != NULL) {
thread_allocated_t *ta;
assert(usize == isalloc(ret, config_prof));
ta = thread_allocated_tsd_get();
ta->allocated += usize;
- ta->deallocated += old_size;
+ ta->deallocated += old_usize;
}
UTRACE(ptr, size, ret);
- JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
+ JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_usize, old_rzsize,
+ false);
return (ret);
}
@@ -1225,24 +1304,8 @@ je_free(void *ptr)
{
UTRACE(ptr, 0, 0);
- if (ptr != NULL) {
- size_t usize;
- size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
-
- assert(malloc_initialized || IS_INITIALIZER);
-
- if (config_prof && opt_prof) {
- usize = isalloc(ptr, config_prof);
- prof_free(ptr, usize);
- } else if (config_stats || config_valgrind)
- usize = isalloc(ptr, config_prof);
- if (config_stats)
- thread_allocated_tsd_get()->deallocated += usize;
- if (config_valgrind && opt_valgrind)
- rzsize = p2rz(ptr);
- iqalloc(ptr);
- JEMALLOC_VALGRIND_FREE(ptr, rzsize);
- }
+ if (ptr != NULL)
+ ifree(ptr);
}
/*
@@ -1308,206 +1371,344 @@ JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) =
* Begin non-standard functions.
*/
-size_t
-je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
+JEMALLOC_ALWAYS_INLINE_C void *
+imallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
+ arena_t *arena)
{
- size_t ret;
- assert(malloc_initialized || IS_INITIALIZER);
+ assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
+ alignment)));
- if (config_ivsalloc)
- ret = ivsalloc(ptr, config_prof);
+ if (alignment != 0)
+ return (ipalloct(usize, alignment, zero, try_tcache, arena));
+ else if (zero)
+ return (icalloct(usize, try_tcache, arena));
else
- ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
-
- return (ret);
+ return (imalloct(usize, try_tcache, arena));
}
-void
-je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *opts)
+static void *
+imallocx_prof_sample(size_t usize, size_t alignment, bool zero, bool try_tcache,
+ arena_t *arena, prof_thr_cnt_t *cnt)
{
+ void *p;
- stats_print(write_cb, cbopaque, opts);
+ if (cnt == NULL)
+ return (NULL);
+ if (prof_promote && usize <= SMALL_MAXCLASS) {
+ size_t usize_promoted = (alignment == 0) ?
+ s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1, alignment);
+ assert(usize_promoted != 0);
+ p = imallocx(usize_promoted, alignment, zero, try_tcache,
+ arena);
+ if (p == NULL)
+ return (NULL);
+ arena_prof_promoted(p, usize);
+ } else
+ p = imallocx(usize, alignment, zero, try_tcache, arena);
+
+ return (p);
}
-int
-je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
- size_t newlen)
+JEMALLOC_ALWAYS_INLINE_C void *
+imallocx_prof(size_t usize, size_t alignment, bool zero, bool try_tcache,
+ arena_t *arena, prof_thr_cnt_t *cnt)
{
+ void *p;
- if (malloc_init())
- return (EAGAIN);
+ if ((uintptr_t)cnt != (uintptr_t)1U) {
+ p = imallocx_prof_sample(usize, alignment, zero, try_tcache,
+ arena, cnt);
+ } else
+ p = imallocx(usize, alignment, zero, try_tcache, arena);
+ if (p == NULL)
+ return (NULL);
+ prof_malloc(p, usize, cnt);
- return (ctl_byname(name, oldp, oldlenp, newp, newlen));
+ return (p);
}
-int
-je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
+void *
+je_mallocx(size_t size, int flags)
{
+ void *p;
+ size_t usize;
+ size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
+ & (SIZE_T_MAX-1));
+ bool zero = flags & MALLOCX_ZERO;
+ unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
+ arena_t *arena;
+ bool try_tcache;
+
+ assert(size != 0);
if (malloc_init())
- return (EAGAIN);
+ goto label_oom;
- return (ctl_nametomib(name, mibp, miblenp));
+ if (arena_ind != UINT_MAX) {
+ arena = arenas[arena_ind];
+ try_tcache = false;
+ } else {
+ arena = NULL;
+ try_tcache = true;
+ }
+
+ usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
+ assert(usize != 0);
+
+ if (config_prof && opt_prof) {
+ prof_thr_cnt_t *cnt;
+
+ PROF_ALLOC_PREP(1, usize, cnt);
+ p = imallocx_prof(usize, alignment, zero, try_tcache, arena,
+ cnt);
+ } else
+ p = imallocx(usize, alignment, zero, try_tcache, arena);
+ if (p == NULL)
+ goto label_oom;
+
+ if (config_stats) {
+ assert(usize == isalloc(p, config_prof));
+ thread_allocated_tsd_get()->allocated += usize;
+ }
+ UTRACE(0, size, p);
+ JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
+ return (p);
+label_oom:
+ if (config_xmalloc && opt_xmalloc) {
+ malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
+ abort();
+ }
+ UTRACE(0, size, 0);
+ return (NULL);
}
-int
-je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
+static void *
+irallocx_prof_sample(void *oldptr, size_t size, size_t alignment, size_t usize,
+ bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena,
+ prof_thr_cnt_t *cnt)
{
+ void *p;
- if (malloc_init())
- return (EAGAIN);
+ if (cnt == NULL)
+ return (NULL);
+ if (prof_promote && usize <= SMALL_MAXCLASS) {
+ p = iralloct(oldptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
+ size) ? 0 : size - (SMALL_MAXCLASS+1), alignment, zero,
+ try_tcache_alloc, try_tcache_dalloc, arena);
+ if (p == NULL)
+ return (NULL);
+ arena_prof_promoted(p, usize);
+ } else {
+ p = iralloct(oldptr, size, 0, alignment, zero,
+ try_tcache_alloc, try_tcache_dalloc, arena);
+ }
- return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
+ return (p);
}
-/*
- * End non-standard functions.
- */
-/******************************************************************************/
-/*
- * Begin experimental functions.
- */
-#ifdef JEMALLOC_EXPERIMENTAL
-
-JEMALLOC_INLINE void *
-iallocm(size_t usize, size_t alignment, bool zero, bool try_tcache,
- arena_t *arena)
+JEMALLOC_ALWAYS_INLINE_C void *
+irallocx_prof(void *oldptr, size_t old_usize, size_t size, size_t alignment,
+ size_t *usize, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
+ arena_t *arena, prof_thr_cnt_t *cnt)
{
+ void *p;
+ prof_ctx_t *old_ctx;
- assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
- alignment)));
+ old_ctx = prof_ctx_get(oldptr);
+ if ((uintptr_t)cnt != (uintptr_t)1U)
+ p = irallocx_prof_sample(oldptr, size, alignment, *usize, zero,
+ try_tcache_alloc, try_tcache_dalloc, arena, cnt);
+ else {
+ p = iralloct(oldptr, size, 0, alignment, zero,
+ try_tcache_alloc, try_tcache_dalloc, arena);
+ }
+ if (p == NULL)
+ return (NULL);
- if (alignment != 0)
- return (ipallocx(usize, alignment, zero, try_tcache, arena));
- else if (zero)
- return (icallocx(usize, try_tcache, arena));
- else
- return (imallocx(usize, try_tcache, arena));
+ if (p == oldptr && alignment != 0) {
+ /*
+ * The allocation did not move, so it is possible that the size
+ * class is smaller than would guarantee the requested
+ * alignment, and that the alignment constraint was
+ * serendipitously satisfied. Additionally, old_usize may not
+ * be the same as the current usize because of in-place large
+ * reallocation. Therefore, query the actual value of usize.
+ */
+ *usize = isalloc(p, config_prof);
+ }
+ prof_realloc(p, *usize, cnt, old_usize, old_ctx);
+
+ return (p);
}
-int
-je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
+void *
+je_rallocx(void *ptr, size_t size, int flags)
{
void *p;
- size_t usize;
- size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
+ size_t usize, old_usize;
+ UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
+ size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
& (SIZE_T_MAX-1));
- bool zero = flags & ALLOCM_ZERO;
+ bool zero = flags & MALLOCX_ZERO;
unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
+ bool try_tcache_alloc, try_tcache_dalloc;
arena_t *arena;
- bool try_tcache;
assert(ptr != NULL);
assert(size != 0);
-
- if (malloc_init())
- goto label_oom;
+ assert(malloc_initialized || IS_INITIALIZER);
+ malloc_thread_init();
if (arena_ind != UINT_MAX) {
+ arena_chunk_t *chunk;
+ try_tcache_alloc = false;
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ try_tcache_dalloc = (chunk == ptr || chunk->arena !=
+ arenas[arena_ind]);
arena = arenas[arena_ind];
- try_tcache = false;
} else {
+ try_tcache_alloc = true;
+ try_tcache_dalloc = true;
arena = NULL;
- try_tcache = true;
}
- usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
- if (usize == 0)
- goto label_oom;
+ if ((config_prof && opt_prof) || config_stats ||
+ (config_valgrind && opt_valgrind))
+ old_usize = isalloc(ptr, config_prof);
+ if (config_valgrind && opt_valgrind)
+ old_rzsize = u2rz(old_usize);
if (config_prof && opt_prof) {
prof_thr_cnt_t *cnt;
+ usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
+ assert(usize != 0);
PROF_ALLOC_PREP(1, usize, cnt);
- if (cnt == NULL)
+ p = irallocx_prof(ptr, old_usize, size, alignment, &usize, zero,
+ try_tcache_alloc, try_tcache_dalloc, arena, cnt);
+ if (p == NULL)
goto label_oom;
- if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
- SMALL_MAXCLASS) {
- size_t usize_promoted = (alignment == 0) ?
- s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
- alignment);
- assert(usize_promoted != 0);
- p = iallocm(usize_promoted, alignment, zero,
- try_tcache, arena);
- if (p == NULL)
- goto label_oom;
- arena_prof_promoted(p, usize);
- } else {
- p = iallocm(usize, alignment, zero, try_tcache, arena);
- if (p == NULL)
- goto label_oom;
- }
- prof_malloc(p, usize, cnt);
} else {
- p = iallocm(usize, alignment, zero, try_tcache, arena);
+ p = iralloct(ptr, size, 0, alignment, zero, try_tcache_alloc,
+ try_tcache_dalloc, arena);
if (p == NULL)
goto label_oom;
+ if (config_stats || (config_valgrind && opt_valgrind))
+ usize = isalloc(p, config_prof);
}
- if (rsize != NULL)
- *rsize = usize;
- *ptr = p;
if (config_stats) {
- assert(usize == isalloc(p, config_prof));
- thread_allocated_tsd_get()->allocated += usize;
+ thread_allocated_t *ta;
+ ta = thread_allocated_tsd_get();
+ ta->allocated += usize;
+ ta->deallocated += old_usize;
}
- UTRACE(0, size, p);
- JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
- return (ALLOCM_SUCCESS);
+ UTRACE(ptr, size, p);
+ JEMALLOC_VALGRIND_REALLOC(p, usize, ptr, old_usize, old_rzsize, zero);
+ return (p);
label_oom:
if (config_xmalloc && opt_xmalloc) {
- malloc_write("<jemalloc>: Error in allocm(): "
- "out of memory\n");
+ malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
abort();
}
- *ptr = NULL;
- UTRACE(0, size, 0);
- return (ALLOCM_ERR_OOM);
+ UTRACE(ptr, size, 0);
+ return (NULL);
}
-int
-je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
+JEMALLOC_ALWAYS_INLINE_C size_t
+ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
+ size_t alignment, bool zero, arena_t *arena)
+{
+ size_t usize;
+
+ if (ixalloc(ptr, size, extra, alignment, zero))
+ return (old_usize);
+ usize = isalloc(ptr, config_prof);
+
+ return (usize);
+}
+
+static size_t
+ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
+ size_t alignment, size_t max_usize, bool zero, arena_t *arena,
+ prof_thr_cnt_t *cnt)
+{
+ size_t usize;
+
+ if (cnt == NULL)
+ return (old_usize);
+ /* Use minimum usize to determine whether promotion may happen. */
+ if (prof_promote && ((alignment == 0) ? s2u(size) : sa2u(size,
+ alignment)) <= SMALL_MAXCLASS) {
+ if (ixalloc(ptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
+ size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
+ alignment, zero))
+ return (old_usize);
+ usize = isalloc(ptr, config_prof);
+ if (max_usize < PAGE)
+ arena_prof_promoted(ptr, usize);
+ } else {
+ usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
+ zero, arena);
+ }
+
+ return (usize);
+}
+
+JEMALLOC_ALWAYS_INLINE_C size_t
+ixallocx_prof(void *ptr, size_t old_usize, size_t size, size_t extra,
+ size_t alignment, size_t max_usize, bool zero, arena_t *arena,
+ prof_thr_cnt_t *cnt)
{
- void *p, *q;
size_t usize;
- size_t old_size;
- size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
- size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
+ prof_ctx_t *old_ctx;
+
+ old_ctx = prof_ctx_get(ptr);
+ if ((uintptr_t)cnt != (uintptr_t)1U) {
+ usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
+ alignment, zero, max_usize, arena, cnt);
+ } else {
+ usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
+ zero, arena);
+ }
+ if (usize == old_usize)
+ return (usize);
+ prof_realloc(ptr, usize, cnt, old_usize, old_ctx);
+
+ return (usize);
+}
+
+size_t
+je_xallocx(void *ptr, size_t size, size_t extra, int flags)
+{
+ size_t usize, old_usize;
+ UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
+ size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
& (SIZE_T_MAX-1));
- bool zero = flags & ALLOCM_ZERO;
- bool no_move = flags & ALLOCM_NO_MOVE;
+ bool zero = flags & MALLOCX_ZERO;
unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
- bool try_tcache_alloc, try_tcache_dalloc;
arena_t *arena;
assert(ptr != NULL);
- assert(*ptr != NULL);
assert(size != 0);
assert(SIZE_T_MAX - size >= extra);
assert(malloc_initialized || IS_INITIALIZER);
+ malloc_thread_init();
- if (arena_ind != UINT_MAX) {
- arena_chunk_t *chunk;
- try_tcache_alloc = true;
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(*ptr);
- try_tcache_dalloc = (chunk == *ptr || chunk->arena !=
- arenas[arena_ind]);
+ if (arena_ind != UINT_MAX)
arena = arenas[arena_ind];
- } else {
- try_tcache_alloc = true;
- try_tcache_dalloc = true;
+ else
arena = NULL;
- }
- p = *ptr;
+ old_usize = isalloc(ptr, config_prof);
+ if (config_valgrind && opt_valgrind)
+ old_rzsize = u2rz(old_usize);
+
if (config_prof && opt_prof) {
prof_thr_cnt_t *cnt;
-
/*
- * usize isn't knowable before iralloc() returns when extra is
+ * usize isn't knowable before ixalloc() returns when extra is
* non-zero. Therefore, compute its maximum possible value and
* use that in PROF_ALLOC_PREP() to decide whether to capture a
* backtrace. prof_realloc() will use the actual usize to
@@ -1515,111 +1716,51 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
*/
size_t max_usize = (alignment == 0) ? s2u(size+extra) :
sa2u(size+extra, alignment);
- prof_ctx_t *old_ctx = prof_ctx_get(p);
- old_size = isalloc(p, true);
- if (config_valgrind && opt_valgrind)
- old_rzsize = p2rz(p);
PROF_ALLOC_PREP(1, max_usize, cnt);
- if (cnt == NULL)
- goto label_oom;
- /*
- * Use minimum usize to determine whether promotion may happen.
- */
- if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
- && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
- <= SMALL_MAXCLASS) {
- q = irallocx(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
- size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
- alignment, zero, no_move, try_tcache_alloc,
- try_tcache_dalloc, arena);
- if (q == NULL)
- goto label_err;
- if (max_usize < PAGE) {
- usize = max_usize;
- arena_prof_promoted(q, usize);
- } else
- usize = isalloc(q, config_prof);
- } else {
- q = irallocx(p, size, extra, alignment, zero, no_move,
- try_tcache_alloc, try_tcache_dalloc, arena);
- if (q == NULL)
- goto label_err;
- usize = isalloc(q, config_prof);
- }
- prof_realloc(q, usize, cnt, old_size, old_ctx);
- if (rsize != NULL)
- *rsize = usize;
+ usize = ixallocx_prof(ptr, old_usize, size, extra, alignment,
+ max_usize, zero, arena, cnt);
} else {
- if (config_stats) {
- old_size = isalloc(p, false);
- if (config_valgrind && opt_valgrind)
- old_rzsize = u2rz(old_size);
- } else if (config_valgrind && opt_valgrind) {
- old_size = isalloc(p, false);
- old_rzsize = u2rz(old_size);
- }
- q = irallocx(p, size, extra, alignment, zero, no_move,
- try_tcache_alloc, try_tcache_dalloc, arena);
- if (q == NULL)
- goto label_err;
- if (config_stats)
- usize = isalloc(q, config_prof);
- if (rsize != NULL) {
- if (config_stats == false)
- usize = isalloc(q, config_prof);
- *rsize = usize;
- }
+ usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
+ zero, arena);
}
+ if (usize == old_usize)
+ goto label_not_resized;
- *ptr = q;
if (config_stats) {
thread_allocated_t *ta;
ta = thread_allocated_tsd_get();
ta->allocated += usize;
- ta->deallocated += old_size;
+ ta->deallocated += old_usize;
}
- UTRACE(p, size, q);
- JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
- return (ALLOCM_SUCCESS);
-label_err:
- if (no_move) {
- UTRACE(p, size, q);
- return (ALLOCM_ERR_NOT_MOVED);
- }
-label_oom:
- if (config_xmalloc && opt_xmalloc) {
- malloc_write("<jemalloc>: Error in rallocm(): "
- "out of memory\n");
- abort();
- }
- UTRACE(p, size, 0);
- return (ALLOCM_ERR_OOM);
+ JEMALLOC_VALGRIND_REALLOC(ptr, usize, ptr, old_usize, old_rzsize, zero);
+label_not_resized:
+ UTRACE(ptr, size, ptr);
+ return (usize);
}
-int
-je_sallocm(const void *ptr, size_t *rsize, int flags)
+size_t
+je_sallocx(const void *ptr, int flags)
{
- size_t sz;
+ size_t usize;
assert(malloc_initialized || IS_INITIALIZER);
+ malloc_thread_init();
if (config_ivsalloc)
- sz = ivsalloc(ptr, config_prof);
+ usize = ivsalloc(ptr, config_prof);
else {
assert(ptr != NULL);
- sz = isalloc(ptr, config_prof);
+ usize = isalloc(ptr, config_prof);
}
- assert(rsize != NULL);
- *rsize = sz;
- return (ALLOCM_SUCCESS);
+ return (usize);
}
-int
-je_dallocm(void *ptr, int flags)
+void
+je_dallocx(void *ptr, int flags)
{
size_t usize;
- size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
+ UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
bool try_tcache;
@@ -1645,28 +1786,162 @@ je_dallocm(void *ptr, int flags)
thread_allocated_tsd_get()->deallocated += usize;
if (config_valgrind && opt_valgrind)
rzsize = p2rz(ptr);
- iqallocx(ptr, try_tcache);
+ iqalloct(ptr, try_tcache);
JEMALLOC_VALGRIND_FREE(ptr, rzsize);
-
- return (ALLOCM_SUCCESS);
}
-int
-je_nallocm(size_t *rsize, size_t size, int flags)
+size_t
+je_nallocx(size_t size, int flags)
{
size_t usize;
- size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
+ size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
& (SIZE_T_MAX-1));
assert(size != 0);
if (malloc_init())
- return (ALLOCM_ERR_OOM);
+ return (0);
usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
- if (usize == 0)
+ assert(usize != 0);
+ return (usize);
+}
+
+int
+je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
+ size_t newlen)
+{
+
+ if (malloc_init())
+ return (EAGAIN);
+
+ return (ctl_byname(name, oldp, oldlenp, newp, newlen));
+}
+
+int
+je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
+{
+
+ if (malloc_init())
+ return (EAGAIN);
+
+ return (ctl_nametomib(name, mibp, miblenp));
+}
+
+int
+je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+
+ if (malloc_init())
+ return (EAGAIN);
+
+ return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
+}
+
+void
+je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *opts)
+{
+
+ stats_print(write_cb, cbopaque, opts);
+}
+
+size_t
+je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
+{
+ size_t ret;
+
+ assert(malloc_initialized || IS_INITIALIZER);
+ malloc_thread_init();
+
+ if (config_ivsalloc)
+ ret = ivsalloc(ptr, config_prof);
+ else
+ ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
+
+ return (ret);
+}
+
+/*
+ * End non-standard functions.
+ */
+/******************************************************************************/
+/*
+ * Begin experimental functions.
+ */
+#ifdef JEMALLOC_EXPERIMENTAL
+
+int
+je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
+{
+ void *p;
+
+ assert(ptr != NULL);
+
+ p = je_mallocx(size, flags);
+ if (p == NULL)
return (ALLOCM_ERR_OOM);
+ if (rsize != NULL)
+ *rsize = isalloc(p, config_prof);
+ *ptr = p;
+ return (ALLOCM_SUCCESS);
+}
+int
+je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
+{
+ int ret;
+ bool no_move = flags & ALLOCM_NO_MOVE;
+
+ assert(ptr != NULL);
+ assert(*ptr != NULL);
+ assert(size != 0);
+ assert(SIZE_T_MAX - size >= extra);
+
+ if (no_move) {
+ size_t usize = je_xallocx(*ptr, size, extra, flags);
+ ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED;
+ if (rsize != NULL)
+ *rsize = usize;
+ } else {
+ void *p = je_rallocx(*ptr, size+extra, flags);
+ if (p != NULL) {
+ *ptr = p;
+ ret = ALLOCM_SUCCESS;
+ } else
+ ret = ALLOCM_ERR_OOM;
+ if (rsize != NULL)
+ *rsize = isalloc(*ptr, config_prof);
+ }
+ return (ret);
+}
+
+int
+je_sallocm(const void *ptr, size_t *rsize, int flags)
+{
+
+ assert(rsize != NULL);
+ *rsize = je_sallocx(ptr, flags);
+ return (ALLOCM_SUCCESS);
+}
+
+int
+je_dallocm(void *ptr, int flags)
+{
+
+ je_dallocx(ptr, flags);
+ return (ALLOCM_SUCCESS);
+}
+
+int
+je_nallocm(size_t *rsize, size_t size, int flags)
+{
+ size_t usize;
+
+ usize = je_nallocx(size, flags);
+ if (usize == 0)
+ return (ALLOCM_ERR_OOM);
if (rsize != NULL)
*rsize = usize;
return (ALLOCM_SUCCESS);
@@ -1721,12 +1996,12 @@ _malloc_prefork(void)
/* Acquire all mutexes in a safe order. */
ctl_prefork();
+ prof_prefork();
malloc_mutex_prefork(&arenas_lock);
for (i = 0; i < narenas_total; i++) {
if (arenas[i] != NULL)
arena_prefork(arenas[i]);
}
- prof_prefork();
chunk_prefork();
base_prefork();
huge_prefork();
@@ -1752,12 +2027,12 @@ _malloc_postfork(void)
huge_postfork_parent();
base_postfork_parent();
chunk_postfork_parent();
- prof_postfork_parent();
for (i = 0; i < narenas_total; i++) {
if (arenas[i] != NULL)
arena_postfork_parent(arenas[i]);
}
malloc_mutex_postfork_parent(&arenas_lock);
+ prof_postfork_parent();
ctl_postfork_parent();
}
@@ -1772,12 +2047,12 @@ jemalloc_postfork_child(void)
huge_postfork_child();
base_postfork_child();
chunk_postfork_child();
- prof_postfork_child();
for (i = 0; i < narenas_total; i++) {
if (arenas[i] != NULL)
arena_postfork_child(arenas[i]);
}
malloc_mutex_postfork_child(&arenas_lock);
+ prof_postfork_child();
ctl_postfork_child();
}
@@ -1801,7 +2076,7 @@ a0alloc(size_t size, bool zero)
if (size <= arena_maxclass)
return (arena_malloc(arenas[0], size, zero, false));
else
- return (huge_malloc(size, zero));
+ return (huge_malloc(size, zero, huge_dss_prec_get(arenas[0])));
}
void *
diff --git a/deps/jemalloc/src/mutex.c b/deps/jemalloc/src/mutex.c
index 55e18c237..788eca387 100644
--- a/deps/jemalloc/src/mutex.c
+++ b/deps/jemalloc/src/mutex.c
@@ -6,7 +6,7 @@
#endif
#ifndef _CRT_SPINCOUNT
-#define _CRT_SPINCOUNT 4000
+#define _CRT_SPINCOUNT 4000
#endif
/******************************************************************************/
diff --git a/deps/jemalloc/src/prof.c b/deps/jemalloc/src/prof.c
index 04964ef7c..7722b7b43 100644
--- a/deps/jemalloc/src/prof.c
+++ b/deps/jemalloc/src/prof.c
@@ -24,9 +24,14 @@ bool opt_prof_gdump = false;
bool opt_prof_final = true;
bool opt_prof_leak = false;
bool opt_prof_accum = false;
-char opt_prof_prefix[PATH_MAX + 1];
+char opt_prof_prefix[
+ /* Minimize memory bloat for non-prof builds. */
+#ifdef JEMALLOC_PROF
+ PATH_MAX +
+#endif
+ 1];
-uint64_t prof_interval;
+uint64_t prof_interval = 0;
bool prof_promote;
/*
@@ -54,10 +59,17 @@ static uint64_t prof_dump_useq;
/*
* This buffer is rather large for stack allocation, so use a single buffer for
- * all profile dumps. The buffer is implicitly protected by bt2ctx_mtx, since
- * it must be locked anyway during dumping.
+ * all profile dumps.
*/
-static char prof_dump_buf[PROF_DUMP_BUFSIZE];
+static malloc_mutex_t prof_dump_mtx;
+static char prof_dump_buf[
+ /* Minimize memory bloat for non-prof builds. */
+#ifdef JEMALLOC_PROF
+ PROF_DUMP_BUFSIZE
+#else
+ 1
+#endif
+];
static unsigned prof_dump_buf_end;
static int prof_dump_fd;
@@ -65,37 +77,6 @@ static int prof_dump_fd;
static bool prof_booted = false;
/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static prof_bt_t *bt_dup(prof_bt_t *bt);
-static void bt_destroy(prof_bt_t *bt);
-#ifdef JEMALLOC_PROF_LIBGCC
-static _Unwind_Reason_Code prof_unwind_init_callback(
- struct _Unwind_Context *context, void *arg);
-static _Unwind_Reason_Code prof_unwind_callback(
- struct _Unwind_Context *context, void *arg);
-#endif
-static bool prof_flush(bool propagate_err);
-static bool prof_write(bool propagate_err, const char *s);
-static bool prof_printf(bool propagate_err, const char *format, ...)
- JEMALLOC_ATTR(format(printf, 2, 3));
-static void prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all,
- size_t *leak_nctx);
-static void prof_ctx_destroy(prof_ctx_t *ctx);
-static void prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt);
-static bool prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx,
- prof_bt_t *bt);
-static bool prof_dump_maps(bool propagate_err);
-static bool prof_dump(bool propagate_err, const char *filename,
- bool leakcheck);
-static void prof_dump_filename(char *filename, char v, int64_t vseq);
-static void prof_fdump(void);
-static void prof_bt_hash(const void *key, unsigned minbits, size_t *hash1,
- size_t *hash2);
-static bool prof_bt_keycomp(const void *k1, const void *k2);
-static malloc_mutex_t *prof_ctx_mutex_choose(void);
-
-/******************************************************************************/
void
bt_init(prof_bt_t *bt, void **vec)
@@ -424,10 +405,169 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore)
{
cassert(config_prof);
- assert(false);
+ not_reached();
}
#endif
+static malloc_mutex_t *
+prof_ctx_mutex_choose(void)
+{
+ unsigned nctxs = atomic_add_u(&cum_ctxs, 1);
+
+ return (&ctx_locks[(nctxs - 1) % PROF_NCTX_LOCKS]);
+}
+
+static void
+prof_ctx_init(prof_ctx_t *ctx, prof_bt_t *bt)
+{
+
+ ctx->bt = bt;
+ ctx->lock = prof_ctx_mutex_choose();
+ /*
+ * Set nlimbo to 1, in order to avoid a race condition with
+ * prof_ctx_merge()/prof_ctx_destroy().
+ */
+ ctx->nlimbo = 1;
+ ql_elm_new(ctx, dump_link);
+ memset(&ctx->cnt_merged, 0, sizeof(prof_cnt_t));
+ ql_new(&ctx->cnts_ql);
+}
+
+static void
+prof_ctx_destroy(prof_ctx_t *ctx)
+{
+ prof_tdata_t *prof_tdata;
+
+ cassert(config_prof);
+
+ /*
+ * Check that ctx is still unused by any thread cache before destroying
+ * it. prof_lookup() increments ctx->nlimbo in order to avoid a race
+ * condition with this function, as does prof_ctx_merge() in order to
+ * avoid a race between the main body of prof_ctx_merge() and entry
+ * into this function.
+ */
+ prof_tdata = prof_tdata_get(false);
+ assert((uintptr_t)prof_tdata > (uintptr_t)PROF_TDATA_STATE_MAX);
+ prof_enter(prof_tdata);
+ malloc_mutex_lock(ctx->lock);
+ if (ql_first(&ctx->cnts_ql) == NULL && ctx->cnt_merged.curobjs == 0 &&
+ ctx->nlimbo == 1) {
+ assert(ctx->cnt_merged.curbytes == 0);
+ assert(ctx->cnt_merged.accumobjs == 0);
+ assert(ctx->cnt_merged.accumbytes == 0);
+ /* Remove ctx from bt2ctx. */
+ if (ckh_remove(&bt2ctx, ctx->bt, NULL, NULL))
+ not_reached();
+ prof_leave(prof_tdata);
+ /* Destroy ctx. */
+ malloc_mutex_unlock(ctx->lock);
+ bt_destroy(ctx->bt);
+ idalloc(ctx);
+ } else {
+ /*
+ * Compensate for increment in prof_ctx_merge() or
+ * prof_lookup().
+ */
+ ctx->nlimbo--;
+ malloc_mutex_unlock(ctx->lock);
+ prof_leave(prof_tdata);
+ }
+}
+
+static void
+prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt)
+{
+ bool destroy;
+
+ cassert(config_prof);
+
+ /* Merge cnt stats and detach from ctx. */
+ malloc_mutex_lock(ctx->lock);
+ ctx->cnt_merged.curobjs += cnt->cnts.curobjs;
+ ctx->cnt_merged.curbytes += cnt->cnts.curbytes;
+ ctx->cnt_merged.accumobjs += cnt->cnts.accumobjs;
+ ctx->cnt_merged.accumbytes += cnt->cnts.accumbytes;
+ ql_remove(&ctx->cnts_ql, cnt, cnts_link);
+ if (opt_prof_accum == false && ql_first(&ctx->cnts_ql) == NULL &&
+ ctx->cnt_merged.curobjs == 0 && ctx->nlimbo == 0) {
+ /*
+ * Increment ctx->nlimbo in order to keep another thread from
+ * winning the race to destroy ctx while this one has ctx->lock
+ * dropped. Without this, it would be possible for another
+ * thread to:
+ *
+ * 1) Sample an allocation associated with ctx.
+ * 2) Deallocate the sampled object.
+ * 3) Successfully prof_ctx_destroy(ctx).
+ *
+ * The result would be that ctx no longer exists by the time
+ * this thread accesses it in prof_ctx_destroy().
+ */
+ ctx->nlimbo++;
+ destroy = true;
+ } else
+ destroy = false;
+ malloc_mutex_unlock(ctx->lock);
+ if (destroy)
+ prof_ctx_destroy(ctx);
+}
+
+static bool
+prof_lookup_global(prof_bt_t *bt, prof_tdata_t *prof_tdata, void **p_btkey,
+ prof_ctx_t **p_ctx, bool *p_new_ctx)
+{
+ union {
+ prof_ctx_t *p;
+ void *v;
+ } ctx;
+ union {
+ prof_bt_t *p;
+ void *v;
+ } btkey;
+ bool new_ctx;
+
+ prof_enter(prof_tdata);
+ if (ckh_search(&bt2ctx, bt, &btkey.v, &ctx.v)) {
+ /* bt has never been seen before. Insert it. */
+ ctx.v = imalloc(sizeof(prof_ctx_t));
+ if (ctx.v == NULL) {
+ prof_leave(prof_tdata);
+ return (true);
+ }
+ btkey.p = bt_dup(bt);
+ if (btkey.v == NULL) {
+ prof_leave(prof_tdata);
+ idalloc(ctx.v);
+ return (true);
+ }
+ prof_ctx_init(ctx.p, btkey.p);
+ if (ckh_insert(&bt2ctx, btkey.v, ctx.v)) {
+ /* OOM. */
+ prof_leave(prof_tdata);
+ idalloc(btkey.v);
+ idalloc(ctx.v);
+ return (true);
+ }
+ new_ctx = true;
+ } else {
+ /*
+ * Increment nlimbo, in order to avoid a race condition with
+ * prof_ctx_merge()/prof_ctx_destroy().
+ */
+ malloc_mutex_lock(ctx.p->lock);
+ ctx.p->nlimbo++;
+ malloc_mutex_unlock(ctx.p->lock);
+ new_ctx = false;
+ }
+ prof_leave(prof_tdata);
+
+ *p_btkey = btkey.v;
+ *p_ctx = ctx.p;
+ *p_new_ctx = new_ctx;
+ return (false);
+}
+
prof_thr_cnt_t *
prof_lookup(prof_bt_t *bt)
{
@@ -439,67 +579,21 @@ prof_lookup(prof_bt_t *bt)
cassert(config_prof);
- prof_tdata = prof_tdata_get();
+ prof_tdata = prof_tdata_get(false);
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
return (NULL);
if (ckh_search(&prof_tdata->bt2cnt, bt, NULL, &ret.v)) {
- union {
- prof_bt_t *p;
- void *v;
- } btkey;
- union {
- prof_ctx_t *p;
- void *v;
- } ctx;
+ void *btkey;
+ prof_ctx_t *ctx;
bool new_ctx;
/*
* This thread's cache lacks bt. Look for it in the global
* cache.
*/
- prof_enter(prof_tdata);
- if (ckh_search(&bt2ctx, bt, &btkey.v, &ctx.v)) {
- /* bt has never been seen before. Insert it. */
- ctx.v = imalloc(sizeof(prof_ctx_t));
- if (ctx.v == NULL) {
- prof_leave(prof_tdata);
- return (NULL);
- }
- btkey.p = bt_dup(bt);
- if (btkey.v == NULL) {
- prof_leave(prof_tdata);
- idalloc(ctx.v);
- return (NULL);
- }
- ctx.p->bt = btkey.p;
- ctx.p->lock = prof_ctx_mutex_choose();
- /*
- * Set nlimbo to 1, in order to avoid a race condition
- * with prof_ctx_merge()/prof_ctx_destroy().
- */
- ctx.p->nlimbo = 1;
- memset(&ctx.p->cnt_merged, 0, sizeof(prof_cnt_t));
- ql_new(&ctx.p->cnts_ql);
- if (ckh_insert(&bt2ctx, btkey.v, ctx.v)) {
- /* OOM. */
- prof_leave(prof_tdata);
- idalloc(btkey.v);
- idalloc(ctx.v);
- return (NULL);
- }
- new_ctx = true;
- } else {
- /*
- * Increment nlimbo, in order to avoid a race condition
- * with prof_ctx_merge()/prof_ctx_destroy().
- */
- malloc_mutex_lock(ctx.p->lock);
- ctx.p->nlimbo++;
- malloc_mutex_unlock(ctx.p->lock);
- new_ctx = false;
- }
- prof_leave(prof_tdata);
+ if (prof_lookup_global(bt, prof_tdata, &btkey, &ctx, &new_ctx))
+ return (NULL);
/* Link a prof_thd_cnt_t into ctx for this thread. */
if (ckh_count(&prof_tdata->bt2cnt) == PROF_TCMAX) {
@@ -512,7 +606,7 @@ prof_lookup(prof_bt_t *bt)
assert(ret.v != NULL);
if (ckh_remove(&prof_tdata->bt2cnt, ret.p->ctx->bt,
NULL, NULL))
- assert(false);
+ not_reached();
ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
prof_ctx_merge(ret.p->ctx, ret.p);
/* ret can now be re-used. */
@@ -522,27 +616,27 @@ prof_lookup(prof_bt_t *bt)
ret.v = imalloc(sizeof(prof_thr_cnt_t));
if (ret.p == NULL) {
if (new_ctx)
- prof_ctx_destroy(ctx.p);
+ prof_ctx_destroy(ctx);
return (NULL);
}
ql_elm_new(ret.p, cnts_link);
ql_elm_new(ret.p, lru_link);
}
/* Finish initializing ret. */
- ret.p->ctx = ctx.p;
+ ret.p->ctx = ctx;
ret.p->epoch = 0;
memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
- if (ckh_insert(&prof_tdata->bt2cnt, btkey.v, ret.v)) {
+ if (ckh_insert(&prof_tdata->bt2cnt, btkey, ret.v)) {
if (new_ctx)
- prof_ctx_destroy(ctx.p);
+ prof_ctx_destroy(ctx);
idalloc(ret.v);
return (NULL);
}
ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
- malloc_mutex_lock(ctx.p->lock);
- ql_tail_insert(&ctx.p->cnts_ql, ret.p, cnts_link);
- ctx.p->nlimbo--;
- malloc_mutex_unlock(ctx.p->lock);
+ malloc_mutex_lock(ctx->lock);
+ ql_tail_insert(&ctx->cnts_ql, ret.p, cnts_link);
+ ctx->nlimbo--;
+ malloc_mutex_unlock(ctx->lock);
} else {
/* Move ret to the front of the LRU. */
ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
@@ -552,8 +646,52 @@ prof_lookup(prof_bt_t *bt)
return (ret.p);
}
+#ifdef JEMALLOC_JET
+size_t
+prof_bt_count(void)
+{
+ size_t bt_count;
+ prof_tdata_t *prof_tdata;
+
+ prof_tdata = prof_tdata_get(false);
+ if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
+ return (0);
+
+ prof_enter(prof_tdata);
+ bt_count = ckh_count(&bt2ctx);
+ prof_leave(prof_tdata);
+
+ return (bt_count);
+}
+#endif
+
+#ifdef JEMALLOC_JET
+#undef prof_dump_open
+#define prof_dump_open JEMALLOC_N(prof_dump_open_impl)
+#endif
+static int
+prof_dump_open(bool propagate_err, const char *filename)
+{
+ int fd;
+
+ fd = creat(filename, 0644);
+ if (fd == -1 && propagate_err == false) {
+ malloc_printf("<jemalloc>: creat(\"%s\"), 0644) failed\n",
+ filename);
+ if (opt_abort)
+ abort();
+ }
+
+ return (fd);
+}
+#ifdef JEMALLOC_JET
+#undef prof_dump_open
+#define prof_dump_open JEMALLOC_N(prof_dump_open)
+prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl);
+#endif
+
static bool
-prof_flush(bool propagate_err)
+prof_dump_flush(bool propagate_err)
{
bool ret = false;
ssize_t err;
@@ -576,7 +714,20 @@ prof_flush(bool propagate_err)
}
static bool
-prof_write(bool propagate_err, const char *s)
+prof_dump_close(bool propagate_err)
+{
+ bool ret;
+
+ assert(prof_dump_fd != -1);
+ ret = prof_dump_flush(propagate_err);
+ close(prof_dump_fd);
+ prof_dump_fd = -1;
+
+ return (ret);
+}
+
+static bool
+prof_dump_write(bool propagate_err, const char *s)
{
unsigned i, slen, n;
@@ -587,7 +738,7 @@ prof_write(bool propagate_err, const char *s)
while (i < slen) {
/* Flush the buffer if it is full. */
if (prof_dump_buf_end == PROF_DUMP_BUFSIZE)
- if (prof_flush(propagate_err) && propagate_err)
+ if (prof_dump_flush(propagate_err) && propagate_err)
return (true);
if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
@@ -607,7 +758,7 @@ prof_write(bool propagate_err, const char *s)
JEMALLOC_ATTR(format(printf, 2, 3))
static bool
-prof_printf(bool propagate_err, const char *format, ...)
+prof_dump_printf(bool propagate_err, const char *format, ...)
{
bool ret;
va_list ap;
@@ -616,13 +767,14 @@ prof_printf(bool propagate_err, const char *format, ...)
va_start(ap, format);
malloc_vsnprintf(buf, sizeof(buf), format, ap);
va_end(ap);
- ret = prof_write(propagate_err, buf);
+ ret = prof_dump_write(propagate_err, buf);
return (ret);
}
static void
-prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx)
+prof_dump_ctx_prep(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx,
+ prof_ctx_list_t *ctx_ql)
{
prof_thr_cnt_t *thr_cnt;
prof_cnt_t tcnt;
@@ -631,6 +783,14 @@ prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx)
malloc_mutex_lock(ctx->lock);
+ /*
+ * Increment nlimbo so that ctx won't go away before dump.
+ * Additionally, link ctx into the dump list so that it is included in
+ * prof_dump()'s second pass.
+ */
+ ctx->nlimbo++;
+ ql_tail_insert(ctx_ql, ctx, dump_link);
+
memcpy(&ctx->cnt_summed, &ctx->cnt_merged, sizeof(prof_cnt_t));
ql_foreach(thr_cnt, &ctx->cnts_ql, cnts_link) {
volatile unsigned *epoch = &thr_cnt->epoch;
@@ -671,89 +831,52 @@ prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx)
malloc_mutex_unlock(ctx->lock);
}
-static void
-prof_ctx_destroy(prof_ctx_t *ctx)
+static bool
+prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all)
{
- prof_tdata_t *prof_tdata;
-
- cassert(config_prof);
- /*
- * Check that ctx is still unused by any thread cache before destroying
- * it. prof_lookup() increments ctx->nlimbo in order to avoid a race
- * condition with this function, as does prof_ctx_merge() in order to
- * avoid a race between the main body of prof_ctx_merge() and entry
- * into this function.
- */
- prof_tdata = *prof_tdata_tsd_get();
- assert((uintptr_t)prof_tdata > (uintptr_t)PROF_TDATA_STATE_MAX);
- prof_enter(prof_tdata);
- malloc_mutex_lock(ctx->lock);
- if (ql_first(&ctx->cnts_ql) == NULL && ctx->cnt_merged.curobjs == 0 &&
- ctx->nlimbo == 1) {
- assert(ctx->cnt_merged.curbytes == 0);
- assert(ctx->cnt_merged.accumobjs == 0);
- assert(ctx->cnt_merged.accumbytes == 0);
- /* Remove ctx from bt2ctx. */
- if (ckh_remove(&bt2ctx, ctx->bt, NULL, NULL))
- assert(false);
- prof_leave(prof_tdata);
- /* Destroy ctx. */
- malloc_mutex_unlock(ctx->lock);
- bt_destroy(ctx->bt);
- idalloc(ctx);
+ if (opt_lg_prof_sample == 0) {
+ if (prof_dump_printf(propagate_err,
+ "heap profile: %"PRId64": %"PRId64
+ " [%"PRIu64": %"PRIu64"] @ heapprofile\n",
+ cnt_all->curobjs, cnt_all->curbytes,
+ cnt_all->accumobjs, cnt_all->accumbytes))
+ return (true);
} else {
- /*
- * Compensate for increment in prof_ctx_merge() or
- * prof_lookup().
- */
- ctx->nlimbo--;
- malloc_mutex_unlock(ctx->lock);
- prof_leave(prof_tdata);
+ if (prof_dump_printf(propagate_err,
+ "heap profile: %"PRId64": %"PRId64
+ " [%"PRIu64": %"PRIu64"] @ heap_v2/%"PRIu64"\n",
+ cnt_all->curobjs, cnt_all->curbytes,
+ cnt_all->accumobjs, cnt_all->accumbytes,
+ ((uint64_t)1U << opt_lg_prof_sample)))
+ return (true);
}
+
+ return (false);
}
static void
-prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt)
+prof_dump_ctx_cleanup_locked(prof_ctx_t *ctx, prof_ctx_list_t *ctx_ql)
{
- bool destroy;
- cassert(config_prof);
+ ctx->nlimbo--;
+ ql_remove(ctx_ql, ctx, dump_link);
+}
+
+static void
+prof_dump_ctx_cleanup(prof_ctx_t *ctx, prof_ctx_list_t *ctx_ql)
+{
- /* Merge cnt stats and detach from ctx. */
malloc_mutex_lock(ctx->lock);
- ctx->cnt_merged.curobjs += cnt->cnts.curobjs;
- ctx->cnt_merged.curbytes += cnt->cnts.curbytes;
- ctx->cnt_merged.accumobjs += cnt->cnts.accumobjs;
- ctx->cnt_merged.accumbytes += cnt->cnts.accumbytes;
- ql_remove(&ctx->cnts_ql, cnt, cnts_link);
- if (opt_prof_accum == false && ql_first(&ctx->cnts_ql) == NULL &&
- ctx->cnt_merged.curobjs == 0 && ctx->nlimbo == 0) {
- /*
- * Increment ctx->nlimbo in order to keep another thread from
- * winning the race to destroy ctx while this one has ctx->lock
- * dropped. Without this, it would be possible for another
- * thread to:
- *
- * 1) Sample an allocation associated with ctx.
- * 2) Deallocate the sampled object.
- * 3) Successfully prof_ctx_destroy(ctx).
- *
- * The result would be that ctx no longer exists by the time
- * this thread accesses it in prof_ctx_destroy().
- */
- ctx->nlimbo++;
- destroy = true;
- } else
- destroy = false;
+ prof_dump_ctx_cleanup_locked(ctx, ctx_ql);
malloc_mutex_unlock(ctx->lock);
- if (destroy)
- prof_ctx_destroy(ctx);
}
static bool
-prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, prof_bt_t *bt)
+prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, const prof_bt_t *bt,
+ prof_ctx_list_t *ctx_ql)
{
+ bool ret;
unsigned i;
cassert(config_prof);
@@ -765,66 +888,109 @@ prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, prof_bt_t *bt)
* filled in. Avoid dumping any ctx that is an artifact of either
* implementation detail.
*/
+ malloc_mutex_lock(ctx->lock);
if ((opt_prof_accum == false && ctx->cnt_summed.curobjs == 0) ||
(opt_prof_accum && ctx->cnt_summed.accumobjs == 0)) {
assert(ctx->cnt_summed.curobjs == 0);
assert(ctx->cnt_summed.curbytes == 0);
assert(ctx->cnt_summed.accumobjs == 0);
assert(ctx->cnt_summed.accumbytes == 0);
- return (false);
+ ret = false;
+ goto label_return;
}
- if (prof_printf(propagate_err, "%"PRId64": %"PRId64
+ if (prof_dump_printf(propagate_err, "%"PRId64": %"PRId64
" [%"PRIu64": %"PRIu64"] @",
ctx->cnt_summed.curobjs, ctx->cnt_summed.curbytes,
- ctx->cnt_summed.accumobjs, ctx->cnt_summed.accumbytes))
- return (true);
+ ctx->cnt_summed.accumobjs, ctx->cnt_summed.accumbytes)) {
+ ret = true;
+ goto label_return;
+ }
for (i = 0; i < bt->len; i++) {
- if (prof_printf(propagate_err, " %#"PRIxPTR,
- (uintptr_t)bt->vec[i]))
- return (true);
+ if (prof_dump_printf(propagate_err, " %#"PRIxPTR,
+ (uintptr_t)bt->vec[i])) {
+ ret = true;
+ goto label_return;
+ }
}
- if (prof_write(propagate_err, "\n"))
- return (true);
+ if (prof_dump_write(propagate_err, "\n")) {
+ ret = true;
+ goto label_return;
+ }
- return (false);
+ ret = false;
+label_return:
+ prof_dump_ctx_cleanup_locked(ctx, ctx_ql);
+ malloc_mutex_unlock(ctx->lock);
+ return (ret);
}
static bool
prof_dump_maps(bool propagate_err)
{
+ bool ret;
int mfd;
char filename[PATH_MAX + 1];
cassert(config_prof);
-
+#ifdef __FreeBSD__
+ malloc_snprintf(filename, sizeof(filename), "/proc/curproc/map");
+#else
malloc_snprintf(filename, sizeof(filename), "/proc/%d/maps",
(int)getpid());
+#endif
mfd = open(filename, O_RDONLY);
if (mfd != -1) {
ssize_t nread;
- if (prof_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
- propagate_err)
- return (true);
+ if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
+ propagate_err) {
+ ret = true;
+ goto label_return;
+ }
nread = 0;
do {
prof_dump_buf_end += nread;
if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
/* Make space in prof_dump_buf before read(). */
- if (prof_flush(propagate_err) && propagate_err)
- return (true);
+ if (prof_dump_flush(propagate_err) &&
+ propagate_err) {
+ ret = true;
+ goto label_return;
+ }
}
nread = read(mfd, &prof_dump_buf[prof_dump_buf_end],
PROF_DUMP_BUFSIZE - prof_dump_buf_end);
} while (nread > 0);
+ } else {
+ ret = true;
+ goto label_return;
+ }
+
+ ret = false;
+label_return:
+ if (mfd != -1)
close(mfd);
- } else
- return (true);
+ return (ret);
+}
- return (false);
+static void
+prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_nctx,
+ const char *filename)
+{
+
+ if (cnt_all->curbytes != 0) {
+ malloc_printf("<jemalloc>: Leak summary: %"PRId64" byte%s, %"
+ PRId64" object%s, %zu context%s\n",
+ cnt_all->curbytes, (cnt_all->curbytes != 1) ? "s" : "",
+ cnt_all->curobjs, (cnt_all->curobjs != 1) ? "s" : "",
+ leak_nctx, (leak_nctx != 1) ? "s" : "");
+ malloc_printf(
+ "<jemalloc>: Run pprof on \"%s\" for leak detail\n",
+ filename);
+ }
}
static bool
@@ -834,98 +1000,74 @@ prof_dump(bool propagate_err, const char *filename, bool leakcheck)
prof_cnt_t cnt_all;
size_t tabind;
union {
- prof_bt_t *p;
- void *v;
- } bt;
- union {
prof_ctx_t *p;
void *v;
} ctx;
size_t leak_nctx;
+ prof_ctx_list_t ctx_ql;
cassert(config_prof);
- prof_tdata = prof_tdata_get();
+ prof_tdata = prof_tdata_get(false);
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
return (true);
- prof_enter(prof_tdata);
- prof_dump_fd = creat(filename, 0644);
- if (prof_dump_fd == -1) {
- if (propagate_err == false) {
- malloc_printf(
- "<jemalloc>: creat(\"%s\"), 0644) failed\n",
- filename);
- if (opt_abort)
- abort();
- }
- goto label_error;
- }
+
+ malloc_mutex_lock(&prof_dump_mtx);
/* Merge per thread profile stats, and sum them in cnt_all. */
memset(&cnt_all, 0, sizeof(prof_cnt_t));
leak_nctx = 0;
+ ql_new(&ctx_ql);
+ prof_enter(prof_tdata);
for (tabind = 0; ckh_iter(&bt2ctx, &tabind, NULL, &ctx.v) == false;)
- prof_ctx_sum(ctx.p, &cnt_all, &leak_nctx);
+ prof_dump_ctx_prep(ctx.p, &cnt_all, &leak_nctx, &ctx_ql);
+ prof_leave(prof_tdata);
+
+ /* Create dump file. */
+ if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1)
+ goto label_open_close_error;
/* Dump profile header. */
- if (opt_lg_prof_sample == 0) {
- if (prof_printf(propagate_err,
- "heap profile: %"PRId64": %"PRId64
- " [%"PRIu64": %"PRIu64"] @ heapprofile\n",
- cnt_all.curobjs, cnt_all.curbytes,
- cnt_all.accumobjs, cnt_all.accumbytes))
- goto label_error;
- } else {
- if (prof_printf(propagate_err,
- "heap profile: %"PRId64": %"PRId64
- " [%"PRIu64": %"PRIu64"] @ heap_v2/%"PRIu64"\n",
- cnt_all.curobjs, cnt_all.curbytes,
- cnt_all.accumobjs, cnt_all.accumbytes,
- ((uint64_t)1U << opt_lg_prof_sample)))
- goto label_error;
- }
+ if (prof_dump_header(propagate_err, &cnt_all))
+ goto label_write_error;
- /* Dump per ctx profile stats. */
- for (tabind = 0; ckh_iter(&bt2ctx, &tabind, &bt.v, &ctx.v)
- == false;) {
- if (prof_dump_ctx(propagate_err, ctx.p, bt.p))
- goto label_error;
+ /* Dump per ctx profile stats. */
+ while ((ctx.p = ql_first(&ctx_ql)) != NULL) {
+ if (prof_dump_ctx(propagate_err, ctx.p, ctx.p->bt, &ctx_ql))
+ goto label_write_error;
}
/* Dump /proc/<pid>/maps if possible. */
if (prof_dump_maps(propagate_err))
- goto label_error;
+ goto label_write_error;
- if (prof_flush(propagate_err))
- goto label_error;
- close(prof_dump_fd);
- prof_leave(prof_tdata);
+ if (prof_dump_close(propagate_err))
+ goto label_open_close_error;
- if (leakcheck && cnt_all.curbytes != 0) {
- malloc_printf("<jemalloc>: Leak summary: %"PRId64" byte%s, %"
- PRId64" object%s, %zu context%s\n",
- cnt_all.curbytes, (cnt_all.curbytes != 1) ? "s" : "",
- cnt_all.curobjs, (cnt_all.curobjs != 1) ? "s" : "",
- leak_nctx, (leak_nctx != 1) ? "s" : "");
- malloc_printf(
- "<jemalloc>: Run pprof on \"%s\" for leak detail\n",
- filename);
- }
+ malloc_mutex_unlock(&prof_dump_mtx);
+
+ if (leakcheck)
+ prof_leakcheck(&cnt_all, leak_nctx, filename);
return (false);
-label_error:
- prof_leave(prof_tdata);
+label_write_error:
+ prof_dump_close(propagate_err);
+label_open_close_error:
+ while ((ctx.p = ql_first(&ctx_ql)) != NULL)
+ prof_dump_ctx_cleanup(ctx.p, &ctx_ql);
+ malloc_mutex_unlock(&prof_dump_mtx);
return (true);
}
#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
+#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
static void
prof_dump_filename(char *filename, char v, int64_t vseq)
{
cassert(config_prof);
- if (vseq != UINT64_C(0xffffffffffffffff)) {
+ if (vseq != VSEQ_INVALID) {
/* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
"%s.%d.%"PRIu64".%c%"PRId64".heap",
@@ -951,7 +1093,7 @@ prof_fdump(void)
if (opt_prof_final && opt_prof_prefix[0] != '\0') {
malloc_mutex_lock(&prof_dump_seq_mtx);
- prof_dump_filename(filename, 'f', UINT64_C(0xffffffffffffffff));
+ prof_dump_filename(filename, 'f', VSEQ_INVALID);
malloc_mutex_unlock(&prof_dump_seq_mtx);
prof_dump(false, filename, opt_prof_leak);
}
@@ -967,11 +1109,7 @@ prof_idump(void)
if (prof_booted == false)
return;
- /*
- * Don't call prof_tdata_get() here, because it could cause recursive
- * allocation.
- */
- prof_tdata = *prof_tdata_tsd_get();
+ prof_tdata = prof_tdata_get(false);
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
return;
if (prof_tdata->enq) {
@@ -1021,11 +1159,7 @@ prof_gdump(void)
if (prof_booted == false)
return;
- /*
- * Don't call prof_tdata_get() here, because it could cause recursive
- * allocation.
- */
- prof_tdata = *prof_tdata_tsd_get();
+ prof_tdata = prof_tdata_get(false);
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
return;
if (prof_tdata->enq) {
@@ -1043,34 +1177,13 @@ prof_gdump(void)
}
static void
-prof_bt_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2)
+prof_bt_hash(const void *key, size_t r_hash[2])
{
- size_t ret1, ret2;
- uint64_t h;
prof_bt_t *bt = (prof_bt_t *)key;
cassert(config_prof);
- assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
- assert(hash1 != NULL);
- assert(hash2 != NULL);
- h = hash(bt->vec, bt->len * sizeof(void *),
- UINT64_C(0x94122f335b332aea));
- if (minbits <= 32) {
- /*
- * Avoid doing multiple hashes, since a single hash provides
- * enough bits.
- */
- ret1 = h & ZU(0xffffffffU);
- ret2 = h >> 32;
- } else {
- ret1 = h;
- ret2 = hash(bt->vec, bt->len * sizeof(void *),
- UINT64_C(0x8432a476666bbc13));
- }
-
- *hash1 = ret1;
- *hash2 = ret2;
+ hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash);
}
static bool
@@ -1086,14 +1199,6 @@ prof_bt_keycomp(const void *k1, const void *k2)
return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
}
-static malloc_mutex_t *
-prof_ctx_mutex_choose(void)
-{
- unsigned nctxs = atomic_add_u(&cum_ctxs, 1);
-
- return (&ctx_locks[(nctxs - 1) % PROF_NCTX_LOCKS]);
-}
-
prof_tdata_t *
prof_tdata_init(void)
{
@@ -1206,13 +1311,11 @@ prof_boot1(void)
*/
opt_prof = true;
opt_prof_gdump = false;
- prof_interval = 0;
} else if (opt_prof) {
if (opt_lg_prof_interval >= 0) {
prof_interval = (((uint64_t)1U) <<
opt_lg_prof_interval);
- } else
- prof_interval = 0;
+ }
}
prof_promote = (opt_prof && opt_lg_prof_sample > LG_PAGE);
@@ -1240,6 +1343,8 @@ prof_boot2(void)
if (malloc_mutex_init(&prof_dump_seq_mtx))
return (true);
+ if (malloc_mutex_init(&prof_dump_mtx))
+ return (true);
if (atexit(prof_fdump) != 0) {
malloc_write("<jemalloc>: Error in atexit()\n");
@@ -1277,10 +1382,10 @@ prof_prefork(void)
if (opt_prof) {
unsigned i;
- malloc_mutex_lock(&bt2ctx_mtx);
- malloc_mutex_lock(&prof_dump_seq_mtx);
+ malloc_mutex_prefork(&bt2ctx_mtx);
+ malloc_mutex_prefork(&prof_dump_seq_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++)
- malloc_mutex_lock(&ctx_locks[i]);
+ malloc_mutex_prefork(&ctx_locks[i]);
}
}
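Note: the prof.c rework above renames the dump I/O helpers to prof_dump_*(), guards the shared dump buffer with a dedicated prof_dump_mtx, splits global backtrace lookup into prof_lookup_global(), and turns dumping into a two-pass walk over a dump_link list pinned via nlimbo so contexts cannot vanish mid-dump. It also adds a JEMALLOC_JET hook so tests can replace prof_dump_open(). Below is an illustrative sketch of that hook pattern with made-up names; it mirrors only the undef/define-plus-function-pointer technique and is not jemalloc code.

#include <fcntl.h>
#include <stdio.h>

typedef int (dump_open_t)(const char *path);

/* Default implementation, compiled under an *_impl-style name. */
static int
dump_open_impl(const char *path)
{
	return (open(path, O_WRONLY | O_CREAT | O_TRUNC, 0644));
}

/* Tests may reassign this pointer; production code calls through it. */
dump_open_t *dump_open = dump_open_impl;

/* Example override a test harness might install to inject failures. */
static int
failing_open(const char *path)
{
	(void)path;
	return (-1);
}

int
main(void)
{
	dump_open = failing_open;
	printf("dump_open -> %d\n", dump_open("/tmp/example.heap"));
	return (0);
}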
diff --git a/deps/jemalloc/src/quarantine.c b/deps/jemalloc/src/quarantine.c
index 9005ab3ba..543151164 100644
--- a/deps/jemalloc/src/quarantine.c
+++ b/deps/jemalloc/src/quarantine.c
@@ -1,3 +1,4 @@
+#define JEMALLOC_QUARANTINE_C_
#include "jemalloc/internal/jemalloc_internal.h"
/*
@@ -11,39 +12,18 @@
/******************************************************************************/
/* Data. */
-typedef struct quarantine_obj_s quarantine_obj_t;
-typedef struct quarantine_s quarantine_t;
-
-struct quarantine_obj_s {
- void *ptr;
- size_t usize;
-};
-
-struct quarantine_s {
- size_t curbytes;
- size_t curobjs;
- size_t first;
-#define LG_MAXOBJS_INIT 10
- size_t lg_maxobjs;
- quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */
-};
-
-static void quarantine_cleanup(void *arg);
-
-malloc_tsd_data(static, quarantine, quarantine_t *, NULL)
-malloc_tsd_funcs(JEMALLOC_INLINE, quarantine, quarantine_t *, NULL,
- quarantine_cleanup)
+malloc_tsd_data(, quarantine, quarantine_t *, NULL)
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
-static quarantine_t *quarantine_init(size_t lg_maxobjs);
static quarantine_t *quarantine_grow(quarantine_t *quarantine);
+static void quarantine_drain_one(quarantine_t *quarantine);
static void quarantine_drain(quarantine_t *quarantine, size_t upper_bound);
/******************************************************************************/
-static quarantine_t *
+quarantine_t *
quarantine_init(size_t lg_maxobjs)
{
quarantine_t *quarantine;
@@ -68,8 +48,10 @@ quarantine_grow(quarantine_t *quarantine)
quarantine_t *ret;
ret = quarantine_init(quarantine->lg_maxobjs + 1);
- if (ret == NULL)
+ if (ret == NULL) {
+ quarantine_drain_one(quarantine);
return (quarantine);
+ }
ret->curbytes = quarantine->curbytes;
ret->curobjs = quarantine->curobjs;
@@ -89,23 +71,29 @@ quarantine_grow(quarantine_t *quarantine)
memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
sizeof(quarantine_obj_t));
}
+ idalloc(quarantine);
return (ret);
}
static void
+quarantine_drain_one(quarantine_t *quarantine)
+{
+ quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
+ assert(obj->usize == isalloc(obj->ptr, config_prof));
+ idalloc(obj->ptr);
+ quarantine->curbytes -= obj->usize;
+ quarantine->curobjs--;
+ quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
+ quarantine->lg_maxobjs) - 1);
+}
+
+static void
quarantine_drain(quarantine_t *quarantine, size_t upper_bound)
{
- while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0) {
- quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
- assert(obj->usize == isalloc(obj->ptr, config_prof));
- idalloc(obj->ptr);
- quarantine->curbytes -= obj->usize;
- quarantine->curobjs--;
- quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
- quarantine->lg_maxobjs) - 1);
- }
+ while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0)
+ quarantine_drain_one(quarantine);
}
void
@@ -119,24 +107,16 @@ quarantine(void *ptr)
quarantine = *quarantine_tsd_get();
if ((uintptr_t)quarantine <= (uintptr_t)QUARANTINE_STATE_MAX) {
- if (quarantine == NULL) {
- if ((quarantine = quarantine_init(LG_MAXOBJS_INIT)) ==
- NULL) {
- idalloc(ptr);
- return;
- }
- } else {
- if (quarantine == QUARANTINE_STATE_PURGATORY) {
- /*
- * Make a note that quarantine() was called
- * after quarantine_cleanup() was called.
- */
- quarantine = QUARANTINE_STATE_REINCARNATED;
- quarantine_tsd_set(&quarantine);
- }
- idalloc(ptr);
- return;
+ if (quarantine == QUARANTINE_STATE_PURGATORY) {
+ /*
+ * Make a note that quarantine() was called after
+ * quarantine_cleanup() was called.
+ */
+ quarantine = QUARANTINE_STATE_REINCARNATED;
+ quarantine_tsd_set(&quarantine);
}
+ idalloc(ptr);
+ return;
}
/*
* Drain one or more objects if the quarantine size limit would be
@@ -161,15 +141,24 @@ quarantine(void *ptr)
obj->usize = usize;
quarantine->curbytes += usize;
quarantine->curobjs++;
- if (opt_junk)
- memset(ptr, 0x5a, usize);
+ if (config_fill && opt_junk) {
+ /*
+ * Only do redzone validation if Valgrind isn't in
+ * operation.
+ */
+ if ((config_valgrind == false || opt_valgrind == false)
+ && usize <= SMALL_MAXCLASS)
+ arena_quarantine_junk_small(ptr, usize);
+ else
+ memset(ptr, 0x5a, usize);
+ }
} else {
assert(quarantine->curbytes == 0);
idalloc(ptr);
}
}
-static void
+void
quarantine_cleanup(void *arg)
{
quarantine_t *quarantine = *(quarantine_t **)arg;
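Note: in quarantine.c the oldest-object eviction moves into quarantine_drain_one(), which quarantine_grow() now also calls when it cannot allocate a larger ring (and the old ring is idalloc()ed instead of leaked); TSD bootstrapping is hoisted out of quarantine() itself. The following is a small, self-contained sketch of the drain-one/drain split on a fixed-size ring with hypothetical names, not jemalloc's dynamically sized quarantine.

#include <stdio.h>
#include <stddef.h>

#define QOBJS 8	/* Power of two, so wrap-around can use a mask. */

struct ring {
	size_t first;	/* Index of the oldest entry. */
	size_t count;
	int objs[QOBJS];
};

/* Evict exactly the oldest entry, as quarantine_drain_one() does. */
static void
drain_one(struct ring *r)
{
	if (r->count == 0)
		return;
	printf("evicting %d\n", r->objs[r->first]);
	r->first = (r->first + 1) & (QOBJS - 1);
	r->count--;
}

/* Evict entries until the ring is within the given bound. */
static void
drain(struct ring *r, size_t bound)
{
	while (r->count > bound)
		drain_one(r);
}

int
main(void)
{
	struct ring r = {0, 3, {10, 20, 30}};

	drain(&r, 1);
	return (0);
}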
diff --git a/deps/jemalloc/src/rtree.c b/deps/jemalloc/src/rtree.c
index 90c6935a0..205957ac4 100644
--- a/deps/jemalloc/src/rtree.c
+++ b/deps/jemalloc/src/rtree.c
@@ -2,42 +2,55 @@
#include "jemalloc/internal/jemalloc_internal.h"
rtree_t *
-rtree_new(unsigned bits)
+rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc)
{
rtree_t *ret;
- unsigned bits_per_level, height, i;
+ unsigned bits_per_level, bits_in_leaf, height, i;
+
+ assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3));
bits_per_level = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(void *)))) - 1;
- height = bits / bits_per_level;
- if (height * bits_per_level != bits)
- height++;
- assert(height * bits_per_level >= bits);
+ bits_in_leaf = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(uint8_t)))) - 1;
+ if (bits > bits_in_leaf) {
+ height = 1 + (bits - bits_in_leaf) / bits_per_level;
+ if ((height-1) * bits_per_level + bits_in_leaf != bits)
+ height++;
+ } else {
+ height = 1;
+ }
+ assert((height-1) * bits_per_level + bits_in_leaf >= bits);
- ret = (rtree_t*)base_alloc(offsetof(rtree_t, level2bits) +
+ ret = (rtree_t*)alloc(offsetof(rtree_t, level2bits) +
(sizeof(unsigned) * height));
if (ret == NULL)
return (NULL);
memset(ret, 0, offsetof(rtree_t, level2bits) + (sizeof(unsigned) *
height));
+ ret->alloc = alloc;
+ ret->dalloc = dalloc;
if (malloc_mutex_init(&ret->mutex)) {
- /* Leak the rtree. */
+ if (dalloc != NULL)
+ dalloc(ret);
return (NULL);
}
ret->height = height;
- if (bits_per_level * height > bits)
- ret->level2bits[0] = bits % bits_per_level;
- else
- ret->level2bits[0] = bits_per_level;
- for (i = 1; i < height; i++)
- ret->level2bits[i] = bits_per_level;
-
- ret->root = (void**)base_alloc(sizeof(void *) << ret->level2bits[0]);
+ if (height > 1) {
+ if ((height-1) * bits_per_level + bits_in_leaf > bits) {
+ ret->level2bits[0] = (bits - bits_in_leaf) %
+ bits_per_level;
+ } else
+ ret->level2bits[0] = bits_per_level;
+ for (i = 1; i < height-1; i++)
+ ret->level2bits[i] = bits_per_level;
+ ret->level2bits[height-1] = bits_in_leaf;
+ } else
+ ret->level2bits[0] = bits;
+
+ ret->root = (void**)alloc(sizeof(void *) << ret->level2bits[0]);
if (ret->root == NULL) {
- /*
- * We leak the rtree here, since there's no generic base
- * deallocation.
- */
+ if (dalloc != NULL)
+ dalloc(ret);
return (NULL);
}
memset(ret->root, 0, sizeof(void *) << ret->level2bits[0]);
@@ -45,6 +58,31 @@ rtree_new(unsigned bits)
return (ret);
}
+static void
+rtree_delete_subtree(rtree_t *rtree, void **node, unsigned level)
+{
+
+ if (level < rtree->height - 1) {
+ size_t nchildren, i;
+
+ nchildren = ZU(1) << rtree->level2bits[level];
+ for (i = 0; i < nchildren; i++) {
+ void **child = (void **)node[i];
+ if (child != NULL)
+ rtree_delete_subtree(rtree, child, level + 1);
+ }
+ }
+ rtree->dalloc(node);
+}
+
+void
+rtree_delete(rtree_t *rtree)
+{
+
+ rtree_delete_subtree(rtree, rtree->root, 0);
+ rtree->dalloc(rtree);
+}
+
void
rtree_prefork(rtree_t *rtree)
{
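Note: rtree_new() now takes explicit alloc/dalloc callbacks instead of hard-wiring base_alloc(), sizes the leaf level for one-byte entries (bits_in_leaf), and gains rtree_delete(), so a partially constructed tree can be reclaimed rather than leaked. A rough sketch of the injected-allocator pattern, with invented names and a trivial payload, follows; it is not the rtree implementation.

#include <stdio.h>
#include <stdlib.h>

typedef void *(alloc_fn)(size_t size);
typedef void (dalloc_fn)(void *ptr);

struct tree {
	alloc_fn *alloc;
	dalloc_fn *dalloc;	/* May be NULL for arena-style allocators. */
	void *root;
};

static struct tree *
tree_new(alloc_fn *alloc, dalloc_fn *dalloc)
{
	struct tree *t = alloc(sizeof(*t));

	if (t == NULL)
		return (NULL);
	t->alloc = alloc;
	t->dalloc = dalloc;
	t->root = alloc(64);
	if (t->root == NULL) {
		/* Unlike the old base_alloc()-only code, we can clean up. */
		if (dalloc != NULL)
			dalloc(t);
		return (NULL);
	}
	return (t);
}

static void
tree_delete(struct tree *t)
{
	if (t->dalloc != NULL) {
		t->dalloc(t->root);
		t->dalloc(t);
	}
}

int
main(void)
{
	struct tree *t = tree_new(malloc, free);

	if (t != NULL)
		tree_delete(t);
	return (0);
}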
diff --git a/deps/jemalloc/src/stats.c b/deps/jemalloc/src/stats.c
index 43f87af67..bef2ab33c 100644
--- a/deps/jemalloc/src/stats.c
+++ b/deps/jemalloc/src/stats.c
@@ -345,25 +345,25 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
malloc_cprintf(write_cb, cbopaque, "Assertions %s\n",
bv ? "enabled" : "disabled");
-#define OPT_WRITE_BOOL(n) \
+#define OPT_WRITE_BOOL(n) \
if ((err = je_mallctl("opt."#n, &bv, &bsz, NULL, 0)) \
== 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %s\n", bv ? "true" : "false"); \
}
-#define OPT_WRITE_SIZE_T(n) \
+#define OPT_WRITE_SIZE_T(n) \
if ((err = je_mallctl("opt."#n, &sv, &ssz, NULL, 0)) \
== 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zu\n", sv); \
}
-#define OPT_WRITE_SSIZE_T(n) \
+#define OPT_WRITE_SSIZE_T(n) \
if ((err = je_mallctl("opt."#n, &ssv, &sssz, NULL, 0)) \
== 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zd\n", ssv); \
}
-#define OPT_WRITE_CHAR_P(n) \
+#define OPT_WRITE_CHAR_P(n) \
if ((err = je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0)) \
== 0) { \
malloc_cprintf(write_cb, cbopaque, \
diff --git a/deps/jemalloc/src/tcache.c b/deps/jemalloc/src/tcache.c
index 47e14f30b..6de92960b 100644
--- a/deps/jemalloc/src/tcache.c
+++ b/deps/jemalloc/src/tcache.c
@@ -97,9 +97,8 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
arena_bin_t *bin = &arena->bins[binind];
if (config_prof && arena == tcache->arena) {
- malloc_mutex_lock(&arena->lock);
- arena_prof_accum(arena, tcache->prof_accumbytes);
- malloc_mutex_unlock(&arena->lock);
+ if (arena_prof_accum(arena, tcache->prof_accumbytes))
+ prof_idump();
tcache->prof_accumbytes = 0;
}
@@ -176,11 +175,14 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
tbin->avail[0]);
arena_t *arena = chunk->arena;
+ UNUSED bool idump;
+ if (config_prof)
+ idump = false;
malloc_mutex_lock(&arena->lock);
if ((config_prof || config_stats) && arena == tcache->arena) {
if (config_prof) {
- arena_prof_accum(arena,
+ idump = arena_prof_accum_locked(arena,
tcache->prof_accumbytes);
tcache->prof_accumbytes = 0;
}
@@ -212,6 +214,8 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
}
}
malloc_mutex_unlock(&arena->lock);
+ if (config_prof && idump)
+ prof_idump();
}
if (config_stats && merged_stats == false) {
/*
@@ -256,8 +260,8 @@ tcache_arena_dissociate(tcache_t *tcache)
/* Unlink from list of extant tcaches. */
malloc_mutex_lock(&tcache->arena->lock);
ql_remove(&tcache->arena->tcache_ql, tcache, link);
- malloc_mutex_unlock(&tcache->arena->lock);
tcache_stats_merge(tcache, tcache->arena);
+ malloc_mutex_unlock(&tcache->arena->lock);
}
}
@@ -288,7 +292,7 @@ tcache_create(arena_t *arena)
else if (size <= tcache_maxclass)
tcache = (tcache_t *)arena_malloc_large(arena, size, true);
else
- tcache = (tcache_t *)icallocx(size, false, arena);
+ tcache = (tcache_t *)icalloct(size, false, arena);
if (tcache == NULL)
return (NULL);
@@ -343,11 +347,9 @@ tcache_destroy(tcache_t *tcache)
}
}
- if (config_prof && tcache->prof_accumbytes > 0) {
- malloc_mutex_lock(&tcache->arena->lock);
- arena_prof_accum(tcache->arena, tcache->prof_accumbytes);
- malloc_mutex_unlock(&tcache->arena->lock);
- }
+ if (config_prof && tcache->prof_accumbytes > 0 &&
+ arena_prof_accum(tcache->arena, tcache->prof_accumbytes))
+ prof_idump();
tcache_size = arena_salloc(tcache, false);
if (tcache_size <= SMALL_MAXCLASS) {
@@ -364,7 +366,7 @@ tcache_destroy(tcache_t *tcache)
arena_dalloc_large(arena, chunk, tcache);
} else
- idallocx(tcache, false);
+ idalloct(tcache, false);
}
void
@@ -397,11 +399,14 @@ tcache_thread_cleanup(void *arg)
}
}
+/* Caller must own arena->lock. */
void
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
{
unsigned i;
+ cassert(config_stats);
+
/* Merge and reset tcache stats. */
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
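Note: the tcache.c changes rely on arena_prof_accum()/arena_prof_accum_locked() now reporting, via their return value, that an interval dump is due, so prof_idump() runs only after arena->lock has been dropped; tcache_stats_merge() is now called while the arena lock is still held, and icallocx/idallocx become icalloct/idalloct. The sketch below shows the general accumulate-under-lock, act-after-unlock pattern with placeholder names; it is an illustration, not the arena code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long accum;

/* Must be called with the lock held; returns true when a dump is due. */
static bool
accum_add_locked(unsigned long n, unsigned long threshold)
{
	accum += n;
	if (accum >= threshold) {
		accum -= threshold;
		return (true);
	}
	return (false);
}

static void
record_bytes(unsigned long n)
{
	bool dump;

	pthread_mutex_lock(&lock);
	dump = accum_add_locked(n, 1024);
	pthread_mutex_unlock(&lock);
	/* The expensive side effect runs without holding the lock. */
	if (dump)
		printf("interval dump due\n");
}

int
main(void)
{
	record_bytes(2000);
	return (0);
}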
diff --git a/deps/jemalloc/src/tsd.c b/deps/jemalloc/src/tsd.c
index 961a54632..700caabfe 100644
--- a/deps/jemalloc/src/tsd.c
+++ b/deps/jemalloc/src/tsd.c
@@ -21,7 +21,7 @@ void
malloc_tsd_dalloc(void *wrapper)
{
- idalloc(wrapper);
+ idalloct(wrapper, false);
}
void
@@ -105,3 +105,37 @@ JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used)
static const BOOL (WINAPI *tls_callback)(HINSTANCE hinstDLL,
DWORD fdwReason, LPVOID lpvReserved) = _tls_callback;
#endif
+
+#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
+ !defined(_WIN32))
+void *
+tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
+{
+ pthread_t self = pthread_self();
+ tsd_init_block_t *iter;
+
+ /* Check whether this thread has already inserted into the list. */
+ malloc_mutex_lock(&head->lock);
+ ql_foreach(iter, &head->blocks, link) {
+ if (iter->thread == self) {
+ malloc_mutex_unlock(&head->lock);
+ return (iter->data);
+ }
+ }
+ /* Insert block into list. */
+ ql_elm_new(block, link);
+ block->thread = self;
+ ql_tail_insert(&head->blocks, block, link);
+ malloc_mutex_unlock(&head->lock);
+ return (NULL);
+}
+
+void
+tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block)
+{
+
+ malloc_mutex_lock(&head->lock);
+ ql_remove(&head->blocks, block, link);
+ malloc_mutex_unlock(&head->lock);
+}
+#endif
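Note: tsd.c adds tsd_init_check_recursion()/tsd_init_finish() for builds without native TSD cleanup or TLS: a thread that re-enters TSD initialization (for example because initialization itself allocates) finds its own in-progress block on a list keyed by thread id instead of recursing. A self-contained sketch of that guard, using hypothetical types, is below.

#include <pthread.h>
#include <stdio.h>

struct init_block {
	struct init_block *next;
	pthread_t thread;
	void *data;
};

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static struct init_block *init_blocks;

/*
 * Register this thread's in-progress block, or return the data of the block
 * it already registered if this is a recursive entry.
 */
static void *
init_check_recursion(struct init_block *block, void *data)
{
	pthread_t self = pthread_self();
	struct init_block *iter;

	pthread_mutex_lock(&init_lock);
	for (iter = init_blocks; iter != NULL; iter = iter->next) {
		if (pthread_equal(iter->thread, self)) {
			pthread_mutex_unlock(&init_lock);
			return (iter->data);	/* Already initializing. */
		}
	}
	block->thread = self;
	block->data = data;
	block->next = init_blocks;
	init_blocks = block;
	pthread_mutex_unlock(&init_lock);
	return (NULL);
}

int
main(void)
{
	struct init_block b;
	int payload = 42;

	if (init_check_recursion(&b, &payload) == NULL)
		printf("first entry on this thread\n");
	if (init_check_recursion(&b, &payload) != NULL)
		printf("recursive entry detected\n");
	return (0);
}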
diff --git a/deps/jemalloc/src/util.c b/deps/jemalloc/src/util.c
index b3a011436..93a19fd16 100644
--- a/deps/jemalloc/src/util.c
+++ b/deps/jemalloc/src/util.c
@@ -77,7 +77,7 @@ malloc_write(const char *s)
* provide a wrapper.
*/
int
-buferror(char *buf, size_t buflen)
+buferror(int err, char *buf, size_t buflen)
{
#ifdef _WIN32
@@ -85,34 +85,36 @@ buferror(char *buf, size_t buflen)
(LPSTR)buf, buflen, NULL);
return (0);
#elif defined(_GNU_SOURCE)
- char *b = strerror_r(errno, buf, buflen);
+ char *b = strerror_r(err, buf, buflen);
if (b != buf) {
strncpy(buf, b, buflen);
buf[buflen-1] = '\0';
}
return (0);
#else
- return (strerror_r(errno, buf, buflen));
+ return (strerror_r(err, buf, buflen));
#endif
}
uintmax_t
-malloc_strtoumax(const char *nptr, char **endptr, int base)
+malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base)
{
uintmax_t ret, digit;
int b;
bool neg;
const char *p, *ns;
+ p = nptr;
if (base < 0 || base == 1 || base > 36) {
+ ns = p;
set_errno(EINVAL);
- return (UINTMAX_MAX);
+ ret = UINTMAX_MAX;
+ goto label_return;
}
b = base;
/* Swallow leading whitespace and get sign, if any. */
neg = false;
- p = nptr;
while (true) {
switch (*p) {
case '\t': case '\n': case '\v': case '\f': case '\r': case ' ':
@@ -146,7 +148,7 @@ malloc_strtoumax(const char *nptr, char **endptr, int base)
if (b == 8)
p++;
break;
- case 'x':
+ case 'X': case 'x':
switch (p[2]) {
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
@@ -164,7 +166,9 @@ malloc_strtoumax(const char *nptr, char **endptr, int base)
}
break;
default:
- break;
+ p++;
+ ret = 0;
+ goto label_return;
}
}
if (b == 0)
@@ -181,13 +185,22 @@ malloc_strtoumax(const char *nptr, char **endptr, int base)
if (ret < pret) {
/* Overflow. */
set_errno(ERANGE);
- return (UINTMAX_MAX);
+ ret = UINTMAX_MAX;
+ goto label_return;
}
p++;
}
if (neg)
ret = -ret;
+ if (p == ns) {
+ /* No conversion performed. */
+ set_errno(EINVAL);
+ ret = UINTMAX_MAX;
+ goto label_return;
+ }
+
+label_return:
if (endptr != NULL) {
if (p == ns) {
/* No characters were converted. */
@@ -195,7 +208,6 @@ malloc_strtoumax(const char *nptr, char **endptr, int base)
} else
*endptr = (char *)p;
}
-
return (ret);
}
@@ -331,7 +343,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
APPEND_C(' '); \
} \
} while (0)
-#define GET_ARG_NUMERIC(val, len) do { \
+#define GET_ARG_NUMERIC(val, len) do { \
switch (len) { \
case '?': \
val = va_arg(ap, int); \
@@ -354,6 +366,9 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
case 'j': \
val = va_arg(ap, intmax_t); \
break; \
+ case 'j' | 0x80: \
+ val = va_arg(ap, uintmax_t); \
+ break; \
case 't': \
val = va_arg(ap, ptrdiff_t); \
break; \
@@ -385,11 +400,6 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
unsigned char len = '?';
f++;
- if (*f == '%') {
- /* %% */
- APPEND_C(*f);
- break;
- }
/* Flags. */
while (true) {
switch (*f) {
@@ -419,6 +429,10 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
case '*':
width = va_arg(ap, int);
f++;
+ if (width < 0) {
+ left_justify = true;
+ width = -width;
+ }
break;
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9': {
@@ -428,19 +442,16 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
assert(uwidth != UINTMAX_MAX || get_errno() !=
ERANGE);
width = (int)uwidth;
- if (*f == '.') {
- f++;
- goto label_precision;
- } else
- goto label_length;
break;
- } case '.':
- f++;
- goto label_precision;
- default: goto label_length;
+ } default:
+ break;
}
+ /* Width/precision separator. */
+ if (*f == '.')
+ f++;
+ else
+ goto label_length;
/* Precision. */
- label_precision:
switch (*f) {
case '*':
prec = va_arg(ap, int);
@@ -469,16 +480,8 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
} else
len = 'l';
break;
- case 'j':
- len = 'j';
- f++;
- break;
- case 't':
- len = 't';
- f++;
- break;
- case 'z':
- len = 'z';
+ case 'q': case 'j': case 't': case 'z':
+ len = *f;
f++;
break;
default: break;
@@ -487,6 +490,11 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
switch (*f) {
char *s;
size_t slen;
+ case '%':
+ /* %% */
+ APPEND_C(*f);
+ f++;
+ break;
case 'd': case 'i': {
intmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[D2S_BUFSIZE];
@@ -540,7 +548,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
assert(len == '?' || len == 'l');
assert_not_implemented(len != 'l');
s = va_arg(ap, char *);
- slen = (prec == -1) ? strlen(s) : prec;
+ slen = (prec < 0) ? strlen(s) : prec;
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
@@ -553,8 +561,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
- }
- default: not_implemented();
+ } default: not_reached();
}
break;
} default: {
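Note: util.c now passes the error code into buferror() explicitly rather than reading errno inside it, lets malloc_strtoumax() accept an uppercase "0X" prefix and fail with EINVAL when no digits are converted, and teaches malloc_vsnprintf() about %%, negative '*' widths, and the 'q' length modifier. The snippet below sketches the buferror()-style wrapper (capture the error code at the failure site, then format it later), mirroring the same _GNU_SOURCE distinction; the names are illustrative.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Format an error code captured earlier, so later libc calls cannot clobber it. */
static int
format_error(int err, char *buf, size_t buflen)
{
#if defined(_GNU_SOURCE)
	/* GNU strerror_r() may return a static string instead of filling buf. */
	const char *b = strerror_r(err, buf, buflen);
	if (b != buf) {
		strncpy(buf, b, buflen);
		buf[buflen - 1] = '\0';
	}
	return (0);
#else
	/* XSI strerror_r() fills buf and returns an error code. */
	return (strerror_r(err, buf, buflen));
#endif
}

int
main(void)
{
	char buf[128];
	int err = EINVAL;	/* Saved at the failure site. */

	format_error(err, buf, sizeof(buf));
	printf("%s\n", buf);
	return (0);
}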
diff --git a/deps/jemalloc/src/zone.c b/deps/jemalloc/src/zone.c
index c62c183f6..e0302ef4e 100644
--- a/deps/jemalloc/src/zone.c
+++ b/deps/jemalloc/src/zone.c
@@ -137,7 +137,7 @@ zone_destroy(malloc_zone_t *zone)
{
/* This function should never be called. */
- assert(false);
+ not_reached();
return (NULL);
}
diff --git a/deps/jemalloc/test/ALLOCM_ARENA.c b/deps/jemalloc/test/ALLOCM_ARENA.c
deleted file mode 100644
index 15856908f..000000000
--- a/deps/jemalloc/test/ALLOCM_ARENA.c
+++ /dev/null
@@ -1,66 +0,0 @@
-#define JEMALLOC_MANGLE
-#include "jemalloc_test.h"
-
-#define NTHREADS 10
-
-void *
-je_thread_start(void *arg)
-{
- unsigned thread_ind = (unsigned)(uintptr_t)arg;
- unsigned arena_ind;
- int r;
- void *p;
- size_t rsz, sz;
-
- sz = sizeof(arena_ind);
- if (mallctl("arenas.extend", &arena_ind, &sz, NULL, 0)
- != 0) {
- malloc_printf("Error in arenas.extend\n");
- abort();
- }
-
- if (thread_ind % 4 != 3) {
- size_t mib[3];
- size_t miblen = sizeof(mib) / sizeof(size_t);
- const char *dss_precs[] = {"disabled", "primary", "secondary"};
- const char *dss = dss_precs[thread_ind % 4];
- if (mallctlnametomib("arena.0.dss", mib, &miblen) != 0) {
- malloc_printf("Error in mallctlnametomib()\n");
- abort();
- }
- mib[1] = arena_ind;
- if (mallctlbymib(mib, miblen, NULL, NULL, (void *)&dss,
- sizeof(const char *))) {
- malloc_printf("Error in mallctlbymib()\n");
- abort();
- }
- }
-
- r = allocm(&p, &rsz, 1, ALLOCM_ARENA(arena_ind));
- if (r != ALLOCM_SUCCESS) {
- malloc_printf("Unexpected allocm() error\n");
- abort();
- }
-
- return (NULL);
-}
-
-int
-main(void)
-{
- je_thread_t threads[NTHREADS];
- unsigned i;
-
- malloc_printf("Test begin\n");
-
- for (i = 0; i < NTHREADS; i++) {
- je_thread_create(&threads[i], je_thread_start,
- (void *)(uintptr_t)i);
- }
-
- for (i = 0; i < NTHREADS; i++)
- je_thread_join(threads[i], NULL);
-
- malloc_printf("Test end\n");
- return (0);
-}
diff --git a/deps/jemalloc/test/ALLOCM_ARENA.exp b/deps/jemalloc/test/ALLOCM_ARENA.exp
deleted file mode 100644
index 369a88dd2..000000000
--- a/deps/jemalloc/test/ALLOCM_ARENA.exp
+++ /dev/null
@@ -1,2 +0,0 @@
-Test begin
-Test end
diff --git a/deps/jemalloc/test/aligned_alloc.exp b/deps/jemalloc/test/aligned_alloc.exp
deleted file mode 100644
index b5061c727..000000000
--- a/deps/jemalloc/test/aligned_alloc.exp
+++ /dev/null
@@ -1,25 +0,0 @@
-Test begin
-Alignment: 8
-Alignment: 16
-Alignment: 32
-Alignment: 64
-Alignment: 128
-Alignment: 256
-Alignment: 512
-Alignment: 1024
-Alignment: 2048
-Alignment: 4096
-Alignment: 8192
-Alignment: 16384
-Alignment: 32768
-Alignment: 65536
-Alignment: 131072
-Alignment: 262144
-Alignment: 524288
-Alignment: 1048576
-Alignment: 2097152
-Alignment: 4194304
-Alignment: 8388608
-Alignment: 16777216
-Alignment: 33554432
-Test end
diff --git a/deps/jemalloc/test/allocated.c b/deps/jemalloc/test/allocated.c
deleted file mode 100644
index 9884905d8..000000000
--- a/deps/jemalloc/test/allocated.c
+++ /dev/null
@@ -1,118 +0,0 @@
-#define JEMALLOC_MANGLE
-#include "jemalloc_test.h"
-
-void *
-je_thread_start(void *arg)
-{
- int err;
- void *p;
- uint64_t a0, a1, d0, d1;
- uint64_t *ap0, *ap1, *dp0, *dp1;
- size_t sz, usize;
-
- sz = sizeof(a0);
- if ((err = mallctl("thread.allocated", &a0, &sz, NULL, 0))) {
- if (err == ENOENT) {
-#ifdef JEMALLOC_STATS
- assert(false);
-#endif
- goto label_return;
- }
- malloc_printf("%s(): Error in mallctl(): %s\n", __func__,
- strerror(err));
- exit(1);
- }
- sz = sizeof(ap0);
- if ((err = mallctl("thread.allocatedp", &ap0, &sz, NULL, 0))) {
- if (err == ENOENT) {
-#ifdef JEMALLOC_STATS
- assert(false);
-#endif
- goto label_return;
- }
- malloc_printf("%s(): Error in mallctl(): %s\n", __func__,
- strerror(err));
- exit(1);
- }
- assert(*ap0 == a0);
-
- sz = sizeof(d0);
- if ((err = mallctl("thread.deallocated", &d0, &sz, NULL, 0))) {
- if (err == ENOENT) {
-#ifdef JEMALLOC_STATS
- assert(false);
-#endif
- goto label_return;
- }
- malloc_printf("%s(): Error in mallctl(): %s\n", __func__,
- strerror(err));
- exit(1);
- }
- sz = sizeof(dp0);
- if ((err = mallctl("thread.deallocatedp", &dp0, &sz, NULL, 0))) {
- if (err == ENOENT) {
-#ifdef JEMALLOC_STATS
- assert(false);
-#endif
- goto label_return;
- }
- malloc_printf("%s(): Error in mallctl(): %s\n", __func__,
- strerror(err));
- exit(1);
- }
- assert(*dp0 == d0);
-
- p = malloc(1);
- if (p == NULL) {
- malloc_printf("%s(): Error in malloc()\n", __func__);
- exit(1);
- }
-
- sz = sizeof(a1);
- mallctl("thread.allocated", &a1, &sz, NULL, 0);
- sz = sizeof(ap1);
- mallctl("thread.allocatedp", &ap1, &sz, NULL, 0);
- assert(*ap1 == a1);
- assert(ap0 == ap1);
-
- usize = malloc_usable_size(p);
- assert(a0 + usize <= a1);
-
- free(p);
-
- sz = sizeof(d1);
- mallctl("thread.deallocated", &d1, &sz, NULL, 0);
- sz = sizeof(dp1);
- mallctl("thread.deallocatedp", &dp1, &sz, NULL, 0);
- assert(*dp1 == d1);
- assert(dp0 == dp1);
-
- assert(d0 + usize <= d1);
-
-label_return:
- return (NULL);
-}
-
-int
-main(void)
-{
- int ret = 0;
- je_thread_t thread;
-
- malloc_printf("Test begin\n");
-
- je_thread_start(NULL);
-
- je_thread_create(&thread, je_thread_start, NULL);
- je_thread_join(thread, (void *)&ret);
-
- je_thread_start(NULL);
-
- je_thread_create(&thread, je_thread_start, NULL);
- je_thread_join(thread, (void *)&ret);
-
- je_thread_start(NULL);
-
- malloc_printf("Test end\n");
- return (ret);
-}
diff --git a/deps/jemalloc/test/allocated.exp b/deps/jemalloc/test/allocated.exp
deleted file mode 100644
index 369a88dd2..000000000
--- a/deps/jemalloc/test/allocated.exp
+++ /dev/null
@@ -1,2 +0,0 @@
-Test begin
-Test end
diff --git a/deps/jemalloc/test/allocm.c b/deps/jemalloc/test/allocm.c
deleted file mode 100644
index 80be673b8..000000000
--- a/deps/jemalloc/test/allocm.c
+++ /dev/null
@@ -1,194 +0,0 @@
-#define JEMALLOC_MANGLE
-#include "jemalloc_test.h"
-
-#define CHUNK 0x400000
-/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */
-#define MAXALIGN ((size_t)0x2000000LU)
-#define NITER 4
-
-int
-main(void)
-{
- int r;
- void *p;
- size_t nsz, rsz, sz, alignment, total;
- unsigned i;
- void *ps[NITER];
-
- malloc_printf("Test begin\n");
-
- sz = 42;
- nsz = 0;
- r = nallocm(&nsz, sz, 0);
- if (r != ALLOCM_SUCCESS) {
- malloc_printf("Unexpected nallocm() error\n");
- abort();
- }
- rsz = 0;
- r = allocm(&p, &rsz, sz, 0);
- if (r != ALLOCM_SUCCESS) {
- malloc_printf("Unexpected allocm() error\n");
- abort();
- }
- if (rsz < sz)
- malloc_printf("Real size smaller than expected\n");
- if (nsz != rsz)
- malloc_printf("nallocm()/allocm() rsize mismatch\n");
- if (dallocm(p, 0) != ALLOCM_SUCCESS)
- malloc_printf("Unexpected dallocm() error\n");
-
- r = allocm(&p, NULL, sz, 0);
- if (r != ALLOCM_SUCCESS) {
- malloc_printf("Unexpected allocm() error\n");
- abort();
- }
- if (dallocm(p, 0) != ALLOCM_SUCCESS)
- malloc_printf("Unexpected dallocm() error\n");
-
- nsz = 0;
- r = nallocm(&nsz, sz, ALLOCM_ZERO);
- if (r != ALLOCM_SUCCESS) {
- malloc_printf("Unexpected nallocm() error\n");
- abort();
- }
- rsz = 0;
- r = allocm(&p, &rsz, sz, ALLOCM_ZERO);
- if (r != ALLOCM_SUCCESS) {
- malloc_printf("Unexpected allocm() error\n");
- abort();
- }
- if (nsz != rsz)
- malloc_printf("nallocm()/allocm() rsize mismatch\n");
- if (dallocm(p, 0) != ALLOCM_SUCCESS)
- malloc_printf("Unexpected dallocm() error\n");
-
-#if LG_SIZEOF_PTR == 3
- alignment = UINT64_C(0x8000000000000000);
- sz = UINT64_C(0x8000000000000000);
-#else
- alignment = 0x80000000LU;
- sz = 0x80000000LU;
-#endif
- nsz = 0;
- r = nallocm(&nsz, sz, ALLOCM_ALIGN(alignment));
- if (r == ALLOCM_SUCCESS) {
- malloc_printf(
- "Expected error for nallocm(&nsz, %zu, %#x)\n",
- sz, ALLOCM_ALIGN(alignment));
- }
- rsz = 0;
- r = allocm(&p, &rsz, sz, ALLOCM_ALIGN(alignment));
- if (r == ALLOCM_SUCCESS) {
- malloc_printf(
- "Expected error for allocm(&p, %zu, %#x)\n",
- sz, ALLOCM_ALIGN(alignment));
- }
- if (nsz != rsz)
- malloc_printf("nallocm()/allocm() rsize mismatch\n");
-
-#if LG_SIZEOF_PTR == 3
- alignment = UINT64_C(0x4000000000000000);
- sz = UINT64_C(0x8400000000000001);
-#else
- alignment = 0x40000000LU;
- sz = 0x84000001LU;
-#endif
- nsz = 0;
- r = nallocm(&nsz, sz, ALLOCM_ALIGN(alignment));
- if (r != ALLOCM_SUCCESS)
- malloc_printf("Unexpected nallocm() error\n");
- rsz = 0;
- r = allocm(&p, &rsz, sz, ALLOCM_ALIGN(alignment));
- if (r == ALLOCM_SUCCESS) {
- malloc_printf(
- "Expected error for allocm(&p, %zu, %#x)\n",
- sz, ALLOCM_ALIGN(alignment));
- }
-
- alignment = 0x10LU;
-#if LG_SIZEOF_PTR == 3
- sz = UINT64_C(0xfffffffffffffff0);
-#else
- sz = 0xfffffff0LU;
-#endif
- nsz = 0;
- r = nallocm(&nsz, sz, ALLOCM_ALIGN(alignment));
- if (r == ALLOCM_SUCCESS) {
- malloc_printf(
- "Expected error for nallocm(&nsz, %zu, %#x)\n",
- sz, ALLOCM_ALIGN(alignment));
- }
- rsz = 0;
- r = allocm(&p, &rsz, sz, ALLOCM_ALIGN(alignment));
- if (r == ALLOCM_SUCCESS) {
- malloc_printf(
- "Expected error for allocm(&p, %zu, %#x)\n",
- sz, ALLOCM_ALIGN(alignment));
- }
- if (nsz != rsz)
- malloc_printf("nallocm()/allocm() rsize mismatch\n");
-
- for (i = 0; i < NITER; i++)
- ps[i] = NULL;
-
- for (alignment = 8;
- alignment <= MAXALIGN;
- alignment <<= 1) {
- total = 0;
- malloc_printf("Alignment: %zu\n", alignment);
- for (sz = 1;
- sz < 3 * alignment && sz < (1U << 31);
- sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
- for (i = 0; i < NITER; i++) {
- nsz = 0;
- r = nallocm(&nsz, sz,
- ALLOCM_ALIGN(alignment) | ALLOCM_ZERO);
- if (r != ALLOCM_SUCCESS) {
- malloc_printf(
- "nallocm() error for size %zu"
- " (%#zx): %d\n",
- sz, sz, r);
- exit(1);
- }
- rsz = 0;
- r = allocm(&ps[i], &rsz, sz,
- ALLOCM_ALIGN(alignment) | ALLOCM_ZERO);
- if (r != ALLOCM_SUCCESS) {
- malloc_printf(
- "allocm() error for size %zu"
- " (%#zx): %d\n",
- sz, sz, r);
- exit(1);
- }
- if (rsz < sz) {
- malloc_printf(
- "Real size smaller than"
- " expected\n");
- }
- if (nsz != rsz) {
- malloc_printf(
- "nallocm()/allocm() rsize"
- " mismatch\n");
- }
- if ((uintptr_t)p & (alignment-1)) {
- malloc_printf(
- "%p inadequately aligned for"
- " alignment: %zu\n", p, alignment);
- }
- sallocm(ps[i], &rsz, 0);
- total += rsz;
- if (total >= (MAXALIGN << 1))
- break;
- }
- for (i = 0; i < NITER; i++) {
- if (ps[i] != NULL) {
- dallocm(ps[i], 0);
- ps[i] = NULL;
- }
- }
- }
- }
-
- malloc_printf("Test end\n");
- return (0);
-}
diff --git a/deps/jemalloc/test/allocm.exp b/deps/jemalloc/test/allocm.exp
deleted file mode 100644
index b5061c727..000000000
--- a/deps/jemalloc/test/allocm.exp
+++ /dev/null
@@ -1,25 +0,0 @@
-Test begin
-Alignment: 8
-Alignment: 16
-Alignment: 32
-Alignment: 64
-Alignment: 128
-Alignment: 256
-Alignment: 512
-Alignment: 1024
-Alignment: 2048
-Alignment: 4096
-Alignment: 8192
-Alignment: 16384
-Alignment: 32768
-Alignment: 65536
-Alignment: 131072
-Alignment: 262144
-Alignment: 524288
-Alignment: 1048576
-Alignment: 2097152
-Alignment: 4194304
-Alignment: 8388608
-Alignment: 16777216
-Alignment: 33554432
-Test end
diff --git a/deps/jemalloc/test/bitmap.exp b/deps/jemalloc/test/bitmap.exp
deleted file mode 100644
index 369a88dd2..000000000
--- a/deps/jemalloc/test/bitmap.exp
+++ /dev/null
@@ -1,2 +0,0 @@
-Test begin
-Test end
diff --git a/deps/jemalloc/test/include/test/SFMT-alti.h b/deps/jemalloc/test/include/test/SFMT-alti.h
new file mode 100644
index 000000000..0005df6b4
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-alti.h
@@ -0,0 +1,186 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/**
+ * @file SFMT-alti.h
+ *
+ * @brief SIMD oriented Fast Mersenne Twister(SFMT)
+ * pseudorandom number generator
+ *
+ * @author Mutsuo Saito (Hiroshima University)
+ * @author Makoto Matsumoto (Hiroshima University)
+ *
+ * Copyright (C) 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * The new BSD License is applied to this software.
+ * see LICENSE.txt
+ */
+
+#ifndef SFMT_ALTI_H
+#define SFMT_ALTI_H
+
+/**
+ * This function represents the recursion formula in AltiVec and BIG ENDIAN.
+ * @param a a 128-bit part of the internal state array
+ * @param b a 128-bit part of the internal state array
+ * @param c a 128-bit part of the internal state array
+ * @param d a 128-bit part of the internal state array
+ * @return output
+ */
+JEMALLOC_ALWAYS_INLINE
+vector unsigned int vec_recursion(vector unsigned int a,
+ vector unsigned int b,
+ vector unsigned int c,
+ vector unsigned int d) {
+
+ const vector unsigned int sl1 = ALTI_SL1;
+ const vector unsigned int sr1 = ALTI_SR1;
+#ifdef ONLY64
+ const vector unsigned int mask = ALTI_MSK64;
+ const vector unsigned char perm_sl = ALTI_SL2_PERM64;
+ const vector unsigned char perm_sr = ALTI_SR2_PERM64;
+#else
+ const vector unsigned int mask = ALTI_MSK;
+ const vector unsigned char perm_sl = ALTI_SL2_PERM;
+ const vector unsigned char perm_sr = ALTI_SR2_PERM;
+#endif
+ vector unsigned int v, w, x, y, z;
+ x = vec_perm(a, (vector unsigned int)perm_sl, perm_sl);
+ v = a;
+ y = vec_sr(b, sr1);
+ z = vec_perm(c, (vector unsigned int)perm_sr, perm_sr);
+ w = vec_sl(d, sl1);
+ z = vec_xor(z, w);
+ y = vec_and(y, mask);
+ v = vec_xor(v, x);
+ z = vec_xor(z, y);
+ z = vec_xor(z, v);
+ return z;
+}
+
+/**
+ * This function fills the internal state array with pseudorandom
+ * integers.
+ */
+JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) {
+ int i;
+ vector unsigned int r, r1, r2;
+
+ r1 = ctx->sfmt[N - 2].s;
+ r2 = ctx->sfmt[N - 1].s;
+ for (i = 0; i < N - POS1; i++) {
+ r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2);
+ ctx->sfmt[i].s = r;
+ r1 = r2;
+ r2 = r;
+ }
+ for (; i < N; i++) {
+ r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1 - N].s, r1, r2);
+ ctx->sfmt[i].s = r;
+ r1 = r2;
+ r2 = r;
+ }
+}
+
+/**
+ * This function fills the user-specified array with pseudorandom
+ * integers.
+ *
+ * @param array a 128-bit array to be filled with pseudorandom numbers.
+ * @param size number of 128-bit pseudorandom numbers to be generated.
+ */
+JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
+ int i, j;
+ vector unsigned int r, r1, r2;
+
+ r1 = ctx->sfmt[N - 2].s;
+ r2 = ctx->sfmt[N - 1].s;
+ for (i = 0; i < N - POS1; i++) {
+ r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2);
+ array[i].s = r;
+ r1 = r2;
+ r2 = r;
+ }
+ for (; i < N; i++) {
+ r = vec_recursion(ctx->sfmt[i].s, array[i + POS1 - N].s, r1, r2);
+ array[i].s = r;
+ r1 = r2;
+ r2 = r;
+ }
+ /* main loop */
+ for (; i < size - N; i++) {
+ r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2);
+ array[i].s = r;
+ r1 = r2;
+ r2 = r;
+ }
+ for (j = 0; j < 2 * N - size; j++) {
+ ctx->sfmt[j].s = array[j + size - N].s;
+ }
+ for (; i < size; i++) {
+ r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2);
+ array[i].s = r;
+ ctx->sfmt[j++].s = r;
+ r1 = r2;
+ r2 = r;
+ }
+}
+
+#ifndef ONLY64
+#if defined(__APPLE__)
+#define ALTI_SWAP (vector unsigned char) \
+ (4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11)
+#else
+#define ALTI_SWAP {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}
+#endif
+/**
+ * This function swaps the high and low 32 bits of the 64-bit integers in a
+ * user-specified array.
+ *
+ * @param array a 128-bit array to be swapped.
+ * @param size size of the 128-bit array.
+ */
+JEMALLOC_INLINE void swap(w128_t *array, int size) {
+ int i;
+ const vector unsigned char perm = ALTI_SWAP;
+
+ for (i = 0; i < size; i++) {
+ array[i].s = vec_perm(array[i].s, (vector unsigned int)perm, perm);
+ }
+}
+#endif
+
+#endif
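Note on the file above: vec_recursion() is the AltiVec encoding of SFMT's core recursion. The new 128-bit word is a ^ (a << SL2 bytes) ^ ((b >> SR1) & MSK) ^ (c >> SR2 bytes) ^ (d << SL1), where a is the current state word, b is the word POS1 slots ahead, c and d are the two most recently produced words, the byte shifts act on the whole 128-bit value, and SR1/SL1 act on each 32-bit lane. For orientation only, the sketch below shows the equivalent portable, little-endian recursion, close to what the reference SFMT.c (not part of this hunk) does; the constants are the MEXP == 19937 values from SFMT-params19937.h further down in this patch, and the helper names are illustrative, not identifiers introduced by this commit.

#include <stdint.h>

/* MEXP == 19937 parameters (see SFMT-params19937.h in this patch). */
#define SL1  18
#define SL2  1
#define SR1  11
#define SR2  1
#define MSK1 0xdfffffefU
#define MSK2 0xddfecb7fU
#define MSK3 0xbffaffffU
#define MSK4 0xbffffff6U

typedef struct {
    uint32_t u[4]; /* one 128-bit state word as four 32-bit lanes */
} w128_t;

/* Shift a 128-bit value left by (shift * 8) bits, 0 < shift < 8. */
static void lshift128(w128_t *out, const w128_t *in, int shift) {
    uint64_t th = ((uint64_t)in->u[3] << 32) | in->u[2];
    uint64_t tl = ((uint64_t)in->u[1] << 32) | in->u[0];
    uint64_t oh = (th << (shift * 8)) | (tl >> (64 - shift * 8));
    uint64_t ol = tl << (shift * 8);
    out->u[0] = (uint32_t)ol;
    out->u[1] = (uint32_t)(ol >> 32);
    out->u[2] = (uint32_t)oh;
    out->u[3] = (uint32_t)(oh >> 32);
}

/* Shift a 128-bit value right by (shift * 8) bits, 0 < shift < 8. */
static void rshift128(w128_t *out, const w128_t *in, int shift) {
    uint64_t th = ((uint64_t)in->u[3] << 32) | in->u[2];
    uint64_t tl = ((uint64_t)in->u[1] << 32) | in->u[0];
    uint64_t oh = th >> (shift * 8);
    uint64_t ol = (tl >> (shift * 8)) | (th << (64 - shift * 8));
    out->u[0] = (uint32_t)ol;
    out->u[1] = (uint32_t)(ol >> 32);
    out->u[2] = (uint32_t)oh;
    out->u[3] = (uint32_t)(oh >> 32);
}

/* r = a ^ (a << SL2 bytes) ^ ((b >> SR1) & MSK) ^ (c >> SR2 bytes) ^ (d << SL1),
 * applied lane by lane; this is what vec_recursion computes in one AltiVec pass. */
static void do_recursion(w128_t *r, const w128_t *a, const w128_t *b,
    const w128_t *c, const w128_t *d) {
    w128_t x, y;

    lshift128(&x, a, SL2);
    rshift128(&y, c, SR2);
    r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK1) ^ y.u[0] ^ (d->u[0] << SL1);
    r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK2) ^ y.u[1] ^ (d->u[1] << SL1);
    r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK3) ^ y.u[2] ^ (d->u[2] << SL1);
    r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK4) ^ y.u[3] ^ (d->u[3] << SL1);
}

gen_rand_all() and gen_rand_array() above apply exactly this step across the internal state (or a caller-supplied array), carrying the last two generated words along as r1 and r2.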
diff --git a/deps/jemalloc/test/include/test/SFMT-params.h b/deps/jemalloc/test/include/test/SFMT-params.h
new file mode 100644
index 000000000..ade662220
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params.h
@@ -0,0 +1,132 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS_H
+#define SFMT_PARAMS_H
+
+#if !defined(MEXP)
+#ifdef __GNUC__
+ #warning "MEXP is not defined. I assume MEXP is 19937."
+#endif
+ #define MEXP 19937
+#endif
+/*-----------------
+ BASIC DEFINITIONS
+ -----------------*/
+/** Mersenne Exponent. The period of the sequence
+ * is a multiple of 2^MEXP-1.
+ * #define MEXP 19937 */
+/** SFMT generator has an internal state array of 128-bit integers,
+ * and N is its size. */
+#define N (MEXP / 128 + 1)
+/** N32 is the size of internal state array when regarded as an array
+ * of 32-bit integers.*/
+#define N32 (N * 4)
+/** N64 is the size of internal state array when regarded as an array
+ * of 64-bit integers.*/
+#define N64 (N * 2)
+
+/*----------------------
+ the parameters of SFMT
+ following definitions are in paramsXXXX.h file.
+ ----------------------*/
+/** the pick up position of the array.
+#define POS1 122
+*/
+
+/** the parameter of shift left as four 32-bit registers.
+#define SL1 18
+ */
+
+/** the parameter of shift left as one 128-bit register.
+ * The 128-bit integer is shifted by (SL2 * 8) bits.
+#define SL2 1
+*/
+
+/** the parameter of shift right as four 32-bit registers.
+#define SR1 11
+*/
+
+/** the parameter of shift right as one 128-bit register.
+ * The 128-bit integer is shifted by (SL2 * 8) bits.
+#define SR2 1
+*/
+
+/** A bitmask, used in the recursion. These parameters are introduced
+ * to break symmetry of SIMD.
+#define MSK1 0xdfffffefU
+#define MSK2 0xddfecb7fU
+#define MSK3 0xbffaffffU
+#define MSK4 0xbffffff6U
+*/
+
+/** These definitions are part of a 128-bit period certification vector.
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0x00000000U
+#define PARITY4 0xc98e126aU
+*/
+
+#if MEXP == 607
+ #include "test/SFMT-params607.h"
+#elif MEXP == 1279
+ #include "test/SFMT-params1279.h"
+#elif MEXP == 2281
+ #include "test/SFMT-params2281.h"
+#elif MEXP == 4253
+ #include "test/SFMT-params4253.h"
+#elif MEXP == 11213
+ #include "test/SFMT-params11213.h"
+#elif MEXP == 19937
+ #include "test/SFMT-params19937.h"
+#elif MEXP == 44497
+ #include "test/SFMT-params44497.h"
+#elif MEXP == 86243
+ #include "test/SFMT-params86243.h"
+#elif MEXP == 132049
+ #include "test/SFMT-params132049.h"
+#elif MEXP == 216091
+ #include "test/SFMT-params216091.h"
+#else
+#ifdef __GNUC__
+ #error "MEXP is not valid."
+ #undef MEXP
+#else
+ #undef MEXP
+#endif
+
+#endif
+
+#endif /* SFMT_PARAMS_H */
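The derived sizes in SFMT-params.h follow mechanically from MEXP: N is the number of 128-bit state words, while N32 and N64 are the same state viewed as 32-bit and 64-bit words. A quick check for the default MEXP of 19937 (an illustrative snippet, not part of the patch):

#include <stdio.h>

#define MEXP 19937
#define N   (MEXP / 128 + 1)  /* 19937 / 128 = 155 (integer division), so N = 156 */
#define N32 (N * 4)           /* 624 32-bit words of internal state */
#define N64 (N * 2)           /* 312 64-bit words of internal state */

int main(void) {
    printf("N=%d N32=%d N64=%d\n", N, N32, N64); /* prints: N=156 N32=624 N64=312 */
    return 0;
}

That is 156 * 128 = 19968 bits of state, just enough to support the period of a multiple of 2^19937 - 1 claimed in the comment above.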
diff --git a/deps/jemalloc/test/include/test/SFMT-params11213.h b/deps/jemalloc/test/include/test/SFMT-params11213.h
new file mode 100644
index 000000000..2994bd21d
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params11213.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS11213_H
+#define SFMT_PARAMS11213_H
+
+#define POS1 68
+#define SL1 14
+#define SL2 3
+#define SR1 7
+#define SR2 3
+#define MSK1 0xeffff7fbU
+#define MSK2 0xffffffefU
+#define MSK3 0xdfdfbfffU
+#define MSK4 0x7fffdbfdU
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0xe8148000U
+#define PARITY4 0xd0c7afa3U
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
+ #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
+ #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12}
+ #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12}
+#endif /* For OSX */
+#define IDSTR "SFMT-11213:68-14-3-7-3:effff7fb-ffffffef-dfdfbfff-7fffdbfd"
+
+#endif /* SFMT_PARAMS11213_H */
diff --git a/deps/jemalloc/test/include/test/SFMT-params1279.h b/deps/jemalloc/test/include/test/SFMT-params1279.h
new file mode 100644
index 000000000..d7959f980
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params1279.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS1279_H
+#define SFMT_PARAMS1279_H
+
+#define POS1 7
+#define SL1 14
+#define SL2 3
+#define SR1 5
+#define SR2 1
+#define MSK1 0xf7fefffdU
+#define MSK2 0x7fefcfffU
+#define MSK3 0xaff3ef3fU
+#define MSK4 0xb5ffff7fU
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0x00000000U
+#define PARITY4 0x20000000U
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
+ #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
+ #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
+ #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
+#endif /* For OSX */
+#define IDSTR "SFMT-1279:7-14-3-5-1:f7fefffd-7fefcfff-aff3ef3f-b5ffff7f"
+
+#endif /* SFMT_PARAMS1279_H */
diff --git a/deps/jemalloc/test/include/test/SFMT-params132049.h b/deps/jemalloc/test/include/test/SFMT-params132049.h
new file mode 100644
index 000000000..a1dcec392
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params132049.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS132049_H
+#define SFMT_PARAMS132049_H
+
+#define POS1 110
+#define SL1 19
+#define SL2 1
+#define SR1 21
+#define SR2 1
+#define MSK1 0xffffbb5fU
+#define MSK2 0xfb6ebf95U
+#define MSK3 0xfffefffaU
+#define MSK4 0xcff77fffU
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0xcb520000U
+#define PARITY4 0xc7e91c7dU
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
+ #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
+ #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
+ #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
+#endif /* For OSX */
+#define IDSTR "SFMT-132049:110-19-1-21-1:ffffbb5f-fb6ebf95-fffefffa-cff77fff"
+
+#endif /* SFMT_PARAMS132049_H */
diff --git a/deps/jemalloc/test/include/test/SFMT-params19937.h b/deps/jemalloc/test/include/test/SFMT-params19937.h
new file mode 100644
index 000000000..fb92b4c9b
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params19937.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS19937_H
+#define SFMT_PARAMS19937_H
+
+#define POS1 122
+#define SL1 18
+#define SL2 1
+#define SR1 11
+#define SR2 1
+#define MSK1 0xdfffffefU
+#define MSK2 0xddfecb7fU
+#define MSK3 0xbffaffffU
+#define MSK4 0xbffffff6U
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0x00000000U
+#define PARITY4 0x13c9e684U
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
+ #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
+ #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
+ #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
+#endif /* For OSX */
+#define IDSTR "SFMT-19937:122-18-1-11-1:dfffffef-ddfecb7f-bffaffff-bffffff6"
+
+#endif /* SFMT_PARAMS19937_H */
diff --git a/deps/jemalloc/test/include/test/SFMT-params216091.h b/deps/jemalloc/test/include/test/SFMT-params216091.h
new file mode 100644
index 000000000..125ce2820
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params216091.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS216091_H
+#define SFMT_PARAMS216091_H
+
+#define POS1 627
+#define SL1 11
+#define SL2 3
+#define SR1 10
+#define SR2 1
+#define MSK1 0xbff7bff7U
+#define MSK2 0xbfffffffU
+#define MSK3 0xbffffa7fU
+#define MSK4 0xffddfbfbU
+#define PARITY1 0xf8000001U
+#define PARITY2 0x89e80709U
+#define PARITY3 0x3bd2b64bU
+#define PARITY4 0x0c64b1e4U
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
+ #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
+ #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
+ #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
+#endif /* For OSX */
+#define IDSTR "SFMT-216091:627-11-3-10-1:bff7bff7-bfffffff-bffffa7f-ffddfbfb"
+
+#endif /* SFMT_PARAMS216091_H */
diff --git a/deps/jemalloc/test/include/test/SFMT-params2281.h b/deps/jemalloc/test/include/test/SFMT-params2281.h
new file mode 100644
index 000000000..0ef85c407
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params2281.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS2281_H
+#define SFMT_PARAMS2281_H
+
+#define POS1 12
+#define SL1 19
+#define SL2 1
+#define SR1 5
+#define SR2 1
+#define MSK1 0xbff7ffbfU
+#define MSK2 0xfdfffffeU
+#define MSK3 0xf7ffef7fU
+#define MSK4 0xf2f7cbbfU
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0x00000000U
+#define PARITY4 0x41dfa600U
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
+ #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
+ #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
+ #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
+#endif /* For OSX */
+#define IDSTR "SFMT-2281:12-19-1-5-1:bff7ffbf-fdfffffe-f7ffef7f-f2f7cbbf"
+
+#endif /* SFMT_PARAMS2281_H */
diff --git a/deps/jemalloc/test/include/test/SFMT-params4253.h b/deps/jemalloc/test/include/test/SFMT-params4253.h
new file mode 100644
index 000000000..9f07bc67e
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params4253.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS4253_H
+#define SFMT_PARAMS4253_H
+
+#define POS1 17
+#define SL1 20
+#define SL2 1
+#define SR1 7
+#define SR2 1
+#define MSK1 0x9f7bffffU
+#define MSK2 0x9fffff5fU
+#define MSK3 0x3efffffbU
+#define MSK4 0xfffff7bbU
+#define PARITY1 0xa8000001U
+#define PARITY2 0xaf5390a3U
+#define PARITY3 0xb740b3f8U
+#define PARITY4 0x6c11486dU
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
+ #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
+ #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
+ #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
+#endif /* For OSX */
+#define IDSTR "SFMT-4253:17-20-1-7-1:9f7bffff-9fffff5f-3efffffb-fffff7bb"
+
+#endif /* SFMT_PARAMS4253_H */
diff --git a/deps/jemalloc/test/include/test/SFMT-params44497.h b/deps/jemalloc/test/include/test/SFMT-params44497.h
new file mode 100644
index 000000000..85598fed5
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params44497.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS44497_H
+#define SFMT_PARAMS44497_H
+
+#define POS1 330
+#define SL1 5
+#define SL2 3
+#define SR1 9
+#define SR2 3
+#define MSK1 0xeffffffbU
+#define MSK2 0xdfbebfffU
+#define MSK3 0xbfbf7befU
+#define MSK4 0x9ffd7bffU
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0xa3ac4000U
+#define PARITY4 0xecc1327aU
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
+ #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
+ #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12}
+ #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12}
+#endif /* For OSX */
+#define IDSTR "SFMT-44497:330-5-3-9-3:effffffb-dfbebfff-bfbf7bef-9ffd7bff"
+
+#endif /* SFMT_PARAMS44497_H */
diff --git a/deps/jemalloc/test/include/test/SFMT-params607.h b/deps/jemalloc/test/include/test/SFMT-params607.h
new file mode 100644
index 000000000..bc76485f8
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params607.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS607_H
+#define SFMT_PARAMS607_H
+
+#define POS1 2
+#define SL1 15
+#define SL2 3
+#define SR1 13
+#define SR2 3
+#define MSK1 0xfdff37ffU
+#define MSK2 0xef7f3f7dU
+#define MSK3 0xff777b7dU
+#define MSK4 0x7ff7fb2fU
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0x00000000U
+#define PARITY4 0x5986f054U
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
+ #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
+ #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12}
+ #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12}
+#endif /* For OSX */
+#define IDSTR "SFMT-607:2-15-3-13-3:fdff37ff-ef7f3f7d-ff777b7d-7ff7fb2f"
+
+#endif /* SFMT_PARAMS607_H */
diff --git a/deps/jemalloc/test/include/test/SFMT-params86243.h b/deps/jemalloc/test/include/test/SFMT-params86243.h
new file mode 100644
index 000000000..5e4d783c5
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-params86243.h
@@ -0,0 +1,81 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef SFMT_PARAMS86243_H
+#define SFMT_PARAMS86243_H
+
+#define POS1 366
+#define SL1 6
+#define SL2 7
+#define SR1 19
+#define SR2 1
+#define MSK1 0xfdbffbffU
+#define MSK2 0xbff7ff3fU
+#define MSK3 0xfd77efffU
+#define MSK4 0xbf9ff3ffU
+#define PARITY1 0x00000001U
+#define PARITY2 0x00000000U
+#define PARITY3 0x00000000U
+#define PARITY4 0xe9528d85U
+
+
+/* PARAMETERS FOR ALTIVEC */
+#if defined(__APPLE__) /* For OSX */
+ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
+ #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
+ #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
+ #define ALTI_MSK64 \
+ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
+ #define ALTI_SL2_PERM \
+ (vector unsigned char)(25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6)
+ #define ALTI_SL2_PERM64 \
+ (vector unsigned char)(7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6)
+ #define ALTI_SR2_PERM \
+ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
+ #define ALTI_SR2_PERM64 \
+ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
+#else /* For OTHER OSs(Linux?) */
+ #define ALTI_SL1 {SL1, SL1, SL1, SL1}
+ #define ALTI_SR1 {SR1, SR1, SR1, SR1}
+ #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
+ #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
+ #define ALTI_SL2_PERM {25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6}
+ #define ALTI_SL2_PERM64 {7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6}
+ #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
+ #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
+#endif /* For OSX */
+#define IDSTR "SFMT-86243:366-6-7-19-1:fdbffbff-bff7ff3f-fd77efff-bf9ff3ff"
+
+#endif /* SFMT_PARAMS86243_H */
diff --git a/deps/jemalloc/test/include/test/SFMT-sse2.h b/deps/jemalloc/test/include/test/SFMT-sse2.h
new file mode 100644
index 000000000..0314a163d
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT-sse2.h
@@ -0,0 +1,157 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/**
+ * @file SFMT-sse2.h
+ * @brief SIMD oriented Fast Mersenne Twister(SFMT) for Intel SSE2
+ *
+ * @author Mutsuo Saito (Hiroshima University)
+ * @author Makoto Matsumoto (Hiroshima University)
+ *
+ * @note We assume LITTLE ENDIAN in this file
+ *
+ * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * The new BSD License is applied to this software, see LICENSE.txt
+ */
+
+#ifndef SFMT_SSE2_H
+#define SFMT_SSE2_H
+
+/**
+ * This function represents the recursion formula.
+ * @param a a 128-bit part of the internal state array
+ * @param b a 128-bit part of the internal state array
+ * @param c a 128-bit part of the internal state array
+ * @param d a 128-bit part of the internal state array
+ * @param mask 128-bit mask
+ * @return output
+ */
+JEMALLOC_ALWAYS_INLINE __m128i mm_recursion(__m128i *a, __m128i *b,
+ __m128i c, __m128i d, __m128i mask) {
+ __m128i v, x, y, z;
+
+ x = _mm_load_si128(a);
+ y = _mm_srli_epi32(*b, SR1);
+ z = _mm_srli_si128(c, SR2);
+ v = _mm_slli_epi32(d, SL1);
+ z = _mm_xor_si128(z, x);
+ z = _mm_xor_si128(z, v);
+ x = _mm_slli_si128(x, SL2);
+ y = _mm_and_si128(y, mask);
+ z = _mm_xor_si128(z, x);
+ z = _mm_xor_si128(z, y);
+ return z;
+}
+
+/**
+ * This function fills the internal state array with pseudorandom
+ * integers.
+ */
+JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) {
+ int i;
+ __m128i r, r1, r2, mask;
+ mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1);
+
+ r1 = _mm_load_si128(&ctx->sfmt[N - 2].si);
+ r2 = _mm_load_si128(&ctx->sfmt[N - 1].si);
+ for (i = 0; i < N - POS1; i++) {
+ r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2,
+ mask);
+ _mm_store_si128(&ctx->sfmt[i].si, r);
+ r1 = r2;
+ r2 = r;
+ }
+ for (; i < N; i++) {
+ r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1 - N].si, r1, r2,
+ mask);
+ _mm_store_si128(&ctx->sfmt[i].si, r);
+ r1 = r2;
+ r2 = r;
+ }
+}
+
+/**
+ * This function fills the user-specified array with pseudorandom
+ * integers.
+ *
+ * @param array a 128-bit array to be filled with pseudorandom numbers.
+ * @param size number of 128-bit pseudorandom numbers to be generated.
+ */
+JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
+ int i, j;
+ __m128i r, r1, r2, mask;
+ mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1);
+
+ r1 = _mm_load_si128(&ctx->sfmt[N - 2].si);
+ r2 = _mm_load_si128(&ctx->sfmt[N - 1].si);
+ for (i = 0; i < N - POS1; i++) {
+ r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2,
+ mask);
+ _mm_store_si128(&array[i].si, r);
+ r1 = r2;
+ r2 = r;
+ }
+ for (; i < N; i++) {
+ r = mm_recursion(&ctx->sfmt[i].si, &array[i + POS1 - N].si, r1, r2,
+ mask);
+ _mm_store_si128(&array[i].si, r);
+ r1 = r2;
+ r2 = r;
+ }
+ /* main loop */
+ for (; i < size - N; i++) {
+ r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2,
+ mask);
+ _mm_store_si128(&array[i].si, r);
+ r1 = r2;
+ r2 = r;
+ }
+ for (j = 0; j < 2 * N - size; j++) {
+ r = _mm_load_si128(&array[j + size - N].si);
+ _mm_store_si128(&ctx->sfmt[j].si, r);
+ }
+ for (; i < size; i++) {
+ r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2,
+ mask);
+ _mm_store_si128(&array[i].si, r);
+ _mm_store_si128(&ctx->sfmt[j++].si, r);
+ r1 = r2;
+ r2 = r;
+ }
+}
+
+#endif
diff --git a/deps/jemalloc/test/include/test/SFMT.h b/deps/jemalloc/test/include/test/SFMT.h
new file mode 100644
index 000000000..09c1607dd
--- /dev/null
+++ b/deps/jemalloc/test/include/test/SFMT.h
@@ -0,0 +1,171 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/**
+ * @file SFMT.h
+ *
+ * @brief SIMD oriented Fast Mersenne Twister(SFMT) pseudorandom
+ * number generator
+ *
+ * @author Mutsuo Saito (Hiroshima University)
+ * @author Makoto Matsumoto (Hiroshima University)
+ *
+ * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * The new BSD License is applied to this software.
+ * see LICENSE.txt
+ *
+ * @note We assume that your system has inttypes.h. If your system
+ * doesn't have inttypes.h, you have to typedef uint32_t and uint64_t,
+ * and you have to define PRIu64 and PRIx64 in this file as follows:
+ * @verbatim
+ typedef unsigned int uint32_t
+ typedef unsigned long long uint64_t
+ #define PRIu64 "llu"
+ #define PRIx64 "llx"
+@endverbatim
+ * uint32_t must be exactly 32-bit unsigned integer type (no more, no
+ * less), and uint64_t must be exactly 64-bit unsigned integer type.
+ * PRIu64 and PRIx64 are used for printf function to print 64-bit
+ * unsigned int and 64-bit unsigned int in hexadecimal format.
+ */
+
+#ifndef SFMT_H
+#define SFMT_H
+
+typedef struct sfmt_s sfmt_t;
+
+uint32_t gen_rand32(sfmt_t *ctx);
+uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit);
+uint64_t gen_rand64(sfmt_t *ctx);
+uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit);
+void fill_array32(sfmt_t *ctx, uint32_t *array, int size);
+void fill_array64(sfmt_t *ctx, uint64_t *array, int size);
+sfmt_t *init_gen_rand(uint32_t seed);
+sfmt_t *init_by_array(uint32_t *init_key, int key_length);
+void fini_gen_rand(sfmt_t *ctx);
+const char *get_idstring(void);
+int get_min_array_size32(void);
+int get_min_array_size64(void);
+
+#ifndef JEMALLOC_ENABLE_INLINE
+double to_real1(uint32_t v);
+double genrand_real1(sfmt_t *ctx);
+double to_real2(uint32_t v);
+double genrand_real2(sfmt_t *ctx);
+double to_real3(uint32_t v);
+double genrand_real3(sfmt_t *ctx);
+double to_res53(uint64_t v);
+double to_res53_mix(uint32_t x, uint32_t y);
+double genrand_res53(sfmt_t *ctx);
+double genrand_res53_mix(sfmt_t *ctx);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(SFMT_C_))
+/* These real versions are due to Isaku Wada */
+/** generates a random number on [0,1]-real-interval */
+JEMALLOC_INLINE double to_real1(uint32_t v)
+{
+ return v * (1.0/4294967295.0);
+ /* divided by 2^32-1 */
+}
+
+/** generates a random number on [0,1]-real-interval */
+JEMALLOC_INLINE double genrand_real1(sfmt_t *ctx)
+{
+ return to_real1(gen_rand32(ctx));
+}
+
+/** generates a random number on [0,1)-real-interval */
+JEMALLOC_INLINE double to_real2(uint32_t v)
+{
+ return v * (1.0/4294967296.0);
+ /* divided by 2^32 */
+}
+
+/** generates a random number on [0,1)-real-interval */
+JEMALLOC_INLINE double genrand_real2(sfmt_t *ctx)
+{
+ return to_real2(gen_rand32(ctx));
+}
+
+/** generates a random number on (0,1)-real-interval */
+JEMALLOC_INLINE double to_real3(uint32_t v)
+{
+ return (((double)v) + 0.5)*(1.0/4294967296.0);
+ /* divided by 2^32 */
+}
+
+/** generates a random number on (0,1)-real-interval */
+JEMALLOC_INLINE double genrand_real3(sfmt_t *ctx)
+{
+ return to_real3(gen_rand32(ctx));
+}
+/** These real versions are due to Isaku Wada */
+
+/** generates a random number on [0,1) with 53-bit resolution*/
+JEMALLOC_INLINE double to_res53(uint64_t v)
+{
+ return v * (1.0/18446744073709551616.0L);
+}
+
+/** generates a random number on [0,1) with 53-bit resolution from two
+ * 32 bit integers */
+JEMALLOC_INLINE double to_res53_mix(uint32_t x, uint32_t y)
+{
+ return to_res53(x | ((uint64_t)y << 32));
+}
+
+/** generates a random number on [0,1) with 53-bit resolution
+ */
+JEMALLOC_INLINE double genrand_res53(sfmt_t *ctx)
+{
+ return to_res53(gen_rand64(ctx));
+}
+
+/** generates a random number on [0,1) with 53-bit resolution
+ using 32bit integer.
+ */
+JEMALLOC_INLINE double genrand_res53_mix(sfmt_t *ctx)
+{
+ uint32_t x, y;
+
+ x = gen_rand32(ctx);
+ y = gen_rand32(ctx);
+ return to_res53_mix(x, y);
+}
+#endif
+#endif
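SFMT.h is the public face of the bundled generator: test code seeds it, draws 32-bit or 64-bit integers or doubles, and tears it down, while the SIMD and parameter headers above remain internal. Below is a minimal, illustrative usage sketch (include path and range semantics assumed, not confirmed by this hunk; the _range variant is taken to return a value in [0, limit)):

#include <stdint.h>   /* SFMT.h expects the includer to provide the fixed-width types */
#include <stdio.h>
#include "test/SFMT.h" /* illustrative include path */

int main(void) {
    sfmt_t *ctx = init_gen_rand(12345);        /* seed the generator */

    uint32_t a = gen_rand32(ctx);              /* uniform 32-bit value */
    uint32_t b = gen_rand32_range(ctx, 1000);  /* assumed uniform in [0, 1000) */
    double   d = genrand_real2(ctx);           /* uniform double in [0, 1) */

    printf("%s: %u %u %f\n", get_idstring(), a, b, d);

    fini_gen_rand(ctx);                        /* release the generator state */
    return 0;
}

The inline to_real*/to_res53 helpers declared above perform the integer-to-interval mapping used here; genrand_real2(), for example, is a raw 32-bit draw scaled by 1/2^32 to land in [0, 1).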
diff --git a/deps/jemalloc/test/include/test/jemalloc_test.h.in b/deps/jemalloc/test/include/test/jemalloc_test.h.in
new file mode 100644
index 000000000..730a55dba
--- /dev/null
+++ b/deps/jemalloc/test/include/test/jemalloc_test.h.in
@@ -0,0 +1,141 @@
+#include <stdlib.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <math.h>
+#include <string.h>
+
+#ifdef _WIN32
+# include <windows.h>
+#else
+# include <pthread.h>
+#endif
+
+/******************************************************************************/
+/*
+ * Define always-enabled assertion macros, so that test assertions execute even
+ * if assertions are disabled in the library code. These definitions must
+ * exist prior to including "jemalloc/internal/util.h".
+ */
+#define assert(e) do { \
+ if (!(e)) { \
+ malloc_printf( \
+ "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
+ __FILE__, __LINE__, #e); \
+ abort(); \
+ } \
+} while (0)
+
+#define not_reached() do { \
+ malloc_printf( \
+ "<jemalloc>: %s:%d: Unreachable code reached\n", \
+ __FILE__, __LINE__); \
+ abort(); \
+} while (0)
+
+#define not_implemented() do { \
+ malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
+ __FILE__, __LINE__); \
+ abort(); \
+} while (0)
+
+#define assert_not_implemented(e) do { \
+ if (!(e)) \
+ not_implemented(); \
+} while (0)
+
+#include "test/jemalloc_test_defs.h"
+
+#ifdef JEMALLOC_OSSPIN
+# include <libkern/OSAtomic.h>
+#endif
+
+#if defined(HAVE_ALTIVEC) && !defined(__APPLE__)
+# include <altivec.h>
+#endif
+#ifdef HAVE_SSE2
+# include <emmintrin.h>
+#endif
+
+/******************************************************************************/
+/*
+ * For unit tests, expose all public and private interfaces.
+ */
+#ifdef JEMALLOC_UNIT_TEST
+# define JEMALLOC_JET
+# define JEMALLOC_MANGLE
+# include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/*
+ * For integration tests, expose the public jemalloc interfaces, but only
+ * expose the minimum necessary internal utility code (to avoid re-implementing
+ * essentially identical code within the test infrastructure).
+ */
+#elif defined(JEMALLOC_INTEGRATION_TEST)
+# define JEMALLOC_MANGLE
+# include "jemalloc/jemalloc@install_suffix@.h"
+# include "jemalloc/internal/jemalloc_internal_defs.h"
+# include "jemalloc/internal/jemalloc_internal_macros.h"
+
+# define JEMALLOC_N(n) @private_namespace@##n
+# include "jemalloc/internal/private_namespace.h"
+
+# define JEMALLOC_H_TYPES
+# define JEMALLOC_H_STRUCTS
+# define JEMALLOC_H_EXTERNS
+# define JEMALLOC_H_INLINES
+# include "jemalloc/internal/util.h"
+# include "jemalloc/internal/qr.h"
+# include "jemalloc/internal/ql.h"
+# undef JEMALLOC_H_TYPES
+# undef JEMALLOC_H_STRUCTS
+# undef JEMALLOC_H_EXTERNS
+# undef JEMALLOC_H_INLINES
+
+/******************************************************************************/
+/*
+ * For stress tests, expose the public jemalloc interfaces with name mangling
+ * so that they can be tested as e.g. malloc() and free(). Also expose the
+ * public jemalloc interfaces with jet_ prefixes, so that stress tests can use
+ * a separate allocator for their internal data structures.
+ */
+#elif defined(JEMALLOC_STRESS_TEST)
+# include "jemalloc/jemalloc@install_suffix@.h"
+
+# include "jemalloc/jemalloc_protos_jet.h"
+
+# define JEMALLOC_JET
+# include "jemalloc/internal/jemalloc_internal.h"
+# include "jemalloc/internal/public_unnamespace.h"
+# undef JEMALLOC_JET
+
+# include "jemalloc/jemalloc_rename.h"
+# define JEMALLOC_MANGLE
+# ifdef JEMALLOC_STRESS_TESTLIB
+# include "jemalloc/jemalloc_mangle_jet.h"
+# else
+# include "jemalloc/jemalloc_mangle.h"
+# endif
+
+/******************************************************************************/
+/*
+ * This header does dangerous things, the effects of which only test code
+ * should be subject to.
+ */
+#else
+# error "This header cannot be included outside a testing context"
+#endif
+
+/******************************************************************************/
+/*
+ * Common test utilities.
+ */
+#include "test/math.h"
+#include "test/mtx.h"
+#include "test/mq.h"
+#include "test/test.h"
+#include "test/thd.h"
+#define MEXP 19937
+#include "test/SFMT.h"
diff --git a/deps/jemalloc/test/include/test/jemalloc_test_defs.h.in b/deps/jemalloc/test/include/test/jemalloc_test_defs.h.in
new file mode 100644
index 000000000..18a9773d7
--- /dev/null
+++ b/deps/jemalloc/test/include/test/jemalloc_test_defs.h.in
@@ -0,0 +1,5 @@
+#include "jemalloc/internal/jemalloc_internal_defs.h"
+
+/* For use by SFMT. */
+#undef HAVE_SSE2
+#undef HAVE_ALTIVEC
diff --git a/deps/jemalloc/test/include/test/math.h b/deps/jemalloc/test/include/test/math.h
new file mode 100644
index 000000000..a862ed7db
--- /dev/null
+++ b/deps/jemalloc/test/include/test/math.h
@@ -0,0 +1,311 @@
+#ifndef JEMALLOC_ENABLE_INLINE
+double ln_gamma(double x);
+double i_gamma(double x, double p, double ln_gamma_p);
+double pt_norm(double p);
+double pt_chi2(double p, double df, double ln_gamma_df_2);
+double pt_gamma(double p, double shape, double scale, double ln_gamma_shape);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(MATH_C_))
+/*
+ * Compute the natural log of Gamma(x), accurate to 10 decimal places.
+ *
+ * This implementation is based on:
+ *
+ * Pike, M.C., I.D. Hill (1966) Algorithm 291: Logarithm of Gamma function
+ * [S14]. Communications of the ACM 9(9):684.
+ */
+JEMALLOC_INLINE double
+ln_gamma(double x)
+{
+ double f, z;
+
+ assert(x > 0.0);
+
+ if (x < 7.0) {
+ f = 1.0;
+ z = x;
+ while (z < 7.0) {
+ f *= z;
+ z += 1.0;
+ }
+ x = z;
+ f = -log(f);
+ } else
+ f = 0.0;
+
+ z = 1.0 / (x * x);
+
+ return (f + (x-0.5) * log(x) - x + 0.918938533204673 +
+ (((-0.000595238095238 * z + 0.000793650793651) * z -
+ 0.002777777777778) * z + 0.083333333333333) / x);
+}
+
+/*
+ * Compute the incomplete Gamma ratio for [0..x], where p is the shape
+ * parameter, and ln_gamma_p is ln_gamma(p).
+ *
+ * This implementation is based on:
+ *
+ * Bhattacharjee, G.P. (1970) Algorithm AS 32: The incomplete Gamma integral.
+ * Applied Statistics 19:285-287.
+ */
+JEMALLOC_INLINE double
+i_gamma(double x, double p, double ln_gamma_p)
+{
+ double acu, factor, oflo, gin, term, rn, a, b, an, dif;
+ double pn[6];
+ unsigned i;
+
+ assert(p > 0.0);
+ assert(x >= 0.0);
+
+ if (x == 0.0)
+ return (0.0);
+
+ acu = 1.0e-10;
+ oflo = 1.0e30;
+ gin = 0.0;
+ factor = exp(p * log(x) - x - ln_gamma_p);
+
+ if (x <= 1.0 || x < p) {
+ /* Calculation by series expansion. */
+ gin = 1.0;
+ term = 1.0;
+ rn = p;
+
+ while (true) {
+ rn += 1.0;
+ term *= x / rn;
+ gin += term;
+ if (term <= acu) {
+ gin *= factor / p;
+ return (gin);
+ }
+ }
+ } else {
+ /* Calculation by continued fraction. */
+ a = 1.0 - p;
+ b = a + x + 1.0;
+ term = 0.0;
+ pn[0] = 1.0;
+ pn[1] = x;
+ pn[2] = x + 1.0;
+ pn[3] = x * b;
+ gin = pn[2] / pn[3];
+
+ while (true) {
+ a += 1.0;
+ b += 2.0;
+ term += 1.0;
+ an = a * term;
+ for (i = 0; i < 2; i++)
+ pn[i+4] = b * pn[i+2] - an * pn[i];
+ if (pn[5] != 0.0) {
+ rn = pn[4] / pn[5];
+ dif = fabs(gin - rn);
+ if (dif <= acu && dif <= acu * rn) {
+ gin = 1.0 - factor * gin;
+ return (gin);
+ }
+ gin = rn;
+ }
+ for (i = 0; i < 4; i++)
+ pn[i] = pn[i+2];
+
+ if (fabs(pn[4]) >= oflo) {
+ for (i = 0; i < 4; i++)
+ pn[i] /= oflo;
+ }
+ }
+ }
+}
+
+/*
+ * Given a value p in [0..1] of the lower tail area of the normal distribution,
+ * compute the limit on the definite integral from [-inf..z] that satisfies p,
+ * accurate to 16 decimal places.
+ *
+ * This implementation is based on:
+ *
+ * Wichura, M.J. (1988) Algorithm AS 241: The percentage points of the normal
+ * distribution. Applied Statistics 37(3):477-484.
+ */
+JEMALLOC_INLINE double
+pt_norm(double p)
+{
+ double q, r, ret;
+
+ assert(p > 0.0 && p < 1.0);
+
+ q = p - 0.5;
+ if (fabs(q) <= 0.425) {
+ /* p close to 1/2. */
+ r = 0.180625 - q * q;
+ return (q * (((((((2.5090809287301226727e3 * r +
+ 3.3430575583588128105e4) * r + 6.7265770927008700853e4) * r
+ + 4.5921953931549871457e4) * r + 1.3731693765509461125e4) *
+ r + 1.9715909503065514427e3) * r + 1.3314166789178437745e2)
+ * r + 3.3871328727963666080e0) /
+ (((((((5.2264952788528545610e3 * r +
+ 2.8729085735721942674e4) * r + 3.9307895800092710610e4) * r
+ + 2.1213794301586595867e4) * r + 5.3941960214247511077e3) *
+ r + 6.8718700749205790830e2) * r + 4.2313330701600911252e1)
+ * r + 1.0));
+ } else {
+ if (q < 0.0)
+ r = p;
+ else
+ r = 1.0 - p;
+ assert(r > 0.0);
+
+ r = sqrt(-log(r));
+ if (r <= 5.0) {
+ /* p neither close to 1/2 nor 0 or 1. */
+ r -= 1.6;
+ ret = ((((((((7.74545014278341407640e-4 * r +
+ 2.27238449892691845833e-2) * r +
+ 2.41780725177450611770e-1) * r +
+ 1.27045825245236838258e0) * r +
+ 3.64784832476320460504e0) * r +
+ 5.76949722146069140550e0) * r +
+ 4.63033784615654529590e0) * r +
+ 1.42343711074968357734e0) /
+ (((((((1.05075007164441684324e-9 * r +
+ 5.47593808499534494600e-4) * r +
+ 1.51986665636164571966e-2)
+ * r + 1.48103976427480074590e-1) * r +
+ 6.89767334985100004550e-1) * r +
+ 1.67638483018380384940e0) * r +
+ 2.05319162663775882187e0) * r + 1.0));
+ } else {
+ /* p near 0 or 1. */
+ r -= 5.0;
+ ret = ((((((((2.01033439929228813265e-7 * r +
+ 2.71155556874348757815e-5) * r +
+ 1.24266094738807843860e-3) * r +
+ 2.65321895265761230930e-2) * r +
+ 2.96560571828504891230e-1) * r +
+ 1.78482653991729133580e0) * r +
+ 5.46378491116411436990e0) * r +
+ 6.65790464350110377720e0) /
+ (((((((2.04426310338993978564e-15 * r +
+ 1.42151175831644588870e-7) * r +
+ 1.84631831751005468180e-5) * r +
+ 7.86869131145613259100e-4) * r +
+ 1.48753612908506148525e-2) * r +
+ 1.36929880922735805310e-1) * r +
+ 5.99832206555887937690e-1)
+ * r + 1.0));
+ }
+ if (q < 0.0)
+ ret = -ret;
+ return (ret);
+ }
+}
+
+/*
+ * Given a value p in [0..1] of the lower tail area of the Chi^2 distribution
+ * with df degrees of freedom, where ln_gamma_df_2 is ln_gamma(df/2.0), compute
+ * the upper limit on the definite integral from [0..z] that satisfies p,
+ * accurate to 12 decimal places.
+ *
+ * This implementation is based on:
+ *
+ * Best, D.J., D.E. Roberts (1975) Algorithm AS 91: The percentage points of
+ * the Chi^2 distribution. Applied Statistics 24(3):385-388.
+ *
+ * Shea, B.L. (1991) Algorithm AS R85: A remark on AS 91: The percentage
+ * points of the Chi^2 distribution. Applied Statistics 40(1):233-235.
+ */
+JEMALLOC_INLINE double
+pt_chi2(double p, double df, double ln_gamma_df_2)
+{
+ double e, aa, xx, c, ch, a, q, p1, p2, t, x, b, s1, s2, s3, s4, s5, s6;
+ unsigned i;
+
+ assert(p >= 0.0 && p < 1.0);
+ assert(df > 0.0);
+
+ e = 5.0e-7;
+ aa = 0.6931471805;
+
+ xx = 0.5 * df;
+ c = xx - 1.0;
+
+ if (df < -1.24 * log(p)) {
+ /* Starting approximation for small Chi^2. */
+ ch = pow(p * xx * exp(ln_gamma_df_2 + xx * aa), 1.0 / xx);
+ if (ch - e < 0.0)
+ return (ch);
+ } else {
+ if (df > 0.32) {
+ x = pt_norm(p);
+ /*
+ * Starting approximation using Wilson and Hilferty
+ * estimate.
+ */
+ p1 = 0.222222 / df;
+ ch = df * pow(x * sqrt(p1) + 1.0 - p1, 3.0);
+ /* Starting approximation for p tending to 1. */
+ if (ch > 2.2 * df + 6.0) {
+ ch = -2.0 * (log(1.0 - p) - c * log(0.5 * ch) +
+ ln_gamma_df_2);
+ }
+ } else {
+ ch = 0.4;
+ a = log(1.0 - p);
+ while (true) {
+ q = ch;
+ p1 = 1.0 + ch * (4.67 + ch);
+ p2 = ch * (6.73 + ch * (6.66 + ch));
+ t = -0.5 + (4.67 + 2.0 * ch) / p1 - (6.73 + ch
+ * (13.32 + 3.0 * ch)) / p2;
+ ch -= (1.0 - exp(a + ln_gamma_df_2 + 0.5 * ch +
+ c * aa) * p2 / p1) / t;
+ if (fabs(q / ch - 1.0) - 0.01 <= 0.0)
+ break;
+ }
+ }
+ }
+
+ for (i = 0; i < 20; i++) {
+ /* Calculation of seven-term Taylor series. */
+ q = ch;
+ p1 = 0.5 * ch;
+ if (p1 < 0.0)
+ return (-1.0);
+ p2 = p - i_gamma(p1, xx, ln_gamma_df_2);
+ t = p2 * exp(xx * aa + ln_gamma_df_2 + p1 - c * log(ch));
+ b = t / ch;
+ a = 0.5 * t - b * c;
+ s1 = (210.0 + a * (140.0 + a * (105.0 + a * (84.0 + a * (70.0 +
+ 60.0 * a))))) / 420.0;
+ s2 = (420.0 + a * (735.0 + a * (966.0 + a * (1141.0 + 1278.0 *
+ a)))) / 2520.0;
+ s3 = (210.0 + a * (462.0 + a * (707.0 + 932.0 * a))) / 2520.0;
+ s4 = (252.0 + a * (672.0 + 1182.0 * a) + c * (294.0 + a *
+ (889.0 + 1740.0 * a))) / 5040.0;
+ s5 = (84.0 + 264.0 * a + c * (175.0 + 606.0 * a)) / 2520.0;
+ s6 = (120.0 + c * (346.0 + 127.0 * c)) / 5040.0;
+ ch += t * (1.0 + 0.5 * t * s1 - b * c * (s1 - b * (s2 - b * (s3
+ - b * (s4 - b * (s5 - b * s6))))));
+ if (fabs(q / ch - 1.0) <= e)
+ break;
+ }
+
+ return (ch);
+}
+
+/*
+ * Given a value p in [0..1] and Gamma distribution shape and scale parameters,
+ * compute the upper limit on the definite integral from [0..z] that satisfies
+ * p.
+ */
+JEMALLOC_INLINE double
+pt_gamma(double p, double shape, double scale, double ln_gamma_shape)
+{
+
+ return (pt_chi2(p, shape * 2.0, ln_gamma_shape) * 0.5 * scale);
+}
+#endif
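
(Editorial note, not part of the patch.) The helpers above implement two standard quantities; in the notation of the arguments x, p, shape k, and scale theta, the relevant identities are:

% i_gamma(x, p, ln_gamma_p) computes the regularized lower incomplete Gamma ratio:
P(p, x) = \frac{1}{\Gamma(p)} \int_0^x t^{p-1} e^{-t}\,dt

% pt_gamma() relies on: if X \sim \mathrm{Gamma}(k, \theta), then 2X/\theta \sim \chi^2_{2k}, hence
F^{-1}_{\mathrm{Gamma}(k,\theta)}(p) = \frac{\theta}{2}\, F^{-1}_{\chi^2_{2k}}(p)

which is exactly what the wrapper evaluates as pt_chi2(p, shape * 2.0, ln_gamma_shape) * 0.5 * scale.
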
diff --git a/deps/jemalloc/test/include/test/mq.h b/deps/jemalloc/test/include/test/mq.h
new file mode 100644
index 000000000..11188653c
--- /dev/null
+++ b/deps/jemalloc/test/include/test/mq.h
@@ -0,0 +1,110 @@
+/*
+ * Simple templated message queue implementation that relies on only mutexes for
+ * synchronization (which reduces portability issues). Given the following
+ * setup:
+ *
+ * typedef struct mq_msg_s mq_msg_t;
+ * struct mq_msg_s {
+ * mq_msg(mq_msg_t) link;
+ * [message data]
+ * };
+ * mq_gen(, mq_, mq_t, mq_msg_t, link)
+ *
+ * The API is as follows:
+ *
+ * bool mq_init(mq_t *mq);
+ * void mq_fini(mq_t *mq);
+ * unsigned mq_count(mq_t *mq);
+ * mq_msg_t *mq_tryget(mq_t *mq);
+ * mq_msg_t *mq_get(mq_t *mq);
+ * void mq_put(mq_t *mq, mq_msg_t *msg);
+ *
+ * The message queue linkage embedded in each message is to be treated as
+ * externally opaque (no need to initialize or clean up externally). mq_fini()
+ * does not perform any cleanup of messages, since it knows nothing of their
+ * payloads.
+ */
+#define mq_msg(a_mq_msg_type) ql_elm(a_mq_msg_type)
+
+#define mq_gen(a_attr, a_prefix, a_mq_type, a_mq_msg_type, a_field) \
+typedef struct { \
+ mtx_t lock; \
+ ql_head(a_mq_msg_type) msgs; \
+ unsigned count; \
+} a_mq_type; \
+a_attr bool \
+a_prefix##init(a_mq_type *mq) { \
+ \
+ if (mtx_init(&mq->lock)) \
+ return (true); \
+ ql_new(&mq->msgs); \
+ mq->count = 0; \
+ return (false); \
+} \
+a_attr void \
+a_prefix##fini(a_mq_type *mq) \
+{ \
+ \
+ mtx_fini(&mq->lock); \
+} \
+a_attr unsigned \
+a_prefix##count(a_mq_type *mq) \
+{ \
+ unsigned count; \
+ \
+ mtx_lock(&mq->lock); \
+ count = mq->count; \
+ mtx_unlock(&mq->lock); \
+ return (count); \
+} \
+a_attr a_mq_msg_type * \
+a_prefix##tryget(a_mq_type *mq) \
+{ \
+ a_mq_msg_type *msg; \
+ \
+ mtx_lock(&mq->lock); \
+ msg = ql_first(&mq->msgs); \
+ if (msg != NULL) { \
+ ql_head_remove(&mq->msgs, a_mq_msg_type, a_field); \
+ mq->count--; \
+ } \
+ mtx_unlock(&mq->lock); \
+ return (msg); \
+} \
+a_attr a_mq_msg_type * \
+a_prefix##get(a_mq_type *mq) \
+{ \
+ a_mq_msg_type *msg; \
+ struct timespec timeout; \
+ \
+ msg = a_prefix##tryget(mq); \
+ if (msg != NULL) \
+ return (msg); \
+ \
+ timeout.tv_sec = 0; \
+ timeout.tv_nsec = 1; \
+ while (true) { \
+ nanosleep(&timeout, NULL); \
+ msg = a_prefix##tryget(mq); \
+ if (msg != NULL) \
+ return (msg); \
+ if (timeout.tv_sec == 0) { \
+ /* Double sleep time, up to max 1 second. */ \
+ timeout.tv_nsec <<= 1; \
+ if (timeout.tv_nsec >= 1000*1000*1000) { \
+ timeout.tv_sec = 1; \
+ timeout.tv_nsec = 0; \
+ } \
+ } \
+ } \
+} \
+a_attr void \
+a_prefix##put(a_mq_type *mq, a_mq_msg_type *msg) \
+{ \
+ \
+ mtx_lock(&mq->lock); \
+ ql_elm_new(msg, a_field); \
+ ql_tail_insert(&mq->msgs, msg, a_field); \
+ mq->count++; \
+ mtx_unlock(&mq->lock); \
+}
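
(Editorial sketch, not part of the patch.) To make the mq_gen() template concrete, a hypothetical instantiation could look as follows. The payload field, the example() function, and the static attribute are illustrative assumptions, and the sketch presumes inclusion of test/jemalloc_test.h so that the ql_* macros, mtx_*, and assert_* helpers are available:

typedef struct mq_msg_s mq_msg_t;
struct mq_msg_s {
	mq_msg(mq_msg_t)	link;		/* Opaque queue linkage. */
	int			payload;	/* Illustrative message data. */
};
mq_gen(static, mq_, mq_t, mq_msg_t, link)

static void
example(void)
{
	mq_t mq;
	mq_msg_t msg, *recv;

	assert_false(mq_init(&mq), "Unexpected mq_init() failure");
	msg.payload = 42;
	mq_put(&mq, &msg);
	assert_u_eq(mq_count(&mq), 1, "Unexpected message count");
	recv = mq_tryget(&mq);
	assert_ptr_eq(recv, &msg, "Unexpected message");
	mq_fini(&mq);
}
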
diff --git a/deps/jemalloc/test/include/test/mtx.h b/deps/jemalloc/test/include/test/mtx.h
new file mode 100644
index 000000000..bbe822f54
--- /dev/null
+++ b/deps/jemalloc/test/include/test/mtx.h
@@ -0,0 +1,21 @@
+/*
+ * mtx is a slightly simplified version of malloc_mutex. This code duplication
+ * is unfortunate, but there are allocator bootstrapping considerations that
+ * would leak into the test infrastructure if malloc_mutex were used directly
+ * in tests.
+ */
+
+typedef struct {
+#ifdef _WIN32
+ CRITICAL_SECTION lock;
+#elif (defined(JEMALLOC_OSSPIN))
+ OSSpinLock lock;
+#else
+ pthread_mutex_t lock;
+#endif
+} mtx_t;
+
+bool mtx_init(mtx_t *mtx);
+void mtx_fini(mtx_t *mtx);
+void mtx_lock(mtx_t *mtx);
+void mtx_unlock(mtx_t *mtx);
diff --git a/deps/jemalloc/test/include/test/test.h b/deps/jemalloc/test/include/test/test.h
new file mode 100644
index 000000000..a32ec07c4
--- /dev/null
+++ b/deps/jemalloc/test/include/test/test.h
@@ -0,0 +1,329 @@
+#define ASSERT_BUFSIZE 256
+
+#define assert_cmp(t, a, b, cmp, neg_cmp, pri, fmt...) do { \
+ t a_ = (a); \
+ t b_ = (b); \
+ if (!(a_ cmp b_)) { \
+ char prefix[ASSERT_BUFSIZE]; \
+ char message[ASSERT_BUFSIZE]; \
+ malloc_snprintf(prefix, sizeof(prefix), \
+ "%s:%s:%d: Failed assertion: " \
+ "(%s) "#cmp" (%s) --> " \
+ "%"pri" "#neg_cmp" %"pri": ", \
+ __func__, __FILE__, __LINE__, \
+ #a, #b, a_, b_); \
+ malloc_snprintf(message, sizeof(message), fmt); \
+ p_test_fail(prefix, message); \
+ } \
+} while (0)
+
+#define assert_ptr_eq(a, b, fmt...) assert_cmp(void *, a, b, ==, \
+ !=, "p", fmt)
+#define assert_ptr_ne(a, b, fmt...) assert_cmp(void *, a, b, !=, \
+ ==, "p", fmt)
+#define assert_ptr_null(a, fmt...) assert_cmp(void *, a, NULL, ==, \
+ !=, "p", fmt)
+#define assert_ptr_not_null(a, fmt...) assert_cmp(void *, a, NULL, !=, \
+ ==, "p", fmt)
+
+#define assert_c_eq(a, b, fmt...) assert_cmp(char, a, b, ==, !=, "c", fmt)
+#define assert_c_ne(a, b, fmt...) assert_cmp(char, a, b, !=, ==, "c", fmt)
+#define assert_c_lt(a, b, fmt...) assert_cmp(char, a, b, <, >=, "c", fmt)
+#define assert_c_le(a, b, fmt...) assert_cmp(char, a, b, <=, >, "c", fmt)
+#define assert_c_ge(a, b, fmt...) assert_cmp(char, a, b, >=, <, "c", fmt)
+#define assert_c_gt(a, b, fmt...) assert_cmp(char, a, b, >, <=, "c", fmt)
+
+#define assert_x_eq(a, b, fmt...) assert_cmp(int, a, b, ==, !=, "#x", fmt)
+#define assert_x_ne(a, b, fmt...) assert_cmp(int, a, b, !=, ==, "#x", fmt)
+#define assert_x_lt(a, b, fmt...) assert_cmp(int, a, b, <, >=, "#x", fmt)
+#define assert_x_le(a, b, fmt...) assert_cmp(int, a, b, <=, >, "#x", fmt)
+#define assert_x_ge(a, b, fmt...) assert_cmp(int, a, b, >=, <, "#x", fmt)
+#define assert_x_gt(a, b, fmt...) assert_cmp(int, a, b, >, <=, "#x", fmt)
+
+#define assert_d_eq(a, b, fmt...) assert_cmp(int, a, b, ==, !=, "d", fmt)
+#define assert_d_ne(a, b, fmt...) assert_cmp(int, a, b, !=, ==, "d", fmt)
+#define assert_d_lt(a, b, fmt...) assert_cmp(int, a, b, <, >=, "d", fmt)
+#define assert_d_le(a, b, fmt...) assert_cmp(int, a, b, <=, >, "d", fmt)
+#define assert_d_ge(a, b, fmt...) assert_cmp(int, a, b, >=, <, "d", fmt)
+#define assert_d_gt(a, b, fmt...) assert_cmp(int, a, b, >, <=, "d", fmt)
+
+#define assert_u_eq(a, b, fmt...) assert_cmp(int, a, b, ==, !=, "u", fmt)
+#define assert_u_ne(a, b, fmt...) assert_cmp(int, a, b, !=, ==, "u", fmt)
+#define assert_u_lt(a, b, fmt...) assert_cmp(int, a, b, <, >=, "u", fmt)
+#define assert_u_le(a, b, fmt...) assert_cmp(int, a, b, <=, >, "u", fmt)
+#define assert_u_ge(a, b, fmt...) assert_cmp(int, a, b, >=, <, "u", fmt)
+#define assert_u_gt(a, b, fmt...) assert_cmp(int, a, b, >, <=, "u", fmt)
+
+#define assert_ld_eq(a, b, fmt...) assert_cmp(long, a, b, ==, \
+ !=, "ld", fmt)
+#define assert_ld_ne(a, b, fmt...) assert_cmp(long, a, b, !=, \
+ ==, "ld", fmt)
+#define assert_ld_lt(a, b, fmt...) assert_cmp(long, a, b, <, \
+ >=, "ld", fmt)
+#define assert_ld_le(a, b, fmt...) assert_cmp(long, a, b, <=, \
+ >, "ld", fmt)
+#define assert_ld_ge(a, b, fmt...) assert_cmp(long, a, b, >=, \
+ <, "ld", fmt)
+#define assert_ld_gt(a, b, fmt...) assert_cmp(long, a, b, >, \
+ <=, "ld", fmt)
+
+#define assert_lu_eq(a, b, fmt...) assert_cmp(unsigned long, \
+ a, b, ==, !=, "lu", fmt)
+#define assert_lu_ne(a, b, fmt...) assert_cmp(unsigned long, \
+ a, b, !=, ==, "lu", fmt)
+#define assert_lu_lt(a, b, fmt...) assert_cmp(unsigned long, \
+ a, b, <, >=, "lu", fmt)
+#define assert_lu_le(a, b, fmt...) assert_cmp(unsigned long, \
+ a, b, <=, >, "lu", fmt)
+#define assert_lu_ge(a, b, fmt...) assert_cmp(unsigned long, \
+ a, b, >=, <, "lu", fmt)
+#define assert_lu_gt(a, b, fmt...) assert_cmp(unsigned long, \
+ a, b, >, <=, "lu", fmt)
+
+#define assert_qd_eq(a, b, fmt...) assert_cmp(long long, a, b, ==, \
+ !=, "qd", fmt)
+#define assert_qd_ne(a, b, fmt...) assert_cmp(long long, a, b, !=, \
+ ==, "qd", fmt)
+#define assert_qd_lt(a, b, fmt...) assert_cmp(long long, a, b, <, \
+ >=, "qd", fmt)
+#define assert_qd_le(a, b, fmt...) assert_cmp(long long, a, b, <=, \
+ >, "qd", fmt)
+#define assert_qd_ge(a, b, fmt...) assert_cmp(long long, a, b, >=, \
+ <, "qd", fmt)
+#define assert_qd_gt(a, b, fmt...) assert_cmp(long long, a, b, >, \
+ <=, "qd", fmt)
+
+#define assert_qu_eq(a, b, fmt...) assert_cmp(unsigned long long, \
+ a, b, ==, !=, "qu", fmt)
+#define assert_qu_ne(a, b, fmt...) assert_cmp(unsigned long long, \
+ a, b, !=, ==, "qu", fmt)
+#define assert_qu_lt(a, b, fmt...) assert_cmp(unsigned long long, \
+ a, b, <, >=, "qu", fmt)
+#define assert_qu_le(a, b, fmt...) assert_cmp(unsigned long long, \
+ a, b, <=, >, "qu", fmt)
+#define assert_qu_ge(a, b, fmt...) assert_cmp(unsigned long long, \
+ a, b, >=, <, "qu", fmt)
+#define assert_qu_gt(a, b, fmt...) assert_cmp(unsigned long long, \
+ a, b, >, <=, "qu", fmt)
+
+#define assert_jd_eq(a, b, fmt...) assert_cmp(intmax_t, a, b, ==, \
+ !=, "jd", fmt)
+#define assert_jd_ne(a, b, fmt...) assert_cmp(intmax_t, a, b, !=, \
+ ==, "jd", fmt)
+#define assert_jd_lt(a, b, fmt...) assert_cmp(intmax_t, a, b, <, \
+ >=, "jd", fmt)
+#define assert_jd_le(a, b, fmt...) assert_cmp(intmax_t, a, b, <=, \
+ >, "jd", fmt)
+#define assert_jd_ge(a, b, fmt...) assert_cmp(intmax_t, a, b, >=, \
+ <, "jd", fmt)
+#define assert_jd_gt(a, b, fmt...) assert_cmp(intmax_t, a, b, >, \
+ <=, "jd", fmt)
+
+#define assert_ju_eq(a, b, fmt...) assert_cmp(uintmax_t, a, b, ==, \
+ !=, "ju", fmt)
+#define assert_ju_ne(a, b, fmt...) assert_cmp(uintmax_t, a, b, !=, \
+ ==, "ju", fmt)
+#define assert_ju_lt(a, b, fmt...) assert_cmp(uintmax_t, a, b, <, \
+ >=, "ju", fmt)
+#define assert_ju_le(a, b, fmt...) assert_cmp(uintmax_t, a, b, <=, \
+ >, "ju", fmt)
+#define assert_ju_ge(a, b, fmt...) assert_cmp(uintmax_t, a, b, >=, \
+ <, "ju", fmt)
+#define assert_ju_gt(a, b, fmt...) assert_cmp(uintmax_t, a, b, >, \
+ <=, "ju", fmt)
+
+#define assert_zd_eq(a, b, fmt...) assert_cmp(ssize_t, a, b, ==, \
+ !=, "zd", fmt)
+#define assert_zd_ne(a, b, fmt...) assert_cmp(ssize_t, a, b, !=, \
+ ==, "zd", fmt)
+#define assert_zd_lt(a, b, fmt...) assert_cmp(ssize_t, a, b, <, \
+ >=, "zd", fmt)
+#define assert_zd_le(a, b, fmt...) assert_cmp(ssize_t, a, b, <=, \
+ >, "zd", fmt)
+#define assert_zd_ge(a, b, fmt...) assert_cmp(ssize_t, a, b, >=, \
+ <, "zd", fmt)
+#define assert_zd_gt(a, b, fmt...) assert_cmp(ssize_t, a, b, >, \
+ <=, "zd", fmt)
+
+#define assert_zu_eq(a, b, fmt...) assert_cmp(size_t, a, b, ==, \
+ !=, "zu", fmt)
+#define assert_zu_ne(a, b, fmt...) assert_cmp(size_t, a, b, !=, \
+ ==, "zu", fmt)
+#define assert_zu_lt(a, b, fmt...) assert_cmp(size_t, a, b, <, \
+ >=, "zu", fmt)
+#define assert_zu_le(a, b, fmt...) assert_cmp(size_t, a, b, <=, \
+ >, "zu", fmt)
+#define assert_zu_ge(a, b, fmt...) assert_cmp(size_t, a, b, >=, \
+ <, "zu", fmt)
+#define assert_zu_gt(a, b, fmt...) assert_cmp(size_t, a, b, >, \
+ <=, "zu", fmt)
+
+#define assert_d32_eq(a, b, fmt...) assert_cmp(int32_t, a, b, ==, \
+ !=, PRId32, fmt)
+#define assert_d32_ne(a, b, fmt...) assert_cmp(int32_t, a, b, !=, \
+ ==, PRId32, fmt)
+#define assert_d32_lt(a, b, fmt...) assert_cmp(int32_t, a, b, <, \
+ >=, PRId32, fmt)
+#define assert_d32_le(a, b, fmt...) assert_cmp(int32_t, a, b, <=, \
+ >, PRId32, fmt)
+#define assert_d32_ge(a, b, fmt...) assert_cmp(int32_t, a, b, >=, \
+ <, PRId32, fmt)
+#define assert_d32_gt(a, b, fmt...) assert_cmp(int32_t, a, b, >, \
+ <=, PRId32, fmt)
+
+#define assert_u32_eq(a, b, fmt...) assert_cmp(uint32_t, a, b, ==, \
+ !=, PRIu32, fmt)
+#define assert_u32_ne(a, b, fmt...) assert_cmp(uint32_t, a, b, !=, \
+ ==, PRIu32, fmt)
+#define assert_u32_lt(a, b, fmt...) assert_cmp(uint32_t, a, b, <, \
+ >=, PRIu32, fmt)
+#define assert_u32_le(a, b, fmt...) assert_cmp(uint32_t, a, b, <=, \
+ >, PRIu32, fmt)
+#define assert_u32_ge(a, b, fmt...) assert_cmp(uint32_t, a, b, >=, \
+ <, PRIu32, fmt)
+#define assert_u32_gt(a, b, fmt...) assert_cmp(uint32_t, a, b, >, \
+ <=, PRIu32, fmt)
+
+#define assert_d64_eq(a, b, fmt...) assert_cmp(int64_t, a, b, ==, \
+ !=, PRId64, fmt)
+#define assert_d64_ne(a, b, fmt...) assert_cmp(int64_t, a, b, !=, \
+ ==, PRId64, fmt)
+#define assert_d64_lt(a, b, fmt...) assert_cmp(int64_t, a, b, <, \
+ >=, PRId64, fmt)
+#define assert_d64_le(a, b, fmt...) assert_cmp(int64_t, a, b, <=, \
+ >, PRId64, fmt)
+#define assert_d64_ge(a, b, fmt...) assert_cmp(int64_t, a, b, >=, \
+ <, PRId64, fmt)
+#define assert_d64_gt(a, b, fmt...) assert_cmp(int64_t, a, b, >, \
+ <=, PRId64, fmt)
+
+#define assert_u64_eq(a, b, fmt...) assert_cmp(uint64_t, a, b, ==, \
+ !=, PRIu64, fmt)
+#define assert_u64_ne(a, b, fmt...) assert_cmp(uint64_t, a, b, !=, \
+ ==, PRIu64, fmt)
+#define assert_u64_lt(a, b, fmt...) assert_cmp(uint64_t, a, b, <, \
+ >=, PRIu64, fmt)
+#define assert_u64_le(a, b, fmt...) assert_cmp(uint64_t, a, b, <=, \
+ >, PRIu64, fmt)
+#define assert_u64_ge(a, b, fmt...) assert_cmp(uint64_t, a, b, >=, \
+ <, PRIu64, fmt)
+#define assert_u64_gt(a, b, fmt...) assert_cmp(uint64_t, a, b, >, \
+ <=, PRIu64, fmt)
+
+#define assert_b_eq(a, b, fmt...) do { \
+ bool a_ = (a); \
+ bool b_ = (b); \
+ if (!(a_ == b_)) { \
+ char prefix[ASSERT_BUFSIZE]; \
+ char message[ASSERT_BUFSIZE]; \
+ malloc_snprintf(prefix, sizeof(prefix), \
+ "%s:%s:%d: Failed assertion: " \
+ "(%s) == (%s) --> %s != %s: ", \
+ __func__, __FILE__, __LINE__, \
+ #a, #b, a_ ? "true" : "false", \
+ b_ ? "true" : "false"); \
+ malloc_snprintf(message, sizeof(message), fmt); \
+ p_test_fail(prefix, message); \
+ } \
+} while (0)
+#define assert_b_ne(a, b, fmt...) do { \
+ bool a_ = (a); \
+ bool b_ = (b); \
+ if (!(a_ != b_)) { \
+ char prefix[ASSERT_BUFSIZE]; \
+ char message[ASSERT_BUFSIZE]; \
+ malloc_snprintf(prefix, sizeof(prefix), \
+ "%s:%s:%d: Failed assertion: " \
+ "(%s) != (%s) --> %s == %s: ", \
+ __func__, __FILE__, __LINE__, \
+ #a, #b, a_ ? "true" : "false", \
+ b_ ? "true" : "false"); \
+ malloc_snprintf(message, sizeof(message), fmt); \
+ p_test_fail(prefix, message); \
+ } \
+} while (0)
+#define assert_true(a, fmt...) assert_b_eq(a, true, fmt)
+#define assert_false(a, fmt...) assert_b_eq(a, false, fmt)
+
+#define assert_str_eq(a, b, fmt...) do { \
+ if (strcmp((a), (b))) { \
+ char prefix[ASSERT_BUFSIZE]; \
+ char message[ASSERT_BUFSIZE]; \
+ malloc_snprintf(prefix, sizeof(prefix), \
+ "%s:%s:%d: Failed assertion: " \
+ "(%s) same as (%s) --> " \
+ "\"%s\" differs from \"%s\": ", \
+ __func__, __FILE__, __LINE__, #a, #b, a, b); \
+ malloc_snprintf(message, sizeof(message), fmt); \
+ p_test_fail(prefix, message); \
+ } \
+} while (0)
+#define assert_str_ne(a, b, fmt...) do { \
+ if (!strcmp((a), (b))) { \
+ char prefix[ASSERT_BUFSIZE]; \
+ char message[ASSERT_BUFSIZE]; \
+ malloc_snprintf(prefix, sizeof(prefix), \
+ "%s:%s:%d: Failed assertion: " \
+ "(%s) differs from (%s) --> " \
+ "\"%s\" same as \"%s\": ", \
+ __func__, __FILE__, __LINE__, #a, #b, a, b); \
+ malloc_snprintf(message, sizeof(message), fmt); \
+ p_test_fail(prefix, message); \
+ } \
+} while (0)
+
+#define assert_not_reached(fmt...) do { \
+ char prefix[ASSERT_BUFSIZE]; \
+ char message[ASSERT_BUFSIZE]; \
+ malloc_snprintf(prefix, sizeof(prefix), \
+ "%s:%s:%d: Unreachable code reached: ", \
+ __func__, __FILE__, __LINE__); \
+ malloc_snprintf(message, sizeof(message), fmt); \
+ p_test_fail(prefix, message); \
+} while (0)
+
+/*
+ * If this enum changes, corresponding changes in test/test.sh.in are also
+ * necessary.
+ */
+typedef enum {
+ test_status_pass = 0,
+ test_status_skip = 1,
+ test_status_fail = 2,
+
+ test_status_count = 3
+} test_status_t;
+
+typedef void (test_t)(void);
+
+#define TEST_BEGIN(f) \
+static void \
+f(void) \
+{ \
+ p_test_init(#f);
+
+#define TEST_END \
+ goto label_test_end; \
+label_test_end: \
+ p_test_fini(); \
+}
+
+#define test(tests...) \
+ p_test(tests, NULL)
+
+#define test_skip_if(e) do { \
+ if (e) { \
+ test_skip("%s:%s:%d: Test skipped: (%s)", \
+ __func__, __FILE__, __LINE__, #e); \
+ goto label_test_end; \
+ } \
+} while (0)
+
+void test_skip(const char *format, ...) JEMALLOC_ATTR(format(printf, 1, 2));
+void test_fail(const char *format, ...) JEMALLOC_ATTR(format(printf, 1, 2));
+
+/* For private use by macros. */
+test_status_t p_test(test_t* t, ...);
+void p_test_init(const char *name);
+void p_test_fini(void);
+void p_test_fail(const char *prefix, const char *message);
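
(Editorial sketch, not part of the patch; the test files added later in this commit are the authoritative examples.) A complete test program built on the macros above looks roughly like the following; test_example and its assertion are placeholders:

#include "test/jemalloc_test.h"

TEST_BEGIN(test_example)
{

	assert_d_eq(1 + 1, 2, "Unexpected arithmetic result");
}
TEST_END

int
main(void)
{

	return (test(
	    test_example));
}
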
diff --git a/deps/jemalloc/test/include/test/thd.h b/deps/jemalloc/test/include/test/thd.h
new file mode 100644
index 000000000..f941d7a75
--- /dev/null
+++ b/deps/jemalloc/test/include/test/thd.h
@@ -0,0 +1,9 @@
+/* Abstraction layer for threading in tests */
+#ifdef _WIN32
+typedef HANDLE thd_t;
+#else
+typedef pthread_t thd_t;
+#endif
+
+void thd_create(thd_t *thd, void *(*proc)(void *), void *arg);
+void thd_join(thd_t thd, void **ret);
diff --git a/deps/jemalloc/test/integration/MALLOCX_ARENA.c b/deps/jemalloc/test/integration/MALLOCX_ARENA.c
new file mode 100644
index 000000000..71cf6f255
--- /dev/null
+++ b/deps/jemalloc/test/integration/MALLOCX_ARENA.c
@@ -0,0 +1,58 @@
+#include "test/jemalloc_test.h"
+
+#define NTHREADS 10
+
+void *
+thd_start(void *arg)
+{
+ unsigned thread_ind = (unsigned)(uintptr_t)arg;
+ unsigned arena_ind;
+ void *p;
+ size_t sz;
+
+ sz = sizeof(arena_ind);
+ assert_d_eq(mallctl("arenas.extend", &arena_ind, &sz, NULL, 0), 0,
+ "Error in arenas.extend");
+
+ if (thread_ind % 4 != 3) {
+ size_t mib[3];
+ size_t miblen = sizeof(mib) / sizeof(size_t);
+ const char *dss_precs[] = {"disabled", "primary", "secondary"};
+ const char *dss = dss_precs[thread_ind %
+ (sizeof(dss_precs)/sizeof(char*))];
+ assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0,
+ "Error in mallctlnametomib()");
+ mib[1] = arena_ind;
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&dss,
+ sizeof(const char *)), 0, "Error in mallctlbymib()");
+ }
+
+ p = mallocx(1, MALLOCX_ARENA(arena_ind));
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ dallocx(p, 0);
+
+ return (NULL);
+}
+
+TEST_BEGIN(test_ALLOCM_ARENA)
+{
+ thd_t thds[NTHREADS];
+ unsigned i;
+
+ for (i = 0; i < NTHREADS; i++) {
+ thd_create(&thds[i], thd_start,
+ (void *)(uintptr_t)i);
+ }
+
+ for (i = 0; i < NTHREADS; i++)
+ thd_join(thds[i], NULL);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_ALLOCM_ARENA));
+}
diff --git a/deps/jemalloc/test/aligned_alloc.c b/deps/jemalloc/test/integration/aligned_alloc.c
index 5a9b0caea..609001487 100644
--- a/deps/jemalloc/test/aligned_alloc.c
+++ b/deps/jemalloc/test/integration/aligned_alloc.c
@@ -1,39 +1,36 @@
-#define JEMALLOC_MANGLE
-#include "jemalloc_test.h"
+#include "test/jemalloc_test.h"
-#define CHUNK 0x400000
+#define CHUNK 0x400000
/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */
-#define MAXALIGN ((size_t)0x2000000LU)
-#define NITER 4
+#define MAXALIGN ((size_t)0x2000000LU)
+#define NITER 4
-int
-main(void)
+TEST_BEGIN(test_alignment_errors)
{
- size_t alignment, size, total;
- unsigned i;
- void *p, *ps[NITER];
-
- malloc_printf("Test begin\n");
+ size_t alignment;
+ void *p;
- /* Test error conditions. */
alignment = 0;
set_errno(0);
p = aligned_alloc(alignment, 1);
- if (p != NULL || get_errno() != EINVAL) {
- malloc_printf(
- "Expected error for invalid alignment %zu\n", alignment);
- }
+ assert_false(p != NULL || get_errno() != EINVAL,
+ "Expected error for invalid alignment %zu", alignment);
for (alignment = sizeof(size_t); alignment < MAXALIGN;
alignment <<= 1) {
set_errno(0);
p = aligned_alloc(alignment + 1, 1);
- if (p != NULL || get_errno() != EINVAL) {
- malloc_printf(
- "Expected error for invalid alignment %zu\n",
- alignment + 1);
- }
+ assert_false(p != NULL || get_errno() != EINVAL,
+ "Expected error for invalid alignment %zu",
+ alignment + 1);
}
+}
+TEST_END
+
+TEST_BEGIN(test_oom_errors)
+{
+ size_t alignment, size;
+ void *p;
#if LG_SIZEOF_PTR == 3
alignment = UINT64_C(0x8000000000000000);
@@ -44,26 +41,22 @@ main(void)
#endif
set_errno(0);
p = aligned_alloc(alignment, size);
- if (p != NULL || get_errno() != ENOMEM) {
- malloc_printf(
- "Expected error for aligned_alloc(%zu, %zu)\n",
- alignment, size);
- }
+ assert_false(p != NULL || get_errno() != ENOMEM,
+ "Expected error for aligned_alloc(%zu, %zu)",
+ alignment, size);
#if LG_SIZEOF_PTR == 3
alignment = UINT64_C(0x4000000000000000);
- size = UINT64_C(0x8400000000000001);
+ size = UINT64_C(0xc000000000000001);
#else
alignment = 0x40000000LU;
- size = 0x84000001LU;
+ size = 0xc0000001LU;
#endif
set_errno(0);
p = aligned_alloc(alignment, size);
- if (p != NULL || get_errno() != ENOMEM) {
- malloc_printf(
- "Expected error for aligned_alloc(%zu, %zu)\n",
- alignment, size);
- }
+ assert_false(p != NULL || get_errno() != ENOMEM,
+ "Expected error for aligned_alloc(%zu, %zu)",
+ alignment, size);
alignment = 0x10LU;
#if LG_SIZEOF_PTR == 3
@@ -73,11 +66,17 @@ main(void)
#endif
set_errno(0);
p = aligned_alloc(alignment, size);
- if (p != NULL || get_errno() != ENOMEM) {
- malloc_printf(
- "Expected error for aligned_alloc(&p, %zu, %zu)\n",
- alignment, size);
- }
+ assert_false(p != NULL || get_errno() != ENOMEM,
+	    "Expected error for aligned_alloc(%zu, %zu)",
+ alignment, size);
+}
+TEST_END
+
+TEST_BEGIN(test_alignment_and_size)
+{
+ size_t alignment, size, total;
+ unsigned i;
+ void *ps[NITER];
for (i = 0; i < NITER; i++)
ps[i] = NULL;
@@ -86,7 +85,6 @@ main(void)
alignment <= MAXALIGN;
alignment <<= 1) {
total = 0;
- malloc_printf("Alignment: %zu\n", alignment);
for (size = 1;
size < 3 * alignment && size < (1U << 31);
size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
@@ -95,11 +93,11 @@ main(void)
if (ps[i] == NULL) {
char buf[BUFERROR_BUF];
- buferror(buf, sizeof(buf));
- malloc_printf(
- "Error for size %zu (%#zx): %s\n",
- size, size, buf);
- exit(1);
+ buferror(get_errno(), buf, sizeof(buf));
+ test_fail(
+ "Error for alignment=%zu, "
+ "size=%zu (%#zx): %s",
+ alignment, size, size, buf);
}
total += malloc_usable_size(ps[i]);
if (total >= (MAXALIGN << 1))
@@ -113,7 +111,15 @@ main(void)
}
}
}
+}
+TEST_END
+
+int
+main(void)
+{
- malloc_printf("Test end\n");
- return (0);
+ return (test(
+ test_alignment_errors,
+ test_oom_errors,
+ test_alignment_and_size));
}
diff --git a/deps/jemalloc/test/integration/allocated.c b/deps/jemalloc/test/integration/allocated.c
new file mode 100644
index 000000000..3630e80ce
--- /dev/null
+++ b/deps/jemalloc/test/integration/allocated.c
@@ -0,0 +1,125 @@
+#include "test/jemalloc_test.h"
+
+static const bool config_stats =
+#ifdef JEMALLOC_STATS
+ true
+#else
+ false
+#endif
+ ;
+
+void *
+thd_start(void *arg)
+{
+ int err;
+ void *p;
+ uint64_t a0, a1, d0, d1;
+ uint64_t *ap0, *ap1, *dp0, *dp1;
+ size_t sz, usize;
+
+ sz = sizeof(a0);
+ if ((err = mallctl("thread.allocated", &a0, &sz, NULL, 0))) {
+ if (err == ENOENT)
+ goto label_ENOENT;
+ test_fail("%s(): Error in mallctl(): %s", __func__,
+ strerror(err));
+ }
+ sz = sizeof(ap0);
+ if ((err = mallctl("thread.allocatedp", &ap0, &sz, NULL, 0))) {
+ if (err == ENOENT)
+ goto label_ENOENT;
+ test_fail("%s(): Error in mallctl(): %s", __func__,
+ strerror(err));
+ }
+ assert_u64_eq(*ap0, a0,
+ "\"thread.allocatedp\" should provide a pointer to internal "
+ "storage");
+
+ sz = sizeof(d0);
+ if ((err = mallctl("thread.deallocated", &d0, &sz, NULL, 0))) {
+ if (err == ENOENT)
+ goto label_ENOENT;
+ test_fail("%s(): Error in mallctl(): %s", __func__,
+ strerror(err));
+ }
+ sz = sizeof(dp0);
+ if ((err = mallctl("thread.deallocatedp", &dp0, &sz, NULL, 0))) {
+ if (err == ENOENT)
+ goto label_ENOENT;
+ test_fail("%s(): Error in mallctl(): %s", __func__,
+ strerror(err));
+ }
+ assert_u64_eq(*dp0, d0,
+ "\"thread.deallocatedp\" should provide a pointer to internal "
+ "storage");
+
+ p = malloc(1);
+ assert_ptr_not_null(p, "Unexpected malloc() error");
+
+ sz = sizeof(a1);
+ mallctl("thread.allocated", &a1, &sz, NULL, 0);
+ sz = sizeof(ap1);
+ mallctl("thread.allocatedp", &ap1, &sz, NULL, 0);
+ assert_u64_eq(*ap1, a1,
+ "Dereferenced \"thread.allocatedp\" value should equal "
+ "\"thread.allocated\" value");
+ assert_ptr_eq(ap0, ap1,
+ "Pointer returned by \"thread.allocatedp\" should not change");
+
+ usize = malloc_usable_size(p);
+ assert_u64_le(a0 + usize, a1,
+ "Allocated memory counter should increase by at least the amount "
+ "explicitly allocated");
+
+ free(p);
+
+ sz = sizeof(d1);
+ mallctl("thread.deallocated", &d1, &sz, NULL, 0);
+ sz = sizeof(dp1);
+ mallctl("thread.deallocatedp", &dp1, &sz, NULL, 0);
+ assert_u64_eq(*dp1, d1,
+ "Dereferenced \"thread.deallocatedp\" value should equal "
+ "\"thread.deallocated\" value");
+ assert_ptr_eq(dp0, dp1,
+ "Pointer returned by \"thread.deallocatedp\" should not change");
+
+ assert_u64_le(d0 + usize, d1,
+ "Deallocated memory counter should increase by at least the amount "
+ "explicitly deallocated");
+
+ return (NULL);
+label_ENOENT:
+ assert_false(config_stats,
+ "ENOENT should only be returned if stats are disabled");
+ test_skip("\"thread.allocated\" mallctl not available");
+ return (NULL);
+}
+
+TEST_BEGIN(test_main_thread)
+{
+
+ thd_start(NULL);
+}
+TEST_END
+
+TEST_BEGIN(test_subthread)
+{
+ thd_t thd;
+
+ thd_create(&thd, thd_start, NULL);
+ thd_join(thd, NULL);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ /* Run tests multiple times to check for bad interactions. */
+ return (test(
+ test_main_thread,
+ test_subthread,
+ test_main_thread,
+ test_subthread,
+ test_main_thread));
+}
diff --git a/deps/jemalloc/test/integration/allocm.c b/deps/jemalloc/test/integration/allocm.c
new file mode 100644
index 000000000..7b4ea0c2c
--- /dev/null
+++ b/deps/jemalloc/test/integration/allocm.c
@@ -0,0 +1,107 @@
+#include "test/jemalloc_test.h"
+
+#define CHUNK 0x400000
+#define MAXALIGN (((size_t)1) << 25)
+#define NITER 4
+
+TEST_BEGIN(test_basic)
+{
+ size_t nsz, rsz, sz;
+ void *p;
+
+ sz = 42;
+ nsz = 0;
+ assert_d_eq(nallocm(&nsz, sz, 0), ALLOCM_SUCCESS,
+ "Unexpected nallocm() error");
+ rsz = 0;
+ assert_d_eq(allocm(&p, &rsz, sz, 0), ALLOCM_SUCCESS,
+ "Unexpected allocm() error");
+ assert_zu_ge(rsz, sz, "Real size smaller than expected");
+ assert_zu_eq(nsz, rsz, "nallocm()/allocm() rsize mismatch");
+ assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
+ "Unexpected dallocm() error");
+
+ assert_d_eq(allocm(&p, NULL, sz, 0), ALLOCM_SUCCESS,
+ "Unexpected allocm() error");
+ assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
+ "Unexpected dallocm() error");
+
+ nsz = 0;
+ assert_d_eq(nallocm(&nsz, sz, ALLOCM_ZERO), ALLOCM_SUCCESS,
+ "Unexpected nallocm() error");
+ rsz = 0;
+ assert_d_eq(allocm(&p, &rsz, sz, ALLOCM_ZERO), ALLOCM_SUCCESS,
+ "Unexpected allocm() error");
+ assert_zu_eq(nsz, rsz, "nallocm()/allocm() rsize mismatch");
+ assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
+ "Unexpected dallocm() error");
+}
+TEST_END
+
+TEST_BEGIN(test_alignment_and_size)
+{
+ int r;
+ size_t nsz, rsz, sz, alignment, total;
+ unsigned i;
+ void *ps[NITER];
+
+ for (i = 0; i < NITER; i++)
+ ps[i] = NULL;
+
+ for (alignment = 8;
+ alignment <= MAXALIGN;
+ alignment <<= 1) {
+ total = 0;
+ for (sz = 1;
+ sz < 3 * alignment && sz < (1U << 31);
+ sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
+ for (i = 0; i < NITER; i++) {
+ nsz = 0;
+ r = nallocm(&nsz, sz, ALLOCM_ALIGN(alignment) |
+ ALLOCM_ZERO);
+ assert_d_eq(r, ALLOCM_SUCCESS,
+ "nallocm() error for alignment=%zu, "
+ "size=%zu (%#zx): %d",
+ alignment, sz, sz, r);
+ rsz = 0;
+ r = allocm(&ps[i], &rsz, sz,
+ ALLOCM_ALIGN(alignment) | ALLOCM_ZERO);
+ assert_d_eq(r, ALLOCM_SUCCESS,
+ "allocm() error for alignment=%zu, "
+ "size=%zu (%#zx): %d",
+ alignment, sz, sz, r);
+ assert_zu_ge(rsz, sz,
+ "Real size smaller than expected for "
+ "alignment=%zu, size=%zu", alignment, sz);
+ assert_zu_eq(nsz, rsz,
+ "nallocm()/allocm() rsize mismatch for "
+ "alignment=%zu, size=%zu", alignment, sz);
+ assert_ptr_null(
+ (void *)((uintptr_t)ps[i] & (alignment-1)),
+ "%p inadequately aligned for"
+ " alignment=%zu, size=%zu", ps[i],
+ alignment, sz);
+ sallocm(ps[i], &rsz, 0);
+ total += rsz;
+ if (total >= (MAXALIGN << 1))
+ break;
+ }
+ for (i = 0; i < NITER; i++) {
+ if (ps[i] != NULL) {
+ dallocm(ps[i], 0);
+ ps[i] = NULL;
+ }
+ }
+ }
+ }
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_basic,
+ test_alignment_and_size));
+}
diff --git a/deps/jemalloc/test/integration/mallocx.c b/deps/jemalloc/test/integration/mallocx.c
new file mode 100644
index 000000000..123e041fa
--- /dev/null
+++ b/deps/jemalloc/test/integration/mallocx.c
@@ -0,0 +1,97 @@
+#include "test/jemalloc_test.h"
+
+#define CHUNK 0x400000
+#define MAXALIGN (((size_t)1) << 25)
+#define NITER 4
+
+TEST_BEGIN(test_basic)
+{
+ size_t nsz, rsz, sz;
+ void *p;
+
+ sz = 42;
+ nsz = nallocx(sz, 0);
+ assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
+ p = mallocx(sz, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ rsz = sallocx(p, 0);
+ assert_zu_ge(rsz, sz, "Real size smaller than expected");
+ assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
+ dallocx(p, 0);
+
+ p = mallocx(sz, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ dallocx(p, 0);
+
+ nsz = nallocx(sz, MALLOCX_ZERO);
+ assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
+ p = mallocx(sz, MALLOCX_ZERO);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ rsz = sallocx(p, 0);
+ assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_alignment_and_size)
+{
+ size_t nsz, rsz, sz, alignment, total;
+ unsigned i;
+ void *ps[NITER];
+
+ for (i = 0; i < NITER; i++)
+ ps[i] = NULL;
+
+ for (alignment = 8;
+ alignment <= MAXALIGN;
+ alignment <<= 1) {
+ total = 0;
+ for (sz = 1;
+ sz < 3 * alignment && sz < (1U << 31);
+ sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
+ for (i = 0; i < NITER; i++) {
+ nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
+ MALLOCX_ZERO);
+ assert_zu_ne(nsz, 0,
+ "nallocx() error for alignment=%zu, "
+ "size=%zu (%#zx)", alignment, sz, sz);
+ ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) |
+ MALLOCX_ZERO);
+ assert_ptr_not_null(ps[i],
+ "mallocx() error for alignment=%zu, "
+ "size=%zu (%#zx)", alignment, sz, sz);
+ rsz = sallocx(ps[i], 0);
+ assert_zu_ge(rsz, sz,
+ "Real size smaller than expected for "
+ "alignment=%zu, size=%zu", alignment, sz);
+ assert_zu_eq(nsz, rsz,
+ "nallocx()/sallocx() size mismatch for "
+ "alignment=%zu, size=%zu", alignment, sz);
+ assert_ptr_null(
+ (void *)((uintptr_t)ps[i] & (alignment-1)),
+ "%p inadequately aligned for"
+ " alignment=%zu, size=%zu", ps[i],
+ alignment, sz);
+ total += rsz;
+ if (total >= (MAXALIGN << 1))
+ break;
+ }
+ for (i = 0; i < NITER; i++) {
+ if (ps[i] != NULL) {
+ dallocx(ps[i], 0);
+ ps[i] = NULL;
+ }
+ }
+ }
+ }
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_basic,
+ test_alignment_and_size));
+}
diff --git a/deps/jemalloc/test/integration/mremap.c b/deps/jemalloc/test/integration/mremap.c
new file mode 100644
index 000000000..a7fb7ef0a
--- /dev/null
+++ b/deps/jemalloc/test/integration/mremap.c
@@ -0,0 +1,45 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_mremap)
+{
+ int err;
+ size_t sz, lg_chunk, chunksize, i;
+ char *p, *q;
+
+ sz = sizeof(lg_chunk);
+ err = mallctl("opt.lg_chunk", &lg_chunk, &sz, NULL, 0);
+ assert_d_eq(err, 0, "Error in mallctl(): %s", strerror(err));
+ chunksize = ((size_t)1U) << lg_chunk;
+
+ p = (char *)malloc(chunksize);
+ assert_ptr_not_null(p, "malloc(%zu) --> %p", chunksize, p);
+ memset(p, 'a', chunksize);
+
+ q = (char *)realloc(p, chunksize * 2);
+ assert_ptr_not_null(q, "realloc(%p, %zu) --> %p", p, chunksize * 2,
+ q);
+ for (i = 0; i < chunksize; i++) {
+ assert_c_eq(q[i], 'a',
+ "realloc() should preserve existing bytes across copies");
+ }
+
+ p = q;
+
+ q = (char *)realloc(p, chunksize);
+ assert_ptr_not_null(q, "realloc(%p, %zu) --> %p", p, chunksize, q);
+ for (i = 0; i < chunksize; i++) {
+ assert_c_eq(q[i], 'a',
+ "realloc() should preserve existing bytes across copies");
+ }
+
+ free(q);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_mremap));
+}
diff --git a/deps/jemalloc/test/integration/posix_memalign.c b/deps/jemalloc/test/integration/posix_memalign.c
new file mode 100644
index 000000000..19741c6cb
--- /dev/null
+++ b/deps/jemalloc/test/integration/posix_memalign.c
@@ -0,0 +1,119 @@
+#include "test/jemalloc_test.h"
+
+#define CHUNK 0x400000
+/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */
+#define MAXALIGN ((size_t)0x2000000LU)
+#define NITER 4
+
+TEST_BEGIN(test_alignment_errors)
+{
+ size_t alignment;
+ void *p;
+
+ for (alignment = 0; alignment < sizeof(void *); alignment++) {
+ assert_d_eq(posix_memalign(&p, alignment, 1), EINVAL,
+ "Expected error for invalid alignment %zu",
+ alignment);
+ }
+
+ for (alignment = sizeof(size_t); alignment < MAXALIGN;
+ alignment <<= 1) {
+ assert_d_ne(posix_memalign(&p, alignment + 1, 1), 0,
+ "Expected error for invalid alignment %zu",
+ alignment + 1);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_oom_errors)
+{
+ size_t alignment, size;
+ void *p;
+
+#if LG_SIZEOF_PTR == 3
+ alignment = UINT64_C(0x8000000000000000);
+ size = UINT64_C(0x8000000000000000);
+#else
+ alignment = 0x80000000LU;
+ size = 0x80000000LU;
+#endif
+ assert_d_ne(posix_memalign(&p, alignment, size), 0,
+ "Expected error for posix_memalign(&p, %zu, %zu)",
+ alignment, size);
+
+#if LG_SIZEOF_PTR == 3
+ alignment = UINT64_C(0x4000000000000000);
+ size = UINT64_C(0xc000000000000001);
+#else
+ alignment = 0x40000000LU;
+ size = 0xc0000001LU;
+#endif
+ assert_d_ne(posix_memalign(&p, alignment, size), 0,
+ "Expected error for posix_memalign(&p, %zu, %zu)",
+ alignment, size);
+
+ alignment = 0x10LU;
+#if LG_SIZEOF_PTR == 3
+ size = UINT64_C(0xfffffffffffffff0);
+#else
+ size = 0xfffffff0LU;
+#endif
+ assert_d_ne(posix_memalign(&p, alignment, size), 0,
+ "Expected error for posix_memalign(&p, %zu, %zu)",
+ alignment, size);
+}
+TEST_END
+
+TEST_BEGIN(test_alignment_and_size)
+{
+ size_t alignment, size, total;
+ unsigned i;
+ int err;
+ void *ps[NITER];
+
+ for (i = 0; i < NITER; i++)
+ ps[i] = NULL;
+
+ for (alignment = 8;
+ alignment <= MAXALIGN;
+ alignment <<= 1) {
+ total = 0;
+ for (size = 1;
+ size < 3 * alignment && size < (1U << 31);
+ size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
+ for (i = 0; i < NITER; i++) {
+ err = posix_memalign(&ps[i],
+ alignment, size);
+ if (err) {
+ char buf[BUFERROR_BUF];
+
+ buferror(get_errno(), buf, sizeof(buf));
+ test_fail(
+ "Error for alignment=%zu, "
+ "size=%zu (%#zx): %s",
+ alignment, size, size, buf);
+ }
+ total += malloc_usable_size(ps[i]);
+ if (total >= (MAXALIGN << 1))
+ break;
+ }
+ for (i = 0; i < NITER; i++) {
+ if (ps[i] != NULL) {
+ free(ps[i]);
+ ps[i] = NULL;
+ }
+ }
+ }
+ }
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_alignment_errors,
+ test_oom_errors,
+ test_alignment_and_size));
+}
diff --git a/deps/jemalloc/test/integration/rallocm.c b/deps/jemalloc/test/integration/rallocm.c
new file mode 100644
index 000000000..33c11bb7c
--- /dev/null
+++ b/deps/jemalloc/test/integration/rallocm.c
@@ -0,0 +1,111 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_same_size)
+{
+ void *p, *q;
+ size_t sz, tsz;
+
+ assert_d_eq(allocm(&p, &sz, 42, 0), ALLOCM_SUCCESS,
+ "Unexpected allocm() error");
+
+ q = p;
+ assert_d_eq(rallocm(&q, &tsz, sz, 0, ALLOCM_NO_MOVE), ALLOCM_SUCCESS,
+ "Unexpected rallocm() error");
+ assert_ptr_eq(q, p, "Unexpected object move");
+ assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
+
+ assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
+ "Unexpected dallocm() error");
+}
+TEST_END
+
+TEST_BEGIN(test_extra_no_move)
+{
+ void *p, *q;
+ size_t sz, tsz;
+
+ assert_d_eq(allocm(&p, &sz, 42, 0), ALLOCM_SUCCESS,
+ "Unexpected allocm() error");
+
+ q = p;
+ assert_d_eq(rallocm(&q, &tsz, sz, sz-42, ALLOCM_NO_MOVE),
+ ALLOCM_SUCCESS, "Unexpected rallocm() error");
+ assert_ptr_eq(q, p, "Unexpected object move");
+ assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
+
+ assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
+ "Unexpected dallocm() error");
+}
+TEST_END
+
+TEST_BEGIN(test_no_move_fail)
+{
+ void *p, *q;
+ size_t sz, tsz;
+
+ assert_d_eq(allocm(&p, &sz, 42, 0), ALLOCM_SUCCESS,
+ "Unexpected allocm() error");
+
+ q = p;
+ assert_d_eq(rallocm(&q, &tsz, sz + 5, 0, ALLOCM_NO_MOVE),
+ ALLOCM_ERR_NOT_MOVED, "Unexpected rallocm() result");
+ assert_ptr_eq(q, p, "Unexpected object move");
+ assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
+
+ assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
+ "Unexpected dallocm() error");
+}
+TEST_END
+
+TEST_BEGIN(test_grow_and_shrink)
+{
+ void *p, *q;
+ size_t tsz;
+#define NCYCLES 3
+ unsigned i, j;
+#define NSZS 2500
+ size_t szs[NSZS];
+#define MAXSZ ZU(12 * 1024 * 1024)
+
+ assert_d_eq(allocm(&p, &szs[0], 1, 0), ALLOCM_SUCCESS,
+ "Unexpected allocm() error");
+
+ for (i = 0; i < NCYCLES; i++) {
+ for (j = 1; j < NSZS && szs[j-1] < MAXSZ; j++) {
+ q = p;
+ assert_d_eq(rallocm(&q, &szs[j], szs[j-1]+1, 0, 0),
+ ALLOCM_SUCCESS,
+ "Unexpected rallocm() error for size=%zu-->%zu",
+ szs[j-1], szs[j-1]+1);
+ assert_zu_ne(szs[j], szs[j-1]+1,
+			    "Expected size to be at least: %zu", szs[j-1]+1);
+ p = q;
+ }
+
+ for (j--; j > 0; j--) {
+ q = p;
+ assert_d_eq(rallocm(&q, &tsz, szs[j-1], 0, 0),
+ ALLOCM_SUCCESS,
+ "Unexpected rallocm() error for size=%zu-->%zu",
+ szs[j], szs[j-1]);
+ assert_zu_eq(tsz, szs[j-1],
+ "Expected size=%zu, got size=%zu", szs[j-1], tsz);
+ p = q;
+ }
+ }
+
+ assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
+ "Unexpected dallocm() error");
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_same_size,
+ test_extra_no_move,
+ test_no_move_fail,
+ test_grow_and_shrink));
+}
diff --git a/deps/jemalloc/test/integration/rallocx.c b/deps/jemalloc/test/integration/rallocx.c
new file mode 100644
index 000000000..ee21aedff
--- /dev/null
+++ b/deps/jemalloc/test/integration/rallocx.c
@@ -0,0 +1,182 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_grow_and_shrink)
+{
+ void *p, *q;
+ size_t tsz;
+#define NCYCLES 3
+ unsigned i, j;
+#define NSZS 2500
+ size_t szs[NSZS];
+#define MAXSZ ZU(12 * 1024 * 1024)
+
+ p = mallocx(1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ szs[0] = sallocx(p, 0);
+
+ for (i = 0; i < NCYCLES; i++) {
+ for (j = 1; j < NSZS && szs[j-1] < MAXSZ; j++) {
+ q = rallocx(p, szs[j-1]+1, 0);
+ assert_ptr_not_null(q,
+ "Unexpected rallocx() error for size=%zu-->%zu",
+ szs[j-1], szs[j-1]+1);
+ szs[j] = sallocx(q, 0);
+ assert_zu_ne(szs[j], szs[j-1]+1,
+			    "Expected size to be at least: %zu", szs[j-1]+1);
+ p = q;
+ }
+
+ for (j--; j > 0; j--) {
+ q = rallocx(p, szs[j-1], 0);
+ assert_ptr_not_null(q,
+ "Unexpected rallocx() error for size=%zu-->%zu",
+ szs[j], szs[j-1]);
+ tsz = sallocx(q, 0);
+ assert_zu_eq(tsz, szs[j-1],
+ "Expected size=%zu, got size=%zu", szs[j-1], tsz);
+ p = q;
+ }
+ }
+
+ dallocx(p, 0);
+#undef MAXSZ
+#undef NSZS
+#undef NCYCLES
+}
+TEST_END
+
+static bool
+validate_fill(const void *p, uint8_t c, size_t offset, size_t len)
+{
+ bool ret = false;
+ const uint8_t *buf = (const uint8_t *)p;
+ size_t i;
+
+ for (i = 0; i < len; i++) {
+ uint8_t b = buf[offset+i];
+ if (b != c) {
+ test_fail("Allocation at %p contains %#x rather than "
+ "%#x at offset %zu", p, b, c, offset+i);
+ ret = true;
+ }
+ }
+
+ return (ret);
+}
+
+TEST_BEGIN(test_zero)
+{
+ void *p, *q;
+ size_t psz, qsz, i, j;
+ size_t start_sizes[] = {1, 3*1024, 63*1024, 4095*1024};
+#define FILL_BYTE 0xaaU
+#define RANGE 2048
+
+ for (i = 0; i < sizeof(start_sizes)/sizeof(size_t); i++) {
+ size_t start_size = start_sizes[i];
+ p = mallocx(start_size, MALLOCX_ZERO);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ psz = sallocx(p, 0);
+
+ assert_false(validate_fill(p, 0, 0, psz),
+ "Expected zeroed memory");
+ memset(p, FILL_BYTE, psz);
+ assert_false(validate_fill(p, FILL_BYTE, 0, psz),
+ "Expected filled memory");
+
+ for (j = 1; j < RANGE; j++) {
+ q = rallocx(p, start_size+j, MALLOCX_ZERO);
+ assert_ptr_not_null(q, "Unexpected rallocx() error");
+ qsz = sallocx(q, 0);
+ if (q != p || qsz != psz) {
+ assert_false(validate_fill(q, FILL_BYTE, 0,
+ psz), "Expected filled memory");
+ assert_false(validate_fill(q, 0, psz, qsz-psz),
+ "Expected zeroed memory");
+ }
+ if (psz != qsz) {
+ memset(q+psz, FILL_BYTE, qsz-psz);
+ psz = qsz;
+ }
+ p = q;
+ }
+ assert_false(validate_fill(p, FILL_BYTE, 0, psz),
+ "Expected filled memory");
+ dallocx(p, 0);
+ }
+#undef FILL_BYTE
+}
+TEST_END
+
+TEST_BEGIN(test_align)
+{
+ void *p, *q;
+ size_t align;
+#define MAX_ALIGN (ZU(1) << 25)
+
+ align = ZU(1);
+ p = mallocx(1, MALLOCX_ALIGN(align));
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+ for (align <<= 1; align <= MAX_ALIGN; align <<= 1) {
+ q = rallocx(p, 1, MALLOCX_ALIGN(align));
+ assert_ptr_not_null(q,
+ "Unexpected rallocx() error for align=%zu", align);
+ assert_ptr_null(
+ (void *)((uintptr_t)q & (align-1)),
+ "%p inadequately aligned for align=%zu",
+ q, align);
+ p = q;
+ }
+ dallocx(p, 0);
+#undef MAX_ALIGN
+}
+TEST_END
+
+TEST_BEGIN(test_lg_align_and_zero)
+{
+ void *p, *q;
+ size_t lg_align, sz;
+#define MAX_LG_ALIGN 25
+#define MAX_VALIDATE (ZU(1) << 22)
+
+ lg_align = ZU(0);
+ p = mallocx(1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+ for (lg_align++; lg_align <= MAX_LG_ALIGN; lg_align++) {
+ q = rallocx(p, 1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
+ assert_ptr_not_null(q,
+ "Unexpected rallocx() error for lg_align=%zu", lg_align);
+ assert_ptr_null(
+ (void *)((uintptr_t)q & ((ZU(1) << lg_align)-1)),
+ "%p inadequately aligned for lg_align=%zu",
+ q, lg_align);
+ sz = sallocx(q, 0);
+ if ((sz << 1) <= MAX_VALIDATE) {
+ assert_false(validate_fill(q, 0, 0, sz),
+ "Expected zeroed memory");
+ } else {
+ assert_false(validate_fill(q, 0, 0, MAX_VALIDATE),
+ "Expected zeroed memory");
+ assert_false(validate_fill(q+sz-MAX_VALIDATE, 0, 0,
+ MAX_VALIDATE), "Expected zeroed memory");
+ }
+ p = q;
+ }
+ dallocx(p, 0);
+#undef MAX_VALIDATE
+#undef MAX_LG_ALIGN
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_grow_and_shrink,
+ test_zero,
+ test_align,
+ test_lg_align_and_zero));
+}
diff --git a/deps/jemalloc/test/integration/thread_arena.c b/deps/jemalloc/test/integration/thread_arena.c
new file mode 100644
index 000000000..67be53513
--- /dev/null
+++ b/deps/jemalloc/test/integration/thread_arena.c
@@ -0,0 +1,79 @@
+#include "test/jemalloc_test.h"
+
+#define NTHREADS 10
+
+void *
+thd_start(void *arg)
+{
+ unsigned main_arena_ind = *(unsigned *)arg;
+ void *p;
+ unsigned arena_ind;
+ size_t size;
+ int err;
+
+ p = malloc(1);
+ assert_ptr_not_null(p, "Error in malloc()");
+ free(p);
+
+ size = sizeof(arena_ind);
+ if ((err = mallctl("thread.arena", &arena_ind, &size, &main_arena_ind,
+ sizeof(main_arena_ind)))) {
+ char buf[BUFERROR_BUF];
+
+ buferror(err, buf, sizeof(buf));
+ test_fail("Error in mallctl(): %s", buf);
+ }
+
+ size = sizeof(arena_ind);
+ if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) {
+ char buf[BUFERROR_BUF];
+
+ buferror(err, buf, sizeof(buf));
+ test_fail("Error in mallctl(): %s", buf);
+ }
+ assert_u_eq(arena_ind, main_arena_ind,
+ "Arena index should be same as for main thread");
+
+ return (NULL);
+}
+
+TEST_BEGIN(test_thread_arena)
+{
+ void *p;
+ unsigned arena_ind;
+ size_t size;
+ int err;
+ thd_t thds[NTHREADS];
+ unsigned i;
+
+ p = malloc(1);
+ assert_ptr_not_null(p, "Error in malloc()");
+
+ size = sizeof(arena_ind);
+ if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) {
+ char buf[BUFERROR_BUF];
+
+ buferror(err, buf, sizeof(buf));
+ test_fail("Error in mallctl(): %s", buf);
+ }
+
+ for (i = 0; i < NTHREADS; i++) {
+ thd_create(&thds[i], thd_start,
+ (void *)&arena_ind);
+ }
+
+ for (i = 0; i < NTHREADS; i++) {
+ intptr_t join_ret;
+ thd_join(thds[i], (void *)&join_ret);
+ assert_zd_eq(join_ret, 0, "Unexpected thread join error");
+ }
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_thread_arena));
+}
diff --git a/deps/jemalloc/test/integration/thread_tcache_enabled.c b/deps/jemalloc/test/integration/thread_tcache_enabled.c
new file mode 100644
index 000000000..f4e89c682
--- /dev/null
+++ b/deps/jemalloc/test/integration/thread_tcache_enabled.c
@@ -0,0 +1,113 @@
+#include "test/jemalloc_test.h"
+
+static const bool config_tcache =
+#ifdef JEMALLOC_TCACHE
+ true
+#else
+ false
+#endif
+ ;
+
+void *
+thd_start(void *arg)
+{
+ int err;
+ size_t sz;
+ bool e0, e1;
+
+ sz = sizeof(bool);
+ if ((err = mallctl("thread.tcache.enabled", &e0, &sz, NULL, 0))) {
+ if (err == ENOENT) {
+ assert_false(config_tcache,
+ "ENOENT should only be returned if tcache is "
+ "disabled");
+ }
+ goto label_ENOENT;
+ }
+
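+ /*
+  * Each "thread.tcache.enabled" write below passes the new value in e1
+  * and receives the previous value back in e0, so every assertion checks
+  * the state that was in effect before the write.
+  */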
+ if (e0) {
+ e1 = false;
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz),
+ 0, "Unexpected mallctl() error");
+ assert_true(e0, "tcache should be enabled");
+ }
+
+ e1 = true;
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
+ assert_false(e0, "tcache should be disabled");
+
+ e1 = true;
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
+ assert_true(e0, "tcache should be enabled");
+
+ e1 = false;
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
+ assert_true(e0, "tcache should be enabled");
+
+ e1 = false;
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
+ assert_false(e0, "tcache should be disabled");
+
+ free(malloc(1));
+ e1 = true;
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
+ assert_false(e0, "tcache should be disabled");
+
+ free(malloc(1));
+ e1 = true;
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
+ assert_true(e0, "tcache should be enabled");
+
+ free(malloc(1));
+ e1 = false;
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
+ assert_true(e0, "tcache should be enabled");
+
+ free(malloc(1));
+ e1 = false;
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
+ assert_false(e0, "tcache should be disabled");
+
+ free(malloc(1));
+ return (NULL);
+label_ENOENT:
+ test_skip("\"thread.tcache.enabled\" mallctl not available");
+ return (NULL);
+}
+
+TEST_BEGIN(test_main_thread)
+{
+
+ thd_start(NULL);
+}
+TEST_END
+
+TEST_BEGIN(test_subthread)
+{
+ thd_t thd;
+
+ thd_create(&thd, thd_start, NULL);
+ thd_join(thd, NULL);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ /* Run tests multiple times to check for bad interactions. */
+ return (test(
+ test_main_thread,
+ test_subthread,
+ test_main_thread,
+ test_subthread,
+ test_main_thread));
+}
diff --git a/deps/jemalloc/test/integration/xallocx.c b/deps/jemalloc/test/integration/xallocx.c
new file mode 100644
index 000000000..ab4cf945e
--- /dev/null
+++ b/deps/jemalloc/test/integration/xallocx.c
@@ -0,0 +1,59 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_same_size)
+{
+ void *p;
+ size_t sz, tsz;
+
+ p = mallocx(42, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ sz = sallocx(p, 0);
+
+ tsz = xallocx(p, sz, 0, 0);
+ assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_extra_no_move)
+{
+ void *p;
+ size_t sz, tsz;
+
+ p = mallocx(42, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ sz = sallocx(p, 0);
+
+ tsz = xallocx(p, sz, sz-42, 0);
+ assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
+
+ dallocx(p, 0);
+}
+TEST_END
+
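+/*
+ * xallocx() only resizes in place; growing past the current usable size with
+ * no extra space is expected to fail here, leaving the reported size
+ * unchanged.
+ */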
+TEST_BEGIN(test_no_move_fail)
+{
+ void *p;
+ size_t sz, tsz;
+
+ p = mallocx(42, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ sz = sallocx(p, 0);
+
+ tsz = xallocx(p, sz + 5, 0, 0);
+ assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
+
+ dallocx(p, 0);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_same_size,
+ test_extra_no_move,
+ test_no_move_fail));
+}
diff --git a/deps/jemalloc/test/jemalloc_test.h.in b/deps/jemalloc/test/jemalloc_test.h.in
deleted file mode 100644
index e38b48efa..000000000
--- a/deps/jemalloc/test/jemalloc_test.h.in
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * This header should be included by tests, rather than directly including
- * jemalloc/jemalloc.h, because --with-install-suffix may cause the header to
- * have a different name.
- */
-#include "jemalloc/jemalloc@install_suffix@.h"
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/* Abstraction layer for threading in tests */
-#ifdef _WIN32
-#include <windows.h>
-
-typedef HANDLE je_thread_t;
-
-void
-je_thread_create(je_thread_t *thread, void *(*proc)(void *), void *arg)
-{
- LPTHREAD_START_ROUTINE routine = (LPTHREAD_START_ROUTINE)proc;
- *thread = CreateThread(NULL, 0, routine, arg, 0, NULL);
- if (*thread == NULL) {
- malloc_printf("Error in CreateThread()\n");
- exit(1);
- }
-}
-
-void
-je_thread_join(je_thread_t thread, void **ret)
-{
- WaitForSingleObject(thread, INFINITE);
-}
-
-#else
-#include <pthread.h>
-
-typedef pthread_t je_thread_t;
-
-void
-je_thread_create(je_thread_t *thread, void *(*proc)(void *), void *arg)
-{
-
- if (pthread_create(thread, NULL, proc, arg) != 0) {
- malloc_printf("Error in pthread_create()\n");
- exit(1);
- }
-}
-
-void
-je_thread_join(je_thread_t thread, void **ret)
-{
-
- pthread_join(thread, ret);
-}
-#endif
diff --git a/deps/jemalloc/test/mremap.c b/deps/jemalloc/test/mremap.c
deleted file mode 100644
index 47efa7c41..000000000
--- a/deps/jemalloc/test/mremap.c
+++ /dev/null
@@ -1,60 +0,0 @@
-#define JEMALLOC_MANGLE
-#include "jemalloc_test.h"
-
-int
-main(void)
-{
- int ret, err;
- size_t sz, lg_chunk, chunksize, i;
- char *p, *q;
-
- malloc_printf("Test begin\n");
-
- sz = sizeof(lg_chunk);
- if ((err = mallctl("opt.lg_chunk", &lg_chunk, &sz, NULL, 0))) {
- assert(err != ENOENT);
- malloc_printf("%s(): Error in mallctl(): %s\n", __func__,
- strerror(err));
- ret = 1;
- goto label_return;
- }
- chunksize = ((size_t)1U) << lg_chunk;
-
- p = (char *)malloc(chunksize);
- if (p == NULL) {
- malloc_printf("malloc(%zu) --> %p\n", chunksize, p);
- ret = 1;
- goto label_return;
- }
- memset(p, 'a', chunksize);
-
- q = (char *)realloc(p, chunksize * 2);
- if (q == NULL) {
- malloc_printf("realloc(%p, %zu) --> %p\n", p, chunksize * 2,
- q);
- ret = 1;
- goto label_return;
- }
- for (i = 0; i < chunksize; i++) {
- assert(q[i] == 'a');
- }
-
- p = q;
-
- q = (char *)realloc(p, chunksize);
- if (q == NULL) {
- malloc_printf("realloc(%p, %zu) --> %p\n", p, chunksize, q);
- ret = 1;
- goto label_return;
- }
- for (i = 0; i < chunksize; i++) {
- assert(q[i] == 'a');
- }
-
- free(q);
-
- ret = 0;
-label_return:
- malloc_printf("Test end\n");
- return (ret);
-}
diff --git a/deps/jemalloc/test/mremap.exp b/deps/jemalloc/test/mremap.exp
deleted file mode 100644
index 369a88dd2..000000000
--- a/deps/jemalloc/test/mremap.exp
+++ /dev/null
@@ -1,2 +0,0 @@
-Test begin
-Test end
diff --git a/deps/jemalloc/test/posix_memalign.c b/deps/jemalloc/test/posix_memalign.c
deleted file mode 100644
index 2185bcf76..000000000
--- a/deps/jemalloc/test/posix_memalign.c
+++ /dev/null
@@ -1,115 +0,0 @@
-#define JEMALLOC_MANGLE
-#include "jemalloc_test.h"
-
-#define CHUNK 0x400000
-/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */
-#define MAXALIGN ((size_t)0x2000000LU)
-#define NITER 4
-
-int
-main(void)
-{
- size_t alignment, size, total;
- unsigned i;
- int err;
- void *p, *ps[NITER];
-
- malloc_printf("Test begin\n");
-
- /* Test error conditions. */
- for (alignment = 0; alignment < sizeof(void *); alignment++) {
- err = posix_memalign(&p, alignment, 1);
- if (err != EINVAL) {
- malloc_printf(
- "Expected error for invalid alignment %zu\n",
- alignment);
- }
- }
-
- for (alignment = sizeof(size_t); alignment < MAXALIGN;
- alignment <<= 1) {
- err = posix_memalign(&p, alignment + 1, 1);
- if (err == 0) {
- malloc_printf(
- "Expected error for invalid alignment %zu\n",
- alignment + 1);
- }
- }
-
-#if LG_SIZEOF_PTR == 3
- alignment = UINT64_C(0x8000000000000000);
- size = UINT64_C(0x8000000000000000);
-#else
- alignment = 0x80000000LU;
- size = 0x80000000LU;
-#endif
- err = posix_memalign(&p, alignment, size);
- if (err == 0) {
- malloc_printf(
- "Expected error for posix_memalign(&p, %zu, %zu)\n",
- alignment, size);
- }
-
-#if LG_SIZEOF_PTR == 3
- alignment = UINT64_C(0x4000000000000000);
- size = UINT64_C(0x8400000000000001);
-#else
- alignment = 0x40000000LU;
- size = 0x84000001LU;
-#endif
- err = posix_memalign(&p, alignment, size);
- if (err == 0) {
- malloc_printf(
- "Expected error for posix_memalign(&p, %zu, %zu)\n",
- alignment, size);
- }
-
- alignment = 0x10LU;
-#if LG_SIZEOF_PTR == 3
- size = UINT64_C(0xfffffffffffffff0);
-#else
- size = 0xfffffff0LU;
-#endif
- err = posix_memalign(&p, alignment, size);
- if (err == 0) {
- malloc_printf(
- "Expected error for posix_memalign(&p, %zu, %zu)\n",
- alignment, size);
- }
-
- for (i = 0; i < NITER; i++)
- ps[i] = NULL;
-
- for (alignment = 8;
- alignment <= MAXALIGN;
- alignment <<= 1) {
- total = 0;
- malloc_printf("Alignment: %zu\n", alignment);
- for (size = 1;
- size < 3 * alignment && size < (1U << 31);
- size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
- for (i = 0; i < NITER; i++) {
- err = posix_memalign(&ps[i],
- alignment, size);
- if (err) {
- malloc_printf(
- "Error for size %zu (%#zx): %s\n",
- size, size, strerror(err));
- exit(1);
- }
- total += malloc_usable_size(ps[i]);
- if (total >= (MAXALIGN << 1))
- break;
- }
- for (i = 0; i < NITER; i++) {
- if (ps[i] != NULL) {
- free(ps[i]);
- ps[i] = NULL;
- }
- }
- }
- }
-
- malloc_printf("Test end\n");
- return (0);
-}
diff --git a/deps/jemalloc/test/posix_memalign.exp b/deps/jemalloc/test/posix_memalign.exp
deleted file mode 100644
index b5061c727..000000000
--- a/deps/jemalloc/test/posix_memalign.exp
+++ /dev/null
@@ -1,25 +0,0 @@
-Test begin
-Alignment: 8
-Alignment: 16
-Alignment: 32
-Alignment: 64
-Alignment: 128
-Alignment: 256
-Alignment: 512
-Alignment: 1024
-Alignment: 2048
-Alignment: 4096
-Alignment: 8192
-Alignment: 16384
-Alignment: 32768
-Alignment: 65536
-Alignment: 131072
-Alignment: 262144
-Alignment: 524288
-Alignment: 1048576
-Alignment: 2097152
-Alignment: 4194304
-Alignment: 8388608
-Alignment: 16777216
-Alignment: 33554432
-Test end
diff --git a/deps/jemalloc/test/rallocm.c b/deps/jemalloc/test/rallocm.c
deleted file mode 100644
index c5dedf48d..000000000
--- a/deps/jemalloc/test/rallocm.c
+++ /dev/null
@@ -1,127 +0,0 @@
-#define JEMALLOC_MANGLE
-#include "jemalloc_test.h"
-
-int
-main(void)
-{
- size_t pagesize;
- void *p, *q;
- size_t sz, tsz;
- int r;
-
- malloc_printf("Test begin\n");
-
- /* Get page size. */
- {
-#ifdef _WIN32
- SYSTEM_INFO si;
- GetSystemInfo(&si);
- pagesize = (size_t)si.dwPageSize;
-#else
- long result = sysconf(_SC_PAGESIZE);
- assert(result != -1);
- pagesize = (size_t)result;
-#endif
- }
-
- r = allocm(&p, &sz, 42, 0);
- if (r != ALLOCM_SUCCESS) {
- malloc_printf("Unexpected allocm() error\n");
- abort();
- }
-
- q = p;
- r = rallocm(&q, &tsz, sz, 0, ALLOCM_NO_MOVE);
- if (r != ALLOCM_SUCCESS)
- malloc_printf("Unexpected rallocm() error\n");
- if (q != p)
- malloc_printf("Unexpected object move\n");
- if (tsz != sz) {
- malloc_printf("Unexpected size change: %zu --> %zu\n",
- sz, tsz);
- }
-
- q = p;
- r = rallocm(&q, &tsz, sz, 5, ALLOCM_NO_MOVE);
- if (r != ALLOCM_SUCCESS)
- malloc_printf("Unexpected rallocm() error\n");
- if (q != p)
- malloc_printf("Unexpected object move\n");
- if (tsz != sz) {
- malloc_printf("Unexpected size change: %zu --> %zu\n",
- sz, tsz);
- }
-
- q = p;
- r = rallocm(&q, &tsz, sz + 5, 0, ALLOCM_NO_MOVE);
- if (r != ALLOCM_ERR_NOT_MOVED)
- malloc_printf("Unexpected rallocm() result\n");
- if (q != p)
- malloc_printf("Unexpected object move\n");
- if (tsz != sz) {
- malloc_printf("Unexpected size change: %zu --> %zu\n",
- sz, tsz);
- }
-
- q = p;
- r = rallocm(&q, &tsz, sz + 5, 0, 0);
- if (r != ALLOCM_SUCCESS)
- malloc_printf("Unexpected rallocm() error\n");
- if (q == p)
- malloc_printf("Expected object move\n");
- if (tsz == sz) {
- malloc_printf("Expected size change: %zu --> %zu\n",
- sz, tsz);
- }
- p = q;
- sz = tsz;
-
- r = rallocm(&q, &tsz, pagesize*2, 0, 0);
- if (r != ALLOCM_SUCCESS)
- malloc_printf("Unexpected rallocm() error\n");
- if (q == p)
- malloc_printf("Expected object move\n");
- if (tsz == sz) {
- malloc_printf("Expected size change: %zu --> %zu\n",
- sz, tsz);
- }
- p = q;
- sz = tsz;
-
- r = rallocm(&q, &tsz, pagesize*4, 0, 0);
- if (r != ALLOCM_SUCCESS)
- malloc_printf("Unexpected rallocm() error\n");
- if (tsz == sz) {
- malloc_printf("Expected size change: %zu --> %zu\n",
- sz, tsz);
- }
- p = q;
- sz = tsz;
-
- r = rallocm(&q, &tsz, pagesize*2, 0, ALLOCM_NO_MOVE);
- if (r != ALLOCM_SUCCESS)
- malloc_printf("Unexpected rallocm() error\n");
- if (q != p)
- malloc_printf("Unexpected object move\n");
- if (tsz == sz) {
- malloc_printf("Expected size change: %zu --> %zu\n",
- sz, tsz);
- }
- sz = tsz;
-
- r = rallocm(&q, &tsz, pagesize*4, 0, ALLOCM_NO_MOVE);
- if (r != ALLOCM_SUCCESS)
- malloc_printf("Unexpected rallocm() error\n");
- if (q != p)
- malloc_printf("Unexpected object move\n");
- if (tsz == sz) {
- malloc_printf("Expected size change: %zu --> %zu\n",
- sz, tsz);
- }
- sz = tsz;
-
- dallocm(p, 0);
-
- malloc_printf("Test end\n");
- return (0);
-}
diff --git a/deps/jemalloc/test/rallocm.exp b/deps/jemalloc/test/rallocm.exp
deleted file mode 100644
index 369a88dd2..000000000
--- a/deps/jemalloc/test/rallocm.exp
+++ /dev/null
@@ -1,2 +0,0 @@
-Test begin
-Test end
diff --git a/deps/jemalloc/test/src/SFMT.c b/deps/jemalloc/test/src/SFMT.c
new file mode 100644
index 000000000..e6f8deecb
--- /dev/null
+++ b/deps/jemalloc/test/src/SFMT.c
@@ -0,0 +1,719 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/**
+ * @file SFMT.c
+ * @brief SIMD oriented Fast Mersenne Twister(SFMT)
+ *
+ * @author Mutsuo Saito (Hiroshima University)
+ * @author Makoto Matsumoto (Hiroshima University)
+ *
+ * Copyright (C) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * The new BSD License is applied to this software, see LICENSE.txt
+ */
+#define SFMT_C_
+#include "test/jemalloc_test.h"
+#include "test/SFMT-params.h"
+
+#if defined(JEMALLOC_BIG_ENDIAN) && !defined(BIG_ENDIAN64)
+#define BIG_ENDIAN64 1
+#endif
+#if defined(__BIG_ENDIAN__) && !defined(__amd64) && !defined(BIG_ENDIAN64)
+#define BIG_ENDIAN64 1
+#endif
+#if defined(HAVE_ALTIVEC) && !defined(BIG_ENDIAN64)
+#define BIG_ENDIAN64 1
+#endif
+#if defined(ONLY64) && !defined(BIG_ENDIAN64)
+ #if defined(__GNUC__)
+ #error "-DONLY64 must be specified with -DBIG_ENDIAN64"
+ #endif
+#undef ONLY64
+#endif
+/*------------------------------------------------------
+ 128-bit SIMD data type for Altivec, SSE2 or standard C
+ ------------------------------------------------------*/
+#if defined(HAVE_ALTIVEC)
+/** 128-bit data structure */
+union W128_T {
+ vector unsigned int s;
+ uint32_t u[4];
+};
+/** 128-bit data type */
+typedef union W128_T w128_t;
+
+#elif defined(HAVE_SSE2)
+/** 128-bit data structure */
+union W128_T {
+ __m128i si;
+ uint32_t u[4];
+};
+/** 128-bit data type */
+typedef union W128_T w128_t;
+
+#else
+
+/** 128-bit data structure */
+struct W128_T {
+ uint32_t u[4];
+};
+/** 128-bit data type */
+typedef struct W128_T w128_t;
+
+#endif
+
+struct sfmt_s {
+ /** the 128-bit internal state array */
+ w128_t sfmt[N];
+ /** index counter to the 32-bit internal state array */
+ int idx;
+ /** a flag: it is 0 if and only if the internal state is not yet
+ * initialized. */
+ int initialized;
+};
+
+/*--------------------------------------
+ FILE GLOBAL VARIABLES
+ internal state, index counter and flag
+ --------------------------------------*/
+
+/** a parity check vector which certifies the period of 2^{MEXP} */
+static uint32_t parity[4] = {PARITY1, PARITY2, PARITY3, PARITY4};
+
+/*----------------
+ STATIC FUNCTIONS
+ ----------------*/
+JEMALLOC_INLINE_C int idxof(int i);
+#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
+JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift);
+JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift);
+#endif
+JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx);
+JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size);
+JEMALLOC_INLINE_C uint32_t func1(uint32_t x);
+JEMALLOC_INLINE_C uint32_t func2(uint32_t x);
+static void period_certification(sfmt_t *ctx);
+#if defined(BIG_ENDIAN64) && !defined(ONLY64)
+JEMALLOC_INLINE_C void swap(w128_t *array, int size);
+#endif
+
+#if defined(HAVE_ALTIVEC)
+ #include "test/SFMT-alti.h"
+#elif defined(HAVE_SSE2)
+ #include "test/SFMT-sse2.h"
+#endif
+
+/**
+ * This function simulates a 64-bit index of LITTLE ENDIAN
+ * on a BIG ENDIAN machine.
+ */
+#ifdef ONLY64
+JEMALLOC_INLINE_C int idxof(int i) {
+ return i ^ 1;
+}
+#else
+JEMALLOC_INLINE_C int idxof(int i) {
+ return i;
+}
+#endif
+/**
+ * This function simulates SIMD 128-bit right shift by the standard C.
+ * The 128-bit integer given in \b in is shifted by (shift * 8) bits.
+ * This function simulates the LITTLE ENDIAN SIMD.
+ * @param out the output of this function
+ * @param in the 128-bit data to be shifted
+ * @param shift the shift value
+ */
+#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
+#ifdef ONLY64
+JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) {
+ uint64_t th, tl, oh, ol;
+
+ th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]);
+ tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]);
+
+ oh = th >> (shift * 8);
+ ol = tl >> (shift * 8);
+ ol |= th << (64 - shift * 8);
+ out->u[0] = (uint32_t)(ol >> 32);
+ out->u[1] = (uint32_t)ol;
+ out->u[2] = (uint32_t)(oh >> 32);
+ out->u[3] = (uint32_t)oh;
+}
+#else
+JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) {
+ uint64_t th, tl, oh, ol;
+
+ th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]);
+ tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]);
+
+ oh = th >> (shift * 8);
+ ol = tl >> (shift * 8);
+ ol |= th << (64 - shift * 8);
+ out->u[1] = (uint32_t)(ol >> 32);
+ out->u[0] = (uint32_t)ol;
+ out->u[3] = (uint32_t)(oh >> 32);
+ out->u[2] = (uint32_t)oh;
+}
+#endif
+/**
+ * This function simulates SIMD 128-bit left shift by the standard C.
+ * The 128-bit integer given in \b in is shifted by (shift * 8) bits.
+ * This function simulates the LITTLE ENDIAN SIMD.
+ * @param out the output of this function
+ * @param in the 128-bit data to be shifted
+ * @param shift the shift value
+ */
+#ifdef ONLY64
+JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) {
+ uint64_t th, tl, oh, ol;
+
+ th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]);
+ tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]);
+
+ oh = th << (shift * 8);
+ ol = tl << (shift * 8);
+ oh |= tl >> (64 - shift * 8);
+ out->u[0] = (uint32_t)(ol >> 32);
+ out->u[1] = (uint32_t)ol;
+ out->u[2] = (uint32_t)(oh >> 32);
+ out->u[3] = (uint32_t)oh;
+}
+#else
+JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) {
+ uint64_t th, tl, oh, ol;
+
+ th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]);
+ tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]);
+
+ oh = th << (shift * 8);
+ ol = tl << (shift * 8);
+ oh |= tl >> (64 - shift * 8);
+ out->u[1] = (uint32_t)(ol >> 32);
+ out->u[0] = (uint32_t)ol;
+ out->u[3] = (uint32_t)(oh >> 32);
+ out->u[2] = (uint32_t)oh;
+}
+#endif
+#endif
+
+/**
+ * This function represents the recursion formula.
+ * @param r output
+ * @param a a 128-bit part of the internal state array
+ * @param b a 128-bit part of the internal state array
+ * @param c a 128-bit part of the internal state array
+ * @param d a 128-bit part of the internal state array
+ */
+#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
+#ifdef ONLY64
+JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c,
+ w128_t *d) {
+ w128_t x;
+ w128_t y;
+
+ lshift128(&x, a, SL2);
+ rshift128(&y, c, SR2);
+ r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK2) ^ y.u[0]
+ ^ (d->u[0] << SL1);
+ r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK1) ^ y.u[1]
+ ^ (d->u[1] << SL1);
+ r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK4) ^ y.u[2]
+ ^ (d->u[2] << SL1);
+ r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK3) ^ y.u[3]
+ ^ (d->u[3] << SL1);
+}
+#else
+JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c,
+ w128_t *d) {
+ w128_t x;
+ w128_t y;
+
+ lshift128(&x, a, SL2);
+ rshift128(&y, c, SR2);
+ r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK1) ^ y.u[0]
+ ^ (d->u[0] << SL1);
+ r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK2) ^ y.u[1]
+ ^ (d->u[1] << SL1);
+ r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK3) ^ y.u[2]
+ ^ (d->u[2] << SL1);
+ r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK4) ^ y.u[3]
+ ^ (d->u[3] << SL1);
+}
+#endif
+#endif
+
+#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
+/**
+ * This function fills the internal state array with pseudorandom
+ * integers.
+ */
+JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx) {
+ int i;
+ w128_t *r1, *r2;
+
+ r1 = &ctx->sfmt[N - 2];
+ r2 = &ctx->sfmt[N - 1];
+ for (i = 0; i < N - POS1; i++) {
+ do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1,
+ r2);
+ r1 = r2;
+ r2 = &ctx->sfmt[i];
+ }
+ for (; i < N; i++) {
+ do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1 - N], r1,
+ r2);
+ r1 = r2;
+ r2 = &ctx->sfmt[i];
+ }
+}
+
+/**
+ * This function fills the user-specified array with pseudorandom
+ * integers.
+ *
+ * @param array a 128-bit array to be filled by pseudorandom numbers.
+ * @param size number of 128-bit pseudorandom numbers to be generated.
+ */
+JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
+ int i, j;
+ w128_t *r1, *r2;
+
+ r1 = &ctx->sfmt[N - 2];
+ r2 = &ctx->sfmt[N - 1];
+ for (i = 0; i < N - POS1; i++) {
+ do_recursion(&array[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1, r2);
+ r1 = r2;
+ r2 = &array[i];
+ }
+ for (; i < N; i++) {
+ do_recursion(&array[i], &ctx->sfmt[i], &array[i + POS1 - N], r1, r2);
+ r1 = r2;
+ r2 = &array[i];
+ }
+ for (; i < size - N; i++) {
+ do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2);
+ r1 = r2;
+ r2 = &array[i];
+ }
+ for (j = 0; j < 2 * N - size; j++) {
+ ctx->sfmt[j] = array[j + size - N];
+ }
+ for (; i < size; i++, j++) {
+ do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2);
+ r1 = r2;
+ r2 = &array[i];
+ ctx->sfmt[j] = array[i];
+ }
+}
+#endif
+
+#if defined(BIG_ENDIAN64) && !defined(ONLY64) && !defined(HAVE_ALTIVEC)
+JEMALLOC_INLINE_C void swap(w128_t *array, int size) {
+ int i;
+ uint32_t x, y;
+
+ for (i = 0; i < size; i++) {
+ x = array[i].u[0];
+ y = array[i].u[2];
+ array[i].u[0] = array[i].u[1];
+ array[i].u[2] = array[i].u[3];
+ array[i].u[1] = x;
+ array[i].u[3] = y;
+ }
+}
+#endif
+/**
+ * This function represents a function used in the initialization
+ * by init_by_array
+ * @param x 32-bit integer
+ * @return 32-bit integer
+ */
+static uint32_t func1(uint32_t x) {
+ return (x ^ (x >> 27)) * (uint32_t)1664525UL;
+}
+
+/**
+ * This function represents a function used in the initialization
+ * by init_by_array
+ * @param x 32-bit integer
+ * @return 32-bit integer
+ */
+static uint32_t func2(uint32_t x) {
+ return (x ^ (x >> 27)) * (uint32_t)1566083941UL;
+}
+
+/**
+ * This function certifies the period of 2^{MEXP}
+ */
+static void period_certification(sfmt_t *ctx) {
+ int inner = 0;
+ int i, j;
+ uint32_t work;
+ uint32_t *psfmt32 = &ctx->sfmt[0].u[0];
+
+ for (i = 0; i < 4; i++)
+ inner ^= psfmt32[idxof(i)] & parity[i];
+ for (i = 16; i > 0; i >>= 1)
+ inner ^= inner >> i;
+ inner &= 1;
+ /* check OK */
+ if (inner == 1) {
+ return;
+ }
+ /* check NG, and modification */
+ for (i = 0; i < 4; i++) {
+ work = 1;
+ for (j = 0; j < 32; j++) {
+ if ((work & parity[i]) != 0) {
+ psfmt32[idxof(i)] ^= work;
+ return;
+ }
+ work = work << 1;
+ }
+ }
+}
+
+/*----------------
+ PUBLIC FUNCTIONS
+ ----------------*/
+/**
+ * This function returns the identification string.
+ * The string shows the word size, the Mersenne exponent,
+ * and all parameters of this generator.
+ */
+const char *get_idstring(void) {
+ return IDSTR;
+}
+
+/**
+ * This function returns the minimum size of array used for \b
+ * fill_array32() function.
+ * @return minimum size of array used for fill_array32() function.
+ */
+int get_min_array_size32(void) {
+ return N32;
+}
+
+/**
+ * This function returns the minimum size of array used for \b
+ * fill_array64() function.
+ * @return minimum size of array used for fill_array64() function.
+ */
+int get_min_array_size64(void) {
+ return N64;
+}
+
+#ifndef ONLY64
+/**
+ * This function generates and returns a 32-bit pseudorandom number.
+ * init_gen_rand or init_by_array must be called before this function.
+ * @return 32-bit pseudorandom number
+ */
+uint32_t gen_rand32(sfmt_t *ctx) {
+ uint32_t r;
+ uint32_t *psfmt32 = &ctx->sfmt[0].u[0];
+
+ assert(ctx->initialized);
+ if (ctx->idx >= N32) {
+ gen_rand_all(ctx);
+ ctx->idx = 0;
+ }
+ r = psfmt32[ctx->idx++];
+ return r;
+}
+
+/* Generate a random integer in [0..limit). */
+uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit) {
+ uint32_t ret, above;
+
+ above = 0xffffffffU - (0xffffffffU % limit);
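+ /*
+  * Rejection sampling: values at or above 'above' are redrawn so that the
+  * final modulo reduction remains (essentially) unbiased.
+  */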
+ while (1) {
+ ret = gen_rand32(ctx);
+ if (ret < above) {
+ ret %= limit;
+ break;
+ }
+ }
+ return ret;
+}
+#endif
+/**
+ * This function generates and returns a 64-bit pseudorandom number.
+ * init_gen_rand or init_by_array must be called before this function.
+ * The function gen_rand64 should not be called after gen_rand32,
+ * unless an initialization is again executed.
+ * @return 64-bit pseudorandom number
+ */
+uint64_t gen_rand64(sfmt_t *ctx) {
+#if defined(BIG_ENDIAN64) && !defined(ONLY64)
+ uint32_t r1, r2;
+ uint32_t *psfmt32 = &ctx->sfmt[0].u[0];
+#else
+ uint64_t r;
+ uint64_t *psfmt64 = (uint64_t *)&ctx->sfmt[0].u[0];
+#endif
+
+ assert(ctx->initialized);
+ assert(ctx->idx % 2 == 0);
+
+ if (ctx->idx >= N32) {
+ gen_rand_all(ctx);
+ ctx->idx = 0;
+ }
+#if defined(BIG_ENDIAN64) && !defined(ONLY64)
+ r1 = psfmt32[ctx->idx];
+ r2 = psfmt32[ctx->idx + 1];
+ ctx->idx += 2;
+ return ((uint64_t)r2 << 32) | r1;
+#else
+ r = psfmt64[ctx->idx / 2];
+ ctx->idx += 2;
+ return r;
+#endif
+}
+
+/* Generate a random integer in [0..limit). */
+uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit) {
+ uint64_t ret, above;
+
+ above = 0xffffffffffffffffLLU - (0xffffffffffffffffLLU % limit);
+ while (1) {
+ ret = gen_rand64(ctx);
+ if (ret < above) {
+ ret %= limit;
+ break;
+ }
+ }
+ return ret;
+}
+
+#ifndef ONLY64
+/**
+ * This function generates pseudorandom 32-bit integers in the
+ * specified array[] by one call. The number of pseudorandom integers
+ * is specified by the argument size, which must be at least 624 and a
+ * multiple of four. The generation by this function is much faster
+ * than the following gen_rand function.
+ *
+ * For initialization, init_gen_rand or init_by_array must be called
+ * before the first call of this function. This function cannot be
+ * used after calling a gen_rand function without re-initialization.
+ *
+ * @param array an array where pseudorandom 32-bit integers are filled
+ * by this function. The pointer to the array must be \b "aligned"
+ * (namely, must be a multiple of 16) in the SIMD version, since it
+ * refers to the address of a 128-bit integer. In the standard C
+ * version, the pointer is arbitrary.
+ *
+ * @param size the number of 32-bit pseudorandom integers to be
+ * generated. size must be a multiple of 4, and greater than or equal
+ * to (MEXP / 128 + 1) * 4.
+ *
+ * @note \b memalign or \b posix_memalign is available to get aligned
+ * memory. Mac OSX doesn't have these functions, but \b malloc of OSX
+ * returns the pointer to the aligned memory block.
+ */
+void fill_array32(sfmt_t *ctx, uint32_t *array, int size) {
+ assert(ctx->initialized);
+ assert(ctx->idx == N32);
+ assert(size % 4 == 0);
+ assert(size >= N32);
+
+ gen_rand_array(ctx, (w128_t *)array, size / 4);
+ ctx->idx = N32;
+}
+#endif
+
+/**
+ * This function generates pseudorandom 64-bit integers in the
+ * specified array[] by one call. The number of pseudorandom integers
+ * is specified by the argument size, which must be at least 312 and a
+ * multiple of two. The generation by this function is much faster
+ * than the following gen_rand function.
+ *
+ * For initialization, init_gen_rand or init_by_array must be called
+ * before the first call of this function. This function cannot be
+ * used after calling a gen_rand function without re-initialization.
+ *
+ * @param array an array where pseudorandom 64-bit integers are filled
+ * by this function. The pointer to the array must be "aligned"
+ * (namely, must be a multiple of 16) in the SIMD version, since it
+ * refers to the address of a 128-bit integer. In the standard C
+ * version, the pointer is arbitrary.
+ *
+ * @param size the number of 64-bit pseudorandom integers to be
+ * generated. size must be a multiple of 2, and greater than or equal
+ * to (MEXP / 128 + 1) * 2
+ *
+ * @note \b memalign or \b posix_memalign is available to get aligned
+ * memory. Mac OSX doesn't have these functions, but \b malloc of OSX
+ * returns the pointer to the aligned memory block.
+ */
+void fill_array64(sfmt_t *ctx, uint64_t *array, int size) {
+ assert(ctx->initialized);
+ assert(ctx->idx == N32);
+ assert(size % 2 == 0);
+ assert(size >= N64);
+
+ gen_rand_array(ctx, (w128_t *)array, size / 2);
+ ctx->idx = N32;
+
+#if defined(BIG_ENDIAN64) && !defined(ONLY64)
+ swap((w128_t *)array, size /2);
+#endif
+}
+
+/**
+ * This function initializes the internal state array with a 32-bit
+ * integer seed.
+ *
+ * @param seed a 32-bit integer used as the seed.
+ */
+sfmt_t *init_gen_rand(uint32_t seed) {
+ void *p;
+ sfmt_t *ctx;
+ int i;
+ uint32_t *psfmt32;
+
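+ /*
+  * The state must be aligned to sizeof(w128_t) (16 bytes) so that the
+  * SSE2/AltiVec code paths can operate on it directly; posix_memalign()
+  * provides that alignment.
+  */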
+ if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) {
+ return NULL;
+ }
+ ctx = (sfmt_t *)p;
+ psfmt32 = &ctx->sfmt[0].u[0];
+
+ psfmt32[idxof(0)] = seed;
+ for (i = 1; i < N32; i++) {
+ psfmt32[idxof(i)] = 1812433253UL * (psfmt32[idxof(i - 1)]
+ ^ (psfmt32[idxof(i - 1)] >> 30))
+ + i;
+ }
+ ctx->idx = N32;
+ period_certification(ctx);
+ ctx->initialized = 1;
+
+ return ctx;
+}
+
+/**
+ * This function initializes the internal state array,
+ * with an array of 32-bit integers used as the seeds
+ * @param init_key the array of 32-bit integers, used as a seed.
+ * @param key_length the length of init_key.
+ */
+sfmt_t *init_by_array(uint32_t *init_key, int key_length) {
+ void *p;
+ sfmt_t *ctx;
+ int i, j, count;
+ uint32_t r;
+ int lag;
+ int mid;
+ int size = N * 4;
+ uint32_t *psfmt32;
+
+ if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) {
+ return NULL;
+ }
+ ctx = (sfmt_t *)p;
+ psfmt32 = &ctx->sfmt[0].u[0];
+
+ if (size >= 623) {
+ lag = 11;
+ } else if (size >= 68) {
+ lag = 7;
+ } else if (size >= 39) {
+ lag = 5;
+ } else {
+ lag = 3;
+ }
+ mid = (size - lag) / 2;
+
+ memset(ctx->sfmt, 0x8b, sizeof(ctx->sfmt));
+ if (key_length + 1 > N32) {
+ count = key_length + 1;
+ } else {
+ count = N32;
+ }
+ r = func1(psfmt32[idxof(0)] ^ psfmt32[idxof(mid)]
+ ^ psfmt32[idxof(N32 - 1)]);
+ psfmt32[idxof(mid)] += r;
+ r += key_length;
+ psfmt32[idxof(mid + lag)] += r;
+ psfmt32[idxof(0)] = r;
+
+ count--;
+ for (i = 1, j = 0; (j < count) && (j < key_length); j++) {
+ r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)]
+ ^ psfmt32[idxof((i + N32 - 1) % N32)]);
+ psfmt32[idxof((i + mid) % N32)] += r;
+ r += init_key[j] + i;
+ psfmt32[idxof((i + mid + lag) % N32)] += r;
+ psfmt32[idxof(i)] = r;
+ i = (i + 1) % N32;
+ }
+ for (; j < count; j++) {
+ r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)]
+ ^ psfmt32[idxof((i + N32 - 1) % N32)]);
+ psfmt32[idxof((i + mid) % N32)] += r;
+ r += i;
+ psfmt32[idxof((i + mid + lag) % N32)] += r;
+ psfmt32[idxof(i)] = r;
+ i = (i + 1) % N32;
+ }
+ for (j = 0; j < N32; j++) {
+ r = func2(psfmt32[idxof(i)] + psfmt32[idxof((i + mid) % N32)]
+ + psfmt32[idxof((i + N32 - 1) % N32)]);
+ psfmt32[idxof((i + mid) % N32)] ^= r;
+ r -= i;
+ psfmt32[idxof((i + mid + lag) % N32)] ^= r;
+ psfmt32[idxof(i)] = r;
+ i = (i + 1) % N32;
+ }
+
+ ctx->idx = N32;
+ period_certification(ctx);
+ ctx->initialized = 1;
+
+ return ctx;
+}
+
+void fini_gen_rand(sfmt_t *ctx) {
+ assert(ctx != NULL);
+
+ ctx->initialized = 0;
+ free(ctx);
+}
diff --git a/deps/jemalloc/test/src/math.c b/deps/jemalloc/test/src/math.c
new file mode 100644
index 000000000..887a36390
--- /dev/null
+++ b/deps/jemalloc/test/src/math.c
@@ -0,0 +1,2 @@
+#define MATH_C_
+#include "test/jemalloc_test.h"
diff --git a/deps/jemalloc/test/src/mtx.c b/deps/jemalloc/test/src/mtx.c
new file mode 100644
index 000000000..41b95d59d
--- /dev/null
+++ b/deps/jemalloc/test/src/mtx.c
@@ -0,0 +1,62 @@
+#include "test/jemalloc_test.h"
+
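+/*
+ * Minimal mutex wrappers over the platform lock primitives used by the
+ * tests; mtx_init() returns true on failure.
+ */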
+bool
+mtx_init(mtx_t *mtx)
+{
+
+#ifdef _WIN32
+ if (!InitializeCriticalSectionAndSpinCount(&mtx->lock, _CRT_SPINCOUNT))
+ return (true);
+#elif (defined(JEMALLOC_OSSPIN))
+ mtx->lock = 0;
+#else
+ pthread_mutexattr_t attr;
+
+ if (pthread_mutexattr_init(&attr) != 0)
+ return (true);
+ pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT);
+ if (pthread_mutex_init(&mtx->lock, &attr) != 0) {
+ pthread_mutexattr_destroy(&attr);
+ return (true);
+ }
+ pthread_mutexattr_destroy(&attr);
+#endif
+ return (false);
+}
+
+void
+mtx_fini(mtx_t *mtx)
+{
+
+#ifdef _WIN32
+#elif (defined(JEMALLOC_OSSPIN))
+#else
+ pthread_mutex_destroy(&mtx->lock);
+#endif
+}
+
+void
+mtx_lock(mtx_t *mtx)
+{
+
+#ifdef _WIN32
+ EnterCriticalSection(&mtx->lock);
+#elif (defined(JEMALLOC_OSSPIN))
+ OSSpinLockLock(&mtx->lock);
+#else
+ pthread_mutex_lock(&mtx->lock);
+#endif
+}
+
+void
+mtx_unlock(mtx_t *mtx)
+{
+
+#ifdef _WIN32
+ LeaveCriticalSection(&mtx->lock);
+#elif (defined(JEMALLOC_OSSPIN))
+ OSSpinLockUnlock(&mtx->lock);
+#else
+ pthread_mutex_unlock(&mtx->lock);
+#endif
+}
diff --git a/deps/jemalloc/test/src/test.c b/deps/jemalloc/test/src/test.c
new file mode 100644
index 000000000..528d85831
--- /dev/null
+++ b/deps/jemalloc/test/src/test.c
@@ -0,0 +1,94 @@
+#include "test/jemalloc_test.h"
+
+static unsigned test_count = 0;
+static test_status_t test_counts[test_status_count] = {0, 0, 0};
+static test_status_t test_status = test_status_pass;
+static const char * test_name = "";
+
+JEMALLOC_ATTR(format(printf, 1, 2))
+void
+test_skip(const char *format, ...)
+{
+ va_list ap;
+
+ va_start(ap, format);
+ malloc_vcprintf(NULL, NULL, format, ap);
+ va_end(ap);
+ malloc_printf("\n");
+ test_status = test_status_skip;
+}
+
+JEMALLOC_ATTR(format(printf, 1, 2))
+void
+test_fail(const char *format, ...)
+{
+ va_list ap;
+
+ va_start(ap, format);
+ malloc_vcprintf(NULL, NULL, format, ap);
+ va_end(ap);
+ malloc_printf("\n");
+ test_status = test_status_fail;
+}
+
+static const char *
+test_status_string(test_status_t test_status)
+{
+
+ switch (test_status) {
+ case test_status_pass: return "pass";
+ case test_status_skip: return "skip";
+ case test_status_fail: return "fail";
+ default: not_reached();
+ }
+}
+
+void
+p_test_init(const char *name)
+{
+
+ test_count++;
+ test_status = test_status_pass;
+ test_name = name;
+}
+
+void
+p_test_fini(void)
+{
+
+ test_counts[test_status]++;
+ malloc_printf("%s: %s\n", test_name, test_status_string(test_status));
+}
+
+test_status_t
+p_test(test_t* t, ...)
+{
+ test_status_t ret = test_status_pass;
+ va_list ap;
+
+ va_start(ap, t);
+ for (; t != NULL; t = va_arg(ap, test_t*)) {
+ t();
+ if (test_status > ret)
+ ret = test_status;
+ }
+ va_end(ap);
+
+ malloc_printf("--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n",
+ test_status_string(test_status_pass),
+ test_counts[test_status_pass], test_count,
+ test_status_string(test_status_skip),
+ test_counts[test_status_skip], test_count,
+ test_status_string(test_status_fail),
+ test_counts[test_status_fail], test_count);
+
+ return (ret);
+}
+
+void
+p_test_fail(const char *prefix, const char *message)
+{
+
+ malloc_cprintf(NULL, NULL, "%s%s\n", prefix, message);
+ test_status = test_status_fail;
+}
diff --git a/deps/jemalloc/test/src/thd.c b/deps/jemalloc/test/src/thd.c
new file mode 100644
index 000000000..233242a16
--- /dev/null
+++ b/deps/jemalloc/test/src/thd.c
@@ -0,0 +1,35 @@
+#include "test/jemalloc_test.h"
+
+#ifdef _WIN32
+void
+thd_create(thd_t *thd, void *(*proc)(void *), void *arg)
+{
+ LPTHREAD_START_ROUTINE routine = (LPTHREAD_START_ROUTINE)proc;
+ *thd = CreateThread(NULL, 0, routine, arg, 0, NULL);
+ if (*thd == NULL)
+ test_fail("Error in CreateThread()\n");
+}
+
+void
+thd_join(thd_t thd, void **ret)
+{
+
+ WaitForSingleObject(thd, INFINITE);
+}
+
+#else
+void
+thd_create(thd_t *thd, void *(*proc)(void *), void *arg)
+{
+
+ if (pthread_create(thd, NULL, proc, arg) != 0)
+ test_fail("Error in pthread_create()\n");
+}
+
+void
+thd_join(thd_t thd, void **ret)
+{
+
+ pthread_join(thd, ret);
+}
+#endif
diff --git a/deps/jemalloc/test/test.sh.in b/deps/jemalloc/test/test.sh.in
new file mode 100644
index 000000000..a39f99f6b
--- /dev/null
+++ b/deps/jemalloc/test/test.sh.in
@@ -0,0 +1,53 @@
+#!/bin/sh
+
+case @abi@ in
+ macho)
+ export DYLD_FALLBACK_LIBRARY_PATH="@objroot@lib"
+ ;;
+ pecoff)
+ export PATH="${PATH}:@objroot@lib"
+ ;;
+ *)
+ ;;
+esac
+
+# Corresponds to test_status_t.
+pass_code=0
+skip_code=1
+fail_code=2
+
+pass_count=0
+skip_count=0
+fail_count=0
+for t in $@; do
+ if [ $pass_count -ne 0 -o $skip_count -ne 0 -o $fail_count != 0 ] ; then
+ echo
+ fi
+ echo "=== ${t} ==="
+ ${t}@exe@ @abs_srcroot@ @abs_objroot@
+ result_code=$?
+ case ${result_code} in
+ ${pass_code})
+ pass_count=$((pass_count+1))
+ ;;
+ ${skip_code})
+ skip_count=$((skip_count+1))
+ ;;
+ ${fail_code})
+ fail_count=$((fail_count+1))
+ ;;
+ *)
+ echo "Test harness error" 1>&2
+ exit 1
+ esac
+done
+
+total_count=`expr ${pass_count} + ${skip_count} + ${fail_count}`
+echo
+echo "Test suite summary: pass: ${pass_count}/${total_count}, skip: ${skip_count}/${total_count}, fail: ${fail_count}/${total_count}"
+
+if [ ${fail_count} -eq 0 ] ; then
+ exit 0
+else
+ exit 1
+fi
diff --git a/deps/jemalloc/test/thread_arena.c b/deps/jemalloc/test/thread_arena.c
deleted file mode 100644
index 2ffdb5e80..000000000
--- a/deps/jemalloc/test/thread_arena.c
+++ /dev/null
@@ -1,80 +0,0 @@
-#define JEMALLOC_MANGLE
-#include "jemalloc_test.h"
-
-#define NTHREADS 10
-
-void *
-je_thread_start(void *arg)
-{
- unsigned main_arena_ind = *(unsigned *)arg;
- void *p;
- unsigned arena_ind;
- size_t size;
- int err;
-
- p = malloc(1);
- if (p == NULL) {
- malloc_printf("%s(): Error in malloc()\n", __func__);
- return (void *)1;
- }
-
- size = sizeof(arena_ind);
- if ((err = mallctl("thread.arena", &arena_ind, &size, &main_arena_ind,
- sizeof(main_arena_ind)))) {
- malloc_printf("%s(): Error in mallctl(): %s\n", __func__,
- strerror(err));
- return (void *)1;
- }
-
- size = sizeof(arena_ind);
- if ((err = mallctl("thread.arena", &arena_ind, &size, NULL,
- 0))) {
- malloc_printf("%s(): Error in mallctl(): %s\n", __func__,
- strerror(err));
- return (void *)1;
- }
- assert(arena_ind == main_arena_ind);
-
- return (NULL);
-}
-
-int
-main(void)
-{
- int ret = 0;
- void *p;
- unsigned arena_ind;
- size_t size;
- int err;
- je_thread_t threads[NTHREADS];
- unsigned i;
-
- malloc_printf("Test begin\n");
-
- p = malloc(1);
- if (p == NULL) {
- malloc_printf("%s(): Error in malloc()\n", __func__);
- ret = 1;
- goto label_return;
- }
-
- size = sizeof(arena_ind);
- if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) {
- malloc_printf("%s(): Error in mallctl(): %s\n", __func__,
- strerror(err));
- ret = 1;
- goto label_return;
- }
-
- for (i = 0; i < NTHREADS; i++) {
- je_thread_create(&threads[i], je_thread_start,
- (void *)&arena_ind);
- }
-
- for (i = 0; i < NTHREADS; i++)
- je_thread_join(threads[i], (void *)&ret);
-
-label_return:
- malloc_printf("Test end\n");
- return (ret);
-}
diff --git a/deps/jemalloc/test/thread_arena.exp b/deps/jemalloc/test/thread_arena.exp
deleted file mode 100644
index 369a88dd2..000000000
--- a/deps/jemalloc/test/thread_arena.exp
+++ /dev/null
@@ -1,2 +0,0 @@
-Test begin
-Test end
diff --git a/deps/jemalloc/test/thread_tcache_enabled.c b/deps/jemalloc/test/thread_tcache_enabled.c
deleted file mode 100644
index 2061b7bba..000000000
--- a/deps/jemalloc/test/thread_tcache_enabled.c
+++ /dev/null
@@ -1,91 +0,0 @@
-#define JEMALLOC_MANGLE
-#include "jemalloc_test.h"
-
-void *
-je_thread_start(void *arg)
-{
- int err;
- size_t sz;
- bool e0, e1;
-
- sz = sizeof(bool);
- if ((err = mallctl("thread.tcache.enabled", &e0, &sz, NULL, 0))) {
- if (err == ENOENT) {
-#ifdef JEMALLOC_TCACHE
- assert(false);
-#endif
- }
- goto label_return;
- }
-
- if (e0) {
- e1 = false;
- assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz)
- == 0);
- assert(e0);
- }
-
- e1 = true;
- assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0);
- assert(e0 == false);
-
- e1 = true;
- assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0);
- assert(e0);
-
- e1 = false;
- assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0);
- assert(e0);
-
- e1 = false;
- assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0);
- assert(e0 == false);
-
- free(malloc(1));
- e1 = true;
- assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0);
- assert(e0 == false);
-
- free(malloc(1));
- e1 = true;
- assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0);
- assert(e0);
-
- free(malloc(1));
- e1 = false;
- assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0);
- assert(e0);
-
- free(malloc(1));
- e1 = false;
- assert(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz) == 0);
- assert(e0 == false);
-
- free(malloc(1));
-label_return:
- return (NULL);
-}
-
-int
-main(void)
-{
- int ret = 0;
- je_thread_t thread;
-
- malloc_printf("Test begin\n");
-
- je_thread_start(NULL);
-
- je_thread_create(&thread, je_thread_start, NULL);
- je_thread_join(thread, (void *)&ret);
-
- je_thread_start(NULL);
-
- je_thread_create(&thread, je_thread_start, NULL);
- je_thread_join(thread, (void *)&ret);
-
- je_thread_start(NULL);
-
- malloc_printf("Test end\n");
- return (ret);
-}
diff --git a/deps/jemalloc/test/thread_tcache_enabled.exp b/deps/jemalloc/test/thread_tcache_enabled.exp
deleted file mode 100644
index 369a88dd2..000000000
--- a/deps/jemalloc/test/thread_tcache_enabled.exp
+++ /dev/null
@@ -1,2 +0,0 @@
-Test begin
-Test end
diff --git a/deps/jemalloc/test/unit/SFMT.c b/deps/jemalloc/test/unit/SFMT.c
new file mode 100644
index 000000000..c57bd68df
--- /dev/null
+++ b/deps/jemalloc/test/unit/SFMT.c
@@ -0,0 +1,1605 @@
+/*
+ * This file derives from SFMT 1.3.3
+ * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
+ * released under the terms of the following license:
+ *
+ * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Hiroshima University nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "test/jemalloc_test.h"
+
+#define BLOCK_SIZE 10000
+#define BLOCK_SIZE64 (BLOCK_SIZE / 2)
+#define COUNT_1 1000
+#define COUNT_2 700
+
+static const uint32_t init_gen_rand_32_expected[] = {
+ 3440181298U, 1564997079U, 1510669302U, 2930277156U, 1452439940U,
+ 3796268453U, 423124208U, 2143818589U, 3827219408U, 2987036003U,
+ 2674978610U, 1536842514U, 2027035537U, 2534897563U, 1686527725U,
+ 545368292U, 1489013321U, 1370534252U, 4231012796U, 3994803019U,
+ 1764869045U, 824597505U, 862581900U, 2469764249U, 812862514U,
+ 359318673U, 116957936U, 3367389672U, 2327178354U, 1898245200U,
+ 3206507879U, 2378925033U, 1040214787U, 2524778605U, 3088428700U,
+ 1417665896U, 964324147U, 2282797708U, 2456269299U, 313400376U,
+ 2245093271U, 1015729427U, 2694465011U, 3246975184U, 1992793635U,
+ 463679346U, 3721104591U, 3475064196U, 856141236U, 1499559719U,
+ 3522818941U, 3721533109U, 1954826617U, 1282044024U, 1543279136U,
+ 1301863085U, 2669145051U, 4221477354U, 3896016841U, 3392740262U,
+ 462466863U, 1037679449U, 1228140306U, 922298197U, 1205109853U,
+ 1872938061U, 3102547608U, 2742766808U, 1888626088U, 4028039414U,
+ 157593879U, 1136901695U, 4038377686U, 3572517236U, 4231706728U,
+ 2997311961U, 1189931652U, 3981543765U, 2826166703U, 87159245U,
+ 1721379072U, 3897926942U, 1790395498U, 2569178939U, 1047368729U,
+ 2340259131U, 3144212906U, 2301169789U, 2442885464U, 3034046771U,
+ 3667880593U, 3935928400U, 2372805237U, 1666397115U, 2460584504U,
+ 513866770U, 3810869743U, 2147400037U, 2792078025U, 2941761810U,
+ 3212265810U, 984692259U, 346590253U, 1804179199U, 3298543443U,
+ 750108141U, 2880257022U, 243310542U, 1869036465U, 1588062513U,
+ 2983949551U, 1931450364U, 4034505847U, 2735030199U, 1628461061U,
+ 2539522841U, 127965585U, 3992448871U, 913388237U, 559130076U,
+ 1202933193U, 4087643167U, 2590021067U, 2256240196U, 1746697293U,
+ 1013913783U, 1155864921U, 2715773730U, 915061862U, 1948766573U,
+ 2322882854U, 3761119102U, 1343405684U, 3078711943U, 3067431651U,
+ 3245156316U, 3588354584U, 3484623306U, 3899621563U, 4156689741U,
+ 3237090058U, 3880063844U, 862416318U, 4039923869U, 2303788317U,
+ 3073590536U, 701653667U, 2131530884U, 3169309950U, 2028486980U,
+ 747196777U, 3620218225U, 432016035U, 1449580595U, 2772266392U,
+ 444224948U, 1662832057U, 3184055582U, 3028331792U, 1861686254U,
+ 1104864179U, 342430307U, 1350510923U, 3024656237U, 1028417492U,
+ 2870772950U, 290847558U, 3675663500U, 508431529U, 4264340390U,
+ 2263569913U, 1669302976U, 519511383U, 2706411211U, 3764615828U,
+ 3883162495U, 4051445305U, 2412729798U, 3299405164U, 3991911166U,
+ 2348767304U, 2664054906U, 3763609282U, 593943581U, 3757090046U,
+ 2075338894U, 2020550814U, 4287452920U, 4290140003U, 1422957317U,
+ 2512716667U, 2003485045U, 2307520103U, 2288472169U, 3940751663U,
+ 4204638664U, 2892583423U, 1710068300U, 3904755993U, 2363243951U,
+ 3038334120U, 547099465U, 771105860U, 3199983734U, 4282046461U,
+ 2298388363U, 934810218U, 2837827901U, 3952500708U, 2095130248U,
+ 3083335297U, 26885281U, 3932155283U, 1531751116U, 1425227133U,
+ 495654159U, 3279634176U, 3855562207U, 3957195338U, 4159985527U,
+ 893375062U, 1875515536U, 1327247422U, 3754140693U, 1028923197U,
+ 1729880440U, 805571298U, 448971099U, 2726757106U, 2749436461U,
+ 2485987104U, 175337042U, 3235477922U, 3882114302U, 2020970972U,
+ 943926109U, 2762587195U, 1904195558U, 3452650564U, 108432281U,
+ 3893463573U, 3977583081U, 2636504348U, 1110673525U, 3548479841U,
+ 4258854744U, 980047703U, 4057175418U, 3890008292U, 145653646U,
+ 3141868989U, 3293216228U, 1194331837U, 1254570642U, 3049934521U,
+ 2868313360U, 2886032750U, 1110873820U, 279553524U, 3007258565U,
+ 1104807822U, 3186961098U, 315764646U, 2163680838U, 3574508994U,
+ 3099755655U, 191957684U, 3642656737U, 3317946149U, 3522087636U,
+ 444526410U, 779157624U, 1088229627U, 1092460223U, 1856013765U,
+ 3659877367U, 368270451U, 503570716U, 3000984671U, 2742789647U,
+ 928097709U, 2914109539U, 308843566U, 2816161253U, 3667192079U,
+ 2762679057U, 3395240989U, 2928925038U, 1491465914U, 3458702834U,
+ 3787782576U, 2894104823U, 1296880455U, 1253636503U, 989959407U,
+ 2291560361U, 2776790436U, 1913178042U, 1584677829U, 689637520U,
+ 1898406878U, 688391508U, 3385234998U, 845493284U, 1943591856U,
+ 2720472050U, 222695101U, 1653320868U, 2904632120U, 4084936008U,
+ 1080720688U, 3938032556U, 387896427U, 2650839632U, 99042991U,
+ 1720913794U, 1047186003U, 1877048040U, 2090457659U, 517087501U,
+ 4172014665U, 2129713163U, 2413533132U, 2760285054U, 4129272496U,
+ 1317737175U, 2309566414U, 2228873332U, 3889671280U, 1110864630U,
+ 3576797776U, 2074552772U, 832002644U, 3097122623U, 2464859298U,
+ 2679603822U, 1667489885U, 3237652716U, 1478413938U, 1719340335U,
+ 2306631119U, 639727358U, 3369698270U, 226902796U, 2099920751U,
+ 1892289957U, 2201594097U, 3508197013U, 3495811856U, 3900381493U,
+ 841660320U, 3974501451U, 3360949056U, 1676829340U, 728899254U,
+ 2047809627U, 2390948962U, 670165943U, 3412951831U, 4189320049U,
+ 1911595255U, 2055363086U, 507170575U, 418219594U, 4141495280U,
+ 2692088692U, 4203630654U, 3540093932U, 791986533U, 2237921051U,
+ 2526864324U, 2956616642U, 1394958700U, 1983768223U, 1893373266U,
+ 591653646U, 228432437U, 1611046598U, 3007736357U, 1040040725U,
+ 2726180733U, 2789804360U, 4263568405U, 829098158U, 3847722805U,
+ 1123578029U, 1804276347U, 997971319U, 4203797076U, 4185199713U,
+ 2811733626U, 2343642194U, 2985262313U, 1417930827U, 3759587724U,
+ 1967077982U, 1585223204U, 1097475516U, 1903944948U, 740382444U,
+ 1114142065U, 1541796065U, 1718384172U, 1544076191U, 1134682254U,
+ 3519754455U, 2866243923U, 341865437U, 645498576U, 2690735853U,
+ 1046963033U, 2493178460U, 1187604696U, 1619577821U, 488503634U,
+ 3255768161U, 2306666149U, 1630514044U, 2377698367U, 2751503746U,
+ 3794467088U, 1796415981U, 3657173746U, 409136296U, 1387122342U,
+ 1297726519U, 219544855U, 4270285558U, 437578827U, 1444698679U,
+ 2258519491U, 963109892U, 3982244073U, 3351535275U, 385328496U,
+ 1804784013U, 698059346U, 3920535147U, 708331212U, 784338163U,
+ 785678147U, 1238376158U, 1557298846U, 2037809321U, 271576218U,
+ 4145155269U, 1913481602U, 2763691931U, 588981080U, 1201098051U,
+ 3717640232U, 1509206239U, 662536967U, 3180523616U, 1133105435U,
+ 2963500837U, 2253971215U, 3153642623U, 1066925709U, 2582781958U,
+ 3034720222U, 1090798544U, 2942170004U, 4036187520U, 686972531U,
+ 2610990302U, 2641437026U, 1837562420U, 722096247U, 1315333033U,
+ 2102231203U, 3402389208U, 3403698140U, 1312402831U, 2898426558U,
+ 814384596U, 385649582U, 1916643285U, 1924625106U, 2512905582U,
+ 2501170304U, 4275223366U, 2841225246U, 1467663688U, 3563567847U,
+ 2969208552U, 884750901U, 102992576U, 227844301U, 3681442994U,
+ 3502881894U, 4034693299U, 1166727018U, 1697460687U, 1737778332U,
+ 1787161139U, 1053003655U, 1215024478U, 2791616766U, 2525841204U,
+ 1629323443U, 3233815U, 2003823032U, 3083834263U, 2379264872U,
+ 3752392312U, 1287475550U, 3770904171U, 3004244617U, 1502117784U,
+ 918698423U, 2419857538U, 3864502062U, 1751322107U, 2188775056U,
+ 4018728324U, 983712955U, 440071928U, 3710838677U, 2001027698U,
+ 3994702151U, 22493119U, 3584400918U, 3446253670U, 4254789085U,
+ 1405447860U, 1240245579U, 1800644159U, 1661363424U, 3278326132U,
+ 3403623451U, 67092802U, 2609352193U, 3914150340U, 1814842761U,
+ 3610830847U, 591531412U, 3880232807U, 1673505890U, 2585326991U,
+ 1678544474U, 3148435887U, 3457217359U, 1193226330U, 2816576908U,
+ 154025329U, 121678860U, 1164915738U, 973873761U, 269116100U,
+ 52087970U, 744015362U, 498556057U, 94298882U, 1563271621U,
+ 2383059628U, 4197367290U, 3958472990U, 2592083636U, 2906408439U,
+ 1097742433U, 3924840517U, 264557272U, 2292287003U, 3203307984U,
+ 4047038857U, 3820609705U, 2333416067U, 1839206046U, 3600944252U,
+ 3412254904U, 583538222U, 2390557166U, 4140459427U, 2810357445U,
+ 226777499U, 2496151295U, 2207301712U, 3283683112U, 611630281U,
+ 1933218215U, 3315610954U, 3889441987U, 3719454256U, 3957190521U,
+ 1313998161U, 2365383016U, 3146941060U, 1801206260U, 796124080U,
+ 2076248581U, 1747472464U, 3254365145U, 595543130U, 3573909503U,
+ 3758250204U, 2020768540U, 2439254210U, 93368951U, 3155792250U,
+ 2600232980U, 3709198295U, 3894900440U, 2971850836U, 1578909644U,
+ 1443493395U, 2581621665U, 3086506297U, 2443465861U, 558107211U,
+ 1519367835U, 249149686U, 908102264U, 2588765675U, 1232743965U,
+ 1001330373U, 3561331654U, 2259301289U, 1564977624U, 3835077093U,
+ 727244906U, 4255738067U, 1214133513U, 2570786021U, 3899704621U,
+ 1633861986U, 1636979509U, 1438500431U, 58463278U, 2823485629U,
+ 2297430187U, 2926781924U, 3371352948U, 1864009023U, 2722267973U,
+ 1444292075U, 437703973U, 1060414512U, 189705863U, 910018135U,
+ 4077357964U, 884213423U, 2644986052U, 3973488374U, 1187906116U,
+ 2331207875U, 780463700U, 3713351662U, 3854611290U, 412805574U,
+ 2978462572U, 2176222820U, 829424696U, 2790788332U, 2750819108U,
+ 1594611657U, 3899878394U, 3032870364U, 1702887682U, 1948167778U,
+ 14130042U, 192292500U, 947227076U, 90719497U, 3854230320U,
+ 784028434U, 2142399787U, 1563449646U, 2844400217U, 819143172U,
+ 2883302356U, 2328055304U, 1328532246U, 2603885363U, 3375188924U,
+ 933941291U, 3627039714U, 2129697284U, 2167253953U, 2506905438U,
+ 1412424497U, 2981395985U, 1418359660U, 2925902456U, 52752784U,
+ 3713667988U, 3924669405U, 648975707U, 1145520213U, 4018650664U,
+ 3805915440U, 2380542088U, 2013260958U, 3262572197U, 2465078101U,
+ 1114540067U, 3728768081U, 2396958768U, 590672271U, 904818725U,
+ 4263660715U, 700754408U, 1042601829U, 4094111823U, 4274838909U,
+ 2512692617U, 2774300207U, 2057306915U, 3470942453U, 99333088U,
+ 1142661026U, 2889931380U, 14316674U, 2201179167U, 415289459U,
+ 448265759U, 3515142743U, 3254903683U, 246633281U, 1184307224U,
+ 2418347830U, 2092967314U, 2682072314U, 2558750234U, 2000352263U,
+ 1544150531U, 399010405U, 1513946097U, 499682937U, 461167460U,
+ 3045570638U, 1633669705U, 851492362U, 4052801922U, 2055266765U,
+ 635556996U, 368266356U, 2385737383U, 3218202352U, 2603772408U,
+ 349178792U, 226482567U, 3102426060U, 3575998268U, 2103001871U,
+ 3243137071U, 225500688U, 1634718593U, 4283311431U, 4292122923U,
+ 3842802787U, 811735523U, 105712518U, 663434053U, 1855889273U,
+ 2847972595U, 1196355421U, 2552150115U, 4254510614U, 3752181265U,
+ 3430721819U, 3828705396U, 3436287905U, 3441964937U, 4123670631U,
+ 353001539U, 459496439U, 3799690868U, 1293777660U, 2761079737U,
+ 498096339U, 3398433374U, 4080378380U, 2304691596U, 2995729055U,
+ 4134660419U, 3903444024U, 3576494993U, 203682175U, 3321164857U,
+ 2747963611U, 79749085U, 2992890370U, 1240278549U, 1772175713U,
+ 2111331972U, 2655023449U, 1683896345U, 2836027212U, 3482868021U,
+ 2489884874U, 756853961U, 2298874501U, 4013448667U, 4143996022U,
+ 2948306858U, 4132920035U, 1283299272U, 995592228U, 3450508595U,
+ 1027845759U, 1766942720U, 3861411826U, 1446861231U, 95974993U,
+ 3502263554U, 1487532194U, 601502472U, 4129619129U, 250131773U,
+ 2050079547U, 3198903947U, 3105589778U, 4066481316U, 3026383978U,
+ 2276901713U, 365637751U, 2260718426U, 1394775634U, 1791172338U,
+ 2690503163U, 2952737846U, 1568710462U, 732623190U, 2980358000U,
+ 1053631832U, 1432426951U, 3229149635U, 1854113985U, 3719733532U,
+ 3204031934U, 735775531U, 107468620U, 3734611984U, 631009402U,
+ 3083622457U, 4109580626U, 159373458U, 1301970201U, 4132389302U,
+ 1293255004U, 847182752U, 4170022737U, 96712900U, 2641406755U,
+ 1381727755U, 405608287U, 4287919625U, 1703554290U, 3589580244U,
+ 2911403488U, 2166565U, 2647306451U, 2330535117U, 1200815358U,
+ 1165916754U, 245060911U, 4040679071U, 3684908771U, 2452834126U,
+ 2486872773U, 2318678365U, 2940627908U, 1837837240U, 3447897409U,
+ 4270484676U, 1495388728U, 3754288477U, 4204167884U, 1386977705U,
+ 2692224733U, 3076249689U, 4109568048U, 4170955115U, 4167531356U,
+ 4020189950U, 4261855038U, 3036907575U, 3410399885U, 3076395737U,
+ 1046178638U, 144496770U, 230725846U, 3349637149U, 17065717U,
+ 2809932048U, 2054581785U, 3608424964U, 3259628808U, 134897388U,
+ 3743067463U, 257685904U, 3795656590U, 1562468719U, 3589103904U,
+ 3120404710U, 254684547U, 2653661580U, 3663904795U, 2631942758U,
+ 1063234347U, 2609732900U, 2332080715U, 3521125233U, 1180599599U,
+ 1935868586U, 4110970440U, 296706371U, 2128666368U, 1319875791U,
+ 1570900197U, 3096025483U, 1799882517U, 1928302007U, 1163707758U,
+ 1244491489U, 3533770203U, 567496053U, 2757924305U, 2781639343U,
+ 2818420107U, 560404889U, 2619609724U, 4176035430U, 2511289753U,
+ 2521842019U, 3910553502U, 2926149387U, 3302078172U, 4237118867U,
+ 330725126U, 367400677U, 888239854U, 545570454U, 4259590525U,
+ 134343617U, 1102169784U, 1647463719U, 3260979784U, 1518840883U,
+ 3631537963U, 3342671457U, 1301549147U, 2083739356U, 146593792U,
+ 3217959080U, 652755743U, 2032187193U, 3898758414U, 1021358093U,
+ 4037409230U, 2176407931U, 3427391950U, 2883553603U, 985613827U,
+ 3105265092U, 3423168427U, 3387507672U, 467170288U, 2141266163U,
+ 3723870208U, 916410914U, 1293987799U, 2652584950U, 769160137U,
+ 3205292896U, 1561287359U, 1684510084U, 3136055621U, 3765171391U,
+ 639683232U, 2639569327U, 1218546948U, 4263586685U, 3058215773U,
+ 2352279820U, 401870217U, 2625822463U, 1529125296U, 2981801895U,
+ 1191285226U, 4027725437U, 3432700217U, 4098835661U, 971182783U,
+ 2443861173U, 3881457123U, 3874386651U, 457276199U, 2638294160U,
+ 4002809368U, 421169044U, 1112642589U, 3076213779U, 3387033971U,
+ 2499610950U, 3057240914U, 1662679783U, 461224431U, 1168395933U
+};
+static const uint32_t init_by_array_32_expected[] = {
+ 2920711183U, 3885745737U, 3501893680U, 856470934U, 1421864068U,
+ 277361036U, 1518638004U, 2328404353U, 3355513634U, 64329189U,
+ 1624587673U, 3508467182U, 2481792141U, 3706480799U, 1925859037U,
+ 2913275699U, 882658412U, 384641219U, 422202002U, 1873384891U,
+ 2006084383U, 3924929912U, 1636718106U, 3108838742U, 1245465724U,
+ 4195470535U, 779207191U, 1577721373U, 1390469554U, 2928648150U,
+ 121399709U, 3170839019U, 4044347501U, 953953814U, 3821710850U,
+ 3085591323U, 3666535579U, 3577837737U, 2012008410U, 3565417471U,
+ 4044408017U, 433600965U, 1637785608U, 1798509764U, 860770589U,
+ 3081466273U, 3982393409U, 2451928325U, 3437124742U, 4093828739U,
+ 3357389386U, 2154596123U, 496568176U, 2650035164U, 2472361850U,
+ 3438299U, 2150366101U, 1577256676U, 3802546413U, 1787774626U,
+ 4078331588U, 3706103141U, 170391138U, 3806085154U, 1680970100U,
+ 1961637521U, 3316029766U, 890610272U, 1453751581U, 1430283664U,
+ 3051057411U, 3597003186U, 542563954U, 3796490244U, 1690016688U,
+ 3448752238U, 440702173U, 347290497U, 1121336647U, 2540588620U,
+ 280881896U, 2495136428U, 213707396U, 15104824U, 2946180358U,
+ 659000016U, 566379385U, 2614030979U, 2855760170U, 334526548U,
+ 2315569495U, 2729518615U, 564745877U, 1263517638U, 3157185798U,
+ 1604852056U, 1011639885U, 2950579535U, 2524219188U, 312951012U,
+ 1528896652U, 1327861054U, 2846910138U, 3966855905U, 2536721582U,
+ 855353911U, 1685434729U, 3303978929U, 1624872055U, 4020329649U,
+ 3164802143U, 1642802700U, 1957727869U, 1792352426U, 3334618929U,
+ 2631577923U, 3027156164U, 842334259U, 3353446843U, 1226432104U,
+ 1742801369U, 3552852535U, 3471698828U, 1653910186U, 3380330939U,
+ 2313782701U, 3351007196U, 2129839995U, 1800682418U, 4085884420U,
+ 1625156629U, 3669701987U, 615211810U, 3294791649U, 4131143784U,
+ 2590843588U, 3207422808U, 3275066464U, 561592872U, 3957205738U,
+ 3396578098U, 48410678U, 3505556445U, 1005764855U, 3920606528U,
+ 2936980473U, 2378918600U, 2404449845U, 1649515163U, 701203563U,
+ 3705256349U, 83714199U, 3586854132U, 922978446U, 2863406304U,
+ 3523398907U, 2606864832U, 2385399361U, 3171757816U, 4262841009U,
+ 3645837721U, 1169579486U, 3666433897U, 3174689479U, 1457866976U,
+ 3803895110U, 3346639145U, 1907224409U, 1978473712U, 1036712794U,
+ 980754888U, 1302782359U, 1765252468U, 459245755U, 3728923860U,
+ 1512894209U, 2046491914U, 207860527U, 514188684U, 2288713615U,
+ 1597354672U, 3349636117U, 2357291114U, 3995796221U, 945364213U,
+ 1893326518U, 3770814016U, 1691552714U, 2397527410U, 967486361U,
+ 776416472U, 4197661421U, 951150819U, 1852770983U, 4044624181U,
+ 1399439738U, 4194455275U, 2284037669U, 1550734958U, 3321078108U,
+ 1865235926U, 2912129961U, 2664980877U, 1357572033U, 2600196436U,
+ 2486728200U, 2372668724U, 1567316966U, 2374111491U, 1839843570U,
+ 20815612U, 3727008608U, 3871996229U, 824061249U, 1932503978U,
+ 3404541726U, 758428924U, 2609331364U, 1223966026U, 1299179808U,
+ 648499352U, 2180134401U, 880821170U, 3781130950U, 113491270U,
+ 1032413764U, 4185884695U, 2490396037U, 1201932817U, 4060951446U,
+ 4165586898U, 1629813212U, 2887821158U, 415045333U, 628926856U,
+ 2193466079U, 3391843445U, 2227540681U, 1907099846U, 2848448395U,
+ 1717828221U, 1372704537U, 1707549841U, 2294058813U, 2101214437U,
+ 2052479531U, 1695809164U, 3176587306U, 2632770465U, 81634404U,
+ 1603220563U, 644238487U, 302857763U, 897352968U, 2613146653U,
+ 1391730149U, 4245717312U, 4191828749U, 1948492526U, 2618174230U,
+ 3992984522U, 2178852787U, 3596044509U, 3445573503U, 2026614616U,
+ 915763564U, 3415689334U, 2532153403U, 3879661562U, 2215027417U,
+ 3111154986U, 2929478371U, 668346391U, 1152241381U, 2632029711U,
+ 3004150659U, 2135025926U, 948690501U, 2799119116U, 4228829406U,
+ 1981197489U, 4209064138U, 684318751U, 3459397845U, 201790843U,
+ 4022541136U, 3043635877U, 492509624U, 3263466772U, 1509148086U,
+ 921459029U, 3198857146U, 705479721U, 3835966910U, 3603356465U,
+ 576159741U, 1742849431U, 594214882U, 2055294343U, 3634861861U,
+ 449571793U, 3246390646U, 3868232151U, 1479156585U, 2900125656U,
+ 2464815318U, 3960178104U, 1784261920U, 18311476U, 3627135050U,
+ 644609697U, 424968996U, 919890700U, 2986824110U, 816423214U,
+ 4003562844U, 1392714305U, 1757384428U, 2569030598U, 995949559U,
+ 3875659880U, 2933807823U, 2752536860U, 2993858466U, 4030558899U,
+ 2770783427U, 2775406005U, 2777781742U, 1931292655U, 472147933U,
+ 3865853827U, 2726470545U, 2668412860U, 2887008249U, 408979190U,
+ 3578063323U, 3242082049U, 1778193530U, 27981909U, 2362826515U,
+ 389875677U, 1043878156U, 581653903U, 3830568952U, 389535942U,
+ 3713523185U, 2768373359U, 2526101582U, 1998618197U, 1160859704U,
+ 3951172488U, 1098005003U, 906275699U, 3446228002U, 2220677963U,
+ 2059306445U, 132199571U, 476838790U, 1868039399U, 3097344807U,
+ 857300945U, 396345050U, 2835919916U, 1782168828U, 1419519470U,
+ 4288137521U, 819087232U, 596301494U, 872823172U, 1526888217U,
+ 805161465U, 1116186205U, 2829002754U, 2352620120U, 620121516U,
+ 354159268U, 3601949785U, 209568138U, 1352371732U, 2145977349U,
+ 4236871834U, 1539414078U, 3558126206U, 3224857093U, 4164166682U,
+ 3817553440U, 3301780278U, 2682696837U, 3734994768U, 1370950260U,
+ 1477421202U, 2521315749U, 1330148125U, 1261554731U, 2769143688U,
+ 3554756293U, 4235882678U, 3254686059U, 3530579953U, 1215452615U,
+ 3574970923U, 4057131421U, 589224178U, 1000098193U, 171190718U,
+ 2521852045U, 2351447494U, 2284441580U, 2646685513U, 3486933563U,
+ 3789864960U, 1190528160U, 1702536782U, 1534105589U, 4262946827U,
+ 2726686826U, 3584544841U, 2348270128U, 2145092281U, 2502718509U,
+ 1027832411U, 3571171153U, 1287361161U, 4011474411U, 3241215351U,
+ 2419700818U, 971242709U, 1361975763U, 1096842482U, 3271045537U,
+ 81165449U, 612438025U, 3912966678U, 1356929810U, 733545735U,
+ 537003843U, 1282953084U, 884458241U, 588930090U, 3930269801U,
+ 2961472450U, 1219535534U, 3632251943U, 268183903U, 1441240533U,
+ 3653903360U, 3854473319U, 2259087390U, 2548293048U, 2022641195U,
+ 2105543911U, 1764085217U, 3246183186U, 482438805U, 888317895U,
+ 2628314765U, 2466219854U, 717546004U, 2322237039U, 416725234U,
+ 1544049923U, 1797944973U, 3398652364U, 3111909456U, 485742908U,
+ 2277491072U, 1056355088U, 3181001278U, 129695079U, 2693624550U,
+ 1764438564U, 3797785470U, 195503713U, 3266519725U, 2053389444U,
+ 1961527818U, 3400226523U, 3777903038U, 2597274307U, 4235851091U,
+ 4094406648U, 2171410785U, 1781151386U, 1378577117U, 654643266U,
+ 3424024173U, 3385813322U, 679385799U, 479380913U, 681715441U,
+ 3096225905U, 276813409U, 3854398070U, 2721105350U, 831263315U,
+ 3276280337U, 2628301522U, 3984868494U, 1466099834U, 2104922114U,
+ 1412672743U, 820330404U, 3491501010U, 942735832U, 710652807U,
+ 3972652090U, 679881088U, 40577009U, 3705286397U, 2815423480U,
+ 3566262429U, 663396513U, 3777887429U, 4016670678U, 404539370U,
+ 1142712925U, 1140173408U, 2913248352U, 2872321286U, 263751841U,
+ 3175196073U, 3162557581U, 2878996619U, 75498548U, 3836833140U,
+ 3284664959U, 1157523805U, 112847376U, 207855609U, 1337979698U,
+ 1222578451U, 157107174U, 901174378U, 3883717063U, 1618632639U,
+ 1767889440U, 4264698824U, 1582999313U, 884471997U, 2508825098U,
+ 3756370771U, 2457213553U, 3565776881U, 3709583214U, 915609601U,
+ 460833524U, 1091049576U, 85522880U, 2553251U, 132102809U,
+ 2429882442U, 2562084610U, 1386507633U, 4112471229U, 21965213U,
+ 1981516006U, 2418435617U, 3054872091U, 4251511224U, 2025783543U,
+ 1916911512U, 2454491136U, 3938440891U, 3825869115U, 1121698605U,
+ 3463052265U, 802340101U, 1912886800U, 4031997367U, 3550640406U,
+ 1596096923U, 610150600U, 431464457U, 2541325046U, 486478003U,
+ 739704936U, 2862696430U, 3037903166U, 1129749694U, 2611481261U,
+ 1228993498U, 510075548U, 3424962587U, 2458689681U, 818934833U,
+ 4233309125U, 1608196251U, 3419476016U, 1858543939U, 2682166524U,
+ 3317854285U, 631986188U, 3008214764U, 613826412U, 3567358221U,
+ 3512343882U, 1552467474U, 3316162670U, 1275841024U, 4142173454U,
+ 565267881U, 768644821U, 198310105U, 2396688616U, 1837659011U,
+ 203429334U, 854539004U, 4235811518U, 3338304926U, 3730418692U,
+ 3852254981U, 3032046452U, 2329811860U, 2303590566U, 2696092212U,
+ 3894665932U, 145835667U, 249563655U, 1932210840U, 2431696407U,
+ 3312636759U, 214962629U, 2092026914U, 3020145527U, 4073039873U,
+ 2739105705U, 1308336752U, 855104522U, 2391715321U, 67448785U,
+ 547989482U, 854411802U, 3608633740U, 431731530U, 537375589U,
+ 3888005760U, 696099141U, 397343236U, 1864511780U, 44029739U,
+ 1729526891U, 1993398655U, 2010173426U, 2591546756U, 275223291U,
+ 1503900299U, 4217765081U, 2185635252U, 1122436015U, 3550155364U,
+ 681707194U, 3260479338U, 933579397U, 2983029282U, 2505504587U,
+ 2667410393U, 2962684490U, 4139721708U, 2658172284U, 2452602383U,
+ 2607631612U, 1344296217U, 3075398709U, 2949785295U, 1049956168U,
+ 3917185129U, 2155660174U, 3280524475U, 1503827867U, 674380765U,
+ 1918468193U, 3843983676U, 634358221U, 2538335643U, 1873351298U,
+ 3368723763U, 2129144130U, 3203528633U, 3087174986U, 2691698871U,
+ 2516284287U, 24437745U, 1118381474U, 2816314867U, 2448576035U,
+ 4281989654U, 217287825U, 165872888U, 2628995722U, 3533525116U,
+ 2721669106U, 872340568U, 3429930655U, 3309047304U, 3916704967U,
+ 3270160355U, 1348884255U, 1634797670U, 881214967U, 4259633554U,
+ 174613027U, 1103974314U, 1625224232U, 2678368291U, 1133866707U,
+ 3853082619U, 4073196549U, 1189620777U, 637238656U, 930241537U,
+ 4042750792U, 3842136042U, 2417007212U, 2524907510U, 1243036827U,
+ 1282059441U, 3764588774U, 1394459615U, 2323620015U, 1166152231U,
+ 3307479609U, 3849322257U, 3507445699U, 4247696636U, 758393720U,
+ 967665141U, 1095244571U, 1319812152U, 407678762U, 2640605208U,
+ 2170766134U, 3663594275U, 4039329364U, 2512175520U, 725523154U,
+ 2249807004U, 3312617979U, 2414634172U, 1278482215U, 349206484U,
+ 1573063308U, 1196429124U, 3873264116U, 2400067801U, 268795167U,
+ 226175489U, 2961367263U, 1968719665U, 42656370U, 1010790699U,
+ 561600615U, 2422453992U, 3082197735U, 1636700484U, 3977715296U,
+ 3125350482U, 3478021514U, 2227819446U, 1540868045U, 3061908980U,
+ 1087362407U, 3625200291U, 361937537U, 580441897U, 1520043666U,
+ 2270875402U, 1009161260U, 2502355842U, 4278769785U, 473902412U,
+ 1057239083U, 1905829039U, 1483781177U, 2080011417U, 1207494246U,
+ 1806991954U, 2194674403U, 3455972205U, 807207678U, 3655655687U,
+ 674112918U, 195425752U, 3917890095U, 1874364234U, 1837892715U,
+ 3663478166U, 1548892014U, 2570748714U, 2049929836U, 2167029704U,
+ 697543767U, 3499545023U, 3342496315U, 1725251190U, 3561387469U,
+ 2905606616U, 1580182447U, 3934525927U, 4103172792U, 1365672522U,
+ 1534795737U, 3308667416U, 2841911405U, 3943182730U, 4072020313U,
+ 3494770452U, 3332626671U, 55327267U, 478030603U, 411080625U,
+ 3419529010U, 1604767823U, 3513468014U, 570668510U, 913790824U,
+ 2283967995U, 695159462U, 3825542932U, 4150698144U, 1829758699U,
+ 202895590U, 1609122645U, 1267651008U, 2910315509U, 2511475445U,
+ 2477423819U, 3932081579U, 900879979U, 2145588390U, 2670007504U,
+ 580819444U, 1864996828U, 2526325979U, 1019124258U, 815508628U,
+ 2765933989U, 1277301341U, 3006021786U, 855540956U, 288025710U,
+ 1919594237U, 2331223864U, 177452412U, 2475870369U, 2689291749U,
+ 865194284U, 253432152U, 2628531804U, 2861208555U, 2361597573U,
+ 1653952120U, 1039661024U, 2159959078U, 3709040440U, 3564718533U,
+ 2596878672U, 2041442161U, 31164696U, 2662962485U, 3665637339U,
+ 1678115244U, 2699839832U, 3651968520U, 3521595541U, 458433303U,
+ 2423096824U, 21831741U, 380011703U, 2498168716U, 861806087U,
+ 1673574843U, 4188794405U, 2520563651U, 2632279153U, 2170465525U,
+ 4171949898U, 3886039621U, 1661344005U, 3424285243U, 992588372U,
+ 2500984144U, 2993248497U, 3590193895U, 1535327365U, 515645636U,
+ 131633450U, 3729760261U, 1613045101U, 3254194278U, 15889678U,
+ 1493590689U, 244148718U, 2991472662U, 1401629333U, 777349878U,
+ 2501401703U, 4285518317U, 3794656178U, 955526526U, 3442142820U,
+ 3970298374U, 736025417U, 2737370764U, 1271509744U, 440570731U,
+ 136141826U, 1596189518U, 923399175U, 257541519U, 3505774281U,
+ 2194358432U, 2518162991U, 1379893637U, 2667767062U, 3748146247U,
+ 1821712620U, 3923161384U, 1947811444U, 2392527197U, 4127419685U,
+ 1423694998U, 4156576871U, 1382885582U, 3420127279U, 3617499534U,
+ 2994377493U, 4038063986U, 1918458672U, 2983166794U, 4200449033U,
+ 353294540U, 1609232588U, 243926648U, 2332803291U, 507996832U,
+ 2392838793U, 4075145196U, 2060984340U, 4287475136U, 88232602U,
+ 2491531140U, 4159725633U, 2272075455U, 759298618U, 201384554U,
+ 838356250U, 1416268324U, 674476934U, 90795364U, 141672229U,
+ 3660399588U, 4196417251U, 3249270244U, 3774530247U, 59587265U,
+ 3683164208U, 19392575U, 1463123697U, 1882205379U, 293780489U,
+ 2553160622U, 2933904694U, 675638239U, 2851336944U, 1435238743U,
+ 2448730183U, 804436302U, 2119845972U, 322560608U, 4097732704U,
+ 2987802540U, 641492617U, 2575442710U, 4217822703U, 3271835300U,
+ 2836418300U, 3739921620U, 2138378768U, 2879771855U, 4294903423U,
+ 3121097946U, 2603440486U, 2560820391U, 1012930944U, 2313499967U,
+ 584489368U, 3431165766U, 897384869U, 2062537737U, 2847889234U,
+ 3742362450U, 2951174585U, 4204621084U, 1109373893U, 3668075775U,
+ 2750138839U, 3518055702U, 733072558U, 4169325400U, 788493625U
+};
+static const uint64_t init_gen_rand_64_expected[] = {
+ QU(16924766246869039260LLU), QU( 8201438687333352714LLU),
+ QU( 2265290287015001750LLU), QU(18397264611805473832LLU),
+ QU( 3375255223302384358LLU), QU( 6345559975416828796LLU),
+ QU(18229739242790328073LLU), QU( 7596792742098800905LLU),
+ QU( 255338647169685981LLU), QU( 2052747240048610300LLU),
+ QU(18328151576097299343LLU), QU(12472905421133796567LLU),
+ QU(11315245349717600863LLU), QU(16594110197775871209LLU),
+ QU(15708751964632456450LLU), QU(10452031272054632535LLU),
+ QU(11097646720811454386LLU), QU( 4556090668445745441LLU),
+ QU(17116187693090663106LLU), QU(14931526836144510645LLU),
+ QU( 9190752218020552591LLU), QU( 9625800285771901401LLU),
+ QU(13995141077659972832LLU), QU( 5194209094927829625LLU),
+ QU( 4156788379151063303LLU), QU( 8523452593770139494LLU),
+ QU(14082382103049296727LLU), QU( 2462601863986088483LLU),
+ QU( 3030583461592840678LLU), QU( 5221622077872827681LLU),
+ QU( 3084210671228981236LLU), QU(13956758381389953823LLU),
+ QU(13503889856213423831LLU), QU(15696904024189836170LLU),
+ QU( 4612584152877036206LLU), QU( 6231135538447867881LLU),
+ QU(10172457294158869468LLU), QU( 6452258628466708150LLU),
+ QU(14044432824917330221LLU), QU( 370168364480044279LLU),
+ QU(10102144686427193359LLU), QU( 667870489994776076LLU),
+ QU( 2732271956925885858LLU), QU(18027788905977284151LLU),
+ QU(15009842788582923859LLU), QU( 7136357960180199542LLU),
+ QU(15901736243475578127LLU), QU(16951293785352615701LLU),
+ QU(10551492125243691632LLU), QU(17668869969146434804LLU),
+ QU(13646002971174390445LLU), QU( 9804471050759613248LLU),
+ QU( 5511670439655935493LLU), QU(18103342091070400926LLU),
+ QU(17224512747665137533LLU), QU(15534627482992618168LLU),
+ QU( 1423813266186582647LLU), QU(15821176807932930024LLU),
+ QU( 30323369733607156LLU), QU(11599382494723479403LLU),
+ QU( 653856076586810062LLU), QU( 3176437395144899659LLU),
+ QU(14028076268147963917LLU), QU(16156398271809666195LLU),
+ QU( 3166955484848201676LLU), QU( 5746805620136919390LLU),
+ QU(17297845208891256593LLU), QU(11691653183226428483LLU),
+ QU(17900026146506981577LLU), QU(15387382115755971042LLU),
+ QU(16923567681040845943LLU), QU( 8039057517199388606LLU),
+ QU(11748409241468629263LLU), QU( 794358245539076095LLU),
+ QU(13438501964693401242LLU), QU(14036803236515618962LLU),
+ QU( 5252311215205424721LLU), QU(17806589612915509081LLU),
+ QU( 6802767092397596006LLU), QU(14212120431184557140LLU),
+ QU( 1072951366761385712LLU), QU(13098491780722836296LLU),
+ QU( 9466676828710797353LLU), QU(12673056849042830081LLU),
+ QU(12763726623645357580LLU), QU(16468961652999309493LLU),
+ QU(15305979875636438926LLU), QU(17444713151223449734LLU),
+ QU( 5692214267627883674LLU), QU(13049589139196151505LLU),
+ QU( 880115207831670745LLU), QU( 1776529075789695498LLU),
+ QU(16695225897801466485LLU), QU(10666901778795346845LLU),
+ QU( 6164389346722833869LLU), QU( 2863817793264300475LLU),
+ QU( 9464049921886304754LLU), QU( 3993566636740015468LLU),
+ QU( 9983749692528514136LLU), QU(16375286075057755211LLU),
+ QU(16042643417005440820LLU), QU(11445419662923489877LLU),
+ QU( 7999038846885158836LLU), QU( 6721913661721511535LLU),
+ QU( 5363052654139357320LLU), QU( 1817788761173584205LLU),
+ QU(13290974386445856444LLU), QU( 4650350818937984680LLU),
+ QU( 8219183528102484836LLU), QU( 1569862923500819899LLU),
+ QU( 4189359732136641860LLU), QU(14202822961683148583LLU),
+ QU( 4457498315309429058LLU), QU(13089067387019074834LLU),
+ QU(11075517153328927293LLU), QU(10277016248336668389LLU),
+ QU( 7070509725324401122LLU), QU(17808892017780289380LLU),
+ QU(13143367339909287349LLU), QU( 1377743745360085151LLU),
+ QU( 5749341807421286485LLU), QU(14832814616770931325LLU),
+ QU( 7688820635324359492LLU), QU(10960474011539770045LLU),
+ QU( 81970066653179790LLU), QU(12619476072607878022LLU),
+ QU( 4419566616271201744LLU), QU(15147917311750568503LLU),
+ QU( 5549739182852706345LLU), QU( 7308198397975204770LLU),
+ QU(13580425496671289278LLU), QU(17070764785210130301LLU),
+ QU( 8202832846285604405LLU), QU( 6873046287640887249LLU),
+ QU( 6927424434308206114LLU), QU( 6139014645937224874LLU),
+ QU(10290373645978487639LLU), QU(15904261291701523804LLU),
+ QU( 9628743442057826883LLU), QU(18383429096255546714LLU),
+ QU( 4977413265753686967LLU), QU( 7714317492425012869LLU),
+ QU( 9025232586309926193LLU), QU(14627338359776709107LLU),
+ QU(14759849896467790763LLU), QU(10931129435864423252LLU),
+ QU( 4588456988775014359LLU), QU(10699388531797056724LLU),
+ QU( 468652268869238792LLU), QU( 5755943035328078086LLU),
+ QU( 2102437379988580216LLU), QU( 9986312786506674028LLU),
+ QU( 2654207180040945604LLU), QU( 8726634790559960062LLU),
+ QU( 100497234871808137LLU), QU( 2800137176951425819LLU),
+ QU( 6076627612918553487LLU), QU( 5780186919186152796LLU),
+ QU( 8179183595769929098LLU), QU( 6009426283716221169LLU),
+ QU( 2796662551397449358LLU), QU( 1756961367041986764LLU),
+ QU( 6972897917355606205LLU), QU(14524774345368968243LLU),
+ QU( 2773529684745706940LLU), QU( 4853632376213075959LLU),
+ QU( 4198177923731358102LLU), QU( 8271224913084139776LLU),
+ QU( 2741753121611092226LLU), QU(16782366145996731181LLU),
+ QU(15426125238972640790LLU), QU(13595497100671260342LLU),
+ QU( 3173531022836259898LLU), QU( 6573264560319511662LLU),
+ QU(18041111951511157441LLU), QU( 2351433581833135952LLU),
+ QU( 3113255578908173487LLU), QU( 1739371330877858784LLU),
+ QU(16046126562789165480LLU), QU( 8072101652214192925LLU),
+ QU(15267091584090664910LLU), QU( 9309579200403648940LLU),
+ QU( 5218892439752408722LLU), QU(14492477246004337115LLU),
+ QU(17431037586679770619LLU), QU( 7385248135963250480LLU),
+ QU( 9580144956565560660LLU), QU( 4919546228040008720LLU),
+ QU(15261542469145035584LLU), QU(18233297270822253102LLU),
+ QU( 5453248417992302857LLU), QU( 9309519155931460285LLU),
+ QU(10342813012345291756LLU), QU(15676085186784762381LLU),
+ QU(15912092950691300645LLU), QU( 9371053121499003195LLU),
+ QU( 9897186478226866746LLU), QU(14061858287188196327LLU),
+ QU( 122575971620788119LLU), QU(12146750969116317754LLU),
+ QU( 4438317272813245201LLU), QU( 8332576791009527119LLU),
+ QU(13907785691786542057LLU), QU(10374194887283287467LLU),
+ QU( 2098798755649059566LLU), QU( 3416235197748288894LLU),
+ QU( 8688269957320773484LLU), QU( 7503964602397371571LLU),
+ QU(16724977015147478236LLU), QU( 9461512855439858184LLU),
+ QU(13259049744534534727LLU), QU( 3583094952542899294LLU),
+ QU( 8764245731305528292LLU), QU(13240823595462088985LLU),
+ QU(13716141617617910448LLU), QU(18114969519935960955LLU),
+ QU( 2297553615798302206LLU), QU( 4585521442944663362LLU),
+ QU(17776858680630198686LLU), QU( 4685873229192163363LLU),
+ QU( 152558080671135627LLU), QU(15424900540842670088LLU),
+ QU(13229630297130024108LLU), QU(17530268788245718717LLU),
+ QU(16675633913065714144LLU), QU( 3158912717897568068LLU),
+ QU(15399132185380087288LLU), QU( 7401418744515677872LLU),
+ QU(13135412922344398535LLU), QU( 6385314346100509511LLU),
+ QU(13962867001134161139LLU), QU(10272780155442671999LLU),
+ QU(12894856086597769142LLU), QU(13340877795287554994LLU),
+ QU(12913630602094607396LLU), QU(12543167911119793857LLU),
+ QU(17343570372251873096LLU), QU(10959487764494150545LLU),
+ QU( 6966737953093821128LLU), QU(13780699135496988601LLU),
+ QU( 4405070719380142046LLU), QU(14923788365607284982LLU),
+ QU( 2869487678905148380LLU), QU( 6416272754197188403LLU),
+ QU(15017380475943612591LLU), QU( 1995636220918429487LLU),
+ QU( 3402016804620122716LLU), QU(15800188663407057080LLU),
+ QU(11362369990390932882LLU), QU(15262183501637986147LLU),
+ QU(10239175385387371494LLU), QU( 9352042420365748334LLU),
+ QU( 1682457034285119875LLU), QU( 1724710651376289644LLU),
+ QU( 2038157098893817966LLU), QU( 9897825558324608773LLU),
+ QU( 1477666236519164736LLU), QU(16835397314511233640LLU),
+ QU(10370866327005346508LLU), QU(10157504370660621982LLU),
+ QU(12113904045335882069LLU), QU(13326444439742783008LLU),
+ QU(11302769043000765804LLU), QU(13594979923955228484LLU),
+ QU(11779351762613475968LLU), QU( 3786101619539298383LLU),
+ QU( 8021122969180846063LLU), QU(15745904401162500495LLU),
+ QU(10762168465993897267LLU), QU(13552058957896319026LLU),
+ QU(11200228655252462013LLU), QU( 5035370357337441226LLU),
+ QU( 7593918984545500013LLU), QU( 5418554918361528700LLU),
+ QU( 4858270799405446371LLU), QU( 9974659566876282544LLU),
+ QU(18227595922273957859LLU), QU( 2772778443635656220LLU),
+ QU(14285143053182085385LLU), QU( 9939700992429600469LLU),
+ QU(12756185904545598068LLU), QU( 2020783375367345262LLU),
+ QU( 57026775058331227LLU), QU( 950827867930065454LLU),
+ QU( 6602279670145371217LLU), QU( 2291171535443566929LLU),
+ QU( 5832380724425010313LLU), QU( 1220343904715982285LLU),
+ QU(17045542598598037633LLU), QU(15460481779702820971LLU),
+ QU(13948388779949365130LLU), QU(13975040175430829518LLU),
+ QU(17477538238425541763LLU), QU(11104663041851745725LLU),
+ QU(15860992957141157587LLU), QU(14529434633012950138LLU),
+ QU( 2504838019075394203LLU), QU( 7512113882611121886LLU),
+ QU( 4859973559980886617LLU), QU( 1258601555703250219LLU),
+ QU(15594548157514316394LLU), QU( 4516730171963773048LLU),
+ QU(11380103193905031983LLU), QU( 6809282239982353344LLU),
+ QU(18045256930420065002LLU), QU( 2453702683108791859LLU),
+ QU( 977214582986981460LLU), QU( 2006410402232713466LLU),
+ QU( 6192236267216378358LLU), QU( 3429468402195675253LLU),
+ QU(18146933153017348921LLU), QU(17369978576367231139LLU),
+ QU( 1246940717230386603LLU), QU(11335758870083327110LLU),
+ QU(14166488801730353682LLU), QU( 9008573127269635732LLU),
+ QU(10776025389820643815LLU), QU(15087605441903942962LLU),
+ QU( 1359542462712147922LLU), QU(13898874411226454206LLU),
+ QU(17911176066536804411LLU), QU( 9435590428600085274LLU),
+ QU( 294488509967864007LLU), QU( 8890111397567922046LLU),
+ QU( 7987823476034328778LLU), QU(13263827582440967651LLU),
+ QU( 7503774813106751573LLU), QU(14974747296185646837LLU),
+ QU( 8504765037032103375LLU), QU(17340303357444536213LLU),
+ QU( 7704610912964485743LLU), QU( 8107533670327205061LLU),
+ QU( 9062969835083315985LLU), QU(16968963142126734184LLU),
+ QU(12958041214190810180LLU), QU( 2720170147759570200LLU),
+ QU( 2986358963942189566LLU), QU(14884226322219356580LLU),
+ QU( 286224325144368520LLU), QU(11313800433154279797LLU),
+ QU(18366849528439673248LLU), QU(17899725929482368789LLU),
+ QU( 3730004284609106799LLU), QU( 1654474302052767205LLU),
+ QU( 5006698007047077032LLU), QU( 8196893913601182838LLU),
+ QU(15214541774425211640LLU), QU(17391346045606626073LLU),
+ QU( 8369003584076969089LLU), QU( 3939046733368550293LLU),
+ QU(10178639720308707785LLU), QU( 2180248669304388697LLU),
+ QU( 62894391300126322LLU), QU( 9205708961736223191LLU),
+ QU( 6837431058165360438LLU), QU( 3150743890848308214LLU),
+ QU(17849330658111464583LLU), QU(12214815643135450865LLU),
+ QU(13410713840519603402LLU), QU( 3200778126692046802LLU),
+ QU(13354780043041779313LLU), QU( 800850022756886036LLU),
+ QU(15660052933953067433LLU), QU( 6572823544154375676LLU),
+ QU(11030281857015819266LLU), QU(12682241941471433835LLU),
+ QU(11654136407300274693LLU), QU( 4517795492388641109LLU),
+ QU( 9757017371504524244LLU), QU(17833043400781889277LLU),
+ QU(12685085201747792227LLU), QU(10408057728835019573LLU),
+ QU( 98370418513455221LLU), QU( 6732663555696848598LLU),
+ QU(13248530959948529780LLU), QU( 3530441401230622826LLU),
+ QU(18188251992895660615LLU), QU( 1847918354186383756LLU),
+ QU( 1127392190402660921LLU), QU(11293734643143819463LLU),
+ QU( 3015506344578682982LLU), QU(13852645444071153329LLU),
+ QU( 2121359659091349142LLU), QU( 1294604376116677694LLU),
+ QU( 5616576231286352318LLU), QU( 7112502442954235625LLU),
+ QU(11676228199551561689LLU), QU(12925182803007305359LLU),
+ QU( 7852375518160493082LLU), QU( 1136513130539296154LLU),
+ QU( 5636923900916593195LLU), QU( 3221077517612607747LLU),
+ QU(17784790465798152513LLU), QU( 3554210049056995938LLU),
+ QU(17476839685878225874LLU), QU( 3206836372585575732LLU),
+ QU( 2765333945644823430LLU), QU(10080070903718799528LLU),
+ QU( 5412370818878286353LLU), QU( 9689685887726257728LLU),
+ QU( 8236117509123533998LLU), QU( 1951139137165040214LLU),
+ QU( 4492205209227980349LLU), QU(16541291230861602967LLU),
+ QU( 1424371548301437940LLU), QU( 9117562079669206794LLU),
+ QU(14374681563251691625LLU), QU(13873164030199921303LLU),
+ QU( 6680317946770936731LLU), QU(15586334026918276214LLU),
+ QU(10896213950976109802LLU), QU( 9506261949596413689LLU),
+ QU( 9903949574308040616LLU), QU( 6038397344557204470LLU),
+ QU( 174601465422373648LLU), QU(15946141191338238030LLU),
+ QU(17142225620992044937LLU), QU( 7552030283784477064LLU),
+ QU( 2947372384532947997LLU), QU( 510797021688197711LLU),
+ QU( 4962499439249363461LLU), QU( 23770320158385357LLU),
+ QU( 959774499105138124LLU), QU( 1468396011518788276LLU),
+ QU( 2015698006852312308LLU), QU( 4149400718489980136LLU),
+ QU( 5992916099522371188LLU), QU(10819182935265531076LLU),
+ QU(16189787999192351131LLU), QU( 342833961790261950LLU),
+ QU(12470830319550495336LLU), QU(18128495041912812501LLU),
+ QU( 1193600899723524337LLU), QU( 9056793666590079770LLU),
+ QU( 2154021227041669041LLU), QU( 4963570213951235735LLU),
+ QU( 4865075960209211409LLU), QU( 2097724599039942963LLU),
+ QU( 2024080278583179845LLU), QU(11527054549196576736LLU),
+ QU(10650256084182390252LLU), QU( 4808408648695766755LLU),
+ QU( 1642839215013788844LLU), QU(10607187948250398390LLU),
+ QU( 7076868166085913508LLU), QU( 730522571106887032LLU),
+ QU(12500579240208524895LLU), QU( 4484390097311355324LLU),
+ QU(15145801330700623870LLU), QU( 8055827661392944028LLU),
+ QU( 5865092976832712268LLU), QU(15159212508053625143LLU),
+ QU( 3560964582876483341LLU), QU( 4070052741344438280LLU),
+ QU( 6032585709886855634LLU), QU(15643262320904604873LLU),
+ QU( 2565119772293371111LLU), QU( 318314293065348260LLU),
+ QU(15047458749141511872LLU), QU( 7772788389811528730LLU),
+ QU( 7081187494343801976LLU), QU( 6465136009467253947LLU),
+ QU(10425940692543362069LLU), QU( 554608190318339115LLU),
+ QU(14796699860302125214LLU), QU( 1638153134431111443LLU),
+ QU(10336967447052276248LLU), QU( 8412308070396592958LLU),
+ QU( 4004557277152051226LLU), QU( 8143598997278774834LLU),
+ QU(16413323996508783221LLU), QU(13139418758033994949LLU),
+ QU( 9772709138335006667LLU), QU( 2818167159287157659LLU),
+ QU(17091740573832523669LLU), QU(14629199013130751608LLU),
+ QU(18268322711500338185LLU), QU( 8290963415675493063LLU),
+ QU( 8830864907452542588LLU), QU( 1614839084637494849LLU),
+ QU(14855358500870422231LLU), QU( 3472996748392519937LLU),
+ QU(15317151166268877716LLU), QU( 5825895018698400362LLU),
+ QU(16730208429367544129LLU), QU(10481156578141202800LLU),
+ QU( 4746166512382823750LLU), QU(12720876014472464998LLU),
+ QU( 8825177124486735972LLU), QU(13733447296837467838LLU),
+ QU( 6412293741681359625LLU), QU( 8313213138756135033LLU),
+ QU(11421481194803712517LLU), QU( 7997007691544174032LLU),
+ QU( 6812963847917605930LLU), QU( 9683091901227558641LLU),
+ QU(14703594165860324713LLU), QU( 1775476144519618309LLU),
+ QU( 2724283288516469519LLU), QU( 717642555185856868LLU),
+ QU( 8736402192215092346LLU), QU(11878800336431381021LLU),
+ QU( 4348816066017061293LLU), QU( 6115112756583631307LLU),
+ QU( 9176597239667142976LLU), QU(12615622714894259204LLU),
+ QU(10283406711301385987LLU), QU( 5111762509485379420LLU),
+ QU( 3118290051198688449LLU), QU( 7345123071632232145LLU),
+ QU( 9176423451688682359LLU), QU( 4843865456157868971LLU),
+ QU(12008036363752566088LLU), QU(12058837181919397720LLU),
+ QU( 2145073958457347366LLU), QU( 1526504881672818067LLU),
+ QU( 3488830105567134848LLU), QU(13208362960674805143LLU),
+ QU( 4077549672899572192LLU), QU( 7770995684693818365LLU),
+ QU( 1398532341546313593LLU), QU(12711859908703927840LLU),
+ QU( 1417561172594446813LLU), QU(17045191024194170604LLU),
+ QU( 4101933177604931713LLU), QU(14708428834203480320LLU),
+ QU(17447509264469407724LLU), QU(14314821973983434255LLU),
+ QU(17990472271061617265LLU), QU( 5087756685841673942LLU),
+ QU(12797820586893859939LLU), QU( 1778128952671092879LLU),
+ QU( 3535918530508665898LLU), QU( 9035729701042481301LLU),
+ QU(14808661568277079962LLU), QU(14587345077537747914LLU),
+ QU(11920080002323122708LLU), QU( 6426515805197278753LLU),
+ QU( 3295612216725984831LLU), QU(11040722532100876120LLU),
+ QU(12305952936387598754LLU), QU(16097391899742004253LLU),
+ QU( 4908537335606182208LLU), QU(12446674552196795504LLU),
+ QU(16010497855816895177LLU), QU( 9194378874788615551LLU),
+ QU( 3382957529567613384LLU), QU( 5154647600754974077LLU),
+ QU( 9801822865328396141LLU), QU( 9023662173919288143LLU),
+ QU(17623115353825147868LLU), QU( 8238115767443015816LLU),
+ QU(15811444159859002560LLU), QU( 9085612528904059661LLU),
+ QU( 6888601089398614254LLU), QU( 258252992894160189LLU),
+ QU( 6704363880792428622LLU), QU( 6114966032147235763LLU),
+ QU(11075393882690261875LLU), QU( 8797664238933620407LLU),
+ QU( 5901892006476726920LLU), QU( 5309780159285518958LLU),
+ QU(14940808387240817367LLU), QU(14642032021449656698LLU),
+ QU( 9808256672068504139LLU), QU( 3670135111380607658LLU),
+ QU(11211211097845960152LLU), QU( 1474304506716695808LLU),
+ QU(15843166204506876239LLU), QU( 7661051252471780561LLU),
+ QU(10170905502249418476LLU), QU( 7801416045582028589LLU),
+ QU( 2763981484737053050LLU), QU( 9491377905499253054LLU),
+ QU(16201395896336915095LLU), QU( 9256513756442782198LLU),
+ QU( 5411283157972456034LLU), QU( 5059433122288321676LLU),
+ QU( 4327408006721123357LLU), QU( 9278544078834433377LLU),
+ QU( 7601527110882281612LLU), QU(11848295896975505251LLU),
+ QU(12096998801094735560LLU), QU(14773480339823506413LLU),
+ QU(15586227433895802149LLU), QU(12786541257830242872LLU),
+ QU( 6904692985140503067LLU), QU( 5309011515263103959LLU),
+ QU(12105257191179371066LLU), QU(14654380212442225037LLU),
+ QU( 2556774974190695009LLU), QU( 4461297399927600261LLU),
+ QU(14888225660915118646LLU), QU(14915459341148291824LLU),
+ QU( 2738802166252327631LLU), QU( 6047155789239131512LLU),
+ QU(12920545353217010338LLU), QU(10697617257007840205LLU),
+ QU( 2751585253158203504LLU), QU(13252729159780047496LLU),
+ QU(14700326134672815469LLU), QU(14082527904374600529LLU),
+ QU(16852962273496542070LLU), QU(17446675504235853907LLU),
+ QU(15019600398527572311LLU), QU(12312781346344081551LLU),
+ QU(14524667935039810450LLU), QU( 5634005663377195738LLU),
+ QU(11375574739525000569LLU), QU( 2423665396433260040LLU),
+ QU( 5222836914796015410LLU), QU( 4397666386492647387LLU),
+ QU( 4619294441691707638LLU), QU( 665088602354770716LLU),
+ QU(13246495665281593610LLU), QU( 6564144270549729409LLU),
+ QU(10223216188145661688LLU), QU( 3961556907299230585LLU),
+ QU(11543262515492439914LLU), QU(16118031437285993790LLU),
+ QU( 7143417964520166465LLU), QU(13295053515909486772LLU),
+ QU( 40434666004899675LLU), QU(17127804194038347164LLU),
+ QU( 8599165966560586269LLU), QU( 8214016749011284903LLU),
+ QU(13725130352140465239LLU), QU( 5467254474431726291LLU),
+ QU( 7748584297438219877LLU), QU(16933551114829772472LLU),
+ QU( 2169618439506799400LLU), QU( 2169787627665113463LLU),
+ QU(17314493571267943764LLU), QU(18053575102911354912LLU),
+ QU(11928303275378476973LLU), QU(11593850925061715550LLU),
+ QU(17782269923473589362LLU), QU( 3280235307704747039LLU),
+ QU( 6145343578598685149LLU), QU(17080117031114086090LLU),
+ QU(18066839902983594755LLU), QU( 6517508430331020706LLU),
+ QU( 8092908893950411541LLU), QU(12558378233386153732LLU),
+ QU( 4476532167973132976LLU), QU(16081642430367025016LLU),
+ QU( 4233154094369139361LLU), QU( 8693630486693161027LLU),
+ QU(11244959343027742285LLU), QU(12273503967768513508LLU),
+ QU(14108978636385284876LLU), QU( 7242414665378826984LLU),
+ QU( 6561316938846562432LLU), QU( 8601038474994665795LLU),
+ QU(17532942353612365904LLU), QU(17940076637020912186LLU),
+ QU( 7340260368823171304LLU), QU( 7061807613916067905LLU),
+ QU(10561734935039519326LLU), QU(17990796503724650862LLU),
+ QU( 6208732943911827159LLU), QU( 359077562804090617LLU),
+ QU(14177751537784403113LLU), QU(10659599444915362902LLU),
+ QU(15081727220615085833LLU), QU(13417573895659757486LLU),
+ QU(15513842342017811524LLU), QU(11814141516204288231LLU),
+ QU( 1827312513875101814LLU), QU( 2804611699894603103LLU),
+ QU(17116500469975602763LLU), QU(12270191815211952087LLU),
+ QU(12256358467786024988LLU), QU(18435021722453971267LLU),
+ QU( 671330264390865618LLU), QU( 476504300460286050LLU),
+ QU(16465470901027093441LLU), QU( 4047724406247136402LLU),
+ QU( 1322305451411883346LLU), QU( 1388308688834322280LLU),
+ QU( 7303989085269758176LLU), QU( 9323792664765233642LLU),
+ QU( 4542762575316368936LLU), QU(17342696132794337618LLU),
+ QU( 4588025054768498379LLU), QU(13415475057390330804LLU),
+ QU(17880279491733405570LLU), QU(10610553400618620353LLU),
+ QU( 3180842072658960139LLU), QU(13002966655454270120LLU),
+ QU( 1665301181064982826LLU), QU( 7083673946791258979LLU),
+ QU( 190522247122496820LLU), QU(17388280237250677740LLU),
+ QU( 8430770379923642945LLU), QU(12987180971921668584LLU),
+ QU( 2311086108365390642LLU), QU( 2870984383579822345LLU),
+ QU(14014682609164653318LLU), QU(14467187293062251484LLU),
+ QU( 192186361147413298LLU), QU(15171951713531796524LLU),
+ QU( 9900305495015948728LLU), QU(17958004775615466344LLU),
+ QU(14346380954498606514LLU), QU(18040047357617407096LLU),
+ QU( 5035237584833424532LLU), QU(15089555460613972287LLU),
+ QU( 4131411873749729831LLU), QU( 1329013581168250330LLU),
+ QU(10095353333051193949LLU), QU(10749518561022462716LLU),
+ QU( 9050611429810755847LLU), QU(15022028840236655649LLU),
+ QU( 8775554279239748298LLU), QU(13105754025489230502LLU),
+ QU(15471300118574167585LLU), QU( 89864764002355628LLU),
+ QU( 8776416323420466637LLU), QU( 5280258630612040891LLU),
+ QU( 2719174488591862912LLU), QU( 7599309137399661994LLU),
+ QU(15012887256778039979LLU), QU(14062981725630928925LLU),
+ QU(12038536286991689603LLU), QU( 7089756544681775245LLU),
+ QU(10376661532744718039LLU), QU( 1265198725901533130LLU),
+ QU(13807996727081142408LLU), QU( 2935019626765036403LLU),
+ QU( 7651672460680700141LLU), QU( 3644093016200370795LLU),
+ QU( 2840982578090080674LLU), QU(17956262740157449201LLU),
+ QU(18267979450492880548LLU), QU(11799503659796848070LLU),
+ QU( 9942537025669672388LLU), QU(11886606816406990297LLU),
+ QU( 5488594946437447576LLU), QU( 7226714353282744302LLU),
+ QU( 3784851653123877043LLU), QU( 878018453244803041LLU),
+ QU(12110022586268616085LLU), QU( 734072179404675123LLU),
+ QU(11869573627998248542LLU), QU( 469150421297783998LLU),
+ QU( 260151124912803804LLU), QU(11639179410120968649LLU),
+ QU( 9318165193840846253LLU), QU(12795671722734758075LLU),
+ QU(15318410297267253933LLU), QU( 691524703570062620LLU),
+ QU( 5837129010576994601LLU), QU(15045963859726941052LLU),
+ QU( 5850056944932238169LLU), QU(12017434144750943807LLU),
+ QU( 7447139064928956574LLU), QU( 3101711812658245019LLU),
+ QU(16052940704474982954LLU), QU(18195745945986994042LLU),
+ QU( 8932252132785575659LLU), QU(13390817488106794834LLU),
+ QU(11582771836502517453LLU), QU( 4964411326683611686LLU),
+ QU( 2195093981702694011LLU), QU(14145229538389675669LLU),
+ QU(16459605532062271798LLU), QU( 866316924816482864LLU),
+ QU( 4593041209937286377LLU), QU( 8415491391910972138LLU),
+ QU( 4171236715600528969LLU), QU(16637569303336782889LLU),
+ QU( 2002011073439212680LLU), QU(17695124661097601411LLU),
+ QU( 4627687053598611702LLU), QU( 7895831936020190403LLU),
+ QU( 8455951300917267802LLU), QU( 2923861649108534854LLU),
+ QU( 8344557563927786255LLU), QU( 6408671940373352556LLU),
+ QU(12210227354536675772LLU), QU(14294804157294222295LLU),
+ QU(10103022425071085127LLU), QU(10092959489504123771LLU),
+ QU( 6554774405376736268LLU), QU(12629917718410641774LLU),
+ QU( 6260933257596067126LLU), QU( 2460827021439369673LLU),
+ QU( 2541962996717103668LLU), QU( 597377203127351475LLU),
+ QU( 5316984203117315309LLU), QU( 4811211393563241961LLU),
+ QU(13119698597255811641LLU), QU( 8048691512862388981LLU),
+ QU(10216818971194073842LLU), QU( 4612229970165291764LLU),
+ QU(10000980798419974770LLU), QU( 6877640812402540687LLU),
+ QU( 1488727563290436992LLU), QU( 2227774069895697318LLU),
+ QU(11237754507523316593LLU), QU(13478948605382290972LLU),
+ QU( 1963583846976858124LLU), QU( 5512309205269276457LLU),
+ QU( 3972770164717652347LLU), QU( 3841751276198975037LLU),
+ QU(10283343042181903117LLU), QU( 8564001259792872199LLU),
+ QU(16472187244722489221LLU), QU( 8953493499268945921LLU),
+ QU( 3518747340357279580LLU), QU( 4003157546223963073LLU),
+ QU( 3270305958289814590LLU), QU( 3966704458129482496LLU),
+ QU( 8122141865926661939LLU), QU(14627734748099506653LLU),
+ QU(13064426990862560568LLU), QU( 2414079187889870829LLU),
+ QU( 5378461209354225306LLU), QU(10841985740128255566LLU),
+ QU( 538582442885401738LLU), QU( 7535089183482905946LLU),
+ QU(16117559957598879095LLU), QU( 8477890721414539741LLU),
+ QU( 1459127491209533386LLU), QU(17035126360733620462LLU),
+ QU( 8517668552872379126LLU), QU(10292151468337355014LLU),
+ QU(17081267732745344157LLU), QU(13751455337946087178LLU),
+ QU(14026945459523832966LLU), QU( 6653278775061723516LLU),
+ QU(10619085543856390441LLU), QU( 2196343631481122885LLU),
+ QU(10045966074702826136LLU), QU(10082317330452718282LLU),
+ QU( 5920859259504831242LLU), QU( 9951879073426540617LLU),
+ QU( 7074696649151414158LLU), QU(15808193543879464318LLU),
+ QU( 7385247772746953374LLU), QU( 3192003544283864292LLU),
+ QU(18153684490917593847LLU), QU(12423498260668568905LLU),
+ QU(10957758099756378169LLU), QU(11488762179911016040LLU),
+ QU( 2099931186465333782LLU), QU(11180979581250294432LLU),
+ QU( 8098916250668367933LLU), QU( 3529200436790763465LLU),
+ QU(12988418908674681745LLU), QU( 6147567275954808580LLU),
+ QU( 3207503344604030989LLU), QU(10761592604898615360LLU),
+ QU( 229854861031893504LLU), QU( 8809853962667144291LLU),
+ QU(13957364469005693860LLU), QU( 7634287665224495886LLU),
+ QU(12353487366976556874LLU), QU( 1134423796317152034LLU),
+ QU( 2088992471334107068LLU), QU( 7393372127190799698LLU),
+ QU( 1845367839871058391LLU), QU( 207922563987322884LLU),
+ QU(11960870813159944976LLU), QU(12182120053317317363LLU),
+ QU(17307358132571709283LLU), QU(13871081155552824936LLU),
+ QU(18304446751741566262LLU), QU( 7178705220184302849LLU),
+ QU(10929605677758824425LLU), QU(16446976977835806844LLU),
+ QU(13723874412159769044LLU), QU( 6942854352100915216LLU),
+ QU( 1726308474365729390LLU), QU( 2150078766445323155LLU),
+ QU(15345558947919656626LLU), QU(12145453828874527201LLU),
+ QU( 2054448620739726849LLU), QU( 2740102003352628137LLU),
+ QU(11294462163577610655LLU), QU( 756164283387413743LLU),
+ QU(17841144758438810880LLU), QU(10802406021185415861LLU),
+ QU( 8716455530476737846LLU), QU( 6321788834517649606LLU),
+ QU(14681322910577468426LLU), QU(17330043563884336387LLU),
+ QU(12701802180050071614LLU), QU(14695105111079727151LLU),
+ QU( 5112098511654172830LLU), QU( 4957505496794139973LLU),
+ QU( 8270979451952045982LLU), QU(12307685939199120969LLU),
+ QU(12425799408953443032LLU), QU( 8376410143634796588LLU),
+ QU(16621778679680060464LLU), QU( 3580497854566660073LLU),
+ QU( 1122515747803382416LLU), QU( 857664980960597599LLU),
+ QU( 6343640119895925918LLU), QU(12878473260854462891LLU),
+ QU(10036813920765722626LLU), QU(14451335468363173812LLU),
+ QU( 5476809692401102807LLU), QU(16442255173514366342LLU),
+ QU(13060203194757167104LLU), QU(14354124071243177715LLU),
+ QU(15961249405696125227LLU), QU(13703893649690872584LLU),
+ QU( 363907326340340064LLU), QU( 6247455540491754842LLU),
+ QU(12242249332757832361LLU), QU( 156065475679796717LLU),
+ QU( 9351116235749732355LLU), QU( 4590350628677701405LLU),
+ QU( 1671195940982350389LLU), QU(13501398458898451905LLU),
+ QU( 6526341991225002255LLU), QU( 1689782913778157592LLU),
+ QU( 7439222350869010334LLU), QU(13975150263226478308LLU),
+ QU(11411961169932682710LLU), QU(17204271834833847277LLU),
+ QU( 541534742544435367LLU), QU( 6591191931218949684LLU),
+ QU( 2645454775478232486LLU), QU( 4322857481256485321LLU),
+ QU( 8477416487553065110LLU), QU(12902505428548435048LLU),
+ QU( 971445777981341415LLU), QU(14995104682744976712LLU),
+ QU( 4243341648807158063LLU), QU( 8695061252721927661LLU),
+ QU( 5028202003270177222LLU), QU( 2289257340915567840LLU),
+ QU(13870416345121866007LLU), QU(13994481698072092233LLU),
+ QU( 6912785400753196481LLU), QU( 2278309315841980139LLU),
+ QU( 4329765449648304839LLU), QU( 5963108095785485298LLU),
+ QU( 4880024847478722478LLU), QU(16015608779890240947LLU),
+ QU( 1866679034261393544LLU), QU( 914821179919731519LLU),
+ QU( 9643404035648760131LLU), QU( 2418114953615593915LLU),
+ QU( 944756836073702374LLU), QU(15186388048737296834LLU),
+ QU( 7723355336128442206LLU), QU( 7500747479679599691LLU),
+ QU(18013961306453293634LLU), QU( 2315274808095756456LLU),
+ QU(13655308255424029566LLU), QU(17203800273561677098LLU),
+ QU( 1382158694422087756LLU), QU( 5090390250309588976LLU),
+ QU( 517170818384213989LLU), QU( 1612709252627729621LLU),
+ QU( 1330118955572449606LLU), QU( 300922478056709885LLU),
+ QU(18115693291289091987LLU), QU(13491407109725238321LLU),
+ QU(15293714633593827320LLU), QU( 5151539373053314504LLU),
+ QU( 5951523243743139207LLU), QU(14459112015249527975LLU),
+ QU( 5456113959000700739LLU), QU( 3877918438464873016LLU),
+ QU(12534071654260163555LLU), QU(15871678376893555041LLU),
+ QU(11005484805712025549LLU), QU(16353066973143374252LLU),
+ QU( 4358331472063256685LLU), QU( 8268349332210859288LLU),
+ QU(12485161590939658075LLU), QU(13955993592854471343LLU),
+ QU( 5911446886848367039LLU), QU(14925834086813706974LLU),
+ QU( 6590362597857994805LLU), QU( 1280544923533661875LLU),
+ QU( 1637756018947988164LLU), QU( 4734090064512686329LLU),
+ QU(16693705263131485912LLU), QU( 6834882340494360958LLU),
+ QU( 8120732176159658505LLU), QU( 2244371958905329346LLU),
+ QU(10447499707729734021LLU), QU( 7318742361446942194LLU),
+ QU( 8032857516355555296LLU), QU(14023605983059313116LLU),
+ QU( 1032336061815461376LLU), QU( 9840995337876562612LLU),
+ QU( 9869256223029203587LLU), QU(12227975697177267636LLU),
+ QU(12728115115844186033LLU), QU( 7752058479783205470LLU),
+ QU( 729733219713393087LLU), QU(12954017801239007622LLU)
+};
+static const uint64_t init_by_array_64_expected[] = {
+ QU( 2100341266307895239LLU), QU( 8344256300489757943LLU),
+ QU(15687933285484243894LLU), QU( 8268620370277076319LLU),
+ QU(12371852309826545459LLU), QU( 8800491541730110238LLU),
+ QU(18113268950100835773LLU), QU( 2886823658884438119LLU),
+ QU( 3293667307248180724LLU), QU( 9307928143300172731LLU),
+ QU( 7688082017574293629LLU), QU( 900986224735166665LLU),
+ QU( 9977972710722265039LLU), QU( 6008205004994830552LLU),
+ QU( 546909104521689292LLU), QU( 7428471521869107594LLU),
+ QU(14777563419314721179LLU), QU(16116143076567350053LLU),
+ QU( 5322685342003142329LLU), QU( 4200427048445863473LLU),
+ QU( 4693092150132559146LLU), QU(13671425863759338582LLU),
+ QU( 6747117460737639916LLU), QU( 4732666080236551150LLU),
+ QU( 5912839950611941263LLU), QU( 3903717554504704909LLU),
+ QU( 2615667650256786818LLU), QU(10844129913887006352LLU),
+ QU(13786467861810997820LLU), QU(14267853002994021570LLU),
+ QU(13767807302847237439LLU), QU(16407963253707224617LLU),
+ QU( 4802498363698583497LLU), QU( 2523802839317209764LLU),
+ QU( 3822579397797475589LLU), QU( 8950320572212130610LLU),
+ QU( 3745623504978342534LLU), QU(16092609066068482806LLU),
+ QU( 9817016950274642398LLU), QU(10591660660323829098LLU),
+ QU(11751606650792815920LLU), QU( 5122873818577122211LLU),
+ QU(17209553764913936624LLU), QU( 6249057709284380343LLU),
+ QU(15088791264695071830LLU), QU(15344673071709851930LLU),
+ QU( 4345751415293646084LLU), QU( 2542865750703067928LLU),
+ QU(13520525127852368784LLU), QU(18294188662880997241LLU),
+ QU( 3871781938044881523LLU), QU( 2873487268122812184LLU),
+ QU(15099676759482679005LLU), QU(15442599127239350490LLU),
+ QU( 6311893274367710888LLU), QU( 3286118760484672933LLU),
+ QU( 4146067961333542189LLU), QU(13303942567897208770LLU),
+ QU( 8196013722255630418LLU), QU( 4437815439340979989LLU),
+ QU(15433791533450605135LLU), QU( 4254828956815687049LLU),
+ QU( 1310903207708286015LLU), QU(10529182764462398549LLU),
+ QU(14900231311660638810LLU), QU( 9727017277104609793LLU),
+ QU( 1821308310948199033LLU), QU(11628861435066772084LLU),
+ QU( 9469019138491546924LLU), QU( 3145812670532604988LLU),
+ QU( 9938468915045491919LLU), QU( 1562447430672662142LLU),
+ QU(13963995266697989134LLU), QU( 3356884357625028695LLU),
+ QU( 4499850304584309747LLU), QU( 8456825817023658122LLU),
+ QU(10859039922814285279LLU), QU( 8099512337972526555LLU),
+ QU( 348006375109672149LLU), QU(11919893998241688603LLU),
+ QU( 1104199577402948826LLU), QU(16689191854356060289LLU),
+ QU(10992552041730168078LLU), QU( 7243733172705465836LLU),
+ QU( 5668075606180319560LLU), QU(18182847037333286970LLU),
+ QU( 4290215357664631322LLU), QU( 4061414220791828613LLU),
+ QU(13006291061652989604LLU), QU( 7140491178917128798LLU),
+ QU(12703446217663283481LLU), QU( 5500220597564558267LLU),
+ QU(10330551509971296358LLU), QU(15958554768648714492LLU),
+ QU( 5174555954515360045LLU), QU( 1731318837687577735LLU),
+ QU( 3557700801048354857LLU), QU(13764012341928616198LLU),
+ QU(13115166194379119043LLU), QU( 7989321021560255519LLU),
+ QU( 2103584280905877040LLU), QU( 9230788662155228488LLU),
+ QU(16396629323325547654LLU), QU( 657926409811318051LLU),
+ QU(15046700264391400727LLU), QU( 5120132858771880830LLU),
+ QU( 7934160097989028561LLU), QU( 6963121488531976245LLU),
+ QU(17412329602621742089LLU), QU(15144843053931774092LLU),
+ QU(17204176651763054532LLU), QU(13166595387554065870LLU),
+ QU( 8590377810513960213LLU), QU( 5834365135373991938LLU),
+ QU( 7640913007182226243LLU), QU( 3479394703859418425LLU),
+ QU(16402784452644521040LLU), QU( 4993979809687083980LLU),
+ QU(13254522168097688865LLU), QU(15643659095244365219LLU),
+ QU( 5881437660538424982LLU), QU(11174892200618987379LLU),
+ QU( 254409966159711077LLU), QU(17158413043140549909LLU),
+ QU( 3638048789290376272LLU), QU( 1376816930299489190LLU),
+ QU( 4622462095217761923LLU), QU(15086407973010263515LLU),
+ QU(13253971772784692238LLU), QU( 5270549043541649236LLU),
+ QU(11182714186805411604LLU), QU(12283846437495577140LLU),
+ QU( 5297647149908953219LLU), QU(10047451738316836654LLU),
+ QU( 4938228100367874746LLU), QU(12328523025304077923LLU),
+ QU( 3601049438595312361LLU), QU( 9313624118352733770LLU),
+ QU(13322966086117661798LLU), QU(16660005705644029394LLU),
+ QU(11337677526988872373LLU), QU(13869299102574417795LLU),
+ QU(15642043183045645437LLU), QU( 3021755569085880019LLU),
+ QU( 4979741767761188161LLU), QU(13679979092079279587LLU),
+ QU( 3344685842861071743LLU), QU(13947960059899588104LLU),
+ QU( 305806934293368007LLU), QU( 5749173929201650029LLU),
+ QU(11123724852118844098LLU), QU(15128987688788879802LLU),
+ QU(15251651211024665009LLU), QU( 7689925933816577776LLU),
+ QU(16732804392695859449LLU), QU(17087345401014078468LLU),
+ QU(14315108589159048871LLU), QU( 4820700266619778917LLU),
+ QU(16709637539357958441LLU), QU( 4936227875177351374LLU),
+ QU( 2137907697912987247LLU), QU(11628565601408395420LLU),
+ QU( 2333250549241556786LLU), QU( 5711200379577778637LLU),
+ QU( 5170680131529031729LLU), QU(12620392043061335164LLU),
+ QU( 95363390101096078LLU), QU( 5487981914081709462LLU),
+ QU( 1763109823981838620LLU), QU( 3395861271473224396LLU),
+ QU( 1300496844282213595LLU), QU( 6894316212820232902LLU),
+ QU(10673859651135576674LLU), QU( 5911839658857903252LLU),
+ QU(17407110743387299102LLU), QU( 8257427154623140385LLU),
+ QU(11389003026741800267LLU), QU( 4070043211095013717LLU),
+ QU(11663806997145259025LLU), QU(15265598950648798210LLU),
+ QU( 630585789434030934LLU), QU( 3524446529213587334LLU),
+ QU( 7186424168495184211LLU), QU(10806585451386379021LLU),
+ QU(11120017753500499273LLU), QU( 1586837651387701301LLU),
+ QU(17530454400954415544LLU), QU( 9991670045077880430LLU),
+ QU( 7550997268990730180LLU), QU( 8640249196597379304LLU),
+ QU( 3522203892786893823LLU), QU(10401116549878854788LLU),
+ QU(13690285544733124852LLU), QU( 8295785675455774586LLU),
+ QU(15535716172155117603LLU), QU( 3112108583723722511LLU),
+ QU(17633179955339271113LLU), QU(18154208056063759375LLU),
+ QU( 1866409236285815666LLU), QU(13326075895396412882LLU),
+ QU( 8756261842948020025LLU), QU( 6281852999868439131LLU),
+ QU(15087653361275292858LLU), QU(10333923911152949397LLU),
+ QU( 5265567645757408500LLU), QU(12728041843210352184LLU),
+ QU( 6347959327507828759LLU), QU( 154112802625564758LLU),
+ QU(18235228308679780218LLU), QU( 3253805274673352418LLU),
+ QU( 4849171610689031197LLU), QU(17948529398340432518LLU),
+ QU(13803510475637409167LLU), QU(13506570190409883095LLU),
+ QU(15870801273282960805LLU), QU( 8451286481299170773LLU),
+ QU( 9562190620034457541LLU), QU( 8518905387449138364LLU),
+ QU(12681306401363385655LLU), QU( 3788073690559762558LLU),
+ QU( 5256820289573487769LLU), QU( 2752021372314875467LLU),
+ QU( 6354035166862520716LLU), QU( 4328956378309739069LLU),
+ QU( 449087441228269600LLU), QU( 5533508742653090868LLU),
+ QU( 1260389420404746988LLU), QU(18175394473289055097LLU),
+ QU( 1535467109660399420LLU), QU( 8818894282874061442LLU),
+ QU(12140873243824811213LLU), QU(15031386653823014946LLU),
+ QU( 1286028221456149232LLU), QU( 6329608889367858784LLU),
+ QU( 9419654354945132725LLU), QU( 6094576547061672379LLU),
+ QU(17706217251847450255LLU), QU( 1733495073065878126LLU),
+ QU(16918923754607552663LLU), QU( 8881949849954945044LLU),
+ QU(12938977706896313891LLU), QU(14043628638299793407LLU),
+ QU(18393874581723718233LLU), QU( 6886318534846892044LLU),
+ QU(14577870878038334081LLU), QU(13541558383439414119LLU),
+ QU(13570472158807588273LLU), QU(18300760537910283361LLU),
+ QU( 818368572800609205LLU), QU( 1417000585112573219LLU),
+ QU(12337533143867683655LLU), QU(12433180994702314480LLU),
+ QU( 778190005829189083LLU), QU(13667356216206524711LLU),
+ QU( 9866149895295225230LLU), QU(11043240490417111999LLU),
+ QU( 1123933826541378598LLU), QU( 6469631933605123610LLU),
+ QU(14508554074431980040LLU), QU(13918931242962026714LLU),
+ QU( 2870785929342348285LLU), QU(14786362626740736974LLU),
+ QU(13176680060902695786LLU), QU( 9591778613541679456LLU),
+ QU( 9097662885117436706LLU), QU( 749262234240924947LLU),
+ QU( 1944844067793307093LLU), QU( 4339214904577487742LLU),
+ QU( 8009584152961946551LLU), QU(16073159501225501777LLU),
+ QU( 3335870590499306217LLU), QU(17088312653151202847LLU),
+ QU( 3108893142681931848LLU), QU(16636841767202792021LLU),
+ QU(10423316431118400637LLU), QU( 8008357368674443506LLU),
+ QU(11340015231914677875LLU), QU(17687896501594936090LLU),
+ QU(15173627921763199958LLU), QU( 542569482243721959LLU),
+ QU(15071714982769812975LLU), QU( 4466624872151386956LLU),
+ QU( 1901780715602332461LLU), QU( 9822227742154351098LLU),
+ QU( 1479332892928648780LLU), QU( 6981611948382474400LLU),
+ QU( 7620824924456077376LLU), QU(14095973329429406782LLU),
+ QU( 7902744005696185404LLU), QU(15830577219375036920LLU),
+ QU(10287076667317764416LLU), QU(12334872764071724025LLU),
+ QU( 4419302088133544331LLU), QU(14455842851266090520LLU),
+ QU(12488077416504654222LLU), QU( 7953892017701886766LLU),
+ QU( 6331484925529519007LLU), QU( 4902145853785030022LLU),
+ QU(17010159216096443073LLU), QU(11945354668653886087LLU),
+ QU(15112022728645230829LLU), QU(17363484484522986742LLU),
+ QU( 4423497825896692887LLU), QU( 8155489510809067471LLU),
+ QU( 258966605622576285LLU), QU( 5462958075742020534LLU),
+ QU( 6763710214913276228LLU), QU( 2368935183451109054LLU),
+ QU(14209506165246453811LLU), QU( 2646257040978514881LLU),
+ QU( 3776001911922207672LLU), QU( 1419304601390147631LLU),
+ QU(14987366598022458284LLU), QU( 3977770701065815721LLU),
+ QU( 730820417451838898LLU), QU( 3982991703612885327LLU),
+ QU( 2803544519671388477LLU), QU(17067667221114424649LLU),
+ QU( 2922555119737867166LLU), QU( 1989477584121460932LLU),
+ QU(15020387605892337354LLU), QU( 9293277796427533547LLU),
+ QU(10722181424063557247LLU), QU(16704542332047511651LLU),
+ QU( 5008286236142089514LLU), QU(16174732308747382540LLU),
+ QU(17597019485798338402LLU), QU(13081745199110622093LLU),
+ QU( 8850305883842258115LLU), QU(12723629125624589005LLU),
+ QU( 8140566453402805978LLU), QU(15356684607680935061LLU),
+ QU(14222190387342648650LLU), QU(11134610460665975178LLU),
+ QU( 1259799058620984266LLU), QU(13281656268025610041LLU),
+ QU( 298262561068153992LLU), QU(12277871700239212922LLU),
+ QU(13911297774719779438LLU), QU(16556727962761474934LLU),
+ QU(17903010316654728010LLU), QU( 9682617699648434744LLU),
+ QU(14757681836838592850LLU), QU( 1327242446558524473LLU),
+ QU(11126645098780572792LLU), QU( 1883602329313221774LLU),
+ QU( 2543897783922776873LLU), QU(15029168513767772842LLU),
+ QU(12710270651039129878LLU), QU(16118202956069604504LLU),
+ QU(15010759372168680524LLU), QU( 2296827082251923948LLU),
+ QU(10793729742623518101LLU), QU(13829764151845413046LLU),
+ QU(17769301223184451213LLU), QU( 3118268169210783372LLU),
+ QU(17626204544105123127LLU), QU( 7416718488974352644LLU),
+ QU(10450751996212925994LLU), QU( 9352529519128770586LLU),
+ QU( 259347569641110140LLU), QU( 8048588892269692697LLU),
+ QU( 1774414152306494058LLU), QU(10669548347214355622LLU),
+ QU(13061992253816795081LLU), QU(18432677803063861659LLU),
+ QU( 8879191055593984333LLU), QU(12433753195199268041LLU),
+ QU(14919392415439730602LLU), QU( 6612848378595332963LLU),
+ QU( 6320986812036143628LLU), QU(10465592420226092859LLU),
+ QU( 4196009278962570808LLU), QU( 3747816564473572224LLU),
+ QU(17941203486133732898LLU), QU( 2350310037040505198LLU),
+ QU( 5811779859134370113LLU), QU(10492109599506195126LLU),
+ QU( 7699650690179541274LLU), QU( 1954338494306022961LLU),
+ QU(14095816969027231152LLU), QU( 5841346919964852061LLU),
+ QU(14945969510148214735LLU), QU( 3680200305887550992LLU),
+ QU( 6218047466131695792LLU), QU( 8242165745175775096LLU),
+ QU(11021371934053307357LLU), QU( 1265099502753169797LLU),
+ QU( 4644347436111321718LLU), QU( 3609296916782832859LLU),
+ QU( 8109807992218521571LLU), QU(18387884215648662020LLU),
+ QU(14656324896296392902LLU), QU(17386819091238216751LLU),
+ QU(17788300878582317152LLU), QU( 7919446259742399591LLU),
+ QU( 4466613134576358004LLU), QU(12928181023667938509LLU),
+ QU(13147446154454932030LLU), QU(16552129038252734620LLU),
+ QU( 8395299403738822450LLU), QU(11313817655275361164LLU),
+ QU( 434258809499511718LLU), QU( 2074882104954788676LLU),
+ QU( 7929892178759395518LLU), QU( 9006461629105745388LLU),
+ QU( 5176475650000323086LLU), QU(11128357033468341069LLU),
+ QU(12026158851559118955LLU), QU(14699716249471156500LLU),
+ QU( 448982497120206757LLU), QU( 4156475356685519900LLU),
+ QU( 6063816103417215727LLU), QU(10073289387954971479LLU),
+ QU( 8174466846138590962LLU), QU( 2675777452363449006LLU),
+ QU( 9090685420572474281LLU), QU( 6659652652765562060LLU),
+ QU(12923120304018106621LLU), QU(11117480560334526775LLU),
+ QU( 937910473424587511LLU), QU( 1838692113502346645LLU),
+ QU(11133914074648726180LLU), QU( 7922600945143884053LLU),
+ QU(13435287702700959550LLU), QU( 5287964921251123332LLU),
+ QU(11354875374575318947LLU), QU(17955724760748238133LLU),
+ QU(13728617396297106512LLU), QU( 4107449660118101255LLU),
+ QU( 1210269794886589623LLU), QU(11408687205733456282LLU),
+ QU( 4538354710392677887LLU), QU(13566803319341319267LLU),
+ QU(17870798107734050771LLU), QU( 3354318982568089135LLU),
+ QU( 9034450839405133651LLU), QU(13087431795753424314LLU),
+ QU( 950333102820688239LLU), QU( 1968360654535604116LLU),
+ QU(16840551645563314995LLU), QU( 8867501803892924995LLU),
+ QU(11395388644490626845LLU), QU( 1529815836300732204LLU),
+ QU(13330848522996608842LLU), QU( 1813432878817504265LLU),
+ QU( 2336867432693429560LLU), QU(15192805445973385902LLU),
+ QU( 2528593071076407877LLU), QU( 128459777936689248LLU),
+ QU( 9976345382867214866LLU), QU( 6208885766767996043LLU),
+ QU(14982349522273141706LLU), QU( 3099654362410737822LLU),
+ QU(13776700761947297661LLU), QU( 8806185470684925550LLU),
+ QU( 8151717890410585321LLU), QU( 640860591588072925LLU),
+ QU(14592096303937307465LLU), QU( 9056472419613564846LLU),
+ QU(14861544647742266352LLU), QU(12703771500398470216LLU),
+ QU( 3142372800384138465LLU), QU( 6201105606917248196LLU),
+ QU(18337516409359270184LLU), QU(15042268695665115339LLU),
+ QU(15188246541383283846LLU), QU(12800028693090114519LLU),
+ QU( 5992859621101493472LLU), QU(18278043971816803521LLU),
+ QU( 9002773075219424560LLU), QU( 7325707116943598353LLU),
+ QU( 7930571931248040822LLU), QU( 5645275869617023448LLU),
+ QU( 7266107455295958487LLU), QU( 4363664528273524411LLU),
+ QU(14313875763787479809LLU), QU(17059695613553486802LLU),
+ QU( 9247761425889940932LLU), QU(13704726459237593128LLU),
+ QU( 2701312427328909832LLU), QU(17235532008287243115LLU),
+ QU(14093147761491729538LLU), QU( 6247352273768386516LLU),
+ QU( 8268710048153268415LLU), QU( 7985295214477182083LLU),
+ QU(15624495190888896807LLU), QU( 3772753430045262788LLU),
+ QU( 9133991620474991698LLU), QU( 5665791943316256028LLU),
+ QU( 7551996832462193473LLU), QU(13163729206798953877LLU),
+ QU( 9263532074153846374LLU), QU( 1015460703698618353LLU),
+ QU(17929874696989519390LLU), QU(18257884721466153847LLU),
+ QU(16271867543011222991LLU), QU( 3905971519021791941LLU),
+ QU(16814488397137052085LLU), QU( 1321197685504621613LLU),
+ QU( 2870359191894002181LLU), QU(14317282970323395450LLU),
+ QU(13663920845511074366LLU), QU( 2052463995796539594LLU),
+ QU(14126345686431444337LLU), QU( 1727572121947022534LLU),
+ QU(17793552254485594241LLU), QU( 6738857418849205750LLU),
+ QU( 1282987123157442952LLU), QU(16655480021581159251LLU),
+ QU( 6784587032080183866LLU), QU(14726758805359965162LLU),
+ QU( 7577995933961987349LLU), QU(12539609320311114036LLU),
+ QU(10789773033385439494LLU), QU( 8517001497411158227LLU),
+ QU(10075543932136339710LLU), QU(14838152340938811081LLU),
+ QU( 9560840631794044194LLU), QU(17445736541454117475LLU),
+ QU(10633026464336393186LLU), QU(15705729708242246293LLU),
+ QU( 1117517596891411098LLU), QU( 4305657943415886942LLU),
+ QU( 4948856840533979263LLU), QU(16071681989041789593LLU),
+ QU(13723031429272486527LLU), QU( 7639567622306509462LLU),
+ QU(12670424537483090390LLU), QU( 9715223453097197134LLU),
+ QU( 5457173389992686394LLU), QU( 289857129276135145LLU),
+ QU(17048610270521972512LLU), QU( 692768013309835485LLU),
+ QU(14823232360546632057LLU), QU(18218002361317895936LLU),
+ QU( 3281724260212650204LLU), QU(16453957266549513795LLU),
+ QU( 8592711109774511881LLU), QU( 929825123473369579LLU),
+ QU(15966784769764367791LLU), QU( 9627344291450607588LLU),
+ QU(10849555504977813287LLU), QU( 9234566913936339275LLU),
+ QU( 6413807690366911210LLU), QU(10862389016184219267LLU),
+ QU(13842504799335374048LLU), QU( 1531994113376881174LLU),
+ QU( 2081314867544364459LLU), QU(16430628791616959932LLU),
+ QU( 8314714038654394368LLU), QU( 9155473892098431813LLU),
+ QU(12577843786670475704LLU), QU( 4399161106452401017LLU),
+ QU( 1668083091682623186LLU), QU( 1741383777203714216LLU),
+ QU( 2162597285417794374LLU), QU(15841980159165218736LLU),
+ QU( 1971354603551467079LLU), QU( 1206714764913205968LLU),
+ QU( 4790860439591272330LLU), QU(14699375615594055799LLU),
+ QU( 8374423871657449988LLU), QU(10950685736472937738LLU),
+ QU( 697344331343267176LLU), QU(10084998763118059810LLU),
+ QU(12897369539795983124LLU), QU(12351260292144383605LLU),
+ QU( 1268810970176811234LLU), QU( 7406287800414582768LLU),
+ QU( 516169557043807831LLU), QU( 5077568278710520380LLU),
+ QU( 3828791738309039304LLU), QU( 7721974069946943610LLU),
+ QU( 3534670260981096460LLU), QU( 4865792189600584891LLU),
+ QU(16892578493734337298LLU), QU( 9161499464278042590LLU),
+ QU(11976149624067055931LLU), QU(13219479887277343990LLU),
+ QU(14161556738111500680LLU), QU(14670715255011223056LLU),
+ QU( 4671205678403576558LLU), QU(12633022931454259781LLU),
+ QU(14821376219869187646LLU), QU( 751181776484317028LLU),
+ QU( 2192211308839047070LLU), QU(11787306362361245189LLU),
+ QU(10672375120744095707LLU), QU( 4601972328345244467LLU),
+ QU(15457217788831125879LLU), QU( 8464345256775460809LLU),
+ QU(10191938789487159478LLU), QU( 6184348739615197613LLU),
+ QU(11425436778806882100LLU), QU( 2739227089124319793LLU),
+ QU( 461464518456000551LLU), QU( 4689850170029177442LLU),
+ QU( 6120307814374078625LLU), QU(11153579230681708671LLU),
+ QU( 7891721473905347926LLU), QU(10281646937824872400LLU),
+ QU( 3026099648191332248LLU), QU( 8666750296953273818LLU),
+ QU(14978499698844363232LLU), QU(13303395102890132065LLU),
+ QU( 8182358205292864080LLU), QU(10560547713972971291LLU),
+ QU(11981635489418959093LLU), QU( 3134621354935288409LLU),
+ QU(11580681977404383968LLU), QU(14205530317404088650LLU),
+ QU( 5997789011854923157LLU), QU(13659151593432238041LLU),
+ QU(11664332114338865086LLU), QU( 7490351383220929386LLU),
+ QU( 7189290499881530378LLU), QU(15039262734271020220LLU),
+ QU( 2057217285976980055LLU), QU( 555570804905355739LLU),
+ QU(11235311968348555110LLU), QU(13824557146269603217LLU),
+ QU(16906788840653099693LLU), QU( 7222878245455661677LLU),
+ QU( 5245139444332423756LLU), QU( 4723748462805674292LLU),
+ QU(12216509815698568612LLU), QU(17402362976648951187LLU),
+ QU(17389614836810366768LLU), QU( 4880936484146667711LLU),
+ QU( 9085007839292639880LLU), QU(13837353458498535449LLU),
+ QU(11914419854360366677LLU), QU(16595890135313864103LLU),
+ QU( 6313969847197627222LLU), QU(18296909792163910431LLU),
+ QU(10041780113382084042LLU), QU( 2499478551172884794LLU),
+ QU(11057894246241189489LLU), QU( 9742243032389068555LLU),
+ QU(12838934582673196228LLU), QU(13437023235248490367LLU),
+ QU(13372420669446163240LLU), QU( 6752564244716909224LLU),
+ QU( 7157333073400313737LLU), QU(12230281516370654308LLU),
+ QU( 1182884552219419117LLU), QU( 2955125381312499218LLU),
+ QU(10308827097079443249LLU), QU( 1337648572986534958LLU),
+ QU(16378788590020343939LLU), QU( 108619126514420935LLU),
+ QU( 3990981009621629188LLU), QU( 5460953070230946410LLU),
+ QU( 9703328329366531883LLU), QU(13166631489188077236LLU),
+ QU( 1104768831213675170LLU), QU( 3447930458553877908LLU),
+ QU( 8067172487769945676LLU), QU( 5445802098190775347LLU),
+ QU( 3244840981648973873LLU), QU(17314668322981950060LLU),
+ QU( 5006812527827763807LLU), QU(18158695070225526260LLU),
+ QU( 2824536478852417853LLU), QU(13974775809127519886LLU),
+ QU( 9814362769074067392LLU), QU(17276205156374862128LLU),
+ QU(11361680725379306967LLU), QU( 3422581970382012542LLU),
+ QU(11003189603753241266LLU), QU(11194292945277862261LLU),
+ QU( 6839623313908521348LLU), QU(11935326462707324634LLU),
+ QU( 1611456788685878444LLU), QU(13112620989475558907LLU),
+ QU( 517659108904450427LLU), QU(13558114318574407624LLU),
+ QU(15699089742731633077LLU), QU( 4988979278862685458LLU),
+ QU( 8111373583056521297LLU), QU( 3891258746615399627LLU),
+ QU( 8137298251469718086LLU), QU(12748663295624701649LLU),
+ QU( 4389835683495292062LLU), QU( 5775217872128831729LLU),
+ QU( 9462091896405534927LLU), QU( 8498124108820263989LLU),
+ QU( 8059131278842839525LLU), QU(10503167994254090892LLU),
+ QU(11613153541070396656LLU), QU(18069248738504647790LLU),
+ QU( 570657419109768508LLU), QU( 3950574167771159665LLU),
+ QU( 5514655599604313077LLU), QU( 2908460854428484165LLU),
+ QU(10777722615935663114LLU), QU(12007363304839279486LLU),
+ QU( 9800646187569484767LLU), QU( 8795423564889864287LLU),
+ QU(14257396680131028419LLU), QU( 6405465117315096498LLU),
+ QU( 7939411072208774878LLU), QU(17577572378528990006LLU),
+ QU(14785873806715994850LLU), QU(16770572680854747390LLU),
+ QU(18127549474419396481LLU), QU(11637013449455757750LLU),
+ QU(14371851933996761086LLU), QU( 3601181063650110280LLU),
+ QU( 4126442845019316144LLU), QU(10198287239244320669LLU),
+ QU(18000169628555379659LLU), QU(18392482400739978269LLU),
+ QU( 6219919037686919957LLU), QU( 3610085377719446052LLU),
+ QU( 2513925039981776336LLU), QU(16679413537926716955LLU),
+ QU(12903302131714909434LLU), QU( 5581145789762985009LLU),
+ QU(12325955044293303233LLU), QU(17216111180742141204LLU),
+ QU( 6321919595276545740LLU), QU( 3507521147216174501LLU),
+ QU( 9659194593319481840LLU), QU(11473976005975358326LLU),
+ QU(14742730101435987026LLU), QU( 492845897709954780LLU),
+ QU(16976371186162599676LLU), QU(17712703422837648655LLU),
+ QU( 9881254778587061697LLU), QU( 8413223156302299551LLU),
+ QU( 1563841828254089168LLU), QU( 9996032758786671975LLU),
+ QU( 138877700583772667LLU), QU(13003043368574995989LLU),
+ QU( 4390573668650456587LLU), QU( 8610287390568126755LLU),
+ QU(15126904974266642199LLU), QU( 6703637238986057662LLU),
+ QU( 2873075592956810157LLU), QU( 6035080933946049418LLU),
+ QU(13382846581202353014LLU), QU( 7303971031814642463LLU),
+ QU(18418024405307444267LLU), QU( 5847096731675404647LLU),
+ QU( 4035880699639842500LLU), QU(11525348625112218478LLU),
+ QU( 3041162365459574102LLU), QU( 2604734487727986558LLU),
+ QU(15526341771636983145LLU), QU(14556052310697370254LLU),
+ QU(12997787077930808155LLU), QU( 9601806501755554499LLU),
+ QU(11349677952521423389LLU), QU(14956777807644899350LLU),
+ QU(16559736957742852721LLU), QU(12360828274778140726LLU),
+ QU( 6685373272009662513LLU), QU(16932258748055324130LLU),
+ QU(15918051131954158508LLU), QU( 1692312913140790144LLU),
+ QU( 546653826801637367LLU), QU( 5341587076045986652LLU),
+ QU(14975057236342585662LLU), QU(12374976357340622412LLU),
+ QU(10328833995181940552LLU), QU(12831807101710443149LLU),
+ QU(10548514914382545716LLU), QU( 2217806727199715993LLU),
+ QU(12627067369242845138LLU), QU( 4598965364035438158LLU),
+ QU( 150923352751318171LLU), QU(14274109544442257283LLU),
+ QU( 4696661475093863031LLU), QU( 1505764114384654516LLU),
+ QU(10699185831891495147LLU), QU( 2392353847713620519LLU),
+ QU( 3652870166711788383LLU), QU( 8640653276221911108LLU),
+ QU( 3894077592275889704LLU), QU( 4918592872135964845LLU),
+ QU(16379121273281400789LLU), QU(12058465483591683656LLU),
+ QU(11250106829302924945LLU), QU( 1147537556296983005LLU),
+ QU( 6376342756004613268LLU), QU(14967128191709280506LLU),
+ QU(18007449949790627628LLU), QU( 9497178279316537841LLU),
+ QU( 7920174844809394893LLU), QU(10037752595255719907LLU),
+ QU(15875342784985217697LLU), QU(15311615921712850696LLU),
+ QU( 9552902652110992950LLU), QU(14054979450099721140LLU),
+ QU( 5998709773566417349LLU), QU(18027910339276320187LLU),
+ QU( 8223099053868585554LLU), QU( 7842270354824999767LLU),
+ QU( 4896315688770080292LLU), QU(12969320296569787895LLU),
+ QU( 2674321489185759961LLU), QU( 4053615936864718439LLU),
+ QU(11349775270588617578LLU), QU( 4743019256284553975LLU),
+ QU( 5602100217469723769LLU), QU(14398995691411527813LLU),
+ QU( 7412170493796825470LLU), QU( 836262406131744846LLU),
+ QU( 8231086633845153022LLU), QU( 5161377920438552287LLU),
+ QU( 8828731196169924949LLU), QU(16211142246465502680LLU),
+ QU( 3307990879253687818LLU), QU( 5193405406899782022LLU),
+ QU( 8510842117467566693LLU), QU( 6070955181022405365LLU),
+ QU(14482950231361409799LLU), QU(12585159371331138077LLU),
+ QU( 3511537678933588148LLU), QU( 2041849474531116417LLU),
+ QU(10944936685095345792LLU), QU(18303116923079107729LLU),
+ QU( 2720566371239725320LLU), QU( 4958672473562397622LLU),
+ QU( 3032326668253243412LLU), QU(13689418691726908338LLU),
+ QU( 1895205511728843996LLU), QU( 8146303515271990527LLU),
+ QU(16507343500056113480LLU), QU( 473996939105902919LLU),
+ QU( 9897686885246881481LLU), QU(14606433762712790575LLU),
+ QU( 6732796251605566368LLU), QU( 1399778120855368916LLU),
+ QU( 935023885182833777LLU), QU(16066282816186753477LLU),
+ QU( 7291270991820612055LLU), QU(17530230393129853844LLU),
+ QU(10223493623477451366LLU), QU(15841725630495676683LLU),
+ QU(17379567246435515824LLU), QU( 8588251429375561971LLU),
+ QU(18339511210887206423LLU), QU(17349587430725976100LLU),
+ QU(12244876521394838088LLU), QU( 6382187714147161259LLU),
+ QU(12335807181848950831LLU), QU(16948885622305460665LLU),
+ QU(13755097796371520506LLU), QU(14806740373324947801LLU),
+ QU( 4828699633859287703LLU), QU( 8209879281452301604LLU),
+ QU(12435716669553736437LLU), QU(13970976859588452131LLU),
+ QU( 6233960842566773148LLU), QU(12507096267900505759LLU),
+ QU( 1198713114381279421LLU), QU(14989862731124149015LLU),
+ QU(15932189508707978949LLU), QU( 2526406641432708722LLU),
+ QU( 29187427817271982LLU), QU( 1499802773054556353LLU),
+ QU(10816638187021897173LLU), QU( 5436139270839738132LLU),
+ QU( 6659882287036010082LLU), QU( 2154048955317173697LLU),
+ QU(10887317019333757642LLU), QU(16281091802634424955LLU),
+ QU(10754549879915384901LLU), QU(10760611745769249815LLU),
+ QU( 2161505946972504002LLU), QU( 5243132808986265107LLU),
+ QU(10129852179873415416LLU), QU( 710339480008649081LLU),
+ QU( 7802129453068808528LLU), QU(17967213567178907213LLU),
+ QU(15730859124668605599LLU), QU(13058356168962376502LLU),
+ QU( 3701224985413645909LLU), QU(14464065869149109264LLU),
+ QU( 9959272418844311646LLU), QU(10157426099515958752LLU),
+ QU(14013736814538268528LLU), QU(17797456992065653951LLU),
+ QU(17418878140257344806LLU), QU(15457429073540561521LLU),
+ QU( 2184426881360949378LLU), QU( 2062193041154712416LLU),
+ QU( 8553463347406931661LLU), QU( 4913057625202871854LLU),
+ QU( 2668943682126618425LLU), QU(17064444737891172288LLU),
+ QU( 4997115903913298637LLU), QU(12019402608892327416LLU),
+ QU(17603584559765897352LLU), QU(11367529582073647975LLU),
+ QU( 8211476043518436050LLU), QU( 8676849804070323674LLU),
+ QU(18431829230394475730LLU), QU(10490177861361247904LLU),
+ QU( 9508720602025651349LLU), QU( 7409627448555722700LLU),
+ QU( 5804047018862729008LLU), QU(11943858176893142594LLU),
+ QU(11908095418933847092LLU), QU( 5415449345715887652LLU),
+ QU( 1554022699166156407LLU), QU( 9073322106406017161LLU),
+ QU( 7080630967969047082LLU), QU(18049736940860732943LLU),
+ QU(12748714242594196794LLU), QU( 1226992415735156741LLU),
+ QU(17900981019609531193LLU), QU(11720739744008710999LLU),
+ QU( 3006400683394775434LLU), QU(11347974011751996028LLU),
+ QU( 3316999628257954608LLU), QU( 8384484563557639101LLU),
+ QU(18117794685961729767LLU), QU( 1900145025596618194LLU),
+ QU(17459527840632892676LLU), QU( 5634784101865710994LLU),
+ QU( 7918619300292897158LLU), QU( 3146577625026301350LLU),
+ QU( 9955212856499068767LLU), QU( 1873995843681746975LLU),
+ QU( 1561487759967972194LLU), QU( 8322718804375878474LLU),
+ QU(11300284215327028366LLU), QU( 4667391032508998982LLU),
+ QU( 9820104494306625580LLU), QU(17922397968599970610LLU),
+ QU( 1784690461886786712LLU), QU(14940365084341346821LLU),
+ QU( 5348719575594186181LLU), QU(10720419084507855261LLU),
+ QU(14210394354145143274LLU), QU( 2426468692164000131LLU),
+ QU(16271062114607059202LLU), QU(14851904092357070247LLU),
+ QU( 6524493015693121897LLU), QU( 9825473835127138531LLU),
+ QU(14222500616268569578LLU), QU(15521484052007487468LLU),
+ QU(14462579404124614699LLU), QU(11012375590820665520LLU),
+ QU(11625327350536084927LLU), QU(14452017765243785417LLU),
+ QU( 9989342263518766305LLU), QU( 3640105471101803790LLU),
+ QU( 4749866455897513242LLU), QU(13963064946736312044LLU),
+ QU(10007416591973223791LLU), QU(18314132234717431115LLU),
+ QU( 3286596588617483450LLU), QU( 7726163455370818765LLU),
+ QU( 7575454721115379328LLU), QU( 5308331576437663422LLU),
+ QU(18288821894903530934LLU), QU( 8028405805410554106LLU),
+ QU(15744019832103296628LLU), QU( 149765559630932100LLU),
+ QU( 6137705557200071977LLU), QU(14513416315434803615LLU),
+ QU(11665702820128984473LLU), QU( 218926670505601386LLU),
+ QU( 6868675028717769519LLU), QU(15282016569441512302LLU),
+ QU( 5707000497782960236LLU), QU( 6671120586555079567LLU),
+ QU( 2194098052618985448LLU), QU(16849577895477330978LLU),
+ QU(12957148471017466283LLU), QU( 1997805535404859393LLU),
+ QU( 1180721060263860490LLU), QU(13206391310193756958LLU),
+ QU(12980208674461861797LLU), QU( 3825967775058875366LLU),
+ QU(17543433670782042631LLU), QU( 1518339070120322730LLU),
+ QU(16344584340890991669LLU), QU( 2611327165318529819LLU),
+ QU(11265022723283422529LLU), QU( 4001552800373196817LLU),
+ QU(14509595890079346161LLU), QU( 3528717165416234562LLU),
+ QU(18153222571501914072LLU), QU( 9387182977209744425LLU),
+ QU(10064342315985580021LLU), QU(11373678413215253977LLU),
+ QU( 2308457853228798099LLU), QU( 9729042942839545302LLU),
+ QU( 7833785471140127746LLU), QU( 6351049900319844436LLU),
+ QU(14454610627133496067LLU), QU(12533175683634819111LLU),
+ QU(15570163926716513029LLU), QU(13356980519185762498LLU)
+};
+
+TEST_BEGIN(test_gen_rand_32)
+{
+ uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
+ uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
+ int i;
+ uint32_t r32;
+ sfmt_t *ctx;
+
+ assert_d_le(get_min_array_size32(), BLOCK_SIZE,
+ "Array size too small");
+ ctx = init_gen_rand(1234);
+ fill_array32(ctx, array32, BLOCK_SIZE);
+ fill_array32(ctx, array32_2, BLOCK_SIZE);
+ fini_gen_rand(ctx);
+
+ ctx = init_gen_rand(1234);
+ for (i = 0; i < BLOCK_SIZE; i++) {
+ if (i < COUNT_1) {
+ assert_u32_eq(array32[i], init_gen_rand_32_expected[i],
+ "Output mismatch for i=%d", i);
+ }
+ r32 = gen_rand32(ctx);
+ assert_u32_eq(r32, array32[i],
+ "Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32);
+ }
+ for (i = 0; i < COUNT_2; i++) {
+ r32 = gen_rand32(ctx);
+ assert_u32_eq(r32, array32_2[i],
+ "Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i],
+ r32);
+ }
+ fini_gen_rand(ctx);
+}
+TEST_END
+
+TEST_BEGIN(test_by_array_32)
+{
+ uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
+ uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
+ int i;
+ uint32_t ini[4] = {0x1234, 0x5678, 0x9abc, 0xdef0};
+ uint32_t r32;
+ sfmt_t *ctx;
+
+ assert_d_le(get_min_array_size32(), BLOCK_SIZE,
+ "Array size too small");
+ ctx = init_by_array(ini, 4);
+ fill_array32(ctx, array32, BLOCK_SIZE);
+ fill_array32(ctx, array32_2, BLOCK_SIZE);
+ fini_gen_rand(ctx);
+
+ ctx = init_by_array(ini, 4);
+ for (i = 0; i < BLOCK_SIZE; i++) {
+ if (i < COUNT_1) {
+ assert_u32_eq(array32[i], init_by_array_32_expected[i],
+ "Output mismatch for i=%d", i);
+ }
+ r32 = gen_rand32(ctx);
+ assert_u32_eq(r32, array32[i],
+ "Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32);
+ }
+ for (i = 0; i < COUNT_2; i++) {
+ r32 = gen_rand32(ctx);
+ assert_u32_eq(r32, array32_2[i],
+ "Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i],
+ r32);
+ }
+ fini_gen_rand(ctx);
+}
+TEST_END
+
+TEST_BEGIN(test_gen_rand_64)
+{
+ uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
+ uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
+ int i;
+ uint64_t r;
+ sfmt_t *ctx;
+
+ assert_d_le(get_min_array_size64(), BLOCK_SIZE64,
+ "Array size too small");
+ ctx = init_gen_rand(4321);
+ fill_array64(ctx, array64, BLOCK_SIZE64);
+ fill_array64(ctx, array64_2, BLOCK_SIZE64);
+ fini_gen_rand(ctx);
+
+ ctx = init_gen_rand(4321);
+ for (i = 0; i < BLOCK_SIZE64; i++) {
+ if (i < COUNT_1) {
+ assert_u64_eq(array64[i], init_gen_rand_64_expected[i],
+ "Output mismatch for i=%d", i);
+ }
+ r = gen_rand64(ctx);
+ assert_u64_eq(r, array64[i],
+ "Mismatch at array64[%d]=%"PRIx64", gen=%"PRIx64, i,
+ array64[i], r);
+ }
+ for (i = 0; i < COUNT_2; i++) {
+ r = gen_rand64(ctx);
+ assert_u64_eq(r, array64_2[i],
+ "Mismatch at array64_2[%d]=%"PRIx64" gen=%"PRIx64"", i,
+ array64_2[i], r);
+ }
+ fini_gen_rand(ctx);
+}
+TEST_END
+
+TEST_BEGIN(test_by_array_64)
+{
+ uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
+ uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
+ int i;
+ uint64_t r;
+ uint32_t ini[] = {5, 4, 3, 2, 1};
+ sfmt_t *ctx;
+
+ assert_d_le(get_min_array_size64(), BLOCK_SIZE64,
+ "Array size too small");
+ ctx = init_by_array(ini, 5);
+ fill_array64(ctx, array64, BLOCK_SIZE64);
+ fill_array64(ctx, array64_2, BLOCK_SIZE64);
+ fini_gen_rand(ctx);
+
+ ctx = init_by_array(ini, 5);
+ for (i = 0; i < BLOCK_SIZE64; i++) {
+ if (i < COUNT_1) {
+ assert_u64_eq(array64[i], init_by_array_64_expected[i],
+ "Output mismatch for i=%d", i);
+ }
+ r = gen_rand64(ctx);
+ assert_u64_eq(r, array64[i],
+ "Mismatch at array64[%d]=%"PRIx64" gen=%"PRIx64, i,
+ array64[i], r);
+ }
+ for (i = 0; i < COUNT_2; i++) {
+ r = gen_rand64(ctx);
+ assert_u64_eq(r, array64_2[i],
+ "Mismatch at array64_2[%d]=%"PRIx64" gen=%"PRIx64, i,
+ array64_2[i], r);
+ }
+ fini_gen_rand(ctx);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_gen_rand_32,
+ test_by_array_32,
+ test_gen_rand_64,
+ test_by_array_64));
+}
diff --git a/deps/jemalloc/test/bitmap.c b/deps/jemalloc/test/unit/bitmap.c
index b2cb63004..8086b8885 100644
--- a/deps/jemalloc/test/bitmap.c
+++ b/deps/jemalloc/test/unit/bitmap.c
@@ -1,5 +1,4 @@
-#define JEMALLOC_MANGLE
-#include "jemalloc_test.h"
+#include "test/jemalloc_test.h"
#if (LG_BITMAP_MAXBITS > 12)
# define MAXBITS 4500
@@ -7,21 +6,21 @@
# define MAXBITS (1U << LG_BITMAP_MAXBITS)
#endif
-static void
-test_bitmap_size(void)
+TEST_BEGIN(test_bitmap_size)
{
size_t i, prev_size;
prev_size = 0;
for (i = 1; i <= MAXBITS; i++) {
size_t size = bitmap_size(i);
- assert(size >= prev_size);
+ assert_true(size >= prev_size,
+ "Bitmap size is smaller than expected");
prev_size = size;
}
}
+TEST_END
-static void
-test_bitmap_init(void)
+TEST_BEGIN(test_bitmap_init)
{
size_t i;
@@ -34,16 +33,17 @@ test_bitmap_init(void)
bitmap_info_ngroups(&binfo));
bitmap_init(bitmap, &binfo);
- for (j = 0; j < i; j++)
- assert(bitmap_get(bitmap, &binfo, j) == false);
+ for (j = 0; j < i; j++) {
+ assert_false(bitmap_get(bitmap, &binfo, j),
+ "Bit should be unset");
+ }
free(bitmap);
-
}
}
}
+TEST_END
-static void
-test_bitmap_set(void)
+TEST_BEGIN(test_bitmap_set)
{
size_t i;
@@ -58,14 +58,15 @@ test_bitmap_set(void)
for (j = 0; j < i; j++)
bitmap_set(bitmap, &binfo, j);
- assert(bitmap_full(bitmap, &binfo));
+ assert_true(bitmap_full(bitmap, &binfo),
+ "All bits should be set");
free(bitmap);
}
}
}
+TEST_END
-static void
-test_bitmap_unset(void)
+TEST_BEGIN(test_bitmap_unset)
{
size_t i;
@@ -80,19 +81,21 @@ test_bitmap_unset(void)
for (j = 0; j < i; j++)
bitmap_set(bitmap, &binfo, j);
- assert(bitmap_full(bitmap, &binfo));
+ assert_true(bitmap_full(bitmap, &binfo),
+ "All bits should be set");
for (j = 0; j < i; j++)
bitmap_unset(bitmap, &binfo, j);
for (j = 0; j < i; j++)
bitmap_set(bitmap, &binfo, j);
- assert(bitmap_full(bitmap, &binfo));
+ assert_true(bitmap_full(bitmap, &binfo),
+ "All bits should be set");
free(bitmap);
}
}
}
+TEST_END
-static void
-test_bitmap_sfu(void)
+TEST_BEGIN(test_bitmap_sfu)
{
size_t i;
@@ -106,9 +109,13 @@ test_bitmap_sfu(void)
bitmap_init(bitmap, &binfo);
/* Iteratively set bits starting at the beginning. */
- for (j = 0; j < i; j++)
- assert(bitmap_sfu(bitmap, &binfo) == j);
- assert(bitmap_full(bitmap, &binfo));
+ for (j = 0; j < i; j++) {
+ assert_zd_eq(bitmap_sfu(bitmap, &binfo), j,
+ "First unset bit should be just after "
+ "previous first unset bit");
+ }
+ assert_true(bitmap_full(bitmap, &binfo),
+ "All bits should be set");
/*
* Iteratively unset bits starting at the end, and
@@ -116,10 +123,13 @@ test_bitmap_sfu(void)
*/
for (j = i - 1; j >= 0; j--) {
bitmap_unset(bitmap, &binfo, j);
- assert(bitmap_sfu(bitmap, &binfo) == j);
+ assert_zd_eq(bitmap_sfu(bitmap, &binfo), j,
+ "First unset bit should the bit previously "
+ "unset");
bitmap_unset(bitmap, &binfo, j);
}
- assert(bitmap_get(bitmap, &binfo, 0) == false);
+ assert_false(bitmap_get(bitmap, &binfo, 0),
+ "Bit should be unset");
/*
* Iteratively set bits starting at the beginning, and
@@ -127,27 +137,29 @@ test_bitmap_sfu(void)
*/
for (j = 1; j < i; j++) {
bitmap_set(bitmap, &binfo, j - 1);
- assert(bitmap_sfu(bitmap, &binfo) == j);
+ assert_zd_eq(bitmap_sfu(bitmap, &binfo), j,
+ "First unset bit should be just after the "
+ "bit previously set");
bitmap_unset(bitmap, &binfo, j);
}
- assert(bitmap_sfu(bitmap, &binfo) == i - 1);
- assert(bitmap_full(bitmap, &binfo));
+ assert_zd_eq(bitmap_sfu(bitmap, &binfo), i - 1,
+ "First unset bit should be the last bit");
+ assert_true(bitmap_full(bitmap, &binfo),
+ "All bits should be set");
free(bitmap);
}
}
}
+TEST_END
int
main(void)
{
- malloc_printf("Test begin\n");
-
- test_bitmap_size();
- test_bitmap_init();
- test_bitmap_set();
- test_bitmap_unset();
- test_bitmap_sfu();
- malloc_printf("Test end\n");
- return (0);
+ return (test(
+ test_bitmap_size,
+ test_bitmap_init,
+ test_bitmap_set,
+ test_bitmap_unset,
+ test_bitmap_sfu));
}
diff --git a/deps/jemalloc/test/unit/ckh.c b/deps/jemalloc/test/unit/ckh.c
new file mode 100644
index 000000000..b214c279a
--- /dev/null
+++ b/deps/jemalloc/test/unit/ckh.c
@@ -0,0 +1,206 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_new_delete)
+{
+ ckh_t ckh;
+
+ assert_false(ckh_new(&ckh, 2, ckh_string_hash, ckh_string_keycomp),
+ "Unexpected ckh_new() error");
+ ckh_delete(&ckh);
+
+ assert_false(ckh_new(&ckh, 3, ckh_pointer_hash, ckh_pointer_keycomp),
+ "Unexpected ckh_new() error");
+ ckh_delete(&ckh);
+}
+TEST_END
+
+TEST_BEGIN(test_count_insert_search_remove)
+{
+ ckh_t ckh;
+ const char *strs[] = {
+ "a string",
+ "A string",
+ "a string.",
+ "A string."
+ };
+ const char *missing = "A string not in the hash table.";
+ size_t i;
+
+ assert_false(ckh_new(&ckh, 2, ckh_string_hash, ckh_string_keycomp),
+ "Unexpected ckh_new() error");
+ assert_zu_eq(ckh_count(&ckh), 0,
+ "ckh_count() should return %zu, but it returned %zu", ZU(0),
+ ckh_count(&ckh));
+
+ /* Insert. */
+ for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
+ ckh_insert(&ckh, strs[i], strs[i]);
+ assert_zu_eq(ckh_count(&ckh), i+1,
+ "ckh_count() should return %zu, but it returned %zu", i+1,
+ ckh_count(&ckh));
+ }
+
+ /* Search. */
+ for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
+ union {
+ void *p;
+ const char *s;
+ } k, v;
+ void **kp, **vp;
+ const char *ks, *vs;
+
+ kp = (i & 1) ? &k.p : NULL;
+ vp = (i & 2) ? &v.p : NULL;
+ k.p = NULL;
+ v.p = NULL;
+ assert_false(ckh_search(&ckh, strs[i], kp, vp),
+ "Unexpected ckh_search() error");
+
+ ks = (i & 1) ? strs[i] : (const char *)NULL;
+ vs = (i & 2) ? strs[i] : (const char *)NULL;
+ assert_ptr_eq((void *)ks, (void *)k.s,
+ "Key mismatch, i=%zu", i);
+ assert_ptr_eq((void *)vs, (void *)v.s,
+ "Value mismatch, i=%zu", i);
+ }
+ assert_true(ckh_search(&ckh, missing, NULL, NULL),
+ "Unexpected ckh_search() success");
+
+ /* Remove. */
+ for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
+ union {
+ void *p;
+ const char *s;
+ } k, v;
+ void **kp, **vp;
+ const char *ks, *vs;
+
+ kp = (i & 1) ? &k.p : NULL;
+ vp = (i & 2) ? &v.p : NULL;
+ k.p = NULL;
+ v.p = NULL;
+ assert_false(ckh_remove(&ckh, strs[i], kp, vp),
+ "Unexpected ckh_remove() error");
+
+ ks = (i & 1) ? strs[i] : (const char *)NULL;
+ vs = (i & 2) ? strs[i] : (const char *)NULL;
+ assert_ptr_eq((void *)ks, (void *)k.s,
+ "Key mismatch, i=%zu", i);
+ assert_ptr_eq((void *)vs, (void *)v.s,
+ "Value mismatch, i=%zu", i);
+ assert_zu_eq(ckh_count(&ckh),
+ sizeof(strs)/sizeof(const char *) - i - 1,
+ "ckh_count() should return %zu, but it returned %zu",
+ sizeof(strs)/sizeof(const char *) - i - 1,
+ ckh_count(&ckh));
+ }
+
+ ckh_delete(&ckh);
+}
+TEST_END
+
+TEST_BEGIN(test_insert_iter_remove)
+{
+#define NITEMS ZU(1000)
+ ckh_t ckh;
+ void **p[NITEMS];
+ void *q, *r;
+ size_t i;
+
+ assert_false(ckh_new(&ckh, 2, ckh_pointer_hash, ckh_pointer_keycomp),
+ "Unexpected ckh_new() error");
+
+ for (i = 0; i < NITEMS; i++) {
+ p[i] = mallocx(i+1, 0);
+ assert_ptr_not_null(p[i], "Unexpected mallocx() failure");
+ }
+
+ for (i = 0; i < NITEMS; i++) {
+ size_t j;
+
+ for (j = i; j < NITEMS; j++) {
+ assert_false(ckh_insert(&ckh, p[j], p[j]),
+ "Unexpected ckh_insert() failure");
+ assert_false(ckh_search(&ckh, p[j], &q, &r),
+ "Unexpected ckh_search() failure");
+ assert_ptr_eq(p[j], q, "Key pointer mismatch");
+ assert_ptr_eq(p[j], r, "Value pointer mismatch");
+ }
+
+ assert_zu_eq(ckh_count(&ckh), NITEMS,
+ "ckh_count() should return %zu, but it returned %zu",
+ NITEMS, ckh_count(&ckh));
+
+ for (j = i + 1; j < NITEMS; j++) {
+ assert_false(ckh_search(&ckh, p[j], NULL, NULL),
+ "Unexpected ckh_search() failure");
+ assert_false(ckh_remove(&ckh, p[j], &q, &r),
+ "Unexpected ckh_remove() failure");
+ assert_ptr_eq(p[j], q, "Key pointer mismatch");
+ assert_ptr_eq(p[j], r, "Value pointer mismatch");
+ assert_true(ckh_search(&ckh, p[j], NULL, NULL),
+ "Unexpected ckh_search() success");
+ assert_true(ckh_remove(&ckh, p[j], &q, &r),
+ "Unexpected ckh_remove() success");
+ }
+
+ {
+ bool seen[NITEMS];
+ size_t tabind;
+
+ memset(seen, 0, sizeof(seen));
+
+ for (tabind = 0; ckh_iter(&ckh, &tabind, &q, &r) ==
+ false;) {
+ size_t k;
+
+ assert_ptr_eq(q, r, "Key and val not equal");
+
+ for (k = 0; k < NITEMS; k++) {
+ if (p[k] == q) {
+ assert_false(seen[k],
+ "Item %zu already seen", k);
+ seen[k] = true;
+ break;
+ }
+ }
+ }
+
+ for (j = 0; j < i + 1; j++)
+ assert_true(seen[j], "Item %zu not seen", j);
+ for (; j < NITEMS; j++)
+ assert_false(seen[j], "Item %zu seen", j);
+ }
+ }
+
+ for (i = 0; i < NITEMS; i++) {
+ assert_false(ckh_search(&ckh, p[i], NULL, NULL),
+ "Unexpected ckh_search() failure");
+ assert_false(ckh_remove(&ckh, p[i], &q, &r),
+ "Unexpected ckh_remove() failure");
+ assert_ptr_eq(p[i], q, "Key pointer mismatch");
+ assert_ptr_eq(p[i], r, "Value pointer mismatch");
+ assert_true(ckh_search(&ckh, p[i], NULL, NULL),
+ "Unexpected ckh_search() success");
+ assert_true(ckh_remove(&ckh, p[i], &q, &r),
+ "Unexpected ckh_remove() success");
+ dallocx(p[i], 0);
+ }
+
+ assert_zu_eq(ckh_count(&ckh), 0,
+ "ckh_count() should return %zu, but it returned %zu", ZU(0),
+ ckh_count(&ckh));
+ ckh_delete(&ckh);
+#undef NITEMS
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_new_delete,
+ test_count_insert_search_remove,
+ test_insert_iter_remove));
+}
diff --git a/deps/jemalloc/test/unit/hash.c b/deps/jemalloc/test/unit/hash.c
new file mode 100644
index 000000000..abb394ac0
--- /dev/null
+++ b/deps/jemalloc/test/unit/hash.c
@@ -0,0 +1,171 @@
+/*
+ * This file is based on code that is part of SMHasher
+ * (https://code.google.com/p/smhasher/), and is subject to the MIT license
+ * (http://www.opensource.org/licenses/mit-license.php). Both email addresses
+ * associated with the source code's revision history belong to Austin Appleby,
+ * and the revision history ranges from 2010 to 2012. Therefore the copyright
+ * and license are here taken to be:
+ *
+ * Copyright (c) 2010-2012 Austin Appleby
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "test/jemalloc_test.h"
+
+typedef enum {
+ hash_variant_x86_32,
+ hash_variant_x86_128,
+ hash_variant_x64_128
+} hash_variant_t;
+
+static size_t
+hash_variant_bits(hash_variant_t variant)
+{
+
+ switch (variant) {
+ case hash_variant_x86_32: return (32);
+ case hash_variant_x86_128: return (128);
+ case hash_variant_x64_128: return (128);
+ default: not_reached();
+ }
+}
+
+static const char *
+hash_variant_string(hash_variant_t variant)
+{
+
+ switch (variant) {
+ case hash_variant_x86_32: return ("hash_x86_32");
+ case hash_variant_x86_128: return ("hash_x86_128");
+ case hash_variant_x64_128: return ("hash_x64_128");
+ default: not_reached();
+ }
+}
+
+static void
+hash_variant_verify(hash_variant_t variant)
+{
+ const size_t hashbytes = hash_variant_bits(variant) / 8;
+ uint8_t key[256];
+ uint8_t hashes[hashbytes * 256];
+ uint8_t final[hashbytes];
+ unsigned i;
+ uint32_t computed, expected;
+
+ memset(key, 0, sizeof(key));
+ memset(hashes, 0, sizeof(hashes));
+ memset(final, 0, sizeof(final));
+
+ /*
+ * Hash keys of the form {0}, {0,1}, {0,1,2}, ..., {0,1,...,255} as the
+ * seed.
+ */
+ for (i = 0; i < 256; i++) {
+ key[i] = (uint8_t)i;
+ switch (variant) {
+ case hash_variant_x86_32: {
+ uint32_t out;
+ out = hash_x86_32(key, i, 256-i);
+ memcpy(&hashes[i*hashbytes], &out, hashbytes);
+ break;
+ } case hash_variant_x86_128: {
+ uint64_t out[2];
+ hash_x86_128(key, i, 256-i, out);
+ memcpy(&hashes[i*hashbytes], out, hashbytes);
+ break;
+ } case hash_variant_x64_128: {
+ uint64_t out[2];
+ hash_x64_128(key, i, 256-i, out);
+ memcpy(&hashes[i*hashbytes], out, hashbytes);
+ break;
+ } default: not_reached();
+ }
+ }
+
+ /* Hash the result array. */
+ switch (variant) {
+ case hash_variant_x86_32: {
+ uint32_t out = hash_x86_32(hashes, hashbytes*256, 0);
+ memcpy(final, &out, sizeof(out));
+ break;
+ } case hash_variant_x86_128: {
+ uint64_t out[2];
+ hash_x86_128(hashes, hashbytes*256, 0, out);
+ memcpy(final, out, sizeof(out));
+ break;
+ } case hash_variant_x64_128: {
+ uint64_t out[2];
+ hash_x64_128(hashes, hashbytes*256, 0, out);
+ memcpy(final, out, sizeof(out));
+ break;
+ } default: not_reached();
+ }
+
+ computed = (final[0] << 0) | (final[1] << 8) | (final[2] << 16) |
+ (final[3] << 24);
+
+ switch (variant) {
+#ifdef JEMALLOC_BIG_ENDIAN
+ case hash_variant_x86_32: expected = 0x6213303eU; break;
+ case hash_variant_x86_128: expected = 0x266820caU; break;
+ case hash_variant_x64_128: expected = 0xcc622b6fU; break;
+#else
+ case hash_variant_x86_32: expected = 0xb0f57ee3U; break;
+ case hash_variant_x86_128: expected = 0xb3ece62aU; break;
+ case hash_variant_x64_128: expected = 0x6384ba69U; break;
+#endif
+ default: not_reached();
+ }
+
+ assert_u32_eq(computed, expected,
+ "Hash mismatch for %s(): expected %#x but got %#x",
+ hash_variant_string(variant), expected, computed);
+}
+
+TEST_BEGIN(test_hash_x86_32)
+{
+
+ hash_variant_verify(hash_variant_x86_32);
+}
+TEST_END
+
+TEST_BEGIN(test_hash_x86_128)
+{
+
+ hash_variant_verify(hash_variant_x86_128);
+}
+TEST_END
+
+TEST_BEGIN(test_hash_x64_128)
+{
+
+ hash_variant_verify(hash_variant_x64_128);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_hash_x86_32,
+ test_hash_x86_128,
+ test_hash_x64_128));
+}
diff --git a/deps/jemalloc/test/unit/junk.c b/deps/jemalloc/test/unit/junk.c
new file mode 100644
index 000000000..85bbf9e2b
--- /dev/null
+++ b/deps/jemalloc/test/unit/junk.c
@@ -0,0 +1,222 @@
+#include "test/jemalloc_test.h"
+
+#ifdef JEMALLOC_FILL
+const char *malloc_conf =
+ "abort:false,junk:true,zero:false,redzone:true,quarantine:0";
+#endif
+
+static arena_dalloc_junk_small_t *arena_dalloc_junk_small_orig;
+static arena_dalloc_junk_large_t *arena_dalloc_junk_large_orig;
+static huge_dalloc_junk_t *huge_dalloc_junk_orig;
+static void *most_recently_junked;
+
+static void
+arena_dalloc_junk_small_intercept(void *ptr, arena_bin_info_t *bin_info)
+{
+ size_t i;
+
+ arena_dalloc_junk_small_orig(ptr, bin_info);
+ for (i = 0; i < bin_info->reg_size; i++) {
+ assert_c_eq(((char *)ptr)[i], 0x5a,
+ "Missing junk fill for byte %zu/%zu of deallocated region",
+ i, bin_info->reg_size);
+ }
+ most_recently_junked = ptr;
+}
+
+static void
+arena_dalloc_junk_large_intercept(void *ptr, size_t usize)
+{
+ size_t i;
+
+ arena_dalloc_junk_large_orig(ptr, usize);
+ for (i = 0; i < usize; i++) {
+ assert_c_eq(((char *)ptr)[i], 0x5a,
+ "Missing junk fill for byte %zu/%zu of deallocated region",
+ i, usize);
+ }
+ most_recently_junked = ptr;
+}
+
+static void
+huge_dalloc_junk_intercept(void *ptr, size_t usize)
+{
+
+ huge_dalloc_junk_orig(ptr, usize);
+ /*
+ * The conditions under which junk filling actually occurs are nuanced
+ * enough that it doesn't make sense to duplicate the decision logic in
+ * test code, so don't actually check that the region is junk-filled.
+ */
+ most_recently_junked = ptr;
+}
+
+static void
+test_junk(size_t sz_min, size_t sz_max)
+{
+ char *s;
+ size_t sz_prev, sz, i;
+
+ arena_dalloc_junk_small_orig = arena_dalloc_junk_small;
+ arena_dalloc_junk_small = arena_dalloc_junk_small_intercept;
+ arena_dalloc_junk_large_orig = arena_dalloc_junk_large;
+ arena_dalloc_junk_large = arena_dalloc_junk_large_intercept;
+ huge_dalloc_junk_orig = huge_dalloc_junk;
+ huge_dalloc_junk = huge_dalloc_junk_intercept;
+
+ sz_prev = 0;
+ s = (char *)mallocx(sz_min, 0);
+ assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
+
+ for (sz = sallocx(s, 0); sz <= sz_max;
+ sz_prev = sz, sz = sallocx(s, 0)) {
+ if (sz_prev > 0) {
+ assert_c_eq(s[0], 'a',
+ "Previously allocated byte %zu/%zu is corrupted",
+ ZU(0), sz_prev);
+ assert_c_eq(s[sz_prev-1], 'a',
+ "Previously allocated byte %zu/%zu is corrupted",
+ sz_prev-1, sz_prev);
+ }
+
+ for (i = sz_prev; i < sz; i++) {
+ assert_c_eq(s[i], 0xa5,
+ "Newly allocated byte %zu/%zu isn't junk-filled",
+ i, sz);
+ s[i] = 'a';
+ }
+
+ if (xallocx(s, sz+1, 0, 0) == sz) {
+ void *junked = (void *)s;
+
+ s = (char *)rallocx(s, sz+1, 0);
+ assert_ptr_not_null((void *)s,
+ "Unexpected rallocx() failure");
+ if (!config_mremap || sz+1 <= arena_maxclass) {
+ assert_ptr_eq(most_recently_junked, junked,
+ "Expected region of size %zu to be "
+ "junk-filled",
+ sz);
+ }
+ }
+ }
+
+ dallocx(s, 0);
+ assert_ptr_eq(most_recently_junked, (void *)s,
+ "Expected region of size %zu to be junk-filled", sz);
+
+ arena_dalloc_junk_small = arena_dalloc_junk_small_orig;
+ arena_dalloc_junk_large = arena_dalloc_junk_large_orig;
+ huge_dalloc_junk = huge_dalloc_junk_orig;
+}
+
+TEST_BEGIN(test_junk_small)
+{
+
+ test_skip_if(!config_fill);
+ test_junk(1, SMALL_MAXCLASS-1);
+}
+TEST_END
+
+TEST_BEGIN(test_junk_large)
+{
+
+ test_skip_if(!config_fill);
+ test_junk(SMALL_MAXCLASS+1, arena_maxclass);
+}
+TEST_END
+
+TEST_BEGIN(test_junk_huge)
+{
+
+ test_skip_if(!config_fill);
+ test_junk(arena_maxclass+1, chunksize*2);
+}
+TEST_END
+
+arena_ralloc_junk_large_t *arena_ralloc_junk_large_orig;
+static void *most_recently_trimmed;
+
+static void
+arena_ralloc_junk_large_intercept(void *ptr, size_t old_usize, size_t usize)
+{
+
+ arena_ralloc_junk_large_orig(ptr, old_usize, usize);
+ assert_zu_eq(old_usize, arena_maxclass, "Unexpected old_usize");
+ assert_zu_eq(usize, arena_maxclass-PAGE, "Unexpected usize");
+ most_recently_trimmed = ptr;
+}
+
+TEST_BEGIN(test_junk_large_ralloc_shrink)
+{
+ void *p1, *p2;
+
+ p1 = mallocx(arena_maxclass, 0);
+ assert_ptr_not_null(p1, "Unexpected mallocx() failure");
+
+ arena_ralloc_junk_large_orig = arena_ralloc_junk_large;
+ arena_ralloc_junk_large = arena_ralloc_junk_large_intercept;
+
+ p2 = rallocx(p1, arena_maxclass-PAGE, 0);
+ assert_ptr_eq(p1, p2, "Unexpected move during shrink");
+
+ arena_ralloc_junk_large = arena_ralloc_junk_large_orig;
+
+ assert_ptr_eq(most_recently_trimmed, p1,
+ "Expected trimmed portion of region to be junk-filled");
+}
+TEST_END
+
+static bool detected_redzone_corruption;
+
+static void
+arena_redzone_corruption_replacement(void *ptr, size_t usize, bool after,
+ size_t offset, uint8_t byte)
+{
+
+ detected_redzone_corruption = true;
+}
+
+TEST_BEGIN(test_junk_redzone)
+{
+ char *s;
+ arena_redzone_corruption_t *arena_redzone_corruption_orig;
+
+ test_skip_if(!config_fill);
+
+ arena_redzone_corruption_orig = arena_redzone_corruption;
+ arena_redzone_corruption = arena_redzone_corruption_replacement;
+
+ /* Test underflow. */
+ detected_redzone_corruption = false;
+ s = (char *)mallocx(1, 0);
+ assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
+ s[-1] = 0xbb;
+ dallocx(s, 0);
+ assert_true(detected_redzone_corruption,
+ "Did not detect redzone corruption");
+
+ /* Test overflow. */
+ detected_redzone_corruption = false;
+ s = (char *)mallocx(1, 0);
+ assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
+ s[sallocx(s, 0)] = 0xbb;
+ dallocx(s, 0);
+ assert_true(detected_redzone_corruption,
+ "Did not detect redzone corruption");
+
+ arena_redzone_corruption = arena_redzone_corruption_orig;
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_junk_small,
+ test_junk_large,
+ test_junk_huge,
+ test_junk_large_ralloc_shrink,
+ test_junk_redzone));
+}
diff --git a/deps/jemalloc/test/unit/mallctl.c b/deps/jemalloc/test/unit/mallctl.c
new file mode 100644
index 000000000..31fb81057
--- /dev/null
+++ b/deps/jemalloc/test/unit/mallctl.c
@@ -0,0 +1,415 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_mallctl_errors)
+{
+ uint64_t epoch;
+ size_t sz;
+
+ assert_d_eq(mallctl("no_such_name", NULL, NULL, NULL, 0), ENOENT,
+ "mallctl() should return ENOENT for non-existent names");
+
+ assert_d_eq(mallctl("version", NULL, NULL, "0.0.0", strlen("0.0.0")),
+ EPERM, "mallctl() should return EPERM on attempt to write "
+ "read-only value");
+
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)-1),
+ EINVAL, "mallctl() should return EINVAL for input size mismatch");
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)+1),
+ EINVAL, "mallctl() should return EINVAL for input size mismatch");
+
+ sz = sizeof(epoch)-1;
+ assert_d_eq(mallctl("epoch", &epoch, &sz, NULL, 0), EINVAL,
+ "mallctl() should return EINVAL for output size mismatch");
+ sz = sizeof(epoch)+1;
+ assert_d_eq(mallctl("epoch", &epoch, &sz, NULL, 0), EINVAL,
+ "mallctl() should return EINVAL for output size mismatch");
+}
+TEST_END
+
+TEST_BEGIN(test_mallctlnametomib_errors)
+{
+ size_t mib[1];
+ size_t miblen;
+
+ miblen = sizeof(mib)/sizeof(size_t);
+ assert_d_eq(mallctlnametomib("no_such_name", mib, &miblen), ENOENT,
+ "mallctlnametomib() should return ENOENT for non-existent names");
+}
+TEST_END
+
+TEST_BEGIN(test_mallctlbymib_errors)
+{
+ uint64_t epoch;
+ size_t sz;
+ size_t mib[1];
+ size_t miblen;
+
+ miblen = sizeof(mib)/sizeof(size_t);
+ assert_d_eq(mallctlnametomib("version", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, "0.0.0",
+ strlen("0.0.0")), EPERM, "mallctl() should return EPERM on "
+ "attempt to write read-only value");
+
+ miblen = sizeof(mib)/sizeof(size_t);
+ assert_d_eq(mallctlnametomib("epoch", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &epoch,
+ sizeof(epoch)-1), EINVAL,
+ "mallctlbymib() should return EINVAL for input size mismatch");
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &epoch,
+ sizeof(epoch)+1), EINVAL,
+ "mallctlbymib() should return EINVAL for input size mismatch");
+
+ sz = sizeof(epoch)-1;
+ assert_d_eq(mallctlbymib(mib, miblen, &epoch, &sz, NULL, 0), EINVAL,
+ "mallctlbymib() should return EINVAL for output size mismatch");
+ sz = sizeof(epoch)+1;
+ assert_d_eq(mallctlbymib(mib, miblen, &epoch, &sz, NULL, 0), EINVAL,
+ "mallctlbymib() should return EINVAL for output size mismatch");
+}
+TEST_END
+
+TEST_BEGIN(test_mallctl_read_write)
+{
+ uint64_t old_epoch, new_epoch;
+ size_t sz = sizeof(old_epoch);
+
+ /* Blind. */
+ assert_d_eq(mallctl("epoch", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
+
+ /* Read. */
+ assert_d_eq(mallctl("epoch", &old_epoch, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
+
+ /* Write. */
+ assert_d_eq(mallctl("epoch", NULL, NULL, &new_epoch, sizeof(new_epoch)),
+ 0, "Unexpected mallctl() failure");
+ assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
+
+ /* Read+write. */
+ assert_d_eq(mallctl("epoch", &old_epoch, &sz, &new_epoch,
+ sizeof(new_epoch)), 0, "Unexpected mallctl() failure");
+ assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
+}
+TEST_END
+
+TEST_BEGIN(test_mallctlnametomib_short_mib)
+{
+ size_t mib[4];
+ size_t miblen;
+
+ miblen = 3;
+ mib[3] = 42;
+ assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ assert_zu_eq(miblen, 3, "Unexpected mib output length");
+ assert_zu_eq(mib[3], 42,
+ "mallctlnametomib() wrote past the end of the input mib");
+}
+TEST_END
+
+TEST_BEGIN(test_mallctl_config)
+{
+
+#define TEST_MALLCTL_CONFIG(config) do { \
+ bool oldval; \
+ size_t sz = sizeof(oldval); \
+ assert_d_eq(mallctl("config."#config, &oldval, &sz, NULL, 0), \
+ 0, "Unexpected mallctl() failure"); \
+ assert_b_eq(oldval, config_##config, "Incorrect config value"); \
+ assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \
+} while (0)
+
+ TEST_MALLCTL_CONFIG(debug);
+ TEST_MALLCTL_CONFIG(dss);
+ TEST_MALLCTL_CONFIG(fill);
+ TEST_MALLCTL_CONFIG(lazy_lock);
+ TEST_MALLCTL_CONFIG(mremap);
+ TEST_MALLCTL_CONFIG(munmap);
+ TEST_MALLCTL_CONFIG(prof);
+ TEST_MALLCTL_CONFIG(prof_libgcc);
+ TEST_MALLCTL_CONFIG(prof_libunwind);
+ TEST_MALLCTL_CONFIG(stats);
+ TEST_MALLCTL_CONFIG(tcache);
+ TEST_MALLCTL_CONFIG(tls);
+ TEST_MALLCTL_CONFIG(utrace);
+ TEST_MALLCTL_CONFIG(valgrind);
+ TEST_MALLCTL_CONFIG(xmalloc);
+
+#undef TEST_MALLCTL_CONFIG
+}
+TEST_END
+
+TEST_BEGIN(test_mallctl_opt)
+{
+ bool config_always = true;
+
+#define TEST_MALLCTL_OPT(t, opt, config) do { \
+ t oldval; \
+ size_t sz = sizeof(oldval); \
+ int expected = config_##config ? 0 : ENOENT; \
+ int result = mallctl("opt."#opt, &oldval, &sz, NULL, 0); \
+ assert_d_eq(result, expected, \
+ "Unexpected mallctl() result for opt."#opt); \
+ assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \
+} while (0)
+
+ TEST_MALLCTL_OPT(bool, abort, always);
+ TEST_MALLCTL_OPT(size_t, lg_chunk, always);
+ TEST_MALLCTL_OPT(const char *, dss, always);
+ TEST_MALLCTL_OPT(size_t, narenas, always);
+ TEST_MALLCTL_OPT(ssize_t, lg_dirty_mult, always);
+ TEST_MALLCTL_OPT(bool, stats_print, always);
+ TEST_MALLCTL_OPT(bool, junk, fill);
+ TEST_MALLCTL_OPT(size_t, quarantine, fill);
+ TEST_MALLCTL_OPT(bool, redzone, fill);
+ TEST_MALLCTL_OPT(bool, zero, fill);
+ TEST_MALLCTL_OPT(bool, utrace, utrace);
+ TEST_MALLCTL_OPT(bool, valgrind, valgrind);
+ TEST_MALLCTL_OPT(bool, xmalloc, xmalloc);
+ TEST_MALLCTL_OPT(bool, tcache, tcache);
+ TEST_MALLCTL_OPT(size_t, lg_tcache_max, tcache);
+ TEST_MALLCTL_OPT(bool, prof, prof);
+ TEST_MALLCTL_OPT(const char *, prof_prefix, prof);
+ TEST_MALLCTL_OPT(bool, prof_active, prof);
+ TEST_MALLCTL_OPT(ssize_t, lg_prof_sample, prof);
+ TEST_MALLCTL_OPT(bool, prof_accum, prof);
+ TEST_MALLCTL_OPT(ssize_t, lg_prof_interval, prof);
+ TEST_MALLCTL_OPT(bool, prof_gdump, prof);
+ TEST_MALLCTL_OPT(bool, prof_final, prof);
+ TEST_MALLCTL_OPT(bool, prof_leak, prof);
+
+#undef TEST_MALLCTL_OPT
+}
+TEST_END
+
+TEST_BEGIN(test_manpage_example)
+{
+ unsigned nbins, i;
+ size_t mib[4];
+ size_t len, miblen;
+
+ len = sizeof(nbins);
+ assert_d_eq(mallctl("arenas.nbins", &nbins, &len, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ miblen = 4;
+ assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ for (i = 0; i < nbins; i++) {
+ size_t bin_size;
+
+ mib[2] = i;
+ len = sizeof(bin_size);
+ assert_d_eq(mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0),
+ 0, "Unexpected mallctlbymib() failure");
+ /* Do something with bin_size... */
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_thread_arena)
+{
+ unsigned arena_old, arena_new, narenas;
+ size_t sz = sizeof(unsigned);
+
+ assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ assert_u_eq(narenas, opt_narenas, "Number of arenas incorrect");
+ arena_new = narenas - 1;
+ assert_d_eq(mallctl("thread.arena", &arena_old, &sz, &arena_new,
+ sizeof(unsigned)), 0, "Unexpected mallctl() failure");
+ arena_new = 0;
+ assert_d_eq(mallctl("thread.arena", &arena_old, &sz, &arena_new,
+ sizeof(unsigned)), 0, "Unexpected mallctl() failure");
+}
+TEST_END
+
+TEST_BEGIN(test_arena_i_purge)
+{
+ unsigned narenas;
+ size_t sz = sizeof(unsigned);
+ size_t mib[3];
+ size_t miblen = 3;
+
+ assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ mib[1] = narenas;
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctlbymib() failure");
+}
+TEST_END
+
+TEST_BEGIN(test_arena_i_dss)
+{
+ const char *dss_prec_old, *dss_prec_new;
+ size_t sz = sizeof(dss_prec_old);
+
+ dss_prec_new = "primary";
+ assert_d_eq(mallctl("arena.0.dss", &dss_prec_old, &sz, &dss_prec_new,
+ sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure");
+ assert_str_ne(dss_prec_old, "primary",
+ "Unexpected default for dss precedence");
+
+ assert_d_eq(mallctl("arena.0.dss", &dss_prec_new, &sz, &dss_prec_old,
+ sizeof(dss_prec_old)), 0, "Unexpected mallctl() failure");
+}
+TEST_END
+
+TEST_BEGIN(test_arenas_purge)
+{
+ unsigned arena = 0;
+
+ assert_d_eq(mallctl("arenas.purge", NULL, NULL, &arena, sizeof(arena)),
+ 0, "Unexpected mallctl() failure");
+
+ assert_d_eq(mallctl("arenas.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+}
+TEST_END
+
+TEST_BEGIN(test_arenas_initialized)
+{
+ unsigned narenas;
+ size_t sz = sizeof(narenas);
+
+ assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ {
+ bool initialized[narenas];
+
+ sz = narenas * sizeof(bool);
+ assert_d_eq(mallctl("arenas.initialized", initialized, &sz,
+ NULL, 0), 0, "Unexpected mallctl() failure");
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_arenas_constants)
+{
+
+#define TEST_ARENAS_CONSTANT(t, name, expected) do { \
+ t name; \
+ size_t sz = sizeof(t); \
+ assert_d_eq(mallctl("arenas."#name, &name, &sz, NULL, 0), 0, \
+ "Unexpected mallctl() failure"); \
+ assert_zu_eq(name, expected, "Incorrect "#name" size"); \
+} while (0)
+
+ TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM);
+ TEST_ARENAS_CONSTANT(size_t, page, PAGE);
+ TEST_ARENAS_CONSTANT(unsigned, nbins, NBINS);
+ TEST_ARENAS_CONSTANT(size_t, nlruns, nlclasses);
+
+#undef TEST_ARENAS_CONSTANT
+}
+TEST_END
+
+TEST_BEGIN(test_arenas_bin_constants)
+{
+
+#define TEST_ARENAS_BIN_CONSTANT(t, name, expected) do { \
+ t name; \
+ size_t sz = sizeof(t); \
+ assert_d_eq(mallctl("arenas.bin.0."#name, &name, &sz, NULL, 0), \
+ 0, "Unexpected mallctl() failure"); \
+ assert_zu_eq(name, expected, "Incorrect "#name" size"); \
+} while (0)
+
+ TEST_ARENAS_BIN_CONSTANT(size_t, size, arena_bin_info[0].reg_size);
+ TEST_ARENAS_BIN_CONSTANT(uint32_t, nregs, arena_bin_info[0].nregs);
+ TEST_ARENAS_BIN_CONSTANT(size_t, run_size, arena_bin_info[0].run_size);
+
+#undef TEST_ARENAS_BIN_CONSTANT
+}
+TEST_END
+
+TEST_BEGIN(test_arenas_lrun_constants)
+{
+
+#define TEST_ARENAS_LRUN_CONSTANT(t, name, expected) do { \
+ t name; \
+ size_t sz = sizeof(t); \
+ assert_d_eq(mallctl("arenas.lrun.0."#name, &name, &sz, NULL, \
+ 0), 0, "Unexpected mallctl() failure"); \
+ assert_zu_eq(name, expected, "Incorrect "#name" size"); \
+} while (0)
+
+ TEST_ARENAS_LRUN_CONSTANT(size_t, size, (1 << LG_PAGE));
+
+#undef TEST_ARENAS_LRUN_CONSTANT
+}
+TEST_END
+
+TEST_BEGIN(test_arenas_extend)
+{
+ unsigned narenas_before, arena, narenas_after;
+ size_t sz = sizeof(unsigned);
+
+ assert_d_eq(mallctl("arenas.narenas", &narenas_before, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("arenas.extend", &arena, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("arenas.narenas", &narenas_after, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ assert_u_eq(narenas_before+1, narenas_after,
+ "Unexpected number of arenas before versus after extension");
+ assert_u_eq(arena, narenas_after-1, "Unexpected arena index");
+}
+TEST_END
+
+TEST_BEGIN(test_stats_arenas)
+{
+
+#define TEST_STATS_ARENAS(t, name) do { \
+ t name; \
+ size_t sz = sizeof(t); \
+ assert_d_eq(mallctl("stats.arenas.0."#name, &name, &sz, NULL, \
+ 0), 0, "Unexpected mallctl() failure"); \
+} while (0)
+
+ TEST_STATS_ARENAS(const char *, dss);
+ TEST_STATS_ARENAS(unsigned, nthreads);
+ TEST_STATS_ARENAS(size_t, pactive);
+ TEST_STATS_ARENAS(size_t, pdirty);
+
+#undef TEST_STATS_ARENAS
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_mallctl_errors,
+ test_mallctlnametomib_errors,
+ test_mallctlbymib_errors,
+ test_mallctl_read_write,
+ test_mallctlnametomib_short_mib,
+ test_mallctl_config,
+ test_mallctl_opt,
+ test_manpage_example,
+ test_thread_arena,
+ test_arena_i_purge,
+ test_arena_i_dss,
+ test_arenas_purge,
+ test_arenas_initialized,
+ test_arenas_constants,
+ test_arenas_bin_constants,
+ test_arenas_lrun_constants,
+ test_arenas_extend,
+ test_stats_arenas));
+}
diff --git a/deps/jemalloc/test/unit/math.c b/deps/jemalloc/test/unit/math.c
new file mode 100644
index 000000000..a1b288ea1
--- /dev/null
+++ b/deps/jemalloc/test/unit/math.c
@@ -0,0 +1,388 @@
+#include "test/jemalloc_test.h"
+
+#define MAX_REL_ERR 1.0e-9
+#define MAX_ABS_ERR 1.0e-9
+
+static bool
+double_eq_rel(double a, double b, double max_rel_err, double max_abs_err)
+{
+ double rel_err;
+
+ if (fabs(a - b) < max_abs_err)
+ return (true);
+ rel_err = (fabs(b) > fabs(a)) ? fabs((a-b)/b) : fabs((a-b)/a);
+ return (rel_err < max_rel_err);
+}
+
+static uint64_t
+factorial(unsigned x)
+{
+ uint64_t ret = 1;
+ unsigned i;
+
+ for (i = 2; i <= x; i++)
+ ret *= (uint64_t)i;
+
+ return (ret);
+}
+
+TEST_BEGIN(test_ln_gamma_factorial)
+{
+ unsigned x;
+
+ /* exp(ln_gamma(x)) == (x-1)! for integer x. */
+ for (x = 1; x <= 21; x++) {
+ assert_true(double_eq_rel(exp(ln_gamma(x)),
+ (double)factorial(x-1), MAX_REL_ERR, MAX_ABS_ERR),
+ "Incorrect factorial result for x=%u", x);
+ }
+}
+TEST_END
+
+/* Expected ln_gamma([0.0..100.0] increment=0.25). */
+static const double ln_gamma_misc_expected[] = {
+ INFINITY,
+ 1.28802252469807743, 0.57236494292470008, 0.20328095143129538,
+ 0.00000000000000000, -0.09827183642181320, -0.12078223763524518,
+ -0.08440112102048555, 0.00000000000000000, 0.12487171489239651,
+ 0.28468287047291918, 0.47521466691493719, 0.69314718055994529,
+ 0.93580193110872523, 1.20097360234707429, 1.48681557859341718,
+ 1.79175946922805496, 2.11445692745037128, 2.45373657084244234,
+ 2.80857141857573644, 3.17805383034794575, 3.56137591038669710,
+ 3.95781396761871651, 4.36671603662228680, 4.78749174278204581,
+ 5.21960398699022932, 5.66256205985714178, 6.11591589143154568,
+ 6.57925121201010121, 7.05218545073853953, 7.53436423675873268,
+ 8.02545839631598312, 8.52516136106541467, 9.03318691960512332,
+ 9.54926725730099690, 10.07315123968123949, 10.60460290274525086,
+ 11.14340011995171231, 11.68933342079726856, 12.24220494005076176,
+ 12.80182748008146909, 13.36802367147604720, 13.94062521940376342,
+ 14.51947222506051816, 15.10441257307551943, 15.69530137706046524,
+ 16.29200047656724237, 16.89437797963419285, 17.50230784587389010,
+ 18.11566950571089407, 18.73434751193644843, 19.35823122022435427,
+ 19.98721449566188468, 20.62119544270163018, 21.26007615624470048,
+ 21.90376249182879320, 22.55216385312342098, 23.20519299513386002,
+ 23.86276584168908954, 24.52480131594137802, 25.19122118273868338,
+ 25.86194990184851861, 26.53691449111561340, 27.21604439872720604,
+ 27.89927138384089389, 28.58652940490193828, 29.27775451504081516,
+ 29.97288476399884871, 30.67186010608067548, 31.37462231367769050,
+ 32.08111489594735843, 32.79128302226991565, 33.50507345013689076,
+ 34.22243445715505317, 34.94331577687681545, 35.66766853819134298,
+ 36.39544520803305261, 37.12659953718355865, 37.86108650896109395,
+ 38.59886229060776230, 39.33988418719949465, 40.08411059791735198,
+ 40.83150097453079752, 41.58201578195490100, 42.33561646075348506,
+ 43.09226539146988699, 43.85192586067515208, 44.61456202863158893,
+ 45.38013889847690052, 46.14862228684032885, 46.91997879580877395,
+ 47.69417578616628361, 48.47118135183522014, 49.25096429545256882,
+ 50.03349410501914463, 50.81874093156324790, 51.60667556776436982,
+ 52.39726942748592364, 53.19049452616926743, 53.98632346204390586,
+ 54.78472939811231157, 55.58568604486942633, 56.38916764371992940,
+ 57.19514895105859864, 58.00360522298051080, 58.81451220059079787,
+ 59.62784609588432261, 60.44358357816834371, 61.26170176100199427,
+ 62.08217818962842927, 62.90499082887649962, 63.73011805151035958,
+ 64.55753862700632340, 65.38723171073768015, 66.21917683354901385,
+ 67.05335389170279825, 67.88974313718154008, 68.72832516833013017,
+ 69.56908092082363737, 70.41199165894616385, 71.25703896716800045,
+ 72.10420474200799390, 72.95347118416940191, 73.80482079093779646,
+ 74.65823634883015814, 75.51370092648485866, 76.37119786778275454,
+ 77.23071078519033961, 78.09222355331530707, 78.95572030266725960,
+ 79.82118541361435859, 80.68860351052903468, 81.55795945611502873,
+ 82.42923834590904164, 83.30242550295004378, 84.17750647261028973,
+ 85.05446701758152983, 85.93329311301090456, 86.81397094178107920,
+ 87.69648688992882057, 88.58082754219766741, 89.46697967771913795,
+ 90.35493026581838194, 91.24466646193963015, 92.13617560368709292,
+ 93.02944520697742803, 93.92446296229978486, 94.82121673107967297,
+ 95.71969454214321615, 96.61988458827809723, 97.52177522288820910,
+ 98.42535495673848800, 99.33061245478741341, 100.23753653310367895,
+ 101.14611615586458981, 102.05634043243354370, 102.96819861451382394,
+ 103.88168009337621811, 104.79677439715833032, 105.71347118823287303,
+ 106.63176026064346047, 107.55163153760463501, 108.47307506906540198,
+ 109.39608102933323153, 110.32063971475740516, 111.24674154146920557,
+ 112.17437704317786995, 113.10353686902013237, 114.03421178146170689,
+ 114.96639265424990128, 115.90007047041454769, 116.83523632031698014,
+ 117.77188139974506953, 118.70999700805310795, 119.64957454634490830,
+ 120.59060551569974962, 121.53308151543865279, 122.47699424143097247,
+ 123.42233548443955726, 124.36909712850338394, 125.31727114935689826,
+ 126.26684961288492559, 127.21782467361175861, 128.17018857322420899,
+ 129.12393363912724453, 130.07905228303084755, 131.03553699956862033,
+ 131.99338036494577864, 132.95257503561629164, 133.91311374698926784,
+ 134.87498931216194364, 135.83819462068046846, 136.80272263732638294,
+ 137.76856640092901785, 138.73571902320256299, 139.70417368760718091,
+ 140.67392364823425055, 141.64496222871400732, 142.61728282114600574,
+ 143.59087888505104047, 144.56574394634486680, 145.54187159633210058,
+ 146.51925549072063859, 147.49788934865566148, 148.47776695177302031,
+ 149.45888214327129617, 150.44122882700193600, 151.42480096657754984,
+ 152.40959258449737490, 153.39559776128982094, 154.38281063467164245,
+ 155.37122539872302696, 156.36083630307879844, 157.35163765213474107,
+ 158.34362380426921391, 159.33678917107920370, 160.33112821663092973,
+ 161.32663545672428995, 162.32330545817117695, 163.32113283808695314,
+ 164.32011226319519892, 165.32023844914485267, 166.32150615984036790,
+ 167.32391020678358018, 168.32744544842768164, 169.33210678954270634,
+ 170.33788918059275375, 171.34478761712384198, 172.35279713916281707,
+ 173.36191283062726143, 174.37212981874515094, 175.38344327348534080,
+ 176.39584840699734514, 177.40934047306160437, 178.42391476654847793,
+ 179.43956662288721304, 180.45629141754378111, 181.47408456550741107,
+ 182.49294152078630304, 183.51285777591152737, 184.53382886144947861,
+ 185.55585034552262869, 186.57891783333786861, 187.60302696672312095,
+ 188.62817342367162610, 189.65435291789341932, 190.68156119837468054,
+ 191.70979404894376330, 192.73904728784492590, 193.76931676731820176,
+ 194.80059837318714244, 195.83288802445184729, 196.86618167288995096,
+ 197.90047530266301123, 198.93576492992946214, 199.97204660246373464,
+ 201.00931639928148797, 202.04757043027063901, 203.08680483582807597,
+ 204.12701578650228385, 205.16819948264117102, 206.21035215404597807,
+ 207.25347005962987623, 208.29754948708190909, 209.34258675253678916,
+ 210.38857820024875878, 211.43552020227099320, 212.48340915813977858,
+ 213.53224149456323744, 214.58201366511514152, 215.63272214993284592,
+ 216.68436345542014010, 217.73693411395422004, 218.79043068359703739,
+ 219.84484974781133815, 220.90018791517996988, 221.95644181913033322,
+ 223.01360811766215875, 224.07168349307951871, 225.13066465172661879,
+ 226.19054832372759734, 227.25133126272962159, 228.31301024565024704,
+ 229.37558207242807384, 230.43904356577689896, 231.50339157094342113,
+ 232.56862295546847008, 233.63473460895144740, 234.70172344281823484,
+ 235.76958639009222907, 236.83832040516844586, 237.90792246359117712,
+ 238.97838956183431947, 240.04971871708477238, 241.12190696702904802,
+ 242.19495136964280846, 243.26884900298270509, 244.34359696498191283,
+ 245.41919237324782443, 246.49563236486270057, 247.57291409618682110,
+ 248.65103474266476269, 249.72999149863338175, 250.80978157713354904,
+ 251.89040220972316320, 252.97185064629374551, 254.05412415488834199,
+ 255.13722002152300661, 256.22113555000953511, 257.30586806178126835,
+ 258.39141489572085675, 259.47777340799029844, 260.56494097186322279,
+ 261.65291497755913497, 262.74169283208021852, 263.83127195904967266,
+ 264.92164979855277807, 266.01282380697938379, 267.10479145686849733,
+ 268.19755023675537586, 269.29109765101975427, 270.38543121973674488,
+ 271.48054847852881721, 272.57644697842033565, 273.67312428569374561,
+ 274.77057798174683967, 275.86880566295326389, 276.96780494052313770,
+ 278.06757344036617496, 279.16810880295668085, 280.26940868320008349,
+ 281.37147075030043197, 282.47429268763045229, 283.57787219260217171,
+ 284.68220697654078322, 285.78729476455760050, 286.89313329542699194,
+ 287.99972032146268930, 289.10705360839756395, 290.21513093526289140,
+ 291.32395009427028754, 292.43350889069523646, 293.54380514276073200,
+ 294.65483668152336350, 295.76660135076059532, 296.87909700685889902,
+ 297.99232151870342022, 299.10627276756946458, 300.22094864701409733,
+ 301.33634706277030091, 302.45246593264130297, 303.56930318639643929,
+ 304.68685676566872189, 305.80512462385280514, 306.92410472600477078,
+ 308.04379504874236773, 309.16419358014690033, 310.28529831966631036,
+ 311.40710727801865687, 312.52961847709792664, 313.65282994987899201,
+ 314.77673974032603610, 315.90134590329950015, 317.02664650446632777,
+ 318.15263962020929966, 319.27932333753892635, 320.40669575400545455,
+ 321.53475497761127144, 322.66349912672620803, 323.79292633000159185,
+ 324.92303472628691452, 326.05382246454587403, 327.18528770377525916,
+ 328.31742861292224234, 329.45024337080525356, 330.58373016603343331,
+ 331.71788719692847280, 332.85271267144611329, 333.98820480709991898,
+ 335.12436183088397001, 336.26118197919845443, 337.39866349777429377,
+ 338.53680464159958774, 339.67560367484657036, 340.81505887079896411,
+ 341.95516851178109619, 343.09593088908627578, 344.23734430290727460,
+ 345.37940706226686416, 346.52211748494903532, 347.66547389743118401,
+ 348.80947463481720661, 349.95411804077025408, 351.09940246744753267,
+ 352.24532627543504759, 353.39188783368263103, 354.53908551944078908,
+ 355.68691771819692349, 356.83538282361303118, 357.98447923746385868,
+ 359.13420536957539753
+};
+
+TEST_BEGIN(test_ln_gamma_misc)
+{
+ unsigned i;
+
+ for (i = 1; i < sizeof(ln_gamma_misc_expected)/sizeof(double); i++) {
+ double x = (double)i * 0.25;
+ assert_true(double_eq_rel(ln_gamma(x),
+ ln_gamma_misc_expected[i], MAX_REL_ERR, MAX_ABS_ERR),
+ "Incorrect ln_gamma result for i=%u", i);
+ }
+}
+TEST_END
+
+/* Expected pt_norm([0.01..0.99] increment=0.01). */
+static const double pt_norm_expected[] = {
+ -INFINITY,
+ -2.32634787404084076, -2.05374891063182252, -1.88079360815125085,
+ -1.75068607125216946, -1.64485362695147264, -1.55477359459685305,
+ -1.47579102817917063, -1.40507156030963221, -1.34075503369021654,
+ -1.28155156554460081, -1.22652812003661049, -1.17498679206608991,
+ -1.12639112903880045, -1.08031934081495606, -1.03643338949378938,
+ -0.99445788320975281, -0.95416525314619416, -0.91536508784281390,
+ -0.87789629505122846, -0.84162123357291418, -0.80642124701824025,
+ -0.77219321418868492, -0.73884684918521371, -0.70630256284008752,
+ -0.67448975019608171, -0.64334540539291685, -0.61281299101662701,
+ -0.58284150727121620, -0.55338471955567281, -0.52440051270804067,
+ -0.49585034734745320, -0.46769879911450812, -0.43991316567323380,
+ -0.41246312944140462, -0.38532046640756751, -0.35845879325119373,
+ -0.33185334643681652, -0.30548078809939738, -0.27931903444745404,
+ -0.25334710313579978, -0.22754497664114931, -0.20189347914185077,
+ -0.17637416478086135, -0.15096921549677725, -0.12566134685507399,
+ -0.10043372051146975, -0.07526986209982976, -0.05015358346473352,
+ -0.02506890825871106, 0.00000000000000000, 0.02506890825871106,
+ 0.05015358346473366, 0.07526986209982990, 0.10043372051146990,
+ 0.12566134685507413, 0.15096921549677739, 0.17637416478086146,
+ 0.20189347914185105, 0.22754497664114931, 0.25334710313579978,
+ 0.27931903444745404, 0.30548078809939738, 0.33185334643681652,
+ 0.35845879325119373, 0.38532046640756762, 0.41246312944140484,
+ 0.43991316567323391, 0.46769879911450835, 0.49585034734745348,
+ 0.52440051270804111, 0.55338471955567303, 0.58284150727121620,
+ 0.61281299101662701, 0.64334540539291685, 0.67448975019608171,
+ 0.70630256284008752, 0.73884684918521371, 0.77219321418868492,
+ 0.80642124701824036, 0.84162123357291441, 0.87789629505122879,
+ 0.91536508784281423, 0.95416525314619460, 0.99445788320975348,
+ 1.03643338949378938, 1.08031934081495606, 1.12639112903880045,
+ 1.17498679206608991, 1.22652812003661049, 1.28155156554460081,
+ 1.34075503369021654, 1.40507156030963265, 1.47579102817917085,
+ 1.55477359459685394, 1.64485362695147308, 1.75068607125217102,
+ 1.88079360815125041, 2.05374891063182208, 2.32634787404084076
+};
+
+TEST_BEGIN(test_pt_norm)
+{
+ unsigned i;
+
+ for (i = 1; i < sizeof(pt_norm_expected)/sizeof(double); i++) {
+ double p = (double)i * 0.01;
+ assert_true(double_eq_rel(pt_norm(p), pt_norm_expected[i],
+ MAX_REL_ERR, MAX_ABS_ERR),
+ "Incorrect pt_norm result for i=%u", i);
+ }
+}
+TEST_END
+
+/*
+ * Expected pt_chi2(p=[0.01..0.99] increment=0.07,
+ * df={0.1, 1.1, 10.1, 100.1, 1000.1}).
+ */
+static const double pt_chi2_df[] = {0.1, 1.1, 10.1, 100.1, 1000.1};
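+/*
+ * Each 15-element block below corresponds to one df value above, with
+ * p = 0.01, 0.08, ..., 0.99 within the block.
+ */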
+static const double pt_chi2_expected[] = {
+ 1.168926411457320e-40, 1.347680397072034e-22, 3.886980416666260e-17,
+ 8.245951724356564e-14, 2.068936347497604e-11, 1.562561743309233e-09,
+ 5.459543043426564e-08, 1.114775688149252e-06, 1.532101202364371e-05,
+ 1.553884683726585e-04, 1.239396954915939e-03, 8.153872320255721e-03,
+ 4.631183739647523e-02, 2.473187311701327e-01, 2.175254800183617e+00,
+
+ 0.0003729887888876379, 0.0164409238228929513, 0.0521523015190650113,
+ 0.1064701372271216612, 0.1800913735793082115, 0.2748704281195626931,
+ 0.3939246282787986497, 0.5420727552260817816, 0.7267265822221973259,
+ 0.9596554296000253670, 1.2607440376386165326, 1.6671185084541604304,
+ 2.2604828984738705167, 3.2868613342148607082, 6.9298574921692139839,
+
+ 2.606673548632508, 4.602913725294877, 5.646152813924212,
+ 6.488971315540869, 7.249823275816285, 7.977314231410841,
+ 8.700354939944047, 9.441728024225892, 10.224338321374127,
+ 11.076435368801061, 12.039320937038386, 13.183878752697167,
+ 14.657791935084575, 16.885728216339373, 23.361991680031817,
+
+ 70.14844087392152, 80.92379498849355, 85.53325420085891,
+ 88.94433120715347, 91.83732712857017, 94.46719943606301,
+ 96.96896479994635, 99.43412843510363, 101.94074719829733,
+ 104.57228644307247, 107.43900093448734, 110.71844673417287,
+ 114.76616819871325, 120.57422505959563, 135.92318818757556,
+
+ 899.0072447849649, 937.9271278858220, 953.8117189560207,
+ 965.3079371501154, 974.8974061207954, 983.4936235182347,
+ 991.5691170518946, 999.4334123954690, 1007.3391826856553,
+ 1015.5445154999951, 1024.3777075619569, 1034.3538789836223,
+ 1046.4872561869577, 1063.5717461999654, 1107.0741966053859
+};
+
+TEST_BEGIN(test_pt_chi2)
+{
+ unsigned i, j;
+ unsigned e = 0;
+
+ for (i = 0; i < sizeof(pt_chi2_df)/sizeof(double); i++) {
+ double df = pt_chi2_df[i];
+ double ln_gamma_df = ln_gamma(df * 0.5);
+ for (j = 1; j < 100; j += 7) {
+ double p = (double)j * 0.01;
+ assert_true(double_eq_rel(pt_chi2(p, df, ln_gamma_df),
+ pt_chi2_expected[e], MAX_REL_ERR, MAX_ABS_ERR),
+ "Incorrect pt_chi2 result for i=%u, j=%u", i, j);
+ e++;
+ }
+ }
+}
+TEST_END
+
+/*
+ * Expected pt_gamma(p=[0.01..0.99] increment=0.07,
+ * shape=[0.5..3.0] increment=0.5).
+ */
+static const double pt_gamma_shape[] = {0.5, 1.0, 1.5, 2.0, 2.5, 3.0};
+static const double pt_gamma_expected[] = {
+ 7.854392895485103e-05, 5.043466107888016e-03, 1.788288957794883e-02,
+ 3.900956150232906e-02, 6.913847560638034e-02, 1.093710833465766e-01,
+ 1.613412523825817e-01, 2.274682115597864e-01, 3.114117323127083e-01,
+ 4.189466220207417e-01, 5.598106789059246e-01, 7.521856146202706e-01,
+ 1.036125427911119e+00, 1.532450860038180e+00, 3.317448300510606e+00,
+
+ 0.01005033585350144, 0.08338160893905107, 0.16251892949777497,
+ 0.24846135929849966, 0.34249030894677596, 0.44628710262841947,
+ 0.56211891815354142, 0.69314718055994529, 0.84397007029452920,
+ 1.02165124753198167, 1.23787435600161766, 1.51412773262977574,
+ 1.89711998488588196, 2.52572864430825783, 4.60517018598809091,
+
+ 0.05741590094955853, 0.24747378084860744, 0.39888572212236084,
+ 0.54394139997444901, 0.69048812513915159, 0.84311389861296104,
+ 1.00580622221479898, 1.18298694218766931, 1.38038096305861213,
+ 1.60627736383027453, 1.87396970522337947, 2.20749220408081070,
+ 2.65852391865854942, 3.37934630984842244, 5.67243336507218476,
+
+ 0.1485547402532659, 0.4657458011640391, 0.6832386130709406,
+ 0.8794297834672100, 1.0700752852474524, 1.2629614217350744,
+ 1.4638400448580779, 1.6783469900166610, 1.9132338090606940,
+ 2.1778589228618777, 2.4868823970010991, 2.8664695666264195,
+ 3.3724415436062114, 4.1682658512758071, 6.6383520679938108,
+
+ 0.2771490383641385, 0.7195001279643727, 0.9969081732265243,
+ 1.2383497880608061, 1.4675206597269927, 1.6953064251816552,
+ 1.9291243435606809, 2.1757300955477641, 2.4428032131216391,
+ 2.7406534569230616, 3.0851445039665513, 3.5043101122033367,
+ 4.0575997065264637, 4.9182956424675286, 7.5431362346944937,
+
+ 0.4360451650782932, 0.9983600902486267, 1.3306365880734528,
+ 1.6129750834753802, 1.8767241606994294, 2.1357032436097660,
+ 2.3988853336865565, 2.6740603137235603, 2.9697561737517959,
+ 3.2971457713883265, 3.6731795898504660, 4.1275751617770631,
+ 4.7230515633946677, 5.6417477865306020, 8.4059469148854635
+};
+
+TEST_BEGIN(test_pt_gamma_shape)
+{
+ unsigned i, j;
+ unsigned e = 0;
+
+ for (i = 0; i < sizeof(pt_gamma_shape)/sizeof(double); i++) {
+ double shape = pt_gamma_shape[i];
+ double ln_gamma_shape = ln_gamma(shape);
+ for (j = 1; j < 100; j += 7) {
+ double p = (double)j * 0.01;
+ assert_true(double_eq_rel(pt_gamma(p, shape, 1.0,
+ ln_gamma_shape), pt_gamma_expected[e], MAX_REL_ERR,
+ MAX_ABS_ERR),
+ "Incorrect pt_gamma result for i=%u, j=%u", i, j);
+ e++;
+ }
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_pt_gamma_scale)
+{
+ double shape = 1.0;
+ double ln_gamma_shape = ln_gamma(shape);
+
+ assert_true(double_eq_rel(
+ pt_gamma(0.5, shape, 1.0, ln_gamma_shape) * 10.0,
+ pt_gamma(0.5, shape, 10.0, ln_gamma_shape), MAX_REL_ERR,
+ MAX_ABS_ERR),
+ "Scale should be trivially equivalent to external multiplication");
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_ln_gamma_factorial,
+ test_ln_gamma_misc,
+ test_pt_norm,
+ test_pt_chi2,
+ test_pt_gamma_shape,
+ test_pt_gamma_scale));
+}
diff --git a/deps/jemalloc/test/unit/mq.c b/deps/jemalloc/test/unit/mq.c
new file mode 100644
index 000000000..f57e96af1
--- /dev/null
+++ b/deps/jemalloc/test/unit/mq.c
@@ -0,0 +1,92 @@
+#include "test/jemalloc_test.h"
+
+#define NSENDERS 3
+#define NMSGS 100000
+
+typedef struct mq_msg_s mq_msg_t;
+struct mq_msg_s {
+ mq_msg(mq_msg_t) link;
+};
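+/*
+ * Generate a static message queue implementation (mq_t) whose messages are
+ * mq_msg_t structures linked via their link fields.
+ */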
+mq_gen(static, mq_, mq_t, mq_msg_t, link)
+
+TEST_BEGIN(test_mq_basic)
+{
+ mq_t mq;
+ mq_msg_t msg;
+
+ assert_false(mq_init(&mq), "Unexpected mq_init() failure");
+ assert_u_eq(mq_count(&mq), 0, "mq should be empty");
+ assert_ptr_null(mq_tryget(&mq),
+ "mq_tryget() should fail when the queue is empty");
+
+ mq_put(&mq, &msg);
+ assert_u_eq(mq_count(&mq), 1, "mq should contain one message");
+ assert_ptr_eq(mq_tryget(&mq), &msg, "mq_tryget() should return msg");
+
+ mq_put(&mq, &msg);
+ assert_ptr_eq(mq_get(&mq), &msg, "mq_get() should return msg");
+
+ mq_fini(&mq);
+}
+TEST_END
+
+static void *
+thd_receiver_start(void *arg)
+{
+ mq_t *mq = (mq_t *)arg;
+ unsigned i;
+
+ for (i = 0; i < (NSENDERS * NMSGS); i++) {
+ mq_msg_t *msg = mq_get(mq);
+ assert_ptr_not_null(msg, "mq_get() should never return NULL");
+ dallocx(msg, 0);
+ }
+ return (NULL);
+}
+
+static void *
+thd_sender_start(void *arg)
+{
+ mq_t *mq = (mq_t *)arg;
+ unsigned i;
+
+ for (i = 0; i < NMSGS; i++) {
+ mq_msg_t *msg;
+ void *p;
+ p = mallocx(sizeof(mq_msg_t), 0);
+ assert_ptr_not_null(p, "Unexpected allocm() failure");
+ msg = (mq_msg_t *)p;
+ mq_put(mq, msg);
+ }
+ return (NULL);
+}
+
+TEST_BEGIN(test_mq_threaded)
+{
+ mq_t mq;
+ thd_t receiver;
+ thd_t senders[NSENDERS];
+ unsigned i;
+
+ assert_false(mq_init(&mq), "Unexpected mq_init() failure");
+
+ thd_create(&receiver, thd_receiver_start, (void *)&mq);
+ for (i = 0; i < NSENDERS; i++)
+ thd_create(&senders[i], thd_sender_start, (void *)&mq);
+
+ thd_join(receiver, NULL);
+ for (i = 0; i < NSENDERS; i++)
+ thd_join(senders[i], NULL);
+
+ mq_fini(&mq);
+}
+TEST_END
+
+int
+main(void)
+{
+ return (test(
+ test_mq_basic,
+ test_mq_threaded));
+}
+
diff --git a/deps/jemalloc/test/unit/mtx.c b/deps/jemalloc/test/unit/mtx.c
new file mode 100644
index 000000000..96ff69486
--- /dev/null
+++ b/deps/jemalloc/test/unit/mtx.c
@@ -0,0 +1,60 @@
+#include "test/jemalloc_test.h"
+
+#define NTHREADS 2
+#define NINCRS 2000000
+
+TEST_BEGIN(test_mtx_basic)
+{
+ mtx_t mtx;
+
+ assert_false(mtx_init(&mtx), "Unexpected mtx_init() failure");
+ mtx_lock(&mtx);
+ mtx_unlock(&mtx);
+ mtx_fini(&mtx);
+}
+TEST_END
+
+typedef struct {
+ mtx_t mtx;
+ unsigned x;
+} thd_start_arg_t;
+
+static void *
+thd_start(void *varg)
+{
+ thd_start_arg_t *arg = (thd_start_arg_t *)varg;
+ unsigned i;
+
+ for (i = 0; i < NINCRS; i++) {
+ mtx_lock(&arg->mtx);
+ arg->x++;
+ mtx_unlock(&arg->mtx);
+ }
+ return (NULL);
+}
+
+TEST_BEGIN(test_mtx_race)
+{
+ thd_start_arg_t arg;
+ thd_t thds[NTHREADS];
+ unsigned i;
+
+ assert_false(mtx_init(&arg.mtx), "Unexpected mtx_init() failure");
+ arg.x = 0;
+ for (i = 0; i < NTHREADS; i++)
+ thd_create(&thds[i], thd_start, (void *)&arg);
+ for (i = 0; i < NTHREADS; i++)
+ thd_join(thds[i], NULL);
+ assert_u_eq(arg.x, NTHREADS * NINCRS,
+ "Race-related counter corruption");
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_mtx_basic,
+ test_mtx_race));
+}
diff --git a/deps/jemalloc/test/unit/prof_accum.c b/deps/jemalloc/test/unit/prof_accum.c
new file mode 100644
index 000000000..050a8a7ee
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_accum.c
@@ -0,0 +1,86 @@
+#include "prof_accum.h"
+
+#ifdef JEMALLOC_PROF
+const char *malloc_conf =
+ "prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0";
+#endif
+
+static int
+prof_dump_open_intercept(bool propagate_err, const char *filename)
+{
+ int fd;
+
+ fd = open("/dev/null", O_WRONLY);
+ assert_d_ne(fd, -1, "Unexpected open() failure");
+
+ return (fd);
+}
+
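+/*
+ * Map each (thread, iteration) pair to a unique bits value so that every
+ * allocation comes from a distinct backtrace.
+ */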
+static void *
+alloc_from_permuted_backtrace(unsigned thd_ind, unsigned iteration)
+{
+
+ return (alloc_0(thd_ind*NALLOCS_PER_THREAD + iteration));
+}
+
+static void *
+thd_start(void *varg)
+{
+ unsigned thd_ind = *(unsigned *)varg;
+ size_t bt_count_prev, bt_count;
+ unsigned i_prev, i;
+
+ i_prev = 0;
+ bt_count_prev = 0;
+ for (i = 0; i < NALLOCS_PER_THREAD; i++) {
+ void *p = alloc_from_permuted_backtrace(thd_ind, i);
+ dallocx(p, 0);
+ if (i % DUMP_INTERVAL == 0) {
+ assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
+ 0, "Unexpected error while dumping heap profile");
+ }
+
+ if (i % BT_COUNT_CHECK_INTERVAL == 0 ||
+ i+1 == NALLOCS_PER_THREAD) {
+ bt_count = prof_bt_count();
+ assert_zu_le(bt_count_prev+(i-i_prev), bt_count,
+ "Expected larger backtrace count increase");
+ i_prev = i;
+ bt_count_prev = bt_count;
+ }
+ }
+
+ return (NULL);
+}
+
+TEST_BEGIN(test_idump)
+{
+ bool active;
+ thd_t thds[NTHREADS];
+ unsigned thd_args[NTHREADS];
+ unsigned i;
+
+ test_skip_if(!config_prof);
+
+ active = true;
+ assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
+ 0, "Unexpected mallctl failure while activating profiling");
+
+ prof_dump_open = prof_dump_open_intercept;
+
+ for (i = 0; i < NTHREADS; i++) {
+ thd_args[i] = i;
+ thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
+ }
+ for (i = 0; i < NTHREADS; i++)
+ thd_join(thds[i], NULL);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_idump));
+}
diff --git a/deps/jemalloc/test/unit/prof_accum.h b/deps/jemalloc/test/unit/prof_accum.h
new file mode 100644
index 000000000..109d86b59
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_accum.h
@@ -0,0 +1,35 @@
+#include "test/jemalloc_test.h"
+
+#define NTHREADS 4
+#define NALLOCS_PER_THREAD 50
+#define DUMP_INTERVAL 1
+#define BT_COUNT_CHECK_INTERVAL 5
+
+#define alloc_n_proto(n) \
+void *alloc_##n(unsigned bits);
+alloc_n_proto(0)
+alloc_n_proto(1)
+
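+/*
+ * alloc_n_gen(n) defines alloc_n(), which recurses through alloc_0() and
+ * alloc_1() according to the bits of its argument before finally calling
+ * mallocx(), so each distinct bits value yields a distinct backtrace.
+ */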
+#define alloc_n_gen(n) \
+void * \
+alloc_##n(unsigned bits) \
+{ \
+ void *p; \
+ \
+ if (bits == 0) \
+ p = mallocx(1, 0); \
+ else { \
+ switch (bits & 0x1U) { \
+ case 0: \
+ p = (alloc_0(bits >> 1)); \
+ break; \
+ case 1: \
+ p = (alloc_1(bits >> 1)); \
+ break; \
+ default: not_reached(); \
+ } \
+ } \
+ /* Intentionally sabotage tail call optimization. */ \
+ assert_ptr_not_null(p, "Unexpected mallocx() failure"); \
+ return (p); \
+}
diff --git a/deps/jemalloc/test/unit/prof_accum_a.c b/deps/jemalloc/test/unit/prof_accum_a.c
new file mode 100644
index 000000000..42ad521d8
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_accum_a.c
@@ -0,0 +1,3 @@
+#include "prof_accum.h"
+
+alloc_n_gen(0)
diff --git a/deps/jemalloc/test/unit/prof_accum_b.c b/deps/jemalloc/test/unit/prof_accum_b.c
new file mode 100644
index 000000000..60d9dab6a
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_accum_b.c
@@ -0,0 +1,3 @@
+#include "prof_accum.h"
+
+alloc_n_gen(1)
diff --git a/deps/jemalloc/test/unit/prof_gdump.c b/deps/jemalloc/test/unit/prof_gdump.c
new file mode 100644
index 000000000..a00b1054f
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_gdump.c
@@ -0,0 +1,56 @@
+#include "test/jemalloc_test.h"
+
+#ifdef JEMALLOC_PROF
+const char *malloc_conf = "prof:true,prof_active:false,prof_gdump:true";
+#endif
+
+static bool did_prof_dump_open;
+
+static int
+prof_dump_open_intercept(bool propagate_err, const char *filename)
+{
+ int fd;
+
+ did_prof_dump_open = true;
+
+ fd = open("/dev/null", O_WRONLY);
+ assert_d_ne(fd, -1, "Unexpected open() failure");
+
+ return (fd);
+}
+
+TEST_BEGIN(test_gdump)
+{
+ bool active;
+ void *p, *q;
+
+ test_skip_if(!config_prof);
+
+ active = true;
+ assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
+ 0, "Unexpected mallctl failure while activating profiling");
+
+ prof_dump_open = prof_dump_open_intercept;
+
+ did_prof_dump_open = false;
+ p = mallocx(chunksize, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ assert_true(did_prof_dump_open, "Expected a profile dump");
+
+ did_prof_dump_open = false;
+ q = mallocx(chunksize, 0);
+ assert_ptr_not_null(q, "Unexpected mallocx() failure");
+ assert_true(did_prof_dump_open, "Expected a profile dump");
+
+ dallocx(p, 0);
+ dallocx(q, 0);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_gdump));
+}
diff --git a/deps/jemalloc/test/unit/prof_idump.c b/deps/jemalloc/test/unit/prof_idump.c
new file mode 100644
index 000000000..bdea53ecd
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_idump.c
@@ -0,0 +1,51 @@
+#include "test/jemalloc_test.h"
+
+#ifdef JEMALLOC_PROF
+const char *malloc_conf =
+ "prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0,"
+ "lg_prof_interval:0";
+#endif
+
+static bool did_prof_dump_open;
+
+static int
+prof_dump_open_intercept(bool propagate_err, const char *filename)
+{
+ int fd;
+
+ did_prof_dump_open = true;
+
+ fd = open("/dev/null", O_WRONLY);
+ assert_d_ne(fd, -1, "Unexpected open() failure");
+
+ return (fd);
+}
+
+TEST_BEGIN(test_idump)
+{
+ bool active;
+ void *p;
+
+ test_skip_if(!config_prof);
+
+ active = true;
+ assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
+ 0, "Unexpected mallctl failure while activating profiling");
+
+ prof_dump_open = prof_dump_open_intercept;
+
+ did_prof_dump_open = false;
+ p = mallocx(1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ dallocx(p, 0);
+ assert_true(did_prof_dump_open, "Expected a profile dump");
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_idump));
+}
diff --git a/deps/jemalloc/test/unit/ql.c b/deps/jemalloc/test/unit/ql.c
new file mode 100644
index 000000000..05fad450f
--- /dev/null
+++ b/deps/jemalloc/test/unit/ql.c
@@ -0,0 +1,209 @@
+#include "test/jemalloc_test.h"
+
+/* Number of ring entries, in [2..26]. */
+#define NENTRIES 9
+
+typedef struct list_s list_t;
+typedef ql_head(list_t) list_head_t;
+
+struct list_s {
+ ql_elm(list_t) link;
+ char id;
+};
+
+static void
+test_empty_list(list_head_t *head)
+{
+ list_t *t;
+ unsigned i;
+
+ assert_ptr_null(ql_first(head), "Unexpected element for empty list");
+ assert_ptr_null(ql_last(head, link),
+ "Unexpected element for empty list");
+
+ i = 0;
+ ql_foreach(t, head, link) {
+ i++;
+ }
+ assert_u_eq(i, 0, "Unexpected element for empty list");
+
+ i = 0;
+ ql_reverse_foreach(t, head, link) {
+ i++;
+ }
+ assert_u_eq(i, 0, "Unexpected element for empty list");
+}
+
+TEST_BEGIN(test_ql_empty)
+{
+ list_head_t head;
+
+ ql_new(&head);
+ test_empty_list(&head);
+}
+TEST_END
+
+static void
+init_entries(list_t *entries, unsigned nentries)
+{
+ unsigned i;
+
+ for (i = 0; i < nentries; i++) {
+ entries[i].id = 'a' + i;
+ ql_elm_new(&entries[i], link);
+ }
+}
+
+static void
+test_entries_list(list_head_t *head, list_t *entries, unsigned nentries)
+{
+ list_t *t;
+ unsigned i;
+
+ assert_c_eq(ql_first(head)->id, entries[0].id, "Element id mismatch");
+ assert_c_eq(ql_last(head, link)->id, entries[nentries-1].id,
+ "Element id mismatch");
+
+ i = 0;
+ ql_foreach(t, head, link) {
+ assert_c_eq(t->id, entries[i].id, "Element id mismatch");
+ i++;
+ }
+
+ i = 0;
+ ql_reverse_foreach(t, head, link) {
+ assert_c_eq(t->id, entries[nentries-i-1].id,
+ "Element id mismatch");
+ i++;
+ }
+
+ for (i = 0; i < nentries-1; i++) {
+ t = ql_next(head, &entries[i], link);
+ assert_c_eq(t->id, entries[i+1].id, "Element id mismatch");
+ }
+ assert_ptr_null(ql_next(head, &entries[nentries-1], link),
+ "Unexpected element");
+
+ assert_ptr_null(ql_prev(head, &entries[0], link), "Unexpected element");
+ for (i = 1; i < nentries; i++) {
+ t = ql_prev(head, &entries[i], link);
+ assert_c_eq(t->id, entries[i-1].id, "Element id mismatch");
+ }
+}
+
+TEST_BEGIN(test_ql_tail_insert)
+{
+ list_head_t head;
+ list_t entries[NENTRIES];
+ unsigned i;
+
+ ql_new(&head);
+ init_entries(entries, sizeof(entries)/sizeof(list_t));
+ for (i = 0; i < NENTRIES; i++)
+ ql_tail_insert(&head, &entries[i], link);
+
+ test_entries_list(&head, entries, NENTRIES);
+}
+TEST_END
+
+TEST_BEGIN(test_ql_tail_remove)
+{
+ list_head_t head;
+ list_t entries[NENTRIES];
+ unsigned i;
+
+ ql_new(&head);
+ init_entries(entries, sizeof(entries)/sizeof(list_t));
+ for (i = 0; i < NENTRIES; i++)
+ ql_tail_insert(&head, &entries[i], link);
+
+ for (i = 0; i < NENTRIES; i++) {
+ test_entries_list(&head, entries, NENTRIES-i);
+ ql_tail_remove(&head, list_t, link);
+ }
+ test_empty_list(&head);
+}
+TEST_END
+
+TEST_BEGIN(test_ql_head_insert)
+{
+ list_head_t head;
+ list_t entries[NENTRIES];
+ unsigned i;
+
+ ql_new(&head);
+ init_entries(entries, sizeof(entries)/sizeof(list_t));
+ for (i = 0; i < NENTRIES; i++)
+ ql_head_insert(&head, &entries[NENTRIES-i-1], link);
+
+ test_entries_list(&head, entries, NENTRIES);
+}
+TEST_END
+
+TEST_BEGIN(test_ql_head_remove)
+{
+ list_head_t head;
+ list_t entries[NENTRIES];
+ unsigned i;
+
+ ql_new(&head);
+ init_entries(entries, sizeof(entries)/sizeof(list_t));
+ for (i = 0; i < NENTRIES; i++)
+ ql_head_insert(&head, &entries[NENTRIES-i-1], link);
+
+ for (i = 0; i < NENTRIES; i++) {
+ test_entries_list(&head, &entries[i], NENTRIES-i);
+ ql_head_remove(&head, list_t, link);
+ }
+ test_empty_list(&head);
+}
+TEST_END
+
+TEST_BEGIN(test_ql_insert)
+{
+ list_head_t head;
+ list_t entries[8];
+ list_t *a, *b, *c, *d, *e, *f, *g, *h;
+
+ ql_new(&head);
+ init_entries(entries, sizeof(entries)/sizeof(list_t));
+ a = &entries[0];
+ b = &entries[1];
+ c = &entries[2];
+ d = &entries[3];
+ e = &entries[4];
+ f = &entries[5];
+ g = &entries[6];
+ h = &entries[7];
+
+ /*
+ * ql_remove(), ql_before_insert(), and ql_after_insert() are used
+ * internally by other macros that are already tested, so there's no
+ * need to test them completely. However, insertion/deletion from the
+ * middle of lists is not otherwise tested; do so here.
+ */
+ ql_tail_insert(&head, f, link);
+ ql_before_insert(&head, f, b, link);
+ ql_before_insert(&head, f, c, link);
+ ql_after_insert(f, h, link);
+ ql_after_insert(f, g, link);
+ ql_before_insert(&head, b, a, link);
+ ql_after_insert(c, d, link);
+ ql_before_insert(&head, f, e, link);
+
+ test_entries_list(&head, entries, sizeof(entries)/sizeof(list_t));
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_ql_empty,
+ test_ql_tail_insert,
+ test_ql_tail_remove,
+ test_ql_head_insert,
+ test_ql_head_remove,
+ test_ql_insert));
+}
diff --git a/deps/jemalloc/test/unit/qr.c b/deps/jemalloc/test/unit/qr.c
new file mode 100644
index 000000000..a2a2d902b
--- /dev/null
+++ b/deps/jemalloc/test/unit/qr.c
@@ -0,0 +1,248 @@
+#include "test/jemalloc_test.h"
+
+/* Number of ring entries, in [2..26]. */
+#define NENTRIES 9
+/* Split index, in [1..NENTRIES). */
+#define SPLIT_INDEX 5
+
+typedef struct ring_s ring_t;
+
+struct ring_s {
+ qr(ring_t) link;
+ char id;
+};
+
+static void
+init_entries(ring_t *entries)
+{
+ unsigned i;
+
+ for (i = 0; i < NENTRIES; i++) {
+ qr_new(&entries[i], link);
+ entries[i].id = 'a' + i;
+ }
+}
+
+static void
+test_independent_entries(ring_t *entries)
+{
+ ring_t *t;
+ unsigned i, j;
+
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_foreach(t, &entries[i], link) {
+ j++;
+ }
+ assert_u_eq(j, 1,
+ "Iteration over single-element ring should visit precisely "
+ "one element");
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_reverse_foreach(t, &entries[i], link) {
+ j++;
+ }
+ assert_u_eq(j, 1,
+ "Iteration over single-element ring should visit precisely "
+ "one element");
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ t = qr_next(&entries[i], link);
+ assert_ptr_eq(t, &entries[i],
+ "Next element in single-element ring should be same as "
+ "current element");
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ t = qr_prev(&entries[i], link);
+ assert_ptr_eq(t, &entries[i],
+ "Previous element in single-element ring should be same as "
+ "current element");
+ }
+}
+
+TEST_BEGIN(test_qr_one)
+{
+ ring_t entries[NENTRIES];
+
+ init_entries(entries);
+ test_independent_entries(entries);
+}
+TEST_END
+
+static void
+test_entries_ring(ring_t *entries)
+{
+ ring_t *t;
+ unsigned i, j;
+
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_foreach(t, &entries[i], link) {
+ assert_c_eq(t->id, entries[(i+j) % NENTRIES].id,
+ "Element id mismatch");
+ j++;
+ }
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_reverse_foreach(t, &entries[i], link) {
+ assert_c_eq(t->id, entries[(NENTRIES+i-j-1) %
+ NENTRIES].id, "Element id mismatch");
+ j++;
+ }
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ t = qr_next(&entries[i], link);
+ assert_c_eq(t->id, entries[(i+1) % NENTRIES].id,
+ "Element id mismatch");
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ t = qr_prev(&entries[i], link);
+ assert_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id,
+ "Element id mismatch");
+ }
+}
+
+TEST_BEGIN(test_qr_after_insert)
+{
+ ring_t entries[NENTRIES];
+ unsigned i;
+
+ init_entries(entries);
+ for (i = 1; i < NENTRIES; i++)
+ qr_after_insert(&entries[i - 1], &entries[i], link);
+ test_entries_ring(entries);
+}
+TEST_END
+
+TEST_BEGIN(test_qr_remove)
+{
+ ring_t entries[NENTRIES];
+ ring_t *t;
+ unsigned i, j;
+
+ init_entries(entries);
+ for (i = 1; i < NENTRIES; i++)
+ qr_after_insert(&entries[i - 1], &entries[i], link);
+
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_foreach(t, &entries[i], link) {
+ assert_c_eq(t->id, entries[i+j].id,
+ "Element id mismatch");
+ j++;
+ }
+ j = 0;
+ qr_reverse_foreach(t, &entries[i], link) {
+ assert_c_eq(t->id, entries[NENTRIES - 1 - j].id,
+ "Element id mismatch");
+ j++;
+ }
+ qr_remove(&entries[i], link);
+ }
+ test_independent_entries(entries);
+}
+TEST_END
+
+TEST_BEGIN(test_qr_before_insert)
+{
+ ring_t entries[NENTRIES];
+ ring_t *t;
+ unsigned i, j;
+
+ init_entries(entries);
+ for (i = 1; i < NENTRIES; i++)
+ qr_before_insert(&entries[i - 1], &entries[i], link);
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_foreach(t, &entries[i], link) {
+ assert_c_eq(t->id, entries[(NENTRIES+i-j) %
+ NENTRIES].id, "Element id mismatch");
+ j++;
+ }
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_reverse_foreach(t, &entries[i], link) {
+ assert_c_eq(t->id, entries[(i+j+1) % NENTRIES].id,
+ "Element id mismatch");
+ j++;
+ }
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ t = qr_next(&entries[i], link);
+ assert_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id,
+ "Element id mismatch");
+ }
+ for (i = 0; i < NENTRIES; i++) {
+ t = qr_prev(&entries[i], link);
+ assert_c_eq(t->id, entries[(i+1) % NENTRIES].id,
+ "Element id mismatch");
+ }
+}
+TEST_END
+
+static void
+test_split_entries(ring_t *entries)
+{
+ ring_t *t;
+ unsigned i, j;
+
+ for (i = 0; i < NENTRIES; i++) {
+ j = 0;
+ qr_foreach(t, &entries[i], link) {
+ if (i < SPLIT_INDEX) {
+ assert_c_eq(t->id,
+ entries[(i+j) % SPLIT_INDEX].id,
+ "Element id mismatch");
+ } else {
+ assert_c_eq(t->id, entries[(i+j-SPLIT_INDEX) %
+ (NENTRIES-SPLIT_INDEX) + SPLIT_INDEX].id,
+ "Element id mismatch");
+ }
+ j++;
+ }
+ }
+}
+
+TEST_BEGIN(test_qr_meld_split)
+{
+ ring_t entries[NENTRIES];
+ unsigned i;
+
+ init_entries(entries);
+ for (i = 1; i < NENTRIES; i++)
+ qr_after_insert(&entries[i - 1], &entries[i], link);
+
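+ /*
+ * qr_meld() and qr_split() perform the same pointer swap, so applying
+ * either operation twice restores the previous configuration, and
+ * operating on a single element is a no-op.
+ */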
+ qr_split(&entries[0], &entries[SPLIT_INDEX], link);
+ test_split_entries(entries);
+
+ qr_meld(&entries[0], &entries[SPLIT_INDEX], link);
+ test_entries_ring(entries);
+
+ qr_meld(&entries[0], &entries[SPLIT_INDEX], link);
+ test_split_entries(entries);
+
+ qr_split(&entries[0], &entries[SPLIT_INDEX], link);
+ test_entries_ring(entries);
+
+ qr_split(&entries[0], &entries[0], link);
+ test_entries_ring(entries);
+
+ qr_meld(&entries[0], &entries[0], link);
+ test_entries_ring(entries);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_qr_one,
+ test_qr_after_insert,
+ test_qr_remove,
+ test_qr_before_insert,
+ test_qr_meld_split));
+}
diff --git a/deps/jemalloc/test/unit/quarantine.c b/deps/jemalloc/test/unit/quarantine.c
new file mode 100644
index 000000000..bbd48a51d
--- /dev/null
+++ b/deps/jemalloc/test/unit/quarantine.c
@@ -0,0 +1,108 @@
+#include "test/jemalloc_test.h"
+
+#define QUARANTINE_SIZE 8192
+#define STRINGIFY_HELPER(x) #x
+#define STRINGIFY(x) STRINGIFY_HELPER(x)
+
+#ifdef JEMALLOC_FILL
+const char *malloc_conf = "abort:false,junk:true,redzone:true,quarantine:"
+ STRINGIFY(QUARANTINE_SIZE);
+#endif
+
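+/*
+ * Free an object twice the size of the quarantine so that any currently
+ * quarantined regions are drained to make room for it.
+ */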
+void
+quarantine_clear(void)
+{
+ void *p;
+
+ p = mallocx(QUARANTINE_SIZE*2, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ dallocx(p, 0);
+}
+
+TEST_BEGIN(test_quarantine)
+{
+#define SZ ZU(256)
+#define NQUARANTINED (QUARANTINE_SIZE/SZ)
+ void *quarantined[NQUARANTINED+1];
+ size_t i, j;
+
+ test_skip_if(!config_fill);
+
+ assert_zu_eq(nallocx(SZ, 0), SZ,
+ "SZ=%zu does not precisely equal a size class", SZ);
+
+ quarantine_clear();
+
+ /*
+ * Allocate enough regions to completely fill the quarantine, plus one
+ * more. The last iteration occurs with a completely full quarantine,
+ * but no regions should be drained from the quarantine until the last
+ * deallocation occurs. Therefore no region recycling should occur
+ * until after this loop completes.
+ */
+ for (i = 0; i < NQUARANTINED+1; i++) {
+ void *p = mallocx(SZ, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ quarantined[i] = p;
+ dallocx(p, 0);
+ for (j = 0; j < i; j++) {
+ assert_ptr_ne(p, quarantined[j],
+ "Quarantined region recycled too early; "
+ "i=%zu, j=%zu", i, j);
+ }
+ }
+#undef NQUARANTINED
+#undef SZ
+}
+TEST_END
+
+static bool detected_redzone_corruption;
+
+static void
+arena_redzone_corruption_replacement(void *ptr, size_t usize, bool after,
+ size_t offset, uint8_t byte)
+{
+
+ detected_redzone_corruption = true;
+}
+
+TEST_BEGIN(test_quarantine_redzone)
+{
+ char *s;
+ arena_redzone_corruption_t *arena_redzone_corruption_orig;
+
+ test_skip_if(!config_fill);
+
+ arena_redzone_corruption_orig = arena_redzone_corruption;
+ arena_redzone_corruption = arena_redzone_corruption_replacement;
+
+ /* Test underflow. */
+ detected_redzone_corruption = false;
+ s = (char *)mallocx(1, 0);
+ assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
+ s[-1] = 0xbb;
+ dallocx(s, 0);
+ assert_true(detected_redzone_corruption,
+ "Did not detect redzone corruption");
+
+ /* Test overflow. */
+ detected_redzone_corruption = false;
+ s = (char *)mallocx(1, 0);
+ assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
+ s[sallocx(s, 0)] = 0xbb;
+ dallocx(s, 0);
+ assert_true(detected_redzone_corruption,
+ "Did not detect redzone corruption");
+
+ arena_redzone_corruption = arena_redzone_corruption_orig;
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_quarantine,
+ test_quarantine_redzone));
+}
diff --git a/deps/jemalloc/test/unit/rb.c b/deps/jemalloc/test/unit/rb.c
new file mode 100644
index 000000000..b737485a7
--- /dev/null
+++ b/deps/jemalloc/test/unit/rb.c
@@ -0,0 +1,333 @@
+#include "test/jemalloc_test.h"
+
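+/*
+ * Compute the tree's black height by counting black nodes along the leftmost
+ * path from the root to the nil sentinel.
+ */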
+#define rbtn_black_height(a_type, a_field, a_rbt, r_height) do { \
+ a_type *rbp_bh_t; \
+ for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0; \
+ rbp_bh_t != &(a_rbt)->rbt_nil; \
+ rbp_bh_t = rbtn_left_get(a_type, a_field, rbp_bh_t)) { \
+ if (rbtn_red_get(a_type, a_field, rbp_bh_t) == false) { \
+ (r_height)++; \
+ } \
+ } \
+} while (0)
+
+typedef struct node_s node_t;
+
+struct node_s {
+#define NODE_MAGIC 0x9823af7e
+ uint32_t magic;
+ rb_node(node_t) link;
+ uint64_t key;
+};
+
+static int
+node_cmp(node_t *a, node_t *b) {
+ int ret;
+
+ assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic");
+ assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic");
+
+ ret = (a->key > b->key) - (a->key < b->key);
+ if (ret == 0) {
+ /*
+ * Duplicates are not allowed in the tree, so force an
+ * arbitrary ordering for non-identical items with equal keys.
+ */
+ ret = (((uintptr_t)a) > ((uintptr_t)b))
+ - (((uintptr_t)a) < ((uintptr_t)b));
+ }
+ return (ret);
+}
+
+typedef rb_tree(node_t) tree_t;
+rb_gen(static, tree_, tree_t, node_t, link, node_cmp);
+
+TEST_BEGIN(test_rb_empty)
+{
+ tree_t tree;
+ node_t key;
+
+ tree_new(&tree);
+
+ assert_ptr_null(tree_first(&tree), "Unexpected node");
+ assert_ptr_null(tree_last(&tree), "Unexpected node");
+
+ key.key = 0;
+ key.magic = NODE_MAGIC;
+ assert_ptr_null(tree_search(&tree, &key), "Unexpected node");
+
+ key.key = 0;
+ key.magic = NODE_MAGIC;
+ assert_ptr_null(tree_nsearch(&tree, &key), "Unexpected node");
+
+ key.key = 0;
+ key.magic = NODE_MAGIC;
+ assert_ptr_null(tree_psearch(&tree, &key), "Unexpected node");
+}
+TEST_END
+
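+/*
+ * Recursively verify that red nodes have only black children, and count the
+ * number of leaf paths whose black depth differs from the expected black
+ * height; the caller asserts that the returned count is zero.
+ */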
+static unsigned
+tree_recurse(node_t *node, unsigned black_height, unsigned black_depth,
+ node_t *nil)
+{
+ unsigned ret = 0;
+ node_t *left_node = rbtn_left_get(node_t, link, node);
+ node_t *right_node = rbtn_right_get(node_t, link, node);
+
+ if (rbtn_red_get(node_t, link, node) == false)
+ black_depth++;
+
+ /* Red nodes must be interleaved with black nodes. */
+ if (rbtn_red_get(node_t, link, node)) {
+ assert_false(rbtn_red_get(node_t, link, left_node),
+ "Node should be black");
+ assert_false(rbtn_red_get(node_t, link, right_node),
+ "Node should be black");
+ }
+
+ if (node == nil)
+ return (ret);
+ /* Self. */
+ assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic");
+
+ /* Left subtree. */
+ if (left_node != nil)
+ ret += tree_recurse(left_node, black_height, black_depth, nil);
+ else
+ ret += (black_depth != black_height);
+
+ /* Right subtree. */
+ if (right_node != nil)
+ ret += tree_recurse(right_node, black_height, black_depth, nil);
+ else
+ ret += (black_depth != black_height);
+
+ return (ret);
+}
+
+static node_t *
+tree_iterate_cb(tree_t *tree, node_t *node, void *data)
+{
+ unsigned *i = (unsigned *)data;
+ node_t *search_node;
+
+ assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic");
+
+ /* Test rb_search(). */
+ search_node = tree_search(tree, node);
+ assert_ptr_eq(search_node, node,
+ "tree_search() returned unexpected node");
+
+ /* Test rb_nsearch(). */
+ search_node = tree_nsearch(tree, node);
+ assert_ptr_eq(search_node, node,
+ "tree_nsearch() returned unexpected node");
+
+ /* Test rb_psearch(). */
+ search_node = tree_psearch(tree, node);
+ assert_ptr_eq(search_node, node,
+ "tree_psearch() returned unexpected node");
+
+ (*i)++;
+
+ return (NULL);
+}
+
+static unsigned
+tree_iterate(tree_t *tree)
+{
+ unsigned i;
+
+ i = 0;
+ tree_iter(tree, NULL, tree_iterate_cb, (void *)&i);
+
+ return (i);
+}
+
+static unsigned
+tree_iterate_reverse(tree_t *tree)
+{
+ unsigned i;
+
+ i = 0;
+ tree_reverse_iter(tree, NULL, tree_iterate_cb, (void *)&i);
+
+ return (i);
+}
+
+static void
+node_remove(tree_t *tree, node_t *node, unsigned nnodes)
+{
+ node_t *search_node;
+ unsigned black_height, imbalances;
+
+ tree_remove(tree, node);
+
+ /* Test rb_nsearch(). */
+ search_node = tree_nsearch(tree, node);
+ if (search_node != NULL) {
+ assert_u64_ge(search_node->key, node->key,
+ "Key ordering error");
+ }
+
+ /* Test rb_psearch(). */
+ search_node = tree_psearch(tree, node);
+ if (search_node != NULL) {
+ assert_u64_le(search_node->key, node->key,
+ "Key ordering error");
+ }
+
+ node->magic = 0;
+
+ rbtn_black_height(node_t, link, tree, black_height);
+ imbalances = tree_recurse(tree->rbt_root, black_height, 0,
+ &(tree->rbt_nil));
+ assert_u_eq(imbalances, 0, "Tree is unbalanced");
+ assert_u_eq(tree_iterate(tree), nnodes-1,
+ "Unexpected node iteration count");
+ assert_u_eq(tree_iterate_reverse(tree), nnodes-1,
+ "Unexpected node iteration count");
+}
+
+static node_t *
+remove_iterate_cb(tree_t *tree, node_t *node, void *data)
+{
+ unsigned *nnodes = (unsigned *)data;
+ node_t *ret = tree_next(tree, node);
+
+ node_remove(tree, node, *nnodes);
+
+ return (ret);
+}
+
+static node_t *
+remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data)
+{
+ unsigned *nnodes = (unsigned *)data;
+ node_t *ret = tree_prev(tree, node);
+
+ node_remove(tree, node, *nnodes);
+
+ return (ret);
+}
+
+TEST_BEGIN(test_rb_random)
+{
+#define NNODES 25
+#define NBAGS 250
+#define SEED 42
+ sfmt_t *sfmt;
+ uint64_t bag[NNODES];
+ tree_t tree;
+ node_t nodes[NNODES];
+ unsigned i, j, k, black_height, imbalances;
+
+ sfmt = init_gen_rand(SEED);
+ for (i = 0; i < NBAGS; i++) {
+ switch (i) {
+ case 0:
+ /* Insert in order. */
+ for (j = 0; j < NNODES; j++)
+ bag[j] = j;
+ break;
+ case 1:
+ /* Insert in reverse order. */
+ for (j = 0; j < NNODES; j++)
+ bag[j] = NNODES - j - 1;
+ break;
+ default:
+ for (j = 0; j < NNODES; j++)
+ bag[j] = gen_rand64_range(sfmt, NNODES);
+ }
+
+ for (j = 1; j <= NNODES; j++) {
+ /* Initialize tree and nodes. */
+ tree_new(&tree);
+ tree.rbt_nil.magic = 0;
+ for (k = 0; k < j; k++) {
+ nodes[k].magic = NODE_MAGIC;
+ nodes[k].key = bag[k];
+ }
+
+ /* Insert nodes. */
+ for (k = 0; k < j; k++) {
+ tree_insert(&tree, &nodes[k]);
+
+ rbtn_black_height(node_t, link, &tree,
+ black_height);
+ imbalances = tree_recurse(tree.rbt_root,
+ black_height, 0, &(tree.rbt_nil));
+ assert_u_eq(imbalances, 0,
+ "Tree is unbalanced");
+
+ assert_u_eq(tree_iterate(&tree), k+1,
+ "Unexpected node iteration count");
+ assert_u_eq(tree_iterate_reverse(&tree), k+1,
+ "Unexpected node iteration count");
+
+ assert_ptr_not_null(tree_first(&tree),
+ "Tree should not be empty");
+ assert_ptr_not_null(tree_last(&tree),
+ "Tree should not be empty");
+
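+ /* Merely exercise tree_next() and tree_prev(); results are unused. */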
+ tree_next(&tree, &nodes[k]);
+ tree_prev(&tree, &nodes[k]);
+ }
+
+ /* Remove nodes. */
+ switch (i % 4) {
+ case 0:
+ for (k = 0; k < j; k++)
+ node_remove(&tree, &nodes[k], j - k);
+ break;
+ case 1:
+ for (k = j; k > 0; k--)
+ node_remove(&tree, &nodes[k-1], k);
+ break;
+ case 2: {
+ node_t *start;
+ unsigned nnodes = j;
+
+ start = NULL;
+ do {
+ start = tree_iter(&tree, start,
+ remove_iterate_cb, (void *)&nnodes);
+ nnodes--;
+ } while (start != NULL);
+ assert_u_eq(nnodes, 0,
+ "Removal terminated early");
+ break;
+ } case 3: {
+ node_t *start;
+ unsigned nnodes = j;
+
+ start = NULL;
+ do {
+ start = tree_reverse_iter(&tree, start,
+ remove_reverse_iterate_cb,
+ (void *)&nnodes);
+ nnodes--;
+ } while (start != NULL);
+ assert_u_eq(nnodes, 0,
+ "Removal terminated early");
+ break;
+ } default:
+ not_reached();
+ }
+ }
+ }
+ fini_gen_rand(sfmt);
+#undef NNODES
+#undef NBAGS
+#undef SEED
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_rb_empty,
+ test_rb_random));
+}
diff --git a/deps/jemalloc/test/unit/rtree.c b/deps/jemalloc/test/unit/rtree.c
new file mode 100644
index 000000000..5463055fe
--- /dev/null
+++ b/deps/jemalloc/test/unit/rtree.c
@@ -0,0 +1,118 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_rtree_get_empty)
+{
+ unsigned i;
+
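+ /* i is the number of significant key bits, up to the pointer width. */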
+ for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
+ rtree_t *rtree = rtree_new(i, imalloc, idalloc);
+ assert_u_eq(rtree_get(rtree, 0), 0,
+ "rtree_get() should return NULL for empty tree");
+ rtree_delete(rtree);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_rtree_extrema)
+{
+ unsigned i;
+
+ for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
+ rtree_t *rtree = rtree_new(i, imalloc, idalloc);
+
+ rtree_set(rtree, 0, 1);
+ assert_u_eq(rtree_get(rtree, 0), 1,
+ "rtree_get() should return previously set value");
+
+ rtree_set(rtree, ~((uintptr_t)0), 1);
+ assert_u_eq(rtree_get(rtree, ~((uintptr_t)0)), 1,
+ "rtree_get() should return previously set value");
+
+ rtree_delete(rtree);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_rtree_bits)
+{
+ unsigned i, j, k;
+
+ for (i = 1; i < (sizeof(uintptr_t) << 3); i++) {
+ uintptr_t keys[] = {0, 1,
+ (((uintptr_t)1) << (sizeof(uintptr_t)*8-i)) - 1};
+ rtree_t *rtree = rtree_new(i, imalloc, idalloc);
+
+ for (j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) {
+ rtree_set(rtree, keys[j], 1);
+ for (k = 0; k < sizeof(keys)/sizeof(uintptr_t); k++) {
+ assert_u_eq(rtree_get(rtree, keys[k]), 1,
+ "rtree_get() should return previously set "
+ "value and ignore insignificant key bits; "
+ "i=%u, j=%u, k=%u, set key=%#"PRIxPTR", "
+ "get key=%#"PRIxPTR, i, j, k, keys[j],
+ keys[k]);
+ }
+ assert_u_eq(rtree_get(rtree,
+ (((uintptr_t)1) << (sizeof(uintptr_t)*8-i))), 0,
+ "Only leftmost rtree leaf should be set; "
+ "i=%u, j=%u", i, j);
+ rtree_set(rtree, keys[j], 0);
+ }
+
+ rtree_delete(rtree);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_rtree_random)
+{
+ unsigned i;
+ sfmt_t *sfmt;
+#define NSET 100
+#define SEED 42
+
+ sfmt = init_gen_rand(SEED);
+ for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
+ rtree_t *rtree = rtree_new(i, imalloc, idalloc);
+ uintptr_t keys[NSET];
+ unsigned j;
+
+ for (j = 0; j < NSET; j++) {
+ keys[j] = (uintptr_t)gen_rand64(sfmt);
+ rtree_set(rtree, keys[j], 1);
+ assert_u_eq(rtree_get(rtree, keys[j]), 1,
+ "rtree_get() should return previously set value");
+ }
+ for (j = 0; j < NSET; j++) {
+ assert_u_eq(rtree_get(rtree, keys[j]), 1,
+ "rtree_get() should return previously set value");
+ }
+
+ for (j = 0; j < NSET; j++) {
+ rtree_set(rtree, keys[j], 0);
+ assert_u_eq(rtree_get(rtree, keys[j]), 0,
+ "rtree_get() should return previously set value");
+ }
+ for (j = 0; j < NSET; j++) {
+ assert_u_eq(rtree_get(rtree, keys[j]), 0,
+ "rtree_get() should return previously set value");
+ }
+
+ rtree_delete(rtree);
+ }
+ fini_gen_rand(sfmt);
+#undef NSET
+#undef SEED
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_rtree_get_empty,
+ test_rtree_extrema,
+ test_rtree_bits,
+ test_rtree_random));
+}
diff --git a/deps/jemalloc/test/unit/stats.c b/deps/jemalloc/test/unit/stats.c
new file mode 100644
index 000000000..03a55c7fd
--- /dev/null
+++ b/deps/jemalloc/test/unit/stats.c
@@ -0,0 +1,380 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_stats_summary)
+{
+ size_t *cactive;
+ size_t sz, allocated, active, mapped;
+ int expected = config_stats ? 0 : ENOENT;
+
+ sz = sizeof(cactive);
+ assert_d_eq(mallctl("stats.cactive", &cactive, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.allocated", &allocated, &sz, NULL, 0),
+ expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.active", &active, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.mapped", &mapped, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+
+ if (config_stats) {
+ assert_zu_le(active, *cactive,
+ "active should be no larger than cactive");
+ assert_zu_le(allocated, active,
+ "allocated should be no larger than active");
+ assert_zu_le(active, mapped,
+ "active should be no larger than mapped");
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_stats_chunks)
+{
+ size_t current, high;
+ uint64_t total;
+ size_t sz;
+ int expected = config_stats ? 0 : ENOENT;
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.chunks.current", &current, &sz, NULL, 0),
+ expected, "Unexpected mallctl() result");
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.chunks.total", &total, &sz, NULL, 0),
+ expected, "Unexpected mallctl() result");
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.chunks.high", &high, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+
+ if (config_stats) {
+ assert_zu_le(current, high,
+ "current should be no larger than high");
+ assert_u64_le((uint64_t)high, total,
+ "high should be no larger than total");
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_stats_huge)
+{
+ void *p;
+ uint64_t epoch;
+ size_t allocated;
+ uint64_t nmalloc, ndalloc;
+ size_t sz;
+ int expected = config_stats ? 0 : ENOENT;
+
+ p = mallocx(arena_maxclass+1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+ "Unexpected mallctl() failure");
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.huge.allocated", &allocated, &sz, NULL, 0),
+ expected, "Unexpected mallctl() result");
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.huge.nmalloc", &nmalloc, &sz, NULL, 0),
+ expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.huge.ndalloc", &ndalloc, &sz, NULL, 0),
+ expected, "Unexpected mallctl() result");
+
+ if (config_stats) {
+ assert_zu_gt(allocated, 0,
+ "allocated should be greater than zero");
+ assert_u64_ge(nmalloc, ndalloc,
+ "nmalloc should be at least as large as ndalloc");
+ }
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_stats_arenas_summary)
+{
+ unsigned arena;
+ void *small, *large;
+ uint64_t epoch;
+ size_t sz;
+ int expected = config_stats ? 0 : ENOENT;
+ size_t mapped;
+ uint64_t npurge, nmadvise, purged;
+
+ arena = 0;
+ assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+ 0, "Unexpected mallctl() failure");
+
+ small = mallocx(SMALL_MAXCLASS, 0);
+ assert_ptr_not_null(small, "Unexpected mallocx() failure");
+ large = mallocx(arena_maxclass, 0);
+ assert_ptr_not_null(large, "Unexpected mallocx() failure");
+
+ assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+ "Unexpected mallctl() failure");
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.arenas.0.mapped", &mapped, &sz, NULL, 0),
+ expected, "Unexepected mallctl() result");
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge, &sz, NULL, 0),
+ expected, "Unexepected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.nmadvise", &nmadvise, &sz, NULL, 0),
+ expected, "Unexepected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.purged", &purged, &sz, NULL, 0),
+ expected, "Unexepected mallctl() result");
+
+ if (config_stats) {
+ assert_u64_gt(npurge, 0,
+ "At least one purge should have occurred");
+ assert_u64_le(nmadvise, purged,
+ "nmadvise should be no greater than purged");
+ }
+
+ dallocx(small, 0);
+ dallocx(large, 0);
+}
+TEST_END
+
+void *
+thd_start(void *arg)
+{
+
+ return (NULL);
+}
+
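+/*
+ * Spawn and join a thread so that the process becomes multi-threaded, thereby
+ * defeating jemalloc's lazy-lock optimization.
+ */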
+static void
+no_lazy_lock(void)
+{
+ thd_t thd;
+
+ thd_create(&thd, thd_start, NULL);
+ thd_join(thd, NULL);
+}
+
+TEST_BEGIN(test_stats_arenas_small)
+{
+ unsigned arena;
+ void *p;
+ size_t sz, allocated;
+ uint64_t epoch, nmalloc, ndalloc, nrequests;
+ int expected = config_stats ? 0 : ENOENT;
+
+ no_lazy_lock(); /* Lazy locking would dodge tcache testing. */
+
+ arena = 0;
+ assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+ 0, "Unexpected mallctl() failure");
+
+ p = mallocx(SMALL_MAXCLASS, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
+ config_tcache ? 0 : ENOENT, "Unexpected mallctl() result");
+
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+ "Unexpected mallctl() failure");
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.arenas.0.small.allocated", &allocated, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.arenas.0.small.nmalloc", &nmalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.small.ndalloc", &ndalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.small.nrequests", &nrequests, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+
+ if (config_stats) {
+ assert_zu_gt(allocated, 0,
+ "allocated should be greater than zero");
+ assert_u64_gt(nmalloc, 0,
+ "nmalloc should be no greater than zero");
+ assert_u64_ge(nmalloc, ndalloc,
+ "nmalloc should be at least as large as ndalloc");
+ assert_u64_gt(nrequests, 0,
+ "nrequests should be greater than zero");
+ }
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_stats_arenas_large)
+{
+ unsigned arena;
+ void *p;
+ size_t sz, allocated;
+ uint64_t epoch, nmalloc, ndalloc, nrequests;
+ int expected = config_stats ? 0 : ENOENT;
+
+ arena = 0;
+ assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+ 0, "Unexpected mallctl() failure");
+
+ p = mallocx(arena_maxclass, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+ "Unexpected mallctl() failure");
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.arenas.0.large.allocated", &allocated, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", &nmalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", &ndalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.large.nrequests", &nrequests, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+
+ if (config_stats) {
+ assert_zu_gt(allocated, 0,
+ "allocated should be greater than zero");
+ assert_u64_gt(nmalloc, 0,
+ "nmalloc should be greater than zero");
+ assert_u64_ge(nmalloc, ndalloc,
+ "nmalloc should be at least as large as ndalloc");
+ assert_u64_gt(nrequests, 0,
+ "nrequests should be greater than zero");
+ }
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_stats_arenas_bins)
+{
+ unsigned arena;
+ void *p;
+ size_t sz, allocated, curruns;
+ uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes;
+ uint64_t nruns, nreruns;
+ int expected = config_stats ? 0 : ENOENT;
+
+ arena = 0;
+ assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+ 0, "Unexpected mallctl() failure");
+
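+ /* Allocate a region from bin 0 so the stats.arenas.0.bins.0 counters are non-zero. */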
+ p = mallocx(arena_bin_info[0].reg_size, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
+ config_tcache ? 0 : ENOENT, "Unexpected mallctl() result");
+
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+ "Unexpected mallctl() failure");
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.allocated", &allocated, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.nmalloc", &nmalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.ndalloc", &ndalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.nrequests", &nrequests, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.nfills", &nfills, &sz,
+ NULL, 0), config_tcache ? expected : ENOENT,
+ "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.nflushes", &nflushes, &sz,
+ NULL, 0), config_tcache ? expected : ENOENT,
+ "Unexpected mallctl() result");
+
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.nruns", &nruns, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.nreruns", &nreruns, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.curruns", &curruns, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+
+ if (config_stats) {
+ assert_zu_gt(allocated, 0,
+ "allocated should be greater than zero");
+ assert_u64_gt(nmalloc, 0,
+ "nmalloc should be greater than zero");
+ assert_u64_ge(nmalloc, ndalloc,
+ "nmalloc should be at least as large as ndalloc");
+ assert_u64_gt(nrequests, 0,
+ "nrequests should be greater than zero");
+ if (config_tcache) {
+ assert_u64_gt(nfills, 0,
+ "At least one fill should have occurred");
+ assert_u64_gt(nflushes, 0,
+ "At least one flush should have occurred");
+ }
+ assert_u64_gt(nruns, 0,
+ "At least one run should have been allocated");
+ assert_zu_gt(curruns, 0,
+ "At least one run should be currently allocated");
+ }
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_stats_arenas_lruns)
+{
+ unsigned arena;
+ void *p;
+ uint64_t epoch, nmalloc, ndalloc, nrequests;
+ size_t curruns, sz;
+ int expected = config_stats ? 0 : ENOENT;
+
+ arena = 0;
+ assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+ 0, "Unexpected mallctl() failure");
+
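+ /* SMALL_MAXCLASS+1 rounds up to the smallest large size class, populating lruns.0. */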
+ p = mallocx(SMALL_MAXCLASS+1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+ "Unexpected mallctl() failure");
+
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.arenas.0.lruns.0.nmalloc", &nmalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.lruns.0.ndalloc", &ndalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.lruns.0.nrequests", &nrequests, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.arenas.0.lruns.0.curruns", &curruns, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+
+ if (config_stats) {
+ assert_u64_gt(nmalloc, 0,
+ "nmalloc should be greater than zero");
+ assert_u64_ge(nmalloc, ndalloc,
+ "nmalloc should be at least as large as ndalloc");
+ assert_u64_gt(nrequests, 0,
+ "nrequests should be greater than zero");
+ assert_zu_gt(curruns, 0,
+ "At least one run should be currently allocated");
+ }
+
+ dallocx(p, 0);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_stats_summary,
+ test_stats_chunks,
+ test_stats_huge,
+ test_stats_arenas_summary,
+ test_stats_arenas_small,
+ test_stats_arenas_large,
+ test_stats_arenas_bins,
+ test_stats_arenas_lruns));
+}
diff --git a/deps/jemalloc/test/unit/tsd.c b/deps/jemalloc/test/unit/tsd.c
new file mode 100644
index 000000000..f421c1a3c
--- /dev/null
+++ b/deps/jemalloc/test/unit/tsd.c
@@ -0,0 +1,71 @@
+#include "test/jemalloc_test.h"
+
+#define THREAD_DATA 0x72b65c10
+
+typedef unsigned int data_t;
+
+static bool data_cleanup_executed;
+
+void
+data_cleanup(void *arg)
+{
+ data_t *data = (data_t *)arg;
+
+ assert_x_eq(*data, THREAD_DATA,
+ "Argument passed into cleanup function should match tsd value");
+ data_cleanup_executed = true;
+}
+
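+/*
+ * The malloc_tsd_*() macros below generate the prototypes, storage, and
+ * accessor functions for a thread-specific-data slot named "data" of type
+ * data_t, initialized to DATA_INIT and torn down via data_cleanup() at
+ * thread exit.
+ */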
+malloc_tsd_protos(, data, data_t)
+malloc_tsd_externs(data, data_t)
+#define DATA_INIT 0x12345678
+malloc_tsd_data(, data, data_t, DATA_INIT)
+malloc_tsd_funcs(, data, data_t, DATA_INIT, data_cleanup)
+
+static void *
+thd_start(void *arg)
+{
+ data_t d = (data_t)(uintptr_t)arg;
+ assert_x_eq(*data_tsd_get(), DATA_INIT,
+ "Initial tsd get should return initialization value");
+
+ data_tsd_set(&d);
+ assert_x_eq(*data_tsd_get(), d,
+ "After tsd set, tsd get should return value that was set");
+
+ d = 0;
+ assert_x_eq(*data_tsd_get(), (data_t)(uintptr_t)arg,
+ "Resetting local data should have no effect on tsd");
+
+ return (NULL);
+}
+
+TEST_BEGIN(test_tsd_main_thread)
+{
+
+ thd_start((void *) 0xa5f3e329);
+}
+TEST_END
+
+TEST_BEGIN(test_tsd_sub_thread)
+{
+ thd_t thd;
+
+ data_cleanup_executed = false;
+ thd_create(&thd, thd_start, (void *)THREAD_DATA);
+ thd_join(thd, NULL);
+ assert_true(data_cleanup_executed,
+ "Cleanup function should have executed");
+}
+TEST_END
+
+int
+main(void)
+{
+
+ data_tsd_boot();
+
+ return (test(
+ test_tsd_main_thread,
+ test_tsd_sub_thread));
+}
diff --git a/deps/jemalloc/test/unit/util.c b/deps/jemalloc/test/unit/util.c
new file mode 100644
index 000000000..dc3cfe8a9
--- /dev/null
+++ b/deps/jemalloc/test/unit/util.c
@@ -0,0 +1,294 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_pow2_ceil)
+{
+ unsigned i, pow2;
+ size_t x;
+
+ assert_zu_eq(pow2_ceil(0), 0, "Unexpected result");
+
+ for (i = 0; i < sizeof(size_t) * 8; i++) {
+ assert_zu_eq(pow2_ceil(ZU(1) << i), ZU(1) << i,
+ "Unexpected result");
+ }
+
+ for (i = 2; i < sizeof(size_t) * 8; i++) {
+ assert_zu_eq(pow2_ceil((ZU(1) << i) - 1), ZU(1) << i,
+ "Unexpected result");
+ }
+
+ for (i = 0; i < sizeof(size_t) * 8 - 1; i++) {
+ assert_zu_eq(pow2_ceil((ZU(1) << i) + 1), ZU(1) << (i+1),
+ "Unexpected result");
+ }
+
+ for (pow2 = 1; pow2 < 25; pow2++) {
+ for (x = (ZU(1) << (pow2-1)) + 1; x <= ZU(1) << pow2; x++) {
+ assert_zu_eq(pow2_ceil(x), ZU(1) << pow2,
+ "Unexpected result, x=%zu", x);
+ }
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_malloc_strtoumax_no_endptr)
+{
+ int err;
+
+ set_errno(0);
+ assert_ju_eq(malloc_strtoumax("0", NULL, 0), 0, "Unexpected result");
+ err = get_errno();
+ assert_d_eq(err, 0, "Unexpected failure");
+}
+TEST_END
+
+TEST_BEGIN(test_malloc_strtoumax)
+{
+ struct test_s {
+ const char *input;
+ const char *expected_remainder;
+ int base;
+ int expected_errno;
+ const char *expected_errno_name;
+ uintmax_t expected_x;
+ };
+#define ERR(e) e, #e
+#define UMAX(x) ((uintmax_t)x##ULL)
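+ /*
+ * ERR(e) expands to "e, #e", filling both the expected errno value and its
+ * printable name; UMAX(x) builds the expected uintmax_t result from an
+ * unsigned long long literal.
+ */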
+ struct test_s tests[] = {
+ {"0", "0", -1, ERR(EINVAL), UINTMAX_MAX},
+ {"0", "0", 1, ERR(EINVAL), UINTMAX_MAX},
+ {"0", "0", 37, ERR(EINVAL), UINTMAX_MAX},
+
+ {"", "", 0, ERR(EINVAL), UINTMAX_MAX},
+ {"+", "+", 0, ERR(EINVAL), UINTMAX_MAX},
+ {"++3", "++3", 0, ERR(EINVAL), UINTMAX_MAX},
+ {"-", "-", 0, ERR(EINVAL), UINTMAX_MAX},
+
+ {"42", "", 0, ERR(0), UMAX(42)},
+ {"+42", "", 0, ERR(0), UMAX(42)},
+ {"-42", "", 0, ERR(0), UMAX(-42)},
+ {"042", "", 0, ERR(0), UMAX(042)},
+ {"+042", "", 0, ERR(0), UMAX(042)},
+ {"-042", "", 0, ERR(0), UMAX(-042)},
+ {"0x42", "", 0, ERR(0), UMAX(0x42)},
+ {"+0x42", "", 0, ERR(0), UMAX(0x42)},
+ {"-0x42", "", 0, ERR(0), UMAX(-0x42)},
+
+ {"0", "", 0, ERR(0), UMAX(0)},
+ {"1", "", 0, ERR(0), UMAX(1)},
+
+ {"42", "", 0, ERR(0), UMAX(42)},
+ {" 42", "", 0, ERR(0), UMAX(42)},
+ {"42 ", " ", 0, ERR(0), UMAX(42)},
+ {"0x", "x", 0, ERR(0), UMAX(0)},
+ {"42x", "x", 0, ERR(0), UMAX(42)},
+
+ {"07", "", 0, ERR(0), UMAX(7)},
+ {"010", "", 0, ERR(0), UMAX(8)},
+ {"08", "8", 0, ERR(0), UMAX(0)},
+ {"0_", "_", 0, ERR(0), UMAX(0)},
+
+ {"0x", "x", 0, ERR(0), UMAX(0)},
+ {"0X", "X", 0, ERR(0), UMAX(0)},
+ {"0xg", "xg", 0, ERR(0), UMAX(0)},
+ {"0XA", "", 0, ERR(0), UMAX(10)},
+
+ {"010", "", 10, ERR(0), UMAX(10)},
+ {"0x3", "x3", 10, ERR(0), UMAX(0)},
+
+ {"12", "2", 2, ERR(0), UMAX(1)},
+ {"78", "8", 8, ERR(0), UMAX(7)},
+ {"9a", "a", 10, ERR(0), UMAX(9)},
+ {"9A", "A", 10, ERR(0), UMAX(9)},
+ {"fg", "g", 16, ERR(0), UMAX(15)},
+ {"FG", "G", 16, ERR(0), UMAX(15)},
+ {"0xfg", "g", 16, ERR(0), UMAX(15)},
+ {"0XFG", "G", 16, ERR(0), UMAX(15)},
+ {"z_", "_", 36, ERR(0), UMAX(35)},
+ {"Z_", "_", 36, ERR(0), UMAX(35)}
+ };
+#undef ERR
+#undef UMAX
+ unsigned i;
+
+ for (i = 0; i < sizeof(tests)/sizeof(struct test_s); i++) {
+ struct test_s *test = &tests[i];
+ int err;
+ uintmax_t result;
+ char *remainder;
+
+ set_errno(0);
+ result = malloc_strtoumax(test->input, &remainder, test->base);
+ err = get_errno();
+ assert_d_eq(err, test->expected_errno,
+ "Expected errno %s for \"%s\", base %d",
+ test->expected_errno_name, test->input, test->base);
+ assert_str_eq(remainder, test->expected_remainder,
+ "Unexpected remainder for \"%s\", base %d",
+ test->input, test->base);
+ if (err == 0) {
+ assert_ju_eq(result, test->expected_x,
+ "Unexpected result for \"%s\", base %d",
+ test->input, test->base);
+ }
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_malloc_snprintf_truncated)
+{
+#define BUFLEN 15
+ char buf[BUFLEN];
+ int result;
+ size_t len;
+#define TEST(expected_str_untruncated, fmt...) do { \
+ result = malloc_snprintf(buf, len, fmt); \
+ assert_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0, \
+ "Unexpected string inequality (\"%s\" vs \"%s\")", \
+ buf, expected_str_untruncated); \
+ assert_d_eq(result, strlen(expected_str_untruncated), \
+ "Unexpected result"); \
+} while (0)
+
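+ /*
+ * Render each format into every buffer length from 1 to BUFLEN-1: the
+ * truncated output must match a prefix of the full string, and the return
+ * value must be the length of the untruncated string.
+ */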
+ for (len = 1; len < BUFLEN; len++) {
+ TEST("012346789", "012346789");
+ TEST("a0123b", "a%sb", "0123");
+ TEST("a01234567", "a%s%s", "0123", "4567");
+ TEST("a0123 ", "a%-6s", "0123");
+ TEST("a 0123", "a%6s", "0123");
+ TEST("a 012", "a%6.3s", "0123");
+ TEST("a 012", "a%*.*s", 6, 3, "0123");
+ TEST("a 123b", "a% db", 123);
+ TEST("a123b", "a%-db", 123);
+ TEST("a-123b", "a%-db", -123);
+ TEST("a+123b", "a%+db", 123);
+ }
+#undef BUFLEN
+#undef TEST
+}
+TEST_END
+
+TEST_BEGIN(test_malloc_snprintf)
+{
+#define BUFLEN 128
+ char buf[BUFLEN];
+ int result;
+#define TEST(expected_str, fmt...) do { \
+ result = malloc_snprintf(buf, sizeof(buf), fmt); \
+ assert_str_eq(buf, expected_str, "Unexpected output"); \
+ assert_d_eq(result, strlen(expected_str), "Unexpected result"); \
+} while (0)
+
+ TEST("hello", "hello");
+
+ TEST("50%, 100%", "50%%, %d%%", 100);
+
+ TEST("a0123b", "a%sb", "0123");
+
+ TEST("a 0123b", "a%5sb", "0123");
+ TEST("a 0123b", "a%*sb", 5, "0123");
+
+ TEST("a0123 b", "a%-5sb", "0123");
+ TEST("a0123b", "a%*sb", -1, "0123");
+ TEST("a0123 b", "a%*sb", -5, "0123");
+ TEST("a0123 b", "a%-*sb", -5, "0123");
+
+ TEST("a012b", "a%.3sb", "0123");
+ TEST("a012b", "a%.*sb", 3, "0123");
+ TEST("a0123b", "a%.*sb", -3, "0123");
+
+ TEST("a 012b", "a%5.3sb", "0123");
+ TEST("a 012b", "a%5.*sb", 3, "0123");
+ TEST("a 012b", "a%*.3sb", 5, "0123");
+ TEST("a 012b", "a%*.*sb", 5, 3, "0123");
+ TEST("a 0123b", "a%*.*sb", 5, -3, "0123");
+
+ TEST("_abcd_", "_%x_", 0xabcd);
+ TEST("_0xabcd_", "_%#x_", 0xabcd);
+ TEST("_1234_", "_%o_", 01234);
+ TEST("_01234_", "_%#o_", 01234);
+ TEST("_1234_", "_%u_", 1234);
+
+ TEST("_1234_", "_%d_", 1234);
+ TEST("_ 1234_", "_% d_", 1234);
+ TEST("_+1234_", "_%+d_", 1234);
+ TEST("_-1234_", "_%d_", -1234);
+ TEST("_-1234_", "_% d_", -1234);
+ TEST("_-1234_", "_%+d_", -1234);
+
+ TEST("_-1234_", "_%d_", -1234);
+ TEST("_1234_", "_%d_", 1234);
+ TEST("_-1234_", "_%i_", -1234);
+ TEST("_1234_", "_%i_", 1234);
+ TEST("_01234_", "_%#o_", 01234);
+ TEST("_1234_", "_%u_", 1234);
+ TEST("_0x1234abc_", "_%#x_", 0x1234abc);
+ TEST("_0X1234ABC_", "_%#X_", 0x1234abc);
+ TEST("_c_", "_%c_", 'c');
+ TEST("_string_", "_%s_", "string");
+ TEST("_0x42_", "_%p_", ((void *)0x42));
+
+ TEST("_-1234_", "_%ld_", ((long)-1234));
+ TEST("_1234_", "_%ld_", ((long)1234));
+ TEST("_-1234_", "_%li_", ((long)-1234));
+ TEST("_1234_", "_%li_", ((long)1234));
+ TEST("_01234_", "_%#lo_", ((long)01234));
+ TEST("_1234_", "_%lu_", ((long)1234));
+ TEST("_0x1234abc_", "_%#lx_", ((long)0x1234abc));
+ TEST("_0X1234ABC_", "_%#lX_", ((long)0x1234ABC));
+
+ TEST("_-1234_", "_%lld_", ((long long)-1234));
+ TEST("_1234_", "_%lld_", ((long long)1234));
+ TEST("_-1234_", "_%lli_", ((long long)-1234));
+ TEST("_1234_", "_%lli_", ((long long)1234));
+ TEST("_01234_", "_%#llo_", ((long long)01234));
+ TEST("_1234_", "_%llu_", ((long long)1234));
+ TEST("_0x1234abc_", "_%#llx_", ((long long)0x1234abc));
+ TEST("_0X1234ABC_", "_%#llX_", ((long long)0x1234ABC));
+
+ TEST("_-1234_", "_%qd_", ((long long)-1234));
+ TEST("_1234_", "_%qd_", ((long long)1234));
+ TEST("_-1234_", "_%qi_", ((long long)-1234));
+ TEST("_1234_", "_%qi_", ((long long)1234));
+ TEST("_01234_", "_%#qo_", ((long long)01234));
+ TEST("_1234_", "_%qu_", ((long long)1234));
+ TEST("_0x1234abc_", "_%#qx_", ((long long)0x1234abc));
+ TEST("_0X1234ABC_", "_%#qX_", ((long long)0x1234ABC));
+
+ TEST("_-1234_", "_%jd_", ((intmax_t)-1234));
+ TEST("_1234_", "_%jd_", ((intmax_t)1234));
+ TEST("_-1234_", "_%ji_", ((intmax_t)-1234));
+ TEST("_1234_", "_%ji_", ((intmax_t)1234));
+ TEST("_01234_", "_%#jo_", ((intmax_t)01234));
+ TEST("_1234_", "_%ju_", ((intmax_t)1234));
+ TEST("_0x1234abc_", "_%#jx_", ((intmax_t)0x1234abc));
+ TEST("_0X1234ABC_", "_%#jX_", ((intmax_t)0x1234ABC));
+
+ TEST("_1234_", "_%td_", ((ptrdiff_t)1234));
+ TEST("_-1234_", "_%td_", ((ptrdiff_t)-1234));
+ TEST("_1234_", "_%ti_", ((ptrdiff_t)1234));
+ TEST("_-1234_", "_%ti_", ((ptrdiff_t)-1234));
+
+ TEST("_-1234_", "_%zd_", ((ssize_t)-1234));
+ TEST("_1234_", "_%zd_", ((ssize_t)1234));
+ TEST("_-1234_", "_%zi_", ((ssize_t)-1234));
+ TEST("_1234_", "_%zi_", ((ssize_t)1234));
+ TEST("_01234_", "_%#zo_", ((ssize_t)01234));
+ TEST("_1234_", "_%zu_", ((ssize_t)1234));
+ TEST("_0x1234abc_", "_%#zx_", ((ssize_t)0x1234abc));
+ TEST("_0X1234ABC_", "_%#zX_", ((ssize_t)0x1234ABC));
+#undef BUFLEN
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_pow2_ceil,
+ test_malloc_strtoumax_no_endptr,
+ test_malloc_strtoumax,
+ test_malloc_snprintf_truncated,
+ test_malloc_snprintf));
+}
diff --git a/deps/jemalloc/test/unit/zero.c b/deps/jemalloc/test/unit/zero.c
new file mode 100644
index 000000000..65a8f0c9c
--- /dev/null
+++ b/deps/jemalloc/test/unit/zero.c
@@ -0,0 +1,78 @@
+#include "test/jemalloc_test.h"
+
+#ifdef JEMALLOC_FILL
+const char *malloc_conf =
+ "abort:false,junk:false,zero:true,redzone:false,quarantine:0";
+#endif
+
+static void
+test_zero(size_t sz_min, size_t sz_max)
+{
+ char *s;
+ size_t sz_prev, sz, i;
+
+ sz_prev = 0;
+ s = (char *)mallocx(sz_min, 0);
+ assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
+
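+ /*
+ * Walk up through the size classes: bytes newly exposed by each resize
+ * must be zero-filled, previously written 'a' sentinels must survive, and
+ * xallocx()/rallocx() grow the allocation to the next class.
+ */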
+ for (sz = sallocx(s, 0); sz <= sz_max;
+ sz_prev = sz, sz = sallocx(s, 0)) {
+ if (sz_prev > 0) {
+ assert_c_eq(s[0], 'a',
+ "Previously allocated byte %zu/%zu is corrupted",
+ ZU(0), sz_prev);
+ assert_c_eq(s[sz_prev-1], 'a',
+ "Previously allocated byte %zu/%zu is corrupted",
+ sz_prev-1, sz_prev);
+ }
+
+ for (i = sz_prev; i < sz; i++) {
+ assert_c_eq(s[i], 0x0,
+ "Newly allocated byte %zu/%zu isn't zero-filled",
+ i, sz);
+ s[i] = 'a';
+ }
+
+ if (xallocx(s, sz+1, 0, 0) == sz) {
+ s = (char *)rallocx(s, sz+1, 0);
+ assert_ptr_not_null((void *)s,
+ "Unexpected rallocx() failure");
+ }
+ }
+
+ dallocx(s, 0);
+}
+
+TEST_BEGIN(test_zero_small)
+{
+
+ test_skip_if(!config_fill);
+ test_zero(1, SMALL_MAXCLASS-1);
+}
+TEST_END
+
+TEST_BEGIN(test_zero_large)
+{
+
+ test_skip_if(!config_fill);
+ test_zero(SMALL_MAXCLASS+1, arena_maxclass);
+}
+TEST_END
+
+TEST_BEGIN(test_zero_huge)
+{
+
+ test_skip_if(!config_fill);
+ test_zero(arena_maxclass+1, chunksize*2);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_zero_small,
+ test_zero_large,
+ test_zero_huge));
+}