path: root/deps/jemalloc
author    antirez <antirez@gmail.com>  2015-10-06 16:18:30 +0200
committer antirez <antirez@gmail.com>  2015-10-06 16:55:37 +0200
commit    a9951b1b6a326532163e0fe4ee1a26e972258a1e (patch)
tree      ca555f37238537175cc1b34aa62a9f873026047f /deps/jemalloc
parent    e3ded0273c43986a49ddd9d5fb4a20d187d015de (diff)
download  redis-a9951b1b6a326532163e0fe4ee1a26e972258a1e.tar.gz
Jemalloc updated to 4.0.3.
Diffstat (limited to 'deps/jemalloc')
-rw-r--r--  deps/jemalloc/.autom4te.cfg | 3
-rw-r--r--  deps/jemalloc/.gitattributes | 1
-rw-r--r--  deps/jemalloc/.gitignore | 7
-rw-r--r--  deps/jemalloc/COPYING | 4
-rw-r--r--  deps/jemalloc/ChangeLog | 250
-rw-r--r--  deps/jemalloc/INSTALL | 148
-rw-r--r--  deps/jemalloc/Makefile.in | 112
-rw-r--r--  deps/jemalloc/VERSION | 2
-rw-r--r--  deps/jemalloc/bin/jemalloc-config.in | 79
-rw-r--r--[-rwxr-xr-x]  deps/jemalloc/bin/jeprof.in (renamed from deps/jemalloc/bin/pprof) | 502
-rwxr-xr-x  deps/jemalloc/config.guess | 192
-rwxr-xr-x  deps/jemalloc/config.sub | 22
-rwxr-xr-x  deps/jemalloc/configure | 1408
-rw-r--r--  deps/jemalloc/configure.ac | 590
-rw-r--r--  deps/jemalloc/doc/jemalloc.3 | 881
-rw-r--r--  deps/jemalloc/doc/jemalloc.html | 897
-rw-r--r--  deps/jemalloc/doc/jemalloc.xml.in | 1207
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/arena.h | 1032
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/atomic.h | 477
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/base.h | 4
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/bitmap.h | 58
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/chunk.h | 62
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/chunk_dss.h | 3
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/chunk_mmap.h | 7
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/ckh.h | 8
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/ctl.h | 14
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/extent.h | 217
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/hash.h | 13
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/huge.h | 36
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in | 878
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h | 64
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in | 91
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h | 6
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/mutex.h | 14
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/pages.h | 26
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/private_symbols.txt | 336
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/prng.h | 14
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/prof.h | 702
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/ql.h | 4
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/qr.h | 6
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/quarantine.h | 21
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/rb.h | 24
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/rtree.h | 366
-rwxr-xr-x  deps/jemalloc/include/jemalloc/internal/size_classes.sh | 290
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/stats.h | 64
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/tcache.h | 305
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/tsd.h | 539
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/util.h | 174
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/valgrind.h | 112
-rwxr-xr-x  deps/jemalloc/include/jemalloc/jemalloc.sh | 4
-rw-r--r--  deps/jemalloc/include/jemalloc/jemalloc_defs.h.in | 17
-rw-r--r--  deps/jemalloc/include/jemalloc/jemalloc_macros.h.in | 93
-rw-r--r--  deps/jemalloc/include/jemalloc/jemalloc_protos.h.in | 86
-rw-r--r--  deps/jemalloc/include/jemalloc/jemalloc_typedefs.h.in | 57
-rw-r--r--  deps/jemalloc/include/msvc_compat/C99/stdbool.h (renamed from deps/jemalloc/include/msvc_compat/stdbool.h) | 4
-rw-r--r--  deps/jemalloc/include/msvc_compat/C99/stdint.h (renamed from deps/jemalloc/include/msvc_compat/stdint.h) | 0
-rw-r--r--  deps/jemalloc/include/msvc_compat/inttypes.h | 313
-rw-r--r--  deps/jemalloc/include/msvc_compat/strings.h | 10
-rw-r--r--  deps/jemalloc/include/msvc_compat/windows_extra.h | 26
-rw-r--r--  deps/jemalloc/jemalloc.pc.in | 12
-rw-r--r--  deps/jemalloc/src/arena.c | 2899
-rw-r--r--  deps/jemalloc/src/base.c | 178
-rw-r--r--  deps/jemalloc/src/bitmap.c | 18
-rw-r--r--  deps/jemalloc/src/chunk.c | 792
-rw-r--r--  deps/jemalloc/src/chunk_dss.c | 54
-rw-r--r--  deps/jemalloc/src/chunk_mmap.c | 154
-rw-r--r--  deps/jemalloc/src/ckh.c | 61
-rw-r--r--  deps/jemalloc/src/ctl.c | 1085
-rw-r--r--  deps/jemalloc/src/extent.c | 40
-rw-r--r--  deps/jemalloc/src/huge.c | 546
-rw-r--r--  deps/jemalloc/src/jemalloc.c | 2078
-rw-r--r--  deps/jemalloc/src/mutex.c | 10
-rw-r--r--  deps/jemalloc/src/pages.c | 173
-rw-r--r--  deps/jemalloc/src/prof.c | 1821
-rw-r--r--  deps/jemalloc/src/quarantine.c | 132
-rw-r--r--  deps/jemalloc/src/rtree.c | 138
-rw-r--r--  deps/jemalloc/src/stats.c | 479
-rw-r--r--  deps/jemalloc/src/tcache.c | 320
-rw-r--r--  deps/jemalloc/src/tsd.c | 62
-rw-r--r--  deps/jemalloc/src/util.c | 30
-rw-r--r--  deps/jemalloc/src/valgrind.c | 34
-rw-r--r--  deps/jemalloc/src/zone.c | 34
-rw-r--r--  deps/jemalloc/test/include/test/btalloc.h | 31
-rw-r--r--  deps/jemalloc/test/include/test/jemalloc_test.h.in | 12
-rw-r--r--  deps/jemalloc/test/include/test/jemalloc_test_defs.h.in | 6
-rw-r--r--  deps/jemalloc/test/include/test/math.h | 2
-rw-r--r--  deps/jemalloc/test/include/test/mq.h | 19
-rw-r--r--  deps/jemalloc/test/include/test/test.h | 390
-rw-r--r--  deps/jemalloc/test/include/test/thd.h | 2
-rw-r--r--  deps/jemalloc/test/include/test/timer.h | 26
-rw-r--r--  deps/jemalloc/test/integration/MALLOCX_ARENA.c | 21
-rw-r--r--  deps/jemalloc/test/integration/allocm.c | 107
-rw-r--r--  deps/jemalloc/test/integration/chunk.c | 276
-rw-r--r--  deps/jemalloc/test/integration/mallocx.c | 139
-rw-r--r--  deps/jemalloc/test/integration/mremap.c | 45
-rw-r--r--  deps/jemalloc/test/integration/overflow.c | 49
-rw-r--r--  deps/jemalloc/test/integration/rallocm.c | 111
-rw-r--r--  deps/jemalloc/test/integration/rallocx.c | 15
-rw-r--r--  deps/jemalloc/test/integration/sdallocx.c | 57
-rw-r--r--  deps/jemalloc/test/integration/xallocx.c | 414
-rw-r--r--  deps/jemalloc/test/src/SFMT.c | 22
-rw-r--r--  deps/jemalloc/test/src/btalloc.c | 8
-rw-r--r--  deps/jemalloc/test/src/btalloc_0.c | 3
-rw-r--r--  deps/jemalloc/test/src/btalloc_1.c | 3
-rw-r--r--  deps/jemalloc/test/src/mq.c | 29
-rw-r--r--  deps/jemalloc/test/src/mtx.c | 4
-rw-r--r--  deps/jemalloc/test/src/test.c | 23
-rw-r--r--  deps/jemalloc/test/src/thd.c | 6
-rw-r--r--  deps/jemalloc/test/src/timer.c | 85
-rw-r--r--  deps/jemalloc/test/stress/microbench.c | 181
-rw-r--r--  deps/jemalloc/test/unit/SFMT.c | 2008
-rw-r--r--  deps/jemalloc/test/unit/atomic.c | 122
-rw-r--r--  deps/jemalloc/test/unit/bitmap.c | 24
-rw-r--r--  deps/jemalloc/test/unit/ckh.c | 68
-rw-r--r--  deps/jemalloc/test/unit/hash.c | 4
-rw-r--r--  deps/jemalloc/test/unit/junk.c | 96
-rw-r--r--  deps/jemalloc/test/unit/junk_alloc.c | 3
-rw-r--r--  deps/jemalloc/test/unit/junk_free.c | 3
-rw-r--r--  deps/jemalloc/test/unit/lg_chunk.c | 26
-rw-r--r--  deps/jemalloc/test/unit/mallctl.c | 256
-rw-r--r--  deps/jemalloc/test/unit/math.c | 6
-rw-r--r--  deps/jemalloc/test/unit/mq.c | 3
-rw-r--r--  deps/jemalloc/test/unit/prof_accum.c | 9
-rw-r--r--  deps/jemalloc/test/unit/prof_accum.h | 35
-rw-r--r--  deps/jemalloc/test/unit/prof_accum_a.c | 3
-rw-r--r--  deps/jemalloc/test/unit/prof_accum_b.c | 3
-rw-r--r--  deps/jemalloc/test/unit/prof_active.c | 136
-rw-r--r--  deps/jemalloc/test/unit/prof_gdump.c | 29
-rw-r--r--  deps/jemalloc/test/unit/prof_reset.c | 302
-rw-r--r--  deps/jemalloc/test/unit/prof_thread_name.c | 129
-rw-r--r--  deps/jemalloc/test/unit/rb.c | 7
-rw-r--r--  deps/jemalloc/test/unit/rtree.c | 93
-rw-r--r--  deps/jemalloc/test/unit/size_classes.c | 89
-rw-r--r--  deps/jemalloc/test/unit/stats.c | 171
-rw-r--r--  deps/jemalloc/test/unit/tsd.c | 48
-rw-r--r--  deps/jemalloc/test/unit/util.c | 98
-rw-r--r--  deps/jemalloc/test/unit/zero.c | 4
137 files changed, 19876 insertions, 10017 deletions
diff --git a/deps/jemalloc/.autom4te.cfg b/deps/jemalloc/.autom4te.cfg
new file mode 100644
index 000000000..fe2424db5
--- /dev/null
+++ b/deps/jemalloc/.autom4te.cfg
@@ -0,0 +1,3 @@
+begin-language: "Autoconf-without-aclocal-m4"
+args: --no-cache
+end-language: "Autoconf-without-aclocal-m4"
diff --git a/deps/jemalloc/.gitattributes b/deps/jemalloc/.gitattributes
new file mode 100644
index 000000000..6313b56c5
--- /dev/null
+++ b/deps/jemalloc/.gitattributes
@@ -0,0 +1 @@
+* text=auto eol=lf
diff --git a/deps/jemalloc/.gitignore b/deps/jemalloc/.gitignore
index 4c408ec2c..d0e393619 100644
--- a/deps/jemalloc/.gitignore
+++ b/deps/jemalloc/.gitignore
@@ -1,8 +1,8 @@
/*.gcov.*
-/autom4te.cache/
-
+/bin/jemalloc-config
/bin/jemalloc.sh
+/bin/jeprof
/config.stamp
/config.log
@@ -15,6 +15,8 @@
/doc/jemalloc.html
/doc/jemalloc.3
+/jemalloc.pc
+
/lib/
/Makefile
@@ -35,6 +37,7 @@
/include/jemalloc/jemalloc_protos.h
/include/jemalloc/jemalloc_protos_jet.h
/include/jemalloc/jemalloc_rename.h
+/include/jemalloc/jemalloc_typedefs.h
/src/*.[od]
/src/*.gcda
diff --git a/deps/jemalloc/COPYING b/deps/jemalloc/COPYING
index bdda0feb9..611968cda 100644
--- a/deps/jemalloc/COPYING
+++ b/deps/jemalloc/COPYING
@@ -1,10 +1,10 @@
Unless otherwise specified, files in the jemalloc source distribution are
subject to the following license:
--------------------------------------------------------------------------------
-Copyright (C) 2002-2014 Jason Evans <jasone@canonware.com>.
+Copyright (C) 2002-2015 Jason Evans <jasone@canonware.com>.
All rights reserved.
Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved.
-Copyright (C) 2009-2014 Facebook, Inc. All rights reserved.
+Copyright (C) 2009-2015 Facebook, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
diff --git a/deps/jemalloc/ChangeLog b/deps/jemalloc/ChangeLog
index d56ee999e..e3b0a5190 100644
--- a/deps/jemalloc/ChangeLog
+++ b/deps/jemalloc/ChangeLog
@@ -1,10 +1,250 @@
Following are change highlights associated with official releases. Important
-bug fixes are all mentioned, but internal enhancements are omitted here for
-brevity (even though they are more fun to write about). Much more detail can be
-found in the git revision history:
+bug fixes are all mentioned, but some internal enhancements are omitted here for
+brevity. Much more detail can be found in the git revision history:
https://github.com/jemalloc/jemalloc
+* 4.0.3 (September 24, 2015)
+
+ This bugfix release continues the trend of xallocx() and heap profiling fixes.
+
+ Bug fixes:
+ - Fix xallocx(..., MALLOCX_ZERO) to zero all trailing bytes of large
+ allocations when --enable-cache-oblivious configure option is enabled.
+ - Fix xallocx(..., MALLOCX_ZERO) to zero trailing bytes of huge allocations
+ when resizing from/to a size class that is not a multiple of the chunk size.
+ - Fix prof_tctx_dump_iter() to filter out nodes that were created after heap
+ profile dumping started.
+ - Work around a potentially bad thread-specific data initialization
+ interaction with NPTL (glibc's pthreads implementation).
+
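
To make the xallocx()/MALLOCX_ZERO fixes above concrete, here is a minimal usage sketch of the call pattern involved. This is not jemalloc test code; the sizes and output are illustrative, and whether the in-place grow succeeds depends on heap layout.

    #include <stdio.h>
    #include <string.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        size_t old_size = 32 * 1024;
        void *p = mallocx(old_size, MALLOCX_ZERO);
        if (p == NULL)
            return 1;
        memset(p, 0xa5, old_size);

        /* xallocx() never moves p; it returns the resulting usable size.
         * With MALLOCX_ZERO, any bytes gained beyond old_size should read
         * as zero -- the guarantee the 4.0.3 fixes restore for large and
         * huge size classes. */
        size_t new_size = xallocx(p, 64 * 1024, 0, MALLOCX_ZERO);
        if (new_size > old_size) {
            unsigned char *c = (unsigned char *)p;
            printf("grew to %zu; first trailing byte = %u\n",
                new_size, (unsigned)c[old_size]);
        } else {
            printf("could not grow in place (usable size %zu)\n", new_size);
        }

        dallocx(p, 0);
        return 0;
    }
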
+* 4.0.2 (September 21, 2015)
+
+ This bugfix release addresses a few bugs specific to heap profiling.
+
+ Bug fixes:
+ - Fix ixallocx_prof_sample() to never modify nor create sampled small
+ allocations. xallocx() is in general incapable of moving small allocations,
+ so this fix removes buggy code without loss of generality.
+ - Fix irallocx_prof_sample() to always allocate large regions, even when
+ alignment is non-zero.
+ - Fix prof_alloc_rollback() to read tdata from thread-specific data rather
+ than dereferencing a potentially invalid tctx.
+
+* 4.0.1 (September 15, 2015)
+
+ This is a bugfix release that is somewhat high risk due to the amount of
+ refactoring required to address deep xallocx() problems. As a side effect of
+ these fixes, xallocx() now tries harder to partially fulfill requests for
+ optional extra space. Note that a couple of minor heap profiling
+ optimizations are included, but these are better thought of as performance
+ fixes that were integral to discovering most of the other bugs.
+
+ Optimizations:
+ - Avoid a chunk metadata read in arena_prof_tctx_set(), since it is in the
+ fast path when heap profiling is enabled. Additionally, split a special
+ case out into arena_prof_tctx_reset(), which also avoids chunk metadata
+ reads.
+ - Optimize irallocx_prof() to optimistically update the sampler state. The
+ prior implementation appears to have been a holdover from when
+ rallocx()/xallocx() functionality was combined as rallocm().
+
+ Bug fixes:
+ - Fix TLS configuration such that it is enabled by default for platforms on
+ which it works correctly.
+ - Fix arenas_cache_cleanup() and arena_get_hard() to handle
+ allocation/deallocation within the application's thread-specific data
+ cleanup functions even after arenas_cache is torn down.
+ - Fix xallocx() bugs related to size+extra exceeding HUGE_MAXCLASS.
+ - Fix chunk purge hook calls for in-place huge shrinking reallocation to
+ specify the old chunk size rather than the new chunk size. This bug caused
+ no correctness issues for the default chunk purge function, but was
+ visible to custom functions set via the "arena.<i>.chunk_hooks" mallctl.
+ - Fix heap profiling bugs:
+ + Fix heap profiling to distinguish among otherwise identical sample sites
+ with interposed resets (triggered via the "prof.reset" mallctl). This bug
+ could cause data structure corruption that would most likely result in a
+ segfault.
+ + Fix irealloc_prof() to prof_alloc_rollback() on OOM.
+ + Make one call to prof_active_get_unlocked() per allocation event, and use
+ the result throughout the relevant functions that handle an allocation
+ event. Also add a missing check in prof_realloc(). These fixes protect
+ allocation events against concurrent prof_active changes.
+ + Fix ixallocx_prof() to pass usize_max and zero to ixallocx_prof_sample()
+ in the correct order.
+ + Fix prof_realloc() to call prof_free_sampled_object() after calling
+ prof_malloc_sample_object(). Prior to this fix, if tctx and old_tctx were
+ the same, the tctx could have been prematurely destroyed.
+ - Fix portability bugs:
+ + Don't bitshift by negative amounts when encoding/decoding run sizes in
+ chunk header maps. This affected systems with page sizes greater than 8
+ KiB.
+ + Rename index_t to szind_t to avoid an existing type on Solaris.
+ + Add JEMALLOC_CXX_THROW to the memalign() function prototype, in order to
+ match glibc and avoid compilation errors when including both
+ jemalloc/jemalloc.h and malloc.h in C++ code.
+ + Don't assume that /bin/sh is appropriate when running size_classes.sh
+ during configuration.
+ + Consider __sparcv9 a synonym for __sparc64__ when defining LG_QUANTUM.
+ + Link tests to librt if it contains clock_gettime(2).
+
+* 4.0.0 (August 17, 2015)
+
+ This version contains many speed and space optimizations, both minor and
+ major. The major themes are generalization, unification, and simplification.
+ Although many of these optimizations cause no visible behavior change, their
+ cumulative effect is substantial.
+
+ New features:
+ - Normalize size class spacing to be consistent across the complete size
+ range. By default there are four size classes per size doubling, but this
+ is now configurable via the --with-lg-size-class-group option. Also add the
+ --with-lg-page, --with-lg-page-sizes, --with-lg-quantum, and
+ --with-lg-tiny-min options, which can be used to tweak page and size class
+ settings. Impacts:
+ + Worst case performance for incrementally growing/shrinking reallocation
+ is improved because there are far fewer size classes, and therefore
+ copying happens less often.
+ + Internal fragmentation is limited to 20% for all but the smallest size
+ classes (those less than four times the quantum). (1B + 4 KiB)
+ and (1B + 4 MiB) previously suffered nearly 50% internal fragmentation.
+ + Chunk fragmentation tends to be lower because there are fewer distinct run
+ sizes to pack.
+ - Add support for explicit tcaches. The "tcache.create", "tcache.flush", and
+ "tcache.destroy" mallctls control tcache lifetime and flushing, and the
+ MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to the *allocx() API
+ control which tcache is used for each operation.
+ - Implement per thread heap profiling, as well as the ability to
+ enable/disable heap profiling on a per thread basis. Add the "prof.reset",
+ "prof.lg_sample", "thread.prof.name", "thread.prof.active",
+ "opt.prof_thread_active_init", "prof.thread_active_init", and
+ "thread.prof.active" mallctls.
+ - Add support for per arena application-specified chunk allocators, configured
+ via the "arena.<i>.chunk_hooks" mallctl.
+ - Refactor huge allocation to be managed by arenas, so that arenas now
+ function as general purpose independent allocators. This is important in
+ the context of user-specified chunk allocators, aside from the scalability
+ benefits. Related new statistics:
+ + The "stats.arenas.<i>.huge.allocated", "stats.arenas.<i>.huge.nmalloc",
+ "stats.arenas.<i>.huge.ndalloc", and "stats.arenas.<i>.huge.nrequests"
+ mallctls provide high level per arena huge allocation statistics.
+ + The "arenas.nhchunks", "arenas.hchunk.<i>.size",
+ "stats.arenas.<i>.hchunks.<j>.nmalloc",
+ "stats.arenas.<i>.hchunks.<j>.ndalloc",
+ "stats.arenas.<i>.hchunks.<j>.nrequests", and
+ "stats.arenas.<i>.hchunks.<j>.curhchunks" mallctls provide per size class
+ statistics.
+ - Add the 'util' column to malloc_stats_print() output, which reports the
+ proportion of available regions that are currently in use for each small
+ size class.
+ - Add "alloc" and "free" modes for junk filling (see the "opt.junk"
+ mallctl), so that it is possible to separately enable junk filling for
+ allocation versus deallocation.
+ - Add the jemalloc-config script, which provides information about how
+ jemalloc was configured, and how to integrate it into application builds.
+ - Add metadata statistics, which are accessible via the "stats.metadata",
+ "stats.arenas.<i>.metadata.mapped", and
+ "stats.arenas.<i>.metadata.allocated" mallctls.
+ - Add the "stats.resident" mallctl, which reports the upper limit of
+ physically resident memory mapped by the allocator.
+ - Add per arena control over unused dirty page purging, via the
+ "arenas.lg_dirty_mult", "arena.<i>.lg_dirty_mult", and
+ "stats.arenas.<i>.lg_dirty_mult" mallctls.
+ - Add the "prof.gdump" mallctl, which makes it possible to toggle the gdump
+ feature on/off during program execution.
+ - Add sdallocx(), which implements sized deallocation. The primary
+ optimization over dallocx() is the removal of a metadata read, which often
+ suffers an L1 cache miss.
+ - Add missing header includes in jemalloc/jemalloc.h, so that applications
+ only have to #include <jemalloc/jemalloc.h>.
+ - Add support for additional platforms:
+ + Bitrig
+ + Cygwin
+ + DragonFlyBSD
+ + iOS
+ + OpenBSD
+ + OpenRISC/or1k
+
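
As a concrete illustration of the explicit tcache and sized deallocation features listed above, a minimal sketch (error handling is abbreviated and the 128-byte size is arbitrary):

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        unsigned tcache;
        size_t sz = sizeof(tcache);

        /* Create an explicit tcache; its index is returned through oldp. */
        if (mallctl("tcache.create", &tcache, &sz, NULL, 0) != 0) {
            fprintf(stderr, "tcache.create failed\n");
            return 1;
        }

        /* Allocate and free through that tcache.  sdallocx() hands the size
         * back to the allocator, saving the metadata read mentioned above. */
        size_t size = 128;
        void *p = mallocx(size, MALLOCX_TCACHE(tcache));
        if (p != NULL)
            sdallocx(p, size, MALLOCX_TCACHE(tcache));

        /* Flush and destroy the tcache once it is no longer needed. */
        mallctl("tcache.destroy", NULL, NULL, &tcache, sizeof(tcache));
        return 0;
    }
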
+ Optimizations:
+ - Maintain dirty runs in per arena LRUs rather than in per arena trees of
+ dirty-run-containing chunks. In practice this change significantly reduces
+ dirty page purging volume.
+ - Integrate whole chunks into the unused dirty page purging machinery. This
+ reduces the cost of repeated huge allocation/deallocation, because it
+ effectively introduces a cache of chunks.
+ - Split the arena chunk map into two separate arrays, in order to increase
+ cache locality for the frequently accessed bits.
+ - Move small run metadata out of runs, into arena chunk headers. This reduces
+ run fragmentation, smaller runs reduce external fragmentation for small size
+ classes, and packed (less uniformly aligned) metadata layout improves CPU
+ cache set distribution.
+ - Randomly distribute large allocation base pointer alignment relative to page
+ boundaries in order to more uniformly utilize CPU cache sets. This can be
+ disabled via the --disable-cache-oblivious configure option, and queried via
+ the "config.cache_oblivious" mallctl.
+ - Micro-optimize the fast paths for the public API functions.
+ - Refactor thread-specific data to reside in a single structure. This assures
+ that only a single TLS read is necessary per call into the public API.
+ - Implement in-place huge allocation growing and shrinking.
+ - Refactor rtree (radix tree for chunk lookups) to be lock-free, and make
+ additional optimizations that reduce maximum lookup depth to one or two
+ levels. This resolves what was a concurrency bottleneck for per arena huge
+ allocation, because a global data structure is critical for determining
+ which arenas own which huge allocations.
+
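
The cache-oblivious layout mentioned above can be queried at run time with the usual mallctl() read pattern. A short sketch; the control names used here ("config.cache_oblivious", "arenas.narenas") are documented mallctls, while the printed wording is illustrative:

    #include <stdio.h>
    #include <stdbool.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        bool cache_oblivious;
        unsigned narenas;
        size_t bsz = sizeof(cache_oblivious);
        size_t usz = sizeof(narenas);

        /* Read-only values are fetched through the oldp/oldlenp pair. */
        if (mallctl("config.cache_oblivious", &cache_oblivious, &bsz,
            NULL, 0) == 0)
            printf("cache-oblivious layout: %s\n",
                cache_oblivious ? "enabled" : "disabled");

        if (mallctl("arenas.narenas", &narenas, &usz, NULL, 0) == 0)
            printf("arenas: %u\n", narenas);

        return 0;
    }
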
+ Incompatible changes:
+ - Replace --enable-cc-silence with --disable-cc-silence to suppress spurious
+ warnings by default.
+ - Assure that the constness of malloc_usable_size()'s return type matches that
+ of the system implementation.
+ - Change the heap profile dump format to support per thread heap profiling,
+ rename pprof to jeprof, and enhance it with the --thread=<n> option. As a
+ result, the bundled jeprof must now be used rather than the upstream
+ (gperftools) pprof.
+ - Disable "opt.prof_final" by default, in order to avoid atexit(3), which can
+ internally deadlock on some platforms.
+ - Change the "arenas.nlruns" mallctl type from size_t to unsigned.
+ - Replace the "stats.arenas.<i>.bins.<j>.allocated" mallctl with
+ "stats.arenas.<i>.bins.<j>.curregs".
+ - Ignore MALLOC_CONF in set{uid,gid,cap} binaries.
+ - Ignore MALLOCX_ARENA(a) in dallocx(), in favor of using the
+ MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to control tcache usage.
+
+ Removed features:
+ - Remove the *allocm() API, which is superseded by the *allocx() API.
+ - Remove the --enable-dss options, and make dss non-optional on all platforms
+ which support sbrk(2).
+ - Remove the "arenas.purge" mallctl, which was obsoleted by the
+ "arena.<i>.purge" mallctl in 3.1.0.
+ - Remove the unnecessary "opt.valgrind" mallctl; jemalloc automatically
+ detects whether it is running inside Valgrind.
+ - Remove the "stats.huge.allocated", "stats.huge.nmalloc", and
+ "stats.huge.ndalloc" mallctls.
+ - Remove the --enable-mremap option.
+ - Remove the "stats.chunks.current", "stats.chunks.total", and
+ "stats.chunks.high" mallctls.
+
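
Because the experimental *allocm() API is removed in 4.0.0, existing callers need the *allocx() equivalents. A rough migration sketch, assuming the common allocm()+rsize pattern; the removed calls appear only in comments so the example builds against 4.x headers:

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        size_t size = 4096;

        /* Old experimental API (removed in 4.0.0):
         *     void *p; size_t rsize;
         *     if (allocm(&p, &rsize, size, ALLOCM_ZERO) != ALLOCM_SUCCESS)
         *         return 1;
         * New API: the pointer is the return value, and the real size is
         * obtained separately with sallocx(). */
        void *p = mallocx(size, MALLOCX_ZERO);
        if (p == NULL)
            return 1;
        size_t rsize = sallocx(p, 0);
        printf("requested %zu bytes, usable size %zu\n", size, rsize);

        dallocx(p, 0);
        return 0;
    }
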
+ Bug fixes:
+ - Fix the cactive statistic to decrease (rather than increase) when active
+ memory decreases. This regression was first released in 3.5.0.
+ - Fix OOM handling in memalign() and valloc(). A variant of this bug existed
+ in all releases since 2.0.0, which introduced these functions.
+ - Fix an OOM-related regression in arena_tcache_fill_small(), which could
+ cause cache corruption on OOM. This regression was present in all releases
+ from 2.2.0 through 3.6.0.
+ - Fix size class overflow handling for malloc(), posix_memalign(), memalign(),
+ calloc(), and realloc() when profiling is enabled.
+ - Fix the "arena.<i>.dss" mallctl to return an error if "primary" or
+ "secondary" precedence is specified, but sbrk(2) is not supported.
+ - Fix fallback lg_floor() implementations to handle extremely large inputs.
+ - Ensure the default purgeable zone is after the default zone on OS X.
+ - Fix latent bugs in atomic_*().
+ - Fix the "arena.<i>.dss" mallctl to handle read-only calls.
+ - Fix tls_model configuration to enable the initial-exec model when possible.
+ - Mark malloc_conf as a weak symbol so that the application can override it.
+ - Correctly detect glibc's adaptive pthread mutexes.
+ - Fix the --without-export configure option.
+
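
The malloc_conf fix above refers to the compiled-in option string an application may provide. A minimal sketch of that mechanism; the option values are only an example, and on builds configured with a symbol prefix the variable carries that prefix (e.g. je_malloc_conf):

    #include <jemalloc/jemalloc.h>

    /* jemalloc reads this weak symbol during initialization; settings from
     * /etc/malloc.conf and the MALLOC_CONF environment variable are applied
     * after it and therefore take precedence. */
    const char *malloc_conf = "narenas:4,lg_tcache_max:16";

    int main(void) {
        void *p = mallocx(64, 0);
        if (p != NULL)
            dallocx(p, 0);
        return 0;
    }
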
* 3.6.0 (March 31, 2014)
This version contains a critical bug fix for a regression present in 3.5.0 and
@@ -21,7 +261,7 @@ found in the git revision history:
backtracing to be reliable.
- Use dss allocation precedence for huge allocations as well as small/large
allocations.
- - Fix test assertion failure message formatting. This bug did not manifect on
+ - Fix test assertion failure message formatting. This bug did not manifest on
x86_64 systems because of implementation subtleties in va_list.
- Fix inconsequential test failures for hash and SFMT code.
@@ -516,7 +756,7 @@ found in the git revision history:
- Make it possible for the application to manually flush a thread's cache, via
the "tcache.flush" mallctl.
- Base maximum dirty page count on proportion of active memory.
- - Compute various addtional run-time statistics, including per size class
+ - Compute various additional run-time statistics, including per size class
statistics for large objects.
- Expose malloc_stats_print(), which can be called repeatedly by the
application.
diff --git a/deps/jemalloc/INSTALL b/deps/jemalloc/INSTALL
index 841704d2a..8d3968745 100644
--- a/deps/jemalloc/INSTALL
+++ b/deps/jemalloc/INSTALL
@@ -1,10 +1,23 @@
-Building and installing jemalloc can be as simple as typing the following while
-in the root directory of the source tree:
+Building and installing a packaged release of jemalloc can be as simple as
+typing the following while in the root directory of the source tree:
./configure
make
make install
+If building from unpackaged developer sources, the simplest command sequence
+that might work is:
+
+ ./autogen.sh
+ make dist
+ make
+ make install
+
+Note that documentation is not built by the default target because doing so
+would create a dependency on xsltproc in packaged releases, hence the
+requirement to either run 'make dist' or avoid installing docs via the various
+install_* targets documented below.
+
=== Advanced configuration =====================================================
The 'configure' script supports numerous options that allow control of which
@@ -56,7 +69,7 @@ any of the following arguments (not a definitive list) to 'configure':
replace the "malloc", "calloc", etc. symbols.
--without-export
- Don't export public APIs. This can be useful when building jemalloc as a
+ Don't export public APIs. This can be useful when building jemalloc as a
static library, or to avoid exporting public APIs when using the zone
allocator on OSX.
@@ -71,10 +84,10 @@ any of the following arguments (not a definitive list) to 'configure':
versions of jemalloc can coexist in the same installation directory. For
example, libjemalloc.so.0 becomes libjemalloc<suffix>.so.0.
---enable-cc-silence
- Enable code that silences non-useful compiler warnings. This is helpful
- when trying to tell serious warnings from those due to compiler
- limitations, but it potentially incurs a performance penalty.
+--disable-cc-silence
+ Disable code that silences non-useful compiler warnings. This is mainly
+ useful during development when auditing the set of warnings that are being
+ silenced.
--enable-debug
Enable assertions and validation code. This incurs a substantial
@@ -94,15 +107,15 @@ any of the following arguments (not a definitive list) to 'configure':
there are interactions between the various coverage targets, so it is
usually advisable to run 'make clean' between repeated code coverage runs.
---enable-ivsalloc
- Enable validation code, which verifies that pointers reside within
- jemalloc-owned chunks before dereferencing them. This incurs a substantial
- performance hit.
-
--disable-stats
Disable statistics gathering functionality. See the "opt.stats_print"
option documentation for usage details.
+--enable-ivsalloc
+ Enable validation code, which verifies that pointers reside within
+ jemalloc-owned chunks before dereferencing them. This incurs a minor
+ performance hit.
+
--enable-prof
Enable heap profiling and leak detection functionality. See the "opt.prof"
option documentation for usage details. When enabled, there are several
@@ -132,12 +145,6 @@ any of the following arguments (not a definitive list) to 'configure':
released in bulk, thus reducing the total number of mutex operations. See
the "opt.tcache" option for usage details.
---enable-mremap
- Enable huge realloc() via mremap(2). mremap() is disabled by default
- because the flavor used is specific to Linux, which has a quirk in its
- virtual memory allocation algorithm that causes semi-permanent VM map holes
- under normal jemalloc operation.
-
--disable-munmap
Disable virtual memory deallocation via munmap(2); instead keep track of
the virtual memory for later use. munmap() is disabled by default (i.e.
@@ -145,10 +152,6 @@ any of the following arguments (not a definitive list) to 'configure':
memory allocation algorithm that causes semi-permanent VM map holes under
normal jemalloc operation.
---enable-dss
- Enable support for page allocation/deallocation via sbrk(2), in addition to
- mmap(2).
-
--disable-fill
Disable support for junk/zero filling of memory, quarantine, and redzones.
See the "opt.junk", "opt.zero", "opt.quarantine", and "opt.redzone" option
@@ -157,11 +160,8 @@ any of the following arguments (not a definitive list) to 'configure':
--disable-valgrind
Disable support for Valgrind.
---disable-experimental
- Disable support for the experimental API (*allocm()).
-
--disable-zone-allocator
- Disable zone allocator for Darwin. This means jemalloc won't be hooked as
+ Disable zone allocator for Darwin. This means jemalloc won't be hooked as
the default allocator on OSX/iOS.
--enable-utrace
@@ -185,10 +185,106 @@ any of the following arguments (not a definitive list) to 'configure':
thread-local variables via the __thread keyword. If TLS is available,
jemalloc uses it for several purposes.
+--disable-cache-oblivious
+ Disable cache-oblivious large allocation alignment for large allocation
+ requests with no alignment constraints. If this feature is disabled, all
+ large allocations are page-aligned as an implementation artifact, which can
+ severely harm CPU cache utilization. However, the cache-oblivious layout
+ comes at the cost of one extra page per large allocation, which in the
+ most extreme case increases physical memory usage for the 16 KiB size class
+ to 20 KiB.
+
--with-xslroot=<path>
Specify where to find DocBook XSL stylesheets when building the
documentation.
+--with-lg-page=<lg-page>
+ Specify the base 2 log of the system page size. This option is only useful
+ when cross compiling, since the configure script automatically determines
+ the host's page size by default.
+
+--with-lg-page-sizes=<lg-page-sizes>
+ Specify the comma-separated base 2 logs of the page sizes to support. This
+ option may be useful when cross-compiling in combination with
+ --with-lg-page, but its primary use case is for integration with FreeBSD's
+ libc, wherein jemalloc is embedded.
+
+--with-lg-size-class-group=<lg-size-class-group>
+ Specify the base 2 log of how many size classes to use for each doubling in
+ size. By default jemalloc uses <lg-size-class-group>=2, which results in
+ e.g. the following size classes:
+
+ [...], 64,
+ 80, 96, 112, 128,
+ 160, [...]
+
+ <lg-size-class-group>=3 results in e.g. the following size classes:
+
+ [...], 64,
+ 72, 80, 88, 96, 104, 112, 120, 128,
+ 144, [...]
+
+ The minimal <lg-size-class-group>=0 causes jemalloc to only provide size
+ classes that are powers of 2:
+
+ [...],
+ 64,
+ 128,
+ 256,
+ [...]
+
+ An implementation detail currently limits the total number of small size
+ classes to 255, and a compilation error will result if the
+ <lg-size-class-group> you specify cannot be supported. The limit is
+ roughly <lg-size-class-group>=4, depending on page size.
+
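
A small sketch of how the resulting size class spacing can be observed from application code via nallocx(); the numbers in the comment assume a default build (<lg-size-class-group>=2, 16-byte quantum):

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        /* With the default four classes per size doubling, requests between
         * 64 and 128 bytes round up to 80, 96, 112 or 128, so 65 is expected
         * to map to 80 -- keeping worst-case internal fragmentation near 20%
         * for all but the smallest classes. */
        size_t requests[] = {65, 96, 100, 129};
        size_t i;
        for (i = 0; i < sizeof(requests) / sizeof(requests[0]); i++)
            printf("request %4zu -> size class %4zu\n",
                requests[i], nallocx(requests[i], 0));
        return 0;
    }
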
+--with-lg-quantum=<lg-quantum>
+ Specify the base 2 log of the minimum allocation alignment. jemalloc needs
+ to know the minimum alignment that meets the following C standard
+ requirement (quoted from the April 12, 2011 draft of the C11 standard):
+
+ The pointer returned if the allocation succeeds is suitably aligned so
+ that it may be assigned to a pointer to any type of object with a
+ fundamental alignment requirement and then used to access such an object
+ or an array of such objects in the space allocated [...]
+
+ This setting is architecture-specific, and although jemalloc includes known
+ safe values for the most commonly used modern architectures, there is a
+ wrinkle related to GNU libc (glibc) that may impact your choice of
+ <lg-quantum>. On most modern architectures, this mandates 16-byte alignment
+ (<lg-quantum>=4), but the glibc developers chose not to meet this
+ requirement for performance reasons. An old discussion can be found at
+ https://sourceware.org/bugzilla/show_bug.cgi?id=206 . Unlike glibc,
+ jemalloc does follow the C standard by default (caveat: jemalloc
+ technically cheats if --with-lg-tiny-min is smaller than
+ --with-lg-quantum), but the fact that Linux systems already work around
+ this allocator noncompliance means that it is generally safe in practice to
+ let jemalloc's minimum alignment follow glibc's lead. If you specify
+ --with-lg-quantum=3 during configuration, jemalloc will provide additional
+ size classes that are not 16-byte-aligned (24, 40, and 56, assuming
+ --with-lg-size-class-group=2).
+
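
The minimum alignment discussed here can also be checked, or overridden per allocation, from application code. A hedged sketch; the 64-byte request is arbitrary, and MALLOCX_ALIGN() is the documented per-call alignment flag:

    #include <stdio.h>
    #include <stdint.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        /* The natural alignment of a small allocation follows the quantum;
         * on a default build (<lg-quantum>=4) this prints 0. */
        void *p = mallocx(32, 0);
        printf("32-byte allocation modulo 16: %u\n",
            (unsigned)((uintptr_t)p % 16));

        /* When the quantum is not enough (SIMD or cache-line data), an
         * explicit alignment can be requested per call instead of relying
         * on configure-time settings. */
        void *q = mallocx(32, MALLOCX_ALIGN(64));
        printf("explicitly aligned allocation modulo 64: %u\n",
            (unsigned)((uintptr_t)q % 64));

        if (p != NULL)
            dallocx(p, 0);
        if (q != NULL)
            dallocx(q, 0);
        return 0;
    }
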
+--with-lg-tiny-min=<lg-tiny-min>
+ Specify the base 2 log of the minimum tiny size class to support. Tiny
+ size classes are powers of 2 less than the quantum, and are only
+ incorporated if <lg-tiny-min> is less than <lg-quantum> (see
+ --with-lg-quantum). Tiny size classes technically violate the C standard
+ requirement for minimum alignment, and crashes could conceivably result if
+ the compiler were to generate instructions that made alignment assumptions,
+ both because illegal instruction traps could result, and because accesses
+ could straddle page boundaries and cause segmentation faults due to
+ accessing unmapped addresses.
+
+ The default of <lg-tiny-min>=3 works well in practice even on architectures
+ that technically require 16-byte alignment, probably for the same reason
+ --with-lg-quantum=3 works. Smaller tiny size classes can, and will, cause
+ crashes (see https://bugzilla.mozilla.org/show_bug.cgi?id=691003 for an
+ example).
+
+ This option is rarely useful, and is mainly provided as documentation of a
+ subtle implementation detail. If you do use this option, specify a
+ value in [3, ..., <lg-quantum>].
+
The following environment variables (not a definitive list) impact configure's
behavior:
diff --git a/deps/jemalloc/Makefile.in b/deps/jemalloc/Makefile.in
index d6b7d6ea3..1ac6f2926 100644
--- a/deps/jemalloc/Makefile.in
+++ b/deps/jemalloc/Makefile.in
@@ -28,6 +28,7 @@ CFLAGS := @CFLAGS@
LDFLAGS := @LDFLAGS@
EXTRA_LDFLAGS := @EXTRA_LDFLAGS@
LIBS := @LIBS@
+TESTLIBS := @TESTLIBS@
RPATH_EXTRA := @RPATH_EXTRA@
SO := @so@
IMPORTLIB := @importlib@
@@ -42,14 +43,16 @@ XSLTPROC := @XSLTPROC@
AUTOCONF := @AUTOCONF@
_RPATH = @RPATH@
RPATH = $(if $(1),$(call _RPATH,$(1)))
-cfghdrs_in := @cfghdrs_in@
+cfghdrs_in := $(addprefix $(srcroot),@cfghdrs_in@)
cfghdrs_out := @cfghdrs_out@
-cfgoutputs_in := @cfgoutputs_in@
+cfgoutputs_in := $(addprefix $(srcroot),@cfgoutputs_in@)
cfgoutputs_out := @cfgoutputs_out@
enable_autogen := @enable_autogen@
enable_code_coverage := @enable_code_coverage@
-enable_experimental := @enable_experimental@
+enable_prof := @enable_prof@
+enable_valgrind := @enable_valgrind@
enable_zone_allocator := @enable_zone_allocator@
+MALLOC_CONF := @JEMALLOC_CPREFIX@MALLOC_CONF
DSO_LDFLAGS = @DSO_LDFLAGS@
SOREV = @SOREV@
PIC_CFLAGS = @PIC_CFLAGS@
@@ -73,16 +76,20 @@ endif
LIBJEMALLOC := $(LIBPREFIX)jemalloc$(install_suffix)
# Lists of files.
-BINS := $(srcroot)bin/pprof $(objroot)bin/jemalloc.sh
+BINS := $(objroot)bin/jemalloc-config $(objroot)bin/jemalloc.sh $(objroot)bin/jeprof
C_HDRS := $(objroot)include/jemalloc/jemalloc$(install_suffix).h
C_SRCS := $(srcroot)src/jemalloc.c $(srcroot)src/arena.c \
$(srcroot)src/atomic.c $(srcroot)src/base.c $(srcroot)src/bitmap.c \
$(srcroot)src/chunk.c $(srcroot)src/chunk_dss.c \
$(srcroot)src/chunk_mmap.c $(srcroot)src/ckh.c $(srcroot)src/ctl.c \
$(srcroot)src/extent.c $(srcroot)src/hash.c $(srcroot)src/huge.c \
- $(srcroot)src/mb.c $(srcroot)src/mutex.c $(srcroot)src/prof.c \
- $(srcroot)src/quarantine.c $(srcroot)src/rtree.c $(srcroot)src/stats.c \
- $(srcroot)src/tcache.c $(srcroot)src/util.c $(srcroot)src/tsd.c
+ $(srcroot)src/mb.c $(srcroot)src/mutex.c $(srcroot)src/pages.c \
+ $(srcroot)src/prof.c $(srcroot)src/quarantine.c $(srcroot)src/rtree.c \
+ $(srcroot)src/stats.c $(srcroot)src/tcache.c $(srcroot)src/util.c \
+ $(srcroot)src/tsd.c
+ifeq ($(enable_valgrind), 1)
+C_SRCS += $(srcroot)src/valgrind.c
+endif
ifeq ($(enable_zone_allocator), 1)
C_SRCS += $(srcroot)src/zone.c
endif
@@ -98,53 +105,60 @@ DSOS := $(objroot)lib/$(LIBJEMALLOC).$(SOREV)
ifneq ($(SOREV),$(SO))
DSOS += $(objroot)lib/$(LIBJEMALLOC).$(SO)
endif
+PC := $(objroot)jemalloc.pc
MAN3 := $(objroot)doc/jemalloc$(install_suffix).3
DOCS_XML := $(objroot)doc/jemalloc$(install_suffix).xml
-DOCS_HTML := $(DOCS_XML:$(objroot)%.xml=$(srcroot)%.html)
-DOCS_MAN3 := $(DOCS_XML:$(objroot)%.xml=$(srcroot)%.3)
+DOCS_HTML := $(DOCS_XML:$(objroot)%.xml=$(objroot)%.html)
+DOCS_MAN3 := $(DOCS_XML:$(objroot)%.xml=$(objroot)%.3)
DOCS := $(DOCS_HTML) $(DOCS_MAN3)
-C_TESTLIB_SRCS := $(srcroot)test/src/math.c $(srcroot)test/src/mtx.c \
+C_TESTLIB_SRCS := $(srcroot)test/src/btalloc.c $(srcroot)test/src/btalloc_0.c \
+ $(srcroot)test/src/btalloc_1.c $(srcroot)test/src/math.c \
+ $(srcroot)test/src/mtx.c $(srcroot)test/src/mq.c \
$(srcroot)test/src/SFMT.c $(srcroot)test/src/test.c \
- $(srcroot)test/src/thd.c
+ $(srcroot)test/src/thd.c $(srcroot)test/src/timer.c
C_UTIL_INTEGRATION_SRCS := $(srcroot)src/util.c
-TESTS_UNIT := $(srcroot)test/unit/bitmap.c \
+TESTS_UNIT := $(srcroot)test/unit/atomic.c \
+ $(srcroot)test/unit/bitmap.c \
$(srcroot)test/unit/ckh.c \
$(srcroot)test/unit/hash.c \
$(srcroot)test/unit/junk.c \
+ $(srcroot)test/unit/junk_alloc.c \
+ $(srcroot)test/unit/junk_free.c \
+ $(srcroot)test/unit/lg_chunk.c \
$(srcroot)test/unit/mallctl.c \
$(srcroot)test/unit/math.c \
$(srcroot)test/unit/mq.c \
$(srcroot)test/unit/mtx.c \
$(srcroot)test/unit/prof_accum.c \
+ $(srcroot)test/unit/prof_active.c \
$(srcroot)test/unit/prof_gdump.c \
$(srcroot)test/unit/prof_idump.c \
+ $(srcroot)test/unit/prof_reset.c \
+ $(srcroot)test/unit/prof_thread_name.c \
$(srcroot)test/unit/ql.c \
$(srcroot)test/unit/qr.c \
$(srcroot)test/unit/quarantine.c \
$(srcroot)test/unit/rb.c \
$(srcroot)test/unit/rtree.c \
$(srcroot)test/unit/SFMT.c \
+ $(srcroot)test/unit/size_classes.c \
$(srcroot)test/unit/stats.c \
$(srcroot)test/unit/tsd.c \
$(srcroot)test/unit/util.c \
$(srcroot)test/unit/zero.c
-TESTS_UNIT_AUX := $(srcroot)test/unit/prof_accum_a.c \
- $(srcroot)test/unit/prof_accum_b.c
TESTS_INTEGRATION := $(srcroot)test/integration/aligned_alloc.c \
$(srcroot)test/integration/allocated.c \
+ $(srcroot)test/integration/sdallocx.c \
$(srcroot)test/integration/mallocx.c \
- $(srcroot)test/integration/mremap.c \
+ $(srcroot)test/integration/MALLOCX_ARENA.c \
+ $(srcroot)test/integration/overflow.c \
$(srcroot)test/integration/posix_memalign.c \
$(srcroot)test/integration/rallocx.c \
$(srcroot)test/integration/thread_arena.c \
$(srcroot)test/integration/thread_tcache_enabled.c \
- $(srcroot)test/integration/xallocx.c
-ifeq ($(enable_experimental), 1)
-TESTS_INTEGRATION += $(srcroot)test/integration/allocm.c \
- $(srcroot)test/integration/MALLOCX_ARENA.c \
- $(srcroot)test/integration/rallocm.c
-endif
-TESTS_STRESS :=
+ $(srcroot)test/integration/xallocx.c \
+ $(srcroot)test/integration/chunk.c
+TESTS_STRESS := $(srcroot)test/stress/microbench.c
TESTS := $(TESTS_UNIT) $(TESTS_INTEGRATION) $(TESTS_STRESS)
C_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.$(O))
@@ -157,10 +171,9 @@ C_TESTLIB_STRESS_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.stress.$(O))
C_TESTLIB_OBJS := $(C_TESTLIB_UNIT_OBJS) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(C_TESTLIB_STRESS_OBJS)
TESTS_UNIT_OBJS := $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%.$(O))
-TESTS_UNIT_AUX_OBJS := $(TESTS_UNIT_AUX:$(srcroot)%.c=$(objroot)%.$(O))
TESTS_INTEGRATION_OBJS := $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%.$(O))
TESTS_STRESS_OBJS := $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%.$(O))
-TESTS_OBJS := $(TESTS_UNIT_OBJS) $(TESTS_UNIT_AUX_OBJS) $(TESTS_INTEGRATION_OBJS) $(TESTS_STRESS_OBJS)
+TESTS_OBJS := $(TESTS_UNIT_OBJS) $(TESTS_INTEGRATION_OBJS) $(TESTS_STRESS_OBJS)
.PHONY: all dist build_doc_html build_doc_man build_doc
.PHONY: install_bin install_include install_lib
@@ -174,10 +187,10 @@ all: build_lib
dist: build_doc
-$(srcroot)doc/%.html : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/html.xsl
+$(objroot)doc/%.html : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/html.xsl
$(XSLTPROC) -o $@ $(objroot)doc/html.xsl $<
-$(srcroot)doc/%.3 : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/manpages.xsl
+$(objroot)doc/%.3 : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/manpages.xsl
$(XSLTPROC) -o $@ $(objroot)doc/manpages.xsl $<
build_doc_html: $(DOCS_HTML)
@@ -209,18 +222,12 @@ $(C_TESTLIB_STRESS_OBJS): $(objroot)test/src/%.stress.$(O): $(srcroot)test/src/%
$(C_TESTLIB_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST -DJEMALLOC_STRESS_TESTLIB
$(C_TESTLIB_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include
$(TESTS_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST
-$(TESTS_UNIT_AUX_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST
-define make-unit-link-dep
-$(1): TESTS_UNIT_LINK_OBJS += $(2)
-$(1): $(2)
-endef
-$(foreach test, $(TESTS_UNIT:$(srcroot)test/unit/%.c=$(objroot)test/unit/%$(EXE)), $(eval $(call make-unit-link-dep,$(test),$(filter $(test:%=%_a.$(O)) $(test:%=%_b.$(O)),$(TESTS_UNIT_AUX_OBJS)))))
$(TESTS_INTEGRATION_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_TEST
$(TESTS_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST
$(TESTS_OBJS): $(objroot)test/%.$(O): $(srcroot)test/%.c
$(TESTS_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include
ifneq ($(IMPORTLIB),$(SO))
-$(C_OBJS): CPPFLAGS += -DDLLEXPORT
+$(C_OBJS) $(C_JET_OBJS): CPPFLAGS += -DDLLEXPORT
endif
ifndef CC_MM
@@ -229,7 +236,7 @@ HEADER_DIRS = $(srcroot)include/jemalloc/internal \
$(objroot)include/jemalloc $(objroot)include/jemalloc/internal
HEADERS = $(wildcard $(foreach dir,$(HEADER_DIRS),$(dir)/*.h))
$(C_OBJS) $(C_PIC_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS): $(HEADERS)
-$(TESTS_OBJS): $(objroot)test/unit/jemalloc_test.h
+$(TESTS_OBJS): $(objroot)test/include/test/jemalloc_test.h
endif
$(C_OBJS) $(C_PIC_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS): %.$(O):
@@ -259,15 +266,15 @@ $(STATIC_LIBS):
$(objroot)test/unit/%$(EXE): $(objroot)test/unit/%.$(O) $(TESTS_UNIT_LINK_OBJS) $(C_JET_OBJS) $(C_TESTLIB_UNIT_OBJS)
@mkdir -p $(@D)
- $(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LDFLAGS) $(filter-out -lm,$(LIBS)) -lm $(EXTRA_LDFLAGS)
+ $(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LDFLAGS) $(filter-out -lm,$(LIBS)) -lm $(TESTLIBS) $(EXTRA_LDFLAGS)
$(objroot)test/integration/%$(EXE): $(objroot)test/integration/%.$(O) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
@mkdir -p $(@D)
- $(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(filter -lpthread,$(LIBS))) -lm $(EXTRA_LDFLAGS)
+ $(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(filter -lpthread,$(LIBS))) -lm $(TESTLIBS) $(EXTRA_LDFLAGS)
$(objroot)test/stress/%$(EXE): $(objroot)test/stress/%.$(O) $(C_JET_OBJS) $(C_TESTLIB_STRESS_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
@mkdir -p $(@D)
- $(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(LIBS)) -lm $(EXTRA_LDFLAGS)
+ $(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(LIBS)) -lm $(TESTLIBS) $(EXTRA_LDFLAGS)
build_lib_shared: $(DSOS)
build_lib_static: $(STATIC_LIBS)
@@ -301,7 +308,14 @@ install_lib_static: $(STATIC_LIBS)
install -m 755 $$l $(LIBDIR); \
done
-install_lib: install_lib_shared install_lib_static
+install_lib_pc: $(PC)
+ install -d $(LIBDIR)/pkgconfig
+ @for l in $(PC); do \
+ echo "install -m 644 $$l $(LIBDIR)/pkgconfig"; \
+ install -m 644 $$l $(LIBDIR)/pkgconfig; \
+done
+
+install_lib: install_lib_shared install_lib_static install_lib_pc
install_doc_html:
install -d $(DATADIR)/doc/jemalloc$(install_suffix)
@@ -330,18 +344,23 @@ check_unit_dir:
@mkdir -p $(objroot)test/unit
check_integration_dir:
@mkdir -p $(objroot)test/integration
-check_stress_dir:
+stress_dir:
@mkdir -p $(objroot)test/stress
-check_dir: check_unit_dir check_integration_dir check_stress_dir
+check_dir: check_unit_dir check_integration_dir
check_unit: tests_unit check_unit_dir
$(SHELL) $(objroot)test/test.sh $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%)
+check_integration_prof: tests_integration check_integration_dir
+ifeq ($(enable_prof), 1)
+ $(MALLOC_CONF)="prof:true" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
+ $(MALLOC_CONF)="prof:true,prof_active:false" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
+endif
check_integration: tests_integration check_integration_dir
$(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
-check_stress: tests_stress check_stress_dir
+stress: tests_stress stress_dir
$(SHELL) $(objroot)test/test.sh $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%)
-check: tests check_dir
- $(SHELL) $(objroot)test/test.sh $(TESTS:$(srcroot)%.c=$(objroot)%)
+check: tests check_dir check_integration_prof
+ $(SHELL) $(objroot)test/test.sh $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
ifeq ($(enable_code_coverage), 1)
coverage_unit: check_unit
@@ -355,7 +374,7 @@ coverage_integration: check_integration
$(SHELL) $(srcroot)coverage.sh $(srcroot)test/src integration $(C_TESTLIB_INTEGRATION_OBJS)
$(SHELL) $(srcroot)coverage.sh $(srcroot)test/integration integration $(TESTS_INTEGRATION_OBJS)
-coverage_stress: check_stress
+coverage_stress: stress
$(SHELL) $(srcroot)coverage.sh $(srcroot)src pic $(C_PIC_OBJS)
$(SHELL) $(srcroot)coverage.sh $(srcroot)src jet $(C_JET_OBJS)
$(SHELL) $(srcroot)coverage.sh $(srcroot)test/src stress $(C_TESTLIB_STRESS_OBJS)
@@ -400,8 +419,9 @@ clean:
rm -f $(objroot)*.gcov.*
distclean: clean
- rm -rf $(objroot)autom4te.cache
+ rm -f $(objroot)bin/jemalloc-config
rm -f $(objroot)bin/jemalloc.sh
+ rm -f $(objroot)bin/jeprof
rm -f $(objroot)config.log
rm -f $(objroot)config.status
rm -f $(objroot)config.stamp
@@ -410,7 +430,7 @@ distclean: clean
relclean: distclean
rm -f $(objroot)configure
- rm -f $(srcroot)VERSION
+ rm -f $(objroot)VERSION
rm -f $(DOCS_HTML)
rm -f $(DOCS_MAN3)
diff --git a/deps/jemalloc/VERSION b/deps/jemalloc/VERSION
index dace31ba7..f1f9f1c61 100644
--- a/deps/jemalloc/VERSION
+++ b/deps/jemalloc/VERSION
@@ -1 +1 @@
-3.6.0-0-g46c0af68bd248b04df75e4f92d5fb804c3d75340
+4.0.3-0-ge9192eacf8935e29fc62fddc2701f7942b1cc02c
diff --git a/deps/jemalloc/bin/jemalloc-config.in b/deps/jemalloc/bin/jemalloc-config.in
new file mode 100644
index 000000000..b016c8d33
--- /dev/null
+++ b/deps/jemalloc/bin/jemalloc-config.in
@@ -0,0 +1,79 @@
+#!/bin/sh
+
+usage() {
+ cat <<EOF
+Usage:
+ @BINDIR@/jemalloc-config <option>
+Options:
+ --help | -h : Print usage.
+ --version : Print jemalloc version.
+ --revision : Print shared library revision number.
+ --config : Print configure options used to build jemalloc.
+ --prefix : Print installation directory prefix.
+ --bindir : Print binary installation directory.
+ --datadir : Print data installation directory.
+ --includedir : Print include installation directory.
+ --libdir : Print library installation directory.
+ --mandir : Print manual page installation directory.
+ --cc : Print compiler used to build jemalloc.
+ --cflags : Print compiler flags used to build jemalloc.
+ --cppflags : Print preprocessor flags used to build jemalloc.
+ --ldflags : Print library flags used to build jemalloc.
+ --libs : Print libraries jemalloc was linked against.
+EOF
+}
+
+prefix="@prefix@"
+exec_prefix="@exec_prefix@"
+
+case "$1" in
+--help | -h)
+ usage
+ exit 0
+ ;;
+--version)
+ echo "@jemalloc_version@"
+ ;;
+--revision)
+ echo "@rev@"
+ ;;
+--config)
+ echo "@CONFIG@"
+ ;;
+--prefix)
+ echo "@PREFIX@"
+ ;;
+--bindir)
+ echo "@BINDIR@"
+ ;;
+--datadir)
+ echo "@DATADIR@"
+ ;;
+--includedir)
+ echo "@INCLUDEDIR@"
+ ;;
+--libdir)
+ echo "@LIBDIR@"
+ ;;
+--mandir)
+ echo "@MANDIR@"
+ ;;
+--cc)
+ echo "@CC@"
+ ;;
+--cflags)
+ echo "@CFLAGS@"
+ ;;
+--cppflags)
+ echo "@CPPFLAGS@"
+ ;;
+--ldflags)
+ echo "@LDFLAGS@ @EXTRA_LDFLAGS@"
+ ;;
+--libs)
+ echo "@LIBS@"
+ ;;
+*)
+ usage
+ exit 1
+esac
diff --git a/deps/jemalloc/bin/pprof b/deps/jemalloc/bin/jeprof.in
index a309943c1..e7178078a 100755..100644
--- a/deps/jemalloc/bin/pprof
+++ b/deps/jemalloc/bin/jeprof.in
@@ -2,11 +2,11 @@
# Copyright (c) 1998-2007, Google Inc.
# All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
-#
+#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
@@ -16,7 +16,7 @@
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
-#
+#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -40,28 +40,28 @@
#
# Examples:
#
-# % tools/pprof "program" "profile"
+# % tools/jeprof "program" "profile"
# Enters "interactive" mode
#
-# % tools/pprof --text "program" "profile"
+# % tools/jeprof --text "program" "profile"
# Generates one line per procedure
#
-# % tools/pprof --gv "program" "profile"
+# % tools/jeprof --gv "program" "profile"
# Generates annotated call-graph and displays via "gv"
#
-# % tools/pprof --gv --focus=Mutex "program" "profile"
+# % tools/jeprof --gv --focus=Mutex "program" "profile"
# Restrict to code paths that involve an entry that matches "Mutex"
#
-# % tools/pprof --gv --focus=Mutex --ignore=string "program" "profile"
+# % tools/jeprof --gv --focus=Mutex --ignore=string "program" "profile"
# Restrict to code paths that involve an entry that matches "Mutex"
# and does not match "string"
#
-# % tools/pprof --list=IBF_CheckDocid "program" "profile"
+# % tools/jeprof --list=IBF_CheckDocid "program" "profile"
# Generates disassembly listing of all routines with at least one
# sample that match the --list=<regexp> pattern. The listing is
# annotated with the flat and cumulative sample counts at each line.
#
-# % tools/pprof --disasm=IBF_CheckDocid "program" "profile"
+# % tools/jeprof --disasm=IBF_CheckDocid "program" "profile"
# Generates disassembly listing of all routines with at least one
# sample that match the --disasm=<regexp> pattern. The listing is
# annotated with the flat and cumulative sample counts at each PC value.
@@ -72,10 +72,11 @@ use strict;
use warnings;
use Getopt::Long;
+my $JEPROF_VERSION = "@jemalloc_version@";
my $PPROF_VERSION = "2.0";
# These are the object tools we use which can come from a
-# user-specified location using --tools, from the PPROF_TOOLS
+# user-specified location using --tools, from the JEPROF_TOOLS
# environment variable, or from the environment.
my %obj_tool_map = (
"objdump" => "objdump",
@@ -144,13 +145,13 @@ my $sep_address = undef;
sub usage_string {
return <<EOF;
Usage:
-pprof [options] <program> <profiles>
+jeprof [options] <program> <profiles>
<profiles> is a space separated list of profile names.
-pprof [options] <symbolized-profiles>
+jeprof [options] <symbolized-profiles>
<symbolized-profiles> is a list of profile files where each file contains
the necessary symbol mappings as well as profile data (likely generated
with --raw).
-pprof [options] <profile>
+jeprof [options] <profile>
<profile> is a remote form. Symbols are obtained from host:port$SYMBOL_PAGE
Each name can be:
@@ -161,9 +162,9 @@ pprof [options] <profile>
$GROWTH_PAGE, $CONTENTION_PAGE, /pprof/wall,
$CENSUSPROFILE_PAGE, or /pprof/filteredprofile.
For instance:
- pprof http://myserver.com:80$HEAP_PAGE
+ jeprof http://myserver.com:80$HEAP_PAGE
If /<service> is omitted, the service defaults to $PROFILE_PAGE (cpu profiling).
-pprof --symbols <program>
+jeprof --symbols <program>
Maps addresses to symbol names. In this mode, stdin should be a
list of library mappings, in the same format as is found in the heap-
and cpu-profile files (this loosely matches that of /proc/self/maps
@@ -202,7 +203,7 @@ Output type:
--pdf Generate PDF to stdout
--svg Generate SVG to stdout
--gif Generate GIF to stdout
- --raw Generate symbolized pprof data (useful with remote fetch)
+ --raw Generate symbolized jeprof data (useful with remote fetch)
Heap-Profile Options:
--inuse_space Display in-use (mega)bytes [default]
@@ -223,6 +224,7 @@ Call-graph Options:
--edgefraction=<f> Hide edges below <f>*total [default=.001]
--maxdegree=<n> Max incoming/outgoing edges per node [default=8]
--focus=<regexp> Focus on nodes matching <regexp>
+ --thread=<n> Show profile for thread <n>
--ignore=<regexp> Ignore nodes matching <regexp>
--scale=<n> Set GV scaling [default=0]
--heapcheck Make nodes with non-0 object counts
@@ -235,34 +237,34 @@ Miscellaneous:
--version Version information
Environment Variables:
- PPROF_TMPDIR Profiles directory. Defaults to \$HOME/pprof
- PPROF_TOOLS Prefix for object tools pathnames
+ JEPROF_TMPDIR Profiles directory. Defaults to \$HOME/jeprof
+ JEPROF_TOOLS Prefix for object tools pathnames
Examples:
-pprof /bin/ls ls.prof
+jeprof /bin/ls ls.prof
Enters "interactive" mode
-pprof --text /bin/ls ls.prof
+jeprof --text /bin/ls ls.prof
Outputs one line per procedure
-pprof --web /bin/ls ls.prof
+jeprof --web /bin/ls ls.prof
Displays annotated call-graph in web browser
-pprof --gv /bin/ls ls.prof
+jeprof --gv /bin/ls ls.prof
Displays annotated call-graph via 'gv'
-pprof --gv --focus=Mutex /bin/ls ls.prof
+jeprof --gv --focus=Mutex /bin/ls ls.prof
Restricts to code paths including a .*Mutex.* entry
-pprof --gv --focus=Mutex --ignore=string /bin/ls ls.prof
+jeprof --gv --focus=Mutex --ignore=string /bin/ls ls.prof
Code paths including Mutex but not string
-pprof --list=getdir /bin/ls ls.prof
+jeprof --list=getdir /bin/ls ls.prof
(Per-line) annotated source listing for getdir()
-pprof --disasm=getdir /bin/ls ls.prof
+jeprof --disasm=getdir /bin/ls ls.prof
(Per-PC) annotated disassembly for getdir()
-pprof http://localhost:1234/
+jeprof http://localhost:1234/
Enters "interactive" mode
-pprof --text localhost:1234
+jeprof --text localhost:1234
Outputs one line per procedure for localhost:1234
-pprof --raw localhost:1234 > ./local.raw
-pprof --text ./local.raw
+jeprof --raw localhost:1234 > ./local.raw
+jeprof --text ./local.raw
Fetches a remote profile for later analysis and then
analyzes it in text mode.
EOF
@@ -270,7 +272,8 @@ EOF
sub version_string {
return <<EOF
-pprof (part of gperftools $PPROF_VERSION)
+jeprof (part of jemalloc $JEPROF_VERSION)
+based on pprof (part of gperftools $PPROF_VERSION)
Copyright 1998-2007 Google Inc.
@@ -293,8 +296,8 @@ sub Init() {
# Setup tmp-file name and handler to clean it up.
# We do this in the very beginning so that we can use
# error() and cleanup() function anytime here after.
- $main::tmpfile_sym = "/tmp/pprof$$.sym";
- $main::tmpfile_ps = "/tmp/pprof$$";
+ $main::tmpfile_sym = "/tmp/jeprof$$.sym";
+ $main::tmpfile_ps = "/tmp/jeprof$$";
$main::next_tmpfile = 0;
$SIG{'INT'} = \&sighandler;
@@ -332,6 +335,7 @@ sub Init() {
$main::opt_edgefraction = 0.001;
$main::opt_maxdegree = 8;
$main::opt_focus = '';
+ $main::opt_thread = undef;
$main::opt_ignore = '';
$main::opt_scale = 0;
$main::opt_heapcheck = 0;
@@ -402,6 +406,7 @@ sub Init() {
"edgefraction=f" => \$main::opt_edgefraction,
"maxdegree=i" => \$main::opt_maxdegree,
"focus=s" => \$main::opt_focus,
+ "thread=s" => \$main::opt_thread,
"ignore=s" => \$main::opt_ignore,
"scale=i" => \$main::opt_scale,
"heapcheck" => \$main::opt_heapcheck,
@@ -562,66 +567,12 @@ sub Init() {
}
}
-sub Main() {
- Init();
- $main::collected_profile = undef;
- @main::profile_files = ();
- $main::op_time = time();
-
- # Printing symbols is special and requires a lot less info that most.
- if ($main::opt_symbols) {
- PrintSymbols(*STDIN); # Get /proc/maps and symbols output from stdin
- return;
- }
-
- # Fetch all profile data
- FetchDynamicProfiles();
-
- # this will hold symbols that we read from the profile files
- my $symbol_map = {};
-
- # Read one profile, pick the last item on the list
- my $data = ReadProfile($main::prog, pop(@main::profile_files));
- my $profile = $data->{profile};
- my $pcs = $data->{pcs};
- my $libs = $data->{libs}; # Info about main program and shared libraries
- $symbol_map = MergeSymbols($symbol_map, $data->{symbols});
-
- # Add additional profiles, if available.
- if (scalar(@main::profile_files) > 0) {
- foreach my $pname (@main::profile_files) {
- my $data2 = ReadProfile($main::prog, $pname);
- $profile = AddProfile($profile, $data2->{profile});
- $pcs = AddPcs($pcs, $data2->{pcs});
- $symbol_map = MergeSymbols($symbol_map, $data2->{symbols});
- }
- }
-
- # Subtract base from profile, if specified
- if ($main::opt_base ne '') {
- my $base = ReadProfile($main::prog, $main::opt_base);
- $profile = SubtractProfile($profile, $base->{profile});
- $pcs = AddPcs($pcs, $base->{pcs});
- $symbol_map = MergeSymbols($symbol_map, $base->{symbols});
- }
+sub FilterAndPrint {
+ my ($profile, $symbols, $libs, $thread) = @_;
# Get total data in profile
my $total = TotalProfile($profile);
- # Collect symbols
- my $symbols;
- if ($main::use_symbolized_profile) {
- $symbols = FetchSymbols($pcs, $symbol_map);
- } elsif ($main::use_symbol_page) {
- $symbols = FetchSymbols($pcs);
- } else {
- # TODO(csilvers): $libs uses the /proc/self/maps data from profile1,
- # which may differ from the data from subsequent profiles, especially
- # if they were run on different machines. Use appropriate libs for
- # each pc somehow.
- $symbols = ExtractSymbols($libs, $pcs);
- }
-
  # Remove uninteresting stack items
$profile = RemoveUninterestingFrames($symbols, $profile);
@@ -656,7 +607,9 @@ sub Main() {
# (only matters when --heapcheck is given but we must be
# compatible with old branches that did not pass --heapcheck always):
if ($total != 0) {
- printf("Total: %s %s\n", Unparse($total), Units());
+ printf("Total%s: %s %s\n",
+ (defined($thread) ? " (t$thread)" : ""),
+ Unparse($total), Units());
}
PrintText($symbols, $flat, $cumulative, -1);
} elsif ($main::opt_raw) {
@@ -692,6 +645,77 @@ sub Main() {
} else {
InteractiveMode($profile, $symbols, $libs, $total);
}
+}
+
+sub Main() {
+ Init();
+ $main::collected_profile = undef;
+ @main::profile_files = ();
+ $main::op_time = time();
+
+ # Printing symbols is special and requires a lot less info than most.
+ if ($main::opt_symbols) {
+ PrintSymbols(*STDIN); # Get /proc/maps and symbols output from stdin
+ return;
+ }
+
+ # Fetch all profile data
+ FetchDynamicProfiles();
+
+ # this will hold symbols that we read from the profile files
+ my $symbol_map = {};
+
+ # Read one profile, pick the last item on the list
+ my $data = ReadProfile($main::prog, pop(@main::profile_files));
+ my $profile = $data->{profile};
+ my $pcs = $data->{pcs};
+ my $libs = $data->{libs}; # Info about main program and shared libraries
+ $symbol_map = MergeSymbols($symbol_map, $data->{symbols});
+
+ # Add additional profiles, if available.
+ if (scalar(@main::profile_files) > 0) {
+ foreach my $pname (@main::profile_files) {
+ my $data2 = ReadProfile($main::prog, $pname);
+ $profile = AddProfile($profile, $data2->{profile});
+ $pcs = AddPcs($pcs, $data2->{pcs});
+ $symbol_map = MergeSymbols($symbol_map, $data2->{symbols});
+ }
+ }
+
+ # Subtract base from profile, if specified
+ if ($main::opt_base ne '') {
+ my $base = ReadProfile($main::prog, $main::opt_base);
+ $profile = SubtractProfile($profile, $base->{profile});
+ $pcs = AddPcs($pcs, $base->{pcs});
+ $symbol_map = MergeSymbols($symbol_map, $base->{symbols});
+ }
+
+ # Collect symbols
+ my $symbols;
+ if ($main::use_symbolized_profile) {
+ $symbols = FetchSymbols($pcs, $symbol_map);
+ } elsif ($main::use_symbol_page) {
+ $symbols = FetchSymbols($pcs);
+ } else {
+ # TODO(csilvers): $libs uses the /proc/self/maps data from profile1,
+ # which may differ from the data from subsequent profiles, especially
+ # if they were run on different machines. Use appropriate libs for
+ # each pc somehow.
+ $symbols = ExtractSymbols($libs, $pcs);
+ }
+
+ if (!defined($main::opt_thread)) {
+ FilterAndPrint($profile, $symbols, $libs);
+ }
+ if (defined($data->{threads})) {
+ foreach my $thread (sort { $a <=> $b } keys(%{$data->{threads}})) {
+ if (defined($main::opt_thread) &&
+ ($main::opt_thread eq '*' || $main::opt_thread == $thread)) {
+ my $thread_profile = $data->{threads}{$thread};
+ FilterAndPrint($thread_profile, $symbols, $libs, $thread);
+ }
+ }
+ }
cleanup();
exit(0);
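
The restructured Main() above defers all reporting to FilterAndPrint(): with no --thread option only the combined profile is printed, --thread=N prints the profile of that single thread, and --thread='*' prints one report per thread. A minimal Perl sketch of just that selection predicate, using invented thread IDs:

    use strict;
    use warnings;

    my $opt_thread = '*';    # value of --thread (undef when the option is absent)
    my %threads    = (1 => 'profA', 7 => 'profB', 12 => 'profC');  # hypothetical per-thread profiles

    print "print combined profile\n" if !defined($opt_thread);
    foreach my $t (sort { $a <=> $b } keys %threads) {
        if (defined($opt_thread) &&
            ($opt_thread eq '*' || $opt_thread == $t)) {
            print "print profile for thread t$t\n";
        }
    }
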
@@ -780,14 +804,14 @@ sub InteractiveMode {
$| = 1; # Make output unbuffered for interactive mode
my ($orig_profile, $symbols, $libs, $total) = @_;
- print STDERR "Welcome to pprof! For help, type 'help'.\n";
+ print STDERR "Welcome to jeprof! For help, type 'help'.\n";
# Use ReadLine if it's installed and input comes from a console.
if ( -t STDIN &&
!ReadlineMightFail() &&
defined(eval {require Term::ReadLine}) ) {
- my $term = new Term::ReadLine 'pprof';
- while ( defined ($_ = $term->readline('(pprof) '))) {
+ my $term = new Term::ReadLine 'jeprof';
+ while ( defined ($_ = $term->readline('(jeprof) '))) {
$term->addhistory($_) if /\S/;
if (!InteractiveCommand($orig_profile, $symbols, $libs, $total, $_)) {
last; # exit when we get an interactive command to quit
@@ -795,7 +819,7 @@ sub InteractiveMode {
}
} else { # don't have readline
while (1) {
- print STDERR "(pprof) ";
+ print STDERR "(jeprof) ";
$_ = <STDIN>;
last if ! defined $_ ;
s/\r//g; # turn windows-looking lines into unix-looking lines
@@ -988,7 +1012,7 @@ sub ProcessProfile {
sub InteractiveHelpMessage {
print STDERR <<ENDOFHELP;
-Interactive pprof mode
+Interactive jeprof mode
Commands:
gv
@@ -1031,7 +1055,7 @@ Commands:
Generates callgrind file. If no filename is given, kcachegrind is called.
help - This listing
- quit or ^D - End pprof
+ quit or ^D - End jeprof
For commands that accept optional -ignore tags, samples where any routine in
the stack trace matches the regular expression in any of the -ignore
@@ -1476,7 +1500,7 @@ h1 {
}
</style>
<script type="text/javascript">
-function pprof_toggle_asm(e) {
+function jeprof_toggle_asm(e) {
var target;
if (!e) e = window.event;
if (e.target) target = e.target;
@@ -1683,23 +1707,23 @@ sub PrintSource {
HtmlPrintNumber($c2),
UnparseAddress($offset, $e->[0]),
CleanDisassembly($e->[3]));
-
+
# Append the most specific source line associated with this instruction
if (length($dis) < 80) { $dis .= (' ' x (80 - length($dis))) };
$dis = HtmlEscape($dis);
my $f = $e->[5];
my $l = $e->[6];
if ($f ne $last_dis_filename) {
- $dis .= sprintf("<span class=disasmloc>%s:%d</span>",
+ $dis .= sprintf("<span class=disasmloc>%s:%d</span>",
HtmlEscape(CleanFileName($f)), $l);
} elsif ($l ne $last_dis_linenum) {
# De-emphasize the unchanged file name portion
$dis .= sprintf("<span class=unimportant>%s</span>" .
- "<span class=disasmloc>:%d</span>",
+ "<span class=disasmloc>:%d</span>",
HtmlEscape(CleanFileName($f)), $l);
} else {
# De-emphasize the entire location
- $dis .= sprintf("<span class=unimportant>%s:%d</span>",
+ $dis .= sprintf("<span class=unimportant>%s:%d</span>",
HtmlEscape(CleanFileName($f)), $l);
}
$last_dis_filename = $f;
@@ -1745,7 +1769,7 @@ sub PrintSource {
if ($html) {
printf $output (
- "<h1>%s</h1>%s\n<pre onClick=\"pprof_toggle_asm()\">\n" .
+ "<h1>%s</h1>%s\n<pre onClick=\"jeprof_toggle_asm()\">\n" .
"Total:%6s %6s (flat / cumulative %s)\n",
HtmlEscape(ShortFunctionName($routine)),
HtmlEscape(CleanFileName($filename)),
@@ -1788,8 +1812,8 @@ sub PrintSource {
if (defined($dis) && $dis ne '') {
$asm = "<span class=\"asm\">" . $dis . "</span>";
}
- my $source_class = (($n1 + $n2 > 0)
- ? "livesrc"
+ my $source_class = (($n1 + $n2 > 0)
+ ? "livesrc"
: (($asm ne "") ? "deadsrc" : "nop"));
printf $output (
"<span class=\"line\">%5d</span> " .
@@ -2811,9 +2835,15 @@ sub RemoveUninterestingFrames {
'free',
'memalign',
'posix_memalign',
+ 'aligned_alloc',
'pvalloc',
'valloc',
'realloc',
+ 'mallocx', # jemalloc
+ 'rallocx', # jemalloc
+ 'xallocx', # jemalloc
+ 'dallocx', # jemalloc
+ 'sdallocx', # jemalloc
'tc_calloc',
'tc_cfree',
'tc_malloc',
@@ -2923,6 +2953,10 @@ sub RemoveUninterestingFrames {
if (exists($symbols->{$a})) {
my $func = $symbols->{$a}->[0];
if ($skip{$func} || ($func =~ m/$skip_regexp/)) {
+ # Throw away the portion of the backtrace seen so far, under the
+ # assumption that previous frames were for functions internal to the
+ # allocator.
+ @path = ();
next;
}
}
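
Whenever a frame from the skip set is encountered, everything collected before it is discarded on the assumption that those frames were internal to the allocator, leaving only the caller-side stack. A small standalone sketch of that pruning, with invented frame names:

    use strict;
    use warnings;

    my %skip  = map { $_ => 1 } qw(malloc mallocx imalloc arena_malloc);  # illustrative skip set
    my @trace = qw(arena_malloc imalloc mallocx my_cache_alloc handle_request main);

    my @path = ();
    foreach my $frame (@trace) {
        if ($skip{$frame}) {
            @path = ();              # drop the allocator-internal prefix collected so far
            next;
        }
        push(@path, $frame);
    }
    print join(" <- ", @path), "\n"; # my_cache_alloc <- handle_request <- main
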
@@ -3401,7 +3435,7 @@ sub FetchDynamicProfile {
$profile_file .= $suffix;
}
- my $profile_dir = $ENV{"PPROF_TMPDIR"} || ($ENV{HOME} . "/pprof");
+ my $profile_dir = $ENV{"JEPROF_TMPDIR"} || ($ENV{HOME} . "/jeprof");
if (! -d $profile_dir) {
mkdir($profile_dir)
|| die("Unable to create profile directory $profile_dir: $!\n");
@@ -3617,7 +3651,7 @@ BEGIN {
# Reads the top, 'header' section of a profile, and returns the last
# line of the header, commonly called a 'header line'. The header
# section of a profile consists of zero or more 'command' lines that
-# are instructions to pprof, which pprof executes when reading the
+# are instructions to jeprof, which jeprof executes when reading the
# header. All 'command' lines start with a %. After the command
# lines is the 'header line', which is a profile-specific line that
# indicates what type of profile it is, and perhaps other global
@@ -3680,6 +3714,7 @@ sub IsSymbolizedProfileFile {
# $result->{version} Version number of profile file
# $result->{period} Sampling period (in microseconds)
# $result->{profile} Profile object
+# $result->{threads} Map of thread IDs to profile objects
# $result->{map} Memory map info from profile
# $result->{pcs} Hash of all PC values seen, key is hex address
sub ReadProfile {
@@ -3728,6 +3763,9 @@ sub ReadProfile {
} elsif ($header =~ m/^heap profile:/) {
$main::profile_type = 'heap';
$result = ReadHeapProfile($prog, *PROFILE, $header);
+ } elsif ($header =~ m/^heap/) {
+ $main::profile_type = 'heap';
+ $result = ReadThreadedHeapProfile($prog, $fname, $header);
} elsif ($header =~ m/^--- *$contention_marker/o) {
$main::profile_type = 'contention';
$result = ReadSynchProfile($prog, *PROFILE);
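
The added '^heap' branch is what routes jemalloc's per-thread dumps to ReadThreadedHeapProfile(), while headers starting with 'heap profile:' keep taking the original ReadHeapProfile() path. A rough sketch of the dispatch, with invented header strings:

    use strict;
    use warnings;

    foreach my $header ('heap profile: 1246: 8800744 [ 1246: 8800744] @ heap_v2/524288',
                        'heap_v2/524288') {
        if ($header =~ m/^heap profile:/) {
            print "ReadHeapProfile:         $header\n";
        } elsif ($header =~ m/^heap/) {
            print "ReadThreadedHeapProfile: $header\n";
        }
    }
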
@@ -3870,11 +3908,7 @@ sub ReadCPUProfile {
return $r;
}
-sub ReadHeapProfile {
- my $prog = shift;
- local *PROFILE = shift;
- my $header = shift;
-
+sub HeapProfileIndex {
my $index = 1;
if ($main::opt_inuse_space) {
$index = 1;
@@ -3885,6 +3919,84 @@ sub ReadHeapProfile {
} elsif ($main::opt_alloc_objects) {
$index = 2;
}
+ return $index;
+}
+
+sub ReadMappedLibraries {
+ my $fh = shift;
+ my $map = "";
+ # Read the /proc/self/maps data
+ while (<$fh>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ $map .= $_;
+ }
+ return $map;
+}
+
+sub ReadMemoryMap {
+ my $fh = shift;
+ my $map = "";
+ # Read /proc/self/maps data as formatted by DumpAddressMap()
+ my $buildvar = "";
+ while (<PROFILE>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ # Parse "build=<dir>" specification if supplied
+ if (m/^\s*build=(.*)\n/) {
+ $buildvar = $1;
+ }
+
+ # Expand "$build" variable if available
+ $_ =~ s/\$build\b/$buildvar/g;
+
+ $map .= $_;
+ }
+ return $map;
+}
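
The "build=" handling lets a dumped memory map name a base directory once and refer to it as "$build" on later lines. A small sketch of that expansion on invented input:

    use strict;
    use warnings;

    my @lines = (
        "build=/opt/myapp/releases/42\n",                                  # hypothetical build directory
        "00400000-00452000 r-xp 00000000 08:01 123 \$build/bin/server\n",
    );
    my ($map, $buildvar) = ("", "");
    foreach (@lines) {
        s/\r//g;
        $buildvar = $1 if m/^\s*build=(.*)\n/;    # remember build=<dir>
        s/\$build\b/$buildvar/g;                  # expand later $build references
        $map .= $_;
    }
    print $map;   # the map line now ends in /opt/myapp/releases/42/bin/server
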
+
+sub AdjustSamples {
+ my ($sample_adjustment, $sampling_algorithm, $n1, $s1, $n2, $s2) = @_;
+ if ($sample_adjustment) {
+ if ($sampling_algorithm == 2) {
+ # Remote-heap version 2
+ # The sampling frequency is the rate of a Poisson process.
+ # This means that the probability of sampling an allocation of
+ # size X with sampling rate Y is 1 - exp(-X/Y)
+ if ($n1 != 0) {
+ my $ratio = (($s1*1.0)/$n1)/($sample_adjustment);
+ my $scale_factor = 1/(1 - exp(-$ratio));
+ $n1 *= $scale_factor;
+ $s1 *= $scale_factor;
+ }
+ if ($n2 != 0) {
+ my $ratio = (($s2*1.0)/$n2)/($sample_adjustment);
+ my $scale_factor = 1/(1 - exp(-$ratio));
+ $n2 *= $scale_factor;
+ $s2 *= $scale_factor;
+ }
+ } else {
+ # Remote-heap version 1
+ my $ratio;
+ $ratio = (($s1*1.0)/$n1)/($sample_adjustment);
+ if ($ratio < 1) {
+ $n1 /= $ratio;
+ $s1 /= $ratio;
+ }
+ $ratio = (($s2*1.0)/$n2)/($sample_adjustment);
+ if ($ratio < 1) {
+ $n2 /= $ratio;
+ $s2 /= $ratio;
+ }
+ }
+ }
+ return ($n1, $s1, $n2, $s2);
+}
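
The v2 branch above inverts Poisson sampling: an allocation of size X is sampled with probability 1 - exp(-X/Y) for sampling rate Y, so the observed count and bytes are scaled by the reciprocal of that probability. A standalone sketch of the same arithmetic, with made-up sample values:

    use strict;
    use warnings;

    # Hypothetical input: 10 sampled allocations totalling 40960 bytes,
    # recorded with a v2 sampling rate of 512 KiB ($sample_adjustment).
    my ($n, $s, $rate) = (10, 40960, 524288);

    my $avg   = $s / $n;                    # average sampled object size X
    my $p     = 1 - exp(-$avg / $rate);     # probability such an object gets sampled
    my $scale = 1 / $p;                     # unsampling factor applied to $n and $s
    printf("avg=%.0f p=%.6f scale=%.1f -> est. count=%.0f bytes=%.0f\n",
           $avg, $p, $scale, $n * $scale, $s * $scale);
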
+
+sub ReadHeapProfile {
+ my $prog = shift;
+ local *PROFILE = shift;
+ my $header = shift;
+
+ my $index = HeapProfileIndex();
# Find the type of this profile. The header line looks like:
# heap profile: 1246: 8800744 [ 1246: 8800744] @ <heap-url>/266053
@@ -3974,29 +4086,12 @@ sub ReadHeapProfile {
while (<PROFILE>) {
s/\r//g; # turn windows-looking lines into unix-looking lines
if (/^MAPPED_LIBRARIES:/) {
- # Read the /proc/self/maps data
- while (<PROFILE>) {
- s/\r//g; # turn windows-looking lines into unix-looking lines
- $map .= $_;
- }
+ $map .= ReadMappedLibraries(*PROFILE);
last;
}
if (/^--- Memory map:/) {
- # Read /proc/self/maps data as formatted by DumpAddressMap()
- my $buildvar = "";
- while (<PROFILE>) {
- s/\r//g; # turn windows-looking lines into unix-looking lines
- # Parse "build=<dir>" specification if supplied
- if (m/^\s*build=(.*)\n/) {
- $buildvar = $1;
- }
-
- # Expand "$build" variable if available
- $_ =~ s/\$build\b/$buildvar/g;
-
- $map .= $_;
- }
+ $map .= ReadMemoryMap(*PROFILE);
last;
}
@@ -4007,43 +4102,85 @@ sub ReadHeapProfile {
if (m/^\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\]\s+@\s+(.*)$/) {
my $stack = $5;
my ($n1, $s1, $n2, $s2) = ($1, $2, $3, $4);
+ my @counts = AdjustSamples($sample_adjustment, $sampling_algorithm,
+ $n1, $s1, $n2, $s2);
+ AddEntries($profile, $pcs, FixCallerAddresses($stack), $counts[$index]);
+ }
+ }
- if ($sample_adjustment) {
- if ($sampling_algorithm == 2) {
- # Remote-heap version 2
- # The sampling frequency is the rate of a Poisson process.
- # This means that the probability of sampling an allocation of
- # size X with sampling rate Y is 1 - exp(-X/Y)
- if ($n1 != 0) {
- my $ratio = (($s1*1.0)/$n1)/($sample_adjustment);
- my $scale_factor = 1/(1 - exp(-$ratio));
- $n1 *= $scale_factor;
- $s1 *= $scale_factor;
- }
- if ($n2 != 0) {
- my $ratio = (($s2*1.0)/$n2)/($sample_adjustment);
- my $scale_factor = 1/(1 - exp(-$ratio));
- $n2 *= $scale_factor;
- $s2 *= $scale_factor;
- }
- } else {
- # Remote-heap version 1
- my $ratio;
- $ratio = (($s1*1.0)/$n1)/($sample_adjustment);
- if ($ratio < 1) {
- $n1 /= $ratio;
- $s1 /= $ratio;
- }
- $ratio = (($s2*1.0)/$n2)/($sample_adjustment);
- if ($ratio < 1) {
- $n2 /= $ratio;
- $s2 /= $ratio;
- }
+ my $r = {};
+ $r->{version} = "heap";
+ $r->{period} = 1;
+ $r->{profile} = $profile;
+ $r->{libs} = ParseLibraries($prog, $map, $pcs);
+ $r->{pcs} = $pcs;
+ return $r;
+}
+
+sub ReadThreadedHeapProfile {
+ my ($prog, $fname, $header) = @_;
+
+ my $index = HeapProfileIndex();
+ my $sampling_algorithm = 0;
+ my $sample_adjustment = 0;
+ chomp($header);
+ my $type = "unknown";
+ # Assuming a very specific type of header for now.
+ if ($header =~ m"^heap_v2/(\d+)") {
+ $type = "_v2";
+ $sampling_algorithm = 2;
+ $sample_adjustment = int($1);
+ }
+ if ($type ne "_v2" || !defined($sample_adjustment)) {
+ die "Threaded heap profiles require v2 sampling with a sample rate\n";
+ }
+
+ my $profile = {};
+ my $thread_profiles = {};
+ my $pcs = {};
+ my $map = "";
+ my $stack = "";
+
+ while (<PROFILE>) {
+ s/\r//g;
+ if (/^MAPPED_LIBRARIES:/) {
+ $map .= ReadMappedLibraries(*PROFILE);
+ last;
+ }
+
+ if (/^--- Memory map:/) {
+ $map .= ReadMemoryMap(*PROFILE);
+ last;
+ }
+
+ # Read entry of the form:
+ # @ a1 a2 ... an
+ # t*: <count1>: <bytes1> [<count2>: <bytes2>]
+ # t1: <count1>: <bytes1> [<count2>: <bytes2>]
+ # ...
+ # tn: <count1>: <bytes1> [<count2>: <bytes2>]
+ s/^\s*//;
+ s/\s*$//;
+ if (m/^@\s+(.*)$/) {
+ $stack = $1;
+ } elsif (m/^\s*(t(\*|\d+)):\s+(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\]$/) {
+ if ($stack eq "") {
+ # Still in the header, so this is just a per-thread summary.
+ next;
+ }
+ my $thread = $2;
+ my ($n1, $s1, $n2, $s2) = ($3, $4, $5, $6);
+ my @counts = AdjustSamples($sample_adjustment, $sampling_algorithm,
+ $n1, $s1, $n2, $s2);
+ if ($thread eq "*") {
+ AddEntries($profile, $pcs, FixCallerAddresses($stack), $counts[$index]);
+ } else {
+ if (!exists($thread_profiles->{$thread})) {
+ $thread_profiles->{$thread} = {};
}
+ AddEntries($thread_profiles->{$thread}, $pcs,
+ FixCallerAddresses($stack), $counts[$index]);
}
-
- my @counts = ($n1, $s1, $n2, $s2);
- AddEntries($profile, $pcs, FixCallerAddresses($stack), $counts[$index]);
}
}
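
Each threaded entry pairs one '@' stack line with a 't*' aggregate line plus one 'tN' line per thread; the aggregate feeds the combined profile while the per-thread lines feed $thread_profiles. A minimal sketch of the same regex applied to invented input:

    use strict;
    use warnings;

    my @lines = (
        '@ 0x4005e0 0x4007a1 0x400890',
        '  t*: 12: 49152 [ 12: 49152]',
        '  t1: 8: 32768 [ 8: 32768]',
        '  t7: 4: 16384 [ 4: 16384]',
    );
    my $stack = "";
    foreach (@lines) {
        s/^\s+//; s/\s+$//;
        if (m/^@\s+(.*)$/) {
            $stack = $1;
        } elsif (m/^(t(\*|\d+)):\s+(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\]$/) {
            printf("thread=%-2s count=%2d bytes=%5d  stack=[%s]\n", $2, $3, $4, $stack);
        }
    }
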
@@ -4051,6 +4188,7 @@ sub ReadHeapProfile {
$r->{version} = "heap";
$r->{period} = 1;
$r->{profile} = $profile;
+ $r->{threads} = $thread_profiles;
$r->{libs} = ParseLibraries($prog, $map, $pcs);
$r->{pcs} = $pcs;
return $r;
@@ -4120,10 +4258,10 @@ sub ReadSynchProfile {
} elsif ($variable eq "sampling period") {
$sampling_period = $value;
} elsif ($variable eq "ms since reset") {
- # Currently nothing is done with this value in pprof
+ # Currently nothing is done with this value in jeprof
# So we just silently ignore it for now
} elsif ($variable eq "discarded samples") {
- # Currently nothing is done with this value in pprof
+ # Currently nothing is done with this value in jeprof
# So we just silently ignore it for now
} else {
printf STDERR ("Ignoring unnknown variable in /contention output: " .
@@ -4429,7 +4567,7 @@ sub ParseLibraries {
}
# Add two hex addresses of length $address_length.
-# Run pprof --test for unit test if this is changed.
+# Run jeprof --test for unit test if this is changed.
sub AddressAdd {
my $addr1 = shift;
my $addr2 = shift;
@@ -4483,7 +4621,7 @@ sub AddressAdd {
# Subtract two hex addresses of length $address_length.
-# Run pprof --test for unit test if this is changed.
+# Run jeprof --test for unit test if this is changed.
sub AddressSub {
my $addr1 = shift;
my $addr2 = shift;
@@ -4535,7 +4673,7 @@ sub AddressSub {
}
# Increment a hex address of length $address_length.
-# Run pprof --test for unit test if this is changed.
+# Run jeprof --test for unit test if this is changed.
sub AddressInc {
my $addr = shift;
my $sum;
@@ -4747,7 +4885,7 @@ sub MapToSymbols {
}
}
}
-
+
# Prepend to accumulated symbols for pcstr
# (so that caller comes before callee)
my $sym = $symbols->{$pcstr};
@@ -4853,7 +4991,7 @@ sub UnparseAddress {
# 32-bit or ELF 64-bit executable file. The location of the tools
# is determined by considering the following options in this order:
# 1) --tools option, if set
-# 2) PPROF_TOOLS environment variable, if set
+# 2) JEPROF_TOOLS environment variable, if set
# 3) the environment
sub ConfigureObjTools {
my $prog_file = shift;
@@ -4886,7 +5024,7 @@ sub ConfigureObjTools {
# For windows, we provide a version of nm and addr2line as part of
# the opensource release, which is capable of parsing
# Windows-style PDB executables. It should live in the path, or
- # in the same directory as pprof.
+ # in the same directory as jeprof.
$obj_tool_map{"nm_pdb"} = "nm-pdb";
$obj_tool_map{"addr2line_pdb"} = "addr2line-pdb";
}
@@ -4905,20 +5043,20 @@ sub ConfigureObjTools {
}
# Returns the path of a caller-specified object tool. If --tools or
-# PPROF_TOOLS are specified, then returns the full path to the tool
+# JEPROF_TOOLS are specified, then returns the full path to the tool
# with that prefix. Otherwise, returns the path unmodified (which
# means we will look for it on PATH).
sub ConfigureTool {
my $tool = shift;
my $path;
- # --tools (or $PPROF_TOOLS) is a comma separated list, where each
+ # --tools (or $JEPROF_TOOLS) is a comma separated list, where each
# item is either a) a pathname prefix, or b) a map of the form
# <tool>:<path>. First we look for an entry of type (b) for our
# tool. If one is found, we use it. Otherwise, we consider all the
# pathname prefixes in turn, until one yields an existing file. If
# none does, we use a default path.
- my $tools = $main::opt_tools || $ENV{"PPROF_TOOLS"} || "";
+ my $tools = $main::opt_tools || $ENV{"JEPROF_TOOLS"} || "";
if ($tools =~ m/(,|^)\Q$tool\E:([^,]*)/) {
$path = $2;
# TODO(csilvers): sanity-check that $path exists? Hard if it's relative.
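
A short sketch of how such a --tools / JEPROF_TOOLS string resolves, first by exact <tool>:<path> entry and then by pathname prefix (the paths here are hypothetical):

    use strict;
    use warnings;

    my $tools = 'nm:/opt/pdbtools/nm-pdb,/usr/local/gperf-';   # e.g. from --tools
    foreach my $tool ('nm', 'objdump') {
        my $path;
        if ($tools =~ m/(,|^)\Q$tool\E:([^,]*)/) {
            $path = $2;                        # explicit <tool>:<path> entry
        } else {
            foreach my $prefix (split(',', $tools)) {
                next if $prefix =~ m/:/;       # skip <tool>:<path> entries
                if (-x "$prefix$tool") {
                    $path = "$prefix$tool";    # first prefix that yields an executable
                    last;
                }
            }
            $path = $tool if !defined($path);  # otherwise fall back to PATH lookup
        }
        print "$tool -> $path\n";
    }
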
@@ -4932,16 +5070,16 @@ sub ConfigureTool {
}
if (!$path) {
error("No '$tool' found with prefix specified by " .
- "--tools (or \$PPROF_TOOLS) '$tools'\n");
+ "--tools (or \$JEPROF_TOOLS) '$tools'\n");
}
} else {
# ... otherwise use the version that exists in the same directory as
- # pprof. If there's nothing there, use $PATH.
+ # jeprof. If there's nothing there, use $PATH.
$0 =~ m,[^/]*$,; # this is everything after the last slash
my $dirname = $`; # this is everything up to and including the last slash
if (-x "$dirname$tool") {
$path = "$dirname$tool";
- } else {
+ } else {
$path = $tool;
}
}
@@ -4966,7 +5104,7 @@ sub cleanup {
unlink($main::tmpfile_sym);
unlink(keys %main::tempnames);
- # We leave any collected profiles in $HOME/pprof in case the user wants
+ # We leave any collected profiles in $HOME/jeprof in case the user wants
# to look at them later. We print a message informing them of this.
if ((scalar(@main::profile_files) > 0) &&
defined($main::collected_profile)) {
@@ -4975,7 +5113,7 @@ sub cleanup {
}
print STDERR "If you want to investigate this profile further, you can do:\n";
print STDERR "\n";
- print STDERR " pprof \\\n";
+ print STDERR " jeprof \\\n";
print STDERR " $main::prog \\\n";
print STDERR " $main::collected_profile\n";
print STDERR "\n";
@@ -5160,7 +5298,7 @@ sub GetProcedureBoundaries {
# The test vectors for AddressAdd/Sub/Inc are 8-16-nibble hex strings.
# To make them more readable, we add underscores at interesting places.
# This routine removes the underscores, producing the canonical representation
-# used by pprof to represent addresses, particularly in the tested routines.
+# used by jeprof to represent addresses, particularly in the tested routines.
sub CanonicalHex {
my $arg = shift;
return join '', (split '_',$arg);
diff --git a/deps/jemalloc/config.guess b/deps/jemalloc/config.guess
index b79252d6b..1f5c50c0d 100755
--- a/deps/jemalloc/config.guess
+++ b/deps/jemalloc/config.guess
@@ -1,8 +1,8 @@
#! /bin/sh
# Attempt to guess a canonical system name.
-# Copyright 1992-2013 Free Software Foundation, Inc.
+# Copyright 1992-2014 Free Software Foundation, Inc.
-timestamp='2013-06-10'
+timestamp='2014-03-23'
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
@@ -50,7 +50,7 @@ version="\
GNU config.guess ($timestamp)
Originally written by Per Bothner.
-Copyright 1992-2013 Free Software Foundation, Inc.
+Copyright 1992-2014 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -149,7 +149,7 @@ Linux|GNU|GNU/*)
LIBC=gnu
#endif
EOF
- eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC'`
+ eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC' | sed 's, ,,g'`
;;
esac
@@ -826,7 +826,7 @@ EOF
*:MINGW*:*)
echo ${UNAME_MACHINE}-pc-mingw32
exit ;;
- i*:MSYS*:*)
+ *:MSYS*:*)
echo ${UNAME_MACHINE}-pc-msys
exit ;;
i*:windows32*:*)
@@ -969,10 +969,10 @@ EOF
eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'`
test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; }
;;
- or1k:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ openrisc*:Linux:*:*)
+ echo or1k-unknown-linux-${LIBC}
exit ;;
- or32:Linux:*:*)
+ or32:Linux:*:* | or1k*:Linux:*:*)
echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
padre:Linux:*:*)
@@ -1260,16 +1260,26 @@ EOF
if test "$UNAME_PROCESSOR" = unknown ; then
UNAME_PROCESSOR=powerpc
fi
- if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
- if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \
- (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
- grep IS_64BIT_ARCH >/dev/null
- then
- case $UNAME_PROCESSOR in
- i386) UNAME_PROCESSOR=x86_64 ;;
- powerpc) UNAME_PROCESSOR=powerpc64 ;;
- esac
+ if test `echo "$UNAME_RELEASE" | sed -e 's/\..*//'` -le 10 ; then
+ if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
+ if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \
+ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
+ grep IS_64BIT_ARCH >/dev/null
+ then
+ case $UNAME_PROCESSOR in
+ i386) UNAME_PROCESSOR=x86_64 ;;
+ powerpc) UNAME_PROCESSOR=powerpc64 ;;
+ esac
+ fi
fi
+ elif test "$UNAME_PROCESSOR" = i386 ; then
+ # Avoid executing cc on OS X 10.9, as it ships with a stub
+ # that puts up a graphical alert prompting to install
+ # developer tools. Any system running Mac OS X 10.7 or
+ # later (Darwin 11 and later) is required to have a 64-bit
+ # processor. This is not true of the ARM version of Darwin
+ # that Apple uses in portable devices.
+ UNAME_PROCESSOR=x86_64
fi
echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE}
exit ;;
@@ -1361,154 +1371,6 @@ EOF
exit ;;
esac
-eval $set_cc_for_build
-cat >$dummy.c <<EOF
-#ifdef _SEQUENT_
-# include <sys/types.h>
-# include <sys/utsname.h>
-#endif
-main ()
-{
-#if defined (sony)
-#if defined (MIPSEB)
- /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed,
- I don't know.... */
- printf ("mips-sony-bsd\n"); exit (0);
-#else
-#include <sys/param.h>
- printf ("m68k-sony-newsos%s\n",
-#ifdef NEWSOS4
- "4"
-#else
- ""
-#endif
- ); exit (0);
-#endif
-#endif
-
-#if defined (__arm) && defined (__acorn) && defined (__unix)
- printf ("arm-acorn-riscix\n"); exit (0);
-#endif
-
-#if defined (hp300) && !defined (hpux)
- printf ("m68k-hp-bsd\n"); exit (0);
-#endif
-
-#if defined (NeXT)
-#if !defined (__ARCHITECTURE__)
-#define __ARCHITECTURE__ "m68k"
-#endif
- int version;
- version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`;
- if (version < 4)
- printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version);
- else
- printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version);
- exit (0);
-#endif
-
-#if defined (MULTIMAX) || defined (n16)
-#if defined (UMAXV)
- printf ("ns32k-encore-sysv\n"); exit (0);
-#else
-#if defined (CMU)
- printf ("ns32k-encore-mach\n"); exit (0);
-#else
- printf ("ns32k-encore-bsd\n"); exit (0);
-#endif
-#endif
-#endif
-
-#if defined (__386BSD__)
- printf ("i386-pc-bsd\n"); exit (0);
-#endif
-
-#if defined (sequent)
-#if defined (i386)
- printf ("i386-sequent-dynix\n"); exit (0);
-#endif
-#if defined (ns32000)
- printf ("ns32k-sequent-dynix\n"); exit (0);
-#endif
-#endif
-
-#if defined (_SEQUENT_)
- struct utsname un;
-
- uname(&un);
-
- if (strncmp(un.version, "V2", 2) == 0) {
- printf ("i386-sequent-ptx2\n"); exit (0);
- }
- if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */
- printf ("i386-sequent-ptx1\n"); exit (0);
- }
- printf ("i386-sequent-ptx\n"); exit (0);
-
-#endif
-
-#if defined (vax)
-# if !defined (ultrix)
-# include <sys/param.h>
-# if defined (BSD)
-# if BSD == 43
- printf ("vax-dec-bsd4.3\n"); exit (0);
-# else
-# if BSD == 199006
- printf ("vax-dec-bsd4.3reno\n"); exit (0);
-# else
- printf ("vax-dec-bsd\n"); exit (0);
-# endif
-# endif
-# else
- printf ("vax-dec-bsd\n"); exit (0);
-# endif
-# else
- printf ("vax-dec-ultrix\n"); exit (0);
-# endif
-#endif
-
-#if defined (alliant) && defined (i860)
- printf ("i860-alliant-bsd\n"); exit (0);
-#endif
-
- exit (1);
-}
-EOF
-
-$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` &&
- { echo "$SYSTEM_NAME"; exit; }
-
-# Apollos put the system type in the environment.
-
-test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; }
-
-# Convex versions that predate uname can use getsysinfo(1)
-
-if [ -x /usr/convex/getsysinfo ]
-then
- case `getsysinfo -f cpu_type` in
- c1*)
- echo c1-convex-bsd
- exit ;;
- c2*)
- if getsysinfo -f scalar_acc
- then echo c32-convex-bsd
- else echo c2-convex-bsd
- fi
- exit ;;
- c34*)
- echo c34-convex-bsd
- exit ;;
- c38*)
- echo c38-convex-bsd
- exit ;;
- c4*)
- echo c4-convex-bsd
- exit ;;
- esac
-fi
-
cat >&2 <<EOF
$0: unable to guess system type
diff --git a/deps/jemalloc/config.sub b/deps/jemalloc/config.sub
index 61cb4bc22..0ccff7706 100755
--- a/deps/jemalloc/config.sub
+++ b/deps/jemalloc/config.sub
@@ -1,8 +1,8 @@
#! /bin/sh
# Configuration validation subroutine script.
-# Copyright 1992-2013 Free Software Foundation, Inc.
+# Copyright 1992-2014 Free Software Foundation, Inc.
-timestamp='2013-10-01'
+timestamp='2014-05-01'
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
@@ -68,7 +68,7 @@ Report bugs and patches to <config-patches@gnu.org>."
version="\
GNU config.sub ($timestamp)
-Copyright 1992-2013 Free Software Foundation, Inc.
+Copyright 1992-2014 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -283,8 +283,10 @@ case $basic_machine in
| mips64vr5900 | mips64vr5900el \
| mipsisa32 | mipsisa32el \
| mipsisa32r2 | mipsisa32r2el \
+ | mipsisa32r6 | mipsisa32r6el \
| mipsisa64 | mipsisa64el \
| mipsisa64r2 | mipsisa64r2el \
+ | mipsisa64r6 | mipsisa64r6el \
| mipsisa64sb1 | mipsisa64sb1el \
| mipsisa64sr71k | mipsisa64sr71kel \
| mipsr5900 | mipsr5900el \
@@ -296,8 +298,7 @@ case $basic_machine in
| nds32 | nds32le | nds32be \
| nios | nios2 | nios2eb | nios2el \
| ns16k | ns32k \
- | open8 \
- | or1k | or32 \
+ | open8 | or1k | or1knd | or32 \
| pdp10 | pdp11 | pj | pjl \
| powerpc | powerpc64 | powerpc64le | powerpcle \
| pyramid \
@@ -402,8 +403,10 @@ case $basic_machine in
| mips64vr5900-* | mips64vr5900el-* \
| mipsisa32-* | mipsisa32el-* \
| mipsisa32r2-* | mipsisa32r2el-* \
+ | mipsisa32r6-* | mipsisa32r6el-* \
| mipsisa64-* | mipsisa64el-* \
| mipsisa64r2-* | mipsisa64r2el-* \
+ | mipsisa64r6-* | mipsisa64r6el-* \
| mipsisa64sb1-* | mipsisa64sb1el-* \
| mipsisa64sr71k-* | mipsisa64sr71kel-* \
| mipsr5900-* | mipsr5900el-* \
@@ -415,6 +418,7 @@ case $basic_machine in
| nios-* | nios2-* | nios2eb-* | nios2el-* \
| none-* | np1-* | ns16k-* | ns32k-* \
| open8-* \
+ | or1k*-* \
| orion-* \
| pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
| powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \
@@ -1376,7 +1380,7 @@ case $os in
| -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
| -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \
| -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \
- | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es*)
+ | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* | -tirtos*)
# Remember, each alternative MUST END IN *, to match a version number.
;;
-qnx*)
@@ -1400,6 +1404,9 @@ case $os in
-mac*)
os=`echo $os | sed -e 's|mac|macos|'`
;;
+ # Apple iOS
+ -ios*)
+ ;;
-linux-dietlibc)
os=-linux-dietlibc
;;
@@ -1594,9 +1601,6 @@ case $basic_machine in
mips*-*)
os=-elf
;;
- or1k-*)
- os=-elf
- ;;
or32-*)
os=-coff
;;
diff --git a/deps/jemalloc/configure b/deps/jemalloc/configure
index 2e5496bfb..4feae0b4b 100755
--- a/deps/jemalloc/configure
+++ b/deps/jemalloc/configure
@@ -628,19 +628,19 @@ cfghdrs_in
enable_zone_allocator
enable_tls
enable_lazy_lock
+TESTLIBS
jemalloc_version_gid
jemalloc_version_nrev
jemalloc_version_bugfix
jemalloc_version_minor
jemalloc_version_major
jemalloc_version
+enable_cache_oblivious
enable_xmalloc
enable_valgrind
enable_utrace
enable_fill
-enable_dss
enable_munmap
-enable_mremap
enable_tcache
enable_prof
enable_stats
@@ -648,8 +648,8 @@ enable_debug
je_
install_suffix
private_namespace
+JEMALLOC_CPREFIX
enable_code_coverage
-enable_experimental
AUTOCONF
LD
RANLIB
@@ -709,6 +709,7 @@ objroot
abs_srcroot
srcroot
rev
+CONFIG
target_alias
host_alias
build_alias
@@ -753,7 +754,6 @@ enable_option_checking
with_xslroot
with_rpath
enable_autogen
-enable_experimental
enable_code_coverage
with_mangling
with_jemalloc_prefix
@@ -770,13 +770,17 @@ with_static_libunwind
enable_prof_libgcc
enable_prof_gcc
enable_tcache
-enable_mremap
enable_munmap
-enable_dss
enable_fill
enable_utrace
enable_valgrind
enable_xmalloc
+enable_cache_oblivious
+with_lg_tiny_min
+with_lg_quantum
+with_lg_page
+with_lg_page_sizes
+with_lg_size_class_group
enable_lazy_lock
enable_tls
enable_zone_allocator
@@ -1402,9 +1406,8 @@ Optional Features:
--disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no)
--enable-FEATURE[=ARG] include FEATURE [ARG=yes]
--enable-autogen Automatically regenerate configure output
- --disable-experimental Disable support for the experimental API
--enable-code-coverage Enable code coverage
- --enable-cc-silence Silence irrelevant compiler warnings
+ --disable-cc-silence Do not silence irrelevant compiler warnings
--enable-debug Build debugging code (implies --enable-ivsalloc)
--enable-ivsalloc Validate pointers passed through the public API
--disable-stats Disable statistics calculation/reporting
@@ -1413,14 +1416,15 @@ Optional Features:
--disable-prof-libgcc Do not use libgcc for backtracing
--disable-prof-gcc Do not use gcc intrinsics for backtracing
--disable-tcache Disable per thread caches
- --enable-mremap Enable mremap(2) for huge realloc()
--disable-munmap Disable VM deallocation via munmap(2)
- --enable-dss Enable allocation from DSS
--disable-fill Disable support for junk/zero filling, quarantine,
and redzones
--enable-utrace Enable utrace(2)-based tracing
--disable-valgrind Disable support for Valgrind
--enable-xmalloc Support xmalloc option
+ --disable-cache-oblivious
+ Disable support for cache-oblivious allocation
+ alignment
--enable-lazy-lock Enable lazy locking (only lock when multi-threaded)
--disable-tls Disable thread-local storage (__thread keyword)
--disable-zone-allocator
@@ -1442,6 +1446,16 @@ Optional Packages:
--with-static-libunwind=<libunwind.a>
Path to static libunwind library; use rather than
dynamically linking
+ --with-lg-tiny-min=<lg-tiny-min>
+ Base 2 log of minimum tiny size class to support
+ --with-lg-quantum=<lg-quantum>
+ Base 2 log of minimum allocation alignment
+ --with-lg-page=<lg-page>
+ Base 2 log of system page size
+ --with-lg-page-sizes=<lg-page-sizes>
+ Base 2 logs of system page sizes to support
+ --with-lg-size-class-group=<lg-size-class-group>
+ Base 2 log of size classes per doubling
Some influential environment variables:
CC C compiler command
@@ -1910,73 +1924,6 @@ fi
} # ac_fn_c_try_link
-# ac_fn_c_check_func LINENO FUNC VAR
-# ----------------------------------
-# Tests whether FUNC exists, setting the cache variable VAR accordingly
-ac_fn_c_check_func ()
-{
- as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
-$as_echo_n "checking for $2... " >&6; }
-if eval \${$3+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-/* Define $2 to an innocuous variant, in case <limits.h> declares $2.
- For example, HP-UX 11i <limits.h> declares gettimeofday. */
-#define $2 innocuous_$2
-
-/* System header to define __stub macros and hopefully few prototypes,
- which can conflict with char $2 (); below.
- Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
- <limits.h> exists even on freestanding compilers. */
-
-#ifdef __STDC__
-# include <limits.h>
-#else
-# include <assert.h>
-#endif
-
-#undef $2
-
-/* Override any GCC internal prototype to avoid an error.
- Use char because int might match the return type of a GCC
- builtin and then its argument prototype would still apply. */
-#ifdef __cplusplus
-extern "C"
-#endif
-char $2 ();
-/* The GNU C library defines this for functions which it implements
- to always fail with ENOSYS. Some functions are actually named
- something starting with __ and the normal name is an alias. */
-#if defined __stub_$2 || defined __stub___$2
-choke me
-#endif
-
-int
-main ()
-{
-return $2 ();
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_link "$LINENO"; then :
- eval "$3=yes"
-else
- eval "$3=no"
-fi
-rm -f core conftest.err conftest.$ac_objext \
- conftest$ac_exeext conftest.$ac_ext
-fi
-eval ac_res=\$$3
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
-$as_echo "$ac_res" >&6; }
- eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
-
-} # ac_fn_c_check_func
-
# ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES
# -------------------------------------------------------
# Tests whether HEADER exists, giving a warning if it cannot be compiled using
@@ -2064,6 +2011,73 @@ fi
} # ac_fn_c_check_header_mongrel
+# ac_fn_c_check_func LINENO FUNC VAR
+# ----------------------------------
+# Tests whether FUNC exists, setting the cache variable VAR accordingly
+ac_fn_c_check_func ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+/* Define $2 to an innocuous variant, in case <limits.h> declares $2.
+ For example, HP-UX 11i <limits.h> declares gettimeofday. */
+#define $2 innocuous_$2
+
+/* System header to define __stub macros and hopefully few prototypes,
+ which can conflict with char $2 (); below.
+ Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+ <limits.h> exists even on freestanding compilers. */
+
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+
+#undef $2
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char $2 ();
+/* The GNU C library defines this for functions which it implements
+ to always fail with ENOSYS. Some functions are actually named
+ something starting with __ and the normal name is an alias. */
+#if defined __stub_$2 || defined __stub___$2
+choke me
+#endif
+
+int
+main ()
+{
+return $2 ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ eval "$3=yes"
+else
+ eval "$3=no"
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_func
+
# ac_fn_c_check_type LINENO TYPE VAR INCLUDES
# -------------------------------------------
# Tests whether TYPE exists after having included INCLUDES, setting cache
@@ -2476,7 +2490,10 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
-rev=1
+CONFIG=`echo ${ac_configure_args} | sed -e 's#'"'"'\([^ ]*\)'"'"'#\1#g'`
+
+
+rev=2
srcroot=$srcdir
@@ -3488,6 +3505,42 @@ fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror=declaration-after-statement" >&5
+$as_echo_n "checking whether compiler supports -Werror=declaration-after-statement... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-Werror=declaration-after-statement"
+else
+ CFLAGS="${CFLAGS} -Werror=declaration-after-statement"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-Werror=declaration-after-statement
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -pipe" >&5
$as_echo_n "checking whether compiler supports -pipe... " >&6; }
TCFLAGS="${CFLAGS}"
@@ -3669,7 +3722,43 @@ $as_echo "no" >&6; }
fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
- CPPFLAGS="$CPPFLAGS -I${srcroot}/include/msvc_compat"
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -FS" >&5
+$as_echo_n "checking whether compiler supports -FS... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-FS"
+else
+ CFLAGS="${CFLAGS} -FS"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-FS
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+ CPPFLAGS="$CPPFLAGS -I${srcdir}/include/msvc_compat"
fi
fi
if test "x$EXTRA_CFLAGS" != "x" ; then
@@ -4338,6 +4427,10 @@ _ACEOF
fi
+if test "x${je_cv_msvc}" = "xyes" -a "x${ac_cv_header_inttypes_h}" = "xno"; then
+ CPPFLAGS="$CPPFLAGS -I${srcdir}/include/msvc_compat/C99"
+fi
+
# The cast to long int works around a bug in the HP C Compiler
# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
@@ -4622,9 +4715,10 @@ case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac
CPU_SPINWAIT=""
case "${host_cpu}" in
- i[345]86)
- ;;
i686|x86_64)
+ if ${je_cv_pause+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pause instruction is compilable" >&5
$as_echo_n "checking whether pause instruction is compilable... " >&6; }
@@ -4653,44 +4747,10 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pause" >&5
$as_echo "$je_cv_pause" >&6; }
- if test "x${je_cv_pause}" = "xyes" ; then
- CPU_SPINWAIT='__asm__ volatile("pause")'
- fi
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether SSE2 intrinsics is compilable" >&5
-$as_echo_n "checking whether SSE2 intrinsics is compilable... " >&6; }
-if ${je_cv_sse2+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-#include <emmintrin.h>
-
-int
-main ()
-{
-
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_link "$LINENO"; then :
- je_cv_sse2=yes
-else
- je_cv_sse2=no
-fi
-rm -f core conftest.err conftest.$ac_objext \
- conftest$ac_exeext conftest.$ac_ext
fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_sse2" >&5
-$as_echo "$je_cv_sse2" >&6; }
-
- if test "x${je_cv_sse2}" = "xyes" ; then
- cat >>confdefs.h <<_ACEOF
-#define HAVE_SSE2
-_ACEOF
+ if test "x${je_cv_pause}" = "xyes" ; then
+ CPU_SPINWAIT='__asm__ volatile("pause")'
fi
;;
powerpc)
@@ -4822,9 +4882,9 @@ fi
default_munmap="1"
-JEMALLOC_USABLE_SIZE_CONST="const"
+maps_coalesce="1"
case "${host}" in
- *-*-darwin*)
+ *-*-darwin* | *-*-ios*)
CFLAGS="$CFLAGS"
abi="macho"
$as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h
@@ -4834,7 +4894,7 @@ case "${host}" in
so="dylib"
importlib="${so}"
force_tls="0"
- DSO_LDFLAGS='-shared -Wl,-dylib_install_name,$(@F)'
+ DSO_LDFLAGS='-shared -Wl,-install_name,$(LIBDIR)/$(@F)'
SOREV="${rev}.${so}"
sbrk_deprecated="1"
;;
@@ -4845,6 +4905,25 @@ case "${host}" in
force_lazy_lock="1"
;;
+ *-*-dragonfly*)
+ CFLAGS="$CFLAGS"
+ abi="elf"
+ $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h
+
+ ;;
+ *-*-openbsd*)
+ CFLAGS="$CFLAGS"
+ abi="elf"
+ $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h
+
+ force_tls="0"
+ ;;
+ *-*-bitrig*)
+ CFLAGS="$CFLAGS"
+ abi="elf"
+ $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h
+
+ ;;
*-*-linux*)
CFLAGS="$CFLAGS"
CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE"
@@ -4855,7 +4934,8 @@ case "${host}" in
$as_echo "#define JEMALLOC_THREADED_INIT " >>confdefs.h
- JEMALLOC_USABLE_SIZE_CONST=""
+ $as_echo "#define JEMALLOC_USE_CXX_THROW " >>confdefs.h
+
default_munmap="0"
;;
*-*-netbsd*)
@@ -4905,9 +4985,11 @@ $as_echo "$abi" >&6; }
fi
abi="xcoff"
;;
- *-*-mingw*)
+ *-*-mingw* | *-*-cygwin*)
abi="pecoff"
force_tls="0"
+ force_lazy_lock="1"
+ maps_coalesce="0"
RPATH=""
so="dll"
if test "x$je_cv_msvc" = "xyes" ; then
@@ -4935,6 +5017,50 @@ $as_echo "Unsupported operating system: ${host}" >&6; }
abi="elf"
;;
esac
+
+JEMALLOC_USABLE_SIZE_CONST=const
+for ac_header in malloc.h
+do :
+ ac_fn_c_check_header_mongrel "$LINENO" "malloc.h" "ac_cv_header_malloc_h" "$ac_includes_default"
+if test "x$ac_cv_header_malloc_h" = xyes; then :
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_MALLOC_H 1
+_ACEOF
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether malloc_usable_size definition can use const argument" >&5
+$as_echo_n "checking whether malloc_usable_size definition can use const argument... " >&6; }
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <malloc.h>
+ #include <stddef.h>
+ size_t malloc_usable_size(const void *ptr);
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+
+else
+
+ JEMALLOC_USABLE_SIZE_CONST=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+fi
+
+done
+
cat >>confdefs.h <<_ACEOF
#define JEMALLOC_USABLE_SIZE_CONST $JEMALLOC_USABLE_SIZE_CONST
_ACEOF
@@ -5079,7 +5205,7 @@ int
main ()
{
static __thread int
- __attribute__((tls_model("initial-exec"))) foo;
+ __attribute__((tls_model("initial-exec"), unused)) foo;
foo = 0;
;
return 0;
@@ -5104,6 +5230,216 @@ else
$as_echo "#define JEMALLOC_TLS_MODEL " >>confdefs.h
fi
+SAVED_CFLAGS="${CFLAGS}"
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5
+$as_echo_n "checking whether compiler supports -Werror... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-Werror"
+else
+ CFLAGS="${CFLAGS} -Werror"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-Werror
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether alloc_size attribute is compilable" >&5
+$as_echo_n "checking whether alloc_size attribute is compilable... " >&6; }
+if ${je_cv_alloc_size+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdlib.h>
+int
+main ()
+{
+void *foo(size_t size) __attribute__((alloc_size(1)));
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_alloc_size=yes
+else
+ je_cv_alloc_size=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_alloc_size" >&5
+$as_echo "$je_cv_alloc_size" >&6; }
+
+CFLAGS="${SAVED_CFLAGS}"
+if test "x${je_cv_alloc_size}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_HAVE_ATTR_ALLOC_SIZE " >>confdefs.h
+
+fi
+SAVED_CFLAGS="${CFLAGS}"
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5
+$as_echo_n "checking whether compiler supports -Werror... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-Werror"
+else
+ CFLAGS="${CFLAGS} -Werror"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-Werror
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether format(gnu_printf, ...) attribute is compilable" >&5
+$as_echo_n "checking whether format(gnu_printf, ...) attribute is compilable... " >&6; }
+if ${je_cv_format_gnu_printf+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdlib.h>
+int
+main ()
+{
+void *foo(const char *format, ...) __attribute__((format(gnu_printf, 1, 2)));
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_format_gnu_printf=yes
+else
+ je_cv_format_gnu_printf=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_format_gnu_printf" >&5
+$as_echo "$je_cv_format_gnu_printf" >&6; }
+
+CFLAGS="${SAVED_CFLAGS}"
+if test "x${je_cv_format_gnu_printf}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF " >>confdefs.h
+
+fi
+SAVED_CFLAGS="${CFLAGS}"
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5
+$as_echo_n "checking whether compiler supports -Werror... " >&6; }
+TCFLAGS="${CFLAGS}"
+if test "x${CFLAGS}" = "x" ; then
+ CFLAGS="-Werror"
+else
+ CFLAGS="${CFLAGS} -Werror"
+fi
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+
+int
+main ()
+{
+
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ je_cv_cflags_appended=-Werror
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ je_cv_cflags_appended=
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ CFLAGS="${TCFLAGS}"
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether format(printf, ...) attribute is compilable" >&5
+$as_echo_n "checking whether format(printf, ...) attribute is compilable... " >&6; }
+if ${je_cv_format_printf+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdlib.h>
+int
+main ()
+{
+void *foo(const char *format, ...) __attribute__((format(printf, 1, 2)));
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_format_printf=yes
+else
+ je_cv_format_printf=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_format_printf" >&5
+$as_echo "$je_cv_format_printf" >&6; }
+
+CFLAGS="${SAVED_CFLAGS}"
+if test "x${je_cv_format_printf}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF " >>confdefs.h
+
+fi
# Check whether --with-rpath was given.
@@ -5403,7 +5739,7 @@ fi
-public_syms="malloc_conf malloc_message malloc calloc posix_memalign aligned_alloc realloc free mallocx rallocx xallocx sallocx dallocx nallocx mallctl mallctlnametomib mallctlbymib malloc_stats_print malloc_usable_size"
+public_syms="malloc_conf malloc_message malloc calloc posix_memalign aligned_alloc realloc free mallocx rallocx xallocx sallocx dallocx sdallocx nallocx mallctl mallctlnametomib mallctlbymib malloc_stats_print malloc_usable_size"
ac_fn_c_check_func "$LINENO" "memalign" "ac_cv_func_memalign"
if test "x$ac_cv_func_memalign" = xyes; then :
@@ -5420,26 +5756,6 @@ if test "x$ac_cv_func_valloc" = xyes; then :
fi
-# Check whether --enable-experimental was given.
-if test "${enable_experimental+set}" = set; then :
- enableval=$enable_experimental; if test "x$enable_experimental" = "xno" ; then
- enable_experimental="0"
-else
- enable_experimental="1"
-fi
-
-else
- enable_experimental="1"
-
-fi
-
-if test "x$enable_experimental" = "x1" ; then
- $as_echo "#define JEMALLOC_EXPERIMENTAL " >>confdefs.h
-
- public_syms="${public_syms} allocm dallocm nallocm rallocm sallocm"
-fi
-
-
GCOV_FLAGS=
# Check whether --enable-code-coverage was given.
if test "${enable_code_coverage+set}" = set; then :
@@ -5572,6 +5888,7 @@ _ACEOF
fi
+
# Check whether --with-export was given.
if test "${with_export+set}" = set; then :
withval=$with_export; if test "x$with_export" = "xno"; then
@@ -5613,48 +5930,54 @@ install_suffix="$INSTALL_SUFFIX"
je_="je_"
-cfgoutputs_in="${srcroot}Makefile.in"
-cfgoutputs_in="${cfgoutputs_in} ${srcroot}doc/html.xsl.in"
-cfgoutputs_in="${cfgoutputs_in} ${srcroot}doc/manpages.xsl.in"
-cfgoutputs_in="${cfgoutputs_in} ${srcroot}doc/jemalloc.xml.in"
-cfgoutputs_in="${cfgoutputs_in} ${srcroot}include/jemalloc/jemalloc_macros.h.in"
-cfgoutputs_in="${cfgoutputs_in} ${srcroot}include/jemalloc/jemalloc_protos.h.in"
-cfgoutputs_in="${cfgoutputs_in} ${srcroot}include/jemalloc/internal/jemalloc_internal.h.in"
-cfgoutputs_in="${cfgoutputs_in} ${srcroot}test/test.sh.in"
-cfgoutputs_in="${cfgoutputs_in} ${srcroot}test/include/test/jemalloc_test.h.in"
+cfgoutputs_in="Makefile.in"
+cfgoutputs_in="${cfgoutputs_in} jemalloc.pc.in"
+cfgoutputs_in="${cfgoutputs_in} doc/html.xsl.in"
+cfgoutputs_in="${cfgoutputs_in} doc/manpages.xsl.in"
+cfgoutputs_in="${cfgoutputs_in} doc/jemalloc.xml.in"
+cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_macros.h.in"
+cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_protos.h.in"
+cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_typedefs.h.in"
+cfgoutputs_in="${cfgoutputs_in} include/jemalloc/internal/jemalloc_internal.h.in"
+cfgoutputs_in="${cfgoutputs_in} test/test.sh.in"
+cfgoutputs_in="${cfgoutputs_in} test/include/test/jemalloc_test.h.in"
cfgoutputs_out="Makefile"
+cfgoutputs_out="${cfgoutputs_out} jemalloc.pc"
cfgoutputs_out="${cfgoutputs_out} doc/html.xsl"
cfgoutputs_out="${cfgoutputs_out} doc/manpages.xsl"
cfgoutputs_out="${cfgoutputs_out} doc/jemalloc.xml"
cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_macros.h"
cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_protos.h"
+cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_typedefs.h"
cfgoutputs_out="${cfgoutputs_out} include/jemalloc/internal/jemalloc_internal.h"
cfgoutputs_out="${cfgoutputs_out} test/test.sh"
cfgoutputs_out="${cfgoutputs_out} test/include/test/jemalloc_test.h"
cfgoutputs_tup="Makefile"
+cfgoutputs_tup="${cfgoutputs_tup} jemalloc.pc:jemalloc.pc.in"
cfgoutputs_tup="${cfgoutputs_tup} doc/html.xsl:doc/html.xsl.in"
cfgoutputs_tup="${cfgoutputs_tup} doc/manpages.xsl:doc/manpages.xsl.in"
cfgoutputs_tup="${cfgoutputs_tup} doc/jemalloc.xml:doc/jemalloc.xml.in"
cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_macros.h:include/jemalloc/jemalloc_macros.h.in"
cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_protos.h:include/jemalloc/jemalloc_protos.h.in"
+cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_typedefs.h:include/jemalloc/jemalloc_typedefs.h.in"
cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/internal/jemalloc_internal.h"
cfgoutputs_tup="${cfgoutputs_tup} test/test.sh:test/test.sh.in"
cfgoutputs_tup="${cfgoutputs_tup} test/include/test/jemalloc_test.h:test/include/test/jemalloc_test.h.in"
-cfghdrs_in="${srcroot}include/jemalloc/jemalloc_defs.h.in"
-cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/jemalloc_internal_defs.h.in"
-cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/private_namespace.sh"
-cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/private_unnamespace.sh"
-cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/private_symbols.txt"
-cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/public_namespace.sh"
-cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/public_unnamespace.sh"
-cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/size_classes.sh"
-cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/jemalloc_rename.sh"
-cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/jemalloc_mangle.sh"
-cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/jemalloc.sh"
-cfghdrs_in="${cfghdrs_in} ${srcroot}test/include/test/jemalloc_test_defs.h.in"
+cfghdrs_in="include/jemalloc/jemalloc_defs.h.in"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/jemalloc_internal_defs.h.in"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_namespace.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_unnamespace.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_symbols.txt"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_namespace.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_unnamespace.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/size_classes.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_rename.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_mangle.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc.sh"
+cfghdrs_in="${cfghdrs_in} test/include/test/jemalloc_test_defs.h.in"
cfghdrs_out="include/jemalloc/jemalloc_defs.h"
cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc${install_suffix}.h"
@@ -5672,8 +5995,8 @@ cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/jemalloc_internal_defs.h"
cfghdrs_out="${cfghdrs_out} test/include/test/jemalloc_test_defs.h"
cfghdrs_tup="include/jemalloc/jemalloc_defs.h:include/jemalloc/jemalloc_defs.h.in"
-cfghdrs_tup="${cfghdrs_tup} include/jemalloc/internal/jemalloc_internal_defs.h:${srcroot}include/jemalloc/internal/jemalloc_internal_defs.h.in"
-cfghdrs_tup="${cfghdrs_tup} test/include/test/jemalloc_test_defs.h:${srcroot}test/include/test/jemalloc_test_defs.h.in"
+cfghdrs_tup="${cfghdrs_tup} include/jemalloc/internal/jemalloc_internal_defs.h:include/jemalloc/internal/jemalloc_internal_defs.h.in"
+cfghdrs_tup="${cfghdrs_tup} test/include/test/jemalloc_test_defs.h:test/include/test/jemalloc_test_defs.h.in"
# Check whether --enable-cc-silence was given.
if test "${enable_cc_silence+set}" = set; then :
@@ -5684,7 +6007,7 @@ else
fi
else
- enable_cc_silence="0"
+ enable_cc_silence="1"
fi
@@ -5709,6 +6032,10 @@ fi
if test "x$enable_debug" = "x1" ; then
$as_echo "#define JEMALLOC_DEBUG " >>confdefs.h
+fi
+if test "x$enable_debug" = "x1" ; then
+ $as_echo "#define JEMALLOC_DEBUG " >>confdefs.h
+
enable_ivsalloc="1"
fi
@@ -5969,9 +6296,9 @@ fi
done
if test "x$LUNWIND" = "x-lunwind" ; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking for backtrace in -lunwind" >&5
-$as_echo_n "checking for backtrace in -lunwind... " >&6; }
-if ${ac_cv_lib_unwind_backtrace+:} false; then :
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for unw_backtrace in -lunwind" >&5
+$as_echo_n "checking for unw_backtrace in -lunwind... " >&6; }
+if ${ac_cv_lib_unwind_unw_backtrace+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@@ -5985,27 +6312,27 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
#ifdef __cplusplus
extern "C"
#endif
-char backtrace ();
+char unw_backtrace ();
int
main ()
{
-return backtrace ();
+return unw_backtrace ();
;
return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
- ac_cv_lib_unwind_backtrace=yes
+ ac_cv_lib_unwind_unw_backtrace=yes
else
- ac_cv_lib_unwind_backtrace=no
+ ac_cv_lib_unwind_unw_backtrace=no
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
LIBS=$ac_check_lib_save_LIBS
fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_unwind_backtrace" >&5
-$as_echo "$ac_cv_lib_unwind_backtrace" >&6; }
-if test "x$ac_cv_lib_unwind_backtrace" = xyes; then :
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_unwind_unw_backtrace" >&5
+$as_echo "$ac_cv_lib_unwind_unw_backtrace" >&6; }
+if test "x$ac_cv_lib_unwind_unw_backtrace" = xyes; then :
LIBS="$LIBS $LUNWIND"
else
enable_prof_libunwind="0"
@@ -6168,11 +6495,6 @@ $as_echo_n "checking configured backtracing method... " >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $backtrace_method" >&5
$as_echo "$backtrace_method" >&6; }
if test "x$enable_prof" = "x1" ; then
- if test "x${force_tls}" = "x0" ; then
- as_fn_error $? "Heap profiling requires TLS" "$LINENO" 5;
- fi
- force_tls="1"
-
if test "x$abi" != "xpecoff"; then
LIBS="$LIBS -lm"
fi
@@ -6201,63 +6523,11 @@ if test "x$enable_tcache" = "x1" ; then
fi
-# Check whether --enable-mremap was given.
-if test "${enable_mremap+set}" = set; then :
- enableval=$enable_mremap; if test "x$enable_mremap" = "xno" ; then
- enable_mremap="0"
-else
- enable_mremap="1"
-fi
-
-else
- enable_mremap="0"
+if test "x${maps_coalesce}" = "x1" ; then
+ $as_echo "#define JEMALLOC_MAPS_COALESCE " >>confdefs.h
fi
-if test "x$enable_mremap" = "x1" ; then
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether mremap(...MREMAP_FIXED...) is compilable" >&5
-$as_echo_n "checking whether mremap(...MREMAP_FIXED...) is compilable... " >&6; }
-if ${je_cv_mremap_fixed+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-#define _GNU_SOURCE
-#include <sys/mman.h>
-
-int
-main ()
-{
-
-void *p = mremap((void *)0, 0, 0, MREMAP_MAYMOVE|MREMAP_FIXED, (void *)0);
-
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_link "$LINENO"; then :
- je_cv_mremap_fixed=yes
-else
- je_cv_mremap_fixed=no
-fi
-rm -f core conftest.err conftest.$ac_objext \
- conftest$ac_exeext conftest.$ac_ext
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_mremap_fixed" >&5
-$as_echo "$je_cv_mremap_fixed" >&6; }
-
- if test "x${je_cv_mremap_fixed}" = "xno" ; then
- enable_mremap="0"
- fi
-fi
-if test "x$enable_mremap" = "x1" ; then
- $as_echo "#define JEMALLOC_MREMAP " >>confdefs.h
-
-fi
-
-
# Check whether --enable-munmap was given.
if test "${enable_munmap+set}" = set; then :
enableval=$enable_munmap; if test "x$enable_munmap" = "xno" ; then
@@ -6277,19 +6547,7 @@ if test "x$enable_munmap" = "x1" ; then
fi
-# Check whether --enable-dss was given.
-if test "${enable_dss+set}" = set; then :
- enableval=$enable_dss; if test "x$enable_dss" = "xno" ; then
- enable_dss="0"
-else
- enable_dss="1"
-fi
-
-else
- enable_dss="0"
-
-fi
-
+have_dss="1"
ac_fn_c_check_func "$LINENO" "sbrk" "ac_cv_func_sbrk"
if test "x$ac_cv_func_sbrk" = xyes; then :
have_sbrk="1"
@@ -6298,24 +6556,20 @@ else
fi
if test "x$have_sbrk" = "x1" ; then
- if test "x$sbrk_deprecated" == "x1" ; then
+ if test "x$sbrk_deprecated" = "x1" ; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: Disabling dss allocation because sbrk is deprecated" >&5
$as_echo "Disabling dss allocation because sbrk is deprecated" >&6; }
- enable_dss="0"
- else
- $as_echo "#define JEMALLOC_HAVE_SBRK " >>confdefs.h
-
+ have_dss="0"
fi
else
- enable_dss="0"
+ have_dss="0"
fi
-if test "x$enable_dss" = "x1" ; then
+if test "x$have_dss" = "x1" ; then
$as_echo "#define JEMALLOC_DSS " >>confdefs.h
fi
-
# Check whether --enable-fill was given.
if test "${enable_fill+set}" = set; then :
enableval=$enable_fill; if test "x$enable_fill" = "xno" ; then
@@ -6471,16 +6725,159 @@ if test "x$enable_xmalloc" = "x1" ; then
fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking STATIC_PAGE_SHIFT" >&5
-$as_echo_n "checking STATIC_PAGE_SHIFT... " >&6; }
-if ${je_cv_static_page_shift+:} false; then :
+# Check whether --enable-cache-oblivious was given.
+if test "${enable_cache_oblivious+set}" = set; then :
+ enableval=$enable_cache_oblivious; if test "x$enable_cache_oblivious" = "xno" ; then
+ enable_cache_oblivious="0"
+else
+ enable_cache_oblivious="1"
+fi
+
+else
+ enable_cache_oblivious="1"
+
+fi
+
+if test "x$enable_cache_oblivious" = "x1" ; then
+ $as_echo "#define JEMALLOC_CACHE_OBLIVIOUS " >>confdefs.h
+
+fi
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program using __builtin_ffsl is compilable" >&5
+$as_echo_n "checking whether a program using __builtin_ffsl is compilable... " >&6; }
+if ${je_cv_gcc_builtin_ffsl+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+#include <stdio.h>
+#include <strings.h>
+#include <string.h>
+
+int
+main ()
+{
+
+ {
+ int rv = __builtin_ffsl(0x08);
+ printf("%d\n", rv);
+ }
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_gcc_builtin_ffsl=yes
+else
+ je_cv_gcc_builtin_ffsl=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_gcc_builtin_ffsl" >&5
+$as_echo "$je_cv_gcc_builtin_ffsl" >&6; }
+
+if test "x${je_cv_gcc_builtin_ffsl}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl" >>confdefs.h
+
+ $as_echo "#define JEMALLOC_INTERNAL_FFS __builtin_ffs" >>confdefs.h
+
+else
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program using ffsl is compilable" >&5
+$as_echo_n "checking whether a program using ffsl is compilable... " >&6; }
+if ${je_cv_function_ffsl+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+ #include <stdio.h>
+ #include <strings.h>
+ #include <string.h>
+
+int
+main ()
+{
+
+ {
+ int rv = ffsl(0x08);
+ printf("%d\n", rv);
+ }
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_function_ffsl=yes
+else
+ je_cv_function_ffsl=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_function_ffsl" >&5
+$as_echo "$je_cv_function_ffsl" >&6; }
+
+ if test "x${je_cv_function_ffsl}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_INTERNAL_FFSL ffsl" >>confdefs.h
+
+ $as_echo "#define JEMALLOC_INTERNAL_FFS ffs" >>confdefs.h
+
+ else
+ as_fn_error $? "Cannot build without ffsl(3) or __builtin_ffsl()" "$LINENO" 5
+ fi
+fi
+
+
+# Check whether --with-lg_tiny_min was given.
+if test "${with_lg_tiny_min+set}" = set; then :
+ withval=$with_lg_tiny_min; LG_TINY_MIN="$with_lg_tiny_min"
+else
+ LG_TINY_MIN="3"
+fi
+
+cat >>confdefs.h <<_ACEOF
+#define LG_TINY_MIN $LG_TINY_MIN
+_ACEOF
+
+
+
+# Check whether --with-lg_quantum was given.
+if test "${with_lg_quantum+set}" = set; then :
+ withval=$with_lg_quantum; LG_QUANTA="$with_lg_quantum"
+else
+ LG_QUANTA="3 4"
+fi
+
+if test "x$with_lg_quantum" != "x" ; then
+ cat >>confdefs.h <<_ACEOF
+#define LG_QUANTUM $with_lg_quantum
+_ACEOF
+
+fi
+
+
+# Check whether --with-lg_page was given.
+if test "${with_lg_page+set}" = set; then :
+ withval=$with_lg_page; LG_PAGE="$with_lg_page"
+else
+ LG_PAGE="detect"
+fi
+
+if test "x$LG_PAGE" = "xdetect"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking LG_PAGE" >&5
+$as_echo_n "checking LG_PAGE... " >&6; }
+if ${je_cv_lg_page+:} false; then :
$as_echo_n "(cached) " >&6
else
if test "$cross_compiling" = yes; then :
- { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-as_fn_error $? "cannot run test program while cross compiling
-See \`config.log' for more details" "$LINENO" 5; }
+ je_cv_lg_page=12
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
@@ -6510,7 +6907,7 @@ main ()
if (result == -1) {
return 1;
}
- result = ffsl(result) - 1;
+ result = JEMALLOC_INTERNAL_FFSL(result) - 1;
f = fopen("conftest.out", "w");
if (f == NULL) {
@@ -6526,32 +6923,76 @@ main ()
}
_ACEOF
if ac_fn_c_try_run "$LINENO"; then :
- je_cv_static_page_shift=`cat conftest.out`
+ je_cv_lg_page=`cat conftest.out`
else
- je_cv_static_page_shift=undefined
+ je_cv_lg_page=undefined
fi
rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
conftest.$ac_objext conftest.beam conftest.$ac_ext
fi
fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_static_page_shift" >&5
-$as_echo "$je_cv_static_page_shift" >&6; }
-
-if test "x$je_cv_static_page_shift" != "xundefined"; then
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_lg_page" >&5
+$as_echo "$je_cv_lg_page" >&6; }
+fi
+if test "x${je_cv_lg_page}" != "x" ; then
+ LG_PAGE="${je_cv_lg_page}"
+fi
+if test "x${LG_PAGE}" != "xundefined" ; then
cat >>confdefs.h <<_ACEOF
-#define STATIC_PAGE_SHIFT $je_cv_static_page_shift
+#define LG_PAGE $LG_PAGE
_ACEOF
else
- as_fn_error $? "cannot determine value for STATIC_PAGE_SHIFT" "$LINENO" 5
+ as_fn_error $? "cannot determine value for LG_PAGE" "$LINENO" 5
fi
-if test -d "${srcroot}.git" ; then
- git describe --long --abbrev=40 > ${srcroot}VERSION
+# Check whether --with-lg_page_sizes was given.
+if test "${with_lg_page_sizes+set}" = set; then :
+ withval=$with_lg_page_sizes; LG_PAGE_SIZES="$with_lg_page_sizes"
+else
+ LG_PAGE_SIZES="$LG_PAGE"
fi
-jemalloc_version=`cat ${srcroot}VERSION`
+
+
+
+# Check whether --with-lg_size_class_group was given.
+if test "${with_lg_size_class_group+set}" = set; then :
+ withval=$with_lg_size_class_group; LG_SIZE_CLASS_GROUP="$with_lg_size_class_group"
+else
+ LG_SIZE_CLASS_GROUP="2"
+fi
+
+
+
+if test "x`test ! \"${srcroot}\" && cd \"${srcroot}\"; git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then
+ rm -f "${objroot}VERSION"
+ for pattern in '[0-9].[0-9].[0-9]' '[0-9].[0-9].[0-9][0-9]' \
+ '[0-9].[0-9][0-9].[0-9]' '[0-9].[0-9][0-9].[0-9][0-9]' \
+ '[0-9][0-9].[0-9].[0-9]' '[0-9][0-9].[0-9].[0-9][0-9]' \
+ '[0-9][0-9].[0-9][0-9].[0-9]' \
+ '[0-9][0-9].[0-9][0-9].[0-9][0-9]'; do
+ if test ! -e "${objroot}VERSION" ; then
+ (test ! "${srcroot}" && cd "${srcroot}"; git describe --long --abbrev=40 --match="${pattern}") > "${objroot}VERSION.tmp" 2>/dev/null
+ if test $? -eq 0 ; then
+ mv "${objroot}VERSION.tmp" "${objroot}VERSION"
+ break
+ fi
+ fi
+ done
+fi
+rm -f "${objroot}VERSION.tmp"
+if test ! -e "${objroot}VERSION" ; then
+ if test ! -e "${srcroot}VERSION" ; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: Missing VERSION file, and unable to generate it; creating bogus VERSION" >&5
+$as_echo "Missing VERSION file, and unable to generate it; creating bogus VERSION" >&6; }
+ echo "0.0.0-0-g0000000000000000000000000000000000000000" > "${objroot}VERSION"
+ else
+ cp ${srcroot}VERSION ${objroot}VERSION
+ fi
+fi
+jemalloc_version=`cat "${objroot}VERSION"`
jemalloc_version_major=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $1}'`
jemalloc_version_minor=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $2}'`
jemalloc_version_bugfix=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $3}'`
@@ -6683,6 +7124,93 @@ fi
CPPFLAGS="$CPPFLAGS -D_REENTRANT"
+SAVED_LIBS="${LIBS}"
+LIBS=
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing clock_gettime" >&5
+$as_echo_n "checking for library containing clock_gettime... " >&6; }
+if ${ac_cv_search_clock_gettime+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_func_search_save_LIBS=$LIBS
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char clock_gettime ();
+int
+main ()
+{
+return clock_gettime ();
+ ;
+ return 0;
+}
+_ACEOF
+for ac_lib in '' rt; do
+ if test -z "$ac_lib"; then
+ ac_res="none required"
+ else
+ ac_res=-l$ac_lib
+ LIBS="-l$ac_lib $ac_func_search_save_LIBS"
+ fi
+ if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_search_clock_gettime=$ac_res
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext
+ if ${ac_cv_search_clock_gettime+:} false; then :
+ break
+fi
+done
+if ${ac_cv_search_clock_gettime+:} false; then :
+
+else
+ ac_cv_search_clock_gettime=no
+fi
+rm conftest.$ac_ext
+LIBS=$ac_func_search_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_clock_gettime" >&5
+$as_echo "$ac_cv_search_clock_gettime" >&6; }
+ac_res=$ac_cv_search_clock_gettime
+if test "$ac_res" != no; then :
+ test "$ac_res" = "none required" || LIBS="$ac_res $LIBS"
+ TESTLIBS="${LIBS}"
+fi
+
+
+LIBS="${SAVED_LIBS}"
+
+ac_fn_c_check_func "$LINENO" "secure_getenv" "ac_cv_func_secure_getenv"
+if test "x$ac_cv_func_secure_getenv" = xyes; then :
+ have_secure_getenv="1"
+else
+ have_secure_getenv="0"
+
+fi
+
+if test "x$have_secure_getenv" = "x1" ; then
+ $as_echo "#define JEMALLOC_HAVE_SECURE_GETENV " >>confdefs.h
+
+fi
+
+ac_fn_c_check_func "$LINENO" "issetugid" "ac_cv_func_issetugid"
+if test "x$ac_cv_func_issetugid" = xyes; then :
+ have_issetugid="1"
+else
+ have_issetugid="0"
+
+fi
+
+if test "x$have_issetugid" = "x1" ; then
+ $as_echo "#define JEMALLOC_HAVE_ISSETUGID " >>confdefs.h
+
+fi
+
ac_fn_c_check_func "$LINENO" "_malloc_thread_cleanup" "ac_cv_func__malloc_thread_cleanup"
if test "x$ac_cv_func__malloc_thread_cleanup" = xyes; then :
have__malloc_thread_cleanup="1"
@@ -6719,11 +7247,11 @@ else
fi
else
- enable_lazy_lock="0"
+ enable_lazy_lock=""
fi
-if test "x$enable_lazy_lock" = "x0" -a "x${force_lazy_lock}" = "x1" ; then
+if test "x$enable_lazy_lock" = "x" -a "x${force_lazy_lock}" = "x1" ; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing lazy-lock to avoid allocator/threading bootstrap issues" >&5
$as_echo "Forcing lazy-lock to avoid allocator/threading bootstrap issues" >&6; }
enable_lazy_lock="1"
@@ -6796,6 +7324,8 @@ fi
fi
$as_echo "#define JEMALLOC_LAZY_LOCK " >>confdefs.h
+else
+ enable_lazy_lock="0"
fi
@@ -6808,19 +7338,22 @@ else
fi
else
- enable_tls="1"
+ enable_tls=""
fi
-if test "x${enable_tls}" = "x0" -a "x${force_tls}" = "x1" ; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing TLS to avoid allocator/threading bootstrap issues" >&5
+if test "x${enable_tls}" = "x" ; then
+ if test "x${force_tls}" = "x1" ; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing TLS to avoid allocator/threading bootstrap issues" >&5
$as_echo "Forcing TLS to avoid allocator/threading bootstrap issues" >&6; }
- enable_tls="1"
-fi
-if test "x${enable_tls}" = "x1" -a "x${force_tls}" = "x0" ; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing no TLS to avoid allocator/threading bootstrap issues" >&5
+ enable_tls="1"
+ elif test "x${force_tls}" = "x0" ; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing no TLS to avoid allocator/threading bootstrap issues" >&5
$as_echo "Forcing no TLS to avoid allocator/threading bootstrap issues" >&6; }
- enable_tls="0"
+ enable_tls="0"
+ else
+ enable_tls="1"
+ fi
fi
if test "x${enable_tls}" = "x1" ; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for TLS" >&5
@@ -6851,56 +7384,69 @@ $as_echo "no" >&6; }
enable_tls="0"
fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+else
+ enable_tls="0"
fi
if test "x${enable_tls}" = "x1" ; then
+ if test "x${force_tls}" = "x0" ; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: TLS enabled despite being marked unusable on this platform" >&5
+$as_echo "$as_me: WARNING: TLS enabled despite being marked unusable on this platform" >&2;}
+ fi
cat >>confdefs.h <<_ACEOF
#define JEMALLOC_TLS
_ACEOF
elif test "x${force_tls}" = "x1" ; then
- as_fn_error $? "Failed to configure TLS, which is mandatory for correct function" "$LINENO" 5
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: TLS disabled despite being marked critical on this platform" >&5
+$as_echo "$as_me: WARNING: TLS disabled despite being marked critical on this platform" >&2;}
fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program using ffsl is compilable" >&5
-$as_echo_n "checking whether a program using ffsl is compilable... " >&6; }
-if ${je_cv_function_ffsl+:} false; then :
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether C11 atomics is compilable" >&5
+$as_echo_n "checking whether C11 atomics is compilable... " >&6; }
+if ${je_cv_c11atomics+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
-#include <stdio.h>
-#include <strings.h>
-#include <string.h>
+#include <stdint.h>
+#if (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__)
+#include <stdatomic.h>
+#else
+#error Atomics not available
+#endif
int
main ()
{
- {
- int rv = ffsl(0x08);
- printf("%d\n", rv);
- }
+ uint64_t *p = (uint64_t *)0;
+ uint64_t x = 1;
+ volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
+ uint64_t r = atomic_fetch_add(a, x) + x;
+ return (r == 0);
;
return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
- je_cv_function_ffsl=yes
+ je_cv_c11atomics=yes
else
- je_cv_function_ffsl=no
+ je_cv_c11atomics=no
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_function_ffsl" >&5
-$as_echo "$je_cv_function_ffsl" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_c11atomics" >&5
+$as_echo "$je_cv_c11atomics" >&6; }
+
+if test "x${je_cv_c11atomics}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_C11ATOMICS 1" >>confdefs.h
-if test "x${je_cv_function_ffsl}" != "xyes" ; then
- as_fn_error $? "Cannot build without ffsl(3)" "$LINENO" 5
fi
@@ -7002,6 +7548,46 @@ fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether madvise(2) is compilable" >&5
+$as_echo_n "checking whether madvise(2) is compilable... " >&6; }
+if ${je_cv_madvise+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+#include <sys/mman.h>
+
+int
+main ()
+{
+
+ {
+ madvise((void *)0, 0, 0);
+ }
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_madvise=yes
+else
+ je_cv_madvise=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_madvise" >&5
+$as_echo "$je_cv_madvise" >&6; }
+
+if test "x${je_cv_madvise}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_HAVE_MADVISE " >>confdefs.h
+
+fi
+
+
+
if test "x${je_cv_atomic9}" != "xyes" -a "x${je_cv_osatomic}" != "xyes" ; then
@@ -7097,6 +7683,48 @@ $as_echo "$je_cv_sync_compare_and_swap_8" >&6; }
fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __builtin_clz" >&5
+$as_echo_n "checking for __builtin_clz... " >&6; }
+if ${je_cv_builtin_clz+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ {
+ unsigned x = 0;
+ int y = __builtin_clz(x);
+ }
+ {
+ unsigned long x = 0;
+ int y = __builtin_clzl(x);
+ }
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_builtin_clz=yes
+else
+ je_cv_builtin_clz=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_builtin_clz" >&5
+$as_echo "$je_cv_builtin_clz" >&6; }
+
+if test "x${je_cv_builtin_clz}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_HAVE_BUILTIN_CLZ " >>confdefs.h
+
+fi
+
+
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether Darwin OSSpin*() is compilable" >&5
$as_echo_n "checking whether Darwin OSSpin*() is compilable... " >&6; }
@@ -7160,8 +7788,6 @@ if test "x${enable_zone_allocator}" = "x1" ; then
if test "x${abi}" != "xmacho"; then
as_fn_error $? "--enable-zone-allocator is only supported on Darwin" "$LINENO" 5
fi
- $as_echo "#define JEMALLOC_IVSALLOC " >>confdefs.h
-
$as_echo "#define JEMALLOC_ZONE " >>confdefs.h
@@ -7175,7 +7801,7 @@ $as_echo_n "checking malloc zone version... " >&6; }
int
main ()
{
-static foo[sizeof(malloc_zone_t) == sizeof(void *) * 14 ? 1 : -1]
+static int foo[sizeof(malloc_zone_t) == sizeof(void *) * 14 ? 1 : -1]
;
return 0;
@@ -7191,7 +7817,7 @@ else
int
main ()
{
-static foo[sizeof(malloc_zone_t) == sizeof(void *) * 15 ? 1 : -1]
+static int foo[sizeof(malloc_zone_t) == sizeof(void *) * 15 ? 1 : -1]
;
return 0;
@@ -7207,7 +7833,7 @@ else
int
main ()
{
-static foo[sizeof(malloc_zone_t) == sizeof(void *) * 16 ? 1 : -1]
+static int foo[sizeof(malloc_zone_t) == sizeof(void *) * 16 ? 1 : -1]
;
return 0;
@@ -7221,7 +7847,7 @@ if ac_fn_c_try_compile "$LINENO"; then :
int
main ()
{
-static foo[sizeof(malloc_introspection_t) == sizeof(void *) * 9 ? 1 : -1]
+static int foo[sizeof(malloc_introspection_t) == sizeof(void *) * 9 ? 1 : -1]
;
return 0;
@@ -7237,7 +7863,7 @@ else
int
main ()
{
-static foo[sizeof(malloc_introspection_t) == sizeof(void *) * 13 ? 1 : -1]
+static int foo[sizeof(malloc_introspection_t) == sizeof(void *) * 13 ? 1 : -1]
;
return 0;
@@ -7260,7 +7886,7 @@ else
int
main ()
{
-static foo[sizeof(malloc_zone_t) == sizeof(void *) * 17 ? 1 : -1]
+static int foo[sizeof(malloc_zone_t) == sizeof(void *) * 17 ? 1 : -1]
;
return 0;
@@ -7276,7 +7902,7 @@ else
int
main ()
{
-static foo[sizeof(malloc_zone_t) > sizeof(void *) * 17 ? 1 : -1]
+static int foo[sizeof(malloc_zone_t) > sizeof(void *) * 17 ? 1 : -1]
;
return 0;
@@ -7316,6 +7942,131 @@ _ACEOF
fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether glibc malloc hook is compilable" >&5
+$as_echo_n "checking whether glibc malloc hook is compilable... " >&6; }
+if ${je_cv_glibc_malloc_hook+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+#include <stddef.h>
+
+extern void (* __free_hook)(void *ptr);
+extern void *(* __malloc_hook)(size_t size);
+extern void *(* __realloc_hook)(void *ptr, size_t size);
+
+int
+main ()
+{
+
+ void *ptr = 0L;
+ if (__malloc_hook) ptr = __malloc_hook(1);
+ if (__realloc_hook) ptr = __realloc_hook(ptr, 2);
+ if (__free_hook && ptr) __free_hook(ptr);
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_glibc_malloc_hook=yes
+else
+ je_cv_glibc_malloc_hook=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_glibc_malloc_hook" >&5
+$as_echo "$je_cv_glibc_malloc_hook" >&6; }
+
+if test "x${je_cv_glibc_malloc_hook}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_GLIBC_MALLOC_HOOK " >>confdefs.h
+
+fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether glibc memalign hook is compilable" >&5
+$as_echo_n "checking whether glibc memalign hook is compilable... " >&6; }
+if ${je_cv_glibc_memalign_hook+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+#include <stddef.h>
+
+extern void *(* __memalign_hook)(size_t alignment, size_t size);
+
+int
+main ()
+{
+
+ void *ptr = 0L;
+ if (__memalign_hook) ptr = __memalign_hook(16, 7);
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_glibc_memalign_hook=yes
+else
+ je_cv_glibc_memalign_hook=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_glibc_memalign_hook" >&5
+$as_echo "$je_cv_glibc_memalign_hook" >&6; }
+
+if test "x${je_cv_glibc_memalign_hook}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_GLIBC_MEMALIGN_HOOK " >>confdefs.h
+
+fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthreads adaptive mutexes is compilable" >&5
+$as_echo_n "checking whether pthreads adaptive mutexes is compilable... " >&6; }
+if ${je_cv_pthread_mutex_adaptive_np+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+#include <pthread.h>
+
+int
+main ()
+{
+
+ pthread_mutexattr_t attr;
+ pthread_mutexattr_init(&attr);
+ pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
+ pthread_mutexattr_destroy(&attr);
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ je_cv_pthread_mutex_adaptive_np=yes
+else
+ je_cv_pthread_mutex_adaptive_np=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pthread_mutex_adaptive_np" >&5
+$as_echo "$je_cv_pthread_mutex_adaptive_np" >&6; }
+
+if test "x${je_cv_pthread_mutex_adaptive_np}" = "xyes" ; then
+ $as_echo "#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP " >>confdefs.h
+
+fi
+
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for stdbool.h that conforms to C99" >&5
$as_echo_n "checking for stdbool.h that conforms to C99... " >&6; }
if ${ac_cv_header_stdbool_h+:} false; then :
@@ -7440,7 +8191,7 @@ ac_config_headers="$ac_config_headers $cfghdrs_tup"
-ac_config_files="$ac_config_files $cfgoutputs_tup config.stamp bin/jemalloc.sh"
+ac_config_files="$ac_config_files $cfgoutputs_tup config.stamp bin/jemalloc-config bin/jemalloc.sh bin/jeprof"
@@ -8158,8 +8909,13 @@ cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
objroot="${objroot}"
+ SHELL="${SHELL}"
srcdir="${srcdir}"
objroot="${objroot}"
+ LG_QUANTA="${LG_QUANTA}"
+ LG_TINY_MIN=${LG_TINY_MIN}
+ LG_PAGE_SIZES="${LG_PAGE_SIZES}"
+ LG_SIZE_CLASS_GROUP=${LG_SIZE_CLASS_GROUP}
srcdir="${srcdir}"
@@ -8205,7 +8961,9 @@ do
"$cfghdrs_tup") CONFIG_HEADERS="$CONFIG_HEADERS $cfghdrs_tup" ;;
"$cfgoutputs_tup") CONFIG_FILES="$CONFIG_FILES $cfgoutputs_tup" ;;
"config.stamp") CONFIG_FILES="$CONFIG_FILES config.stamp" ;;
+ "bin/jemalloc-config") CONFIG_FILES="$CONFIG_FILES bin/jemalloc-config" ;;
"bin/jemalloc.sh") CONFIG_FILES="$CONFIG_FILES bin/jemalloc.sh" ;;
+ "bin/jeprof") CONFIG_FILES="$CONFIG_FILES bin/jeprof" ;;
*) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
esac
@@ -8795,7 +9553,7 @@ $as_echo "$as_me: executing $ac_file commands" >&6;}
;;
"include/jemalloc/internal/size_classes.h":C)
mkdir -p "${objroot}include/jemalloc/internal"
- "${srcdir}/include/jemalloc/internal/size_classes.sh" > "${objroot}include/jemalloc/internal/size_classes.h"
+ "${SHELL}" "${srcdir}/include/jemalloc/internal/size_classes.sh" "${LG_QUANTA}" ${LG_TINY_MIN} "${LG_PAGE_SIZES}" ${LG_SIZE_CLASS_GROUP} > "${objroot}include/jemalloc/internal/size_classes.h"
;;
"include/jemalloc/jemalloc_protos_jet.h":C)
mkdir -p "${objroot}include/jemalloc"
@@ -8864,18 +9622,22 @@ $as_echo "jemalloc version : ${jemalloc_version}" >&6; }
$as_echo "library revision : ${rev}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5
$as_echo "" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CONFIG : ${CONFIG}" >&5
+$as_echo "CONFIG : ${CONFIG}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CC : ${CC}" >&5
$as_echo "CC : ${CC}" >&6; }
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CPPFLAGS : ${CPPFLAGS}" >&5
-$as_echo "CPPFLAGS : ${CPPFLAGS}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CFLAGS : ${CFLAGS}" >&5
$as_echo "CFLAGS : ${CFLAGS}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CPPFLAGS : ${CPPFLAGS}" >&5
+$as_echo "CPPFLAGS : ${CPPFLAGS}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: LDFLAGS : ${LDFLAGS}" >&5
$as_echo "LDFLAGS : ${LDFLAGS}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}" >&5
$as_echo "EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: LIBS : ${LIBS}" >&5
$as_echo "LIBS : ${LIBS}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: TESTLIBS : ${TESTLIBS}" >&5
+$as_echo "TESTLIBS : ${TESTLIBS}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: RPATH_EXTRA : ${RPATH_EXTRA}" >&5
$as_echo "RPATH_EXTRA : ${RPATH_EXTRA}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5
@@ -8890,12 +9652,12 @@ $as_echo "" >&6; }
$as_echo "PREFIX : ${PREFIX}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: BINDIR : ${BINDIR}" >&5
$as_echo "BINDIR : ${BINDIR}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: DATADIR : ${DATADIR}" >&5
+$as_echo "DATADIR : ${DATADIR}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: INCLUDEDIR : ${INCLUDEDIR}" >&5
$as_echo "INCLUDEDIR : ${INCLUDEDIR}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: LIBDIR : ${LIBDIR}" >&5
$as_echo "LIBDIR : ${LIBDIR}" >&6; }
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: DATADIR : ${DATADIR}" >&5
-$as_echo "DATADIR : ${DATADIR}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: MANDIR : ${MANDIR}" >&5
$as_echo "MANDIR : ${MANDIR}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5
@@ -8920,8 +9682,6 @@ $as_echo " : ${JEMALLOC_PRIVATE_NAMESPACE}" >&6; }
$as_echo "install_suffix : ${install_suffix}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: autogen : ${enable_autogen}" >&5
$as_echo "autogen : ${enable_autogen}" >&6; }
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: experimental : ${enable_experimental}" >&5
-$as_echo "experimental : ${enable_experimental}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: cc-silence : ${enable_cc_silence}" >&5
$as_echo "cc-silence : ${enable_cc_silence}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: debug : ${enable_debug}" >&5
@@ -8948,15 +9708,13 @@ $as_echo "utrace : ${enable_utrace}" >&6; }
$as_echo "valgrind : ${enable_valgrind}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: xmalloc : ${enable_xmalloc}" >&5
$as_echo "xmalloc : ${enable_xmalloc}" >&6; }
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: mremap : ${enable_mremap}" >&5
-$as_echo "mremap : ${enable_mremap}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: munmap : ${enable_munmap}" >&5
$as_echo "munmap : ${enable_munmap}" >&6; }
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: dss : ${enable_dss}" >&5
-$as_echo "dss : ${enable_dss}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: lazy_lock : ${enable_lazy_lock}" >&5
$as_echo "lazy_lock : ${enable_lazy_lock}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: tls : ${enable_tls}" >&5
$as_echo "tls : ${enable_tls}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: cache-oblivious : ${enable_cache_oblivious}" >&5
+$as_echo "cache-oblivious : ${enable_cache_oblivious}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ===============================================================================" >&5
$as_echo "===============================================================================" >&6; }
diff --git a/deps/jemalloc/configure.ac b/deps/jemalloc/configure.ac
index 4de81dc1d..7a1290e0d 100644
--- a/deps/jemalloc/configure.ac
+++ b/deps/jemalloc/configure.ac
@@ -43,8 +43,11 @@ AC_CACHE_CHECK([whether $1 is compilable],
dnl ============================================================================
+CONFIG=`echo ${ac_configure_args} | sed -e 's#'"'"'\([^ ]*\)'"'"'#\1#g'`
+AC_SUBST([CONFIG])
+
dnl Library revision.
-rev=1
+rev=2
AC_SUBST([rev])
srcroot=$srcdir
@@ -134,6 +137,7 @@ if test "x$CFLAGS" = "x" ; then
AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT])
fi
JE_CFLAGS_APPEND([-Wall])
+ JE_CFLAGS_APPEND([-Werror=declaration-after-statement])
JE_CFLAGS_APPEND([-pipe])
JE_CFLAGS_APPEND([-g3])
elif test "x$je_cv_msvc" = "xyes" ; then
@@ -141,7 +145,8 @@ if test "x$CFLAGS" = "x" ; then
JE_CFLAGS_APPEND([-Zi])
JE_CFLAGS_APPEND([-MT])
JE_CFLAGS_APPEND([-W3])
- CPPFLAGS="$CPPFLAGS -I${srcroot}/include/msvc_compat"
+ JE_CFLAGS_APPEND([-FS])
+ CPPFLAGS="$CPPFLAGS -I${srcdir}/include/msvc_compat"
fi
fi
dnl Append EXTRA_CFLAGS to CFLAGS, if defined.
@@ -155,6 +160,10 @@ if test "x${ac_cv_big_endian}" = "x1" ; then
AC_DEFINE_UNQUOTED([JEMALLOC_BIG_ENDIAN], [ ])
fi
+if test "x${je_cv_msvc}" = "xyes" -a "x${ac_cv_header_inttypes_h}" = "xno"; then
+ CPPFLAGS="$CPPFLAGS -I${srcdir}/include/msvc_compat/C99"
+fi
+
AC_CHECK_SIZEOF([void *])
if test "x${ac_cv_sizeof_void_p}" = "x8" ; then
LG_SIZEOF_PTR=3
@@ -201,23 +210,14 @@ AC_CANONICAL_HOST
dnl CPU-specific settings.
CPU_SPINWAIT=""
case "${host_cpu}" in
- i[[345]]86)
- ;;
i686|x86_64)
- JE_COMPILABLE([pause instruction], [],
- [[__asm__ volatile("pause"); return 0;]],
- [je_cv_pause])
+ AC_CACHE_VAL([je_cv_pause],
+ [JE_COMPILABLE([pause instruction], [],
+ [[__asm__ volatile("pause"); return 0;]],
+ [je_cv_pause])])
if test "x${je_cv_pause}" = "xyes" ; then
CPU_SPINWAIT='__asm__ volatile("pause")'
fi
- dnl emmintrin.h fails to compile unless MMX, SSE, and SSE2 are
- dnl supported.
- JE_COMPILABLE([SSE2 intrinsics], [
-#include <emmintrin.h>
-], [], [je_cv_sse2])
- if test "x${je_cv_sse2}" = "xyes" ; then
- AC_DEFINE_UNQUOTED([HAVE_SSE2], [ ])
- fi
;;
powerpc)
AC_DEFINE_UNQUOTED([HAVE_ALTIVEC], [ ])
@@ -258,9 +258,9 @@ dnl Define cpp macros in CPPFLAGS, rather than doing AC_DEFINE(macro), since the
dnl definitions need to be seen before any headers are included, which is a pain
dnl to make happen otherwise.
default_munmap="1"
-JEMALLOC_USABLE_SIZE_CONST="const"
+maps_coalesce="1"
case "${host}" in
- *-*-darwin*)
+ *-*-darwin* | *-*-ios*)
CFLAGS="$CFLAGS"
abi="macho"
AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
@@ -269,7 +269,7 @@ case "${host}" in
so="dylib"
importlib="${so}"
force_tls="0"
- DSO_LDFLAGS='-shared -Wl,-dylib_install_name,$(@F)'
+ DSO_LDFLAGS='-shared -Wl,-install_name,$(LIBDIR)/$(@F)'
SOREV="${rev}.${so}"
sbrk_deprecated="1"
;;
@@ -279,6 +279,22 @@ case "${host}" in
AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
force_lazy_lock="1"
;;
+ *-*-dragonfly*)
+ CFLAGS="$CFLAGS"
+ abi="elf"
+ AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
+ ;;
+ *-*-openbsd*)
+ CFLAGS="$CFLAGS"
+ abi="elf"
+ AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
+ force_tls="0"
+ ;;
+ *-*-bitrig*)
+ CFLAGS="$CFLAGS"
+ abi="elf"
+ AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
+ ;;
*-*-linux*)
CFLAGS="$CFLAGS"
CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE"
@@ -286,7 +302,7 @@ case "${host}" in
AC_DEFINE([JEMALLOC_HAS_ALLOCA_H])
AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED], [ ])
AC_DEFINE([JEMALLOC_THREADED_INIT], [ ])
- JEMALLOC_USABLE_SIZE_CONST=""
+ AC_DEFINE([JEMALLOC_USE_CXX_THROW], [ ])
default_munmap="0"
;;
*-*-netbsd*)
@@ -322,9 +338,11 @@ case "${host}" in
fi
abi="xcoff"
;;
- *-*-mingw*)
+ *-*-mingw* | *-*-cygwin*)
abi="pecoff"
force_tls="0"
+ force_lazy_lock="1"
+ maps_coalesce="0"
RPATH=""
so="dll"
if test "x$je_cv_msvc" = "xyes" ; then
@@ -351,6 +369,22 @@ case "${host}" in
abi="elf"
;;
esac
+
+JEMALLOC_USABLE_SIZE_CONST=const
+AC_CHECK_HEADERS([malloc.h], [
+ AC_MSG_CHECKING([whether malloc_usable_size definition can use const argument])
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
+ [#include <malloc.h>
+ #include <stddef.h>
+ size_t malloc_usable_size(const void *ptr);
+ ],
+ [])],[
+ AC_MSG_RESULT([yes])
+ ],[
+ JEMALLOC_USABLE_SIZE_CONST=
+ AC_MSG_RESULT([no])
+ ])
+])
AC_DEFINE_UNQUOTED([JEMALLOC_USABLE_SIZE_CONST], [$JEMALLOC_USABLE_SIZE_CONST])
AC_SUBST([abi])
AC_SUBST([RPATH])
@@ -387,7 +421,7 @@ SAVED_CFLAGS="${CFLAGS}"
JE_CFLAGS_APPEND([-Werror])
JE_COMPILABLE([tls_model attribute], [],
[static __thread int
- __attribute__((tls_model("initial-exec"))) foo;
+ __attribute__((tls_model("initial-exec"), unused)) foo;
foo = 0;],
[je_cv_tls_model])
CFLAGS="${SAVED_CFLAGS}"
@@ -397,6 +431,36 @@ if test "x${je_cv_tls_model}" = "xyes" ; then
else
AC_DEFINE([JEMALLOC_TLS_MODEL], [ ])
fi
+dnl Check for alloc_size attribute support.
+SAVED_CFLAGS="${CFLAGS}"
+JE_CFLAGS_APPEND([-Werror])
+JE_COMPILABLE([alloc_size attribute], [#include <stdlib.h>],
+ [void *foo(size_t size) __attribute__((alloc_size(1)));],
+ [je_cv_alloc_size])
+CFLAGS="${SAVED_CFLAGS}"
+if test "x${je_cv_alloc_size}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_ATTR_ALLOC_SIZE], [ ])
+fi
+dnl Check for format(gnu_printf, ...) attribute support.
+SAVED_CFLAGS="${CFLAGS}"
+JE_CFLAGS_APPEND([-Werror])
+JE_COMPILABLE([format(gnu_printf, ...) attribute], [#include <stdlib.h>],
+ [void *foo(const char *format, ...) __attribute__((format(gnu_printf, 1, 2)));],
+ [je_cv_format_gnu_printf])
+CFLAGS="${SAVED_CFLAGS}"
+if test "x${je_cv_format_gnu_printf}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF], [ ])
+fi
+dnl Check for format(printf, ...) attribute support.
+SAVED_CFLAGS="${CFLAGS}"
+JE_CFLAGS_APPEND([-Werror])
+JE_COMPILABLE([format(printf, ...) attribute], [#include <stdlib.h>],
+ [void *foo(const char *format, ...) __attribute__((format(printf, 1, 2)));],
+ [je_cv_format_printf])
+CFLAGS="${SAVED_CFLAGS}"
+if test "x${je_cv_format_printf}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_PRINTF], [ ])
+fi
dnl Support optional additions to rpath.
AC_ARG_WITH([rpath],
@@ -428,7 +492,7 @@ AC_PROG_RANLIB
AC_PATH_PROG([LD], [ld], [false], [$PATH])
AC_PATH_PROG([AUTOCONF], [autoconf], [false], [$PATH])
-public_syms="malloc_conf malloc_message malloc calloc posix_memalign aligned_alloc realloc free mallocx rallocx xallocx sallocx dallocx nallocx mallctl mallctlnametomib mallctlbymib malloc_stats_print malloc_usable_size"
+public_syms="malloc_conf malloc_message malloc calloc posix_memalign aligned_alloc realloc free mallocx rallocx xallocx sallocx dallocx sdallocx nallocx mallctl mallctlnametomib mallctlbymib malloc_stats_print malloc_usable_size"
dnl Check for allocator-related functions that should be wrapped.
AC_CHECK_FUNC([memalign],
@@ -438,24 +502,6 @@ AC_CHECK_FUNC([valloc],
[AC_DEFINE([JEMALLOC_OVERRIDE_VALLOC], [ ])
public_syms="${public_syms} valloc"])
-dnl Support the experimental API by default.
-AC_ARG_ENABLE([experimental],
- [AS_HELP_STRING([--disable-experimental],
- [Disable support for the experimental API])],
-[if test "x$enable_experimental" = "xno" ; then
- enable_experimental="0"
-else
- enable_experimental="1"
-fi
-],
-[enable_experimental="1"]
-)
-if test "x$enable_experimental" = "x1" ; then
- AC_DEFINE([JEMALLOC_EXPERIMENTAL], [ ])
- public_syms="${public_syms} allocm dallocm nallocm rallocm sallocm"
-fi
-AC_SUBST([enable_experimental])
-
dnl Do not compute test code coverage by default.
GCOV_FLAGS=
AC_ARG_ENABLE([code-coverage],
@@ -501,6 +547,7 @@ if test "x$JEMALLOC_PREFIX" != "x" ; then
AC_DEFINE_UNQUOTED([JEMALLOC_PREFIX], ["$JEMALLOC_PREFIX"])
AC_DEFINE_UNQUOTED([JEMALLOC_CPREFIX], ["$JEMALLOC_CPREFIX"])
fi
+AC_SUBST([JEMALLOC_CPREFIX])
AC_ARG_WITH([export],
[AS_HELP_STRING([--without-export], [disable exporting jemalloc public APIs])],
@@ -533,48 +580,54 @@ dnl jemalloc_protos_jet.h easy.
je_="je_"
AC_SUBST([je_])
-cfgoutputs_in="${srcroot}Makefile.in"
-cfgoutputs_in="${cfgoutputs_in} ${srcroot}doc/html.xsl.in"
-cfgoutputs_in="${cfgoutputs_in} ${srcroot}doc/manpages.xsl.in"
-cfgoutputs_in="${cfgoutputs_in} ${srcroot}doc/jemalloc.xml.in"
-cfgoutputs_in="${cfgoutputs_in} ${srcroot}include/jemalloc/jemalloc_macros.h.in"
-cfgoutputs_in="${cfgoutputs_in} ${srcroot}include/jemalloc/jemalloc_protos.h.in"
-cfgoutputs_in="${cfgoutputs_in} ${srcroot}include/jemalloc/internal/jemalloc_internal.h.in"
-cfgoutputs_in="${cfgoutputs_in} ${srcroot}test/test.sh.in"
-cfgoutputs_in="${cfgoutputs_in} ${srcroot}test/include/test/jemalloc_test.h.in"
+cfgoutputs_in="Makefile.in"
+cfgoutputs_in="${cfgoutputs_in} jemalloc.pc.in"
+cfgoutputs_in="${cfgoutputs_in} doc/html.xsl.in"
+cfgoutputs_in="${cfgoutputs_in} doc/manpages.xsl.in"
+cfgoutputs_in="${cfgoutputs_in} doc/jemalloc.xml.in"
+cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_macros.h.in"
+cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_protos.h.in"
+cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_typedefs.h.in"
+cfgoutputs_in="${cfgoutputs_in} include/jemalloc/internal/jemalloc_internal.h.in"
+cfgoutputs_in="${cfgoutputs_in} test/test.sh.in"
+cfgoutputs_in="${cfgoutputs_in} test/include/test/jemalloc_test.h.in"
cfgoutputs_out="Makefile"
+cfgoutputs_out="${cfgoutputs_out} jemalloc.pc"
cfgoutputs_out="${cfgoutputs_out} doc/html.xsl"
cfgoutputs_out="${cfgoutputs_out} doc/manpages.xsl"
cfgoutputs_out="${cfgoutputs_out} doc/jemalloc.xml"
cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_macros.h"
cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_protos.h"
+cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_typedefs.h"
cfgoutputs_out="${cfgoutputs_out} include/jemalloc/internal/jemalloc_internal.h"
cfgoutputs_out="${cfgoutputs_out} test/test.sh"
cfgoutputs_out="${cfgoutputs_out} test/include/test/jemalloc_test.h"
cfgoutputs_tup="Makefile"
+cfgoutputs_tup="${cfgoutputs_tup} jemalloc.pc:jemalloc.pc.in"
cfgoutputs_tup="${cfgoutputs_tup} doc/html.xsl:doc/html.xsl.in"
cfgoutputs_tup="${cfgoutputs_tup} doc/manpages.xsl:doc/manpages.xsl.in"
cfgoutputs_tup="${cfgoutputs_tup} doc/jemalloc.xml:doc/jemalloc.xml.in"
cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_macros.h:include/jemalloc/jemalloc_macros.h.in"
cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_protos.h:include/jemalloc/jemalloc_protos.h.in"
+cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_typedefs.h:include/jemalloc/jemalloc_typedefs.h.in"
cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/internal/jemalloc_internal.h"
cfgoutputs_tup="${cfgoutputs_tup} test/test.sh:test/test.sh.in"
cfgoutputs_tup="${cfgoutputs_tup} test/include/test/jemalloc_test.h:test/include/test/jemalloc_test.h.in"
-cfghdrs_in="${srcroot}include/jemalloc/jemalloc_defs.h.in"
-cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/jemalloc_internal_defs.h.in"
-cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/private_namespace.sh"
-cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/private_unnamespace.sh"
-cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/private_symbols.txt"
-cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/public_namespace.sh"
-cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/public_unnamespace.sh"
-cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/internal/size_classes.sh"
-cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/jemalloc_rename.sh"
-cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/jemalloc_mangle.sh"
-cfghdrs_in="${cfghdrs_in} ${srcroot}include/jemalloc/jemalloc.sh"
-cfghdrs_in="${cfghdrs_in} ${srcroot}test/include/test/jemalloc_test_defs.h.in"
+cfghdrs_in="include/jemalloc/jemalloc_defs.h.in"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/jemalloc_internal_defs.h.in"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_namespace.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_unnamespace.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_symbols.txt"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_namespace.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_unnamespace.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/size_classes.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_rename.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_mangle.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc.sh"
+cfghdrs_in="${cfghdrs_in} test/include/test/jemalloc_test_defs.h.in"
cfghdrs_out="include/jemalloc/jemalloc_defs.h"
cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc${install_suffix}.h"
@@ -592,21 +645,20 @@ cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/jemalloc_internal_defs.h"
cfghdrs_out="${cfghdrs_out} test/include/test/jemalloc_test_defs.h"
cfghdrs_tup="include/jemalloc/jemalloc_defs.h:include/jemalloc/jemalloc_defs.h.in"
-cfghdrs_tup="${cfghdrs_tup} include/jemalloc/internal/jemalloc_internal_defs.h:${srcroot}include/jemalloc/internal/jemalloc_internal_defs.h.in"
-cfghdrs_tup="${cfghdrs_tup} test/include/test/jemalloc_test_defs.h:${srcroot}test/include/test/jemalloc_test_defs.h.in"
+cfghdrs_tup="${cfghdrs_tup} include/jemalloc/internal/jemalloc_internal_defs.h:include/jemalloc/internal/jemalloc_internal_defs.h.in"
+cfghdrs_tup="${cfghdrs_tup} test/include/test/jemalloc_test_defs.h:test/include/test/jemalloc_test_defs.h.in"
-dnl Do not silence irrelevant compiler warnings by default, since enabling this
-dnl option incurs a performance penalty.
+dnl Silence irrelevant compiler warnings by default.
AC_ARG_ENABLE([cc-silence],
- [AS_HELP_STRING([--enable-cc-silence],
- [Silence irrelevant compiler warnings])],
+ [AS_HELP_STRING([--disable-cc-silence],
+ [Do not silence irrelevant compiler warnings])],
[if test "x$enable_cc_silence" = "xno" ; then
enable_cc_silence="0"
else
enable_cc_silence="1"
fi
],
-[enable_cc_silence="0"]
+[enable_cc_silence="1"]
)
if test "x$enable_cc_silence" = "x1" ; then
AC_DEFINE([JEMALLOC_CC_SILENCE], [ ])
@@ -614,7 +666,8 @@ fi
dnl Do not compile with debugging by default.
AC_ARG_ENABLE([debug],
- [AS_HELP_STRING([--enable-debug], [Build debugging code (implies --enable-ivsalloc)])],
+ [AS_HELP_STRING([--enable-debug],
+ [Build debugging code (implies --enable-ivsalloc)])],
[if test "x$enable_debug" = "xno" ; then
enable_debug="0"
else
@@ -625,13 +678,17 @@ fi
)
if test "x$enable_debug" = "x1" ; then
AC_DEFINE([JEMALLOC_DEBUG], [ ])
+fi
+if test "x$enable_debug" = "x1" ; then
+ AC_DEFINE([JEMALLOC_DEBUG], [ ])
enable_ivsalloc="1"
fi
AC_SUBST([enable_debug])
dnl Do not validate pointers by default.
AC_ARG_ENABLE([ivsalloc],
- [AS_HELP_STRING([--enable-ivsalloc], [Validate pointers passed through the public API])],
+ [AS_HELP_STRING([--enable-ivsalloc],
+ [Validate pointers passed through the public API])],
[if test "x$enable_ivsalloc" = "xno" ; then
enable_ivsalloc="0"
else
@@ -721,7 +778,7 @@ fi,
if test "x$backtrace_method" = "x" -a "x$enable_prof_libunwind" = "x1" ; then
AC_CHECK_HEADERS([libunwind.h], , [enable_prof_libunwind="0"])
if test "x$LUNWIND" = "x-lunwind" ; then
- AC_CHECK_LIB([unwind], [backtrace], [LIBS="$LIBS $LUNWIND"],
+ AC_CHECK_LIB([unwind], [unw_backtrace], [LIBS="$LIBS $LUNWIND"],
[enable_prof_libunwind="0"])
else
LIBS="$LIBS $LUNWIND"
@@ -782,11 +839,6 @@ fi
AC_MSG_CHECKING([configured backtracing method])
AC_MSG_RESULT([$backtrace_method])
if test "x$enable_prof" = "x1" ; then
- if test "x${force_tls}" = "x0" ; then
- AC_MSG_ERROR([Heap profiling requires TLS]);
- fi
- force_tls="1"
-
if test "x$abi" != "xpecoff"; then
dnl Heap profiling uses the log(3) function.
LIBS="$LIBS -lm"
@@ -812,32 +864,11 @@ if test "x$enable_tcache" = "x1" ; then
fi
AC_SUBST([enable_tcache])
-dnl Disable mremap() for huge realloc() by default.
-AC_ARG_ENABLE([mremap],
- [AS_HELP_STRING([--enable-mremap], [Enable mremap(2) for huge realloc()])],
-[if test "x$enable_mremap" = "xno" ; then
- enable_mremap="0"
-else
- enable_mremap="1"
+dnl Indicate whether adjacent virtual memory mappings automatically coalesce
+dnl (and fragment on demand).
+if test "x${maps_coalesce}" = "x1" ; then
+ AC_DEFINE([JEMALLOC_MAPS_COALESCE], [ ])
fi
-],
-[enable_mremap="0"]
-)
-if test "x$enable_mremap" = "x1" ; then
- JE_COMPILABLE([mremap(...MREMAP_FIXED...)], [
-#define _GNU_SOURCE
-#include <sys/mman.h>
-], [
-void *p = mremap((void *)0, 0, 0, MREMAP_MAYMOVE|MREMAP_FIXED, (void *)0);
-], [je_cv_mremap_fixed])
- if test "x${je_cv_mremap_fixed}" = "xno" ; then
- enable_mremap="0"
- fi
-fi
-if test "x$enable_mremap" = "x1" ; then
- AC_DEFINE([JEMALLOC_MREMAP], [ ])
-fi
-AC_SUBST([enable_mremap])
dnl Enable VM deallocation via munmap() by default.
AC_ARG_ENABLE([munmap],
@@ -855,34 +886,22 @@ if test "x$enable_munmap" = "x1" ; then
fi
AC_SUBST([enable_munmap])
-dnl Do not enable allocation from DSS by default.
-AC_ARG_ENABLE([dss],
- [AS_HELP_STRING([--enable-dss], [Enable allocation from DSS])],
-[if test "x$enable_dss" = "xno" ; then
- enable_dss="0"
-else
- enable_dss="1"
-fi
-],
-[enable_dss="0"]
-)
+dnl Enable allocation from DSS if supported by the OS.
+have_dss="1"
dnl Check whether the BSD/SUSv1 sbrk() exists. If not, disable DSS support.
AC_CHECK_FUNC([sbrk], [have_sbrk="1"], [have_sbrk="0"])
if test "x$have_sbrk" = "x1" ; then
- if test "x$sbrk_deprecated" == "x1" ; then
+ if test "x$sbrk_deprecated" = "x1" ; then
AC_MSG_RESULT([Disabling dss allocation because sbrk is deprecated])
- enable_dss="0"
- else
- AC_DEFINE([JEMALLOC_HAVE_SBRK], [ ])
+ have_dss="0"
fi
else
- enable_dss="0"
+ have_dss="0"
fi
-if test "x$enable_dss" = "x1" ; then
+if test "x$have_dss" = "x1" ; then
AC_DEFINE([JEMALLOC_DSS], [ ])
fi
-AC_SUBST([enable_dss])
dnl Support the junk/zero filling option by default.
AC_ARG_ENABLE([fill],
@@ -974,8 +993,83 @@ if test "x$enable_xmalloc" = "x1" ; then
fi
AC_SUBST([enable_xmalloc])
-AC_CACHE_CHECK([STATIC_PAGE_SHIFT],
- [je_cv_static_page_shift],
+dnl Support cache-oblivious allocation alignment by default.
+AC_ARG_ENABLE([cache-oblivious],
+ [AS_HELP_STRING([--disable-cache-oblivious],
+ [Disable support for cache-oblivious allocation alignment])],
+[if test "x$enable_cache_oblivious" = "xno" ; then
+ enable_cache_oblivious="0"
+else
+ enable_cache_oblivious="1"
+fi
+],
+[enable_cache_oblivious="1"]
+)
+if test "x$enable_cache_oblivious" = "x1" ; then
+ AC_DEFINE([JEMALLOC_CACHE_OBLIVIOUS], [ ])
+fi
+AC_SUBST([enable_cache_oblivious])
+
+dnl ============================================================================
+dnl Check for __builtin_ffsl(), then ffsl(3), and fail if neither are found.
+dnl One of those two functions should (theoretically) exist on all platforms
+dnl that jemalloc currently has a chance of functioning on without modification.
+dnl We additionally assume ffs() or __builtin_ffs() are defined if
+dnl ffsl() or __builtin_ffsl() are defined, respectively.
+JE_COMPILABLE([a program using __builtin_ffsl], [
+#include <stdio.h>
+#include <strings.h>
+#include <string.h>
+], [
+ {
+ int rv = __builtin_ffsl(0x08);
+ printf("%d\n", rv);
+ }
+], [je_cv_gcc_builtin_ffsl])
+if test "x${je_cv_gcc_builtin_ffsl}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_INTERNAL_FFSL], [__builtin_ffsl])
+ AC_DEFINE([JEMALLOC_INTERNAL_FFS], [__builtin_ffs])
+else
+ JE_COMPILABLE([a program using ffsl], [
+ #include <stdio.h>
+ #include <strings.h>
+ #include <string.h>
+ ], [
+ {
+ int rv = ffsl(0x08);
+ printf("%d\n", rv);
+ }
+ ], [je_cv_function_ffsl])
+ if test "x${je_cv_function_ffsl}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_INTERNAL_FFSL], [ffsl])
+ AC_DEFINE([JEMALLOC_INTERNAL_FFS], [ffs])
+ else
+ AC_MSG_ERROR([Cannot build without ffsl(3) or __builtin_ffsl()])
+ fi
+fi
+
+AC_ARG_WITH([lg_tiny_min],
+ [AS_HELP_STRING([--with-lg-tiny-min=<lg-tiny-min>],
+ [Base 2 log of minimum tiny size class to support])],
+ [LG_TINY_MIN="$with_lg_tiny_min"],
+ [LG_TINY_MIN="3"])
+AC_DEFINE_UNQUOTED([LG_TINY_MIN], [$LG_TINY_MIN])
+
+AC_ARG_WITH([lg_quantum],
+ [AS_HELP_STRING([--with-lg-quantum=<lg-quantum>],
+ [Base 2 log of minimum allocation alignment])],
+ [LG_QUANTA="$with_lg_quantum"],
+ [LG_QUANTA="3 4"])
+if test "x$with_lg_quantum" != "x" ; then
+ AC_DEFINE_UNQUOTED([LG_QUANTUM], [$with_lg_quantum])
+fi
+
+AC_ARG_WITH([lg_page],
+ [AS_HELP_STRING([--with-lg-page=<lg-page>], [Base 2 log of system page size])],
+ [LG_PAGE="$with_lg_page"], [LG_PAGE="detect"])
+if test "x$LG_PAGE" = "xdetect"; then
+ AC_CACHE_CHECK([LG_PAGE],
+ [je_cv_lg_page],
AC_RUN_IFELSE([AC_LANG_PROGRAM(
[[
#include <strings.h>
@@ -1000,7 +1094,7 @@ AC_CACHE_CHECK([STATIC_PAGE_SHIFT],
if (result == -1) {
return 1;
}
- result = ffsl(result) - 1;
+ result = JEMALLOC_INTERNAL_FFSL(result) - 1;
f = fopen("conftest.out", "w");
if (f == NULL) {
@@ -1011,24 +1105,65 @@ AC_CACHE_CHECK([STATIC_PAGE_SHIFT],
return 0;
]])],
- [je_cv_static_page_shift=`cat conftest.out`],
- [je_cv_static_page_shift=undefined]))
-
-if test "x$je_cv_static_page_shift" != "xundefined"; then
- AC_DEFINE_UNQUOTED([STATIC_PAGE_SHIFT], [$je_cv_static_page_shift])
+ [je_cv_lg_page=`cat conftest.out`],
+ [je_cv_lg_page=undefined],
+ [je_cv_lg_page=12]))
+fi
+if test "x${je_cv_lg_page}" != "x" ; then
+ LG_PAGE="${je_cv_lg_page}"
+fi
+if test "x${LG_PAGE}" != "xundefined" ; then
+ AC_DEFINE_UNQUOTED([LG_PAGE], [$LG_PAGE])
else
- AC_MSG_ERROR([cannot determine value for STATIC_PAGE_SHIFT])
+ AC_MSG_ERROR([cannot determine value for LG_PAGE])
fi
+AC_ARG_WITH([lg_page_sizes],
+ [AS_HELP_STRING([--with-lg-page-sizes=<lg-page-sizes>],
+ [Base 2 logs of system page sizes to support])],
+ [LG_PAGE_SIZES="$with_lg_page_sizes"], [LG_PAGE_SIZES="$LG_PAGE"])
+
+AC_ARG_WITH([lg_size_class_group],
+ [AS_HELP_STRING([--with-lg-size-class-group=<lg-size-class-group>],
+ [Base 2 log of size classes per doubling])],
+ [LG_SIZE_CLASS_GROUP="$with_lg_size_class_group"],
+ [LG_SIZE_CLASS_GROUP="2"])
+
dnl ============================================================================
dnl jemalloc configuration.
dnl
-dnl Set VERSION if source directory has an embedded git repository.
-if test -d "${srcroot}.git" ; then
- git describe --long --abbrev=40 > ${srcroot}VERSION
+dnl Set VERSION if source directory is inside a git repository.
+if test "x`test ! \"${srcroot}\" && cd \"${srcroot}\"; git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then
+ dnl Pattern globs aren't powerful enough to match both single- and
+ dnl double-digit version numbers, so iterate over patterns to support up to
+ dnl version 99.99.99 without any accidental matches.
+ rm -f "${objroot}VERSION"
+ for pattern in ['[0-9].[0-9].[0-9]' '[0-9].[0-9].[0-9][0-9]' \
+ '[0-9].[0-9][0-9].[0-9]' '[0-9].[0-9][0-9].[0-9][0-9]' \
+ '[0-9][0-9].[0-9].[0-9]' '[0-9][0-9].[0-9].[0-9][0-9]' \
+ '[0-9][0-9].[0-9][0-9].[0-9]' \
+ '[0-9][0-9].[0-9][0-9].[0-9][0-9]']; do
+ if test ! -e "${objroot}VERSION" ; then
+ (test ! "${srcroot}" && cd "${srcroot}"; git describe --long --abbrev=40 --match="${pattern}") > "${objroot}VERSION.tmp" 2>/dev/null
+ if test $? -eq 0 ; then
+ mv "${objroot}VERSION.tmp" "${objroot}VERSION"
+ break
+ fi
+ fi
+ done
fi
-jemalloc_version=`cat ${srcroot}VERSION`
+rm -f "${objroot}VERSION.tmp"
+if test ! -e "${objroot}VERSION" ; then
+ if test ! -e "${srcroot}VERSION" ; then
+ AC_MSG_RESULT(
+ [Missing VERSION file, and unable to generate it; creating bogus VERSION])
+ echo "0.0.0-0-g0000000000000000000000000000000000000000" > "${objroot}VERSION"
+ else
+ cp ${srcroot}VERSION ${objroot}VERSION
+ fi
+fi
+jemalloc_version=`cat "${objroot}VERSION"`
jemalloc_version_major=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]1}'`
jemalloc_version_minor=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]2}'`
jemalloc_version_bugfix=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]3}'`
@@ -1055,6 +1190,32 @@ fi
CPPFLAGS="$CPPFLAGS -D_REENTRANT"
+dnl Check whether clock_gettime(2) is in libc or librt. This function is only
+dnl used in test code, so save the result to TESTLIBS to avoid polluting LIBS.
+SAVED_LIBS="${LIBS}"
+LIBS=
+AC_SEARCH_LIBS([clock_gettime], [rt], [TESTLIBS="${LIBS}"])
+AC_SUBST([TESTLIBS])
+LIBS="${SAVED_LIBS}"
+
+dnl Check if the GNU-specific secure_getenv function exists.
+AC_CHECK_FUNC([secure_getenv],
+ [have_secure_getenv="1"],
+ [have_secure_getenv="0"]
+ )
+if test "x$have_secure_getenv" = "x1" ; then
+ AC_DEFINE([JEMALLOC_HAVE_SECURE_GETENV], [ ])
+fi
+
+dnl Check if the Solaris/BSD issetugid function exists.
+AC_CHECK_FUNC([issetugid],
+ [have_issetugid="1"],
+ [have_issetugid="0"]
+ )
+if test "x$have_issetugid" = "x1" ; then
+ AC_DEFINE([JEMALLOC_HAVE_ISSETUGID], [ ])
+fi
+
dnl Check whether the BSD-specific _malloc_thread_cleanup() exists. If so, use
dnl it rather than pthreads TSD cleanup functions to support cleanup during
dnl thread exit, in order to avoid pthreads library recursion during
@@ -1089,9 +1250,9 @@ else
enable_lazy_lock="1"
fi
],
-[enable_lazy_lock="0"]
+[enable_lazy_lock=""]
)
-if test "x$enable_lazy_lock" = "x0" -a "x${force_lazy_lock}" = "x1" ; then
+if test "x$enable_lazy_lock" = "x" -a "x${force_lazy_lock}" = "x1" ; then
AC_MSG_RESULT([Forcing lazy-lock to avoid allocator/threading bootstrap issues])
enable_lazy_lock="1"
fi
@@ -1104,6 +1265,8 @@ if test "x$enable_lazy_lock" = "x1" ; then
])
fi
AC_DEFINE([JEMALLOC_LAZY_LOCK], [ ])
+else
+ enable_lazy_lock="0"
fi
AC_SUBST([enable_lazy_lock])
@@ -1115,15 +1278,18 @@ else
enable_tls="1"
fi
,
-enable_tls="1"
+enable_tls=""
)
-if test "x${enable_tls}" = "x0" -a "x${force_tls}" = "x1" ; then
- AC_MSG_RESULT([Forcing TLS to avoid allocator/threading bootstrap issues])
- enable_tls="1"
-fi
-if test "x${enable_tls}" = "x1" -a "x${force_tls}" = "x0" ; then
- AC_MSG_RESULT([Forcing no TLS to avoid allocator/threading bootstrap issues])
- enable_tls="0"
+if test "x${enable_tls}" = "x" ; then
+ if test "x${force_tls}" = "x1" ; then
+ AC_MSG_RESULT([Forcing TLS to avoid allocator/threading bootstrap issues])
+ enable_tls="1"
+ elif test "x${force_tls}" = "x0" ; then
+ AC_MSG_RESULT([Forcing no TLS to avoid allocator/threading bootstrap issues])
+ enable_tls="0"
+ else
+ enable_tls="1"
+ fi
fi
if test "x${enable_tls}" = "x1" ; then
AC_MSG_CHECKING([for TLS])
@@ -1138,30 +1304,38 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
AC_MSG_RESULT([yes]),
AC_MSG_RESULT([no])
enable_tls="0")
+else
+ enable_tls="0"
fi
AC_SUBST([enable_tls])
if test "x${enable_tls}" = "x1" ; then
+ if test "x${force_tls}" = "x0" ; then
+ AC_MSG_WARN([TLS enabled despite being marked unusable on this platform])
+ fi
AC_DEFINE_UNQUOTED([JEMALLOC_TLS], [ ])
elif test "x${force_tls}" = "x1" ; then
- AC_MSG_ERROR([Failed to configure TLS, which is mandatory for correct function])
+ AC_MSG_WARN([TLS disabled despite being marked critical on this platform])
fi
dnl ============================================================================
-dnl Check for ffsl(3), and fail if not found. This function exists on all
-dnl platforms that jemalloc currently has a chance of functioning on without
-dnl modification.
-JE_COMPILABLE([a program using ffsl], [
-#include <stdio.h>
-#include <strings.h>
-#include <string.h>
+dnl Check for C11 atomics.
+
+JE_COMPILABLE([C11 atomics], [
+#include <stdint.h>
+#if (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__)
+#include <stdatomic.h>
+#else
+#error Atomics not available
+#endif
], [
- {
- int rv = ffsl(0x08);
- printf("%d\n", rv);
- }
-], [je_cv_function_ffsl])
-if test "x${je_cv_function_ffsl}" != "xyes" ; then
- AC_MSG_ERROR([Cannot build without ffsl(3)])
+ uint64_t *p = (uint64_t *)0;
+ uint64_t x = 1;
+ volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
+ uint64_t r = atomic_fetch_add(a, x) + x;
+ return (r == 0);
+], [je_cv_c11atomics])
+if test "x${je_cv_c11atomics}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_C11ATOMICS])
fi
dnl ============================================================================
@@ -1210,6 +1384,20 @@ if test "x${je_cv_osatomic}" = "xyes" ; then
fi
dnl ============================================================================
+dnl Check for madvise(2).
+
+JE_COMPILABLE([madvise(2)], [
+#include <sys/mman.h>
+], [
+ {
+ madvise((void *)0, 0, 0);
+ }
+], [je_cv_madvise])
+if test "x${je_cv_madvise}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_MADVISE], [ ])
+fi
+
+dnl ============================================================================
dnl Check whether __sync_{add,sub}_and_fetch() are available despite
dnl __GCC_HAVE_SYNC_COMPARE_AND_SWAP_n macros being undefined.
@@ -1244,6 +1432,29 @@ if test "x${je_cv_atomic9}" != "xyes" -a "x${je_cv_osatomic}" != "xyes" ; then
fi
dnl ============================================================================
+dnl Check for __builtin_clz() and __builtin_clzl().
+
+AC_CACHE_CHECK([for __builtin_clz],
+ [je_cv_builtin_clz],
+ [AC_LINK_IFELSE([AC_LANG_PROGRAM([],
+ [
+ {
+ unsigned x = 0;
+ int y = __builtin_clz(x);
+ }
+ {
+ unsigned long x = 0;
+ int y = __builtin_clzl(x);
+ }
+ ])],
+ [je_cv_builtin_clz=yes],
+ [je_cv_builtin_clz=no])])
+
+if test "x${je_cv_builtin_clz}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_BUILTIN_CLZ], [ ])
+fi
+
+dnl ============================================================================
dnl Check for spinlock(3) operations as provided on Darwin.
JE_COMPILABLE([Darwin OSSpin*()], [
@@ -1281,7 +1492,6 @@ if test "x${enable_zone_allocator}" = "x1" ; then
if test "x${abi}" != "xmacho"; then
AC_MSG_ERROR([--enable-zone-allocator is only supported on Darwin])
fi
- AC_DEFINE([JEMALLOC_IVSALLOC], [ ])
AC_DEFINE([JEMALLOC_ZONE], [ ])
dnl The szone version jumped from 3 to 6 between the OS X 10.5.x and 10.6
@@ -1291,7 +1501,7 @@ if test "x${enable_zone_allocator}" = "x1" ; then
AC_DEFUN([JE_ZONE_PROGRAM],
[AC_LANG_PROGRAM(
[#include <malloc/malloc.h>],
- [static foo[[sizeof($1) $2 sizeof(void *) * $3 ? 1 : -1]]]
+ [static int foo[[sizeof($1) $2 sizeof(void *) * $3 ? 1 : -1]]]
)])
AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,==,14)],[JEMALLOC_ZONE_VERSION=3],[
@@ -1317,6 +1527,49 @@ if test "x${enable_zone_allocator}" = "x1" ; then
fi
dnl ============================================================================
+dnl Check for glibc malloc hooks
+
+JE_COMPILABLE([glibc malloc hook], [
+#include <stddef.h>
+
+extern void (* __free_hook)(void *ptr);
+extern void *(* __malloc_hook)(size_t size);
+extern void *(* __realloc_hook)(void *ptr, size_t size);
+], [
+ void *ptr = 0L;
+ if (__malloc_hook) ptr = __malloc_hook(1);
+ if (__realloc_hook) ptr = __realloc_hook(ptr, 2);
+ if (__free_hook && ptr) __free_hook(ptr);
+], [je_cv_glibc_malloc_hook])
+if test "x${je_cv_glibc_malloc_hook}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_GLIBC_MALLOC_HOOK], [ ])
+fi
+
+JE_COMPILABLE([glibc memalign hook], [
+#include <stddef.h>
+
+extern void *(* __memalign_hook)(size_t alignment, size_t size);
+], [
+ void *ptr = 0L;
+ if (__memalign_hook) ptr = __memalign_hook(16, 7);
+], [je_cv_glibc_memalign_hook])
+if test "x${je_cv_glibc_memalign_hook}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_GLIBC_MEMALIGN_HOOK], [ ])
+fi
+
+JE_COMPILABLE([pthreads adaptive mutexes], [
+#include <pthread.h>
+], [
+ pthread_mutexattr_t attr;
+ pthread_mutexattr_init(&attr);
+ pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
+ pthread_mutexattr_destroy(&attr);
+], [je_cv_pthread_mutex_adaptive_np])
+if test "x${je_cv_pthread_mutex_adaptive_np}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP], [ ])
+fi
+
+dnl ============================================================================
dnl Check for typedefs, structures, and compiler characteristics.
AC_HEADER_STDBOOL
@@ -1376,10 +1629,15 @@ AC_CONFIG_COMMANDS([include/jemalloc/internal/public_unnamespace.h], [
])
AC_CONFIG_COMMANDS([include/jemalloc/internal/size_classes.h], [
mkdir -p "${objroot}include/jemalloc/internal"
- "${srcdir}/include/jemalloc/internal/size_classes.sh" > "${objroot}include/jemalloc/internal/size_classes.h"
+ "${SHELL}" "${srcdir}/include/jemalloc/internal/size_classes.sh" "${LG_QUANTA}" ${LG_TINY_MIN} "${LG_PAGE_SIZES}" ${LG_SIZE_CLASS_GROUP} > "${objroot}include/jemalloc/internal/size_classes.h"
], [
+ SHELL="${SHELL}"
srcdir="${srcdir}"
objroot="${objroot}"
+ LG_QUANTA="${LG_QUANTA}"
+ LG_TINY_MIN=${LG_TINY_MIN}
+ LG_PAGE_SIZES="${LG_PAGE_SIZES}"
+ LG_SIZE_CLASS_GROUP=${LG_SIZE_CLASS_GROUP}
])
AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_protos_jet.h], [
mkdir -p "${objroot}include/jemalloc"
@@ -1426,7 +1684,7 @@ AC_CONFIG_HEADERS([$cfghdrs_tup])
dnl ============================================================================
dnl Generate outputs.
-AC_CONFIG_FILES([$cfgoutputs_tup config.stamp bin/jemalloc.sh])
+AC_CONFIG_FILES([$cfgoutputs_tup config.stamp bin/jemalloc-config bin/jemalloc.sh bin/jeprof])
AC_SUBST([cfgoutputs_in])
AC_SUBST([cfgoutputs_out])
AC_OUTPUT
@@ -1437,12 +1695,14 @@ AC_MSG_RESULT([=================================================================
AC_MSG_RESULT([jemalloc version : ${jemalloc_version}])
AC_MSG_RESULT([library revision : ${rev}])
AC_MSG_RESULT([])
+AC_MSG_RESULT([CONFIG : ${CONFIG}])
AC_MSG_RESULT([CC : ${CC}])
-AC_MSG_RESULT([CPPFLAGS : ${CPPFLAGS}])
AC_MSG_RESULT([CFLAGS : ${CFLAGS}])
+AC_MSG_RESULT([CPPFLAGS : ${CPPFLAGS}])
AC_MSG_RESULT([LDFLAGS : ${LDFLAGS}])
AC_MSG_RESULT([EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}])
AC_MSG_RESULT([LIBS : ${LIBS}])
+AC_MSG_RESULT([TESTLIBS : ${TESTLIBS}])
AC_MSG_RESULT([RPATH_EXTRA : ${RPATH_EXTRA}])
AC_MSG_RESULT([])
AC_MSG_RESULT([XSLTPROC : ${XSLTPROC}])
@@ -1450,9 +1710,9 @@ AC_MSG_RESULT([XSLROOT : ${XSLROOT}])
AC_MSG_RESULT([])
AC_MSG_RESULT([PREFIX : ${PREFIX}])
AC_MSG_RESULT([BINDIR : ${BINDIR}])
+AC_MSG_RESULT([DATADIR : ${DATADIR}])
AC_MSG_RESULT([INCLUDEDIR : ${INCLUDEDIR}])
AC_MSG_RESULT([LIBDIR : ${LIBDIR}])
-AC_MSG_RESULT([DATADIR : ${DATADIR}])
AC_MSG_RESULT([MANDIR : ${MANDIR}])
AC_MSG_RESULT([])
AC_MSG_RESULT([srcroot : ${srcroot}])
@@ -1465,7 +1725,6 @@ AC_MSG_RESULT([JEMALLOC_PRIVATE_NAMESPACE])
AC_MSG_RESULT([ : ${JEMALLOC_PRIVATE_NAMESPACE}])
AC_MSG_RESULT([install_suffix : ${install_suffix}])
AC_MSG_RESULT([autogen : ${enable_autogen}])
-AC_MSG_RESULT([experimental : ${enable_experimental}])
AC_MSG_RESULT([cc-silence : ${enable_cc_silence}])
AC_MSG_RESULT([debug : ${enable_debug}])
AC_MSG_RESULT([code-coverage : ${enable_code_coverage}])
@@ -1479,9 +1738,8 @@ AC_MSG_RESULT([fill : ${enable_fill}])
AC_MSG_RESULT([utrace : ${enable_utrace}])
AC_MSG_RESULT([valgrind : ${enable_valgrind}])
AC_MSG_RESULT([xmalloc : ${enable_xmalloc}])
-AC_MSG_RESULT([mremap : ${enable_mremap}])
AC_MSG_RESULT([munmap : ${enable_munmap}])
-AC_MSG_RESULT([dss : ${enable_dss}])
AC_MSG_RESULT([lazy_lock : ${enable_lazy_lock}])
AC_MSG_RESULT([tls : ${enable_tls}])
+AC_MSG_RESULT([cache-oblivious : ${enable_cache_oblivious}])
AC_MSG_RESULT([===============================================================================])
diff --git a/deps/jemalloc/doc/jemalloc.3 b/deps/jemalloc/doc/jemalloc.3
index d04fbb498..2e6b2c0e8 100644
--- a/deps/jemalloc/doc/jemalloc.3
+++ b/deps/jemalloc/doc/jemalloc.3
@@ -2,12 +2,12 @@
.\" Title: JEMALLOC
.\" Author: Jason Evans
.\" Generator: DocBook XSL Stylesheets v1.78.1 <http://docbook.sf.net/>
-.\" Date: 03/31/2014
+.\" Date: 09/24/2015
.\" Manual: User Manual
-.\" Source: jemalloc 3.6.0-0-g46c0af68bd248b04df75e4f92d5fb804c3d75340
+.\" Source: jemalloc 4.0.3-0-ge9192eacf8935e29fc62fddc2701f7942b1cc02c
.\" Language: English
.\"
-.TH "JEMALLOC" "3" "03/31/2014" "jemalloc 3.6.0-0-g46c0af68bd24" "User Manual"
+.TH "JEMALLOC" "3" "09/24/2015" "jemalloc 4.0.3-0-ge9192eacf893" "User Manual"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
@@ -31,13 +31,12 @@
jemalloc \- general purpose memory allocation functions
.SH "LIBRARY"
.PP
-This manual describes jemalloc 3\&.6\&.0\-0\-g46c0af68bd248b04df75e4f92d5fb804c3d75340\&. More information can be found at the
+This manual describes jemalloc 4\&.0\&.3\-0\-ge9192eacf8935e29fc62fddc2701f7942b1cc02c\&. More information can be found at the
\m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&.
.SH "SYNOPSIS"
.sp
.ft B
.nf
-#include <stdlib\&.h>
#include <jemalloc/jemalloc\&.h>
.fi
.ft
@@ -65,6 +64,8 @@ This manual describes jemalloc 3\&.6\&.0\-0\-g46c0af68bd248b04df75e4f92d5fb804c3
.BI "size_t sallocx(void\ *" "ptr" ", int\ " "flags" ");"
.HP \w'void\ dallocx('u
.BI "void dallocx(void\ *" "ptr" ", int\ " "flags" ");"
+.HP \w'void\ sdallocx('u
+.BI "void sdallocx(void\ *" "ptr" ", size_t\ " "size" ", int\ " "flags" ");"
.HP \w'size_t\ nallocx('u
.BI "size_t nallocx(size_t\ " "size" ", int\ " "flags" ");"
.HP \w'int\ mallctl('u
@@ -81,17 +82,6 @@ This manual describes jemalloc 3\&.6\&.0\-0\-g46c0af68bd248b04df75e4f92d5fb804c3
.BI "void (*malloc_message)(void\ *" "cbopaque" ", const\ char\ *" "s" ");"
.PP
const char *\fImalloc_conf\fR;
-.SS "Experimental API"
-.HP \w'int\ allocm('u
-.BI "int allocm(void\ **" "ptr" ", size_t\ *" "rsize" ", size_t\ " "size" ", int\ " "flags" ");"
-.HP \w'int\ rallocm('u
-.BI "int rallocm(void\ **" "ptr" ", size_t\ *" "rsize" ", size_t\ " "size" ", size_t\ " "extra" ", int\ " "flags" ");"
-.HP \w'int\ sallocm('u
-.BI "int sallocm(const\ void\ *" "ptr" ", size_t\ *" "rsize" ", int\ " "flags" ");"
-.HP \w'int\ dallocm('u
-.BI "int dallocm(void\ *" "ptr" ", int\ " "flags" ");"
-.HP \w'int\ nallocm('u
-.BI "int nallocm(size_t\ *" "rsize" ", size_t\ " "size" ", int\ " "flags" ");"
.SH "DESCRIPTION"
.SS "Standard API"
.PP
@@ -118,7 +108,7 @@ The
\fBposix_memalign\fR\fB\fR
function allocates
\fIsize\fR
-bytes of memory such that the allocation\*(Aqs base address is an even multiple of
+bytes of memory such that the allocation\*(Aqs base address is a multiple of
\fIalignment\fR, and returns the allocation in the value pointed to by
\fIptr\fR\&. The requested
\fIalignment\fR
@@ -129,7 +119,7 @@ The
\fBaligned_alloc\fR\fB\fR
function allocates
\fIsize\fR
-bytes of memory such that the allocation\*(Aqs base address is an even multiple of
+bytes of memory such that the allocation\*(Aqs base address is a multiple of
\fIalignment\fR\&. The requested
\fIalignment\fR
must be a power of 2\&. Behavior is undefined if
@@ -172,7 +162,8 @@ The
\fBrallocx\fR\fB\fR,
\fBxallocx\fR\fB\fR,
\fBsallocx\fR\fB\fR,
-\fBdallocx\fR\fB\fR, and
+\fBdallocx\fR\fB\fR,
+\fBsdallocx\fR\fB\fR, and
\fBnallocx\fR\fB\fR
functions all have a
\fIflags\fR
@@ -201,11 +192,32 @@ is a power of 2\&.
Initialize newly allocated memory to contain zero bytes\&. In the growing reallocation case, the real size prior to reallocation defines the boundary between untouched bytes and those that are initialized to contain zero bytes\&. If this macro is absent, newly allocated memory is uninitialized\&.
.RE
.PP
+\fBMALLOCX_TCACHE(\fR\fB\fItc\fR\fR\fB) \fR
+.RS 4
+Use the thread\-specific cache (tcache) specified by the identifier
+\fItc\fR, which must have been acquired via the
+"tcache\&.create"
+mallctl\&. This macro does not validate that
+\fItc\fR
+specifies a valid identifier\&.
+.RE
+.PP
+\fBMALLOCX_TCACHE_NONE\fR
+.RS 4
+Do not use a thread\-specific cache (tcache)\&. Unless
+\fBMALLOCX_TCACHE(\fR\fB\fItc\fR\fR\fB)\fR
+or
+\fBMALLOCX_TCACHE_NONE\fR
+is specified, an automatically managed tcache will be used under many circumstances\&. This macro cannot be used in the same
+\fIflags\fR
+argument as
+\fBMALLOCX_TCACHE(\fR\fB\fItc\fR\fR\fB)\fR\&.
+.RE
+.PP
\fBMALLOCX_ARENA(\fR\fB\fIa\fR\fR\fB) \fR
.RS 4
Use the arena specified by the index
-\fIa\fR
-(and by necessity bypass the thread cache)\&. This macro has no effect for huge regions, nor for regions that were allocated via an arena other than the one specified\&. This macro does not validate that
+\fIa\fR\&. This macro has no effect for regions that were allocated via an arena other than the one specified\&. This macro does not validate that
\fIa\fR
specifies an arena index in the valid range\&.
.RE
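As a rough sketch of how the MALLOCX_TCACHE(tc) flag is meant to combine with the "tcache.create" and "tcache.destroy" mallctls documented further below (assumes a build with --enable-tcache; error handling is minimal):

    #include <stdlib.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        unsigned tc;
        size_t sz = sizeof(tc);

        /* Create an explicit tcache; its identifier is returned through oldp. */
        if (mallctl("tcache.create", &tc, &sz, NULL, 0) != 0)
            return 1;

        /* Route an allocation and its deallocation through that tcache. */
        void *p = mallocx(4096, MALLOCX_TCACHE(tc));
        if (p != NULL)
            dallocx(p, MALLOCX_TCACHE(tc));

        /* Flush the tcache and recycle its identifier. */
        mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
        return 0;
    }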
@@ -258,6 +270,17 @@ function causes the memory referenced by
to be made available for future allocations\&.
.PP
The
+\fBsdallocx\fR\fB\fR
+function is an extension of
+\fBdallocx\fR\fB\fR
+with a
+\fIsize\fR
+parameter to allow the caller to pass in the allocation size as an optimization\&. The minimum valid input size is the original requested size of the allocation, and the maximum valid input size is the corresponding value returned by
+\fBnallocx\fR\fB\fR
+or
+\fBsallocx\fR\fB\fR\&.
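A minimal sketch of that pairing, assuming the prototypes above and an arbitrary 100-byte request:

    #include <jemalloc/jemalloc.h>

    int main(void) {
        size_t usable = nallocx(100, 0);  /* size class the request rounds up to */
        void *p = mallocx(100, 0);
        if (p != NULL) {
            /* Any value in [100, usable] is a valid size hint for sdallocx(). */
            sdallocx(p, usable, 0);
        }
        return 0;
    }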
+.PP
+The
\fBnallocx\fR\fB\fR
function allocates no memory, but it performs the same size computation as the
\fBmallocx\fR\fB\fR
@@ -351,7 +374,7 @@ uses the
\fBmallctl*\fR\fB\fR
functions internally, so inconsistent statistics can be reported if multiple threads use these functions simultaneously\&. If
\fB\-\-enable\-stats\fR
-is specified during configuration, \(lqm\(rq and \(lqa\(rq can be specified to omit merged arena and per arena statistics, respectively; \(lqb\(rq and \(lql\(rq can be specified to omit per size class statistics for bins and large objects, respectively\&. Unrecognized characters are silently ignored\&. Note that thread caching may prevent some statistics from being completely up to date, since extra locking would be required to merge counters that track thread cache operations\&.
+is specified during configuration, \(lqm\(rq and \(lqa\(rq can be specified to omit merged arena and per arena statistics, respectively; \(lqb\(rq, \(lql\(rq, and \(lqh\(rq can be specified to omit per size class statistics for bins, large objects, and huge objects, respectively\&. Unrecognized characters are silently ignored\&. Note that thread caching may prevent some statistics from being completely up to date, since extra locking would be required to merge counters that track thread cache operations\&.
.PP
The
\fBmalloc_usable_size\fR\fB\fR
@@ -362,126 +385,6 @@ function is not a mechanism for in\-place
\fBrealloc\fR\fB\fR; rather it is provided solely as a tool for introspection purposes\&. Any discrepancy between the requested allocation size and the size reported by
\fBmalloc_usable_size\fR\fB\fR
should not be depended on, since such behavior is entirely implementation\-dependent\&.
-.SS "Experimental API"
-.PP
-The experimental API is subject to change or removal without regard for backward compatibility\&. If
-\fB\-\-disable\-experimental\fR
-is specified during configuration, the experimental API is omitted\&.
-.PP
-The
-\fBallocm\fR\fB\fR,
-\fBrallocm\fR\fB\fR,
-\fBsallocm\fR\fB\fR,
-\fBdallocm\fR\fB\fR, and
-\fBnallocm\fR\fB\fR
-functions all have a
-\fIflags\fR
-argument that can be used to specify options\&. The functions only check the options that are contextually relevant\&. Use bitwise or (|) operations to specify one or more of the following:
-.PP
-\fBALLOCM_LG_ALIGN(\fR\fB\fIla\fR\fR\fB) \fR
-.RS 4
-Align the memory allocation to start at an address that is a multiple of
-(1 << \fIla\fR)\&. This macro does not validate that
-\fIla\fR
-is within the valid range\&.
-.RE
-.PP
-\fBALLOCM_ALIGN(\fR\fB\fIa\fR\fR\fB) \fR
-.RS 4
-Align the memory allocation to start at an address that is a multiple of
-\fIa\fR, where
-\fIa\fR
-is a power of two\&. This macro does not validate that
-\fIa\fR
-is a power of 2\&.
-.RE
-.PP
-\fBALLOCM_ZERO\fR
-.RS 4
-Initialize newly allocated memory to contain zero bytes\&. In the growing reallocation case, the real size prior to reallocation defines the boundary between untouched bytes and those that are initialized to contain zero bytes\&. If this macro is absent, newly allocated memory is uninitialized\&.
-.RE
-.PP
-\fBALLOCM_NO_MOVE\fR
-.RS 4
-For reallocation, fail rather than moving the object\&. This constraint can apply to both growth and shrinkage\&.
-.RE
-.PP
-\fBALLOCM_ARENA(\fR\fB\fIa\fR\fR\fB) \fR
-.RS 4
-Use the arena specified by the index
-\fIa\fR
-(and by necessity bypass the thread cache)\&. This macro has no effect for huge regions, nor for regions that were allocated via an arena other than the one specified\&. This macro does not validate that
-\fIa\fR
-specifies an arena index in the valid range\&.
-.RE
-.PP
-The
-\fBallocm\fR\fB\fR
-function allocates at least
-\fIsize\fR
-bytes of memory, sets
-\fI*ptr\fR
-to the base address of the allocation, and sets
-\fI*rsize\fR
-to the real size of the allocation if
-\fIrsize\fR
-is not
-\fBNULL\fR\&. Behavior is undefined if
-\fIsize\fR
-is
-\fB0\fR, or if request size overflows due to size class and/or alignment constraints\&.
-.PP
-The
-\fBrallocm\fR\fB\fR
-function resizes the allocation at
-\fI*ptr\fR
-to be at least
-\fIsize\fR
-bytes, sets
-\fI*ptr\fR
-to the base address of the allocation if it moved, and sets
-\fI*rsize\fR
-to the real size of the allocation if
-\fIrsize\fR
-is not
-\fBNULL\fR\&. If
-\fIextra\fR
-is non\-zero, an attempt is made to resize the allocation to be at least
-(\fIsize\fR + \fIextra\fR)
-bytes, though inability to allocate the extra byte(s) will not by itself result in failure\&. Behavior is undefined if
-\fIsize\fR
-is
-\fB0\fR, if request size overflows due to size class and/or alignment constraints, or if
-(\fIsize\fR + \fIextra\fR > \fBSIZE_T_MAX\fR)\&.
-.PP
-The
-\fBsallocm\fR\fB\fR
-function sets
-\fI*rsize\fR
-to the real size of the allocation\&.
-.PP
-The
-\fBdallocm\fR\fB\fR
-function causes the memory referenced by
-\fIptr\fR
-to be made available for future allocations\&.
-.PP
-The
-\fBnallocm\fR\fB\fR
-function allocates no memory, but it performs the same size computation as the
-\fBallocm\fR\fB\fR
-function, and if
-\fIrsize\fR
-is not
-\fBNULL\fR
-it sets
-\fI*rsize\fR
-to the real size of the allocation that would result from the equivalent
-\fBallocm\fR\fB\fR
-function call\&. Behavior is undefined if
-\fIsize\fR
-is
-\fB0\fR, or if request size overflows due to size class and/or alignment constraints\&.
.SH "TUNING"
.PP
Once, when the first call is made to one of the memory allocation routines, the allocator initializes its internals based in part on various options that can be specified at compile\- or run\-time\&.
@@ -519,8 +422,8 @@ options\&. Some options have boolean values (true/false), others have integer va
Traditionally, allocators have used
\fBsbrk\fR(2)
to obtain memory, which is suboptimal for several reasons, including race conditions, increased fragmentation, and artificial limitations on maximum usable memory\&. If
-\fB\-\-enable\-dss\fR
-is specified during configuration, this allocator uses both
+\fBsbrk\fR(2)
+is supported by the operating system, this allocator uses both
\fBmmap\fR(2)
and
\fBsbrk\fR(2), in that order of preference; otherwise only
@@ -535,18 +438,29 @@ is specified during configuration, this allocator supports thread\-specific cach
.PP
Memory is conceptually broken into equal\-sized chunks, where the chunk size is a power of two that is greater than the page size\&. Chunks are always aligned to multiples of the chunk size\&. This alignment makes it possible to find metadata for user objects very quickly\&.
.PP
-User objects are broken into three categories according to size: small, large, and huge\&. Small objects are smaller than one page\&. Large objects are smaller than the chunk size\&. Huge objects are a multiple of the chunk size\&. Small and large objects are managed by arenas; huge objects are managed separately in a single data structure that is shared by all threads\&. Huge objects are used by applications infrequently enough that this single data structure is not a scalability issue\&.
+User objects are broken into three categories according to size: small, large, and huge\&. Small and large objects are managed entirely by arenas; huge objects are additionally aggregated in a single data structure that is shared by all threads\&. Huge objects are typically used by applications infrequently enough that this single data structure is not a scalability issue\&.
.PP
Each chunk that is managed by an arena tracks its contents as runs of contiguous pages (unused, backing a set of small objects, or backing one large object)\&. The combination of chunk alignment and chunk page maps makes it possible to determine all metadata regarding small and large allocations in constant time\&.
.PP
-Small objects are managed in groups by page runs\&. Each run maintains a frontier and free list to track which regions are in use\&. Allocation requests that are no more than half the quantum (8 or 16, depending on architecture) are rounded up to the nearest power of two that is at least
-sizeof(\fBdouble\fR)\&. All other small object size classes are multiples of the quantum, spaced such that internal fragmentation is limited to approximately 25% for all but the smallest size classes\&. Allocation requests that are larger than the maximum small size class, but small enough to fit in an arena\-managed chunk (see the
+Small objects are managed in groups by page runs\&. Each run maintains a bitmap to track which regions are in use\&. Allocation requests that are no more than half the quantum (8 or 16, depending on architecture) are rounded up to the nearest power of two that is at least
+sizeof(\fBdouble\fR)\&. All other object size classes are multiples of the quantum, spaced such that there are four size classes for each doubling in size, which limits internal fragmentation to approximately 20% for all but the smallest size classes\&. Small size classes are smaller than four times the page size, large size classes are smaller than the chunk size (see the
"opt\&.lg_chunk"
-option), are rounded up to the nearest run size\&. Allocation requests that are too large to fit in an arena\-managed chunk are rounded up to the nearest multiple of the chunk size\&.
+option), and huge size classes extend from the chunk size up to one size class less than the full address space size\&.
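The rounding can be observed with nallocx(); the commented values are what the table below predicts for a 64-bit system with a 16-byte quantum and 4 KiB pages, not captured program output:

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        printf("%zu\n", nallocx(100, 0));   /* 112: small class, 16-byte spacing */
        printf("%zu\n", nallocx(5000, 0));  /* 5120: small class, 1 KiB spacing */
        printf("%zu\n", nallocx(17000, 0)); /* 20480: large class, 4 KiB spacing */
        return 0;
    }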
.PP
Allocations are packed tightly together, which can be an issue for multi\-threaded applications\&. If you need to assure that allocations do not suffer from cacheline sharing, round your allocation requests up to the nearest multiple of the cacheline size, or specify cacheline alignment when allocating\&.
.PP
-Assuming 4 MiB chunks, 4 KiB pages, and a 16\-byte quantum on a 64\-bit system, the size classes in each category are as shown in
+The
+\fBrealloc\fR\fB\fR,
+\fBrallocx\fR\fB\fR, and
+\fBxallocx\fR\fB\fR
+functions may resize allocations without moving them under limited circumstances\&. Unlike the
+\fB*allocx\fR\fB\fR
+API, the standard API does not officially round up the usable size of an allocation to the nearest size class, so technically it is necessary to call
+\fBrealloc\fR\fB\fR
+to grow e\&.g\&. a 9\-byte allocation to 16 bytes, or shrink a 16\-byte allocation to 9 bytes\&. Growth and shrinkage trivially succeed in place as long as the pre\-size and post\-size both round up to the same size class\&. No other API guarantees are made regarding in\-place resizing, but the current implementation also tries to resize large and huge allocations in place, as long as the pre\-size and post\-size are both large or both huge\&. In such cases shrinkage always succeeds for large size classes, but for huge size classes the chunk allocator must support splitting (see
+"arena\&.<i>\&.chunk_hooks")\&. Growth only succeeds if the trailing memory is currently available, and additionally for huge size classes the chunk allocator must support merging\&.
+.PP
+Assuming 2 MiB chunks, 4 KiB pages, and a 16\-byte quantum on a 64\-bit system, the size classes in each category are as shown in
Table 1\&.
.sp
.it 1 an-trap
@@ -572,8 +486,23 @@ l r l
^ r l
^ r l
^ r l
+^ r l
+^ r l
l r l
-l r l.
+^ r l
+^ r l
+^ r l
+^ r l
+^ r l
+^ r l
+^ r l
+l r l
+^ r l
+^ r l
+^ r l
+^ r l
+^ r l
+^ r l.
T{
Small
T}:T{
@@ -584,7 +513,7 @@ T}
:T{
16
T}:T{
-[16, 32, 48, \&.\&.\&., 128]
+[16, 32, 48, 64, 80, 96, 112, 128]
T}
:T{
32
@@ -609,21 +538,96 @@ T}
:T{
512
T}:T{
-[2560, 3072, 3584]
+[2560, 3072, 3584, 4096]
+T}
+:T{
+1 KiB
+T}:T{
+[5 KiB, 6 KiB, 7 KiB, 8 KiB]
+T}
+:T{
+2 KiB
+T}:T{
+[10 KiB, 12 KiB, 14 KiB]
T}
T{
Large
T}:T{
+2 KiB
+T}:T{
+[16 KiB]
+T}
+:T{
4 KiB
T}:T{
-[4 KiB, 8 KiB, 12 KiB, \&.\&.\&., 4072 KiB]
+[20 KiB, 24 KiB, 28 KiB, 32 KiB]
+T}
+:T{
+8 KiB
+T}:T{
+[40 KiB, 48 KiB, 56 KiB, 64 KiB]
+T}
+:T{
+16 KiB
+T}:T{
+[80 KiB, 96 KiB, 112 KiB, 128 KiB]
+T}
+:T{
+32 KiB
+T}:T{
+[160 KiB, 192 KiB, 224 KiB, 256 KiB]
+T}
+:T{
+64 KiB
+T}:T{
+[320 KiB, 384 KiB, 448 KiB, 512 KiB]
+T}
+:T{
+128 KiB
+T}:T{
+[640 KiB, 768 KiB, 896 KiB, 1 MiB]
+T}
+:T{
+256 KiB
+T}:T{
+[1280 KiB, 1536 KiB, 1792 KiB]
T}
T{
Huge
T}:T{
+256 KiB
+T}:T{
+[2 MiB]
+T}
+:T{
+512 KiB
+T}:T{
+[2560 KiB, 3 MiB, 3584 KiB, 4 MiB]
+T}
+:T{
+1 MiB
+T}:T{
+[5 MiB, 6 MiB, 7 MiB, 8 MiB]
+T}
+:T{
+2 MiB
+T}:T{
+[10 MiB, 12 MiB, 14 MiB, 16 MiB]
+T}
+:T{
4 MiB
T}:T{
-[4 MiB, 8 MiB, 12 MiB, \&.\&.\&.]
+[20 MiB, 24 MiB, 28 MiB, 32 MiB]
+T}
+:T{
+8 MiB
+T}:T{
+[40 MiB, 48 MiB, 56 MiB, 64 MiB]
+T}
+:T{
+\&.\&.\&.
+T}:T{
+\&.\&.\&.
T}
.TE
.sp 1
@@ -660,15 +664,15 @@ If a value is passed in, refresh the data from which the
functions report values, and increment the epoch\&. Return the current epoch\&. This is useful for detecting whether another thread caused a refresh\&.
.RE
.PP
-"config\&.debug" (\fBbool\fR) r\-
+"config\&.cache_oblivious" (\fBbool\fR) r\-
.RS 4
-\fB\-\-enable\-debug\fR
+\fB\-\-enable\-cache\-oblivious\fR
was specified during build configuration\&.
.RE
.PP
-"config\&.dss" (\fBbool\fR) r\-
+"config\&.debug" (\fBbool\fR) r\-
.RS 4
-\fB\-\-enable\-dss\fR
+\fB\-\-enable\-debug\fR
was specified during build configuration\&.
.RE
.PP
@@ -684,12 +688,6 @@ was specified during build configuration\&.
was specified during build configuration\&.
.RE
.PP
-"config\&.mremap" (\fBbool\fR) r\-
-.RS 4
-\fB\-\-enable\-mremap\fR
-was specified during build configuration\&.
-.RE
-.PP
"config\&.munmap" (\fBbool\fR) r\-
.RS 4
\fB\-\-enable\-munmap\fR
@@ -763,14 +761,16 @@ is specified during configuration, in which case it is enabled by default\&.
.RS 4
dss (\fBsbrk\fR(2)) allocation precedence as related to
\fBmmap\fR(2)
-allocation\&. The following settings are supported: \(lqdisabled\(rq, \(lqprimary\(rq, and \(lqsecondary\(rq\&. The default is \(lqsecondary\(rq if
-"config\&.dss"
-is true, \(lqdisabled\(rq otherwise\&.
+allocation\&. The following settings are supported if
+\fBsbrk\fR(2)
+is supported by the operating system: \(lqdisabled\(rq, \(lqprimary\(rq, and \(lqsecondary\(rq; otherwise only \(lqdisabled\(rq is supported\&. The default is \(lqsecondary\(rq if
+\fBsbrk\fR(2)
+is supported by the operating system; \(lqdisabled\(rq otherwise\&.
.RE
.PP
"opt\&.lg_chunk" (\fBsize_t\fR) r\-
.RS 4
-Virtual memory chunk size (log base 2)\&. If a chunk size outside the supported size range is specified, the size is silently clipped to the minimum/maximum supported size\&. The default chunk size is 4 MiB (2^22)\&.
+Virtual memory chunk size (log base 2)\&. If a chunk size outside the supported size range is specified, the size is silently clipped to the minimum/maximum supported size\&. The default chunk size is 2 MiB (2^21)\&.
.RE
.PP
"opt\&.narenas" (\fBsize_t\fR) r\-
@@ -782,7 +782,11 @@ Maximum number of arenas to use for automatic multiplexing of threads and arenas
.RS 4
Per\-arena minimum ratio (log base 2) of active to dirty pages\&. Some dirty unused pages may be allowed to accumulate, within the limit set by the ratio (or one chunk worth of dirty pages, whichever is greater), before informing the kernel about some of those pages via
\fBmadvise\fR(2)
-or a similar system call\&. This provides the kernel with sufficient information to recycle dirty pages if physical memory becomes scarce and the pages remain unused\&. The default minimum ratio is 8:1 (2^3:1); an option value of \-1 will disable dirty page purging\&.
+or a similar system call\&. This provides the kernel with sufficient information to recycle dirty pages if physical memory becomes scarce and the pages remain unused\&. The default minimum ratio is 8:1 (2^3:1); an option value of \-1 will disable dirty page purging\&. See
+"arenas\&.lg_dirty_mult"
+and
+"arena\&.<i>\&.lg_dirty_mult"
+for related dynamic control options\&.
.RE
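A sketch of the dynamic counterpart, lowering the dirty-page allowance at run time (the value 5, i.e. a 32:1 active:dirty ratio, is only an example):

    #include <sys/types.h>
    #include <jemalloc/jemalloc.h>

    void tighten_purging(void) {
        ssize_t lg_dirty_mult = 5;  /* keep at most one dirty page per 32 active pages */
        mallctl("arenas.lg_dirty_mult", NULL, NULL,
            &lg_dirty_mult, sizeof(lg_dirty_mult));
    }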
.PP
"opt\&.stats_print" (\fBbool\fR) r\-
@@ -793,16 +797,21 @@ function is called at program exit via an
\fBatexit\fR(3)
function\&. If
\fB\-\-enable\-stats\fR
-is specified during configuration, this has the potential to cause deadlock for a multi\-threaded process that exits while one or more threads are executing in the memory allocation functions\&. Therefore, this option should only be used with care; it is primarily intended as a performance tuning aid during application development\&. This option is disabled by default\&.
+is specified during configuration, this has the potential to cause deadlock for a multi\-threaded process that exits while one or more threads are executing in the memory allocation functions\&. Furthermore,
+\fBatexit\fR\fB\fR
+may allocate memory during application initialization and then deadlock internally when jemalloc in turn calls
+\fBatexit\fR\fB\fR, so this option is not universally usable (though the application can register its own
+\fBatexit\fR\fB\fR
+function with equivalent functionality)\&. Therefore, this option should only be used with care; it is primarily intended as a performance tuning aid during application development\&. This option is disabled by default\&.
.RE
.PP
-"opt\&.junk" (\fBbool\fR) r\- [\fB\-\-enable\-fill\fR]
+"opt\&.junk" (\fBconst char *\fR) r\- [\fB\-\-enable\-fill\fR]
.RS 4
-Junk filling enabled/disabled\&. If enabled, each byte of uninitialized allocated memory will be initialized to
-0xa5\&. All deallocated memory will be initialized to
-0x5a\&. This is intended for debugging and will impact performance negatively\&. This option is disabled by default unless
+Junk filling\&. If set to "alloc", each byte of uninitialized allocated memory will be initialized to
+0xa5\&. If set to "free", all deallocated memory will be initialized to
+0x5a\&. If set to "true", both allocated and deallocated memory will be initialized, and if set to "false", junk filling be disabled entirely\&. This is intended for debugging and will impact performance negatively\&. This option is "false" by default unless
\fB\-\-enable\-debug\fR
-is specified during configuration, in which case it is enabled by default unless running inside
+is specified during configuration, in which case it is "true" by default unless running inside
\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2\&.
.RE
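A sketch of selecting such options through the malloc_conf symbol from the synopsis, compiled into the application (the particular option string is only an example; "junk" additionally requires a build with --enable-fill):

    #include <jemalloc/jemalloc.h>

    /* Read once by jemalloc when it first initializes. */
    const char *malloc_conf = "junk:alloc,lg_tcache_max:14";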
.PP
@@ -825,10 +834,9 @@ option is enabled, the redzones are checked for corruption during deallocation\&
"opt\&.zero" (\fBbool\fR) r\- [\fB\-\-enable\-fill\fR]
.RS 4
Zero filling enabled/disabled\&. If enabled, each byte of uninitialized allocated memory will be initialized to 0\&. Note that this initialization only happens once for each byte, so
-\fBrealloc\fR\fB\fR,
-\fBrallocx\fR\fB\fR
+\fBrealloc\fR\fB\fR
and
-\fBrallocm\fR\fB\fR
+\fBrallocx\fR\fB\fR
calls do not zero memory that was previously allocated\&. This is intended for debugging and will impact performance negatively\&. This option is disabled by default\&.
.RE
.PP
@@ -839,12 +847,6 @@ Allocation tracing based on
enabled/disabled\&. This option is disabled by default\&.
.RE
.PP
-"opt\&.valgrind" (\fBbool\fR) r\- [\fB\-\-enable\-valgrind\fR]
-.RS 4
-\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2
-support enabled/disabled\&. This option is vestigal because jemalloc auto\-detects whether it is running inside Valgrind\&. This option is disabled by default, unless running inside Valgrind\&.
-.RE
-.PP
"opt\&.xmalloc" (\fBbool\fR) r\- [\fB\-\-enable\-xmalloc\fR]
.RS 4
Abort\-on\-out\-of\-memory enabled/disabled\&. If enabled, rather than returning failure for any allocation function, display a diagnostic message on
@@ -867,15 +869,15 @@ This option is disabled by default\&.
.PP
"opt\&.tcache" (\fBbool\fR) r\- [\fB\-\-enable\-tcache\fR]
.RS 4
-Thread\-specific caching enabled/disabled\&. When there are multiple threads, each thread uses a thread\-specific cache for objects up to a certain size\&. Thread\-specific caching allows many allocations to be satisfied without performing any thread synchronization, at the cost of increased memory use\&. See the
+Thread\-specific caching (tcache) enabled/disabled\&. When there are multiple threads, each thread uses a tcache for objects up to a certain size\&. Thread\-specific caching allows many allocations to be satisfied without performing any thread synchronization, at the cost of increased memory use\&. See the
"opt\&.lg_tcache_max"
option for related tuning information\&. This option is enabled by default unless running inside
-\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2\&.
+\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2, in which case it is forcefully disabled\&.
.RE
.PP
"opt\&.lg_tcache_max" (\fBsize_t\fR) r\- [\fB\-\-enable\-tcache\fR]
.RS 4
-Maximum size class (log base 2) to cache in the thread\-specific cache\&. At a minimum, all small size classes are cached, and at a maximum all large size classes are cached\&. The default maximum is 32 KiB (2^15)\&.
+Maximum size class (log base 2) to cache in the thread\-specific cache (tcache)\&. At a minimum, all small size classes are cached, and at a maximum all large size classes are cached\&. The default maximum is 32 KiB (2^15)\&.
.RE
.PP
"opt\&.prof" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
@@ -892,9 +894,11 @@ option for information on interval\-triggered profile dumping, the
"opt\&.prof_gdump"
option for information on high\-water\-triggered profile dumping, and the
"opt\&.prof_final"
-option for final profile dumping\&. Profile output is compatible with the included
+option for final profile dumping\&. Profile output is compatible with the
+\fBjeprof\fR
+command, which is based on the
\fBpprof\fR
-Perl script, which originates from the
+that is developed as part of the
\m[blue]\fBgperftools package\fR\m[]\&\s-2\u[3]\d\s+2\&.
.RE
.PP
@@ -904,7 +908,7 @@ Filename prefix for profile dumps\&. If the prefix is set to the empty string, n
jeprof\&.
.RE
.PP
-"opt\&.prof_active" (\fBbool\fR) rw [\fB\-\-enable\-prof\fR]
+"opt\&.prof_active" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Profiling activated/deactivated\&. This is a secondary control mechanism that makes it possible to start the application with profiling enabled (see the
"opt\&.prof"
@@ -913,7 +917,16 @@ option) but inactive, then toggle profiling at any time during program execution
mallctl\&. This option is enabled by default\&.
.RE
.PP
-"opt\&.lg_prof_sample" (\fBssize_t\fR) r\- [\fB\-\-enable\-prof\fR]
+"opt\&.prof_thread_active_init" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
+.RS 4
+Initial setting for
+"thread\&.prof\&.active"
+in newly created threads\&. The initial setting for newly created threads can also be changed during execution via the
+"prof\&.thread_active_init"
+mallctl\&. This option is enabled by default\&.
+.RE
+.PP
+"opt\&.lg_prof_sample" (\fBsize_t\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Average interval (log base 2) between allocation samples, as measured in bytes of allocation activity\&. Increasing the sampling interval decreases profile fidelity, but also decreases the computational overhead\&. The default sample interval is 512 KiB (2^19 B)\&.
.RE
@@ -935,12 +948,8 @@ option\&. By default, interval\-triggered profile dumping is disabled (encoded a
.PP
"opt\&.prof_gdump" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
-Trigger a memory profile dump every time the total virtual memory exceeds the previous maximum\&. Profiles are dumped to files named according to the pattern
-<prefix>\&.<pid>\&.<seq>\&.u<useq>\&.heap, where
-<prefix>
-is controlled by the
-"opt\&.prof_prefix"
-option\&. This option is disabled by default\&.
+Set the initial state of
+"prof\&.gdump", which when enabled triggers a memory profile dump every time the total virtual memory exceeds the previous maximum\&. This option is disabled by default\&.
.RE
.PP
"opt\&.prof_final" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
@@ -952,7 +961,12 @@ function to dump final memory usage to a file named according to the pattern
<prefix>
is controlled by the
"opt\&.prof_prefix"
-option\&. This option is enabled by default\&.
+option\&. Note that
+\fBatexit\fR\fB\fR
+may allocate memory during application initialization and then deadlock internally when jemalloc in turn calls
+\fBatexit\fR\fB\fR, so this option is not universally usable (though the application can register its own
+\fBatexit\fR\fB\fR
+function with equivalent functionality)\&. This option is disabled by default\&.
.RE
.PP
"opt\&.prof_leak" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
@@ -1007,10 +1021,42 @@ Enable/disable calling thread\*(Aqs tcache\&. The tcache is implicitly flushed a
.PP
"thread\&.tcache\&.flush" (\fBvoid\fR) \-\- [\fB\-\-enable\-tcache\fR]
.RS 4
-Flush calling thread\*(Aqs tcache\&. This interface releases all cached objects and internal data structures associated with the calling thread\*(Aqs thread\-specific cache\&. Ordinarily, this interface need not be called, since automatic periodic incremental garbage collection occurs, and the thread cache is automatically discarded when a thread exits\&. However, garbage collection is triggered by allocation activity, so it is possible for a thread that stops allocating/deallocating to retain its cache indefinitely, in which case the developer may find manual flushing useful\&.
+Flush calling thread\*(Aqs thread\-specific cache (tcache)\&. This interface releases all cached objects and internal data structures associated with the calling thread\*(Aqs tcache\&. Ordinarily, this interface need not be called, since automatic periodic incremental garbage collection occurs, and the thread cache is automatically discarded when a thread exits\&. However, garbage collection is triggered by allocation activity, so it is possible for a thread that stops allocating/deallocating to retain its cache indefinitely, in which case the developer may find manual flushing useful\&.
+.RE
+.PP
+"thread\&.prof\&.name" (\fBconst char *\fR) r\- or \-w [\fB\-\-enable\-prof\fR]
+.RS 4
+Get/set the descriptive name associated with the calling thread in memory profile dumps\&. An internal copy of the name string is created, so the input string need not be maintained after this interface completes execution\&. The output string of this interface should be copied for non\-ephemeral uses, because multiple implementation details can cause asynchronous string deallocation\&. Furthermore, each invocation of this interface can only read or write; simultaneous read/write is not supported due to string lifetime limitations\&. The name string must be nil\-terminated and comprised only of characters in the sets recognized by
+\fBisgraph\fR(3)
+and
+\fBisblank\fR(3)\&.
+.RE
+.PP
+"thread\&.prof\&.active" (\fBbool\fR) rw [\fB\-\-enable\-prof\fR]
+.RS 4
+Control whether sampling is currently active for the calling thread\&. This is an activation mechanism in addition to
+"prof\&.active"; both must be active for the calling thread to sample\&. This flag is enabled by default\&.
+.RE
+.PP
+"tcache\&.create" (\fBunsigned\fR) r\- [\fB\-\-enable\-tcache\fR]
+.RS 4
+Create an explicit thread\-specific cache (tcache) and return an identifier that can be passed to the
+\fBMALLOCX_TCACHE(\fR\fB\fItc\fR\fR\fB)\fR
+macro to explicitly use the specified cache rather than the automatically managed one that is used by default\&. Each explicit cache can be used by only one thread at a time; the application must assure that this constraint holds\&.
+.RE
+.PP
+"tcache\&.flush" (\fBunsigned\fR) \-w [\fB\-\-enable\-tcache\fR]
+.RS 4
+Flush the specified thread\-specific cache (tcache)\&. The same considerations apply to this interface as to
+"thread\&.tcache\&.flush", except that the tcache will never be automatically be discarded\&.
.RE
.PP
-"arena\&.<i>\&.purge" (\fBunsigned\fR) \-\-
+"tcache\&.destroy" (\fBunsigned\fR) \-w [\fB\-\-enable\-tcache\fR]
+.RS 4
+Flush the specified thread\-specific cache (tcache) and make the identifier available for use during a future tcache creation\&.
+.RE
+.PP
+"arena\&.<i>\&.purge" (\fBvoid\fR) \-\-
.RS 4
Purge unused dirty pages for arena <i>, or for all arenas if <i> equals
"arenas\&.narenas"\&.
@@ -1019,11 +1065,237 @@ Purge unused dirty pages for arena <i>, or for all arenas if <i> equals
"arena\&.<i>\&.dss" (\fBconst char *\fR) rw
.RS 4
Set the precedence of dss allocation as related to mmap allocation for arena <i>, or for all arenas if <i> equals
-"arenas\&.narenas"\&. Note that even during huge allocation this setting is read from the arena that would be chosen for small or large allocation so that applications can depend on consistent dss versus mmap allocation regardless of allocation size\&. See
+"arenas\&.narenas"\&. See
"opt\&.dss"
for supported settings\&.
.RE
.PP
+"arena\&.<i>\&.lg_dirty_mult" (\fBssize_t\fR) rw
+.RS 4
+Current per\-arena minimum ratio (log base 2) of active to dirty pages for arena <i>\&. Each time this interface is set and the ratio is increased, pages are synchronously purged as necessary to impose the new ratio\&. See
+"opt\&.lg_dirty_mult"
+for additional information\&.
+.RE
+.PP
+"arena\&.<i>\&.chunk_hooks" (\fBchunk_hooks_t\fR) rw
+.RS 4
+Get or set the chunk management hook functions for arena <i>\&. The functions must be capable of operating on all extant chunks associated with arena <i>, usually by passing unknown chunks to the replaced functions\&. In practice, it is feasible to control allocation for arenas created via
+"arenas\&.extend"
+such that all chunks originate from an application\-supplied chunk allocator (by setting custom chunk hook functions just after arena creation), but the automatically created arenas may have already created chunks prior to the application having an opportunity to take over chunk allocation\&.
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+typedef struct {
+ chunk_alloc_t *alloc;
+ chunk_dalloc_t *dalloc;
+ chunk_commit_t *commit;
+ chunk_decommit_t *decommit;
+ chunk_purge_t *purge;
+ chunk_split_t *split;
+ chunk_merge_t *merge;
+} chunk_hooks_t;
+.fi
+.if n \{\
+.RE
+.\}
+.sp
+The
+\fBchunk_hooks_t\fR
+structure comprises function pointers which are described individually below\&. jemalloc uses these functions to manage chunk lifetime, which starts off with allocation of mapped committed memory, in the simplest case followed by deallocation\&. However, there are performance and platform reasons to retain chunks for later reuse\&. Cleanup attempts cascade from deallocation to decommit to purging, which gives the chunk management functions opportunities to reject the most permanent cleanup operations in favor of less permanent (and often less costly) operations\&. The chunk splitting and merging operations can also be opted out of, but this is mainly intended to support platforms on which virtual memory mappings provided by the operating system kernel do not automatically coalesce and split, e\&.g\&. Windows\&.
+.HP \w'typedef\ void\ *(chunk_alloc_t)('u
+.BI "typedef void *(chunk_alloc_t)(void\ *" "chunk" ", size_t\ " "size" ", size_t\ " "alignment" ", bool\ *" "zero" ", bool\ *" "commit" ", unsigned\ " "arena_ind" ");"
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+.fi
+.if n \{\
+.RE
+.\}
+.sp
+A chunk allocation function conforms to the
+\fBchunk_alloc_t\fR
+type and upon success returns a pointer to
+\fIsize\fR
+bytes of mapped memory on behalf of arena
+\fIarena_ind\fR
+such that the chunk\*(Aqs base address is a multiple of
+\fIalignment\fR, as well as setting
+\fI*zero\fR
+to indicate whether the chunk is zeroed and
+\fI*commit\fR
+to indicate whether the chunk is committed\&. Upon error the function returns
+\fBNULL\fR
+and leaves
+\fI*zero\fR
+and
+\fI*commit\fR
+unmodified\&. The
+\fIsize\fR
+parameter is always a multiple of the chunk size\&. The
+\fIalignment\fR
+parameter is always a power of two at least as large as the chunk size\&. Zeroing is mandatory if
+\fI*zero\fR
+is true upon function entry\&. Committing is mandatory if
+\fI*commit\fR
+is true upon function entry\&. If
+\fIchunk\fR
+is not
+\fBNULL\fR, the returned pointer must be
+\fIchunk\fR
+on success or
+\fBNULL\fR
+on error\&. Committed memory may be committed in absolute terms as on a system that does not overcommit, or in implicit terms as on a system that overcommits and satisfies physical memory needs on demand via soft page faults\&. Note that replacing the default chunk allocation function makes the arena\*(Aqs
+"arena\&.<i>\&.dss"
+setting irrelevant\&.
+.HP \w'typedef\ bool\ (chunk_dalloc_t)('u
+.BI "typedef bool (chunk_dalloc_t)(void\ *" "chunk" ", size_t\ " "size" ", bool\ " "committed" ", unsigned\ " "arena_ind" ");"
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+.fi
+.if n \{\
+.RE
+.\}
+.sp
+A chunk deallocation function conforms to the
+\fBchunk_dalloc_t\fR
+type and deallocates a
+\fIchunk\fR
+of given
+\fIsize\fR
+with
+\fIcommitted\fR/decommitted memory as indicated, on behalf of arena
+\fIarena_ind\fR, returning false upon success\&. If the function returns true, this indicates opt\-out from deallocation; the virtual memory mapping associated with the chunk remains mapped, in the same commit state, and available for future use, in which case it will be automatically retained for later reuse\&.
+.HP \w'typedef\ bool\ (chunk_commit_t)('u
+.BI "typedef bool (chunk_commit_t)(void\ *" "chunk" ", size_t\ " "size" ", size_t\ " "offset" ", size_t\ " "length" ", unsigned\ " "arena_ind" ");"
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+.fi
+.if n \{\
+.RE
+.\}
+.sp
+A chunk commit function conforms to the
+\fBchunk_commit_t\fR
+type and commits zeroed physical memory to back pages within a
+\fIchunk\fR
+of given
+\fIsize\fR
+at
+\fIoffset\fR
+bytes, extending for
+\fIlength\fR
+on behalf of arena
+\fIarena_ind\fR, returning false upon success\&. Committed memory may be committed in absolute terms as on a system that does not overcommit, or in implicit terms as on a system that overcommits and satisfies physical memory needs on demand via soft page faults\&. If the function returns true, this indicates insufficient physical memory to satisfy the request\&.
+.HP \w'typedef\ bool\ (chunk_decommit_t)('u
+.BI "typedef bool (chunk_decommit_t)(void\ *" "chunk" ", size_t\ " "size" ", size_t\ " "offset" ", size_t\ " "length" ", unsigned\ " "arena_ind" ");"
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+.fi
+.if n \{\
+.RE
+.\}
+.sp
+A chunk decommit function conforms to the
+\fBchunk_decommit_t\fR
+type and decommits any physical memory that is backing pages within a
+\fIchunk\fR
+of given
+\fIsize\fR
+at
+\fIoffset\fR
+bytes, extending for
+\fIlength\fR
+on behalf of arena
+\fIarena_ind\fR, returning false upon success, in which case the pages will be committed via the chunk commit function before being reused\&. If the function returns true, this indicates opt\-out from decommit; the memory remains committed and available for future use, in which case it will be automatically retained for later reuse\&.
+.HP \w'typedef\ bool\ (chunk_purge_t)('u
+.BI "typedef bool (chunk_purge_t)(void\ *" "chunk" ", size_t" "size" ", size_t\ " "offset" ", size_t\ " "length" ", unsigned\ " "arena_ind" ");"
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+.fi
+.if n \{\
+.RE
+.\}
+.sp
+A chunk purge function conforms to the
+\fBchunk_purge_t\fR
+type and optionally discards physical pages within the virtual memory mapping associated with
+\fIchunk\fR
+of given
+\fIsize\fR
+at
+\fIoffset\fR
+bytes, extending for
+\fIlength\fR
+on behalf of arena
+\fIarena_ind\fR, returning false if pages within the purged virtual memory range will be zero\-filled the next time they are accessed\&.
+.HP \w'typedef\ bool\ (chunk_split_t)('u
+.BI "typedef bool (chunk_split_t)(void\ *" "chunk" ", size_t\ " "size" ", size_t\ " "size_a" ", size_t\ " "size_b" ", bool\ " "committed" ", unsigned\ " "arena_ind" ");"
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+.fi
+.if n \{\
+.RE
+.\}
+.sp
+A chunk split function conforms to the
+\fBchunk_split_t\fR
+type and optionally splits
+\fIchunk\fR
+of given
+\fIsize\fR
+into two adjacent chunks, the first of
+\fIsize_a\fR
+bytes, and the second of
+\fIsize_b\fR
+bytes, operating on
+\fIcommitted\fR/decommitted memory as indicated, on behalf of arena
+\fIarena_ind\fR, returning false upon success\&. If the function returns true, this indicates that the chunk remains unsplit and therefore should continue to be operated on as a whole\&.
+.HP \w'typedef\ bool\ (chunk_merge_t)('u
+.BI "typedef bool (chunk_merge_t)(void\ *" "chunk_a" ", size_t\ " "size_a" ", void\ *" "chunk_b" ", size_t\ " "size_b" ", bool\ " "committed" ", unsigned\ " "arena_ind" ");"
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+.fi
+.if n \{\
+.RE
+.\}
+.sp
+A chunk merge function conforms to the
+\fBchunk_merge_t\fR
+type and optionally merges adjacent chunks,
+\fIchunk_a\fR
+of given
+\fIsize_a\fR
+and
+\fIchunk_b\fR
+of given
+\fIsize_b\fR
+into one contiguous chunk, operating on
+\fIcommitted\fR/decommitted memory as indicated, on behalf of arena
+\fIarena_ind\fR, returning false upon success\&. If the function returns true, this indicates that the chunks remain distinct mappings and therefore should continue to be operated on independently\&.
+.RE
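A sketch of the intended usage: create an arena, capture its default hooks, and substitute a wrapper (my_chunk_alloc and install_hooks are hypothetical names; chunk_hooks_t and the hook typedefs are assumed to come from <jemalloc/jemalloc.h>):

    #include <stdbool.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    static chunk_hooks_t default_hooks;

    /* Hypothetical wrapper that just delegates to the captured default allocator. */
    static void *my_chunk_alloc(void *chunk, size_t size, size_t alignment,
        bool *zero, bool *commit, unsigned arena_ind) {
        return default_hooks.alloc(chunk, size, alignment, zero, commit, arena_ind);
    }

    int install_hooks(void) {
        unsigned arena;
        size_t sz = sizeof(arena);
        char name[64];
        chunk_hooks_t hooks;

        /* A fresh arena, so every chunk it ever uses goes through the hooks. */
        if (mallctl("arenas.extend", &arena, &sz, NULL, 0) != 0)
            return -1;
        snprintf(name, sizeof(name), "arena.%u.chunk_hooks", arena);

        /* Read the current hooks, override alloc only, then write them back. */
        sz = sizeof(default_hooks);
        if (mallctl(name, &default_hooks, &sz, NULL, 0) != 0)
            return -1;
        hooks = default_hooks;
        hooks.alloc = my_chunk_alloc;
        return mallctl(name, NULL, NULL, &hooks, sizeof(hooks));
    }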
+.PP
"arenas\&.narenas" (\fBunsigned\fR) r\-
.RS 4
Current limit on number of arenas\&.
@@ -1036,6 +1308,15 @@ An array of
booleans\&. Each boolean indicates whether the corresponding arena is initialized\&.
.RE
.PP
+"arenas\&.lg_dirty_mult" (\fBssize_t\fR) rw
+.RS 4
+Current default per\-arena minimum ratio (log base 2) of active to dirty pages, used to initialize
+"arena\&.<i>\&.lg_dirty_mult"
+during arena creation\&. See
+"opt\&.lg_dirty_mult"
+for additional information\&.
+.RE
+.PP
"arenas\&.quantum" (\fBsize_t\fR) r\-
.RS 4
Quantum size\&.
@@ -1076,7 +1357,7 @@ Number of regions per page run\&.
Number of bytes per page run\&.
.RE
.PP
-"arenas\&.nlruns" (\fBsize_t\fR) r\-
+"arenas\&.nlruns" (\fBunsigned\fR) r\-
.RS 4
Total number of large size classes\&.
.RE
@@ -1086,9 +1367,14 @@ Total number of large size classes\&.
Maximum size supported by this large size class\&.
.RE
.PP
-"arenas\&.purge" (\fBunsigned\fR) \-w
+"arenas\&.nhchunks" (\fBunsigned\fR) r\-
+.RS 4
+Total number of huge size classes\&.
+.RE
+.PP
+"arenas\&.hchunk\&.<i>\&.size" (\fBsize_t\fR) r\-
.RS 4
-Purge unused dirty pages for the specified arena, or for all arenas if none is specified\&.
+Maximum size supported by this huge size class\&.
.RE
.PP
"arenas\&.extend" (\fBunsigned\fR) r\-
@@ -1096,11 +1382,22 @@ Purge unused dirty pages for the specified arena, or for all arenas if none is s
Extend the array of arenas by appending a new arena, and returning the new arena index\&.
.RE
.PP
+"prof\&.thread_active_init" (\fBbool\fR) rw [\fB\-\-enable\-prof\fR]
+.RS 4
+Control the initial setting for
+"thread\&.prof\&.active"
+in newly created threads\&. See the
+"opt\&.prof_thread_active_init"
+option for additional information\&.
+.RE
+.PP
"prof\&.active" (\fBbool\fR) rw [\fB\-\-enable\-prof\fR]
.RS 4
Control whether sampling is currently active\&. See the
"opt\&.prof_active"
-option for additional information\&.
+option for additional information, as well as the interrelated
+"thread\&.prof\&.active"
+mallctl\&.
.RE
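A sketch of toggling sampling from application code (meaningful only when built with --enable-prof and started with the opt.prof option enabled):

    #include <stdbool.h>
    #include <jemalloc/jemalloc.h>

    void set_heap_profiling(bool on) {
        /* The "prof.active" name only exists in --enable-prof builds. */
        mallctl("prof.active", NULL, NULL, &on, sizeof(on));
    }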
.PP
"prof\&.dump" (\fBconst char *\fR) \-w [\fB\-\-enable\-prof\fR]
@@ -1113,6 +1410,30 @@ is controlled by the
option\&.
.RE
.PP
+"prof\&.gdump" (\fBbool\fR) rw [\fB\-\-enable\-prof\fR]
+.RS 4
+When enabled, trigger a memory profile dump every time the total virtual memory exceeds the previous maximum\&. Profiles are dumped to files named according to the pattern
+<prefix>\&.<pid>\&.<seq>\&.u<useq>\&.heap, where
+<prefix>
+is controlled by the
+"opt\&.prof_prefix"
+option\&.
+.RE
+.PP
+"prof\&.reset" (\fBsize_t\fR) \-w [\fB\-\-enable\-prof\fR]
+.RS 4
+Reset all memory profile statistics, and optionally update the sample rate (see
+"opt\&.lg_prof_sample"
+and
+"prof\&.lg_sample")\&.
+.RE
+.PP
+"prof\&.lg_sample" (\fBsize_t\fR) r\- [\fB\-\-enable\-prof\fR]
+.RS 4
+Get the current sample rate (see
+"opt\&.lg_prof_sample")\&.
+.RE
+.PP
"prof\&.interval" (\fBuint64_t\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Average number of bytes allocated between interval\-based profile dumps\&. See the
@@ -1122,7 +1443,7 @@ option for additional information\&.
.PP
"stats\&.cactive" (\fBsize_t *\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Pointer to a counter that contains an approximate count of the current number of bytes in active pages\&. The estimate may be high, but never low, because each arena rounds up to the nearest multiple of the chunk size when computing its contribution to the counter\&. Note that the
+Pointer to a counter that contains an approximate count of the current number of bytes in active pages\&. The estimate may be high, but never low, because each arena rounds up when computing its contribution to the counter\&. Note that the
"epoch"
mallctl has no bearing on this counter\&. Furthermore, counter consistency is maintained via atomic operations, so it is necessary to use an atomic operation in order to guarantee a consistent read when dereferencing the pointer\&.
.RE
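A sketch of the usual read pattern for the stats.* names that follow: bump the epoch to refresh the cached snapshot, then read individual counters (assumes a build with --enable-stats; error handling omitted):

    #include <stdint.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    void print_heap_stats(void) {
        uint64_t epoch = 1;
        size_t sz = sizeof(epoch);
        size_t allocated, active;

        /* Refresh the snapshot that the stats.* mallctls report from. */
        mallctl("epoch", &epoch, &sz, &epoch, sz);

        sz = sizeof(size_t);
        mallctl("stats.allocated", &allocated, &sz, NULL, 0);
        mallctl("stats.active", &active, &sz, NULL, 0);
        fprintf(stderr, "allocated: %zu, active: %zu\n", allocated, active);
    }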
@@ -1136,44 +1457,27 @@ Total number of bytes allocated by the application\&.
.RS 4
Total number of bytes in active pages allocated by the application\&. This is a multiple of the page size, and greater than or equal to
"stats\&.allocated"\&. This does not include
-"stats\&.arenas\&.<i>\&.pdirty"
-and pages entirely devoted to allocator metadata\&.
-.RE
-.PP
-"stats\&.mapped" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
-.RS 4
-Total number of bytes in chunks mapped on behalf of the application\&. This is a multiple of the chunk size, and is at least as large as
-"stats\&.active"\&. This does not include inactive chunks\&.
-.RE
-.PP
-"stats\&.chunks\&.current" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
-.RS 4
-Total number of chunks actively mapped on behalf of the application\&. This does not include inactive chunks\&.
-.RE
-.PP
-"stats\&.chunks\&.total" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
-.RS 4
-Cumulative number of chunks allocated\&.
-.RE
-.PP
-"stats\&.chunks\&.high" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
-.RS 4
-Maximum number of active chunks at any time thus far\&.
+"stats\&.arenas\&.<i>\&.pdirty", nor pages entirely devoted to allocator metadata\&.
.RE
.PP
-"stats\&.huge\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.metadata" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Number of bytes currently allocated by huge objects\&.
+Total number of bytes dedicated to metadata, which comprise base allocations used for bootstrap\-sensitive internal allocator data structures, arena chunk headers (see
+"stats\&.arenas\&.<i>\&.metadata\&.mapped"), and internal allocations (see
+"stats\&.arenas\&.<i>\&.metadata\&.allocated")\&.
.RE
.PP
-"stats\&.huge\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.resident" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Cumulative number of huge allocation requests\&.
+Maximum number of bytes in physically resident data pages mapped by the allocator, comprising all pages dedicated to allocator metadata, pages backing active allocations, and unused dirty pages\&. This is a maximum rather than precise because pages may not actually be physically resident if they correspond to demand\-zeroed virtual memory that has not yet been touched\&. This is a multiple of the page size, and is larger than
+"stats\&.active"\&.
.RE
.PP
-"stats\&.huge\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.mapped" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Cumulative number of huge deallocation requests\&.
+Total number of bytes in active chunks mapped by the allocator\&. This is a multiple of the chunk size, and is larger than
+"stats\&.active"\&. This does not include inactive chunks, even those that contain unused dirty pages, which means that there is no strict ordering between this and
+"stats\&.resident"\&.
.RE
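A sketch of the usual read pattern for the summary counters above: bump "epoch" so the snapshots are refreshed, then read each size_t statistic. Assumes --enable-stats; the symbols are je_-prefixed in Redis's bundled build.

    #include <stdint.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    static void print_summary_stats(void) {
        uint64_t epoch = 1;
        size_t sz = sizeof(epoch);
        /* Writing any value to "epoch" refreshes the cached statistics. */
        mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));

        const char *names[] = { "stats.allocated", "stats.active",
                                "stats.metadata", "stats.resident", "stats.mapped" };
        for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
            size_t v;
            sz = sizeof(v);
            if (mallctl(names[i], &v, &sz, NULL, 0) == 0)
                printf("%s: %zu\n", names[i], v);
        }
    }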
.PP
"stats\&.arenas\&.<i>\&.dss" (\fBconst char *\fR) r\-
@@ -1185,6 +1489,13 @@ allocation\&. See
for details\&.
.RE
.PP
+"stats\&.arenas\&.<i>\&.lg_dirty_mult" (\fBssize_t\fR) r\-
+.RS 4
+Minimum ratio (log base 2) of active to dirty pages\&. See
+"opt\&.lg_dirty_mult"
+for details\&.
+.RE
+.PP
"stats\&.arenas\&.<i>\&.nthreads" (\fBunsigned\fR) r\-
.RS 4
Number of threads currently assigned to arena\&.
@@ -1207,6 +1518,24 @@ or similar has not been called\&.
Number of mapped bytes\&.
.RE
.PP
+"stats\&.arenas\&.<i>\&.metadata\&.mapped" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Number of mapped bytes in arena chunk headers, which track the states of the non\-metadata pages\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.metadata\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Number of bytes dedicated to internal allocations\&. Internal allocations differ from application\-originated allocations in that they are for internal use, and that they are omitted from heap profiles\&. This statistic is reported separately from
+"stats\&.metadata"
+and
+"stats\&.arenas\&.<i>\&.metadata\&.mapped"
+because it overlaps with e\&.g\&. the
+"stats\&.allocated"
+and
+"stats\&.active"
+statistics, whereas the other metadata statistics do not\&.
+.RE
+.PP
"stats\&.arenas\&.<i>\&.npurge" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Number of dirty page purge sweeps performed\&.
@@ -1264,9 +1593,24 @@ Cumulative number of large deallocation requests served directly by the arena\&.
Cumulative number of large allocation requests\&.
.RE
.PP
-"stats\&.arenas\&.<i>\&.bins\&.<j>\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.huge\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Number of bytes currently allocated by huge objects\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.huge\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of huge allocation requests served directly by the arena\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.huge\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
-Current number of bytes allocated by bin\&.
+Cumulative number of huge deallocation requests served directly by the arena\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.huge\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of huge allocation requests\&.
.RE
.PP
"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
@@ -1284,6 +1628,11 @@ Cumulative number of allocations returned to bin\&.
Cumulative number of allocation requests\&.
.RE
.PP
+"stats\&.arenas\&.<i>\&.bins\&.<j>\&.curregs" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Current number of regions for this size class\&.
+.RE
+.PP
"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nfills" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR \fB\-\-enable\-tcache\fR]
.RS 4
Cumulative number of tcache fills\&.
@@ -1328,6 +1677,26 @@ Cumulative number of allocation requests for this size class\&.
.RS 4
Current number of runs for this size class\&.
.RE
+.PP
+"stats\&.arenas\&.<i>\&.hchunks\&.<j>\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of allocation requests for this size class served directly by the arena\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.hchunks\&.<j>\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of deallocation requests for this size class served directly by the arena\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.hchunks\&.<j>\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of allocation requests for this size class\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.hchunks\&.<j>\&.curhchunks" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Current number of huge allocations for this size class\&.
+.RE
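A hedged sketch of reading these per-arena, per-size-class counters through the MIB interface, patching the <i> and <j> components in place (assumptions: --enable-stats, arena 0, nhchunks obtained as in the earlier sketch, and an "epoch" refresh beforehand for current values):

    #include <inttypes.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    static void print_arena0_hchunk_stats(unsigned nhchunks) {
        size_t mib[6];
        size_t miblen = sizeof(mib) / sizeof(mib[0]);
        if (mallctlnametomib("stats.arenas.0.hchunks.0.nmalloc", mib, &miblen) != 0)
            return;
        mib[2] = 0;                   /* arena index <i> */
        for (unsigned j = 0; j < nhchunks; j++) {
            uint64_t nmalloc;
            size_t sz = sizeof(nmalloc);
            mib[4] = j;               /* size class index <j> */
            if (mallctlbymib(mib, miblen, &nmalloc, &sz, NULL, 0) == 0)
                printf("arena 0, huge class %u: %" PRIu64 " allocations\n", j, nmalloc);
        }
    }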
.SH "DEBUGGING MALLOC PROBLEMS"
.PP
When debugging, it is a good idea to configure/build jemalloc with the
@@ -1513,44 +1882,6 @@ The
\fBmalloc_usable_size\fR\fB\fR
function returns the usable size of the allocation pointed to by
\fIptr\fR\&.
-.SS "Experimental API"
-.PP
-The
-\fBallocm\fR\fB\fR,
-\fBrallocm\fR\fB\fR,
-\fBsallocm\fR\fB\fR,
-\fBdallocm\fR\fB\fR, and
-\fBnallocm\fR\fB\fR
-functions return
-\fBALLOCM_SUCCESS\fR
-on success; otherwise they return an error value\&. The
-\fBallocm\fR\fB\fR,
-\fBrallocm\fR\fB\fR, and
-\fBnallocm\fR\fB\fR
-functions will fail if:
-.PP
-ALLOCM_ERR_OOM
-.RS 4
-Out of memory\&. Insufficient contiguous memory was available to service the allocation request\&. The
-\fBallocm\fR\fB\fR
-function additionally sets
-\fI*ptr\fR
-to
-\fBNULL\fR, whereas the
-\fBrallocm\fR\fB\fR
-function leaves
-\fB*ptr\fR
-unmodified\&.
-.RE
-The
-\fBrallocm\fR\fB\fR
-function will also fail if:
-.PP
-ALLOCM_ERR_NOT_MOVED
-.RS 4
-\fBALLOCM_NO_MOVE\fR
-was specified, but the reallocation request could not be serviced without moving the object\&.
-.RE
.SH "ENVIRONMENT"
.PP
The following environment variable affects the execution of the allocation functions:
diff --git a/deps/jemalloc/doc/jemalloc.html b/deps/jemalloc/doc/jemalloc.html
index 5a9fc7789..7b8e2be8c 100644
--- a/deps/jemalloc/doc/jemalloc.html
+++ b/deps/jemalloc/doc/jemalloc.html
@@ -1,8 +1,7 @@
-<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>JEMALLOC</title><meta name="generator" content="DocBook XSL Stylesheets V1.78.1"></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="refentry"><a name="idm316394519664"></a><div class="titlepage"></div><div class="refnamediv"><h2>Name</h2><p>jemalloc &#8212; general purpose memory allocation functions</p></div><div class="refsect1"><a name="library"></a><h2>LIBRARY</h2><p>This manual describes jemalloc 3.6.0-0-g46c0af68bd248b04df75e4f92d5fb804c3d75340. More information
- can be found at the <a class="ulink" href="http://www.canonware.com/jemalloc/" target="_top">jemalloc website</a>.</p></div><div class="refsynopsisdiv"><h2>SYNOPSIS</h2><div class="funcsynopsis"><pre class="funcsynopsisinfo">#include &lt;<code class="filename">stdlib.h</code>&gt;
-#include &lt;<code class="filename">jemalloc/jemalloc.h</code>&gt;</pre><div class="refsect2"><a name="idm316394002288"></a><h3>Standard API</h3><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">malloc</b>(</code></td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">calloc</b>(</code></td><td>size_t <var class="pdparam">number</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">posix_memalign</b>(</code></td><td>void **<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">alignment</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">aligned_alloc</b>(</code></td><td>size_t <var class="pdparam">alignment</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">realloc</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void <b class="fsfunc">free</b>(</code></td><td>void *<var class="pdparam">ptr</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div><div class="refsect2"><a name="idm316393986160"></a><h3>Non-standard API</h3><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">mallocx</b>(</code></td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">rallocx</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">size_t <b class="fsfunc">xallocx</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, 
</td></tr><tr><td> </td><td>size_t <var class="pdparam">extra</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">size_t <b class="fsfunc">sallocx</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void <b class="fsfunc">dallocx</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">size_t <b class="fsfunc">nallocx</b>(</code></td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">mallctl</b>(</code></td><td>const char *<var class="pdparam">name</var>, </td></tr><tr><td> </td><td>void *<var class="pdparam">oldp</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">oldlenp</var>, </td></tr><tr><td> </td><td>void *<var class="pdparam">newp</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">newlen</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">mallctlnametomib</b>(</code></td><td>const char *<var class="pdparam">name</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">mibp</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">miblenp</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">mallctlbymib</b>(</code></td><td>const size_t *<var class="pdparam">mib</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">miblen</var>, </td></tr><tr><td> </td><td>void *<var class="pdparam">oldp</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">oldlenp</var>, </td></tr><tr><td> </td><td>void *<var class="pdparam">newp</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">newlen</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void <b class="fsfunc">malloc_stats_print</b>(</code></td><td>void <var class="pdparam">(*write_cb)</var>
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>JEMALLOC</title><meta name="generator" content="DocBook XSL Stylesheets V1.78.1"></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="refentry"><a name="idp45223136"></a><div class="titlepage"></div><div class="refnamediv"><h2>Name</h2><p>jemalloc &#8212; general purpose memory allocation functions</p></div><div class="refsect1"><a name="library"></a><h2>LIBRARY</h2><p>This manual describes jemalloc 4.0.3-0-ge9192eacf8935e29fc62fddc2701f7942b1cc02c. More information
+ can be found at the <a class="ulink" href="http://www.canonware.com/jemalloc/" target="_top">jemalloc website</a>.</p></div><div class="refsynopsisdiv"><h2>SYNOPSIS</h2><div class="funcsynopsis"><pre class="funcsynopsisinfo">#include &lt;<code class="filename">jemalloc/jemalloc.h</code>&gt;</pre><div class="refsect2"><a name="idp44244480"></a><h3>Standard API</h3><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">malloc</b>(</code></td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">calloc</b>(</code></td><td>size_t <var class="pdparam">number</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">posix_memalign</b>(</code></td><td>void **<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">alignment</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">aligned_alloc</b>(</code></td><td>size_t <var class="pdparam">alignment</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">realloc</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void <b class="fsfunc">free</b>(</code></td><td>void *<var class="pdparam">ptr</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div><div class="refsect2"><a name="idp46062768"></a><h3>Non-standard API</h3><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">mallocx</b>(</code></td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">rallocx</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 
0; cellpadding: 0;"><tr><td><code class="funcdef">size_t <b class="fsfunc">xallocx</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">extra</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">size_t <b class="fsfunc">sallocx</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void <b class="fsfunc">dallocx</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void <b class="fsfunc">sdallocx</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">size_t <b class="fsfunc">nallocx</b>(</code></td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">mallctl</b>(</code></td><td>const char *<var class="pdparam">name</var>, </td></tr><tr><td> </td><td>void *<var class="pdparam">oldp</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">oldlenp</var>, </td></tr><tr><td> </td><td>void *<var class="pdparam">newp</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">newlen</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">mallctlnametomib</b>(</code></td><td>const char *<var class="pdparam">name</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">mibp</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">miblenp</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">mallctlbymib</b>(</code></td><td>const size_t *<var class="pdparam">mib</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">miblen</var>, </td></tr><tr><td> </td><td>void *<var class="pdparam">oldp</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">oldlenp</var>, </td></tr><tr><td> </td><td>void *<var class="pdparam">newp</var>, 
</td></tr><tr><td> </td><td>size_t <var class="pdparam">newlen</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void <b class="fsfunc">malloc_stats_print</b>(</code></td><td>void <var class="pdparam">(*write_cb)</var>
<code>(</code>void *, const char *<code>)</code>
- , </td></tr><tr><td> </td><td>void *<var class="pdparam">cbopaque</var>, </td></tr><tr><td> </td><td>const char *<var class="pdparam">opts</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">size_t <b class="fsfunc">malloc_usable_size</b>(</code></td><td>const void *<var class="pdparam">ptr</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void <b class="fsfunc">(*malloc_message)</b>(</code></td><td>void *<var class="pdparam">cbopaque</var>, </td></tr><tr><td> </td><td>const char *<var class="pdparam">s</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><p><span class="type">const char *</span><code class="varname">malloc_conf</code>;</p></div><div class="refsect2"><a name="idm316388684112"></a><h3>Experimental API</h3><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">allocm</b>(</code></td><td>void **<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">rsize</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">rallocm</b>(</code></td><td>void **<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">rsize</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">extra</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">sallocm</b>(</code></td><td>const void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">rsize</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">dallocm</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">nallocm</b>(</code></td><td>size_t *<var class="pdparam">rsize</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div></div></div><div class="refsect1"><a name="description"></a><h2>DESCRIPTION</h2><div 
class="refsect2"><a name="idm316388663504"></a><h3>Standard API</h3><p>The <code class="function">malloc</code>(<em class="parameter"><code></code></em>) function allocates
+ , </td></tr><tr><td> </td><td>void *<var class="pdparam">cbopaque</var>, </td></tr><tr><td> </td><td>const char *<var class="pdparam">opts</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">size_t <b class="fsfunc">malloc_usable_size</b>(</code></td><td>const void *<var class="pdparam">ptr</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void <b class="fsfunc">(*malloc_message)</b>(</code></td><td>void *<var class="pdparam">cbopaque</var>, </td></tr><tr><td> </td><td>const char *<var class="pdparam">s</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><p><span class="type">const char *</span><code class="varname">malloc_conf</code>;</p></div></div></div><div class="refsect1"><a name="description"></a><h2>DESCRIPTION</h2><div class="refsect2"><a name="idp46115952"></a><h3>Standard API</h3><p>The <code class="function">malloc</code>(<em class="parameter"><code></code></em>) function allocates
<em class="parameter"><code>size</code></em> bytes of uninitialized memory. The allocated
space is suitably aligned (after possible pointer coercion) for storage
of any type of object.</p><p>The <code class="function">calloc</code>(<em class="parameter"><code></code></em>) function allocates
@@ -13,13 +12,13 @@
exception that the allocated memory is explicitly initialized to zero
bytes.</p><p>The <code class="function">posix_memalign</code>(<em class="parameter"><code></code></em>) function
allocates <em class="parameter"><code>size</code></em> bytes of memory such that the
- allocation's base address is an even multiple of
+ allocation's base address is a multiple of
<em class="parameter"><code>alignment</code></em>, and returns the allocation in the value
pointed to by <em class="parameter"><code>ptr</code></em>. The requested
- <em class="parameter"><code>alignment</code></em> must be a power of 2 at least as large
- as <code class="code">sizeof(<span class="type">void *</span>)</code>.</p><p>The <code class="function">aligned_alloc</code>(<em class="parameter"><code></code></em>) function
+ <em class="parameter"><code>alignment</code></em> must be a power of 2 at least as large as
+ <code class="code">sizeof(<span class="type">void *</span>)</code>.</p><p>The <code class="function">aligned_alloc</code>(<em class="parameter"><code></code></em>) function
allocates <em class="parameter"><code>size</code></em> bytes of memory such that the
- allocation's base address is an even multiple of
+ allocation's base address is a multiple of
<em class="parameter"><code>alignment</code></em>. The requested
<em class="parameter"><code>alignment</code></em> must be a power of 2. Behavior is
undefined if <em class="parameter"><code>size</code></em> is not an integral multiple of
@@ -38,37 +37,51 @@
<code class="function">malloc</code>(<em class="parameter"><code></code></em>) for the specified size.</p><p>The <code class="function">free</code>(<em class="parameter"><code></code></em>) function causes the
allocated memory referenced by <em class="parameter"><code>ptr</code></em> to be made
available for future allocations. If <em class="parameter"><code>ptr</code></em> is
- <code class="constant">NULL</code>, no action occurs.</p></div><div class="refsect2"><a name="idm316388639904"></a><h3>Non-standard API</h3><p>The <code class="function">mallocx</code>(<em class="parameter"><code></code></em>),
+ <code class="constant">NULL</code>, no action occurs.</p></div><div class="refsect2"><a name="idp46144704"></a><h3>Non-standard API</h3><p>The <code class="function">mallocx</code>(<em class="parameter"><code></code></em>),
<code class="function">rallocx</code>(<em class="parameter"><code></code></em>),
<code class="function">xallocx</code>(<em class="parameter"><code></code></em>),
<code class="function">sallocx</code>(<em class="parameter"><code></code></em>),
- <code class="function">dallocx</code>(<em class="parameter"><code></code></em>), and
+ <code class="function">dallocx</code>(<em class="parameter"><code></code></em>),
+ <code class="function">sdallocx</code>(<em class="parameter"><code></code></em>), and
<code class="function">nallocx</code>(<em class="parameter"><code></code></em>) functions all have a
<em class="parameter"><code>flags</code></em> argument that can be used to specify
options. The functions only check the options that are contextually
relevant. Use bitwise or (<code class="code">|</code>) operations to
specify one or more of the following:
- </p><div class="variablelist"><dl class="variablelist"><dt><span class="term"><code class="constant">MALLOCX_LG_ALIGN(<em class="parameter"><code>la</code></em>)
+ </p><div class="variablelist"><dl class="variablelist"><dt><a name="MALLOCX_LG_ALIGN"></a><span class="term"><code class="constant">MALLOCX_LG_ALIGN(<em class="parameter"><code>la</code></em>)
</code></span></dt><dd><p>Align the memory allocation to start at an address
that is a multiple of <code class="code">(1 &lt;&lt;
<em class="parameter"><code>la</code></em>)</code>. This macro does not validate
that <em class="parameter"><code>la</code></em> is within the valid
- range.</p></dd><dt><span class="term"><code class="constant">MALLOCX_ALIGN(<em class="parameter"><code>a</code></em>)
+ range.</p></dd><dt><a name="MALLOCX_ALIGN"></a><span class="term"><code class="constant">MALLOCX_ALIGN(<em class="parameter"><code>a</code></em>)
</code></span></dt><dd><p>Align the memory allocation to start at an address
that is a multiple of <em class="parameter"><code>a</code></em>, where
<em class="parameter"><code>a</code></em> is a power of two. This macro does not
validate that <em class="parameter"><code>a</code></em> is a power of 2.
- </p></dd><dt><span class="term"><code class="constant">MALLOCX_ZERO</code></span></dt><dd><p>Initialize newly allocated memory to contain zero
+ </p></dd><dt><a name="MALLOCX_ZERO"></a><span class="term"><code class="constant">MALLOCX_ZERO</code></span></dt><dd><p>Initialize newly allocated memory to contain zero
bytes. In the growing reallocation case, the real size prior to
reallocation defines the boundary between untouched bytes and those
that are initialized to contain zero bytes. If this macro is
- absent, newly allocated memory is uninitialized.</p></dd><dt><span class="term"><code class="constant">MALLOCX_ARENA(<em class="parameter"><code>a</code></em>)
+ absent, newly allocated memory is uninitialized.</p></dd><dt><a name="MALLOCX_TCACHE"></a><span class="term"><code class="constant">MALLOCX_TCACHE(<em class="parameter"><code>tc</code></em>)
+ </code></span></dt><dd><p>Use the thread-specific cache (tcache) specified by
+ the identifier <em class="parameter"><code>tc</code></em>, which must have been
+ acquired via the <a class="link" href="#tcache.create">
+ "<code class="mallctl">tcache.create</code>"
+ </a>
+ mallctl. This macro does not validate that
+ <em class="parameter"><code>tc</code></em> specifies a valid
+ identifier.</p></dd><dt><a name="MALLOC_TCACHE_NONE"></a><span class="term"><code class="constant">MALLOCX_TCACHE_NONE</code></span></dt><dd><p>Do not use a thread-specific cache (tcache). Unless
+ <code class="constant">MALLOCX_TCACHE(<em class="parameter"><code>tc</code></em>)</code> or
+ <code class="constant">MALLOCX_TCACHE_NONE</code> is specified, an
+ automatically managed tcache will be used under many circumstances.
+ This macro cannot be used in the same <em class="parameter"><code>flags</code></em>
+ argument as
+ <code class="constant">MALLOCX_TCACHE(<em class="parameter"><code>tc</code></em>)</code>.</p></dd><dt><a name="MALLOCX_ARENA"></a><span class="term"><code class="constant">MALLOCX_ARENA(<em class="parameter"><code>a</code></em>)
</code></span></dt><dd><p>Use the arena specified by the index
- <em class="parameter"><code>a</code></em> (and by necessity bypass the thread
- cache). This macro has no effect for huge regions, nor for regions
- that were allocated via an arena other than the one specified.
- This macro does not validate that <em class="parameter"><code>a</code></em>
- specifies an arena index in the valid range.</p></dd></dl></div><p>
+ <em class="parameter"><code>a</code></em>. This macro has no effect for regions that
+ were allocated via an arena other than the one specified. This
+ macro does not validate that <em class="parameter"><code>a</code></em> specifies an
+ arena index in the valid range.</p></dd></dl></div><p>
</p><p>The <code class="function">mallocx</code>(<em class="parameter"><code></code></em>) function allocates at
least <em class="parameter"><code>size</code></em> bytes of memory, and returns a pointer
to the base address of the allocation. Behavior is undefined if
@@ -91,7 +104,14 @@
&gt; <code class="constant">SIZE_T_MAX</code>)</code>.</p><p>The <code class="function">sallocx</code>(<em class="parameter"><code></code></em>) function returns the
real size of the allocation at <em class="parameter"><code>ptr</code></em>.</p><p>The <code class="function">dallocx</code>(<em class="parameter"><code></code></em>) function causes the
memory referenced by <em class="parameter"><code>ptr</code></em> to be made available for
- future allocations.</p><p>The <code class="function">nallocx</code>(<em class="parameter"><code></code></em>) function allocates no
+ future allocations.</p><p>The <code class="function">sdallocx</code>(<em class="parameter"><code></code></em>) function is an
+ extension of <code class="function">dallocx</code>(<em class="parameter"><code></code></em>) with a
+ <em class="parameter"><code>size</code></em> parameter to allow the caller to pass in the
+ allocation size as an optimization. The minimum valid input size is the
+ original requested size of the allocation, and the maximum valid input
+ size is the corresponding value returned by
+ <code class="function">nallocx</code>(<em class="parameter"><code></code></em>) or
+ <code class="function">sallocx</code>(<em class="parameter"><code></code></em>).</p><p>The <code class="function">nallocx</code>(<em class="parameter"><code></code></em>) function allocates no
memory, but it performs the same size computation as the
<code class="function">mallocx</code>(<em class="parameter"><code></code></em>) function, and returns the real
size of the allocation that would result from the equivalent
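A small sketch of the non-standard allocation calls this hunk documents, including the new sdallocx() size-hinted free: a 64-byte-aligned, zeroed allocation that bypasses the tcache (assumption: unprefixed symbols).

    #include <string.h>
    #include <jemalloc/jemalloc.h>

    int demo(void) {
        int flags = MALLOCX_ALIGN(64) | MALLOCX_ZERO | MALLOCX_TCACHE_NONE;
        size_t usable = nallocx(100, flags);   /* size this request would consume */
        void *p = mallocx(100, flags);
        if (p == NULL)
            return -1;
        memset(p, 0xab, 100);
        /* Any size in [requested size, nallocx()/sallocx() result] is valid here. */
        sdallocx(p, usable, flags);
        return 0;
    }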
@@ -162,11 +182,12 @@ for (i = 0; i &lt; nbins; i++) {
functions simultaneously. If <code class="option">--enable-stats</code> is
specified during configuration, &#8220;m&#8221; and &#8220;a&#8221; can
be specified to omit merged arena and per arena statistics, respectively;
- &#8220;b&#8221; and &#8220;l&#8221; can be specified to omit per size
- class statistics for bins and large objects, respectively. Unrecognized
- characters are silently ignored. Note that thread caching may prevent
- some statistics from being completely up to date, since extra locking
- would be required to merge counters that track thread cache operations.
+ &#8220;b&#8221;, &#8220;l&#8221;, and &#8220;h&#8221; can be specified to
+ omit per size class statistics for bins, large objects, and huge objects,
+ respectively. Unrecognized characters are silently ignored. Note that
+ thread caching may prevent some statistics from being completely up to
+ date, since extra locking would be required to merge counters that track
+ thread cache operations.
</p><p>The <code class="function">malloc_usable_size</code>(<em class="parameter"><code></code></em>) function
returns the usable size of the allocation pointed to by
<em class="parameter"><code>ptr</code></em>. The return value may be larger than the size
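A one-line sketch of the new statistics opts described in this hunk: omit the per-size-class tables for bins, large objects, and huge objects while keeping the summaries. Passing NULL for the callback routes output through malloc_message() (stderr by default).

    #include <jemalloc/jemalloc.h>

    void dump_stats(void) {
        malloc_stats_print(NULL, NULL, "blh");
    }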
@@ -177,74 +198,7 @@ for (i = 0; i &lt; nbins; i++) {
discrepancy between the requested allocation size and the size reported
by <code class="function">malloc_usable_size</code>(<em class="parameter"><code></code></em>) should not be
depended on, since such behavior is entirely implementation-dependent.
- </p></div><div class="refsect2"><a name="idm316388574208"></a><h3>Experimental API</h3><p>The experimental API is subject to change or removal without regard
- for backward compatibility. If <code class="option">--disable-experimental</code>
- is specified during configuration, the experimental API is
- omitted.</p><p>The <code class="function">allocm</code>(<em class="parameter"><code></code></em>),
- <code class="function">rallocm</code>(<em class="parameter"><code></code></em>),
- <code class="function">sallocm</code>(<em class="parameter"><code></code></em>),
- <code class="function">dallocm</code>(<em class="parameter"><code></code></em>), and
- <code class="function">nallocm</code>(<em class="parameter"><code></code></em>) functions all have a
- <em class="parameter"><code>flags</code></em> argument that can be used to specify
- options. The functions only check the options that are contextually
- relevant. Use bitwise or (<code class="code">|</code>) operations to
- specify one or more of the following:
- </p><div class="variablelist"><dl class="variablelist"><dt><span class="term"><code class="constant">ALLOCM_LG_ALIGN(<em class="parameter"><code>la</code></em>)
- </code></span></dt><dd><p>Align the memory allocation to start at an address
- that is a multiple of <code class="code">(1 &lt;&lt;
- <em class="parameter"><code>la</code></em>)</code>. This macro does not validate
- that <em class="parameter"><code>la</code></em> is within the valid
- range.</p></dd><dt><span class="term"><code class="constant">ALLOCM_ALIGN(<em class="parameter"><code>a</code></em>)
- </code></span></dt><dd><p>Align the memory allocation to start at an address
- that is a multiple of <em class="parameter"><code>a</code></em>, where
- <em class="parameter"><code>a</code></em> is a power of two. This macro does not
- validate that <em class="parameter"><code>a</code></em> is a power of 2.
- </p></dd><dt><span class="term"><code class="constant">ALLOCM_ZERO</code></span></dt><dd><p>Initialize newly allocated memory to contain zero
- bytes. In the growing reallocation case, the real size prior to
- reallocation defines the boundary between untouched bytes and those
- that are initialized to contain zero bytes. If this macro is
- absent, newly allocated memory is uninitialized.</p></dd><dt><span class="term"><code class="constant">ALLOCM_NO_MOVE</code></span></dt><dd><p>For reallocation, fail rather than moving the
- object. This constraint can apply to both growth and
- shrinkage.</p></dd><dt><span class="term"><code class="constant">ALLOCM_ARENA(<em class="parameter"><code>a</code></em>)
- </code></span></dt><dd><p>Use the arena specified by the index
- <em class="parameter"><code>a</code></em> (and by necessity bypass the thread
- cache). This macro has no effect for huge regions, nor for regions
- that were allocated via an arena other than the one specified.
- This macro does not validate that <em class="parameter"><code>a</code></em>
- specifies an arena index in the valid range.</p></dd></dl></div><p>
- </p><p>The <code class="function">allocm</code>(<em class="parameter"><code></code></em>) function allocates at
- least <em class="parameter"><code>size</code></em> bytes of memory, sets
- <em class="parameter"><code>*ptr</code></em> to the base address of the allocation, and
- sets <em class="parameter"><code>*rsize</code></em> to the real size of the allocation if
- <em class="parameter"><code>rsize</code></em> is not <code class="constant">NULL</code>. Behavior
- is undefined if <em class="parameter"><code>size</code></em> is <code class="constant">0</code>, or
- if request size overflows due to size class and/or alignment
- constraints.</p><p>The <code class="function">rallocm</code>(<em class="parameter"><code></code></em>) function resizes the
- allocation at <em class="parameter"><code>*ptr</code></em> to be at least
- <em class="parameter"><code>size</code></em> bytes, sets <em class="parameter"><code>*ptr</code></em> to
- the base address of the allocation if it moved, and sets
- <em class="parameter"><code>*rsize</code></em> to the real size of the allocation if
- <em class="parameter"><code>rsize</code></em> is not <code class="constant">NULL</code>. If
- <em class="parameter"><code>extra</code></em> is non-zero, an attempt is made to resize
- the allocation to be at least <code class="code">(<em class="parameter"><code>size</code></em> +
- <em class="parameter"><code>extra</code></em>)</code> bytes, though inability to allocate
- the extra byte(s) will not by itself result in failure. Behavior is
- undefined if <em class="parameter"><code>size</code></em> is <code class="constant">0</code>, if
- request size overflows due to size class and/or alignment constraints, or
- if <code class="code">(<em class="parameter"><code>size</code></em> +
- <em class="parameter"><code>extra</code></em> &gt;
- <code class="constant">SIZE_T_MAX</code>)</code>.</p><p>The <code class="function">sallocm</code>(<em class="parameter"><code></code></em>) function sets
- <em class="parameter"><code>*rsize</code></em> to the real size of the allocation.</p><p>The <code class="function">dallocm</code>(<em class="parameter"><code></code></em>) function causes the
- memory referenced by <em class="parameter"><code>ptr</code></em> to be made available for
- future allocations.</p><p>The <code class="function">nallocm</code>(<em class="parameter"><code></code></em>) function allocates no
- memory, but it performs the same size computation as the
- <code class="function">allocm</code>(<em class="parameter"><code></code></em>) function, and if
- <em class="parameter"><code>rsize</code></em> is not <code class="constant">NULL</code> it sets
- <em class="parameter"><code>*rsize</code></em> to the real size of the allocation that
- would result from the equivalent <code class="function">allocm</code>(<em class="parameter"><code></code></em>)
- function call. Behavior is undefined if <em class="parameter"><code>size</code></em> is
- <code class="constant">0</code>, or if request size overflows due to size class
- and/or alignment constraints.</p></div></div><div class="refsect1"><a name="tuning"></a><h2>TUNING</h2><p>Once, when the first call is made to one of the memory allocation
+ </p></div></div><div class="refsect1"><a name="tuning"></a><h2>TUNING</h2><p>Once, when the first call is made to one of the memory allocation
routines, the allocator initializes its internals based in part on various
options that can be specified at compile- or run-time.</p><p>The string pointed to by the global variable
<code class="varname">malloc_conf</code>, the &#8220;name&#8221; of the file
@@ -272,8 +226,9 @@ for (i = 0; i &lt; nbins; i++) {
<span class="citerefentry"><span class="refentrytitle">sbrk</span>(2)</span> to obtain memory, which is
suboptimal for several reasons, including race conditions, increased
fragmentation, and artificial limitations on maximum usable memory. If
- <code class="option">--enable-dss</code> is specified during configuration, this
- allocator uses both <span class="citerefentry"><span class="refentrytitle">mmap</span>(2)</span> and
+ <span class="citerefentry"><span class="refentrytitle">sbrk</span>(2)</span> is supported by the operating
+ system, this allocator uses both
+ <span class="citerefentry"><span class="refentrytitle">mmap</span>(2)</span> and
<span class="citerefentry"><span class="refentrytitle">sbrk</span>(2)</span>, in that order of preference;
otherwise only <span class="citerefentry"><span class="refentrytitle">mmap</span>(2)</span> is used.</p><p>This allocator uses multiple arenas in order to reduce lock
contention for threaded programs on multi-processor systems. This works
@@ -295,34 +250,52 @@ for (i = 0; i &lt; nbins; i++) {
chunk size is a power of two that is greater than the page size. Chunks
are always aligned to multiples of the chunk size. This alignment makes it
possible to find metadata for user objects very quickly.</p><p>User objects are broken into three categories according to size:
- small, large, and huge. Small objects are smaller than one page. Large
- objects are smaller than the chunk size. Huge objects are a multiple of
- the chunk size. Small and large objects are managed by arenas; huge
- objects are managed separately in a single data structure that is shared by
- all threads. Huge objects are used by applications infrequently enough
- that this single data structure is not a scalability issue.</p><p>Each chunk that is managed by an arena tracks its contents as runs of
+ small, large, and huge. Small and large objects are managed entirely by
+ arenas; huge objects are additionally aggregated in a single data structure
+ that is shared by all threads. Huge objects are typically used by
+ applications infrequently enough that this single data structure is not a
+ scalability issue.</p><p>Each chunk that is managed by an arena tracks its contents as runs of
contiguous pages (unused, backing a set of small objects, or backing one
large object). The combination of chunk alignment and chunk page maps
makes it possible to determine all metadata regarding small and large
allocations in constant time.</p><p>Small objects are managed in groups by page runs. Each run maintains
- a frontier and free list to track which regions are in use. Allocation
- requests that are no more than half the quantum (8 or 16, depending on
- architecture) are rounded up to the nearest power of two that is at least
- <code class="code">sizeof(<span class="type">double</span>)</code>. All other small
- object size classes are multiples of the quantum, spaced such that internal
- fragmentation is limited to approximately 25% for all but the smallest size
- classes. Allocation requests that are larger than the maximum small size
- class, but small enough to fit in an arena-managed chunk (see the <a class="link" href="#opt.lg_chunk">
+ a bitmap to track which regions are in use. Allocation requests that are no
+ more than half the quantum (8 or 16, depending on architecture) are rounded
+ up to the nearest power of two that is at least <code class="code">sizeof(<span class="type">double</span>)</code>. All other object size
+ classes are multiples of the quantum, spaced such that there are four size
+ classes for each doubling in size, which limits internal fragmentation to
+ approximately 20% for all but the smallest size classes. Small size classes
+ are smaller than four times the page size, large size classes are smaller
+ than the chunk size (see the <a class="link" href="#opt.lg_chunk">
"<code class="mallctl">opt.lg_chunk</code>"
- </a> option), are
- rounded up to the nearest run size. Allocation requests that are too large
- to fit in an arena-managed chunk are rounded up to the nearest multiple of
- the chunk size.</p><p>Allocations are packed tightly together, which can be an issue for
+ </a> option), and
+ huge size classes extend from the chunk size up to one size class less than
+ the full address space size.</p><p>Allocations are packed tightly together, which can be an issue for
multi-threaded applications. If you need to assure that allocations do not
suffer from cacheline sharing, round your allocation requests up to the
nearest multiple of the cacheline size, or specify cacheline alignment when
- allocating.</p><p>Assuming 4 MiB chunks, 4 KiB pages, and a 16-byte quantum on a 64-bit
- system, the size classes in each category are as shown in <a class="xref" href="#size_classes" title="Table 1. Size classes">Table 1</a>.</p><div class="table"><a name="size_classes"></a><p class="title"><b>Table 1. Size classes</b></p><div class="table-contents"><table summary="Size classes" border="1"><colgroup><col align="left" class="c1"><col align="right" class="c2"><col align="left" class="c3"></colgroup><thead><tr><th align="left">Category</th><th align="right">Spacing</th><th align="left">Size</th></tr></thead><tbody><tr><td rowspan="7" align="left">Small</td><td align="right">lg</td><td align="left">[8]</td></tr><tr><td align="right">16</td><td align="left">[16, 32, 48, ..., 128]</td></tr><tr><td align="right">32</td><td align="left">[160, 192, 224, 256]</td></tr><tr><td align="right">64</td><td align="left">[320, 384, 448, 512]</td></tr><tr><td align="right">128</td><td align="left">[640, 768, 896, 1024]</td></tr><tr><td align="right">256</td><td align="left">[1280, 1536, 1792, 2048]</td></tr><tr><td align="right">512</td><td align="left">[2560, 3072, 3584]</td></tr><tr><td align="left">Large</td><td align="right">4 KiB</td><td align="left">[4 KiB, 8 KiB, 12 KiB, ..., 4072 KiB]</td></tr><tr><td align="left">Huge</td><td align="right">4 MiB</td><td align="left">[4 MiB, 8 MiB, 12 MiB, ...]</td></tr></tbody></table></div></div><br class="table-break"></div><div class="refsect1"><a name="mallctl_namespace"></a><h2>MALLCTL NAMESPACE</h2><p>The following names are defined in the namespace accessible via the
+ allocating.</p><p>The <code class="function">realloc</code>(<em class="parameter"><code></code></em>),
+ <code class="function">rallocx</code>(<em class="parameter"><code></code></em>), and
+ <code class="function">xallocx</code>(<em class="parameter"><code></code></em>) functions may resize allocations
+ without moving them under limited circumstances. Unlike the
+ <code class="function">*allocx</code>(<em class="parameter"><code></code></em>) API, the standard API does not
+ officially round up the usable size of an allocation to the nearest size
+ class, so technically it is necessary to call
+ <code class="function">realloc</code>(<em class="parameter"><code></code></em>) to grow e.g. a 9-byte allocation to
+ 16 bytes, or shrink a 16-byte allocation to 9 bytes. Growth and shrinkage
+ trivially succeed in place as long as the pre-size and post-size both round
+ up to the same size class. No other API guarantees are made regarding
+ in-place resizing, but the current implementation also tries to resize large
+ and huge allocations in place, as long as the pre-size and post-size are
+ both large or both huge. In such cases shrinkage always succeeds for large
+ size classes, but for huge size classes the chunk allocator must support
+ splitting (see <a class="link" href="#arena.i.chunk_hooks">
+ "<code class="mallctl">arena.&lt;i&gt;.chunk_hooks</code>"
+ </a>).
+ Growth only succeeds if the trailing memory is currently available, and
+ additionally for huge size classes the chunk allocator must support
+ merging.</p><p>Assuming 2 MiB chunks, 4 KiB pages, and a 16-byte quantum on a
+ 64-bit system, the size classes in each category are as shown in <a class="xref" href="#size_classes" title="Table 1. Size classes">Table 1</a>.</p><div class="table"><a name="size_classes"></a><p class="title"><b>Table 1. Size classes</b></p><div class="table-contents"><table summary="Size classes" border="1"><colgroup><col align="left" class="c1"><col align="right" class="c2"><col align="left" class="c3"></colgroup><thead><tr><th align="left">Category</th><th align="right">Spacing</th><th align="left">Size</th></tr></thead><tbody><tr><td rowspan="9" align="left">Small</td><td align="right">lg</td><td align="left">[8]</td></tr><tr><td align="right">16</td><td align="left">[16, 32, 48, 64, 80, 96, 112, 128]</td></tr><tr><td align="right">32</td><td align="left">[160, 192, 224, 256]</td></tr><tr><td align="right">64</td><td align="left">[320, 384, 448, 512]</td></tr><tr><td align="right">128</td><td align="left">[640, 768, 896, 1024]</td></tr><tr><td align="right">256</td><td align="left">[1280, 1536, 1792, 2048]</td></tr><tr><td align="right">512</td><td align="left">[2560, 3072, 3584, 4096]</td></tr><tr><td align="right">1 KiB</td><td align="left">[5 KiB, 6 KiB, 7 KiB, 8 KiB]</td></tr><tr><td align="right">2 KiB</td><td align="left">[10 KiB, 12 KiB, 14 KiB]</td></tr><tr><td rowspan="8" align="left">Large</td><td align="right">2 KiB</td><td align="left">[16 KiB]</td></tr><tr><td align="right">4 KiB</td><td align="left">[20 KiB, 24 KiB, 28 KiB, 32 KiB]</td></tr><tr><td align="right">8 KiB</td><td align="left">[40 KiB, 48 KiB, 54 KiB, 64 KiB]</td></tr><tr><td align="right">16 KiB</td><td align="left">[80 KiB, 96 KiB, 112 KiB, 128 KiB]</td></tr><tr><td align="right">32 KiB</td><td align="left">[160 KiB, 192 KiB, 224 KiB, 256 KiB]</td></tr><tr><td align="right">64 KiB</td><td align="left">[320 KiB, 384 KiB, 448 KiB, 512 KiB]</td></tr><tr><td align="right">128 KiB</td><td align="left">[640 KiB, 768 KiB, 896 KiB, 1 MiB]</td></tr><tr><td align="right">256 KiB</td><td align="left">[1280 KiB, 1536 KiB, 1792 KiB]</td></tr><tr><td rowspan="7" align="left">Huge</td><td align="right">256 KiB</td><td align="left">[2 MiB]</td></tr><tr><td align="right">512 KiB</td><td align="left">[2560 KiB, 3 MiB, 3584 KiB, 4 MiB]</td></tr><tr><td align="right">1 MiB</td><td align="left">[5 MiB, 6 MiB, 7 MiB, 8 MiB]</td></tr><tr><td align="right">2 MiB</td><td align="left">[10 MiB, 12 MiB, 14 MiB, 16 MiB]</td></tr><tr><td align="right">4 MiB</td><td align="left">[20 MiB, 24 MiB, 28 MiB, 32 MiB]</td></tr><tr><td align="right">8 MiB</td><td align="left">[40 MiB, 48 MiB, 56 MiB, 64 MiB]</td></tr><tr><td align="right">...</td><td align="left">...</td></tr></tbody></table></div></div><br class="table-break"></div><div class="refsect1"><a name="mallctl_namespace"></a><h2>MALLCTL NAMESPACE</h2><p>The following names are defined in the namespace accessible via the
<code class="function">mallctl*</code>(<em class="parameter"><code></code></em>) functions. Value types are
specified in parentheses, their readable/writable statuses are encoded as
<code class="literal">rw</code>, <code class="literal">r-</code>, <code class="literal">-w</code>, or
@@ -355,20 +328,20 @@ for (i = 0; i &lt; nbins; i++) {
</span></dt><dd><p>If a value is passed in, refresh the data from which
the <code class="function">mallctl*</code>(<em class="parameter"><code></code></em>) functions report values,
and increment the epoch. Return the current epoch. This is useful for
- detecting whether another thread caused a refresh.</p></dd><dt><a name="config.debug"></a><span class="term">
+ detecting whether another thread caused a refresh.</p></dd><dt><a name="config.cache_oblivious"></a><span class="term">
- "<code class="mallctl">config.debug</code>"
+ "<code class="mallctl">config.cache_oblivious</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
- </span></dt><dd><p><code class="option">--enable-debug</code> was specified during
- build configuration.</p></dd><dt><a name="config.dss"></a><span class="term">
+ </span></dt><dd><p><code class="option">--enable-cache-oblivious</code> was specified
+ during build configuration.</p></dd><dt><a name="config.debug"></a><span class="term">
- "<code class="mallctl">config.dss</code>"
+ "<code class="mallctl">config.debug</code>"
(<span class="type">bool</span>)
<code class="literal">r-</code>
- </span></dt><dd><p><code class="option">--enable-dss</code> was specified during
+ </span></dt><dd><p><code class="option">--enable-debug</code> was specified during
build configuration.</p></dd><dt><a name="config.fill"></a><span class="term">
"<code class="mallctl">config.fill</code>"
@@ -383,14 +356,7 @@ for (i = 0; i &lt; nbins; i++) {
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-lazy-lock</code> was specified
- during build configuration.</p></dd><dt><a name="config.mremap"></a><span class="term">
-
- "<code class="mallctl">config.mremap</code>"
-
- (<span class="type">bool</span>)
- <code class="literal">r-</code>
- </span></dt><dd><p><code class="option">--enable-mremap</code> was specified during
- build configuration.</p></dd><dt><a name="config.munmap"></a><span class="term">
+ during build configuration.</p></dd><dt><a name="config.munmap"></a><span class="term">
"<code class="mallctl">config.munmap</code>"
@@ -479,12 +445,13 @@ for (i = 0; i &lt; nbins; i++) {
<code class="literal">r-</code>
</span></dt><dd><p>dss (<span class="citerefentry"><span class="refentrytitle">sbrk</span>(2)</span>) allocation precedence as
related to <span class="citerefentry"><span class="refentrytitle">mmap</span>(2)</span> allocation. The following
- settings are supported: &#8220;disabled&#8221;, &#8220;primary&#8221;,
- and &#8220;secondary&#8221;. The default is &#8220;secondary&#8221; if
- <a class="link" href="#config.dss">
- "<code class="mallctl">config.dss</code>"
- </a> is
- true, &#8220;disabled&#8221; otherwise.
+ settings are supported if
+ <span class="citerefentry"><span class="refentrytitle">sbrk</span>(2)</span> is supported by the operating
+ system: &#8220;disabled&#8221;, &#8220;primary&#8221;, and
+ &#8220;secondary&#8221;; otherwise only &#8220;disabled&#8221; is
+ supported. The default is &#8220;secondary&#8221; if
+ <span class="citerefentry"><span class="refentrytitle">sbrk</span>(2)</span> is supported by the operating
+ system; &#8220;disabled&#8221; otherwise.
</p></dd><dt><a name="opt.lg_chunk"></a><span class="term">
"<code class="mallctl">opt.lg_chunk</code>"
@@ -494,7 +461,7 @@ for (i = 0; i &lt; nbins; i++) {
</span></dt><dd><p>Virtual memory chunk size (log base 2). If a chunk
size outside the supported size range is specified, the size is
silently clipped to the minimum/maximum supported size. The default
- chunk size is 4 MiB (2^22).
+ chunk size is 2 MiB (2^21).
</p></dd><dt><a name="opt.narenas"></a><span class="term">
"<code class="mallctl">opt.narenas</code>"
@@ -517,7 +484,13 @@ for (i = 0; i &lt; nbins; i++) {
provides the kernel with sufficient information to recycle dirty pages
if physical memory becomes scarce and the pages remain unused. The
default minimum ratio is 8:1 (2^3:1); an option value of -1 will
- disable dirty page purging.</p></dd><dt><a name="opt.stats_print"></a><span class="term">
+ disable dirty page purging. See <a class="link" href="#arenas.lg_dirty_mult">
+ "<code class="mallctl">arenas.lg_dirty_mult</code>"
+ </a>
+ and <a class="link" href="#arena.i.lg_dirty_mult">
+ "<code class="mallctl">arena.&lt;i&gt;.lg_dirty_mult</code>"
+ </a>
+ for related dynamic control options.</p></dd><dt><a name="opt.stats_print"></a><span class="term">
"<code class="mallctl">opt.stats_print</code>"
@@ -530,23 +503,31 @@ for (i = 0; i &lt; nbins; i++) {
<code class="option">--enable-stats</code> is specified during configuration, this
has the potential to cause deadlock for a multi-threaded process that
exits while one or more threads are executing in the memory allocation
- functions. Therefore, this option should only be used with care; it is
- primarily intended as a performance tuning aid during application
+ functions. Furthermore, <code class="function">atexit</code>(<em class="parameter"><code></code></em>) may
+ allocate memory during application initialization and then deadlock
+ internally when jemalloc in turn calls
+ <code class="function">atexit</code>(<em class="parameter"><code></code></em>), so this option is not
+ universally usable (though the application can register its own
+ <code class="function">atexit</code>(<em class="parameter"><code></code></em>) function with equivalent
+ functionality). Therefore, this option should only be used with care;
+ it is primarily intended as a performance tuning aid during application
development. This option is disabled by default.</p></dd><dt><a name="opt.junk"></a><span class="term">
"<code class="mallctl">opt.junk</code>"
- (<span class="type">bool</span>)
+ (<span class="type">const char *</span>)
<code class="literal">r-</code>
[<code class="option">--enable-fill</code>]
- </span></dt><dd><p>Junk filling enabled/disabled. If enabled, each byte
- of uninitialized allocated memory will be initialized to
- <code class="literal">0xa5</code>. All deallocated memory will be initialized to
- <code class="literal">0x5a</code>. This is intended for debugging and will
- impact performance negatively. This option is disabled by default
- unless <code class="option">--enable-debug</code> is specified during
- configuration, in which case it is enabled by default unless running
- inside <a class="ulink" href="http://valgrind.org/" target="_top">Valgrind</a>.</p></dd><dt><a name="opt.quarantine"></a><span class="term">
+ </span></dt><dd><p>Junk filling. If set to "alloc", each byte of
+ uninitialized allocated memory will be initialized to
+ <code class="literal">0xa5</code>. If set to "free", all deallocated memory will
+ be initialized to <code class="literal">0x5a</code>. If set to "true", both
+ allocated and deallocated memory will be initialized, and if set to
+ "false", junk filling be disabled entirely. This is intended for
+ debugging and will impact performance negatively. This option is
+ "false" by default unless <code class="option">--enable-debug</code> is specified
+ during configuration, in which case it is "true" by default unless
+ running inside <a class="ulink" href="http://valgrind.org/" target="_top">Valgrind</a>.</p></dd><dt><a name="opt.quarantine"></a><span class="term">
"<code class="mallctl">opt.quarantine</code>"
@@ -592,9 +573,8 @@ for (i = 0; i &lt; nbins; i++) {
</span></dt><dd><p>Zero filling enabled/disabled. If enabled, each byte
of uninitialized allocated memory will be initialized to 0. Note that
this initialization only happens once for each byte, so
- <code class="function">realloc</code>(<em class="parameter"><code></code></em>),
- <code class="function">rallocx</code>(<em class="parameter"><code></code></em>) and
- <code class="function">rallocm</code>(<em class="parameter"><code></code></em>) calls do not zero memory that
+ <code class="function">realloc</code>(<em class="parameter"><code></code></em>) and
+ <code class="function">rallocx</code>(<em class="parameter"><code></code></em>) calls do not zero memory that
was previously allocated. This is intended for debugging and will
impact performance negatively. This option is disabled by default.
</p></dd><dt><a name="opt.utrace"></a><span class="term">
@@ -606,17 +586,7 @@ for (i = 0; i &lt; nbins; i++) {
[<code class="option">--enable-utrace</code>]
</span></dt><dd><p>Allocation tracing based on
<span class="citerefentry"><span class="refentrytitle">utrace</span>(2)</span> enabled/disabled. This option
- is disabled by default.</p></dd><dt><a name="opt.valgrind"></a><span class="term">
-
- "<code class="mallctl">opt.valgrind</code>"
-
- (<span class="type">bool</span>)
- <code class="literal">r-</code>
- [<code class="option">--enable-valgrind</code>]
- </span></dt><dd><p><a class="ulink" href="http://valgrind.org/" target="_top">Valgrind</a>
- support enabled/disabled. This option is vestigal because jemalloc
- auto-detects whether it is running inside Valgrind. This option is
- disabled by default, unless running inside Valgrind.</p></dd><dt><a name="opt.xmalloc"></a><span class="term">
+ is disabled by default.</p></dd><dt><a name="opt.xmalloc"></a><span class="term">
"<code class="mallctl">opt.xmalloc</code>"
@@ -639,16 +609,16 @@ malloc_conf = "xmalloc:true";</pre><p>
(<span class="type">bool</span>)
<code class="literal">r-</code>
[<code class="option">--enable-tcache</code>]
- </span></dt><dd><p>Thread-specific caching enabled/disabled. When there
- are multiple threads, each thread uses a thread-specific cache for
- objects up to a certain size. Thread-specific caching allows many
- allocations to be satisfied without performing any thread
- synchronization, at the cost of increased memory use. See the
- <a class="link" href="#opt.lg_tcache_max">
+ </span></dt><dd><p>Thread-specific caching (tcache) enabled/disabled. When
+ there are multiple threads, each thread uses a tcache for objects up to
+ a certain size. Thread-specific caching allows many allocations to be
+ satisfied without performing any thread synchronization, at the cost of
+ increased memory use. See the <a class="link" href="#opt.lg_tcache_max">
"<code class="mallctl">opt.lg_tcache_max</code>"
</a>
option for related tuning information. This option is enabled by
- default unless running inside <a class="ulink" href="http://valgrind.org/" target="_top">Valgrind</a>.</p></dd><dt><a name="opt.lg_tcache_max"></a><span class="term">
+ default unless running inside <a class="ulink" href="http://valgrind.org/" target="_top">Valgrind</a>, in which case it is
+ forcefully disabled.</p></dd><dt><a name="opt.lg_tcache_max"></a><span class="term">
"<code class="mallctl">opt.lg_tcache_max</code>"
@@ -656,8 +626,8 @@ malloc_conf = "xmalloc:true";</pre><p>
<code class="literal">r-</code>
[<code class="option">--enable-tcache</code>]
</span></dt><dd><p>Maximum size class (log base 2) to cache in the
- thread-specific cache. At a minimum, all small size classes are
- cached, and at a maximum all large size classes are cached. The
+ thread-specific cache (tcache). At a minimum, all small size classes
+ are cached, and at a maximum all large size classes are cached. The
default maximum is 32 KiB (2^15).</p></dd><dt><a name="opt.prof"></a><span class="term">
"<code class="mallctl">opt.prof</code>"
@@ -686,8 +656,8 @@ malloc_conf = "xmalloc:true";</pre><p>
"<code class="mallctl">opt.prof_final</code>"
</a>
option for final profile dumping. Profile output is compatible with
- the included <span class="command"><strong>pprof</strong></span> Perl script, which originates
- from the <a class="ulink" href="http://code.google.com/p/gperftools/" target="_top">gperftools
+ the <span class="command"><strong>jeprof</strong></span> command, which is based on the
+ <span class="command"><strong>pprof</strong></span> that is developed as part of the <a class="ulink" href="http://code.google.com/p/gperftools/" target="_top">gperftools
package</a>.</p></dd><dt><a name="opt.prof_prefix"></a><span class="term">
"<code class="mallctl">opt.prof_prefix</code>"
@@ -704,7 +674,7 @@ malloc_conf = "xmalloc:true";</pre><p>
"<code class="mallctl">opt.prof_active</code>"
(<span class="type">bool</span>)
- <code class="literal">rw</code>
+ <code class="literal">r-</code>
[<code class="option">--enable-prof</code>]
</span></dt><dd><p>Profiling activated/deactivated. This is a secondary
control mechanism that makes it possible to start the application with
@@ -715,11 +685,25 @@ malloc_conf = "xmalloc:true";</pre><p>
with the <a class="link" href="#prof.active">
"<code class="mallctl">prof.active</code>"
</a> mallctl.
- This option is enabled by default.</p></dd><dt><a name="opt.lg_prof_sample"></a><span class="term">
+ This option is enabled by default.</p></dd><dt><a name="opt.prof_thread_active_init"></a><span class="term">
+
+ "<code class="mallctl">opt.prof_thread_active_init</code>"
+
+ (<span class="type">bool</span>)
+ <code class="literal">r-</code>
+ [<code class="option">--enable-prof</code>]
+ </span></dt><dd><p>Initial setting for <a class="link" href="#thread.prof.active">
+ "<code class="mallctl">thread.prof.active</code>"
+ </a>
+ in newly created threads. The initial setting for newly created threads
+ can also be changed during execution via the <a class="link" href="#prof.thread_active_init">
+ "<code class="mallctl">prof.thread_active_init</code>"
+ </a>
+ mallctl. This option is enabled by default.</p></dd><dt><a name="opt.lg_prof_sample"></a><span class="term">
"<code class="mallctl">opt.lg_prof_sample</code>"
- (<span class="type">ssize_t</span>)
+ (<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-prof</code>]
</span></dt><dd><p>Average interval (log base 2) between allocation
@@ -764,14 +748,12 @@ malloc_conf = "xmalloc:true";</pre><p>
(<span class="type">bool</span>)
<code class="literal">r-</code>
[<code class="option">--enable-prof</code>]
- </span></dt><dd><p>Trigger a memory profile dump every time the total
- virtual memory exceeds the previous maximum. Profiles are dumped to
- files named according to the pattern
- <code class="filename">&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.u&lt;useq&gt;.heap</code>,
- where <code class="literal">&lt;prefix&gt;</code> is controlled by the <a class="link" href="#opt.prof_prefix">
- "<code class="mallctl">opt.prof_prefix</code>"
- </a>
- option. This option is disabled by default.</p></dd><dt><a name="opt.prof_final"></a><span class="term">
+ </span></dt><dd><p>Set the initial state of <a class="link" href="#prof.gdump">
+ "<code class="mallctl">prof.gdump</code>"
+ </a>, which when
+ enabled triggers a memory profile dump every time the total virtual
+ memory exceeds the previous maximum. This option is disabled by
+ default.</p></dd><dt><a name="opt.prof_final"></a><span class="term">
"<code class="mallctl">opt.prof_final</code>"
@@ -785,7 +767,13 @@ malloc_conf = "xmalloc:true";</pre><p>
where <code class="literal">&lt;prefix&gt;</code> is controlled by the <a class="link" href="#opt.prof_prefix">
"<code class="mallctl">opt.prof_prefix</code>"
</a>
- option. This option is enabled by default.</p></dd><dt><a name="opt.prof_leak"></a><span class="term">
+ option. Note that <code class="function">atexit</code>(<em class="parameter"><code></code></em>) may allocate
+ memory during application initialization and then deadlock internally
+ when jemalloc in turn calls <code class="function">atexit</code>(<em class="parameter"><code></code></em>), so
+ this option is not universally usable (though the application can
+ register its own <code class="function">atexit</code>(<em class="parameter"><code></code></em>) function with
+ equivalent functionality). This option is disabled by
+ default.</p></dd><dt><a name="opt.prof_leak"></a><span class="term">
"<code class="mallctl">opt.prof_leak</code>"
@@ -864,9 +852,9 @@ malloc_conf = "xmalloc:true";</pre><p>
[<code class="option">--enable-tcache</code>]
</span></dt><dd><p>Enable/disable calling thread's tcache. The tcache is
implicitly flushed as a side effect of becoming
- disabled (see
+ disabled (see <a class="link" href="#thread.tcache.flush">
"<code class="mallctl">thread.tcache.flush</code>"
- ).
+ </a>).
</p></dd><dt><a name="thread.tcache.flush"></a><span class="term">
"<code class="mallctl">thread.tcache.flush</code>"
@@ -874,19 +862,84 @@ malloc_conf = "xmalloc:true";</pre><p>
(<span class="type">void</span>)
<code class="literal">--</code>
[<code class="option">--enable-tcache</code>]
- </span></dt><dd><p>Flush calling thread's tcache. This interface releases
- all cached objects and internal data structures associated with the
- calling thread's thread-specific cache. Ordinarily, this interface
+ </span></dt><dd><p>Flush calling thread's thread-specific cache (tcache).
+ This interface releases all cached objects and internal data structures
+ associated with the calling thread's tcache. Ordinarily, this interface
need not be called, since automatic periodic incremental garbage
collection occurs, and the thread cache is automatically discarded when
a thread exits. However, garbage collection is triggered by allocation
activity, so it is possible for a thread that stops
allocating/deallocating to retain its cache indefinitely, in which case
- the developer may find manual flushing useful.</p></dd><dt><a name="arena.i.purge"></a><span class="term">
+ the developer may find manual flushing useful.</p></dd><dt><a name="thread.prof.name"></a><span class="term">
- "<code class="mallctl">arena.&lt;i&gt;.purge</code>"
+ "<code class="mallctl">thread.prof.name</code>"
+
+ (<span class="type">const char *</span>)
+ <code class="literal">r-</code> or
+ <code class="literal">-w</code>
+ [<code class="option">--enable-prof</code>]
+ </span></dt><dd><p>Get/set the descriptive name associated with the calling
+ thread in memory profile dumps. An internal copy of the name string is
+ created, so the input string need not be maintained after this interface
+ completes execution. The output string of this interface should be
+ copied for non-ephemeral uses, because multiple implementation details
+ can cause asynchronous string deallocation. Furthermore, each
+ invocation of this interface can only read or write; simultaneous
+ read/write is not supported due to string lifetime limitations. The
+ name string must be nil-terminated and comprised only of characters in the
+ sets recognized
+ by <span class="citerefentry"><span class="refentrytitle">isgraph</span>(3)</span> and
+ <span class="citerefentry"><span class="refentrytitle">isblank</span>(3)</span>.</p></dd><dt><a name="thread.prof.active"></a><span class="term">
+
+ "<code class="mallctl">thread.prof.active</code>"
+
+ (<span class="type">bool</span>)
+ <code class="literal">rw</code>
+ [<code class="option">--enable-prof</code>]
+ </span></dt><dd><p>Control whether sampling is currently active for the
+ calling thread. This is an activation mechanism in addition to <a class="link" href="#prof.active">
+ "<code class="mallctl">prof.active</code>"
+ </a>; both must
+ be active for the calling thread to sample. This flag is enabled by
+ default.</p></dd><dt><a name="tcache.create"></a><span class="term">
+
+ "<code class="mallctl">tcache.create</code>"
+
+ (<span class="type">unsigned</span>)
+ <code class="literal">r-</code>
+ [<code class="option">--enable-tcache</code>]
+ </span></dt><dd><p>Create an explicit thread-specific cache (tcache) and
+ return an identifier that can be passed to the <a class="link" href="#MALLOCX_TCACHE"><code class="constant">MALLOCX_TCACHE(<em class="parameter"><code>tc</code></em>)</code></a>
+ macro to explicitly use the specified cache rather than the
+ automatically managed one that is used by default. Each explicit cache
+ can be used by only one thread at a time; the application must assure
+ that this constraint holds.
+ </p></dd><dt><a name="tcache.flush"></a><span class="term">
+
+ "<code class="mallctl">tcache.flush</code>"
+
+ (<span class="type">unsigned</span>)
+ <code class="literal">-w</code>
+ [<code class="option">--enable-tcache</code>]
+ </span></dt><dd><p>Flush the specified thread-specific cache (tcache). The
+ same considerations apply to this interface as to <a class="link" href="#thread.tcache.flush">
+ "<code class="mallctl">thread.tcache.flush</code>"
+ </a>,
+ except that the tcache will never be automatically discarded.
+ </p></dd><dt><a name="tcache.destroy"></a><span class="term">
+
+ "<code class="mallctl">tcache.destroy</code>"
(<span class="type">unsigned</span>)
+ <code class="literal">-w</code>
+ [<code class="option">--enable-tcache</code>]
+ </span></dt><dd><p>Flush the specified thread-specific cache (tcache) and
+ make the identifier available for use during a future tcache creation.
+ </p></dd><dt><a name="arena.i.purge"></a><span class="term">
+
+ "<code class="mallctl">arena.&lt;i&gt;.purge</code>"
+
+ (<span class="type">void</span>)
<code class="literal">--</code>
</span></dt><dd><p>Purge unused dirty pages for arena &lt;i&gt;, or for
all arenas if &lt;i&gt; equals <a class="link" href="#arenas.narenas">
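A sketch of how the explicit-tcache mallctls above combine with the MALLOCX_TCACHE() flag (not part of the patch; error handling is omitted and the allocation size is arbitrary).
<pre class="programlisting">
#include <jemalloc/jemalloc.h>

static void explicit_tcache_roundtrip(void) {
	unsigned tc;
	size_t sz = sizeof(tc);

	/* Create an explicit tcache; its identifier is read back. */
	mallctl("tcache.create", &tc, &sz, NULL, 0);

	/* Route an allocation/deallocation pair through it. */
	void *p = mallocx(4096, MALLOCX_TCACHE(tc));
	dallocx(p, MALLOCX_TCACHE(tc));

	/* Flush it, then retire the identifier for future reuse. */
	mallctl("tcache.flush", NULL, NULL, &tc, sizeof(tc));
	mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
}
</pre>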
@@ -902,15 +955,138 @@ malloc_conf = "xmalloc:true";</pre><p>
allocation for arena &lt;i&gt;, or for all arenas if &lt;i&gt; equals
<a class="link" href="#arenas.narenas">
"<code class="mallctl">arenas.narenas</code>"
- </a>. Note
- that even during huge allocation this setting is read from the arena
- that would be chosen for small or large allocation so that applications
- can depend on consistent dss versus mmap allocation regardless of
- allocation size. See <a class="link" href="#opt.dss">
+ </a>. See
+ <a class="link" href="#opt.dss">
"<code class="mallctl">opt.dss</code>"
</a> for supported
- settings.
- </p></dd><dt><a name="arenas.narenas"></a><span class="term">
+ settings.</p></dd><dt><a name="arena.i.lg_dirty_mult"></a><span class="term">
+
+ "<code class="mallctl">arena.&lt;i&gt;.lg_dirty_mult</code>"
+
+ (<span class="type">ssize_t</span>)
+ <code class="literal">rw</code>
+ </span></dt><dd><p>Current per-arena minimum ratio (log base 2) of active
+ to dirty pages for arena &lt;i&gt;. Each time this interface is set and
+ the ratio is increased, pages are synchronously purged as necessary to
+ impose the new ratio. See <a class="link" href="#opt.lg_dirty_mult">
+ "<code class="mallctl">opt.lg_dirty_mult</code>"
+ </a>
+ for additional information.</p></dd><dt><a name="arena.i.chunk_hooks"></a><span class="term">
+
+ "<code class="mallctl">arena.&lt;i&gt;.chunk_hooks</code>"
+
+ (<span class="type">chunk_hooks_t</span>)
+ <code class="literal">rw</code>
+ </span></dt><dd><p>Get or set the chunk management hook functions for arena
+ &lt;i&gt;. The functions must be capable of operating on all extant
+ chunks associated with arena &lt;i&gt;, usually by passing unknown
+ chunks to the replaced functions. In practice, it is feasible to
+ control allocation for arenas created via <a class="link" href="#arenas.extend">
+ "<code class="mallctl">arenas.extend</code>"
+ </a> such
+ that all chunks originate from an application-supplied chunk allocator
+ (by setting custom chunk hook functions just after arena creation), but
+ the automatically created arenas may have already created chunks prior
+ to the application having an opportunity to take over chunk
+ allocation.</p><pre class="programlisting">
+typedef struct {
+ chunk_alloc_t *alloc;
+ chunk_dalloc_t *dalloc;
+ chunk_commit_t *commit;
+ chunk_decommit_t *decommit;
+ chunk_purge_t *purge;
+ chunk_split_t *split;
+ chunk_merge_t *merge;
+} chunk_hooks_t;</pre><p>The <span class="type">chunk_hooks_t</span> structure comprises function
+ pointers which are described individually below. jemalloc uses these
+ functions to manage chunk lifetime, which starts off with allocation of
+ mapped committed memory, in the simplest case followed by deallocation.
+ However, there are performance and platform reasons to retain chunks for
+ later reuse. Cleanup attempts cascade from deallocation to decommit to
+ purging, which gives the chunk management functions opportunities to
+ reject the most permanent cleanup operations in favor of less permanent
+ (and often less costly) operations. The chunk splitting and merging
+ operations can also be opted out of, but this is mainly intended to
+ support platforms on which virtual memory mappings provided by the
+ operating system kernel do not automatically coalesce and split, e.g.
+ Windows.</p><div class="funcsynopsis"><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">typedef void *<b class="fsfunc">(chunk_alloc_t)</b>(</code></td><td>void *<var class="pdparam">chunk</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">alignment</var>, </td></tr><tr><td> </td><td>bool *<var class="pdparam">zero</var>, </td></tr><tr><td> </td><td>bool *<var class="pdparam">commit</var>, </td></tr><tr><td> </td><td>unsigned <var class="pdparam">arena_ind</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div><div class="literallayout"><p></p></div><p>A chunk allocation function conforms to the
+ <span class="type">chunk_alloc_t</span> type and upon success returns a pointer to
+ <em class="parameter"><code>size</code></em> bytes of mapped memory on behalf of arena
+ <em class="parameter"><code>arena_ind</code></em> such that the chunk's base address is a
+ multiple of <em class="parameter"><code>alignment</code></em>, as well as setting
+ <em class="parameter"><code>*zero</code></em> to indicate whether the chunk is zeroed and
+ <em class="parameter"><code>*commit</code></em> to indicate whether the chunk is
+ committed. Upon error the function returns <code class="constant">NULL</code>
+ and leaves <em class="parameter"><code>*zero</code></em> and
+ <em class="parameter"><code>*commit</code></em> unmodified. The
+ <em class="parameter"><code>size</code></em> parameter is always a multiple of the chunk
+ size. The <em class="parameter"><code>alignment</code></em> parameter is always a power
+ of two at least as large as the chunk size. Zeroing is mandatory if
+ <em class="parameter"><code>*zero</code></em> is true upon function entry. Committing is
+ mandatory if <em class="parameter"><code>*commit</code></em> is true upon function entry.
+ If <em class="parameter"><code>chunk</code></em> is not <code class="constant">NULL</code>, the
+ returned pointer must be <em class="parameter"><code>chunk</code></em> on success or
+ <code class="constant">NULL</code> on error. Committed memory may be committed
+ in absolute terms as on a system that does not overcommit, or in
+ implicit terms as on a system that overcommits and satisfies physical
+ memory needs on demand via soft page faults. Note that replacing the
+ default chunk allocation function makes the arena's <a class="link" href="#arena.i.dss">
+ "<code class="mallctl">arena.&lt;i&gt;.dss</code>"
+ </a>
+ setting irrelevant.</p><div class="funcsynopsis"><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">typedef bool <b class="fsfunc">(chunk_dalloc_t)</b>(</code></td><td>void *<var class="pdparam">chunk</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>bool <var class="pdparam">committed</var>, </td></tr><tr><td> </td><td>unsigned <var class="pdparam">arena_ind</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div><div class="literallayout"><p></p></div><p>
+ A chunk deallocation function conforms to the
+ <span class="type">chunk_dalloc_t</span> type and deallocates a
+ <em class="parameter"><code>chunk</code></em> of given <em class="parameter"><code>size</code></em> with
+ <em class="parameter"><code>committed</code></em>/decommited memory as indicated, on
+ behalf of arena <em class="parameter"><code>arena_ind</code></em>, returning false upon
+ success. If the function returns true, this indicates opt-out from
+ deallocation; the virtual memory mapping associated with the chunk
+ remains mapped, in the same commit state, and available for future use,
+ in which case it will be automatically retained for later reuse.</p><div class="funcsynopsis"><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">typedef bool <b class="fsfunc">(chunk_commit_t)</b>(</code></td><td>void *<var class="pdparam">chunk</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">offset</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">length</var>, </td></tr><tr><td> </td><td>unsigned <var class="pdparam">arena_ind</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div><div class="literallayout"><p></p></div><p>A chunk commit function conforms to the
+ <span class="type">chunk_commit_t</span> type and commits zeroed physical memory to
+ back pages within a <em class="parameter"><code>chunk</code></em> of given
+ <em class="parameter"><code>size</code></em> at <em class="parameter"><code>offset</code></em> bytes,
+ extending for <em class="parameter"><code>length</code></em> on behalf of arena
+ <em class="parameter"><code>arena_ind</code></em>, returning false upon success.
+ Committed memory may be committed in absolute terms as on a system that
+ does not overcommit, or in implicit terms as on a system that
+ overcommits and satisfies physical memory needs on demand via soft page
+ faults. If the function returns true, this indicates insufficient
+ physical memory to satisfy the request.</p><div class="funcsynopsis"><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">typedef bool <b class="fsfunc">(chunk_decommit_t)</b>(</code></td><td>void *<var class="pdparam">chunk</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">offset</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">length</var>, </td></tr><tr><td> </td><td>unsigned <var class="pdparam">arena_ind</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div><div class="literallayout"><p></p></div><p>A chunk decommit function conforms to the
+ <span class="type">chunk_decommit_t</span> type and decommits any physical memory
+ that is backing pages within a <em class="parameter"><code>chunk</code></em> of given
+ <em class="parameter"><code>size</code></em> at <em class="parameter"><code>offset</code></em> bytes,
+ extending for <em class="parameter"><code>length</code></em> on behalf of arena
+ <em class="parameter"><code>arena_ind</code></em>, returning false upon success, in which
+ case the pages will be committed via the chunk commit function before
+ being reused. If the function returns true, this indicates opt-out from
+ decommit; the memory remains committed and available for future use, in
+ which case it will be automatically retained for later reuse.</p><div class="funcsynopsis"><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">typedef bool <b class="fsfunc">(chunk_purge_t)</b>(</code></td><td>void *<var class="pdparam">chunk</var>, </td></tr><tr><td> </td><td>size_t<var class="pdparam">size</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">offset</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">length</var>, </td></tr><tr><td> </td><td>unsigned <var class="pdparam">arena_ind</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div><div class="literallayout"><p></p></div><p>A chunk purge function conforms to the <span class="type">chunk_purge_t</span>
+ type and optionally discards physical pages within the virtual memory
+ mapping associated with <em class="parameter"><code>chunk</code></em> of given
+ <em class="parameter"><code>size</code></em> at <em class="parameter"><code>offset</code></em> bytes,
+ extending for <em class="parameter"><code>length</code></em> on behalf of arena
+ <em class="parameter"><code>arena_ind</code></em>, returning false if pages within the
+ purged virtual memory range will be zero-filled the next time they are
+ accessed.</p><div class="funcsynopsis"><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">typedef bool <b class="fsfunc">(chunk_split_t)</b>(</code></td><td>void *<var class="pdparam">chunk</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size_a</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size_b</var>, </td></tr><tr><td> </td><td>bool <var class="pdparam">committed</var>, </td></tr><tr><td> </td><td>unsigned <var class="pdparam">arena_ind</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div><div class="literallayout"><p></p></div><p>A chunk split function conforms to the <span class="type">chunk_split_t</span>
+ type and optionally splits <em class="parameter"><code>chunk</code></em> of given
+ <em class="parameter"><code>size</code></em> into two adjacent chunks, the first of
+ <em class="parameter"><code>size_a</code></em> bytes, and the second of
+ <em class="parameter"><code>size_b</code></em> bytes, operating on
+ <em class="parameter"><code>committed</code></em>/decommitted memory as indicated, on
+ behalf of arena <em class="parameter"><code>arena_ind</code></em>, returning false upon
+ success. If the function returns true, this indicates that the chunk
+ remains unsplit and therefore should continue to be operated on as a
+ whole.</p><div class="funcsynopsis"><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">typedef bool <b class="fsfunc">(chunk_merge_t)</b>(</code></td><td>void *<var class="pdparam">chunk_a</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size_a</var>, </td></tr><tr><td> </td><td>void *<var class="pdparam">chunk_b</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size_b</var>, </td></tr><tr><td> </td><td>bool <var class="pdparam">committed</var>, </td></tr><tr><td> </td><td>unsigned <var class="pdparam">arena_ind</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div><div class="literallayout"><p></p></div><p>A chunk merge function conforms to the <span class="type">chunk_merge_t</span>
+ type and optionally merges adjacent chunks,
+ <em class="parameter"><code>chunk_a</code></em> of given <em class="parameter"><code>size_a</code></em>
+ and <em class="parameter"><code>chunk_b</code></em> of given
+ <em class="parameter"><code>size_b</code></em> into one contiguous chunk, operating on
+ <em class="parameter"><code>committed</code></em>/decommitted memory as indicated, on
+ behalf of arena <em class="parameter"><code>arena_ind</code></em>, returning false upon
+ success. If the function returns true, this indicates that the chunks
+ remain distinct mappings and therefore should continue to be operated on
+ independently.</p></dd><dt><a name="arenas.narenas"></a><span class="term">
"<code class="mallctl">arenas.narenas</code>"
@@ -926,7 +1102,20 @@ malloc_conf = "xmalloc:true";</pre><p>
"<code class="mallctl">arenas.narenas</code>"
</a>
booleans. Each boolean indicates whether the corresponding arena is
- initialized.</p></dd><dt><a name="arenas.quantum"></a><span class="term">
+ initialized.</p></dd><dt><a name="arenas.lg_dirty_mult"></a><span class="term">
+
+ "<code class="mallctl">arenas.lg_dirty_mult</code>"
+
+ (<span class="type">ssize_t</span>)
+ <code class="literal">rw</code>
+ </span></dt><dd><p>Current default per-arena minimum ratio (log base 2) of
+ active to dirty pages, used to initialize <a class="link" href="#arena.i.lg_dirty_mult">
+ "<code class="mallctl">arena.&lt;i&gt;.lg_dirty_mult</code>"
+ </a>
+ during arena creation. See <a class="link" href="#opt.lg_dirty_mult">
+ "<code class="mallctl">opt.lg_dirty_mult</code>"
+ </a>
+ for additional information.</p></dd><dt><a name="arenas.quantum"></a><span class="term">
"<code class="mallctl">arenas.quantum</code>"
@@ -981,7 +1170,7 @@ malloc_conf = "xmalloc:true";</pre><p>
"<code class="mallctl">arenas.nlruns</code>"
- (<span class="type">size_t</span>)
+ (<span class="type">unsigned</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Total number of large size classes.</p></dd><dt><a name="arenas.lrun.i.size"></a><span class="term">
@@ -990,21 +1179,40 @@ malloc_conf = "xmalloc:true";</pre><p>
(<span class="type">size_t</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Maximum size supported by this large size
- class.</p></dd><dt><a name="arenas.purge"></a><span class="term">
+ class.</p></dd><dt><a name="arenas.nhchunks"></a><span class="term">
- "<code class="mallctl">arenas.purge</code>"
+ "<code class="mallctl">arenas.nhchunks</code>"
(<span class="type">unsigned</span>)
- <code class="literal">-w</code>
- </span></dt><dd><p>Purge unused dirty pages for the specified arena, or
- for all arenas if none is specified.</p></dd><dt><a name="arenas.extend"></a><span class="term">
+ <code class="literal">r-</code>
+ </span></dt><dd><p>Total number of huge size classes.</p></dd><dt><a name="arenas.hchunk.i.size"></a><span class="term">
+
+ "<code class="mallctl">arenas.hchunk.&lt;i&gt;.size</code>"
+
+ (<span class="type">size_t</span>)
+ <code class="literal">r-</code>
+ </span></dt><dd><p>Maximum size supported by this huge size
+ class.</p></dd><dt><a name="arenas.extend"></a><span class="term">
"<code class="mallctl">arenas.extend</code>"
(<span class="type">unsigned</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Extend the array of arenas by appending a new arena,
- and returning the new arena index.</p></dd><dt><a name="prof.active"></a><span class="term">
+ and returning the new arena index.</p></dd><dt><a name="prof.thread_active_init"></a><span class="term">
+
+ "<code class="mallctl">prof.thread_active_init</code>"
+
+ (<span class="type">bool</span>)
+ <code class="literal">rw</code>
+ [<code class="option">--enable-prof</code>]
+ </span></dt><dd><p>Control the initial setting for <a class="link" href="#thread.prof.active">
+ "<code class="mallctl">thread.prof.active</code>"
+ </a>
+ in newly created threads. See the <a class="link" href="#opt.prof_thread_active_init">
+ "<code class="mallctl">opt.prof_thread_active_init</code>"
+ </a>
+ option for additional information.</p></dd><dt><a name="prof.active"></a><span class="term">
"<code class="mallctl">prof.active</code>"
@@ -1015,8 +1223,10 @@ malloc_conf = "xmalloc:true";</pre><p>
<a class="link" href="#opt.prof_active">
"<code class="mallctl">opt.prof_active</code>"
</a>
- option for additional information.
- </p></dd><dt><a name="prof.dump"></a><span class="term">
+ option for additional information, as well as the interrelated <a class="link" href="#thread.prof.active">
+ "<code class="mallctl">thread.prof.active</code>"
+ </a>
+ mallctl.</p></dd><dt><a name="prof.dump"></a><span class="term">
"<code class="mallctl">prof.dump</code>"
@@ -1030,7 +1240,45 @@ malloc_conf = "xmalloc:true";</pre><p>
<a class="link" href="#opt.prof_prefix">
"<code class="mallctl">opt.prof_prefix</code>"
</a>
- option.</p></dd><dt><a name="prof.interval"></a><span class="term">
+ option.</p></dd><dt><a name="prof.gdump"></a><span class="term">
+
+ "<code class="mallctl">prof.gdump</code>"
+
+ (<span class="type">bool</span>)
+ <code class="literal">rw</code>
+ [<code class="option">--enable-prof</code>]
+ </span></dt><dd><p>When enabled, trigger a memory profile dump every time
+ the total virtual memory exceeds the previous maximum. Profiles are
+ dumped to files named according to the pattern
+ <code class="filename">&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.u&lt;useq&gt;.heap</code>,
+ where <code class="literal">&lt;prefix&gt;</code> is controlled by the <a class="link" href="#opt.prof_prefix">
+ "<code class="mallctl">opt.prof_prefix</code>"
+ </a>
+ option.</p></dd><dt><a name="prof.reset"></a><span class="term">
+
+ "<code class="mallctl">prof.reset</code>"
+
+ (<span class="type">size_t</span>)
+ <code class="literal">-w</code>
+ [<code class="option">--enable-prof</code>]
+ </span></dt><dd><p>Reset all memory profile statistics, and optionally
+ update the sample rate (see <a class="link" href="#opt.lg_prof_sample">
+ "<code class="mallctl">opt.lg_prof_sample</code>"
+ </a>
+ and <a class="link" href="#prof.lg_sample">
+ "<code class="mallctl">prof.lg_sample</code>"
+ </a>).
+ </p></dd><dt><a name="prof.lg_sample"></a><span class="term">
+
+ "<code class="mallctl">prof.lg_sample</code>"
+
+ (<span class="type">size_t</span>)
+ <code class="literal">r-</code>
+ [<code class="option">--enable-prof</code>]
+ </span></dt><dd><p>Get the current sample rate (see <a class="link" href="#opt.lg_prof_sample">
+ "<code class="mallctl">opt.lg_prof_sample</code>"
+ </a>).
+ </p></dd><dt><a name="prof.interval"></a><span class="term">
"<code class="mallctl">prof.interval</code>"
@@ -1051,9 +1299,8 @@ malloc_conf = "xmalloc:true";</pre><p>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Pointer to a counter that contains an approximate count
of the current number of bytes in active pages. The estimate may be
- high, but never low, because each arena rounds up to the nearest
- multiple of the chunk size when computing its contribution to the
- counter. Note that the <a class="link" href="#epoch">
+ high, but never low, because each arena rounds up when computing its
+ contribution to the counter. Note that the <a class="link" href="#epoch">
"<code class="mallctl">epoch</code>"
</a> mallctl has no bearing
on this counter. Furthermore, counter consistency is maintained via
@@ -1082,68 +1329,53 @@ malloc_conf = "xmalloc:true";</pre><p>
This does not include <a class="link" href="#stats.arenas.i.pdirty">
"<code class="mallctl">stats.arenas.&lt;i&gt;.pdirty</code>"
- </a> and pages
- entirely devoted to allocator metadata.</p></dd><dt><a name="stats.mapped"></a><span class="term">
-
- "<code class="mallctl">stats.mapped</code>"
-
- (<span class="type">size_t</span>)
- <code class="literal">r-</code>
- [<code class="option">--enable-stats</code>]
- </span></dt><dd><p>Total number of bytes in chunks mapped on behalf of the
- application. This is a multiple of the chunk size, and is at least as
- large as <a class="link" href="#stats.active">
- "<code class="mallctl">stats.active</code>"
- </a>. This
- does not include inactive chunks.</p></dd><dt><a name="stats.chunks.current"></a><span class="term">
+ </a>, nor pages
+ entirely devoted to allocator metadata.</p></dd><dt><a name="stats.metadata"></a><span class="term">
- "<code class="mallctl">stats.chunks.current</code>"
+ "<code class="mallctl">stats.metadata</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
- </span></dt><dd><p>Total number of chunks actively mapped on behalf of the
- application. This does not include inactive chunks.
- </p></dd><dt><a name="stats.chunks.total"></a><span class="term">
-
- "<code class="mallctl">stats.chunks.total</code>"
-
- (<span class="type">uint64_t</span>)
- <code class="literal">r-</code>
- [<code class="option">--enable-stats</code>]
- </span></dt><dd><p>Cumulative number of chunks allocated.</p></dd><dt><a name="stats.chunks.high"></a><span class="term">
+ </span></dt><dd><p>Total number of bytes dedicated to metadata, which
+ comprise base allocations used for bootstrap-sensitive internal
+ allocator data structures, arena chunk headers (see <a class="link" href="#stats.arenas.i.metadata.mapped">
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.metadata.mapped</code>"
+ </a>),
+ and internal allocations (see <a class="link" href="#stats.arenas.i.metadata.allocated">
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.metadata.allocated</code>"
+ </a>).</p></dd><dt><a name="stats.resident"></a><span class="term">
- "<code class="mallctl">stats.chunks.high</code>"
+ "<code class="mallctl">stats.resident</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
- </span></dt><dd><p>Maximum number of active chunks at any time thus far.
- </p></dd><dt><a name="stats.huge.allocated"></a><span class="term">
+ </span></dt><dd><p>Maximum number of bytes in physically resident data
+ pages mapped by the allocator, comprising all pages dedicated to
+ allocator metadata, pages backing active allocations, and unused dirty
+ pages. This is a maximum rather than a precise value because pages may not
+ actually be physically resident if they correspond to demand-zeroed
+ virtual memory that has not yet been touched. This is a multiple of the
+ page size, and is larger than <a class="link" href="#stats.active">
+ "<code class="mallctl">stats.active</code>"
+ </a>.</p></dd><dt><a name="stats.mapped"></a><span class="term">
- "<code class="mallctl">stats.huge.allocated</code>"
+ "<code class="mallctl">stats.mapped</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
- </span></dt><dd><p>Number of bytes currently allocated by huge objects.
- </p></dd><dt><a name="stats.huge.nmalloc"></a><span class="term">
-
- "<code class="mallctl">stats.huge.nmalloc</code>"
-
- (<span class="type">uint64_t</span>)
- <code class="literal">r-</code>
- [<code class="option">--enable-stats</code>]
- </span></dt><dd><p>Cumulative number of huge allocation requests.
- </p></dd><dt><a name="stats.huge.ndalloc"></a><span class="term">
-
- "<code class="mallctl">stats.huge.ndalloc</code>"
-
- (<span class="type">uint64_t</span>)
- <code class="literal">r-</code>
- [<code class="option">--enable-stats</code>]
- </span></dt><dd><p>Cumulative number of huge deallocation requests.
- </p></dd><dt><a name="stats.arenas.i.dss"></a><span class="term">
+ </span></dt><dd><p>Total number of bytes in active chunks mapped by the
+ allocator. This is a multiple of the chunk size, and is larger than
+ <a class="link" href="#stats.active">
+ "<code class="mallctl">stats.active</code>"
+ </a>.
+ This does not include inactive chunks, even those that contain unused
+ dirty pages, which means that there is no strict ordering between this
+ and <a class="link" href="#stats.resident">
+ "<code class="mallctl">stats.resident</code>"
+ </a>.</p></dd><dt><a name="stats.arenas.i.dss"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.dss</code>"
@@ -1153,7 +1385,17 @@ malloc_conf = "xmalloc:true";</pre><p>
related to <span class="citerefentry"><span class="refentrytitle">mmap</span>(2)</span> allocation. See <a class="link" href="#opt.dss">
"<code class="mallctl">opt.dss</code>"
</a> for details.
- </p></dd><dt><a name="stats.arenas.i.nthreads"></a><span class="term">
+ </p></dd><dt><a name="stats.arenas.i.lg_dirty_mult"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.lg_dirty_mult</code>"
+
+ (<span class="type">ssize_t</span>)
+ <code class="literal">r-</code>
+ </span></dt><dd><p>Minimum ratio (log base 2) of active to dirty pages.
+ See <a class="link" href="#opt.lg_dirty_mult">
+ "<code class="mallctl">opt.lg_dirty_mult</code>"
+ </a>
+ for details.</p></dd><dt><a name="stats.arenas.i.nthreads"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.nthreads</code>"
@@ -1182,7 +1424,38 @@ malloc_conf = "xmalloc:true";</pre><p>
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
- </span></dt><dd><p>Number of mapped bytes.</p></dd><dt><a name="stats.arenas.i.npurge"></a><span class="term">
+ </span></dt><dd><p>Number of mapped bytes.</p></dd><dt><a name="stats.arenas.i.metadata.mapped"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.metadata.mapped</code>"
+
+ (<span class="type">size_t</span>)
+ <code class="literal">r-</code>
+ [<code class="option">--enable-stats</code>]
+ </span></dt><dd><p>Number of mapped bytes in arena chunk headers, which
+ track the states of the non-metadata pages.</p></dd><dt><a name="stats.arenas.i.metadata.allocated"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.metadata.allocated</code>"
+
+ (<span class="type">size_t</span>)
+ <code class="literal">r-</code>
+ [<code class="option">--enable-stats</code>]
+ </span></dt><dd><p>Number of bytes dedicated to internal allocations.
+ Internal allocations differ from application-originated allocations in
+ that they are for internal use, and that they are omitted from heap
+ profiles. This statistic is reported separately from <a class="link" href="#stats.metadata">
+ "<code class="mallctl">stats.metadata</code>"
+ </a> and
+ <a class="link" href="#stats.arenas.i.metadata.mapped">
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.metadata.mapped</code>"
+ </a>
+ because it overlaps with e.g. the <a class="link" href="#stats.allocated">
+ "<code class="mallctl">stats.allocated</code>"
+ </a> and
+ <a class="link" href="#stats.active">
+ "<code class="mallctl">stats.active</code>"
+ </a>
+ statistics, whereas the other metadata statistics do
+ not.</p></dd><dt><a name="stats.arenas.i.npurge"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.npurge</code>"
@@ -1270,15 +1543,39 @@ malloc_conf = "xmalloc:true";</pre><p>
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of large allocation requests.
- </p></dd><dt><a name="stats.arenas.i.bins.j.allocated"></a><span class="term">
+ </p></dd><dt><a name="stats.arenas.i.huge.allocated"></a><span class="term">
- "<code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.allocated</code>"
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.huge.allocated</code>"
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
- </span></dt><dd><p>Current number of bytes allocated by
- bin.</p></dd><dt><a name="stats.arenas.i.bins.j.nmalloc"></a><span class="term">
+ </span></dt><dd><p>Number of bytes currently allocated by huge objects.
+ </p></dd><dt><a name="stats.arenas.i.huge.nmalloc"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.huge.nmalloc</code>"
+
+ (<span class="type">uint64_t</span>)
+ <code class="literal">r-</code>
+ [<code class="option">--enable-stats</code>]
+ </span></dt><dd><p>Cumulative number of huge allocation requests served
+ directly by the arena.</p></dd><dt><a name="stats.arenas.i.huge.ndalloc"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.huge.ndalloc</code>"
+
+ (<span class="type">uint64_t</span>)
+ <code class="literal">r-</code>
+ [<code class="option">--enable-stats</code>]
+ </span></dt><dd><p>Cumulative number of huge deallocation requests served
+ directly by the arena.</p></dd><dt><a name="stats.arenas.i.huge.nrequests"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.huge.nrequests</code>"
+
+ (<span class="type">uint64_t</span>)
+ <code class="literal">r-</code>
+ [<code class="option">--enable-stats</code>]
+ </span></dt><dd><p>Cumulative number of huge allocation requests.
+ </p></dd><dt><a name="stats.arenas.i.bins.j.nmalloc"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nmalloc</code>"
@@ -1302,7 +1599,15 @@ malloc_conf = "xmalloc:true";</pre><p>
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of allocation
- requests.</p></dd><dt><a name="stats.arenas.i.bins.j.nfills"></a><span class="term">
+ requests.</p></dd><dt><a name="stats.arenas.i.bins.j.curregs"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.curregs</code>"
+
+ (<span class="type">size_t</span>)
+ <code class="literal">r-</code>
+ [<code class="option">--enable-stats</code>]
+ </span></dt><dd><p>Current number of regions for this size
+ class.</p></dd><dt><a name="stats.arenas.i.bins.j.nfills"></a><span class="term">
"<code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nfills</code>"
@@ -1370,6 +1675,38 @@ malloc_conf = "xmalloc:true";</pre><p>
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Current number of runs for this size class.
+ </p></dd><dt><a name="stats.arenas.i.hchunks.j.nmalloc"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.nmalloc</code>"
+
+ (<span class="type">uint64_t</span>)
+ <code class="literal">r-</code>
+ [<code class="option">--enable-stats</code>]
+ </span></dt><dd><p>Cumulative number of allocation requests for this size
+ class served directly by the arena.</p></dd><dt><a name="stats.arenas.i.hchunks.j.ndalloc"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.ndalloc</code>"
+
+ (<span class="type">uint64_t</span>)
+ <code class="literal">r-</code>
+ [<code class="option">--enable-stats</code>]
+ </span></dt><dd><p>Cumulative number of deallocation requests for this
+ size class served directly by the arena.</p></dd><dt><a name="stats.arenas.i.hchunks.j.nrequests"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.nrequests</code>"
+
+ (<span class="type">uint64_t</span>)
+ <code class="literal">r-</code>
+ [<code class="option">--enable-stats</code>]
+ </span></dt><dd><p>Cumulative number of allocation requests for this size
+ class.</p></dd><dt><a name="stats.arenas.i.hchunks.j.curhchunks"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.curhchunks</code>"
+
+ (<span class="type">size_t</span>)
+ <code class="literal">r-</code>
+ [<code class="option">--enable-stats</code>]
+ </span></dt><dd><p>Current number of huge allocations for this size class.
</p></dd></dl></div></div><div class="refsect1"><a name="debugging_malloc_problems"></a><h2>DEBUGGING MALLOC PROBLEMS</h2><p>When debugging, it is a good idea to configure/build jemalloc with
the <code class="option">--enable-debug</code> and <code class="option">--enable-fill</code>
options, and recompile the program with suitable options and symbols for
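A sketch of reading the per-arena statistics above for an arbitrary arena index (not part of the patch): translating the name to a MIB once and substituting the index component avoids repeated name parsing; the chosen name and arena index are illustrative.
<pre class="programlisting">
#include <jemalloc/jemalloc.h>

static size_t arena_huge_allocated(unsigned arena_ind) {
	/* Translate the name once; MIB component 2 corresponds to the arena
	 * index placeholder. Refresh the epoch first (as in the earlier
	 * sketch) if up-to-date values are needed. */
	size_t mib[5];
	size_t miblen = sizeof(mib) / sizeof(mib[0]);
	mallctlnametomib("stats.arenas.0.huge.allocated", mib, &miblen);
	mib[2] = arena_ind;

	size_t v = 0, sz = sizeof(v);
	mallctlbymib(mib, miblen, &v, &sz, NULL, 0);
	return v;
}
</pre>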
@@ -1406,7 +1743,7 @@ malloc_conf = "xmalloc:true";</pre><p>
<code class="function">malloc_stats_print</code>(<em class="parameter"><code></code></em>), followed by a string
pointer. Please note that doing anything which tries to allocate memory in
this function is likely to result in a crash or deadlock.</p><p>All messages are prefixed by
- &#8220;<code class="computeroutput">&lt;jemalloc&gt;: </code>&#8221;.</p></div><div class="refsect1"><a name="return_values"></a><h2>RETURN VALUES</h2><div class="refsect2"><a name="idm316388028784"></a><h3>Standard API</h3><p>The <code class="function">malloc</code>(<em class="parameter"><code></code></em>) and
+ &#8220;<code class="computeroutput">&lt;jemalloc&gt;: </code>&#8221;.</p></div><div class="refsect1"><a name="return_values"></a><h2>RETURN VALUES</h2><div class="refsect2"><a name="idp46949776"></a><h3>Standard API</h3><p>The <code class="function">malloc</code>(<em class="parameter"><code></code></em>) and
<code class="function">calloc</code>(<em class="parameter"><code></code></em>) functions return a pointer to the
allocated memory if successful; otherwise a <code class="constant">NULL</code>
pointer is returned and <code class="varname">errno</code> is set to
@@ -1434,7 +1771,7 @@ malloc_conf = "xmalloc:true";</pre><p>
allocation failure. The <code class="function">realloc</code>(<em class="parameter"><code></code></em>)
function always leaves the original buffer intact when an error occurs.
</p><p>The <code class="function">free</code>(<em class="parameter"><code></code></em>) function returns no
- value.</p></div><div class="refsect2"><a name="idm316388003104"></a><h3>Non-standard API</h3><p>The <code class="function">mallocx</code>(<em class="parameter"><code></code></em>) and
+ value.</p></div><div class="refsect2"><a name="idp46974576"></a><h3>Non-standard API</h3><p>The <code class="function">mallocx</code>(<em class="parameter"><code></code></em>) and
<code class="function">rallocx</code>(<em class="parameter"><code></code></em>) functions return a pointer to
the allocated memory if successful; otherwise a <code class="constant">NULL</code>
pointer is returned to indicate insufficient contiguous memory was
@@ -1465,27 +1802,7 @@ malloc_conf = "xmalloc:true";</pre><p>
read/write processing.</p></dd></dl></div><p>
</p><p>The <code class="function">malloc_usable_size</code>(<em class="parameter"><code></code></em>) function
returns the usable size of the allocation pointed to by
- <em class="parameter"><code>ptr</code></em>. </p></div><div class="refsect2"><a name="idm316387973360"></a><h3>Experimental API</h3><p>The <code class="function">allocm</code>(<em class="parameter"><code></code></em>),
- <code class="function">rallocm</code>(<em class="parameter"><code></code></em>),
- <code class="function">sallocm</code>(<em class="parameter"><code></code></em>),
- <code class="function">dallocm</code>(<em class="parameter"><code></code></em>), and
- <code class="function">nallocm</code>(<em class="parameter"><code></code></em>) functions return
- <code class="constant">ALLOCM_SUCCESS</code> on success; otherwise they return an
- error value. The <code class="function">allocm</code>(<em class="parameter"><code></code></em>),
- <code class="function">rallocm</code>(<em class="parameter"><code></code></em>), and
- <code class="function">nallocm</code>(<em class="parameter"><code></code></em>) functions will fail if:
- </p><div class="variablelist"><dl class="variablelist"><dt><span class="term"><span class="errorname">ALLOCM_ERR_OOM</span></span></dt><dd><p>Out of memory. Insufficient contiguous memory was
- available to service the allocation request. The
- <code class="function">allocm</code>(<em class="parameter"><code></code></em>) function additionally sets
- <em class="parameter"><code>*ptr</code></em> to <code class="constant">NULL</code>, whereas
- the <code class="function">rallocm</code>(<em class="parameter"><code></code></em>) function leaves
- <code class="constant">*ptr</code> unmodified.</p></dd></dl></div><p>
- The <code class="function">rallocm</code>(<em class="parameter"><code></code></em>) function will also
- fail if:
- </p><div class="variablelist"><dl class="variablelist"><dt><span class="term"><span class="errorname">ALLOCM_ERR_NOT_MOVED</span></span></dt><dd><p><code class="constant">ALLOCM_NO_MOVE</code> was specified,
- but the reallocation request could not be serviced without moving
- the object.</p></dd></dl></div><p>
- </p></div></div><div class="refsect1"><a name="environment"></a><h2>ENVIRONMENT</h2><p>The following environment variable affects the execution of the
+ <em class="parameter"><code>ptr</code></em>. </p></div></div><div class="refsect1"><a name="environment"></a><h2>ENVIRONMENT</h2><p>The following environment variable affects the execution of the
allocation functions:
</p><div class="variablelist"><dl class="variablelist"><dt><span class="term"><code class="envar">MALLOC_CONF</code></span></dt><dd><p>If the environment variable
<code class="envar">MALLOC_CONF</code> is set, the characters it contains
diff --git a/deps/jemalloc/doc/jemalloc.xml.in b/deps/jemalloc/doc/jemalloc.xml.in
index d8e2e711f..8fc774b18 100644
--- a/deps/jemalloc/doc/jemalloc.xml.in
+++ b/deps/jemalloc/doc/jemalloc.xml.in
@@ -38,17 +38,13 @@
<refname>xallocx</refname>
<refname>sallocx</refname>
<refname>dallocx</refname>
+ <refname>sdallocx</refname>
<refname>nallocx</refname>
<refname>mallctl</refname>
<refname>mallctlnametomib</refname>
<refname>mallctlbymib</refname>
<refname>malloc_stats_print</refname>
<refname>malloc_usable_size</refname>
- <refname>allocm</refname>
- <refname>rallocm</refname>
- <refname>sallocm</refname>
- <refname>dallocm</refname>
- <refname>nallocm</refname>
-->
<refpurpose>general purpose memory allocation functions</refpurpose>
</refnamediv>
@@ -61,8 +57,7 @@
<refsynopsisdiv>
<title>SYNOPSIS</title>
<funcsynopsis>
- <funcsynopsisinfo>#include &lt;<filename class="headerfile">stdlib.h</filename>&gt;
-#include &lt;<filename class="headerfile">jemalloc/jemalloc.h</filename>&gt;</funcsynopsisinfo>
+ <funcsynopsisinfo>#include &lt;<filename class="headerfile">jemalloc/jemalloc.h</filename>&gt;</funcsynopsisinfo>
<refsect2>
<title>Standard API</title>
<funcprototype>
@@ -126,6 +121,12 @@
<paramdef>int <parameter>flags</parameter></paramdef>
</funcprototype>
<funcprototype>
+ <funcdef>void <function>sdallocx</function></funcdef>
+ <paramdef>void *<parameter>ptr</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
<funcdef>size_t <function>nallocx</function></funcdef>
<paramdef>size_t <parameter>size</parameter></paramdef>
<paramdef>int <parameter>flags</parameter></paramdef>
@@ -172,41 +173,6 @@
</funcprototype>
<para><type>const char *</type><varname>malloc_conf</varname>;</para>
</refsect2>
- <refsect2>
- <title>Experimental API</title>
- <funcprototype>
- <funcdef>int <function>allocm</function></funcdef>
- <paramdef>void **<parameter>ptr</parameter></paramdef>
- <paramdef>size_t *<parameter>rsize</parameter></paramdef>
- <paramdef>size_t <parameter>size</parameter></paramdef>
- <paramdef>int <parameter>flags</parameter></paramdef>
- </funcprototype>
- <funcprototype>
- <funcdef>int <function>rallocm</function></funcdef>
- <paramdef>void **<parameter>ptr</parameter></paramdef>
- <paramdef>size_t *<parameter>rsize</parameter></paramdef>
- <paramdef>size_t <parameter>size</parameter></paramdef>
- <paramdef>size_t <parameter>extra</parameter></paramdef>
- <paramdef>int <parameter>flags</parameter></paramdef>
- </funcprototype>
- <funcprototype>
- <funcdef>int <function>sallocm</function></funcdef>
- <paramdef>const void *<parameter>ptr</parameter></paramdef>
- <paramdef>size_t *<parameter>rsize</parameter></paramdef>
- <paramdef>int <parameter>flags</parameter></paramdef>
- </funcprototype>
- <funcprototype>
- <funcdef>int <function>dallocm</function></funcdef>
- <paramdef>void *<parameter>ptr</parameter></paramdef>
- <paramdef>int <parameter>flags</parameter></paramdef>
- </funcprototype>
- <funcprototype>
- <funcdef>int <function>nallocm</function></funcdef>
- <paramdef>size_t *<parameter>rsize</parameter></paramdef>
- <paramdef>size_t <parameter>size</parameter></paramdef>
- <paramdef>int <parameter>flags</parameter></paramdef>
- </funcprototype>
- </refsect2>
</funcsynopsis>
</refsynopsisdiv>
<refsect1 id="description">
@@ -229,15 +195,15 @@
<para>The <function>posix_memalign<parameter/></function> function
allocates <parameter>size</parameter> bytes of memory such that the
- allocation's base address is an even multiple of
+ allocation's base address is a multiple of
<parameter>alignment</parameter>, and returns the allocation in the value
pointed to by <parameter>ptr</parameter>. The requested
- <parameter>alignment</parameter> must be a power of 2 at least as large
- as <code language="C">sizeof(<type>void *</type>)</code>.</para>
+ <parameter>alignment</parameter> must be a power of 2 at least as large as
+ <code language="C">sizeof(<type>void *</type>)</code>.</para>
<para>The <function>aligned_alloc<parameter/></function> function
allocates <parameter>size</parameter> bytes of memory such that the
- allocation's base address is an even multiple of
+ allocation's base address is a multiple of
<parameter>alignment</parameter>. The requested
<parameter>alignment</parameter> must be a power of 2. Behavior is
undefined if <parameter>size</parameter> is not an integral multiple of
@@ -268,14 +234,15 @@
<function>rallocx<parameter/></function>,
<function>xallocx<parameter/></function>,
<function>sallocx<parameter/></function>,
- <function>dallocx<parameter/></function>, and
+ <function>dallocx<parameter/></function>,
+ <function>sdallocx<parameter/></function>, and
<function>nallocx<parameter/></function> functions all have a
<parameter>flags</parameter> argument that can be used to specify
options. The functions only check the options that are contextually
relevant. Use bitwise or (<code language="C">|</code>) operations to
specify one or more of the following:
<variablelist>
- <varlistentry>
+ <varlistentry id="MALLOCX_LG_ALIGN">
<term><constant>MALLOCX_LG_ALIGN(<parameter>la</parameter>)
</constant></term>
@@ -285,7 +252,7 @@
that <parameter>la</parameter> is within the valid
range.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="MALLOCX_ALIGN">
<term><constant>MALLOCX_ALIGN(<parameter>a</parameter>)
</constant></term>
@@ -295,7 +262,7 @@
validate that <parameter>a</parameter> is a power of 2.
</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="MALLOCX_ZERO">
<term><constant>MALLOCX_ZERO</constant></term>
<listitem><para>Initialize newly allocated memory to contain zero
@@ -304,16 +271,38 @@
that are initialized to contain zero bytes. If this macro is
absent, newly allocated memory is uninitialized.</para></listitem>
</varlistentry>
- <varlistentry>
+ <varlistentry id="MALLOCX_TCACHE">
+ <term><constant>MALLOCX_TCACHE(<parameter>tc</parameter>)
+ </constant></term>
+
+ <listitem><para>Use the thread-specific cache (tcache) specified by
+ the identifier <parameter>tc</parameter>, which must have been
+ acquired via the <link
+ linkend="tcache.create"><mallctl>tcache.create</mallctl></link>
+ mallctl. This macro does not validate that
+ <parameter>tc</parameter> specifies a valid
+ identifier.</para></listitem>
+ </varlistentry>
+ <varlistentry id="MALLOC_TCACHE_NONE">
+ <term><constant>MALLOCX_TCACHE_NONE</constant></term>
+
+ <listitem><para>Do not use a thread-specific cache (tcache). Unless
+ <constant>MALLOCX_TCACHE(<parameter>tc</parameter>)</constant> or
+ <constant>MALLOCX_TCACHE_NONE</constant> is specified, an
+ automatically managed tcache will be used under many circumstances.
+ This macro cannot be used in the same <parameter>flags</parameter>
+ argument as
+ <constant>MALLOCX_TCACHE(<parameter>tc</parameter>)</constant>.</para></listitem>
+ </varlistentry>
+ <varlistentry id="MALLOCX_ARENA">
<term><constant>MALLOCX_ARENA(<parameter>a</parameter>)
</constant></term>
<listitem><para>Use the arena specified by the index
- <parameter>a</parameter> (and by necessity bypass the thread
- cache). This macro has no effect for huge regions, nor for regions
- that were allocated via an arena other than the one specified.
- This macro does not validate that <parameter>a</parameter>
- specifies an arena index in the valid range.</para></listitem>
+ <parameter>a</parameter>. This macro has no effect for regions that
+ were allocated via an arena other than the one specified. This
+ macro does not validate that <parameter>a</parameter> specifies an
+ arena index in the valid range.</para></listitem>
</varlistentry>
</variablelist>
</para>
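As a minimal sketch (assuming only the MALLOCX_* macros documented above and the <jemalloc/jemalloc.h> header), several of these flags can be combined in a single mallocx() call:

    #include <jemalloc/jemalloc.h>

    void *
    alloc_aligned_zeroed(void)
    {
        /* 4 KiB allocation, 64-byte aligned, zero-filled; the automatically
         * managed tcache is used because neither MALLOCX_TCACHE(tc) nor
         * MALLOCX_TCACHE_NONE is specified. */
        return (mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO));
    }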
@@ -352,6 +341,15 @@
memory referenced by <parameter>ptr</parameter> to be made available for
future allocations.</para>
+ <para>The <function>sdallocx<parameter/></function> function is an
+ extension of <function>dallocx<parameter/></function> with a
+ <parameter>size</parameter> parameter to allow the caller to pass in the
+ allocation size as an optimization. The minimum valid input size is the
+ original requested size of the allocation, and the maximum valid input
+ size is the corresponding value returned by
+ <function>nallocx<parameter/></function> or
+ <function>sallocx<parameter/></function>.</para>
+
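For illustration, a hedged sketch of pairing sdallocx() with mallocx(); per the paragraph above, nallocx() yields the largest size value that may legally be passed back:

    #include <jemalloc/jemalloc.h>

    void
    sized_dealloc_example(void)
    {
        size_t request = 100;
        void *p = mallocx(request, 0);

        if (p != NULL) {
            /* Any size in [request, nallocx(request, 0)] is valid here;
             * supplying it lets jemalloc skip a metadata lookup. */
            sdallocx(p, nallocx(request, 0), 0);
        }
    }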
<para>The <function>nallocx<parameter/></function> function allocates no
memory, but it performs the same size computation as the
<function>mallocx<parameter/></function> function, and returns the real
@@ -430,11 +428,12 @@ for (i = 0; i < nbins; i++) {
functions simultaneously. If <option>--enable-stats</option> is
specified during configuration, &ldquo;m&rdquo; and &ldquo;a&rdquo; can
be specified to omit merged arena and per arena statistics, respectively;
- &ldquo;b&rdquo; and &ldquo;l&rdquo; can be specified to omit per size
- class statistics for bins and large objects, respectively. Unrecognized
- characters are silently ignored. Note that thread caching may prevent
- some statistics from being completely up to date, since extra locking
- would be required to merge counters that track thread cache operations.
+ &ldquo;b&rdquo;, &ldquo;l&rdquo;, and &ldquo;h&rdquo; can be specified to
+ omit per size class statistics for bins, large objects, and huge objects,
+ respectively. Unrecognized characters are silently ignored. Note that
+ thread caching may prevent some statistics from being completely up to
+ date, since extra locking would be required to merge counters that track
+ thread cache operations.
</para>
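As an example (a sketch; NULL callback arguments are assumed to select the default output sink), the opts string can trim the report as described above:

    #include <jemalloc/jemalloc.h>

    void
    print_trimmed_stats(void)
    {
        /* "blh" omits per size class statistics for bins, large objects,
         * and huge objects, keeping the general and per-arena summaries. */
        malloc_stats_print(NULL, NULL, "blh");
    }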
<para>The <function>malloc_usable_size<parameter/></function> function
@@ -449,116 +448,6 @@ for (i = 0; i < nbins; i++) {
depended on, since such behavior is entirely implementation-dependent.
</para>
</refsect2>
- <refsect2>
- <title>Experimental API</title>
- <para>The experimental API is subject to change or removal without regard
- for backward compatibility. If <option>--disable-experimental</option>
- is specified during configuration, the experimental API is
- omitted.</para>
-
- <para>The <function>allocm<parameter/></function>,
- <function>rallocm<parameter/></function>,
- <function>sallocm<parameter/></function>,
- <function>dallocm<parameter/></function>, and
- <function>nallocm<parameter/></function> functions all have a
- <parameter>flags</parameter> argument that can be used to specify
- options. The functions only check the options that are contextually
- relevant. Use bitwise or (<code language="C">|</code>) operations to
- specify one or more of the following:
- <variablelist>
- <varlistentry>
- <term><constant>ALLOCM_LG_ALIGN(<parameter>la</parameter>)
- </constant></term>
-
- <listitem><para>Align the memory allocation to start at an address
- that is a multiple of <code language="C">(1 &lt;&lt;
- <parameter>la</parameter>)</code>. This macro does not validate
- that <parameter>la</parameter> is within the valid
- range.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term><constant>ALLOCM_ALIGN(<parameter>a</parameter>)
- </constant></term>
-
- <listitem><para>Align the memory allocation to start at an address
- that is a multiple of <parameter>a</parameter>, where
- <parameter>a</parameter> is a power of two. This macro does not
- validate that <parameter>a</parameter> is a power of 2.
- </para></listitem>
- </varlistentry>
- <varlistentry>
- <term><constant>ALLOCM_ZERO</constant></term>
-
- <listitem><para>Initialize newly allocated memory to contain zero
- bytes. In the growing reallocation case, the real size prior to
- reallocation defines the boundary between untouched bytes and those
- that are initialized to contain zero bytes. If this macro is
- absent, newly allocated memory is uninitialized.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term><constant>ALLOCM_NO_MOVE</constant></term>
-
- <listitem><para>For reallocation, fail rather than moving the
- object. This constraint can apply to both growth and
- shrinkage.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term><constant>ALLOCM_ARENA(<parameter>a</parameter>)
- </constant></term>
-
- <listitem><para>Use the arena specified by the index
- <parameter>a</parameter> (and by necessity bypass the thread
- cache). This macro has no effect for huge regions, nor for regions
- that were allocated via an arena other than the one specified.
- This macro does not validate that <parameter>a</parameter>
- specifies an arena index in the valid range.</para></listitem>
- </varlistentry>
- </variablelist>
- </para>
-
- <para>The <function>allocm<parameter/></function> function allocates at
- least <parameter>size</parameter> bytes of memory, sets
- <parameter>*ptr</parameter> to the base address of the allocation, and
- sets <parameter>*rsize</parameter> to the real size of the allocation if
- <parameter>rsize</parameter> is not <constant>NULL</constant>. Behavior
- is undefined if <parameter>size</parameter> is <constant>0</constant>, or
- if request size overflows due to size class and/or alignment
- constraints.</para>
-
- <para>The <function>rallocm<parameter/></function> function resizes the
- allocation at <parameter>*ptr</parameter> to be at least
- <parameter>size</parameter> bytes, sets <parameter>*ptr</parameter> to
- the base address of the allocation if it moved, and sets
- <parameter>*rsize</parameter> to the real size of the allocation if
- <parameter>rsize</parameter> is not <constant>NULL</constant>. If
- <parameter>extra</parameter> is non-zero, an attempt is made to resize
- the allocation to be at least <code
- language="C">(<parameter>size</parameter> +
- <parameter>extra</parameter>)</code> bytes, though inability to allocate
- the extra byte(s) will not by itself result in failure. Behavior is
- undefined if <parameter>size</parameter> is <constant>0</constant>, if
- request size overflows due to size class and/or alignment constraints, or
- if <code language="C">(<parameter>size</parameter> +
- <parameter>extra</parameter> &gt;
- <constant>SIZE_T_MAX</constant>)</code>.</para>
-
- <para>The <function>sallocm<parameter/></function> function sets
- <parameter>*rsize</parameter> to the real size of the allocation.</para>
-
- <para>The <function>dallocm<parameter/></function> function causes the
- memory referenced by <parameter>ptr</parameter> to be made available for
- future allocations.</para>
-
- <para>The <function>nallocm<parameter/></function> function allocates no
- memory, but it performs the same size computation as the
- <function>allocm<parameter/></function> function, and if
- <parameter>rsize</parameter> is not <constant>NULL</constant> it sets
- <parameter>*rsize</parameter> to the real size of the allocation that
- would result from the equivalent <function>allocm<parameter/></function>
- function call. Behavior is undefined if <parameter>size</parameter> is
- <constant>0</constant>, or if request size overflows due to size class
- and/or alignment constraints.</para>
- </refsect2>
</refsect1>
<refsect1 id="tuning">
<title>TUNING</title>
@@ -598,8 +487,10 @@ for (i = 0; i < nbins; i++) {
<manvolnum>2</manvolnum></citerefentry> to obtain memory, which is
suboptimal for several reasons, including race conditions, increased
fragmentation, and artificial limitations on maximum usable memory. If
- <option>--enable-dss</option> is specified during configuration, this
- allocator uses both <citerefentry><refentrytitle>mmap</refentrytitle>
+ <citerefentry><refentrytitle>sbrk</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> is supported by the operating
+ system, this allocator uses both
+ <citerefentry><refentrytitle>mmap</refentrytitle>
<manvolnum>2</manvolnum></citerefentry> and
<citerefentry><refentrytitle>sbrk</refentrytitle>
<manvolnum>2</manvolnum></citerefentry>, in that order of preference;
@@ -632,12 +523,11 @@ for (i = 0; i < nbins; i++) {
possible to find metadata for user objects very quickly.</para>
<para>User objects are broken into three categories according to size:
- small, large, and huge. Small objects are smaller than one page. Large
- objects are smaller than the chunk size. Huge objects are a multiple of
- the chunk size. Small and large objects are managed by arenas; huge
- objects are managed separately in a single data structure that is shared by
- all threads. Huge objects are used by applications infrequently enough
- that this single data structure is not a scalability issue.</para>
+ small, large, and huge. Small and large objects are managed entirely by
+ arenas; huge objects are additionally aggregated in a single data structure
+ that is shared by all threads. Huge objects are typically used by
+ applications infrequently enough that this single data structure is not a
+ scalability issue.</para>
<para>Each chunk that is managed by an arena tracks its contents as runs of
contiguous pages (unused, backing a set of small objects, or backing one
@@ -646,18 +536,18 @@ for (i = 0; i < nbins; i++) {
allocations in constant time.</para>
<para>Small objects are managed in groups by page runs. Each run maintains
- a frontier and free list to track which regions are in use. Allocation
- requests that are no more than half the quantum (8 or 16, depending on
- architecture) are rounded up to the nearest power of two that is at least
- <code language="C">sizeof(<type>double</type>)</code>. All other small
- object size classes are multiples of the quantum, spaced such that internal
- fragmentation is limited to approximately 25% for all but the smallest size
- classes. Allocation requests that are larger than the maximum small size
- class, but small enough to fit in an arena-managed chunk (see the <link
- linkend="opt.lg_chunk"><mallctl>opt.lg_chunk</mallctl></link> option), are
- rounded up to the nearest run size. Allocation requests that are too large
- to fit in an arena-managed chunk are rounded up to the nearest multiple of
- the chunk size.</para>
+ a bitmap to track which regions are in use. Allocation requests that are no
+ more than half the quantum (8 or 16, depending on architecture) are rounded
+ up to the nearest power of two that is at least <code
+ language="C">sizeof(<type>double</type>)</code>. All other object size
+ classes are multiples of the quantum, spaced such that there are four size
+ classes for each doubling in size, which limits internal fragmentation to
+ approximately 20% for all but the smallest size classes. Small size classes
+ are smaller than four times the page size, large size classes are smaller
+ than the chunk size (see the <link
+ linkend="opt.lg_chunk"><mallctl>opt.lg_chunk</mallctl></link> option), and
+ huge size classes extend from the chunk size up to one size class less than
+ the full address space size.</para>
<para>Allocations are packed tightly together, which can be an issue for
multi-threaded applications. If you need to assure that allocations do not
@@ -665,8 +555,29 @@ for (i = 0; i < nbins; i++) {
nearest multiple of the cacheline size, or specify cacheline alignment when
allocating.</para>
- <para>Assuming 4 MiB chunks, 4 KiB pages, and a 16-byte quantum on a 64-bit
- system, the size classes in each category are as shown in <xref
+ <para>The <function>realloc<parameter/></function>,
+ <function>rallocx<parameter/></function>, and
+ <function>xallocx<parameter/></function> functions may resize allocations
+ without moving them under limited circumstances. Unlike the
+ <function>*allocx<parameter/></function> API, the standard API does not
+ officially round up the usable size of an allocation to the nearest size
+ class, so technically it is necessary to call
+ <function>realloc<parameter/></function> to grow e.g. a 9-byte allocation to
+ 16 bytes, or shrink a 16-byte allocation to 9 bytes. Growth and shrinkage
+  trivially succeed in place as long as the pre-size and post-size both round
+ up to the same size class. No other API guarantees are made regarding
+ in-place resizing, but the current implementation also tries to resize large
+ and huge allocations in place, as long as the pre-size and post-size are
+ both large or both huge. In such cases shrinkage always succeeds for large
+ size classes, but for huge size classes the chunk allocator must support
+ splitting (see <link
+ linkend="arena.i.chunk_hooks"><mallctl>arena.&lt;i&gt;.chunk_hooks</mallctl></link>).
+ Growth only succeeds if the trailing memory is currently available, and
+ additionally for huge size classes the chunk allocator must support
+ merging.</para>
+
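A minimal sketch of the in-place path described above, using only the documented rallocx()/xallocx() prototypes: xallocx() never moves the allocation, so a caller can try it first and fall back to rallocx().

    #include <jemalloc/jemalloc.h>

    void *
    grow_preferring_in_place(void *p, size_t newsize)
    {
        /* xallocx() resizes in place only and returns the resulting real
         * size; a result smaller than newsize means it could not grow. */
        if (xallocx(p, newsize, 0, 0) >= newsize)
            return (p);
        /* Fall back to rallocx(), which may move the allocation (and may
         * return NULL on failure; the caller must check). */
        return (rallocx(p, newsize, 0));
    }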
+ <para>Assuming 2 MiB chunks, 4 KiB pages, and a 16-byte quantum on a
+ 64-bit system, the size classes in each category are as shown in <xref
linkend="size_classes" xrefstyle="template:Table %n"/>.</para>
<table xml:id="size_classes" frame="all">
@@ -684,13 +595,13 @@ for (i = 0; i < nbins; i++) {
</thead>
<tbody>
<row>
- <entry morerows="6">Small</entry>
+ <entry morerows="8">Small</entry>
<entry>lg</entry>
<entry>[8]</entry>
</row>
<row>
<entry>16</entry>
- <entry>[16, 32, 48, ..., 128]</entry>
+ <entry>[16, 32, 48, 64, 80, 96, 112, 128]</entry>
</row>
<row>
<entry>32</entry>
@@ -710,17 +621,77 @@ for (i = 0; i < nbins; i++) {
</row>
<row>
<entry>512</entry>
- <entry>[2560, 3072, 3584]</entry>
+ <entry>[2560, 3072, 3584, 4096]</entry>
+ </row>
+ <row>
+ <entry>1 KiB</entry>
+ <entry>[5 KiB, 6 KiB, 7 KiB, 8 KiB]</entry>
+ </row>
+ <row>
+ <entry>2 KiB</entry>
+ <entry>[10 KiB, 12 KiB, 14 KiB]</entry>
+ </row>
+ <row>
+ <entry morerows="7">Large</entry>
+ <entry>2 KiB</entry>
+ <entry>[16 KiB]</entry>
</row>
<row>
- <entry>Large</entry>
<entry>4 KiB</entry>
- <entry>[4 KiB, 8 KiB, 12 KiB, ..., 4072 KiB]</entry>
+ <entry>[20 KiB, 24 KiB, 28 KiB, 32 KiB]</entry>
+ </row>
+ <row>
+ <entry>8 KiB</entry>
+          <entry>[40 KiB, 48 KiB, 56 KiB, 64 KiB]</entry>
+ </row>
+ <row>
+ <entry>16 KiB</entry>
+ <entry>[80 KiB, 96 KiB, 112 KiB, 128 KiB]</entry>
+ </row>
+ <row>
+ <entry>32 KiB</entry>
+ <entry>[160 KiB, 192 KiB, 224 KiB, 256 KiB]</entry>
+ </row>
+ <row>
+ <entry>64 KiB</entry>
+ <entry>[320 KiB, 384 KiB, 448 KiB, 512 KiB]</entry>
+ </row>
+ <row>
+ <entry>128 KiB</entry>
+ <entry>[640 KiB, 768 KiB, 896 KiB, 1 MiB]</entry>
+ </row>
+ <row>
+ <entry>256 KiB</entry>
+ <entry>[1280 KiB, 1536 KiB, 1792 KiB]</entry>
+ </row>
+ <row>
+ <entry morerows="6">Huge</entry>
+ <entry>256 KiB</entry>
+ <entry>[2 MiB]</entry>
+ </row>
+ <row>
+ <entry>512 KiB</entry>
+ <entry>[2560 KiB, 3 MiB, 3584 KiB, 4 MiB]</entry>
+ </row>
+ <row>
+ <entry>1 MiB</entry>
+ <entry>[5 MiB, 6 MiB, 7 MiB, 8 MiB]</entry>
+ </row>
+ <row>
+ <entry>2 MiB</entry>
+ <entry>[10 MiB, 12 MiB, 14 MiB, 16 MiB]</entry>
</row>
<row>
- <entry>Huge</entry>
<entry>4 MiB</entry>
- <entry>[4 MiB, 8 MiB, 12 MiB, ...]</entry>
+ <entry>[20 MiB, 24 MiB, 28 MiB, 32 MiB]</entry>
+ </row>
+ <row>
+ <entry>8 MiB</entry>
+ <entry>[40 MiB, 48 MiB, 56 MiB, 64 MiB]</entry>
+ </row>
+ <row>
+ <entry>...</entry>
+ <entry>...</entry>
</row>
</tbody>
</tgroup>
@@ -765,23 +736,23 @@ for (i = 0; i < nbins; i++) {
detecting whether another thread caused a refresh.</para></listitem>
</varlistentry>
- <varlistentry id="config.debug">
+ <varlistentry id="config.cache_oblivious">
<term>
- <mallctl>config.debug</mallctl>
+ <mallctl>config.cache_oblivious</mallctl>
(<type>bool</type>)
<literal>r-</literal>
</term>
- <listitem><para><option>--enable-debug</option> was specified during
- build configuration.</para></listitem>
+ <listitem><para><option>--enable-cache-oblivious</option> was specified
+ during build configuration.</para></listitem>
</varlistentry>
- <varlistentry id="config.dss">
+ <varlistentry id="config.debug">
<term>
- <mallctl>config.dss</mallctl>
+ <mallctl>config.debug</mallctl>
(<type>bool</type>)
<literal>r-</literal>
</term>
- <listitem><para><option>--enable-dss</option> was specified during
+ <listitem><para><option>--enable-debug</option> was specified during
build configuration.</para></listitem>
</varlistentry>
@@ -805,16 +776,6 @@ for (i = 0; i < nbins; i++) {
during build configuration.</para></listitem>
</varlistentry>
- <varlistentry id="config.mremap">
- <term>
- <mallctl>config.mremap</mallctl>
- (<type>bool</type>)
- <literal>r-</literal>
- </term>
- <listitem><para><option>--enable-mremap</option> was specified during
- build configuration.</para></listitem>
- </varlistentry>
-
<varlistentry id="config.munmap">
<term>
<mallctl>config.munmap</mallctl>
@@ -940,10 +901,15 @@ for (i = 0; i < nbins; i++) {
<manvolnum>2</manvolnum></citerefentry>) allocation precedence as
related to <citerefentry><refentrytitle>mmap</refentrytitle>
<manvolnum>2</manvolnum></citerefentry> allocation. The following
- settings are supported: &ldquo;disabled&rdquo;, &ldquo;primary&rdquo;,
- and &ldquo;secondary&rdquo;. The default is &ldquo;secondary&rdquo; if
- <link linkend="config.dss"><mallctl>config.dss</mallctl></link> is
- true, &ldquo;disabled&rdquo; otherwise.
+ settings are supported if
+ <citerefentry><refentrytitle>sbrk</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> is supported by the operating
+ system: &ldquo;disabled&rdquo;, &ldquo;primary&rdquo;, and
+ &ldquo;secondary&rdquo;; otherwise only &ldquo;disabled&rdquo; is
+ supported. The default is &ldquo;secondary&rdquo; if
+ <citerefentry><refentrytitle>sbrk</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> is supported by the operating
+ system; &ldquo;disabled&rdquo; otherwise.
</para></listitem>
</varlistentry>
@@ -956,7 +922,7 @@ for (i = 0; i < nbins; i++) {
<listitem><para>Virtual memory chunk size (log base 2). If a chunk
size outside the supported size range is specified, the size is
silently clipped to the minimum/maximum supported size. The default
- chunk size is 4 MiB (2^22).
+ chunk size is 2 MiB (2^21).
</para></listitem>
</varlistentry>
@@ -986,7 +952,11 @@ for (i = 0; i < nbins; i++) {
provides the kernel with sufficient information to recycle dirty pages
if physical memory becomes scarce and the pages remain unused. The
default minimum ratio is 8:1 (2^3:1); an option value of -1 will
- disable dirty page purging.</para></listitem>
+ disable dirty page purging. See <link
+ linkend="arenas.lg_dirty_mult"><mallctl>arenas.lg_dirty_mult</mallctl></link>
+ and <link
+ linkend="arena.i.lg_dirty_mult"><mallctl>arena.&lt;i&gt;.lg_dirty_mult</mallctl></link>
+ for related dynamic control options.</para></listitem>
</varlistentry>
<varlistentry id="opt.stats_print">
@@ -1003,26 +973,34 @@ for (i = 0; i < nbins; i++) {
<option>--enable-stats</option> is specified during configuration, this
has the potential to cause deadlock for a multi-threaded process that
exits while one or more threads are executing in the memory allocation
- functions. Therefore, this option should only be used with care; it is
- primarily intended as a performance tuning aid during application
+ functions. Furthermore, <function>atexit<parameter/></function> may
+ allocate memory during application initialization and then deadlock
+ internally when jemalloc in turn calls
+ <function>atexit<parameter/></function>, so this option is not
+ univerally usable (though the application can register its own
+        universally usable (though the application can register its own
+ functionality). Therefore, this option should only be used with care;
+ it is primarily intended as a performance tuning aid during application
development. This option is disabled by default.</para></listitem>
</varlistentry>
<varlistentry id="opt.junk">
<term>
<mallctl>opt.junk</mallctl>
- (<type>bool</type>)
+ (<type>const char *</type>)
<literal>r-</literal>
[<option>--enable-fill</option>]
</term>
- <listitem><para>Junk filling enabled/disabled. If enabled, each byte
- of uninitialized allocated memory will be initialized to
- <literal>0xa5</literal>. All deallocated memory will be initialized to
- <literal>0x5a</literal>. This is intended for debugging and will
- impact performance negatively. This option is disabled by default
- unless <option>--enable-debug</option> is specified during
- configuration, in which case it is enabled by default unless running
- inside <ulink
+ <listitem><para>Junk filling. If set to "alloc", each byte of
+ uninitialized allocated memory will be initialized to
+ <literal>0xa5</literal>. If set to "free", all deallocated memory will
+ be initialized to <literal>0x5a</literal>. If set to "true", both
+ allocated and deallocated memory will be initialized, and if set to
+        "false", junk filling will be disabled entirely. This is intended for
+ debugging and will impact performance negatively. This option is
+ "false" by default unless <option>--enable-debug</option> is specified
+ during configuration, in which case it is "true" by default unless
+ running inside <ulink
url="http://valgrind.org/">Valgrind</ulink>.</para></listitem>
</varlistentry>
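For example (a sketch; both option names appear in this manual), the new string-valued opt.junk can be selected at compile time through the malloc_conf symbol:

    #include <jemalloc/jemalloc.h>

    /* Junk-fill newly allocated memory only, and abort on allocation
     * failure (xmalloc); the string is parsed before main() runs. */
    const char *malloc_conf = "junk:alloc,xmalloc:true";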
@@ -1076,9 +1054,8 @@ for (i = 0; i < nbins; i++) {
<listitem><para>Zero filling enabled/disabled. If enabled, each byte
of uninitialized allocated memory will be initialized to 0. Note that
this initialization only happens once for each byte, so
- <function>realloc<parameter/></function>,
- <function>rallocx<parameter/></function> and
- <function>rallocm<parameter/></function> calls do not zero memory that
+ <function>realloc<parameter/></function> and
+ <function>rallocx<parameter/></function> calls do not zero memory that
was previously allocated. This is intended for debugging and will
impact performance negatively. This option is disabled by default.
</para></listitem>
@@ -1097,19 +1074,6 @@ for (i = 0; i < nbins; i++) {
is disabled by default.</para></listitem>
</varlistentry>
- <varlistentry id="opt.valgrind">
- <term>
- <mallctl>opt.valgrind</mallctl>
- (<type>bool</type>)
- <literal>r-</literal>
- [<option>--enable-valgrind</option>]
- </term>
- <listitem><para><ulink url="http://valgrind.org/">Valgrind</ulink>
- support enabled/disabled. This option is vestigal because jemalloc
- auto-detects whether it is running inside Valgrind. This option is
- disabled by default, unless running inside Valgrind.</para></listitem>
- </varlistentry>
-
<varlistentry id="opt.xmalloc">
<term>
<mallctl>opt.xmalloc</mallctl>
@@ -1137,16 +1101,16 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<literal>r-</literal>
[<option>--enable-tcache</option>]
</term>
- <listitem><para>Thread-specific caching enabled/disabled. When there
- are multiple threads, each thread uses a thread-specific cache for
- objects up to a certain size. Thread-specific caching allows many
- allocations to be satisfied without performing any thread
- synchronization, at the cost of increased memory use. See the
- <link
+ <listitem><para>Thread-specific caching (tcache) enabled/disabled. When
+ there are multiple threads, each thread uses a tcache for objects up to
+ a certain size. Thread-specific caching allows many allocations to be
+ satisfied without performing any thread synchronization, at the cost of
+ increased memory use. See the <link
linkend="opt.lg_tcache_max"><mallctl>opt.lg_tcache_max</mallctl></link>
option for related tuning information. This option is enabled by
default unless running inside <ulink
- url="http://valgrind.org/">Valgrind</ulink>.</para></listitem>
+ url="http://valgrind.org/">Valgrind</ulink>, in which case it is
+ forcefully disabled.</para></listitem>
</varlistentry>
<varlistentry id="opt.lg_tcache_max">
@@ -1157,8 +1121,8 @@ malloc_conf = "xmalloc:true";]]></programlisting>
[<option>--enable-tcache</option>]
</term>
<listitem><para>Maximum size class (log base 2) to cache in the
- thread-specific cache. At a minimum, all small size classes are
- cached, and at a maximum all large size classes are cached. The
+ thread-specific cache (tcache). At a minimum, all small size classes
+ are cached, and at a maximum all large size classes are cached. The
default maximum is 32 KiB (2^15).</para></listitem>
</varlistentry>
@@ -1183,8 +1147,9 @@ malloc_conf = "xmalloc:true";]]></programlisting>
option for information on high-water-triggered profile dumping, and the
<link linkend="opt.prof_final"><mallctl>opt.prof_final</mallctl></link>
option for final profile dumping. Profile output is compatible with
- the included <command>pprof</command> Perl script, which originates
- from the <ulink url="http://code.google.com/p/gperftools/">gperftools
+ the <command>jeprof</command> command, which is based on the
+ <command>pprof</command> that is developed as part of the <ulink
+ url="http://code.google.com/p/gperftools/">gperftools
package</ulink>.</para></listitem>
</varlistentry>
@@ -1206,7 +1171,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<term>
<mallctl>opt.prof_active</mallctl>
(<type>bool</type>)
- <literal>rw</literal>
+ <literal>r-</literal>
[<option>--enable-prof</option>]
</term>
<listitem><para>Profiling activated/deactivated. This is a secondary
@@ -1219,10 +1184,25 @@ malloc_conf = "xmalloc:true";]]></programlisting>
This option is enabled by default.</para></listitem>
</varlistentry>
+ <varlistentry id="opt.prof_thread_active_init">
+ <term>
+ <mallctl>opt.prof_thread_active_init</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Initial setting for <link
+ linkend="thread.prof.active"><mallctl>thread.prof.active</mallctl></link>
+ in newly created threads. The initial setting for newly created threads
+ can also be changed during execution via the <link
+ linkend="prof.thread_active_init"><mallctl>prof.thread_active_init</mallctl></link>
+ mallctl. This option is enabled by default.</para></listitem>
+ </varlistentry>
+
<varlistentry id="opt.lg_prof_sample">
<term>
<mallctl>opt.lg_prof_sample</mallctl>
- (<type>ssize_t</type>)
+ (<type>size_t</type>)
<literal>r-</literal>
[<option>--enable-prof</option>]
</term>
@@ -1276,13 +1256,11 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<literal>r-</literal>
[<option>--enable-prof</option>]
</term>
- <listitem><para>Trigger a memory profile dump every time the total
- virtual memory exceeds the previous maximum. Profiles are dumped to
- files named according to the pattern
- <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.u&lt;useq&gt;.heap</filename>,
- where <literal>&lt;prefix&gt;</literal> is controlled by the <link
- linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
- option. This option is disabled by default.</para></listitem>
+ <listitem><para>Set the initial state of <link
+ linkend="prof.gdump"><mallctl>prof.gdump</mallctl></link>, which when
+ enabled triggers a memory profile dump every time the total virtual
+ memory exceeds the previous maximum. This option is disabled by
+ default.</para></listitem>
</varlistentry>
<varlistentry id="opt.prof_final">
@@ -1299,7 +1277,13 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.f.heap</filename>,
where <literal>&lt;prefix&gt;</literal> is controlled by the <link
linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
- option. This option is enabled by default.</para></listitem>
+ option. Note that <function>atexit<parameter/></function> may allocate
+ memory during application initialization and then deadlock internally
+ when jemalloc in turn calls <function>atexit<parameter/></function>, so
+        this option is not universally usable (though the application can
+ register its own <function>atexit<parameter/></function> function with
+ equivalent functionality). This option is disabled by
+ default.</para></listitem>
</varlistentry>
<varlistentry id="opt.prof_leak">
@@ -1396,7 +1380,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<listitem><para>Enable/disable calling thread's tcache. The tcache is
implicitly flushed as a side effect of becoming
disabled (see <link
- lenkend="thread.tcache.flush"><mallctl>thread.tcache.flush</mallctl></link>).
+ linkend="thread.tcache.flush"><mallctl>thread.tcache.flush</mallctl></link>).
</para></listitem>
</varlistentry>
@@ -1407,9 +1391,9 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<literal>--</literal>
[<option>--enable-tcache</option>]
</term>
- <listitem><para>Flush calling thread's tcache. This interface releases
- all cached objects and internal data structures associated with the
- calling thread's thread-specific cache. Ordinarily, this interface
+ <listitem><para>Flush calling thread's thread-specific cache (tcache).
+ This interface releases all cached objects and internal data structures
+ associated with the calling thread's tcache. Ordinarily, this interface
need not be called, since automatic periodic incremental garbage
collection occurs, and the thread cache is automatically discarded when
a thread exits. However, garbage collection is triggered by allocation
@@ -1418,10 +1402,91 @@ malloc_conf = "xmalloc:true";]]></programlisting>
the developer may find manual flushing useful.</para></listitem>
</varlistentry>
+ <varlistentry id="thread.prof.name">
+ <term>
+ <mallctl>thread.prof.name</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal> or
+ <literal>-w</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Get/set the descriptive name associated with the calling
+ thread in memory profile dumps. An internal copy of the name string is
+ created, so the input string need not be maintained after this interface
+ completes execution. The output string of this interface should be
+ copied for non-ephemeral uses, because multiple implementation details
+ can cause asynchronous string deallocation. Furthermore, each
+ invocation of this interface can only read or write; simultaneous
+ read/write is not supported due to string lifetime limitations. The
+        name string must be nil-terminated and comprised only of characters in the
+ sets recognized
+ by <citerefentry><refentrytitle>isgraph</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry> and
+ <citerefentry><refentrytitle>isblank</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="thread.prof.active">
+ <term>
+ <mallctl>thread.prof.active</mallctl>
+ (<type>bool</type>)
+ <literal>rw</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Control whether sampling is currently active for the
+ calling thread. This is an activation mechanism in addition to <link
+ linkend="prof.active"><mallctl>prof.active</mallctl></link>; both must
+ be active for the calling thread to sample. This flag is enabled by
+ default.</para></listitem>
+ </varlistentry>
+
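A hedged sketch of driving these two mallctls from a worker thread (the label string is a hypothetical example; string-typed mallctl writes are assumed to take a pointer to the char pointer):

    #include <stdbool.h>
    #include <jemalloc/jemalloc.h>

    void
    label_and_enable_sampling(void)
    {
        const char *name = "worker";    /* hypothetical thread label */
        bool active = true;

        /* Name this thread in subsequent profile dumps. */
        mallctl("thread.prof.name", NULL, NULL, &name, sizeof(name));
        /* Opt this thread in to sampling; prof.active must also be true. */
        mallctl("thread.prof.active", NULL, NULL, &active, sizeof(active));
    }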
+ <varlistentry id="tcache.create">
+ <term>
+ <mallctl>tcache.create</mallctl>
+ (<type>unsigned</type>)
+ <literal>r-</literal>
+ [<option>--enable-tcache</option>]
+ </term>
+ <listitem><para>Create an explicit thread-specific cache (tcache) and
+ return an identifier that can be passed to the <link
+ linkend="MALLOCX_TCACHE"><constant>MALLOCX_TCACHE(<parameter>tc</parameter>)</constant></link>
+ macro to explicitly use the specified cache rather than the
+ automatically managed one that is used by default. Each explicit cache
+ can be used by only one thread at a time; the application must assure
+ that this constraint holds.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="tcache.flush">
+ <term>
+ <mallctl>tcache.flush</mallctl>
+ (<type>unsigned</type>)
+ <literal>-w</literal>
+ [<option>--enable-tcache</option>]
+ </term>
+ <listitem><para>Flush the specified thread-specific cache (tcache). The
+ same considerations apply to this interface as to <link
+ linkend="thread.tcache.flush"><mallctl>thread.tcache.flush</mallctl></link>,
+        except that the tcache will never be automatically discarded.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="tcache.destroy">
+ <term>
+ <mallctl>tcache.destroy</mallctl>
+ (<type>unsigned</type>)
+ <literal>-w</literal>
+ [<option>--enable-tcache</option>]
+ </term>
+ <listitem><para>Flush the specified thread-specific cache (tcache) and
+ make the identifier available for use during a future tcache creation.
+ </para></listitem>
+ </varlistentry>
+
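Putting the three tcache mallctls together with MALLOCX_TCACHE(), a minimal sketch of an explicit cache's lifecycle (error handling mostly elided):

    #include <jemalloc/jemalloc.h>

    void
    explicit_tcache_example(void)
    {
        unsigned tc;
        size_t sz = sizeof(tc);

        /* tcache.create is read-only: reading it creates a cache and
         * returns its identifier. */
        if (mallctl("tcache.create", &tc, &sz, NULL, 0) != 0)
            return;

        void *p = mallocx(512, MALLOCX_TCACHE(tc));
        dallocx(p, MALLOCX_TCACHE(tc));

        /* Flush the cache and retire the identifier. */
        mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
    }

Only one thread may use a given explicit tcache at a time, so the identifier is typically stored in thread-local state.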
<varlistentry id="arena.i.purge">
<term>
<mallctl>arena.&lt;i&gt;.purge</mallctl>
- (<type>unsigned</type>)
+ (<type>void</type>)
<literal>--</literal>
</term>
<listitem><para>Purge unused dirty pages for arena &lt;i&gt;, or for
@@ -1439,14 +1504,222 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<listitem><para>Set the precedence of dss allocation as related to mmap
allocation for arena &lt;i&gt;, or for all arenas if &lt;i&gt; equals
<link
- linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>. Note
- that even during huge allocation this setting is read from the arena
- that would be chosen for small or large allocation so that applications
- can depend on consistent dss versus mmap allocation regardless of
- allocation size. See <link
- linkend="opt.dss"><mallctl>opt.dss</mallctl></link> for supported
- settings.
- </para></listitem>
+ linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>. See
+ <link linkend="opt.dss"><mallctl>opt.dss</mallctl></link> for supported
+ settings.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arena.i.lg_dirty_mult">
+ <term>
+ <mallctl>arena.&lt;i&gt;.lg_dirty_mult</mallctl>
+ (<type>ssize_t</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Current per-arena minimum ratio (log base 2) of active
+ to dirty pages for arena &lt;i&gt;. Each time this interface is set and
+ the ratio is increased, pages are synchronously purged as necessary to
+ impose the new ratio. See <link
+ linkend="opt.lg_dirty_mult"><mallctl>opt.lg_dirty_mult</mallctl></link>
+ for additional information.</para></listitem>
+ </varlistentry>
+
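For instance (a sketch; arena index 0 is an arbitrary choice), the ratio can be tightened at run time, which synchronously purges dirty pages as needed:

    #include <sys/types.h>
    #include <jemalloc/jemalloc.h>

    void
    tighten_dirty_ratio(void)
    {
        ssize_t lg_dirty_mult = 5;      /* require active:dirty >= 32:1 */

        mallctl("arena.0.lg_dirty_mult", NULL, NULL,
            &lg_dirty_mult, sizeof(lg_dirty_mult));
    }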
+ <varlistentry id="arena.i.chunk_hooks">
+ <term>
+ <mallctl>arena.&lt;i&gt;.chunk_hooks</mallctl>
+ (<type>chunk_hooks_t</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Get or set the chunk management hook functions for arena
+ &lt;i&gt;. The functions must be capable of operating on all extant
+ chunks associated with arena &lt;i&gt;, usually by passing unknown
+ chunks to the replaced functions. In practice, it is feasible to
+ control allocation for arenas created via <link
+ linkend="arenas.extend"><mallctl>arenas.extend</mallctl></link> such
+ that all chunks originate from an application-supplied chunk allocator
+ (by setting custom chunk hook functions just after arena creation), but
+ the automatically created arenas may have already created chunks prior
+ to the application having an opportunity to take over chunk
+ allocation.</para>
+
+ <programlisting language="C"><![CDATA[
+typedef struct {
+ chunk_alloc_t *alloc;
+ chunk_dalloc_t *dalloc;
+ chunk_commit_t *commit;
+ chunk_decommit_t *decommit;
+ chunk_purge_t *purge;
+ chunk_split_t *split;
+ chunk_merge_t *merge;
+} chunk_hooks_t;]]></programlisting>
+ <para>The <type>chunk_hooks_t</type> structure comprises function
+ pointers which are described individually below. jemalloc uses these
+ functions to manage chunk lifetime, which starts off with allocation of
+ mapped committed memory, in the simplest case followed by deallocation.
+ However, there are performance and platform reasons to retain chunks for
+ later reuse. Cleanup attempts cascade from deallocation to decommit to
+ purging, which gives the chunk management functions opportunities to
+ reject the most permanent cleanup operations in favor of less permanent
+ (and often less costly) operations. The chunk splitting and merging
+ operations can also be opted out of, but this is mainly intended to
+ support platforms on which virtual memory mappings provided by the
+ operating system kernel do not automatically coalesce and split, e.g.
+ Windows.</para>
+
+ <funcsynopsis><funcprototype>
+ <funcdef>typedef void *<function>(chunk_alloc_t)</function></funcdef>
+ <paramdef>void *<parameter>chunk</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>size_t <parameter>alignment</parameter></paramdef>
+ <paramdef>bool *<parameter>zero</parameter></paramdef>
+ <paramdef>bool *<parameter>commit</parameter></paramdef>
+ <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+ </funcprototype></funcsynopsis>
+ <literallayout></literallayout>
+ <para>A chunk allocation function conforms to the
+ <type>chunk_alloc_t</type> type and upon success returns a pointer to
+ <parameter>size</parameter> bytes of mapped memory on behalf of arena
+ <parameter>arena_ind</parameter> such that the chunk's base address is a
+ multiple of <parameter>alignment</parameter>, as well as setting
+ <parameter>*zero</parameter> to indicate whether the chunk is zeroed and
+ <parameter>*commit</parameter> to indicate whether the chunk is
+ committed. Upon error the function returns <constant>NULL</constant>
+ and leaves <parameter>*zero</parameter> and
+ <parameter>*commit</parameter> unmodified. The
+ <parameter>size</parameter> parameter is always a multiple of the chunk
+ size. The <parameter>alignment</parameter> parameter is always a power
+ of two at least as large as the chunk size. Zeroing is mandatory if
+ <parameter>*zero</parameter> is true upon function entry. Committing is
+ mandatory if <parameter>*commit</parameter> is true upon function entry.
+ If <parameter>chunk</parameter> is not <constant>NULL</constant>, the
+ returned pointer must be <parameter>chunk</parameter> on success or
+ <constant>NULL</constant> on error. Committed memory may be committed
+ in absolute terms as on a system that does not overcommit, or in
+ implicit terms as on a system that overcommits and satisfies physical
+ memory needs on demand via soft page faults. Note that replacing the
+ default chunk allocation function makes the arena's <link
+ linkend="arena.i.dss"><mallctl>arena.&lt;i&gt;.dss</mallctl></link>
+ setting irrelevant.</para>
+
+ <funcsynopsis><funcprototype>
+ <funcdef>typedef bool <function>(chunk_dalloc_t)</function></funcdef>
+ <paramdef>void *<parameter>chunk</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>bool <parameter>committed</parameter></paramdef>
+ <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+ </funcprototype></funcsynopsis>
+ <literallayout></literallayout>
+ <para>
+ A chunk deallocation function conforms to the
+ <type>chunk_dalloc_t</type> type and deallocates a
+ <parameter>chunk</parameter> of given <parameter>size</parameter> with
+        <parameter>committed</parameter>/decommitted memory as indicated, on
+ behalf of arena <parameter>arena_ind</parameter>, returning false upon
+ success. If the function returns true, this indicates opt-out from
+ deallocation; the virtual memory mapping associated with the chunk
+ remains mapped, in the same commit state, and available for future use,
+ in which case it will be automatically retained for later reuse.</para>
+
+ <funcsynopsis><funcprototype>
+ <funcdef>typedef bool <function>(chunk_commit_t)</function></funcdef>
+ <paramdef>void *<parameter>chunk</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>size_t <parameter>offset</parameter></paramdef>
+ <paramdef>size_t <parameter>length</parameter></paramdef>
+ <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+ </funcprototype></funcsynopsis>
+ <literallayout></literallayout>
+ <para>A chunk commit function conforms to the
+ <type>chunk_commit_t</type> type and commits zeroed physical memory to
+ back pages within a <parameter>chunk</parameter> of given
+ <parameter>size</parameter> at <parameter>offset</parameter> bytes,
+ extending for <parameter>length</parameter> on behalf of arena
+ <parameter>arena_ind</parameter>, returning false upon success.
+ Committed memory may be committed in absolute terms as on a system that
+ does not overcommit, or in implicit terms as on a system that
+ overcommits and satisfies physical memory needs on demand via soft page
+ faults. If the function returns true, this indicates insufficient
+ physical memory to satisfy the request.</para>
+
+ <funcsynopsis><funcprototype>
+ <funcdef>typedef bool <function>(chunk_decommit_t)</function></funcdef>
+ <paramdef>void *<parameter>chunk</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>size_t <parameter>offset</parameter></paramdef>
+ <paramdef>size_t <parameter>length</parameter></paramdef>
+ <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+ </funcprototype></funcsynopsis>
+ <literallayout></literallayout>
+ <para>A chunk decommit function conforms to the
+ <type>chunk_decommit_t</type> type and decommits any physical memory
+ that is backing pages within a <parameter>chunk</parameter> of given
+ <parameter>size</parameter> at <parameter>offset</parameter> bytes,
+ extending for <parameter>length</parameter> on behalf of arena
+ <parameter>arena_ind</parameter>, returning false upon success, in which
+ case the pages will be committed via the chunk commit function before
+ being reused. If the function returns true, this indicates opt-out from
+ decommit; the memory remains committed and available for future use, in
+ which case it will be automatically retained for later reuse.</para>
+
+ <funcsynopsis><funcprototype>
+ <funcdef>typedef bool <function>(chunk_purge_t)</function></funcdef>
+ <paramdef>void *<parameter>chunk</parameter></paramdef>
+        <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>size_t <parameter>offset</parameter></paramdef>
+ <paramdef>size_t <parameter>length</parameter></paramdef>
+ <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+ </funcprototype></funcsynopsis>
+ <literallayout></literallayout>
+ <para>A chunk purge function conforms to the <type>chunk_purge_t</type>
+ type and optionally discards physical pages within the virtual memory
+ mapping associated with <parameter>chunk</parameter> of given
+ <parameter>size</parameter> at <parameter>offset</parameter> bytes,
+ extending for <parameter>length</parameter> on behalf of arena
+ <parameter>arena_ind</parameter>, returning false if pages within the
+ purged virtual memory range will be zero-filled the next time they are
+ accessed.</para>
+
+ <funcsynopsis><funcprototype>
+ <funcdef>typedef bool <function>(chunk_split_t)</function></funcdef>
+ <paramdef>void *<parameter>chunk</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>size_t <parameter>size_a</parameter></paramdef>
+ <paramdef>size_t <parameter>size_b</parameter></paramdef>
+ <paramdef>bool <parameter>committed</parameter></paramdef>
+ <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+ </funcprototype></funcsynopsis>
+ <literallayout></literallayout>
+ <para>A chunk split function conforms to the <type>chunk_split_t</type>
+ type and optionally splits <parameter>chunk</parameter> of given
+ <parameter>size</parameter> into two adjacent chunks, the first of
+ <parameter>size_a</parameter> bytes, and the second of
+ <parameter>size_b</parameter> bytes, operating on
+ <parameter>committed</parameter>/decommitted memory as indicated, on
+ behalf of arena <parameter>arena_ind</parameter>, returning false upon
+ success. If the function returns true, this indicates that the chunk
+ remains unsplit and therefore should continue to be operated on as a
+ whole.</para>
+
+ <funcsynopsis><funcprototype>
+ <funcdef>typedef bool <function>(chunk_merge_t)</function></funcdef>
+ <paramdef>void *<parameter>chunk_a</parameter></paramdef>
+ <paramdef>size_t <parameter>size_a</parameter></paramdef>
+ <paramdef>void *<parameter>chunk_b</parameter></paramdef>
+ <paramdef>size_t <parameter>size_b</parameter></paramdef>
+ <paramdef>bool <parameter>committed</parameter></paramdef>
+ <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+ </funcprototype></funcsynopsis>
+ <literallayout></literallayout>
+ <para>A chunk merge function conforms to the <type>chunk_merge_t</type>
+ type and optionally merges adjacent chunks,
+ <parameter>chunk_a</parameter> of given <parameter>size_a</parameter>
+ and <parameter>chunk_b</parameter> of given
+ <parameter>size_b</parameter> into one contiguous chunk, operating on
+ <parameter>committed</parameter>/decommitted memory as indicated, on
+ behalf of arena <parameter>arena_ind</parameter>, returning false upon
+ success. If the function returns true, this indicates that the chunks
+ remain distinct mappings and therefore should continue to be operated on
+ independently.</para>
+ </listitem>
</varlistentry>
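To make the read-modify-write pattern concrete, a hedged sketch of replacing only the purge hook for arena 0; my_purge is a hypothetical function, and chunk_hooks_t plus the hook typedefs are assumed to be exposed by <jemalloc/jemalloc.h>:

    #include <stdbool.h>
    #include <stddef.h>
    #include <jemalloc/jemalloc.h>

    /* Hypothetical purge hook: returning true opts out of purging, so the
     * pages keep their contents and stay attributed to the application. */
    static bool
    my_purge(void *chunk, size_t size, size_t offset, size_t length,
        unsigned arena_ind)
    {
        (void)chunk; (void)size; (void)offset; (void)length; (void)arena_ind;
        return (true);
    }

    void
    install_purge_hook(void)
    {
        chunk_hooks_t hooks;
        size_t sz = sizeof(hooks);

        /* Read the current hooks, swap in one replacement, write them back
         * so the remaining hooks continue to handle existing chunks. */
        if (mallctl("arena.0.chunk_hooks", &hooks, &sz, NULL, 0) == 0) {
            hooks.purge = my_purge;
            mallctl("arena.0.chunk_hooks", NULL, NULL, &hooks, sizeof(hooks));
        }
    }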
<varlistentry id="arenas.narenas">
@@ -1470,6 +1743,20 @@ malloc_conf = "xmalloc:true";]]></programlisting>
initialized.</para></listitem>
</varlistentry>
+ <varlistentry id="arenas.lg_dirty_mult">
+ <term>
+ <mallctl>arenas.lg_dirty_mult</mallctl>
+ (<type>ssize_t</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Current default per-arena minimum ratio (log base 2) of
+ active to dirty pages, used to initialize <link
+ linkend="arena.i.lg_dirty_mult"><mallctl>arena.&lt;i&gt;.lg_dirty_mult</mallctl></link>
+ during arena creation. See <link
+ linkend="opt.lg_dirty_mult"><mallctl>opt.lg_dirty_mult</mallctl></link>
+ for additional information.</para></listitem>
+ </varlistentry>
+
<varlistentry id="arenas.quantum">
<term>
<mallctl>arenas.quantum</mallctl>
@@ -1548,7 +1835,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<varlistentry id="arenas.nlruns">
<term>
<mallctl>arenas.nlruns</mallctl>
- (<type>size_t</type>)
+ (<type>unsigned</type>)
<literal>r-</literal>
</term>
<listitem><para>Total number of large size classes.</para></listitem>
@@ -1564,14 +1851,23 @@ malloc_conf = "xmalloc:true";]]></programlisting>
class.</para></listitem>
</varlistentry>
- <varlistentry id="arenas.purge">
+ <varlistentry id="arenas.nhchunks">
<term>
- <mallctl>arenas.purge</mallctl>
+ <mallctl>arenas.nhchunks</mallctl>
(<type>unsigned</type>)
- <literal>-w</literal>
+ <literal>r-</literal>
</term>
- <listitem><para>Purge unused dirty pages for the specified arena, or
- for all arenas if none is specified.</para></listitem>
+ <listitem><para>Total number of huge size classes.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.hchunk.i.size">
+ <term>
+ <mallctl>arenas.hchunk.&lt;i&gt;.size</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Maximum size supported by this huge size
+ class.</para></listitem>
</varlistentry>
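
[Editor's sketch of how the two new controls compose: read arenas.nhchunks, then index arenas.hchunk.<i>.size through a MIB so the name is translated only once. Unprefixed build assumed.]

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
    	unsigned nhchunks, i;
    	size_t sz, mib[4], miblen = 4;

    	sz = sizeof(nhchunks);
    	mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0);

    	/* Translate once, then patch in the size class index per lookup. */
    	mallctlnametomib("arenas.hchunk.0.size", mib, &miblen);
    	for (i = 0; i < nhchunks; i++) {
    		size_t hsize;

    		mib[2] = i;
    		sz = sizeof(hsize);
    		mallctlbymib(mib, miblen, &hsize, &sz, NULL, 0);
    		printf("huge class %u: %zu bytes\n", i, hsize);
    	}
    	return (0);
    }
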
<varlistentry id="arenas.extend">
@@ -1584,6 +1880,20 @@ malloc_conf = "xmalloc:true";]]></programlisting>
and returning the new arena index.</para></listitem>
</varlistentry>
+ <varlistentry id="prof.thread_active_init">
+ <term>
+ <mallctl>prof.thread_active_init</mallctl>
+ (<type>bool</type>)
+ <literal>rw</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Control the initial setting for <link
+ linkend="thread.prof.active"><mallctl>thread.prof.active</mallctl></link>
+ in newly created threads. See the <link
+ linkend="opt.prof_thread_active_init"><mallctl>opt.prof_thread_active_init</mallctl></link>
+ option for additional information.</para></listitem>
+ </varlistentry>
+
<varlistentry id="prof.active">
<term>
<mallctl>prof.active</mallctl>
@@ -1594,8 +1904,9 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<listitem><para>Control whether sampling is currently active. See the
<link
linkend="opt.prof_active"><mallctl>opt.prof_active</mallctl></link>
- option for additional information.
- </para></listitem>
+ option for additional information, as well as the interrelated <link
+ linkend="thread.prof.active"><mallctl>thread.prof.active</mallctl></link>
+ mallctl.</para></listitem>
</varlistentry>
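
[Editor's sketch of the interplay between prof.thread_active_init and prof.active: start worker threads with sampling off, then flip it on globally. Assumes a --enable-prof build running with profiling configured; the phase names are hypothetical.]

    #include <stdbool.h>
    #include <jemalloc/jemalloc.h>

    static void
    prof_warmup_phase(void)
    {
    	bool off = false, on = true;

    	/* Newly created threads begin with thread.prof.active == false. */
    	mallctl("prof.thread_active_init", NULL, NULL, &off, sizeof(off));

    	/* ... spawn worker threads, run warmup ... */

    	/* Enable sampling globally once steady state is reached. */
    	mallctl("prof.active", NULL, NULL, &on, sizeof(on));
    }
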
<varlistentry id="prof.dump">
@@ -1614,6 +1925,49 @@ malloc_conf = "xmalloc:true";]]></programlisting>
option.</para></listitem>
</varlistentry>
+ <varlistentry id="prof.gdump">
+ <term>
+ <mallctl>prof.gdump</mallctl>
+ (<type>bool</type>)
+ <literal>rw</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>When enabled, trigger a memory profile dump every time
+ the total virtual memory exceeds the previous maximum. Profiles are
+ dumped to files named according to the pattern
+ <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.u&lt;useq&gt;.heap</filename>,
+ where <literal>&lt;prefix&gt;</literal> is controlled by the <link
+ linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
+ option.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="prof.reset">
+ <term>
+ <mallctl>prof.reset</mallctl>
+ (<type>size_t</type>)
+ <literal>-w</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Reset all memory profile statistics, and optionally
+ update the sample rate (see <link
+ linkend="opt.lg_prof_sample"><mallctl>opt.lg_prof_sample</mallctl></link>
+ and <link
+ linkend="prof.lg_sample"><mallctl>prof.lg_sample</mallctl></link>).
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="prof.lg_sample">
+ <term>
+ <mallctl>prof.lg_sample</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Get the current sample rate (see <link
+ linkend="opt.lg_prof_sample"><mallctl>opt.lg_prof_sample</mallctl></link>).
+ </para></listitem>
+ </varlistentry>
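
[Editor's sketch tying prof.reset to prof.lg_sample: discard accumulated samples, switch to sampling roughly every 2^21 bytes, and read back the rate now in effect. Assumes a --enable-prof build.]

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    static void
    prof_rerate(void)
    {
    	size_t lg_sample = 21, sz = sizeof(lg_sample);

    	/* Reset all profile statistics and update the sample rate. */
    	mallctl("prof.reset", NULL, NULL, &lg_sample, sizeof(lg_sample));

    	/* Confirm the rate that actually took effect. */
    	if (mallctl("prof.lg_sample", &lg_sample, &sz, NULL, 0) == 0)
    		printf("sampling every ~2^%zu bytes\n", lg_sample);
    }
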
+
<varlistentry id="prof.interval">
<term>
<mallctl>prof.interval</mallctl>
@@ -1637,9 +1991,8 @@ malloc_conf = "xmalloc:true";]]></programlisting>
</term>
<listitem><para>Pointer to a counter that contains an approximate count
of the current number of bytes in active pages. The estimate may be
- high, but never low, because each arena rounds up to the nearest
- multiple of the chunk size when computing its contribution to the
- counter. Note that the <link
+ high, but never low, because each arena rounds up when computing its
+ contribution to the counter. Note that the <link
linkend="epoch"><mallctl>epoch</mallctl></link> mallctl has no bearing
on this counter. Furthermore, counter consistency is maintained via
atomic operations, so it is necessary to use an atomic operation in
@@ -1670,88 +2023,56 @@ malloc_conf = "xmalloc:true";]]></programlisting>
equal to <link
linkend="stats.allocated"><mallctl>stats.allocated</mallctl></link>.
This does not include <link linkend="stats.arenas.i.pdirty">
- <mallctl>stats.arenas.&lt;i&gt;.pdirty</mallctl></link> and pages
+ <mallctl>stats.arenas.&lt;i&gt;.pdirty</mallctl></link>, nor pages
entirely devoted to allocator metadata.</para></listitem>
</varlistentry>
- <varlistentry id="stats.mapped">
+ <varlistentry id="stats.metadata">
<term>
- <mallctl>stats.mapped</mallctl>
+ <mallctl>stats.metadata</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
- <listitem><para>Total number of bytes in chunks mapped on behalf of the
- application. This is a multiple of the chunk size, and is at least as
- large as <link
- linkend="stats.active"><mallctl>stats.active</mallctl></link>. This
- does not include inactive chunks.</para></listitem>
- </varlistentry>
-
- <varlistentry id="stats.chunks.current">
- <term>
- <mallctl>stats.chunks.current</mallctl>
- (<type>size_t</type>)
- <literal>r-</literal>
- [<option>--enable-stats</option>]
- </term>
- <listitem><para>Total number of chunks actively mapped on behalf of the
- application. This does not include inactive chunks.
- </para></listitem>
- </varlistentry>
-
- <varlistentry id="stats.chunks.total">
- <term>
- <mallctl>stats.chunks.total</mallctl>
- (<type>uint64_t</type>)
- <literal>r-</literal>
- [<option>--enable-stats</option>]
- </term>
- <listitem><para>Cumulative number of chunks allocated.</para></listitem>
+ <listitem><para>Total number of bytes dedicated to metadata, which
+ comprise base allocations used for bootstrap-sensitive internal
+ allocator data structures, arena chunk headers (see <link
+ linkend="stats.arenas.i.metadata.mapped"><mallctl>stats.arenas.&lt;i&gt;.metadata.mapped</mallctl></link>),
+ and internal allocations (see <link
+ linkend="stats.arenas.i.metadata.allocated"><mallctl>stats.arenas.&lt;i&gt;.metadata.allocated</mallctl></link>).</para></listitem>
</varlistentry>
- <varlistentry id="stats.chunks.high">
+ <varlistentry id="stats.resident">
<term>
- <mallctl>stats.chunks.high</mallctl>
+ <mallctl>stats.resident</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
- <listitem><para>Maximum number of active chunks at any time thus far.
- </para></listitem>
+ <listitem><para>Maximum number of bytes in physically resident data
+ pages mapped by the allocator, comprising all pages dedicated to
+ allocator metadata, pages backing active allocations, and unused dirty
+        pages.  This is a maximum rather than a precise value because pages
+        may not actually be physically resident if they correspond to
+        demand-zeroed virtual memory that has not yet been touched.  This is a
+        multiple of the
+ page size, and is larger than <link
+ linkend="stats.active"><mallctl>stats.active</mallctl></link>.</para></listitem>
</varlistentry>
- <varlistentry id="stats.huge.allocated">
+ <varlistentry id="stats.mapped">
<term>
- <mallctl>stats.huge.allocated</mallctl>
+ <mallctl>stats.mapped</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
- <listitem><para>Number of bytes currently allocated by huge objects.
- </para></listitem>
- </varlistentry>
-
- <varlistentry id="stats.huge.nmalloc">
- <term>
- <mallctl>stats.huge.nmalloc</mallctl>
- (<type>uint64_t</type>)
- <literal>r-</literal>
- [<option>--enable-stats</option>]
- </term>
- <listitem><para>Cumulative number of huge allocation requests.
- </para></listitem>
- </varlistentry>
-
- <varlistentry id="stats.huge.ndalloc">
- <term>
- <mallctl>stats.huge.ndalloc</mallctl>
- (<type>uint64_t</type>)
- <literal>r-</literal>
- [<option>--enable-stats</option>]
- </term>
- <listitem><para>Cumulative number of huge deallocation requests.
- </para></listitem>
+ <listitem><para>Total number of bytes in active chunks mapped by the
+ allocator. This is a multiple of the chunk size, and is larger than
+ <link linkend="stats.active"><mallctl>stats.active</mallctl></link>.
+ This does not include inactive chunks, even those that contain unused
+ dirty pages, which means that there is no strict ordering between this
+ and <link
+ linkend="stats.resident"><mallctl>stats.resident</mallctl></link>.</para></listitem>
</varlistentry>
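
[Editor's sketch: these summary statistics are refreshed only when the epoch is advanced, so a reader (requiring --enable-stats) typically looks like the following.]

    #include <stdio.h>
    #include <stdint.h>
    #include <jemalloc/jemalloc.h>

    static void
    print_summary_stats(void)
    {
    	uint64_t epoch = 1;
    	size_t sz, active, metadata, resident, mapped;

    	/* Advance the epoch so the stats below reflect current state. */
    	sz = sizeof(epoch);
    	mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));

    	sz = sizeof(size_t);
    	mallctl("stats.active", &active, &sz, NULL, 0);
    	mallctl("stats.metadata", &metadata, &sz, NULL, 0);
    	mallctl("stats.resident", &resident, &sz, NULL, 0);
    	mallctl("stats.mapped", &mapped, &sz, NULL, 0);

    	/* Expected ordering: active <= resident and active <= mapped. */
    	printf("active: %zu metadata: %zu resident: %zu mapped: %zu\n",
    	    active, metadata, resident, mapped);
    }
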
<varlistentry id="stats.arenas.i.dss">
@@ -1768,6 +2089,18 @@ malloc_conf = "xmalloc:true";]]></programlisting>
</para></listitem>
</varlistentry>
+ <varlistentry id="stats.arenas.i.lg_dirty_mult">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.lg_dirty_mult</mallctl>
+ (<type>ssize_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Minimum ratio (log base 2) of active to dirty pages.
+ See <link
+ linkend="opt.lg_dirty_mult"><mallctl>opt.lg_dirty_mult</mallctl></link>
+ for details.</para></listitem>
+ </varlistentry>
+
<varlistentry id="stats.arenas.i.nthreads">
<term>
<mallctl>stats.arenas.&lt;i&gt;.nthreads</mallctl>
@@ -1809,6 +2142,38 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<listitem><para>Number of mapped bytes.</para></listitem>
</varlistentry>
+ <varlistentry id="stats.arenas.i.metadata.mapped">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.metadata.mapped</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of mapped bytes in arena chunk headers, which
+ track the states of the non-metadata pages.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.metadata.allocated">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.metadata.allocated</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of bytes dedicated to internal allocations.
+ Internal allocations differ from application-originated allocations in
+ that they are for internal use, and that they are omitted from heap
+ profiles. This statistic is reported separately from <link
+ linkend="stats.metadata"><mallctl>stats.metadata</mallctl></link> and
+ <link
+ linkend="stats.arenas.i.metadata.mapped"><mallctl>stats.arenas.&lt;i&gt;.metadata.mapped</mallctl></link>
+ because it overlaps with e.g. the <link
+ linkend="stats.allocated"><mallctl>stats.allocated</mallctl></link> and
+ <link linkend="stats.active"><mallctl>stats.active</mallctl></link>
+ statistics, whereas the other metadata statistics do
+ not.</para></listitem>
+ </varlistentry>
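
[Editor's sketch: reading the per-arena metadata counters above for a specific arena via a MIB; the helper name is hypothetical and error handling is minimal.]

    #include <jemalloc/jemalloc.h>

    /* Return metadata bytes internally allocated by arena `i`, or 0 on error. */
    static size_t
    arena_metadata_allocated(unsigned i)
    {
    	size_t mib[5], miblen = 5, val = 0, sz = sizeof(val);

    	if (mallctlnametomib("stats.arenas.0.metadata.allocated", mib,
    	    &miblen) != 0)
    		return (0);
    	mib[2] = i;	/* Substitute the arena index. */
    	mallctlbymib(mib, miblen, &val, &sz, NULL, 0);
    	return (val);
    }
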
+
<varlistentry id="stats.arenas.i.npurge">
<term>
<mallctl>stats.arenas.&lt;i&gt;.npurge</mallctl>
@@ -1930,15 +2295,48 @@ malloc_conf = "xmalloc:true";]]></programlisting>
</para></listitem>
</varlistentry>
- <varlistentry id="stats.arenas.i.bins.j.allocated">
+ <varlistentry id="stats.arenas.i.huge.allocated">
<term>
- <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.allocated</mallctl>
+ <mallctl>stats.arenas.&lt;i&gt;.huge.allocated</mallctl>
(<type>size_t</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
- <listitem><para>Current number of bytes allocated by
- bin.</para></listitem>
+ <listitem><para>Number of bytes currently allocated by huge objects.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.huge.nmalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.huge.nmalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of huge allocation requests served
+ directly by the arena.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.huge.ndalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.huge.ndalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of huge deallocation requests served
+ directly by the arena.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.huge.nrequests">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.huge.nrequests</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of huge allocation requests.
+ </para></listitem>
</varlistentry>
<varlistentry id="stats.arenas.i.bins.j.nmalloc">
@@ -1974,6 +2372,17 @@ malloc_conf = "xmalloc:true";]]></programlisting>
requests.</para></listitem>
</varlistentry>
+ <varlistentry id="stats.arenas.i.bins.j.curregs">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.curregs</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Current number of regions for this size
+ class.</para></listitem>
+ </varlistentry>
+
<varlistentry id="stats.arenas.i.bins.j.nfills">
<term>
<mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nfills</mallctl>
@@ -2068,6 +2477,50 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<listitem><para>Current number of runs for this size class.
</para></listitem>
</varlistentry>
+
+ <varlistentry id="stats.arenas.i.hchunks.j.nmalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.nmalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of allocation requests for this size
+ class served directly by the arena.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.hchunks.j.ndalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.ndalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of deallocation requests for this
+ size class served directly by the arena.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.hchunks.j.nrequests">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.nrequests</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of allocation requests for this size
+ class.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.hchunks.j.curhchunks">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.curhchunks</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Current number of huge allocations for this size class.
+ </para></listitem>
+ </varlistentry>
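
[Editor's sketch combining the per-arena huge-class statistics with the arenas.nhchunks count introduced earlier; both indices in the MIB are replaced before each lookup. Illustrative only; --enable-stats assumed.]

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    static void
    print_arena_hchunks(unsigned arena_ind)
    {
    	unsigned nhchunks, j;
    	size_t sz, mib[6], miblen = 6;

    	sz = sizeof(nhchunks);
    	mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0);

    	mallctlnametomib("stats.arenas.0.hchunks.0.curhchunks", mib, &miblen);
    	mib[2] = arena_ind;	/* Arena index. */
    	for (j = 0; j < nhchunks; j++) {
    		size_t cur;

    		mib[4] = j;	/* Huge size class index. */
    		sz = sizeof(cur);
    		mallctlbymib(mib, miblen, &cur, &sz, NULL, 0);
    		printf("arena %u, huge class %u: %zu chunks\n", arena_ind, j,
    		    cur);
    	}
    }
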
</variablelist>
</refsect1>
<refsect1 id="debugging_malloc_problems">
@@ -2253,42 +2706,6 @@ malloc_conf = "xmalloc:true";]]></programlisting>
returns the usable size of the allocation pointed to by
<parameter>ptr</parameter>. </para>
</refsect2>
- <refsect2>
- <title>Experimental API</title>
- <para>The <function>allocm<parameter/></function>,
- <function>rallocm<parameter/></function>,
- <function>sallocm<parameter/></function>,
- <function>dallocm<parameter/></function>, and
- <function>nallocm<parameter/></function> functions return
- <constant>ALLOCM_SUCCESS</constant> on success; otherwise they return an
- error value. The <function>allocm<parameter/></function>,
- <function>rallocm<parameter/></function>, and
- <function>nallocm<parameter/></function> functions will fail if:
- <variablelist>
- <varlistentry>
- <term><errorname>ALLOCM_ERR_OOM</errorname></term>
-
- <listitem><para>Out of memory. Insufficient contiguous memory was
- available to service the allocation request. The
- <function>allocm<parameter/></function> function additionally sets
- <parameter>*ptr</parameter> to <constant>NULL</constant>, whereas
- the <function>rallocm<parameter/></function> function leaves
- <constant>*ptr</constant> unmodified.</para></listitem>
- </varlistentry>
- </variablelist>
- The <function>rallocm<parameter/></function> function will also
- fail if:
- <variablelist>
- <varlistentry>
- <term><errorname>ALLOCM_ERR_NOT_MOVED</errorname></term>
-
- <listitem><para><constant>ALLOCM_NO_MOVE</constant> was specified,
- but the reallocation request could not be serviced without moving
- the object.</para></listitem>
- </varlistentry>
- </variablelist>
- </para>
- </refsect2>
</refsect1>
<refsect1 id="environment">
<title>ENVIRONMENT</title>
diff --git a/deps/jemalloc/include/jemalloc/internal/arena.h b/deps/jemalloc/include/jemalloc/internal/arena.h
index 9d000c03d..12c617979 100644
--- a/deps/jemalloc/include/jemalloc/internal/arena.h
+++ b/deps/jemalloc/include/jemalloc/internal/arena.h
@@ -1,30 +1,10 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
-/*
- * RUN_MAX_OVRHD indicates maximum desired run header overhead. Runs are sized
- * as small as possible such that this setting is still honored, without
- * violating other constraints. The goal is to make runs as small as possible
- * without exceeding a per run external fragmentation threshold.
- *
- * We use binary fixed point math for overhead computations, where the binary
- * point is implicitly RUN_BFP bits to the left.
- *
- * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be
- * honored for some/all object sizes, since when heap profiling is enabled
- * there is one pointer of header overhead per object (plus a constant). This
- * constraint is relaxed (ignored) for runs that are so small that the
- * per-region overhead is greater than:
- *
- * (RUN_MAX_OVRHD / (reg_interval << (3+RUN_BFP))
- */
-#define RUN_BFP 12
-/* \/ Implicit binary fixed point. */
-#define RUN_MAX_OVRHD 0x0000003dU
-#define RUN_MAX_OVRHD_RELAX 0x00001800U
+#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)
/* Maximum number of regions in one run. */
-#define LG_RUN_MAXREGS 11
+#define LG_RUN_MAXREGS (LG_PAGE - LG_TINY_MIN)
#define RUN_MAXREGS (1U << LG_RUN_MAXREGS)
/*
@@ -36,16 +16,18 @@
/*
* The minimum ratio of active:dirty pages per arena is computed as:
*
- * (nactive >> opt_lg_dirty_mult) >= ndirty
+ * (nactive >> lg_dirty_mult) >= ndirty
*
- * So, supposing that opt_lg_dirty_mult is 3, there can be no less than 8 times
- * as many active pages as dirty pages.
+ * So, supposing that lg_dirty_mult is 3, there can be no less than 8 times as
+ * many active pages as dirty pages.
*/
#define LG_DIRTY_MULT_DEFAULT 3
-typedef struct arena_chunk_map_s arena_chunk_map_t;
-typedef struct arena_chunk_s arena_chunk_t;
+typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t;
typedef struct arena_run_s arena_run_t;
+typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t;
+typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t;
+typedef struct arena_chunk_s arena_chunk_t;
typedef struct arena_bin_info_s arena_bin_info_t;
typedef struct arena_bin_s arena_bin_t;
typedef struct arena_s arena_t;
@@ -54,54 +36,34 @@ typedef struct arena_s arena_t;
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
-/* Each element of the chunk map corresponds to one page within the chunk. */
-struct arena_chunk_map_s {
-#ifndef JEMALLOC_PROF
- /*
- * Overlay prof_ctx in order to allow it to be referenced by dead code.
- * Such antics aren't warranted for per arena data structures, but
- * chunk map overhead accounts for a percentage of memory, rather than
- * being just a fixed cost.
- */
- union {
-#endif
- union {
- /*
- * Linkage for run trees. There are two disjoint uses:
- *
- * 1) arena_t's runs_avail tree.
- * 2) arena_run_t conceptually uses this linkage for in-use
- * non-full runs, rather than directly embedding linkage.
- */
- rb_node(arena_chunk_map_t) rb_link;
- /*
- * List of runs currently in purgatory. arena_chunk_purge()
- * temporarily allocates runs that contain dirty pages while
- * purging, so that other threads cannot use the runs while the
- * purging thread is operating without the arena lock held.
- */
- ql_elm(arena_chunk_map_t) ql_link;
- } u;
+#ifdef JEMALLOC_ARENA_STRUCTS_A
+struct arena_run_s {
+ /* Index of bin this run is associated with. */
+ szind_t binind;
- /* Profile counters, used for large object runs. */
- prof_ctx_t *prof_ctx;
-#ifndef JEMALLOC_PROF
- }; /* union { ... }; */
-#endif
+ /* Number of free regions in run. */
+ unsigned nfree;
+ /* Per region allocated/deallocated bitmap. */
+ bitmap_t bitmap[BITMAP_GROUPS_MAX];
+};
+
+/* Each element of the chunk map corresponds to one page within the chunk. */
+struct arena_chunk_map_bits_s {
/*
* Run address (or size) and various flags are stored together. The bit
* layout looks like (assuming 32-bit system):
*
- * ???????? ???????? ????nnnn nnnndula
+ * ???????? ???????? ???nnnnn nnndumla
*
* ? : Unallocated: Run address for first/last pages, unset for internal
* pages.
* Small: Run page offset.
- * Large: Run size for first page, unset for trailing pages.
+ * Large: Run page count for first page, unset for trailing pages.
* n : binind for small size class, BININD_INVALID for large size class.
* d : dirty?
* u : unzeroed?
+ * m : decommitted?
* l : large?
* a : allocated?
*
@@ -110,78 +72,109 @@ struct arena_chunk_map_s {
* p : run page offset
* s : run size
* n : binind for size class; large objects set these to BININD_INVALID
- * except for promoted allocations (see prof_promote)
* x : don't care
* - : 0
* + : 1
- * [DULA] : bit set
- * [dula] : bit unset
+ * [DUMLA] : bit set
+ * [dumla] : bit unset
*
* Unallocated (clean):
- * ssssssss ssssssss ssss++++ ++++du-a
- * xxxxxxxx xxxxxxxx xxxxxxxx xxxx-Uxx
- * ssssssss ssssssss ssss++++ ++++dU-a
+ * ssssssss ssssssss sss+++++ +++dum-a
+ * xxxxxxxx xxxxxxxx xxxxxxxx xxx-Uxxx
+ * ssssssss ssssssss sss+++++ +++dUm-a
*
* Unallocated (dirty):
- * ssssssss ssssssss ssss++++ ++++D--a
+ * ssssssss ssssssss sss+++++ +++D-m-a
* xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
- * ssssssss ssssssss ssss++++ ++++D--a
+ * ssssssss ssssssss sss+++++ +++D-m-a
*
* Small:
- * pppppppp pppppppp ppppnnnn nnnnd--A
- * pppppppp pppppppp ppppnnnn nnnn---A
- * pppppppp pppppppp ppppnnnn nnnnd--A
+ * pppppppp pppppppp pppnnnnn nnnd---A
+ * pppppppp pppppppp pppnnnnn nnn----A
+ * pppppppp pppppppp pppnnnnn nnnd---A
*
* Large:
- * ssssssss ssssssss ssss++++ ++++D-LA
+ * ssssssss ssssssss sss+++++ +++D--LA
* xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
- * -------- -------- ----++++ ++++D-LA
+ * -------- -------- ---+++++ +++D--LA
*
- * Large (sampled, size <= PAGE):
- * ssssssss ssssssss ssssnnnn nnnnD-LA
+ * Large (sampled, size <= LARGE_MINCLASS):
+ * ssssssss ssssssss sssnnnnn nnnD--LA
+ * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
+ * -------- -------- ---+++++ +++D--LA
*
- * Large (not sampled, size == PAGE):
- * ssssssss ssssssss ssss++++ ++++D-LA
+ * Large (not sampled, size == LARGE_MINCLASS):
+ * ssssssss ssssssss sss+++++ +++D--LA
+ * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
+ * -------- -------- ---+++++ +++D--LA
*/
size_t bits;
-#define CHUNK_MAP_BININD_SHIFT 4
+#define CHUNK_MAP_ALLOCATED ((size_t)0x01U)
+#define CHUNK_MAP_LARGE ((size_t)0x02U)
+#define CHUNK_MAP_STATE_MASK ((size_t)0x3U)
+
+#define CHUNK_MAP_DECOMMITTED ((size_t)0x04U)
+#define CHUNK_MAP_UNZEROED ((size_t)0x08U)
+#define CHUNK_MAP_DIRTY ((size_t)0x10U)
+#define CHUNK_MAP_FLAGS_MASK ((size_t)0x1cU)
+
+#define CHUNK_MAP_BININD_SHIFT 5
#define BININD_INVALID ((size_t)0xffU)
-/* CHUNK_MAP_BININD_MASK == (BININD_INVALID << CHUNK_MAP_BININD_SHIFT) */
-#define CHUNK_MAP_BININD_MASK ((size_t)0xff0U)
+#define CHUNK_MAP_BININD_MASK (BININD_INVALID << CHUNK_MAP_BININD_SHIFT)
#define CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK
-#define CHUNK_MAP_FLAGS_MASK ((size_t)0xcU)
-#define CHUNK_MAP_DIRTY ((size_t)0x8U)
-#define CHUNK_MAP_UNZEROED ((size_t)0x4U)
-#define CHUNK_MAP_LARGE ((size_t)0x2U)
-#define CHUNK_MAP_ALLOCATED ((size_t)0x1U)
-#define CHUNK_MAP_KEY CHUNK_MAP_ALLOCATED
+
+#define CHUNK_MAP_RUNIND_SHIFT (CHUNK_MAP_BININD_SHIFT + 8)
+#define CHUNK_MAP_SIZE_SHIFT (CHUNK_MAP_RUNIND_SHIFT - LG_PAGE)
+#define CHUNK_MAP_SIZE_MASK \
+ (~(CHUNK_MAP_BININD_MASK | CHUNK_MAP_FLAGS_MASK | CHUNK_MAP_STATE_MASK))
};
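
[Editor's sketch: the bit-layout comment above is easier to check with a tiny standalone decoder. The program below copies the flag constants from this header and assumes LG_PAGE == 12; it is for reading the diagram, not code from the patch.]

    #include <stdio.h>
    #include <stddef.h>

    /* Constants copied from arena.h; LG_PAGE == 12 assumed. */
    #define CHUNK_MAP_ALLOCATED	((size_t)0x01U)
    #define CHUNK_MAP_LARGE		((size_t)0x02U)
    #define CHUNK_MAP_DECOMMITTED	((size_t)0x04U)
    #define CHUNK_MAP_UNZEROED	((size_t)0x08U)
    #define CHUNK_MAP_DIRTY		((size_t)0x10U)
    #define CHUNK_MAP_BININD_SHIFT	5
    #define CHUNK_MAP_BININD_MASK	(((size_t)0xffU) << CHUNK_MAP_BININD_SHIFT)
    #define CHUNK_MAP_RUNIND_SHIFT	(CHUNK_MAP_BININD_SHIFT + 8)

    int
    main(void)
    {
    	/* A small-run page: run page offset 3, binind 7, dirty, allocated. */
    	size_t mapbits = ((size_t)3 << CHUNK_MAP_RUNIND_SHIFT) |
    	    ((size_t)7 << CHUNK_MAP_BININD_SHIFT) | CHUNK_MAP_DIRTY |
    	    CHUNK_MAP_ALLOCATED;

    	printf("runind=%zu binind=%zu dirty=%d large=%d allocated=%d\n",
    	    mapbits >> CHUNK_MAP_RUNIND_SHIFT,
    	    (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT,
    	    (mapbits & CHUNK_MAP_DIRTY) != 0,
    	    (mapbits & CHUNK_MAP_LARGE) != 0,
    	    (mapbits & CHUNK_MAP_ALLOCATED) != 0);
    	return (0);
    }
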
-typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t;
-typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;
-typedef ql_head(arena_chunk_map_t) arena_chunk_mapelms_t;
-/* Arena chunk header. */
-struct arena_chunk_s {
- /* Arena that owns the chunk. */
- arena_t *arena;
+struct arena_runs_dirty_link_s {
+ qr(arena_runs_dirty_link_t) rd_link;
+};
- /* Linkage for tree of arena chunks that contain dirty runs. */
- rb_node(arena_chunk_t) dirty_link;
+/*
+ * Each arena_chunk_map_misc_t corresponds to one page within the chunk, just
+ * like arena_chunk_map_bits_t. Two separate arrays are stored within each
+ * chunk header in order to improve cache locality.
+ */
+struct arena_chunk_map_misc_s {
+ /*
+ * Linkage for run trees. There are two disjoint uses:
+ *
+ * 1) arena_t's runs_avail tree.
+ * 2) arena_run_t conceptually uses this linkage for in-use non-full
+ * runs, rather than directly embedding linkage.
+ */
+ rb_node(arena_chunk_map_misc_t) rb_link;
- /* Number of dirty pages. */
- size_t ndirty;
+ union {
+ /* Linkage for list of dirty runs. */
+ arena_runs_dirty_link_t rd;
- /* Number of available runs. */
- size_t nruns_avail;
+ /* Profile counters, used for large object runs. */
+ union {
+ void *prof_tctx_pun;
+ prof_tctx_t *prof_tctx;
+ };
+ /* Small region run metadata. */
+ arena_run_t run;
+ };
+};
+typedef rb_tree(arena_chunk_map_misc_t) arena_avail_tree_t;
+typedef rb_tree(arena_chunk_map_misc_t) arena_run_tree_t;
+#endif /* JEMALLOC_ARENA_STRUCTS_A */
+
+#ifdef JEMALLOC_ARENA_STRUCTS_B
+/* Arena chunk header. */
+struct arena_chunk_s {
/*
- * Number of available run adjacencies that purging could coalesce.
- * Clean and dirty available runs are not coalesced, which causes
- * virtual memory fragmentation. The ratio of
- * (nruns_avail-nruns_adjac):nruns_adjac is used for tracking this
- * fragmentation.
+ * A pointer to the arena that owns the chunk is stored within the node.
+ * This field as a whole is used by chunks_rtree to support both
+ * ivsalloc() and core-based debugging.
*/
- size_t nruns_adjac;
+ extent_node_t node;
/*
* Map of pages within chunk that keeps track of free/large/small. The
@@ -189,19 +182,7 @@ struct arena_chunk_s {
* need to be tracked in the map. This omission saves a header page
* for common chunk sizes (e.g. 4 MiB).
*/
- arena_chunk_map_t map[1]; /* Dynamically sized. */
-};
-typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;
-
-struct arena_run_s {
- /* Bin this run is associated with. */
- arena_bin_t *bin;
-
- /* Index of next region that has never been allocated, or nregs. */
- uint32_t nextind;
-
- /* Number of free regions in run. */
- unsigned nfree;
+ arena_chunk_map_bits_t map_bits[1]; /* Dynamically sized. */
};
/*
@@ -212,12 +193,7 @@ struct arena_run_s {
* Each run has the following layout:
*
* /--------------------\
- * | arena_run_t header |
- * | ... |
- * bitmap_offset | bitmap |
- * | ... |
- * ctx0_offset | ctx map |
- * | ... |
+ * | pad? |
* |--------------------|
* | redzone |
* reg0_offset | region 0 |
@@ -259,23 +235,11 @@ struct arena_bin_info_s {
uint32_t nregs;
/*
- * Offset of first bitmap_t element in a run header for this bin's size
- * class.
- */
- uint32_t bitmap_offset;
-
- /*
* Metadata used to manipulate bitmaps for runs associated with this
* bin.
*/
bitmap_info_t bitmap_info;
- /*
- * Offset of first (prof_ctx_t *) in a run header for this bin's size
- * class, or 0 if (config_prof == false || opt_prof == false).
- */
- uint32_t ctx0_offset;
-
/* Offset of first region in a run for this bin's size class. */
uint32_t reg0_offset;
};
@@ -321,8 +285,7 @@ struct arena_s {
/*
* There are three classes of arena operations from a locking
* perspective:
- * 1) Thread asssignment (modifies nthreads) is protected by
- * arenas_lock.
+ * 1) Thread assignment (modifies nthreads) is protected by arenas_lock.
* 2) Bin-related operations are protected by bin locks.
* 3) Chunk- and run-related operations are protected by this mutex.
*/
@@ -331,16 +294,20 @@ struct arena_s {
arena_stats_t stats;
/*
* List of tcaches for extant threads associated with this arena.
- * Stats from these are merged incrementally, and at exit.
+ * Stats from these are merged incrementally, and at exit if
+ * opt_stats_print is enabled.
*/
ql_head(tcache_t) tcache_ql;
uint64_t prof_accumbytes;
- dss_prec_t dss_prec;
+ /*
+ * PRNG state for cache index randomization of large allocation base
+ * pointers.
+ */
+ uint64_t offset_state;
- /* Tree of dirty-page-containing chunks this arena manages. */
- arena_chunk_tree_t chunks_dirty;
+ dss_prec_t dss_prec;
/*
* In order to avoid rapid chunk allocation/deallocation when an arena
@@ -354,7 +321,13 @@ struct arena_s {
*/
arena_chunk_t *spare;
- /* Number of pages in active runs. */
+ /* Minimum ratio (log base 2) of nactive:ndirty. */
+ ssize_t lg_dirty_mult;
+
+ /* True if a thread is currently executing arena_purge(). */
+ bool purging;
+
+ /* Number of pages in active runs and huge regions. */
size_t nactive;
/*
@@ -366,44 +339,116 @@ struct arena_s {
size_t ndirty;
/*
- * Approximate number of pages being purged. It is possible for
- * multiple threads to purge dirty pages concurrently, and they use
- * npurgatory to indicate the total number of pages all threads are
- * attempting to purge.
+ * Size/address-ordered tree of this arena's available runs. The tree
+ * is used for first-best-fit run allocation.
*/
- size_t npurgatory;
+ arena_avail_tree_t runs_avail;
/*
- * Size/address-ordered trees of this arena's available runs. The trees
- * are used for first-best-fit run allocation.
+ * Unused dirty memory this arena manages. Dirty memory is conceptually
+ * tracked as an arbitrarily interleaved LRU of dirty runs and cached
+ * chunks, but the list linkage is actually semi-duplicated in order to
+ * avoid extra arena_chunk_map_misc_t space overhead.
+ *
+ * LRU-----------------------------------------------------------MRU
+ *
+ * /-- arena ---\
+ * | |
+ * | |
+ * |------------| /- chunk -\
+ * ...->|chunks_cache|<--------------------------->| /----\ |<--...
+ * |------------| | |node| |
+ * | | | | | |
+ * | | /- run -\ /- run -\ | | | |
+ * | | | | | | | | | |
+ * | | | | | | | | | |
+ * |------------| |-------| |-------| | |----| |
+ * ...->|runs_dirty |<-->|rd |<-->|rd |<---->|rd |<----...
+ * |------------| |-------| |-------| | |----| |
+ * | | | | | | | | | |
+ * | | | | | | | \----/ |
+ * | | \-------/ \-------/ | |
+ * | | | |
+ * | | | |
+ * \------------/ \---------/
*/
- arena_avail_tree_t runs_avail;
+ arena_runs_dirty_link_t runs_dirty;
+ extent_node_t chunks_cache;
+
+ /* Extant huge allocations. */
+ ql_head(extent_node_t) huge;
+ /* Synchronizes all huge allocation/update/deallocation. */
+ malloc_mutex_t huge_mtx;
+
+ /*
+ * Trees of chunks that were previously allocated (trees differ only in
+ * node ordering). These are used when allocating chunks, in an attempt
+ * to re-use address space. Depending on function, different tree
+ * orderings are needed, which is why there are two trees with the same
+ * contents.
+ */
+ extent_tree_t chunks_szad_cached;
+ extent_tree_t chunks_ad_cached;
+ extent_tree_t chunks_szad_retained;
+ extent_tree_t chunks_ad_retained;
+
+ malloc_mutex_t chunks_mtx;
+ /* Cache of nodes that were allocated via base_alloc(). */
+ ql_head(extent_node_t) node_cache;
+ malloc_mutex_t node_cache_mtx;
+
+ /* User-configurable chunk hook functions. */
+ chunk_hooks_t chunk_hooks;
/* bins is used to store trees of free regions. */
arena_bin_t bins[NBINS];
};
+#endif /* JEMALLOC_ARENA_STRUCTS_B */
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-extern ssize_t opt_lg_dirty_mult;
-/*
- * small_size2bin is a compact lookup table that rounds request sizes up to
- * size classes. In order to reduce cache footprint, the table is compressed,
- * and all accesses are via the SMALL_SIZE2BIN macro.
- */
-extern uint8_t const small_size2bin[];
-#define SMALL_SIZE2BIN(s) (small_size2bin[(s-1) >> LG_TINY_MIN])
+static const size_t large_pad =
+#ifdef JEMALLOC_CACHE_OBLIVIOUS
+ PAGE
+#else
+ 0
+#endif
+ ;
-extern arena_bin_info_t arena_bin_info[NBINS];
+extern ssize_t opt_lg_dirty_mult;
-/* Number of large size classes. */
-#define nlclasses (chunk_npages - map_bias)
+extern arena_bin_info_t arena_bin_info[NBINS];
+extern size_t map_bias; /* Number of arena chunk header pages. */
+extern size_t map_misc_offset;
+extern size_t arena_maxrun; /* Max run size for arenas. */
+extern size_t large_maxclass; /* Max large size class. */
+extern unsigned nlclasses; /* Number of large size classes. */
+extern unsigned nhclasses; /* Number of huge size classes. */
+
+void arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node,
+ bool cache);
+void arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node,
+ bool cache);
+extent_node_t *arena_node_alloc(arena_t *arena);
+void arena_node_dalloc(arena_t *arena, extent_node_t *node);
+void *arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
+ bool *zero);
+void arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize);
+void arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk,
+ size_t oldsize, size_t usize);
+void arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk,
+ size_t oldsize, size_t usize);
+bool arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk,
+ size_t oldsize, size_t usize, bool *zero);
+ssize_t arena_lg_dirty_mult_get(arena_t *arena);
+bool arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult);
+void arena_maybe_purge(arena_t *arena);
void arena_purge_all(arena_t *arena);
void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
- size_t binind, uint64_t prof_accumbytes);
+ szind_t binind, uint64_t prof_accumbytes);
void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
bool zero);
#ifdef JEMALLOC_JET
@@ -418,19 +463,22 @@ void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
void arena_quarantine_junk_small(void *ptr, size_t usize);
void *arena_malloc_small(arena_t *arena, size_t size, bool zero);
void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
-void *arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero);
+void *arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize,
+ size_t alignment, bool zero, tcache_t *tcache);
void arena_prof_promoted(const void *ptr, size_t size);
-void arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- arena_chunk_map_t *mapelm);
+void arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, arena_chunk_map_bits_t *bitselm);
void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- size_t pageind, arena_chunk_map_t *mapelm);
+ size_t pageind, arena_chunk_map_bits_t *bitselm);
void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind);
#ifdef JEMALLOC_JET
typedef void (arena_dalloc_junk_large_t)(void *, size_t);
extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
+#else
+void arena_dalloc_junk_large(void *ptr, size_t usize);
#endif
-void arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk,
+void arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
void *ptr);
void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
#ifdef JEMALLOC_JET
@@ -439,16 +487,18 @@ extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
#endif
bool arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero);
-void *arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
- size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
- bool try_tcache_dalloc);
+void *arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
+ size_t size, size_t alignment, bool zero, tcache_t *tcache);
dss_prec_t arena_dss_prec_get(arena_t *arena);
-void arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
-void arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
- size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
- malloc_large_stats_t *lstats);
-bool arena_new(arena_t *arena, unsigned ind);
-void arena_boot(void);
+bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
+ssize_t arena_lg_dirty_mult_default_get(void);
+bool arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult);
+void arena_stats_merge(arena_t *arena, const char **dss,
+ ssize_t *lg_dirty_mult, size_t *nactive, size_t *ndirty,
+ arena_stats_t *astats, malloc_bin_stats_t *bstats,
+ malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats);
+arena_t *arena_new(unsigned ind);
+bool arena_boot(void);
void arena_prefork(arena_t *arena);
void arena_postfork_parent(arena_t *arena);
void arena_postfork_child(arena_t *arena);
@@ -458,64 +508,138 @@ void arena_postfork_child(arena_t *arena);
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
-arena_chunk_map_t *arena_mapp_get(arena_chunk_t *chunk, size_t pageind);
+arena_chunk_map_bits_t *arena_bitselm_get(arena_chunk_t *chunk,
+ size_t pageind);
+arena_chunk_map_misc_t *arena_miscelm_get(arena_chunk_t *chunk,
+ size_t pageind);
+size_t arena_miscelm_to_pageind(arena_chunk_map_misc_t *miscelm);
+void *arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm);
+arena_chunk_map_misc_t *arena_rd_to_miscelm(arena_runs_dirty_link_t *rd);
+arena_chunk_map_misc_t *arena_run_to_miscelm(arena_run_t *run);
size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbitsp_read(size_t *mapbitsp);
size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_size_decode(size_t mapbits);
size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
size_t pageind);
size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
+szind_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
+size_t arena_mapbits_size_encode(size_t size);
void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
size_t size, size_t flags);
void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
size_t size);
+void arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind,
+ size_t flags);
void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
size_t size, size_t flags);
void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
- size_t binind);
+ szind_t binind);
void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
- size_t runind, size_t binind, size_t flags);
-void arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
- size_t unzeroed);
+ size_t runind, szind_t binind, size_t flags);
+void arena_metadata_allocated_add(arena_t *arena, size_t size);
+void arena_metadata_allocated_sub(arena_t *arena, size_t size);
+size_t arena_metadata_allocated_get(arena_t *arena);
bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum(arena_t *arena, uint64_t accumbytes);
-size_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
-size_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
+szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
+szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
const void *ptr);
-prof_ctx_t *arena_prof_ctx_get(const void *ptr);
-void arena_prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx);
-void *arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache);
+prof_tctx_t *arena_prof_tctx_get(const void *ptr);
+void arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
+void arena_prof_tctx_reset(const void *ptr, size_t usize,
+ const void *old_ptr, prof_tctx_t *old_tctx);
+void *arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
+ tcache_t *tcache);
+arena_t *arena_aalloc(const void *ptr);
size_t arena_salloc(const void *ptr, bool demote);
-void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- bool try_tcache);
+void arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
+void arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
# ifdef JEMALLOC_ARENA_INLINE_A
-JEMALLOC_ALWAYS_INLINE arena_chunk_map_t *
-arena_mapp_get(arena_chunk_t *chunk, size_t pageind)
+JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t *
+arena_bitselm_get(arena_chunk_t *chunk, size_t pageind)
{
assert(pageind >= map_bias);
assert(pageind < chunk_npages);
- return (&chunk->map[pageind-map_bias]);
+ return (&chunk->map_bits[pageind-map_bias]);
+}
+
+JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
+arena_miscelm_get(arena_chunk_t *chunk, size_t pageind)
+{
+
+ assert(pageind >= map_bias);
+ assert(pageind < chunk_npages);
+
+ return ((arena_chunk_map_misc_t *)((uintptr_t)chunk +
+ (uintptr_t)map_misc_offset) + pageind-map_bias);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+arena_miscelm_to_pageind(arena_chunk_map_misc_t *miscelm)
+{
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
+ size_t pageind = ((uintptr_t)miscelm - ((uintptr_t)chunk +
+ map_misc_offset)) / sizeof(arena_chunk_map_misc_t) + map_bias;
+
+ assert(pageind >= map_bias);
+ assert(pageind < chunk_npages);
+
+ return (pageind);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm)
+{
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
+ size_t pageind = arena_miscelm_to_pageind(miscelm);
+
+ return ((void *)((uintptr_t)chunk + (pageind << LG_PAGE)));
+}
+
+JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
+arena_rd_to_miscelm(arena_runs_dirty_link_t *rd)
+{
+ arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
+ *)((uintptr_t)rd - offsetof(arena_chunk_map_misc_t, rd));
+
+ assert(arena_miscelm_to_pageind(miscelm) >= map_bias);
+ assert(arena_miscelm_to_pageind(miscelm) < chunk_npages);
+
+ return (miscelm);
+}
+
+JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
+arena_run_to_miscelm(arena_run_t *run)
+{
+ arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t
+ *)((uintptr_t)run - offsetof(arena_chunk_map_misc_t, run));
+
+ assert(arena_miscelm_to_pageind(miscelm) >= map_bias);
+ assert(arena_miscelm_to_pageind(miscelm) < chunk_npages);
+
+ return (miscelm);
}
JEMALLOC_ALWAYS_INLINE size_t *
arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
{
- return (&arena_mapp_get(chunk, pageind)->bits);
+ return (&arena_bitselm_get(chunk, pageind)->bits);
}
JEMALLOC_ALWAYS_INLINE size_t
@@ -533,13 +657,29 @@ arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
+arena_mapbits_size_decode(size_t mapbits)
+{
+ size_t size;
+
+#if CHUNK_MAP_SIZE_SHIFT > 0
+ size = (mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT;
+#elif CHUNK_MAP_SIZE_SHIFT == 0
+ size = mapbits & CHUNK_MAP_SIZE_MASK;
+#else
+ size = (mapbits & CHUNK_MAP_SIZE_MASK) << -CHUNK_MAP_SIZE_SHIFT;
+#endif
+
+ return (size);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
mapbits = arena_mapbits_get(chunk, pageind);
assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
- return (mapbits & ~PAGE_MASK);
+ return (arena_mapbits_size_decode(mapbits));
}
JEMALLOC_ALWAYS_INLINE size_t
@@ -550,7 +690,7 @@ arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
mapbits = arena_mapbits_get(chunk, pageind);
assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
(CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
- return (mapbits & ~PAGE_MASK);
+ return (arena_mapbits_size_decode(mapbits));
}
JEMALLOC_ALWAYS_INLINE size_t
@@ -561,14 +701,14 @@ arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
mapbits = arena_mapbits_get(chunk, pageind);
assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
CHUNK_MAP_ALLOCATED);
- return (mapbits >> LG_PAGE);
+ return (mapbits >> CHUNK_MAP_RUNIND_SHIFT);
}
-JEMALLOC_ALWAYS_INLINE size_t
+JEMALLOC_ALWAYS_INLINE szind_t
arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
- size_t binind;
+ szind_t binind;
mapbits = arena_mapbits_get(chunk, pageind);
binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
@@ -582,6 +722,8 @@ arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
size_t mapbits;
mapbits = arena_mapbits_get(chunk, pageind);
+ assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
+ (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
return (mapbits & CHUNK_MAP_DIRTY);
}
@@ -591,10 +733,23 @@ arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
size_t mapbits;
mapbits = arena_mapbits_get(chunk, pageind);
+ assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
+ (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
return (mapbits & CHUNK_MAP_UNZEROED);
}
JEMALLOC_ALWAYS_INLINE size_t
+arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind)
+{
+ size_t mapbits;
+
+ mapbits = arena_mapbits_get(chunk, pageind);
+ assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits &
+ (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
+ return (mapbits & CHUNK_MAP_DECOMMITTED);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -619,6 +774,23 @@ arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits)
*mapbitsp = mapbits;
}
+JEMALLOC_ALWAYS_INLINE size_t
+arena_mapbits_size_encode(size_t size)
+{
+ size_t mapbits;
+
+#if CHUNK_MAP_SIZE_SHIFT > 0
+ mapbits = size << CHUNK_MAP_SIZE_SHIFT;
+#elif CHUNK_MAP_SIZE_SHIFT == 0
+ mapbits = size;
+#else
+ mapbits = size >> -CHUNK_MAP_SIZE_SHIFT;
+#endif
+
+ assert((mapbits & ~CHUNK_MAP_SIZE_MASK) == 0);
+ return (mapbits);
+}
+
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
@@ -626,9 +798,11 @@ arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
- assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
- assert((flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == flags);
- arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags);
+ assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
+ assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
+ (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
+ arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
+ CHUNK_MAP_BININD_INVALID | flags);
}
JEMALLOC_ALWAYS_INLINE void
@@ -640,7 +814,17 @@ arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
assert((size & PAGE_MASK) == 0);
assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
- arena_mapbitsp_write(mapbitsp, size | (mapbits & PAGE_MASK));
+ arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
+ (mapbits & ~CHUNK_MAP_SIZE_MASK));
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t flags)
+{
+ size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+
+ assert((flags & CHUNK_MAP_UNZEROED) == flags);
+ arena_mapbitsp_write(mapbitsp, flags);
}
JEMALLOC_ALWAYS_INLINE void
@@ -648,54 +832,62 @@ arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
{
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
- size_t mapbits = arena_mapbitsp_read(mapbitsp);
- size_t unzeroed;
assert((size & PAGE_MASK) == 0);
- assert((flags & CHUNK_MAP_DIRTY) == flags);
- unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
- arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags
- | unzeroed | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED);
+ assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
+ assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
+ (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
+ arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
+ CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE |
+ CHUNK_MAP_ALLOCATED);
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
- size_t binind)
+ szind_t binind)
{
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
size_t mapbits = arena_mapbitsp_read(mapbitsp);
assert(binind <= BININD_INVALID);
- assert(arena_mapbits_large_size_get(chunk, pageind) == PAGE);
+ assert(arena_mapbits_large_size_get(chunk, pageind) == LARGE_MINCLASS +
+ large_pad);
arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) |
(binind << CHUNK_MAP_BININD_SHIFT));
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
- size_t binind, size_t flags)
+ szind_t binind, size_t flags)
{
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
- size_t mapbits = arena_mapbitsp_read(mapbitsp);
- size_t unzeroed;
assert(binind < BININD_INVALID);
assert(pageind - runind >= map_bias);
- assert((flags & CHUNK_MAP_DIRTY) == flags);
- unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
- arena_mapbitsp_write(mapbitsp, (runind << LG_PAGE) | (binind <<
- CHUNK_MAP_BININD_SHIFT) | flags | unzeroed | CHUNK_MAP_ALLOCATED);
+ assert((flags & CHUNK_MAP_UNZEROED) == flags);
+ arena_mapbitsp_write(mapbitsp, (runind << CHUNK_MAP_RUNIND_SHIFT) |
+ (binind << CHUNK_MAP_BININD_SHIFT) | flags | CHUNK_MAP_ALLOCATED);
}
-JEMALLOC_ALWAYS_INLINE void
-arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
- size_t unzeroed)
+JEMALLOC_INLINE void
+arena_metadata_allocated_add(arena_t *arena, size_t size)
+{
+
+ atomic_add_z(&arena->stats.metadata_allocated, size);
+}
+
+JEMALLOC_INLINE void
+arena_metadata_allocated_sub(arena_t *arena, size_t size)
{
- size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
- size_t mapbits = arena_mapbitsp_read(mapbitsp);
- arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_UNZEROED) |
- unzeroed);
+ atomic_sub_z(&arena->stats.metadata_allocated, size);
+}
+
+JEMALLOC_INLINE size_t
+arena_metadata_allocated_get(arena_t *arena)
+{
+
+ return (atomic_read_z(&arena->stats.metadata_allocated));
}
JEMALLOC_INLINE bool
@@ -719,7 +911,7 @@ arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
cassert(config_prof);
- if (prof_interval == 0)
+ if (likely(prof_interval == 0))
return (false);
return (arena_prof_accum_impl(arena, accumbytes));
}
@@ -730,7 +922,7 @@ arena_prof_accum(arena_t *arena, uint64_t accumbytes)
cassert(config_prof);
- if (prof_interval == 0)
+ if (likely(prof_interval == 0))
return (false);
{
@@ -743,10 +935,10 @@ arena_prof_accum(arena_t *arena, uint64_t accumbytes)
}
}
-JEMALLOC_ALWAYS_INLINE size_t
+JEMALLOC_ALWAYS_INLINE szind_t
arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
{
- size_t binind;
+ szind_t binind;
binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
@@ -755,27 +947,34 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
arena_t *arena;
size_t pageind;
size_t actual_mapbits;
+ size_t rpages_ind;
arena_run_t *run;
arena_bin_t *bin;
- size_t actual_binind;
+ szind_t run_binind, actual_binind;
arena_bin_info_t *bin_info;
+ arena_chunk_map_misc_t *miscelm;
+ void *rpages;
assert(binind != BININD_INVALID);
assert(binind < NBINS);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- arena = chunk->arena;
+ arena = extent_node_arena_get(&chunk->node);
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
actual_mapbits = arena_mapbits_get(chunk, pageind);
assert(mapbits == actual_mapbits);
assert(arena_mapbits_large_get(chunk, pageind) == 0);
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
- run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
- (actual_mapbits >> LG_PAGE)) << LG_PAGE));
- bin = run->bin;
+ rpages_ind = pageind - arena_mapbits_small_runind_get(chunk,
+ pageind);
+ miscelm = arena_miscelm_get(chunk, rpages_ind);
+ run = &miscelm->run;
+ run_binind = run->binind;
+ bin = &arena->bins[run_binind];
actual_binind = bin - arena->bins;
- assert(binind == actual_binind);
+ assert(run_binind == actual_binind);
bin_info = &arena_bin_info[actual_binind];
- assert(((uintptr_t)ptr - ((uintptr_t)run +
+ rpages = arena_miscelm_to_rpages(miscelm);
+ assert(((uintptr_t)ptr - ((uintptr_t)rpages +
(uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
== 0);
}
@@ -785,10 +984,10 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
# endif /* JEMALLOC_ARENA_INLINE_A */
# ifdef JEMALLOC_ARENA_INLINE_B
-JEMALLOC_INLINE size_t
+JEMALLOC_INLINE szind_t
arena_bin_index(arena_t *arena, arena_bin_t *bin)
{
- size_t binind = bin - arena->bins;
+ szind_t binind = bin - arena->bins;
assert(binind < NBINS);
return (binind);
}
@@ -798,24 +997,26 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
{
unsigned shift, diff, regind;
size_t interval;
+ arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
+ void *rpages = arena_miscelm_to_rpages(miscelm);
/*
* Freeing a pointer lower than region zero can cause assertion
* failure.
*/
- assert((uintptr_t)ptr >= (uintptr_t)run +
+ assert((uintptr_t)ptr >= (uintptr_t)rpages +
(uintptr_t)bin_info->reg0_offset);
/*
* Avoid doing division with a variable divisor if possible. Using
* actual division here can reduce allocator throughput by over 20%!
*/
- diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run -
+ diff = (unsigned)((uintptr_t)ptr - (uintptr_t)rpages -
bin_info->reg0_offset);
/* Rescale (factor powers of 2 out of the numerator and denominator). */
interval = bin_info->reg_interval;
- shift = ffs(interval) - 1;
+ shift = jemalloc_ffs(interval) - 1;
diff >>= shift;
interval >>= shift;
@@ -850,8 +1051,8 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
};
- if (interval <= ((sizeof(interval_invs) / sizeof(unsigned)) +
- 2)) {
+ if (likely(interval <= ((sizeof(interval_invs) /
+ sizeof(unsigned)) + 2))) {
regind = (diff * interval_invs[interval - 3]) >>
SIZE_INV_SHIFT;
} else
@@ -865,113 +1066,138 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
return (regind);
}
-JEMALLOC_INLINE prof_ctx_t *
-arena_prof_ctx_get(const void *ptr)
+JEMALLOC_INLINE prof_tctx_t *
+arena_prof_tctx_get(const void *ptr)
{
- prof_ctx_t *ret;
+ prof_tctx_t *ret;
arena_chunk_t *chunk;
- size_t pageind, mapbits;
cassert(config_prof);
assert(ptr != NULL);
- assert(CHUNK_ADDR2BASE(ptr) != ptr);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- mapbits = arena_mapbits_get(chunk, pageind);
- assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
- if ((mapbits & CHUNK_MAP_LARGE) == 0) {
- if (prof_promote)
- ret = (prof_ctx_t *)(uintptr_t)1U;
+ if (likely(chunk != ptr)) {
+ size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ size_t mapbits = arena_mapbits_get(chunk, pageind);
+ assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
+ if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
+ ret = (prof_tctx_t *)(uintptr_t)1U;
else {
- arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
- (uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
- LG_PAGE));
- size_t binind = arena_ptr_small_binind_get(ptr,
- mapbits);
- arena_bin_info_t *bin_info = &arena_bin_info[binind];
- unsigned regind;
-
- regind = arena_run_regind(run, bin_info, ptr);
- ret = *(prof_ctx_t **)((uintptr_t)run +
- bin_info->ctx0_offset + (regind *
- sizeof(prof_ctx_t *)));
+ arena_chunk_map_misc_t *elm = arena_miscelm_get(chunk,
+ pageind);
+ ret = atomic_read_p(&elm->prof_tctx_pun);
}
} else
- ret = arena_mapp_get(chunk, pageind)->prof_ctx;
+ ret = huge_prof_tctx_get(ptr);
return (ret);
}
JEMALLOC_INLINE void
-arena_prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx)
+arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
{
arena_chunk_t *chunk;
- size_t pageind;
cassert(config_prof);
assert(ptr != NULL);
- assert(CHUNK_ADDR2BASE(ptr) != ptr);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
-
- if (usize > SMALL_MAXCLASS || (prof_promote &&
- ((uintptr_t)ctx != (uintptr_t)1U || arena_mapbits_large_get(chunk,
- pageind) != 0))) {
- assert(arena_mapbits_large_get(chunk, pageind) != 0);
- arena_mapp_get(chunk, pageind)->prof_ctx = ctx;
- } else {
- assert(arena_mapbits_large_get(chunk, pageind) == 0);
- if (prof_promote == false) {
- size_t mapbits = arena_mapbits_get(chunk, pageind);
- arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
- (uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
- LG_PAGE));
- size_t binind;
- arena_bin_info_t *bin_info;
- unsigned regind;
-
- binind = arena_ptr_small_binind_get(ptr, mapbits);
- bin_info = &arena_bin_info[binind];
- regind = arena_run_regind(run, bin_info, ptr);
-
- *((prof_ctx_t **)((uintptr_t)run +
- bin_info->ctx0_offset + (regind * sizeof(prof_ctx_t
- *)))) = ctx;
+ if (likely(chunk != ptr)) {
+ size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+
+ assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+
+ if (unlikely(usize > SMALL_MAXCLASS || (uintptr_t)tctx >
+ (uintptr_t)1U)) {
+ arena_chunk_map_misc_t *elm;
+
+ assert(arena_mapbits_large_get(chunk, pageind) != 0);
+
+ elm = arena_miscelm_get(chunk, pageind);
+ atomic_write_p(&elm->prof_tctx_pun, tctx);
+ } else {
+ /*
+ * tctx must always be initialized for large runs.
+ * Assert that the surrounding conditional logic is
+ * equivalent to checking whether ptr refers to a large
+ * run.
+ */
+ assert(arena_mapbits_large_get(chunk, pageind) == 0);
}
+ } else
+ huge_prof_tctx_set(ptr, tctx);
+}
+
+JEMALLOC_INLINE void
+arena_prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
+ prof_tctx_t *old_tctx)
+{
+
+ cassert(config_prof);
+ assert(ptr != NULL);
+
+ if (unlikely(usize > SMALL_MAXCLASS || (ptr == old_ptr &&
+ (uintptr_t)old_tctx > (uintptr_t)1U))) {
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ if (likely(chunk != ptr)) {
+ size_t pageind;
+ arena_chunk_map_misc_t *elm;
+
+ pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
+ LG_PAGE;
+ assert(arena_mapbits_allocated_get(chunk, pageind) !=
+ 0);
+ assert(arena_mapbits_large_get(chunk, pageind) != 0);
+
+ elm = arena_miscelm_get(chunk, pageind);
+ atomic_write_p(&elm->prof_tctx_pun,
+ (prof_tctx_t *)(uintptr_t)1U);
+ } else
+ huge_prof_tctx_reset(ptr);
}
}
JEMALLOC_ALWAYS_INLINE void *
-arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache)
+arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
+ tcache_t *tcache)
{
- tcache_t *tcache;
assert(size != 0);
- assert(size <= arena_maxclass);
- if (size <= SMALL_MAXCLASS) {
- if (try_tcache && (tcache = tcache_get(true)) != NULL)
- return (tcache_alloc_small(tcache, size, zero));
- else {
- return (arena_malloc_small(choose_arena(arena), size,
+ arena = arena_choose(tsd, arena);
+ if (unlikely(arena == NULL))
+ return (NULL);
+
+ if (likely(size <= SMALL_MAXCLASS)) {
+ if (likely(tcache != NULL)) {
+ return (tcache_alloc_small(tsd, arena, tcache, size,
zero));
- }
- } else {
+ } else
+ return (arena_malloc_small(arena, size, zero));
+ } else if (likely(size <= large_maxclass)) {
/*
* Initialize tcache after checking size in order to avoid
* infinite recursion during tcache initialization.
*/
- if (try_tcache && size <= tcache_maxclass && (tcache =
- tcache_get(true)) != NULL)
- return (tcache_alloc_large(tcache, size, zero));
- else {
- return (arena_malloc_large(choose_arena(arena), size,
+ if (likely(tcache != NULL) && size <= tcache_maxclass) {
+ return (tcache_alloc_large(tsd, arena, tcache, size,
zero));
- }
- }
+ } else
+ return (arena_malloc_large(arena, size, zero));
+ } else
+ return (huge_malloc(tsd, arena, size, zero, tcache));
+}
+
+JEMALLOC_ALWAYS_INLINE arena_t *
+arena_aalloc(const void *ptr)
+{
+ arena_chunk_t *chunk;
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ if (likely(chunk != ptr))
+ return (extent_node_arena_get(&chunk->node));
+ else
+ return (huge_aalloc(ptr));
}
/* Return the size of the allocation pointed to by ptr. */
@@ -980,81 +1206,139 @@ arena_salloc(const void *ptr, bool demote)
{
size_t ret;
arena_chunk_t *chunk;
- size_t pageind, binind;
+ size_t pageind;
+ szind_t binind;
assert(ptr != NULL);
- assert(CHUNK_ADDR2BASE(ptr) != ptr);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
- binind = arena_mapbits_binind_get(chunk, pageind);
- if (binind == BININD_INVALID || (config_prof && demote == false &&
- prof_promote && arena_mapbits_large_get(chunk, pageind) != 0)) {
- /*
- * Large allocation. In the common case (demote == true), and
- * as this is an inline function, most callers will only end up
- * looking at binind to determine that ptr is a small
- * allocation.
- */
- assert(((uintptr_t)ptr & PAGE_MASK) == 0);
- ret = arena_mapbits_large_size_get(chunk, pageind);
- assert(ret != 0);
- assert(pageind + (ret>>LG_PAGE) <= chunk_npages);
- assert(ret == PAGE || arena_mapbits_large_size_get(chunk,
- pageind+(ret>>LG_PAGE)-1) == 0);
- assert(binind == arena_mapbits_binind_get(chunk,
- pageind+(ret>>LG_PAGE)-1));
- assert(arena_mapbits_dirty_get(chunk, pageind) ==
- arena_mapbits_dirty_get(chunk, pageind+(ret>>LG_PAGE)-1));
- } else {
- /*
- * Small allocation (possibly promoted to a large object due to
- * prof_promote).
- */
- assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
- arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
- pageind)) == binind);
- ret = arena_bin_info[binind].reg_size;
- }
+ if (likely(chunk != ptr)) {
+ pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+ binind = arena_mapbits_binind_get(chunk, pageind);
+ if (unlikely(binind == BININD_INVALID || (config_prof && !demote
+ && arena_mapbits_large_get(chunk, pageind) != 0))) {
+ /*
+ * Large allocation. In the common case (demote), and
+ * as this is an inline function, most callers will only
+ * end up looking at binind to determine that ptr is a
+ * small allocation.
+ */
+ assert(config_cache_oblivious || ((uintptr_t)ptr &
+ PAGE_MASK) == 0);
+ ret = arena_mapbits_large_size_get(chunk, pageind) -
+ large_pad;
+ assert(ret != 0);
+ assert(pageind + ((ret+large_pad)>>LG_PAGE) <=
+ chunk_npages);
+ assert(arena_mapbits_dirty_get(chunk, pageind) ==
+ arena_mapbits_dirty_get(chunk,
+ pageind+((ret+large_pad)>>LG_PAGE)-1));
+ } else {
+ /*
+ * Small allocation (possibly promoted to a large
+ * object).
+ */
+ assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
+ arena_ptr_small_binind_get(ptr,
+ arena_mapbits_get(chunk, pageind)) == binind);
+ ret = index2size(binind);
+ }
+ } else
+ ret = huge_salloc(ptr);
return (ret);
}
JEMALLOC_ALWAYS_INLINE void
-arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr, bool try_tcache)
+arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
+ arena_chunk_t *chunk;
size_t pageind, mapbits;
- tcache_t *tcache;
- assert(arena != NULL);
- assert(chunk->arena == arena);
assert(ptr != NULL);
- assert(CHUNK_ADDR2BASE(ptr) != ptr);
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- mapbits = arena_mapbits_get(chunk, pageind);
- assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
- if ((mapbits & CHUNK_MAP_LARGE) == 0) {
- /* Small allocation. */
- if (try_tcache && (tcache = tcache_get(false)) != NULL) {
- size_t binind;
-
- binind = arena_ptr_small_binind_get(ptr, mapbits);
- tcache_dalloc_small(tcache, ptr, binind);
- } else
- arena_dalloc_small(arena, chunk, ptr, pageind);
- } else {
- size_t size = arena_mapbits_large_size_get(chunk, pageind);
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ if (likely(chunk != ptr)) {
+ pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ mapbits = arena_mapbits_get(chunk, pageind);
+ assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+ if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) {
+ /* Small allocation. */
+ if (likely(tcache != NULL)) {
+ szind_t binind = arena_ptr_small_binind_get(ptr,
+ mapbits);
+ tcache_dalloc_small(tsd, tcache, ptr, binind);
+ } else {
+ arena_dalloc_small(extent_node_arena_get(
+ &chunk->node), chunk, ptr, pageind);
+ }
+ } else {
+ size_t size = arena_mapbits_large_size_get(chunk,
+ pageind);
+
+ assert(config_cache_oblivious || ((uintptr_t)ptr &
+ PAGE_MASK) == 0);
+
+ if (likely(tcache != NULL) && size - large_pad <=
+ tcache_maxclass) {
+ tcache_dalloc_large(tsd, tcache, ptr, size -
+ large_pad);
+ } else {
+ arena_dalloc_large(extent_node_arena_get(
+ &chunk->node), chunk, ptr);
+ }
+ }
+ } else
+ huge_dalloc(tsd, ptr, tcache);
+}
- assert(((uintptr_t)ptr & PAGE_MASK) == 0);
+JEMALLOC_ALWAYS_INLINE void
+arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
+{
+ arena_chunk_t *chunk;
- if (try_tcache && size <= tcache_maxclass && (tcache =
- tcache_get(false)) != NULL) {
- tcache_dalloc_large(tcache, ptr, size);
- } else
- arena_dalloc_large(arena, chunk, ptr);
- }
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ if (likely(chunk != ptr)) {
+ if (config_prof && opt_prof) {
+ size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
+ LG_PAGE;
+ assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+ if (arena_mapbits_large_get(chunk, pageind) != 0) {
+ /*
+ * Make sure to use promoted size, not request
+ * size.
+ */
+ size = arena_mapbits_large_size_get(chunk,
+ pageind) - large_pad;
+ }
+ }
+ assert(s2u(size) == s2u(arena_salloc(ptr, false)));
+
+ if (likely(size <= SMALL_MAXCLASS)) {
+ /* Small allocation. */
+ if (likely(tcache != NULL)) {
+ szind_t binind = size2index(size);
+ tcache_dalloc_small(tsd, tcache, ptr, binind);
+ } else {
+ size_t pageind = ((uintptr_t)ptr -
+ (uintptr_t)chunk) >> LG_PAGE;
+ arena_dalloc_small(extent_node_arena_get(
+ &chunk->node), chunk, ptr, pageind);
+ }
+ } else {
+ assert(config_cache_oblivious || ((uintptr_t)ptr &
+ PAGE_MASK) == 0);
+
+ if (likely(tcache != NULL) && size <= tcache_maxclass)
+ tcache_dalloc_large(tsd, tcache, ptr, size);
+ else {
+ arena_dalloc_large(extent_node_arena_get(
+ &chunk->node), chunk, ptr);
+ }
+ }
+ } else
+ huge_dalloc(tsd, ptr, tcache);
}
# endif /* JEMALLOC_ARENA_INLINE_B */
#endif
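
The arena_run_regind() hunk above keeps a long-standing micro-optimization: rather than dividing by the variable region interval (the in-code comment notes that a real divide can cost over 20% of allocator throughput), powers of two are shifted out and the remaining odd factor is divided via a precomputed fixed-point inverse (the SIZE_INV table). The following is a minimal standalone sketch of that arithmetic, assuming 32-bit unsigned math and the LG_RUN_MAXREGS == 11 bound; the helper name is illustrative, not jemalloc's.

/*
 * Sketch of the reciprocal-multiplication trick used by arena_run_regind():
 * once powers of two are factored out, diff / interval is computed with a
 * multiply and a shift against a precomputed fixed-point inverse.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SIZE_INV_SHIFT	21	/* 32 - LG_RUN_MAXREGS */
#define SIZE_INV(s)	((((uint32_t)1 << SIZE_INV_SHIFT) / (s)) + 1)

static uint32_t
regind_by_reciprocal(uint32_t diff, uint32_t interval)
{
	/* One inverse per interval in [3, 31], as in arena_run_regind(). */
	static const uint32_t interval_invs[] = {
		SIZE_INV(3),  SIZE_INV(4),  SIZE_INV(5),  SIZE_INV(6),
		SIZE_INV(7),  SIZE_INV(8),  SIZE_INV(9),  SIZE_INV(10),
		SIZE_INV(11), SIZE_INV(12), SIZE_INV(13), SIZE_INV(14),
		SIZE_INV(15), SIZE_INV(16), SIZE_INV(17), SIZE_INV(18),
		SIZE_INV(19), SIZE_INV(20), SIZE_INV(21), SIZE_INV(22),
		SIZE_INV(23), SIZE_INV(24), SIZE_INV(25), SIZE_INV(26),
		SIZE_INV(27), SIZE_INV(28), SIZE_INV(29), SIZE_INV(30),
		SIZE_INV(31)
	};

	return ((diff * interval_invs[interval - 3]) >> SIZE_INV_SHIFT);
}

int
main(void)
{
	uint32_t interval, regind;

	/* diff is always a multiple of interval (ptr is region-aligned). */
	for (interval = 3; interval <= 31; interval++) {
		for (regind = 0; regind < 2048; regind++) {
			uint32_t diff = regind * interval;
			assert(regind_by_reciprocal(diff, interval) ==
			    diff / interval);
		}
	}
	printf("reciprocal multiply matches division for all region indices\n");
	return (0);
}
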
diff --git a/deps/jemalloc/include/jemalloc/internal/atomic.h b/deps/jemalloc/include/jemalloc/internal/atomic.h
index 11a7b47fe..a9aad35d1 100644
--- a/deps/jemalloc/include/jemalloc/internal/atomic.h
+++ b/deps/jemalloc/include/jemalloc/internal/atomic.h
@@ -11,6 +11,7 @@
#define atomic_read_uint64(p) atomic_add_uint64(p, 0)
#define atomic_read_uint32(p) atomic_add_uint32(p, 0)
+#define atomic_read_p(p) atomic_add_p(p, NULL)
#define atomic_read_z(p) atomic_add_z(p, 0)
#define atomic_read_u(p) atomic_add_u(p, 0)
@@ -18,113 +19,244 @@
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
+/*
+ * All arithmetic functions return the arithmetic result of the atomic
+ * operation. Some atomic operation APIs return the value prior to mutation, in
+ * which case the following functions must redundantly compute the result so
+ * that it can be returned. These functions are normally inlined, so the extra
+ * operations can be optimized away if the return values aren't used by the
+ * callers.
+ *
+ * <t> atomic_read_<t>(<t> *p) { return (*p); }
+ * <t> atomic_add_<t>(<t> *p, <t> x) { return (*p + x); }
+ * <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p - x); }
+ * bool atomic_cas_<t>(<t> *p, <t> c, <t> s)
+ * {
+ * if (*p != c)
+ * return (true);
+ * *p = s;
+ * return (false);
+ * }
+ * void atomic_write_<t>(<t> *p, <t> x) { *p = x; }
+ */
+
#ifndef JEMALLOC_ENABLE_INLINE
uint64_t atomic_add_uint64(uint64_t *p, uint64_t x);
uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x);
+bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s);
+void atomic_write_uint64(uint64_t *p, uint64_t x);
uint32_t atomic_add_uint32(uint32_t *p, uint32_t x);
uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x);
+bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s);
+void atomic_write_uint32(uint32_t *p, uint32_t x);
+void *atomic_add_p(void **p, void *x);
+void *atomic_sub_p(void **p, void *x);
+bool atomic_cas_p(void **p, void *c, void *s);
+void atomic_write_p(void **p, const void *x);
size_t atomic_add_z(size_t *p, size_t x);
size_t atomic_sub_z(size_t *p, size_t x);
+bool atomic_cas_z(size_t *p, size_t c, size_t s);
+void atomic_write_z(size_t *p, size_t x);
unsigned atomic_add_u(unsigned *p, unsigned x);
unsigned atomic_sub_u(unsigned *p, unsigned x);
+bool atomic_cas_u(unsigned *p, unsigned c, unsigned s);
+void atomic_write_u(unsigned *p, unsigned x);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_))
/******************************************************************************/
/* 64-bit operations. */
#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
-# ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8
+# if (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
+ uint64_t t = x;
- return (__sync_add_and_fetch(p, x));
+ asm volatile (
+ "lock; xaddq %0, %1;"
+ : "+r" (t), "=m" (*p) /* Outputs. */
+ : "m" (*p) /* Inputs. */
+ );
+
+ return (t + x);
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
+ uint64_t t;
- return (__sync_sub_and_fetch(p, x));
+ x = (uint64_t)(-(int64_t)x);
+ t = x;
+ asm volatile (
+ "lock; xaddq %0, %1;"
+ : "+r" (t), "=m" (*p) /* Outputs. */
+ : "m" (*p) /* Inputs. */
+ );
+
+ return (t + x);
}
-#elif (defined(_MSC_VER))
+
+JEMALLOC_INLINE bool
+atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
+{
+ uint8_t success;
+
+ asm volatile (
+ "lock; cmpxchgq %4, %0;"
+ "sete %1;"
+ : "=m" (*p), "=a" (success) /* Outputs. */
+ : "m" (*p), "a" (c), "r" (s) /* Inputs. */
+ : "memory" /* Clobbers. */
+ );
+
+ return (!(bool)success);
+}
+
+JEMALLOC_INLINE void
+atomic_write_uint64(uint64_t *p, uint64_t x)
+{
+
+ asm volatile (
+ "xchgq %1, %0;" /* Lock is implied by xchgq. */
+ : "=m" (*p), "+r" (x) /* Outputs. */
+ : "m" (*p) /* Inputs. */
+ : "memory" /* Clobbers. */
+ );
+}
+# elif (defined(JEMALLOC_C11ATOMICS))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
-
- return (InterlockedExchangeAdd64(p, x));
+ volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
+ return (atomic_fetch_add(a, x) + x);
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
+ volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
+ return (atomic_fetch_sub(a, x) - x);
+}
+
+JEMALLOC_INLINE bool
+atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
+{
+ volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
+ return (!atomic_compare_exchange_strong(a, &c, s));
+}
- return (InterlockedExchangeAdd64(p, -((int64_t)x)));
+JEMALLOC_INLINE void
+atomic_write_uint64(uint64_t *p, uint64_t x)
+{
+ volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
+ atomic_store(a, x);
}
-#elif (defined(JEMALLOC_OSATOMIC))
+# elif (defined(JEMALLOC_ATOMIC9))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
- return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
+ /*
+ * atomic_fetchadd_64() doesn't exist, but we only ever use this
+ * function on LP64 systems, so atomic_fetchadd_long() will do.
+ */
+ assert(sizeof(uint64_t) == sizeof(unsigned long));
+
+ return (atomic_fetchadd_long(p, (unsigned long)x) + x);
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
- return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
+ assert(sizeof(uint64_t) == sizeof(unsigned long));
+
+ return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x);
+}
+
+JEMALLOC_INLINE bool
+atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
+{
+
+ assert(sizeof(uint64_t) == sizeof(unsigned long));
+
+ return (!atomic_cmpset_long(p, (unsigned long)c, (unsigned long)s));
+}
+
+JEMALLOC_INLINE void
+atomic_write_uint64(uint64_t *p, uint64_t x)
+{
+
+ assert(sizeof(uint64_t) == sizeof(unsigned long));
+
+ atomic_store_rel_long(p, x);
}
-# elif (defined(__amd64__) || defined(__x86_64__))
+# elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
- asm volatile (
- "lock; xaddq %0, %1;"
- : "+r" (x), "=m" (*p) /* Outputs. */
- : "m" (*p) /* Inputs. */
- );
-
- return (x);
+ return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
- x = (uint64_t)(-(int64_t)x);
- asm volatile (
- "lock; xaddq %0, %1;"
- : "+r" (x), "=m" (*p) /* Outputs. */
- : "m" (*p) /* Inputs. */
- );
+ return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
+}
+
+JEMALLOC_INLINE bool
+atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
+{
- return (x);
+ return (!OSAtomicCompareAndSwap64(c, s, (int64_t *)p));
}
-# elif (defined(JEMALLOC_ATOMIC9))
+
+JEMALLOC_INLINE void
+atomic_write_uint64(uint64_t *p, uint64_t x)
+{
+ uint64_t o;
+
+ /* The documented OSAtomic*() API does not expose an atomic exchange. */
+ do {
+ o = atomic_read_uint64(p);
+ } while (atomic_cas_uint64(p, o, x));
+}
+# elif (defined(_MSC_VER))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
- /*
- * atomic_fetchadd_64() doesn't exist, but we only ever use this
- * function on LP64 systems, so atomic_fetchadd_long() will do.
- */
- assert(sizeof(uint64_t) == sizeof(unsigned long));
-
- return (atomic_fetchadd_long(p, (unsigned long)x) + x);
+ return (InterlockedExchangeAdd64(p, x) + x);
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
- assert(sizeof(uint64_t) == sizeof(unsigned long));
+ return (InterlockedExchangeAdd64(p, -((int64_t)x)) - x);
+}
- return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x);
+JEMALLOC_INLINE bool
+atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
+{
+ uint64_t o;
+
+ o = InterlockedCompareExchange64(p, s, c);
+ return (o != c);
}
-# elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
+
+JEMALLOC_INLINE void
+atomic_write_uint64(uint64_t *p, uint64_t x)
+{
+
+ InterlockedExchange64(p, x);
+}
+# elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || \
+ defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
@@ -138,6 +270,20 @@ atomic_sub_uint64(uint64_t *p, uint64_t x)
return (__sync_sub_and_fetch(p, x));
}
+
+JEMALLOC_INLINE bool
+atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
+{
+
+ return (!__sync_bool_compare_and_swap(p, c, s));
+}
+
+JEMALLOC_INLINE void
+atomic_write_uint64(uint64_t *p, uint64_t x)
+{
+
+ __sync_lock_test_and_set(p, x);
+}
# else
# error "Missing implementation for 64-bit atomic operations"
# endif
@@ -145,90 +291,184 @@ atomic_sub_uint64(uint64_t *p, uint64_t x)
/******************************************************************************/
/* 32-bit operations. */
-#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
+#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
+ uint32_t t = x;
- return (__sync_add_and_fetch(p, x));
+ asm volatile (
+ "lock; xaddl %0, %1;"
+ : "+r" (t), "=m" (*p) /* Outputs. */
+ : "m" (*p) /* Inputs. */
+ );
+
+ return (t + x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
+ uint32_t t;
- return (__sync_sub_and_fetch(p, x));
+ x = (uint32_t)(-(int32_t)x);
+ t = x;
+ asm volatile (
+ "lock; xaddl %0, %1;"
+ : "+r" (t), "=m" (*p) /* Outputs. */
+ : "m" (*p) /* Inputs. */
+ );
+
+ return (t + x);
}
-#elif (defined(_MSC_VER))
+
+JEMALLOC_INLINE bool
+atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
+{
+ uint8_t success;
+
+ asm volatile (
+ "lock; cmpxchgl %4, %0;"
+ "sete %1;"
+ : "=m" (*p), "=a" (success) /* Outputs. */
+ : "m" (*p), "a" (c), "r" (s) /* Inputs. */
+ : "memory"
+ );
+
+ return (!(bool)success);
+}
+
+JEMALLOC_INLINE void
+atomic_write_uint32(uint32_t *p, uint32_t x)
+{
+
+ asm volatile (
+ "xchgl %1, %0;" /* Lock is implied by xchgl. */
+ : "=m" (*p), "+r" (x) /* Outputs. */
+ : "m" (*p) /* Inputs. */
+ : "memory" /* Clobbers. */
+ );
+}
+# elif (defined(JEMALLOC_C11ATOMICS))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
-
- return (InterlockedExchangeAdd(p, x));
+ volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
+ return (atomic_fetch_add(a, x) + x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
+ volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
+ return (atomic_fetch_sub(a, x) - x);
+}
+
+JEMALLOC_INLINE bool
+atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
+{
+ volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
+ return (!atomic_compare_exchange_strong(a, &c, s));
+}
- return (InterlockedExchangeAdd(p, -((int32_t)x)));
+JEMALLOC_INLINE void
+atomic_write_uint32(uint32_t *p, uint32_t x)
+{
+ volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
+ atomic_store(a, x);
}
-#elif (defined(JEMALLOC_OSATOMIC))
+#elif (defined(JEMALLOC_ATOMIC9))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
- return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
+ return (atomic_fetchadd_32(p, x) + x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
- return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
+ return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x);
+}
+
+JEMALLOC_INLINE bool
+atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
+{
+
+ return (!atomic_cmpset_32(p, c, s));
+}
+
+JEMALLOC_INLINE void
+atomic_write_uint32(uint32_t *p, uint32_t x)
+{
+
+ atomic_store_rel_32(p, x);
}
-#elif (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
+#elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
- asm volatile (
- "lock; xaddl %0, %1;"
- : "+r" (x), "=m" (*p) /* Outputs. */
- : "m" (*p) /* Inputs. */
- );
-
- return (x);
+ return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
- x = (uint32_t)(-(int32_t)x);
- asm volatile (
- "lock; xaddl %0, %1;"
- : "+r" (x), "=m" (*p) /* Outputs. */
- : "m" (*p) /* Inputs. */
- );
+ return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
+}
- return (x);
+JEMALLOC_INLINE bool
+atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
+{
+
+ return (!OSAtomicCompareAndSwap32(c, s, (int32_t *)p));
}
-#elif (defined(JEMALLOC_ATOMIC9))
+
+JEMALLOC_INLINE void
+atomic_write_uint32(uint32_t *p, uint32_t x)
+{
+ uint32_t o;
+
+ /* The documented OSAtomic*() API does not expose an atomic exchange. */
+ do {
+ o = atomic_read_uint32(p);
+ } while (atomic_cas_uint32(p, o, x));
+}
+#elif (defined(_MSC_VER))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
- return (atomic_fetchadd_32(p, x) + x);
+ return (InterlockedExchangeAdd(p, x) + x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
- return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x);
+ return (InterlockedExchangeAdd(p, -((int32_t)x)) - x);
+}
+
+JEMALLOC_INLINE bool
+atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
+{
+ uint32_t o;
+
+ o = InterlockedCompareExchange(p, s, c);
+ return (o != c);
}
-#elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
+
+JEMALLOC_INLINE void
+atomic_write_uint32(uint32_t *p, uint32_t x)
+{
+
+ InterlockedExchange(p, x);
+}
+#elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || \
+ defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
@@ -242,11 +482,73 @@ atomic_sub_uint32(uint32_t *p, uint32_t x)
return (__sync_sub_and_fetch(p, x));
}
+
+JEMALLOC_INLINE bool
+atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
+{
+
+ return (!__sync_bool_compare_and_swap(p, c, s));
+}
+
+JEMALLOC_INLINE void
+atomic_write_uint32(uint32_t *p, uint32_t x)
+{
+
+ __sync_lock_test_and_set(p, x);
+}
#else
# error "Missing implementation for 32-bit atomic operations"
#endif
/******************************************************************************/
+/* Pointer operations. */
+JEMALLOC_INLINE void *
+atomic_add_p(void **p, void *x)
+{
+
+#if (LG_SIZEOF_PTR == 3)
+ return ((void *)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
+#elif (LG_SIZEOF_PTR == 2)
+ return ((void *)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
+#endif
+}
+
+JEMALLOC_INLINE void *
+atomic_sub_p(void **p, void *x)
+{
+
+#if (LG_SIZEOF_PTR == 3)
+ return ((void *)atomic_add_uint64((uint64_t *)p,
+ (uint64_t)-((int64_t)x)));
+#elif (LG_SIZEOF_PTR == 2)
+ return ((void *)atomic_add_uint32((uint32_t *)p,
+ (uint32_t)-((int32_t)x)));
+#endif
+}
+
+JEMALLOC_INLINE bool
+atomic_cas_p(void **p, void *c, void *s)
+{
+
+#if (LG_SIZEOF_PTR == 3)
+ return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
+#elif (LG_SIZEOF_PTR == 2)
+ return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
+#endif
+}
+
+JEMALLOC_INLINE void
+atomic_write_p(void **p, const void *x)
+{
+
+#if (LG_SIZEOF_PTR == 3)
+ atomic_write_uint64((uint64_t *)p, (uint64_t)x);
+#elif (LG_SIZEOF_PTR == 2)
+ atomic_write_uint32((uint32_t *)p, (uint32_t)x);
+#endif
+}
+
+/******************************************************************************/
/* size_t operations. */
JEMALLOC_INLINE size_t
atomic_add_z(size_t *p, size_t x)
@@ -272,6 +574,28 @@ atomic_sub_z(size_t *p, size_t x)
#endif
}
+JEMALLOC_INLINE bool
+atomic_cas_z(size_t *p, size_t c, size_t s)
+{
+
+#if (LG_SIZEOF_PTR == 3)
+ return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
+#elif (LG_SIZEOF_PTR == 2)
+ return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
+#endif
+}
+
+JEMALLOC_INLINE void
+atomic_write_z(size_t *p, size_t x)
+{
+
+#if (LG_SIZEOF_PTR == 3)
+ atomic_write_uint64((uint64_t *)p, (uint64_t)x);
+#elif (LG_SIZEOF_PTR == 2)
+ atomic_write_uint32((uint32_t *)p, (uint32_t)x);
+#endif
+}
+
/******************************************************************************/
/* unsigned operations. */
JEMALLOC_INLINE unsigned
@@ -297,6 +621,29 @@ atomic_sub_u(unsigned *p, unsigned x)
(uint32_t)-((int32_t)x)));
#endif
}
+
+JEMALLOC_INLINE bool
+atomic_cas_u(unsigned *p, unsigned c, unsigned s)
+{
+
+#if (LG_SIZEOF_INT == 3)
+ return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
+#elif (LG_SIZEOF_INT == 2)
+ return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
+#endif
+}
+
+JEMALLOC_INLINE void
+atomic_write_u(unsigned *p, unsigned x)
+{
+
+#if (LG_SIZEOF_INT == 3)
+ atomic_write_uint64((uint64_t *)p, (uint64_t)x);
+#elif (LG_SIZEOF_INT == 2)
+ atomic_write_uint32((uint32_t *)p, (uint32_t)x);
+#endif
+}
+
/******************************************************************************/
#endif
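
The rewritten atomic.h documents its calling convention in the block comment near the top of the hunk: the arithmetic operations return the post-mutation value, atomic_cas_*() returns false on success and true on failure, atomic_write_*() is a plain atomic store, and atomic_read_*() is defined as an add of zero. Below is a small sketch of that convention built on C11 <stdatomic.h>, the same facility the JEMALLOC_C11ATOMICS branches use; the my_* names are illustrative, not part of jemalloc.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static uint32_t
my_atomic_add_uint32(uint32_t *p, uint32_t x)
{
	volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;

	return (atomic_fetch_add(a, x) + x);	/* Return the new value. */
}

static bool
my_atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
	volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;

	return (!atomic_compare_exchange_strong(a, &c, s));	/* false == success */
}

int
main(void)
{
	uint32_t refs = 0;
	uint32_t flag = 0;
	uint32_t old;

	assert(my_atomic_add_uint32(&refs, 1) == 1);

	/* Write via a cas retry loop, as in the OSAtomic fallback above. */
	do {
		old = my_atomic_add_uint32(&flag, 0);	/* read == add of 0 */
	} while (my_atomic_cas_uint32(&flag, old, 42));
	assert(flag == 42);

	return (0);
}
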
diff --git a/deps/jemalloc/include/jemalloc/internal/base.h b/deps/jemalloc/include/jemalloc/internal/base.h
index 9cf75ffb0..39e46ee44 100644
--- a/deps/jemalloc/include/jemalloc/internal/base.h
+++ b/deps/jemalloc/include/jemalloc/internal/base.h
@@ -10,9 +10,7 @@
#ifdef JEMALLOC_H_EXTERNS
void *base_alloc(size_t size);
-void *base_calloc(size_t number, size_t size);
-extent_node_t *base_node_alloc(void);
-void base_node_dealloc(extent_node_t *node);
+void base_stats_get(size_t *allocated, size_t *resident, size_t *mapped);
bool base_boot(void);
void base_prefork(void);
void base_postfork_parent(void);
diff --git a/deps/jemalloc/include/jemalloc/internal/bitmap.h b/deps/jemalloc/include/jemalloc/internal/bitmap.h
index 605ebac58..fcc6005c7 100644
--- a/deps/jemalloc/include/jemalloc/internal/bitmap.h
+++ b/deps/jemalloc/include/jemalloc/internal/bitmap.h
@@ -3,6 +3,7 @@
/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
#define LG_BITMAP_MAXBITS LG_RUN_MAXREGS
+#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS)
typedef struct bitmap_level_s bitmap_level_t;
typedef struct bitmap_info_s bitmap_info_t;
@@ -14,6 +15,51 @@ typedef unsigned long bitmap_t;
#define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS)
#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)
+/* Number of groups required to store a given number of bits. */
+#define BITMAP_BITS2GROUPS(nbits) \
+ ((nbits + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)
+
+/*
+ * Number of groups required at a particular level for a given number of bits.
+ */
+#define BITMAP_GROUPS_L0(nbits) \
+ BITMAP_BITS2GROUPS(nbits)
+#define BITMAP_GROUPS_L1(nbits) \
+ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits))
+#define BITMAP_GROUPS_L2(nbits) \
+ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))
+#define BITMAP_GROUPS_L3(nbits) \
+ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \
+ BITMAP_BITS2GROUPS((nbits)))))
+
+/*
+ * Number of groups required for a given number of bits, assuming a fixed
+ * number of levels.
+ */
+#define BITMAP_GROUPS_1_LEVEL(nbits) \
+ BITMAP_GROUPS_L0(nbits)
+#define BITMAP_GROUPS_2_LEVEL(nbits) \
+ (BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits))
+#define BITMAP_GROUPS_3_LEVEL(nbits) \
+ (BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits))
+#define BITMAP_GROUPS_4_LEVEL(nbits) \
+ (BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits))
+
+/*
+ * Maximum number of groups required to support LG_BITMAP_MAXBITS.
+ */
+#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS
+# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS)
+#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2
+# define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS)
+#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3
+# define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS)
+#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4
+# define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS)
+#else
+# error "Unsupported bitmap size"
+#endif
+
/* Maximum number of levels possible. */
#define BITMAP_MAX_LEVELS \
(LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
@@ -93,7 +139,7 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
bitmap_t g;
assert(bit < binfo->nbits);
- assert(bitmap_get(bitmap, binfo, bit) == false);
+ assert(!bitmap_get(bitmap, binfo, bit));
goff = bit >> LG_BITMAP_GROUP_NBITS;
gp = &bitmap[goff];
g = *gp;
@@ -126,15 +172,15 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
bitmap_t g;
unsigned i;
- assert(bitmap_full(bitmap, binfo) == false);
+ assert(!bitmap_full(bitmap, binfo));
i = binfo->nlevels - 1;
g = bitmap[binfo->levels[i].group_offset];
- bit = ffsl(g) - 1;
+ bit = jemalloc_ffsl(g) - 1;
while (i > 0) {
i--;
g = bitmap[binfo->levels[i].group_offset + bit];
- bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffsl(g) - 1);
+ bit = (bit << LG_BITMAP_GROUP_NBITS) + (jemalloc_ffsl(g) - 1);
}
bitmap_set(bitmap, binfo, bit);
@@ -158,7 +204,7 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
- assert(bitmap_get(bitmap, binfo, bit) == false);
+ assert(!bitmap_get(bitmap, binfo, bit));
/* Propagate group state transitions up the tree. */
if (propagate) {
unsigned i;
@@ -172,7 +218,7 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
== 0);
g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
- if (propagate == false)
+ if (!propagate)
break;
}
}
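
The new BITMAP_BITS2GROUPS()/BITMAP_GROUPS_*() macros size a multi-level bitmap at compile time: each level stores one summary bit per group of the level below it, so the total storage is a sum of successively ceiling-divided group counts, terminating when a single group remains. The run-time sketch below reproduces the same arithmetic, assuming 64-bit groups (LG_BITMAP_GROUP_NBITS == 6); the names are illustrative.

#include <stddef.h>
#include <stdio.h>

#define LG_GROUP_NBITS	6
#define GROUP_NBITS	(((size_t)1) << LG_GROUP_NBITS)
#define BITS2GROUPS(nbits)						\
	(((nbits) + GROUP_NBITS - 1) >> LG_GROUP_NBITS)

static size_t
bitmap_groups_total(size_t nbits)
{
	size_t total = 0;
	size_t ngroups = BITS2GROUPS(nbits);

	/* Keep adding levels until one group covers the level below it. */
	for (;;) {
		total += ngroups;
		if (ngroups == 1)
			break;
		ngroups = BITS2GROUPS(ngroups);
	}
	return (total);
}

int
main(void)
{
	size_t nbits;

	for (nbits = 1; nbits <= 2048; nbits *= 2) {
		printf("nbits=%4zu -> groups=%zu\n", nbits,
		    bitmap_groups_total(nbits));
	}
	return (0);
}
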
diff --git a/deps/jemalloc/include/jemalloc/internal/chunk.h b/deps/jemalloc/include/jemalloc/internal/chunk.h
index 87d8700da..5d1938353 100644
--- a/deps/jemalloc/include/jemalloc/internal/chunk.h
+++ b/deps/jemalloc/include/jemalloc/internal/chunk.h
@@ -5,7 +5,7 @@
* Size and alignment of memory chunks that are allocated by the OS's virtual
* memory system.
*/
-#define LG_CHUNK_DEFAULT 22
+#define LG_CHUNK_DEFAULT 21
/* Return the chunk address for allocation address a. */
#define CHUNK_ADDR2BASE(a) \
@@ -19,6 +19,16 @@
#define CHUNK_CEILING(s) \
(((s) + chunksize_mask) & ~chunksize_mask)
+#define CHUNK_HOOKS_INITIALIZER { \
+ NULL, \
+ NULL, \
+ NULL, \
+ NULL, \
+ NULL, \
+ NULL, \
+ NULL \
+}
+
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
@@ -30,23 +40,36 @@
extern size_t opt_lg_chunk;
extern const char *opt_dss;
-/* Protects stats_chunks; currently not used for any other purpose. */
-extern malloc_mutex_t chunks_mtx;
-/* Chunk statistics. */
-extern chunk_stats_t stats_chunks;
-
-extern rtree_t *chunks_rtree;
+extern rtree_t chunks_rtree;
extern size_t chunksize;
extern size_t chunksize_mask; /* (chunksize - 1). */
extern size_t chunk_npages;
-extern size_t map_bias; /* Number of arena chunk header pages. */
-extern size_t arena_maxclass; /* Max size class for arenas. */
-void *chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
- dss_prec_t dss_prec);
-void chunk_unmap(void *chunk, size_t size);
-void chunk_dealloc(void *chunk, size_t size, bool unmap);
+extern const chunk_hooks_t chunk_hooks_default;
+
+chunk_hooks_t chunk_hooks_get(arena_t *arena);
+chunk_hooks_t chunk_hooks_set(arena_t *arena,
+ const chunk_hooks_t *chunk_hooks);
+
+bool chunk_register(const void *chunk, const extent_node_t *node);
+void chunk_deregister(const void *chunk, const extent_node_t *node);
+void *chunk_alloc_base(size_t size);
+void *chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *new_addr, size_t size, size_t alignment, bool *zero,
+ bool dalloc_node);
+void *chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit);
+void chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *chunk, size_t size, bool committed);
+void chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *chunk, size_t size, bool zeroed, bool committed);
+void chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *chunk, size_t size, bool committed);
+bool chunk_purge_arena(arena_t *arena, void *chunk, size_t offset,
+ size_t length);
+bool chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *chunk, size_t size, size_t offset, size_t length);
bool chunk_boot(void);
void chunk_prefork(void);
void chunk_postfork_parent(void);
@@ -56,6 +79,19 @@ void chunk_postfork_child(void);
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
+#ifndef JEMALLOC_ENABLE_INLINE
+extent_node_t *chunk_lookup(const void *chunk, bool dependent);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_CHUNK_C_))
+JEMALLOC_INLINE extent_node_t *
+chunk_lookup(const void *ptr, bool dependent)
+{
+
+ return (rtree_get(&chunks_rtree, (uintptr_t)ptr, dependent));
+}
+#endif
+
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
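
LG_CHUNK_DEFAULT dropping to 21 makes the default chunk 2 MiB, and CHUNK_ADDR2BASE() remains a simple mask of the low address bits. The arena inlines earlier in this patch lean on that: a pointer equal to its own chunk base cannot be an interior arena allocation, because arena chunks keep their header pages at the base, so the "chunk != ptr" test routes between the arena and huge paths. A standalone sketch of the mask arithmetic, with made-up addresses:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define LG_CHUNK	21			/* New LG_CHUNK_DEFAULT. */
#define CHUNKSIZE	((uintptr_t)1 << LG_CHUNK)
#define CHUNKSIZE_MASK	(CHUNKSIZE - 1)
#define ADDR2BASE(a)	((void *)((uintptr_t)(a) & ~CHUNKSIZE_MASK))

int
main(void)
{
	uintptr_t base = 40 * CHUNKSIZE;		/* Pretend chunk base. */
	void *interior = (void *)(base + 0x1040);	/* Arena-backed ptr. */
	void *aligned = (void *)base;			/* Huge allocation. */

	assert(ADDR2BASE(interior) == (void *)base);
	assert(ADDR2BASE(interior) != interior);	/* Takes the arena path. */
	assert(ADDR2BASE(aligned) == aligned);		/* Takes the huge path. */

	printf("chunk base mask checks passed\n");
	return (0);
}
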
diff --git a/deps/jemalloc/include/jemalloc/internal/chunk_dss.h b/deps/jemalloc/include/jemalloc/internal/chunk_dss.h
index 4535ce09c..388f46be0 100644
--- a/deps/jemalloc/include/jemalloc/internal/chunk_dss.h
+++ b/deps/jemalloc/include/jemalloc/internal/chunk_dss.h
@@ -23,7 +23,8 @@ extern const char *dss_prec_names[];
dss_prec_t chunk_dss_prec_get(void);
bool chunk_dss_prec_set(dss_prec_t dss_prec);
-void *chunk_alloc_dss(size_t size, size_t alignment, bool *zero);
+void *chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit);
bool chunk_in_dss(void *chunk);
bool chunk_dss_boot(void);
void chunk_dss_prefork(void);
diff --git a/deps/jemalloc/include/jemalloc/internal/chunk_mmap.h b/deps/jemalloc/include/jemalloc/internal/chunk_mmap.h
index f24abac75..7d8014c58 100644
--- a/deps/jemalloc/include/jemalloc/internal/chunk_mmap.h
+++ b/deps/jemalloc/include/jemalloc/internal/chunk_mmap.h
@@ -9,10 +9,9 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-bool pages_purge(void *addr, size_t length);
-
-void *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);
-bool chunk_dealloc_mmap(void *chunk, size_t size);
+void *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero,
+ bool *commit);
+bool chunk_dalloc_mmap(void *chunk, size_t size);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/deps/jemalloc/include/jemalloc/internal/ckh.h b/deps/jemalloc/include/jemalloc/internal/ckh.h
index 58712a6a7..75c1c979f 100644
--- a/deps/jemalloc/include/jemalloc/internal/ckh.h
+++ b/deps/jemalloc/include/jemalloc/internal/ckh.h
@@ -66,13 +66,13 @@ struct ckh_s {
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-bool ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
+bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ckh_keycomp_t *keycomp);
-void ckh_delete(ckh_t *ckh);
+void ckh_delete(tsd_t *tsd, ckh_t *ckh);
size_t ckh_count(ckh_t *ckh);
bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
-bool ckh_insert(ckh_t *ckh, const void *key, const void *data);
-bool ckh_remove(ckh_t *ckh, const void *searchkey, void **key,
+bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
+bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
void **data);
bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
void ckh_string_hash(const void *key, size_t r_hash[2]);
diff --git a/deps/jemalloc/include/jemalloc/internal/ctl.h b/deps/jemalloc/include/jemalloc/internal/ctl.h
index 0ffecc5f2..751c14b5b 100644
--- a/deps/jemalloc/include/jemalloc/internal/ctl.h
+++ b/deps/jemalloc/include/jemalloc/internal/ctl.h
@@ -34,6 +34,7 @@ struct ctl_arena_stats_s {
bool initialized;
unsigned nthreads;
const char *dss;
+ ssize_t lg_dirty_mult;
size_t pactive;
size_t pdirty;
arena_stats_t astats;
@@ -46,22 +47,15 @@ struct ctl_arena_stats_s {
malloc_bin_stats_t bstats[NBINS];
malloc_large_stats_t *lstats; /* nlclasses elements. */
+ malloc_huge_stats_t *hstats; /* nhclasses elements. */
};
struct ctl_stats_s {
size_t allocated;
size_t active;
+ size_t metadata;
+ size_t resident;
size_t mapped;
- struct {
- size_t current; /* stats_chunks.curchunks */
- uint64_t total; /* stats_chunks.nchunks */
- size_t high; /* stats_chunks.highchunks */
- } chunks;
- struct {
- size_t allocated; /* huge_allocated */
- uint64_t nmalloc; /* huge_nmalloc */
- uint64_t ndalloc; /* huge_ndalloc */
- } huge;
unsigned narenas;
ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
};
diff --git a/deps/jemalloc/include/jemalloc/internal/extent.h b/deps/jemalloc/include/jemalloc/internal/extent.h
index ba95ca816..386d50ef4 100644
--- a/deps/jemalloc/include/jemalloc/internal/extent.h
+++ b/deps/jemalloc/include/jemalloc/internal/extent.h
@@ -7,25 +7,53 @@ typedef struct extent_node_s extent_node_t;
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
-/* Tree of extents. */
+/* Tree of extents. Use accessor functions for en_* fields. */
struct extent_node_s {
- /* Linkage for the size/address-ordered tree. */
- rb_node(extent_node_t) link_szad;
+ /* Arena from which this extent came, if any. */
+ arena_t *en_arena;
- /* Linkage for the address-ordered tree. */
- rb_node(extent_node_t) link_ad;
+ /* Pointer to the extent that this tree node is responsible for. */
+ void *en_addr;
+
+ /* Total region size. */
+ size_t en_size;
+
+ /*
+ * The zeroed flag is used by chunk recycling code to track whether
+ * memory is zero-filled.
+ */
+ bool en_zeroed;
+
+ /*
+ * True if physical memory is committed to the extent, whether
+ * explicitly or implicitly as on a system that overcommits and
+ * satisfies physical memory needs on demand via soft page faults.
+ */
+ bool en_committed;
+
+ /*
+ * The achunk flag is used to validate that huge allocation lookups
+ * don't return arena chunks.
+ */
+ bool en_achunk;
/* Profile counters, used for huge objects. */
- prof_ctx_t *prof_ctx;
+ prof_tctx_t *en_prof_tctx;
- /* Pointer to the extent that this tree node is responsible for. */
- void *addr;
+ /* Linkage for arena's runs_dirty and chunks_cache rings. */
+ arena_runs_dirty_link_t rd;
+ qr(extent_node_t) cc_link;
- /* Total region size. */
- size_t size;
+ union {
+ /* Linkage for the size/address-ordered tree. */
+ rb_node(extent_node_t) szad_link;
+
+ /* Linkage for arena's huge and node_cache lists. */
+ ql_elm(extent_node_t) ql_link;
+ };
- /* True if zero-filled; used by chunk recycling code. */
- bool zeroed;
+ /* Linkage for the address-ordered tree. */
+ rb_node(extent_node_t) ad_link;
};
typedef rb_tree(extent_node_t) extent_tree_t;
@@ -41,6 +69,171 @@ rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
+#ifndef JEMALLOC_ENABLE_INLINE
+arena_t *extent_node_arena_get(const extent_node_t *node);
+void *extent_node_addr_get(const extent_node_t *node);
+size_t extent_node_size_get(const extent_node_t *node);
+bool extent_node_zeroed_get(const extent_node_t *node);
+bool extent_node_committed_get(const extent_node_t *node);
+bool extent_node_achunk_get(const extent_node_t *node);
+prof_tctx_t *extent_node_prof_tctx_get(const extent_node_t *node);
+void extent_node_arena_set(extent_node_t *node, arena_t *arena);
+void extent_node_addr_set(extent_node_t *node, void *addr);
+void extent_node_size_set(extent_node_t *node, size_t size);
+void extent_node_zeroed_set(extent_node_t *node, bool zeroed);
+void extent_node_committed_set(extent_node_t *node, bool committed);
+void extent_node_achunk_set(extent_node_t *node, bool achunk);
+void extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
+void extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
+ size_t size, bool zeroed, bool committed);
+void extent_node_dirty_linkage_init(extent_node_t *node);
+void extent_node_dirty_insert(extent_node_t *node,
+ arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty);
+void extent_node_dirty_remove(extent_node_t *node);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
+JEMALLOC_INLINE arena_t *
+extent_node_arena_get(const extent_node_t *node)
+{
+
+ return (node->en_arena);
+}
+
+JEMALLOC_INLINE void *
+extent_node_addr_get(const extent_node_t *node)
+{
+
+ return (node->en_addr);
+}
+
+JEMALLOC_INLINE size_t
+extent_node_size_get(const extent_node_t *node)
+{
+
+ return (node->en_size);
+}
+
+JEMALLOC_INLINE bool
+extent_node_zeroed_get(const extent_node_t *node)
+{
+
+ return (node->en_zeroed);
+}
+
+JEMALLOC_INLINE bool
+extent_node_committed_get(const extent_node_t *node)
+{
+
+ assert(!node->en_achunk);
+ return (node->en_committed);
+}
+
+JEMALLOC_INLINE bool
+extent_node_achunk_get(const extent_node_t *node)
+{
+
+ return (node->en_achunk);
+}
+
+JEMALLOC_INLINE prof_tctx_t *
+extent_node_prof_tctx_get(const extent_node_t *node)
+{
+
+ return (node->en_prof_tctx);
+}
+
+JEMALLOC_INLINE void
+extent_node_arena_set(extent_node_t *node, arena_t *arena)
+{
+
+ node->en_arena = arena;
+}
+
+JEMALLOC_INLINE void
+extent_node_addr_set(extent_node_t *node, void *addr)
+{
+
+ node->en_addr = addr;
+}
+
+JEMALLOC_INLINE void
+extent_node_size_set(extent_node_t *node, size_t size)
+{
+
+ node->en_size = size;
+}
+
+JEMALLOC_INLINE void
+extent_node_zeroed_set(extent_node_t *node, bool zeroed)
+{
+
+ node->en_zeroed = zeroed;
+}
+
+JEMALLOC_INLINE void
+extent_node_committed_set(extent_node_t *node, bool committed)
+{
+
+ node->en_committed = committed;
+}
+
+JEMALLOC_INLINE void
+extent_node_achunk_set(extent_node_t *node, bool achunk)
+{
+
+ node->en_achunk = achunk;
+}
+
+JEMALLOC_INLINE void
+extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx)
+{
+
+ node->en_prof_tctx = tctx;
+}
+
+JEMALLOC_INLINE void
+extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size,
+ bool zeroed, bool committed)
+{
+
+ extent_node_arena_set(node, arena);
+ extent_node_addr_set(node, addr);
+ extent_node_size_set(node, size);
+ extent_node_zeroed_set(node, zeroed);
+ extent_node_committed_set(node, committed);
+ extent_node_achunk_set(node, false);
+ if (config_prof)
+ extent_node_prof_tctx_set(node, NULL);
+}
+
+JEMALLOC_INLINE void
+extent_node_dirty_linkage_init(extent_node_t *node)
+{
+
+ qr_new(&node->rd, rd_link);
+ qr_new(node, cc_link);
+}
+
+JEMALLOC_INLINE void
+extent_node_dirty_insert(extent_node_t *node,
+ arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty)
+{
+
+ qr_meld(runs_dirty, &node->rd, rd_link);
+ qr_meld(chunks_dirty, node, cc_link);
+}
+
+JEMALLOC_INLINE void
+extent_node_dirty_remove(extent_node_t *node)
+{
+
+ qr_remove(&node->rd, rd_link);
+ qr_remove(node, cc_link);
+}
+
+#endif
+
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
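
The extent_node_t rework above renames every field with an en_ prefix and funnels all access through trivial inline getters and setters, so the layout (including the new szad/ql union and the dirty-ring linkage) can evolve without touching callers. The toy stand-in below shows the same pattern; the struct and names are not jemalloc's.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Toy stand-in for extent_node_t; fields are only touched via accessors. */
typedef struct my_node_s {
	void	*en_addr;
	size_t	en_size;
	bool	en_zeroed;
	bool	en_committed;
} my_node_t;

static inline void *my_node_addr_get(const my_node_t *n) { return (n->en_addr); }
static inline size_t my_node_size_get(const my_node_t *n) { return (n->en_size); }
static inline void my_node_addr_set(my_node_t *n, void *a) { n->en_addr = a; }
static inline void my_node_size_set(my_node_t *n, size_t s) { n->en_size = s; }
static inline void my_node_zeroed_set(my_node_t *n, bool z) { n->en_zeroed = z; }
static inline void my_node_committed_set(my_node_t *n, bool c) { n->en_committed = c; }

/* Mirrors extent_node_init(): set every field exactly once. */
static inline void
my_node_init(my_node_t *n, void *addr, size_t size, bool zeroed, bool committed)
{
	my_node_addr_set(n, addr);
	my_node_size_set(n, size);
	my_node_zeroed_set(n, zeroed);
	my_node_committed_set(n, committed);
}

int
main(void)
{
	static char chunk[1 << 12];
	my_node_t node;

	my_node_init(&node, chunk, sizeof(chunk), false, true);
	assert(my_node_addr_get(&node) == chunk);
	assert(my_node_size_get(&node) == sizeof(chunk));
	return (0);
}
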
diff --git a/deps/jemalloc/include/jemalloc/internal/hash.h b/deps/jemalloc/include/jemalloc/internal/hash.h
index c7183ede8..bcead337a 100644
--- a/deps/jemalloc/include/jemalloc/internal/hash.h
+++ b/deps/jemalloc/include/jemalloc/internal/hash.h
@@ -35,13 +35,14 @@ JEMALLOC_INLINE uint32_t
hash_rotl_32(uint32_t x, int8_t r)
{
- return (x << r) | (x >> (32 - r));
+ return ((x << r) | (x >> (32 - r)));
}
JEMALLOC_INLINE uint64_t
hash_rotl_64(uint64_t x, int8_t r)
{
- return (x << r) | (x >> (64 - r));
+
+ return ((x << r) | (x >> (64 - r)));
}
JEMALLOC_INLINE uint32_t
@@ -76,9 +77,9 @@ hash_fmix_64(uint64_t k)
{
k ^= k >> 33;
- k *= QU(0xff51afd7ed558ccdLLU);
+ k *= KQU(0xff51afd7ed558ccd);
k ^= k >> 33;
- k *= QU(0xc4ceb9fe1a85ec53LLU);
+ k *= KQU(0xc4ceb9fe1a85ec53);
k ^= k >> 33;
return (k);
@@ -247,8 +248,8 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
uint64_t h1 = seed;
uint64_t h2 = seed;
- const uint64_t c1 = QU(0x87c37b91114253d5LLU);
- const uint64_t c2 = QU(0x4cf5ad432745937fLLU);
+ const uint64_t c1 = KQU(0x87c37b91114253d5);
+ const uint64_t c2 = KQU(0x4cf5ad432745937f);
/* body */
{
diff --git a/deps/jemalloc/include/jemalloc/internal/huge.h b/deps/jemalloc/include/jemalloc/internal/huge.h
index a2b9c7791..ece7af980 100644
--- a/deps/jemalloc/include/jemalloc/internal/huge.h
+++ b/deps/jemalloc/include/jemalloc/internal/huge.h
@@ -9,34 +9,24 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-/* Huge allocation statistics. */
-extern uint64_t huge_nmalloc;
-extern uint64_t huge_ndalloc;
-extern size_t huge_allocated;
-
-/* Protects chunk-related data structures. */
-extern malloc_mutex_t huge_mtx;
-
-void *huge_malloc(size_t size, bool zero, dss_prec_t dss_prec);
-void *huge_palloc(size_t size, size_t alignment, bool zero,
- dss_prec_t dss_prec);
-bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
- size_t extra);
-void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
- size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec);
+void *huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
+ tcache_t *tcache);
+void *huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
+ bool zero, tcache_t *tcache);
+bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
+ size_t usize_max, bool zero);
+void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
+ size_t usize, size_t alignment, bool zero, tcache_t *tcache);
#ifdef JEMALLOC_JET
typedef void (huge_dalloc_junk_t)(void *, size_t);
extern huge_dalloc_junk_t *huge_dalloc_junk;
#endif
-void huge_dalloc(void *ptr, bool unmap);
+void huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
+arena_t *huge_aalloc(const void *ptr);
size_t huge_salloc(const void *ptr);
-dss_prec_t huge_dss_prec_get(arena_t *arena);
-prof_ctx_t *huge_prof_ctx_get(const void *ptr);
-void huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
-bool huge_boot(void);
-void huge_prefork(void);
-void huge_postfork_parent(void);
-void huge_postfork_child(void);
+prof_tctx_t *huge_prof_tctx_get(const void *ptr);
+void huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
+void huge_prof_tctx_reset(const void *ptr);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
index df266abb7..8536a3eda 100644
--- a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
@@ -1,70 +1,13 @@
#ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H
-#include <math.h>
-#ifdef _WIN32
-# include <windows.h>
-# define ENOENT ERROR_PATH_NOT_FOUND
-# define EINVAL ERROR_BAD_ARGUMENTS
-# define EAGAIN ERROR_OUTOFMEMORY
-# define EPERM ERROR_WRITE_FAULT
-# define EFAULT ERROR_INVALID_ADDRESS
-# define ENOMEM ERROR_NOT_ENOUGH_MEMORY
-# undef ERANGE
-# define ERANGE ERROR_INVALID_DATA
-#else
-# include <sys/param.h>
-# include <sys/mman.h>
-# include <sys/syscall.h>
-# if !defined(SYS_write) && defined(__NR_write)
-# define SYS_write __NR_write
-# endif
-# include <sys/uio.h>
-# include <pthread.h>
-# include <errno.h>
-#endif
-#include <sys/types.h>
-
-#include <limits.h>
-#ifndef SIZE_T_MAX
-# define SIZE_T_MAX SIZE_MAX
-#endif
-#include <stdarg.h>
-#include <stdbool.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <stddef.h>
-#ifndef offsetof
-# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
-#endif
-#include <inttypes.h>
-#include <string.h>
-#include <strings.h>
-#include <ctype.h>
-#ifdef _MSC_VER
-# include <io.h>
-typedef intptr_t ssize_t;
-# define PATH_MAX 1024
-# define STDERR_FILENO 2
-# define __func__ __FUNCTION__
-/* Disable warnings about deprecated system functions */
-# pragma warning(disable: 4996)
-#else
-# include <unistd.h>
-#endif
-#include <fcntl.h>
#include "jemalloc_internal_defs.h"
+#include "jemalloc/internal/jemalloc_internal_decls.h"
#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif
-#ifdef JEMALLOC_VALGRIND
-#include <valgrind/valgrind.h>
-#include <valgrind/memcheck.h>
-#endif
-
#define JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
# define JEMALLOC_N(n) jet_##n
@@ -85,7 +28,7 @@ static const bool config_debug =
false
#endif
;
-static const bool config_dss =
+static const bool have_dss =
#ifdef JEMALLOC_DSS
true
#else
@@ -127,8 +70,8 @@ static const bool config_prof_libunwind =
false
#endif
;
-static const bool config_mremap =
-#ifdef JEMALLOC_MREMAP
+static const bool maps_coalesce =
+#ifdef JEMALLOC_MAPS_COALESCE
true
#else
false
@@ -190,6 +133,17 @@ static const bool config_ivsalloc =
false
#endif
;
+static const bool config_cache_oblivious =
+#ifdef JEMALLOC_CACHE_OBLIVIOUS
+ true
+#else
+ false
+#endif
+ ;
+
+#ifdef JEMALLOC_C11ATOMICS
+#include <stdatomic.h>
+#endif
#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
@@ -229,20 +183,48 @@ static const bool config_ivsalloc =
#include "jemalloc/internal/jemalloc_internal_macros.h"
+/* Size class index type. */
+typedef unsigned szind_t;
+
+/*
+ * Flags bits:
+ *
+ * a: arena
+ * t: tcache
+ * 0: unused
+ * z: zero
+ * n: alignment
+ *
+ * aaaaaaaa aaaatttt tttttttt 0znnnnnn
+ */
+#define MALLOCX_ARENA_MASK ((int)~0xfffff)
+#define MALLOCX_ARENA_MAX 0xffe
+#define MALLOCX_TCACHE_MASK ((int)~0xfff000ffU)
+#define MALLOCX_TCACHE_MAX 0xffd
#define MALLOCX_LG_ALIGN_MASK ((int)0x3f)
-#define ALLOCM_LG_ALIGN_MASK ((int)0x3f)
+/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
+#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \
+ (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
+#define MALLOCX_ALIGN_GET(flags) \
+ (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
+#define MALLOCX_ZERO_GET(flags) \
+ ((bool)(flags & MALLOCX_ZERO))
+
+#define MALLOCX_TCACHE_GET(flags) \
+ (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> 8)) - 2)
+#define MALLOCX_ARENA_GET(flags) \
+ (((unsigned)(((unsigned)flags) >> 20)) - 1)
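
The flags-bits comment and the MALLOCX_*_GET() macros above define how a single int carries alignment, zeroing, tcache, and arena information: lg(alignment) in the low 6 bits, the zero flag at bit 6, a tcache index biased by 2 in bits 8-19, and an arena index biased by 1 in bits 20-31. The self-contained sketch below encodes and then decodes such a flags word with the same arithmetic; the ENCODE_* helpers merely stand in for the public MALLOCX_LG_ALIGN()/MALLOCX_TCACHE()/MALLOCX_ARENA() macros and are assumptions here, not copied from the patch.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define LG_ALIGN_MASK	((int)0x3f)		/* MALLOCX_LG_ALIGN_MASK */
#define ZERO_FLAG	((int)0x40)		/* Bit 6, per "0znnnnnn". */
#define TCACHE_MASK	((int)0x000fff00)	/* == ~0xfff000ffU */

/* Assumed encode helpers, mirroring the documented bit layout. */
#define ENCODE_LG_ALIGN(la)	((int)(la))
#define ENCODE_TCACHE(tc)	((int)(((tc) + 2) << 8))
#define ENCODE_ARENA(a)		((int)(((a) + 1) << 20))

int
main(void)
{
	int flags = ENCODE_LG_ALIGN(4) | ZERO_FLAG | ENCODE_TCACHE(7) |
	    ENCODE_ARENA(3);

	size_t alignment = (size_t)1 << (flags & LG_ALIGN_MASK);
	int zero = ((flags & ZERO_FLAG) != 0);
	unsigned tcache_ind = ((unsigned)((flags & TCACHE_MASK) >> 8)) - 2;
	unsigned arena_ind = ((unsigned)flags >> 20) - 1;

	assert(alignment == 16);
	assert(zero);
	assert(tcache_ind == 7);
	assert(arena_ind == 3);

	printf("align=%zu zero=%d tcache=%u arena=%u\n", alignment, zero,
	    tcache_ind, arena_ind);
	return (0);
}
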
/* Smallest size class to support. */
-#define LG_TINY_MIN 3
#define TINY_MIN (1U << LG_TINY_MIN)
/*
- * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
+ * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
* classes).
*/
#ifndef LG_QUANTUM
# if (defined(__i386__) || defined(_M_IX86))
-# define LG_QUANTUM 3
+# define LG_QUANTUM 4
# endif
# ifdef __ia64__
# define LG_QUANTUM 4
@@ -250,11 +232,11 @@ static const bool config_ivsalloc =
# ifdef __alpha__
# define LG_QUANTUM 4
# endif
-# ifdef __sparc64__
+# if (defined(__sparc64__) || defined(__sparcv9))
# define LG_QUANTUM 4
# endif
# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
-# define LG_QUANTUM 3
+# define LG_QUANTUM 4
# endif
# ifdef __arm__
# define LG_QUANTUM 3
@@ -268,6 +250,9 @@ static const bool config_ivsalloc =
# ifdef __mips__
# define LG_QUANTUM 3
# endif
+# ifdef __or1k__
+# define LG_QUANTUM 3
+# endif
# ifdef __powerpc__
# define LG_QUANTUM 4
# endif
@@ -280,8 +265,12 @@ static const bool config_ivsalloc =
# ifdef __tile__
# define LG_QUANTUM 4
# endif
+# ifdef __le32__
+# define LG_QUANTUM 4
+# endif
# ifndef LG_QUANTUM
-# error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
+# error "Unknown minimum alignment for architecture; specify via "
+ "--with-lg-quantum"
# endif
#endif
@@ -321,12 +310,11 @@ static const bool config_ivsalloc =
#define CACHELINE_CEILING(s) \
(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
-/* Page size. STATIC_PAGE_SHIFT is determined by the configure script. */
+/* Page size. LG_PAGE is determined by the configure script. */
#ifdef PAGE_MASK
# undef PAGE_MASK
#endif
-#define LG_PAGE STATIC_PAGE_SHIFT
-#define PAGE ((size_t)(1U << STATIC_PAGE_SHIFT))
+#define PAGE ((size_t)(1U << LG_PAGE))
#define PAGE_MASK ((size_t)(PAGE - 1))
/* Return the smallest pagesize multiple that is >= s. */
@@ -345,7 +333,7 @@ static const bool config_ivsalloc =
#define ALIGNMENT_CEILING(s, alignment) \
(((s) + (alignment - 1)) & (-(alignment)))
-/* Declare a variable length array */
+/* Declare a variable-length array. */
#if __STDC_VERSION__ < 199901L
# ifdef _MSC_VER
# include <malloc.h>
@@ -358,86 +346,12 @@ static const bool config_ivsalloc =
# endif
# endif
# define VARIABLE_ARRAY(type, name, count) \
- type *name = alloca(sizeof(type) * count)
-#else
-# define VARIABLE_ARRAY(type, name, count) type name[count]
-#endif
-
-#ifdef JEMALLOC_VALGRIND
-/*
- * The JEMALLOC_VALGRIND_*() macros must be macros rather than functions
- * so that when Valgrind reports errors, there are no extra stack frames
- * in the backtraces.
- *
- * The size that is reported to valgrind must be consistent through a chain of
- * malloc..realloc..realloc calls. Request size isn't recorded anywhere in
- * jemalloc, so it is critical that all callers of these macros provide usize
- * rather than request size. As a result, buffer overflow detection is
- * technically weakened for the standard API, though it is generally accepted
- * practice to consider any extra bytes reported by malloc_usable_size() as
- * usable space.
- */
-#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \
- if (config_valgrind && opt_valgrind && cond) \
- VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \
-} while (0)
-#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
- old_rzsize, zero) do { \
- if (config_valgrind && opt_valgrind) { \
- size_t rzsize = p2rz(ptr); \
- \
- if (ptr == old_ptr) { \
- VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
- usize, rzsize); \
- if (zero && old_usize < usize) { \
- VALGRIND_MAKE_MEM_DEFINED( \
- (void *)((uintptr_t)ptr + \
- old_usize), usize - old_usize); \
- } \
- } else { \
- if (old_ptr != NULL) { \
- VALGRIND_FREELIKE_BLOCK(old_ptr, \
- old_rzsize); \
- } \
- if (ptr != NULL) { \
- size_t copy_size = (old_usize < usize) \
- ? old_usize : usize; \
- size_t tail_size = usize - copy_size; \
- VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \
- rzsize, false); \
- if (copy_size > 0) { \
- VALGRIND_MAKE_MEM_DEFINED(ptr, \
- copy_size); \
- } \
- if (zero && tail_size > 0) { \
- VALGRIND_MAKE_MEM_DEFINED( \
- (void *)((uintptr_t)ptr + \
- copy_size), tail_size); \
- } \
- } \
- } \
- } \
-} while (0)
-#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \
- if (config_valgrind && opt_valgrind) \
- VALGRIND_FREELIKE_BLOCK(ptr, rzsize); \
-} while (0)
+ type *name = alloca(sizeof(type) * (count))
#else
-#define RUNNING_ON_VALGRIND ((unsigned)0)
-#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
- do {} while (0)
-#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \
- do {} while (0)
-#define VALGRIND_FREELIKE_BLOCK(addr, rzB) do {} while (0)
-#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr, _qzz_len) do {} while (0)
-#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len) do {} while (0)
-#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len) do {} while (0)
-#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0)
-#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \
- old_rzsize, zero) do {} while (0)
-#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
+# define VARIABLE_ARRAY(type, name, count) type name[(count)]
#endif
+#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
@@ -452,9 +366,10 @@ static const bool config_ivsalloc =
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
-#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
@@ -464,6 +379,7 @@ static const bool config_ivsalloc =
/******************************************************************************/
#define JEMALLOC_H_STRUCTS
+#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
@@ -472,68 +388,83 @@ static const bool config_ivsalloc =
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
+#define JEMALLOC_ARENA_STRUCTS_A
+#include "jemalloc/internal/arena.h"
+#undef JEMALLOC_ARENA_STRUCTS_A
#include "jemalloc/internal/extent.h"
+#define JEMALLOC_ARENA_STRUCTS_B
#include "jemalloc/internal/arena.h"
+#undef JEMALLOC_ARENA_STRUCTS_B
#include "jemalloc/internal/base.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
-#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
-typedef struct {
- uint64_t allocated;
- uint64_t deallocated;
-} thread_allocated_t;
-/*
- * The JEMALLOC_ARG_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
- * argument.
- */
-#define THREAD_ALLOCATED_INITIALIZER JEMALLOC_ARG_CONCAT({0, 0})
+#include "jemalloc/internal/tsd.h"
#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS
extern bool opt_abort;
-extern bool opt_junk;
+extern const char *opt_junk;
+extern bool opt_junk_alloc;
+extern bool opt_junk_free;
extern size_t opt_quarantine;
extern bool opt_redzone;
extern bool opt_utrace;
-extern bool opt_valgrind;
extern bool opt_xmalloc;
extern bool opt_zero;
extern size_t opt_narenas;
+extern bool in_valgrind;
+
/* Number of CPUs. */
extern unsigned ncpus;
-/* Protects arenas initialization (arenas, arenas_total). */
-extern malloc_mutex_t arenas_lock;
/*
- * Arenas that are used to service external requests. Not all elements of the
- * arenas array are necessarily used; arenas are created lazily as needed.
- *
- * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
- * arenas. arenas[narenas_auto..narenas_total) are only used if the application
- * takes some action to create them and allocate from them.
+ * index2size_tab encodes the same information as could be computed (at
+ * unacceptable cost in some code paths) by index2size_compute().
*/
-extern arena_t **arenas;
-extern unsigned narenas_total;
-extern unsigned narenas_auto; /* Read-only after initialization. */
-
+extern size_t const index2size_tab[NSIZES];
+/*
+ * size2index_tab is a compact lookup table that rounds request sizes up to
+ * size classes. In order to reduce cache footprint, the table is compressed,
+ * and all accesses are via size2index().
+ */
+extern uint8_t const size2index_tab[];
+
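/*
 * Illustrative sketch, not part of the diff: index2size_tab and
 * size2index_tab above replace the old SMALL_SIZE2BIN machinery with a
 * table-driven mapping between request sizes and size-class indices.  A
 * minimal standalone analogue, assuming a hypothetical 16-byte quantum and
 * just four size classes {16, 32, 48, 64}:
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Usable size for each class index. */
static const size_t demo_index2size_tab[] = {16, 32, 48, 64};
/* One entry per 16-byte granule, indexed by (size-1)/16. */
static const uint8_t demo_size2index_tab[] = {0, 1, 2, 3};

static size_t
demo_s2u(size_t size)
{
	uint8_t index;

	assert(size > 0 && size <= 64);
	index = demo_size2index_tab[(size - 1) >> 4];
	return (demo_index2size_tab[index]);
}
/* demo_s2u(1) == 16, demo_s2u(17) == 32, demo_s2u(48) == 48. */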
+arena_t *a0get(void);
+void *a0malloc(size_t size);
+void a0dalloc(void *ptr);
+void *bootstrap_malloc(size_t size);
+void *bootstrap_calloc(size_t num, size_t size);
+void bootstrap_free(void *ptr);
arena_t *arenas_extend(unsigned ind);
-void arenas_cleanup(void *arg);
-arena_t *choose_arena_hard(void);
+arena_t *arena_init(unsigned ind);
+unsigned narenas_total_get(void);
+arena_t *arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing);
+arena_t *arena_choose_hard(tsd_t *tsd);
+void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
+unsigned arena_nbound(unsigned ind);
+void thread_allocated_cleanup(tsd_t *tsd);
+void thread_deallocated_cleanup(tsd_t *tsd);
+void arena_cleanup(tsd_t *tsd);
+void arenas_cache_cleanup(tsd_t *tsd);
+void narenas_cache_cleanup(tsd_t *tsd);
+void arenas_cache_bypass_cleanup(tsd_t *tsd);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);
+#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
@@ -542,24 +473,26 @@ void jemalloc_postfork_child(void);
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
-#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
+#include "jemalloc/internal/tsd.h"
#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES
+#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
@@ -572,26 +505,158 @@ void jemalloc_postfork_child(void);
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#ifndef JEMALLOC_ENABLE_INLINE
-malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)
-
+szind_t size2index_compute(size_t size);
+szind_t size2index_lookup(size_t size);
+szind_t size2index(size_t size);
+size_t index2size_compute(szind_t index);
+size_t index2size_lookup(szind_t index);
+size_t index2size(szind_t index);
+size_t s2u_compute(size_t size);
+size_t s2u_lookup(size_t size);
size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment);
-unsigned narenas_total_get(void);
-arena_t *choose_arena(arena_t *arena);
+arena_t *arena_choose(tsd_t *tsd, arena_t *arena);
+arena_t *arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
+ bool refresh_if_missing);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
-/*
- * Map of pthread_self() --> arenas[???], used for selecting an arena to use
- * for allocations.
- */
-malloc_tsd_externs(arenas, arena_t *)
-malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, arenas, arena_t *, NULL,
- arenas_cleanup)
+JEMALLOC_INLINE szind_t
+size2index_compute(size_t size)
+{
+
+#if (NTBINS != 0)
+ if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
+ size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
+ size_t lg_ceil = lg_floor(pow2_ceil(size));
+ return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
+ }
+#endif
+ {
+ size_t x = unlikely(ZI(size) < 0) ? ((size<<1) ?
+ (ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1))
+ : lg_floor((size<<1)-1);
+ size_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
+ x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
+ size_t grp = shift << LG_SIZE_CLASS_GROUP;
+
+ size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
+ ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
+
+ size_t delta_inverse_mask = ZI(-1) << lg_delta;
+ size_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
+ ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
+
+ size_t index = NTBINS + grp + mod;
+ return (index);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE szind_t
+size2index_lookup(size_t size)
+{
+
+ assert(size <= LOOKUP_MAXCLASS);
+ {
+ size_t ret = ((size_t)(size2index_tab[(size-1) >>
+ LG_TINY_MIN]));
+ assert(ret == size2index_compute(size));
+ return (ret);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE szind_t
+size2index(size_t size)
+{
+
+ assert(size > 0);
+ if (likely(size <= LOOKUP_MAXCLASS))
+ return (size2index_lookup(size));
+ return (size2index_compute(size));
+}
+
+JEMALLOC_INLINE size_t
+index2size_compute(szind_t index)
+{
+
+#if (NTBINS > 0)
+ if (index < NTBINS)
+ return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
+#endif
+ {
+ size_t reduced_index = index - NTBINS;
+ size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP;
+ size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) -
+ 1);
+
+ size_t grp_size_mask = ~((!!grp)-1);
+ size_t grp_size = ((ZU(1) << (LG_QUANTUM +
+ (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
+
+ size_t shift = (grp == 0) ? 1 : grp;
+ size_t lg_delta = shift + (LG_QUANTUM-1);
+ size_t mod_size = (mod+1) << lg_delta;
+
+ size_t usize = grp_size + mod_size;
+ return (usize);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+index2size_lookup(szind_t index)
+{
+ size_t ret = (size_t)index2size_tab[index];
+ assert(ret == index2size_compute(index));
+ return (ret);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+index2size(szind_t index)
+{
+
+ assert(index < NSIZES);
+ return (index2size_lookup(index));
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+s2u_compute(size_t size)
+{
+
+#if (NTBINS > 0)
+ if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
+ size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
+ size_t lg_ceil = lg_floor(pow2_ceil(size));
+ return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
+ (ZU(1) << lg_ceil));
+ }
+#endif
+ {
+ size_t x = unlikely(ZI(size) < 0) ? ((size<<1) ?
+ (ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1))
+ : lg_floor((size<<1)-1);
+ size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
+ ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
+ size_t delta = ZU(1) << lg_delta;
+ size_t delta_mask = delta - 1;
+ size_t usize = (size + delta_mask) & ~delta_mask;
+ return (usize);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+s2u_lookup(size_t size)
+{
+ size_t ret = index2size_lookup(size2index_lookup(size));
+
+ assert(ret == s2u_compute(size));
+ return (ret);
+}
/*
* Compute usable size that would result from allocating an object with the
@@ -601,11 +666,10 @@ JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size)
{
- if (size <= SMALL_MAXCLASS)
- return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size);
- if (size <= arena_maxclass)
- return (PAGE_CEILING(size));
- return (CHUNK_CEILING(size));
+ assert(size > 0);
+ if (likely(size <= LOOKUP_MAXCLASS))
+ return (s2u_lookup(size));
+ return (s2u_compute(size));
}
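/*
 * Illustrative only, not part of the diff: expected behavior of the
 * table-driven s2u() on a typical x86-64 build (LG_QUANTUM = 4,
 * LG_TINY_MIN = 3), assuming this is compiled inside jemalloc where the
 * internal headers above are in scope.
 */
static void
s2u_demo(void)
{

	assert(s2u(1) == 8);		/* Smallest (tiny) size class. */
	assert(s2u(17) == 32);		/* Rounded up past the 16-byte quantum. */
	assert(s2u(100) == 112);	/* Spaced classes: ..., 96, 112, 128, ... */
	assert(s2u(129) == 160);	/* Class spacing doubles per group. */
}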
/*
@@ -619,108 +683,128 @@ sa2u(size_t size, size_t alignment)
assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
- /*
- * Round size up to the nearest multiple of alignment.
- *
- * This done, we can take advantage of the fact that for each small
- * size class, every object is aligned at the smallest power of two
- * that is non-zero in the base two representation of the size. For
- * example:
- *
- * Size | Base 2 | Minimum alignment
- * -----+----------+------------------
- * 96 | 1100000 | 32
- * 144 | 10100000 | 32
- * 192 | 11000000 | 64
- */
- usize = ALIGNMENT_CEILING(size, alignment);
- /*
- * (usize < size) protects against the combination of maximal
- * alignment and size greater than maximal alignment.
- */
- if (usize < size) {
- /* size_t overflow. */
- return (0);
+ /* Try for a small size class. */
+ if (size <= SMALL_MAXCLASS && alignment < PAGE) {
+ /*
+ * Round size up to the nearest multiple of alignment.
+ *
+ * This done, we can take advantage of the fact that for each
+ * small size class, every object is aligned at the smallest
+ * power of two that is non-zero in the base two representation
+ * of the size. For example:
+ *
+ * Size | Base 2 | Minimum alignment
+ * -----+----------+------------------
+ * 96 | 1100000 | 32
+ * 144 | 10100000 | 32
+ * 192 | 11000000 | 64
+ */
+ usize = s2u(ALIGNMENT_CEILING(size, alignment));
+ if (usize < LARGE_MINCLASS)
+ return (usize);
}
- if (usize <= arena_maxclass && alignment <= PAGE) {
- if (usize <= SMALL_MAXCLASS)
- return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
- return (PAGE_CEILING(usize));
- } else {
- size_t run_size;
-
+ /* Try for a large size class. */
+ if (likely(size <= large_maxclass) && likely(alignment < chunksize)) {
/*
* We can't achieve subpage alignment, so round up alignment
- * permanently; it makes later calculations simpler.
+ * to the minimum that can actually be supported.
*/
alignment = PAGE_CEILING(alignment);
- usize = PAGE_CEILING(size);
- /*
- * (usize < size) protects against very large sizes within
- * PAGE of SIZE_T_MAX.
- *
- * (usize + alignment < usize) protects against the
- * combination of maximal alignment and usize large enough
- * to cause overflow. This is similar to the first overflow
- * check above, but it needs to be repeated due to the new
- * usize value, which may now be *equal* to maximal
- * alignment, whereas before we only detected overflow if the
- * original size was *greater* than maximal alignment.
- */
- if (usize < size || usize + alignment < usize) {
- /* size_t overflow. */
- return (0);
- }
+
+ /* Make sure result is a large size class. */
+ usize = (size <= LARGE_MINCLASS) ? LARGE_MINCLASS : s2u(size);
/*
* Calculate the size of the over-size run that arena_palloc()
* would need to allocate in order to guarantee the alignment.
- * If the run wouldn't fit within a chunk, round up to a huge
- * allocation size.
*/
- run_size = usize + alignment - PAGE;
- if (run_size <= arena_maxclass)
- return (PAGE_CEILING(usize));
- return (CHUNK_CEILING(usize));
+ if (usize + large_pad + alignment - PAGE <= arena_maxrun)
+ return (usize);
}
-}
-JEMALLOC_INLINE unsigned
-narenas_total_get(void)
-{
- unsigned narenas;
+ /* Huge size class. Beware of size_t overflow. */
- malloc_mutex_lock(&arenas_lock);
- narenas = narenas_total;
- malloc_mutex_unlock(&arenas_lock);
+ /*
+ * We can't achieve subchunk alignment, so round up alignment to the
+ * minimum that can actually be supported.
+ */
+ alignment = CHUNK_CEILING(alignment);
+ if (alignment == 0) {
+ /* size_t overflow. */
+ return (0);
+ }
+
+ /* Make sure result is a huge size class. */
+ if (size <= chunksize)
+ usize = chunksize;
+ else {
+ usize = s2u(size);
+ if (usize < size) {
+ /* size_t overflow. */
+ return (0);
+ }
+ }
- return (narenas);
+ /*
+ * Calculate the multi-chunk mapping that huge_palloc() would need in
+ * order to guarantee the alignment.
+ */
+ if (usize + alignment - PAGE < usize) {
+ /* size_t overflow. */
+ return (0);
+ }
+ return (usize);
}
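/*
 * Illustrative only, not part of the diff: how the rewritten sa2u() resolves
 * a few small aligned requests on a typical x86-64 build.  The guarantee
 * rests on the property documented above: each small size class is aligned
 * to the lowest set bit of its size (96 -> 32, 144 -> 32, 192 -> 64).
 */
static void
sa2u_demo(void)
{

	assert(sa2u(50, 32) == 64);	/* 50 -> 64; 64 gives 64-byte alignment. */
	assert(sa2u(96, 32) == 96);	/* 96 already guarantees 32-byte alignment. */
	assert(sa2u(100, 64) == 128);	/* 100 -> 128; 128 gives 128-byte alignment. */
}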
/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
-choose_arena(arena_t *arena)
+arena_choose(tsd_t *tsd, arena_t *arena)
{
arena_t *ret;
if (arena != NULL)
return (arena);
- if ((ret = *arenas_tsd_get()) == NULL) {
- ret = choose_arena_hard();
- assert(ret != NULL);
- }
+ if (unlikely((ret = tsd_arena_get(tsd)) == NULL))
+ ret = arena_choose_hard(tsd);
return (ret);
}
+
+JEMALLOC_INLINE arena_t *
+arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
+ bool refresh_if_missing)
+{
+ arena_t *arena;
+ arena_t **arenas_cache = tsd_arenas_cache_get(tsd);
+
+ /* init_if_missing requires refresh_if_missing. */
+ assert(!init_if_missing || refresh_if_missing);
+
+ if (unlikely(arenas_cache == NULL)) {
+ /* arenas_cache hasn't been initialized yet. */
+ return (arena_get_hard(tsd, ind, init_if_missing));
+ }
+ if (unlikely(ind >= tsd_narenas_cache_get(tsd))) {
+ /*
+ * ind is invalid, cache is old (too small), or arena to be
+ * initialized.
+ */
+ return (refresh_if_missing ? arena_get_hard(tsd, ind,
+ init_if_missing) : NULL);
+ }
+ arena = arenas_cache[ind];
+ if (likely(arena != NULL) || !refresh_if_missing)
+ return (arena);
+ return (arena_get_hard(tsd, ind, init_if_missing));
+}
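/*
 * Illustrative only, not part of the diff: how an internal allocation path
 * would use the new tsd-based arena selection in place of the old global
 * arenas array + arenas_tsd.  Passing NULL lets arena_choose() fall back to
 * the thread's cached arena (arena_choose_hard() on first use); arena_get()
 * consults the per-thread arenas_cache.
 */
static arena_t *
arena_select_demo(tsd_t *tsd)
{
	arena_t *arena;

	/* Thread-local default arena. */
	arena = arena_choose(tsd, NULL);
	if (arena == NULL)
		return (NULL);
	/* Explicit lookup of arena 0; init_if_missing requires refresh_if_missing. */
	return (arena_get(tsd, 0, true, true));
}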
#endif
#include "jemalloc/internal/bitmap.h"
-#include "jemalloc/internal/rtree.h"
/*
- * Include arena.h twice in order to resolve circular dependencies with
- * tcache.h.
+ * Include portions of arena.h interleaved with tcache.h in order to resolve
+ * circular dependencies.
*/
#define JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
@@ -733,133 +817,155 @@ choose_arena(arena_t *arena)
#include "jemalloc/internal/quarantine.h"
#ifndef JEMALLOC_ENABLE_INLINE
-void *imalloct(size_t size, bool try_tcache, arena_t *arena);
-void *imalloc(size_t size);
-void *icalloct(size_t size, bool try_tcache, arena_t *arena);
-void *icalloc(size_t size);
-void *ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
- arena_t *arena);
-void *ipalloc(size_t usize, size_t alignment, bool zero);
+arena_t *iaalloc(const void *ptr);
size_t isalloc(const void *ptr, bool demote);
+void *iallocztm(tsd_t *tsd, size_t size, bool zero, tcache_t *tcache,
+ bool is_metadata, arena_t *arena);
+void *imalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena);
+void *imalloc(tsd_t *tsd, size_t size);
+void *icalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena);
+void *icalloc(tsd_t *tsd, size_t size);
+void *ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
+ tcache_t *tcache, bool is_metadata, arena_t *arena);
+void *ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
+ tcache_t *tcache, arena_t *arena);
+void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
size_t ivsalloc(const void *ptr, bool demote);
size_t u2rz(size_t usize);
size_t p2rz(const void *ptr);
-void idalloct(void *ptr, bool try_tcache);
-void idalloc(void *ptr);
-void iqalloct(void *ptr, bool try_tcache);
-void iqalloc(void *ptr);
-void *iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
- size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
+void idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata);
+void idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache);
+void idalloc(tsd_t *tsd, void *ptr);
+void iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
+void isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
+void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
+void *iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
+ size_t extra, size_t alignment, bool zero, tcache_t *tcache,
arena_t *arena);
-void *iralloct(void *ptr, size_t size, size_t extra, size_t alignment,
- bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena);
-void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
- bool zero);
-bool ixalloc(void *ptr, size_t size, size_t extra, size_t alignment,
- bool zero);
-malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
+void *iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
+ size_t alignment, bool zero, tcache_t *tcache, arena_t *arena);
+void *iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
+ size_t alignment, bool zero);
+bool ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra,
+ size_t alignment, bool zero);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
+JEMALLOC_ALWAYS_INLINE arena_t *
+iaalloc(const void *ptr)
+{
+
+ assert(ptr != NULL);
+
+ return (arena_aalloc(ptr));
+}
+
+/*
+ * Typical usage:
+ * void *ptr = [...]
+ * size_t sz = isalloc(ptr, config_prof);
+ */
+JEMALLOC_ALWAYS_INLINE size_t
+isalloc(const void *ptr, bool demote)
+{
+
+ assert(ptr != NULL);
+ /* Demotion only makes sense if config_prof is true. */
+ assert(config_prof || !demote);
+
+ return (arena_salloc(ptr, demote));
+}
+
JEMALLOC_ALWAYS_INLINE void *
-imalloct(size_t size, bool try_tcache, arena_t *arena)
+iallocztm(tsd_t *tsd, size_t size, bool zero, tcache_t *tcache, bool is_metadata,
+ arena_t *arena)
{
+ void *ret;
assert(size != 0);
- if (size <= arena_maxclass)
- return (arena_malloc(arena, size, false, try_tcache));
- else
- return (huge_malloc(size, false, huge_dss_prec_get(arena)));
+ ret = arena_malloc(tsd, arena, size, zero, tcache);
+ if (config_stats && is_metadata && likely(ret != NULL)) {
+ arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
+ config_prof));
+ }
+ return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
-imalloc(size_t size)
+imalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena)
{
- return (imalloct(size, true, NULL));
+ return (iallocztm(tsd, size, false, tcache, false, arena));
}
JEMALLOC_ALWAYS_INLINE void *
-icalloct(size_t size, bool try_tcache, arena_t *arena)
+imalloc(tsd_t *tsd, size_t size)
{
- if (size <= arena_maxclass)
- return (arena_malloc(arena, size, true, try_tcache));
- else
- return (huge_malloc(size, true, huge_dss_prec_get(arena)));
+ return (iallocztm(tsd, size, false, tcache_get(tsd, true), false, NULL));
}
JEMALLOC_ALWAYS_INLINE void *
-icalloc(size_t size)
+icalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena)
{
- return (icalloct(size, true, NULL));
+ return (iallocztm(tsd, size, true, tcache, false, arena));
}
JEMALLOC_ALWAYS_INLINE void *
-ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
- arena_t *arena)
+icalloc(tsd_t *tsd, size_t size)
+{
+
+ return (iallocztm(tsd, size, true, tcache_get(tsd, true), false, NULL));
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
+ tcache_t *tcache, bool is_metadata, arena_t *arena)
{
void *ret;
assert(usize != 0);
assert(usize == sa2u(usize, alignment));
- if (usize <= arena_maxclass && alignment <= PAGE)
- ret = arena_malloc(arena, usize, zero, try_tcache);
- else {
- if (usize <= arena_maxclass) {
- ret = arena_palloc(choose_arena(arena), usize,
- alignment, zero);
- } else if (alignment <= chunksize)
- ret = huge_malloc(usize, zero, huge_dss_prec_get(arena));
- else
- ret = huge_palloc(usize, alignment, zero, huge_dss_prec_get(arena));
- }
-
+ ret = arena_palloc(tsd, arena, usize, alignment, zero, tcache);
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
+ if (config_stats && is_metadata && likely(ret != NULL)) {
+ arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
+ config_prof));
+ }
return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
-ipalloc(size_t usize, size_t alignment, bool zero)
+ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
+ tcache_t *tcache, arena_t *arena)
{
- return (ipalloct(usize, alignment, zero, true, NULL));
+ return (ipallocztm(tsd, usize, alignment, zero, tcache, false, arena));
}
-/*
- * Typical usage:
- * void *ptr = [...]
- * size_t sz = isalloc(ptr, config_prof);
- */
-JEMALLOC_ALWAYS_INLINE size_t
-isalloc(const void *ptr, bool demote)
+JEMALLOC_ALWAYS_INLINE void *
+ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
{
- size_t ret;
- arena_chunk_t *chunk;
-
- assert(ptr != NULL);
- /* Demotion only makes sense if config_prof is true. */
- assert(config_prof || demote == false);
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (chunk != ptr)
- ret = arena_salloc(ptr, demote);
- else
- ret = huge_salloc(ptr);
-
- return (ret);
+ return (ipallocztm(tsd, usize, alignment, zero, tcache_get(tsd,
+ NULL), false, NULL));
}
JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{
+ extent_node_t *node;
/* Return 0 if ptr is not within a chunk managed by jemalloc. */
- if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == 0)
+ node = chunk_lookup(ptr, false);
+ if (node == NULL)
return (0);
+ /* Only arena chunks should be looked up via interior pointers. */
+ assert(extent_node_addr_get(node) == ptr ||
+ extent_node_achunk_get(node));
return (isalloc(ptr, demote));
}
@@ -870,7 +976,7 @@ u2rz(size_t usize)
size_t ret;
if (usize <= SMALL_MAXCLASS) {
- size_t binind = SMALL_SIZE2BIN(usize);
+ szind_t binind = size2index(usize);
ret = arena_bin_info[binind].redzone_size;
} else
ret = 0;
@@ -887,47 +993,62 @@ p2rz(const void *ptr)
}
JEMALLOC_ALWAYS_INLINE void
-idalloct(void *ptr, bool try_tcache)
+idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata)
{
- arena_chunk_t *chunk;
assert(ptr != NULL);
+ if (config_stats && is_metadata) {
+ arena_metadata_allocated_sub(iaalloc(ptr), isalloc(ptr,
+ config_prof));
+ }
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (chunk != ptr)
- arena_dalloc(chunk->arena, chunk, ptr, try_tcache);
- else
- huge_dalloc(ptr, true);
+ arena_dalloc(tsd, ptr, tcache);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache)
+{
+
+ idalloctm(tsd, ptr, tcache, false);
}
JEMALLOC_ALWAYS_INLINE void
-idalloc(void *ptr)
+idalloc(tsd_t *tsd, void *ptr)
{
- idalloct(ptr, true);
+ idalloctm(tsd, ptr, tcache_get(tsd, false), false);
}
JEMALLOC_ALWAYS_INLINE void
-iqalloct(void *ptr, bool try_tcache)
+iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
- if (config_fill && opt_quarantine)
- quarantine(ptr);
+ if (config_fill && unlikely(opt_quarantine))
+ quarantine(tsd, ptr);
else
- idalloct(ptr, try_tcache);
+ idalloctm(tsd, ptr, tcache, false);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
+{
+
+ arena_sdalloc(tsd, ptr, size, tcache);
}
JEMALLOC_ALWAYS_INLINE void
-iqalloc(void *ptr)
+isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
{
- iqalloct(ptr, true);
+ if (config_fill && unlikely(opt_quarantine))
+ quarantine(tsd, ptr);
+ else
+ isdalloct(tsd, ptr, size, tcache);
}
JEMALLOC_ALWAYS_INLINE void *
-iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
- size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
- arena_t *arena)
+iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
+ size_t extra, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena)
{
void *p;
size_t usize, copysize;
@@ -935,7 +1056,7 @@ iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
usize = sa2u(size + extra, alignment);
if (usize == 0)
return (NULL);
- p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
+ p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
if (p == NULL) {
if (extra == 0)
return (NULL);
@@ -943,7 +1064,7 @@ iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
usize = sa2u(size, alignment);
if (usize == 0)
return (NULL);
- p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
+ p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
if (p == NULL)
return (NULL);
}
@@ -953,72 +1074,57 @@ iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(p, ptr, copysize);
- iqalloct(ptr, try_tcache_dalloc);
+ isqalloc(tsd, ptr, oldsize, tcache);
return (p);
}
JEMALLOC_ALWAYS_INLINE void *
-iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
- bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
+iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
+ bool zero, tcache_t *tcache, arena_t *arena)
{
- size_t oldsize;
assert(ptr != NULL);
assert(size != 0);
- oldsize = isalloc(ptr, config_prof);
-
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) {
/*
* Existing object alignment is inadequate; allocate new space
* and copy.
*/
- return (iralloct_realign(ptr, oldsize, size, extra, alignment,
- zero, try_tcache_alloc, try_tcache_dalloc, arena));
+ return (iralloct_realign(tsd, ptr, oldsize, size, 0, alignment,
+ zero, tcache, arena));
}
- if (size + extra <= arena_maxclass) {
- return (arena_ralloc(arena, ptr, oldsize, size, extra,
- alignment, zero, try_tcache_alloc,
- try_tcache_dalloc));
- } else {
- return (huge_ralloc(ptr, oldsize, size, extra,
- alignment, zero, try_tcache_dalloc, huge_dss_prec_get(arena)));
- }
+ return (arena_ralloc(tsd, arena, ptr, oldsize, size, alignment, zero,
+ tcache));
}
JEMALLOC_ALWAYS_INLINE void *
-iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
+iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
+ bool zero)
{
- return (iralloct(ptr, size, extra, alignment, zero, true, true, NULL));
+ return (iralloct(tsd, ptr, oldsize, size, alignment, zero,
+ tcache_get(tsd, true), NULL));
}
JEMALLOC_ALWAYS_INLINE bool
-ixalloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
+ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment,
+ bool zero)
{
- size_t oldsize;
assert(ptr != NULL);
assert(size != 0);
- oldsize = isalloc(ptr, config_prof);
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) {
/* Existing object alignment is inadequate. */
return (true);
}
- if (size <= arena_maxclass)
- return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
- else
- return (huge_ralloc_no_move(ptr, oldsize, size, extra));
+ return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
}
-
-malloc_tsd_externs(thread_allocated, thread_allocated_t)
-malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, thread_allocated, thread_allocated_t,
- THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif
#include "jemalloc/internal/prof.h"
diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
new file mode 100644
index 000000000..a601d6ebb
--- /dev/null
+++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
@@ -0,0 +1,64 @@
+#ifndef JEMALLOC_INTERNAL_DECLS_H
+#define JEMALLOC_INTERNAL_DECLS_H
+
+#include <math.h>
+#ifdef _WIN32
+# include <windows.h>
+# include "msvc_compat/windows_extra.h"
+
+#else
+# include <sys/param.h>
+# include <sys/mman.h>
+# if !defined(__pnacl__) && !defined(__native_client__)
+# include <sys/syscall.h>
+# if !defined(SYS_write) && defined(__NR_write)
+# define SYS_write __NR_write
+# endif
+# include <sys/uio.h>
+# endif
+# include <pthread.h>
+# include <errno.h>
+#endif
+#include <sys/types.h>
+
+#include <limits.h>
+#ifndef SIZE_T_MAX
+# define SIZE_T_MAX SIZE_MAX
+#endif
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stddef.h>
+#ifndef offsetof
+# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
+#endif
+#include <string.h>
+#include <strings.h>
+#include <ctype.h>
+#ifdef _MSC_VER
+# include <io.h>
+typedef intptr_t ssize_t;
+# define PATH_MAX 1024
+# define STDERR_FILENO 2
+# define __func__ __FUNCTION__
+# ifdef JEMALLOC_HAS_RESTRICT
+# define restrict __restrict
+# endif
+/* Disable warnings about deprecated system functions. */
+# pragma warning(disable: 4996)
+#if _MSC_VER < 1800
+static int
+isblank(int c)
+{
+
+ return (c == '\t' || c == ' ');
+}
+#endif
+#else
+# include <unistd.h>
+#endif
+#include <fcntl.h>
+
+#endif /* JEMALLOC_INTERNAL_H */
diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in
index c166fbd9e..b0f8caaf8 100644
--- a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in
+++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in
@@ -22,6 +22,9 @@
*/
#undef CPU_SPINWAIT
+/* Defined if C11 atomics are available. */
+#undef JEMALLOC_C11ATOMICS
+
/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */
#undef JEMALLOC_ATOMIC9
@@ -35,7 +38,7 @@
* Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
* __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
- * functions are defined in libgcc instead of being inlines)
+ * functions are defined in libgcc instead of being inlines).
*/
#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4
@@ -43,17 +46,37 @@
* Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
* __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
- * functions are defined in libgcc instead of being inlines)
+ * functions are defined in libgcc instead of being inlines).
*/
#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8
/*
+ * Defined if __builtin_clz() and __builtin_clzl() are available.
+ */
+#undef JEMALLOC_HAVE_BUILTIN_CLZ
+
+/*
+ * Defined if madvise(2) is available.
+ */
+#undef JEMALLOC_HAVE_MADVISE
+
+/*
* Defined if OSSpin*() functions are available, as provided by Darwin, and
* documented in the spinlock(3) manual page.
*/
#undef JEMALLOC_OSSPIN
/*
+ * Defined if secure_getenv(3) is available.
+ */
+#undef JEMALLOC_HAVE_SECURE_GETENV
+
+/*
+ * Defined if issetugid(2) is available.
+ */
+#undef JEMALLOC_HAVE_ISSETUGID
+
+/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc
* bootstrapping will cause recursion into the pthreads library. Therefore, if
@@ -76,9 +99,6 @@
*/
#undef JEMALLOC_MUTEX_INIT_CB
-/* Defined if sbrk() is supported. */
-#undef JEMALLOC_HAVE_SBRK
-
/* Non-empty if the tls_model attribute is supported. */
#undef JEMALLOC_TLS_MODEL
@@ -137,8 +157,26 @@
/* Support lazy locking (avoid locking unless a second thread is launched). */
#undef JEMALLOC_LAZY_LOCK
-/* One page is 2^STATIC_PAGE_SHIFT bytes. */
-#undef STATIC_PAGE_SHIFT
+/* Minimum size class to support is 2^LG_TINY_MIN bytes. */
+#undef LG_TINY_MIN
+
+/*
+ * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
+ * classes).
+ */
+#undef LG_QUANTUM
+
+/* One page is 2^LG_PAGE bytes. */
+#undef LG_PAGE
+
+/*
+ * If defined, adjacent virtual memory mappings with identical attributes
+ * automatically coalesce, and they fragment when changes are made to subranges.
+ * This is the normal order of things for mmap()/munmap(), but on Windows
+ * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
+ * mappings do *not* coalesce/fragment.
+ */
+#undef JEMALLOC_MAPS_COALESCE
/*
* If defined, use munmap() to unmap freed chunks, rather than storing them for
@@ -147,23 +185,29 @@
*/
#undef JEMALLOC_MUNMAP
-/*
- * If defined, use mremap(...MREMAP_FIXED...) for huge realloc(). This is
- * disabled by default because it is Linux-specific and it will cause virtual
- * memory map holes, much like munmap(2) does.
- */
-#undef JEMALLOC_MREMAP
-
/* TLS is used to map arenas and magazine caches to threads. */
#undef JEMALLOC_TLS
/*
+ * ffs()/ffsl() functions to use for bitmapping. Don't use these directly;
+ * instead, use jemalloc_ffs() or jemalloc_ffsl() from util.h.
+ */
+#undef JEMALLOC_INTERNAL_FFSL
+#undef JEMALLOC_INTERNAL_FFS
+
+/*
* JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside
* within jemalloc-owned chunks before dereferencing them.
*/
#undef JEMALLOC_IVSALLOC
/*
+ * If defined, explicitly attempt to more uniformly distribute large allocation
+ * pointer alignments across all cache indices.
+ */
+#undef JEMALLOC_CACHE_OBLIVIOUS
+
+/*
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
*/
#undef JEMALLOC_ZONE
@@ -182,9 +226,7 @@
#undef JEMALLOC_PURGE_MADVISE_DONTNEED
#undef JEMALLOC_PURGE_MADVISE_FREE
-/*
- * Define if operating system has alloca.h header.
- */
+/* Define if operating system has alloca.h header. */
#undef JEMALLOC_HAS_ALLOCA_H
/* C99 restrict keyword supported. */
@@ -202,4 +244,19 @@
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#undef LG_SIZEOF_INTMAX_T
+/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
+#undef JEMALLOC_GLIBC_MALLOC_HOOK
+
+/* glibc memalign hook. */
+#undef JEMALLOC_GLIBC_MEMALIGN_HOOK
+
+/* Adaptive mutex support in pthreads. */
+#undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
+
+/*
+ * If defined, jemalloc symbols are not exported (doesn't work when
+ * JEMALLOC_PREFIX is not defined).
+ */
+#undef JEMALLOC_EXPORT
+
#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h
index 4e2392302..a08ba772e 100644
--- a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h
+++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h
@@ -39,9 +39,15 @@
#endif
#define ZU(z) ((size_t)z)
+#define ZI(z) ((ssize_t)z)
#define QU(q) ((uint64_t)q)
#define QI(q) ((int64_t)q)
+#define KZU(z) ZU(z##ULL)
+#define KZI(z) ZI(z##LL)
+#define KQU(q) QU(q##ULL)
+#define KQI(q) QI(q##LL)
+
#ifndef __DECONST
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif
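/*
 * Illustrative only, not part of the diff: the new K*() wrappers paste a
 * literal suffix onto the constant before casting, so wide constants can be
 * written without relying on the implementation-defined type of a bare
 * decimal or hex literal.  For example:
 */
static const uint64_t demo_all_ones = KQU(18446744073709551615);
/* ... expands to ((uint64_t)18446744073709551615ULL). */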
diff --git a/deps/jemalloc/include/jemalloc/internal/mutex.h b/deps/jemalloc/include/jemalloc/internal/mutex.h
index de44e1435..f051f2917 100644
--- a/deps/jemalloc/include/jemalloc/internal/mutex.h
+++ b/deps/jemalloc/include/jemalloc/internal/mutex.h
@@ -10,7 +10,7 @@ typedef struct malloc_mutex_s malloc_mutex_t;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL}
#else
-# if (defined(PTHREAD_MUTEX_ADAPTIVE_NP) && \
+# if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) && \
defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP}
@@ -26,7 +26,11 @@ typedef struct malloc_mutex_s malloc_mutex_t;
struct malloc_mutex_s {
#ifdef _WIN32
+# if _WIN32_WINNT >= 0x0600
+ SRWLOCK lock;
+# else
CRITICAL_SECTION lock;
+# endif
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLock lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
@@ -70,7 +74,11 @@ malloc_mutex_lock(malloc_mutex_t *mutex)
if (isthreaded) {
#ifdef _WIN32
+# if _WIN32_WINNT >= 0x0600
+ AcquireSRWLockExclusive(&mutex->lock);
+# else
EnterCriticalSection(&mutex->lock);
+# endif
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLockLock(&mutex->lock);
#else
@@ -85,7 +93,11 @@ malloc_mutex_unlock(malloc_mutex_t *mutex)
if (isthreaded) {
#ifdef _WIN32
+# if _WIN32_WINNT >= 0x0600
+ ReleaseSRWLockExclusive(&mutex->lock);
+# else
LeaveCriticalSection(&mutex->lock);
+# endif
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLockUnlock(&mutex->lock);
#else
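/*
 * Illustrative only, not part of the diff: on Windows Vista and later
 * (_WIN32_WINNT >= 0x0600) malloc_mutex_t now wraps a slim reader/writer
 * lock rather than a CRITICAL_SECTION.  The bare Win32 pattern it builds on:
 */
#if defined(_WIN32) && _WIN32_WINNT >= 0x0600
#include <windows.h>

static SRWLOCK demo_lock = SRWLOCK_INIT;

static void
demo_locked_section(void)
{

	AcquireSRWLockExclusive(&demo_lock);
	/* ... critical work ... */
	ReleaseSRWLockExclusive(&demo_lock);
}
#endif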
diff --git a/deps/jemalloc/include/jemalloc/internal/pages.h b/deps/jemalloc/include/jemalloc/internal/pages.h
new file mode 100644
index 000000000..da7eb9686
--- /dev/null
+++ b/deps/jemalloc/include/jemalloc/internal/pages.h
@@ -0,0 +1,26 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+void *pages_map(void *addr, size_t size);
+void pages_unmap(void *addr, size_t size);
+void *pages_trim(void *addr, size_t alloc_size, size_t leadsize,
+ size_t size);
+bool pages_commit(void *addr, size_t size);
+bool pages_decommit(void *addr, size_t size);
+bool pages_purge(void *addr, size_t size);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
+
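/*
 * Illustrative only, not part of the diff: pages.h gathers the raw
 * page-granularity mapping primitives (previously open-coded around
 * chunk_mmap) behind one interface.  A hypothetical caller reserving,
 * purging, and releasing a page-aligned region, assuming it is compiled
 * against the jemalloc internal headers:
 */
static void
pages_demo(size_t size)
{
	void *addr;

	addr = pages_map(NULL, size);	/* Let the OS pick the address. */
	if (addr == NULL)
		return;
	(void)pages_purge(addr, size);	/* Drop physical backing, keep the mapping. */
	pages_unmap(addr, size);
}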
diff --git a/deps/jemalloc/include/jemalloc/internal/private_symbols.txt b/deps/jemalloc/include/jemalloc/internal/private_symbols.txt
index 93516d242..a90021aa6 100644
--- a/deps/jemalloc/include/jemalloc/internal/private_symbols.txt
+++ b/deps/jemalloc/include/jemalloc/internal/private_symbols.txt
@@ -1,44 +1,76 @@
-a0calloc
-a0free
+a0dalloc
+a0get
a0malloc
+arena_aalloc
arena_alloc_junk_small
arena_bin_index
arena_bin_info
+arena_bitselm_get
arena_boot
+arena_choose
+arena_choose_hard
+arena_chunk_alloc_huge
+arena_chunk_cache_maybe_insert
+arena_chunk_cache_maybe_remove
+arena_chunk_dalloc_huge
+arena_chunk_ralloc_huge_expand
+arena_chunk_ralloc_huge_shrink
+arena_chunk_ralloc_huge_similar
+arena_cleanup
arena_dalloc
arena_dalloc_bin
-arena_dalloc_bin_locked
+arena_dalloc_bin_junked_locked
arena_dalloc_junk_large
arena_dalloc_junk_small
arena_dalloc_large
-arena_dalloc_large_locked
+arena_dalloc_large_junked_locked
arena_dalloc_small
arena_dss_prec_get
arena_dss_prec_set
+arena_get
+arena_get_hard
+arena_init
+arena_lg_dirty_mult_default_get
+arena_lg_dirty_mult_default_set
+arena_lg_dirty_mult_get
+arena_lg_dirty_mult_set
arena_malloc
arena_malloc_large
arena_malloc_small
arena_mapbits_allocated_get
arena_mapbits_binind_get
+arena_mapbits_decommitted_get
arena_mapbits_dirty_get
arena_mapbits_get
+arena_mapbits_internal_set
arena_mapbits_large_binind_set
arena_mapbits_large_get
arena_mapbits_large_set
arena_mapbits_large_size_get
+arena_mapbitsp_get
+arena_mapbitsp_read
+arena_mapbitsp_write
+arena_mapbits_size_decode
+arena_mapbits_size_encode
arena_mapbits_small_runind_get
arena_mapbits_small_set
arena_mapbits_unallocated_set
arena_mapbits_unallocated_size_get
arena_mapbits_unallocated_size_set
arena_mapbits_unzeroed_get
-arena_mapbits_unzeroed_set
-arena_mapbitsp_get
-arena_mapbitsp_read
-arena_mapbitsp_write
-arena_mapp_get
-arena_maxclass
+arena_maxrun
+arena_maybe_purge
+arena_metadata_allocated_add
+arena_metadata_allocated_get
+arena_metadata_allocated_sub
+arena_migrate
+arena_miscelm_get
+arena_miscelm_to_pageind
+arena_miscelm_to_rpages
+arena_nbound
arena_new
+arena_node_alloc
+arena_node_dalloc
arena_palloc
arena_postfork_child
arena_postfork_parent
@@ -46,50 +78,47 @@ arena_prefork
arena_prof_accum
arena_prof_accum_impl
arena_prof_accum_locked
-arena_prof_ctx_get
-arena_prof_ctx_set
arena_prof_promoted
+arena_prof_tctx_get
+arena_prof_tctx_reset
+arena_prof_tctx_set
arena_ptr_small_binind_get
arena_purge_all
arena_quarantine_junk_small
arena_ralloc
arena_ralloc_junk_large
arena_ralloc_no_move
+arena_rd_to_miscelm
arena_redzone_corruption
arena_run_regind
+arena_run_to_miscelm
arena_salloc
+arenas_cache_bypass_cleanup
+arenas_cache_cleanup
+arena_sdalloc
arena_stats_merge
arena_tcache_fill_small
-arenas
-arenas_booted
-arenas_cleanup
-arenas_extend
-arenas_initialized
-arenas_lock
-arenas_tls
-arenas_tsd
-arenas_tsd_boot
-arenas_tsd_cleanup_wrapper
-arenas_tsd_get
-arenas_tsd_get_wrapper
-arenas_tsd_init_head
-arenas_tsd_set
+atomic_add_p
atomic_add_u
atomic_add_uint32
atomic_add_uint64
atomic_add_z
+atomic_cas_p
+atomic_cas_u
+atomic_cas_uint32
+atomic_cas_uint64
+atomic_cas_z
+atomic_sub_p
atomic_sub_u
atomic_sub_uint32
atomic_sub_uint64
atomic_sub_z
base_alloc
base_boot
-base_calloc
-base_node_alloc
-base_node_dealloc
base_postfork_child
base_postfork_parent
base_prefork
+base_stats_get
bitmap_full
bitmap_get
bitmap_info_init
@@ -99,49 +128,54 @@ bitmap_set
bitmap_sfu
bitmap_size
bitmap_unset
+bootstrap_calloc
+bootstrap_free
+bootstrap_malloc
bt_init
buferror
-choose_arena
-choose_arena_hard
-chunk_alloc
+chunk_alloc_base
+chunk_alloc_cache
chunk_alloc_dss
chunk_alloc_mmap
+chunk_alloc_wrapper
chunk_boot
-chunk_dealloc
-chunk_dealloc_mmap
+chunk_dalloc_arena
+chunk_dalloc_cache
+chunk_dalloc_mmap
+chunk_dalloc_wrapper
+chunk_deregister
chunk_dss_boot
chunk_dss_postfork_child
chunk_dss_postfork_parent
chunk_dss_prec_get
chunk_dss_prec_set
chunk_dss_prefork
+chunk_hooks_default
+chunk_hooks_get
+chunk_hooks_set
chunk_in_dss
+chunk_lookup
chunk_npages
chunk_postfork_child
chunk_postfork_parent
chunk_prefork
-chunk_unmap
-chunks_mtx
-chunks_rtree
+chunk_purge_arena
+chunk_purge_wrapper
+chunk_register
chunksize
chunksize_mask
-ckh_bucket_search
+chunks_rtree
ckh_count
ckh_delete
-ckh_evict_reloc_insert
ckh_insert
-ckh_isearch
ckh_iter
ckh_new
ckh_pointer_hash
ckh_pointer_keycomp
-ckh_rebuild
ckh_remove
ckh_search
ckh_string_hash
ckh_string_keycomp
-ckh_try_bucket_insert
-ckh_try_insert
ctl_boot
ctl_bymib
ctl_byname
@@ -150,6 +184,23 @@ ctl_postfork_child
ctl_postfork_parent
ctl_prefork
dss_prec_names
+extent_node_achunk_get
+extent_node_achunk_set
+extent_node_addr_get
+extent_node_addr_set
+extent_node_arena_get
+extent_node_arena_set
+extent_node_dirty_insert
+extent_node_dirty_linkage_init
+extent_node_dirty_remove
+extent_node_init
+extent_node_prof_tctx_get
+extent_node_prof_tctx_set
+extent_node_size_get
+extent_node_size_set
+extent_node_zeroed_get
+extent_node_zeroed_set
+extent_tree_ad_empty
extent_tree_ad_first
extent_tree_ad_insert
extent_tree_ad_iter
@@ -166,6 +217,7 @@ extent_tree_ad_reverse_iter
extent_tree_ad_reverse_iter_recurse
extent_tree_ad_reverse_iter_start
extent_tree_ad_search
+extent_tree_szad_empty
extent_tree_szad_first
extent_tree_szad_insert
extent_tree_szad_iter
@@ -193,45 +245,49 @@ hash_rotl_64
hash_x64_128
hash_x86_128
hash_x86_32
-huge_allocated
-huge_boot
+huge_aalloc
huge_dalloc
huge_dalloc_junk
-huge_dss_prec_get
huge_malloc
-huge_mtx
-huge_ndalloc
-huge_nmalloc
huge_palloc
-huge_postfork_child
-huge_postfork_parent
-huge_prefork
-huge_prof_ctx_get
-huge_prof_ctx_set
+huge_prof_tctx_get
+huge_prof_tctx_reset
+huge_prof_tctx_set
huge_ralloc
huge_ralloc_no_move
huge_salloc
-iallocm
+iaalloc
+iallocztm
icalloc
icalloct
idalloc
idalloct
+idalloctm
imalloc
imalloct
+index2size
+index2size_compute
+index2size_lookup
+index2size_tab
+in_valgrind
ipalloc
ipalloct
+ipallocztm
iqalloc
-iqalloct
iralloc
iralloct
iralloct_realign
isalloc
+isdalloct
+isqalloc
isthreaded
ivsalloc
ixalloc
jemalloc_postfork_child
jemalloc_postfork_parent
jemalloc_prefork
+large_maxclass
+lg_floor
malloc_cprintf
malloc_mutex_init
malloc_mutex_lock
@@ -242,7 +298,8 @@ malloc_mutex_unlock
malloc_printf
malloc_snprintf
malloc_strtoumax
-malloc_tsd_boot
+malloc_tsd_boot0
+malloc_tsd_boot1
malloc_tsd_cleanup_register
malloc_tsd_dalloc
malloc_tsd_malloc
@@ -251,16 +308,18 @@ malloc_vcprintf
malloc_vsnprintf
malloc_write
map_bias
+map_misc_offset
mb_write
mutex_boot
-narenas_auto
-narenas_total
+narenas_cache_cleanup
narenas_total_get
ncpus
nhbins
opt_abort
opt_dss
opt_junk
+opt_junk_alloc
+opt_junk_free
opt_lg_chunk
opt_lg_dirty_mult
opt_lg_prof_interval
@@ -274,84 +333,99 @@ opt_prof_final
opt_prof_gdump
opt_prof_leak
opt_prof_prefix
+opt_prof_thread_active_init
opt_quarantine
opt_redzone
opt_stats_print
opt_tcache
opt_utrace
-opt_valgrind
opt_xmalloc
opt_zero
p2rz
+pages_commit
+pages_decommit
+pages_map
pages_purge
+pages_trim
+pages_unmap
pow2_ceil
+prof_active_get
+prof_active_get_unlocked
+prof_active_set
+prof_alloc_prep
+prof_alloc_rollback
prof_backtrace
prof_boot0
prof_boot1
prof_boot2
-prof_bt_count
-prof_ctx_get
-prof_ctx_set
+prof_dump_header
prof_dump_open
prof_free
+prof_free_sampled_object
prof_gdump
+prof_gdump_get
+prof_gdump_get_unlocked
+prof_gdump_set
+prof_gdump_val
prof_idump
prof_interval
prof_lookup
prof_malloc
+prof_malloc_sample_object
prof_mdump
prof_postfork_child
prof_postfork_parent
prof_prefork
-prof_promote
prof_realloc
+prof_reset
prof_sample_accum_update
prof_sample_threshold_update
-prof_tdata_booted
+prof_tctx_get
+prof_tctx_reset
+prof_tctx_set
prof_tdata_cleanup
prof_tdata_get
prof_tdata_init
-prof_tdata_initialized
-prof_tdata_tls
-prof_tdata_tsd
-prof_tdata_tsd_boot
-prof_tdata_tsd_cleanup_wrapper
-prof_tdata_tsd_get
-prof_tdata_tsd_get_wrapper
-prof_tdata_tsd_init_head
-prof_tdata_tsd_set
+prof_tdata_reinit
+prof_thread_active_get
+prof_thread_active_init_get
+prof_thread_active_init_set
+prof_thread_active_set
+prof_thread_name_get
+prof_thread_name_set
quarantine
quarantine_alloc_hook
-quarantine_boot
-quarantine_booted
+quarantine_alloc_hook_work
quarantine_cleanup
-quarantine_init
-quarantine_tls
-quarantine_tsd
-quarantine_tsd_boot
-quarantine_tsd_cleanup_wrapper
-quarantine_tsd_get
-quarantine_tsd_get_wrapper
-quarantine_tsd_init_head
-quarantine_tsd_set
register_zone
+rtree_child_read
+rtree_child_read_hard
+rtree_child_tryread
rtree_delete
rtree_get
-rtree_get_locked
rtree_new
-rtree_postfork_child
-rtree_postfork_parent
-rtree_prefork
+rtree_node_valid
rtree_set
+rtree_start_level
+rtree_subkey
+rtree_subtree_read
+rtree_subtree_read_hard
+rtree_subtree_tryread
+rtree_val_read
+rtree_val_write
s2u
+s2u_compute
+s2u_lookup
sa2u
set_errno
-small_size2bin
+size2index
+size2index_compute
+size2index_lookup
+size2index_tab
stats_cactive
stats_cactive_add
stats_cactive_get
stats_cactive_sub
-stats_chunks
stats_print
tcache_alloc_easy
tcache_alloc_large
@@ -359,55 +433,67 @@ tcache_alloc_small
tcache_alloc_small_hard
tcache_arena_associate
tcache_arena_dissociate
+tcache_arena_reassociate
tcache_bin_flush_large
tcache_bin_flush_small
tcache_bin_info
-tcache_boot0
-tcache_boot1
-tcache_booted
+tcache_boot
+tcache_cleanup
tcache_create
tcache_dalloc_large
tcache_dalloc_small
-tcache_destroy
-tcache_enabled_booted
+tcache_enabled_cleanup
tcache_enabled_get
-tcache_enabled_initialized
tcache_enabled_set
-tcache_enabled_tls
-tcache_enabled_tsd
-tcache_enabled_tsd_boot
-tcache_enabled_tsd_cleanup_wrapper
-tcache_enabled_tsd_get
-tcache_enabled_tsd_get_wrapper
-tcache_enabled_tsd_init_head
-tcache_enabled_tsd_set
tcache_event
tcache_event_hard
tcache_flush
tcache_get
-tcache_initialized
+tcache_get_hard
tcache_maxclass
+tcaches
tcache_salloc
+tcaches_create
+tcaches_destroy
+tcaches_flush
+tcaches_get
tcache_stats_merge
-tcache_thread_cleanup
-tcache_tls
-tcache_tsd
-tcache_tsd_boot
-tcache_tsd_cleanup_wrapper
-tcache_tsd_get
-tcache_tsd_get_wrapper
-tcache_tsd_init_head
-tcache_tsd_set
-thread_allocated_booted
-thread_allocated_initialized
-thread_allocated_tls
-thread_allocated_tsd
-thread_allocated_tsd_boot
-thread_allocated_tsd_cleanup_wrapper
-thread_allocated_tsd_get
-thread_allocated_tsd_get_wrapper
-thread_allocated_tsd_init_head
-thread_allocated_tsd_set
+thread_allocated_cleanup
+thread_deallocated_cleanup
+tsd_arena_get
+tsd_arena_set
+tsd_boot
+tsd_boot0
+tsd_boot1
+tsd_booted
+tsd_cleanup
+tsd_cleanup_wrapper
+tsd_fetch
+tsd_get
+tsd_wrapper_get
+tsd_wrapper_set
+tsd_initialized
tsd_init_check_recursion
tsd_init_finish
+tsd_init_head
+tsd_nominal
+tsd_quarantine_get
+tsd_quarantine_set
+tsd_set
+tsd_tcache_enabled_get
+tsd_tcache_enabled_set
+tsd_tcache_get
+tsd_tcache_set
+tsd_tls
+tsd_tsd
+tsd_prof_tdata_get
+tsd_prof_tdata_set
+tsd_thread_allocated_get
+tsd_thread_allocated_set
+tsd_thread_deallocated_get
+tsd_thread_deallocated_set
u2rz
+valgrind_freelike_block
+valgrind_make_mem_defined
+valgrind_make_mem_noaccess
+valgrind_make_mem_undefined
diff --git a/deps/jemalloc/include/jemalloc/internal/prng.h b/deps/jemalloc/include/jemalloc/internal/prng.h
index 7b2b06512..216d0ef47 100644
--- a/deps/jemalloc/include/jemalloc/internal/prng.h
+++ b/deps/jemalloc/include/jemalloc/internal/prng.h
@@ -15,7 +15,7 @@
* See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
*
* This choice of m has the disadvantage that the quality of the bits is
- * proportional to bit position. For example. the lowest bit has a cycle of 2,
+ * proportional to bit position. For example, the lowest bit has a cycle of 2,
* the next has a cycle of 4, etc. For this reason, we prefer to use the upper
* bits.
*
@@ -26,22 +26,22 @@
* const uint32_t a, c : See above discussion.
*/
#define prng32(r, lg_range, state, a, c) do { \
- assert(lg_range > 0); \
- assert(lg_range <= 32); \
+ assert((lg_range) > 0); \
+ assert((lg_range) <= 32); \
\
r = (state * (a)) + (c); \
state = r; \
- r >>= (32 - lg_range); \
+ r >>= (32 - (lg_range)); \
} while (false)
/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */
#define prng64(r, lg_range, state, a, c) do { \
- assert(lg_range > 0); \
- assert(lg_range <= 64); \
+ assert((lg_range) > 0); \
+ assert((lg_range) <= 64); \
\
r = (state * (a)) + (c); \
state = r; \
- r >>= (64 - lg_range); \
+ r >>= (64 - (lg_range)); \
} while (false)
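/*
 * Illustrative only, not part of the diff: prng32()/prng64() update the
 * caller-owned state in place and leave lg_range pseudo-random bits in r,
 * taken from the high end of the product for the bit-quality reasons
 * described above.  Hypothetical use (the a and c constants below are the
 * classic ANSI C LCG parameters, chosen only as placeholders that satisfy
 * the stated constraints, not the values jemalloc itself uses):
 */
static uint32_t
prng32_demo(uint32_t *state)
{
	uint32_t r;

	prng32(r, 8, *state, 1103515245, 12345);	/* 8 pseudo-random bits. */
	return (r);
}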
#endif /* JEMALLOC_H_TYPES */
diff --git a/deps/jemalloc/include/jemalloc/internal/prof.h b/deps/jemalloc/include/jemalloc/internal/prof.h
index 6f162d21e..e5198c3e8 100644
--- a/deps/jemalloc/include/jemalloc/internal/prof.h
+++ b/deps/jemalloc/include/jemalloc/internal/prof.h
@@ -3,8 +3,8 @@
typedef struct prof_bt_s prof_bt_t;
typedef struct prof_cnt_s prof_cnt_t;
-typedef struct prof_thr_cnt_s prof_thr_cnt_t;
-typedef struct prof_ctx_s prof_ctx_t;
+typedef struct prof_tctx_s prof_tctx_t;
+typedef struct prof_gctx_s prof_gctx_t;
typedef struct prof_tdata_s prof_tdata_t;
/* Option defaults. */
@@ -23,9 +23,6 @@ typedef struct prof_tdata_s prof_tdata_t;
*/
#define PROF_BT_MAX 128
-/* Maximum number of backtraces to store in each per thread LRU cache. */
-#define PROF_TCMAX 1024
-
/* Initial hash table size. */
#define PROF_CKH_MINITEMS 64
@@ -36,12 +33,18 @@ typedef struct prof_tdata_s prof_tdata_t;
#define PROF_PRINTF_BUFSIZE 128
/*
- * Number of mutexes shared among all ctx's. No space is allocated for these
+ * Number of mutexes shared among all gctx's. No space is allocated for these
* unless profiling is enabled, so it's okay to over-provision.
*/
#define PROF_NCTX_LOCKS 1024
/*
+ * Number of mutexes shared among all tdata's. No space is allocated for these
+ * unless profiling is enabled, so it's okay to over-provision.
+ */
+#define PROF_NTDATA_LOCKS 256
+
+/*
* prof_tdata pointers close to NULL are used to encode state information that
* is used for cleaning up during thread shutdown.
*/
@@ -63,141 +66,186 @@ struct prof_bt_s {
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
typedef struct {
prof_bt_t *bt;
- unsigned nignore;
unsigned max;
} prof_unwind_data_t;
#endif
struct prof_cnt_s {
- /*
- * Profiling counters. An allocation/deallocation pair can operate on
- * different prof_thr_cnt_t objects that are linked into the same
- * prof_ctx_t cnts_ql, so it is possible for the cur* counters to go
- * negative. In principle it is possible for the *bytes counters to
- * overflow/underflow, but a general solution would require something
- * like 128-bit counters; this implementation doesn't bother to solve
- * that problem.
- */
- int64_t curobjs;
- int64_t curbytes;
+ /* Profiling counters. */
+ uint64_t curobjs;
+ uint64_t curbytes;
uint64_t accumobjs;
uint64_t accumbytes;
};
-struct prof_thr_cnt_s {
- /* Linkage into prof_ctx_t's cnts_ql. */
- ql_elm(prof_thr_cnt_t) cnts_link;
+typedef enum {
+ prof_tctx_state_initializing,
+ prof_tctx_state_nominal,
+ prof_tctx_state_dumping,
+ prof_tctx_state_purgatory /* Dumper must finish destroying. */
+} prof_tctx_state_t;
- /* Linkage into thread's LRU. */
- ql_elm(prof_thr_cnt_t) lru_link;
+struct prof_tctx_s {
+ /* Thread data for thread that performed the allocation. */
+ prof_tdata_t *tdata;
/*
- * Associated context. If a thread frees an object that it did not
- * allocate, it is possible that the context is not cached in the
- * thread's hash table, in which case it must be able to look up the
- * context, insert a new prof_thr_cnt_t into the thread's hash table,
- * and link it into the prof_ctx_t's cnts_ql.
+ * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
+ * defunct during teardown.
*/
- prof_ctx_t *ctx;
+ uint64_t thr_uid;
+ uint64_t thr_discrim;
+
+ /* Profiling counters, protected by tdata->lock. */
+ prof_cnt_t cnts;
+
+ /* Associated global context. */
+ prof_gctx_t *gctx;
/*
- * Threads use memory barriers to update the counters. Since there is
- * only ever one writer, the only challenge is for the reader to get a
- * consistent read of the counters.
- *
- * The writer uses this series of operations:
- *
- * 1) Increment epoch to an odd number.
- * 2) Update counters.
- * 3) Increment epoch to an even number.
- *
- * The reader must assure 1) that the epoch is even while it reads the
- * counters, and 2) that the epoch doesn't change between the time it
- * starts and finishes reading the counters.
+ * UID that distinguishes multiple tctx's created by the same thread,
+ * but coexisting in gctx->tctxs. There are two ways that such
+ * coexistence can occur:
+ * - A dumper thread can cause a tctx to be retained in the purgatory
+ * state.
+ * - Although a single "producer" thread must create all tctx's which
+ * share the same thr_uid, multiple "consumers" can each concurrently
+ * execute portions of prof_tctx_destroy(). prof_tctx_destroy() only
+ * gets called once each time cnts.cur{objs,bytes} drop to 0, but this
+ * threshold can be hit again before the first consumer finishes
+ * executing prof_tctx_destroy().
*/
- unsigned epoch;
+ uint64_t tctx_uid;
- /* Profiling counters. */
- prof_cnt_t cnts;
-};
+ /* Linkage into gctx's tctxs. */
+ rb_node(prof_tctx_t) tctx_link;
-struct prof_ctx_s {
- /* Associated backtrace. */
- prof_bt_t *bt;
+ /*
+ * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
+ * sample vs destroy race.
+ */
+ bool prepared;
+
+ /* Current dump-related state, protected by gctx->lock. */
+ prof_tctx_state_t state;
+
+ /*
+ * Copy of cnts snapshotted during early dump phase, protected by
+ * dump_mtx.
+ */
+ prof_cnt_t dump_cnts;
+};
+typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;
- /* Protects nlimbo, cnt_merged, and cnts_ql. */
+struct prof_gctx_s {
+ /* Protects nlimbo, cnt_summed, and tctxs. */
malloc_mutex_t *lock;
/*
- * Number of threads that currently cause this ctx to be in a state of
+ * Number of threads that currently cause this gctx to be in a state of
* limbo due to one of:
- * - Initializing per thread counters associated with this ctx.
- * - Preparing to destroy this ctx.
- * - Dumping a heap profile that includes this ctx.
+ * - Initializing this gctx.
+ * - Initializing per thread counters associated with this gctx.
+ * - Preparing to destroy this gctx.
+ * - Dumping a heap profile that includes this gctx.
* nlimbo must be 1 (single destroyer) in order to safely destroy the
- * ctx.
+ * gctx.
*/
unsigned nlimbo;
- /* Temporary storage for summation during dump. */
- prof_cnt_t cnt_summed;
-
- /* When threads exit, they merge their stats into cnt_merged. */
- prof_cnt_t cnt_merged;
-
/*
- * List of profile counters, one for each thread that has allocated in
+ * Tree of profile counters, one for each thread that has allocated in
* this context.
*/
- ql_head(prof_thr_cnt_t) cnts_ql;
+ prof_tctx_tree_t tctxs;
+
+ /* Linkage for tree of contexts to be dumped. */
+ rb_node(prof_gctx_t) dump_link;
+
+ /* Temporary storage for summation during dump. */
+ prof_cnt_t cnt_summed;
+
+ /* Associated backtrace. */
+ prof_bt_t bt;
- /* Linkage for list of contexts to be dumped. */
- ql_elm(prof_ctx_t) dump_link;
+ /* Backtrace vector, variable size, referred to by bt. */
+ void *vec[1];
};
-typedef ql_head(prof_ctx_t) prof_ctx_list_t;
+typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;
struct prof_tdata_s {
+ malloc_mutex_t *lock;
+
+ /* Monotonically increasing unique thread identifier. */
+ uint64_t thr_uid;
+
/*
- * Hash of (prof_bt_t *)-->(prof_thr_cnt_t *). Each thread keeps a
- * cache of backtraces, with associated thread-specific prof_thr_cnt_t
- * objects. Other threads may read the prof_thr_cnt_t contents, but no
- * others will ever write them.
- *
- * Upon thread exit, the thread must merge all the prof_thr_cnt_t
- * counter data into the associated prof_ctx_t objects, and unlink/free
- * the prof_thr_cnt_t objects.
+ * Monotonically increasing discriminator among tdata structures
+ * associated with the same thr_uid.
*/
- ckh_t bt2cnt;
+ uint64_t thr_discrim;
- /* LRU for contents of bt2cnt. */
- ql_head(prof_thr_cnt_t) lru_ql;
+ /* Included in heap profile dumps if non-NULL. */
+ char *thread_name;
- /* Backtrace vector, used for calls to prof_backtrace(). */
- void **vec;
+ bool attached;
+ bool expired;
+
+ rb_node(prof_tdata_t) tdata_link;
+
+ /*
+ * Counter used to initialize prof_tctx_t's tctx_uid. No locking is
+ * necessary when incrementing this field, because only one thread ever
+ * does so.
+ */
+ uint64_t tctx_uid_next;
+
+ /*
+ * Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks
+ * backtraces for which it has non-zero allocation/deallocation counters
+ * associated with thread-specific prof_tctx_t objects. Other threads
+ * may write to prof_tctx_t contents when freeing associated objects.
+ */
+ ckh_t bt2tctx;
/* Sampling state. */
uint64_t prng_state;
- uint64_t threshold;
- uint64_t accum;
+ uint64_t bytes_until_sample;
/* State used to avoid dumping while operating on prof internals. */
bool enq;
bool enq_idump;
bool enq_gdump;
+
+ /*
+ * Set to true during an early dump phase for tdata's which are
+ * currently being dumped. New threads' tdata's have this initialized
+ * to false so that they aren't accidentally included in later dump
+ * phases.
+ */
+ bool dumping;
+
+ /*
+ * True if profiling is active for this tdata's thread
+ * (thread.prof.active mallctl).
+ */
+ bool active;
+
+ /* Temporary storage for summation during dump. */
+ prof_cnt_t cnt_summed;
+
+ /* Backtrace vector, used for calls to prof_backtrace(). */
+ void *vec[PROF_BT_MAX];
};
+typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern bool opt_prof;
-/*
- * Even if opt_prof is true, sampling can be temporarily disabled by setting
- * opt_prof_active to false. No locking is used when updating opt_prof_active,
- * so there are no guarantees regarding how long it will take for all threads
- * to notice state changes.
- */
extern bool opt_prof_active;
+extern bool opt_prof_thread_active_init;
extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */
extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
extern bool opt_prof_gdump; /* High-water memory dumping. */
@@ -211,6 +259,12 @@ extern char opt_prof_prefix[
#endif
1];
+/* Accessed via prof_active_[gs]et{_unlocked,}(). */
+extern bool prof_active;
+
+/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
+extern bool prof_gdump_val;
+
/*
* Profile dump interval, measured in bytes allocated. Each arena triggers a
* profile dump when it reaches this threshold. The effect is that the
@@ -221,391 +275,269 @@ extern char opt_prof_prefix[
extern uint64_t prof_interval;
/*
- * If true, promote small sampled objects to large objects, since small run
- * headers do not have embedded profile context pointers.
+ * Initialized as opt_lg_prof_sample, and potentially modified during profiling
+ * resets.
*/
-extern bool prof_promote;
+extern size_t lg_prof_sample;
+void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
+void prof_malloc_sample_object(const void *ptr, size_t usize,
+ prof_tctx_t *tctx);
+void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
void bt_init(prof_bt_t *bt, void **vec);
-void prof_backtrace(prof_bt_t *bt, unsigned nignore);
-prof_thr_cnt_t *prof_lookup(prof_bt_t *bt);
+void prof_backtrace(prof_bt_t *bt);
+prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
#ifdef JEMALLOC_JET
+size_t prof_tdata_count(void);
size_t prof_bt_count(void);
+const prof_cnt_t *prof_cnt_all(void);
typedef int (prof_dump_open_t)(bool, const char *);
extern prof_dump_open_t *prof_dump_open;
+typedef bool (prof_dump_header_t)(bool, const prof_cnt_t *);
+extern prof_dump_header_t *prof_dump_header;
#endif
void prof_idump(void);
bool prof_mdump(const char *filename);
void prof_gdump(void);
-prof_tdata_t *prof_tdata_init(void);
-void prof_tdata_cleanup(void *arg);
+prof_tdata_t *prof_tdata_init(tsd_t *tsd);
+prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
+void prof_reset(tsd_t *tsd, size_t lg_sample);
+void prof_tdata_cleanup(tsd_t *tsd);
+const char *prof_thread_name_get(void);
+bool prof_active_get(void);
+bool prof_active_set(bool active);
+int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
+bool prof_thread_active_get(void);
+bool prof_thread_active_set(bool active);
+bool prof_thread_active_init_get(void);
+bool prof_thread_active_init_set(bool active_init);
+bool prof_gdump_get(void);
+bool prof_gdump_set(bool active);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(void);
void prof_prefork(void);
void prof_postfork_parent(void);
void prof_postfork_child(void);
+void prof_sample_threshold_update(prof_tdata_t *tdata);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
-#define PROF_ALLOC_PREP(nignore, size, ret) do { \
- prof_tdata_t *prof_tdata; \
- prof_bt_t bt; \
- \
- assert(size == s2u(size)); \
- \
- prof_tdata = prof_tdata_get(true); \
- if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) { \
- if (prof_tdata != NULL) \
- ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
- else \
- ret = NULL; \
- break; \
- } \
- \
- if (opt_prof_active == false) { \
- /* Sampling is currently inactive, so avoid sampling. */\
- ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
- } else if (opt_lg_prof_sample == 0) { \
- /* Don't bother with sampling logic, since sampling */\
- /* interval is 1. */\
- bt_init(&bt, prof_tdata->vec); \
- prof_backtrace(&bt, nignore); \
- ret = prof_lookup(&bt); \
- } else { \
- if (prof_tdata->threshold == 0) { \
- /* Initialize. Seed the prng differently for */\
- /* each thread. */\
- prof_tdata->prng_state = \
- (uint64_t)(uintptr_t)&size; \
- prof_sample_threshold_update(prof_tdata); \
- } \
- \
- /* Determine whether to capture a backtrace based on */\
- /* whether size is enough for prof_accum to reach */\
- /* prof_tdata->threshold. However, delay updating */\
- /* these variables until prof_{m,re}alloc(), because */\
- /* we don't know for sure that the allocation will */\
- /* succeed. */\
- /* */\
- /* Use subtraction rather than addition to avoid */\
- /* potential integer overflow. */\
- if (size >= prof_tdata->threshold - \
- prof_tdata->accum) { \
- bt_init(&bt, prof_tdata->vec); \
- prof_backtrace(&bt, nignore); \
- ret = prof_lookup(&bt); \
- } else \
- ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
- } \
-} while (0)
-
#ifndef JEMALLOC_ENABLE_INLINE
-malloc_tsd_protos(JEMALLOC_ATTR(unused), prof_tdata, prof_tdata_t *)
-
-prof_tdata_t *prof_tdata_get(bool create);
-void prof_sample_threshold_update(prof_tdata_t *prof_tdata);
-prof_ctx_t *prof_ctx_get(const void *ptr);
-void prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx);
-bool prof_sample_accum_update(size_t size);
-void prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt);
-void prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt,
- size_t old_usize, prof_ctx_t *old_ctx);
-void prof_free(const void *ptr, size_t size);
+bool prof_active_get_unlocked(void);
+bool prof_gdump_get_unlocked(void);
+prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create);
+bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit,
+ prof_tdata_t **tdata_out);
+prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active,
+ bool update);
+prof_tctx_t *prof_tctx_get(const void *ptr);
+void prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
+void prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
+ prof_tctx_t *tctx);
+void prof_malloc_sample_object(const void *ptr, size_t usize,
+ prof_tctx_t *tctx);
+void prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx);
+void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize,
+ prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr,
+ size_t old_usize, prof_tctx_t *old_tctx);
+void prof_free(tsd_t *tsd, const void *ptr, size_t usize);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
-/* Thread-specific backtrace cache, used to reduce bt2ctx contention. */
-malloc_tsd_externs(prof_tdata, prof_tdata_t *)
-malloc_tsd_funcs(JEMALLOC_INLINE, prof_tdata, prof_tdata_t *, NULL,
- prof_tdata_cleanup)
+JEMALLOC_ALWAYS_INLINE bool
+prof_active_get_unlocked(void)
+{
+
+ /*
+ * Even if opt_prof is true, sampling can be temporarily disabled by
+ * setting prof_active to false. No locking is used when reading
+ * prof_active in the fast path, so there are no guarantees regarding
+ * how long it will take for all threads to notice state changes.
+ */
+ return (prof_active);
+}
-JEMALLOC_INLINE prof_tdata_t *
-prof_tdata_get(bool create)
+JEMALLOC_ALWAYS_INLINE bool
+prof_gdump_get_unlocked(void)
{
- prof_tdata_t *prof_tdata;
+
+ /*
+ * No locking is used when reading prof_gdump_val in the fast path, so
+ * there are no guarantees regarding how long it will take for all
+ * threads to notice state changes.
+ */
+ return (prof_gdump_val);
+}
+
+JEMALLOC_ALWAYS_INLINE prof_tdata_t *
+prof_tdata_get(tsd_t *tsd, bool create)
+{
+ prof_tdata_t *tdata;
cassert(config_prof);
- prof_tdata = *prof_tdata_tsd_get();
- if (create && prof_tdata == NULL)
- prof_tdata = prof_tdata_init();
+ tdata = tsd_prof_tdata_get(tsd);
+ if (create) {
+ if (unlikely(tdata == NULL)) {
+ if (tsd_nominal(tsd)) {
+ tdata = prof_tdata_init(tsd);
+ tsd_prof_tdata_set(tsd, tdata);
+ }
+ } else if (unlikely(tdata->expired)) {
+ tdata = prof_tdata_reinit(tsd, tdata);
+ tsd_prof_tdata_set(tsd, tdata);
+ }
+ assert(tdata == NULL || tdata->attached);
+ }
- return (prof_tdata);
+ return (tdata);
}
-JEMALLOC_INLINE void
-prof_sample_threshold_update(prof_tdata_t *prof_tdata)
+JEMALLOC_ALWAYS_INLINE prof_tctx_t *
+prof_tctx_get(const void *ptr)
{
- /*
- * The body of this function is compiled out unless heap profiling is
- * enabled, so that it is possible to compile jemalloc with floating
- * point support completely disabled. Avoiding floating point code is
- * important on memory-constrained systems, but it also enables a
- * workaround for versions of glibc that don't properly save/restore
- * floating point registers during dynamic lazy symbol loading (which
- * internally calls into whatever malloc implementation happens to be
- * integrated into the application). Note that some compilers (e.g.
- * gcc 4.8) may use floating point registers for fast memory moves, so
- * jemalloc must be compiled with such optimizations disabled (e.g.
- * -mno-sse) in order for the workaround to be complete.
- */
-#ifdef JEMALLOC_PROF
- uint64_t r;
- double u;
cassert(config_prof);
+ assert(ptr != NULL);
- /*
- * Compute sample threshold as a geometrically distributed random
- * variable with mean (2^opt_lg_prof_sample).
- *
- * __ __
- * | log(u) | 1
- * prof_tdata->threshold = | -------- |, where p = -------------------
- * | log(1-p) | opt_lg_prof_sample
- * 2
- *
- * For more information on the math, see:
- *
- * Non-Uniform Random Variate Generation
- * Luc Devroye
- * Springer-Verlag, New York, 1986
- * pp 500
- * (http://luc.devroye.org/rnbookindex.html)
- */
- prng64(r, 53, prof_tdata->prng_state,
- UINT64_C(6364136223846793005), UINT64_C(1442695040888963407));
- u = (double)r * (1.0/9007199254740992.0L);
- prof_tdata->threshold = (uint64_t)(log(u) /
- log(1.0 - (1.0 / (double)((uint64_t)1U << opt_lg_prof_sample))))
- + (uint64_t)1U;
-#endif
+ return (arena_prof_tctx_get(ptr));
}
-JEMALLOC_INLINE prof_ctx_t *
-prof_ctx_get(const void *ptr)
+JEMALLOC_ALWAYS_INLINE void
+prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
{
- prof_ctx_t *ret;
- arena_chunk_t *chunk;
cassert(config_prof);
assert(ptr != NULL);
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (chunk != ptr) {
- /* Region. */
- ret = arena_prof_ctx_get(ptr);
- } else
- ret = huge_prof_ctx_get(ptr);
-
- return (ret);
+ arena_prof_tctx_set(ptr, usize, tctx);
}
-JEMALLOC_INLINE void
-prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx)
+JEMALLOC_ALWAYS_INLINE void
+prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
+ prof_tctx_t *old_tctx)
{
- arena_chunk_t *chunk;
cassert(config_prof);
assert(ptr != NULL);
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (chunk != ptr) {
- /* Region. */
- arena_prof_ctx_set(ptr, usize, ctx);
- } else
- huge_prof_ctx_set(ptr, ctx);
+ arena_prof_tctx_reset(ptr, usize, old_ptr, old_tctx);
}
-JEMALLOC_INLINE bool
-prof_sample_accum_update(size_t size)
+JEMALLOC_ALWAYS_INLINE bool
+prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
+ prof_tdata_t **tdata_out)
{
- prof_tdata_t *prof_tdata;
+ prof_tdata_t *tdata;
cassert(config_prof);
- /* Sampling logic is unnecessary if the interval is 1. */
- assert(opt_lg_prof_sample != 0);
- prof_tdata = prof_tdata_get(false);
- if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
+ tdata = prof_tdata_get(tsd, true);
+ if ((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
+ tdata = NULL;
+
+ if (tdata_out != NULL)
+ *tdata_out = tdata;
+
+ if (tdata == NULL)
return (true);
- /* Take care to avoid integer overflow. */
- if (size >= prof_tdata->threshold - prof_tdata->accum) {
- prof_tdata->accum -= (prof_tdata->threshold - size);
- /* Compute new sample threshold. */
- prof_sample_threshold_update(prof_tdata);
- while (prof_tdata->accum >= prof_tdata->threshold) {
- prof_tdata->accum -= prof_tdata->threshold;
- prof_sample_threshold_update(prof_tdata);
- }
- return (false);
- } else {
- prof_tdata->accum += size;
+ if (tdata->bytes_until_sample >= usize) {
+ if (update)
+ tdata->bytes_until_sample -= usize;
return (true);
+ } else {
+ /* Compute new sample threshold. */
+ if (update)
+ prof_sample_threshold_update(tdata);
+ return (!tdata->active);
}
}
-JEMALLOC_INLINE void
-prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt)
+JEMALLOC_ALWAYS_INLINE prof_tctx_t *
+prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update)
+{
+ prof_tctx_t *ret;
+ prof_tdata_t *tdata;
+ prof_bt_t bt;
+
+ assert(usize == s2u(usize));
+
+ if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
+ &tdata)))
+ ret = (prof_tctx_t *)(uintptr_t)1U;
+ else {
+ bt_init(&bt, tdata->vec);
+ prof_backtrace(&bt);
+ ret = prof_lookup(tsd, &bt);
+ }
+
+ return (ret);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx)
{
cassert(config_prof);
assert(ptr != NULL);
assert(usize == isalloc(ptr, true));
- if (opt_lg_prof_sample != 0) {
- if (prof_sample_accum_update(usize)) {
- /*
- * Don't sample. For malloc()-like allocation, it is
- * always possible to tell in advance how large an
- * object's usable size will be, so there should never
- * be a difference between the usize passed to
- * PROF_ALLOC_PREP() and prof_malloc().
- */
- assert((uintptr_t)cnt == (uintptr_t)1U);
- }
- }
-
- if ((uintptr_t)cnt > (uintptr_t)1U) {
- prof_ctx_set(ptr, usize, cnt->ctx);
-
- cnt->epoch++;
- /*********/
- mb_write();
- /*********/
- cnt->cnts.curobjs++;
- cnt->cnts.curbytes += usize;
- if (opt_prof_accum) {
- cnt->cnts.accumobjs++;
- cnt->cnts.accumbytes += usize;
- }
- /*********/
- mb_write();
- /*********/
- cnt->epoch++;
- /*********/
- mb_write();
- /*********/
- } else
- prof_ctx_set(ptr, usize, (prof_ctx_t *)(uintptr_t)1U);
+ if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
+ prof_malloc_sample_object(ptr, usize, tctx);
+ else
+ prof_tctx_set(ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
}
-JEMALLOC_INLINE void
-prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt,
- size_t old_usize, prof_ctx_t *old_ctx)
+JEMALLOC_ALWAYS_INLINE void
+prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
+ bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
+ prof_tctx_t *old_tctx)
{
- prof_thr_cnt_t *told_cnt;
+ bool sampled, old_sampled;
cassert(config_prof);
- assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U);
+ assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
- if (ptr != NULL) {
+ if (prof_active && !updated && ptr != NULL) {
assert(usize == isalloc(ptr, true));
- if (opt_lg_prof_sample != 0) {
- if (prof_sample_accum_update(usize)) {
- /*
- * Don't sample. The usize passed to
- * PROF_ALLOC_PREP() was larger than what
- * actually got allocated, so a backtrace was
- * captured for this allocation, even though
- * its actual usize was insufficient to cross
- * the sample threshold.
- */
- cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
- }
- }
- }
-
- if ((uintptr_t)old_ctx > (uintptr_t)1U) {
- told_cnt = prof_lookup(old_ctx->bt);
- if (told_cnt == NULL) {
+ if (prof_sample_accum_update(tsd, usize, true, NULL)) {
/*
- * It's too late to propagate OOM for this realloc(),
- * so operate directly on old_cnt->ctx->cnt_merged.
+ * Don't sample. The usize passed to prof_alloc_prep()
+ * was larger than what actually got allocated, so a
+ * backtrace was captured for this allocation, even
+ * though its actual usize was insufficient to cross the
+ * sample threshold.
*/
- malloc_mutex_lock(old_ctx->lock);
- old_ctx->cnt_merged.curobjs--;
- old_ctx->cnt_merged.curbytes -= old_usize;
- malloc_mutex_unlock(old_ctx->lock);
- told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
+ tctx = (prof_tctx_t *)(uintptr_t)1U;
}
- } else
- told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
-
- if ((uintptr_t)told_cnt > (uintptr_t)1U)
- told_cnt->epoch++;
- if ((uintptr_t)cnt > (uintptr_t)1U) {
- prof_ctx_set(ptr, usize, cnt->ctx);
- cnt->epoch++;
- } else if (ptr != NULL)
- prof_ctx_set(ptr, usize, (prof_ctx_t *)(uintptr_t)1U);
- /*********/
- mb_write();
- /*********/
- if ((uintptr_t)told_cnt > (uintptr_t)1U) {
- told_cnt->cnts.curobjs--;
- told_cnt->cnts.curbytes -= old_usize;
}
- if ((uintptr_t)cnt > (uintptr_t)1U) {
- cnt->cnts.curobjs++;
- cnt->cnts.curbytes += usize;
- if (opt_prof_accum) {
- cnt->cnts.accumobjs++;
- cnt->cnts.accumbytes += usize;
- }
- }
- /*********/
- mb_write();
- /*********/
- if ((uintptr_t)told_cnt > (uintptr_t)1U)
- told_cnt->epoch++;
- if ((uintptr_t)cnt > (uintptr_t)1U)
- cnt->epoch++;
- /*********/
- mb_write(); /* Not strictly necessary. */
+
+ sampled = ((uintptr_t)tctx > (uintptr_t)1U);
+ old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
+
+ if (unlikely(sampled))
+ prof_malloc_sample_object(ptr, usize, tctx);
+ else
+ prof_tctx_reset(ptr, usize, old_ptr, old_tctx);
+
+ if (unlikely(old_sampled))
+ prof_free_sampled_object(tsd, old_usize, old_tctx);
}
-JEMALLOC_INLINE void
-prof_free(const void *ptr, size_t size)
+JEMALLOC_ALWAYS_INLINE void
+prof_free(tsd_t *tsd, const void *ptr, size_t usize)
{
- prof_ctx_t *ctx = prof_ctx_get(ptr);
+ prof_tctx_t *tctx = prof_tctx_get(ptr);
cassert(config_prof);
+ assert(usize == isalloc(ptr, true));
- if ((uintptr_t)ctx > (uintptr_t)1) {
- prof_thr_cnt_t *tcnt;
- assert(size == isalloc(ptr, true));
- tcnt = prof_lookup(ctx->bt);
-
- if (tcnt != NULL) {
- tcnt->epoch++;
- /*********/
- mb_write();
- /*********/
- tcnt->cnts.curobjs--;
- tcnt->cnts.curbytes -= size;
- /*********/
- mb_write();
- /*********/
- tcnt->epoch++;
- /*********/
- mb_write();
- /*********/
- } else {
- /*
- * OOM during free() cannot be propagated, so operate
- * directly on cnt->ctx->cnt_merged.
- */
- malloc_mutex_lock(ctx->lock);
- ctx->cnt_merged.curobjs--;
- ctx->cnt_merged.curbytes -= size;
- malloc_mutex_unlock(ctx->lock);
- }
- }
+ if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
+ prof_free_sampled_object(tsd, usize, tctx);
}
#endif
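
The inline functions above encode the sampling decision in the tctx pointer itself: 0 (NULL) and 1 can never be real prof_tctx_t addresses, so (prof_tctx_t *)(uintptr_t)1U tags an unsampled allocation and the (uintptr_t)tctx > (uintptr_t)1U tests in prof_malloc(), prof_realloc() and prof_free() select the sampled path. A minimal stand-alone restatement of that convention (the helper names below are illustrative, not part of jemalloc):

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct prof_tctx_s prof_tctx_t;	/* opaque stand-in for the struct above */

/* Hypothetical helpers restating the sentinel convention. */
#define TCTX_UNSAMPLED	((prof_tctx_t *)(uintptr_t)1U)

static bool
tctx_is_sampled(const prof_tctx_t *tctx)
{

	/* Pointer values 0 and 1 never refer to a real context. */
	return ((uintptr_t)tctx > (uintptr_t)1U);
}

int
main(void)
{
	prof_tctx_t *tctx = TCTX_UNSAMPLED;

	return (tctx_is_sampled(tctx) ? EXIT_FAILURE : EXIT_SUCCESS);
}
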
diff --git a/deps/jemalloc/include/jemalloc/internal/ql.h b/deps/jemalloc/include/jemalloc/internal/ql.h
index f70c5f6f3..1834bb855 100644
--- a/deps/jemalloc/include/jemalloc/internal/ql.h
+++ b/deps/jemalloc/include/jemalloc/internal/ql.h
@@ -1,6 +1,4 @@
-/*
- * List definitions.
- */
+/* List definitions. */
#define ql_head(a_type) \
struct { \
a_type *qlh_first; \
diff --git a/deps/jemalloc/include/jemalloc/internal/qr.h b/deps/jemalloc/include/jemalloc/internal/qr.h
index 602944b9b..0fbaec25e 100644
--- a/deps/jemalloc/include/jemalloc/internal/qr.h
+++ b/deps/jemalloc/include/jemalloc/internal/qr.h
@@ -40,8 +40,10 @@ struct { \
(a_qr_b)->a_field.qre_prev = t; \
} while (0)
-/* qr_meld() and qr_split() are functionally equivalent, so there's no need to
- * have two copies of the code. */
+/*
+ * qr_meld() and qr_split() are functionally equivalent, so there's no need to
+ * have two copies of the code.
+ */
#define qr_split(a_qr_a, a_qr_b, a_field) \
qr_meld((a_qr_a), (a_qr_b), a_field)
diff --git a/deps/jemalloc/include/jemalloc/internal/quarantine.h b/deps/jemalloc/include/jemalloc/internal/quarantine.h
index 16f677f73..ae607399f 100644
--- a/deps/jemalloc/include/jemalloc/internal/quarantine.h
+++ b/deps/jemalloc/include/jemalloc/internal/quarantine.h
@@ -29,36 +29,29 @@ struct quarantine_s {
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-quarantine_t *quarantine_init(size_t lg_maxobjs);
-void quarantine(void *ptr);
-void quarantine_cleanup(void *arg);
-bool quarantine_boot(void);
+void quarantine_alloc_hook_work(tsd_t *tsd);
+void quarantine(tsd_t *tsd, void *ptr);
+void quarantine_cleanup(tsd_t *tsd);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
-malloc_tsd_protos(JEMALLOC_ATTR(unused), quarantine, quarantine_t *)
-
void quarantine_alloc_hook(void);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_))
-malloc_tsd_externs(quarantine, quarantine_t *)
-malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, quarantine, quarantine_t *, NULL,
- quarantine_cleanup)
-
JEMALLOC_ALWAYS_INLINE void
quarantine_alloc_hook(void)
{
- quarantine_t *quarantine;
+ tsd_t *tsd;
assert(config_fill && opt_quarantine);
- quarantine = *quarantine_tsd_get();
- if (quarantine == NULL)
- quarantine_init(LG_MAXOBJS_INIT);
+ tsd = tsd_fetch();
+ if (tsd_quarantine_get(tsd) == NULL)
+ quarantine_alloc_hook_work(tsd);
}
#endif
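
quarantine_alloc_hook() above now keeps only the fast path inline: fetch the tsd and test the per-thread quarantine slot for NULL, deferring construction to quarantine_alloc_hook_work(). A generic, stand-alone analogue of that lazy per-thread initialization (not jemalloc code; the struct contents and the calloc-based slow path are illustrative):

#include <stddef.h>
#include <stdlib.h>

typedef struct {
	size_t curbytes;	/* placeholder fields */
	size_t curobjs;
} quarantine_t;

static _Thread_local quarantine_t *thread_quarantine;

static quarantine_t *
quarantine_init_slow(void)
{

	/* Stands in for quarantine_alloc_hook_work(); the real slow path
	 * allocates through jemalloc's internals and registers a cleanup. */
	return (calloc(1, sizeof(quarantine_t)));
}

static void
quarantine_hook(void)
{

	if (thread_quarantine == NULL)
		thread_quarantine = quarantine_init_slow();
}

int
main(void)
{

	quarantine_hook();
	return (thread_quarantine == NULL);
}
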
diff --git a/deps/jemalloc/include/jemalloc/internal/rb.h b/deps/jemalloc/include/jemalloc/internal/rb.h
index 423802eb2..2ca8e5933 100644
--- a/deps/jemalloc/include/jemalloc/internal/rb.h
+++ b/deps/jemalloc/include/jemalloc/internal/rb.h
@@ -158,6 +158,8 @@ struct { \
#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \
a_attr void \
a_prefix##new(a_rbt_type *rbtree); \
+a_attr bool \
+a_prefix##empty(a_rbt_type *rbtree); \
a_attr a_type * \
a_prefix##first(a_rbt_type *rbtree); \
a_attr a_type * \
@@ -198,7 +200,7 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
* int (a_cmp *)(a_type *a_node, a_type *a_other);
* ^^^^^^
* or a_key
- * Interpretation of comparision function return values:
+ * Interpretation of comparison function return values:
* -1 : a_node < a_other
* 0 : a_node == a_other
* 1 : a_node > a_other
@@ -224,6 +226,13 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
* Args:
* tree: Pointer to an uninitialized red-black tree object.
*
+ * static bool
+ * ex_empty(ex_t *tree);
+ * Description: Determine whether tree is empty.
+ * Args:
+ * tree: Pointer to an initialized red-black tree object.
+ * Ret: True if tree is empty, false otherwise.
+ *
* static ex_node_t *
* ex_first(ex_t *tree);
* static ex_node_t *
@@ -309,6 +318,10 @@ a_attr void \
a_prefix##new(a_rbt_type *rbtree) { \
rb_new(a_type, a_field, rbtree); \
} \
+a_attr bool \
+a_prefix##empty(a_rbt_type *rbtree) { \
+ return (rbtree->rbt_root == &rbtree->rbt_nil); \
+} \
a_attr a_type * \
a_prefix##first(a_rbt_type *rbtree) { \
a_type *ret; \
@@ -580,7 +593,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
if (left != &rbtree->rbt_nil) { \
/* node has no successor, but it has a left child. */\
/* Splice node out, without losing the left child. */\
- assert(rbtn_red_get(a_type, a_field, node) == false); \
+ assert(!rbtn_red_get(a_type, a_field, node)); \
assert(rbtn_red_get(a_type, a_field, left)); \
rbtn_black_set(a_type, a_field, left); \
if (pathp == path) { \
@@ -616,8 +629,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
if (pathp->cmp < 0) { \
rbtn_left_set(a_type, a_field, pathp->node, \
pathp[1].node); \
- assert(rbtn_red_get(a_type, a_field, pathp[1].node) \
- == false); \
+ assert(!rbtn_red_get(a_type, a_field, pathp[1].node)); \
if (rbtn_red_get(a_type, a_field, pathp->node)) { \
a_type *right = rbtn_right_get(a_type, a_field, \
pathp->node); \
@@ -681,7 +693,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
/* Balance restored, but rotation modified */\
- /* subree root, which may actually be the tree */\
+ /* subtree root, which may actually be the tree */\
/* root. */\
if (pathp == path) { \
/* Set root. */ \
@@ -849,7 +861,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
} \
/* Set root. */ \
rbtree->rbt_root = path->node; \
- assert(rbtn_red_get(a_type, a_field, rbtree->rbt_root) == false); \
+ assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root)); \
} \
a_attr a_type * \
a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \
diff --git a/deps/jemalloc/include/jemalloc/internal/rtree.h b/deps/jemalloc/include/jemalloc/internal/rtree.h
index bc74769f5..28ae9d1dd 100644
--- a/deps/jemalloc/include/jemalloc/internal/rtree.h
+++ b/deps/jemalloc/include/jemalloc/internal/rtree.h
@@ -1,170 +1,292 @@
/*
* This radix tree implementation is tailored to the singular purpose of
- * tracking which chunks are currently owned by jemalloc. This functionality
- * is mandatory for OS X, where jemalloc must be able to respond to object
- * ownership queries.
+ * associating metadata with chunks that are currently owned by jemalloc.
*
*******************************************************************************
*/
#ifdef JEMALLOC_H_TYPES
+typedef struct rtree_node_elm_s rtree_node_elm_t;
+typedef struct rtree_level_s rtree_level_t;
typedef struct rtree_s rtree_t;
/*
- * Size of each radix tree node (must be a power of 2). This impacts tree
- * depth.
+ * RTREE_BITS_PER_LEVEL must be a power of two that is no larger than the
+ * machine address width.
*/
-#define RTREE_NODESIZE (1U << 16)
+#define LG_RTREE_BITS_PER_LEVEL 4
+#define RTREE_BITS_PER_LEVEL (ZU(1) << LG_RTREE_BITS_PER_LEVEL)
+#define RTREE_HEIGHT_MAX \
+ ((ZU(1) << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL)
-typedef void *(rtree_alloc_t)(size_t);
-typedef void (rtree_dalloc_t)(void *);
+/* Used for two-stage lock-free node initialization. */
+#define RTREE_NODE_INITIALIZING ((rtree_node_elm_t *)0x1)
+
+/*
+ * The node allocation callback function's argument is the number of contiguous
+ * rtree_node_elm_t structures to allocate, and the resulting memory must be
+ * zeroed.
+ */
+typedef rtree_node_elm_t *(rtree_node_alloc_t)(size_t);
+typedef void (rtree_node_dalloc_t)(rtree_node_elm_t *);
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
+struct rtree_node_elm_s {
+ union {
+ void *pun;
+ rtree_node_elm_t *child;
+ extent_node_t *val;
+ };
+};
+
+struct rtree_level_s {
+ /*
+ * A non-NULL subtree points to a subtree rooted along the hypothetical
+ * path to the leaf node corresponding to key 0. Depending on what keys
+ * have been used to store to the tree, an arbitrary combination of
+ * subtree pointers may remain NULL.
+ *
+ * Suppose keys comprise 48 bits, and LG_RTREE_BITS_PER_LEVEL is 4.
+ * This results in a 3-level tree, and the leftmost leaf can be directly
+ * accessed via subtrees[2], the subtree prefixed by 0x0000 (excluding
+ * 0x00000000) can be accessed via subtrees[1], and the remainder of the
+ * tree can be accessed via subtrees[0].
+ *
+ * levels[0] : [<unused> | 0x0001******** | 0x0002******** | ...]
+ *
+ * levels[1] : [<unused> | 0x00000001**** | 0x00000002**** | ... ]
+ *
+ * levels[2] : [val(0x000000000000) | val(0x000000000001) | ...]
+ *
+ * This has practical implications on x64, which currently uses only the
+ * lower 47 bits of virtual address space in userland, thus leaving
+ * subtrees[0] unused and avoiding a level of tree traversal.
+ */
+ union {
+ void *subtree_pun;
+ rtree_node_elm_t *subtree;
+ };
+ /* Number of key bits distinguished by this level. */
+ unsigned bits;
+ /*
+ * Cumulative number of key bits distinguished by traversing to
+ * corresponding tree level.
+ */
+ unsigned cumbits;
+};
+
struct rtree_s {
- rtree_alloc_t *alloc;
- rtree_dalloc_t *dalloc;
- malloc_mutex_t mutex;
- void **root;
- unsigned height;
- unsigned level2bits[1]; /* Dynamically sized. */
+ rtree_node_alloc_t *alloc;
+ rtree_node_dalloc_t *dalloc;
+ unsigned height;
+ /*
+ * Precomputed table used to convert from the number of leading 0 key
+ * bits to which subtree level to start at.
+ */
+ unsigned start_level[RTREE_HEIGHT_MAX];
+ rtree_level_t levels[RTREE_HEIGHT_MAX];
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-rtree_t *rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc);
+bool rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
+ rtree_node_dalloc_t *dalloc);
void rtree_delete(rtree_t *rtree);
-void rtree_prefork(rtree_t *rtree);
-void rtree_postfork_parent(rtree_t *rtree);
-void rtree_postfork_child(rtree_t *rtree);
+rtree_node_elm_t *rtree_subtree_read_hard(rtree_t *rtree,
+ unsigned level);
+rtree_node_elm_t *rtree_child_read_hard(rtree_t *rtree,
+ rtree_node_elm_t *elm, unsigned level);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
-#ifdef JEMALLOC_DEBUG
-uint8_t rtree_get_locked(rtree_t *rtree, uintptr_t key);
-#endif
-uint8_t rtree_get(rtree_t *rtree, uintptr_t key);
-bool rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val);
+unsigned rtree_start_level(rtree_t *rtree, uintptr_t key);
+uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level);
+
+bool rtree_node_valid(rtree_node_elm_t *node);
+rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm);
+rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm,
+ unsigned level);
+extent_node_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm,
+ bool dependent);
+void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm,
+ const extent_node_t *val);
+rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level);
+rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level);
+
+extent_node_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent);
+bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
-#define RTREE_GET_GENERATE(f) \
-/* The least significant bits of the key are ignored. */ \
-JEMALLOC_INLINE uint8_t \
-f(rtree_t *rtree, uintptr_t key) \
-{ \
- uint8_t ret; \
- uintptr_t subkey; \
- unsigned i, lshift, height, bits; \
- void **node, **child; \
- \
- RTREE_LOCK(&rtree->mutex); \
- for (i = lshift = 0, height = rtree->height, node = rtree->root;\
- i < height - 1; \
- i++, lshift += bits, node = child) { \
- bits = rtree->level2bits[i]; \
- subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \
- 3)) - bits); \
- child = (void**)node[subkey]; \
- if (child == NULL) { \
- RTREE_UNLOCK(&rtree->mutex); \
- return (0); \
- } \
- } \
- \
- /* \
- * node is a leaf, so it contains values rather than node \
- * pointers. \
- */ \
- bits = rtree->level2bits[i]; \
- subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - \
- bits); \
- { \
- uint8_t *leaf = (uint8_t *)node; \
- ret = leaf[subkey]; \
- } \
- RTREE_UNLOCK(&rtree->mutex); \
- \
- RTREE_GET_VALIDATE \
- return (ret); \
+JEMALLOC_INLINE unsigned
+rtree_start_level(rtree_t *rtree, uintptr_t key)
+{
+ unsigned start_level;
+
+ if (unlikely(key == 0))
+ return (rtree->height - 1);
+
+ start_level = rtree->start_level[lg_floor(key) >>
+ LG_RTREE_BITS_PER_LEVEL];
+ assert(start_level < rtree->height);
+ return (start_level);
}
-#ifdef JEMALLOC_DEBUG
-# define RTREE_LOCK(l) malloc_mutex_lock(l)
-# define RTREE_UNLOCK(l) malloc_mutex_unlock(l)
-# define RTREE_GET_VALIDATE
-RTREE_GET_GENERATE(rtree_get_locked)
-# undef RTREE_LOCK
-# undef RTREE_UNLOCK
-# undef RTREE_GET_VALIDATE
-#endif
+JEMALLOC_INLINE uintptr_t
+rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
+{
-#define RTREE_LOCK(l)
-#define RTREE_UNLOCK(l)
-#ifdef JEMALLOC_DEBUG
- /*
- * Suppose that it were possible for a jemalloc-allocated chunk to be
- * munmap()ped, followed by a different allocator in another thread re-using
- * overlapping virtual memory, all without invalidating the cached rtree
- * value. The result would be a false positive (the rtree would claim that
- * jemalloc owns memory that it had actually discarded). This scenario
- * seems impossible, but the following assertion is a prudent sanity check.
- */
-# define RTREE_GET_VALIDATE \
- assert(rtree_get_locked(rtree, key) == ret);
-#else
-# define RTREE_GET_VALIDATE
-#endif
-RTREE_GET_GENERATE(rtree_get)
-#undef RTREE_LOCK
-#undef RTREE_UNLOCK
-#undef RTREE_GET_VALIDATE
+ return ((key >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
+ rtree->levels[level].cumbits)) & ((ZU(1) <<
+ rtree->levels[level].bits) - 1));
+}
JEMALLOC_INLINE bool
-rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val)
+rtree_node_valid(rtree_node_elm_t *node)
+{
+
+ return ((uintptr_t)node > (uintptr_t)RTREE_NODE_INITIALIZING);
+}
+
+JEMALLOC_INLINE rtree_node_elm_t *
+rtree_child_tryread(rtree_node_elm_t *elm)
+{
+ rtree_node_elm_t *child;
+
+	/* Double-checked read (first read may be stale). */
+ child = elm->child;
+ if (!rtree_node_valid(child))
+ child = atomic_read_p(&elm->pun);
+ return (child);
+}
+
+JEMALLOC_INLINE rtree_node_elm_t *
+rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level)
+{
+ rtree_node_elm_t *child;
+
+ child = rtree_child_tryread(elm);
+ if (unlikely(!rtree_node_valid(child)))
+ child = rtree_child_read_hard(rtree, elm, level);
+ return (child);
+}
+
+JEMALLOC_INLINE extent_node_t *
+rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent)
+{
+
+ if (dependent) {
+ /*
+ * Reading a val on behalf of a pointer to a valid allocation is
+ * guaranteed to be a clean read even without synchronization,
+ * because the rtree update became visible in memory before the
+ * pointer came into existence.
+ */
+ return (elm->val);
+ } else {
+ /*
+ * An arbitrary read, e.g. on behalf of ivsalloc(), may not be
+ * dependent on a previous rtree write, which means a stale read
+ * could result if synchronization were omitted here.
+ */
+ return (atomic_read_p(&elm->pun));
+ }
+}
+
+JEMALLOC_INLINE void
+rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val)
+{
+
+ atomic_write_p(&elm->pun, val);
+}
+
+JEMALLOC_INLINE rtree_node_elm_t *
+rtree_subtree_tryread(rtree_t *rtree, unsigned level)
+{
+ rtree_node_elm_t *subtree;
+
+	/* Double-checked read (first read may be stale). */
+ subtree = rtree->levels[level].subtree;
+ if (!rtree_node_valid(subtree))
+ subtree = atomic_read_p(&rtree->levels[level].subtree_pun);
+ return (subtree);
+}
+
+JEMALLOC_INLINE rtree_node_elm_t *
+rtree_subtree_read(rtree_t *rtree, unsigned level)
+{
+ rtree_node_elm_t *subtree;
+
+ subtree = rtree_subtree_tryread(rtree, level);
+ if (unlikely(!rtree_node_valid(subtree)))
+ subtree = rtree_subtree_read_hard(rtree, level);
+ return (subtree);
+}
+
+JEMALLOC_INLINE extent_node_t *
+rtree_get(rtree_t *rtree, uintptr_t key, bool dependent)
{
uintptr_t subkey;
- unsigned i, lshift, height, bits;
- void **node, **child;
-
- malloc_mutex_lock(&rtree->mutex);
- for (i = lshift = 0, height = rtree->height, node = rtree->root;
- i < height - 1;
- i++, lshift += bits, node = child) {
- bits = rtree->level2bits[i];
- subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
- bits);
- child = (void**)node[subkey];
- if (child == NULL) {
- size_t size = ((i + 1 < height - 1) ? sizeof(void *)
- : (sizeof(uint8_t))) << rtree->level2bits[i+1];
- child = (void**)rtree->alloc(size);
- if (child == NULL) {
- malloc_mutex_unlock(&rtree->mutex);
- return (true);
- }
- memset(child, 0, size);
- node[subkey] = child;
+ unsigned i, start_level;
+ rtree_node_elm_t *node, *child;
+
+ start_level = rtree_start_level(rtree, key);
+
+ for (i = start_level, node = rtree_subtree_tryread(rtree, start_level);
+ /**/; i++, node = child) {
+ if (!dependent && unlikely(!rtree_node_valid(node)))
+ return (NULL);
+ subkey = rtree_subkey(rtree, key, i);
+ if (i == rtree->height - 1) {
+ /*
+ * node is a leaf, so it contains values rather than
+ * child pointers.
+ */
+ return (rtree_val_read(rtree, &node[subkey],
+ dependent));
}
+ assert(i < rtree->height - 1);
+ child = rtree_child_tryread(&node[subkey]);
}
+ not_reached();
+}
- /* node is a leaf, so it contains values rather than node pointers. */
- bits = rtree->level2bits[i];
- subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits);
- {
- uint8_t *leaf = (uint8_t *)node;
- leaf[subkey] = val;
- }
- malloc_mutex_unlock(&rtree->mutex);
+JEMALLOC_INLINE bool
+rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val)
+{
+ uintptr_t subkey;
+ unsigned i, start_level;
+ rtree_node_elm_t *node, *child;
- return (false);
+ start_level = rtree_start_level(rtree, key);
+
+ node = rtree_subtree_read(rtree, start_level);
+ if (node == NULL)
+ return (true);
+ for (i = start_level; /**/; i++, node = child) {
+ subkey = rtree_subkey(rtree, key, i);
+ if (i == rtree->height - 1) {
+ /*
+ * node is a leaf, so it contains values rather than
+ * child pointers.
+ */
+ rtree_val_write(rtree, &node[subkey], val);
+ return (false);
+ }
+ assert(i + 1 < rtree->height);
+ child = rtree_child_read(rtree, &node[subkey], i);
+ if (child == NULL)
+ return (true);
+ }
+ not_reached();
}
#endif
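
rtree_subkey() above extracts a per-level index by shifting away the key bits below the level's cumulative bit count and masking to the level's width. A stand-alone recomputation of that arithmetic, assuming 64-bit keys and 16 bits per level (RTREE_BITS_PER_LEVEL with the default LG_RTREE_BITS_PER_LEVEL of 4); the sample key is an arbitrary plausible address, not taken from the source:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define KEY_BITS	64	/* assumes LG_SIZEOF_PTR == 3 */
#define BITS_PER_LEVEL	16	/* ZU(1) << LG_RTREE_BITS_PER_LEVEL */
#define HEIGHT		(KEY_BITS / BITS_PER_LEVEL)

/* Same shift-and-mask as rtree_subkey(); cumbits is the cumulative number
 * of key bits consumed once this level has been traversed. */
static uint64_t
subkey(uint64_t key, unsigned cumbits)
{

	return ((key >> (KEY_BITS - cumbits)) &
	    ((UINT64_C(1) << BITS_PER_LEVEL) - 1));
}

int
main(void)
{
	uint64_t key = UINT64_C(0x00007f1234567000);
	uint64_t rebuilt = 0;
	unsigned level;

	for (level = 0; level < HEIGHT; level++) {
		unsigned cumbits = (level + 1) * BITS_PER_LEVEL;
		uint64_t sk = subkey(key, cumbits);

		printf("level %u: subkey 0x%04" PRIx64 "\n", level, sk);
		rebuilt |= sk << (KEY_BITS - cumbits);
	}
	assert(rebuilt == key);	/* The per-level subkeys partition the key. */
	return (0);
}
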
diff --git a/deps/jemalloc/include/jemalloc/internal/size_classes.sh b/deps/jemalloc/include/jemalloc/internal/size_classes.sh
index 29c80c1fb..fc82036d3 100755
--- a/deps/jemalloc/include/jemalloc/internal/size_classes.sh
+++ b/deps/jemalloc/include/jemalloc/internal/size_classes.sh
@@ -1,17 +1,26 @@
#!/bin/sh
+#
+# Usage: size_classes.sh <lg_qarr> <lg_tmin> <lg_parr> <lg_g>
# The following limits are chosen such that they cover all supported platforms.
-# Range of quanta.
-lg_qmin=3
-lg_qmax=4
+# Pointer sizes.
+lg_zarr="2 3"
+
+# Quanta.
+lg_qarr=$1
# The range of tiny size classes is [2^lg_tmin..2^(lg_q-1)].
-lg_tmin=3
+lg_tmin=$2
+
+# Maximum lookup size.
+lg_kmax=12
+
+# Page sizes.
+lg_parr=`echo $3 | tr ',' ' '`
-# Range of page sizes.
-lg_pmin=12
-lg_pmax=16
+# Size class group size (number of size classes for each size doubling).
+lg_g=$4
pow2() {
e=$1
@@ -22,68 +31,224 @@ pow2() {
done
}
+lg() {
+ x=$1
+ lg_result=0
+ while [ ${x} -gt 1 ] ; do
+ lg_result=$((${lg_result} + 1))
+ x=$((${x} / 2))
+ done
+}
+
+size_class() {
+ index=$1
+ lg_grp=$2
+ lg_delta=$3
+ ndelta=$4
+ lg_p=$5
+ lg_kmax=$6
+
+ lg ${ndelta}; lg_ndelta=${lg_result}; pow2 ${lg_ndelta}
+ if [ ${pow2_result} -lt ${ndelta} ] ; then
+ rem="yes"
+ else
+ rem="no"
+ fi
+
+ lg_size=${lg_grp}
+ if [ $((${lg_delta} + ${lg_ndelta})) -eq ${lg_grp} ] ; then
+ lg_size=$((${lg_grp} + 1))
+ else
+ lg_size=${lg_grp}
+ rem="yes"
+ fi
+
+ if [ ${lg_size} -lt $((${lg_p} + ${lg_g})) ] ; then
+ bin="yes"
+ else
+ bin="no"
+ fi
+ if [ ${lg_size} -lt ${lg_kmax} \
+ -o ${lg_size} -eq ${lg_kmax} -a ${rem} = "no" ] ; then
+ lg_delta_lookup=${lg_delta}
+ else
+ lg_delta_lookup="no"
+ fi
+ printf ' SC(%3d, %6d, %8d, %6d, %3s, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${bin} ${lg_delta_lookup}
+ # Defined upon return:
+ # - lg_delta_lookup (${lg_delta} or "no")
+ # - bin ("yes" or "no")
+}
+
+sep_line() {
+ echo " \\"
+}
+
+size_classes() {
+ lg_z=$1
+ lg_q=$2
+ lg_t=$3
+ lg_p=$4
+ lg_g=$5
+
+ pow2 $((${lg_z} + 3)); ptr_bits=${pow2_result}
+ pow2 ${lg_g}; g=${pow2_result}
+
+ echo "#define SIZE_CLASSES \\"
+ echo " /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \\"
+
+ ntbins=0
+ nlbins=0
+ lg_tiny_maxclass='"NA"'
+ nbins=0
+
+ # Tiny size classes.
+ ndelta=0
+ index=0
+ lg_grp=${lg_t}
+ lg_delta=${lg_grp}
+ while [ ${lg_grp} -lt ${lg_q} ] ; do
+ size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
+ if [ ${lg_delta_lookup} != "no" ] ; then
+ nlbins=$((${index} + 1))
+ fi
+ if [ ${bin} != "no" ] ; then
+ nbins=$((${index} + 1))
+ fi
+ ntbins=$((${ntbins} + 1))
+ lg_tiny_maxclass=${lg_grp} # Final written value is correct.
+ index=$((${index} + 1))
+ lg_delta=${lg_grp}
+ lg_grp=$((${lg_grp} + 1))
+ done
+
+ # First non-tiny group.
+ if [ ${ntbins} -gt 0 ] ; then
+ sep_line
+ # The first size class has an unusual encoding, because the size has to be
+ # split between grp and delta*ndelta.
+ lg_grp=$((${lg_grp} - 1))
+ ndelta=1
+ size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
+ index=$((${index} + 1))
+ lg_grp=$((${lg_grp} + 1))
+ lg_delta=$((${lg_delta} + 1))
+ fi
+ while [ ${ndelta} -lt ${g} ] ; do
+ size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
+ index=$((${index} + 1))
+ ndelta=$((${ndelta} + 1))
+ done
+
+ # All remaining groups.
+ lg_grp=$((${lg_grp} + ${lg_g}))
+ while [ ${lg_grp} -lt ${ptr_bits} ] ; do
+ sep_line
+ ndelta=1
+ if [ ${lg_grp} -eq $((${ptr_bits} - 1)) ] ; then
+ ndelta_limit=$((${g} - 1))
+ else
+ ndelta_limit=${g}
+ fi
+ while [ ${ndelta} -le ${ndelta_limit} ] ; do
+ size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
+ if [ ${lg_delta_lookup} != "no" ] ; then
+ nlbins=$((${index} + 1))
+ # Final written value is correct:
+ lookup_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
+ fi
+ if [ ${bin} != "no" ] ; then
+ nbins=$((${index} + 1))
+ # Final written value is correct:
+ small_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
+ if [ ${lg_g} -gt 0 ] ; then
+ lg_large_minclass=$((${lg_grp} + 1))
+ else
+ lg_large_minclass=$((${lg_grp} + 2))
+ fi
+ fi
+ # Final written value is correct:
+ huge_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
+ index=$((${index} + 1))
+ ndelta=$((${ndelta} + 1))
+ done
+ lg_grp=$((${lg_grp} + 1))
+ lg_delta=$((${lg_delta} + 1))
+ done
+ echo
+ nsizes=${index}
+
+ # Defined upon completion:
+ # - ntbins
+ # - nlbins
+ # - nbins
+ # - nsizes
+ # - lg_tiny_maxclass
+ # - lookup_maxclass
+ # - small_maxclass
+ # - lg_large_minclass
+ # - huge_maxclass
+}
+
cat <<EOF
/* This file was automatically generated by size_classes.sh. */
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
+/*
+ * This header requires LG_SIZEOF_PTR, LG_TINY_MIN, LG_QUANTUM, and LG_PAGE to
+ * be defined prior to inclusion, and it in turn defines:
+ *
+ * LG_SIZE_CLASS_GROUP: Lg of size class count for each size doubling.
+ * SIZE_CLASSES: Complete table of
+ * SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup)
+ * tuples.
+ * index: Size class index.
+ * lg_grp: Lg group base size (no deltas added).
+ * lg_delta: Lg delta to previous size class.
+ *   ndelta: Delta multiplier. size == (1<<lg_grp) + (ndelta<<lg_delta)
+ * bin: 'yes' if a small bin size class, 'no' otherwise.
+ * lg_delta_lookup: Same as lg_delta if a lookup table size class, 'no'
+ * otherwise.
+ * NTBINS: Number of tiny bins.
+ * NLBINS: Number of bins supported by the lookup table.
+ * NBINS: Number of small size class bins.
+ * NSIZES: Number of size classes.
+ * LG_TINY_MAXCLASS: Lg of maximum tiny size class.
+ * LOOKUP_MAXCLASS: Maximum size class included in lookup table.
+ * SMALL_MAXCLASS: Maximum small size class.
+ * LG_LARGE_MINCLASS: Lg of minimum large size class.
+ * HUGE_MAXCLASS: Maximum (huge) size class.
+ */
+
+#define LG_SIZE_CLASS_GROUP ${lg_g}
+
EOF
-lg_q=${lg_qmin}
-while [ ${lg_q} -le ${lg_qmax} ] ; do
- lg_t=${lg_tmin}
- while [ ${lg_t} -le ${lg_q} ] ; do
- lg_p=${lg_pmin}
- while [ ${lg_p} -le ${lg_pmax} ] ; do
- echo "#if (LG_TINY_MIN == ${lg_t} && LG_QUANTUM == ${lg_q} && LG_PAGE == ${lg_p})"
- echo "#define SIZE_CLASSES_DEFINED"
- pow2 ${lg_q}; q=${pow2_result}
- pow2 ${lg_t}; t=${pow2_result}
- pow2 ${lg_p}; p=${pow2_result}
- bin=0
- psz=0
- sz=${t}
- delta=$((${sz} - ${psz}))
- echo "/* SIZE_CLASS(bin, delta, sz) */"
- echo "#define SIZE_CLASSES \\"
-
- # Tiny size classes.
- while [ ${sz} -lt ${q} ] ; do
- echo " SIZE_CLASS(${bin}, ${delta}, ${sz}) \\"
- bin=$((${bin} + 1))
- psz=${sz}
- sz=$((${sz} + ${sz}))
- delta=$((${sz} - ${psz}))
- done
- # Quantum-multiple size classes. For each doubling of sz, as many as 4
- # size classes exist. Their spacing is the greater of:
- # - q
- # - sz/4, where sz is a power of 2
- while [ ${sz} -lt ${p} ] ; do
- if [ ${sz} -ge $((${q} * 4)) ] ; then
- i=$((${sz} / 4))
- else
- i=${q}
- fi
- next_2pow=$((${sz} * 2))
- while [ ${sz} -lt $next_2pow ] ; do
- echo " SIZE_CLASS(${bin}, ${delta}, ${sz}) \\"
- bin=$((${bin} + 1))
- psz=${sz}
- sz=$((${sz} + ${i}))
- delta=$((${sz} - ${psz}))
- done
+for lg_z in ${lg_zarr} ; do
+ for lg_q in ${lg_qarr} ; do
+ lg_t=${lg_tmin}
+ while [ ${lg_t} -le ${lg_q} ] ; do
+ # Iterate through page sizes and compute how many bins there are.
+ for lg_p in ${lg_parr} ; do
+ echo "#if (LG_SIZEOF_PTR == ${lg_z} && LG_TINY_MIN == ${lg_t} && LG_QUANTUM == ${lg_q} && LG_PAGE == ${lg_p})"
+ size_classes ${lg_z} ${lg_q} ${lg_t} ${lg_p} ${lg_g}
+ echo "#define SIZE_CLASSES_DEFINED"
+ echo "#define NTBINS ${ntbins}"
+ echo "#define NLBINS ${nlbins}"
+ echo "#define NBINS ${nbins}"
+ echo "#define NSIZES ${nsizes}"
+ echo "#define LG_TINY_MAXCLASS ${lg_tiny_maxclass}"
+ echo "#define LOOKUP_MAXCLASS ${lookup_maxclass}"
+ echo "#define SMALL_MAXCLASS ${small_maxclass}"
+ echo "#define LG_LARGE_MINCLASS ${lg_large_minclass}"
+ echo "#define HUGE_MAXCLASS ${huge_maxclass}"
+ echo "#endif"
+ echo
done
- echo
- echo "#define NBINS ${bin}"
- echo "#define SMALL_MAXCLASS ${psz}"
- echo "#endif"
- echo
- lg_p=$((${lg_p} + 1))
+ lg_t=$((${lg_t} + 1))
done
- lg_t=$((${lg_t} + 1))
done
- lg_q=$((${lg_q} + 1))
done
cat <<EOF
@@ -92,11 +257,10 @@ cat <<EOF
#endif
#undef SIZE_CLASSES_DEFINED
/*
- * The small_size2bin lookup table uses uint8_t to encode each bin index, so we
+ * The size2index_tab lookup table uses uint8_t to encode each bin index, so we
* cannot support more than 256 small size classes. Further constrain NBINS to
- * 255 to support prof_promote, since all small size classes, plus a "not
- * small" size class must be stored in 8 bits of arena_chunk_map_t's bits
- * field.
+ * 255 since all small size classes, plus a "not small" size class must be
+ * stored in 8 bits of arena_chunk_map_bits_t's bits field.
*/
#if (NBINS > 255)
# error "Too many small size classes"
diff --git a/deps/jemalloc/include/jemalloc/internal/stats.h b/deps/jemalloc/include/jemalloc/internal/stats.h
index 27f68e368..c91dba99d 100644
--- a/deps/jemalloc/include/jemalloc/internal/stats.h
+++ b/deps/jemalloc/include/jemalloc/internal/stats.h
@@ -4,6 +4,7 @@
typedef struct tcache_bin_stats_s tcache_bin_stats_t;
typedef struct malloc_bin_stats_s malloc_bin_stats_t;
typedef struct malloc_large_stats_s malloc_large_stats_t;
+typedef struct malloc_huge_stats_s malloc_huge_stats_t;
typedef struct arena_stats_s arena_stats_t;
typedef struct chunk_stats_s chunk_stats_t;
@@ -21,12 +22,6 @@ struct tcache_bin_stats_s {
struct malloc_bin_stats_s {
/*
- * Current number of bytes allocated, including objects currently
- * cached by tcache.
- */
- size_t allocated;
-
- /*
* Total number of allocation/deallocation requests served directly by
* the bin. Note that tcache may allocate an object, then recycle it
 * many times, resulting in many increments to nrequests, but only one
@@ -42,6 +37,12 @@ struct malloc_bin_stats_s {
*/
uint64_t nrequests;
+ /*
+ * Current number of regions of this size class, including regions
+ * currently cached by tcache.
+ */
+ size_t curregs;
+
/* Number of tcache fills from this bin. */
uint64_t nfills;
@@ -78,10 +79,25 @@ struct malloc_large_stats_s {
*/
uint64_t nrequests;
- /* Current number of runs of this size class. */
+ /*
+ * Current number of runs of this size class, including runs currently
+ * cached by tcache.
+ */
size_t curruns;
};
+struct malloc_huge_stats_s {
+ /*
+ * Total number of allocation/deallocation requests served directly by
+ * the arena.
+ */
+ uint64_t nmalloc;
+ uint64_t ndalloc;
+
+ /* Current number of (multi-)chunk allocations of this size class. */
+ size_t curhchunks;
+};
+
struct arena_stats_s {
/* Number of bytes currently mapped. */
size_t mapped;
@@ -95,34 +111,28 @@ struct arena_stats_s {
uint64_t nmadvise;
uint64_t purged;
+ /*
+ * Number of bytes currently mapped purely for metadata purposes, and
+ * number of bytes currently allocated for internal metadata.
+ */
+ size_t metadata_mapped;
+ size_t metadata_allocated; /* Protected via atomic_*_z(). */
+
/* Per-size-category statistics. */
size_t allocated_large;
uint64_t nmalloc_large;
uint64_t ndalloc_large;
uint64_t nrequests_large;
- /*
- * One element for each possible size class, including sizes that
- * overlap with bin size classes. This is necessary because ipalloc()
- * sometimes has to use such large objects in order to assure proper
- * alignment.
- */
- malloc_large_stats_t *lstats;
-};
-
-struct chunk_stats_s {
- /* Number of chunks that were allocated. */
- uint64_t nchunks;
+ size_t allocated_huge;
+ uint64_t nmalloc_huge;
+ uint64_t ndalloc_huge;
- /* High-water mark for number of chunks allocated. */
- size_t highchunks;
+ /* One element for each large size class. */
+ malloc_large_stats_t *lstats;
- /*
- * Current number of chunks allocated. This value isn't maintained for
- * any other purpose, so keep track of it in order to be able to set
- * highchunks.
- */
- size_t curchunks;
+ /* One element for each huge size class. */
+ malloc_huge_stats_t *hstats;
};
#endif /* JEMALLOC_H_STRUCTS */
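
The new metadata_allocated field above is noted as protected via atomic_*_z() rather than by the arena lock. A generic C11 analogue of that lock-free counter maintenance (a sketch using <stdatomic.h>, not jemalloc's own atomic.h wrappers):

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in for arena_stats_t.metadata_allocated: updated without a lock,
 * so concurrent readers may observe a slightly stale value. */
static atomic_size_t metadata_allocated;

static void
metadata_add(size_t usize)
{

	atomic_fetch_add_explicit(&metadata_allocated, usize,
	    memory_order_relaxed);
}

static void
metadata_sub(size_t usize)
{

	atomic_fetch_sub_explicit(&metadata_allocated, usize,
	    memory_order_relaxed);
}

int
main(void)
{

	metadata_add(4096);
	metadata_sub(1024);
	printf("%zu\n", atomic_load_explicit(&metadata_allocated,
	    memory_order_relaxed));	/* 3072 */
	return (0);
}
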
diff --git a/deps/jemalloc/include/jemalloc/internal/tcache.h b/deps/jemalloc/include/jemalloc/internal/tcache.h
index c3d4b58d4..5079cd266 100644
--- a/deps/jemalloc/include/jemalloc/internal/tcache.h
+++ b/deps/jemalloc/include/jemalloc/internal/tcache.h
@@ -4,6 +4,7 @@
typedef struct tcache_bin_info_s tcache_bin_info_t;
typedef struct tcache_bin_s tcache_bin_t;
typedef struct tcache_s tcache_t;
+typedef struct tcaches_s tcaches_t;
/*
* tcache pointers close to NULL are used to encode state information that is
@@ -16,6 +17,11 @@ typedef struct tcache_s tcache_t;
#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY
/*
+ * Absolute minimum number of cache slots for each small bin.
+ */
+#define TCACHE_NSLOTS_SMALL_MIN 20
+
+/*
* Absolute maximum number of cache slots for each small bin in the thread
* cache. This is an additional constraint beyond that imposed as: twice the
* number of regions per run for this size class.
@@ -69,10 +75,9 @@ struct tcache_bin_s {
struct tcache_s {
ql_elm(tcache_t) link; /* Used for aggregating stats. */
- uint64_t prof_accumbytes;/* Cleared after arena_prof_accum() */
- arena_t *arena; /* This thread's arena. */
+ uint64_t prof_accumbytes;/* Cleared after arena_prof_accum(). */
unsigned ev_cnt; /* Event count since incremental GC. */
- unsigned next_gc_bin; /* Next bin to GC. */
+ szind_t next_gc_bin; /* Next bin to GC. */
tcache_bin_t tbins[1]; /* Dynamically sized. */
/*
* The pointer stacks associated with tbins follow as a contiguous
@@ -82,6 +87,14 @@ struct tcache_s {
*/
};
+/* Linkage for list of available (previously used) explicit tcache IDs. */
+struct tcaches_s {
+ union {
+ tcache_t *tcache;
+ tcaches_t *next;
+ };
+};
+
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
@@ -95,84 +108,90 @@ extern tcache_bin_info_t *tcache_bin_info;
* Number of tcache bins. There are NBINS small-object bins, plus 0 or more
* large-object bins.
*/
-extern size_t nhbins;
+extern size_t nhbins;
/* Maximum cached size class. */
-extern size_t tcache_maxclass;
+extern size_t tcache_maxclass;
+
+/*
+ * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
+ * usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are
+ * completely disjoint from this data structure. tcaches starts off as a sparse
+ * array, so it has no physical memory footprint until individual pages are
+ * touched. This allows the entire array to be allocated the first time an
+ * explicit tcache is created without a disproportionate impact on memory usage.
+ */
+extern tcaches_t *tcaches;
size_t tcache_salloc(const void *ptr);
-void tcache_event_hard(tcache_t *tcache);
-void *tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
- size_t binind);
-void tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
- tcache_t *tcache);
-void tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
- tcache_t *tcache);
+void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
+void *tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+ tcache_bin_t *tbin, szind_t binind);
+void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
+ szind_t binind, unsigned rem);
+void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
+ unsigned rem, tcache_t *tcache);
void tcache_arena_associate(tcache_t *tcache, arena_t *arena);
-void tcache_arena_dissociate(tcache_t *tcache);
-tcache_t *tcache_create(arena_t *arena);
-void tcache_destroy(tcache_t *tcache);
-void tcache_thread_cleanup(void *arg);
+void tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena,
+ arena_t *newarena);
+void tcache_arena_dissociate(tcache_t *tcache, arena_t *arena);
+tcache_t *tcache_get_hard(tsd_t *tsd);
+tcache_t *tcache_create(tsd_t *tsd, arena_t *arena);
+void tcache_cleanup(tsd_t *tsd);
+void tcache_enabled_cleanup(tsd_t *tsd);
void tcache_stats_merge(tcache_t *tcache, arena_t *arena);
-bool tcache_boot0(void);
-bool tcache_boot1(void);
+bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
+void tcaches_flush(tsd_t *tsd, unsigned ind);
+void tcaches_destroy(tsd_t *tsd, unsigned ind);
+bool tcache_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
-malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache, tcache_t *)
-malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache_enabled, tcache_enabled_t)
-
-void tcache_event(tcache_t *tcache);
+void tcache_event(tsd_t *tsd, tcache_t *tcache);
void tcache_flush(void);
bool tcache_enabled_get(void);
-tcache_t *tcache_get(bool create);
+tcache_t *tcache_get(tsd_t *tsd, bool create);
void tcache_enabled_set(bool enabled);
void *tcache_alloc_easy(tcache_bin_t *tbin);
-void *tcache_alloc_small(tcache_t *tcache, size_t size, bool zero);
-void *tcache_alloc_large(tcache_t *tcache, size_t size, bool zero);
-void tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind);
-void tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size);
+void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+ size_t size, bool zero);
+void *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+ size_t size, bool zero);
+void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
+ szind_t binind);
+void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
+ size_t size);
+tcache_t *tcaches_get(tsd_t *tsd, unsigned ind);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
-/* Map of thread-specific caches. */
-malloc_tsd_externs(tcache, tcache_t *)
-malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache, tcache_t *, NULL,
- tcache_thread_cleanup)
-/* Per thread flag that allows thread caches to be disabled. */
-malloc_tsd_externs(tcache_enabled, tcache_enabled_t)
-malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache_enabled, tcache_enabled_t,
- tcache_enabled_default, malloc_tsd_no_cleanup)
-
JEMALLOC_INLINE void
tcache_flush(void)
{
- tcache_t *tcache;
+ tsd_t *tsd;
cassert(config_tcache);
- tcache = *tcache_tsd_get();
- if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX)
- return;
- tcache_destroy(tcache);
- tcache = NULL;
- tcache_tsd_set(&tcache);
+ tsd = tsd_fetch();
+ tcache_cleanup(tsd);
}
JEMALLOC_INLINE bool
tcache_enabled_get(void)
{
+ tsd_t *tsd;
tcache_enabled_t tcache_enabled;
cassert(config_tcache);
- tcache_enabled = *tcache_enabled_tsd_get();
+ tsd = tsd_fetch();
+ tcache_enabled = tsd_tcache_enabled_get(tsd);
if (tcache_enabled == tcache_enabled_default) {
tcache_enabled = (tcache_enabled_t)opt_tcache;
- tcache_enabled_tsd_set(&tcache_enabled);
+ tsd_tcache_enabled_set(tsd, tcache_enabled);
}
return ((bool)tcache_enabled);
@@ -181,85 +200,41 @@ tcache_enabled_get(void)
JEMALLOC_INLINE void
tcache_enabled_set(bool enabled)
{
+ tsd_t *tsd;
tcache_enabled_t tcache_enabled;
- tcache_t *tcache;
cassert(config_tcache);
+ tsd = tsd_fetch();
+
tcache_enabled = (tcache_enabled_t)enabled;
- tcache_enabled_tsd_set(&tcache_enabled);
- tcache = *tcache_tsd_get();
- if (enabled) {
- if (tcache == TCACHE_STATE_DISABLED) {
- tcache = NULL;
- tcache_tsd_set(&tcache);
- }
- } else /* disabled */ {
- if (tcache > TCACHE_STATE_MAX) {
- tcache_destroy(tcache);
- tcache = NULL;
- }
- if (tcache == NULL) {
- tcache = TCACHE_STATE_DISABLED;
- tcache_tsd_set(&tcache);
- }
- }
+ tsd_tcache_enabled_set(tsd, tcache_enabled);
+
+ if (!enabled)
+ tcache_cleanup(tsd);
}
JEMALLOC_ALWAYS_INLINE tcache_t *
-tcache_get(bool create)
+tcache_get(tsd_t *tsd, bool create)
{
tcache_t *tcache;
- if (config_tcache == false)
- return (NULL);
- if (config_lazy_lock && isthreaded == false)
+ if (!config_tcache)
return (NULL);
- tcache = *tcache_tsd_get();
- if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) {
- if (tcache == TCACHE_STATE_DISABLED)
- return (NULL);
- if (tcache == NULL) {
- if (create == false) {
- /*
- * Creating a tcache here would cause
- * allocation as a side effect of free().
- * Ordinarily that would be okay since
- * tcache_create() failure is a soft failure
- * that doesn't propagate. However, if TLS
- * data are freed via free() as in glibc,
- * subtle corruption could result from setting
- * a TLS variable after its backing memory is
- * freed.
- */
- return (NULL);
- }
- if (tcache_enabled_get() == false) {
- tcache_enabled_set(false); /* Memoize. */
- return (NULL);
- }
- return (tcache_create(choose_arena(NULL)));
- }
- if (tcache == TCACHE_STATE_PURGATORY) {
- /*
- * Make a note that an allocator function was called
- * after tcache_thread_cleanup() was called.
- */
- tcache = TCACHE_STATE_REINCARNATED;
- tcache_tsd_set(&tcache);
- return (NULL);
- }
- if (tcache == TCACHE_STATE_REINCARNATED)
- return (NULL);
- not_reached();
+ tcache = tsd_tcache_get(tsd);
+ if (!create)
+ return (tcache);
+ if (unlikely(tcache == NULL) && tsd_nominal(tsd)) {
+ tcache = tcache_get_hard(tsd);
+ tsd_tcache_set(tsd, tcache);
}
return (tcache);
}
JEMALLOC_ALWAYS_INLINE void
-tcache_event(tcache_t *tcache)
+tcache_event(tsd_t *tsd, tcache_t *tcache)
{
if (TCACHE_GC_INCR == 0)
@@ -267,8 +242,8 @@ tcache_event(tcache_t *tcache)
tcache->ev_cnt++;
assert(tcache->ev_cnt <= TCACHE_GC_INCR);
- if (tcache->ev_cnt == TCACHE_GC_INCR)
- tcache_event_hard(tcache);
+ if (unlikely(tcache->ev_cnt == TCACHE_GC_INCR))
+ tcache_event_hard(tsd, tcache);
}
JEMALLOC_ALWAYS_INLINE void *
@@ -276,85 +251,87 @@ tcache_alloc_easy(tcache_bin_t *tbin)
{
void *ret;
- if (tbin->ncached == 0) {
+ if (unlikely(tbin->ncached == 0)) {
tbin->low_water = -1;
return (NULL);
}
tbin->ncached--;
- if ((int)tbin->ncached < tbin->low_water)
+ if (unlikely((int)tbin->ncached < tbin->low_water))
tbin->low_water = tbin->ncached;
ret = tbin->avail[tbin->ncached];
return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
-tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
+tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
+ bool zero)
{
void *ret;
- size_t binind;
+ szind_t binind;
+ size_t usize;
tcache_bin_t *tbin;
- binind = SMALL_SIZE2BIN(size);
+ binind = size2index(size);
assert(binind < NBINS);
tbin = &tcache->tbins[binind];
- size = arena_bin_info[binind].reg_size;
+ usize = index2size(binind);
ret = tcache_alloc_easy(tbin);
- if (ret == NULL) {
- ret = tcache_alloc_small_hard(tcache, tbin, binind);
+ if (unlikely(ret == NULL)) {
+ ret = tcache_alloc_small_hard(tsd, arena, tcache, tbin, binind);
if (ret == NULL)
return (NULL);
}
- assert(tcache_salloc(ret) == arena_bin_info[binind].reg_size);
+ assert(tcache_salloc(ret) == usize);
- if (zero == false) {
+ if (likely(!zero)) {
if (config_fill) {
- if (opt_junk) {
+ if (unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret,
&arena_bin_info[binind], false);
- } else if (opt_zero)
- memset(ret, 0, size);
+ } else if (unlikely(opt_zero))
+ memset(ret, 0, usize);
}
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
} else {
- if (config_fill && opt_junk) {
+ if (config_fill && unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret, &arena_bin_info[binind],
true);
}
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
- memset(ret, 0, size);
+ memset(ret, 0, usize);
}
if (config_stats)
tbin->tstats.nrequests++;
if (config_prof)
- tcache->prof_accumbytes += arena_bin_info[binind].reg_size;
- tcache_event(tcache);
+ tcache->prof_accumbytes += usize;
+ tcache_event(tsd, tcache);
return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
-tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
+tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
+ bool zero)
{
void *ret;
- size_t binind;
+ szind_t binind;
+ size_t usize;
tcache_bin_t *tbin;
- size = PAGE_CEILING(size);
- assert(size <= tcache_maxclass);
- binind = NBINS + (size >> LG_PAGE) - 1;
+ binind = size2index(size);
+ usize = index2size(binind);
+ assert(usize <= tcache_maxclass);
assert(binind < nhbins);
tbin = &tcache->tbins[binind];
ret = tcache_alloc_easy(tbin);
- if (ret == NULL) {
+ if (unlikely(ret == NULL)) {
/*
* Only allocate one large object at a time, because it's quite
* expensive to create one and not use it.
*/
- ret = arena_malloc_large(tcache->arena, size, zero);
+ ret = arena_malloc_large(arena, usize, zero);
if (ret == NULL)
return (NULL);
} else {
- if (config_prof && prof_promote && size == PAGE) {
+ if (config_prof && usize == LARGE_MINCLASS) {
arena_chunk_t *chunk =
(arena_chunk_t *)CHUNK_ADDR2BASE(ret);
size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
@@ -362,57 +339,54 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
arena_mapbits_large_binind_set(chunk, pageind,
BININD_INVALID);
}
- if (zero == false) {
+ if (likely(!zero)) {
if (config_fill) {
- if (opt_junk)
- memset(ret, 0xa5, size);
- else if (opt_zero)
- memset(ret, 0, size);
+ if (unlikely(opt_junk_alloc))
+ memset(ret, 0xa5, usize);
+ else if (unlikely(opt_zero))
+ memset(ret, 0, usize);
}
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
- } else {
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
- memset(ret, 0, size);
- }
+ } else
+ memset(ret, 0, usize);
if (config_stats)
tbin->tstats.nrequests++;
if (config_prof)
- tcache->prof_accumbytes += size;
+ tcache->prof_accumbytes += usize;
}
- tcache_event(tcache);
+ tcache_event(tsd, tcache);
return (ret);
}
JEMALLOC_ALWAYS_INLINE void
-tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind)
+tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind)
{
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;
assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);
- if (config_fill && opt_junk)
+ if (config_fill && unlikely(opt_junk_free))
arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
tbin = &tcache->tbins[binind];
tbin_info = &tcache_bin_info[binind];
- if (tbin->ncached == tbin_info->ncached_max) {
- tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >>
- 1), tcache);
+ if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
+ tcache_bin_flush_small(tsd, tcache, tbin, binind,
+ (tbin_info->ncached_max >> 1));
}
assert(tbin->ncached < tbin_info->ncached_max);
tbin->avail[tbin->ncached] = ptr;
tbin->ncached++;
- tcache_event(tcache);
+ tcache_event(tsd, tcache);
}
JEMALLOC_ALWAYS_INLINE void
-tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
+tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size)
{
- size_t binind;
+ szind_t binind;
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;
@@ -420,22 +394,31 @@ tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
assert(tcache_salloc(ptr) <= tcache_maxclass);
- binind = NBINS + (size >> LG_PAGE) - 1;
+ binind = size2index(size);
- if (config_fill && opt_junk)
- memset(ptr, 0x5a, size);
+ if (config_fill && unlikely(opt_junk_free))
+ arena_dalloc_junk_large(ptr, size);
tbin = &tcache->tbins[binind];
tbin_info = &tcache_bin_info[binind];
- if (tbin->ncached == tbin_info->ncached_max) {
- tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >>
- 1), tcache);
+ if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
+ tcache_bin_flush_large(tsd, tbin, binind,
+ (tbin_info->ncached_max >> 1), tcache);
}
assert(tbin->ncached < tbin_info->ncached_max);
tbin->avail[tbin->ncached] = ptr;
tbin->ncached++;
- tcache_event(tcache);
+ tcache_event(tsd, tcache);
+}
+
+JEMALLOC_ALWAYS_INLINE tcache_t *
+tcaches_get(tsd_t *tsd, unsigned ind)
+{
+ tcaches_t *elm = &tcaches[ind];
+ if (unlikely(elm->tcache == NULL))
+ elm->tcache = tcache_create(tsd, arena_choose(tsd, NULL));
+ return (elm->tcache);
}
#endif
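
The comment introduced above describes the new explicit-tcache workflow: an ID is handed out by the tcache.create mallctl and then passed to allocation calls through MALLOCX_TCACHE(). A minimal consumer-side sketch, assuming the jemalloc 4.x public API (mallctl(), mallocx(), dallocx() and the MALLOCX_TCACHE() macro generated into jemalloc.h), not anything defined by this internal header itself:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	unsigned tc;
	size_t sz = sizeof(tc);

	/* Obtain an explicit tcache ID (backed by tcaches_create()). */
	if (mallctl("tcache.create", &tc, &sz, NULL, 0) != 0)
		return (1);

	/* Allocate/free through that tcache rather than the per-thread one. */
	void *p = mallocx(128, MALLOCX_TCACHE(tc));
	dallocx(p, MALLOCX_TCACHE(tc));

	/* Drain the cache, then retire the ID. */
	mallctl("tcache.flush", NULL, NULL, &tc, sizeof(tc));
	mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
	printf("used explicit tcache %u\n", tc);
	return (0);
}
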
diff --git a/deps/jemalloc/include/jemalloc/internal/tsd.h b/deps/jemalloc/include/jemalloc/internal/tsd.h
index 9fb4a23ec..eed7aa013 100644
--- a/deps/jemalloc/include/jemalloc/internal/tsd.h
+++ b/deps/jemalloc/include/jemalloc/internal/tsd.h
@@ -2,7 +2,7 @@
#ifdef JEMALLOC_H_TYPES
/* Maximum number of malloc_tsd users with cleanup functions. */
-#define MALLOC_TSD_CLEANUPS_MAX 8
+#define MALLOC_TSD_CLEANUPS_MAX 2
typedef bool (*malloc_tsd_cleanup_t)(void);
@@ -12,9 +12,18 @@ typedef struct tsd_init_block_s tsd_init_block_t;
typedef struct tsd_init_head_s tsd_init_head_t;
#endif
+typedef struct tsd_s tsd_t;
+
+typedef enum {
+ tsd_state_uninitialized,
+ tsd_state_nominal,
+ tsd_state_purgatory,
+ tsd_state_reincarnated
+} tsd_state_t;
+
/*
* TLS/TSD-agnostic macro-based implementation of thread-specific data. There
- * are four macros that support (at least) three use cases: file-private,
+ * are five macros that support (at least) three use cases: file-private,
* library-private, and library-private inlined. Following is an example
* library-private tsd variable:
*
@@ -24,34 +33,36 @@ typedef struct tsd_init_head_s tsd_init_head_t;
* int y;
* } example_t;
* #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0})
- * malloc_tsd_protos(, example, example_t *)
- * malloc_tsd_externs(example, example_t *)
+ * malloc_tsd_types(example_, example_t)
+ * malloc_tsd_protos(, example_, example_t)
+ * malloc_tsd_externs(example_, example_t)
* In example.c:
- * malloc_tsd_data(, example, example_t *, EX_INITIALIZER)
- * malloc_tsd_funcs(, example, example_t *, EX_INITIALIZER,
+ * malloc_tsd_data(, example_, example_t, EX_INITIALIZER)
+ * malloc_tsd_funcs(, example_, example_t, EX_INITIALIZER,
* example_tsd_cleanup)
*
* The result is a set of generated functions, e.g.:
*
* bool example_tsd_boot(void) {...}
- * example_t **example_tsd_get() {...}
- * void example_tsd_set(example_t **val) {...}
+ * example_t *example_tsd_get() {...}
+ * void example_tsd_set(example_t *val) {...}
*
* Note that all of the functions deal in terms of (a_type *) rather than
- * (a_type) so that it is possible to support non-pointer types (unlike
+ * (a_type) so that it is possible to support non-pointer types (unlike
* pthreads TSD). example_tsd_cleanup() is passed an (a_type *) pointer that is
- * cast to (void *). This means that the cleanup function needs to cast *and*
- * dereference the function argument, e.g.:
+ * cast to (void *). This means that the cleanup function needs to cast the
+ * function argument to (a_type *), then dereference the resulting pointer to
+ * access fields, e.g.
*
* void
* example_tsd_cleanup(void *arg)
* {
- * example_t *example = *(example_t **)arg;
+ * example_t *example = (example_t *)arg;
*
+ * example->x = 42;
* [...]
- * if ([want the cleanup function to be called again]) {
- * example_tsd_set(&example);
- * }
+ * if ([want the cleanup function to be called again])
+ * example_tsd_set(example);
* }
*
* If example_tsd_set() is called within example_tsd_cleanup(), it will be
@@ -60,63 +71,96 @@ typedef struct tsd_init_head_s tsd_init_head_t;
* non-NULL.
*/
+/* malloc_tsd_types(). */
+#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
+#define malloc_tsd_types(a_name, a_type)
+#elif (defined(JEMALLOC_TLS))
+#define malloc_tsd_types(a_name, a_type)
+#elif (defined(_WIN32))
+#define malloc_tsd_types(a_name, a_type) \
+typedef struct { \
+ bool initialized; \
+ a_type val; \
+} a_name##tsd_wrapper_t;
+#else
+#define malloc_tsd_types(a_name, a_type) \
+typedef struct { \
+ bool initialized; \
+ a_type val; \
+} a_name##tsd_wrapper_t;
+#endif
+
/* malloc_tsd_protos(). */
#define malloc_tsd_protos(a_attr, a_name, a_type) \
a_attr bool \
-a_name##_tsd_boot(void); \
+a_name##tsd_boot0(void); \
+a_attr void \
+a_name##tsd_boot1(void); \
+a_attr bool \
+a_name##tsd_boot(void); \
a_attr a_type * \
-a_name##_tsd_get(void); \
+a_name##tsd_get(void); \
a_attr void \
-a_name##_tsd_set(a_type *val);
+a_name##tsd_set(a_type *val);
/* malloc_tsd_externs(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_externs(a_name, a_type) \
-extern __thread a_type a_name##_tls; \
-extern __thread bool a_name##_initialized; \
-extern bool a_name##_booted;
+extern __thread a_type a_name##tsd_tls; \
+extern __thread bool a_name##tsd_initialized; \
+extern bool a_name##tsd_booted;
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_externs(a_name, a_type) \
-extern __thread a_type a_name##_tls; \
-extern pthread_key_t a_name##_tsd; \
-extern bool a_name##_booted;
+extern __thread a_type a_name##tsd_tls; \
+extern pthread_key_t a_name##tsd_tsd; \
+extern bool a_name##tsd_booted;
#elif (defined(_WIN32))
#define malloc_tsd_externs(a_name, a_type) \
-extern DWORD a_name##_tsd; \
-extern bool a_name##_booted;
+extern DWORD a_name##tsd_tsd; \
+extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \
+extern bool a_name##tsd_booted;
#else
#define malloc_tsd_externs(a_name, a_type) \
-extern pthread_key_t a_name##_tsd; \
-extern tsd_init_head_t a_name##_tsd_init_head; \
-extern bool a_name##_booted;
+extern pthread_key_t a_name##tsd_tsd; \
+extern tsd_init_head_t a_name##tsd_init_head; \
+extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \
+extern bool a_name##tsd_booted;
#endif
/* malloc_tsd_data(). */
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr __thread a_type JEMALLOC_TLS_MODEL \
- a_name##_tls = a_initializer; \
+ a_name##tsd_tls = a_initializer; \
a_attr __thread bool JEMALLOC_TLS_MODEL \
- a_name##_initialized = false; \
-a_attr bool a_name##_booted = false;
+ a_name##tsd_initialized = false; \
+a_attr bool a_name##tsd_booted = false;
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr __thread a_type JEMALLOC_TLS_MODEL \
- a_name##_tls = a_initializer; \
-a_attr pthread_key_t a_name##_tsd; \
-a_attr bool a_name##_booted = false;
+ a_name##tsd_tls = a_initializer; \
+a_attr pthread_key_t a_name##tsd_tsd; \
+a_attr bool a_name##tsd_booted = false;
#elif (defined(_WIN32))
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
-a_attr DWORD a_name##_tsd; \
-a_attr bool a_name##_booted = false;
+a_attr DWORD a_name##tsd_tsd; \
+a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \
+ false, \
+ a_initializer \
+}; \
+a_attr bool a_name##tsd_booted = false;
#else
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
-a_attr pthread_key_t a_name##_tsd; \
-a_attr tsd_init_head_t a_name##_tsd_init_head = { \
+a_attr pthread_key_t a_name##tsd_tsd; \
+a_attr tsd_init_head_t a_name##tsd_init_head = { \
ql_head_initializer(blocks), \
MALLOC_MUTEX_INITIALIZER \
}; \
-a_attr bool a_name##_booted = false;
+a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \
+ false, \
+ a_initializer \
+}; \
+a_attr bool a_name##tsd_booted = false;
#endif
/* malloc_tsd_funcs(). */
@@ -125,75 +169,100 @@ a_attr bool a_name##_booted = false;
a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
-a_name##_tsd_cleanup_wrapper(void) \
+a_name##tsd_cleanup_wrapper(void) \
{ \
\
- if (a_name##_initialized) { \
- a_name##_initialized = false; \
- a_cleanup(&a_name##_tls); \
+ if (a_name##tsd_initialized) { \
+ a_name##tsd_initialized = false; \
+ a_cleanup(&a_name##tsd_tls); \
} \
- return (a_name##_initialized); \
+ return (a_name##tsd_initialized); \
} \
a_attr bool \
-a_name##_tsd_boot(void) \
+a_name##tsd_boot0(void) \
{ \
\
if (a_cleanup != malloc_tsd_no_cleanup) { \
malloc_tsd_cleanup_register( \
- &a_name##_tsd_cleanup_wrapper); \
+ &a_name##tsd_cleanup_wrapper); \
} \
- a_name##_booted = true; \
+ a_name##tsd_booted = true; \
return (false); \
} \
+a_attr void \
+a_name##tsd_boot1(void) \
+{ \
+ \
+ /* Do nothing. */ \
+} \
+a_attr bool \
+a_name##tsd_boot(void) \
+{ \
+ \
+ return (a_name##tsd_boot0()); \
+} \
/* Get/set. */ \
a_attr a_type * \
-a_name##_tsd_get(void) \
+a_name##tsd_get(void) \
{ \
\
- assert(a_name##_booted); \
- return (&a_name##_tls); \
+ assert(a_name##tsd_booted); \
+ return (&a_name##tsd_tls); \
} \
a_attr void \
-a_name##_tsd_set(a_type *val) \
+a_name##tsd_set(a_type *val) \
{ \
\
- assert(a_name##_booted); \
- a_name##_tls = (*val); \
+ assert(a_name##tsd_booted); \
+ a_name##tsd_tls = (*val); \
if (a_cleanup != malloc_tsd_no_cleanup) \
- a_name##_initialized = true; \
+ a_name##tsd_initialized = true; \
}
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
-a_name##_tsd_boot(void) \
+a_name##tsd_boot0(void) \
{ \
\
if (a_cleanup != malloc_tsd_no_cleanup) { \
- if (pthread_key_create(&a_name##_tsd, a_cleanup) != 0) \
+ if (pthread_key_create(&a_name##tsd_tsd, a_cleanup) != \
+ 0) \
return (true); \
} \
- a_name##_booted = true; \
+ a_name##tsd_booted = true; \
return (false); \
} \
+a_attr void \
+a_name##tsd_boot1(void) \
+{ \
+ \
+ /* Do nothing. */ \
+} \
+a_attr bool \
+a_name##tsd_boot(void) \
+{ \
+ \
+ return (a_name##tsd_boot0()); \
+} \
/* Get/set. */ \
a_attr a_type * \
-a_name##_tsd_get(void) \
+a_name##tsd_get(void) \
{ \
\
- assert(a_name##_booted); \
- return (&a_name##_tls); \
+ assert(a_name##tsd_booted); \
+ return (&a_name##tsd_tls); \
} \
a_attr void \
-a_name##_tsd_set(a_type *val) \
+a_name##tsd_set(a_type *val) \
{ \
\
- assert(a_name##_booted); \
- a_name##_tls = (*val); \
+ assert(a_name##tsd_booted); \
+ a_name##tsd_tls = (*val); \
if (a_cleanup != malloc_tsd_no_cleanup) { \
- if (pthread_setspecific(a_name##_tsd, \
- (void *)(&a_name##_tls))) { \
+ if (pthread_setspecific(a_name##tsd_tsd, \
+ (void *)(&a_name##tsd_tls))) { \
malloc_write("<jemalloc>: Error" \
" setting TSD for "#a_name"\n"); \
if (opt_abort) \
@@ -204,27 +273,21 @@ a_name##_tsd_set(a_type *val) \
#elif (defined(_WIN32))
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
-/* Data structure. */ \
-typedef struct { \
- bool initialized; \
- a_type val; \
-} a_name##_tsd_wrapper_t; \
/* Initialization/cleanup. */ \
a_attr bool \
-a_name##_tsd_cleanup_wrapper(void) \
+a_name##tsd_cleanup_wrapper(void) \
{ \
- a_name##_tsd_wrapper_t *wrapper; \
+ DWORD error = GetLastError(); \
+ a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
+ TlsGetValue(a_name##tsd_tsd); \
+ SetLastError(error); \
\
- wrapper = (a_name##_tsd_wrapper_t *) TlsGetValue(a_name##_tsd); \
if (wrapper == NULL) \
return (false); \
if (a_cleanup != malloc_tsd_no_cleanup && \
wrapper->initialized) { \
- a_type val = wrapper->val; \
- a_type tsd_static_data = a_initializer; \
wrapper->initialized = false; \
- wrapper->val = tsd_static_data; \
- a_cleanup(&val); \
+ a_cleanup(&wrapper->val); \
if (wrapper->initialized) { \
/* Trigger another cleanup round. */ \
return (true); \
@@ -233,63 +296,95 @@ a_name##_tsd_cleanup_wrapper(void) \
malloc_tsd_dalloc(wrapper); \
return (false); \
} \
-a_attr bool \
-a_name##_tsd_boot(void) \
+a_attr void \
+a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
{ \
\
- a_name##_tsd = TlsAlloc(); \
- if (a_name##_tsd == TLS_OUT_OF_INDEXES) \
- return (true); \
- if (a_cleanup != malloc_tsd_no_cleanup) { \
- malloc_tsd_cleanup_register( \
- &a_name##_tsd_cleanup_wrapper); \
+ if (!TlsSetValue(a_name##tsd_tsd, (void *)wrapper)) { \
+ malloc_write("<jemalloc>: Error setting" \
+ " TSD for "#a_name"\n"); \
+ abort(); \
} \
- a_name##_booted = true; \
- return (false); \
} \
-/* Get/set. */ \
-a_attr a_name##_tsd_wrapper_t * \
-a_name##_tsd_get_wrapper(void) \
+a_attr a_name##tsd_wrapper_t * \
+a_name##tsd_wrapper_get(void) \
{ \
- a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \
- TlsGetValue(a_name##_tsd); \
+ DWORD error = GetLastError(); \
+ a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
+ TlsGetValue(a_name##tsd_tsd); \
+ SetLastError(error); \
\
- if (wrapper == NULL) { \
- wrapper = (a_name##_tsd_wrapper_t *) \
- malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \
+ if (unlikely(wrapper == NULL)) { \
+ wrapper = (a_name##tsd_wrapper_t *) \
+ malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
if (wrapper == NULL) { \
malloc_write("<jemalloc>: Error allocating" \
" TSD for "#a_name"\n"); \
abort(); \
} else { \
- static a_type tsd_static_data = a_initializer; \
wrapper->initialized = false; \
- wrapper->val = tsd_static_data; \
- } \
- if (!TlsSetValue(a_name##_tsd, (void *)wrapper)) { \
- malloc_write("<jemalloc>: Error setting" \
- " TSD for "#a_name"\n"); \
- abort(); \
+ wrapper->val = a_initializer; \
} \
+ a_name##tsd_wrapper_set(wrapper); \
} \
return (wrapper); \
} \
+a_attr bool \
+a_name##tsd_boot0(void) \
+{ \
+ \
+ a_name##tsd_tsd = TlsAlloc(); \
+ if (a_name##tsd_tsd == TLS_OUT_OF_INDEXES) \
+ return (true); \
+ if (a_cleanup != malloc_tsd_no_cleanup) { \
+ malloc_tsd_cleanup_register( \
+ &a_name##tsd_cleanup_wrapper); \
+ } \
+ a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \
+ a_name##tsd_booted = true; \
+ return (false); \
+} \
+a_attr void \
+a_name##tsd_boot1(void) \
+{ \
+ a_name##tsd_wrapper_t *wrapper; \
+ wrapper = (a_name##tsd_wrapper_t *) \
+ malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
+ if (wrapper == NULL) { \
+ malloc_write("<jemalloc>: Error allocating" \
+ " TSD for "#a_name"\n"); \
+ abort(); \
+ } \
+ memcpy(wrapper, &a_name##tsd_boot_wrapper, \
+ sizeof(a_name##tsd_wrapper_t)); \
+ a_name##tsd_wrapper_set(wrapper); \
+} \
+a_attr bool \
+a_name##tsd_boot(void) \
+{ \
+ \
+ if (a_name##tsd_boot0()) \
+ return (true); \
+ a_name##tsd_boot1(); \
+ return (false); \
+} \
+/* Get/set. */ \
a_attr a_type * \
-a_name##_tsd_get(void) \
+a_name##tsd_get(void) \
{ \
- a_name##_tsd_wrapper_t *wrapper; \
+ a_name##tsd_wrapper_t *wrapper; \
\
- assert(a_name##_booted); \
- wrapper = a_name##_tsd_get_wrapper(); \
+ assert(a_name##tsd_booted); \
+ wrapper = a_name##tsd_wrapper_get(); \
return (&wrapper->val); \
} \
a_attr void \
-a_name##_tsd_set(a_type *val) \
+a_name##tsd_set(a_type *val) \
{ \
- a_name##_tsd_wrapper_t *wrapper; \
+ a_name##tsd_wrapper_t *wrapper; \
\
- assert(a_name##_booted); \
- wrapper = a_name##_tsd_get_wrapper(); \
+ assert(a_name##tsd_booted); \
+ wrapper = a_name##tsd_wrapper_get(); \
wrapper->val = *(val); \
if (a_cleanup != malloc_tsd_no_cleanup) \
wrapper->initialized = true; \
@@ -297,16 +392,11 @@ a_name##_tsd_set(a_type *val) \
#else
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
-/* Data structure. */ \
-typedef struct { \
- bool initialized; \
- a_type val; \
-} a_name##_tsd_wrapper_t; \
/* Initialization/cleanup. */ \
a_attr void \
-a_name##_tsd_cleanup_wrapper(void *arg) \
+a_name##tsd_cleanup_wrapper(void *arg) \
{ \
- a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *)arg;\
+ a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)arg; \
\
if (a_cleanup != malloc_tsd_no_cleanup && \
wrapper->initialized) { \
@@ -314,7 +404,7 @@ a_name##_tsd_cleanup_wrapper(void *arg) \
a_cleanup(&wrapper->val); \
if (wrapper->initialized) { \
/* Trigger another cleanup round. */ \
- if (pthread_setspecific(a_name##_tsd, \
+ if (pthread_setspecific(a_name##tsd_tsd, \
(void *)wrapper)) { \
malloc_write("<jemalloc>: Error" \
" setting TSD for "#a_name"\n"); \
@@ -326,67 +416,97 @@ a_name##_tsd_cleanup_wrapper(void *arg) \
} \
malloc_tsd_dalloc(wrapper); \
} \
-a_attr bool \
-a_name##_tsd_boot(void) \
+a_attr void \
+a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
{ \
\
- if (pthread_key_create(&a_name##_tsd, \
- a_name##_tsd_cleanup_wrapper) != 0) \
- return (true); \
- a_name##_booted = true; \
- return (false); \
+ if (pthread_setspecific(a_name##tsd_tsd, \
+ (void *)wrapper)) { \
+ malloc_write("<jemalloc>: Error setting" \
+ " TSD for "#a_name"\n"); \
+ abort(); \
+ } \
} \
-/* Get/set. */ \
-a_attr a_name##_tsd_wrapper_t * \
-a_name##_tsd_get_wrapper(void) \
+a_attr a_name##tsd_wrapper_t * \
+a_name##tsd_wrapper_get(void) \
{ \
- a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \
- pthread_getspecific(a_name##_tsd); \
+ a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
+ pthread_getspecific(a_name##tsd_tsd); \
\
- if (wrapper == NULL) { \
+ if (unlikely(wrapper == NULL)) { \
tsd_init_block_t block; \
wrapper = tsd_init_check_recursion( \
- &a_name##_tsd_init_head, &block); \
+ &a_name##tsd_init_head, &block); \
if (wrapper) \
return (wrapper); \
- wrapper = (a_name##_tsd_wrapper_t *) \
- malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \
+ wrapper = (a_name##tsd_wrapper_t *) \
+ malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
block.data = wrapper; \
if (wrapper == NULL) { \
malloc_write("<jemalloc>: Error allocating" \
" TSD for "#a_name"\n"); \
abort(); \
} else { \
- static a_type tsd_static_data = a_initializer; \
wrapper->initialized = false; \
- wrapper->val = tsd_static_data; \
- } \
- if (pthread_setspecific(a_name##_tsd, \
- (void *)wrapper)) { \
- malloc_write("<jemalloc>: Error setting" \
- " TSD for "#a_name"\n"); \
- abort(); \
+ wrapper->val = a_initializer; \
} \
- tsd_init_finish(&a_name##_tsd_init_head, &block); \
+ a_name##tsd_wrapper_set(wrapper); \
+ tsd_init_finish(&a_name##tsd_init_head, &block); \
} \
return (wrapper); \
} \
+a_attr bool \
+a_name##tsd_boot0(void) \
+{ \
+ \
+ if (pthread_key_create(&a_name##tsd_tsd, \
+ a_name##tsd_cleanup_wrapper) != 0) \
+ return (true); \
+ a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \
+ a_name##tsd_booted = true; \
+ return (false); \
+} \
+a_attr void \
+a_name##tsd_boot1(void) \
+{ \
+ a_name##tsd_wrapper_t *wrapper; \
+ wrapper = (a_name##tsd_wrapper_t *) \
+ malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
+ if (wrapper == NULL) { \
+ malloc_write("<jemalloc>: Error allocating" \
+ " TSD for "#a_name"\n"); \
+ abort(); \
+ } \
+ memcpy(wrapper, &a_name##tsd_boot_wrapper, \
+ sizeof(a_name##tsd_wrapper_t)); \
+ a_name##tsd_wrapper_set(wrapper); \
+} \
+a_attr bool \
+a_name##tsd_boot(void) \
+{ \
+ \
+ if (a_name##tsd_boot0()) \
+ return (true); \
+ a_name##tsd_boot1(); \
+ return (false); \
+} \
+/* Get/set. */ \
a_attr a_type * \
-a_name##_tsd_get(void) \
+a_name##tsd_get(void) \
{ \
- a_name##_tsd_wrapper_t *wrapper; \
+ a_name##tsd_wrapper_t *wrapper; \
\
- assert(a_name##_booted); \
- wrapper = a_name##_tsd_get_wrapper(); \
+ assert(a_name##tsd_booted); \
+ wrapper = a_name##tsd_wrapper_get(); \
return (&wrapper->val); \
} \
a_attr void \
-a_name##_tsd_set(a_type *val) \
+a_name##tsd_set(a_type *val) \
{ \
- a_name##_tsd_wrapper_t *wrapper; \
+ a_name##tsd_wrapper_t *wrapper; \
\
- assert(a_name##_booted); \
- wrapper = a_name##_tsd_get_wrapper(); \
+ assert(a_name##tsd_booted); \
+ wrapper = a_name##tsd_wrapper_get(); \
wrapper->val = *(val); \
if (a_cleanup != malloc_tsd_no_cleanup) \
wrapper->initialized = true; \
@@ -410,25 +530,136 @@ struct tsd_init_head_s {
};
#endif
+#define MALLOC_TSD \
+/* O(name, type) */ \
+ O(tcache, tcache_t *) \
+ O(thread_allocated, uint64_t) \
+ O(thread_deallocated, uint64_t) \
+ O(prof_tdata, prof_tdata_t *) \
+ O(arena, arena_t *) \
+ O(arenas_cache, arena_t **) \
+ O(narenas_cache, unsigned) \
+ O(arenas_cache_bypass, bool) \
+ O(tcache_enabled, tcache_enabled_t) \
+ O(quarantine, quarantine_t *) \
+
+#define TSD_INITIALIZER { \
+ tsd_state_uninitialized, \
+ NULL, \
+ 0, \
+ 0, \
+ NULL, \
+ NULL, \
+ NULL, \
+ 0, \
+ false, \
+ tcache_enabled_default, \
+ NULL \
+}
+
+struct tsd_s {
+ tsd_state_t state;
+#define O(n, t) \
+ t n;
+MALLOC_TSD
+#undef O
+};
+
+static const tsd_t tsd_initializer = TSD_INITIALIZER;
+
+malloc_tsd_types(, tsd_t)
+
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void *malloc_tsd_malloc(size_t size);
void malloc_tsd_dalloc(void *wrapper);
-void malloc_tsd_no_cleanup(void *);
+void malloc_tsd_no_cleanup(void *arg);
void malloc_tsd_cleanup_register(bool (*f)(void));
-void malloc_tsd_boot(void);
+bool malloc_tsd_boot0(void);
+void malloc_tsd_boot1(void);
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
void *tsd_init_check_recursion(tsd_init_head_t *head,
tsd_init_block_t *block);
void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);
#endif
+void tsd_cleanup(void *arg);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
+#ifndef JEMALLOC_ENABLE_INLINE
+malloc_tsd_protos(JEMALLOC_ATTR(unused), , tsd_t)
+
+tsd_t *tsd_fetch(void);
+bool tsd_nominal(tsd_t *tsd);
+#define O(n, t) \
+t *tsd_##n##p_get(tsd_t *tsd); \
+t tsd_##n##_get(tsd_t *tsd); \
+void tsd_##n##_set(tsd_t *tsd, t n);
+MALLOC_TSD
+#undef O
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TSD_C_))
+malloc_tsd_externs(, tsd_t)
+malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, , tsd_t, tsd_initializer, tsd_cleanup)
+
+JEMALLOC_ALWAYS_INLINE tsd_t *
+tsd_fetch(void)
+{
+ tsd_t *tsd = tsd_get();
+
+ if (unlikely(tsd->state != tsd_state_nominal)) {
+ if (tsd->state == tsd_state_uninitialized) {
+ tsd->state = tsd_state_nominal;
+ /* Trigger cleanup handler registration. */
+ tsd_set(tsd);
+ } else if (tsd->state == tsd_state_purgatory) {
+ tsd->state = tsd_state_reincarnated;
+ tsd_set(tsd);
+ } else
+ assert(tsd->state == tsd_state_reincarnated);
+ }
+
+ return (tsd);
+}
+
+JEMALLOC_INLINE bool
+tsd_nominal(tsd_t *tsd)
+{
+
+ return (tsd->state == tsd_state_nominal);
+}
+
+#define O(n, t) \
+JEMALLOC_ALWAYS_INLINE t * \
+tsd_##n##p_get(tsd_t *tsd) \
+{ \
+ \
+ return (&tsd->n); \
+} \
+ \
+JEMALLOC_ALWAYS_INLINE t \
+tsd_##n##_get(tsd_t *tsd) \
+{ \
+ \
+ return (*tsd_##n##p_get(tsd)); \
+} \
+ \
+JEMALLOC_ALWAYS_INLINE void \
+tsd_##n##_set(tsd_t *tsd, t n) \
+{ \
+ \
+ assert(tsd->state == tsd_state_nominal); \
+ tsd->n = n; \
+}
+MALLOC_TSD
+#undef O
+#endif
+
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
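
The tsd.h rewrite above replaces per-variable malloc_tsd boilerplate with a single tsd_t struct whose fields and accessors are generated from the MALLOC_TSD/O() x-macro list. The following stand-alone program mirrors that pattern with hypothetical demo_ names, purely to show how one list drives both the struct layout and the generated get/set helpers:

#include <stdbool.h>
#include <stdio.h>

/* Stand-alone illustration of the O()/MALLOC_TSD x-macro pattern used above. */
#define DEMO_TSD \
	O(counter, int) \
	O(flag, bool)

typedef struct {
#define O(n, t)	t n;
DEMO_TSD
#undef O
} demo_tsd_t;

/* Generate a getter/setter pair per field, as tsd_##n##_get/_set do above. */
#define O(n, t) \
static inline t demo_##n##_get(demo_tsd_t *tsd) { return (tsd->n); } \
static inline void demo_##n##_set(demo_tsd_t *tsd, t v) { tsd->n = v; }
DEMO_TSD
#undef O

int
main(void)
{
	demo_tsd_t tsd = {0, false};

	demo_counter_set(&tsd, 42);
	demo_flag_set(&tsd, true);
	printf("%d %d\n", demo_counter_get(&tsd), demo_flag_get(&tsd));
	return (0);
}
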
diff --git a/deps/jemalloc/include/jemalloc/internal/util.h b/deps/jemalloc/include/jemalloc/internal/util.h
index 6b938f746..b2ea740fd 100644
--- a/deps/jemalloc/include/jemalloc/internal/util.h
+++ b/deps/jemalloc/include/jemalloc/internal/util.h
@@ -1,6 +1,36 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
+#ifdef _WIN32
+# ifdef _WIN64
+# define FMT64_PREFIX "ll"
+# define FMTPTR_PREFIX "ll"
+# else
+# define FMT64_PREFIX "ll"
+# define FMTPTR_PREFIX ""
+# endif
+# define FMTd32 "d"
+# define FMTu32 "u"
+# define FMTx32 "x"
+# define FMTd64 FMT64_PREFIX "d"
+# define FMTu64 FMT64_PREFIX "u"
+# define FMTx64 FMT64_PREFIX "x"
+# define FMTdPTR FMTPTR_PREFIX "d"
+# define FMTuPTR FMTPTR_PREFIX "u"
+# define FMTxPTR FMTPTR_PREFIX "x"
+#else
+# include <inttypes.h>
+# define FMTd32 PRId32
+# define FMTu32 PRIu32
+# define FMTx32 PRIx32
+# define FMTd64 PRId64
+# define FMTu64 PRIu64
+# define FMTx64 PRIx64
+# define FMTdPTR PRIdPTR
+# define FMTuPTR PRIuPTR
+# define FMTxPTR PRIxPTR
+#endif
+
/* Size of stack-allocated buffer passed to buferror(). */
#define BUFERROR_BUF 64
@@ -22,9 +52,33 @@
* uninitialized.
*/
#ifdef JEMALLOC_CC_SILENCE
-# define JEMALLOC_CC_SILENCE_INIT(v) = v
+# define JEMALLOC_CC_SILENCE_INIT(v) = v
#else
-# define JEMALLOC_CC_SILENCE_INIT(v)
+# define JEMALLOC_CC_SILENCE_INIT(v)
+#endif
+
+#define JEMALLOC_GNUC_PREREQ(major, minor) \
+ (!defined(__clang__) && \
+ (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor))))
+#ifndef __has_builtin
+# define __has_builtin(builtin) (0)
+#endif
+#define JEMALLOC_CLANG_HAS_BUILTIN(builtin) \
+ (defined(__clang__) && __has_builtin(builtin))
+
+#ifdef __GNUC__
+# define likely(x) __builtin_expect(!!(x), 1)
+# define unlikely(x) __builtin_expect(!!(x), 0)
+# if JEMALLOC_GNUC_PREREQ(4, 6) || \
+ JEMALLOC_CLANG_HAS_BUILTIN(__builtin_unreachable)
+# define unreachable() __builtin_unreachable()
+# else
+# define unreachable()
+# endif
+#else
+# define likely(x) !!(x)
+# define unlikely(x) !!(x)
+# define unreachable()
#endif
/*
@@ -33,7 +87,7 @@
*/
#ifndef assert
#define assert(e) do { \
- if (config_debug && !(e)) { \
+ if (unlikely(config_debug && !(e))) { \
malloc_printf( \
"<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
__FILE__, __LINE__, #e); \
@@ -50,6 +104,7 @@
__FILE__, __LINE__); \
abort(); \
} \
+ unreachable(); \
} while (0)
#endif
@@ -65,14 +120,14 @@
#ifndef assert_not_implemented
#define assert_not_implemented(e) do { \
- if (config_debug && !(e)) \
+ if (unlikely(config_debug && !(e))) \
not_implemented(); \
} while (0)
#endif
/* Use to assert a particular configuration, e.g., cassert(config_debug). */
#define cassert(c) do { \
- if ((c) == false) \
+ if (unlikely(!(c))) \
not_reached(); \
} while (0)
@@ -96,25 +151,47 @@ void malloc_write(const char *s);
int malloc_vsnprintf(char *str, size_t size, const char *format,
va_list ap);
int malloc_snprintf(char *str, size_t size, const char *format, ...)
- JEMALLOC_ATTR(format(printf, 3, 4));
+ JEMALLOC_FORMAT_PRINTF(3, 4);
void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
const char *format, va_list ap);
void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque,
- const char *format, ...) JEMALLOC_ATTR(format(printf, 3, 4));
-void malloc_printf(const char *format, ...)
- JEMALLOC_ATTR(format(printf, 1, 2));
+ const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4);
+void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
+int jemalloc_ffsl(long bitmap);
+int jemalloc_ffs(int bitmap);
size_t pow2_ceil(size_t x);
+size_t lg_floor(size_t x);
void set_errno(int errnum);
int get_errno(void);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_))
+
+/* Sanity check. */
+#if !defined(JEMALLOC_INTERNAL_FFSL) || !defined(JEMALLOC_INTERNAL_FFS)
+# error Both JEMALLOC_INTERNAL_FFSL && JEMALLOC_INTERNAL_FFS should have been defined by configure
+#endif
+
+JEMALLOC_ALWAYS_INLINE int
+jemalloc_ffsl(long bitmap)
+{
+
+ return (JEMALLOC_INTERNAL_FFSL(bitmap));
+}
+
+JEMALLOC_ALWAYS_INLINE int
+jemalloc_ffs(int bitmap)
+{
+
+ return (JEMALLOC_INTERNAL_FFS(bitmap));
+}
+
/* Compute the smallest power of 2 that is >= x. */
JEMALLOC_INLINE size_t
pow2_ceil(size_t x)
@@ -133,7 +210,82 @@ pow2_ceil(size_t x)
return (x);
}
-/* Sets error code */
+#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
+JEMALLOC_INLINE size_t
+lg_floor(size_t x)
+{
+ size_t ret;
+
+ assert(x != 0);
+
+ asm ("bsr %1, %0"
+ : "=r"(ret) // Outputs.
+ : "r"(x) // Inputs.
+ );
+ return (ret);
+}
+#elif (defined(_MSC_VER))
+JEMALLOC_INLINE size_t
+lg_floor(size_t x)
+{
+ unsigned long ret;
+
+ assert(x != 0);
+
+#if (LG_SIZEOF_PTR == 3)
+ _BitScanReverse64(&ret, x);
+#elif (LG_SIZEOF_PTR == 2)
+ _BitScanReverse(&ret, x);
+#else
+# error "Unsupported type sizes for lg_floor()"
+#endif
+ return (ret);
+}
+#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
+JEMALLOC_INLINE size_t
+lg_floor(size_t x)
+{
+
+ assert(x != 0);
+
+#if (LG_SIZEOF_PTR == LG_SIZEOF_INT)
+ return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x));
+#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG)
+ return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x));
+#else
+# error "Unsupported type sizes for lg_floor()"
+#endif
+}
+#else
+JEMALLOC_INLINE size_t
+lg_floor(size_t x)
+{
+
+ assert(x != 0);
+
+ x |= (x >> 1);
+ x |= (x >> 2);
+ x |= (x >> 4);
+ x |= (x >> 8);
+ x |= (x >> 16);
+#if (LG_SIZEOF_PTR == 3 && LG_SIZEOF_PTR == LG_SIZEOF_LONG)
+ x |= (x >> 32);
+ if (x == KZU(0xffffffffffffffff))
+ return (63);
+ x++;
+ return (jemalloc_ffsl(x) - 2);
+#elif (LG_SIZEOF_PTR == 2)
+ if (x == KZU(0xffffffff))
+ return (31);
+ x++;
+ return (jemalloc_ffs(x) - 2);
+#else
+# error "Unsupported type sizes for lg_floor()"
+#endif
+}
+#endif
+
+/* Set error code. */
JEMALLOC_INLINE void
set_errno(int errnum)
{
@@ -145,7 +297,7 @@ set_errno(int errnum)
#endif
}
-/* Get last error code */
+/* Get last error code. */
JEMALLOC_INLINE int
get_errno(void)
{
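
util.h now layers likely()/unlikely() over __builtin_expect and adds several lg_floor() variants, including a portable bit-smearing fallback. A self-contained check of that fallback is sketched below; it assumes a 64-bit size_t and GCC/Clang builtins, whereas jemalloc itself dispatches on LG_SIZEOF_PTR and goes through its jemalloc_ffsl() wrapper:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Portable lg_floor() fallback from above: smear the highest set bit
 * rightward, then locate the lowest set bit of (x + 1).
 */
static size_t
lg_floor_portable(size_t x)
{
	assert(x != 0);
	x |= (x >> 1);
	x |= (x >> 2);
	x |= (x >> 4);
	x |= (x >> 8);
	x |= (x >> 16);
	x |= (x >> 32);
	if (x == (size_t)-1)
		return (63);
	x++;
	return ((size_t)(__builtin_ffsll((long long)x) - 2));
}

int
main(void)
{
	size_t i;

	for (i = 1; i < 1000000; i++)
		assert(lg_floor_portable(i) == (size_t)(63 - __builtin_clzll(i)));
	printf("lg_floor_portable(4096) = %zu\n", lg_floor_portable(4096));
	return (0);
}
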
diff --git a/deps/jemalloc/include/jemalloc/internal/valgrind.h b/deps/jemalloc/include/jemalloc/internal/valgrind.h
new file mode 100644
index 000000000..a3380df92
--- /dev/null
+++ b/deps/jemalloc/include/jemalloc/internal/valgrind.h
@@ -0,0 +1,112 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+#ifdef JEMALLOC_VALGRIND
+#include <valgrind/valgrind.h>
+
+/*
+ * The size that is reported to Valgrind must be consistent through a chain of
+ * malloc..realloc..realloc calls. Request size isn't recorded anywhere in
+ * jemalloc, so it is critical that all callers of these macros provide usize
+ * rather than request size. As a result, buffer overflow detection is
+ * technically weakened for the standard API, though it is generally accepted
+ * practice to consider any extra bytes reported by malloc_usable_size() as
+ * usable space.
+ */
+#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do { \
+ if (unlikely(in_valgrind)) \
+ valgrind_make_mem_noaccess(ptr, usize); \
+} while (0)
+#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do { \
+ if (unlikely(in_valgrind)) \
+ valgrind_make_mem_undefined(ptr, usize); \
+} while (0)
+#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do { \
+ if (unlikely(in_valgrind)) \
+ valgrind_make_mem_defined(ptr, usize); \
+} while (0)
+/*
+ * The VALGRIND_MALLOCLIKE_BLOCK() and VALGRIND_RESIZEINPLACE_BLOCK() macro
+ * calls must be embedded in macros rather than in functions so that when
+ * Valgrind reports errors, there are no extra stack frames in the backtraces.
+ */
+#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \
+ if (unlikely(in_valgrind && cond)) \
+ VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \
+} while (0)
+#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, ptr, usize, \
+ ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \
+ zero) do { \
+ if (unlikely(in_valgrind)) { \
+ size_t rzsize = p2rz(ptr); \
+ \
+ if (!maybe_moved || ptr == old_ptr) { \
+ VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
+ usize, rzsize); \
+ if (zero && old_usize < usize) { \
+ valgrind_make_mem_defined( \
+ (void *)((uintptr_t)ptr + \
+ old_usize), usize - old_usize); \
+ } \
+ } else { \
+ if (!old_ptr_maybe_null || old_ptr != NULL) { \
+ valgrind_freelike_block(old_ptr, \
+ old_rzsize); \
+ } \
+ if (!ptr_maybe_null || ptr != NULL) { \
+ size_t copy_size = (old_usize < usize) \
+ ? old_usize : usize; \
+ size_t tail_size = usize - copy_size; \
+ VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \
+ rzsize, false); \
+ if (copy_size > 0) { \
+ valgrind_make_mem_defined(ptr, \
+ copy_size); \
+ } \
+ if (zero && tail_size > 0) { \
+ valgrind_make_mem_defined( \
+ (void *)((uintptr_t)ptr + \
+ copy_size), tail_size); \
+ } \
+ } \
+ } \
+ } \
+} while (0)
+#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \
+ if (unlikely(in_valgrind)) \
+ valgrind_freelike_block(ptr, rzsize); \
+} while (0)
+#else
+#define RUNNING_ON_VALGRIND ((unsigned)0)
+#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do {} while (0)
+#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {} while (0)
+#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do {} while (0)
+#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0)
+#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, ptr, usize, \
+ ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \
+ zero) do {} while (0)
+#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
+#endif
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+#ifdef JEMALLOC_VALGRIND
+void valgrind_make_mem_noaccess(void *ptr, size_t usize);
+void valgrind_make_mem_undefined(void *ptr, size_t usize);
+void valgrind_make_mem_defined(void *ptr, size_t usize);
+void valgrind_freelike_block(void *ptr, size_t usize);
+#endif
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
+
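
Beyond the usize bookkeeping described in the header comment, the key property of the new valgrind.h is that every JEMALLOC_VALGRIND_* macro collapses to do {} while (0) when Valgrind support is compiled out, so call sites never need #ifdefs. A tiny stand-alone sketch of the same conditional-instrumentation pattern, using a hypothetical DEMO_TRACE feature flag rather than anything from jemalloc:

#include <stdio.h>

/*
 * Real hook when the feature is compiled in, a side-effect-free no-op
 * otherwise; either way the call site compiles unchanged.
 */
#ifdef DEMO_TRACE
#define TRACE_ALLOC(ptr, usize) do { \
	fprintf(stderr, "alloc %p (%zu bytes)\n", (ptr), (size_t)(usize)); \
} while (0)
#else
#define TRACE_ALLOC(ptr, usize) do {} while (0)
#endif

int
main(void)
{
	char buf[32];

	TRACE_ALLOC(buf, sizeof(buf));	/* Prints only when built with -DDEMO_TRACE. */
	return (0);
}
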
diff --git a/deps/jemalloc/include/jemalloc/jemalloc.sh b/deps/jemalloc/include/jemalloc/jemalloc.sh
index e4738ebae..c085814f2 100755
--- a/deps/jemalloc/include/jemalloc/jemalloc.sh
+++ b/deps/jemalloc/include/jemalloc/jemalloc.sh
@@ -12,7 +12,7 @@ extern "C" {
EOF
for hdr in jemalloc_defs.h jemalloc_rename.h jemalloc_macros.h \
- jemalloc_protos.h jemalloc_mangle.h ; do
+ jemalloc_protos.h jemalloc_typedefs.h jemalloc_mangle.h ; do
cat "${objroot}include/jemalloc/${hdr}" \
| grep -v 'Generated from .* by configure\.' \
| sed -e 's/^#define /#define /g' \
@@ -22,7 +22,7 @@ done
cat <<EOF
#ifdef __cplusplus
-};
+}
#endif
#endif /* JEMALLOC_H_ */
EOF
diff --git a/deps/jemalloc/include/jemalloc/jemalloc_defs.h.in b/deps/jemalloc/include/jemalloc/jemalloc_defs.h.in
index eb38d7105..ab13c3758 100644
--- a/deps/jemalloc/include/jemalloc/jemalloc_defs.h.in
+++ b/deps/jemalloc/include/jemalloc/jemalloc_defs.h.in
@@ -1,8 +1,14 @@
/* Defined if __attribute__((...)) syntax is supported. */
#undef JEMALLOC_HAVE_ATTR
-/* Support the experimental API. */
-#undef JEMALLOC_EXPERIMENTAL
+/* Defined if alloc_size attribute is supported. */
+#undef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
+
+/* Defined if format(gnu_printf, ...) attribute is supported. */
+#undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
+
+/* Defined if format(printf, ...) attribute is supported. */
+#undef JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
/*
* Define overrides for non-standard allocator-related functions if they are
@@ -20,5 +26,12 @@
*/
#undef JEMALLOC_USABLE_SIZE_CONST
+/*
+ * If defined, specify throw() for the public function prototypes when compiling
+ * with C++. The only justification for this is to match the prototypes that
+ * glibc defines.
+ */
+#undef JEMALLOC_USE_CXX_THROW
+
/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
#undef LG_SIZEOF_PTR
diff --git a/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in b/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in
index 13dbdd912..7f64d9ff9 100644
--- a/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in
+++ b/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in
@@ -1,3 +1,6 @@
+#include <stdlib.h>
+#include <stdbool.h>
+#include <stdint.h>
#include <limits.h>
#include <strings.h>
@@ -16,46 +19,84 @@
((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
# endif
# define MALLOCX_ZERO ((int)0x40)
-/* Bias arena index bits so that 0 encodes "MALLOCX_ARENA() unspecified". */
-# define MALLOCX_ARENA(a) ((int)(((a)+1) << 8))
+/*
+ * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1
+ * encodes MALLOCX_TCACHE_NONE.
+ */
+# define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8))
+# define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1)
+/*
+ * Bias arena index bits so that 0 encodes "use an automatically chosen arena".
+ */
+# define MALLOCX_ARENA(a) ((int)(((a)+1) << 20))
-#ifdef JEMALLOC_EXPERIMENTAL
-# define ALLOCM_LG_ALIGN(la) (la)
-# if LG_SIZEOF_PTR == 2
-# define ALLOCM_ALIGN(a) (ffs(a)-1)
-# else
-# define ALLOCM_ALIGN(a) \
- ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
-# endif
-# define ALLOCM_ZERO ((int)0x40)
-# define ALLOCM_NO_MOVE ((int)0x80)
-/* Bias arena index bits so that 0 encodes "ALLOCM_ARENA() unspecified". */
-# define ALLOCM_ARENA(a) ((int)(((a)+1) << 8))
-# define ALLOCM_SUCCESS 0
-# define ALLOCM_ERR_OOM 1
-# define ALLOCM_ERR_NOT_MOVED 2
+#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
+# define JEMALLOC_CXX_THROW throw()
+#else
+# define JEMALLOC_CXX_THROW
#endif
#ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_ATTR(s) __attribute__((s))
-# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
-# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
+# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
+# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s))
+# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2))
+# else
+# define JEMALLOC_ALLOC_SIZE(s)
+# define JEMALLOC_ALLOC_SIZE2(s1, s2)
+# endif
+# ifndef JEMALLOC_EXPORT
+# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
+# endif
+# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
+# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
+# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
+# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i))
+# else
+# define JEMALLOC_FORMAT_PRINTF(s, i)
+# endif
# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
+# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow)
+# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
+# define JEMALLOC_RESTRICT_RETURN
+# define JEMALLOC_ALLOCATOR
#elif _MSC_VER
# define JEMALLOC_ATTR(s)
-# ifdef DLLEXPORT
-# define JEMALLOC_EXPORT __declspec(dllexport)
+# define JEMALLOC_ALIGNED(s) __declspec(align(s))
+# define JEMALLOC_ALLOC_SIZE(s)
+# define JEMALLOC_ALLOC_SIZE2(s1, s2)
+# ifndef JEMALLOC_EXPORT
+# ifdef DLLEXPORT
+# define JEMALLOC_EXPORT __declspec(dllexport)
+# else
+# define JEMALLOC_EXPORT __declspec(dllimport)
+# endif
+# endif
+# define JEMALLOC_FORMAT_PRINTF(s, i)
+# define JEMALLOC_NOINLINE __declspec(noinline)
+# ifdef __cplusplus
+# define JEMALLOC_NOTHROW __declspec(nothrow)
# else
-# define JEMALLOC_EXPORT __declspec(dllimport)
+# define JEMALLOC_NOTHROW
# endif
-# define JEMALLOC_ALIGNED(s) __declspec(align(s))
# define JEMALLOC_SECTION(s) __declspec(allocate(s))
-# define JEMALLOC_NOINLINE __declspec(noinline)
+# define JEMALLOC_RESTRICT_RETURN __declspec(restrict)
+# if _MSC_VER >= 1900 && !defined(__EDG__)
+# define JEMALLOC_ALLOCATOR __declspec(allocator)
+# else
+# define JEMALLOC_ALLOCATOR
+# endif
#else
# define JEMALLOC_ATTR(s)
-# define JEMALLOC_EXPORT
# define JEMALLOC_ALIGNED(s)
-# define JEMALLOC_SECTION(s)
+# define JEMALLOC_ALLOC_SIZE(s)
+# define JEMALLOC_ALLOC_SIZE2(s1, s2)
+# define JEMALLOC_EXPORT
+# define JEMALLOC_FORMAT_PRINTF(s, i)
# define JEMALLOC_NOINLINE
+# define JEMALLOC_NOTHROW
+# define JEMALLOC_SECTION(s)
+# define JEMALLOC_RESTRICT_RETURN
+# define JEMALLOC_ALLOCATOR
#endif
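
The biased encodings above pack everything mallocx() needs into one flags int: lg(alignment) in the low bits, MALLOCX_ZERO at bit 6, the tcache index (biased by 2) starting at bit 8, and the arena index (biased by 1) starting at bit 20. The sketch below re-derives the fields using DEMO_-prefixed copies of the definitions shown above so it compiles without the generated header; real callers would simply OR the MALLOCX_* macros together and pass the result to mallocx():

#include <stdio.h>

#define DEMO_MALLOCX_ZERO	((int)0x40)
#define DEMO_MALLOCX_TCACHE(tc)	((int)(((tc)+2) << 8))
#define DEMO_MALLOCX_ARENA(a)	((int)(((a)+1) << 20))

int
main(void)
{
	int flags = DEMO_MALLOCX_ZERO | DEMO_MALLOCX_TCACHE(5) |
	    DEMO_MALLOCX_ARENA(3);

	printf("flags      = 0x%x\n", flags);			/* 0x400740 */
	printf("zero       = %d\n", (flags & 0x40) != 0);	/* 1 */
	printf("tcache ind = %d\n", ((flags >> 8) & 0xfff) - 2);	/* 5 */
	printf("arena ind  = %d\n", (flags >> 20) - 1);		/* 3 */
	return (0);
}
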
diff --git a/deps/jemalloc/include/jemalloc/jemalloc_protos.h.in b/deps/jemalloc/include/jemalloc/jemalloc_protos.h.in
index 25446de3d..a78414b19 100644
--- a/deps/jemalloc/include/jemalloc/jemalloc_protos.h.in
+++ b/deps/jemalloc/include/jemalloc/jemalloc_protos.h.in
@@ -7,52 +7,60 @@ extern JEMALLOC_EXPORT const char *@je_@malloc_conf;
extern JEMALLOC_EXPORT void (*@je_@malloc_message)(void *cbopaque,
const char *s);
-JEMALLOC_EXPORT void *@je_@malloc(size_t size) JEMALLOC_ATTR(malloc);
-JEMALLOC_EXPORT void *@je_@calloc(size_t num, size_t size)
- JEMALLOC_ATTR(malloc);
-JEMALLOC_EXPORT int @je_@posix_memalign(void **memptr, size_t alignment,
- size_t size) JEMALLOC_ATTR(nonnull(1));
-JEMALLOC_EXPORT void *@je_@aligned_alloc(size_t alignment, size_t size)
- JEMALLOC_ATTR(malloc);
-JEMALLOC_EXPORT void *@je_@realloc(void *ptr, size_t size);
-JEMALLOC_EXPORT void @je_@free(void *ptr);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *@je_@malloc(size_t size)
+ JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *@je_@calloc(size_t num, size_t size)
+ JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW @je_@posix_memalign(void **memptr,
+ size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1));
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *@je_@aligned_alloc(size_t alignment,
+ size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
+ JEMALLOC_ALLOC_SIZE(2);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *@je_@realloc(void *ptr, size_t size)
+ JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW @je_@free(void *ptr)
+ JEMALLOC_CXX_THROW;
-JEMALLOC_EXPORT void *@je_@mallocx(size_t size, int flags);
-JEMALLOC_EXPORT void *@je_@rallocx(void *ptr, size_t size, int flags);
-JEMALLOC_EXPORT size_t @je_@xallocx(void *ptr, size_t size, size_t extra,
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *@je_@mallocx(size_t size, int flags)
+ JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *@je_@rallocx(void *ptr, size_t size,
+ int flags) JEMALLOC_ALLOC_SIZE(2);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@xallocx(void *ptr, size_t size,
+ size_t extra, int flags);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@sallocx(const void *ptr,
+ int flags) JEMALLOC_ATTR(pure);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW @je_@dallocx(void *ptr, int flags);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW @je_@sdallocx(void *ptr, size_t size,
int flags);
-JEMALLOC_EXPORT size_t @je_@sallocx(const void *ptr, int flags);
-JEMALLOC_EXPORT void @je_@dallocx(void *ptr, int flags);
-JEMALLOC_EXPORT size_t @je_@nallocx(size_t size, int flags);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@nallocx(size_t size, int flags)
+ JEMALLOC_ATTR(pure);
-JEMALLOC_EXPORT int @je_@mallctl(const char *name, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen);
-JEMALLOC_EXPORT int @je_@mallctlnametomib(const char *name, size_t *mibp,
- size_t *miblenp);
-JEMALLOC_EXPORT int @je_@mallctlbymib(const size_t *mib, size_t miblen,
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW @je_@mallctl(const char *name,
void *oldp, size_t *oldlenp, void *newp, size_t newlen);
-JEMALLOC_EXPORT void @je_@malloc_stats_print(void (*write_cb)(void *,
- const char *), void *@je_@cbopaque, const char *opts);
-JEMALLOC_EXPORT size_t @je_@malloc_usable_size(
- JEMALLOC_USABLE_SIZE_CONST void *ptr);
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW @je_@mallctlnametomib(const char *name,
+ size_t *mibp, size_t *miblenp);
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW @je_@mallctlbymib(const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW @je_@malloc_stats_print(
+ void (*write_cb)(void *, const char *), void *@je_@cbopaque,
+ const char *opts);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@malloc_usable_size(
+ JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
-JEMALLOC_EXPORT void * @je_@memalign(size_t alignment, size_t size)
- JEMALLOC_ATTR(malloc);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *@je_@memalign(size_t alignment, size_t size)
+ JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
-JEMALLOC_EXPORT void * @je_@valloc(size_t size) JEMALLOC_ATTR(malloc);
-#endif
-
-#ifdef JEMALLOC_EXPERIMENTAL
-JEMALLOC_EXPORT int @je_@allocm(void **ptr, size_t *rsize, size_t size,
- int flags) JEMALLOC_ATTR(nonnull(1));
-JEMALLOC_EXPORT int @je_@rallocm(void **ptr, size_t *rsize, size_t size,
- size_t extra, int flags) JEMALLOC_ATTR(nonnull(1));
-JEMALLOC_EXPORT int @je_@sallocm(const void *ptr, size_t *rsize, int flags)
- JEMALLOC_ATTR(nonnull(1));
-JEMALLOC_EXPORT int @je_@dallocm(void *ptr, int flags)
- JEMALLOC_ATTR(nonnull(1));
-JEMALLOC_EXPORT int @je_@nallocm(size_t *rsize, size_t size, int flags);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *@je_@valloc(size_t size) JEMALLOC_CXX_THROW
+ JEMALLOC_ATTR(malloc);
#endif
diff --git a/deps/jemalloc/include/jemalloc/jemalloc_typedefs.h.in b/deps/jemalloc/include/jemalloc/jemalloc_typedefs.h.in
new file mode 100644
index 000000000..fa7b350ad
--- /dev/null
+++ b/deps/jemalloc/include/jemalloc/jemalloc_typedefs.h.in
@@ -0,0 +1,57 @@
+/*
+ * void *
+ * chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
+ * bool *commit, unsigned arena_ind);
+ */
+typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, bool *, unsigned);
+
+/*
+ * bool
+ * chunk_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind);
+ */
+typedef bool (chunk_dalloc_t)(void *, size_t, bool, unsigned);
+
+/*
+ * bool
+ * chunk_commit(void *chunk, size_t size, size_t offset, size_t length,
+ * unsigned arena_ind);
+ */
+typedef bool (chunk_commit_t)(void *, size_t, size_t, size_t, unsigned);
+
+/*
+ * bool
+ * chunk_decommit(void *chunk, size_t size, size_t offset, size_t length,
+ * unsigned arena_ind);
+ */
+typedef bool (chunk_decommit_t)(void *, size_t, size_t, size_t, unsigned);
+
+/*
+ * bool
+ * chunk_purge(void *chunk, size_t size, size_t offset, size_t length,
+ * unsigned arena_ind);
+ */
+typedef bool (chunk_purge_t)(void *, size_t, size_t, size_t, unsigned);
+
+/*
+ * bool
+ * chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b,
+ * bool committed, unsigned arena_ind);
+ */
+typedef bool (chunk_split_t)(void *, size_t, size_t, size_t, bool, unsigned);
+
+/*
+ * bool
+ * chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
+ * bool committed, unsigned arena_ind);
+ */
+typedef bool (chunk_merge_t)(void *, size_t, void *, size_t, bool, unsigned);
+
+typedef struct {
+ chunk_alloc_t *alloc;
+ chunk_dalloc_t *dalloc;
+ chunk_commit_t *commit;
+ chunk_decommit_t *decommit;
+ chunk_purge_t *purge;
+ chunk_split_t *split;
+ chunk_merge_t *merge;
+} chunk_hooks_t;
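The chunk_hooks_t structure above bundles the per-arena chunk management callbacks whose signatures are documented in the comments. A minimal sketch of wrapping the default allocation hook follows (an editorial illustration, not part of the patch); it assumes the 4.x "arena.<i>.chunk_hooks" mallctl described in the jemalloc manual, and simply counts chunk allocations before delegating to the previously installed hooks.

	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <jemalloc/jemalloc.h>

	static chunk_hooks_t old_hooks;
	static size_t chunk_allocs;

	static void *
	counting_chunk_alloc(void *new_addr, size_t size, size_t alignment,
	    bool *zero, bool *commit, unsigned arena_ind)
	{

		chunk_allocs++;
		/* Delegate to the hooks that were installed before ours. */
		return (old_hooks.alloc(new_addr, size, alignment, zero, commit,
		    arena_ind));
	}

	int
	main(void)
	{
		chunk_hooks_t new_hooks;
		size_t sz = sizeof(old_hooks);

		/*
		 * Fetch the current hooks for arena 0 and keep a copy to
		 * delegate to.  The mallctl name is taken from the 4.x manual.
		 */
		if (mallctl("arena.0.chunk_hooks", &old_hooks, &sz, NULL, 0) != 0)
			return (1);
		new_hooks = old_hooks;
		new_hooks.alloc = counting_chunk_alloc;
		if (mallctl("arena.0.chunk_hooks", NULL, NULL, &new_hooks,
		    sizeof(new_hooks)) != 0)
			return (1);

		/* Large enough that the arena is likely to request a chunk. */
		free(malloc(4 * 1024 * 1024));
		printf("chunk allocations observed: %zu\n", chunk_allocs);
		return (0);
	}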
diff --git a/deps/jemalloc/include/msvc_compat/stdbool.h b/deps/jemalloc/include/msvc_compat/C99/stdbool.h
index da9ee8b80..d92160ebc 100644
--- a/deps/jemalloc/include/msvc_compat/stdbool.h
+++ b/deps/jemalloc/include/msvc_compat/C99/stdbool.h
@@ -5,7 +5,11 @@
/* MSVC doesn't define _Bool or bool in C, but does have BOOL */
/* Note this doesn't pass autoconf's test because (bool) 0.5 != true */
+/* Clang-cl uses MSVC headers, so needs msvc_compat, but has _Bool as
+ * a built-in type. */
+#ifndef __clang__
typedef BOOL _Bool;
+#endif
#define bool _Bool
#define true 1
diff --git a/deps/jemalloc/include/msvc_compat/stdint.h b/deps/jemalloc/include/msvc_compat/C99/stdint.h
index d02608a59..d02608a59 100644
--- a/deps/jemalloc/include/msvc_compat/stdint.h
+++ b/deps/jemalloc/include/msvc_compat/C99/stdint.h
diff --git a/deps/jemalloc/include/msvc_compat/inttypes.h b/deps/jemalloc/include/msvc_compat/inttypes.h
deleted file mode 100644
index a4e6b75cb..000000000
--- a/deps/jemalloc/include/msvc_compat/inttypes.h
+++ /dev/null
@@ -1,313 +0,0 @@
-// ISO C9x compliant inttypes.h for Microsoft Visual Studio
-// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
-//
-// Copyright (c) 2006 Alexander Chemeris
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// 1. Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. The name of the author may be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
-// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
-// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
-// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
-// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
-// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-///////////////////////////////////////////////////////////////////////////////
-
-#ifndef _MSC_VER // [
-#error "Use this header only with Microsoft Visual C++ compilers!"
-#endif // _MSC_VER ]
-
-#ifndef _MSC_INTTYPES_H_ // [
-#define _MSC_INTTYPES_H_
-
-#if _MSC_VER > 1000
-#pragma once
-#endif
-
-#include "stdint.h"
-
-// 7.8 Format conversion of integer types
-
-typedef struct {
- intmax_t quot;
- intmax_t rem;
-} imaxdiv_t;
-
-// 7.8.1 Macros for format specifiers
-
-#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) // [ See footnote 185 at page 198
-
-#ifdef _WIN64
-# define __PRI64_PREFIX "l"
-# define __PRIPTR_PREFIX "l"
-#else
-# define __PRI64_PREFIX "ll"
-# define __PRIPTR_PREFIX
-#endif
-
-// The fprintf macros for signed integers are:
-#define PRId8 "d"
-#define PRIi8 "i"
-#define PRIdLEAST8 "d"
-#define PRIiLEAST8 "i"
-#define PRIdFAST8 "d"
-#define PRIiFAST8 "i"
-
-#define PRId16 "hd"
-#define PRIi16 "hi"
-#define PRIdLEAST16 "hd"
-#define PRIiLEAST16 "hi"
-#define PRIdFAST16 "hd"
-#define PRIiFAST16 "hi"
-
-#define PRId32 "d"
-#define PRIi32 "i"
-#define PRIdLEAST32 "d"
-#define PRIiLEAST32 "i"
-#define PRIdFAST32 "d"
-#define PRIiFAST32 "i"
-
-#define PRId64 __PRI64_PREFIX "d"
-#define PRIi64 __PRI64_PREFIX "i"
-#define PRIdLEAST64 __PRI64_PREFIX "d"
-#define PRIiLEAST64 __PRI64_PREFIX "i"
-#define PRIdFAST64 __PRI64_PREFIX "d"
-#define PRIiFAST64 __PRI64_PREFIX "i"
-
-#define PRIdMAX __PRI64_PREFIX "d"
-#define PRIiMAX __PRI64_PREFIX "i"
-
-#define PRIdPTR __PRIPTR_PREFIX "d"
-#define PRIiPTR __PRIPTR_PREFIX "i"
-
-// The fprintf macros for unsigned integers are:
-#define PRIo8 "o"
-#define PRIu8 "u"
-#define PRIx8 "x"
-#define PRIX8 "X"
-#define PRIoLEAST8 "o"
-#define PRIuLEAST8 "u"
-#define PRIxLEAST8 "x"
-#define PRIXLEAST8 "X"
-#define PRIoFAST8 "o"
-#define PRIuFAST8 "u"
-#define PRIxFAST8 "x"
-#define PRIXFAST8 "X"
-
-#define PRIo16 "ho"
-#define PRIu16 "hu"
-#define PRIx16 "hx"
-#define PRIX16 "hX"
-#define PRIoLEAST16 "ho"
-#define PRIuLEAST16 "hu"
-#define PRIxLEAST16 "hx"
-#define PRIXLEAST16 "hX"
-#define PRIoFAST16 "ho"
-#define PRIuFAST16 "hu"
-#define PRIxFAST16 "hx"
-#define PRIXFAST16 "hX"
-
-#define PRIo32 "o"
-#define PRIu32 "u"
-#define PRIx32 "x"
-#define PRIX32 "X"
-#define PRIoLEAST32 "o"
-#define PRIuLEAST32 "u"
-#define PRIxLEAST32 "x"
-#define PRIXLEAST32 "X"
-#define PRIoFAST32 "o"
-#define PRIuFAST32 "u"
-#define PRIxFAST32 "x"
-#define PRIXFAST32 "X"
-
-#define PRIo64 __PRI64_PREFIX "o"
-#define PRIu64 __PRI64_PREFIX "u"
-#define PRIx64 __PRI64_PREFIX "x"
-#define PRIX64 __PRI64_PREFIX "X"
-#define PRIoLEAST64 __PRI64_PREFIX "o"
-#define PRIuLEAST64 __PRI64_PREFIX "u"
-#define PRIxLEAST64 __PRI64_PREFIX "x"
-#define PRIXLEAST64 __PRI64_PREFIX "X"
-#define PRIoFAST64 __PRI64_PREFIX "o"
-#define PRIuFAST64 __PRI64_PREFIX "u"
-#define PRIxFAST64 __PRI64_PREFIX "x"
-#define PRIXFAST64 __PRI64_PREFIX "X"
-
-#define PRIoMAX __PRI64_PREFIX "o"
-#define PRIuMAX __PRI64_PREFIX "u"
-#define PRIxMAX __PRI64_PREFIX "x"
-#define PRIXMAX __PRI64_PREFIX "X"
-
-#define PRIoPTR __PRIPTR_PREFIX "o"
-#define PRIuPTR __PRIPTR_PREFIX "u"
-#define PRIxPTR __PRIPTR_PREFIX "x"
-#define PRIXPTR __PRIPTR_PREFIX "X"
-
-// The fscanf macros for signed integers are:
-#define SCNd8 "d"
-#define SCNi8 "i"
-#define SCNdLEAST8 "d"
-#define SCNiLEAST8 "i"
-#define SCNdFAST8 "d"
-#define SCNiFAST8 "i"
-
-#define SCNd16 "hd"
-#define SCNi16 "hi"
-#define SCNdLEAST16 "hd"
-#define SCNiLEAST16 "hi"
-#define SCNdFAST16 "hd"
-#define SCNiFAST16 "hi"
-
-#define SCNd32 "ld"
-#define SCNi32 "li"
-#define SCNdLEAST32 "ld"
-#define SCNiLEAST32 "li"
-#define SCNdFAST32 "ld"
-#define SCNiFAST32 "li"
-
-#define SCNd64 "I64d"
-#define SCNi64 "I64i"
-#define SCNdLEAST64 "I64d"
-#define SCNiLEAST64 "I64i"
-#define SCNdFAST64 "I64d"
-#define SCNiFAST64 "I64i"
-
-#define SCNdMAX "I64d"
-#define SCNiMAX "I64i"
-
-#ifdef _WIN64 // [
-# define SCNdPTR "I64d"
-# define SCNiPTR "I64i"
-#else // _WIN64 ][
-# define SCNdPTR "ld"
-# define SCNiPTR "li"
-#endif // _WIN64 ]
-
-// The fscanf macros for unsigned integers are:
-#define SCNo8 "o"
-#define SCNu8 "u"
-#define SCNx8 "x"
-#define SCNX8 "X"
-#define SCNoLEAST8 "o"
-#define SCNuLEAST8 "u"
-#define SCNxLEAST8 "x"
-#define SCNXLEAST8 "X"
-#define SCNoFAST8 "o"
-#define SCNuFAST8 "u"
-#define SCNxFAST8 "x"
-#define SCNXFAST8 "X"
-
-#define SCNo16 "ho"
-#define SCNu16 "hu"
-#define SCNx16 "hx"
-#define SCNX16 "hX"
-#define SCNoLEAST16 "ho"
-#define SCNuLEAST16 "hu"
-#define SCNxLEAST16 "hx"
-#define SCNXLEAST16 "hX"
-#define SCNoFAST16 "ho"
-#define SCNuFAST16 "hu"
-#define SCNxFAST16 "hx"
-#define SCNXFAST16 "hX"
-
-#define SCNo32 "lo"
-#define SCNu32 "lu"
-#define SCNx32 "lx"
-#define SCNX32 "lX"
-#define SCNoLEAST32 "lo"
-#define SCNuLEAST32 "lu"
-#define SCNxLEAST32 "lx"
-#define SCNXLEAST32 "lX"
-#define SCNoFAST32 "lo"
-#define SCNuFAST32 "lu"
-#define SCNxFAST32 "lx"
-#define SCNXFAST32 "lX"
-
-#define SCNo64 "I64o"
-#define SCNu64 "I64u"
-#define SCNx64 "I64x"
-#define SCNX64 "I64X"
-#define SCNoLEAST64 "I64o"
-#define SCNuLEAST64 "I64u"
-#define SCNxLEAST64 "I64x"
-#define SCNXLEAST64 "I64X"
-#define SCNoFAST64 "I64o"
-#define SCNuFAST64 "I64u"
-#define SCNxFAST64 "I64x"
-#define SCNXFAST64 "I64X"
-
-#define SCNoMAX "I64o"
-#define SCNuMAX "I64u"
-#define SCNxMAX "I64x"
-#define SCNXMAX "I64X"
-
-#ifdef _WIN64 // [
-# define SCNoPTR "I64o"
-# define SCNuPTR "I64u"
-# define SCNxPTR "I64x"
-# define SCNXPTR "I64X"
-#else // _WIN64 ][
-# define SCNoPTR "lo"
-# define SCNuPTR "lu"
-# define SCNxPTR "lx"
-# define SCNXPTR "lX"
-#endif // _WIN64 ]
-
-#endif // __STDC_FORMAT_MACROS ]
-
-// 7.8.2 Functions for greatest-width integer types
-
-// 7.8.2.1 The imaxabs function
-#define imaxabs _abs64
-
-// 7.8.2.2 The imaxdiv function
-
-// This is modified version of div() function from Microsoft's div.c found
-// in %MSVC.NET%\crt\src\div.c
-#ifdef STATIC_IMAXDIV // [
-static
-#else // STATIC_IMAXDIV ][
-_inline
-#endif // STATIC_IMAXDIV ]
-imaxdiv_t __cdecl imaxdiv(intmax_t numer, intmax_t denom)
-{
- imaxdiv_t result;
-
- result.quot = numer / denom;
- result.rem = numer % denom;
-
- if (numer < 0 && result.rem > 0) {
- // did division wrong; must fix up
- ++result.quot;
- result.rem -= denom;
- }
-
- return result;
-}
-
-// 7.8.2.3 The strtoimax and strtoumax functions
-#define strtoimax _strtoi64
-#define strtoumax _strtoui64
-
-// 7.8.2.4 The wcstoimax and wcstoumax functions
-#define wcstoimax _wcstoi64
-#define wcstoumax _wcstoui64
-
-
-#endif // _MSC_INTTYPES_H_ ]
diff --git a/deps/jemalloc/include/msvc_compat/strings.h b/deps/jemalloc/include/msvc_compat/strings.h
index c84975b6b..f01ffdd18 100644
--- a/deps/jemalloc/include/msvc_compat/strings.h
+++ b/deps/jemalloc/include/msvc_compat/strings.h
@@ -3,8 +3,9 @@
/* MSVC doesn't define ffs/ffsl. This dummy strings.h header is provided
* for both */
-#include <intrin.h>
-#pragma intrinsic(_BitScanForward)
+#ifdef _MSC_VER
+# include <intrin.h>
+# pragma intrinsic(_BitScanForward)
static __forceinline int ffsl(long x)
{
unsigned long i;
@@ -20,4 +21,9 @@ static __forceinline int ffs(int x)
return (ffsl(x));
}
+#else
+# define ffsl(x) __builtin_ffsl(x)
+# define ffs(x) __builtin_ffs(x)
#endif
+
+#endif /* strings_h */
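The fallback above maps ffs()/ffsl() onto the GCC/Clang builtins when the shim is used by a non-MSVC compiler; both branches follow the usual ffs() contract (1-based index of the least significant set bit, 0 for a zero argument). A small sanity-check sketch, assuming ffs()/ffsl() resolve either through this shim or through the system <strings.h>:

	#include <assert.h>
	#include <strings.h>

	int
	main(void)
	{

		/* 0x18 == 0b11000: least significant set bit is bit 3, so ffs() is 4. */
		assert(ffs(0x18) == 4);
		assert(ffsl(0x18L) == 4);
		/* By convention, ffs(0) is 0. */
		assert(ffs(0) == 0);
		return (0);
	}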
diff --git a/deps/jemalloc/include/msvc_compat/windows_extra.h b/deps/jemalloc/include/msvc_compat/windows_extra.h
new file mode 100644
index 000000000..0c5e323ff
--- /dev/null
+++ b/deps/jemalloc/include/msvc_compat/windows_extra.h
@@ -0,0 +1,26 @@
+#ifndef MSVC_COMPAT_WINDOWS_EXTRA_H
+#define MSVC_COMPAT_WINDOWS_EXTRA_H
+
+#ifndef ENOENT
+# define ENOENT ERROR_PATH_NOT_FOUND
+#endif
+#ifndef EINVAL
+# define EINVAL ERROR_BAD_ARGUMENTS
+#endif
+#ifndef EAGAIN
+# define EAGAIN ERROR_OUTOFMEMORY
+#endif
+#ifndef EPERM
+# define EPERM ERROR_WRITE_FAULT
+#endif
+#ifndef EFAULT
+# define EFAULT ERROR_INVALID_ADDRESS
+#endif
+#ifndef ENOMEM
+# define ENOMEM ERROR_NOT_ENOUGH_MEMORY
+#endif
+#ifndef ERANGE
+# define ERANGE ERROR_INVALID_DATA
+#endif
+
+#endif /* MSVC_COMPAT_WINDOWS_EXTRA_H */
diff --git a/deps/jemalloc/jemalloc.pc.in b/deps/jemalloc/jemalloc.pc.in
new file mode 100644
index 000000000..1a3ad9b34
--- /dev/null
+++ b/deps/jemalloc/jemalloc.pc.in
@@ -0,0 +1,12 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+install_suffix=@install_suffix@
+
+Name: jemalloc
+Description: A general purpose malloc(3) implementation that emphasizes fragmentation avoidance and scalable concurrency support.
+URL: http://www.canonware.com/jemalloc
+Version: @jemalloc_version@
+Cflags: -I${includedir}
+Libs: -L${libdir} -ljemalloc${install_suffix}
diff --git a/deps/jemalloc/src/arena.c b/deps/jemalloc/src/arena.c
index dad707b63..3081519cc 100644
--- a/deps/jemalloc/src/arena.c
+++ b/deps/jemalloc/src/arena.c
@@ -5,37 +5,17 @@
/* Data. */
ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
+static ssize_t lg_dirty_mult_default;
arena_bin_info_t arena_bin_info[NBINS];
-JEMALLOC_ALIGNED(CACHELINE)
-const uint8_t small_size2bin[] = {
-#define S2B_8(i) i,
-#define S2B_16(i) S2B_8(i) S2B_8(i)
-#define S2B_32(i) S2B_16(i) S2B_16(i)
-#define S2B_64(i) S2B_32(i) S2B_32(i)
-#define S2B_128(i) S2B_64(i) S2B_64(i)
-#define S2B_256(i) S2B_128(i) S2B_128(i)
-#define S2B_512(i) S2B_256(i) S2B_256(i)
-#define S2B_1024(i) S2B_512(i) S2B_512(i)
-#define S2B_2048(i) S2B_1024(i) S2B_1024(i)
-#define S2B_4096(i) S2B_2048(i) S2B_2048(i)
-#define S2B_8192(i) S2B_4096(i) S2B_4096(i)
-#define SIZE_CLASS(bin, delta, size) \
- S2B_##delta(bin)
- SIZE_CLASSES
-#undef S2B_8
-#undef S2B_16
-#undef S2B_32
-#undef S2B_64
-#undef S2B_128
-#undef S2B_256
-#undef S2B_512
-#undef S2B_1024
-#undef S2B_2048
-#undef S2B_4096
-#undef S2B_8192
-#undef SIZE_CLASS
-};
+size_t map_bias;
+size_t map_misc_offset;
+size_t arena_maxrun; /* Max run size for arenas. */
+size_t large_maxclass; /* Max large size class. */
+static size_t small_maxrun; /* Max run size used for small size classes. */
+static bool *small_run_tab; /* Valid small run page multiples. */
+unsigned nlclasses; /* Number of large size classes. */
+unsigned nhclasses; /* Number of huge size classes. */
/******************************************************************************/
/*
@@ -45,7 +25,7 @@ const uint8_t small_size2bin[] = {
static void arena_purge(arena_t *arena, bool all);
static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
- bool cleaned);
+ bool cleaned, bool decommitted);
static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
@@ -53,296 +33,326 @@ static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
/******************************************************************************/
-static inline int
-arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
+#define CHUNK_MAP_KEY ((uintptr_t)0x1U)
+
+JEMALLOC_INLINE_C arena_chunk_map_misc_t *
+arena_miscelm_key_create(size_t size)
{
- uintptr_t a_mapelm = (uintptr_t)a;
- uintptr_t b_mapelm = (uintptr_t)b;
- assert(a != NULL);
- assert(b != NULL);
+ return ((arena_chunk_map_misc_t *)(arena_mapbits_size_encode(size) |
+ CHUNK_MAP_KEY));
+}
+
+JEMALLOC_INLINE_C bool
+arena_miscelm_is_key(const arena_chunk_map_misc_t *miscelm)
+{
- return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
+ return (((uintptr_t)miscelm & CHUNK_MAP_KEY) != 0);
}
-/* Generate red-black tree functions. */
-rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t,
- u.rb_link, arena_run_comp)
+#undef CHUNK_MAP_KEY
-static inline int
-arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
+JEMALLOC_INLINE_C size_t
+arena_miscelm_key_size_get(const arena_chunk_map_misc_t *miscelm)
{
- int ret;
- size_t a_size = a->bits & ~PAGE_MASK;
- size_t b_size = b->bits & ~PAGE_MASK;
- ret = (a_size > b_size) - (a_size < b_size);
- if (ret == 0) {
- uintptr_t a_mapelm, b_mapelm;
+ assert(arena_miscelm_is_key(miscelm));
- if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY)
- a_mapelm = (uintptr_t)a;
- else {
- /*
- * Treat keys as though they are lower than anything
- * else.
- */
- a_mapelm = 0;
- }
- b_mapelm = (uintptr_t)b;
+ return (arena_mapbits_size_decode((uintptr_t)miscelm));
+}
- ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
- }
+JEMALLOC_INLINE_C size_t
+arena_miscelm_size_get(arena_chunk_map_misc_t *miscelm)
+{
+ arena_chunk_t *chunk;
+ size_t pageind, mapbits;
- return (ret);
-}
+ assert(!arena_miscelm_is_key(miscelm));
-/* Generate red-black tree functions. */
-rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
- u.rb_link, arena_avail_comp)
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
+ pageind = arena_miscelm_to_pageind(miscelm);
+ mapbits = arena_mapbits_get(chunk, pageind);
+ return (arena_mapbits_size_decode(mapbits));
+}
-static inline int
-arena_chunk_dirty_comp(arena_chunk_t *a, arena_chunk_t *b)
+JEMALLOC_INLINE_C int
+arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
{
+ uintptr_t a_miscelm = (uintptr_t)a;
+ uintptr_t b_miscelm = (uintptr_t)b;
assert(a != NULL);
assert(b != NULL);
- /*
- * Short-circuit for self comparison. The following comparison code
- * would come to the same result, but at the cost of executing the slow
- * path.
- */
- if (a == b)
- return (0);
+ return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
+}
+
+/* Generate red-black tree functions. */
+rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t,
+ rb_link, arena_run_comp)
+
+static size_t
+run_quantize(size_t size)
+{
+ size_t qsize;
+
+ assert(size != 0);
+ assert(size == PAGE_CEILING(size));
+
+ /* Don't change sizes that are valid small run sizes. */
+ if (size <= small_maxrun && small_run_tab[size >> LG_PAGE])
+ return (size);
/*
- * Order such that chunks with higher fragmentation are "less than"
- * those with lower fragmentation -- purging order is from "least" to
- * "greatest". Fragmentation is measured as:
- *
- * mean current avail run size
- * --------------------------------
- * mean defragmented avail run size
- *
- * navail
- * -----------
- * nruns_avail nruns_avail-nruns_adjac
- * = ========================= = -----------------------
- * navail nruns_avail
- * -----------------------
- * nruns_avail-nruns_adjac
- *
- * The following code multiplies away the denominator prior to
- * comparison, in order to avoid division.
- *
+ * Round down to the nearest run size that can actually be requested
+ * during normal large allocation. Add large_pad so that cache index
+ * randomization can offset the allocation from the page boundary.
*/
- {
- size_t a_val = (a->nruns_avail - a->nruns_adjac) *
- b->nruns_avail;
- size_t b_val = (b->nruns_avail - b->nruns_adjac) *
- a->nruns_avail;
+ qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad;
+ if (qsize <= SMALL_MAXCLASS + large_pad)
+ return (run_quantize(size - large_pad));
+ assert(qsize <= size);
+ return (qsize);
+}
+
+static size_t
+run_quantize_next(size_t size)
+{
+ size_t large_run_size_next;
+
+ assert(size != 0);
+ assert(size == PAGE_CEILING(size));
- if (a_val < b_val)
- return (1);
- if (a_val > b_val)
- return (-1);
- }
/*
- * Break ties by chunk address. For fragmented chunks, report lower
- * addresses as "lower", so that fragmentation reduction happens first
- * at lower addresses. However, use the opposite ordering for
- * unfragmented chunks, in order to increase the chances of
- * re-allocating dirty runs.
+ * Return the next quantized size greater than the input size.
+ * Quantized sizes comprise the union of run sizes that back small
+ * region runs, and run sizes that back large regions with no explicit
+ * alignment constraints.
*/
- {
- uintptr_t a_chunk = (uintptr_t)a;
- uintptr_t b_chunk = (uintptr_t)b;
- int ret = ((a_chunk > b_chunk) - (a_chunk < b_chunk));
- if (a->nruns_adjac == 0) {
- assert(b->nruns_adjac == 0);
- ret = -ret;
+
+ if (size > SMALL_MAXCLASS) {
+ large_run_size_next = PAGE_CEILING(index2size(size2index(size -
+ large_pad) + 1) + large_pad);
+ } else
+ large_run_size_next = SIZE_T_MAX;
+ if (size >= small_maxrun)
+ return (large_run_size_next);
+
+ while (true) {
+ size += PAGE;
+ assert(size <= small_maxrun);
+ if (small_run_tab[size >> LG_PAGE]) {
+ if (large_run_size_next < size)
+ return (large_run_size_next);
+ return (size);
}
- return (ret);
}
}
-/* Generate red-black tree functions. */
-rb_gen(static UNUSED, arena_chunk_dirty_, arena_chunk_tree_t, arena_chunk_t,
- dirty_link, arena_chunk_dirty_comp)
-
-static inline bool
-arena_avail_adjac_pred(arena_chunk_t *chunk, size_t pageind)
+static size_t
+run_quantize_first(size_t size)
{
- bool ret;
+ size_t qsize = run_quantize(size);
- if (pageind-1 < map_bias)
- ret = false;
- else {
- ret = (arena_mapbits_allocated_get(chunk, pageind-1) == 0);
- assert(ret == false || arena_mapbits_dirty_get(chunk,
- pageind-1) != arena_mapbits_dirty_get(chunk, pageind));
+ if (qsize < size) {
+ /*
+ * Skip a quantization that may have an adequately large run,
+ * because under-sized runs may be mixed in. This only happens
+ * when an unusual size is requested, i.e. for aligned
+ * allocation, and is just one of several places where linear
+ * search would potentially find sufficiently aligned available
+ * memory somewhere lower.
+ */
+ qsize = run_quantize_next(size);
}
- return (ret);
+ return (qsize);
}
-static inline bool
-arena_avail_adjac_succ(arena_chunk_t *chunk, size_t pageind, size_t npages)
+JEMALLOC_INLINE_C int
+arena_avail_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
{
- bool ret;
+ int ret;
+ uintptr_t a_miscelm = (uintptr_t)a;
+ size_t a_qsize = run_quantize(arena_miscelm_is_key(a) ?
+ arena_miscelm_key_size_get(a) : arena_miscelm_size_get(a));
+ size_t b_qsize = run_quantize(arena_miscelm_size_get(b));
- if (pageind+npages == chunk_npages)
- ret = false;
- else {
- assert(pageind+npages < chunk_npages);
- ret = (arena_mapbits_allocated_get(chunk, pageind+npages) == 0);
- assert(ret == false || arena_mapbits_dirty_get(chunk, pageind)
- != arena_mapbits_dirty_get(chunk, pageind+npages));
+ /*
+ * Compare based on quantized size rather than size, in order to sort
+ * equally useful runs only by address.
+ */
+ ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
+ if (ret == 0) {
+ if (!arena_miscelm_is_key(a)) {
+ uintptr_t b_miscelm = (uintptr_t)b;
+
+ ret = (a_miscelm > b_miscelm) - (a_miscelm < b_miscelm);
+ } else {
+ /*
+ * Treat keys as if they are lower than anything else.
+ */
+ ret = -1;
+ }
}
+
return (ret);
}
-static inline bool
-arena_avail_adjac(arena_chunk_t *chunk, size_t pageind, size_t npages)
+/* Generate red-black tree functions. */
+rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t,
+ arena_chunk_map_misc_t, rb_link, arena_avail_comp)
+
+static void
+arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
+ size_t npages)
{
- return (arena_avail_adjac_pred(chunk, pageind) ||
- arena_avail_adjac_succ(chunk, pageind, npages));
+ assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
+ LG_PAGE));
+ arena_avail_tree_insert(&arena->runs_avail, arena_miscelm_get(chunk,
+ pageind));
}
static void
-arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
- size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
+arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
+ size_t npages)
{
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
+ arena_avail_tree_remove(&arena->runs_avail, arena_miscelm_get(chunk,
+ pageind));
+}
- /*
- * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
- * removed and reinserted even if the run to be inserted is clean.
- */
- if (chunk->ndirty != 0)
- arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);
-
- if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
- chunk->nruns_adjac++;
- if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
- chunk->nruns_adjac++;
- chunk->nruns_avail++;
- assert(chunk->nruns_avail > chunk->nruns_adjac);
+static void
+arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
+ size_t npages)
+{
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
- if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
- arena->ndirty += npages;
- chunk->ndirty += npages;
- }
- if (chunk->ndirty != 0)
- arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);
+ assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
+ LG_PAGE));
+ assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
+ assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
+ CHUNK_MAP_DIRTY);
- arena_avail_tree_insert(&arena->runs_avail, arena_mapp_get(chunk,
- pageind));
+ qr_new(&miscelm->rd, rd_link);
+ qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
+ arena->ndirty += npages;
}
static void
-arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
- size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
+arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
+ size_t npages)
{
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
+ assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
+ assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
+ CHUNK_MAP_DIRTY);
- /*
- * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
- * removed and reinserted even if the run to be removed is clean.
- */
- if (chunk->ndirty != 0)
- arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);
+ qr_remove(&miscelm->rd, rd_link);
+ assert(arena->ndirty >= npages);
+ arena->ndirty -= npages;
+}
- if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
- chunk->nruns_adjac--;
- if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
- chunk->nruns_adjac--;
- chunk->nruns_avail--;
- assert(chunk->nruns_avail > chunk->nruns_adjac || (chunk->nruns_avail
- == 0 && chunk->nruns_adjac == 0));
+static size_t
+arena_chunk_dirty_npages(const extent_node_t *node)
+{
+
+ return (extent_node_size_get(node) >> LG_PAGE);
+}
- if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
- arena->ndirty -= npages;
- chunk->ndirty -= npages;
+void
+arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
+{
+
+ if (cache) {
+ extent_node_dirty_linkage_init(node);
+ extent_node_dirty_insert(node, &arena->runs_dirty,
+ &arena->chunks_cache);
+ arena->ndirty += arena_chunk_dirty_npages(node);
}
- if (chunk->ndirty != 0)
- arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);
+}
- arena_avail_tree_remove(&arena->runs_avail, arena_mapp_get(chunk,
- pageind));
+void
+arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
+{
+
+ if (dirty) {
+ extent_node_dirty_remove(node);
+ assert(arena->ndirty >= arena_chunk_dirty_npages(node));
+ arena->ndirty -= arena_chunk_dirty_npages(node);
+ }
}
-static inline void *
+JEMALLOC_INLINE_C void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
void *ret;
unsigned regind;
- bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
- (uintptr_t)bin_info->bitmap_offset);
+ arena_chunk_map_misc_t *miscelm;
+ void *rpages;
assert(run->nfree > 0);
- assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false);
+ assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));
- regind = bitmap_sfu(bitmap, &bin_info->bitmap_info);
- ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
+ regind = bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
+ miscelm = arena_run_to_miscelm(run);
+ rpages = arena_miscelm_to_rpages(miscelm);
+ ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
(uintptr_t)(bin_info->reg_interval * regind));
run->nfree--;
- if (regind == run->nextind)
- run->nextind++;
- assert(regind < run->nextind);
return (ret);
}
-static inline void
+JEMALLOC_INLINE_C void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t mapbits = arena_mapbits_get(chunk, pageind);
- size_t binind = arena_ptr_small_binind_get(ptr, mapbits);
+ szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
unsigned regind = arena_run_regind(run, bin_info, ptr);
- bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
- (uintptr_t)bin_info->bitmap_offset);
assert(run->nfree < bin_info->nregs);
/* Freeing an interior pointer can cause assertion failure. */
- assert(((uintptr_t)ptr - ((uintptr_t)run +
+ assert(((uintptr_t)ptr -
+ ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
(uintptr_t)bin_info->reg0_offset)) %
(uintptr_t)bin_info->reg_interval == 0);
- assert((uintptr_t)ptr >= (uintptr_t)run +
+ assert((uintptr_t)ptr >=
+ (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
(uintptr_t)bin_info->reg0_offset);
/* Freeing an unallocated pointer can cause assertion failure. */
- assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind));
+ assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));
- bitmap_unset(bitmap, &bin_info->bitmap_info, regind);
+ bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
run->nfree++;
}
-static inline void
+JEMALLOC_INLINE_C void
arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
{
- VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
- LG_PAGE)), (npages << LG_PAGE));
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
+ (run_ind << LG_PAGE)), (npages << LG_PAGE));
memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
(npages << LG_PAGE));
}
-static inline void
+JEMALLOC_INLINE_C void
arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
- VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind <<
- LG_PAGE)), PAGE);
+ JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
+ << LG_PAGE)), PAGE);
}
-static inline void
+JEMALLOC_INLINE_C void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
size_t i;
@@ -358,9 +368,9 @@ arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages)
{
if (config_stats) {
- ssize_t cactive_diff = CHUNK_CEILING((arena->nactive +
- add_pages) << LG_PAGE) - CHUNK_CEILING((arena->nactive -
- sub_pages) << LG_PAGE);
+ ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + add_pages
+ - sub_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
+ LG_PAGE);
if (cactive_diff != 0)
stats_cactive_add(cactive_diff);
}
@@ -368,10 +378,12 @@ arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages)
static void
arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
- size_t flag_dirty, size_t need_pages)
+ size_t flag_dirty, size_t flag_decommitted, size_t need_pages)
{
size_t total_pages, rem_pages;
+ assert(flag_dirty == 0 || flag_decommitted == 0);
+
total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
LG_PAGE;
assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
@@ -379,58 +391,75 @@ arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
assert(need_pages <= total_pages);
rem_pages = total_pages - need_pages;
- arena_avail_remove(arena, chunk, run_ind, total_pages, true, true);
+ arena_avail_remove(arena, chunk, run_ind, total_pages);
+ if (flag_dirty != 0)
+ arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
arena_cactive_update(arena, need_pages, 0);
arena->nactive += need_pages;
/* Keep track of trailing unused pages for later use. */
if (rem_pages > 0) {
+ size_t flags = flag_dirty | flag_decommitted;
+ size_t flag_unzeroed_mask = (flags == 0) ? CHUNK_MAP_UNZEROED :
+ 0;
+
+ arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
+ (rem_pages << LG_PAGE), flags |
+ (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) &
+ flag_unzeroed_mask));
+ arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1,
+ (rem_pages << LG_PAGE), flags |
+ (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) &
+ flag_unzeroed_mask));
if (flag_dirty != 0) {
- arena_mapbits_unallocated_set(chunk,
- run_ind+need_pages, (rem_pages << LG_PAGE),
- flag_dirty);
- arena_mapbits_unallocated_set(chunk,
- run_ind+total_pages-1, (rem_pages << LG_PAGE),
- flag_dirty);
- } else {
- arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
- (rem_pages << LG_PAGE),
- arena_mapbits_unzeroed_get(chunk,
- run_ind+need_pages));
- arena_mapbits_unallocated_set(chunk,
- run_ind+total_pages-1, (rem_pages << LG_PAGE),
- arena_mapbits_unzeroed_get(chunk,
- run_ind+total_pages-1));
+ arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
+ rem_pages);
}
- arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages,
- false, true);
+ arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
}
}
-static void
+static bool
arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
bool remove, bool zero)
{
arena_chunk_t *chunk;
- size_t flag_dirty, run_ind, need_pages, i;
+ arena_chunk_map_misc_t *miscelm;
+ size_t flag_dirty, flag_decommitted, run_ind, need_pages;
+ size_t flag_unzeroed_mask;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
+ miscelm = arena_run_to_miscelm(run);
+ run_ind = arena_miscelm_to_pageind(miscelm);
flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
+ flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
need_pages = (size >> LG_PAGE);
assert(need_pages > 0);
+ if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
+ run_ind << LG_PAGE, size, arena->ind))
+ return (true);
+
if (remove) {
arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
- need_pages);
+ flag_decommitted, need_pages);
}
if (zero) {
- if (flag_dirty == 0) {
+ if (flag_decommitted != 0) {
+ /* The run is untouched, and therefore zeroed. */
+ JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
+ *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
+ (need_pages << LG_PAGE));
+ } else if (flag_dirty != 0) {
+ /* The run is dirty, so all pages must be zeroed. */
+ arena_run_zero(chunk, run_ind, need_pages);
+ } else {
/*
* The run is clean, so some pages may be zeroed (i.e.
* never before touched).
*/
+ size_t i;
for (i = 0; i < need_pages; i++) {
if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
!= 0)
@@ -443,12 +472,9 @@ arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
run_ind+i);
}
}
- } else {
- /* The run is dirty, so all pages must be zeroed. */
- arena_run_zero(chunk, run_ind, need_pages);
}
} else {
- VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
(run_ind << LG_PAGE)), (need_pages << LG_PAGE));
}
@@ -456,68 +482,66 @@ arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
* Set the last element first, in case the run only contains one page
* (i.e. both statements set the same element).
*/
- arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty);
- arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
+ flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
+ CHUNK_MAP_UNZEROED : 0;
+ arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty |
+ (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
+ run_ind+need_pages-1)));
+ arena_mapbits_large_set(chunk, run_ind, size, flag_dirty |
+ (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind)));
+ return (false);
}
-static void
+static bool
arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{
- arena_run_split_large_helper(arena, run, size, true, zero);
+ return (arena_run_split_large_helper(arena, run, size, true, zero));
}
-static void
+static bool
arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{
- arena_run_split_large_helper(arena, run, size, false, zero);
+ return (arena_run_split_large_helper(arena, run, size, false, zero));
}
-static void
+static bool
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
- size_t binind)
+ szind_t binind)
{
arena_chunk_t *chunk;
- size_t flag_dirty, run_ind, need_pages, i;
+ arena_chunk_map_misc_t *miscelm;
+ size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;
assert(binind != BININD_INVALID);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
+ miscelm = arena_run_to_miscelm(run);
+ run_ind = arena_miscelm_to_pageind(miscelm);
flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
+ flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
need_pages = (size >> LG_PAGE);
assert(need_pages > 0);
- arena_run_split_remove(arena, chunk, run_ind, flag_dirty, need_pages);
+ if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
+ run_ind << LG_PAGE, size, arena->ind))
+ return (true);
- /*
- * Propagate the dirty and unzeroed flags to the allocated small run,
- * so that arena_dalloc_bin_run() has the ability to conditionally trim
- * clean pages.
- */
- arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty);
- /*
- * The first page will always be dirtied during small run
- * initialization, so a validation failure here would not actually
- * cause an observable failure.
- */
- if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk,
- run_ind) == 0)
- arena_run_page_validate_zeroed(chunk, run_ind);
- for (i = 1; i < need_pages - 1; i++) {
- arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
- if (config_debug && flag_dirty == 0 &&
- arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0)
+ arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
+ flag_decommitted, need_pages);
+
+ for (i = 0; i < need_pages; i++) {
+ size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk,
+ run_ind+i);
+ arena_mapbits_small_set(chunk, run_ind+i, i, binind,
+ flag_unzeroed);
+ if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
arena_run_page_validate_zeroed(chunk, run_ind+i);
}
- arena_mapbits_small_set(chunk, run_ind+need_pages-1, need_pages-1,
- binind, flag_dirty);
- if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk,
- run_ind+need_pages-1) == 0)
- arena_run_page_validate_zeroed(chunk, run_ind+need_pages-1);
- VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
(run_ind << LG_PAGE)), (need_pages << LG_PAGE));
+ return (false);
}
static arena_chunk_t *
@@ -533,76 +557,143 @@ arena_chunk_init_spare(arena_t *arena)
assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
- arena_maxclass);
+ arena_maxrun);
assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
- arena_maxclass);
+ arena_maxrun);
assert(arena_mapbits_dirty_get(chunk, map_bias) ==
arena_mapbits_dirty_get(chunk, chunk_npages-1));
return (chunk);
}
+static bool
+arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero)
+{
+
+ /*
+ * The extent node notion of "committed" doesn't directly apply to
+ * arena chunks. Arbitrarily mark them as committed. The commit state
+ * of runs is tracked individually, and upon chunk deallocation the
+ * entire chunk is in a consistent commit state.
+ */
+ extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
+ extent_node_achunk_set(&chunk->node, true);
+ return (chunk_register(chunk, &chunk->node));
+}
+
static arena_chunk_t *
-arena_chunk_init_hard(arena_t *arena)
+arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ bool *zero, bool *commit)
{
arena_chunk_t *chunk;
- bool zero;
- size_t unzeroed, i;
-
- assert(arena->spare == NULL);
- zero = false;
malloc_mutex_unlock(&arena->lock);
- chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize, false,
- &zero, arena->dss_prec);
+
+ chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_hooks, NULL,
+ chunksize, chunksize, zero, commit);
+ if (chunk != NULL && !*commit) {
+ /* Commit header. */
+ if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
+ LG_PAGE, arena->ind)) {
+ chunk_dalloc_wrapper(arena, chunk_hooks,
+ (void *)chunk, chunksize, *commit);
+ chunk = NULL;
+ }
+ }
+ if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) {
+ if (!*commit) {
+ /* Undo commit of header. */
+ chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
+ LG_PAGE, arena->ind);
+ }
+ chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk,
+ chunksize, *commit);
+ chunk = NULL;
+ }
+
malloc_mutex_lock(&arena->lock);
- if (chunk == NULL)
- return (NULL);
- if (config_stats)
+ return (chunk);
+}
+
+static arena_chunk_t *
+arena_chunk_alloc_internal(arena_t *arena, bool *zero, bool *commit)
+{
+ arena_chunk_t *chunk;
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+
+ chunk = chunk_alloc_cache(arena, &chunk_hooks, NULL, chunksize,
+ chunksize, zero, true);
+ if (chunk != NULL) {
+ if (arena_chunk_register(arena, chunk, *zero)) {
+ chunk_dalloc_cache(arena, &chunk_hooks, chunk,
+ chunksize, true);
+ return (NULL);
+ }
+ *commit = true;
+ }
+ if (chunk == NULL) {
+ chunk = arena_chunk_alloc_internal_hard(arena, &chunk_hooks,
+ zero, commit);
+ }
+
+ if (config_stats && chunk != NULL) {
arena->stats.mapped += chunksize;
+ arena->stats.metadata_mapped += (map_bias << LG_PAGE);
+ }
- chunk->arena = arena;
+ return (chunk);
+}
- /*
- * Claim that no pages are in use, since the header is merely overhead.
- */
- chunk->ndirty = 0;
+static arena_chunk_t *
+arena_chunk_init_hard(arena_t *arena)
+{
+ arena_chunk_t *chunk;
+ bool zero, commit;
+ size_t flag_unzeroed, flag_decommitted, i;
+
+ assert(arena->spare == NULL);
- chunk->nruns_avail = 0;
- chunk->nruns_adjac = 0;
+ zero = false;
+ commit = false;
+ chunk = arena_chunk_alloc_internal(arena, &zero, &commit);
+ if (chunk == NULL)
+ return (NULL);
/*
* Initialize the map to contain one maximal free untouched run. Mark
- * the pages as zeroed iff chunk_alloc() returned a zeroed chunk.
+ * the pages as zeroed if chunk_alloc() returned a zeroed or decommitted
+ * chunk.
*/
- unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
- arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass,
- unzeroed);
+ flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
+ flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
+ arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
+ flag_unzeroed | flag_decommitted);
/*
* There is no need to initialize the internal page map entries unless
* the chunk is not zeroed.
*/
- if (zero == false) {
- VALGRIND_MAKE_MEM_UNDEFINED((void *)arena_mapp_get(chunk,
- map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk,
- chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk,
+ if (!zero) {
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
+ (void *)arena_bitselm_get(chunk, map_bias+1),
+ (size_t)((uintptr_t) arena_bitselm_get(chunk,
+ chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk,
map_bias+1)));
for (i = map_bias+1; i < chunk_npages-1; i++)
- arena_mapbits_unzeroed_set(chunk, i, unzeroed);
+ arena_mapbits_internal_set(chunk, i, flag_unzeroed);
} else {
- VALGRIND_MAKE_MEM_DEFINED((void *)arena_mapp_get(chunk,
- map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk,
- chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk,
- map_bias+1)));
+ JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
+ *)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t)
+ arena_bitselm_get(chunk, chunk_npages-1) -
+ (uintptr_t)arena_bitselm_get(chunk, map_bias+1)));
if (config_debug) {
for (i = map_bias+1; i < chunk_npages-1; i++) {
assert(arena_mapbits_unzeroed_get(chunk, i) ==
- unzeroed);
+ flag_unzeroed);
}
}
}
- arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxclass,
- unzeroed);
+ arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
+ flag_unzeroed);
return (chunk);
}
@@ -621,65 +712,383 @@ arena_chunk_alloc(arena_t *arena)
}
/* Insert the run into the runs_avail tree. */
- arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias,
- false, false);
+ arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);
return (chunk);
}
static void
-arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
+arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
{
+
assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
- arena_maxclass);
+ arena_maxrun);
assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
- arena_maxclass);
+ arena_maxrun);
assert(arena_mapbits_dirty_get(chunk, map_bias) ==
arena_mapbits_dirty_get(chunk, chunk_npages-1));
+ assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
+ arena_mapbits_decommitted_get(chunk, chunk_npages-1));
/*
* Remove run from the runs_avail tree, so that the arena does not use
* it.
*/
- arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias,
- false, false);
+ arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);
if (arena->spare != NULL) {
arena_chunk_t *spare = arena->spare;
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+ bool committed;
arena->spare = chunk;
- malloc_mutex_unlock(&arena->lock);
- chunk_dealloc((void *)spare, chunksize, true);
- malloc_mutex_lock(&arena->lock);
- if (config_stats)
+ if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
+ arena_run_dirty_remove(arena, spare, map_bias,
+ chunk_npages-map_bias);
+ }
+
+ chunk_deregister(spare, &spare->node);
+
+ committed = (arena_mapbits_decommitted_get(spare, map_bias) ==
+ 0);
+ if (!committed) {
+ /*
+ * Decommit the header. Mark the chunk as decommitted
+ * even if header decommit fails, since treating a
+ * partially committed chunk as committed has a high
+ * potential for causing later access of decommitted
+ * memory.
+ */
+ chunk_hooks = chunk_hooks_get(arena);
+ chunk_hooks.decommit(spare, chunksize, 0, map_bias <<
+ LG_PAGE, arena->ind);
+ }
+
+ chunk_dalloc_cache(arena, &chunk_hooks, (void *)spare,
+ chunksize, committed);
+
+ if (config_stats) {
arena->stats.mapped -= chunksize;
+ arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
+ }
} else
arena->spare = chunk;
}
-static arena_run_t *
-arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
+static void
+arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
{
- arena_run_t *run;
- arena_chunk_map_t *mapelm, key;
+ szind_t index = size2index(usize) - nlclasses - NBINS;
- key.bits = size | CHUNK_MAP_KEY;
- mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
- if (mapelm != NULL) {
- arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
- size_t pageind = (((uintptr_t)mapelm -
- (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
- + map_bias;
+ cassert(config_stats);
- run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
- LG_PAGE));
- arena_run_split_large(arena, run, size, zero);
- return (run);
+ arena->stats.nmalloc_huge++;
+ arena->stats.allocated_huge += usize;
+ arena->stats.hstats[index].nmalloc++;
+ arena->stats.hstats[index].curhchunks++;
+}
+
+static void
+arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
+{
+ szind_t index = size2index(usize) - nlclasses - NBINS;
+
+ cassert(config_stats);
+
+ arena->stats.nmalloc_huge--;
+ arena->stats.allocated_huge -= usize;
+ arena->stats.hstats[index].nmalloc--;
+ arena->stats.hstats[index].curhchunks--;
+}
+
+static void
+arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
+{
+ szind_t index = size2index(usize) - nlclasses - NBINS;
+
+ cassert(config_stats);
+
+ arena->stats.ndalloc_huge++;
+ arena->stats.allocated_huge -= usize;
+ arena->stats.hstats[index].ndalloc++;
+ arena->stats.hstats[index].curhchunks--;
+}
+
+static void
+arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
+{
+ szind_t index = size2index(usize) - nlclasses - NBINS;
+
+ cassert(config_stats);
+
+ arena->stats.ndalloc_huge--;
+ arena->stats.allocated_huge += usize;
+ arena->stats.hstats[index].ndalloc--;
+ arena->stats.hstats[index].curhchunks++;
+}
+
+static void
+arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
+{
+
+ arena_huge_dalloc_stats_update(arena, oldsize);
+ arena_huge_malloc_stats_update(arena, usize);
+}
+
+static void
+arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
+ size_t usize)
+{
+
+ arena_huge_dalloc_stats_update_undo(arena, oldsize);
+ arena_huge_malloc_stats_update_undo(arena, usize);
+}
+
+extent_node_t *
+arena_node_alloc(arena_t *arena)
+{
+ extent_node_t *node;
+
+ malloc_mutex_lock(&arena->node_cache_mtx);
+ node = ql_last(&arena->node_cache, ql_link);
+ if (node == NULL) {
+ malloc_mutex_unlock(&arena->node_cache_mtx);
+ return (base_alloc(sizeof(extent_node_t)));
}
+ ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
+ malloc_mutex_unlock(&arena->node_cache_mtx);
+ return (node);
+}
- return (NULL);
+void
+arena_node_dalloc(arena_t *arena, extent_node_t *node)
+{
+
+ malloc_mutex_lock(&arena->node_cache_mtx);
+ ql_elm_new(node, ql_link);
+ ql_tail_insert(&arena->node_cache, node, ql_link);
+ malloc_mutex_unlock(&arena->node_cache_mtx);
+}
+
+static void *
+arena_chunk_alloc_huge_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ size_t usize, size_t alignment, bool *zero, size_t csize)
+{
+ void *ret;
+ bool commit = true;
+
+ ret = chunk_alloc_wrapper(arena, chunk_hooks, NULL, csize, alignment,
+ zero, &commit);
+ if (ret == NULL) {
+ /* Revert optimistic stats updates. */
+ malloc_mutex_lock(&arena->lock);
+ if (config_stats) {
+ arena_huge_malloc_stats_update_undo(arena, usize);
+ arena->stats.mapped -= usize;
+ }
+ arena->nactive -= (usize >> LG_PAGE);
+ malloc_mutex_unlock(&arena->lock);
+ }
+
+ return (ret);
+}
+
+void *
+arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
+ bool *zero)
+{
+ void *ret;
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+ size_t csize = CHUNK_CEILING(usize);
+
+ malloc_mutex_lock(&arena->lock);
+
+ /* Optimistically update stats. */
+ if (config_stats) {
+ arena_huge_malloc_stats_update(arena, usize);
+ arena->stats.mapped += usize;
+ }
+ arena->nactive += (usize >> LG_PAGE);
+
+ ret = chunk_alloc_cache(arena, &chunk_hooks, NULL, csize, alignment,
+ zero, true);
+ malloc_mutex_unlock(&arena->lock);
+ if (ret == NULL) {
+ ret = arena_chunk_alloc_huge_hard(arena, &chunk_hooks, usize,
+ alignment, zero, csize);
+ }
+
+ if (config_stats && ret != NULL)
+ stats_cactive_add(usize);
+ return (ret);
+}
+
+void
+arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize)
+{
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+ size_t csize;
+
+ csize = CHUNK_CEILING(usize);
+ malloc_mutex_lock(&arena->lock);
+ if (config_stats) {
+ arena_huge_dalloc_stats_update(arena, usize);
+ arena->stats.mapped -= usize;
+ stats_cactive_sub(usize);
+ }
+ arena->nactive -= (usize >> LG_PAGE);
+
+ chunk_dalloc_cache(arena, &chunk_hooks, chunk, csize, true);
+ malloc_mutex_unlock(&arena->lock);
+}
+
+void
+arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize,
+ size_t usize)
+{
+
+ assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
+ assert(oldsize != usize);
+
+ malloc_mutex_lock(&arena->lock);
+ if (config_stats)
+ arena_huge_ralloc_stats_update(arena, oldsize, usize);
+ if (oldsize < usize) {
+ size_t udiff = usize - oldsize;
+ arena->nactive += udiff >> LG_PAGE;
+ if (config_stats)
+ stats_cactive_add(udiff);
+ } else {
+ size_t udiff = oldsize - usize;
+ arena->nactive -= udiff >> LG_PAGE;
+ if (config_stats)
+ stats_cactive_sub(udiff);
+ }
+ malloc_mutex_unlock(&arena->lock);
+}
+
+void
+arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize,
+ size_t usize)
+{
+ size_t udiff = oldsize - usize;
+ size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
+
+ malloc_mutex_lock(&arena->lock);
+ if (config_stats) {
+ arena_huge_ralloc_stats_update(arena, oldsize, usize);
+ if (cdiff != 0) {
+ arena->stats.mapped -= cdiff;
+ stats_cactive_sub(udiff);
+ }
+ }
+ arena->nactive -= udiff >> LG_PAGE;
+
+ if (cdiff != 0) {
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+ void *nchunk = (void *)((uintptr_t)chunk +
+ CHUNK_CEILING(usize));
+
+ chunk_dalloc_cache(arena, &chunk_hooks, nchunk, cdiff, true);
+ }
+ malloc_mutex_unlock(&arena->lock);
+}
+
+static bool
+arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *chunk, size_t oldsize, size_t usize, bool *zero, void *nchunk,
+ size_t udiff, size_t cdiff)
+{
+ bool err;
+ bool commit = true;
+
+ err = (chunk_alloc_wrapper(arena, chunk_hooks, nchunk, cdiff, chunksize,
+ zero, &commit) == NULL);
+ if (err) {
+ /* Revert optimistic stats updates. */
+ malloc_mutex_lock(&arena->lock);
+ if (config_stats) {
+ arena_huge_ralloc_stats_update_undo(arena, oldsize,
+ usize);
+ arena->stats.mapped -= cdiff;
+ }
+ arena->nactive -= (udiff >> LG_PAGE);
+ malloc_mutex_unlock(&arena->lock);
+ } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
+ cdiff, true, arena->ind)) {
+ chunk_dalloc_arena(arena, chunk_hooks, nchunk, cdiff, *zero,
+ true);
+ err = true;
+ }
+ return (err);
+}
+
+bool
+arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
+ size_t usize, bool *zero)
+{
+ bool err;
+ chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
+ void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
+ size_t udiff = usize - oldsize;
+ size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
+
+ malloc_mutex_lock(&arena->lock);
+
+ /* Optimistically update stats. */
+ if (config_stats) {
+ arena_huge_ralloc_stats_update(arena, oldsize, usize);
+ arena->stats.mapped += cdiff;
+ }
+ arena->nactive += (udiff >> LG_PAGE);
+
+ err = (chunk_alloc_cache(arena, &arena->chunk_hooks, nchunk, cdiff,
+ chunksize, zero, true) == NULL);
+ malloc_mutex_unlock(&arena->lock);
+ if (err) {
+ err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks,
+ chunk, oldsize, usize, zero, nchunk, udiff,
+ cdiff);
+ } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
+ cdiff, true, arena->ind)) {
+ chunk_dalloc_arena(arena, &chunk_hooks, nchunk, cdiff, *zero,
+ true);
+ err = true;
+ }
+
+ if (config_stats && !err)
+ stats_cactive_add(udiff);
+ return (err);
+}
+
+/*
+ * Do first-best-fit run selection, i.e. select the lowest run that best fits.
+ * Run sizes are quantized, so not all candidate runs are necessarily exactly
+ * the same size.
+ */
+static arena_run_t *
+arena_run_first_best_fit(arena_t *arena, size_t size)
+{
+ size_t search_size = run_quantize_first(size);
+ arena_chunk_map_misc_t *key = arena_miscelm_key_create(search_size);
+ arena_chunk_map_misc_t *miscelm =
+ arena_avail_tree_nsearch(&arena->runs_avail, key);
+ if (miscelm == NULL)
+ return (NULL);
+ return (&miscelm->run);
+}
+
+static arena_run_t *
+arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
+{
+ arena_run_t *run = arena_run_first_best_fit(arena, s2u(size));
+ if (run != NULL) {
+ if (arena_run_split_large(arena, run, size, zero))
+ run = NULL;
+ }
+ return (run);
}
static arena_run_t *
@@ -688,8 +1097,8 @@ arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
arena_chunk_t *chunk;
arena_run_t *run;
- assert(size <= arena_maxclass);
- assert((size & PAGE_MASK) == 0);
+ assert(size <= arena_maxrun);
+ assert(size == PAGE_CEILING(size));
/* Search the arena's chunks for the lowest best fit. */
run = arena_run_alloc_large_helper(arena, size, zero);
@@ -701,8 +1110,9 @@ arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
*/
chunk = arena_chunk_alloc(arena);
if (chunk != NULL) {
- run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
- arena_run_split_large(arena, run, size, zero);
+ run = &arena_miscelm_get(chunk, map_bias)->run;
+ if (arena_run_split_large(arena, run, size, zero))
+ run = NULL;
return (run);
}
@@ -715,36 +1125,24 @@ arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
}
static arena_run_t *
-arena_run_alloc_small_helper(arena_t *arena, size_t size, size_t binind)
+arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
{
- arena_run_t *run;
- arena_chunk_map_t *mapelm, key;
-
- key.bits = size | CHUNK_MAP_KEY;
- mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
- if (mapelm != NULL) {
- arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
- size_t pageind = (((uintptr_t)mapelm -
- (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
- + map_bias;
-
- run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
- LG_PAGE));
- arena_run_split_small(arena, run, size, binind);
- return (run);
+ arena_run_t *run = arena_run_first_best_fit(arena, size);
+ if (run != NULL) {
+ if (arena_run_split_small(arena, run, size, binind))
+ run = NULL;
}
-
- return (NULL);
+ return (run);
}
static arena_run_t *
-arena_run_alloc_small(arena_t *arena, size_t size, size_t binind)
+arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind)
{
arena_chunk_t *chunk;
arena_run_t *run;
- assert(size <= arena_maxclass);
- assert((size & PAGE_MASK) == 0);
+ assert(size <= arena_maxrun);
+ assert(size == PAGE_CEILING(size));
assert(binind != BININD_INVALID);
/* Search the arena's chunks for the lowest best fit. */
@@ -757,8 +1155,9 @@ arena_run_alloc_small(arena_t *arena, size_t size, size_t binind)
*/
chunk = arena_chunk_alloc(arena);
if (chunk != NULL) {
- run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
- arena_run_split_small(arena, run, size, binind);
+ run = &arena_miscelm_get(chunk, map_bias)->run;
+ if (arena_run_split_small(arena, run, size, binind))
+ run = NULL;
return (run);
}
@@ -770,313 +1169,373 @@ arena_run_alloc_small(arena_t *arena, size_t size, size_t binind)
return (arena_run_alloc_small_helper(arena, size, binind));
}
-static inline void
+static bool
+arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
+{
+
+ return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t)
+ << 3));
+}
+
+ssize_t
+arena_lg_dirty_mult_get(arena_t *arena)
+{
+ ssize_t lg_dirty_mult;
+
+ malloc_mutex_lock(&arena->lock);
+ lg_dirty_mult = arena->lg_dirty_mult;
+ malloc_mutex_unlock(&arena->lock);
+
+ return (lg_dirty_mult);
+}
+
+bool
+arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult)
+{
+
+ if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
+ return (true);
+
+ malloc_mutex_lock(&arena->lock);
+ arena->lg_dirty_mult = lg_dirty_mult;
+ arena_maybe_purge(arena);
+ malloc_mutex_unlock(&arena->lock);
+
+ return (false);
+}
+
+void
arena_maybe_purge(arena_t *arena)
{
- size_t npurgeable, threshold;
/* Don't purge if the option is disabled. */
- if (opt_lg_dirty_mult < 0)
+ if (arena->lg_dirty_mult < 0)
return;
- /* Don't purge if all dirty pages are already being purged. */
- if (arena->ndirty <= arena->npurgatory)
+ /* Don't recursively purge. */
+ if (arena->purging)
return;
- npurgeable = arena->ndirty - arena->npurgatory;
- threshold = (arena->nactive >> opt_lg_dirty_mult);
/*
- * Don't purge unless the number of purgeable pages exceeds the
- * threshold.
+ * Iterate, since preventing recursive purging could otherwise leave too
+ * many dirty pages.
*/
- if (npurgeable <= threshold)
- return;
-
- arena_purge(arena, false);
+ while (true) {
+ size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
+ if (threshold < chunk_npages)
+ threshold = chunk_npages;
+ /*
+ * Don't purge unless the number of purgeable pages exceeds the
+ * threshold.
+ */
+ if (arena->ndirty <= threshold)
+ return;
+ arena_purge(arena, false);
+ }
}
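
A worked sketch of the purge trigger in arena_maybe_purge() above: the arena tolerates up to nactive >> lg_dirty_mult dirty pages, but never fewer than one chunk's worth, and keeps purging while ndirty exceeds that. The values below (lg_dirty_mult of 3, 512-page chunks) are assumptions for the example, not values read from the jemalloc headers.

#include <stdio.h>

static size_t demo_purge_threshold(size_t nactive, int lg_dirty_mult,
    size_t chunk_npages) {
    size_t threshold = nactive >> lg_dirty_mult;
    return (threshold < chunk_npages) ? chunk_npages : threshold;
}

int main(void) {
    /* 40960 active pages at an 8:1 ratio -> purge once ndirty > 5120. */
    printf("threshold = %zu pages\n", demo_purge_threshold(40960, 3, 512));
    /* Small arena: the chunk_npages floor clamps the threshold to 512. */
    printf("threshold = %zu pages\n", demo_purge_threshold(100, 3, 512));
    return 0;
}
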
-static arena_chunk_t *
-chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg)
+static size_t
+arena_dirty_count(arena_t *arena)
{
- size_t *ndirty = (size_t *)arg;
+ size_t ndirty = 0;
+ arena_runs_dirty_link_t *rdelm;
+ extent_node_t *chunkselm;
- assert(chunk->ndirty != 0);
- *ndirty += chunk->ndirty;
- return (NULL);
+ for (rdelm = qr_next(&arena->runs_dirty, rd_link),
+ chunkselm = qr_next(&arena->chunks_cache, cc_link);
+ rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
+ size_t npages;
+
+ if (rdelm == &chunkselm->rd) {
+ npages = extent_node_size_get(chunkselm) >> LG_PAGE;
+ chunkselm = qr_next(chunkselm, cc_link);
+ } else {
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
+ rdelm);
+ arena_chunk_map_misc_t *miscelm =
+ arena_rd_to_miscelm(rdelm);
+ size_t pageind = arena_miscelm_to_pageind(miscelm);
+ assert(arena_mapbits_allocated_get(chunk, pageind) ==
+ 0);
+ assert(arena_mapbits_large_get(chunk, pageind) == 0);
+ assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
+ npages = arena_mapbits_unallocated_size_get(chunk,
+ pageind) >> LG_PAGE;
+ }
+ ndirty += npages;
+ }
+
+ return (ndirty);
}
static size_t
-arena_compute_npurgatory(arena_t *arena, bool all)
+arena_compute_npurge(arena_t *arena, bool all)
{
- size_t npurgatory, npurgeable;
+ size_t npurge;
/*
* Compute the minimum number of pages that this thread should try to
* purge.
*/
- npurgeable = arena->ndirty - arena->npurgatory;
-
- if (all == false) {
- size_t threshold = (arena->nactive >> opt_lg_dirty_mult);
+ if (!all) {
+ size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
+ threshold = threshold < chunk_npages ? chunk_npages : threshold;
- npurgatory = npurgeable - threshold;
+ npurge = arena->ndirty - threshold;
} else
- npurgatory = npurgeable;
+ npurge = arena->ndirty;
- return (npurgatory);
+ return (npurge);
}
-static void
-arena_chunk_stash_dirty(arena_t *arena, arena_chunk_t *chunk, bool all,
- arena_chunk_mapelms_t *mapelms)
-{
- size_t pageind, npages;
-
- /*
- * Temporarily allocate free dirty runs within chunk. If all is false,
- * only operate on dirty runs that are fragments; otherwise operate on
- * all dirty runs.
- */
- for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
- arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
- if (arena_mapbits_allocated_get(chunk, pageind) == 0) {
+static size_t
+arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all,
+ size_t npurge, arena_runs_dirty_link_t *purge_runs_sentinel,
+ extent_node_t *purge_chunks_sentinel)
+{
+ arena_runs_dirty_link_t *rdelm, *rdelm_next;
+ extent_node_t *chunkselm;
+ size_t nstashed = 0;
+
+ /* Stash at least npurge pages. */
+ for (rdelm = qr_next(&arena->runs_dirty, rd_link),
+ chunkselm = qr_next(&arena->chunks_cache, cc_link);
+ rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
+ size_t npages;
+ rdelm_next = qr_next(rdelm, rd_link);
+
+ if (rdelm == &chunkselm->rd) {
+ extent_node_t *chunkselm_next;
+ bool zero;
+ UNUSED void *chunk;
+
+ chunkselm_next = qr_next(chunkselm, cc_link);
+ /*
+ * Allocate. chunkselm remains valid due to the
+ * dalloc_node=false argument to chunk_alloc_cache().
+ */
+ zero = false;
+ chunk = chunk_alloc_cache(arena, chunk_hooks,
+ extent_node_addr_get(chunkselm),
+ extent_node_size_get(chunkselm), chunksize, &zero,
+ false);
+ assert(chunk == extent_node_addr_get(chunkselm));
+ assert(zero == extent_node_zeroed_get(chunkselm));
+ extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
+ purge_chunks_sentinel);
+ npages = extent_node_size_get(chunkselm) >> LG_PAGE;
+ chunkselm = chunkselm_next;
+ } else {
+ arena_chunk_t *chunk =
+ (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
+ arena_chunk_map_misc_t *miscelm =
+ arena_rd_to_miscelm(rdelm);
+ size_t pageind = arena_miscelm_to_pageind(miscelm);
+ arena_run_t *run = &miscelm->run;
size_t run_size =
arena_mapbits_unallocated_size_get(chunk, pageind);
npages = run_size >> LG_PAGE;
+
assert(pageind + npages <= chunk_npages);
assert(arena_mapbits_dirty_get(chunk, pageind) ==
arena_mapbits_dirty_get(chunk, pageind+npages-1));
- if (arena_mapbits_dirty_get(chunk, pageind) != 0 &&
- (all || arena_avail_adjac(chunk, pageind,
- npages))) {
- arena_run_t *run = (arena_run_t *)((uintptr_t)
- chunk + (uintptr_t)(pageind << LG_PAGE));
-
- arena_run_split_large(arena, run, run_size,
- false);
- /* Append to list for later processing. */
- ql_elm_new(mapelm, u.ql_link);
- ql_tail_insert(mapelms, mapelm, u.ql_link);
- }
- } else {
- /* Skip run. */
- if (arena_mapbits_large_get(chunk, pageind) != 0) {
- npages = arena_mapbits_large_size_get(chunk,
- pageind) >> LG_PAGE;
- } else {
- size_t binind;
- arena_bin_info_t *bin_info;
- arena_run_t *run = (arena_run_t *)((uintptr_t)
- chunk + (uintptr_t)(pageind << LG_PAGE));
-
- assert(arena_mapbits_small_runind_get(chunk,
- pageind) == 0);
- binind = arena_bin_index(arena, run->bin);
- bin_info = &arena_bin_info[binind];
- npages = bin_info->run_size >> LG_PAGE;
+ /*
+ * If purging the spare chunk's run, make it available
+ * prior to allocation.
+ */
+ if (chunk == arena->spare)
+ arena_chunk_alloc(arena);
+
+ /* Temporarily allocate the free dirty run. */
+ arena_run_split_large(arena, run, run_size, false);
+ /* Stash. */
+ if (false)
+ qr_new(rdelm, rd_link); /* Redundant. */
+ else {
+ assert(qr_next(rdelm, rd_link) == rdelm);
+ assert(qr_prev(rdelm, rd_link) == rdelm);
}
+ qr_meld(purge_runs_sentinel, rdelm, rd_link);
}
+
+ nstashed += npages;
+ if (!all && nstashed >= npurge)
+ break;
}
- assert(pageind == chunk_npages);
- assert(chunk->ndirty == 0 || all == false);
- assert(chunk->nruns_adjac == 0);
+
+ return (nstashed);
}
static size_t
-arena_chunk_purge_stashed(arena_t *arena, arena_chunk_t *chunk,
- arena_chunk_mapelms_t *mapelms)
+arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ arena_runs_dirty_link_t *purge_runs_sentinel,
+ extent_node_t *purge_chunks_sentinel)
{
- size_t npurged, pageind, npages, nmadvise;
- arena_chunk_map_t *mapelm;
+ size_t npurged, nmadvise;
+ arena_runs_dirty_link_t *rdelm;
+ extent_node_t *chunkselm;
- malloc_mutex_unlock(&arena->lock);
if (config_stats)
nmadvise = 0;
npurged = 0;
- ql_foreach(mapelm, mapelms, u.ql_link) {
- bool unzeroed;
- size_t flag_unzeroed, i;
-
- pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
- sizeof(arena_chunk_map_t)) + map_bias;
- npages = arena_mapbits_large_size_get(chunk, pageind) >>
- LG_PAGE;
- assert(pageind + npages <= chunk_npages);
- unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind <<
- LG_PAGE)), (npages << LG_PAGE));
- flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;
- /*
- * Set the unzeroed flag for all pages, now that pages_purge()
- * has returned whether the pages were zeroed as a side effect
- * of purging. This chunk map modification is safe even though
- * the arena mutex isn't currently owned by this thread,
- * because the run is marked as allocated, thus protecting it
- * from being modified by any other thread. As long as these
- * writes don't perturb the first and last elements'
- * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
- */
- for (i = 0; i < npages; i++) {
- arena_mapbits_unzeroed_set(chunk, pageind+i,
- flag_unzeroed);
+
+ malloc_mutex_unlock(&arena->lock);
+ for (rdelm = qr_next(purge_runs_sentinel, rd_link),
+ chunkselm = qr_next(purge_chunks_sentinel, cc_link);
+ rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
+ size_t npages;
+
+ if (rdelm == &chunkselm->rd) {
+ /*
+ * Don't actually purge the chunk here because 1)
+ * chunkselm is embedded in the chunk and must remain
+ * valid, and 2) we deallocate the chunk in
+ * arena_unstash_purged(), where it is destroyed,
+ * decommitted, or purged, depending on chunk
+ * deallocation policy.
+ */
+ size_t size = extent_node_size_get(chunkselm);
+ npages = size >> LG_PAGE;
+ chunkselm = qr_next(chunkselm, cc_link);
+ } else {
+ size_t pageind, run_size, flag_unzeroed, flags, i;
+ bool decommitted;
+ arena_chunk_t *chunk =
+ (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
+ arena_chunk_map_misc_t *miscelm =
+ arena_rd_to_miscelm(rdelm);
+ pageind = arena_miscelm_to_pageind(miscelm);
+ run_size = arena_mapbits_large_size_get(chunk, pageind);
+ npages = run_size >> LG_PAGE;
+
+ assert(pageind + npages <= chunk_npages);
+ assert(!arena_mapbits_decommitted_get(chunk, pageind));
+ assert(!arena_mapbits_decommitted_get(chunk,
+ pageind+npages-1));
+ decommitted = !chunk_hooks->decommit(chunk, chunksize,
+ pageind << LG_PAGE, npages << LG_PAGE, arena->ind);
+ if (decommitted) {
+ flag_unzeroed = 0;
+ flags = CHUNK_MAP_DECOMMITTED;
+ } else {
+ flag_unzeroed = chunk_purge_wrapper(arena,
+ chunk_hooks, chunk, chunksize, pageind <<
+ LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
+ flags = flag_unzeroed;
+ }
+ arena_mapbits_large_set(chunk, pageind+npages-1, 0,
+ flags);
+ arena_mapbits_large_set(chunk, pageind, run_size,
+ flags);
+
+ /*
+ * Set the unzeroed flag for internal pages, now that
+ * chunk_purge_wrapper() has returned whether the pages
+ * were zeroed as a side effect of purging. This chunk
+ * map modification is safe even though the arena mutex
+ * isn't currently owned by this thread, because the run
+ * is marked as allocated, thus protecting it from being
+ * modified by any other thread. As long as these
+ * writes don't perturb the first and last elements'
+ * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
+ */
+ for (i = 1; i < npages-1; i++) {
+ arena_mapbits_internal_set(chunk, pageind+i,
+ flag_unzeroed);
+ }
}
+
npurged += npages;
if (config_stats)
nmadvise++;
}
malloc_mutex_lock(&arena->lock);
- if (config_stats)
+
+ if (config_stats) {
arena->stats.nmadvise += nmadvise;
+ arena->stats.purged += npurged;
+ }
return (npurged);
}
static void
-arena_chunk_unstash_purged(arena_t *arena, arena_chunk_t *chunk,
- arena_chunk_mapelms_t *mapelms)
-{
- arena_chunk_map_t *mapelm;
- size_t pageind;
-
- /* Deallocate runs. */
- for (mapelm = ql_first(mapelms); mapelm != NULL;
- mapelm = ql_first(mapelms)) {
- arena_run_t *run;
-
- pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
- sizeof(arena_chunk_map_t)) + map_bias;
- run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind <<
- LG_PAGE));
- ql_remove(mapelms, mapelm, u.ql_link);
- arena_run_dalloc(arena, run, false, true);
+arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ arena_runs_dirty_link_t *purge_runs_sentinel,
+ extent_node_t *purge_chunks_sentinel)
+{
+ arena_runs_dirty_link_t *rdelm, *rdelm_next;
+ extent_node_t *chunkselm;
+
+ /* Deallocate chunks/runs. */
+ for (rdelm = qr_next(purge_runs_sentinel, rd_link),
+ chunkselm = qr_next(purge_chunks_sentinel, cc_link);
+ rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
+ rdelm_next = qr_next(rdelm, rd_link);
+ if (rdelm == &chunkselm->rd) {
+ extent_node_t *chunkselm_next = qr_next(chunkselm,
+ cc_link);
+ void *addr = extent_node_addr_get(chunkselm);
+ size_t size = extent_node_size_get(chunkselm);
+ bool zeroed = extent_node_zeroed_get(chunkselm);
+ bool committed = extent_node_committed_get(chunkselm);
+ extent_node_dirty_remove(chunkselm);
+ arena_node_dalloc(arena, chunkselm);
+ chunkselm = chunkselm_next;
+ chunk_dalloc_arena(arena, chunk_hooks, addr, size,
+ zeroed, committed);
+ } else {
+ arena_chunk_t *chunk =
+ (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
+ arena_chunk_map_misc_t *miscelm =
+ arena_rd_to_miscelm(rdelm);
+ size_t pageind = arena_miscelm_to_pageind(miscelm);
+ bool decommitted = (arena_mapbits_decommitted_get(chunk,
+ pageind) != 0);
+ arena_run_t *run = &miscelm->run;
+ qr_remove(rdelm, rd_link);
+ arena_run_dalloc(arena, run, false, true, decommitted);
+ }
}
}
-static inline size_t
-arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
+static void
+arena_purge(arena_t *arena, bool all)
{
- size_t npurged;
- arena_chunk_mapelms_t mapelms;
+ chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
+ size_t npurge, npurgeable, npurged;
+ arena_runs_dirty_link_t purge_runs_sentinel;
+ extent_node_t purge_chunks_sentinel;
- ql_new(&mapelms);
-
- /*
- * If chunk is the spare, temporarily re-allocate it, 1) so that its
- * run is reinserted into runs_avail, and 2) so that it cannot be
- * completely discarded by another thread while arena->lock is dropped
- * by this thread. Note that the arena_run_dalloc() call will
- * implicitly deallocate the chunk, so no explicit action is required
- * in this function to deallocate the chunk.
- *
- * Note that once a chunk contains dirty pages, it cannot again contain
- * a single run unless 1) it is a dirty run, or 2) this function purges
- * dirty pages and causes the transition to a single clean run. Thus
- * (chunk == arena->spare) is possible, but it is not possible for
- * this function to be called on the spare unless it contains a dirty
- * run.
- */
- if (chunk == arena->spare) {
- assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
- assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0);
-
- arena_chunk_alloc(arena);
- }
-
- if (config_stats)
- arena->stats.purged += chunk->ndirty;
+ arena->purging = true;
/*
- * Operate on all dirty runs if there is no clean/dirty run
- * fragmentation.
+ * Calls to arena_dirty_count() are disabled even for debug builds
+ * because overhead grows nonlinearly as memory usage increases.
*/
- if (chunk->nruns_adjac == 0)
- all = true;
-
- arena_chunk_stash_dirty(arena, chunk, all, &mapelms);
- npurged = arena_chunk_purge_stashed(arena, chunk, &mapelms);
- arena_chunk_unstash_purged(arena, chunk, &mapelms);
-
- return (npurged);
-}
-
-static void
-arena_purge(arena_t *arena, bool all)
-{
- arena_chunk_t *chunk;
- size_t npurgatory;
- if (config_debug) {
- size_t ndirty = 0;
-
- arena_chunk_dirty_iter(&arena->chunks_dirty, NULL,
- chunks_dirty_iter_cb, (void *)&ndirty);
+ if (false && config_debug) {
+ size_t ndirty = arena_dirty_count(arena);
assert(ndirty == arena->ndirty);
}
- assert(arena->ndirty > arena->npurgatory || all);
- assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
- arena->npurgatory) || all);
+ assert((arena->nactive >> arena->lg_dirty_mult) < arena->ndirty || all);
if (config_stats)
arena->stats.npurge++;
- /*
- * Add the minimum number of pages this thread should try to purge to
- * arena->npurgatory. This will keep multiple threads from racing to
- * reduce ndirty below the threshold.
- */
- npurgatory = arena_compute_npurgatory(arena, all);
- arena->npurgatory += npurgatory;
-
- while (npurgatory > 0) {
- size_t npurgeable, npurged, nunpurged;
-
- /* Get next chunk with dirty pages. */
- chunk = arena_chunk_dirty_first(&arena->chunks_dirty);
- if (chunk == NULL) {
- /*
- * This thread was unable to purge as many pages as
- * originally intended, due to races with other threads
- * that either did some of the purging work, or re-used
- * dirty pages.
- */
- arena->npurgatory -= npurgatory;
- return;
- }
- npurgeable = chunk->ndirty;
- assert(npurgeable != 0);
+ npurge = arena_compute_npurge(arena, all);
+ qr_new(&purge_runs_sentinel, rd_link);
+ extent_node_dirty_linkage_init(&purge_chunks_sentinel);
- if (npurgeable > npurgatory && chunk->nruns_adjac == 0) {
- /*
- * This thread will purge all the dirty pages in chunk,
- * so set npurgatory to reflect this thread's intent to
- * purge the pages. This tends to reduce the chances
- * of the following scenario:
- *
- * 1) This thread sets arena->npurgatory such that
- * (arena->ndirty - arena->npurgatory) is at the
- * threshold.
- * 2) This thread drops arena->lock.
- * 3) Another thread causes one or more pages to be
- * dirtied, and immediately determines that it must
- * purge dirty pages.
- *
- * If this scenario *does* play out, that's okay,
- * because all of the purging work being done really
- * needs to happen.
- */
- arena->npurgatory += npurgeable - npurgatory;
- npurgatory = npurgeable;
- }
+ npurgeable = arena_stash_dirty(arena, &chunk_hooks, all, npurge,
+ &purge_runs_sentinel, &purge_chunks_sentinel);
+ assert(npurgeable >= npurge);
+ npurged = arena_purge_stashed(arena, &chunk_hooks, &purge_runs_sentinel,
+ &purge_chunks_sentinel);
+ assert(npurged == npurgeable);
+ arena_unstash_purged(arena, &chunk_hooks, &purge_runs_sentinel,
+ &purge_chunks_sentinel);
- /*
- * Keep track of how many pages are purgeable, versus how many
- * actually get purged, and adjust counters accordingly.
- */
- arena->npurgatory -= npurgeable;
- npurgatory -= npurgeable;
- npurged = arena_chunk_purge(arena, chunk, all);
- nunpurged = npurgeable - npurged;
- arena->npurgatory += nunpurged;
- npurgatory += nunpurged;
- }
+ arena->purging = false;
}
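
The rewritten arena_purge() above works in three phases: stash dirty runs and cached chunks onto caller-owned sentinel lists while holding the arena lock, purge them with the lock dropped, then reacquire the lock and unstash/deallocate. The toy program below sketches only that locking pattern; demo_item_t, purge_one() and the single flat list are stand-ins, not jemalloc structures.

#include <pthread.h>
#include <stdio.h>

typedef struct demo_item { struct demo_item *next; int id; } demo_item_t;

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static demo_item_t *demo_dirty = NULL;   /* analogue of arena->runs_dirty */

static void purge_one(demo_item_t *it) { printf("purged %d\n", it->id); }

static void demo_purge(void) {
    demo_item_t *stash, *it;

    pthread_mutex_lock(&demo_lock);
    stash = demo_dirty;            /* Phase 1: stash everything dirty. */
    demo_dirty = NULL;
    pthread_mutex_unlock(&demo_lock);

    for (it = stash; it != NULL; it = it->next)
        purge_one(it);             /* Phase 2: purge without the lock. */

    pthread_mutex_lock(&demo_lock);
    /* Phase 3: unstash -- here the runs would return to runs_avail. */
    pthread_mutex_unlock(&demo_lock);
}

int main(void) {
    demo_item_t a = { NULL, 1 }, b = { &a, 2 };
    demo_dirty = &b;
    demo_purge();
    return 0;
}
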
void
@@ -1090,7 +1549,8 @@ arena_purge_all(arena_t *arena)
static void
arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
- size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty)
+ size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty,
+ size_t flag_decommitted)
{
size_t size = *p_size;
size_t run_ind = *p_run_ind;
@@ -1099,7 +1559,9 @@ arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
/* Try to coalesce forward. */
if (run_ind + run_pages < chunk_npages &&
arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
- arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) {
+ arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty &&
+ arena_mapbits_decommitted_get(chunk, run_ind+run_pages) ==
+ flag_decommitted) {
size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
run_ind+run_pages);
size_t nrun_pages = nrun_size >> LG_PAGE;
@@ -1112,8 +1574,18 @@ arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
run_ind+run_pages+nrun_pages-1) == nrun_size);
assert(arena_mapbits_dirty_get(chunk,
run_ind+run_pages+nrun_pages-1) == flag_dirty);
- arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages,
- false, true);
+ assert(arena_mapbits_decommitted_get(chunk,
+ run_ind+run_pages+nrun_pages-1) == flag_decommitted);
+ arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);
+
+ /*
+ * If the successor is dirty, remove it from the set of dirty
+ * pages.
+ */
+ if (flag_dirty != 0) {
+ arena_run_dirty_remove(arena, chunk, run_ind+run_pages,
+ nrun_pages);
+ }
size += nrun_size;
run_pages += nrun_pages;
@@ -1126,7 +1598,8 @@ arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
/* Try to coalesce backward. */
if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
- flag_dirty) {
+ flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) ==
+ flag_decommitted) {
size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
run_ind-1);
size_t prun_pages = prun_size >> LG_PAGE;
@@ -1140,8 +1613,18 @@ arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
prun_size);
assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
- arena_avail_remove(arena, chunk, run_ind, prun_pages, true,
- false);
+ assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
+ flag_decommitted);
+ arena_avail_remove(arena, chunk, run_ind, prun_pages);
+
+ /*
+ * If the predecessor is dirty, remove it from the set of dirty
+ * pages.
+ */
+ if (flag_dirty != 0) {
+ arena_run_dirty_remove(arena, chunk, run_ind,
+ prun_pages);
+ }
size += prun_size;
run_pages += prun_pages;
@@ -1156,26 +1639,53 @@ arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
*p_run_pages = run_pages;
}
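
The coalescing rule enforced above can be pictured with a flat page map: a freed run absorbs a free neighbour only when the neighbour carries the same dirty and (newly) decommitted flags, so runs in different states never merge. The sketch below coalesces page by page for brevity, whereas arena_run_coalesce() merges whole neighbouring runs; all names are illustrative.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NPAGES 16

static bool allocated[NPAGES];
static bool dirty[NPAGES];
static bool decommitted[NPAGES];

static void demo_coalesce(size_t *run_ind, size_t *run_pages) {
    size_t ind = *run_ind, pages = *run_pages;
    bool d = dirty[ind], dc = decommitted[ind];

    /* Forward: absorb the next page while it is free and flag-compatible. */
    while (ind + pages < NPAGES && !allocated[ind + pages] &&
        dirty[ind + pages] == d && decommitted[ind + pages] == dc)
        pages++;
    /* Backward: absorb the previous page under the same conditions. */
    while (ind > 0 && !allocated[ind - 1] && dirty[ind - 1] == d &&
        decommitted[ind - 1] == dc) {
        ind--;
        pages++;
    }
    *run_ind = ind;
    *run_pages = pages;
}

int main(void) {
    size_t ind = 5, pages = 2;
    dirty[4] = true;            /* predecessor is free but dirty: not merged */
    allocated[9] = true;        /* pages 7 and 8 are free and clean: merged forward */
    demo_coalesce(&ind, &pages);
    printf("run [%zu, %zu)\n", ind, ind + pages);   /* prints run [5, 9) */
    return 0;
}
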
-static void
-arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
+static size_t
+arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+ size_t run_ind)
{
- arena_chunk_t *chunk;
- size_t size, run_ind, run_pages, flag_dirty;
+ size_t size;
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
assert(run_ind >= map_bias);
assert(run_ind < chunk_npages);
+
if (arena_mapbits_large_get(chunk, run_ind) != 0) {
size = arena_mapbits_large_size_get(chunk, run_ind);
- assert(size == PAGE ||
- arena_mapbits_large_size_get(chunk,
+ assert(size == PAGE || arena_mapbits_large_size_get(chunk,
run_ind+(size>>LG_PAGE)-1) == 0);
} else {
- size_t binind = arena_bin_index(arena, run->bin);
- arena_bin_info_t *bin_info = &arena_bin_info[binind];
+ arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
size = bin_info->run_size;
}
+
+ return (size);
+}
+
+static bool
+arena_run_decommit(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run)
+{
+ arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
+ size_t run_ind = arena_miscelm_to_pageind(miscelm);
+ size_t offset = run_ind << LG_PAGE;
+ size_t length = arena_run_size_get(arena, chunk, run, run_ind);
+
+ return (arena->chunk_hooks.decommit(chunk, chunksize, offset, length,
+ arena->ind));
+}
+
+static void
+arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned,
+ bool decommitted)
+{
+ arena_chunk_t *chunk;
+ arena_chunk_map_misc_t *miscelm;
+ size_t size, run_ind, run_pages, flag_dirty, flag_decommitted;
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ miscelm = arena_run_to_miscelm(run);
+ run_ind = arena_miscelm_to_pageind(miscelm);
+ assert(run_ind >= map_bias);
+ assert(run_ind < chunk_npages);
+ size = arena_run_size_get(arena, chunk, run, run_ind);
run_pages = (size >> LG_PAGE);
arena_cactive_update(arena, 0, run_pages);
arena->nactive -= run_pages;
@@ -1187,16 +1697,18 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
*/
assert(arena_mapbits_dirty_get(chunk, run_ind) ==
arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
- if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0)
+ if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind)
+ != 0)
dirty = true;
flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
+ flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0;
/* Mark pages as unallocated in the chunk map. */
- if (dirty) {
- arena_mapbits_unallocated_set(chunk, run_ind, size,
- CHUNK_MAP_DIRTY);
+ if (dirty || decommitted) {
+ size_t flags = flag_dirty | flag_decommitted;
+ arena_mapbits_unallocated_set(chunk, run_ind, size, flags);
arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
- CHUNK_MAP_DIRTY);
+ flags);
} else {
arena_mapbits_unallocated_set(chunk, run_ind, size,
arena_mapbits_unzeroed_get(chunk, run_ind));
@@ -1205,20 +1717,25 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
}
arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
- flag_dirty);
+ flag_dirty, flag_decommitted);
/* Insert into runs_avail, now that coalescing is complete. */
assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
assert(arena_mapbits_dirty_get(chunk, run_ind) ==
arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
- arena_avail_insert(arena, chunk, run_ind, run_pages, true, true);
+ assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
+ arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1));
+ arena_avail_insert(arena, chunk, run_ind, run_pages);
+
+ if (dirty)
+ arena_run_dirty_insert(arena, chunk, run_ind, run_pages);
/* Deallocate chunk if it is now completely unused. */
- if (size == arena_maxclass) {
+ if (size == arena_maxrun) {
assert(run_ind == map_bias);
- assert(run_pages == (arena_maxclass >> LG_PAGE));
- arena_chunk_dealloc(arena, chunk);
+ assert(run_pages == (arena_maxrun >> LG_PAGE));
+ arena_chunk_dalloc(arena, chunk);
}
/*
@@ -1233,12 +1750,25 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
}
static void
+arena_run_dalloc_decommit(arena_t *arena, arena_chunk_t *chunk,
+ arena_run_t *run)
+{
+ bool committed = arena_run_decommit(arena, chunk, run);
+
+ arena_run_dalloc(arena, run, committed, false, !committed);
+}
+
+static void
arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
size_t oldsize, size_t newsize)
{
- size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
+ arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
+ size_t pageind = arena_miscelm_to_pageind(miscelm);
size_t head_npages = (oldsize - newsize) >> LG_PAGE;
size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
+ size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
+ size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
+ CHUNK_MAP_UNZEROED : 0;
assert(oldsize > newsize);
@@ -1248,8 +1778,11 @@ arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
* run first, in case of single-page runs.
*/
assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
- arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
- arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty);
+ arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
+ (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
+ pageind+head_npages-1)));
+ arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
+ (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
if (config_debug) {
UNUSED size_t tail_npages = newsize >> LG_PAGE;
@@ -1259,18 +1792,25 @@ arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
pageind+head_npages+tail_npages-1) == flag_dirty);
}
arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
- flag_dirty);
+ flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
+ pageind+head_npages)));
- arena_run_dalloc(arena, run, false, false);
+ arena_run_dalloc(arena, run, false, false, (flag_decommitted != 0));
}
static void
arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
size_t oldsize, size_t newsize, bool dirty)
{
- size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
+ arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
+ size_t pageind = arena_miscelm_to_pageind(miscelm);
size_t head_npages = newsize >> LG_PAGE;
size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
+ size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
+ size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
+ CHUNK_MAP_UNZEROED : 0;
+ arena_chunk_map_misc_t *tail_miscelm;
+ arena_run_t *tail_run;
assert(oldsize > newsize);
@@ -1280,8 +1820,11 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
* run first, in case of single-page runs.
*/
assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
- arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
- arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty);
+ arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
+ (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
+ pageind+head_npages-1)));
+ arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty |
+ (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
if (config_debug) {
UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
@@ -1291,29 +1834,21 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
pageind+head_npages+tail_npages-1) == flag_dirty);
}
arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
- flag_dirty);
+ flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
+ pageind+head_npages)));
- arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
- dirty, false);
+ tail_miscelm = arena_miscelm_get(chunk, pageind + head_npages);
+ tail_run = &tail_miscelm->run;
+ arena_run_dalloc(arena, tail_run, dirty, false, (flag_decommitted !=
+ 0));
}
static arena_run_t *
arena_bin_runs_first(arena_bin_t *bin)
{
- arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs);
- if (mapelm != NULL) {
- arena_chunk_t *chunk;
- size_t pageind;
- arena_run_t *run;
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
- pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
- sizeof(arena_chunk_map_t))) + map_bias;
- run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
- arena_mapbits_small_runind_get(chunk, pageind)) <<
- LG_PAGE));
- return (run);
- }
+ arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs);
+ if (miscelm != NULL)
+ return (&miscelm->run);
return (NULL);
}
@@ -1321,25 +1856,21 @@ arena_bin_runs_first(arena_bin_t *bin)
static void
arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
{
- arena_chunk_t *chunk = CHUNK_ADDR2BASE(run);
- size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
- arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
+ arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
- assert(arena_run_tree_search(&bin->runs, mapelm) == NULL);
+ assert(arena_run_tree_search(&bin->runs, miscelm) == NULL);
- arena_run_tree_insert(&bin->runs, mapelm);
+ arena_run_tree_insert(&bin->runs, miscelm);
}
static void
arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
{
- arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
- arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
+ arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
- assert(arena_run_tree_search(&bin->runs, mapelm) != NULL);
+ assert(arena_run_tree_search(&bin->runs, miscelm) != NULL);
- arena_run_tree_remove(&bin->runs, mapelm);
+ arena_run_tree_remove(&bin->runs, miscelm);
}
static arena_run_t *
@@ -1358,7 +1889,7 @@ static arena_run_t *
arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
{
arena_run_t *run;
- size_t binind;
+ szind_t binind;
arena_bin_info_t *bin_info;
/* Look for a usable run. */
@@ -1376,14 +1907,10 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
malloc_mutex_lock(&arena->lock);
run = arena_run_alloc_small(arena, bin_info->run_size, binind);
if (run != NULL) {
- bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
- (uintptr_t)bin_info->bitmap_offset);
-
/* Initialize run internals. */
- run->bin = bin;
- run->nextind = 0;
+ run->binind = binind;
run->nfree = bin_info->nregs;
- bitmap_init(bitmap, &bin_info->bitmap_info);
+ bitmap_init(run->bitmap, &bin_info->bitmap_info);
}
malloc_mutex_unlock(&arena->lock);
/********************************/
@@ -1412,8 +1939,7 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
static void *
arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
{
- void *ret;
- size_t binind;
+ szind_t binind;
arena_bin_info_t *bin_info;
arena_run_t *run;
@@ -1426,6 +1952,7 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
* Another thread updated runcur while this one ran without the
* bin lock in arena_bin_nonfull_run_get().
*/
+ void *ret;
assert(bin->runcur->nfree > 0);
ret = arena_run_reg_alloc(bin->runcur, bin_info);
if (run != NULL) {
@@ -1459,13 +1986,11 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
}
void
-arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
+arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind,
uint64_t prof_accumbytes)
{
unsigned i, nfill;
arena_bin_t *bin;
- arena_run_t *run;
- void *ptr;
assert(tbin->ncached == 0);
@@ -1475,13 +2000,26 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
malloc_mutex_lock(&bin->lock);
for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
tbin->lg_fill_div); i < nfill; i++) {
+ arena_run_t *run;
+ void *ptr;
if ((run = bin->runcur) != NULL && run->nfree > 0)
ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
else
ptr = arena_bin_malloc_hard(arena, bin);
- if (ptr == NULL)
+ if (ptr == NULL) {
+ /*
+ * OOM. tbin->avail isn't yet filled down to its first
+ * element, so the successful allocations (if any) must
+ * be moved to the base of tbin->avail before bailing
+ * out.
+ */
+ if (i > 0) {
+ memmove(tbin->avail, &tbin->avail[nfill - i],
+ i * sizeof(void *));
+ }
break;
- if (config_fill && opt_junk) {
+ }
+ if (config_fill && unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ptr, &arena_bin_info[binind],
true);
}
@@ -1489,9 +2027,9 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
tbin->avail[nfill - 1 - i] = ptr;
}
if (config_stats) {
- bin->stats.allocated += i * arena_bin_info[binind].reg_size;
bin->stats.nmalloc += i;
bin->stats.nrequests += tbin->tstats.nrequests;
+ bin->stats.curregs += i;
bin->stats.nfills++;
tbin->tstats.nrequests = 0;
}
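
The OOM fixup added to arena_tcache_fill_small() above relies on the fill loop writing slot nfill-1 first and working downward, so after i successful allocations the live pointers occupy avail[nfill-i .. nfill-1] and must be slid to the base of the array before bailing out. A self-contained illustration, with plain ints standing in for the region pointers:

#include <stdio.h>
#include <string.h>

#define NFILL 8

int main(void) {
    int avail[NFILL] = { 0 };
    int i, nfill = NFILL;

    for (i = 0; i < nfill; i++) {
        if (i == 3)                       /* pretend the 4th allocation fails */
            break;
        avail[nfill - 1 - i] = 100 + i;   /* fill from the top downward */
    }
    if (i > 0)
        memmove(avail, &avail[nfill - i], i * sizeof(int));

    printf("cached %d regions: %d %d %d\n", i, avail[0], avail[1], avail[2]);
    return 0;
}
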
@@ -1538,29 +2076,35 @@ arena_redzone_corruption_t *arena_redzone_corruption =
static void
arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
{
- size_t size = bin_info->reg_size;
- size_t redzone_size = bin_info->redzone_size;
- size_t i;
bool error = false;
- for (i = 1; i <= redzone_size; i++) {
- uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
- if (*byte != 0xa5) {
- error = true;
- arena_redzone_corruption(ptr, size, false, i, *byte);
- if (reset)
- *byte = 0xa5;
+ if (opt_junk_alloc) {
+ size_t size = bin_info->reg_size;
+ size_t redzone_size = bin_info->redzone_size;
+ size_t i;
+
+ for (i = 1; i <= redzone_size; i++) {
+ uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
+ if (*byte != 0xa5) {
+ error = true;
+ arena_redzone_corruption(ptr, size, false, i,
+ *byte);
+ if (reset)
+ *byte = 0xa5;
+ }
}
- }
- for (i = 0; i < redzone_size; i++) {
- uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
- if (*byte != 0xa5) {
- error = true;
- arena_redzone_corruption(ptr, size, true, i, *byte);
- if (reset)
- *byte = 0xa5;
+ for (i = 0; i < redzone_size; i++) {
+ uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
+ if (*byte != 0xa5) {
+ error = true;
+ arena_redzone_corruption(ptr, size, true, i,
+ *byte);
+ if (reset)
+ *byte = 0xa5;
+ }
}
}
+
if (opt_abort && error)
abort();
}
@@ -1588,14 +2132,14 @@ arena_dalloc_junk_small_t *arena_dalloc_junk_small =
void
arena_quarantine_junk_small(void *ptr, size_t usize)
{
- size_t binind;
+ szind_t binind;
arena_bin_info_t *bin_info;
cassert(config_fill);
- assert(opt_junk);
+ assert(opt_junk_free);
assert(opt_quarantine);
assert(usize <= SMALL_MAXCLASS);
- binind = SMALL_SIZE2BIN(usize);
+ binind = size2index(usize);
bin_info = &arena_bin_info[binind];
arena_redzones_validate(ptr, bin_info, true);
}
@@ -1606,12 +2150,12 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
void *ret;
arena_bin_t *bin;
arena_run_t *run;
- size_t binind;
+ szind_t binind;
- binind = SMALL_SIZE2BIN(size);
+ binind = size2index(size);
assert(binind < NBINS);
bin = &arena->bins[binind];
- size = arena_bin_info[binind].reg_size;
+ size = index2size(binind);
malloc_mutex_lock(&bin->lock);
if ((run = bin->runcur) != NULL && run->nfree > 0)
@@ -1625,29 +2169,29 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
}
if (config_stats) {
- bin->stats.allocated += size;
bin->stats.nmalloc++;
bin->stats.nrequests++;
+ bin->stats.curregs++;
}
malloc_mutex_unlock(&bin->lock);
- if (config_prof && isthreaded == false && arena_prof_accum(arena, size))
+ if (config_prof && !isthreaded && arena_prof_accum(arena, size))
prof_idump();
- if (zero == false) {
+ if (!zero) {
if (config_fill) {
- if (opt_junk) {
+ if (unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret,
&arena_bin_info[binind], false);
- } else if (opt_zero)
+ } else if (unlikely(opt_zero))
memset(ret, 0, size);
}
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
} else {
- if (config_fill && opt_junk) {
+ if (config_fill && unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret, &arena_bin_info[binind],
true);
}
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
}
@@ -1658,36 +2202,59 @@ void *
arena_malloc_large(arena_t *arena, size_t size, bool zero)
{
void *ret;
+ size_t usize;
+ uintptr_t random_offset;
+ arena_run_t *run;
+ arena_chunk_map_misc_t *miscelm;
UNUSED bool idump;
/* Large allocation. */
- size = PAGE_CEILING(size);
+ usize = s2u(size);
malloc_mutex_lock(&arena->lock);
- ret = (void *)arena_run_alloc_large(arena, size, zero);
- if (ret == NULL) {
+ if (config_cache_oblivious) {
+ uint64_t r;
+
+ /*
+ * Compute a uniformly distributed offset within the first page
+ * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64
+ * for 4 KiB pages and 64-byte cachelines.
+ */
+ prng64(r, LG_PAGE - LG_CACHELINE, arena->offset_state,
+ UINT64_C(6364136223846793009),
+ UINT64_C(1442695040888963409));
+ random_offset = ((uintptr_t)r) << LG_CACHELINE;
+ } else
+ random_offset = 0;
+ run = arena_run_alloc_large(arena, usize + large_pad, zero);
+ if (run == NULL) {
malloc_mutex_unlock(&arena->lock);
return (NULL);
}
+ miscelm = arena_run_to_miscelm(run);
+ ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
+ random_offset);
if (config_stats) {
+ szind_t index = size2index(usize) - NBINS;
+
arena->stats.nmalloc_large++;
arena->stats.nrequests_large++;
- arena->stats.allocated_large += size;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
+ arena->stats.allocated_large += usize;
+ arena->stats.lstats[index].nmalloc++;
+ arena->stats.lstats[index].nrequests++;
+ arena->stats.lstats[index].curruns++;
}
if (config_prof)
- idump = arena_prof_accum_locked(arena, size);
+ idump = arena_prof_accum_locked(arena, usize);
malloc_mutex_unlock(&arena->lock);
if (config_prof && idump)
prof_idump();
- if (zero == false) {
+ if (!zero) {
if (config_fill) {
- if (opt_junk)
- memset(ret, 0xa5, size);
- else if (opt_zero)
- memset(ret, 0, size);
+ if (unlikely(opt_junk_alloc))
+ memset(ret, 0xa5, usize);
+ else if (unlikely(opt_zero))
+ memset(ret, 0, usize);
}
}
@@ -1695,18 +2262,25 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
}
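
The cache-oblivious offset computed in arena_malloc_large() above draws a value in [0, page/cacheline) from a linear congruential generator and scales it by the cacheline size, so each large allocation starts at 0, 64, 128, ... within its first page. The sketch below reuses the LCG constants passed to prng64() in the diff but simplifies the state handling; the DEMO_* names are invented.

#include <stdint.h>
#include <stdio.h>

#define DEMO_LG_PAGE      12   /* 4 KiB pages assumed */
#define DEMO_LG_CACHELINE 6    /* 64-byte cachelines assumed */

static uint64_t demo_state = 42;

static uintptr_t demo_random_offset(void) {
    unsigned lg_range = DEMO_LG_PAGE - DEMO_LG_CACHELINE;   /* 6 -> 64 slots */
    demo_state = demo_state * UINT64_C(6364136223846793009) +
        UINT64_C(1442695040888963409);
    /* Keep the high lg_range bits of the state, as prng64() does. */
    uint64_t r = demo_state >> (64 - lg_range);
    return ((uintptr_t)r) << DEMO_LG_CACHELINE;
}

int main(void) {
    for (int i = 0; i < 4; i++)
        printf("offset %d: %zu\n", i, (size_t)demo_random_offset());
    return 0;
}
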
/* Only handles large allocations that require more than page alignment. */
-void *
-arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
+static void *
+arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
+ bool zero)
{
void *ret;
size_t alloc_size, leadsize, trailsize;
arena_run_t *run;
arena_chunk_t *chunk;
+ arena_chunk_map_misc_t *miscelm;
+ void *rpages;
+
+ assert(usize == PAGE_CEILING(usize));
- assert((size & PAGE_MASK) == 0);
+ arena = arena_choose(tsd, arena);
+ if (unlikely(arena == NULL))
+ return (NULL);
alignment = PAGE_CEILING(alignment);
- alloc_size = size + alignment - PAGE;
+ alloc_size = usize + large_pad + alignment - PAGE;
malloc_mutex_lock(&arena->lock);
run = arena_run_alloc_large(arena, alloc_size, false);
@@ -1715,37 +2289,94 @@ arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
return (NULL);
}
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ miscelm = arena_run_to_miscelm(run);
+ rpages = arena_miscelm_to_rpages(miscelm);
- leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) -
- (uintptr_t)run;
- assert(alloc_size >= leadsize + size);
- trailsize = alloc_size - leadsize - size;
- ret = (void *)((uintptr_t)run + leadsize);
+ leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) -
+ (uintptr_t)rpages;
+ assert(alloc_size >= leadsize + usize);
+ trailsize = alloc_size - leadsize - usize - large_pad;
if (leadsize != 0) {
- arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size -
- leadsize);
+ arena_chunk_map_misc_t *head_miscelm = miscelm;
+ arena_run_t *head_run = run;
+
+ miscelm = arena_miscelm_get(chunk,
+ arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
+ LG_PAGE));
+ run = &miscelm->run;
+
+ arena_run_trim_head(arena, chunk, head_run, alloc_size,
+ alloc_size - leadsize);
}
if (trailsize != 0) {
- arena_run_trim_tail(arena, chunk, ret, size + trailsize, size,
- false);
+ arena_run_trim_tail(arena, chunk, run, usize + large_pad +
+ trailsize, usize + large_pad, false);
+ }
+ if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
+ size_t run_ind =
+ arena_miscelm_to_pageind(arena_run_to_miscelm(run));
+ bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0);
+ bool decommitted = (arena_mapbits_decommitted_get(chunk,
+ run_ind) != 0);
+
+ assert(decommitted); /* Cause of OOM. */
+ arena_run_dalloc(arena, run, dirty, false, decommitted);
+ malloc_mutex_unlock(&arena->lock);
+ return (NULL);
}
- arena_run_init_large(arena, (arena_run_t *)ret, size, zero);
+ ret = arena_miscelm_to_rpages(miscelm);
if (config_stats) {
+ szind_t index = size2index(usize) - NBINS;
+
arena->stats.nmalloc_large++;
arena->stats.nrequests_large++;
- arena->stats.allocated_large += size;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
+ arena->stats.allocated_large += usize;
+ arena->stats.lstats[index].nmalloc++;
+ arena->stats.lstats[index].nrequests++;
+ arena->stats.lstats[index].curruns++;
}
malloc_mutex_unlock(&arena->lock);
- if (config_fill && zero == false) {
- if (opt_junk)
- memset(ret, 0xa5, size);
- else if (opt_zero)
- memset(ret, 0, size);
+ if (config_fill && !zero) {
+ if (unlikely(opt_junk_alloc))
+ memset(ret, 0xa5, usize);
+ else if (unlikely(opt_zero))
+ memset(ret, 0, usize);
+ }
+ return (ret);
+}
+
+void *
+arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
+ bool zero, tcache_t *tcache)
+{
+ void *ret;
+
+ if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
+ && (usize & PAGE_MASK) == 0))) {
+ /* Small; alignment doesn't require special run placement. */
+ ret = arena_malloc(tsd, arena, usize, zero, tcache);
+ } else if (usize <= large_maxclass && alignment <= PAGE) {
+ /*
+ * Large; alignment doesn't require special run placement.
+ * However, the cached pointer may be at a random offset from
+ * the base of the run, so do some bit manipulation to retrieve
+ * the base.
+ */
+ ret = arena_malloc(tsd, arena, usize, zero, tcache);
+ if (config_cache_oblivious)
+ ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
+ } else {
+ if (likely(usize <= large_maxclass)) {
+ ret = arena_palloc_large(tsd, arena, usize, alignment,
+ zero);
+ } else if (likely(alignment <= chunksize))
+ ret = huge_malloc(tsd, arena, usize, zero, tcache);
+ else {
+ ret = huge_palloc(tsd, arena, usize, alignment, zero,
+ tcache);
+ }
}
return (ret);
}
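
For the page-aligned case in arena_palloc() above, the "bit manipulation to retrieve the base" is simply masking off the low page bits: because a cache-oblivious large allocation starts at a random cacheline-aligned offset within its first page, clearing those bits recovers the run base. A tiny illustration with an assumed 4 KiB page and a made-up address:

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_MASK ((uintptr_t)0xfff)   /* 4 KiB pages assumed */

int main(void) {
    uintptr_t run_base = (uintptr_t)0x40000000;   /* hypothetical, page aligned */
    uintptr_t ret = run_base + 3 * 64;            /* random offset of 192 bytes */
    uintptr_t aligned = ret & ~DEMO_PAGE_MASK;    /* back to the page boundary */

    printf("ret = 0x%lx, aligned = 0x%lx\n", (unsigned long)ret,
        (unsigned long)aligned);
    return 0;
}
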
@@ -1754,22 +2385,23 @@ void
arena_prof_promoted(const void *ptr, size_t size)
{
arena_chunk_t *chunk;
- size_t pageind, binind;
+ size_t pageind;
+ szind_t binind;
cassert(config_prof);
assert(ptr != NULL);
assert(CHUNK_ADDR2BASE(ptr) != ptr);
- assert(isalloc(ptr, false) == PAGE);
- assert(isalloc(ptr, true) == PAGE);
+ assert(isalloc(ptr, false) == LARGE_MINCLASS);
+ assert(isalloc(ptr, true) == LARGE_MINCLASS);
assert(size <= SMALL_MAXCLASS);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- binind = SMALL_SIZE2BIN(size);
+ binind = size2index(size);
assert(binind < NBINS);
arena_mapbits_large_binind_set(chunk, pageind, binind);
- assert(isalloc(ptr, false) == PAGE);
+ assert(isalloc(ptr, false) == LARGE_MINCLASS);
assert(isalloc(ptr, true) == size);
}
@@ -1782,7 +2414,8 @@ arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
if (run == bin->runcur)
bin->runcur = NULL;
else {
- size_t binind = arena_bin_index(chunk->arena, bin);
+ szind_t binind = arena_bin_index(extent_node_arena_get(
+ &chunk->node), bin);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
if (bin_info->nregs != 1) {
@@ -1800,46 +2433,15 @@ static void
arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
arena_bin_t *bin)
{
- size_t binind;
- arena_bin_info_t *bin_info;
- size_t npages, run_ind, past;
assert(run != bin->runcur);
- assert(arena_run_tree_search(&bin->runs,
- arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE))
- == NULL);
-
- binind = arena_bin_index(chunk->arena, run->bin);
- bin_info = &arena_bin_info[binind];
+ assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) ==
+ NULL);
malloc_mutex_unlock(&bin->lock);
/******************************/
- npages = bin_info->run_size >> LG_PAGE;
- run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
- past = (size_t)(PAGE_CEILING((uintptr_t)run +
- (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
- bin_info->reg_interval - bin_info->redzone_size) -
- (uintptr_t)chunk) >> LG_PAGE);
malloc_mutex_lock(&arena->lock);
-
- /*
- * If the run was originally clean, and some pages were never touched,
- * trim the clean pages before deallocating the dirty portion of the
- * run.
- */
- assert(arena_mapbits_dirty_get(chunk, run_ind) ==
- arena_mapbits_dirty_get(chunk, run_ind+npages-1));
- if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind <
- npages) {
- /* Trim clean pages. Convert to large run beforehand. */
- assert(npages > 0);
- arena_mapbits_large_set(chunk, run_ind, bin_info->run_size, 0);
- arena_mapbits_large_set(chunk, run_ind+npages-1, 0, 0);
- arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
- ((past - run_ind) << LG_PAGE), false);
- /* npages = past - run_ind; */
- }
- arena_run_dalloc(arena, run, true, false);
+ arena_run_dalloc_decommit(arena, chunk, run);
malloc_mutex_unlock(&arena->lock);
/****************************/
malloc_mutex_lock(&bin->lock);
@@ -1868,26 +2470,24 @@ arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
arena_bin_runs_insert(bin, run);
}
-void
-arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- arena_chunk_map_t *mapelm)
+static void
+arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ arena_chunk_map_bits_t *bitselm, bool junked)
{
- size_t pageind;
+ size_t pageind, rpages_ind;
arena_run_t *run;
arena_bin_t *bin;
arena_bin_info_t *bin_info;
- size_t size, binind;
+ szind_t binind;
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
- arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
- bin = run->bin;
- binind = arena_ptr_small_binind_get(ptr, mapelm->bits);
+ rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
+ run = &arena_miscelm_get(chunk, rpages_ind)->run;
+ binind = run->binind;
+ bin = &arena->bins[binind];
bin_info = &arena_bin_info[binind];
- if (config_fill || config_stats)
- size = bin_info->reg_size;
- if (config_fill && opt_junk)
+ if (!junked && config_fill && unlikely(opt_junk_free))
arena_dalloc_junk_small(ptr, bin_info);
arena_run_reg_dalloc(run, ptr);
@@ -1898,23 +2498,32 @@ arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_bin_lower_run(arena, chunk, run, bin);
if (config_stats) {
- bin->stats.allocated -= size;
bin->stats.ndalloc++;
+ bin->stats.curregs--;
}
}
void
+arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ arena_chunk_map_bits_t *bitselm)
+{
+
+ arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, true);
+}
+
+void
arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- size_t pageind, arena_chunk_map_t *mapelm)
+ size_t pageind, arena_chunk_map_bits_t *bitselm)
{
arena_run_t *run;
arena_bin_t *bin;
+ size_t rpages_ind;
- run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
- arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
- bin = run->bin;
+ rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
+ run = &arena_miscelm_get(chunk, rpages_ind)->run;
+ bin = &arena->bins[run->binind];
malloc_mutex_lock(&bin->lock);
- arena_dalloc_bin_locked(arena, chunk, ptr, mapelm);
+ arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false);
malloc_mutex_unlock(&bin->lock);
}
@@ -1922,26 +2531,26 @@ void
arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind)
{
- arena_chunk_map_t *mapelm;
+ arena_chunk_map_bits_t *bitselm;
if (config_debug) {
/* arena_ptr_small_binind_get() does extra sanity checking. */
assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
pageind)) != BININD_INVALID);
}
- mapelm = arena_mapp_get(chunk, pageind);
- arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
+ bitselm = arena_bitselm_get(chunk, pageind);
+ arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl)
#endif
-static void
+void
arena_dalloc_junk_large(void *ptr, size_t usize)
{
- if (config_fill && opt_junk)
+ if (config_fill && unlikely(opt_junk_free))
memset(ptr, 0x5a, usize);
}
#ifdef JEMALLOC_JET
@@ -1951,24 +2560,39 @@ arena_dalloc_junk_large_t *arena_dalloc_junk_large =
JEMALLOC_N(arena_dalloc_junk_large_impl);
#endif
-void
-arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
+static void
+arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, bool junked)
{
+ size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
+ arena_run_t *run = &miscelm->run;
if (config_fill || config_stats) {
- size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- size_t usize = arena_mapbits_large_size_get(chunk, pageind);
+ size_t usize = arena_mapbits_large_size_get(chunk, pageind) -
+ large_pad;
- arena_dalloc_junk_large(ptr, usize);
+ if (!junked)
+ arena_dalloc_junk_large(ptr, usize);
if (config_stats) {
+ szind_t index = size2index(usize) - NBINS;
+
arena->stats.ndalloc_large++;
arena->stats.allocated_large -= usize;
- arena->stats.lstats[(usize >> LG_PAGE) - 1].ndalloc++;
- arena->stats.lstats[(usize >> LG_PAGE) - 1].curruns--;
+ arena->stats.lstats[index].ndalloc++;
+ arena->stats.lstats[index].curruns--;
}
}
- arena_run_dalloc(arena, (arena_run_t *)ptr, true, false);
+ arena_run_dalloc_decommit(arena, chunk, run);
+}
+
+void
+arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
+ void *ptr)
+{
+
+ arena_dalloc_large_locked_impl(arena, chunk, ptr, true);
}
void
@@ -1976,7 +2600,7 @@ arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
{
malloc_mutex_lock(&arena->lock);
- arena_dalloc_large_locked(arena, chunk, ptr);
+ arena_dalloc_large_locked_impl(arena, chunk, ptr, false);
malloc_mutex_unlock(&arena->lock);
}
@@ -1984,6 +2608,9 @@ static void
arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t oldsize, size_t size)
{
+ size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
+ arena_run_t *run = &miscelm->run;
assert(size < oldsize);
@@ -1992,54 +2619,78 @@ arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
* allocations.
*/
malloc_mutex_lock(&arena->lock);
- arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
- true);
+ arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size +
+ large_pad, true);
if (config_stats) {
+ szind_t oldindex = size2index(oldsize) - NBINS;
+ szind_t index = size2index(size) - NBINS;
+
arena->stats.ndalloc_large++;
arena->stats.allocated_large -= oldsize;
- arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
- arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
+ arena->stats.lstats[oldindex].ndalloc++;
+ arena->stats.lstats[oldindex].curruns--;
arena->stats.nmalloc_large++;
arena->stats.nrequests_large++;
arena->stats.allocated_large += size;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
+ arena->stats.lstats[index].nmalloc++;
+ arena->stats.lstats[index].nrequests++;
+ arena->stats.lstats[index].curruns++;
}
malloc_mutex_unlock(&arena->lock);
}
static bool
arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- size_t oldsize, size_t size, size_t extra, bool zero)
+ size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
{
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- size_t npages = oldsize >> LG_PAGE;
+ size_t npages = (oldsize + large_pad) >> LG_PAGE;
size_t followsize;
- assert(oldsize == arena_mapbits_large_size_get(chunk, pageind));
+ assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
+ large_pad);
/* Try to extend the run. */
- assert(size + extra > oldsize);
malloc_mutex_lock(&arena->lock);
- if (pageind + npages < chunk_npages &&
- arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
- (followsize = arena_mapbits_unallocated_size_get(chunk,
- pageind+npages)) >= size - oldsize) {
+ if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
+ pageind+npages) != 0)
+ goto label_fail;
+ followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages);
+ if (oldsize + followsize >= usize_min) {
/*
* The next run is available and sufficiently large. Split the
* following run, then merge the first part with the existing
* allocation.
*/
- size_t flag_dirty;
- size_t splitsize = (oldsize + followsize <= size + extra)
- ? followsize : size + extra - oldsize;
- arena_run_split_large(arena, (arena_run_t *)((uintptr_t)chunk +
- ((pageind+npages) << LG_PAGE)), splitsize, zero);
+ arena_run_t *run;
+ size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask;
+
+ usize = usize_max;
+ while (oldsize + followsize < usize)
+ usize = index2size(size2index(usize)-1);
+ assert(usize >= usize_min);
+ assert(usize >= oldsize);
+ splitsize = usize - oldsize;
+ if (splitsize == 0)
+ goto label_fail;
+
+ run = &arena_miscelm_get(chunk, pageind+npages)->run;
+ if (arena_run_split_large(arena, run, splitsize, zero))
+ goto label_fail;
+
+ if (config_cache_oblivious && zero) {
+ /*
+ * Zero the trailing bytes of the original allocation's
+ * last page, since they are in an indeterminate state.
+ */
+ assert(PAGE_CEILING(oldsize) == oldsize);
+ memset((void *)((uintptr_t)ptr + oldsize), 0,
+ PAGE_CEILING((uintptr_t)ptr) - (uintptr_t)ptr);
+ }
size = oldsize + splitsize;
- npages = size >> LG_PAGE;
+ npages = (size + large_pad) >> LG_PAGE;
/*
* Mark the extended run as dirty if either portion of the run
@@ -2051,27 +2702,35 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
*/
flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
arena_mapbits_dirty_get(chunk, pageind+npages-1);
- arena_mapbits_large_set(chunk, pageind, size, flag_dirty);
- arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty);
+ flag_unzeroed_mask = flag_dirty == 0 ? CHUNK_MAP_UNZEROED : 0;
+ arena_mapbits_large_set(chunk, pageind, size + large_pad,
+ flag_dirty | (flag_unzeroed_mask &
+ arena_mapbits_unzeroed_get(chunk, pageind)));
+ arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty |
+ (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
+ pageind+npages-1)));
if (config_stats) {
+ szind_t oldindex = size2index(oldsize) - NBINS;
+ szind_t index = size2index(size) - NBINS;
+
arena->stats.ndalloc_large++;
arena->stats.allocated_large -= oldsize;
- arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
- arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
+ arena->stats.lstats[oldindex].ndalloc++;
+ arena->stats.lstats[oldindex].curruns--;
arena->stats.nmalloc_large++;
arena->stats.nrequests_large++;
arena->stats.allocated_large += size;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
+ arena->stats.lstats[index].nmalloc++;
+ arena->stats.lstats[index].nrequests++;
+ arena->stats.lstats[index].curruns++;
}
malloc_mutex_unlock(&arena->lock);
return (false);
}
+label_fail:
malloc_mutex_unlock(&arena->lock);
-
return (true);
}
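
The grow path above never over-commits: it starts from usize_max and steps down one size class at a time until the request fits in oldsize plus the free run behind it. A minimal standalone sketch of that shrink loop, using toy power-of-two classes (toy_size2index()/toy_index2size() are stand-ins for illustration, not jemalloc's real size-class tables):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Toy power-of-two size classes standing in for size2index()/index2size(). */
static size_t toy_size2index(size_t size) {
    size_t idx = 0;
    while (((size_t)1 << idx) < size)
        idx++;
    return idx;
}
static size_t toy_index2size(size_t idx) { return (size_t)1 << idx; }

/* Walk down one class at a time until the request fits in place. */
static size_t fit_usize(size_t oldsize, size_t followsize, size_t usize_max) {
    size_t usize = usize_max;
    while (oldsize + followsize < usize)
        usize = toy_index2size(toy_size2index(usize) - 1);
    return usize;
}

int main(void) {
    /* 12 KiB allocated, 8 KiB free behind it, caller asked for up to 32 KiB. */
    size_t usize = fit_usize(12288, 8192, 32768);
    assert(usize <= 12288 + 8192);
    printf("%zu\n", usize);   /* 16384: the largest class that still fits */
    return 0;
}
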
@@ -2083,7 +2742,7 @@ static void
arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
{
- if (config_fill && opt_junk) {
+ if (config_fill && unlikely(opt_junk_free)) {
memset((void *)((uintptr_t)ptr + usize), 0x5a,
old_usize - usize);
}
@@ -2100,131 +2759,132 @@ arena_ralloc_junk_large_t *arena_ralloc_junk_large =
* always fail if growing an object, and the following run is already in use.
*/
static bool
-arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
- bool zero)
+arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min,
+ size_t usize_max, bool zero)
{
- size_t psize;
+ arena_chunk_t *chunk;
+ arena_t *arena;
- psize = PAGE_CEILING(size + extra);
- if (psize == oldsize) {
- /* Same size class. */
+ if (oldsize == usize_max) {
+ /* Current size class is compatible and maximal. */
return (false);
- } else {
- arena_chunk_t *chunk;
- arena_t *arena;
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- arena = chunk->arena;
-
- if (psize < oldsize) {
- /* Fill before shrinking in order avoid a race. */
- arena_ralloc_junk_large(ptr, oldsize, psize);
- arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
- psize);
- return (false);
- } else {
- bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
- oldsize, PAGE_CEILING(size),
- psize - PAGE_CEILING(size), zero);
- if (config_fill && ret == false && zero == false) {
- if (opt_junk) {
- memset((void *)((uintptr_t)ptr +
- oldsize), 0xa5, isalloc(ptr,
- config_prof) - oldsize);
- } else if (opt_zero) {
- memset((void *)((uintptr_t)ptr +
- oldsize), 0, isalloc(ptr,
- config_prof) - oldsize);
- }
+ }
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ arena = extent_node_arena_get(&chunk->node);
+
+ if (oldsize < usize_max) {
+ bool ret = arena_ralloc_large_grow(arena, chunk, ptr, oldsize,
+ usize_min, usize_max, zero);
+ if (config_fill && !ret && !zero) {
+ if (unlikely(opt_junk_alloc)) {
+ memset((void *)((uintptr_t)ptr + oldsize), 0xa5,
+ isalloc(ptr, config_prof) - oldsize);
+ } else if (unlikely(opt_zero)) {
+ memset((void *)((uintptr_t)ptr + oldsize), 0,
+ isalloc(ptr, config_prof) - oldsize);
}
- return (ret);
}
+ return (ret);
}
+
+ assert(oldsize > usize_max);
+ /* Fill before shrinking in order to avoid a race. */
+ arena_ralloc_junk_large(ptr, oldsize, usize_max);
+ arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, usize_max);
+ return (false);
}
bool
arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
bool zero)
{
+ size_t usize_min, usize_max;
- /*
- * Avoid moving the allocation if the size class can be left the same.
- */
- if (oldsize <= arena_maxclass) {
+ usize_min = s2u(size);
+ usize_max = s2u(size + extra);
+ if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
+ /*
+ * Avoid moving the allocation if the size class can be left the
+ * same.
+ */
if (oldsize <= SMALL_MAXCLASS) {
- assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size
- == oldsize);
- if ((size + extra <= SMALL_MAXCLASS &&
- SMALL_SIZE2BIN(size + extra) ==
- SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
- size + extra >= oldsize))
+ assert(arena_bin_info[size2index(oldsize)].reg_size ==
+ oldsize);
+ if ((usize_max <= SMALL_MAXCLASS &&
+ size2index(usize_max) == size2index(oldsize)) ||
+ (size <= oldsize && usize_max >= oldsize))
return (false);
} else {
- assert(size <= arena_maxclass);
- if (size + extra > SMALL_MAXCLASS) {
- if (arena_ralloc_large(ptr, oldsize, size,
- extra, zero) == false)
+ if (usize_max > SMALL_MAXCLASS) {
+ if (!arena_ralloc_large(ptr, oldsize, usize_min,
+ usize_max, zero))
return (false);
}
}
+
+ /* Reallocation would require a move. */
+ return (true);
+ } else {
+ return (huge_ralloc_no_move(ptr, oldsize, usize_min, usize_max,
+ zero));
}
+}
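
arena_ralloc_no_move() keeps a small allocation in place whenever the new usable size maps to the same bin as the old one. A minimal sketch of that decision, again with toy power-of-two classes rather than the generated SIZE_CLASSES table:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Toy size-class index: power-of-two classes, standing in for size2index(). */
static size_t toy_size2index(size_t size) {
    size_t idx = 0;
    while (((size_t)1 << idx) < size)
        idx++;
    return idx;
}

/* True if a small allocation of oldsize can satisfy the request without moving. */
static bool small_can_stay(size_t oldsize, size_t size, size_t usize_max) {
    return toy_size2index(usize_max) == toy_size2index(oldsize) ||
        (size <= oldsize && usize_max >= oldsize);
}

int main(void) {
    printf("%d\n", small_can_stay(96, 100, 128));  /* 1: 96 and 128 share a toy class */
    printf("%d\n", small_can_stay(96, 200, 256));  /* 0: would need a larger class */
    return 0;
}
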
- /* Reallocation would require a move. */
- return (true);
+static void *
+arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
+ size_t alignment, bool zero, tcache_t *tcache)
+{
+
+ if (alignment == 0)
+ return (arena_malloc(tsd, arena, usize, zero, tcache));
+ usize = sa2u(usize, alignment);
+ if (usize == 0)
+ return (NULL);
+ return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
}
void *
-arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
- size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
- bool try_tcache_dalloc)
+arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
+ size_t alignment, bool zero, tcache_t *tcache)
{
void *ret;
- size_t copysize;
+ size_t usize;
- /* Try to avoid moving the allocation. */
- if (arena_ralloc_no_move(ptr, oldsize, size, extra, zero) == false)
- return (ptr);
+ usize = s2u(size);
+ if (usize == 0)
+ return (NULL);
- /*
- * size and oldsize are different enough that we need to move the
- * object. In that case, fall back to allocating new space and
- * copying.
- */
- if (alignment != 0) {
- size_t usize = sa2u(size + extra, alignment);
- if (usize == 0)
- return (NULL);
- ret = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
- } else
- ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc);
+ if (likely(usize <= large_maxclass)) {
+ size_t copysize;
- if (ret == NULL) {
- if (extra == 0)
- return (NULL);
- /* Try again, this time without extra. */
- if (alignment != 0) {
- size_t usize = sa2u(size, alignment);
- if (usize == 0)
- return (NULL);
- ret = ipalloct(usize, alignment, zero, try_tcache_alloc,
- arena);
- } else
- ret = arena_malloc(arena, size, zero, try_tcache_alloc);
+ /* Try to avoid moving the allocation. */
+ if (!arena_ralloc_no_move(ptr, oldsize, usize, 0, zero))
+ return (ptr);
+ /*
+ * size and oldsize are different enough that we need to move
+ * the object. In that case, fall back to allocating new space
+ * and copying.
+ */
+ ret = arena_ralloc_move_helper(tsd, arena, usize, alignment,
+ zero, tcache);
if (ret == NULL)
return (NULL);
- }
- /* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */
+ /*
+ * Junk/zero-filling were already done by
+ * ipalloc()/arena_malloc().
+ */
- /*
- * Copy at most size bytes (not size+extra), since the caller has no
- * expectation that the extra bytes will be reliably preserved.
- */
- copysize = (size < oldsize) ? size : oldsize;
- VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
- memcpy(ret, ptr, copysize);
- iqalloct(ptr, try_tcache_dalloc);
+ copysize = (usize < oldsize) ? usize : oldsize;
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
+ memcpy(ret, ptr, copysize);
+ isqalloc(tsd, ptr, oldsize, tcache);
+ } else {
+ ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
+ zero, tcache);
+ }
return (ret);
}
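
When the size classes are incompatible, arena_ralloc() falls back to the classic allocate-copy-free move, copying only min(usize, oldsize) bytes since nothing past the old usable size is meaningful. A generic sketch of the pattern over plain malloc/free (none of the arena/tcache plumbing):

#include <stdlib.h>
#include <string.h>

/*
 * Grow or shrink by moving: allocate new space, copy the overlapping prefix,
 * then release the old allocation. Returns NULL (and leaves the old block
 * intact) if the new allocation fails, mirroring realloc() semantics.
 */
static void *realloc_by_move(void *ptr, size_t oldsize, size_t usize) {
    void *ret = malloc(usize);
    if (ret == NULL)
        return NULL;
    size_t copysize = (usize < oldsize) ? usize : oldsize;
    memcpy(ret, ptr, copysize);
    free(ptr);
    return ret;
}

int main(void) {
    char *p = malloc(64);
    if (p == NULL)
        return 1;
    memset(p, 'x', 64);
    char *q = realloc_by_move(p, 64, 256);   /* copies the first 64 bytes */
    free(q != NULL ? q : p);                 /* on failure the old block survives */
    return 0;
}
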
@@ -2239,24 +2899,46 @@ arena_dss_prec_get(arena_t *arena)
return (ret);
}
-void
+bool
arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
{
+ if (!have_dss)
+ return (dss_prec != dss_prec_disabled);
malloc_mutex_lock(&arena->lock);
arena->dss_prec = dss_prec;
malloc_mutex_unlock(&arena->lock);
+ return (false);
+}
+
+ssize_t
+arena_lg_dirty_mult_default_get(void)
+{
+
+ return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default));
+}
+
+bool
+arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
+{
+
+ if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
+ return (true);
+ atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult);
+ return (false);
}
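
The new default is read and written with the existing size_t atomics; same-width casts let the signed lg_dirty_mult value (-1 disables purging) round-trip through the unsigned slot. A sketch of the same idea using C11 atomics as a stand-in for atomic_read_z()/atomic_write_z():

#include <stdatomic.h>
#include <stdio.h>
#include <sys/types.h>

/* Signed default stored in an unsigned atomic slot; same-width casts round-trip it. */
static _Atomic size_t lg_dirty_mult_default;

static ssize_t default_get(void) {
    return (ssize_t)atomic_load(&lg_dirty_mult_default);
}

static void default_set(ssize_t lg_dirty_mult) {
    atomic_store(&lg_dirty_mult_default, (size_t)lg_dirty_mult);
}

int main(void) {
    default_set(-1);                     /* -1: purging disabled */
    printf("%zd\n", default_get());      /* prints -1 */
    return 0;
}
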
void
-arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
- size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
- malloc_large_stats_t *lstats)
+arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult,
+ size_t *nactive, size_t *ndirty, arena_stats_t *astats,
+ malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
+ malloc_huge_stats_t *hstats)
{
unsigned i;
malloc_mutex_lock(&arena->lock);
*dss = dss_prec_names[arena->dss_prec];
+ *lg_dirty_mult = arena->lg_dirty_mult;
*nactive += arena->nactive;
*ndirty += arena->ndirty;
@@ -2264,10 +2946,15 @@ arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
astats->npurge += arena->stats.npurge;
astats->nmadvise += arena->stats.nmadvise;
astats->purged += arena->stats.purged;
+ astats->metadata_mapped += arena->stats.metadata_mapped;
+ astats->metadata_allocated += arena_metadata_allocated_get(arena);
astats->allocated_large += arena->stats.allocated_large;
astats->nmalloc_large += arena->stats.nmalloc_large;
astats->ndalloc_large += arena->stats.ndalloc_large;
astats->nrequests_large += arena->stats.nrequests_large;
+ astats->allocated_huge += arena->stats.allocated_huge;
+ astats->nmalloc_huge += arena->stats.nmalloc_huge;
+ astats->ndalloc_huge += arena->stats.ndalloc_huge;
for (i = 0; i < nlclasses; i++) {
lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
@@ -2275,16 +2962,22 @@ arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
lstats[i].nrequests += arena->stats.lstats[i].nrequests;
lstats[i].curruns += arena->stats.lstats[i].curruns;
}
+
+ for (i = 0; i < nhclasses; i++) {
+ hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
+ hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
+ hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
+ }
malloc_mutex_unlock(&arena->lock);
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(&bin->lock);
- bstats[i].allocated += bin->stats.allocated;
bstats[i].nmalloc += bin->stats.nmalloc;
bstats[i].ndalloc += bin->stats.ndalloc;
bstats[i].nrequests += bin->stats.nrequests;
+ bstats[i].curregs += bin->stats.curregs;
if (config_tcache) {
bstats[i].nfills += bin->stats.nfills;
bstats[i].nflushes += bin->stats.nflushes;
@@ -2296,27 +2989,42 @@ arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
}
}
-bool
-arena_new(arena_t *arena, unsigned ind)
+arena_t *
+arena_new(unsigned ind)
{
+ arena_t *arena;
unsigned i;
arena_bin_t *bin;
+ /*
+ * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
+ * because there is no way to clean up if base_alloc() OOMs.
+ */
+ if (config_stats) {
+ arena = (arena_t *)base_alloc(CACHELINE_CEILING(sizeof(arena_t))
+ + QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t) +
+ nhclasses) * sizeof(malloc_huge_stats_t));
+ } else
+ arena = (arena_t *)base_alloc(sizeof(arena_t));
+ if (arena == NULL)
+ return (NULL);
+
arena->ind = ind;
arena->nthreads = 0;
-
if (malloc_mutex_init(&arena->lock))
- return (true);
+ return (NULL);
if (config_stats) {
memset(&arena->stats, 0, sizeof(arena_stats_t));
- arena->stats.lstats =
- (malloc_large_stats_t *)base_alloc(nlclasses *
- sizeof(malloc_large_stats_t));
- if (arena->stats.lstats == NULL)
- return (true);
+ arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena
+ + CACHELINE_CEILING(sizeof(arena_t)));
memset(arena->stats.lstats, 0, nlclasses *
sizeof(malloc_large_stats_t));
+ arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena
+ + CACHELINE_CEILING(sizeof(arena_t)) +
+ QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)));
+ memset(arena->stats.hstats, 0, nhclasses *
+ sizeof(malloc_huge_stats_t));
if (config_tcache)
ql_new(&arena->tcache_ql);
}
@@ -2324,56 +3032,76 @@ arena_new(arena_t *arena, unsigned ind)
if (config_prof)
arena->prof_accumbytes = 0;
+ if (config_cache_oblivious) {
+ /*
+ * A nondeterministic seed based on the address of arena reduces
+ * the likelihood of lockstep non-uniform cache index
+ * utilization among identical concurrent processes, but at the
+ * cost of test repeatability. For debug builds, instead use a
+ * deterministic seed.
+ */
+ arena->offset_state = config_debug ? ind :
+ (uint64_t)(uintptr_t)arena;
+ }
+
arena->dss_prec = chunk_dss_prec_get();
- /* Initialize chunks. */
- arena_chunk_dirty_new(&arena->chunks_dirty);
arena->spare = NULL;
+ arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
+ arena->purging = false;
arena->nactive = 0;
arena->ndirty = 0;
- arena->npurgatory = 0;
arena_avail_tree_new(&arena->runs_avail);
+ qr_new(&arena->runs_dirty, rd_link);
+ qr_new(&arena->chunks_cache, cc_link);
+
+ ql_new(&arena->huge);
+ if (malloc_mutex_init(&arena->huge_mtx))
+ return (NULL);
+
+ extent_tree_szad_new(&arena->chunks_szad_cached);
+ extent_tree_ad_new(&arena->chunks_ad_cached);
+ extent_tree_szad_new(&arena->chunks_szad_retained);
+ extent_tree_ad_new(&arena->chunks_ad_retained);
+ if (malloc_mutex_init(&arena->chunks_mtx))
+ return (NULL);
+ ql_new(&arena->node_cache);
+ if (malloc_mutex_init(&arena->node_cache_mtx))
+ return (NULL);
+
+ arena->chunk_hooks = chunk_hooks_default;
/* Initialize bins. */
for (i = 0; i < NBINS; i++) {
bin = &arena->bins[i];
if (malloc_mutex_init(&bin->lock))
- return (true);
+ return (NULL);
bin->runcur = NULL;
arena_run_tree_new(&bin->runs);
if (config_stats)
memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
}
- return (false);
+ return (arena);
}
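
Per the comment in arena_new(), the arena header and its lstats/hstats arrays now come from a single base_alloc() call so an OOM cannot leave partially allocated pieces behind. A standalone sketch of the same carve-up over plain malloc, with a generic CEIL() in place of CACHELINE_CEILING()/QUANTUM_CEILING() and made-up struct layouts:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define CACHELINE 64
#define CEIL(x, a) ((((x) + (a) - 1) / (a)) * (a))   /* round x up to a multiple of a */

struct stats { size_t nmalloc, ndalloc; };
struct arena { int ind; struct stats *lstats; struct stats *hstats; };

/* One allocation, three regions: header first, then the two stats arrays. */
static struct arena *arena_create(int ind, size_t nl, size_t nh) {
    size_t hdr = CEIL(sizeof(struct arena), CACHELINE);
    struct arena *a = malloc(hdr + (nl + nh) * sizeof(struct stats));
    if (a == NULL)
        return NULL;                     /* single failure point, nothing to unwind */
    a->ind = ind;
    a->lstats = (struct stats *)((uintptr_t)a + hdr);
    a->hstats = a->lstats + nl;
    memset(a->lstats, 0, (nl + nh) * sizeof(struct stats));
    return a;
}

int main(void) {
    struct arena *a = arena_create(0, 4, 2);
    free(a);     /* one free releases the struct and both arrays */
    return 0;
}
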
/*
* Calculate bin_info->run_size such that it meets the following constraints:
*
- * *) bin_info->run_size >= min_run_size
- * *) bin_info->run_size <= arena_maxclass
- * *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
+ * *) bin_info->run_size <= arena_maxrun
* *) bin_info->nregs <= RUN_MAXREGS
*
- * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also
- * calculated here, since these settings are all interdependent.
+ * bin_info->nregs and bin_info->reg0_offset are also calculated here, since
+ * these settings are all interdependent.
*/
-static size_t
-bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
+static void
+bin_info_run_size_calc(arena_bin_info_t *bin_info)
{
size_t pad_size;
- size_t try_run_size, good_run_size;
- uint32_t try_nregs, good_nregs;
- uint32_t try_hdr_size, good_hdr_size;
- uint32_t try_bitmap_offset, good_bitmap_offset;
- uint32_t try_ctx0_offset, good_ctx0_offset;
- uint32_t try_redzone0_offset, good_redzone0_offset;
-
- assert(min_run_size >= PAGE);
- assert(min_run_size <= arena_maxclass);
+ size_t try_run_size, perfect_run_size, actual_run_size;
+ uint32_t try_nregs, perfect_nregs, actual_nregs;
/*
* Determine redzone size based on minimum alignment and minimum
@@ -2382,8 +3110,9 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
* minimum alignment; without the padding, each redzone would have to
* be twice as large in order to maintain alignment.
*/
- if (config_fill && opt_redzone) {
- size_t align_min = ZU(1) << (ffs(bin_info->reg_size) - 1);
+ if (config_fill && unlikely(opt_redzone)) {
+ size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) -
+ 1);
if (align_min <= REDZONE_MINSIZE) {
bin_info->redzone_size = REDZONE_MINSIZE;
pad_size = 0;
@@ -2399,127 +3128,113 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
(bin_info->redzone_size << 1);
/*
- * Calculate known-valid settings before entering the run_size
- * expansion loop, so that the first part of the loop always copies
- * valid settings.
- *
- * The do..while loop iteratively reduces the number of regions until
- * the run header and the regions no longer overlap. A closed formula
- * would be quite messy, since there is an interdependency between the
- * header's mask length and the number of regions.
+ * Compute run size under ideal conditions (no redzones, no limit on run
+ * size).
*/
- try_run_size = min_run_size;
- try_nregs = ((try_run_size - sizeof(arena_run_t)) /
- bin_info->reg_interval)
- + 1; /* Counter-act try_nregs-- in loop. */
- if (try_nregs > RUN_MAXREGS) {
- try_nregs = RUN_MAXREGS
- + 1; /* Counter-act try_nregs-- in loop. */
- }
- do {
- try_nregs--;
- try_hdr_size = sizeof(arena_run_t);
- /* Pad to a long boundary. */
- try_hdr_size = LONG_CEILING(try_hdr_size);
- try_bitmap_offset = try_hdr_size;
- /* Add space for bitmap. */
- try_hdr_size += bitmap_size(try_nregs);
- if (config_prof && opt_prof && prof_promote == false) {
- /* Pad to a quantum boundary. */
- try_hdr_size = QUANTUM_CEILING(try_hdr_size);
- try_ctx0_offset = try_hdr_size;
- /* Add space for one (prof_ctx_t *) per region. */
- try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
- } else
- try_ctx0_offset = 0;
- try_redzone0_offset = try_run_size - (try_nregs *
- bin_info->reg_interval) - pad_size;
- } while (try_hdr_size > try_redzone0_offset);
-
- /* run_size expansion loop. */
+ try_run_size = PAGE;
+ try_nregs = try_run_size / bin_info->reg_size;
do {
- /*
- * Copy valid settings before trying more aggressive settings.
- */
- good_run_size = try_run_size;
- good_nregs = try_nregs;
- good_hdr_size = try_hdr_size;
- good_bitmap_offset = try_bitmap_offset;
- good_ctx0_offset = try_ctx0_offset;
- good_redzone0_offset = try_redzone0_offset;
-
- /* Try more aggressive settings. */
+ perfect_run_size = try_run_size;
+ perfect_nregs = try_nregs;
+
try_run_size += PAGE;
- try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) /
- bin_info->reg_interval)
- + 1; /* Counter-act try_nregs-- in loop. */
- if (try_nregs > RUN_MAXREGS) {
- try_nregs = RUN_MAXREGS
- + 1; /* Counter-act try_nregs-- in loop. */
- }
- do {
- try_nregs--;
- try_hdr_size = sizeof(arena_run_t);
- /* Pad to a long boundary. */
- try_hdr_size = LONG_CEILING(try_hdr_size);
- try_bitmap_offset = try_hdr_size;
- /* Add space for bitmap. */
- try_hdr_size += bitmap_size(try_nregs);
- if (config_prof && opt_prof && prof_promote == false) {
- /* Pad to a quantum boundary. */
- try_hdr_size = QUANTUM_CEILING(try_hdr_size);
- try_ctx0_offset = try_hdr_size;
- /*
- * Add space for one (prof_ctx_t *) per region.
- */
- try_hdr_size += try_nregs *
- sizeof(prof_ctx_t *);
- }
- try_redzone0_offset = try_run_size - (try_nregs *
- bin_info->reg_interval) - pad_size;
- } while (try_hdr_size > try_redzone0_offset);
- } while (try_run_size <= arena_maxclass
- && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) >
- RUN_MAX_OVRHD_RELAX
- && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
- && try_nregs < RUN_MAXREGS);
+ try_nregs = try_run_size / bin_info->reg_size;
+ } while (perfect_run_size != perfect_nregs * bin_info->reg_size);
+ assert(perfect_nregs <= RUN_MAXREGS);
+
+ actual_run_size = perfect_run_size;
+ actual_nregs = (actual_run_size - pad_size) / bin_info->reg_interval;
+
+ /*
+ * Redzones can require enough padding that not even a single region can
+ * fit within the number of pages that would normally be dedicated to a
+ * run for this size class. Increase the run size until at least one
+ * region fits.
+ */
+ while (actual_nregs == 0) {
+ assert(config_fill && unlikely(opt_redzone));
- assert(good_hdr_size <= good_redzone0_offset);
+ actual_run_size += PAGE;
+ actual_nregs = (actual_run_size - pad_size) /
+ bin_info->reg_interval;
+ }
+
+ /*
+ * Make sure that the run will fit within an arena chunk.
+ */
+ while (actual_run_size > arena_maxrun) {
+ actual_run_size -= PAGE;
+ actual_nregs = (actual_run_size - pad_size) /
+ bin_info->reg_interval;
+ }
+ assert(actual_nregs > 0);
+ assert(actual_run_size == s2u(actual_run_size));
/* Copy final settings. */
- bin_info->run_size = good_run_size;
- bin_info->nregs = good_nregs;
- bin_info->bitmap_offset = good_bitmap_offset;
- bin_info->ctx0_offset = good_ctx0_offset;
- bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size;
+ bin_info->run_size = actual_run_size;
+ bin_info->nregs = actual_nregs;
+ bin_info->reg0_offset = actual_run_size - (actual_nregs *
+ bin_info->reg_interval) - pad_size + bin_info->redzone_size;
+
+ if (actual_run_size > small_maxrun)
+ small_maxrun = actual_run_size;
assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
* bin_info->reg_interval) + pad_size == bin_info->run_size);
-
- return (good_run_size);
}
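
The rewritten bin_info_run_size_calc() first finds the "perfect" run size, the smallest page multiple that divides evenly into regions, before adjusting for redzone padding and the arena_maxrun cap. The core loop in isolation, with a worked value:

#include <stdio.h>

#define PAGE 4096u

/* Smallest multiple of PAGE that is also a multiple of reg_size. */
static unsigned perfect_run_size(unsigned reg_size) {
    unsigned try_run_size = PAGE;
    unsigned try_nregs = try_run_size / reg_size;
    unsigned run, nregs;
    do {
        run = try_run_size;
        nregs = try_nregs;
        try_run_size += PAGE;
        try_nregs = try_run_size / reg_size;
    } while (run != nregs * reg_size);
    return run;
}

int main(void) {
    /* 96-byte regions: 4096 % 96 != 0, 8192 % 96 != 0, 12288 == 128 * 96. */
    printf("%u\n", perfect_run_size(96));   /* 12288 */
    return 0;
}

For 96-byte regions the loop lands on 12288 bytes (128 regions), i.e. the least common multiple of the page size and the region size.
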
static void
bin_info_init(void)
{
arena_bin_info_t *bin_info;
- size_t prev_run_size = PAGE;
-#define SIZE_CLASS(bin, delta, size) \
- bin_info = &arena_bin_info[bin]; \
+#define BIN_INFO_INIT_bin_yes(index, size) \
+ bin_info = &arena_bin_info[index]; \
bin_info->reg_size = size; \
- prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\
+ bin_info_run_size_calc(bin_info); \
bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
+#define BIN_INFO_INIT_bin_no(index, size)
+#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
+ BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
SIZE_CLASSES
-#undef SIZE_CLASS
+#undef BIN_INFO_INIT_bin_yes
+#undef BIN_INFO_INIT_bin_no
+#undef SC
}
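
bin_info_init() is driven by the SIZE_CLASSES X-macro: SC() expands once per class, and the _bin_yes/_bin_no suffix selects whether a bin entry is emitted. A self-contained miniature of the pattern (TOY_SIZE_CLASSES is invented for illustration, not jemalloc's generated table):

#include <stddef.h>
#include <stdio.h>

/* Toy X-macro table: index, size, and whether the class is backed by a bin. */
#define TOY_SIZE_CLASSES \
    SC(0,    8, yes)     \
    SC(1,   16, yes)     \
    SC(2, 4096, no)

static size_t bin_sizes[8];

static void bin_init(void) {
#define INIT_bin_yes(index, size) bin_sizes[index] = (size);
#define INIT_bin_no(index, size)  /* not a small bin: nothing to emit */
#define SC(index, size, bin) INIT_bin_##bin(index, size)
    TOY_SIZE_CLASSES
#undef SC
#undef INIT_bin_no
#undef INIT_bin_yes
}

int main(void) {
    bin_init();
    printf("%zu %zu %zu\n", bin_sizes[0], bin_sizes[1], bin_sizes[2]);  /* 8 16 0 */
    return 0;
}
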
-void
+static bool
+small_run_size_init(void)
+{
+
+ assert(small_maxrun != 0);
+
+ small_run_tab = (bool *)base_alloc(sizeof(bool) * (small_maxrun >>
+ LG_PAGE));
+ if (small_run_tab == NULL)
+ return (true);
+
+#define TAB_INIT_bin_yes(index, size) { \
+ arena_bin_info_t *bin_info = &arena_bin_info[index]; \
+ small_run_tab[bin_info->run_size >> LG_PAGE] = true; \
+ }
+#define TAB_INIT_bin_no(index, size)
+#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
+ TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
+ SIZE_CLASSES
+#undef TAB_INIT_bin_yes
+#undef TAB_INIT_bin_no
+#undef SC
+
+ return (false);
+}
+
+bool
arena_boot(void)
{
- size_t header_size;
unsigned i;
+ arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
+
/*
* Compute the header size such that it is large enough to contain the
* page map. The page map is biased to omit entries for the header
@@ -2534,16 +3249,33 @@ arena_boot(void)
*/
map_bias = 0;
for (i = 0; i < 3; i++) {
- header_size = offsetof(arena_chunk_t, map) +
- (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
- map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK)
- != 0);
+ size_t header_size = offsetof(arena_chunk_t, map_bits) +
+ ((sizeof(arena_chunk_map_bits_t) +
+ sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
+ map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
}
assert(map_bias > 0);
- arena_maxclass = chunksize - (map_bias << LG_PAGE);
+ map_misc_offset = offsetof(arena_chunk_t, map_bits) +
+ sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);
+
+ arena_maxrun = chunksize - (map_bias << LG_PAGE);
+ assert(arena_maxrun > 0);
+ large_maxclass = index2size(size2index(chunksize)-1);
+ if (large_maxclass > arena_maxrun) {
+ /*
+ * For small chunk sizes it's possible for there to be fewer
+ * non-header pages available than are necessary to serve the
+ * size classes just below chunksize.
+ */
+ large_maxclass = arena_maxrun;
+ }
+ assert(large_maxclass > 0);
+ nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
+ nhclasses = NSIZES - nlclasses - NBINS;
bin_info_init();
+ return (small_run_size_init());
}
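
map_bias is solved by fixed-point iteration: the chunk header must hold one page-map entry per non-header page, but the number of non-header pages depends on how many pages the header itself occupies; three passes are enough for the value to settle. A standalone sketch with hypothetical sizes (not the real arena_chunk_t layout):

#include <stddef.h>
#include <stdio.h>

#define LG_PAGE   12
#define PAGE_MASK ((1u << LG_PAGE) - 1)

int main(void) {
    /* Hypothetical per-page map entry size and chunk geometry. */
    size_t map_entry_size = 48;      /* stand-in for map_bits + map_misc */
    size_t fixed_header   = 128;     /* stand-in for the rest of arena_chunk_t */
    size_t chunk_npages   = 512;     /* 2 MiB chunk / 4 KiB pages */

    size_t map_bias = 0;
    for (int i = 0; i < 3; i++) {
        size_t header_size = fixed_header +
            map_entry_size * (chunk_npages - map_bias);
        map_bias = (header_size + PAGE_MASK) >> LG_PAGE;   /* round up to pages */
        printf("pass %d: map_bias = %zu\n", i, map_bias);  /* 7, 6, 6 */
    }
    return 0;
}
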
void
@@ -2552,6 +3284,9 @@ arena_prefork(arena_t *arena)
unsigned i;
malloc_mutex_prefork(&arena->lock);
+ malloc_mutex_prefork(&arena->huge_mtx);
+ malloc_mutex_prefork(&arena->chunks_mtx);
+ malloc_mutex_prefork(&arena->node_cache_mtx);
for (i = 0; i < NBINS; i++)
malloc_mutex_prefork(&arena->bins[i].lock);
}
@@ -2563,6 +3298,9 @@ arena_postfork_parent(arena_t *arena)
for (i = 0; i < NBINS; i++)
malloc_mutex_postfork_parent(&arena->bins[i].lock);
+ malloc_mutex_postfork_parent(&arena->node_cache_mtx);
+ malloc_mutex_postfork_parent(&arena->chunks_mtx);
+ malloc_mutex_postfork_parent(&arena->huge_mtx);
malloc_mutex_postfork_parent(&arena->lock);
}
@@ -2573,5 +3311,8 @@ arena_postfork_child(arena_t *arena)
for (i = 0; i < NBINS; i++)
malloc_mutex_postfork_child(&arena->bins[i].lock);
+ malloc_mutex_postfork_child(&arena->node_cache_mtx);
+ malloc_mutex_postfork_child(&arena->chunks_mtx);
+ malloc_mutex_postfork_child(&arena->huge_mtx);
malloc_mutex_postfork_child(&arena->lock);
}
diff --git a/deps/jemalloc/src/base.c b/deps/jemalloc/src/base.c
index 4e62e8fa9..7cdcfed86 100644
--- a/deps/jemalloc/src/base.c
+++ b/deps/jemalloc/src/base.c
@@ -5,107 +5,138 @@
/* Data. */
static malloc_mutex_t base_mtx;
-
-/*
- * Current pages that are being used for internal memory allocations. These
- * pages are carved up in cacheline-size quanta, so that there is no chance of
- * false cache line sharing.
- */
-static void *base_pages;
-static void *base_next_addr;
-static void *base_past_addr; /* Addr immediately past base_pages. */
+static extent_tree_t base_avail_szad;
static extent_node_t *base_nodes;
-
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static bool base_pages_alloc(size_t minsize);
+static size_t base_allocated;
+static size_t base_resident;
+static size_t base_mapped;
/******************************************************************************/
-static bool
-base_pages_alloc(size_t minsize)
+/* base_mtx must be held. */
+static extent_node_t *
+base_node_try_alloc(void)
{
- size_t csize;
- bool zero;
+ extent_node_t *node;
+
+ if (base_nodes == NULL)
+ return (NULL);
+ node = base_nodes;
+ base_nodes = *(extent_node_t **)node;
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
+ return (node);
+}
- assert(minsize != 0);
- csize = CHUNK_CEILING(minsize);
- zero = false;
- base_pages = chunk_alloc(csize, chunksize, true, &zero,
- chunk_dss_prec_get());
- if (base_pages == NULL)
- return (true);
- base_next_addr = base_pages;
- base_past_addr = (void *)((uintptr_t)base_pages + csize);
+/* base_mtx must be held. */
+static void
+base_node_dalloc(extent_node_t *node)
+{
- return (false);
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
+ *(extent_node_t **)node = base_nodes;
+ base_nodes = node;
}
-void *
-base_alloc(size_t size)
+/* base_mtx must be held. */
+static extent_node_t *
+base_chunk_alloc(size_t minsize)
{
- void *ret;
- size_t csize;
+ extent_node_t *node;
+ size_t csize, nsize;
+ void *addr;
- /* Round size up to nearest multiple of the cacheline size. */
- csize = CACHELINE_CEILING(size);
-
- malloc_mutex_lock(&base_mtx);
- /* Make sure there's enough space for the allocation. */
- if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
- if (base_pages_alloc(csize)) {
- malloc_mutex_unlock(&base_mtx);
- return (NULL);
+ assert(minsize != 0);
+ node = base_node_try_alloc();
+ /* Allocate enough space to also carve a node out if necessary. */
+ nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
+ csize = CHUNK_CEILING(minsize + nsize);
+ addr = chunk_alloc_base(csize);
+ if (addr == NULL) {
+ if (node != NULL)
+ base_node_dalloc(node);
+ return (NULL);
+ }
+ base_mapped += csize;
+ if (node == NULL) {
+ node = (extent_node_t *)addr;
+ addr = (void *)((uintptr_t)addr + nsize);
+ csize -= nsize;
+ if (config_stats) {
+ base_allocated += nsize;
+ base_resident += PAGE_CEILING(nsize);
}
}
- /* Allocate. */
- ret = base_next_addr;
- base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
- malloc_mutex_unlock(&base_mtx);
- VALGRIND_MAKE_MEM_UNDEFINED(ret, csize);
-
- return (ret);
+ extent_node_init(node, NULL, addr, csize, true, true);
+ return (node);
}
+/*
+ * base_alloc() guarantees demand-zeroed memory, in order to make multi-page
+ * sparse data structures such as radix tree nodes efficient with respect to
+ * physical memory usage.
+ */
void *
-base_calloc(size_t number, size_t size)
-{
- void *ret = base_alloc(number * size);
-
- if (ret != NULL)
- memset(ret, 0, number * size);
-
- return (ret);
-}
-
-extent_node_t *
-base_node_alloc(void)
+base_alloc(size_t size)
{
- extent_node_t *ret;
+ void *ret;
+ size_t csize, usize;
+ extent_node_t *node;
+ extent_node_t key;
+
+ /*
+ * Round size up to nearest multiple of the cacheline size, so that
+ * there is no chance of false cache line sharing.
+ */
+ csize = CACHELINE_CEILING(size);
+ usize = s2u(csize);
+ extent_node_init(&key, NULL, NULL, usize, false, false);
malloc_mutex_lock(&base_mtx);
- if (base_nodes != NULL) {
- ret = base_nodes;
- base_nodes = *(extent_node_t **)ret;
- malloc_mutex_unlock(&base_mtx);
- VALGRIND_MAKE_MEM_UNDEFINED(ret, sizeof(extent_node_t));
+ node = extent_tree_szad_nsearch(&base_avail_szad, &key);
+ if (node != NULL) {
+ /* Use existing space. */
+ extent_tree_szad_remove(&base_avail_szad, node);
} else {
- malloc_mutex_unlock(&base_mtx);
- ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
+ /* Try to allocate more space. */
+ node = base_chunk_alloc(csize);
+ }
+ if (node == NULL) {
+ ret = NULL;
+ goto label_return;
}
+ ret = extent_node_addr_get(node);
+ if (extent_node_size_get(node) > csize) {
+ extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
+ extent_node_size_set(node, extent_node_size_get(node) - csize);
+ extent_tree_szad_insert(&base_avail_szad, node);
+ } else
+ base_node_dalloc(node);
+ if (config_stats) {
+ base_allocated += csize;
+ /*
+ * Add one PAGE to base_resident for every page boundary that is
+ * crossed by the new allocation.
+ */
+ base_resident += PAGE_CEILING((uintptr_t)ret + csize) -
+ PAGE_CEILING((uintptr_t)ret);
+ }
+ JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
+label_return:
+ malloc_mutex_unlock(&base_mtx);
return (ret);
}
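
base_alloc() now recycles internal space through a size/address-ordered extent tree: it rounds the request up to a cacheline multiple, takes the best-fitting free node, and re-inserts whatever is left over. The split arithmetic in isolation, with a single free extent standing in for the tree:

#include <stdint.h>
#include <stdio.h>

struct extent { void *addr; size_t size; };

/*
 * Carve csize bytes off the front of a free extent. The caller gets the old
 * start address; the node shrinks to describe the remainder (and would be
 * re-inserted into the tree, or recycled if it is now empty).
 */
static void *carve(struct extent *node, size_t csize) {
    if (node->size < csize)
        return NULL;
    void *ret = node->addr;
    node->addr = (void *)((uintptr_t)node->addr + csize);
    node->size -= csize;
    return ret;
}

int main(void) {
    static unsigned char chunk[4096];
    struct extent avail = { chunk, sizeof(chunk) };
    void *a = carve(&avail, 256);
    void *b = carve(&avail, 256);
    printf("%p %p remaining=%zu\n", a, b, avail.size);   /* 3584 bytes left */
    return 0;
}
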
void
-base_node_dealloc(extent_node_t *node)
+base_stats_get(size_t *allocated, size_t *resident, size_t *mapped)
{
- VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
malloc_mutex_lock(&base_mtx);
- *(extent_node_t **)node = base_nodes;
- base_nodes = node;
+ assert(base_allocated <= base_resident);
+ assert(base_resident <= base_mapped);
+ *allocated = base_allocated;
+ *resident = base_resident;
+ *mapped = base_mapped;
malloc_mutex_unlock(&base_mtx);
}
@@ -113,9 +144,10 @@ bool
base_boot(void)
{
- base_nodes = NULL;
if (malloc_mutex_init(&base_mtx))
return (true);
+ extent_tree_szad_new(&base_avail_szad);
+ base_nodes = NULL;
return (false);
}
diff --git a/deps/jemalloc/src/bitmap.c b/deps/jemalloc/src/bitmap.c
index e2bd907d5..c733372b4 100644
--- a/deps/jemalloc/src/bitmap.c
+++ b/deps/jemalloc/src/bitmap.c
@@ -2,19 +2,6 @@
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static size_t bits2groups(size_t nbits);
-
-/******************************************************************************/
-
-static size_t
-bits2groups(size_t nbits)
-{
-
- return ((nbits >> LG_BITMAP_GROUP_NBITS) +
- !!(nbits & BITMAP_GROUP_NBITS_MASK));
-}
void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
@@ -31,15 +18,16 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
* that requires only one group.
*/
binfo->levels[0].group_offset = 0;
- group_count = bits2groups(nbits);
+ group_count = BITMAP_BITS2GROUPS(nbits);
for (i = 1; group_count > 1; i++) {
assert(i < BITMAP_MAX_LEVELS);
binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+ group_count;
- group_count = bits2groups(group_count);
+ group_count = BITMAP_BITS2GROUPS(group_count);
}
binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+ group_count;
+ assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX);
binfo->nlevels = i;
binfo->nbits = nbits;
}
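
bits2groups() is replaced by the BITMAP_BITS2GROUPS() macro, which is the usual shift-plus-remainder-test ceiling division. The idiom checked against the naive form (toy macro names, 64-bit groups assumed):

#include <assert.h>
#include <stdio.h>

#define LG_GROUP_NBITS 6                        /* 64-bit groups */
#define GROUP_NBITS    (1u << LG_GROUP_NBITS)
#define GROUP_MASK     (GROUP_NBITS - 1)

/* Ceiling of nbits / 64: shift for the quotient, !! adds the partial group. */
#define BITS2GROUPS(nbits) \
    (((nbits) >> LG_GROUP_NBITS) + !!((nbits) & GROUP_MASK))

int main(void) {
    for (unsigned nbits = 1; nbits <= 1024; nbits++)
        assert(BITS2GROUPS(nbits) == (nbits + GROUP_NBITS - 1) / GROUP_NBITS);
    printf("%u %u %u\n", BITS2GROUPS(1), BITS2GROUPS(64), BITS2GROUPS(65));  /* 1 1 2 */
    return 0;
}
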
diff --git a/deps/jemalloc/src/chunk.c b/deps/jemalloc/src/chunk.c
index 90ab116ae..6ba1ca7a5 100644
--- a/deps/jemalloc/src/chunk.c
+++ b/deps/jemalloc/src/chunk.c
@@ -5,129 +5,315 @@
/* Data. */
const char *opt_dss = DSS_DEFAULT;
-size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
+size_t opt_lg_chunk = 0;
-malloc_mutex_t chunks_mtx;
-chunk_stats_t stats_chunks;
+/* Used exclusively for gdump triggering. */
+static size_t curchunks;
+static size_t highchunks;
-/*
- * Trees of chunks that were previously allocated (trees differ only in node
- * ordering). These are used when allocating chunks, in an attempt to re-use
- * address space. Depending on function, different tree orderings are needed,
- * which is why there are two trees with the same contents.
- */
-static extent_tree_t chunks_szad_mmap;
-static extent_tree_t chunks_ad_mmap;
-static extent_tree_t chunks_szad_dss;
-static extent_tree_t chunks_ad_dss;
-
-rtree_t *chunks_rtree;
+rtree_t chunks_rtree;
/* Various chunk-related settings. */
size_t chunksize;
size_t chunksize_mask; /* (chunksize - 1). */
size_t chunk_npages;
-size_t map_bias;
-size_t arena_maxclass; /* Max size class for arenas. */
+
+static void *chunk_alloc_default(void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
+static bool chunk_dalloc_default(void *chunk, size_t size, bool committed,
+ unsigned arena_ind);
+static bool chunk_commit_default(void *chunk, size_t size, size_t offset,
+ size_t length, unsigned arena_ind);
+static bool chunk_decommit_default(void *chunk, size_t size, size_t offset,
+ size_t length, unsigned arena_ind);
+static bool chunk_purge_default(void *chunk, size_t size, size_t offset,
+ size_t length, unsigned arena_ind);
+static bool chunk_split_default(void *chunk, size_t size, size_t size_a,
+ size_t size_b, bool committed, unsigned arena_ind);
+static bool chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b,
+ size_t size_b, bool committed, unsigned arena_ind);
+
+const chunk_hooks_t chunk_hooks_default = {
+ chunk_alloc_default,
+ chunk_dalloc_default,
+ chunk_commit_default,
+ chunk_decommit_default,
+ chunk_purge_default,
+ chunk_split_default,
+ chunk_merge_default
+};
/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
-static void *chunk_recycle(extent_tree_t *chunks_szad,
- extent_tree_t *chunks_ad, size_t size, size_t alignment, bool base,
- bool *zero);
-static void chunk_record(extent_tree_t *chunks_szad,
- extent_tree_t *chunks_ad, void *chunk, size_t size);
+static void chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
+ void *chunk, size_t size, bool zeroed, bool committed);
/******************************************************************************/
+static chunk_hooks_t
+chunk_hooks_get_locked(arena_t *arena)
+{
+
+ return (arena->chunk_hooks);
+}
+
+chunk_hooks_t
+chunk_hooks_get(arena_t *arena)
+{
+ chunk_hooks_t chunk_hooks;
+
+ malloc_mutex_lock(&arena->chunks_mtx);
+ chunk_hooks = chunk_hooks_get_locked(arena);
+ malloc_mutex_unlock(&arena->chunks_mtx);
+
+ return (chunk_hooks);
+}
+
+chunk_hooks_t
+chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
+{
+ chunk_hooks_t old_chunk_hooks;
+
+ malloc_mutex_lock(&arena->chunks_mtx);
+ old_chunk_hooks = arena->chunk_hooks;
+ /*
+ * Copy each field atomically so that it is impossible for readers to
+ * see partially updated pointers. There are places where readers only
+ * need one hook function pointer (therefore no need to copy the
+ * entirety of arena->chunk_hooks), and stale reads do not affect
+ * correctness, so they perform unlocked reads.
+ */
+#define ATOMIC_COPY_HOOK(n) do { \
+ union { \
+ chunk_##n##_t **n; \
+ void **v; \
+ } u; \
+ u.n = &arena->chunk_hooks.n; \
+ atomic_write_p(u.v, chunk_hooks->n); \
+} while (0)
+ ATOMIC_COPY_HOOK(alloc);
+ ATOMIC_COPY_HOOK(dalloc);
+ ATOMIC_COPY_HOOK(commit);
+ ATOMIC_COPY_HOOK(decommit);
+ ATOMIC_COPY_HOOK(purge);
+ ATOMIC_COPY_HOOK(split);
+ ATOMIC_COPY_HOOK(merge);
+#undef ATOMIC_COPY_HOOK
+ malloc_mutex_unlock(&arena->chunks_mtx);
+
+ return (old_chunk_hooks);
+}
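
chunk_hooks_set() installs the new table one function pointer at a time with atomic stores, so an unlocked reader that only needs a single hook never sees a torn struct; the patch does this with a union cast into atomic_write_p(). A simplified two-hook analogue using C11 atomics (not the real chunk_hooks_t):

#include <stdatomic.h>
#include <stddef.h>
#include <stdlib.h>

typedef void *alloc_hook_t(size_t size);
typedef void dalloc_hook_t(void *ptr);

/* Each slot is an atomic pointer, so a single hook can be read without a lock. */
struct hooks {
    _Atomic(alloc_hook_t *)  alloc;
    _Atomic(dalloc_hook_t *) dalloc;
};

static struct hooks current_hooks;

/* Writers still serialize among themselves (a mutex in the real code, elided here). */
static void hooks_set(alloc_hook_t *a, dalloc_hook_t *d) {
    atomic_store(&current_hooks.alloc, a);     /* field-wise stores: never a torn struct */
    atomic_store(&current_hooks.dalloc, d);
}

static void *hooked_alloc(size_t size) {
    alloc_hook_t *a = atomic_load(&current_hooks.alloc);   /* unlocked read of one slot */
    return (a != NULL) ? a(size) : NULL;
}

int main(void) {
    hooks_set(malloc, free);
    free(hooked_alloc(32));
    return 0;
}
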
+
+static void
+chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ bool locked)
+{
+ static const chunk_hooks_t uninitialized_hooks =
+ CHUNK_HOOKS_INITIALIZER;
+
+ if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
+ 0) {
+ *chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
+ chunk_hooks_get(arena);
+ }
+}
+
+static void
+chunk_hooks_assure_initialized_locked(arena_t *arena,
+ chunk_hooks_t *chunk_hooks)
+{
+
+ chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true);
+}
+
+static void
+chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks)
+{
+
+ chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false);
+}
+
+bool
+chunk_register(const void *chunk, const extent_node_t *node)
+{
+
+ assert(extent_node_addr_get(node) == chunk);
+
+ if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node))
+ return (true);
+ if (config_prof && opt_prof) {
+ size_t size = extent_node_size_get(node);
+ size_t nadd = (size == 0) ? 1 : size / chunksize;
+ size_t cur = atomic_add_z(&curchunks, nadd);
+ size_t high = atomic_read_z(&highchunks);
+ while (cur > high && atomic_cas_z(&highchunks, high, cur)) {
+ /*
+ * Don't refresh cur, because it may have decreased
+ * since this thread lost the highchunks update race.
+ */
+ high = atomic_read_z(&highchunks);
+ }
+ if (cur > high && prof_gdump_get_unlocked())
+ prof_gdump();
+ }
+
+ return (false);
+}
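
chunk_register() maintains the gdump high-water mark lock-free: add to the counter, then CAS the recorded maximum upward, retrying only while the observed maximum is still lower (jemalloc's atomic_cas_z() returns true when the swap fails, hence the loop shape above). A sketch of the same scheme with C11 compare-exchange, which refreshes the expected value on failure:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

static _Atomic size_t curchunks;
static _Atomic size_t highchunks;

/* Record nadd new chunks and return 1 if a new high-water mark was published. */
static int chunks_add(size_t nadd) {
    size_t cur = atomic_fetch_add(&curchunks, nadd) + nadd;
    size_t high = atomic_load(&highchunks);
    while (cur > high) {
        /* Success publishes cur; failure reloads high into 'high' and retries. */
        if (atomic_compare_exchange_weak(&highchunks, &high, cur))
            return 1;
    }
    return 0;
}

int main(void) {
    printf("%d\n", chunks_add(4));   /* 1: 4 > 0, new maximum */
    printf("%d\n", chunks_add(1));   /* 1: 5 > 4 */
    return 0;
}
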
+
+void
+chunk_deregister(const void *chunk, const extent_node_t *node)
+{
+ bool err;
+
+ err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
+ assert(!err);
+ if (config_prof && opt_prof) {
+ size_t size = extent_node_size_get(node);
+ size_t nsub = (size == 0) ? 1 : size / chunksize;
+ assert(atomic_read_z(&curchunks) >= nsub);
+ atomic_sub_z(&curchunks, nsub);
+ }
+}
+
+/*
+ * Do first-best-fit chunk selection, i.e. select the lowest chunk that best
+ * fits.
+ */
+static extent_node_t *
+chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
+ extent_tree_t *chunks_ad, size_t size)
+{
+ extent_node_t key;
+
+ assert(size == CHUNK_CEILING(size));
+
+ extent_node_init(&key, arena, NULL, size, false, false);
+ return (extent_tree_szad_nsearch(chunks_szad, &key));
+}
+
static void *
-chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
- size_t alignment, bool base, bool *zero)
+chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
+ void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
+ bool dalloc_node)
{
void *ret;
extent_node_t *node;
- extent_node_t key;
size_t alloc_size, leadsize, trailsize;
- bool zeroed;
+ bool zeroed, committed;
- if (base) {
- /*
- * This function may need to call base_node_{,de}alloc(), but
- * the current chunk allocation request is on behalf of the
- * base allocator. Avoid deadlock (and if that weren't an
- * issue, potential for infinite recursion) by returning NULL.
- */
- return (NULL);
- }
+ assert(new_addr == NULL || alignment == chunksize);
+ /*
+ * Cached chunks use the node linkage embedded in their headers, in
+ * which case dalloc_node is true, and new_addr is non-NULL because
+ * we're operating on a specific chunk.
+ */
+ assert(dalloc_node || new_addr != NULL);
- alloc_size = size + alignment - chunksize;
+ alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
/* Beware size_t wrap-around. */
if (alloc_size < size)
return (NULL);
- key.addr = NULL;
- key.size = alloc_size;
- malloc_mutex_lock(&chunks_mtx);
- node = extent_tree_szad_nsearch(chunks_szad, &key);
- if (node == NULL) {
- malloc_mutex_unlock(&chunks_mtx);
+ malloc_mutex_lock(&arena->chunks_mtx);
+ chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
+ if (new_addr != NULL) {
+ extent_node_t key;
+ extent_node_init(&key, arena, new_addr, alloc_size, false,
+ false);
+ node = extent_tree_ad_search(chunks_ad, &key);
+ } else {
+ node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
+ alloc_size);
+ }
+ if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
+ size)) {
+ malloc_mutex_unlock(&arena->chunks_mtx);
return (NULL);
}
- leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
- (uintptr_t)node->addr;
- assert(node->size >= leadsize + size);
- trailsize = node->size - leadsize - size;
- ret = (void *)((uintptr_t)node->addr + leadsize);
- zeroed = node->zeroed;
+ leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
+ alignment) - (uintptr_t)extent_node_addr_get(node);
+ assert(new_addr == NULL || leadsize == 0);
+ assert(extent_node_size_get(node) >= leadsize + size);
+ trailsize = extent_node_size_get(node) - leadsize - size;
+ ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
+ zeroed = extent_node_zeroed_get(node);
if (zeroed)
- *zero = true;
+ *zero = true;
+ committed = extent_node_committed_get(node);
+ if (committed)
+ *commit = true;
+ /* Split the lead. */
+ if (leadsize != 0 &&
+ chunk_hooks->split(extent_node_addr_get(node),
+ extent_node_size_get(node), leadsize, size, false, arena->ind)) {
+ malloc_mutex_unlock(&arena->chunks_mtx);
+ return (NULL);
+ }
/* Remove node from the tree. */
extent_tree_szad_remove(chunks_szad, node);
extent_tree_ad_remove(chunks_ad, node);
+ arena_chunk_cache_maybe_remove(arena, node, cache);
if (leadsize != 0) {
/* Insert the leading space as a smaller chunk. */
- node->size = leadsize;
+ extent_node_size_set(node, leadsize);
extent_tree_szad_insert(chunks_szad, node);
extent_tree_ad_insert(chunks_ad, node);
+ arena_chunk_cache_maybe_insert(arena, node, cache);
node = NULL;
}
if (trailsize != 0) {
+ /* Split the trail. */
+ if (chunk_hooks->split(ret, size + trailsize, size,
+ trailsize, false, arena->ind)) {
+ if (dalloc_node && node != NULL)
+ arena_node_dalloc(arena, node);
+ malloc_mutex_unlock(&arena->chunks_mtx);
+ chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad,
+ cache, ret, size + trailsize, zeroed, committed);
+ return (NULL);
+ }
/* Insert the trailing space as a smaller chunk. */
if (node == NULL) {
- /*
- * An additional node is required, but
- * base_node_alloc() can cause a new base chunk to be
- * allocated. Drop chunks_mtx in order to avoid
- * deadlock, and if node allocation fails, deallocate
- * the result before returning an error.
- */
- malloc_mutex_unlock(&chunks_mtx);
- node = base_node_alloc();
+ node = arena_node_alloc(arena);
if (node == NULL) {
- chunk_dealloc(ret, size, true);
+ malloc_mutex_unlock(&arena->chunks_mtx);
+ chunk_record(arena, chunk_hooks, chunks_szad,
+ chunks_ad, cache, ret, size + trailsize,
+ zeroed, committed);
return (NULL);
}
- malloc_mutex_lock(&chunks_mtx);
}
- node->addr = (void *)((uintptr_t)(ret) + size);
- node->size = trailsize;
- node->zeroed = zeroed;
+ extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
+ trailsize, zeroed, committed);
extent_tree_szad_insert(chunks_szad, node);
extent_tree_ad_insert(chunks_ad, node);
+ arena_chunk_cache_maybe_insert(arena, node, cache);
node = NULL;
}
- malloc_mutex_unlock(&chunks_mtx);
+ if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
+ malloc_mutex_unlock(&arena->chunks_mtx);
+ chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache,
+ ret, size, zeroed, committed);
+ return (NULL);
+ }
+ malloc_mutex_unlock(&arena->chunks_mtx);
- if (node != NULL)
- base_node_dealloc(node);
+ assert(dalloc_node || node != NULL);
+ if (dalloc_node && node != NULL)
+ arena_node_dalloc(arena, node);
if (*zero) {
- if (zeroed == false)
+ if (!zeroed)
memset(ret, 0, size);
else if (config_debug) {
size_t i;
size_t *p = (size_t *)(uintptr_t)ret;
- VALGRIND_MAKE_MEM_DEFINED(ret, size);
+ JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
for (i = 0; i < size / sizeof(size_t); i++)
assert(p[i] == 0);
}
@@ -136,138 +322,214 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
}
/*
- * If the caller specifies (*zero == false), it is still possible to receive
- * zeroed memory, in which case *zero is toggled to true. arena_chunk_alloc()
- * takes advantage of this to avoid demanding zeroed chunks, but taking
- * advantage of them if they are returned.
+ * If the caller specifies (!*zero), it is still possible to receive zeroed
+ * memory, in which case *zero is toggled to true. arena_chunk_alloc() takes
+ * advantage of this to avoid demanding zeroed chunks, but taking advantage of
+ * them if they are returned.
*/
-void *
-chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
- dss_prec_t dss_prec)
+static void *
+chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
+ bool *zero, bool *commit, dss_prec_t dss_prec)
{
void *ret;
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
assert(size != 0);
assert((size & chunksize_mask) == 0);
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
+ /* Retained. */
+ if ((ret = chunk_recycle(arena, &chunk_hooks,
+ &arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
+ new_addr, size, alignment, zero, commit, true)) != NULL)
+ return (ret);
+
/* "primary" dss. */
- if (config_dss && dss_prec == dss_prec_primary) {
- if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
- alignment, base, zero)) != NULL)
- goto label_return;
- if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
- goto label_return;
- }
- /* mmap. */
- if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, size,
- alignment, base, zero)) != NULL)
- goto label_return;
- if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
- goto label_return;
+ if (have_dss && dss_prec == dss_prec_primary && (ret =
+ chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
+ NULL)
+ return (ret);
+ /*
+ * mmap. Requesting an address is not implemented for
+ * chunk_alloc_mmap(), so only call it if (new_addr == NULL).
+ */
+ if (new_addr == NULL && (ret = chunk_alloc_mmap(size, alignment, zero,
+ commit)) != NULL)
+ return (ret);
/* "secondary" dss. */
- if (config_dss && dss_prec == dss_prec_secondary) {
- if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
- alignment, base, zero)) != NULL)
- goto label_return;
- if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
- goto label_return;
- }
+ if (have_dss && dss_prec == dss_prec_secondary && (ret =
+ chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
+ NULL)
+ return (ret);
/* All strategies for allocation failed. */
- ret = NULL;
-label_return:
- if (ret != NULL) {
- if (config_ivsalloc && base == false) {
- if (rtree_set(chunks_rtree, (uintptr_t)ret, 1)) {
- chunk_dealloc(ret, size, true);
- return (NULL);
- }
- }
- if (config_stats || config_prof) {
- bool gdump;
- malloc_mutex_lock(&chunks_mtx);
- if (config_stats)
- stats_chunks.nchunks += (size / chunksize);
- stats_chunks.curchunks += (size / chunksize);
- if (stats_chunks.curchunks > stats_chunks.highchunks) {
- stats_chunks.highchunks =
- stats_chunks.curchunks;
- if (config_prof)
- gdump = true;
- } else if (config_prof)
- gdump = false;
- malloc_mutex_unlock(&chunks_mtx);
- if (config_prof && opt_prof && opt_prof_gdump && gdump)
- prof_gdump();
- }
- if (config_valgrind)
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
- }
- assert(CHUNK_ADDR2BASE(ret) == ret);
+ return (NULL);
+}
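
The *zero parameter is in/out: a caller that does not insist on zeroed memory is still told when the chunk happens to come back zeroed, so it can skip a redundant memset. A sketch of the caller-side pattern, with a hypothetical get_chunk() standing in for the recycle/dss/mmap chain above:

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical backend: may hand back memory it knows is already zeroed. */
static void *get_chunk(size_t size, bool *zero) {
    void *p = calloc(1, size);   /* this particular backend always zeroes */
    if (p != NULL)
        *zero = true;            /* report it, even though the caller didn't ask */
    return p;
}

int main(void) {
    bool zero = false;           /* zeroed memory not demanded... */
    void *chunk = get_chunk(1 << 12, &zero);
    if (chunk == NULL)
        return 1;
    if (!zero)                   /* ...but only cleared by hand when needed */
        memset(chunk, 0, 1 << 12);
    free(chunk);
    return 0;
}
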
+
+void *
+chunk_alloc_base(size_t size)
+{
+ void *ret;
+ bool zero, commit;
+
+ /*
+ * Directly call chunk_alloc_mmap() rather than chunk_alloc_core()
+ * because it's critical that chunk_alloc_base() return untouched
+ * demand-zeroed virtual memory.
+ */
+ zero = true;
+ commit = true;
+ ret = chunk_alloc_mmap(size, chunksize, &zero, &commit);
+ if (ret == NULL)
+ return (NULL);
+ if (config_valgrind)
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+
return (ret);
}
-static void
-chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
- size_t size)
+void *
+chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
+ size_t size, size_t alignment, bool *zero, bool dalloc_node)
{
- bool unzeroed;
- extent_node_t *xnode, *node, *prev, *xprev, key;
+ void *ret;
+ bool commit;
+
+ assert(size != 0);
+ assert((size & chunksize_mask) == 0);
+ assert(alignment != 0);
+ assert((alignment & chunksize_mask) == 0);
- unzeroed = pages_purge(chunk, size);
- VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
+ commit = true;
+ ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached,
+ &arena->chunks_ad_cached, true, new_addr, size, alignment, zero,
+ &commit, dalloc_node);
+ if (ret == NULL)
+ return (NULL);
+ assert(commit);
+ if (config_valgrind)
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+ return (ret);
+}
+
+static arena_t *
+chunk_arena_get(unsigned arena_ind)
+{
+ arena_t *arena;
+ /* Dodge tsd for a0 in order to avoid bootstrapping issues. */
+ arena = (arena_ind == 0) ? a0get() : arena_get(tsd_fetch(), arena_ind,
+ false, true);
/*
- * Allocate a node before acquiring chunks_mtx even though it might not
- * be needed, because base_node_alloc() may cause a new base chunk to
- * be allocated, which could cause deadlock if chunks_mtx were already
- * held.
+ * The arena we're allocating on behalf of must have been initialized
+ * already.
*/
- xnode = base_node_alloc();
- /* Use xprev to implement conditional deferred deallocation of prev. */
- xprev = NULL;
+ assert(arena != NULL);
+ return (arena);
+}
+
+static void *
+chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
+ bool *commit, unsigned arena_ind)
+{
+ void *ret;
+ arena_t *arena;
+
+ arena = chunk_arena_get(arena_ind);
+ ret = chunk_alloc_core(arena, new_addr, size, alignment, zero,
+ commit, arena->dss_prec);
+ if (ret == NULL)
+ return (NULL);
+ if (config_valgrind)
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
- malloc_mutex_lock(&chunks_mtx);
- key.addr = (void *)((uintptr_t)chunk + size);
+ return (ret);
+}
+
+void *
+chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
+ size_t size, size_t alignment, bool *zero, bool *commit)
+{
+ void *ret;
+
+ chunk_hooks_assure_initialized(arena, chunk_hooks);
+ ret = chunk_hooks->alloc(new_addr, size, alignment, zero, commit,
+ arena->ind);
+ if (ret == NULL)
+ return (NULL);
+ if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
+ return (ret);
+}
+
+static void
+chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
+ void *chunk, size_t size, bool zeroed, bool committed)
+{
+ bool unzeroed;
+ extent_node_t *node, *prev;
+ extent_node_t key;
+
+ assert(!cache || !zeroed);
+ unzeroed = cache || !zeroed;
+ JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
+
+ malloc_mutex_lock(&arena->chunks_mtx);
+ chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
+ extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
+ false, false);
node = extent_tree_ad_nsearch(chunks_ad, &key);
/* Try to coalesce forward. */
- if (node != NULL && node->addr == key.addr) {
+ if (node != NULL && extent_node_addr_get(node) ==
+ extent_node_addr_get(&key) && extent_node_committed_get(node) ==
+ committed && !chunk_hooks->merge(chunk, size,
+ extent_node_addr_get(node), extent_node_size_get(node), false,
+ arena->ind)) {
/*
* Coalesce chunk with the following address range. This does
* not change the position within chunks_ad, so only
* remove/insert from/into chunks_szad.
*/
extent_tree_szad_remove(chunks_szad, node);
- node->addr = chunk;
- node->size += size;
- node->zeroed = (node->zeroed && (unzeroed == false));
+ arena_chunk_cache_maybe_remove(arena, node, cache);
+ extent_node_addr_set(node, chunk);
+ extent_node_size_set(node, size + extent_node_size_get(node));
+ extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
+ !unzeroed);
extent_tree_szad_insert(chunks_szad, node);
+ arena_chunk_cache_maybe_insert(arena, node, cache);
} else {
/* Coalescing forward failed, so insert a new node. */
- if (xnode == NULL) {
+ node = arena_node_alloc(arena);
+ if (node == NULL) {
/*
- * base_node_alloc() failed, which is an exceedingly
- * unlikely failure. Leak chunk; its pages have
- * already been purged, so this is only a virtual
- * memory leak.
+ * Node allocation failed, which is an exceedingly
+ * unlikely failure. Leak chunk after making sure its
+ * pages have already been purged, so that this is only
+ * a virtual memory leak.
*/
+ if (cache) {
+ chunk_purge_wrapper(arena, chunk_hooks, chunk,
+ size, 0, size);
+ }
goto label_return;
}
- node = xnode;
- xnode = NULL; /* Prevent deallocation below. */
- node->addr = chunk;
- node->size = size;
- node->zeroed = (unzeroed == false);
+ extent_node_init(node, arena, chunk, size, !unzeroed,
+ committed);
extent_tree_ad_insert(chunks_ad, node);
extent_tree_szad_insert(chunks_szad, node);
+ arena_chunk_cache_maybe_insert(arena, node, cache);
}
/* Try to coalesce backward. */
prev = extent_tree_ad_prev(chunks_ad, node);
- if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
- chunk) {
+ if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) +
+ extent_node_size_get(prev)) == chunk &&
+ extent_node_committed_get(prev) == committed &&
+ !chunk_hooks->merge(extent_node_addr_get(prev),
+ extent_node_size_get(prev), chunk, size, false, arena->ind)) {
/*
* Coalesce chunk with the previous address range. This does
* not change the position within chunks_ad, so only
@@ -275,44 +537,42 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
*/
extent_tree_szad_remove(chunks_szad, prev);
extent_tree_ad_remove(chunks_ad, prev);
-
+ arena_chunk_cache_maybe_remove(arena, prev, cache);
extent_tree_szad_remove(chunks_szad, node);
- node->addr = prev->addr;
- node->size += prev->size;
- node->zeroed = (node->zeroed && prev->zeroed);
+ arena_chunk_cache_maybe_remove(arena, node, cache);
+ extent_node_addr_set(node, extent_node_addr_get(prev));
+ extent_node_size_set(node, extent_node_size_get(prev) +
+ extent_node_size_get(node));
+ extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
+ extent_node_zeroed_get(node));
extent_tree_szad_insert(chunks_szad, node);
+ arena_chunk_cache_maybe_insert(arena, node, cache);
- xprev = prev;
+ arena_node_dalloc(arena, prev);
}
label_return:
- malloc_mutex_unlock(&chunks_mtx);
- /*
- * Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
- * avoid potential deadlock.
- */
- if (xnode != NULL)
- base_node_dealloc(xnode);
- if (xprev != NULL)
- base_node_dealloc(xprev);
+ malloc_mutex_unlock(&arena->chunks_mtx);
}
void
-chunk_unmap(void *chunk, size_t size)
+chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
+ size_t size, bool committed)
{
+
assert(chunk != NULL);
assert(CHUNK_ADDR2BASE(chunk) == chunk);
assert(size != 0);
assert((size & chunksize_mask) == 0);
- if (config_dss && chunk_in_dss(chunk))
- chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
- else if (chunk_dealloc_mmap(chunk, size))
- chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
+ chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached,
+ &arena->chunks_ad_cached, true, chunk, size, false, committed);
+ arena_maybe_purge(arena);
}
void
-chunk_dealloc(void *chunk, size_t size, bool unmap)
+chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
+ size_t size, bool zeroed, bool committed)
{
assert(chunk != NULL);
@@ -320,22 +580,149 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
assert(size != 0);
assert((size & chunksize_mask) == 0);
- if (config_ivsalloc)
- rtree_set(chunks_rtree, (uintptr_t)chunk, 0);
- if (config_stats || config_prof) {
- malloc_mutex_lock(&chunks_mtx);
- assert(stats_chunks.curchunks >= (size / chunksize));
- stats_chunks.curchunks -= (size / chunksize);
- malloc_mutex_unlock(&chunks_mtx);
+ chunk_hooks_assure_initialized(arena, chunk_hooks);
+ /* Try to deallocate. */
+ if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind))
+ return;
+ /* Try to decommit; purge if that fails. */
+ if (committed) {
+ committed = chunk_hooks->decommit(chunk, size, 0, size,
+ arena->ind);
}
+ zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
+ arena->ind);
+ chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained,
+ &arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
+}
+
+static bool
+chunk_dalloc_default(void *chunk, size_t size, bool committed,
+ unsigned arena_ind)
+{
+
+ if (!have_dss || !chunk_in_dss(chunk))
+ return (chunk_dalloc_mmap(chunk, size));
+ return (true);
+}
+
+void
+chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
+ size_t size, bool committed)
+{
+
+ chunk_hooks_assure_initialized(arena, chunk_hooks);
+ chunk_hooks->dalloc(chunk, size, committed, arena->ind);
+ if (config_valgrind && chunk_hooks->dalloc != chunk_dalloc_default)
+ JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
+}
+
+static bool
+chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length,
+ unsigned arena_ind)
+{
+
+ return (pages_commit((void *)((uintptr_t)chunk + (uintptr_t)offset),
+ length));
+}
+
+static bool
+chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
+ unsigned arena_ind)
+{
- if (unmap)
- chunk_unmap(chunk, size);
+ return (pages_decommit((void *)((uintptr_t)chunk + (uintptr_t)offset),
+ length));
+}
+
+bool
+chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
+{
+
+ assert(chunk != NULL);
+ assert(CHUNK_ADDR2BASE(chunk) == chunk);
+ assert((offset & PAGE_MASK) == 0);
+ assert(length != 0);
+ assert((length & PAGE_MASK) == 0);
+
+ return (pages_purge((void *)((uintptr_t)chunk + (uintptr_t)offset),
+ length));
+}
+
+static bool
+chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
+ unsigned arena_ind)
+{
+
+ return (chunk_purge_arena(chunk_arena_get(arena_ind), chunk, offset,
+ length));
+}
+
+bool
+chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
+ size_t size, size_t offset, size_t length)
+{
+
+ chunk_hooks_assure_initialized(arena, chunk_hooks);
+ return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
+}
+
+static bool
+chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
+ bool committed, unsigned arena_ind)
+{
+
+ if (!maps_coalesce)
+ return (true);
+ return (false);
+}
+
+static bool
+chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
+ bool committed, unsigned arena_ind)
+{
+
+ if (!maps_coalesce)
+ return (true);
+ if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b))
+ return (true);
+
+ return (false);
+}
+
+static rtree_node_elm_t *
+chunks_rtree_node_alloc(size_t nelms)
+{
+
+ return ((rtree_node_elm_t *)base_alloc(nelms *
+ sizeof(rtree_node_elm_t)));
}
bool
chunk_boot(void)
{
+#ifdef _WIN32
+ SYSTEM_INFO info;
+ GetSystemInfo(&info);
+
+ /*
+ * Verify actual page size is equal to or an integral multiple of
+ * configured page size.
+ */
+ if (info.dwPageSize & ((1U << LG_PAGE) - 1))
+ return (true);
+
+ /*
+ * Configure chunksize (if not set) to match granularity (usually 64K),
+ * so pages_map will always take fast path.
+ */
+ if (!opt_lg_chunk) {
+ opt_lg_chunk = jemalloc_ffs((int)info.dwAllocationGranularity)
+ - 1;
+ }
+#else
+ if (!opt_lg_chunk)
+ opt_lg_chunk = LG_CHUNK_DEFAULT;
+#endif
/* Set variables according to the value of opt_lg_chunk. */
chunksize = (ZU(1) << opt_lg_chunk);
@@ -343,23 +730,11 @@ chunk_boot(void)
chunksize_mask = chunksize - 1;
chunk_npages = (chunksize >> LG_PAGE);
- if (config_stats || config_prof) {
- if (malloc_mutex_init(&chunks_mtx))
- return (true);
- memset(&stats_chunks, 0, sizeof(chunk_stats_t));
- }
- if (config_dss && chunk_dss_boot())
+ if (have_dss && chunk_dss_boot())
+ return (true);
+ if (rtree_new(&chunks_rtree, (ZU(1) << (LG_SIZEOF_PTR+3)) -
+ opt_lg_chunk, chunks_rtree_node_alloc, NULL))
return (true);
- extent_tree_szad_new(&chunks_szad_mmap);
- extent_tree_ad_new(&chunks_ad_mmap);
- extent_tree_szad_new(&chunks_szad_dss);
- extent_tree_ad_new(&chunks_ad_dss);
- if (config_ivsalloc) {
- chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
- opt_lg_chunk, base_alloc, NULL);
- if (chunks_rtree == NULL)
- return (true);
- }
return (false);
}
@@ -368,9 +743,6 @@ void
chunk_prefork(void)
{
- malloc_mutex_prefork(&chunks_mtx);
- if (config_ivsalloc)
- rtree_prefork(chunks_rtree);
chunk_dss_prefork();
}
@@ -379,9 +751,6 @@ chunk_postfork_parent(void)
{
chunk_dss_postfork_parent();
- if (config_ivsalloc)
- rtree_postfork_parent(chunks_rtree);
- malloc_mutex_postfork_parent(&chunks_mtx);
}
void
@@ -389,7 +758,4 @@ chunk_postfork_child(void)
{
chunk_dss_postfork_child();
- if (config_ivsalloc)
- rtree_postfork_child(chunks_rtree);
- malloc_mutex_postfork_child(&chunks_mtx);
}
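
The default hook implementations above (chunk_dalloc_default(), chunk_commit_default(), chunk_decommit_default(), chunk_purge_default(), chunk_split_default(), chunk_merge_default()) back the per-arena chunk_hooks_t table that jemalloc 4.x exposes through the arena.<i>.chunk_hooks mallctl. A hedged usage sketch follows; it assumes a build without a symbol prefix (this bundled copy is normally built with a je_ prefix, i.e. je_mallctl), and the opt-out dalloc hook is purely illustrative.

	#include <stdbool.h>
	#include <stddef.h>
	#include <jemalloc/jemalloc.h>

	/*
	 * Opt-out dalloc hook: returning true tells jemalloc the chunk was not
	 * deallocated, so it is retained for later reuse instead of unmapped.
	 */
	static bool
	my_chunk_dalloc(void *chunk, size_t size, bool committed,
	    unsigned arena_ind)
	{
		return (true);
	}

	int
	main(void)
	{
		chunk_hooks_t hooks;
		size_t sz = sizeof(hooks);

		/* Read arena 0's current hooks, swap in the custom dalloc, and
		 * write the full structure back. */
		if (mallctl("arena.0.chunk_hooks", &hooks, &sz, NULL, 0) != 0)
			return (1);
		hooks.dalloc = my_chunk_dalloc;
		if (mallctl("arena.0.chunk_hooks", NULL, NULL, &hooks,
		    sizeof(hooks)) != 0)
			return (1);
		return (0);
	}
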
diff --git a/deps/jemalloc/src/chunk_dss.c b/deps/jemalloc/src/chunk_dss.c
index 510bb8bee..61fc91696 100644
--- a/deps/jemalloc/src/chunk_dss.c
+++ b/deps/jemalloc/src/chunk_dss.c
@@ -32,7 +32,7 @@ static void *
chunk_dss_sbrk(intptr_t increment)
{
-#ifdef JEMALLOC_HAVE_SBRK
+#ifdef JEMALLOC_DSS
return (sbrk(increment));
#else
not_implemented();
@@ -45,7 +45,7 @@ chunk_dss_prec_get(void)
{
dss_prec_t ret;
- if (config_dss == false)
+ if (!have_dss)
return (dss_prec_disabled);
malloc_mutex_lock(&dss_mtx);
ret = dss_prec_default;
@@ -57,8 +57,8 @@ bool
chunk_dss_prec_set(dss_prec_t dss_prec)
{
- if (config_dss == false)
- return (true);
+ if (!have_dss)
+ return (dss_prec != dss_prec_disabled);
malloc_mutex_lock(&dss_mtx);
dss_prec_default = dss_prec;
malloc_mutex_unlock(&dss_mtx);
@@ -66,11 +66,10 @@ chunk_dss_prec_set(dss_prec_t dss_prec)
}
void *
-chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
+chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
+ bool *zero, bool *commit)
{
- void *ret;
-
- cassert(config_dss);
+ cassert(have_dss);
assert(size > 0 && (size & chunksize_mask) == 0);
assert(alignment > 0 && (alignment & chunksize_mask) == 0);
@@ -83,9 +82,6 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
malloc_mutex_lock(&dss_mtx);
if (dss_prev != (void *)-1) {
- size_t gap_size, cpad_size;
- void *cpad, *dss_next;
- intptr_t incr;
/*
* The loop is necessary to recover from races with other
@@ -93,8 +89,20 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
* malloc.
*/
do {
+ void *ret, *cpad, *dss_next;
+ size_t gap_size, cpad_size;
+ intptr_t incr;
+ /* Avoid an unnecessary system call. */
+ if (new_addr != NULL && dss_max != new_addr)
+ break;
+
/* Get the current end of the DSS. */
dss_max = chunk_dss_sbrk(0);
+
+ /* Make sure the earlier condition still holds. */
+ if (new_addr != NULL && dss_max != new_addr)
+ break;
+
/*
* Calculate how much padding is necessary to
* chunk-align the end of the DSS.
@@ -123,12 +131,20 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
/* Success. */
dss_max = dss_next;
malloc_mutex_unlock(&dss_mtx);
- if (cpad_size != 0)
- chunk_unmap(cpad, cpad_size);
+ if (cpad_size != 0) {
+ chunk_hooks_t chunk_hooks =
+ CHUNK_HOOKS_INITIALIZER;
+ chunk_dalloc_wrapper(arena,
+ &chunk_hooks, cpad, cpad_size,
+ true);
+ }
if (*zero) {
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
+ ret, size);
memset(ret, 0, size);
}
+ if (!*commit)
+ *commit = pages_decommit(ret, size);
return (ret);
}
} while (dss_prev != (void *)-1);
@@ -143,7 +159,7 @@ chunk_in_dss(void *chunk)
{
bool ret;
- cassert(config_dss);
+ cassert(have_dss);
malloc_mutex_lock(&dss_mtx);
if ((uintptr_t)chunk >= (uintptr_t)dss_base
@@ -160,7 +176,7 @@ bool
chunk_dss_boot(void)
{
- cassert(config_dss);
+ cassert(have_dss);
if (malloc_mutex_init(&dss_mtx))
return (true);
@@ -175,7 +191,7 @@ void
chunk_dss_prefork(void)
{
- if (config_dss)
+ if (have_dss)
malloc_mutex_prefork(&dss_mtx);
}
@@ -183,7 +199,7 @@ void
chunk_dss_postfork_parent(void)
{
- if (config_dss)
+ if (have_dss)
malloc_mutex_postfork_parent(&dss_mtx);
}
@@ -191,7 +207,7 @@ void
chunk_dss_postfork_child(void)
{
- if (config_dss)
+ if (have_dss)
malloc_mutex_postfork_child(&dss_mtx);
}
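
chunk_alloc_dss() above grows the data segment and, when sbrk()'s current end is not chunk-aligned, inserts a pad ("cpad") up to the next chunk boundary. A small sketch of that gap arithmetic, under the assumption that chunksize is a power of two; the helper name is illustrative, not jemalloc's.

	#include <stddef.h>
	#include <stdint.h>

	/* Bytes needed to advance `dss_max` to the next chunk boundary. */
	static size_t
	chunk_align_gap(void *dss_max, size_t chunksize)
	{
		uintptr_t end = (uintptr_t)dss_max;
		size_t mask = chunksize - 1;	/* power-of-two chunksize */

		return ((chunksize - (end & mask)) & mask);
	}
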
diff --git a/deps/jemalloc/src/chunk_mmap.c b/deps/jemalloc/src/chunk_mmap.c
index 2056d793f..b9ba74191 100644
--- a/deps/jemalloc/src/chunk_mmap.c
+++ b/deps/jemalloc/src/chunk_mmap.c
@@ -2,154 +2,20 @@
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static void *pages_map(void *addr, size_t size);
-static void pages_unmap(void *addr, size_t size);
-static void *chunk_alloc_mmap_slow(size_t size, size_t alignment,
- bool *zero);
-
-/******************************************************************************/
static void *
-pages_map(void *addr, size_t size)
+chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
{
void *ret;
-
- assert(size != 0);
-
-#ifdef _WIN32
- /*
- * If VirtualAlloc can't allocate at the given address when one is
- * given, it fails and returns NULL.
- */
- ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
- PAGE_READWRITE);
-#else
- /*
- * We don't use MAP_FIXED here, because it can cause the *replacement*
- * of existing mappings, and we only want to create new mappings.
- */
- ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
- -1, 0);
- assert(ret != NULL);
-
- if (ret == MAP_FAILED)
- ret = NULL;
- else if (addr != NULL && ret != addr) {
- /*
- * We succeeded in mapping memory, but not in the right place.
- */
- if (munmap(ret, size) == -1) {
- char buf[BUFERROR_BUF];
-
- buferror(get_errno(), buf, sizeof(buf));
- malloc_printf("<jemalloc: Error in munmap(): %s\n",
- buf);
- if (opt_abort)
- abort();
- }
- ret = NULL;
- }
-#endif
- assert(ret == NULL || (addr == NULL && ret != addr)
- || (addr != NULL && ret == addr));
- return (ret);
-}
-
-static void
-pages_unmap(void *addr, size_t size)
-{
-
-#ifdef _WIN32
- if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
-#else
- if (munmap(addr, size) == -1)
-#endif
- {
- char buf[BUFERROR_BUF];
-
- buferror(get_errno(), buf, sizeof(buf));
- malloc_printf("<jemalloc>: Error in "
-#ifdef _WIN32
- "VirtualFree"
-#else
- "munmap"
-#endif
- "(): %s\n", buf);
- if (opt_abort)
- abort();
- }
-}
-
-static void *
-pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
-{
- void *ret = (void *)((uintptr_t)addr + leadsize);
-
- assert(alloc_size >= leadsize + size);
-#ifdef _WIN32
- {
- void *new_addr;
-
- pages_unmap(addr, alloc_size);
- new_addr = pages_map(ret, size);
- if (new_addr == ret)
- return (ret);
- if (new_addr)
- pages_unmap(new_addr, size);
- return (NULL);
- }
-#else
- {
- size_t trailsize = alloc_size - leadsize - size;
-
- if (leadsize != 0)
- pages_unmap(addr, leadsize);
- if (trailsize != 0)
- pages_unmap((void *)((uintptr_t)ret + size), trailsize);
- return (ret);
- }
-#endif
-}
-
-bool
-pages_purge(void *addr, size_t length)
-{
- bool unzeroed;
-
-#ifdef _WIN32
- VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE);
- unzeroed = true;
-#else
-# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
-# define JEMALLOC_MADV_PURGE MADV_DONTNEED
-# define JEMALLOC_MADV_ZEROS true
-# elif defined(JEMALLOC_PURGE_MADVISE_FREE)
-# define JEMALLOC_MADV_PURGE MADV_FREE
-# define JEMALLOC_MADV_ZEROS false
-# else
-# error "No method defined for purging unused dirty pages."
-# endif
- int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
- unzeroed = (JEMALLOC_MADV_ZEROS == false || err != 0);
-# undef JEMALLOC_MADV_PURGE
-# undef JEMALLOC_MADV_ZEROS
-#endif
- return (unzeroed);
-}
-
-static void *
-chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero)
-{
- void *ret, *pages;
- size_t alloc_size, leadsize;
+ size_t alloc_size;
alloc_size = size + alignment - PAGE;
/* Beware size_t wrap-around. */
if (alloc_size < size)
return (NULL);
do {
+ void *pages;
+ size_t leadsize;
pages = pages_map(NULL, alloc_size);
if (pages == NULL)
return (NULL);
@@ -160,11 +26,13 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero)
assert(ret != NULL);
*zero = true;
+ if (!*commit)
+ *commit = pages_decommit(ret, size);
return (ret);
}
void *
-chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
+chunk_alloc_mmap(size_t size, size_t alignment, bool *zero, bool *commit)
{
void *ret;
size_t offset;
@@ -191,20 +59,22 @@ chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
if (offset != 0) {
pages_unmap(ret, size);
- return (chunk_alloc_mmap_slow(size, alignment, zero));
+ return (chunk_alloc_mmap_slow(size, alignment, zero, commit));
}
assert(ret != NULL);
*zero = true;
+ if (!*commit)
+ *commit = pages_decommit(ret, size);
return (ret);
}
bool
-chunk_dealloc_mmap(void *chunk, size_t size)
+chunk_dalloc_mmap(void *chunk, size_t size)
{
if (config_munmap)
pages_unmap(chunk, size);
- return (config_munmap == false);
+ return (!config_munmap);
}
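
chunk_alloc_mmap_slow() above obtains chunk alignment by over-allocating size + alignment - PAGE bytes and trimming the slack. A hedged sketch of the trim arithmetic follows; PAGE is hard-coded here as an assumption (jemalloc derives LG_PAGE at configure time) and lead_size() is an illustrative helper, not jemalloc's pages_trim().

	#include <stddef.h>
	#include <stdint.h>

	#define PAGE ((size_t)4096)	/* assumption for illustration */

	/*
	 * Given an over-sized mapping starting at `pages`, the leading slack to
	 * discard so that the returned address is `alignment`-aligned.
	 */
	static size_t
	lead_size(void *pages, size_t alignment)
	{
		uintptr_t addr = (uintptr_t)pages;
		uintptr_t aligned = (addr + alignment - 1) & ~(alignment - 1);

		return ((size_t)(aligned - addr));
	}
	/*
	 * The mapping of alloc_size = size + alignment - PAGE bytes is then
	 * trimmed to [pages + leadsize, pages + leadsize + size); on non-Windows
	 * platforms the leading and trailing slack is simply munmap()ed.
	 */
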
diff --git a/deps/jemalloc/src/ckh.c b/deps/jemalloc/src/ckh.c
index 04c529661..53a1c1ef1 100644
--- a/deps/jemalloc/src/ckh.c
+++ b/deps/jemalloc/src/ckh.c
@@ -40,8 +40,8 @@
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
-static bool ckh_grow(ckh_t *ckh);
-static void ckh_shrink(ckh_t *ckh);
+static bool ckh_grow(tsd_t *tsd, ckh_t *ckh);
+static void ckh_shrink(tsd_t *tsd, ckh_t *ckh);
/******************************************************************************/
@@ -185,7 +185,7 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
}
bucket = tbucket;
- if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
+ if (!ckh_try_bucket_insert(ckh, bucket, key, data))
return (false);
}
}
@@ -201,12 +201,12 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
/* Try to insert in primary bucket. */
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
- if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
+ if (!ckh_try_bucket_insert(ckh, bucket, key, data))
return (false);
/* Try to insert in secondary bucket. */
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
- if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
+ if (!ckh_try_bucket_insert(ckh, bucket, key, data))
return (false);
/*
@@ -243,7 +243,7 @@ ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
}
static bool
-ckh_grow(ckh_t *ckh)
+ckh_grow(tsd_t *tsd, ckh_t *ckh)
{
bool ret;
ckhc_t *tab, *ttab;
@@ -270,7 +270,8 @@ ckh_grow(ckh_t *ckh)
ret = true;
goto label_return;
}
- tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
+ tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL,
+ true, NULL);
if (tab == NULL) {
ret = true;
goto label_return;
@@ -281,13 +282,13 @@ ckh_grow(ckh_t *ckh)
tab = ttab;
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
- if (ckh_rebuild(ckh, tab) == false) {
- idalloc(tab);
+ if (!ckh_rebuild(ckh, tab)) {
+ idalloctm(tsd, tab, tcache_get(tsd, false), true);
break;
}
/* Rebuilding failed, so back out partially rebuilt table. */
- idalloc(ckh->tab);
+ idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
}
@@ -298,7 +299,7 @@ label_return:
}
static void
-ckh_shrink(ckh_t *ckh)
+ckh_shrink(tsd_t *tsd, ckh_t *ckh)
{
ckhc_t *tab, *ttab;
size_t lg_curcells, usize;
@@ -313,7 +314,8 @@ ckh_shrink(ckh_t *ckh)
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (usize == 0)
return;
- tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
+ tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
+ NULL);
if (tab == NULL) {
/*
* An OOM error isn't worth propagating, since it doesn't
@@ -327,8 +329,8 @@ ckh_shrink(ckh_t *ckh)
tab = ttab;
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
- if (ckh_rebuild(ckh, tab) == false) {
- idalloc(tab);
+ if (!ckh_rebuild(ckh, tab)) {
+ idalloctm(tsd, tab, tcache_get(tsd, false), true);
#ifdef CKH_COUNT
ckh->nshrinks++;
#endif
@@ -336,7 +338,7 @@ ckh_shrink(ckh_t *ckh)
}
/* Rebuilding failed, so back out partially rebuilt table. */
- idalloc(ckh->tab);
+ idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
#ifdef CKH_COUNT
@@ -345,7 +347,8 @@ ckh_shrink(ckh_t *ckh)
}
bool
-ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
+ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
+ ckh_keycomp_t *keycomp)
{
bool ret;
size_t mincells, usize;
@@ -366,10 +369,10 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
ckh->count = 0;
/*
- * Find the minimum power of 2 that is large enough to fit aBaseCount
+ * Find the minimum power of 2 that is large enough to fit minitems
* entries. We are using (2+,2) cuckoo hashing, which has an expected
* maximum load factor of at least ~0.86, so 0.75 is a conservative load
- * factor that will typically allow 2^aLgMinItems to fit without ever
+ * factor that will typically allow mincells items to fit without ever
* growing the table.
*/
assert(LG_CKH_BUCKET_CELLS > 0);
@@ -388,7 +391,8 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
ret = true;
goto label_return;
}
- ckh->tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
+ ckh->tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
+ NULL);
if (ckh->tab == NULL) {
ret = true;
goto label_return;
@@ -400,16 +404,16 @@ label_return:
}
void
-ckh_delete(ckh_t *ckh)
+ckh_delete(tsd_t *tsd, ckh_t *ckh)
{
assert(ckh != NULL);
#ifdef CKH_VERBOSE
malloc_printf(
- "%s(%p): ngrows: %"PRIu64", nshrinks: %"PRIu64","
- " nshrinkfails: %"PRIu64", ninserts: %"PRIu64","
- " nrelocs: %"PRIu64"\n", __func__, ckh,
+ "%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64","
+ " nshrinkfails: %"FMTu64", ninserts: %"FMTu64","
+ " nrelocs: %"FMTu64"\n", __func__, ckh,
(unsigned long long)ckh->ngrows,
(unsigned long long)ckh->nshrinks,
(unsigned long long)ckh->nshrinkfails,
@@ -417,7 +421,7 @@ ckh_delete(ckh_t *ckh)
(unsigned long long)ckh->nrelocs);
#endif
- idalloc(ckh->tab);
+ idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
if (config_debug)
memset(ckh, 0x5a, sizeof(ckh_t));
}
@@ -452,7 +456,7 @@ ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)
}
bool
-ckh_insert(ckh_t *ckh, const void *key, const void *data)
+ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data)
{
bool ret;
@@ -464,7 +468,7 @@ ckh_insert(ckh_t *ckh, const void *key, const void *data)
#endif
while (ckh_try_insert(ckh, &key, &data)) {
- if (ckh_grow(ckh)) {
+ if (ckh_grow(tsd, ckh)) {
ret = true;
goto label_return;
}
@@ -476,7 +480,8 @@ label_return:
}
bool
-ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
+ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
+ void **data)
{
size_t cell;
@@ -497,7 +502,7 @@ ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
+ LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
> ckh->lg_minbuckets) {
/* Ignore error due to OOM. */
- ckh_shrink(ckh);
+ ckh_shrink(tsd, ckh);
}
return (false);
diff --git a/deps/jemalloc/src/ctl.c b/deps/jemalloc/src/ctl.c
index cc2c5aef5..3de8e602d 100644
--- a/deps/jemalloc/src/ctl.c
+++ b/deps/jemalloc/src/ctl.c
@@ -7,7 +7,6 @@
/*
* ctl_mtx protects the following:
* - ctl_stats.*
- * - opt_prof_active
*/
static malloc_mutex_t ctl_mtx;
static bool ctl_initialized;
@@ -17,14 +16,14 @@ static ctl_stats_t ctl_stats;
/******************************************************************************/
/* Helpers for named and indexed nodes. */
-static inline const ctl_named_node_t *
+JEMALLOC_INLINE_C const ctl_named_node_t *
ctl_named_node(const ctl_node_t *node)
{
return ((node->named) ? (const ctl_named_node_t *)node : NULL);
}
-static inline const ctl_named_node_t *
+JEMALLOC_INLINE_C const ctl_named_node_t *
ctl_named_children(const ctl_named_node_t *node, int index)
{
const ctl_named_node_t *children = ctl_named_node(node->children);
@@ -32,12 +31,11 @@ ctl_named_children(const ctl_named_node_t *node, int index)
return (children ? &children[index] : NULL);
}
-static inline const ctl_indexed_node_t *
+JEMALLOC_INLINE_C const ctl_indexed_node_t *
ctl_indexed_node(const ctl_node_t *node)
{
- return ((node->named == false) ? (const ctl_indexed_node_t *)node :
- NULL);
+ return (!node->named ? (const ctl_indexed_node_t *)node : NULL);
}
/******************************************************************************/
@@ -68,16 +66,17 @@ CTL_PROTO(version)
CTL_PROTO(epoch)
CTL_PROTO(thread_tcache_enabled)
CTL_PROTO(thread_tcache_flush)
+CTL_PROTO(thread_prof_name)
+CTL_PROTO(thread_prof_active)
CTL_PROTO(thread_arena)
CTL_PROTO(thread_allocated)
CTL_PROTO(thread_allocatedp)
CTL_PROTO(thread_deallocated)
CTL_PROTO(thread_deallocatedp)
+CTL_PROTO(config_cache_oblivious)
CTL_PROTO(config_debug)
-CTL_PROTO(config_dss)
CTL_PROTO(config_fill)
CTL_PROTO(config_lazy_lock)
-CTL_PROTO(config_mremap)
CTL_PROTO(config_munmap)
CTL_PROTO(config_prof)
CTL_PROTO(config_prof_libgcc)
@@ -99,22 +98,27 @@ CTL_PROTO(opt_zero)
CTL_PROTO(opt_quarantine)
CTL_PROTO(opt_redzone)
CTL_PROTO(opt_utrace)
-CTL_PROTO(opt_valgrind)
CTL_PROTO(opt_xmalloc)
CTL_PROTO(opt_tcache)
CTL_PROTO(opt_lg_tcache_max)
CTL_PROTO(opt_prof)
CTL_PROTO(opt_prof_prefix)
CTL_PROTO(opt_prof_active)
+CTL_PROTO(opt_prof_thread_active_init)
CTL_PROTO(opt_lg_prof_sample)
CTL_PROTO(opt_lg_prof_interval)
CTL_PROTO(opt_prof_gdump)
CTL_PROTO(opt_prof_final)
CTL_PROTO(opt_prof_leak)
CTL_PROTO(opt_prof_accum)
+CTL_PROTO(tcache_create)
+CTL_PROTO(tcache_flush)
+CTL_PROTO(tcache_destroy)
CTL_PROTO(arena_i_purge)
static void arena_purge(unsigned arena_ind);
CTL_PROTO(arena_i_dss)
+CTL_PROTO(arena_i_lg_dirty_mult)
+CTL_PROTO(arena_i_chunk_hooks)
INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs)
@@ -122,25 +126,26 @@ CTL_PROTO(arenas_bin_i_run_size)
INDEX_PROTO(arenas_bin_i)
CTL_PROTO(arenas_lrun_i_size)
INDEX_PROTO(arenas_lrun_i)
+CTL_PROTO(arenas_hchunk_i_size)
+INDEX_PROTO(arenas_hchunk_i)
CTL_PROTO(arenas_narenas)
CTL_PROTO(arenas_initialized)
+CTL_PROTO(arenas_lg_dirty_mult)
CTL_PROTO(arenas_quantum)
CTL_PROTO(arenas_page)
CTL_PROTO(arenas_tcache_max)
CTL_PROTO(arenas_nbins)
CTL_PROTO(arenas_nhbins)
CTL_PROTO(arenas_nlruns)
-CTL_PROTO(arenas_purge)
+CTL_PROTO(arenas_nhchunks)
CTL_PROTO(arenas_extend)
+CTL_PROTO(prof_thread_active_init)
CTL_PROTO(prof_active)
CTL_PROTO(prof_dump)
+CTL_PROTO(prof_gdump)
+CTL_PROTO(prof_reset)
CTL_PROTO(prof_interval)
-CTL_PROTO(stats_chunks_current)
-CTL_PROTO(stats_chunks_total)
-CTL_PROTO(stats_chunks_high)
-CTL_PROTO(stats_huge_allocated)
-CTL_PROTO(stats_huge_nmalloc)
-CTL_PROTO(stats_huge_ndalloc)
+CTL_PROTO(lg_prof_sample)
CTL_PROTO(stats_arenas_i_small_allocated)
CTL_PROTO(stats_arenas_i_small_nmalloc)
CTL_PROTO(stats_arenas_i_small_ndalloc)
@@ -149,10 +154,14 @@ CTL_PROTO(stats_arenas_i_large_allocated)
CTL_PROTO(stats_arenas_i_large_nmalloc)
CTL_PROTO(stats_arenas_i_large_ndalloc)
CTL_PROTO(stats_arenas_i_large_nrequests)
-CTL_PROTO(stats_arenas_i_bins_j_allocated)
+CTL_PROTO(stats_arenas_i_huge_allocated)
+CTL_PROTO(stats_arenas_i_huge_nmalloc)
+CTL_PROTO(stats_arenas_i_huge_ndalloc)
+CTL_PROTO(stats_arenas_i_huge_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
CTL_PROTO(stats_arenas_i_bins_j_nrequests)
+CTL_PROTO(stats_arenas_i_bins_j_curregs)
CTL_PROTO(stats_arenas_i_bins_j_nfills)
CTL_PROTO(stats_arenas_i_bins_j_nflushes)
CTL_PROTO(stats_arenas_i_bins_j_nruns)
@@ -164,18 +173,28 @@ CTL_PROTO(stats_arenas_i_lruns_j_ndalloc)
CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
CTL_PROTO(stats_arenas_i_lruns_j_curruns)
INDEX_PROTO(stats_arenas_i_lruns_j)
+CTL_PROTO(stats_arenas_i_hchunks_j_nmalloc)
+CTL_PROTO(stats_arenas_i_hchunks_j_ndalloc)
+CTL_PROTO(stats_arenas_i_hchunks_j_nrequests)
+CTL_PROTO(stats_arenas_i_hchunks_j_curhchunks)
+INDEX_PROTO(stats_arenas_i_hchunks_j)
CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_dss)
+CTL_PROTO(stats_arenas_i_lg_dirty_mult)
CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty)
CTL_PROTO(stats_arenas_i_mapped)
CTL_PROTO(stats_arenas_i_npurge)
CTL_PROTO(stats_arenas_i_nmadvise)
CTL_PROTO(stats_arenas_i_purged)
+CTL_PROTO(stats_arenas_i_metadata_mapped)
+CTL_PROTO(stats_arenas_i_metadata_allocated)
INDEX_PROTO(stats_arenas_i)
CTL_PROTO(stats_cactive)
CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active)
+CTL_PROTO(stats_metadata)
+CTL_PROTO(stats_resident)
CTL_PROTO(stats_mapped)
/******************************************************************************/
@@ -197,71 +216,84 @@ CTL_PROTO(stats_mapped)
*/
#define INDEX(i) {false}, i##_index
-static const ctl_named_node_t tcache_node[] = {
+static const ctl_named_node_t thread_tcache_node[] = {
{NAME("enabled"), CTL(thread_tcache_enabled)},
{NAME("flush"), CTL(thread_tcache_flush)}
};
+static const ctl_named_node_t thread_prof_node[] = {
+ {NAME("name"), CTL(thread_prof_name)},
+ {NAME("active"), CTL(thread_prof_active)}
+};
+
static const ctl_named_node_t thread_node[] = {
{NAME("arena"), CTL(thread_arena)},
{NAME("allocated"), CTL(thread_allocated)},
{NAME("allocatedp"), CTL(thread_allocatedp)},
{NAME("deallocated"), CTL(thread_deallocated)},
{NAME("deallocatedp"), CTL(thread_deallocatedp)},
- {NAME("tcache"), CHILD(named, tcache)}
+ {NAME("tcache"), CHILD(named, thread_tcache)},
+ {NAME("prof"), CHILD(named, thread_prof)}
};
static const ctl_named_node_t config_node[] = {
- {NAME("debug"), CTL(config_debug)},
- {NAME("dss"), CTL(config_dss)},
- {NAME("fill"), CTL(config_fill)},
- {NAME("lazy_lock"), CTL(config_lazy_lock)},
- {NAME("mremap"), CTL(config_mremap)},
- {NAME("munmap"), CTL(config_munmap)},
- {NAME("prof"), CTL(config_prof)},
- {NAME("prof_libgcc"), CTL(config_prof_libgcc)},
- {NAME("prof_libunwind"), CTL(config_prof_libunwind)},
- {NAME("stats"), CTL(config_stats)},
- {NAME("tcache"), CTL(config_tcache)},
- {NAME("tls"), CTL(config_tls)},
- {NAME("utrace"), CTL(config_utrace)},
- {NAME("valgrind"), CTL(config_valgrind)},
- {NAME("xmalloc"), CTL(config_xmalloc)}
+ {NAME("cache_oblivious"), CTL(config_cache_oblivious)},
+ {NAME("debug"), CTL(config_debug)},
+ {NAME("fill"), CTL(config_fill)},
+ {NAME("lazy_lock"), CTL(config_lazy_lock)},
+ {NAME("munmap"), CTL(config_munmap)},
+ {NAME("prof"), CTL(config_prof)},
+ {NAME("prof_libgcc"), CTL(config_prof_libgcc)},
+ {NAME("prof_libunwind"), CTL(config_prof_libunwind)},
+ {NAME("stats"), CTL(config_stats)},
+ {NAME("tcache"), CTL(config_tcache)},
+ {NAME("tls"), CTL(config_tls)},
+ {NAME("utrace"), CTL(config_utrace)},
+ {NAME("valgrind"), CTL(config_valgrind)},
+ {NAME("xmalloc"), CTL(config_xmalloc)}
};
static const ctl_named_node_t opt_node[] = {
- {NAME("abort"), CTL(opt_abort)},
- {NAME("dss"), CTL(opt_dss)},
- {NAME("lg_chunk"), CTL(opt_lg_chunk)},
- {NAME("narenas"), CTL(opt_narenas)},
- {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)},
- {NAME("stats_print"), CTL(opt_stats_print)},
- {NAME("junk"), CTL(opt_junk)},
- {NAME("zero"), CTL(opt_zero)},
- {NAME("quarantine"), CTL(opt_quarantine)},
- {NAME("redzone"), CTL(opt_redzone)},
- {NAME("utrace"), CTL(opt_utrace)},
- {NAME("valgrind"), CTL(opt_valgrind)},
- {NAME("xmalloc"), CTL(opt_xmalloc)},
- {NAME("tcache"), CTL(opt_tcache)},
- {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)},
- {NAME("prof"), CTL(opt_prof)},
- {NAME("prof_prefix"), CTL(opt_prof_prefix)},
- {NAME("prof_active"), CTL(opt_prof_active)},
- {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
- {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
- {NAME("prof_gdump"), CTL(opt_prof_gdump)},
- {NAME("prof_final"), CTL(opt_prof_final)},
- {NAME("prof_leak"), CTL(opt_prof_leak)},
- {NAME("prof_accum"), CTL(opt_prof_accum)}
+ {NAME("abort"), CTL(opt_abort)},
+ {NAME("dss"), CTL(opt_dss)},
+ {NAME("lg_chunk"), CTL(opt_lg_chunk)},
+ {NAME("narenas"), CTL(opt_narenas)},
+ {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)},
+ {NAME("stats_print"), CTL(opt_stats_print)},
+ {NAME("junk"), CTL(opt_junk)},
+ {NAME("zero"), CTL(opt_zero)},
+ {NAME("quarantine"), CTL(opt_quarantine)},
+ {NAME("redzone"), CTL(opt_redzone)},
+ {NAME("utrace"), CTL(opt_utrace)},
+ {NAME("xmalloc"), CTL(opt_xmalloc)},
+ {NAME("tcache"), CTL(opt_tcache)},
+ {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)},
+ {NAME("prof"), CTL(opt_prof)},
+ {NAME("prof_prefix"), CTL(opt_prof_prefix)},
+ {NAME("prof_active"), CTL(opt_prof_active)},
+ {NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)},
+ {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
+ {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
+ {NAME("prof_gdump"), CTL(opt_prof_gdump)},
+ {NAME("prof_final"), CTL(opt_prof_final)},
+ {NAME("prof_leak"), CTL(opt_prof_leak)},
+ {NAME("prof_accum"), CTL(opt_prof_accum)}
+};
+
+static const ctl_named_node_t tcache_node[] = {
+ {NAME("create"), CTL(tcache_create)},
+ {NAME("flush"), CTL(tcache_flush)},
+ {NAME("destroy"), CTL(tcache_destroy)}
};
static const ctl_named_node_t arena_i_node[] = {
- {NAME("purge"), CTL(arena_i_purge)},
- {NAME("dss"), CTL(arena_i_dss)}
+ {NAME("purge"), CTL(arena_i_purge)},
+ {NAME("dss"), CTL(arena_i_dss)},
+ {NAME("lg_dirty_mult"), CTL(arena_i_lg_dirty_mult)},
+ {NAME("chunk_hooks"), CTL(arena_i_chunk_hooks)}
};
static const ctl_named_node_t super_arena_i_node[] = {
- {NAME(""), CHILD(named, arena_i)}
+ {NAME(""), CHILD(named, arena_i)}
};
static const ctl_indexed_node_t arena_node[] = {
@@ -269,12 +301,12 @@ static const ctl_indexed_node_t arena_node[] = {
};
static const ctl_named_node_t arenas_bin_i_node[] = {
- {NAME("size"), CTL(arenas_bin_i_size)},
- {NAME("nregs"), CTL(arenas_bin_i_nregs)},
- {NAME("run_size"), CTL(arenas_bin_i_run_size)}
+ {NAME("size"), CTL(arenas_bin_i_size)},
+ {NAME("nregs"), CTL(arenas_bin_i_nregs)},
+ {NAME("run_size"), CTL(arenas_bin_i_run_size)}
};
static const ctl_named_node_t super_arenas_bin_i_node[] = {
- {NAME(""), CHILD(named, arenas_bin_i)}
+ {NAME(""), CHILD(named, arenas_bin_i)}
};
static const ctl_indexed_node_t arenas_bin_node[] = {
@@ -282,76 +314,93 @@ static const ctl_indexed_node_t arenas_bin_node[] = {
};
static const ctl_named_node_t arenas_lrun_i_node[] = {
- {NAME("size"), CTL(arenas_lrun_i_size)}
+ {NAME("size"), CTL(arenas_lrun_i_size)}
};
static const ctl_named_node_t super_arenas_lrun_i_node[] = {
- {NAME(""), CHILD(named, arenas_lrun_i)}
+ {NAME(""), CHILD(named, arenas_lrun_i)}
};
static const ctl_indexed_node_t arenas_lrun_node[] = {
{INDEX(arenas_lrun_i)}
};
+static const ctl_named_node_t arenas_hchunk_i_node[] = {
+ {NAME("size"), CTL(arenas_hchunk_i_size)}
+};
+static const ctl_named_node_t super_arenas_hchunk_i_node[] = {
+ {NAME(""), CHILD(named, arenas_hchunk_i)}
+};
+
+static const ctl_indexed_node_t arenas_hchunk_node[] = {
+ {INDEX(arenas_hchunk_i)}
+};
+
static const ctl_named_node_t arenas_node[] = {
- {NAME("narenas"), CTL(arenas_narenas)},
- {NAME("initialized"), CTL(arenas_initialized)},
- {NAME("quantum"), CTL(arenas_quantum)},
- {NAME("page"), CTL(arenas_page)},
- {NAME("tcache_max"), CTL(arenas_tcache_max)},
- {NAME("nbins"), CTL(arenas_nbins)},
- {NAME("nhbins"), CTL(arenas_nhbins)},
- {NAME("bin"), CHILD(indexed, arenas_bin)},
- {NAME("nlruns"), CTL(arenas_nlruns)},
- {NAME("lrun"), CHILD(indexed, arenas_lrun)},
- {NAME("purge"), CTL(arenas_purge)},
- {NAME("extend"), CTL(arenas_extend)}
+ {NAME("narenas"), CTL(arenas_narenas)},
+ {NAME("initialized"), CTL(arenas_initialized)},
+ {NAME("lg_dirty_mult"), CTL(arenas_lg_dirty_mult)},
+ {NAME("quantum"), CTL(arenas_quantum)},
+ {NAME("page"), CTL(arenas_page)},
+ {NAME("tcache_max"), CTL(arenas_tcache_max)},
+ {NAME("nbins"), CTL(arenas_nbins)},
+ {NAME("nhbins"), CTL(arenas_nhbins)},
+ {NAME("bin"), CHILD(indexed, arenas_bin)},
+ {NAME("nlruns"), CTL(arenas_nlruns)},
+ {NAME("lrun"), CHILD(indexed, arenas_lrun)},
+ {NAME("nhchunks"), CTL(arenas_nhchunks)},
+ {NAME("hchunk"), CHILD(indexed, arenas_hchunk)},
+ {NAME("extend"), CTL(arenas_extend)}
};
static const ctl_named_node_t prof_node[] = {
+ {NAME("thread_active_init"), CTL(prof_thread_active_init)},
{NAME("active"), CTL(prof_active)},
{NAME("dump"), CTL(prof_dump)},
- {NAME("interval"), CTL(prof_interval)}
+ {NAME("gdump"), CTL(prof_gdump)},
+ {NAME("reset"), CTL(prof_reset)},
+ {NAME("interval"), CTL(prof_interval)},
+ {NAME("lg_sample"), CTL(lg_prof_sample)}
};
-static const ctl_named_node_t stats_chunks_node[] = {
- {NAME("current"), CTL(stats_chunks_current)},
- {NAME("total"), CTL(stats_chunks_total)},
- {NAME("high"), CTL(stats_chunks_high)}
-};
-
-static const ctl_named_node_t stats_huge_node[] = {
- {NAME("allocated"), CTL(stats_huge_allocated)},
- {NAME("nmalloc"), CTL(stats_huge_nmalloc)},
- {NAME("ndalloc"), CTL(stats_huge_ndalloc)}
+static const ctl_named_node_t stats_arenas_i_metadata_node[] = {
+ {NAME("mapped"), CTL(stats_arenas_i_metadata_mapped)},
+ {NAME("allocated"), CTL(stats_arenas_i_metadata_allocated)}
};
static const ctl_named_node_t stats_arenas_i_small_node[] = {
- {NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
- {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
- {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)},
- {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)}
+ {NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
+ {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)}
};
static const ctl_named_node_t stats_arenas_i_large_node[] = {
- {NAME("allocated"), CTL(stats_arenas_i_large_allocated)},
- {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)},
- {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)},
- {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}
+ {NAME("allocated"), CTL(stats_arenas_i_large_allocated)},
+ {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}
+};
+
+static const ctl_named_node_t stats_arenas_i_huge_node[] = {
+ {NAME("allocated"), CTL(stats_arenas_i_huge_allocated)},
+ {NAME("nmalloc"), CTL(stats_arenas_i_huge_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_huge_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_huge_nrequests)}
};
static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
- {NAME("allocated"), CTL(stats_arenas_i_bins_j_allocated)},
- {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
- {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)},
- {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)},
- {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)},
- {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)},
- {NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)},
- {NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)},
- {NAME("curruns"), CTL(stats_arenas_i_bins_j_curruns)}
+ {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)},
+ {NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)},
+ {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)},
+ {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)},
+ {NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)},
+ {NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)},
+ {NAME("curruns"), CTL(stats_arenas_i_bins_j_curruns)}
};
static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
- {NAME(""), CHILD(named, stats_arenas_i_bins_j)}
+ {NAME(""), CHILD(named, stats_arenas_i_bins_j)}
};
static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
@@ -359,35 +408,53 @@ static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
};
static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = {
- {NAME("nmalloc"), CTL(stats_arenas_i_lruns_j_nmalloc)},
- {NAME("ndalloc"), CTL(stats_arenas_i_lruns_j_ndalloc)},
- {NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)},
- {NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)}
+ {NAME("nmalloc"), CTL(stats_arenas_i_lruns_j_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_lruns_j_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)},
+ {NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)}
};
static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = {
- {NAME(""), CHILD(named, stats_arenas_i_lruns_j)}
+ {NAME(""), CHILD(named, stats_arenas_i_lruns_j)}
};
static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = {
{INDEX(stats_arenas_i_lruns_j)}
};
+static const ctl_named_node_t stats_arenas_i_hchunks_j_node[] = {
+ {NAME("nmalloc"), CTL(stats_arenas_i_hchunks_j_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_hchunks_j_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_hchunks_j_nrequests)},
+ {NAME("curhchunks"), CTL(stats_arenas_i_hchunks_j_curhchunks)}
+};
+static const ctl_named_node_t super_stats_arenas_i_hchunks_j_node[] = {
+ {NAME(""), CHILD(named, stats_arenas_i_hchunks_j)}
+};
+
+static const ctl_indexed_node_t stats_arenas_i_hchunks_node[] = {
+ {INDEX(stats_arenas_i_hchunks_j)}
+};
+
static const ctl_named_node_t stats_arenas_i_node[] = {
- {NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
- {NAME("dss"), CTL(stats_arenas_i_dss)},
- {NAME("pactive"), CTL(stats_arenas_i_pactive)},
- {NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
- {NAME("mapped"), CTL(stats_arenas_i_mapped)},
- {NAME("npurge"), CTL(stats_arenas_i_npurge)},
- {NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)},
- {NAME("purged"), CTL(stats_arenas_i_purged)},
- {NAME("small"), CHILD(named, stats_arenas_i_small)},
- {NAME("large"), CHILD(named, stats_arenas_i_large)},
- {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
- {NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)}
+ {NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
+ {NAME("dss"), CTL(stats_arenas_i_dss)},
+ {NAME("lg_dirty_mult"), CTL(stats_arenas_i_lg_dirty_mult)},
+ {NAME("pactive"), CTL(stats_arenas_i_pactive)},
+ {NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
+ {NAME("mapped"), CTL(stats_arenas_i_mapped)},
+ {NAME("npurge"), CTL(stats_arenas_i_npurge)},
+ {NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)},
+ {NAME("purged"), CTL(stats_arenas_i_purged)},
+ {NAME("metadata"), CHILD(named, stats_arenas_i_metadata)},
+ {NAME("small"), CHILD(named, stats_arenas_i_small)},
+ {NAME("large"), CHILD(named, stats_arenas_i_large)},
+ {NAME("huge"), CHILD(named, stats_arenas_i_huge)},
+ {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
+ {NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)},
+ {NAME("hchunks"), CHILD(indexed, stats_arenas_i_hchunks)}
};
static const ctl_named_node_t super_stats_arenas_i_node[] = {
- {NAME(""), CHILD(named, stats_arenas_i)}
+ {NAME(""), CHILD(named, stats_arenas_i)}
};
static const ctl_indexed_node_t stats_arenas_node[] = {
@@ -395,13 +462,13 @@ static const ctl_indexed_node_t stats_arenas_node[] = {
};
static const ctl_named_node_t stats_node[] = {
- {NAME("cactive"), CTL(stats_cactive)},
- {NAME("allocated"), CTL(stats_allocated)},
- {NAME("active"), CTL(stats_active)},
- {NAME("mapped"), CTL(stats_mapped)},
- {NAME("chunks"), CHILD(named, stats_chunks)},
- {NAME("huge"), CHILD(named, stats_huge)},
- {NAME("arenas"), CHILD(indexed, stats_arenas)}
+ {NAME("cactive"), CTL(stats_cactive)},
+ {NAME("allocated"), CTL(stats_allocated)},
+ {NAME("active"), CTL(stats_active)},
+ {NAME("metadata"), CTL(stats_metadata)},
+ {NAME("resident"), CTL(stats_resident)},
+ {NAME("mapped"), CTL(stats_mapped)},
+ {NAME("arenas"), CHILD(indexed, stats_arenas)}
};
static const ctl_named_node_t root_node[] = {
@@ -410,6 +477,7 @@ static const ctl_named_node_t root_node[] = {
{NAME("thread"), CHILD(named, thread)},
{NAME("config"), CHILD(named, config)},
{NAME("opt"), CHILD(named, opt)},
+ {NAME("tcache"), CHILD(named, tcache)},
{NAME("arena"), CHILD(indexed, arena)},
{NAME("arenas"), CHILD(named, arenas)},
{NAME("prof"), CHILD(named, prof)},
@@ -431,12 +499,19 @@ ctl_arena_init(ctl_arena_stats_t *astats)
{
if (astats->lstats == NULL) {
- astats->lstats = (malloc_large_stats_t *)base_alloc(nlclasses *
+ astats->lstats = (malloc_large_stats_t *)a0malloc(nlclasses *
sizeof(malloc_large_stats_t));
if (astats->lstats == NULL)
return (true);
}
+ if (astats->hstats == NULL) {
+ astats->hstats = (malloc_huge_stats_t *)a0malloc(nhclasses *
+ sizeof(malloc_huge_stats_t));
+ if (astats->hstats == NULL)
+ return (true);
+ }
+
return (false);
}
@@ -445,6 +520,7 @@ ctl_arena_clear(ctl_arena_stats_t *astats)
{
astats->dss = dss_prec_names[dss_prec_limit];
+ astats->lg_dirty_mult = -1;
astats->pactive = 0;
astats->pdirty = 0;
if (config_stats) {
@@ -456,6 +532,8 @@ ctl_arena_clear(ctl_arena_stats_t *astats)
memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t));
memset(astats->lstats, 0, nlclasses *
sizeof(malloc_large_stats_t));
+ memset(astats->hstats, 0, nhclasses *
+ sizeof(malloc_huge_stats_t));
}
}
@@ -464,11 +542,13 @@ ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
{
unsigned i;
- arena_stats_merge(arena, &cstats->dss, &cstats->pactive,
- &cstats->pdirty, &cstats->astats, cstats->bstats, cstats->lstats);
+ arena_stats_merge(arena, &cstats->dss, &cstats->lg_dirty_mult,
+ &cstats->pactive, &cstats->pdirty, &cstats->astats, cstats->bstats,
+ cstats->lstats, cstats->hstats);
for (i = 0; i < NBINS; i++) {
- cstats->allocated_small += cstats->bstats[i].allocated;
+ cstats->allocated_small += cstats->bstats[i].curregs *
+ index2size(i);
cstats->nmalloc_small += cstats->bstats[i].nmalloc;
cstats->ndalloc_small += cstats->bstats[i].ndalloc;
cstats->nrequests_small += cstats->bstats[i].nrequests;
@@ -488,6 +568,9 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
sstats->astats.nmadvise += astats->astats.nmadvise;
sstats->astats.purged += astats->astats.purged;
+ sstats->astats.metadata_mapped += astats->astats.metadata_mapped;
+ sstats->astats.metadata_allocated += astats->astats.metadata_allocated;
+
sstats->allocated_small += astats->allocated_small;
sstats->nmalloc_small += astats->nmalloc_small;
sstats->ndalloc_small += astats->ndalloc_small;
@@ -498,18 +581,15 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
sstats->astats.nrequests_large += astats->astats.nrequests_large;
- for (i = 0; i < nlclasses; i++) {
- sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
- sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
- sstats->lstats[i].nrequests += astats->lstats[i].nrequests;
- sstats->lstats[i].curruns += astats->lstats[i].curruns;
- }
+ sstats->astats.allocated_huge += astats->astats.allocated_huge;
+ sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge;
+ sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge;
for (i = 0; i < NBINS; i++) {
- sstats->bstats[i].allocated += astats->bstats[i].allocated;
sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
sstats->bstats[i].nrequests += astats->bstats[i].nrequests;
+ sstats->bstats[i].curregs += astats->bstats[i].curregs;
if (config_tcache) {
sstats->bstats[i].nfills += astats->bstats[i].nfills;
sstats->bstats[i].nflushes +=
@@ -519,6 +599,19 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
sstats->bstats[i].reruns += astats->bstats[i].reruns;
sstats->bstats[i].curruns += astats->bstats[i].curruns;
}
+
+ for (i = 0; i < nlclasses; i++) {
+ sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
+ sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
+ sstats->lstats[i].nrequests += astats->lstats[i].nrequests;
+ sstats->lstats[i].curruns += astats->lstats[i].curruns;
+ }
+
+ for (i = 0; i < nhclasses; i++) {
+ sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc;
+ sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc;
+ sstats->hstats[i].curhchunks += astats->hstats[i].curhchunks;
+ }
}
static void
@@ -547,27 +640,23 @@ static bool
ctl_grow(void)
{
ctl_arena_stats_t *astats;
- arena_t **tarenas;
- /* Allocate extended arena stats and arenas arrays. */
- astats = (ctl_arena_stats_t *)imalloc((ctl_stats.narenas + 2) *
+ /* Initialize new arena. */
+ if (arena_init(ctl_stats.narenas) == NULL)
+ return (true);
+
+ /* Allocate extended arena stats. */
+ astats = (ctl_arena_stats_t *)a0malloc((ctl_stats.narenas + 2) *
sizeof(ctl_arena_stats_t));
if (astats == NULL)
return (true);
- tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) *
- sizeof(arena_t *));
- if (tarenas == NULL) {
- idalloc(astats);
- return (true);
- }
/* Initialize the new astats element. */
memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
sizeof(ctl_arena_stats_t));
memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) {
- idalloc(tarenas);
- idalloc(astats);
+ a0dalloc(astats);
return (true);
}
/* Swap merged stats to their new location. */
@@ -580,32 +669,7 @@ ctl_grow(void)
memcpy(&astats[ctl_stats.narenas + 1], &tstats,
sizeof(ctl_arena_stats_t));
}
- /* Initialize the new arenas element. */
- tarenas[ctl_stats.narenas] = NULL;
- {
- arena_t **arenas_old = arenas;
- /*
- * Swap extended arenas array into place. Although ctl_mtx
- * protects this function from other threads extending the
- * array, it does not protect from other threads mutating it
- * (i.e. initializing arenas and setting array elements to
- * point to them). Therefore, array copying must happen under
- * the protection of arenas_lock.
- */
- malloc_mutex_lock(&arenas_lock);
- arenas = tarenas;
- memcpy(arenas, arenas_old, ctl_stats.narenas *
- sizeof(arena_t *));
- narenas_total++;
- arenas_extend(narenas_total - 1);
- malloc_mutex_unlock(&arenas_lock);
- /*
- * Deallocate arenas_old only if it came from imalloc() (not
- * base_alloc()).
- */
- if (ctl_stats.narenas != narenas_auto)
- idalloc(arenas_old);
- }
+ a0dalloc(ctl_stats.arenas);
ctl_stats.arenas = astats;
ctl_stats.narenas++;
@@ -615,23 +679,11 @@ ctl_grow(void)
static void
ctl_refresh(void)
{
+ tsd_t *tsd;
unsigned i;
+ bool refreshed;
VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
- if (config_stats) {
- malloc_mutex_lock(&chunks_mtx);
- ctl_stats.chunks.current = stats_chunks.curchunks;
- ctl_stats.chunks.total = stats_chunks.nchunks;
- ctl_stats.chunks.high = stats_chunks.highchunks;
- malloc_mutex_unlock(&chunks_mtx);
-
- malloc_mutex_lock(&huge_mtx);
- ctl_stats.huge.allocated = huge_allocated;
- ctl_stats.huge.nmalloc = huge_nmalloc;
- ctl_stats.huge.ndalloc = huge_ndalloc;
- malloc_mutex_unlock(&huge_mtx);
- }
-
/*
* Clear sum stats, since they will be merged into by
* ctl_arena_refresh().
@@ -639,15 +691,22 @@ ctl_refresh(void)
ctl_stats.arenas[ctl_stats.narenas].nthreads = 0;
ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);
- malloc_mutex_lock(&arenas_lock);
- memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
+ tsd = tsd_fetch();
+ for (i = 0, refreshed = false; i < ctl_stats.narenas; i++) {
+ tarenas[i] = arena_get(tsd, i, false, false);
+ if (tarenas[i] == NULL && !refreshed) {
+ tarenas[i] = arena_get(tsd, i, false, true);
+ refreshed = true;
+ }
+ }
+
for (i = 0; i < ctl_stats.narenas; i++) {
- if (arenas[i] != NULL)
- ctl_stats.arenas[i].nthreads = arenas[i]->nthreads;
+ if (tarenas[i] != NULL)
+ ctl_stats.arenas[i].nthreads = arena_nbound(i);
else
ctl_stats.arenas[i].nthreads = 0;
}
- malloc_mutex_unlock(&arenas_lock);
+
for (i = 0; i < ctl_stats.narenas; i++) {
bool initialized = (tarenas[i] != NULL);
@@ -657,14 +716,24 @@ ctl_refresh(void)
}
if (config_stats) {
+ size_t base_allocated, base_resident, base_mapped;
+ base_stats_get(&base_allocated, &base_resident, &base_mapped);
ctl_stats.allocated =
- ctl_stats.arenas[ctl_stats.narenas].allocated_small
- + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large
- + ctl_stats.huge.allocated;
+ ctl_stats.arenas[ctl_stats.narenas].allocated_small +
+ ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large +
+ ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge;
ctl_stats.active =
- (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE)
- + ctl_stats.huge.allocated;
- ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
+ (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE);
+ ctl_stats.metadata = base_allocated +
+ ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped +
+ ctl_stats.arenas[ctl_stats.narenas].astats
+ .metadata_allocated;
+ ctl_stats.resident = base_resident +
+ ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped +
+ ((ctl_stats.arenas[ctl_stats.narenas].pactive +
+ ctl_stats.arenas[ctl_stats.narenas].pdirty) << LG_PAGE);
+ ctl_stats.mapped = base_mapped +
+ ctl_stats.arenas[ctl_stats.narenas].astats.mapped;
}
ctl_epoch++;
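
The ctl_refresh() changes above feed the new top-level stats.metadata and stats.resident totals alongside the reworked stats.mapped. A hedged consumer-side sketch follows; it assumes an unprefixed build (this bundled copy normally exposes je_mallctl) and shows the epoch write that refreshes cached statistics before reading them.

	#include <stdint.h>
	#include <stdio.h>
	#include <jemalloc/jemalloc.h>

	int
	main(void)
	{
		uint64_t epoch = 1;
		size_t sz = sizeof(epoch);
		size_t allocated, resident, metadata;

		/* Writing "epoch" refreshes the cached statistics. */
		mallctl("epoch", &epoch, &sz, &epoch, sz);

		sz = sizeof(size_t);
		mallctl("stats.allocated", &allocated, &sz, NULL, 0);
		mallctl("stats.resident", &resident, &sz, NULL, 0);
		mallctl("stats.metadata", &metadata, &sz, NULL, 0);
		printf("allocated=%zu resident=%zu metadata=%zu\n",
		    allocated, resident, metadata);
		return (0);
	}
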
@@ -676,14 +745,13 @@ ctl_init(void)
bool ret;
malloc_mutex_lock(&ctl_mtx);
- if (ctl_initialized == false) {
+ if (!ctl_initialized) {
/*
* Allocate space for one extra arena stats element, which
* contains summed stats across all arenas.
*/
- assert(narenas_auto == narenas_total_get());
- ctl_stats.narenas = narenas_auto;
- ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc(
+ ctl_stats.narenas = narenas_total_get();
+ ctl_stats.arenas = (ctl_arena_stats_t *)a0malloc(
(ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t));
if (ctl_stats.arenas == NULL) {
ret = true;
@@ -701,6 +769,15 @@ ctl_init(void)
unsigned i;
for (i = 0; i <= ctl_stats.narenas; i++) {
if (ctl_arena_init(&ctl_stats.arenas[i])) {
+ unsigned j;
+ for (j = 0; j < i; j++) {
+ a0dalloc(
+ ctl_stats.arenas[j].lstats);
+ a0dalloc(
+ ctl_stats.arenas[j].hstats);
+ }
+ a0dalloc(ctl_stats.arenas);
+ ctl_stats.arenas = NULL;
ret = true;
goto label_return;
}
@@ -826,7 +903,7 @@ ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
size_t mib[CTL_MAX_DEPTH];
const ctl_named_node_t *node;
- if (ctl_initialized == false && ctl_init()) {
+ if (!ctl_initialized && ctl_init()) {
ret = EAGAIN;
goto label_return;
}
@@ -853,7 +930,7 @@ ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp)
{
int ret;
- if (ctl_initialized == false && ctl_init()) {
+ if (!ctl_initialized && ctl_init()) {
ret = EAGAIN;
goto label_return;
}
@@ -871,7 +948,7 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
const ctl_named_node_t *node;
size_t i;
- if (ctl_initialized == false && ctl_init()) {
+ if (!ctl_initialized && ctl_init()) {
ret = EAGAIN;
goto label_return;
}
@@ -963,6 +1040,14 @@ ctl_postfork_child(void)
} \
} while (0)
+#define READ_XOR_WRITE() do { \
+ if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \
+ newlen != 0)) { \
+ ret = EPERM; \
+ goto label_return; \
+ } \
+} while (0)
+
#define READ(v, t) do { \
if (oldp != NULL && oldlenp != NULL) { \
if (*oldlenp != sizeof(t)) { \
@@ -971,8 +1056,8 @@ ctl_postfork_child(void)
memcpy(oldp, (void *)&(v), copylen); \
ret = EINVAL; \
goto label_return; \
- } else \
- *(t *)oldp = (v); \
+ } \
+ *(t *)oldp = (v); \
} \
} while (0)
@@ -998,7 +1083,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
int ret; \
t oldval; \
\
- if ((c) == false) \
+ if (!(c)) \
return (ENOENT); \
if (l) \
malloc_mutex_lock(&ctl_mtx); \
@@ -1021,7 +1106,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
int ret; \
t oldval; \
\
- if ((c) == false) \
+ if (!(c)) \
return (ENOENT); \
malloc_mutex_lock(&ctl_mtx); \
READONLY(); \
@@ -1065,7 +1150,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
int ret; \
t oldval; \
\
- if ((c) == false) \
+ if (!(c)) \
return (ENOENT); \
READONLY(); \
oldval = (v); \
@@ -1093,6 +1178,27 @@ label_return: \
return (ret); \
}
+#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \
+static int \
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
+ void *newp, size_t newlen) \
+{ \
+ int ret; \
+ t oldval; \
+ tsd_t *tsd; \
+ \
+ if (!(c)) \
+ return (ENOENT); \
+ READONLY(); \
+ tsd = tsd_fetch(); \
+ oldval = (m(tsd)); \
+ READ(oldval, t); \
+ \
+ ret = 0; \
+label_return: \
+ return (ret); \
+}
+
#define CTL_RO_BOOL_CONFIG_GEN(n) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
@@ -1135,11 +1241,10 @@ label_return:
/******************************************************************************/
+CTL_RO_BOOL_CONFIG_GEN(config_cache_oblivious)
CTL_RO_BOOL_CONFIG_GEN(config_debug)
-CTL_RO_BOOL_CONFIG_GEN(config_dss)
CTL_RO_BOOL_CONFIG_GEN(config_fill)
CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
-CTL_RO_BOOL_CONFIG_GEN(config_mremap)
CTL_RO_BOOL_CONFIG_GEN(config_munmap)
CTL_RO_BOOL_CONFIG_GEN(config_prof)
CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
@@ -1159,18 +1264,19 @@ CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
-CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool)
+CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
-CTL_RO_NL_CGEN(config_valgrind, opt_valgrind, opt_valgrind, bool)
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
-CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */
+CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init,
+ opt_prof_thread_active_init, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
@@ -1185,14 +1291,21 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
+ tsd_t *tsd;
+ arena_t *oldarena;
unsigned newind, oldind;
+ tsd = tsd_fetch();
+ oldarena = arena_choose(tsd, NULL);
+ if (oldarena == NULL)
+ return (EAGAIN);
+
malloc_mutex_lock(&ctl_mtx);
- newind = oldind = choose_arena(NULL)->ind;
+ newind = oldind = oldarena->ind;
WRITE(newind, unsigned);
READ(oldind, unsigned);
if (newind != oldind) {
- arena_t *arena;
+ arena_t *newarena;
if (newind >= ctl_stats.narenas) {
/* New arena index is out of range. */
@@ -1201,28 +1314,20 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
}
/* Initialize arena if necessary. */
- malloc_mutex_lock(&arenas_lock);
- if ((arena = arenas[newind]) == NULL && (arena =
- arenas_extend(newind)) == NULL) {
- malloc_mutex_unlock(&arenas_lock);
+ newarena = arena_get(tsd, newind, true, true);
+ if (newarena == NULL) {
ret = EAGAIN;
goto label_return;
}
- assert(arena == arenas[newind]);
- arenas[oldind]->nthreads--;
- arenas[newind]->nthreads++;
- malloc_mutex_unlock(&arenas_lock);
-
- /* Set new arena association. */
+ /* Set new arena/tcache associations. */
+ arena_migrate(tsd, oldind, newind);
if (config_tcache) {
- tcache_t *tcache;
- if ((uintptr_t)(tcache = *tcache_tsd_get()) >
- (uintptr_t)TCACHE_STATE_MAX) {
- tcache_arena_dissociate(tcache);
- tcache_arena_associate(tcache, arena);
+ tcache_t *tcache = tsd_tcache_get(tsd);
+ if (tcache != NULL) {
+ tcache_arena_reassociate(tcache, oldarena,
+ newarena);
}
}
- arenas_tsd_set(&arena);
}
ret = 0;
@@ -1231,14 +1336,14 @@ label_return:
return (ret);
}
-CTL_RO_NL_CGEN(config_stats, thread_allocated,
- thread_allocated_tsd_get()->allocated, uint64_t)
-CTL_RO_NL_CGEN(config_stats, thread_allocatedp,
- &thread_allocated_tsd_get()->allocated, uint64_t *)
-CTL_RO_NL_CGEN(config_stats, thread_deallocated,
- thread_allocated_tsd_get()->deallocated, uint64_t)
-CTL_RO_NL_CGEN(config_stats, thread_deallocatedp,
- &thread_allocated_tsd_get()->deallocated, uint64_t *)
+CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get,
+ uint64_t)
+CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get,
+ uint64_t *)
+CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get,
+ uint64_t)
+CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
+ tsd_thread_deallocatedp_get, uint64_t *)
static int
thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
@@ -1247,7 +1352,7 @@ thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
int ret;
bool oldval;
- if (config_tcache == false)
+ if (!config_tcache)
return (ENOENT);
oldval = tcache_enabled_get();
@@ -1271,7 +1376,7 @@ thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp,
{
int ret;
- if (config_tcache == false)
+ if (!config_tcache)
return (ENOENT);
READONLY();
@@ -1284,17 +1389,170 @@ label_return:
return (ret);
}
+static int
+thread_prof_name_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+
+ if (!config_prof)
+ return (ENOENT);
+
+ READ_XOR_WRITE();
+
+ if (newp != NULL) {
+ tsd_t *tsd;
+
+ if (newlen != sizeof(const char *)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+
+ tsd = tsd_fetch();
+
+ if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
+ 0)
+ goto label_return;
+ } else {
+ const char *oldname = prof_thread_name_get();
+ READ(oldname, const char *);
+ }
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+thread_prof_active_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ bool oldval;
+
+ if (!config_prof)
+ return (ENOENT);
+
+ oldval = prof_thread_active_get();
+ if (newp != NULL) {
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ if (prof_thread_active_set(*(bool *)newp)) {
+ ret = EAGAIN;
+ goto label_return;
+ }
+ }
+ READ(oldval, bool);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+/******************************************************************************/
+
+static int
+tcache_create_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+ int ret;
+ tsd_t *tsd;
+ unsigned tcache_ind;
+
+ if (!config_tcache)
+ return (ENOENT);
+
+ tsd = tsd_fetch();
+
+ malloc_mutex_lock(&ctl_mtx);
+ READONLY();
+ if (tcaches_create(tsd, &tcache_ind)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ READ(tcache_ind, unsigned);
+
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(&ctl_mtx);
+ return (ret);
+}
+
+static int
+tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+ int ret;
+ tsd_t *tsd;
+ unsigned tcache_ind;
+
+ if (!config_tcache)
+ return (ENOENT);
+
+ tsd = tsd_fetch();
+
+ WRITEONLY();
+ tcache_ind = UINT_MAX;
+ WRITE(tcache_ind, unsigned);
+ if (tcache_ind == UINT_MAX) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ tcaches_flush(tsd, tcache_ind);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+tcache_destroy_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ tsd_t *tsd;
+ unsigned tcache_ind;
+
+ if (!config_tcache)
+ return (ENOENT);
+
+ tsd = tsd_fetch();
+
+ WRITEONLY();
+ tcache_ind = UINT_MAX;
+ WRITE(tcache_ind, unsigned);
+ if (tcache_ind == UINT_MAX) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ tcaches_destroy(tsd, tcache_ind);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
/******************************************************************************/
/* ctl_mutex must be held during execution of this function. */
static void
arena_purge(unsigned arena_ind)
{
+ tsd_t *tsd;
+ unsigned i;
+ bool refreshed;
VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
- malloc_mutex_lock(&arenas_lock);
- memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
- malloc_mutex_unlock(&arenas_lock);
+ tsd = tsd_fetch();
+ for (i = 0, refreshed = false; i < ctl_stats.narenas; i++) {
+ tarenas[i] = arena_get(tsd, i, false, false);
+ if (tarenas[i] == NULL && !refreshed) {
+ tarenas[i] = arena_get(tsd, i, false, true);
+ refreshed = true;
+ }
+ }
if (arena_ind == ctl_stats.narenas) {
unsigned i;
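The three tcache_*_ctl handlers above back the "tcache.create", "tcache.flush", and "tcache.destroy" mallctl entry points that are new in jemalloc 4.0, together with the MALLOCX_TCACHE() flag for routing individual allocations through an explicitly created cache. A minimal usage sketch, assuming the installed <jemalloc/jemalloc.h> (in this bundled copy the public symbols carry a je_ prefix, so mallctl() would be je_mallctl(), and so on):

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
        unsigned tc;
        size_t sz = sizeof(tc);
        void *p;

        /* Ask jemalloc for an explicit, manually managed thread cache. */
        if (mallctl("tcache.create", &tc, &sz, NULL, 0) != 0) {
            fprintf(stderr, "tcache.create failed\n");
            return (1);
        }

        /* Route this allocation through the explicit cache. */
        p = mallocx(128, MALLOCX_TCACHE(tc));
        dallocx(p, MALLOCX_TCACHE(tc));

        /* Return cached objects to their arenas, then tear the cache down. */
        mallctl("tcache.flush", NULL, NULL, &tc, sizeof(tc));
        mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
        return (0);
    }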
@@ -1330,47 +1588,117 @@ static int
arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
- int ret, i;
- bool match, err;
- const char *dss;
+ int ret;
+ const char *dss = NULL;
unsigned arena_ind = mib[1];
dss_prec_t dss_prec_old = dss_prec_limit;
dss_prec_t dss_prec = dss_prec_limit;
malloc_mutex_lock(&ctl_mtx);
WRITE(dss, const char *);
- match = false;
- for (i = 0; i < dss_prec_limit; i++) {
- if (strcmp(dss_prec_names[i], dss) == 0) {
- dss_prec = i;
- match = true;
- break;
+ if (dss != NULL) {
+ int i;
+ bool match = false;
+
+ for (i = 0; i < dss_prec_limit; i++) {
+ if (strcmp(dss_prec_names[i], dss) == 0) {
+ dss_prec = i;
+ match = true;
+ break;
+ }
+ }
+
+ if (!match) {
+ ret = EINVAL;
+ goto label_return;
}
- }
- if (match == false) {
- ret = EINVAL;
- goto label_return;
}
if (arena_ind < ctl_stats.narenas) {
- arena_t *arena = arenas[arena_ind];
- if (arena != NULL) {
- dss_prec_old = arena_dss_prec_get(arena);
- arena_dss_prec_set(arena, dss_prec);
- err = false;
- } else
- err = true;
+ arena_t *arena = arena_get(tsd_fetch(), arena_ind, false, true);
+ if (arena == NULL || (dss_prec != dss_prec_limit &&
+ arena_dss_prec_set(arena, dss_prec))) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ dss_prec_old = arena_dss_prec_get(arena);
} else {
+ if (dss_prec != dss_prec_limit &&
+ chunk_dss_prec_set(dss_prec)) {
+ ret = EFAULT;
+ goto label_return;
+ }
dss_prec_old = chunk_dss_prec_get();
- err = chunk_dss_prec_set(dss_prec);
}
+
dss = dss_prec_names[dss_prec_old];
READ(dss, const char *);
- if (err) {
+
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(&ctl_mtx);
+ return (ret);
+}
+
+static int
+arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ unsigned arena_ind = mib[1];
+ arena_t *arena;
+
+ arena = arena_get(tsd_fetch(), arena_ind, false, true);
+ if (arena == NULL) {
ret = EFAULT;
goto label_return;
}
+ if (oldp != NULL && oldlenp != NULL) {
+ size_t oldval = arena_lg_dirty_mult_get(arena);
+ READ(oldval, ssize_t);
+ }
+ if (newp != NULL) {
+ if (newlen != sizeof(ssize_t)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ if (arena_lg_dirty_mult_set(arena, *(ssize_t *)newp)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ }
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+arena_i_chunk_hooks_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ unsigned arena_ind = mib[1];
+ arena_t *arena;
+
+ malloc_mutex_lock(&ctl_mtx);
+ if (arena_ind < narenas_total_get() && (arena =
+ arena_get(tsd_fetch(), arena_ind, false, true)) != NULL) {
+ if (newp != NULL) {
+ chunk_hooks_t old_chunk_hooks, new_chunk_hooks;
+ WRITE(new_chunk_hooks, chunk_hooks_t);
+ old_chunk_hooks = chunk_hooks_set(arena,
+ &new_chunk_hooks);
+ READ(old_chunk_hooks, chunk_hooks_t);
+ } else {
+ chunk_hooks_t old_chunk_hooks = chunk_hooks_get(arena);
+ READ(old_chunk_hooks, chunk_hooks_t);
+ }
+ } else {
+ ret = EFAULT;
+ goto label_return;
+ }
ret = 0;
label_return:
malloc_mutex_unlock(&ctl_mtx);
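arena_i_lg_dirty_mult_ctl() above exposes per-arena purging aggressiveness: an arena purges dirty pages so that active pages stay at least 2^lg_dirty_mult times the dirty page count. A hedged sketch of reading and then tightening the setting for arena 0 (error handling abbreviated):

    #include <stdio.h>
    #include <sys/types.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
        ssize_t lg_dirty_mult;
        size_t sz = sizeof(lg_dirty_mult);

        if (mallctl("arena.0.lg_dirty_mult", &lg_dirty_mult, &sz, NULL, 0))
            return (1);
        printf("arena 0 lg_dirty_mult: %zd\n", lg_dirty_mult);

        /* Keep dirty pages below active/8 (2^3 == 8); -1 disables purging. */
        lg_dirty_mult = 3;
        if (mallctl("arena.0.lg_dirty_mult", NULL, NULL, &lg_dirty_mult,
            sizeof(lg_dirty_mult)))
            return (1);
        return (0);
    }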
@@ -1444,6 +1772,32 @@ label_return:
return (ret);
}
+static int
+arenas_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+
+ if (oldp != NULL && oldlenp != NULL) {
+ size_t oldval = arena_lg_dirty_mult_default_get();
+ READ(oldval, ssize_t);
+ }
+ if (newp != NULL) {
+ if (newlen != sizeof(ssize_t)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ if (arena_lg_dirty_mult_default_set(*(ssize_t *)newp)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ }
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
@@ -1461,8 +1815,8 @@ arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
return (super_arenas_bin_i_node);
}
-CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t)
-CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t)
+CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned)
+CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+mib[2]), size_t)
static const ctl_named_node_t *
arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
{
@@ -1472,29 +1826,15 @@ arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
return (super_arenas_lrun_i_node);
}
-static int
-arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
+CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned)
+CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+mib[2]), size_t)
+static const ctl_named_node_t *
+arenas_hchunk_i_index(const size_t *mib, size_t miblen, size_t i)
{
- int ret;
- unsigned arena_ind;
-
- malloc_mutex_lock(&ctl_mtx);
- WRITEONLY();
- arena_ind = UINT_MAX;
- WRITE(arena_ind, unsigned);
- if (newp != NULL && arena_ind >= ctl_stats.narenas)
- ret = EFAULT;
- else {
- if (arena_ind == UINT_MAX)
- arena_ind = ctl_stats.narenas;
- arena_purge(arena_ind);
- ret = 0;
- }
-label_return:
- malloc_mutex_unlock(&ctl_mtx);
- return (ret);
+ if (i > nhclasses)
+ return (NULL);
+ return (super_arenas_hchunk_i_node);
}
static int
@@ -1522,31 +1862,52 @@ label_return:
/******************************************************************************/
static int
+prof_thread_active_init_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ bool oldval;
+
+ if (!config_prof)
+ return (ENOENT);
+
+ if (newp != NULL) {
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ oldval = prof_thread_active_init_set(*(bool *)newp);
+ } else
+ oldval = prof_thread_active_init_get();
+ READ(oldval, bool);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
bool oldval;
- if (config_prof == false)
+ if (!config_prof)
return (ENOENT);
- malloc_mutex_lock(&ctl_mtx); /* Protect opt_prof_active. */
- oldval = opt_prof_active;
if (newp != NULL) {
- /*
- * The memory barriers will tend to make opt_prof_active
- * propagate faster on systems with weak memory ordering.
- */
- mb_write();
- WRITE(opt_prof_active, bool);
- mb_write();
- }
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ oldval = prof_active_set(*(bool *)newp);
+ } else
+ oldval = prof_active_get();
READ(oldval, bool);
ret = 0;
label_return:
- malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
@@ -1557,7 +1918,7 @@ prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
int ret;
const char *filename = NULL;
- if (config_prof == false)
+ if (!config_prof)
return (ENOENT);
WRITEONLY();
@@ -1573,24 +1934,71 @@ label_return:
return (ret);
}
+static int
+prof_gdump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+ int ret;
+ bool oldval;
+
+ if (!config_prof)
+ return (ENOENT);
+
+ if (newp != NULL) {
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ oldval = prof_gdump_set(*(bool *)newp);
+ } else
+ oldval = prof_gdump_get();
+ READ(oldval, bool);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+prof_reset_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+ int ret;
+ size_t lg_sample = lg_prof_sample;
+ tsd_t *tsd;
+
+ if (!config_prof)
+ return (ENOENT);
+
+ WRITEONLY();
+ WRITE(lg_sample, size_t);
+ if (lg_sample >= (sizeof(uint64_t) << 3))
+ lg_sample = (sizeof(uint64_t) << 3) - 1;
+
+ tsd = tsd_fetch();
+
+ prof_reset(tsd, lg_sample);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
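The new prof_gdump_ctl() and prof_reset_ctl() handlers, together with the rewritten prof_active_ctl() above, make heap-profiling behavior adjustable at run time rather than only through opt.* settings. A hedged sketch, assuming a build configured with --enable-prof and profiling enabled at startup:

    #include <stdbool.h>
    #include <stddef.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
        bool active = true, gdump = true;
        size_t lg_sample = 19;    /* Average sample interval of 2^19 bytes. */

        /* Toggle sample collection without restarting the process. */
        mallctl("prof.active", NULL, NULL, &active, sizeof(active));

        /* Drop accumulated backtraces and change the sample rate. */
        mallctl("prof.reset", NULL, NULL, &lg_sample, sizeof(lg_sample));

        /* Dump whenever total chunk mapping reaches a new high-water mark. */
        mallctl("prof.gdump", NULL, NULL, &gdump, sizeof(gdump));
        return (0);
    }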
CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
+CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t)
/******************************************************************************/
CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
+CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t)
+CTL_RO_CGEN(config_stats, stats_resident, ctl_stats.resident, size_t)
CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
-CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current,
- size_t)
-CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t)
-CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t)
-CTL_RO_CGEN(config_stats, stats_huge_allocated, huge_allocated, size_t)
-CTL_RO_CGEN(config_stats, stats_huge_nmalloc, huge_nmalloc, uint64_t)
-CTL_RO_CGEN(config_stats, stats_huge_ndalloc, huge_ndalloc, uint64_t)
-
CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
+CTL_RO_GEN(stats_arenas_i_lg_dirty_mult, ctl_stats.arenas[mib[2]].lg_dirty_mult,
+ ssize_t)
CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
@@ -1602,6 +2010,10 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_mapped,
+ ctl_stats.arenas[mib[2]].astats.metadata_mapped, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_allocated,
+ ctl_stats.arenas[mib[2]].astats.metadata_allocated, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
ctl_stats.arenas[mib[2]].allocated_small, size_t)
@@ -1619,15 +2031,23 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_huge_allocated,
+ ctl_stats.arenas[mib[2]].astats.allocated_huge, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nmalloc,
+ ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_huge_ndalloc,
+ ctl_stats.arenas[mib[2]].astats.ndalloc_huge, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nrequests,
+ ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t) /* Intentional. */
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_allocated,
- ctl_stats.arenas[mib[2]].bstats[mib[4]].allocated, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
+ ctl_stats.arenas[mib[2]].bstats[mib[4]].curregs, size_t)
CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills,
ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t)
CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes,
@@ -1666,13 +2086,32 @@ stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
return (super_stats_arenas_i_lruns_j_node);
}
+CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nmalloc,
+ ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_ndalloc,
+ ctl_stats.arenas[mib[2]].hstats[mib[4]].ndalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nrequests,
+ ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, /* Intentional. */
+ uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks,
+ ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t)
+
+static const ctl_named_node_t *
+stats_arenas_i_hchunks_j_index(const size_t *mib, size_t miblen, size_t j)
+{
+
+ if (j > nhclasses)
+ return (NULL);
+ return (super_stats_arenas_i_hchunks_j_node);
+}
+
static const ctl_named_node_t *
stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
{
const ctl_named_node_t * ret;
malloc_mutex_lock(&ctl_mtx);
- if (i > ctl_stats.narenas || ctl_stats.arenas[i].initialized == false) {
+ if (i > ctl_stats.narenas || !ctl_stats.arenas[i].initialized) {
ret = NULL;
goto label_return;
}
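The ctl tree now also publishes huge-size-class statistics (stats.arenas.<i>.hchunks.<j>.*), replacing the global stats.chunks.* and stats.huge_* counters removed above. Because the arena and size-class components are numeric, the MIB interface is the convenient way to iterate them; a hedged sketch for arena 0, assuming a stats-enabled build:

    #include <stdio.h>
    #include <stdint.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
        uint64_t epoch = 1;
        unsigned nhchunks, j;
        size_t mib[6], miblen = sizeof(mib) / sizeof(mib[0]);
        size_t sz;

        /* Refresh the ctl_stats snapshot that the stats_* nodes read. */
        mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));

        sz = sizeof(nhchunks);
        if (mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0))
            return (1);

        if (mallctlnametomib("stats.arenas.0.hchunks.0.curhchunks", mib,
            &miblen))
            return (1);
        for (j = 0; j < nhchunks; j++) {
            size_t curhchunks;

            mib[4] = j;    /* Patch in the size-class component. */
            sz = sizeof(curhchunks);
            if (mallctlbymib(mib, miblen, &curhchunks, &sz, NULL, 0) == 0)
                printf("hchunk class %u: %zu chunks\n", j, curhchunks);
        }
        return (0);
    }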
diff --git a/deps/jemalloc/src/extent.c b/deps/jemalloc/src/extent.c
index 8c09b486e..13f94411c 100644
--- a/deps/jemalloc/src/extent.c
+++ b/deps/jemalloc/src/extent.c
@@ -3,17 +3,32 @@
/******************************************************************************/
-static inline int
+JEMALLOC_INLINE_C size_t
+extent_quantize(size_t size)
+{
+
+ /*
+ * Round down to the nearest chunk size that can actually be requested
+ * during normal huge allocation.
+ */
+ return (index2size(size2index(size + 1) - 1));
+}
+
+JEMALLOC_INLINE_C int
extent_szad_comp(extent_node_t *a, extent_node_t *b)
{
int ret;
- size_t a_size = a->size;
- size_t b_size = b->size;
-
- ret = (a_size > b_size) - (a_size < b_size);
+ size_t a_qsize = extent_quantize(extent_node_size_get(a));
+ size_t b_qsize = extent_quantize(extent_node_size_get(b));
+
+ /*
+ * Compare based on quantized size rather than size, in order to sort
+ * equally useful extents only by address.
+ */
+ ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
if (ret == 0) {
- uintptr_t a_addr = (uintptr_t)a->addr;
- uintptr_t b_addr = (uintptr_t)b->addr;
+ uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
+ uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
ret = (a_addr > b_addr) - (a_addr < b_addr);
}
@@ -22,18 +37,17 @@ extent_szad_comp(extent_node_t *a, extent_node_t *b)
}
/* Generate red-black tree functions. */
-rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, link_szad,
+rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, szad_link,
extent_szad_comp)
-static inline int
+JEMALLOC_INLINE_C int
extent_ad_comp(extent_node_t *a, extent_node_t *b)
{
- uintptr_t a_addr = (uintptr_t)a->addr;
- uintptr_t b_addr = (uintptr_t)b->addr;
+ uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
+ uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
return ((a_addr > b_addr) - (a_addr < b_addr));
}
/* Generate red-black tree functions. */
-rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad,
- extent_ad_comp)
+rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp)
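Two details in the rewritten comparators are worth spelling out. extent_quantize() rounds a size down to the largest size class a huge allocation could actually have requested, so free extents whose sizes differ only by unusable slack compare equal and are then ordered purely by address, biasing reuse toward lower addresses. The `(a > b) - (a < b)` expression is the usual branch-free three-way compare, which stays correct where a naive `a - b` would truncate or wrap. A self-contained illustration of both ideas, using power-of-two rounding as a hypothetical stand-in for the real size-class quantization:

    #include <assert.h>
    #include <stddef.h>

    /* Branch-free three-way compare: evaluates to -1, 0, or 1. */
    static int
    cmp_size(size_t a, size_t b)
    {
        return ((a > b) - (a < b));
    }

    /* Hypothetical stand-in for extent_quantize(): round down to a power of two. */
    static size_t
    quantize_pow2(size_t size)
    {
        size_t q = 1;

        while ((q << 1) != 0 && (q << 1) <= size)
            q <<= 1;
        return (q);
    }

    int
    main(void)
    {
        /* 1536 and 1024 quantize to the same bucket, so they tie on size... */
        assert(cmp_size(quantize_pow2(1536), quantize_pow2(1024)) == 0);
        /* ...and the real tree would then break the tie by address. */
        assert(cmp_size(quantize_pow2(4096), quantize_pow2(1024)) == 1);
        return (0);
    }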
diff --git a/deps/jemalloc/src/huge.c b/deps/jemalloc/src/huge.c
index d72f21357..1e9a66512 100644
--- a/deps/jemalloc/src/huge.c
+++ b/deps/jemalloc/src/huge.c
@@ -2,44 +2,68 @@
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
-/* Data. */
-uint64_t huge_nmalloc;
-uint64_t huge_ndalloc;
-size_t huge_allocated;
+static extent_node_t *
+huge_node_get(const void *ptr)
+{
+ extent_node_t *node;
-malloc_mutex_t huge_mtx;
+ node = chunk_lookup(ptr, true);
+ assert(!extent_node_achunk_get(node));
-/******************************************************************************/
+ return (node);
+}
+
+static bool
+huge_node_set(const void *ptr, extent_node_t *node)
+{
+
+ assert(extent_node_addr_get(node) == ptr);
+ assert(!extent_node_achunk_get(node));
+ return (chunk_register(ptr, node));
+}
-/* Tree of chunks that are stand-alone huge allocations. */
-static extent_tree_t huge;
+static void
+huge_node_unset(const void *ptr, const extent_node_t *node)
+{
+
+ chunk_deregister(ptr, node);
+}
void *
-huge_malloc(size_t size, bool zero, dss_prec_t dss_prec)
+huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
+ tcache_t *tcache)
{
+ size_t usize;
- return (huge_palloc(size, chunksize, zero, dss_prec));
+ usize = s2u(size);
+ if (usize == 0) {
+ /* size_t overflow. */
+ return (NULL);
+ }
+
+ return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache));
}
void *
-huge_palloc(size_t size, size_t alignment, bool zero, dss_prec_t dss_prec)
+huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
+ bool zero, tcache_t *tcache)
{
void *ret;
- size_t csize;
+ size_t usize;
extent_node_t *node;
bool is_zeroed;
/* Allocate one or more contiguous chunks for this request. */
- csize = CHUNK_CEILING(size);
- if (csize == 0) {
- /* size is large enough to cause size_t wrap-around. */
+ usize = sa2u(size, alignment);
+ if (unlikely(usize == 0))
return (NULL);
- }
+ assert(usize >= chunksize);
/* Allocate an extent node with which to track the chunk. */
- node = base_node_alloc();
+ node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
+ CACHELINE, false, tcache, true, arena);
if (node == NULL)
return (NULL);
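Two conventions in the new huge_malloc()/huge_palloc() entry points deserve a note: s2u()/sa2u() return 0 to signal that rounding the request up to a usable size would overflow size_t, and the extent node tracking the allocation is no longer kept in one global tree but registered in the chunk radix tree (huge_node_set() above) and linked onto the owning arena's huge list. A tiny, hedged illustration of the overflow-signalling convention only, with a hypothetical quantum-rounding helper standing in for s2u():

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for s2u(): round up to a multiple of `quantum`,
     * returning 0 when the rounding itself would wrap around size_t. */
    static size_t
    round_up_usable(size_t size, size_t quantum)
    {
        size_t usize = (size + quantum - 1) & ~(quantum - 1);

        return ((usize < size) ? 0 : usize);    /* 0 <=> overflow. */
    }

    int
    main(void)
    {
        printf("%zu\n", round_up_usable(100, 64));              /* 128 */
        printf("%zu\n", round_up_usable(SIZE_MAX - 3, 64));     /* 0 (overflow) */
        return (0);
    }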
@@ -48,145 +72,33 @@ huge_palloc(size_t size, size_t alignment, bool zero, dss_prec_t dss_prec)
* it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed = zero;
- ret = chunk_alloc(csize, alignment, false, &is_zeroed, dss_prec);
- if (ret == NULL) {
- base_node_dealloc(node);
+ arena = arena_choose(tsd, arena);
+ if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
+ size, alignment, &is_zeroed)) == NULL) {
+ idalloctm(tsd, node, tcache, true);
return (NULL);
}
- /* Insert node into huge. */
- node->addr = ret;
- node->size = csize;
-
- malloc_mutex_lock(&huge_mtx);
- extent_tree_ad_insert(&huge, node);
- if (config_stats) {
- stats_cactive_add(csize);
- huge_nmalloc++;
- huge_allocated += csize;
- }
- malloc_mutex_unlock(&huge_mtx);
-
- if (config_fill && zero == false) {
- if (opt_junk)
- memset(ret, 0xa5, csize);
- else if (opt_zero && is_zeroed == false)
- memset(ret, 0, csize);
- }
-
- return (ret);
-}
-
-bool
-huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
-{
-
- /*
- * Avoid moving the allocation if the size class can be left the same.
- */
- if (oldsize > arena_maxclass
- && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
- && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
- assert(CHUNK_CEILING(oldsize) == oldsize);
- return (false);
- }
-
- /* Reallocation would require a move. */
- return (true);
-}
-
-void *
-huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
- size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec)
-{
- void *ret;
- size_t copysize;
-
- /* Try to avoid moving the allocation. */
- if (huge_ralloc_no_move(ptr, oldsize, size, extra) == false)
- return (ptr);
+ extent_node_init(node, arena, ret, size, is_zeroed, true);
- /*
- * size and oldsize are different enough that we need to use a
- * different size class. In that case, fall back to allocating new
- * space and copying.
- */
- if (alignment > chunksize)
- ret = huge_palloc(size + extra, alignment, zero, dss_prec);
- else
- ret = huge_malloc(size + extra, zero, dss_prec);
-
- if (ret == NULL) {
- if (extra == 0)
- return (NULL);
- /* Try again, this time without extra. */
- if (alignment > chunksize)
- ret = huge_palloc(size, alignment, zero, dss_prec);
- else
- ret = huge_malloc(size, zero, dss_prec);
-
- if (ret == NULL)
- return (NULL);
+ if (huge_node_set(ret, node)) {
+ arena_chunk_dalloc_huge(arena, ret, size);
+ idalloctm(tsd, node, tcache, true);
+ return (NULL);
}
- /*
- * Copy at most size bytes (not size+extra), since the caller has no
- * expectation that the extra bytes will be reliably preserved.
- */
- copysize = (size < oldsize) ? size : oldsize;
+ /* Insert node into huge. */
+ malloc_mutex_lock(&arena->huge_mtx);
+ ql_elm_new(node, ql_link);
+ ql_tail_insert(&arena->huge, node, ql_link);
+ malloc_mutex_unlock(&arena->huge_mtx);
-#ifdef JEMALLOC_MREMAP
- /*
- * Use mremap(2) if this is a huge-->huge reallocation, and neither the
- * source nor the destination are in dss.
- */
- if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
- == false && chunk_in_dss(ret) == false))) {
- size_t newsize = huge_salloc(ret);
+ if (zero || (config_fill && unlikely(opt_zero))) {
+ if (!is_zeroed)
+ memset(ret, 0, size);
+ } else if (config_fill && unlikely(opt_junk_alloc))
+ memset(ret, 0xa5, size);
- /*
- * Remove ptr from the tree of huge allocations before
- * performing the remap operation, in order to avoid the
- * possibility of another thread acquiring that mapping before
- * this one removes it from the tree.
- */
- huge_dalloc(ptr, false);
- if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
- ret) == MAP_FAILED) {
- /*
- * Assuming no chunk management bugs in the allocator,
- * the only documented way an error can occur here is
- * if the application changed the map type for a
- * portion of the old allocation. This is firmly in
- * undefined behavior territory, so write a diagnostic
- * message, and optionally abort.
- */
- char buf[BUFERROR_BUF];
-
- buferror(get_errno(), buf, sizeof(buf));
- malloc_printf("<jemalloc>: Error in mremap(): %s\n",
- buf);
- if (opt_abort)
- abort();
- memcpy(ret, ptr, copysize);
- chunk_dealloc_mmap(ptr, oldsize);
- } else if (config_fill && zero == false && opt_junk && oldsize
- < newsize) {
- /*
- * mremap(2) clobbers the original mapping, so
- * junk/zero filling is not preserved. There is no
- * need to zero fill here, since any trailing
- * uninititialized memory is demand-zeroed by the
- * kernel, but junk filling must be redone.
- */
- memset(ret + oldsize, 0xa5, newsize - oldsize);
- }
- } else
-#endif
- {
- memcpy(ret, ptr, copysize);
- iqalloct(ptr, try_tcache_dalloc);
- }
return (ret);
}
@@ -198,12 +110,12 @@ static void
huge_dalloc_junk(void *ptr, size_t usize)
{
- if (config_fill && config_dss && opt_junk) {
+ if (config_fill && have_dss && unlikely(opt_junk_free)) {
/*
* Only bother junk filling if the chunk isn't about to be
* unmapped.
*/
- if (config_munmap == false || (config_dss && chunk_in_dss(ptr)))
+ if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
memset(ptr, 0x5a, usize);
}
}
@@ -213,135 +125,311 @@ huge_dalloc_junk(void *ptr, size_t usize)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif
-void
-huge_dalloc(void *ptr, bool unmap)
+static void
+huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
+ size_t usize_max, bool zero)
{
- extent_node_t *node, key;
+ size_t usize, usize_next;
+ extent_node_t *node;
+ arena_t *arena;
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+ bool pre_zeroed, post_zeroed;
+
+ /* Increase usize to incorporate extra. */
+ for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1))
+ <= oldsize; usize = usize_next)
+ ; /* Do nothing. */
+
+ if (oldsize == usize)
+ return;
+
+ node = huge_node_get(ptr);
+ arena = extent_node_arena_get(node);
+ pre_zeroed = extent_node_zeroed_get(node);
+
+ /* Fill if necessary (shrinking). */
+ if (oldsize > usize) {
+ size_t sdiff = oldsize - usize;
+ if (config_fill && unlikely(opt_junk_free)) {
+ memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
+ post_zeroed = false;
+ } else {
+ post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
+ ptr, CHUNK_CEILING(oldsize), usize, sdiff);
+ }
+ } else
+ post_zeroed = pre_zeroed;
+
+ malloc_mutex_lock(&arena->huge_mtx);
+ /* Update the size of the huge allocation. */
+ assert(extent_node_size_get(node) != usize);
+ extent_node_size_set(node, usize);
+ /* Update zeroed. */
+ extent_node_zeroed_set(node, post_zeroed);
+ malloc_mutex_unlock(&arena->huge_mtx);
+
+ arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);
+
+ /* Fill if necessary (growing). */
+ if (oldsize < usize) {
+ if (zero || (config_fill && unlikely(opt_zero))) {
+ if (!pre_zeroed) {
+ memset((void *)((uintptr_t)ptr + oldsize), 0,
+ usize - oldsize);
+ }
+ } else if (config_fill && unlikely(opt_junk_alloc)) {
+ memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
+ oldsize);
+ }
+ }
+}
- malloc_mutex_lock(&huge_mtx);
+static bool
+huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
+{
+ extent_node_t *node;
+ arena_t *arena;
+ chunk_hooks_t chunk_hooks;
+ size_t cdiff;
+ bool pre_zeroed, post_zeroed;
+
+ node = huge_node_get(ptr);
+ arena = extent_node_arena_get(node);
+ pre_zeroed = extent_node_zeroed_get(node);
+ chunk_hooks = chunk_hooks_get(arena);
+
+ assert(oldsize > usize);
+
+ /* Split excess chunks. */
+ cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
+ if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize),
+ CHUNK_CEILING(usize), cdiff, true, arena->ind))
+ return (true);
- /* Extract from tree of huge allocations. */
- key.addr = ptr;
- node = extent_tree_ad_search(&huge, &key);
- assert(node != NULL);
- assert(node->addr == ptr);
- extent_tree_ad_remove(&huge, node);
+ if (oldsize > usize) {
+ size_t sdiff = oldsize - usize;
+ if (config_fill && unlikely(opt_junk_free)) {
+ huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
+ sdiff);
+ post_zeroed = false;
+ } else {
+ post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
+ CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
+ CHUNK_CEILING(oldsize),
+ CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
+ }
+ } else
+ post_zeroed = pre_zeroed;
- if (config_stats) {
- stats_cactive_sub(node->size);
- huge_ndalloc++;
- huge_allocated -= node->size;
- }
+ malloc_mutex_lock(&arena->huge_mtx);
+ /* Update the size of the huge allocation. */
+ extent_node_size_set(node, usize);
+ /* Update zeroed. */
+ extent_node_zeroed_set(node, post_zeroed);
+ malloc_mutex_unlock(&arena->huge_mtx);
+
+ /* Zap the excess chunks. */
+ arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);
+
+ return (false);
+}
+
+static bool
+huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero) {
+ extent_node_t *node;
+ arena_t *arena;
+ bool is_zeroed_subchunk, is_zeroed_chunk;
+
+ node = huge_node_get(ptr);
+ arena = extent_node_arena_get(node);
+ malloc_mutex_lock(&arena->huge_mtx);
+ is_zeroed_subchunk = extent_node_zeroed_get(node);
+ malloc_mutex_unlock(&arena->huge_mtx);
+
+ /*
+ * Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so
+ * that it is possible to make correct junk/zero fill decisions below.
+ */
+ is_zeroed_chunk = zero;
- malloc_mutex_unlock(&huge_mtx);
+ if (arena_chunk_ralloc_huge_expand(arena, ptr, oldsize, usize,
+ &is_zeroed_chunk))
+ return (true);
- if (unmap)
- huge_dalloc_junk(node->addr, node->size);
+ malloc_mutex_lock(&arena->huge_mtx);
+ /* Update the size of the huge allocation. */
+ extent_node_size_set(node, usize);
+ malloc_mutex_unlock(&arena->huge_mtx);
- chunk_dealloc(node->addr, node->size, unmap);
+ if (zero || (config_fill && unlikely(opt_zero))) {
+ if (!is_zeroed_subchunk) {
+ memset((void *)((uintptr_t)ptr + oldsize), 0,
+ CHUNK_CEILING(oldsize) - oldsize);
+ }
+ if (!is_zeroed_chunk) {
+ memset((void *)((uintptr_t)ptr +
+ CHUNK_CEILING(oldsize)), 0, usize -
+ CHUNK_CEILING(oldsize));
+ }
+ } else if (config_fill && unlikely(opt_junk_alloc)) {
+ memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
+ oldsize);
+ }
- base_node_dealloc(node);
+ return (false);
}
-size_t
-huge_salloc(const void *ptr)
+bool
+huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
+ size_t usize_max, bool zero)
{
- size_t ret;
- extent_node_t *node, key;
- malloc_mutex_lock(&huge_mtx);
+ assert(s2u(oldsize) == oldsize);
- /* Extract from tree of huge allocations. */
- key.addr = __DECONST(void *, ptr);
- node = extent_tree_ad_search(&huge, &key);
- assert(node != NULL);
+ /* Both allocations must be huge to avoid a move. */
+ if (oldsize < chunksize || usize_max < chunksize)
+ return (true);
- ret = node->size;
+ if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
+ /* Attempt to expand the allocation in-place. */
+ if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max, zero))
+ return (false);
+ /* Try again, this time with usize_min. */
+ if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
+ CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(ptr,
+ oldsize, usize_min, zero))
+ return (false);
+ }
- malloc_mutex_unlock(&huge_mtx);
+ /*
+ * Avoid moving the allocation if the existing chunk size accommodates
+ * the new size.
+ */
+ if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
+ && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
+ huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max,
+ zero);
+ return (false);
+ }
- return (ret);
+ /* Attempt to shrink the allocation in-place. */
+ if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max))
+ return (huge_ralloc_no_move_shrink(ptr, oldsize, usize_max));
+ return (true);
}
-dss_prec_t
-huge_dss_prec_get(arena_t *arena)
+static void *
+huge_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
+ size_t alignment, bool zero, tcache_t *tcache)
{
- return (arena_dss_prec_get(choose_arena(arena)));
+ if (alignment <= chunksize)
+ return (huge_malloc(tsd, arena, usize, zero, tcache));
+ return (huge_palloc(tsd, arena, usize, alignment, zero, tcache));
}
-prof_ctx_t *
-huge_prof_ctx_get(const void *ptr)
+void *
+huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
+ size_t alignment, bool zero, tcache_t *tcache)
{
- prof_ctx_t *ret;
- extent_node_t *node, key;
-
- malloc_mutex_lock(&huge_mtx);
-
- /* Extract from tree of huge allocations. */
- key.addr = __DECONST(void *, ptr);
- node = extent_tree_ad_search(&huge, &key);
- assert(node != NULL);
+ void *ret;
+ size_t copysize;
- ret = node->prof_ctx;
+ /* Try to avoid moving the allocation. */
+ if (!huge_ralloc_no_move(ptr, oldsize, usize, usize, zero))
+ return (ptr);
- malloc_mutex_unlock(&huge_mtx);
+ /*
+ * usize and oldsize are different enough that we need to use a
+ * different size class. In that case, fall back to allocating new
+ * space and copying.
+ */
+ ret = huge_ralloc_move_helper(tsd, arena, usize, alignment, zero,
+ tcache);
+ if (ret == NULL)
+ return (NULL);
+ copysize = (usize < oldsize) ? usize : oldsize;
+ memcpy(ret, ptr, copysize);
+ isqalloc(tsd, ptr, oldsize, tcache);
return (ret);
}
void
-huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
+huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
- extent_node_t *node, key;
-
- malloc_mutex_lock(&huge_mtx);
-
- /* Extract from tree of huge allocations. */
- key.addr = __DECONST(void *, ptr);
- node = extent_tree_ad_search(&huge, &key);
- assert(node != NULL);
+ extent_node_t *node;
+ arena_t *arena;
+
+ node = huge_node_get(ptr);
+ arena = extent_node_arena_get(node);
+ huge_node_unset(ptr, node);
+ malloc_mutex_lock(&arena->huge_mtx);
+ ql_remove(&arena->huge, node, ql_link);
+ malloc_mutex_unlock(&arena->huge_mtx);
+
+ huge_dalloc_junk(extent_node_addr_get(node),
+ extent_node_size_get(node));
+ arena_chunk_dalloc_huge(extent_node_arena_get(node),
+ extent_node_addr_get(node), extent_node_size_get(node));
+ idalloctm(tsd, node, tcache, true);
+}
- node->prof_ctx = ctx;
+arena_t *
+huge_aalloc(const void *ptr)
+{
- malloc_mutex_unlock(&huge_mtx);
+ return (extent_node_arena_get(huge_node_get(ptr)));
}
-bool
-huge_boot(void)
+size_t
+huge_salloc(const void *ptr)
{
+ size_t size;
+ extent_node_t *node;
+ arena_t *arena;
- /* Initialize chunks data. */
- if (malloc_mutex_init(&huge_mtx))
- return (true);
- extent_tree_ad_new(&huge);
-
- if (config_stats) {
- huge_nmalloc = 0;
- huge_ndalloc = 0;
- huge_allocated = 0;
- }
+ node = huge_node_get(ptr);
+ arena = extent_node_arena_get(node);
+ malloc_mutex_lock(&arena->huge_mtx);
+ size = extent_node_size_get(node);
+ malloc_mutex_unlock(&arena->huge_mtx);
- return (false);
+ return (size);
}
-void
-huge_prefork(void)
+prof_tctx_t *
+huge_prof_tctx_get(const void *ptr)
{
+ prof_tctx_t *tctx;
+ extent_node_t *node;
+ arena_t *arena;
+
+ node = huge_node_get(ptr);
+ arena = extent_node_arena_get(node);
+ malloc_mutex_lock(&arena->huge_mtx);
+ tctx = extent_node_prof_tctx_get(node);
+ malloc_mutex_unlock(&arena->huge_mtx);
- malloc_mutex_prefork(&huge_mtx);
+ return (tctx);
}
void
-huge_postfork_parent(void)
+huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
{
+ extent_node_t *node;
+ arena_t *arena;
- malloc_mutex_postfork_parent(&huge_mtx);
+ node = huge_node_get(ptr);
+ arena = extent_node_arena_get(node);
+ malloc_mutex_lock(&arena->huge_mtx);
+ extent_node_prof_tctx_set(node, tctx);
+ malloc_mutex_unlock(&arena->huge_mtx);
}
void
-huge_postfork_child(void)
+huge_prof_tctx_reset(const void *ptr)
{
- malloc_mutex_postfork_child(&huge_mtx);
+ huge_prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
}
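The split of huge_ralloc_no_move() into *_expand, *_similar, and *_shrink helpers encodes a simple policy: both old and new sizes must be huge, growth beyond the current chunk span is attempted in place first, a resize within the same chunk span only adjusts metadata and fill, and shrinking splits off and purges the tail chunks. A hedged sketch of just that decision order, collapsing usize_min/usize_max into a single target and using an illustrative 2 MiB chunk size:

    #include <stddef.h>
    #include <stdio.h>

    #define CHUNK ((size_t)1 << 21)    /* Illustrative; the real value follows opt.lg_chunk. */
    #define CHUNK_CEIL(s) (((s) + CHUNK - 1) & ~(CHUNK - 1))

    typedef enum { RESIZE_MOVE, RESIZE_EXPAND, RESIZE_SAME, RESIZE_SHRINK } resize_t;

    /* Mirror of the size-based decisions in huge_ralloc_no_move(). */
    static resize_t
    classify(size_t oldsize, size_t usize)
    {
        if (oldsize < CHUNK || usize < CHUNK)
            return (RESIZE_MOVE);      /* Not huge-to-huge: must relocate. */
        if (CHUNK_CEIL(usize) > CHUNK_CEIL(oldsize))
            return (RESIZE_EXPAND);    /* Needs more chunks: try in-place expand. */
        if (CHUNK_CEIL(usize) == CHUNK_CEIL(oldsize))
            return (RESIZE_SAME);      /* Same chunk span: metadata and fill only. */
        return (RESIZE_SHRINK);        /* Fewer chunks: split and purge the tail. */
    }

    int
    main(void)
    {
        static const char *names[] = { "move", "expand", "same", "shrink" };

        printf("%s\n", names[classify(3 * CHUNK, 5 * CHUNK)]);      /* expand */
        printf("%s\n", names[classify(3 * CHUNK, 3 * CHUNK - 64)]); /* same   */
        printf("%s\n", names[classify(3 * CHUNK, CHUNK)]);          /* shrink */
        printf("%s\n", names[classify(CHUNK / 2, CHUNK)]);          /* move   */
        return (0);
    }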
diff --git a/deps/jemalloc/src/jemalloc.c b/deps/jemalloc/src/jemalloc.c
index 204778bc8..5a2d32406 100644
--- a/deps/jemalloc/src/jemalloc.c
+++ b/deps/jemalloc/src/jemalloc.c
@@ -4,12 +4,8 @@
/******************************************************************************/
/* Data. */
-malloc_tsd_data(, arenas, arena_t *, NULL)
-malloc_tsd_data(, thread_allocated, thread_allocated_t,
- THREAD_ALLOCATED_INITIALIZER)
-
/* Runtime configuration options. */
-const char *je_malloc_conf;
+const char *je_malloc_conf JEMALLOC_ATTR(weak);
bool opt_abort =
#ifdef JEMALLOC_DEBUG
true
@@ -17,30 +13,152 @@ bool opt_abort =
false
#endif
;
-bool opt_junk =
+const char *opt_junk =
+#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
+ "true"
+#else
+ "false"
+#endif
+ ;
+bool opt_junk_alloc =
+#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
+ true
+#else
+ false
+#endif
+ ;
+bool opt_junk_free =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
true
#else
false
#endif
;
+
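opt_junk is now a string with two derived booleans, so junk filling can be enabled for allocation (0xa5) and free (0x5a) independently; malloc_conf_init() later in this file accepts "true", "false", "alloc", or "free" for the "junk" option. A hedged example of selecting allocation-only junking at compile time through the (now weak) malloc_conf symbol; the exact symbol name follows the configured prefix, so in this bundled copy it is je_malloc_conf, and the same string can instead be supplied via the MALLOC_CONF environment variable:

    #include <stdlib.h>
    #include <string.h>

    /* Junk newly allocated memory only; equivalent to MALLOC_CONF=junk:alloc. */
    const char *malloc_conf = "junk:alloc";

    int
    main(void)
    {
        char *p = malloc(64);

        if (p != NULL) {
            /* In a fill-enabled build, *p starts out as 0xa5 bytes here. */
            memset(p, 0, 64);
            free(p);
        }
        return (0);
    }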
size_t opt_quarantine = ZU(0);
bool opt_redzone = false;
bool opt_utrace = false;
-bool opt_valgrind = false;
bool opt_xmalloc = false;
bool opt_zero = false;
size_t opt_narenas = 0;
-unsigned ncpus;
+/* Initialized to true if the process is running inside Valgrind. */
+bool in_valgrind;
-malloc_mutex_t arenas_lock;
-arena_t **arenas;
-unsigned narenas_total;
-unsigned narenas_auto;
+unsigned ncpus;
-/* Set to true once the allocator has been initialized. */
-static bool malloc_initialized = false;
+/* Protects arenas initialization (arenas, narenas_total). */
+static malloc_mutex_t arenas_lock;
+/*
+ * Arenas that are used to service external requests. Not all elements of the
+ * arenas array are necessarily used; arenas are created lazily as needed.
+ *
+ * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
+ * arenas. arenas[narenas_auto..narenas_total) are only used if the application
+ * takes some action to create them and allocate from them.
+ */
+static arena_t **arenas;
+static unsigned narenas_total;
+static arena_t *a0; /* arenas[0]; read-only after initialization. */
+static unsigned narenas_auto; /* Read-only after initialization. */
+
+typedef enum {
+ malloc_init_uninitialized = 3,
+ malloc_init_a0_initialized = 2,
+ malloc_init_recursible = 1,
+ malloc_init_initialized = 0 /* Common case --> jnz. */
+} malloc_init_t;
+static malloc_init_t malloc_init_state = malloc_init_uninitialized;
+
+JEMALLOC_ALIGNED(CACHELINE)
+const size_t index2size_tab[NSIZES] = {
+#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
+ ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
+ SIZE_CLASSES
+#undef SC
+};
+
+JEMALLOC_ALIGNED(CACHELINE)
+const uint8_t size2index_tab[] = {
+#if LG_TINY_MIN == 0
+#warning "Dangerous LG_TINY_MIN"
+#define S2B_0(i) i,
+#elif LG_TINY_MIN == 1
+#warning "Dangerous LG_TINY_MIN"
+#define S2B_1(i) i,
+#elif LG_TINY_MIN == 2
+#warning "Dangerous LG_TINY_MIN"
+#define S2B_2(i) i,
+#elif LG_TINY_MIN == 3
+#define S2B_3(i) i,
+#elif LG_TINY_MIN == 4
+#define S2B_4(i) i,
+#elif LG_TINY_MIN == 5
+#define S2B_5(i) i,
+#elif LG_TINY_MIN == 6
+#define S2B_6(i) i,
+#elif LG_TINY_MIN == 7
+#define S2B_7(i) i,
+#elif LG_TINY_MIN == 8
+#define S2B_8(i) i,
+#elif LG_TINY_MIN == 9
+#define S2B_9(i) i,
+#elif LG_TINY_MIN == 10
+#define S2B_10(i) i,
+#elif LG_TINY_MIN == 11
+#define S2B_11(i) i,
+#else
+#error "Unsupported LG_TINY_MIN"
+#endif
+#if LG_TINY_MIN < 1
+#define S2B_1(i) S2B_0(i) S2B_0(i)
+#endif
+#if LG_TINY_MIN < 2
+#define S2B_2(i) S2B_1(i) S2B_1(i)
+#endif
+#if LG_TINY_MIN < 3
+#define S2B_3(i) S2B_2(i) S2B_2(i)
+#endif
+#if LG_TINY_MIN < 4
+#define S2B_4(i) S2B_3(i) S2B_3(i)
+#endif
+#if LG_TINY_MIN < 5
+#define S2B_5(i) S2B_4(i) S2B_4(i)
+#endif
+#if LG_TINY_MIN < 6
+#define S2B_6(i) S2B_5(i) S2B_5(i)
+#endif
+#if LG_TINY_MIN < 7
+#define S2B_7(i) S2B_6(i) S2B_6(i)
+#endif
+#if LG_TINY_MIN < 8
+#define S2B_8(i) S2B_7(i) S2B_7(i)
+#endif
+#if LG_TINY_MIN < 9
+#define S2B_9(i) S2B_8(i) S2B_8(i)
+#endif
+#if LG_TINY_MIN < 10
+#define S2B_10(i) S2B_9(i) S2B_9(i)
+#endif
+#if LG_TINY_MIN < 11
+#define S2B_11(i) S2B_10(i) S2B_10(i)
+#endif
+#define S2B_no(i)
+#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
+ S2B_##lg_delta_lookup(index)
+ SIZE_CLASSES
+#undef S2B_3
+#undef S2B_4
+#undef S2B_5
+#undef S2B_6
+#undef S2B_7
+#undef S2B_8
+#undef S2B_9
+#undef S2B_10
+#undef S2B_11
+#undef S2B_no
+#undef SC
+};
#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
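The index2size_tab and size2index_tab initializers above expand from the same SC(index, lg_grp, lg_delta, ndelta, ...) entries that size_classes.sh emits: each class's size is (1 << lg_grp) + (ndelta << lg_delta), a base group size plus a number of equal delta steps. For instance, an entry with lg_grp = 6, lg_delta = 4, ndelta = 3 describes a 64 + 3*16 = 112 byte class. A minimal sketch of the same arithmetic (the tuples are illustrative, not the generated table for any particular build):

    #include <stddef.h>
    #include <stdio.h>

    /* Same arithmetic as the SC() expansion: base group plus delta steps. */
    static size_t
    sc_size(unsigned lg_grp, unsigned lg_delta, unsigned ndelta)
    {
        return (((size_t)1 << lg_grp) + ((size_t)ndelta << lg_delta));
    }

    int
    main(void)
    {
        printf("%zu\n", sc_size(6, 4, 1));    /* 64 + 16 = 80   */
        printf("%zu\n", sc_size(6, 4, 3));    /* 64 + 48 = 112  */
        printf("%zu\n", sc_size(7, 5, 2));    /* 128 + 64 = 192 */
        return (0);
    }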
@@ -57,14 +175,28 @@ static bool malloc_initializer = NO_INITIALIZER;
/* Used to avoid initialization races. */
#ifdef _WIN32
+#if _WIN32_WINNT >= 0x0600
+static malloc_mutex_t init_lock = SRWLOCK_INIT;
+#else
static malloc_mutex_t init_lock;
+static bool init_lock_initialized = false;
JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void)
{
- malloc_mutex_init(&init_lock);
+ /* If another constructor in the same binary is using mallctl to
+ * e.g. setup chunk hooks, it may end up running before this one,
+ * and malloc_init_hard will crash trying to lock the uninitialized
+ * lock. So we force an initialization of the lock in
+ * malloc_init_hard as well. We don't try to care about atomicity
+ * of the accesses to the init_lock_initialized boolean, since it
+ * really only matters early in the process creation, before any
+ * separate thread normally starts doing anything. */
+ if (!init_lock_initialized)
+ malloc_mutex_init(&init_lock);
+ init_lock_initialized = true;
}
#ifdef _MSC_VER
@@ -72,7 +204,7 @@ _init_init_lock(void)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif
-
+#endif
#else
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
#endif
@@ -85,7 +217,7 @@ typedef struct {
#ifdef JEMALLOC_UTRACE
# define UTRACE(a, b, c) do { \
- if (opt_utrace) { \
+ if (unlikely(opt_utrace)) { \
int utrace_serrno = errno; \
malloc_utrace_t ut; \
ut.p = (a); \
@@ -105,6 +237,7 @@ typedef struct {
* definition.
*/
+static bool malloc_init_hard_a0(void);
static bool malloc_init_hard(void);
/******************************************************************************/
@@ -112,35 +245,333 @@ static bool malloc_init_hard(void);
* Begin miscellaneous support functions.
*/
+JEMALLOC_ALWAYS_INLINE_C bool
+malloc_initialized(void)
+{
+
+ return (malloc_init_state == malloc_init_initialized);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void
+malloc_thread_init(void)
+{
+
+ /*
+ * TSD initialization can't be safely done as a side effect of
+ * deallocation, because it is possible for a thread to do nothing but
+ * deallocate its TLS data via free(), in which case writing to TLS
+ * would cause write-after-free memory corruption. The quarantine
+ * facility *only* gets used as a side effect of deallocation, so make
+ * a best effort attempt at initializing its TSD by hooking all
+ * allocation events.
+ */
+ if (config_fill && unlikely(opt_quarantine))
+ quarantine_alloc_hook();
+}
+
+JEMALLOC_ALWAYS_INLINE_C bool
+malloc_init_a0(void)
+{
+
+ if (unlikely(malloc_init_state == malloc_init_uninitialized))
+ return (malloc_init_hard_a0());
+ return (false);
+}
+
+JEMALLOC_ALWAYS_INLINE_C bool
+malloc_init(void)
+{
+
+ if (unlikely(!malloc_initialized()) && malloc_init_hard())
+ return (true);
+ malloc_thread_init();
+
+ return (false);
+}
+
+/*
+ * The a0*() functions are used instead of i[mcd]alloc() in situations that
+ * cannot tolerate TLS variable access.
+ */
+
+arena_t *
+a0get(void)
+{
+
+ assert(a0 != NULL);
+ return (a0);
+}
+
+static void *
+a0ialloc(size_t size, bool zero, bool is_metadata)
+{
+
+ if (unlikely(malloc_init_a0()))
+ return (NULL);
+
+ return (iallocztm(NULL, size, zero, false, is_metadata, a0get()));
+}
+
+static void
+a0idalloc(void *ptr, bool is_metadata)
+{
+
+ idalloctm(NULL, ptr, false, is_metadata);
+}
+
+void *
+a0malloc(size_t size)
+{
+
+ return (a0ialloc(size, false, true));
+}
+
+void
+a0dalloc(void *ptr)
+{
+
+ a0idalloc(ptr, true);
+}
+
+/*
+ * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
+ * situations that cannot tolerate TLS variable access (TLS allocation and very
+ * early internal data structure initialization).
+ */
+
+void *
+bootstrap_malloc(size_t size)
+{
+
+ if (unlikely(size == 0))
+ size = 1;
+
+ return (a0ialloc(size, false, false));
+}
+
+void *
+bootstrap_calloc(size_t num, size_t size)
+{
+ size_t num_size;
+
+ num_size = num * size;
+ if (unlikely(num_size == 0)) {
+ assert(num == 0 || size == 0);
+ num_size = 1;
+ }
+
+ return (a0ialloc(num_size, true, false));
+}
+
+void
+bootstrap_free(void *ptr)
+{
+
+ if (unlikely(ptr == NULL))
+ return;
+
+ a0idalloc(ptr, false);
+}
+
/* Create a new arena and insert it into the arenas array at index ind. */
+static arena_t *
+arena_init_locked(unsigned ind)
+{
+ arena_t *arena;
+
+ /* Expand arenas if necessary. */
+ assert(ind <= narenas_total);
+ if (ind > MALLOCX_ARENA_MAX)
+ return (NULL);
+ if (ind == narenas_total) {
+ unsigned narenas_new = narenas_total + 1;
+ arena_t **arenas_new =
+ (arena_t **)a0malloc(CACHELINE_CEILING(narenas_new *
+ sizeof(arena_t *)));
+ if (arenas_new == NULL)
+ return (NULL);
+ memcpy(arenas_new, arenas, narenas_total * sizeof(arena_t *));
+ arenas_new[ind] = NULL;
+ /*
+ * Deallocate only if arenas came from a0malloc() (not
+ * base_alloc()).
+ */
+ if (narenas_total != narenas_auto)
+ a0dalloc(arenas);
+ arenas = arenas_new;
+ narenas_total = narenas_new;
+ }
+
+ /*
+ * Another thread may have already initialized arenas[ind] if it's an
+ * auto arena.
+ */
+ arena = arenas[ind];
+ if (arena != NULL) {
+ assert(ind < narenas_auto);
+ return (arena);
+ }
+
+ /* Actually initialize the arena. */
+ arena = arenas[ind] = arena_new(ind);
+ return (arena);
+}
+
arena_t *
-arenas_extend(unsigned ind)
+arena_init(unsigned ind)
{
- arena_t *ret;
+ arena_t *arena;
+
+ malloc_mutex_lock(&arenas_lock);
+ arena = arena_init_locked(ind);
+ malloc_mutex_unlock(&arenas_lock);
+ return (arena);
+}
+
+unsigned
+narenas_total_get(void)
+{
+ unsigned narenas;
+
+ malloc_mutex_lock(&arenas_lock);
+ narenas = narenas_total;
+ malloc_mutex_unlock(&arenas_lock);
+
+ return (narenas);
+}
+
+static void
+arena_bind_locked(tsd_t *tsd, unsigned ind)
+{
+ arena_t *arena;
+
+ arena = arenas[ind];
+ arena->nthreads++;
+
+ if (tsd_nominal(tsd))
+ tsd_arena_set(tsd, arena);
+}
+
+static void
+arena_bind(tsd_t *tsd, unsigned ind)
+{
+
+ malloc_mutex_lock(&arenas_lock);
+ arena_bind_locked(tsd, ind);
+ malloc_mutex_unlock(&arenas_lock);
+}
+
+void
+arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
+{
+ arena_t *oldarena, *newarena;
+
+ malloc_mutex_lock(&arenas_lock);
+ oldarena = arenas[oldind];
+ newarena = arenas[newind];
+ oldarena->nthreads--;
+ newarena->nthreads++;
+ malloc_mutex_unlock(&arenas_lock);
+ tsd_arena_set(tsd, newarena);
+}
+
+unsigned
+arena_nbound(unsigned ind)
+{
+ unsigned nthreads;
+
+ malloc_mutex_lock(&arenas_lock);
+ nthreads = arenas[ind]->nthreads;
+ malloc_mutex_unlock(&arenas_lock);
+ return (nthreads);
+}
+
+static void
+arena_unbind(tsd_t *tsd, unsigned ind)
+{
+ arena_t *arena;
+
+ malloc_mutex_lock(&arenas_lock);
+ arena = arenas[ind];
+ arena->nthreads--;
+ malloc_mutex_unlock(&arenas_lock);
+ tsd_arena_set(tsd, NULL);
+}
- ret = (arena_t *)base_alloc(sizeof(arena_t));
- if (ret != NULL && arena_new(ret, ind) == false) {
- arenas[ind] = ret;
- return (ret);
+arena_t *
+arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing)
+{
+ arena_t *arena;
+ arena_t **arenas_cache = tsd_arenas_cache_get(tsd);
+ unsigned narenas_cache = tsd_narenas_cache_get(tsd);
+ unsigned narenas_actual = narenas_total_get();
+
+ /* Deallocate old cache if it's too small. */
+ if (arenas_cache != NULL && narenas_cache < narenas_actual) {
+ a0dalloc(arenas_cache);
+ arenas_cache = NULL;
+ narenas_cache = 0;
+ tsd_arenas_cache_set(tsd, arenas_cache);
+ tsd_narenas_cache_set(tsd, narenas_cache);
+ }
+
+ /* Allocate cache if it's missing. */
+ if (arenas_cache == NULL) {
+ bool *arenas_cache_bypassp = tsd_arenas_cache_bypassp_get(tsd);
+ assert(ind < narenas_actual || !init_if_missing);
+ narenas_cache = (ind < narenas_actual) ? narenas_actual : ind+1;
+
+ if (tsd_nominal(tsd) && !*arenas_cache_bypassp) {
+ *arenas_cache_bypassp = true;
+ arenas_cache = (arena_t **)a0malloc(sizeof(arena_t *) *
+ narenas_cache);
+ *arenas_cache_bypassp = false;
+ }
+ if (arenas_cache == NULL) {
+ /*
+ * This function must always tell the truth, even if
+ * it's slow, so don't let OOM, thread cleanup (note
+ * tsd_nominal check), nor recursive allocation
+ * avoidance (note arenas_cache_bypass check) get in the
+ * way.
+ */
+ if (ind >= narenas_actual)
+ return (NULL);
+ malloc_mutex_lock(&arenas_lock);
+ arena = arenas[ind];
+ malloc_mutex_unlock(&arenas_lock);
+ return (arena);
+ }
+ assert(tsd_nominal(tsd) && !*arenas_cache_bypassp);
+ tsd_arenas_cache_set(tsd, arenas_cache);
+ tsd_narenas_cache_set(tsd, narenas_cache);
}
- /* Only reached if there is an OOM error. */
/*
- * OOM here is quite inconvenient to propagate, since dealing with it
- * would require a check for failure in the fast path. Instead, punt
- * by using arenas[0]. In practice, this is an extremely unlikely
- * failure.
+ * Copy to cache. It's possible that the actual number of arenas has
+ * increased since narenas_total_get() was called above, but that causes
+ * no correctness issues unless two threads concurrently execute the
+ * arenas.extend mallctl, which we trust mallctl synchronization to
+ * prevent.
*/
- malloc_write("<jemalloc>: Error initializing arena\n");
- if (opt_abort)
- abort();
+ malloc_mutex_lock(&arenas_lock);
+ memcpy(arenas_cache, arenas, sizeof(arena_t *) * narenas_actual);
+ malloc_mutex_unlock(&arenas_lock);
+ if (narenas_cache > narenas_actual) {
+ memset(&arenas_cache[narenas_actual], 0, sizeof(arena_t *) *
+ (narenas_cache - narenas_actual));
+ }
- return (arenas[0]);
+ /* Read the refreshed cache, and init the arena if necessary. */
+ arena = arenas_cache[ind];
+ if (init_if_missing && arena == NULL)
+ arena = arenas_cache[ind] = arena_init(ind);
+ return (arena);
}
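arena_get_hard() above exists so the common arena lookup never touches arenas_lock: each thread keeps its own snapshot of the arenas array in TSD and only falls back here to grow or refresh it. The machinery is ultimately driven by the public arena controls; a hedged sketch of creating an extra arena and pinning the calling thread to it, which is what exercises arena_init() and, via the thread.arena mallctl shown earlier, arena_migrate():

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
        unsigned arena_ind;
        size_t sz = sizeof(arena_ind);

        /* Create a fresh, application-managed arena; its index is returned. */
        if (mallctl("arenas.extend", &arena_ind, &sz, NULL, 0))
            return (1);

        /* Rebind this thread: subsequent allocations use the new arena. */
        if (mallctl("thread.arena", NULL, NULL, &arena_ind, sizeof(arena_ind)))
            return (1);

        printf("now allocating from arena %u\n", arena_ind);
        return (0);
    }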
-/* Slow path, called only by choose_arena(). */
+/* Slow path, called only by arena_choose(). */
arena_t *
-choose_arena_hard(void)
+arena_choose_hard(tsd_t *tsd)
{
arena_t *ret;
@@ -150,7 +581,7 @@ choose_arena_hard(void)
choose = 0;
first_null = narenas_auto;
malloc_mutex_lock(&arenas_lock);
- assert(arenas[0] != NULL);
+ assert(a0get() != NULL);
for (i = 1; i < narenas_auto; i++) {
if (arenas[i] != NULL) {
/*
@@ -183,22 +614,73 @@ choose_arena_hard(void)
ret = arenas[choose];
} else {
/* Initialize a new arena. */
- ret = arenas_extend(first_null);
+ choose = first_null;
+ ret = arena_init_locked(choose);
+ if (ret == NULL) {
+ malloc_mutex_unlock(&arenas_lock);
+ return (NULL);
+ }
}
- ret->nthreads++;
+ arena_bind_locked(tsd, choose);
malloc_mutex_unlock(&arenas_lock);
} else {
- ret = arenas[0];
- malloc_mutex_lock(&arenas_lock);
- ret->nthreads++;
- malloc_mutex_unlock(&arenas_lock);
+ ret = a0get();
+ arena_bind(tsd, 0);
}
- arenas_tsd_set(&ret);
-
return (ret);
}
+void
+thread_allocated_cleanup(tsd_t *tsd)
+{
+
+ /* Do nothing. */
+}
+
+void
+thread_deallocated_cleanup(tsd_t *tsd)
+{
+
+ /* Do nothing. */
+}
+
+void
+arena_cleanup(tsd_t *tsd)
+{
+ arena_t *arena;
+
+ arena = tsd_arena_get(tsd);
+ if (arena != NULL)
+ arena_unbind(tsd, arena->ind);
+}
+
+void
+arenas_cache_cleanup(tsd_t *tsd)
+{
+ arena_t **arenas_cache;
+
+ arenas_cache = tsd_arenas_cache_get(tsd);
+ if (arenas_cache != NULL) {
+ tsd_arenas_cache_set(tsd, NULL);
+ a0dalloc(arenas_cache);
+ }
+}
+
+void
+narenas_cache_cleanup(tsd_t *tsd)
+{
+
+ /* Do nothing. */
+}
+
+void
+arenas_cache_bypass_cleanup(tsd_t *tsd)
+{
+
+ /* Do nothing. */
+}
+
static void
stats_print_atexit(void)
{
@@ -243,6 +725,19 @@ stats_print_atexit(void)
* Begin initialization functions.
*/
+#ifndef JEMALLOC_HAVE_SECURE_GETENV
+static char *
+secure_getenv(const char *name)
+{
+
+# ifdef JEMALLOC_HAVE_ISSETUGID
+ if (issetugid() != 0)
+ return (NULL);
+# endif
+ return (getenv(name));
+}
+#endif
+
static unsigned
malloc_ncpus(void)
{
@@ -258,44 +753,6 @@ malloc_ncpus(void)
return ((result == -1) ? 1 : (unsigned)result);
}
-void
-arenas_cleanup(void *arg)
-{
- arena_t *arena = *(arena_t **)arg;
-
- malloc_mutex_lock(&arenas_lock);
- arena->nthreads--;
- malloc_mutex_unlock(&arenas_lock);
-}
-
-JEMALLOC_ALWAYS_INLINE_C void
-malloc_thread_init(void)
-{
-
- /*
- * TSD initialization can't be safely done as a side effect of
- * deallocation, because it is possible for a thread to do nothing but
- * deallocate its TLS data via free(), in which case writing to TLS
- * would cause write-after-free memory corruption. The quarantine
- * facility *only* gets used as a side effect of deallocation, so make
- * a best effort attempt at initializing its TSD by hooking all
- * allocation events.
- */
- if (config_fill && opt_quarantine)
- quarantine_alloc_hook();
-}
-
-JEMALLOC_ALWAYS_INLINE_C bool
-malloc_init(void)
-{
-
- if (malloc_initialized == false && malloc_init_hard())
- return (true);
- malloc_thread_init();
-
- return (false);
-}
-
static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
char const **v_p, size_t *vlen_p)
@@ -305,7 +762,7 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
*k_p = opts;
- for (accept = false; accept == false;) {
+ for (accept = false; !accept;) {
switch (*opts) {
case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
@@ -340,7 +797,7 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
}
}
- for (accept = false; accept == false;) {
+ for (accept = false; !accept;) {
switch (*opts) {
case ',':
opts++;
@@ -394,14 +851,16 @@ malloc_conf_init(void)
* valgrind option remains in jemalloc 3.x for compatibility reasons.
*/
if (config_valgrind) {
- opt_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
- if (config_fill && opt_valgrind) {
- opt_junk = false;
- assert(opt_zero == false);
+ in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
+ if (config_fill && unlikely(in_valgrind)) {
+ opt_junk = "false";
+ opt_junk_alloc = false;
+ opt_junk_free = false;
+ assert(!opt_zero);
opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
opt_redzone = true;
}
- if (config_tcache && opt_valgrind)
+ if (config_tcache && unlikely(in_valgrind))
opt_tcache = false;
}
@@ -441,7 +900,7 @@ malloc_conf_init(void)
if (linklen == -1) {
/* No configuration specified. */
linklen = 0;
- /* restore errno */
+ /* Restore errno. */
set_errno(saved_errno);
}
#endif
@@ -457,7 +916,7 @@ malloc_conf_init(void)
#endif
;
- if ((opts = getenv(envname)) != NULL) {
+ if ((opts = secure_getenv(envname)) != NULL) {
/*
* Do nothing; opts is already initialized to
* the value of the MALLOC_CONF environment
@@ -475,27 +934,28 @@ malloc_conf_init(void)
opts = buf;
}
- while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
- &vlen) == false) {
-#define CONF_HANDLE_BOOL(o, n) \
- if (sizeof(n)-1 == klen && strncmp(n, k, \
- klen) == 0) { \
- if (strncmp("true", v, vlen) == 0 && \
- vlen == sizeof("true")-1) \
+ while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
+ &vlen)) {
+#define CONF_MATCH(n) \
+ (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
+#define CONF_MATCH_VALUE(n) \
+ (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
+#define CONF_HANDLE_BOOL(o, n, cont) \
+ if (CONF_MATCH(n)) { \
+ if (CONF_MATCH_VALUE("true")) \
o = true; \
- else if (strncmp("false", v, vlen) == \
- 0 && vlen == sizeof("false")-1) \
+ else if (CONF_MATCH_VALUE("false")) \
o = false; \
else { \
malloc_conf_error( \
"Invalid conf value", \
k, klen, v, vlen); \
} \
- continue; \
+ if (cont) \
+ continue; \
}
#define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \
- if (sizeof(n)-1 == klen && strncmp(n, k, \
- klen) == 0) { \
+ if (CONF_MATCH(n)) { \
uintmax_t um; \
char *end; \
\
@@ -507,15 +967,15 @@ malloc_conf_init(void)
"Invalid conf value", \
k, klen, v, vlen); \
} else if (clip) { \
- if (min != 0 && um < min) \
- o = min; \
- else if (um > max) \
- o = max; \
+ if ((min) != 0 && um < (min)) \
+ o = (min); \
+ else if (um > (max)) \
+ o = (max); \
else \
o = um; \
} else { \
- if ((min != 0 && um < min) || \
- um > max) { \
+ if (((min) != 0 && um < (min)) \
+ || um > (max)) { \
malloc_conf_error( \
"Out-of-range " \
"conf value", \
@@ -526,8 +986,7 @@ malloc_conf_init(void)
continue; \
}
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
- if (sizeof(n)-1 == klen && strncmp(n, k, \
- klen) == 0) { \
+ if (CONF_MATCH(n)) { \
long l; \
char *end; \
\
@@ -538,8 +997,8 @@ malloc_conf_init(void)
malloc_conf_error( \
"Invalid conf value", \
k, klen, v, vlen); \
- } else if (l < (ssize_t)min || l > \
- (ssize_t)max) { \
+ } else if (l < (ssize_t)(min) || l > \
+ (ssize_t)(max)) { \
malloc_conf_error( \
"Out-of-range conf value", \
k, klen, v, vlen); \
@@ -548,8 +1007,7 @@ malloc_conf_init(void)
continue; \
}
#define CONF_HANDLE_CHAR_P(o, n, d) \
- if (sizeof(n)-1 == klen && strncmp(n, k, \
- klen) == 0) { \
+ if (CONF_MATCH(n)) { \
size_t cpylen = (vlen <= \
sizeof(o)-1) ? vlen : \
sizeof(o)-1; \
@@ -558,17 +1016,18 @@ malloc_conf_init(void)
continue; \
}
- CONF_HANDLE_BOOL(opt_abort, "abort")
+ CONF_HANDLE_BOOL(opt_abort, "abort", true)
/*
- * Chunks always require at least one header page, plus
- * one data page in the absence of redzones, or three
- * pages in the presence of redzones. In order to
- * simplify options processing, fix the limit based on
- * config_fill.
+ * Chunks always require at least one header page,
+ * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and
+ * possibly an additional page in the presence of
+ * redzones. In order to simplify options processing,
+ * use a conservative bound that accommodates all these
+ * constraints.
*/
CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
- (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1,
- true)
+ LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
+ (sizeof(size_t) << 3) - 1, true)
if (strncmp("dss", k, klen) == 0) {
int i;
bool match = false;
@@ -587,7 +1046,7 @@ malloc_conf_init(void)
}
}
}
- if (match == false) {
+ if (!match) {
malloc_conf_error("Invalid conf value",
k, klen, v, vlen);
}
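
Aside: every option handled in this function can be supplied through the MALLOC_CONF environment variable or a compile-time malloc_conf string in the application. A short usage sketch; note that this vendored copy is built with a je_ symbol prefix, in which case the global is named je_malloc_conf instead:

    /* Compile-time defaults, parsed by malloc_conf_init() at bootstrap. */
    const char *malloc_conf = "lg_chunk:21,junk:alloc,tcache:false";

    /* Run-time equivalent (shell): MALLOC_CONF="lg_chunk:21,junk:alloc" ./app */
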
@@ -597,47 +1056,87 @@ malloc_conf_init(void)
SIZE_T_MAX, false)
CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
-1, (sizeof(size_t) << 3) - 1)
- CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
+ CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
if (config_fill) {
- CONF_HANDLE_BOOL(opt_junk, "junk")
+ if (CONF_MATCH("junk")) {
+ if (CONF_MATCH_VALUE("true")) {
+ opt_junk = "true";
+ opt_junk_alloc = opt_junk_free =
+ true;
+ } else if (CONF_MATCH_VALUE("false")) {
+ opt_junk = "false";
+ opt_junk_alloc = opt_junk_free =
+ false;
+ } else if (CONF_MATCH_VALUE("alloc")) {
+ opt_junk = "alloc";
+ opt_junk_alloc = true;
+ opt_junk_free = false;
+ } else if (CONF_MATCH_VALUE("free")) {
+ opt_junk = "free";
+ opt_junk_alloc = false;
+ opt_junk_free = true;
+ } else {
+ malloc_conf_error(
+ "Invalid conf value", k,
+ klen, v, vlen);
+ }
+ continue;
+ }
CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
0, SIZE_T_MAX, false)
- CONF_HANDLE_BOOL(opt_redzone, "redzone")
- CONF_HANDLE_BOOL(opt_zero, "zero")
+ CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
+ CONF_HANDLE_BOOL(opt_zero, "zero", true)
}
if (config_utrace) {
- CONF_HANDLE_BOOL(opt_utrace, "utrace")
- }
- if (config_valgrind) {
- CONF_HANDLE_BOOL(opt_valgrind, "valgrind")
+ CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
}
if (config_xmalloc) {
- CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
+ CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
}
if (config_tcache) {
- CONF_HANDLE_BOOL(opt_tcache, "tcache")
+ CONF_HANDLE_BOOL(opt_tcache, "tcache",
+ !config_valgrind || !in_valgrind)
+ if (CONF_MATCH("tcache")) {
+ assert(config_valgrind && in_valgrind);
+ if (opt_tcache) {
+ opt_tcache = false;
+ malloc_conf_error(
+ "tcache cannot be enabled "
+ "while running inside Valgrind",
+ k, klen, v, vlen);
+ }
+ continue;
+ }
CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
"lg_tcache_max", -1,
(sizeof(size_t) << 3) - 1)
}
if (config_prof) {
- CONF_HANDLE_BOOL(opt_prof, "prof")
+ CONF_HANDLE_BOOL(opt_prof, "prof", true)
CONF_HANDLE_CHAR_P(opt_prof_prefix,
"prof_prefix", "jeprof")
- CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
- CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
+ CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
+ true)
+ CONF_HANDLE_BOOL(opt_prof_thread_active_init,
+ "prof_thread_active_init", true)
+ CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
"lg_prof_sample", 0,
- (sizeof(uint64_t) << 3) - 1)
- CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
+ (sizeof(uint64_t) << 3) - 1, true)
+ CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
+ true)
CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
"lg_prof_interval", -1,
(sizeof(uint64_t) << 3) - 1)
- CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
- CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
- CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
+ CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
+ true)
+ CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
+ true)
+ CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
+ true)
}
malloc_conf_error("Invalid conf pair", k, klen, v,
vlen);
+#undef CONF_MATCH
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
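
Aside: the "junk" option above now accepts four values instead of a plain boolean. A small standalone sketch of the same mapping, using a hypothetical helper and flag names:

    #include <stdbool.h>
    #include <string.h>

    /*
     * Mirrors the table above: "true"/"false" set both flags, "alloc"
     * junk-fills only on allocation, "free" only on deallocation.
     * Returns true on an invalid value.
     */
    static bool
    junk_conf_parse(const char *v, bool *junk_alloc, bool *junk_free)
    {
        if (strcmp(v, "true") == 0)
            *junk_alloc = *junk_free = true;
        else if (strcmp(v, "false") == 0)
            *junk_alloc = *junk_free = false;
        else if (strcmp(v, "alloc") == 0) {
            *junk_alloc = true;
            *junk_free = false;
        } else if (strcmp(v, "free") == 0) {
            *junk_alloc = false;
            *junk_free = true;
        } else
            return (true);
        return (false);
    }
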
@@ -646,41 +1145,44 @@ malloc_conf_init(void)
}
}
+/* init_lock must be held. */
static bool
-malloc_init_hard(void)
+malloc_init_hard_needed(void)
{
- arena_t *init_arenas[1];
- malloc_mutex_lock(&init_lock);
- if (malloc_initialized || IS_INITIALIZER) {
+ if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
+ malloc_init_recursible)) {
/*
* Another thread initialized the allocator before this one
* acquired init_lock, or this thread is the initializing
* thread, and it is recursively allocating.
*/
- malloc_mutex_unlock(&init_lock);
return (false);
}
#ifdef JEMALLOC_THREADED_INIT
- if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
+ if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
/* Busy-wait until the initializing thread completes. */
do {
malloc_mutex_unlock(&init_lock);
CPU_SPINWAIT;
malloc_mutex_lock(&init_lock);
- } while (malloc_initialized == false);
- malloc_mutex_unlock(&init_lock);
+ } while (!malloc_initialized());
return (false);
}
#endif
+ return (true);
+}
+
+/* init_lock must be held. */
+static bool
+malloc_init_hard_a0_locked(void)
+{
+
malloc_initializer = INITIALIZER;
- malloc_tsd_boot();
if (config_prof)
prof_boot0();
-
malloc_conf_init();
-
if (opt_stats_print) {
/* Print statistics at exit. */
if (atexit(stats_print_atexit) != 0) {
@@ -689,94 +1191,64 @@ malloc_init_hard(void)
abort();
}
}
-
- if (base_boot()) {
- malloc_mutex_unlock(&init_lock);
+ if (base_boot())
return (true);
- }
-
- if (chunk_boot()) {
- malloc_mutex_unlock(&init_lock);
+ if (chunk_boot())
return (true);
- }
-
- if (ctl_boot()) {
- malloc_mutex_unlock(&init_lock);
+ if (ctl_boot())
return (true);
- }
-
if (config_prof)
prof_boot1();
-
- arena_boot();
-
- if (config_tcache && tcache_boot0()) {
- malloc_mutex_unlock(&init_lock);
+ if (arena_boot())
return (true);
- }
-
- if (huge_boot()) {
- malloc_mutex_unlock(&init_lock);
+ if (config_tcache && tcache_boot())
return (true);
- }
-
- if (malloc_mutex_init(&arenas_lock)) {
- malloc_mutex_unlock(&init_lock);
+ if (malloc_mutex_init(&arenas_lock))
return (true);
- }
-
/*
* Create enough scaffolding to allow recursive allocation in
* malloc_ncpus().
*/
narenas_total = narenas_auto = 1;
- arenas = init_arenas;
+ arenas = &a0;
memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
-
/*
* Initialize one arena here. The rest are lazily created in
- * choose_arena_hard().
+ * arena_choose_hard().
*/
- arenas_extend(0);
- if (arenas[0] == NULL) {
- malloc_mutex_unlock(&init_lock);
+ if (arena_init(0) == NULL)
return (true);
- }
-
- /* Initialize allocation counters before any allocations can occur. */
- if (config_stats && thread_allocated_tsd_boot()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
- }
-
- if (arenas_tsd_boot()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
- }
+ malloc_init_state = malloc_init_a0_initialized;
+ return (false);
+}
- if (config_tcache && tcache_boot1()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
- }
+static bool
+malloc_init_hard_a0(void)
+{
+ bool ret;
- if (config_fill && quarantine_boot()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
- }
+ malloc_mutex_lock(&init_lock);
+ ret = malloc_init_hard_a0_locked();
+ malloc_mutex_unlock(&init_lock);
+ return (ret);
+}
- if (config_prof && prof_boot2()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
- }
+/*
+ * Initialize data structures which may trigger recursive allocation.
+ *
+ * init_lock must be held.
+ */
+static void
+malloc_init_hard_recursible(void)
+{
+ malloc_init_state = malloc_init_recursible;
malloc_mutex_unlock(&init_lock);
- /**********************************************************************/
- /* Recursive allocation may follow. */
ncpus = malloc_ncpus();
#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
- && !defined(_WIN32))
+ && !defined(_WIN32) && !defined(__native_client__))
/* LinuxThreads's pthread_atfork() allocates. */
if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
jemalloc_postfork_child) != 0) {
@@ -785,15 +1257,16 @@ malloc_init_hard(void)
abort();
}
#endif
-
- /* Done recursively allocating. */
- /**********************************************************************/
malloc_mutex_lock(&init_lock);
+}
- if (mutex_boot()) {
- malloc_mutex_unlock(&init_lock);
+/* init_lock must be held. */
+static bool
+malloc_init_hard_finish(void)
+{
+
+ if (mutex_boot())
return (true);
- }
if (opt_narenas == 0) {
/*
@@ -820,21 +1293,56 @@ malloc_init_hard(void)
/* Allocate and initialize arenas. */
arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
- if (arenas == NULL) {
- malloc_mutex_unlock(&init_lock);
+ if (arenas == NULL)
return (true);
- }
/*
* Zero the array. In practice, this should always be pre-zeroed,
* since it was just mmap()ed, but let's be sure.
*/
memset(arenas, 0, sizeof(arena_t *) * narenas_total);
/* Copy the pointer to the one arena that was already initialized. */
- arenas[0] = init_arenas[0];
+ arenas[0] = a0;
- malloc_initialized = true;
- malloc_mutex_unlock(&init_lock);
+ malloc_init_state = malloc_init_initialized;
+ return (false);
+}
+
+static bool
+malloc_init_hard(void)
+{
+#if defined(_WIN32) && _WIN32_WINNT < 0x0600
+ _init_init_lock();
+#endif
+ malloc_mutex_lock(&init_lock);
+ if (!malloc_init_hard_needed()) {
+ malloc_mutex_unlock(&init_lock);
+ return (false);
+ }
+
+ if (malloc_init_state != malloc_init_a0_initialized &&
+ malloc_init_hard_a0_locked()) {
+ malloc_mutex_unlock(&init_lock);
+ return (true);
+ }
+ if (malloc_tsd_boot0()) {
+ malloc_mutex_unlock(&init_lock);
+ return (true);
+ }
+ if (config_prof && prof_boot2()) {
+ malloc_mutex_unlock(&init_lock);
+ return (true);
+ }
+
+ malloc_init_hard_recursible();
+
+ if (malloc_init_hard_finish()) {
+ malloc_mutex_unlock(&init_lock);
+ return (true);
+ }
+
+ malloc_mutex_unlock(&init_lock);
+ malloc_tsd_boot1();
return (false);
}
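
Aside: malloc_init_hard() is now split into the phases above: a0 bootstrap under init_lock, a recursion-tolerant phase with the lock dropped, and a finishing phase. A compressed sketch of that shape with hypothetical names and stubbed-out phase bodies, showing how the lock hand-off around the recursible phase fits together:

    #include <pthread.h>
    #include <stdbool.h>

    typedef enum { INIT_NONE, INIT_A0, INIT_RECURSIBLE, INIT_DONE } init_state_t;
    static init_state_t state = INIT_NONE;
    static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;

    static bool init_a0_locked(void) { /* conf, base, arena 0, ... */ return (false); }
    static bool init_finish(void)    { /* mutexes, arenas array, ... */ return (false); }
    static void detect_ncpus(void)   { /* sysconf(); may allocate recursively */ }

    static bool
    init_hard(void)
    {
        pthread_mutex_lock(&init_lock);
        if (state == INIT_DONE) {                       /* already initialized */
            pthread_mutex_unlock(&init_lock);
            return (false);
        }
        if (state != INIT_A0 && init_a0_locked()) {     /* bootstrap arena 0 */
            pthread_mutex_unlock(&init_lock);
            return (true);
        }
        state = INIT_RECURSIBLE;                        /* allocation may recurse now */
        pthread_mutex_unlock(&init_lock);
        detect_ncpus();
        pthread_mutex_lock(&init_lock);
        if (init_finish()) {
            pthread_mutex_unlock(&init_lock);
            return (true);
        }
        state = INIT_DONE;
        pthread_mutex_unlock(&init_lock);
        return (false);
    }
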
@@ -847,98 +1355,87 @@ malloc_init_hard(void)
*/
static void *
-imalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt)
+imalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
{
void *p;
- if (cnt == NULL)
+ if (tctx == NULL)
return (NULL);
- if (prof_promote && usize <= SMALL_MAXCLASS) {
- p = imalloc(SMALL_MAXCLASS+1);
+ if (usize <= SMALL_MAXCLASS) {
+ p = imalloc(tsd, LARGE_MINCLASS);
if (p == NULL)
return (NULL);
arena_prof_promoted(p, usize);
} else
- p = imalloc(usize);
+ p = imalloc(tsd, usize);
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
-imalloc_prof(size_t usize, prof_thr_cnt_t *cnt)
+imalloc_prof(tsd_t *tsd, size_t usize)
{
void *p;
+ prof_tctx_t *tctx;
- if ((uintptr_t)cnt != (uintptr_t)1U)
- p = imalloc_prof_sample(usize, cnt);
+ tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
+ if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
+ p = imalloc_prof_sample(tsd, usize, tctx);
else
- p = imalloc(usize);
- if (p == NULL)
+ p = imalloc(tsd, usize);
+ if (unlikely(p == NULL)) {
+ prof_alloc_rollback(tsd, tctx, true);
return (NULL);
- prof_malloc(p, usize, cnt);
+ }
+ prof_malloc(p, usize, tctx);
return (p);
}
-/*
- * MALLOC_BODY() is a macro rather than a function because its contents are in
- * the fast path, but inlining would cause reliability issues when determining
- * how many frames to discard from heap profiling backtraces.
- */
-#define MALLOC_BODY(ret, size, usize) do { \
- if (malloc_init()) \
- ret = NULL; \
- else { \
- if (config_prof && opt_prof) { \
- prof_thr_cnt_t *cnt; \
- \
- usize = s2u(size); \
- /* \
- * Call PROF_ALLOC_PREP() here rather than in \
- * imalloc_prof() so that imalloc_prof() can be \
- * inlined without introducing uncertainty \
- * about the number of backtrace frames to \
- * ignore. imalloc_prof() is in the fast path \
- * when heap profiling is enabled, so inlining \
- * is critical to performance. (For \
- * consistency all callers of PROF_ALLOC_PREP() \
- * are structured similarly, even though e.g. \
- * realloc() isn't called enough for inlining \
- * to be critical.) \
- */ \
- PROF_ALLOC_PREP(1, usize, cnt); \
- ret = imalloc_prof(usize, cnt); \
- } else { \
- if (config_stats || (config_valgrind && \
- opt_valgrind)) \
- usize = s2u(size); \
- ret = imalloc(size); \
- } \
- } \
-} while (0)
+JEMALLOC_ALWAYS_INLINE_C void *
+imalloc_body(size_t size, tsd_t **tsd, size_t *usize)
+{
-void *
+ if (unlikely(malloc_init()))
+ return (NULL);
+ *tsd = tsd_fetch();
+
+ if (config_prof && opt_prof) {
+ *usize = s2u(size);
+ if (unlikely(*usize == 0))
+ return (NULL);
+ return (imalloc_prof(*tsd, *usize));
+ }
+
+ if (config_stats || (config_valgrind && unlikely(in_valgrind)))
+ *usize = s2u(size);
+ return (imalloc(*tsd, size));
+}
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_malloc(size_t size)
{
void *ret;
+ tsd_t *tsd;
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
if (size == 0)
size = 1;
- MALLOC_BODY(ret, size, usize);
-
- if (ret == NULL) {
- if (config_xmalloc && opt_xmalloc) {
+ ret = imalloc_body(size, &tsd, &usize);
+ if (unlikely(ret == NULL)) {
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
malloc_write("<jemalloc>: Error in malloc(): "
"out of memory\n");
abort();
}
set_errno(ENOMEM);
}
- if (config_stats && ret != NULL) {
+ if (config_stats && likely(ret != NULL)) {
assert(usize == isalloc(ret, config_prof));
- thread_allocated_tsd_get()->allocated += usize;
+ *tsd_thread_allocatedp_get(tsd) += usize;
}
UTRACE(0, size, ret);
JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
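
Aside: the stats path above now updates the per-thread counters through tsd_thread_allocatedp_get() rather than the old thread_allocated_tsd_get() wrapper; the totals remain readable through the "thread.allocated" and "thread.deallocated" mallctls. A small usage sketch, assuming an unprefixed build (this vendored copy maps the entry points to je_malloc, je_mallctl, and so on):

    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
        uint64_t allocated, deallocated;
        size_t sz = sizeof(uint64_t);
        void *p = malloc(4096);

        /* Cumulative bytes allocated/deallocated by the calling thread. */
        mallctl("thread.allocated", &allocated, &sz, NULL, 0);
        mallctl("thread.deallocated", &deallocated, &sz, NULL, 0);
        printf("thread: %" PRIu64 " allocated, %" PRIu64 " deallocated\n",
            allocated, deallocated);
        free(p);
        return (0);
    }
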
@@ -946,107 +1443,103 @@ je_malloc(size_t size)
}
static void *
-imemalign_prof_sample(size_t alignment, size_t usize, prof_thr_cnt_t *cnt)
+imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
+ prof_tctx_t *tctx)
{
void *p;
- if (cnt == NULL)
+ if (tctx == NULL)
return (NULL);
- if (prof_promote && usize <= SMALL_MAXCLASS) {
- assert(sa2u(SMALL_MAXCLASS+1, alignment) != 0);
- p = ipalloc(sa2u(SMALL_MAXCLASS+1, alignment), alignment,
- false);
+ if (usize <= SMALL_MAXCLASS) {
+ assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS);
+ p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
if (p == NULL)
return (NULL);
arena_prof_promoted(p, usize);
} else
- p = ipalloc(usize, alignment, false);
+ p = ipalloc(tsd, usize, alignment, false);
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
-imemalign_prof(size_t alignment, size_t usize, prof_thr_cnt_t *cnt)
+imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
{
void *p;
+ prof_tctx_t *tctx;
- if ((uintptr_t)cnt != (uintptr_t)1U)
- p = imemalign_prof_sample(alignment, usize, cnt);
+ tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
+ if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
+ p = imemalign_prof_sample(tsd, alignment, usize, tctx);
else
- p = ipalloc(usize, alignment, false);
- if (p == NULL)
+ p = ipalloc(tsd, usize, alignment, false);
+ if (unlikely(p == NULL)) {
+ prof_alloc_rollback(tsd, tctx, true);
return (NULL);
- prof_malloc(p, usize, cnt);
+ }
+ prof_malloc(p, usize, tctx);
return (p);
}
JEMALLOC_ATTR(nonnull(1))
-#ifdef JEMALLOC_PROF
-/*
- * Avoid any uncertainty as to how many backtrace frames to ignore in
- * PROF_ALLOC_PREP().
- */
-JEMALLOC_NOINLINE
-#endif
static int
imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
{
int ret;
+ tsd_t *tsd;
size_t usize;
void *result;
assert(min_alignment != 0);
- if (malloc_init()) {
+ if (unlikely(malloc_init())) {
result = NULL;
goto label_oom;
- } else {
- if (size == 0)
- size = 1;
-
- /* Make sure that alignment is a large enough power of 2. */
- if (((alignment - 1) & alignment) != 0
- || (alignment < min_alignment)) {
- if (config_xmalloc && opt_xmalloc) {
- malloc_write("<jemalloc>: Error allocating "
- "aligned memory: invalid alignment\n");
- abort();
- }
- result = NULL;
- ret = EINVAL;
- goto label_return;
- }
+ }
+ tsd = tsd_fetch();
+ if (size == 0)
+ size = 1;
- usize = sa2u(size, alignment);
- if (usize == 0) {
- result = NULL;
- goto label_oom;
+ /* Make sure that alignment is a large enough power of 2. */
+ if (unlikely(((alignment - 1) & alignment) != 0
+ || (alignment < min_alignment))) {
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
+ malloc_write("<jemalloc>: Error allocating "
+ "aligned memory: invalid alignment\n");
+ abort();
}
+ result = NULL;
+ ret = EINVAL;
+ goto label_return;
+ }
- if (config_prof && opt_prof) {
- prof_thr_cnt_t *cnt;
-
- PROF_ALLOC_PREP(2, usize, cnt);
- result = imemalign_prof(alignment, usize, cnt);
- } else
- result = ipalloc(usize, alignment, false);
- if (result == NULL)
- goto label_oom;
+ usize = sa2u(size, alignment);
+ if (unlikely(usize == 0)) {
+ result = NULL;
+ goto label_oom;
}
+ if (config_prof && opt_prof)
+ result = imemalign_prof(tsd, alignment, usize);
+ else
+ result = ipalloc(tsd, usize, alignment, false);
+ if (unlikely(result == NULL))
+ goto label_oom;
+ assert(((uintptr_t)result & (alignment - 1)) == ZU(0));
+
*memptr = result;
ret = 0;
label_return:
- if (config_stats && result != NULL) {
+ if (config_stats && likely(result != NULL)) {
assert(usize == isalloc(result, config_prof));
- thread_allocated_tsd_get()->allocated += usize;
+ *tsd_thread_allocatedp_get(tsd) += usize;
}
UTRACE(0, size, result);
return (ret);
label_oom:
assert(result == NULL);
- if (config_xmalloc && opt_xmalloc) {
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
malloc_write("<jemalloc>: Error allocating aligned memory: "
"out of memory\n");
abort();
@@ -1055,7 +1548,8 @@ label_oom:
goto label_return;
}
-int
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
+JEMALLOC_ATTR(nonnull(1))
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
int ret = imemalign(memptr, alignment, size, sizeof(void *));
@@ -1064,13 +1558,15 @@ je_posix_memalign(void **memptr, size_t alignment, size_t size)
return (ret);
}
-void *
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
je_aligned_alloc(size_t alignment, size_t size)
{
void *ret;
int err;
- if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
+ if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) {
ret = NULL;
set_errno(err);
}
@@ -1080,54 +1576,62 @@ je_aligned_alloc(size_t alignment, size_t size)
}
static void *
-icalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt)
+icalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
{
void *p;
- if (cnt == NULL)
+ if (tctx == NULL)
return (NULL);
- if (prof_promote && usize <= SMALL_MAXCLASS) {
- p = icalloc(SMALL_MAXCLASS+1);
+ if (usize <= SMALL_MAXCLASS) {
+ p = icalloc(tsd, LARGE_MINCLASS);
if (p == NULL)
return (NULL);
arena_prof_promoted(p, usize);
} else
- p = icalloc(usize);
+ p = icalloc(tsd, usize);
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
-icalloc_prof(size_t usize, prof_thr_cnt_t *cnt)
+icalloc_prof(tsd_t *tsd, size_t usize)
{
void *p;
+ prof_tctx_t *tctx;
- if ((uintptr_t)cnt != (uintptr_t)1U)
- p = icalloc_prof_sample(usize, cnt);
+ tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
+ if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
+ p = icalloc_prof_sample(tsd, usize, tctx);
else
- p = icalloc(usize);
- if (p == NULL)
+ p = icalloc(tsd, usize);
+ if (unlikely(p == NULL)) {
+ prof_alloc_rollback(tsd, tctx, true);
return (NULL);
- prof_malloc(p, usize, cnt);
+ }
+ prof_malloc(p, usize, tctx);
return (p);
}
-void *
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
je_calloc(size_t num, size_t size)
{
void *ret;
+ tsd_t *tsd;
size_t num_size;
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
- if (malloc_init()) {
+ if (unlikely(malloc_init())) {
num_size = 0;
ret = NULL;
goto label_return;
}
+ tsd = tsd_fetch();
num_size = num * size;
- if (num_size == 0) {
+ if (unlikely(num_size == 0)) {
if (num == 0 || size == 0)
num_size = 1;
else {
@@ -1139,37 +1643,38 @@ je_calloc(size_t num, size_t size)
* overflow during multiplication if neither operand uses any of the
* most significant half of the bits in a size_t.
*/
- } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
- && (num_size / size != num)) {
+ } else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
+ 2))) && (num_size / size != num))) {
/* size_t overflow. */
ret = NULL;
goto label_return;
}
if (config_prof && opt_prof) {
- prof_thr_cnt_t *cnt;
-
usize = s2u(num_size);
- PROF_ALLOC_PREP(1, usize, cnt);
- ret = icalloc_prof(usize, cnt);
+ if (unlikely(usize == 0)) {
+ ret = NULL;
+ goto label_return;
+ }
+ ret = icalloc_prof(tsd, usize);
} else {
- if (config_stats || (config_valgrind && opt_valgrind))
+ if (config_stats || (config_valgrind && unlikely(in_valgrind)))
usize = s2u(num_size);
- ret = icalloc(num_size);
+ ret = icalloc(tsd, num_size);
}
label_return:
- if (ret == NULL) {
- if (config_xmalloc && opt_xmalloc) {
+ if (unlikely(ret == NULL)) {
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
malloc_write("<jemalloc>: Error in calloc(): out of "
"memory\n");
abort();
}
set_errno(ENOMEM);
}
- if (config_stats && ret != NULL) {
+ if (config_stats && likely(ret != NULL)) {
assert(usize == isalloc(ret, config_prof));
- thread_allocated_tsd_get()->allocated += usize;
+ *tsd_thread_allocatedp_get(tsd) += usize;
}
UTRACE(0, num_size, ret);
JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
@@ -1177,135 +1682,162 @@ label_return:
}
static void *
-irealloc_prof_sample(void *oldptr, size_t usize, prof_thr_cnt_t *cnt)
+irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
+ prof_tctx_t *tctx)
{
void *p;
- if (cnt == NULL)
+ if (tctx == NULL)
return (NULL);
- if (prof_promote && usize <= SMALL_MAXCLASS) {
- p = iralloc(oldptr, SMALL_MAXCLASS+1, 0, 0, false);
+ if (usize <= SMALL_MAXCLASS) {
+ p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
if (p == NULL)
return (NULL);
arena_prof_promoted(p, usize);
} else
- p = iralloc(oldptr, usize, 0, 0, false);
+ p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
-irealloc_prof(void *oldptr, size_t old_usize, size_t usize, prof_thr_cnt_t *cnt)
+irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
{
void *p;
- prof_ctx_t *old_ctx;
-
- old_ctx = prof_ctx_get(oldptr);
- if ((uintptr_t)cnt != (uintptr_t)1U)
- p = irealloc_prof_sample(oldptr, usize, cnt);
+ bool prof_active;
+ prof_tctx_t *old_tctx, *tctx;
+
+ prof_active = prof_active_get_unlocked();
+ old_tctx = prof_tctx_get(old_ptr);
+ tctx = prof_alloc_prep(tsd, usize, prof_active, true);
+ if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
+ p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
else
- p = iralloc(oldptr, usize, 0, 0, false);
- if (p == NULL)
+ p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
+ if (unlikely(p == NULL)) {
+ prof_alloc_rollback(tsd, tctx, true);
return (NULL);
- prof_realloc(p, usize, cnt, old_usize, old_ctx);
+ }
+ prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
+ old_tctx);
return (p);
}
JEMALLOC_INLINE_C void
-ifree(void *ptr)
+ifree(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
size_t usize;
UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
assert(ptr != NULL);
- assert(malloc_initialized || IS_INITIALIZER);
+ assert(malloc_initialized() || IS_INITIALIZER);
if (config_prof && opt_prof) {
usize = isalloc(ptr, config_prof);
- prof_free(ptr, usize);
+ prof_free(tsd, ptr, usize);
} else if (config_stats || config_valgrind)
usize = isalloc(ptr, config_prof);
if (config_stats)
- thread_allocated_tsd_get()->deallocated += usize;
- if (config_valgrind && opt_valgrind)
+ *tsd_thread_deallocatedp_get(tsd) += usize;
+ if (config_valgrind && unlikely(in_valgrind))
rzsize = p2rz(ptr);
- iqalloc(ptr);
+ iqalloc(tsd, ptr, tcache);
JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}
-void *
+JEMALLOC_INLINE_C void
+isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache)
+{
+ UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
+
+ assert(ptr != NULL);
+ assert(malloc_initialized() || IS_INITIALIZER);
+
+ if (config_prof && opt_prof)
+ prof_free(tsd, ptr, usize);
+ if (config_stats)
+ *tsd_thread_deallocatedp_get(tsd) += usize;
+ if (config_valgrind && unlikely(in_valgrind))
+ rzsize = p2rz(ptr);
+ isqalloc(tsd, ptr, usize, tcache);
+ JEMALLOC_VALGRIND_FREE(ptr, rzsize);
+}
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ALLOC_SIZE(2)
je_realloc(void *ptr, size_t size)
{
void *ret;
+ tsd_t *tsd JEMALLOC_CC_SILENCE_INIT(NULL);
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
size_t old_usize = 0;
UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
- if (size == 0) {
+ if (unlikely(size == 0)) {
if (ptr != NULL) {
/* realloc(ptr, 0) is equivalent to free(ptr). */
UTRACE(ptr, 0, 0);
- ifree(ptr);
+ tsd = tsd_fetch();
+ ifree(tsd, ptr, tcache_get(tsd, false));
return (NULL);
}
size = 1;
}
- if (ptr != NULL) {
- assert(malloc_initialized || IS_INITIALIZER);
+ if (likely(ptr != NULL)) {
+ assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
+ tsd = tsd_fetch();
- if ((config_prof && opt_prof) || config_stats ||
- (config_valgrind && opt_valgrind))
- old_usize = isalloc(ptr, config_prof);
- if (config_valgrind && opt_valgrind)
+ old_usize = isalloc(ptr, config_prof);
+ if (config_valgrind && unlikely(in_valgrind))
old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);
if (config_prof && opt_prof) {
- prof_thr_cnt_t *cnt;
-
usize = s2u(size);
- PROF_ALLOC_PREP(1, usize, cnt);
- ret = irealloc_prof(ptr, old_usize, usize, cnt);
+ ret = unlikely(usize == 0) ? NULL : irealloc_prof(tsd,
+ ptr, old_usize, usize);
} else {
- if (config_stats || (config_valgrind && opt_valgrind))
+ if (config_stats || (config_valgrind &&
+ unlikely(in_valgrind)))
usize = s2u(size);
- ret = iralloc(ptr, size, 0, 0, false);
+ ret = iralloc(tsd, ptr, old_usize, size, 0, false);
}
} else {
/* realloc(NULL, size) is equivalent to malloc(size). */
- MALLOC_BODY(ret, size, usize);
+ ret = imalloc_body(size, &tsd, &usize);
}
- if (ret == NULL) {
- if (config_xmalloc && opt_xmalloc) {
+ if (unlikely(ret == NULL)) {
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
malloc_write("<jemalloc>: Error in realloc(): "
"out of memory\n");
abort();
}
set_errno(ENOMEM);
}
- if (config_stats && ret != NULL) {
- thread_allocated_t *ta;
+ if (config_stats && likely(ret != NULL)) {
assert(usize == isalloc(ret, config_prof));
- ta = thread_allocated_tsd_get();
- ta->allocated += usize;
- ta->deallocated += old_usize;
+ *tsd_thread_allocatedp_get(tsd) += usize;
+ *tsd_thread_deallocatedp_get(tsd) += old_usize;
}
UTRACE(ptr, size, ret);
- JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_usize, old_rzsize,
- false);
+ JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize,
+ old_rzsize, true, false);
return (ret);
}
-void
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_free(void *ptr)
{
UTRACE(ptr, 0, 0);
- if (ptr != NULL)
- ifree(ptr);
+ if (likely(ptr != NULL)) {
+ tsd_t *tsd = tsd_fetch();
+ ifree(tsd, ptr, tcache_get(tsd, false));
+ }
}
/*
@@ -1317,22 +1849,28 @@ je_free(void *ptr)
*/
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
-void *
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc)
je_memalign(size_t alignment, size_t size)
{
void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
- imemalign(&ret, alignment, size, 1);
+ if (unlikely(imemalign(&ret, alignment, size, 1) != 0))
+ ret = NULL;
JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
return (ret);
}
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
-void *
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc)
je_valloc(size_t size)
{
void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
- imemalign(&ret, PAGE, size, 1);
+ if (unlikely(imemalign(&ret, PAGE, size, 1) != 0))
+ ret = NULL;
JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
return (ret);
}
@@ -1346,7 +1884,7 @@ je_valloc(size_t size)
#define is_malloc_(a) malloc_is_ ## a
#define is_malloc(a) is_malloc_(a)
-#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
+#if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK))
/*
* glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
* to inconsistently reference libc's malloc(3)-compatible functions
@@ -1356,11 +1894,13 @@ je_valloc(size_t size)
* passed an extra argument for the caller return address, which will be
* ignored.
*/
-JEMALLOC_EXPORT void (* __free_hook)(void *ptr) = je_free;
-JEMALLOC_EXPORT void *(* __malloc_hook)(size_t size) = je_malloc;
-JEMALLOC_EXPORT void *(* __realloc_hook)(void *ptr, size_t size) = je_realloc;
-JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) =
+JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
+JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
+JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
+# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
+JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
je_memalign;
+# endif
#endif
/*
@@ -1371,111 +1911,173 @@ JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) =
* Begin non-standard functions.
*/
-JEMALLOC_ALWAYS_INLINE_C void *
-imallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
- arena_t *arena)
+JEMALLOC_ALWAYS_INLINE_C bool
+imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize,
+ size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
{
- assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
- alignment)));
+ if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) {
+ *alignment = 0;
+ *usize = s2u(size);
+ } else {
+ *alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
+ *usize = sa2u(size, *alignment);
+ }
+ assert(*usize != 0);
+ *zero = MALLOCX_ZERO_GET(flags);
+ if ((flags & MALLOCX_TCACHE_MASK) != 0) {
+ if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
+ *tcache = NULL;
+ else
+ *tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
+ } else
+ *tcache = tcache_get(tsd, true);
+ if ((flags & MALLOCX_ARENA_MASK) != 0) {
+ unsigned arena_ind = MALLOCX_ARENA_GET(flags);
+ *arena = arena_get(tsd, arena_ind, true, true);
+ if (unlikely(*arena == NULL))
+ return (true);
+ } else
+ *arena = NULL;
+ return (false);
+}
- if (alignment != 0)
- return (ipalloct(usize, alignment, zero, try_tcache, arena));
- else if (zero)
- return (icalloct(usize, try_tcache, arena));
- else
- return (imalloct(usize, try_tcache, arena));
+JEMALLOC_ALWAYS_INLINE_C bool
+imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
+ size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
+{
+
+ if (likely(flags == 0)) {
+ *usize = s2u(size);
+ assert(*usize != 0);
+ *alignment = 0;
+ *zero = false;
+ *tcache = tcache_get(tsd, true);
+ *arena = NULL;
+ return (false);
+ } else {
+ return (imallocx_flags_decode_hard(tsd, size, flags, usize,
+ alignment, zero, tcache, arena));
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+imallocx_flags(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
+ tcache_t *tcache, arena_t *arena)
+{
+
+ if (unlikely(alignment != 0))
+ return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
+ if (unlikely(zero))
+ return (icalloct(tsd, usize, tcache, arena));
+ return (imalloct(tsd, usize, tcache, arena));
}
static void *
-imallocx_prof_sample(size_t usize, size_t alignment, bool zero, bool try_tcache,
- arena_t *arena, prof_thr_cnt_t *cnt)
+imallocx_prof_sample(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
+ tcache_t *tcache, arena_t *arena)
{
void *p;
- if (cnt == NULL)
- return (NULL);
- if (prof_promote && usize <= SMALL_MAXCLASS) {
- size_t usize_promoted = (alignment == 0) ?
- s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1, alignment);
- assert(usize_promoted != 0);
- p = imallocx(usize_promoted, alignment, zero, try_tcache,
+ if (usize <= SMALL_MAXCLASS) {
+ assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
+ sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
+ p = imallocx_flags(tsd, LARGE_MINCLASS, alignment, zero, tcache,
arena);
if (p == NULL)
return (NULL);
arena_prof_promoted(p, usize);
} else
- p = imallocx(usize, alignment, zero, try_tcache, arena);
+ p = imallocx_flags(tsd, usize, alignment, zero, tcache, arena);
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
-imallocx_prof(size_t usize, size_t alignment, bool zero, bool try_tcache,
- arena_t *arena, prof_thr_cnt_t *cnt)
+imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
{
void *p;
+ size_t alignment;
+ bool zero;
+ tcache_t *tcache;
+ arena_t *arena;
+ prof_tctx_t *tctx;
- if ((uintptr_t)cnt != (uintptr_t)1U) {
- p = imallocx_prof_sample(usize, alignment, zero, try_tcache,
- arena, cnt);
+ if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
+ &zero, &tcache, &arena)))
+ return (NULL);
+ tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true);
+ if (likely((uintptr_t)tctx == (uintptr_t)1U))
+ p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
+ else if ((uintptr_t)tctx > (uintptr_t)1U) {
+ p = imallocx_prof_sample(tsd, *usize, alignment, zero, tcache,
+ arena);
} else
- p = imallocx(usize, alignment, zero, try_tcache, arena);
- if (p == NULL)
+ p = NULL;
+ if (unlikely(p == NULL)) {
+ prof_alloc_rollback(tsd, tctx, true);
return (NULL);
- prof_malloc(p, usize, cnt);
+ }
+ prof_malloc(p, *usize, tctx);
+ assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
return (p);
}
-void *
-je_mallocx(size_t size, int flags)
+JEMALLOC_ALWAYS_INLINE_C void *
+imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
{
void *p;
- size_t usize;
- size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
- & (SIZE_T_MAX-1));
- bool zero = flags & MALLOCX_ZERO;
- unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
+ size_t alignment;
+ bool zero;
+ tcache_t *tcache;
arena_t *arena;
- bool try_tcache;
- assert(size != 0);
+ if (likely(flags == 0)) {
+ if (config_stats || (config_valgrind && unlikely(in_valgrind)))
+ *usize = s2u(size);
+ return (imalloc(tsd, size));
+ }
- if (malloc_init())
- goto label_oom;
+ if (unlikely(imallocx_flags_decode_hard(tsd, size, flags, usize,
+ &alignment, &zero, &tcache, &arena)))
+ return (NULL);
+ p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
+ assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
+ return (p);
+}
- if (arena_ind != UINT_MAX) {
- arena = arenas[arena_ind];
- try_tcache = false;
- } else {
- arena = NULL;
- try_tcache = true;
- }
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
+je_mallocx(size_t size, int flags)
+{
+ tsd_t *tsd;
+ void *p;
+ size_t usize;
- usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
- assert(usize != 0);
+ assert(size != 0);
- if (config_prof && opt_prof) {
- prof_thr_cnt_t *cnt;
+ if (unlikely(malloc_init()))
+ goto label_oom;
+ tsd = tsd_fetch();
- PROF_ALLOC_PREP(1, usize, cnt);
- p = imallocx_prof(usize, alignment, zero, try_tcache, arena,
- cnt);
- } else
- p = imallocx(usize, alignment, zero, try_tcache, arena);
- if (p == NULL)
+ if (config_prof && opt_prof)
+ p = imallocx_prof(tsd, size, flags, &usize);
+ else
+ p = imallocx_no_prof(tsd, size, flags, &usize);
+ if (unlikely(p == NULL))
goto label_oom;
if (config_stats) {
assert(usize == isalloc(p, config_prof));
- thread_allocated_tsd_get()->allocated += usize;
+ *tsd_thread_allocatedp_get(tsd) += usize;
}
UTRACE(0, size, p);
- JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
+ JEMALLOC_VALGRIND_MALLOC(true, p, usize, MALLOCX_ZERO_GET(flags));
return (p);
label_oom:
- if (config_xmalloc && opt_xmalloc) {
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
abort();
}
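
Aside: imallocx_flags_decode() above pulls the alignment, zero, tcache, and arena requests straight out of the flags word. A usage sketch of the corresponding public macros (same prefix caveat as before: the vendored build exposes these as je_mallocx/je_dallocx):

    #include <jemalloc/jemalloc.h>

    void
    example(void)
    {
        /* 1 KiB, 64-byte aligned, zero-filled, bypassing the thread cache. */
        void *p = mallocx(1024, MALLOCX_ALIGN(64) | MALLOCX_ZERO |
            MALLOCX_TCACHE_NONE);
        if (p != NULL)
            dallocx(p, MALLOCX_TCACHE_NONE);
    }
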
@@ -1484,49 +2086,53 @@ label_oom:
}
static void *
-irallocx_prof_sample(void *oldptr, size_t size, size_t alignment, size_t usize,
- bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena,
- prof_thr_cnt_t *cnt)
+irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize,
+ size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
+ prof_tctx_t *tctx)
{
void *p;
- if (cnt == NULL)
+ if (tctx == NULL)
return (NULL);
- if (prof_promote && usize <= SMALL_MAXCLASS) {
- p = iralloct(oldptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
- size) ? 0 : size - (SMALL_MAXCLASS+1), alignment, zero,
- try_tcache_alloc, try_tcache_dalloc, arena);
+ if (usize <= SMALL_MAXCLASS) {
+ p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment,
+ zero, tcache, arena);
if (p == NULL)
return (NULL);
arena_prof_promoted(p, usize);
} else {
- p = iralloct(oldptr, size, 0, alignment, zero,
- try_tcache_alloc, try_tcache_dalloc, arena);
+ p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero,
+ tcache, arena);
}
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
-irallocx_prof(void *oldptr, size_t old_usize, size_t size, size_t alignment,
- size_t *usize, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
- arena_t *arena, prof_thr_cnt_t *cnt)
+irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
+ size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
+ arena_t *arena)
{
void *p;
- prof_ctx_t *old_ctx;
-
- old_ctx = prof_ctx_get(oldptr);
- if ((uintptr_t)cnt != (uintptr_t)1U)
- p = irallocx_prof_sample(oldptr, size, alignment, *usize, zero,
- try_tcache_alloc, try_tcache_dalloc, arena, cnt);
- else {
- p = iralloct(oldptr, size, 0, alignment, zero,
- try_tcache_alloc, try_tcache_dalloc, arena);
+ bool prof_active;
+ prof_tctx_t *old_tctx, *tctx;
+
+ prof_active = prof_active_get_unlocked();
+ old_tctx = prof_tctx_get(old_ptr);
+ tctx = prof_alloc_prep(tsd, *usize, prof_active, true);
+ if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
+ p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
+ alignment, zero, tcache, arena, tctx);
+ } else {
+ p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero,
+ tcache, arena);
}
- if (p == NULL)
+ if (unlikely(p == NULL)) {
+ prof_alloc_rollback(tsd, tctx, true);
return (NULL);
+ }
- if (p == oldptr && alignment != 0) {
+ if (p == old_ptr && alignment != 0) {
/*
* The allocation did not move, so it is possible that the size
* class is smaller than would guarantee the requested
@@ -1537,78 +2143,80 @@ irallocx_prof(void *oldptr, size_t old_usize, size_t size, size_t alignment,
*/
*usize = isalloc(p, config_prof);
}
- prof_realloc(p, *usize, cnt, old_usize, old_ctx);
+ prof_realloc(tsd, p, *usize, tctx, prof_active, true, old_ptr,
+ old_usize, old_tctx);
return (p);
}
-void *
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ALLOC_SIZE(2)
je_rallocx(void *ptr, size_t size, int flags)
{
void *p;
- size_t usize, old_usize;
+ tsd_t *tsd;
+ size_t usize;
+ size_t old_usize;
UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
- size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
- & (SIZE_T_MAX-1));
+ size_t alignment = MALLOCX_ALIGN_GET(flags);
bool zero = flags & MALLOCX_ZERO;
- unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
- bool try_tcache_alloc, try_tcache_dalloc;
arena_t *arena;
+ tcache_t *tcache;
assert(ptr != NULL);
assert(size != 0);
- assert(malloc_initialized || IS_INITIALIZER);
+ assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
+ tsd = tsd_fetch();
- if (arena_ind != UINT_MAX) {
- arena_chunk_t *chunk;
- try_tcache_alloc = false;
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- try_tcache_dalloc = (chunk == ptr || chunk->arena !=
- arenas[arena_ind]);
- arena = arenas[arena_ind];
- } else {
- try_tcache_alloc = true;
- try_tcache_dalloc = true;
+ if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
+ unsigned arena_ind = MALLOCX_ARENA_GET(flags);
+ arena = arena_get(tsd, arena_ind, true, true);
+ if (unlikely(arena == NULL))
+ goto label_oom;
+ } else
arena = NULL;
- }
- if ((config_prof && opt_prof) || config_stats ||
- (config_valgrind && opt_valgrind))
- old_usize = isalloc(ptr, config_prof);
- if (config_valgrind && opt_valgrind)
+ if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
+ if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
+ tcache = NULL;
+ else
+ tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
+ } else
+ tcache = tcache_get(tsd, true);
+
+ old_usize = isalloc(ptr, config_prof);
+ if (config_valgrind && unlikely(in_valgrind))
old_rzsize = u2rz(old_usize);
if (config_prof && opt_prof) {
- prof_thr_cnt_t *cnt;
-
usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
assert(usize != 0);
- PROF_ALLOC_PREP(1, usize, cnt);
- p = irallocx_prof(ptr, old_usize, size, alignment, &usize, zero,
- try_tcache_alloc, try_tcache_dalloc, arena, cnt);
- if (p == NULL)
+ p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
+ zero, tcache, arena);
+ if (unlikely(p == NULL))
goto label_oom;
} else {
- p = iralloct(ptr, size, 0, alignment, zero, try_tcache_alloc,
- try_tcache_dalloc, arena);
- if (p == NULL)
+ p = iralloct(tsd, ptr, old_usize, size, alignment, zero,
+ tcache, arena);
+ if (unlikely(p == NULL))
goto label_oom;
- if (config_stats || (config_valgrind && opt_valgrind))
+ if (config_stats || (config_valgrind && unlikely(in_valgrind)))
usize = isalloc(p, config_prof);
}
+ assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
if (config_stats) {
- thread_allocated_t *ta;
- ta = thread_allocated_tsd_get();
- ta->allocated += usize;
- ta->deallocated += old_usize;
+ *tsd_thread_allocatedp_get(tsd) += usize;
+ *tsd_thread_deallocatedp_get(tsd) += old_usize;
}
UTRACE(ptr, size, p);
- JEMALLOC_VALGRIND_REALLOC(p, usize, ptr, old_usize, old_rzsize, zero);
+ JEMALLOC_VALGRIND_REALLOC(true, p, usize, false, ptr, old_usize,
+ old_rzsize, false, zero);
return (p);
label_oom:
- if (config_xmalloc && opt_xmalloc) {
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
abort();
}
@@ -1618,11 +2226,11 @@ label_oom:
JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
- size_t alignment, bool zero, arena_t *arena)
+ size_t alignment, bool zero)
{
size_t usize;
- if (ixalloc(ptr, size, extra, alignment, zero))
+ if (ixalloc(ptr, old_usize, size, extra, alignment, zero))
return (old_usize);
usize = isalloc(ptr, config_prof);
@@ -1631,215 +2239,229 @@ ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
static size_t
ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
- size_t alignment, size_t max_usize, bool zero, arena_t *arena,
- prof_thr_cnt_t *cnt)
+ size_t alignment, bool zero, prof_tctx_t *tctx)
{
size_t usize;
- if (cnt == NULL)
+ if (tctx == NULL)
return (old_usize);
- /* Use minimum usize to determine whether promotion may happen. */
- if (prof_promote && ((alignment == 0) ? s2u(size) : sa2u(size,
- alignment)) <= SMALL_MAXCLASS) {
- if (ixalloc(ptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
- size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
- alignment, zero))
- return (old_usize);
- usize = isalloc(ptr, config_prof);
- if (max_usize < PAGE)
- arena_prof_promoted(ptr, usize);
- } else {
- usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
- zero, arena);
- }
+ usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, zero);
return (usize);
}
JEMALLOC_ALWAYS_INLINE_C size_t
-ixallocx_prof(void *ptr, size_t old_usize, size_t size, size_t extra,
- size_t alignment, size_t max_usize, bool zero, arena_t *arena,
- prof_thr_cnt_t *cnt)
+ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
+ size_t extra, size_t alignment, bool zero)
{
- size_t usize;
- prof_ctx_t *old_ctx;
+ size_t usize_max, usize;
+ bool prof_active;
+ prof_tctx_t *old_tctx, *tctx;
- old_ctx = prof_ctx_get(ptr);
- if ((uintptr_t)cnt != (uintptr_t)1U) {
+ prof_active = prof_active_get_unlocked();
+ old_tctx = prof_tctx_get(ptr);
+ /*
+ * usize isn't knowable before ixalloc() returns when extra is non-zero.
+ * Therefore, compute its maximum possible value and use that in
+ * prof_alloc_prep() to decide whether to capture a backtrace.
+ * prof_realloc() will use the actual usize to decide whether to sample.
+ */
+ usize_max = (alignment == 0) ? s2u(size+extra) : sa2u(size+extra,
+ alignment);
+ assert(usize_max != 0);
+ tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
+ if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
- alignment, zero, max_usize, arena, cnt);
+ alignment, zero, tctx);
} else {
usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
- zero, arena);
+ zero);
}
- if (usize == old_usize)
+ if (usize == old_usize) {
+ prof_alloc_rollback(tsd, tctx, false);
return (usize);
- prof_realloc(ptr, usize, cnt, old_usize, old_ctx);
+ }
+ prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
+ old_tctx);
return (usize);
}
-size_t
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_xallocx(void *ptr, size_t size, size_t extra, int flags)
{
+ tsd_t *tsd;
size_t usize, old_usize;
UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
- size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
- & (SIZE_T_MAX-1));
+ size_t alignment = MALLOCX_ALIGN_GET(flags);
bool zero = flags & MALLOCX_ZERO;
- unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
- arena_t *arena;
assert(ptr != NULL);
assert(size != 0);
assert(SIZE_T_MAX - size >= extra);
- assert(malloc_initialized || IS_INITIALIZER);
+ assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
-
- if (arena_ind != UINT_MAX)
- arena = arenas[arena_ind];
- else
- arena = NULL;
+ tsd = tsd_fetch();
old_usize = isalloc(ptr, config_prof);
- if (config_valgrind && opt_valgrind)
+
+ /* Clamp extra if necessary to avoid (size + extra) overflow. */
+ if (unlikely(size + extra > HUGE_MAXCLASS)) {
+ /* Check for size overflow. */
+ if (unlikely(size > HUGE_MAXCLASS)) {
+ usize = old_usize;
+ goto label_not_resized;
+ }
+ extra = HUGE_MAXCLASS - size;
+ }
+
+ if (config_valgrind && unlikely(in_valgrind))
old_rzsize = u2rz(old_usize);
if (config_prof && opt_prof) {
- prof_thr_cnt_t *cnt;
- /*
- * usize isn't knowable before ixalloc() returns when extra is
- * non-zero. Therefore, compute its maximum possible value and
- * use that in PROF_ALLOC_PREP() to decide whether to capture a
- * backtrace. prof_realloc() will use the actual usize to
- * decide whether to sample.
- */
- size_t max_usize = (alignment == 0) ? s2u(size+extra) :
- sa2u(size+extra, alignment);
- PROF_ALLOC_PREP(1, max_usize, cnt);
- usize = ixallocx_prof(ptr, old_usize, size, extra, alignment,
- max_usize, zero, arena, cnt);
+ usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
+ alignment, zero);
} else {
usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
- zero, arena);
+ zero);
}
- if (usize == old_usize)
+ if (unlikely(usize == old_usize))
goto label_not_resized;
if (config_stats) {
- thread_allocated_t *ta;
- ta = thread_allocated_tsd_get();
- ta->allocated += usize;
- ta->deallocated += old_usize;
+ *tsd_thread_allocatedp_get(tsd) += usize;
+ *tsd_thread_deallocatedp_get(tsd) += old_usize;
}
- JEMALLOC_VALGRIND_REALLOC(ptr, usize, ptr, old_usize, old_rzsize, zero);
+ JEMALLOC_VALGRIND_REALLOC(false, ptr, usize, false, ptr, old_usize,
+ old_rzsize, false, zero);
label_not_resized:
UTRACE(ptr, size, ptr);
return (usize);
}
-size_t
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
+JEMALLOC_ATTR(pure)
je_sallocx(const void *ptr, int flags)
{
size_t usize;
- assert(malloc_initialized || IS_INITIALIZER);
+ assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
if (config_ivsalloc)
usize = ivsalloc(ptr, config_prof);
- else {
- assert(ptr != NULL);
+ else
usize = isalloc(ptr, config_prof);
- }
return (usize);
}
-void
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_dallocx(void *ptr, int flags)
{
- size_t usize;
- UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
- unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
- bool try_tcache;
+ tsd_t *tsd;
+ tcache_t *tcache;
assert(ptr != NULL);
- assert(malloc_initialized || IS_INITIALIZER);
+ assert(malloc_initialized() || IS_INITIALIZER);
- if (arena_ind != UINT_MAX) {
- arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- try_tcache = (chunk == ptr || chunk->arena !=
- arenas[arena_ind]);
+ tsd = tsd_fetch();
+ if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
+ if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
+ tcache = NULL;
+ else
+ tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
} else
- try_tcache = true;
+ tcache = tcache_get(tsd, false);
UTRACE(ptr, 0, 0);
- if (config_stats || config_valgrind)
- usize = isalloc(ptr, config_prof);
- if (config_prof && opt_prof) {
- if (config_stats == false && config_valgrind == false)
- usize = isalloc(ptr, config_prof);
- prof_free(ptr, usize);
- }
- if (config_stats)
- thread_allocated_tsd_get()->deallocated += usize;
- if (config_valgrind && opt_valgrind)
- rzsize = p2rz(ptr);
- iqalloct(ptr, try_tcache);
- JEMALLOC_VALGRIND_FREE(ptr, rzsize);
+ ifree(tsd_fetch(), ptr, tcache);
}
-size_t
-je_nallocx(size_t size, int flags)
+JEMALLOC_ALWAYS_INLINE_C size_t
+inallocx(size_t size, int flags)
+{
+ size_t usize;
+
+ if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
+ usize = s2u(size);
+ else
+ usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
+ assert(usize != 0);
+ return (usize);
+}
+
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW
+je_sdallocx(void *ptr, size_t size, int flags)
{
+ tsd_t *tsd;
+ tcache_t *tcache;
size_t usize;
- size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
- & (SIZE_T_MAX-1));
+
+ assert(ptr != NULL);
+ assert(malloc_initialized() || IS_INITIALIZER);
+ usize = inallocx(size, flags);
+ assert(usize == isalloc(ptr, config_prof));
+
+ tsd = tsd_fetch();
+ if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
+ if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
+ tcache = NULL;
+ else
+ tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
+ } else
+ tcache = tcache_get(tsd, false);
+
+ UTRACE(ptr, 0, 0);
+ isfree(tsd, ptr, usize, tcache);
+}
+
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
+JEMALLOC_ATTR(pure)
+je_nallocx(size_t size, int flags)
+{
assert(size != 0);
- if (malloc_init())
+ if (unlikely(malloc_init()))
return (0);
- usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
- assert(usize != 0);
- return (usize);
+ return (inallocx(size, flags));
}
-int
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
size_t newlen)
{
- if (malloc_init())
+ if (unlikely(malloc_init()))
return (EAGAIN);
return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}
-int
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{
- if (malloc_init())
+ if (unlikely(malloc_init()))
return (EAGAIN);
return (ctl_nametomib(name, mibp, miblenp));
}
-int
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
- if (malloc_init())
+ if (unlikely(malloc_init()))
return (EAGAIN);
return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}
-void
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
const char *opts)
{
@@ -1847,18 +2469,18 @@ je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
stats_print(write_cb, cbopaque, opts);
}
-size_t
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{
size_t ret;
- assert(malloc_initialized || IS_INITIALIZER);
+ assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
if (config_ivsalloc)
ret = ivsalloc(ptr, config_prof);
else
- ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
+ ret = (ptr == NULL) ? 0 : isalloc(ptr, config_prof);
return (ret);
}
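
Aside: je_sdallocx() above is new in 4.0: a size-hinted free that skips the size lookup that isalloc() would otherwise perform, as the assert on usize shows. The size passed in must match either the original request or the usable size nallocx() reports for it. A short sketch, same prefix caveat as above:

    #include <jemalloc/jemalloc.h>

    void
    example(size_t n)
    {
        void *p = mallocx(n, 0);
        if (p == NULL)
            return;
        /* ... use p ... */
        /* Either the requested size or nallocx(n, 0) is acceptable here. */
        sdallocx(p, n, 0);
    }
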
@@ -1868,91 +2490,6 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
*/
/******************************************************************************/
/*
- * Begin experimental functions.
- */
-#ifdef JEMALLOC_EXPERIMENTAL
-
-int
-je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
-{
- void *p;
-
- assert(ptr != NULL);
-
- p = je_mallocx(size, flags);
- if (p == NULL)
- return (ALLOCM_ERR_OOM);
- if (rsize != NULL)
- *rsize = isalloc(p, config_prof);
- *ptr = p;
- return (ALLOCM_SUCCESS);
-}
-
-int
-je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
-{
- int ret;
- bool no_move = flags & ALLOCM_NO_MOVE;
-
- assert(ptr != NULL);
- assert(*ptr != NULL);
- assert(size != 0);
- assert(SIZE_T_MAX - size >= extra);
-
- if (no_move) {
- size_t usize = je_xallocx(*ptr, size, extra, flags);
- ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED;
- if (rsize != NULL)
- *rsize = usize;
- } else {
- void *p = je_rallocx(*ptr, size+extra, flags);
- if (p != NULL) {
- *ptr = p;
- ret = ALLOCM_SUCCESS;
- } else
- ret = ALLOCM_ERR_OOM;
- if (rsize != NULL)
- *rsize = isalloc(*ptr, config_prof);
- }
- return (ret);
-}
-
-int
-je_sallocm(const void *ptr, size_t *rsize, int flags)
-{
-
- assert(rsize != NULL);
- *rsize = je_sallocx(ptr, flags);
- return (ALLOCM_SUCCESS);
-}
-
-int
-je_dallocm(void *ptr, int flags)
-{
-
- je_dallocx(ptr, flags);
- return (ALLOCM_SUCCESS);
-}
-
-int
-je_nallocm(size_t *rsize, size_t size, int flags)
-{
- size_t usize;
-
- usize = je_nallocx(size, flags);
- if (usize == 0)
- return (ALLOCM_ERR_OOM);
- if (rsize != NULL)
- *rsize = usize;
- return (ALLOCM_SUCCESS);
-}
-
-#endif
-/*
- * End experimental functions.
- */
-/******************************************************************************/
-/*
* The following functions are used by threading libraries for protection of
* malloc during fork().
*/
@@ -1966,9 +2503,9 @@ je_nallocm(size_t *rsize, size_t size, int flags)
* fork/malloc races via the following functions it registers during
* initialization using pthread_atfork(), but of course that does no good if
* the allocator isn't fully initialized at fork time. The following library
- * constructor is a partial solution to this problem. It may still possible to
- * trigger the deadlock described above, but doing so would involve forking via
- * a library constructor that runs before jemalloc's runs.
+ * constructor is a partial solution to this problem. It may still be possible
+ * to trigger the deadlock described above, but doing so would involve forking
+ * via a library constructor that runs before jemalloc's runs.
*/
JEMALLOC_ATTR(constructor)
static void
@@ -1989,10 +2526,10 @@ _malloc_prefork(void)
unsigned i;
#ifdef JEMALLOC_MUTEX_INIT_CB
- if (malloc_initialized == false)
+ if (!malloc_initialized())
return;
#endif
- assert(malloc_initialized);
+ assert(malloc_initialized());
/* Acquire all mutexes in a safe order. */
ctl_prefork();
@@ -2004,7 +2541,6 @@ _malloc_prefork(void)
}
chunk_prefork();
base_prefork();
- huge_prefork();
}
#ifndef JEMALLOC_MUTEX_INIT_CB
@@ -2018,13 +2554,12 @@ _malloc_postfork(void)
unsigned i;
#ifdef JEMALLOC_MUTEX_INIT_CB
- if (malloc_initialized == false)
+ if (!malloc_initialized())
return;
#endif
- assert(malloc_initialized);
+ assert(malloc_initialized());
/* Release all mutexes, now that fork() has completed. */
- huge_postfork_parent();
base_postfork_parent();
chunk_postfork_parent();
for (i = 0; i < narenas_total; i++) {
@@ -2041,10 +2576,9 @@ jemalloc_postfork_child(void)
{
unsigned i;
- assert(malloc_initialized);
+ assert(malloc_initialized());
/* Release all mutexes, now that fork() has completed. */
- huge_postfork_child();
base_postfork_child();
chunk_postfork_child();
for (i = 0; i < narenas_total; i++) {
@@ -2057,55 +2591,3 @@ jemalloc_postfork_child(void)
}
/******************************************************************************/
-/*
- * The following functions are used for TLS allocation/deallocation in static
- * binaries on FreeBSD. The primary difference between these and i[mcd]alloc()
- * is that these avoid accessing TLS variables.
- */
-
-static void *
-a0alloc(size_t size, bool zero)
-{
-
- if (malloc_init())
- return (NULL);
-
- if (size == 0)
- size = 1;
-
- if (size <= arena_maxclass)
- return (arena_malloc(arenas[0], size, zero, false));
- else
- return (huge_malloc(size, zero, huge_dss_prec_get(arenas[0])));
-}
-
-void *
-a0malloc(size_t size)
-{
-
- return (a0alloc(size, false));
-}
-
-void *
-a0calloc(size_t num, size_t size)
-{
-
- return (a0alloc(num * size, true));
-}
-
-void
-a0free(void *ptr)
-{
- arena_chunk_t *chunk;
-
- if (ptr == NULL)
- return;
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (chunk != ptr)
- arena_dalloc(chunk->arena, chunk, ptr, false);
- else
- huge_dalloc(ptr, true);
-}
-
-/******************************************************************************/
diff --git a/deps/jemalloc/src/mutex.c b/deps/jemalloc/src/mutex.c
index 788eca387..2d47af976 100644
--- a/deps/jemalloc/src/mutex.c
+++ b/deps/jemalloc/src/mutex.c
@@ -73,9 +73,13 @@ malloc_mutex_init(malloc_mutex_t *mutex)
{
#ifdef _WIN32
+# if _WIN32_WINNT >= 0x0600
+ InitializeSRWLock(&mutex->lock);
+# else
if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
_CRT_SPINCOUNT))
return (true);
+# endif
#elif (defined(JEMALLOC_OSSPIN))
mutex->lock = 0;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
@@ -83,8 +87,8 @@ malloc_mutex_init(malloc_mutex_t *mutex)
mutex->postponed_next = postponed_mutexes;
postponed_mutexes = mutex;
} else {
- if (_pthread_mutex_init_calloc_cb(&mutex->lock, base_calloc) !=
- 0)
+ if (_pthread_mutex_init_calloc_cb(&mutex->lock,
+ bootstrap_calloc) != 0)
return (true);
}
#else
@@ -140,7 +144,7 @@ mutex_boot(void)
postpone_init = false;
while (postponed_mutexes != NULL) {
if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
- base_calloc) != 0)
+ bootstrap_calloc) != 0)
return (true);
postponed_mutexes = postponed_mutexes->postponed_next;
}
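The hunk above switches FreeBSD's _pthread_mutex_init_calloc_cb() callback from base_calloc to bootstrap_calloc, i.e. a calloc-style routine that must work before the allocator itself is usable. As a rough sketch only (jemalloc's actual bootstrap_calloc is not shown in this diff; the demo_* names and buffer size are illustrative), such a routine can hand out zeroed memory from static storage:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Hypothetical bump allocator over static storage; no freeing, no locking,
     * intended only for the single-threaded bootstrap phase. */
    static char   demo_bootstrap_buf[4096];
    static size_t demo_bootstrap_off;

    static void *
    demo_bootstrap_calloc(size_t num, size_t size)
    {
        size_t bytes;
        void *ret;

        if (size != 0 && num > SIZE_MAX / size)
            return NULL;            /* multiplication would overflow */
        bytes = num * size;
        if (bytes == 0 || bytes > sizeof(demo_bootstrap_buf) - demo_bootstrap_off)
            return NULL;
        ret = demo_bootstrap_buf + demo_bootstrap_off;
        demo_bootstrap_off += bytes;
        memset(ret, 0, bytes);      /* calloc semantics: zeroed memory */
        return ret;
    }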
diff --git a/deps/jemalloc/src/pages.c b/deps/jemalloc/src/pages.c
new file mode 100644
index 000000000..83a167f67
--- /dev/null
+++ b/deps/jemalloc/src/pages.c
@@ -0,0 +1,173 @@
+#define JEMALLOC_PAGES_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+
+void *
+pages_map(void *addr, size_t size)
+{
+ void *ret;
+
+ assert(size != 0);
+
+#ifdef _WIN32
+ /*
+ * If VirtualAlloc can't allocate at the given address when one is
+ * given, it fails and returns NULL.
+ */
+ ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
+ PAGE_READWRITE);
+#else
+ /*
+ * We don't use MAP_FIXED here, because it can cause the *replacement*
+ * of existing mappings, and we only want to create new mappings.
+ */
+ ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
+ -1, 0);
+ assert(ret != NULL);
+
+ if (ret == MAP_FAILED)
+ ret = NULL;
+ else if (addr != NULL && ret != addr) {
+ /*
+ * We succeeded in mapping memory, but not in the right place.
+ */
+ pages_unmap(ret, size);
+ ret = NULL;
+ }
+#endif
+ assert(ret == NULL || (addr == NULL && ret != addr)
+ || (addr != NULL && ret == addr));
+ return (ret);
+}
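pages_map() returns NULL both when the OS refuses the mapping and when a non-NULL address hint cannot be honored, so callers must treat NULL as ordinary failure. A small usage sketch follows; the 4096-byte page size and the demo_* name are illustrative, and the prototypes are assumed to match the new pages.h header:

    #include <stddef.h>

    void *pages_map(void *addr, size_t size);
    void  pages_unmap(void *addr, size_t size);

    /* Reserve one page with no address hint, touch it, then return it. */
    static void
    demo_pages_roundtrip(void)
    {
        size_t sz = 4096;               /* assume a 4 KiB page for the example */
        void *p = pages_map(NULL, sz);  /* NULL hint: let the OS pick an address */

        if (p == NULL)
            return;
        ((char *)p)[0] = 1;             /* mapping is readable and writable */
        pages_unmap(p, sz);
    }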
+
+void
+pages_unmap(void *addr, size_t size)
+{
+
+#ifdef _WIN32
+ if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
+#else
+ if (munmap(addr, size) == -1)
+#endif
+ {
+ char buf[BUFERROR_BUF];
+
+ buferror(get_errno(), buf, sizeof(buf));
+ malloc_printf("<jemalloc>: Error in "
+#ifdef _WIN32
+ "VirtualFree"
+#else
+ "munmap"
+#endif
+ "(): %s\n", buf);
+ if (opt_abort)
+ abort();
+ }
+}
+
+void *
+pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
+{
+ void *ret = (void *)((uintptr_t)addr + leadsize);
+
+ assert(alloc_size >= leadsize + size);
+#ifdef _WIN32
+ {
+ void *new_addr;
+
+ pages_unmap(addr, alloc_size);
+ new_addr = pages_map(ret, size);
+ if (new_addr == ret)
+ return (ret);
+ if (new_addr)
+ pages_unmap(new_addr, size);
+ return (NULL);
+ }
+#else
+ {
+ size_t trailsize = alloc_size - leadsize - size;
+
+ if (leadsize != 0)
+ pages_unmap(addr, leadsize);
+ if (trailsize != 0)
+ pages_unmap((void *)((uintptr_t)ret + size), trailsize);
+ return (ret);
+ }
+#endif
+}
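pages_trim() exists so a caller can obtain alignment that mmap()/VirtualAlloc() do not guarantee: over-allocate, then cut off the misaligned head (and, on non-Windows systems, the tail). A hedged sketch of that calling pattern follows; demo_map_aligned is illustrative rather than the call site jemalloc actually uses, and overflow checking of size + alignment is omitted for brevity:

    #include <stddef.h>
    #include <stdint.h>

    void *pages_map(void *addr, size_t size);
    void *pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size);

    /* Map size + alignment bytes, then trim down to an aligned block of size
     * bytes.  alignment must be a power of two. */
    static void *
    demo_map_aligned(size_t size, size_t alignment)
    {
        size_t alloc_size = size + alignment;   /* worst-case padding */
        void *addr = pages_map(NULL, alloc_size);
        uintptr_t lead;

        if (addr == NULL)
            return NULL;
        lead = (alignment - ((uintptr_t)addr & (alignment - 1))) & (alignment - 1);
        return pages_trim(addr, alloc_size, lead, size);
    }

On Windows the trimmed re-mapping can fail, in which case pages_trim() (and hence the sketch above) returns NULL.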
+
+static bool
+pages_commit_impl(void *addr, size_t size, bool commit)
+{
+
+#ifndef _WIN32
+ /*
+ * The following decommit/commit implementation is functional, but
+ * always disabled because it doesn't add value beyond improved
+ * debugging (at the cost of extra system calls) on systems that
+ * overcommit.
+ */
+ if (false) {
+ int prot = commit ? (PROT_READ | PROT_WRITE) : PROT_NONE;
+ void *result = mmap(addr, size, prot, MAP_PRIVATE | MAP_ANON |
+ MAP_FIXED, -1, 0);
+ if (result == MAP_FAILED)
+ return (true);
+ if (result != addr) {
+ /*
+ * We succeeded in mapping memory, but not in the right
+ * place.
+ */
+ pages_unmap(result, size);
+ return (true);
+ }
+ return (false);
+ }
+#endif
+ return (true);
+}
+
+bool
+pages_commit(void *addr, size_t size)
+{
+
+ return (pages_commit_impl(addr, size, true));
+}
+
+bool
+pages_decommit(void *addr, size_t size)
+{
+
+ return (pages_commit_impl(addr, size, false));
+}
+
+bool
+pages_purge(void *addr, size_t size)
+{
+ bool unzeroed;
+
+#ifdef _WIN32
+ VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
+ unzeroed = true;
+#elif defined(JEMALLOC_HAVE_MADVISE)
+# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
+# define JEMALLOC_MADV_PURGE MADV_DONTNEED
+# define JEMALLOC_MADV_ZEROS true
+# elif defined(JEMALLOC_PURGE_MADVISE_FREE)
+# define JEMALLOC_MADV_PURGE MADV_FREE
+# define JEMALLOC_MADV_ZEROS false
+# else
+# error "No madvise(2) flag defined for purging unused dirty pages."
+# endif
+ int err = madvise(addr, size, JEMALLOC_MADV_PURGE);
+ unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0);
+# undef JEMALLOC_MADV_PURGE
+# undef JEMALLOC_MADV_ZEROS
+#else
+ /* Last resort no-op. */
+ unzeroed = true;
+#endif
+ return (unzeroed);
+}
+
diff --git a/deps/jemalloc/src/prof.c b/deps/jemalloc/src/prof.c
index 7722b7b43..5d2b9598f 100644
--- a/deps/jemalloc/src/prof.c
+++ b/deps/jemalloc/src/prof.c
@@ -14,14 +14,13 @@
/******************************************************************************/
/* Data. */
-malloc_tsd_data(, prof_tdata, prof_tdata_t *, NULL)
-
bool opt_prof = false;
bool opt_prof_active = true;
+bool opt_prof_thread_active_init = true;
size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
bool opt_prof_gdump = false;
-bool opt_prof_final = true;
+bool opt_prof_final = false;
bool opt_prof_leak = false;
bool opt_prof_accum = false;
char opt_prof_prefix[
@@ -31,25 +30,65 @@ char opt_prof_prefix[
#endif
1];
+/*
+ * Initialized as opt_prof_active, and accessed via
+ * prof_active_[gs]et{_unlocked,}().
+ */
+bool prof_active;
+static malloc_mutex_t prof_active_mtx;
+
+/*
+ * Initialized as opt_prof_thread_active_init, and accessed via
+ * prof_thread_active_init_[gs]et().
+ */
+static bool prof_thread_active_init;
+static malloc_mutex_t prof_thread_active_init_mtx;
+
+/*
+ * Initialized as opt_prof_gdump, and accessed via
+ * prof_gdump_[gs]et{_unlocked,}().
+ */
+bool prof_gdump_val;
+static malloc_mutex_t prof_gdump_mtx;
+
uint64_t prof_interval = 0;
-bool prof_promote;
+
+size_t lg_prof_sample;
/*
- * Table of mutexes that are shared among ctx's. These are leaf locks, so
- * there is no problem with using them for more than one ctx at the same time.
- * The primary motivation for this sharing though is that ctx's are ephemeral,
+ * Table of mutexes that are shared among gctx's. These are leaf locks, so
+ * there is no problem with using them for more than one gctx at the same time.
+ * The primary motivation for this sharing though is that gctx's are ephemeral,
* and destroying mutexes causes complications for systems that allocate when
* creating/destroying mutexes.
*/
-static malloc_mutex_t *ctx_locks;
-static unsigned cum_ctxs; /* Atomic counter. */
+static malloc_mutex_t *gctx_locks;
+static unsigned cum_gctxs; /* Atomic counter. */
+
+/*
+ * Table of mutexes that are shared among tdata's. No operations require
+ * holding multiple tdata locks, so there is no problem with using them for more
+ * than one tdata at the same time, even though a gctx lock may be acquired
+ * while holding a tdata lock.
+ */
+static malloc_mutex_t *tdata_locks;
/*
- * Global hash of (prof_bt_t *)-->(prof_ctx_t *). This is the master data
+ * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data
* structure that knows about all backtraces currently captured.
*/
-static ckh_t bt2ctx;
-static malloc_mutex_t bt2ctx_mtx;
+static ckh_t bt2gctx;
+static malloc_mutex_t bt2gctx_mtx;
+
+/*
+ * Tree of all extant prof_tdata_t structures, regardless of state,
+ * {attached,detached,expired}.
+ */
+static prof_tdata_tree_t tdatas;
+static malloc_mutex_t tdatas_mtx;
+
+static uint64_t next_thr_uid;
+static malloc_mutex_t next_thr_uid_mtx;
static malloc_mutex_t prof_dump_seq_mtx;
static uint64_t prof_dump_seq;
@@ -77,120 +116,210 @@ static int prof_dump_fd;
static bool prof_booted = false;
/******************************************************************************/
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
+
+static bool prof_tctx_should_destroy(prof_tctx_t *tctx);
+static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
+static bool prof_tdata_should_destroy(prof_tdata_t *tdata,
+ bool even_if_attached);
+static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata,
+ bool even_if_attached);
+static char *prof_thread_name_alloc(tsd_t *tsd, const char *thread_name);
+
+/******************************************************************************/
+/* Red-black trees. */
+
+JEMALLOC_INLINE_C int
+prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b)
+{
+ uint64_t a_thr_uid = a->thr_uid;
+ uint64_t b_thr_uid = b->thr_uid;
+ int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
+ if (ret == 0) {
+ uint64_t a_thr_discrim = a->thr_discrim;
+ uint64_t b_thr_discrim = b->thr_discrim;
+ ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim <
+ b_thr_discrim);
+ if (ret == 0) {
+ uint64_t a_tctx_uid = a->tctx_uid;
+ uint64_t b_tctx_uid = b->tctx_uid;
+ ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid <
+ b_tctx_uid);
+ }
+ }
+ return (ret);
+}
+
+rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
+ tctx_link, prof_tctx_comp)
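prof_tctx_comp() and the comparators below rank 64-bit IDs with the branch-free idiom (a > b) - (a < b), which yields -1/0/1 without the overflow that a signed subtraction of uint64_t values would risk. A self-contained illustration of the idiom (the demo_* names are not part of jemalloc):

    #include <assert.h>
    #include <stdint.h>

    /* Three-way comparison: returns -1, 0, or 1 and never overflows. */
    static int
    demo_cmp_u64(uint64_t a, uint64_t b)
    {
        return (a > b) - (a < b);
    }

    int
    main(void)
    {
        assert(demo_cmp_u64(1, 2) == -1);
        assert(demo_cmp_u64(2, 2) == 0);
        assert(demo_cmp_u64(UINT64_MAX, 0) == 1);  /* (int)(a - b) would be wrong here */
        return 0;
    }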
+
+JEMALLOC_INLINE_C int
+prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b)
+{
+ unsigned a_len = a->bt.len;
+ unsigned b_len = b->bt.len;
+ unsigned comp_len = (a_len < b_len) ? a_len : b_len;
+ int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *));
+ if (ret == 0)
+ ret = (a_len > b_len) - (a_len < b_len);
+ return (ret);
+}
+
+rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
+ prof_gctx_comp)
+
+JEMALLOC_INLINE_C int
+prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b)
+{
+ int ret;
+ uint64_t a_uid = a->thr_uid;
+ uint64_t b_uid = b->thr_uid;
+
+ ret = ((a_uid > b_uid) - (a_uid < b_uid));
+ if (ret == 0) {
+ uint64_t a_discrim = a->thr_discrim;
+ uint64_t b_discrim = b->thr_discrim;
+
+ ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim));
+ }
+ return (ret);
+}
+
+rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
+ prof_tdata_comp)
+
+/******************************************************************************/
void
-bt_init(prof_bt_t *bt, void **vec)
+prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated)
{
+ prof_tdata_t *tdata;
cassert(config_prof);
- bt->vec = vec;
- bt->len = 0;
+ if (updated) {
+ /*
+ * Compute a new sample threshold. This isn't very important in
+ * practice, because this function is rarely executed, so the
+ * potential for sample bias is minimal except in contrived
+ * programs.
+ */
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata != NULL)
+ prof_sample_threshold_update(tdata);
+ }
+
+ if ((uintptr_t)tctx > (uintptr_t)1U) {
+ malloc_mutex_lock(tctx->tdata->lock);
+ tctx->prepared = false;
+ if (prof_tctx_should_destroy(tctx))
+ prof_tctx_destroy(tsd, tctx);
+ else
+ malloc_mutex_unlock(tctx->tdata->lock);
+ }
}
-static void
-bt_destroy(prof_bt_t *bt)
+void
+prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx)
{
- cassert(config_prof);
+ prof_tctx_set(ptr, usize, tctx);
- idalloc(bt);
+ malloc_mutex_lock(tctx->tdata->lock);
+ tctx->cnts.curobjs++;
+ tctx->cnts.curbytes += usize;
+ if (opt_prof_accum) {
+ tctx->cnts.accumobjs++;
+ tctx->cnts.accumbytes += usize;
+ }
+ tctx->prepared = false;
+ malloc_mutex_unlock(tctx->tdata->lock);
}
-static prof_bt_t *
-bt_dup(prof_bt_t *bt)
+void
+prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
{
- prof_bt_t *ret;
- cassert(config_prof);
+ malloc_mutex_lock(tctx->tdata->lock);
+ assert(tctx->cnts.curobjs > 0);
+ assert(tctx->cnts.curbytes >= usize);
+ tctx->cnts.curobjs--;
+ tctx->cnts.curbytes -= usize;
- /*
- * Create a single allocation that has space for vec immediately
- * following the prof_bt_t structure. The backtraces that get
- * stored in the backtrace caches are copied from stack-allocated
- * temporary variables, so size is known at creation time. Making this
- * a contiguous object improves cache locality.
- */
- ret = (prof_bt_t *)imalloc(QUANTUM_CEILING(sizeof(prof_bt_t)) +
- (bt->len * sizeof(void *)));
- if (ret == NULL)
- return (NULL);
- ret->vec = (void **)((uintptr_t)ret +
- QUANTUM_CEILING(sizeof(prof_bt_t)));
- memcpy(ret->vec, bt->vec, bt->len * sizeof(void *));
- ret->len = bt->len;
+ if (prof_tctx_should_destroy(tctx))
+ prof_tctx_destroy(tsd, tctx);
+ else
+ malloc_mutex_unlock(tctx->tdata->lock);
+}
- return (ret);
+void
+bt_init(prof_bt_t *bt, void **vec)
+{
+
+ cassert(config_prof);
+
+ bt->vec = vec;
+ bt->len = 0;
}
-static inline void
-prof_enter(prof_tdata_t *prof_tdata)
+JEMALLOC_INLINE_C void
+prof_enter(tsd_t *tsd, prof_tdata_t *tdata)
{
cassert(config_prof);
+ assert(tdata == prof_tdata_get(tsd, false));
- assert(prof_tdata->enq == false);
- prof_tdata->enq = true;
+ if (tdata != NULL) {
+ assert(!tdata->enq);
+ tdata->enq = true;
+ }
- malloc_mutex_lock(&bt2ctx_mtx);
+ malloc_mutex_lock(&bt2gctx_mtx);
}
-static inline void
-prof_leave(prof_tdata_t *prof_tdata)
+JEMALLOC_INLINE_C void
+prof_leave(tsd_t *tsd, prof_tdata_t *tdata)
{
- bool idump, gdump;
cassert(config_prof);
+ assert(tdata == prof_tdata_get(tsd, false));
+
+ malloc_mutex_unlock(&bt2gctx_mtx);
- malloc_mutex_unlock(&bt2ctx_mtx);
+ if (tdata != NULL) {
+ bool idump, gdump;
- assert(prof_tdata->enq);
- prof_tdata->enq = false;
- idump = prof_tdata->enq_idump;
- prof_tdata->enq_idump = false;
- gdump = prof_tdata->enq_gdump;
- prof_tdata->enq_gdump = false;
+ assert(tdata->enq);
+ tdata->enq = false;
+ idump = tdata->enq_idump;
+ tdata->enq_idump = false;
+ gdump = tdata->enq_gdump;
+ tdata->enq_gdump = false;
- if (idump)
- prof_idump();
- if (gdump)
- prof_gdump();
+ if (idump)
+ prof_idump();
+ if (gdump)
+ prof_gdump();
+ }
}
#ifdef JEMALLOC_PROF_LIBUNWIND
void
-prof_backtrace(prof_bt_t *bt, unsigned nignore)
+prof_backtrace(prof_bt_t *bt)
{
- unw_context_t uc;
- unw_cursor_t cursor;
- unsigned i;
- int err;
+ int nframes;
cassert(config_prof);
assert(bt->len == 0);
assert(bt->vec != NULL);
- unw_getcontext(&uc);
- unw_init_local(&cursor, &uc);
-
- /* Throw away (nignore+1) stack frames, if that many exist. */
- for (i = 0; i < nignore + 1; i++) {
- err = unw_step(&cursor);
- if (err <= 0)
- return;
- }
-
- /*
- * Iterate over stack frames until there are no more, or until no space
- * remains in bt.
- */
- for (i = 0; i < PROF_BT_MAX; i++) {
- unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *)&bt->vec[i]);
- bt->len++;
- err = unw_step(&cursor);
- if (err <= 0)
- break;
- }
+ nframes = unw_backtrace(bt->vec, PROF_BT_MAX);
+ if (nframes <= 0)
+ return;
+ bt->len = nframes;
}
#elif (defined(JEMALLOC_PROF_LIBGCC))
static _Unwind_Reason_Code
@@ -206,25 +335,25 @@ static _Unwind_Reason_Code
prof_unwind_callback(struct _Unwind_Context *context, void *arg)
{
prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
+ void *ip;
cassert(config_prof);
- if (data->nignore > 0)
- data->nignore--;
- else {
- data->bt->vec[data->bt->len] = (void *)_Unwind_GetIP(context);
- data->bt->len++;
- if (data->bt->len == data->max)
- return (_URC_END_OF_STACK);
- }
+ ip = (void *)_Unwind_GetIP(context);
+ if (ip == NULL)
+ return (_URC_END_OF_STACK);
+ data->bt->vec[data->bt->len] = ip;
+ data->bt->len++;
+ if (data->bt->len == data->max)
+ return (_URC_END_OF_STACK);
return (_URC_NO_REASON);
}
void
-prof_backtrace(prof_bt_t *bt, unsigned nignore)
+prof_backtrace(prof_bt_t *bt)
{
- prof_unwind_data_t data = {bt, nignore, PROF_BT_MAX};
+ prof_unwind_data_t data = {bt, PROF_BT_MAX};
cassert(config_prof);
@@ -232,25 +361,22 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore)
}
#elif (defined(JEMALLOC_PROF_GCC))
void
-prof_backtrace(prof_bt_t *bt, unsigned nignore)
+prof_backtrace(prof_bt_t *bt)
{
#define BT_FRAME(i) \
- if ((i) < nignore + PROF_BT_MAX) { \
+ if ((i) < PROF_BT_MAX) { \
void *p; \
if (__builtin_frame_address(i) == 0) \
return; \
p = __builtin_return_address(i); \
if (p == NULL) \
return; \
- if (i >= nignore) { \
- bt->vec[(i) - nignore] = p; \
- bt->len = (i) - nignore + 1; \
- } \
+ bt->vec[(i)] = p; \
+ bt->len = (i) + 1; \
} else \
return;
cassert(config_prof);
- assert(nignore <= 3);
BT_FRAME(0)
BT_FRAME(1)
@@ -392,16 +518,11 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore)
BT_FRAME(125)
BT_FRAME(126)
BT_FRAME(127)
-
- /* Extras to compensate for nignore. */
- BT_FRAME(128)
- BT_FRAME(129)
- BT_FRAME(130)
#undef BT_FRAME
}
#else
void
-prof_backtrace(prof_bt_t *bt, unsigned nignore)
+prof_backtrace(prof_bt_t *bt)
{
cassert(config_prof);
@@ -410,256 +531,394 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore)
#endif
static malloc_mutex_t *
-prof_ctx_mutex_choose(void)
+prof_gctx_mutex_choose(void)
{
- unsigned nctxs = atomic_add_u(&cum_ctxs, 1);
+ unsigned ngctxs = atomic_add_u(&cum_gctxs, 1);
- return (&ctx_locks[(nctxs - 1) % PROF_NCTX_LOCKS]);
+ return (&gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]);
}
-static void
-prof_ctx_init(prof_ctx_t *ctx, prof_bt_t *bt)
+static malloc_mutex_t *
+prof_tdata_mutex_choose(uint64_t thr_uid)
{
- ctx->bt = bt;
- ctx->lock = prof_ctx_mutex_choose();
+ return (&tdata_locks[thr_uid % PROF_NTDATA_LOCKS]);
+}
+
+static prof_gctx_t *
+prof_gctx_create(tsd_t *tsd, prof_bt_t *bt)
+{
+ /*
+ * Create a single allocation that has space for vec of length bt->len.
+ */
+ prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsd, offsetof(prof_gctx_t,
+ vec) + (bt->len * sizeof(void *)), false, tcache_get(tsd, true),
+ true, NULL);
+ if (gctx == NULL)
+ return (NULL);
+ gctx->lock = prof_gctx_mutex_choose();
/*
* Set nlimbo to 1, in order to avoid a race condition with
- * prof_ctx_merge()/prof_ctx_destroy().
+ * prof_tctx_destroy()/prof_gctx_try_destroy().
*/
- ctx->nlimbo = 1;
- ql_elm_new(ctx, dump_link);
- memset(&ctx->cnt_merged, 0, sizeof(prof_cnt_t));
- ql_new(&ctx->cnts_ql);
+ gctx->nlimbo = 1;
+ tctx_tree_new(&gctx->tctxs);
+ /* Duplicate bt. */
+ memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *));
+ gctx->bt.vec = gctx->vec;
+ gctx->bt.len = bt->len;
+ return (gctx);
}
static void
-prof_ctx_destroy(prof_ctx_t *ctx)
+prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
+ prof_tdata_t *tdata)
{
- prof_tdata_t *prof_tdata;
cassert(config_prof);
/*
- * Check that ctx is still unused by any thread cache before destroying
- * it. prof_lookup() increments ctx->nlimbo in order to avoid a race
- * condition with this function, as does prof_ctx_merge() in order to
- * avoid a race between the main body of prof_ctx_merge() and entry
+ * Check that gctx is still unused by any thread cache before destroying
+ * it. prof_lookup() increments gctx->nlimbo in order to avoid a race
+ * condition with this function, as does prof_tctx_destroy() in order to
+ * avoid a race between the main body of prof_tctx_destroy() and entry
* into this function.
*/
- prof_tdata = prof_tdata_get(false);
- assert((uintptr_t)prof_tdata > (uintptr_t)PROF_TDATA_STATE_MAX);
- prof_enter(prof_tdata);
- malloc_mutex_lock(ctx->lock);
- if (ql_first(&ctx->cnts_ql) == NULL && ctx->cnt_merged.curobjs == 0 &&
- ctx->nlimbo == 1) {
- assert(ctx->cnt_merged.curbytes == 0);
- assert(ctx->cnt_merged.accumobjs == 0);
- assert(ctx->cnt_merged.accumbytes == 0);
- /* Remove ctx from bt2ctx. */
- if (ckh_remove(&bt2ctx, ctx->bt, NULL, NULL))
+ prof_enter(tsd, tdata_self);
+ malloc_mutex_lock(gctx->lock);
+ assert(gctx->nlimbo != 0);
+ if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
+ /* Remove gctx from bt2gctx. */
+ if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL))
not_reached();
- prof_leave(prof_tdata);
- /* Destroy ctx. */
- malloc_mutex_unlock(ctx->lock);
- bt_destroy(ctx->bt);
- idalloc(ctx);
+ prof_leave(tsd, tdata_self);
+ /* Destroy gctx. */
+ malloc_mutex_unlock(gctx->lock);
+ idalloctm(tsd, gctx, tcache_get(tsd, false), true);
} else {
/*
- * Compensate for increment in prof_ctx_merge() or
+ * Compensate for increment in prof_tctx_destroy() or
* prof_lookup().
*/
- ctx->nlimbo--;
- malloc_mutex_unlock(ctx->lock);
- prof_leave(prof_tdata);
+ gctx->nlimbo--;
+ malloc_mutex_unlock(gctx->lock);
+ prof_leave(tsd, tdata_self);
}
}
-static void
-prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt)
+/* tctx->tdata->lock must be held. */
+static bool
+prof_tctx_should_destroy(prof_tctx_t *tctx)
{
- bool destroy;
- cassert(config_prof);
+ if (opt_prof_accum)
+ return (false);
+ if (tctx->cnts.curobjs != 0)
+ return (false);
+ if (tctx->prepared)
+ return (false);
+ return (true);
+}
+
+static bool
+prof_gctx_should_destroy(prof_gctx_t *gctx)
+{
+
+ if (opt_prof_accum)
+ return (false);
+ if (!tctx_tree_empty(&gctx->tctxs))
+ return (false);
+ if (gctx->nlimbo != 0)
+ return (false);
+ return (true);
+}
- /* Merge cnt stats and detach from ctx. */
- malloc_mutex_lock(ctx->lock);
- ctx->cnt_merged.curobjs += cnt->cnts.curobjs;
- ctx->cnt_merged.curbytes += cnt->cnts.curbytes;
- ctx->cnt_merged.accumobjs += cnt->cnts.accumobjs;
- ctx->cnt_merged.accumbytes += cnt->cnts.accumbytes;
- ql_remove(&ctx->cnts_ql, cnt, cnts_link);
- if (opt_prof_accum == false && ql_first(&ctx->cnts_ql) == NULL &&
- ctx->cnt_merged.curobjs == 0 && ctx->nlimbo == 0) {
+/* tctx->tdata->lock is held upon entry, and released before return. */
+static void
+prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
+{
+ prof_tdata_t *tdata = tctx->tdata;
+ prof_gctx_t *gctx = tctx->gctx;
+ bool destroy_tdata, destroy_tctx, destroy_gctx;
+
+ assert(tctx->cnts.curobjs == 0);
+ assert(tctx->cnts.curbytes == 0);
+ assert(!opt_prof_accum);
+ assert(tctx->cnts.accumobjs == 0);
+ assert(tctx->cnts.accumbytes == 0);
+
+ ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
+ destroy_tdata = prof_tdata_should_destroy(tdata, false);
+ malloc_mutex_unlock(tdata->lock);
+
+ malloc_mutex_lock(gctx->lock);
+ switch (tctx->state) {
+ case prof_tctx_state_nominal:
+ tctx_tree_remove(&gctx->tctxs, tctx);
+ destroy_tctx = true;
+ if (prof_gctx_should_destroy(gctx)) {
+ /*
+ * Increment gctx->nlimbo in order to keep another
+ * thread from winning the race to destroy gctx while
+ * this one has gctx->lock dropped. Without this, it
+ * would be possible for another thread to:
+ *
+ * 1) Sample an allocation associated with gctx.
+ * 2) Deallocate the sampled object.
+ * 3) Successfully prof_gctx_try_destroy(gctx).
+ *
+ * The result would be that gctx no longer exists by the
+ * time this thread accesses it in
+ * prof_gctx_try_destroy().
+ */
+ gctx->nlimbo++;
+ destroy_gctx = true;
+ } else
+ destroy_gctx = false;
+ break;
+ case prof_tctx_state_dumping:
/*
- * Increment ctx->nlimbo in order to keep another thread from
- * winning the race to destroy ctx while this one has ctx->lock
- * dropped. Without this, it would be possible for another
- * thread to:
- *
- * 1) Sample an allocation associated with ctx.
- * 2) Deallocate the sampled object.
- * 3) Successfully prof_ctx_destroy(ctx).
- *
- * The result would be that ctx no longer exists by the time
- * this thread accesses it in prof_ctx_destroy().
+ * A dumping thread needs tctx to remain valid until dumping
+ * has finished. Change state such that the dumping thread will
+ * complete destruction during a late dump iteration phase.
*/
- ctx->nlimbo++;
- destroy = true;
- } else
- destroy = false;
- malloc_mutex_unlock(ctx->lock);
- if (destroy)
- prof_ctx_destroy(ctx);
+ tctx->state = prof_tctx_state_purgatory;
+ destroy_tctx = false;
+ destroy_gctx = false;
+ break;
+ default:
+ not_reached();
+ destroy_tctx = false;
+ destroy_gctx = false;
+ }
+ malloc_mutex_unlock(gctx->lock);
+ if (destroy_gctx) {
+ prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
+ tdata);
+ }
+
+ if (destroy_tdata)
+ prof_tdata_destroy(tsd, tdata, false);
+
+ if (destroy_tctx)
+ idalloctm(tsd, tctx, tcache_get(tsd, false), true);
}
static bool
-prof_lookup_global(prof_bt_t *bt, prof_tdata_t *prof_tdata, void **p_btkey,
- prof_ctx_t **p_ctx, bool *p_new_ctx)
+prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
+ void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx)
{
union {
- prof_ctx_t *p;
+ prof_gctx_t *p;
void *v;
- } ctx;
+ } gctx;
union {
prof_bt_t *p;
void *v;
} btkey;
- bool new_ctx;
+ bool new_gctx;
- prof_enter(prof_tdata);
- if (ckh_search(&bt2ctx, bt, &btkey.v, &ctx.v)) {
+ prof_enter(tsd, tdata);
+ if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
/* bt has never been seen before. Insert it. */
- ctx.v = imalloc(sizeof(prof_ctx_t));
- if (ctx.v == NULL) {
- prof_leave(prof_tdata);
+ gctx.p = prof_gctx_create(tsd, bt);
+ if (gctx.v == NULL) {
+ prof_leave(tsd, tdata);
return (true);
}
- btkey.p = bt_dup(bt);
- if (btkey.v == NULL) {
- prof_leave(prof_tdata);
- idalloc(ctx.v);
- return (true);
- }
- prof_ctx_init(ctx.p, btkey.p);
- if (ckh_insert(&bt2ctx, btkey.v, ctx.v)) {
+ btkey.p = &gctx.p->bt;
+ if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
/* OOM. */
- prof_leave(prof_tdata);
- idalloc(btkey.v);
- idalloc(ctx.v);
+ prof_leave(tsd, tdata);
+ idalloctm(tsd, gctx.v, tcache_get(tsd, false), true);
return (true);
}
- new_ctx = true;
+ new_gctx = true;
} else {
/*
* Increment nlimbo, in order to avoid a race condition with
- * prof_ctx_merge()/prof_ctx_destroy().
+ * prof_tctx_destroy()/prof_gctx_try_destroy().
*/
- malloc_mutex_lock(ctx.p->lock);
- ctx.p->nlimbo++;
- malloc_mutex_unlock(ctx.p->lock);
- new_ctx = false;
+ malloc_mutex_lock(gctx.p->lock);
+ gctx.p->nlimbo++;
+ malloc_mutex_unlock(gctx.p->lock);
+ new_gctx = false;
}
- prof_leave(prof_tdata);
+ prof_leave(tsd, tdata);
*p_btkey = btkey.v;
- *p_ctx = ctx.p;
- *p_new_ctx = new_ctx;
+ *p_gctx = gctx.p;
+ *p_new_gctx = new_gctx;
return (false);
}
-prof_thr_cnt_t *
-prof_lookup(prof_bt_t *bt)
+prof_tctx_t *
+prof_lookup(tsd_t *tsd, prof_bt_t *bt)
{
union {
- prof_thr_cnt_t *p;
+ prof_tctx_t *p;
void *v;
} ret;
- prof_tdata_t *prof_tdata;
+ prof_tdata_t *tdata;
+ bool not_found;
cassert(config_prof);
- prof_tdata = prof_tdata_get(false);
- if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
+ tdata = prof_tdata_get(tsd, false);
+ if (tdata == NULL)
return (NULL);
- if (ckh_search(&prof_tdata->bt2cnt, bt, NULL, &ret.v)) {
+ malloc_mutex_lock(tdata->lock);
+ not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
+ if (!not_found) /* Note double negative! */
+ ret.p->prepared = true;
+ malloc_mutex_unlock(tdata->lock);
+ if (not_found) {
+ tcache_t *tcache;
void *btkey;
- prof_ctx_t *ctx;
- bool new_ctx;
+ prof_gctx_t *gctx;
+ bool new_gctx, error;
/*
* This thread's cache lacks bt. Look for it in the global
* cache.
*/
- if (prof_lookup_global(bt, prof_tdata, &btkey, &ctx, &new_ctx))
+ if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
+ &new_gctx))
return (NULL);
- /* Link a prof_thd_cnt_t into ctx for this thread. */
- if (ckh_count(&prof_tdata->bt2cnt) == PROF_TCMAX) {
- assert(ckh_count(&prof_tdata->bt2cnt) > 0);
- /*
- * Flush the least recently used cnt in order to keep
- * bt2cnt from becoming too large.
- */
- ret.p = ql_last(&prof_tdata->lru_ql, lru_link);
- assert(ret.v != NULL);
- if (ckh_remove(&prof_tdata->bt2cnt, ret.p->ctx->bt,
- NULL, NULL))
- not_reached();
- ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
- prof_ctx_merge(ret.p->ctx, ret.p);
- /* ret can now be re-used. */
- } else {
- assert(ckh_count(&prof_tdata->bt2cnt) < PROF_TCMAX);
- /* Allocate and partially initialize a new cnt. */
- ret.v = imalloc(sizeof(prof_thr_cnt_t));
- if (ret.p == NULL) {
- if (new_ctx)
- prof_ctx_destroy(ctx);
- return (NULL);
- }
- ql_elm_new(ret.p, cnts_link);
- ql_elm_new(ret.p, lru_link);
+ /* Link a prof_tctx_t into gctx for this thread. */
+ tcache = tcache_get(tsd, true);
+ ret.v = iallocztm(tsd, sizeof(prof_tctx_t), false, tcache, true,
+ NULL);
+ if (ret.p == NULL) {
+ if (new_gctx)
+ prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
+ return (NULL);
}
- /* Finish initializing ret. */
- ret.p->ctx = ctx;
- ret.p->epoch = 0;
+ ret.p->tdata = tdata;
+ ret.p->thr_uid = tdata->thr_uid;
+ ret.p->thr_discrim = tdata->thr_discrim;
memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
- if (ckh_insert(&prof_tdata->bt2cnt, btkey, ret.v)) {
- if (new_ctx)
- prof_ctx_destroy(ctx);
- idalloc(ret.v);
+ ret.p->gctx = gctx;
+ ret.p->tctx_uid = tdata->tctx_uid_next++;
+ ret.p->prepared = true;
+ ret.p->state = prof_tctx_state_initializing;
+ malloc_mutex_lock(tdata->lock);
+ error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
+ malloc_mutex_unlock(tdata->lock);
+ if (error) {
+ if (new_gctx)
+ prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
+ idalloctm(tsd, ret.v, tcache, true);
return (NULL);
}
- ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
- malloc_mutex_lock(ctx->lock);
- ql_tail_insert(&ctx->cnts_ql, ret.p, cnts_link);
- ctx->nlimbo--;
- malloc_mutex_unlock(ctx->lock);
- } else {
- /* Move ret to the front of the LRU. */
- ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
- ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
+ malloc_mutex_lock(gctx->lock);
+ ret.p->state = prof_tctx_state_nominal;
+ tctx_tree_insert(&gctx->tctxs, ret.p);
+ gctx->nlimbo--;
+ malloc_mutex_unlock(gctx->lock);
}
return (ret.p);
}
+void
+prof_sample_threshold_update(prof_tdata_t *tdata)
+{
+ /*
+ * The body of this function is compiled out unless heap profiling is
+ * enabled, so that it is possible to compile jemalloc with floating
+ * point support completely disabled. Avoiding floating point code is
+ * important on memory-constrained systems, but it also enables a
+ * workaround for versions of glibc that don't properly save/restore
+ * floating point registers during dynamic lazy symbol loading (which
+ * internally calls into whatever malloc implementation happens to be
+ * integrated into the application). Note that some compilers (e.g.
+ * gcc 4.8) may use floating point registers for fast memory moves, so
+ * jemalloc must be compiled with such optimizations disabled (e.g.
+ * -mno-sse) in order for the workaround to be complete.
+ */
+#ifdef JEMALLOC_PROF
+ uint64_t r;
+ double u;
+
+ if (!config_prof)
+ return;
+
+ if (lg_prof_sample == 0) {
+ tdata->bytes_until_sample = 0;
+ return;
+ }
+
+ /*
+ * Compute sample interval as a geometrically distributed random
+ * variable with mean (2^lg_prof_sample).
+ *
+ * __ __
+ * | log(u) | 1
+ * tdata->bytes_until_sample = | -------- |, where p = ---------------
+ * | log(1-p) | lg_prof_sample
+ * 2
+ *
+ * For more information on the math, see:
+ *
+ * Non-Uniform Random Variate Generation
+ * Luc Devroye
+ * Springer-Verlag, New York, 1986
+ * pp 500
+ * (http://luc.devroye.org/rnbookindex.html)
+ */
+ prng64(r, 53, tdata->prng_state, UINT64_C(6364136223846793005),
+ UINT64_C(1442695040888963407));
+ u = (double)r * (1.0/9007199254740992.0L);
+ tdata->bytes_until_sample = (uint64_t)(log(u) /
+ log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
+ + (uint64_t)1U;
+#endif
+}
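The expression at the end of prof_sample_threshold_update() is the inverse-CDF draw described in the comment: with p = 1/2^lg_prof_sample, the interval log(u)/log(1-p) for uniform u in (0, 1), truncated and incremented by one, is geometrically distributed with mean 2^lg_prof_sample bytes. A standalone restatement of that arithmetic, where the demo_* name is illustrative and a caller-supplied u replaces jemalloc's prng64 draw (link with -lm):

    #include <math.h>
    #include <stdint.h>

    /* bytes_until_sample = trunc(log(u) / log(1 - p)) + 1, p = 2^-lg_prof_sample,
     * u strictly in (0, 1).  E.g. lg_prof_sample = 19 gives a 512 KiB mean
     * interval between sampled allocations. */
    static uint64_t
    demo_sample_interval(double u, unsigned lg_prof_sample)
    {
        double p = 1.0 / (double)((uint64_t)1U << lg_prof_sample);

        return (uint64_t)(log(u) / log(1.0 - p)) + 1;
    }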
+
+#ifdef JEMALLOC_JET
+static prof_tdata_t *
+prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
+{
+ size_t *tdata_count = (size_t *)arg;
+
+ (*tdata_count)++;
+
+ return (NULL);
+}
+
+size_t
+prof_tdata_count(void)
+{
+ size_t tdata_count = 0;
+
+ malloc_mutex_lock(&tdatas_mtx);
+ tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
+ (void *)&tdata_count);
+ malloc_mutex_unlock(&tdatas_mtx);
+
+ return (tdata_count);
+}
+#endif
+
#ifdef JEMALLOC_JET
size_t
prof_bt_count(void)
{
size_t bt_count;
- prof_tdata_t *prof_tdata;
+ tsd_t *tsd;
+ prof_tdata_t *tdata;
- prof_tdata = prof_tdata_get(false);
- if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
+ tsd = tsd_fetch();
+ tdata = prof_tdata_get(tsd, false);
+ if (tdata == NULL)
return (0);
- prof_enter(prof_tdata);
- bt_count = ckh_count(&bt2ctx);
- prof_leave(prof_tdata);
+ malloc_mutex_lock(&bt2gctx_mtx);
+ bt_count = ckh_count(&bt2gctx);
+ malloc_mutex_unlock(&bt2gctx_mtx);
return (bt_count);
}
@@ -675,7 +934,7 @@ prof_dump_open(bool propagate_err, const char *filename)
int fd;
fd = creat(filename, 0644);
- if (fd == -1 && propagate_err == false) {
+ if (fd == -1 && !propagate_err) {
malloc_printf("<jemalloc>: creat(\"%s\"), 0644) failed\n",
filename);
if (opt_abort)
@@ -700,7 +959,7 @@ prof_dump_flush(bool propagate_err)
err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
if (err == -1) {
- if (propagate_err == false) {
+ if (!propagate_err) {
malloc_write("<jemalloc>: write() failed during heap "
"profile flush\n");
if (opt_abort)
@@ -756,7 +1015,7 @@ prof_dump_write(bool propagate_err, const char *s)
return (false);
}
-JEMALLOC_ATTR(format(printf, 2, 3))
+JEMALLOC_FORMAT_PRINTF(2, 3)
static bool
prof_dump_printf(bool propagate_err, const char *format, ...)
{
@@ -772,176 +1031,367 @@ prof_dump_printf(bool propagate_err, const char *format, ...)
return (ret);
}
+/* tctx->tdata->lock is held. */
static void
-prof_dump_ctx_prep(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx,
- prof_ctx_list_t *ctx_ql)
+prof_tctx_merge_tdata(prof_tctx_t *tctx, prof_tdata_t *tdata)
{
- prof_thr_cnt_t *thr_cnt;
- prof_cnt_t tcnt;
-
- cassert(config_prof);
-
- malloc_mutex_lock(ctx->lock);
-
- /*
- * Increment nlimbo so that ctx won't go away before dump.
- * Additionally, link ctx into the dump list so that it is included in
- * prof_dump()'s second pass.
- */
- ctx->nlimbo++;
- ql_tail_insert(ctx_ql, ctx, dump_link);
- memcpy(&ctx->cnt_summed, &ctx->cnt_merged, sizeof(prof_cnt_t));
- ql_foreach(thr_cnt, &ctx->cnts_ql, cnts_link) {
- volatile unsigned *epoch = &thr_cnt->epoch;
+ malloc_mutex_lock(tctx->gctx->lock);
- while (true) {
- unsigned epoch0 = *epoch;
-
- /* Make sure epoch is even. */
- if (epoch0 & 1U)
- continue;
-
- memcpy(&tcnt, &thr_cnt->cnts, sizeof(prof_cnt_t));
+ switch (tctx->state) {
+ case prof_tctx_state_initializing:
+ malloc_mutex_unlock(tctx->gctx->lock);
+ return;
+ case prof_tctx_state_nominal:
+ tctx->state = prof_tctx_state_dumping;
+ malloc_mutex_unlock(tctx->gctx->lock);
- /* Terminate if epoch didn't change while reading. */
- if (*epoch == epoch0)
- break;
- }
+ memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
- ctx->cnt_summed.curobjs += tcnt.curobjs;
- ctx->cnt_summed.curbytes += tcnt.curbytes;
+ tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
+ tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
if (opt_prof_accum) {
- ctx->cnt_summed.accumobjs += tcnt.accumobjs;
- ctx->cnt_summed.accumbytes += tcnt.accumbytes;
+ tdata->cnt_summed.accumobjs +=
+ tctx->dump_cnts.accumobjs;
+ tdata->cnt_summed.accumbytes +=
+ tctx->dump_cnts.accumbytes;
}
+ break;
+ case prof_tctx_state_dumping:
+ case prof_tctx_state_purgatory:
+ not_reached();
}
+}
- if (ctx->cnt_summed.curobjs != 0)
- (*leak_nctx)++;
+/* gctx->lock is held. */
+static void
+prof_tctx_merge_gctx(prof_tctx_t *tctx, prof_gctx_t *gctx)
+{
- /* Add to cnt_all. */
- cnt_all->curobjs += ctx->cnt_summed.curobjs;
- cnt_all->curbytes += ctx->cnt_summed.curbytes;
+ gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
+ gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
if (opt_prof_accum) {
- cnt_all->accumobjs += ctx->cnt_summed.accumobjs;
- cnt_all->accumbytes += ctx->cnt_summed.accumbytes;
+ gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs;
+ gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes;
}
-
- malloc_mutex_unlock(ctx->lock);
}
-static bool
-prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all)
+/* tctx->gctx is held. */
+static prof_tctx_t *
+prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
{
- if (opt_lg_prof_sample == 0) {
- if (prof_dump_printf(propagate_err,
- "heap profile: %"PRId64": %"PRId64
- " [%"PRIu64": %"PRIu64"] @ heapprofile\n",
- cnt_all->curobjs, cnt_all->curbytes,
- cnt_all->accumobjs, cnt_all->accumbytes))
- return (true);
- } else {
+ switch (tctx->state) {
+ case prof_tctx_state_nominal:
+ /* New since dumping started; ignore. */
+ break;
+ case prof_tctx_state_dumping:
+ case prof_tctx_state_purgatory:
+ prof_tctx_merge_gctx(tctx, tctx->gctx);
+ break;
+ default:
+ not_reached();
+ }
+
+ return (NULL);
+}
+
+/* gctx->lock is held. */
+static prof_tctx_t *
+prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
+{
+ bool propagate_err = *(bool *)arg;
+
+ switch (tctx->state) {
+ case prof_tctx_state_initializing:
+ case prof_tctx_state_nominal:
+ /* Not captured by this dump. */
+ break;
+ case prof_tctx_state_dumping:
+ case prof_tctx_state_purgatory:
if (prof_dump_printf(propagate_err,
- "heap profile: %"PRId64": %"PRId64
- " [%"PRIu64": %"PRIu64"] @ heap_v2/%"PRIu64"\n",
- cnt_all->curobjs, cnt_all->curbytes,
- cnt_all->accumobjs, cnt_all->accumbytes,
- ((uint64_t)1U << opt_lg_prof_sample)))
- return (true);
+ " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": "
+ "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
+ tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
+ tctx->dump_cnts.accumbytes))
+ return (tctx);
+ break;
+ default:
+ not_reached();
}
+ return (NULL);
+}
- return (false);
+/* tctx->gctx is held. */
+static prof_tctx_t *
+prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
+{
+ prof_tctx_t *ret;
+
+ switch (tctx->state) {
+ case prof_tctx_state_nominal:
+ /* New since dumping started; ignore. */
+ break;
+ case prof_tctx_state_dumping:
+ tctx->state = prof_tctx_state_nominal;
+ break;
+ case prof_tctx_state_purgatory:
+ ret = tctx;
+ goto label_return;
+ default:
+ not_reached();
+ }
+
+ ret = NULL;
+label_return:
+ return (ret);
}
static void
-prof_dump_ctx_cleanup_locked(prof_ctx_t *ctx, prof_ctx_list_t *ctx_ql)
+prof_dump_gctx_prep(prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
{
- ctx->nlimbo--;
- ql_remove(ctx_ql, ctx, dump_link);
+ cassert(config_prof);
+
+ malloc_mutex_lock(gctx->lock);
+
+ /*
+ * Increment nlimbo so that gctx won't go away before dump.
+ * Additionally, link gctx into the dump list so that it is included in
+ * prof_dump()'s second pass.
+ */
+ gctx->nlimbo++;
+ gctx_tree_insert(gctxs, gctx);
+
+ memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));
+
+ malloc_mutex_unlock(gctx->lock);
+}
+
+static prof_gctx_t *
+prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
+{
+ size_t *leak_ngctx = (size_t *)arg;
+
+ malloc_mutex_lock(gctx->lock);
+ tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, NULL);
+ if (gctx->cnt_summed.curobjs != 0)
+ (*leak_ngctx)++;
+ malloc_mutex_unlock(gctx->lock);
+
+ return (NULL);
}
static void
-prof_dump_ctx_cleanup(prof_ctx_t *ctx, prof_ctx_list_t *ctx_ql)
+prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
{
+ prof_tdata_t *tdata = prof_tdata_get(tsd, false);
+ prof_gctx_t *gctx;
- malloc_mutex_lock(ctx->lock);
- prof_dump_ctx_cleanup_locked(ctx, ctx_ql);
- malloc_mutex_unlock(ctx->lock);
+ /*
+ * Standard tree iteration won't work here, because as soon as we
+ * decrement gctx->nlimbo and unlock gctx, another thread can
+ * concurrently destroy it, which will corrupt the tree. Therefore,
+ * tear down the tree one node at a time during iteration.
+ */
+ while ((gctx = gctx_tree_first(gctxs)) != NULL) {
+ gctx_tree_remove(gctxs, gctx);
+ malloc_mutex_lock(gctx->lock);
+ {
+ prof_tctx_t *next;
+
+ next = NULL;
+ do {
+ prof_tctx_t *to_destroy =
+ tctx_tree_iter(&gctx->tctxs, next,
+ prof_tctx_finish_iter, NULL);
+ if (to_destroy != NULL) {
+ next = tctx_tree_next(&gctx->tctxs,
+ to_destroy);
+ tctx_tree_remove(&gctx->tctxs,
+ to_destroy);
+ idalloctm(tsd, to_destroy,
+ tcache_get(tsd, false), true);
+ } else
+ next = NULL;
+ } while (next != NULL);
+ }
+ gctx->nlimbo--;
+ if (prof_gctx_should_destroy(gctx)) {
+ gctx->nlimbo++;
+ malloc_mutex_unlock(gctx->lock);
+ prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
+ } else
+ malloc_mutex_unlock(gctx->lock);
+ }
}
+static prof_tdata_t *
+prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
+{
+ prof_cnt_t *cnt_all = (prof_cnt_t *)arg;
+
+ malloc_mutex_lock(tdata->lock);
+ if (!tdata->expired) {
+ size_t tabind;
+ union {
+ prof_tctx_t *p;
+ void *v;
+ } tctx;
+
+ tdata->dumping = true;
+ memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
+ for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
+ &tctx.v);)
+ prof_tctx_merge_tdata(tctx.p, tdata);
+
+ cnt_all->curobjs += tdata->cnt_summed.curobjs;
+ cnt_all->curbytes += tdata->cnt_summed.curbytes;
+ if (opt_prof_accum) {
+ cnt_all->accumobjs += tdata->cnt_summed.accumobjs;
+ cnt_all->accumbytes += tdata->cnt_summed.accumbytes;
+ }
+ } else
+ tdata->dumping = false;
+ malloc_mutex_unlock(tdata->lock);
+
+ return (NULL);
+}
+
+static prof_tdata_t *
+prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
+{
+ bool propagate_err = *(bool *)arg;
+
+ if (!tdata->dumping)
+ return (NULL);
+
+ if (prof_dump_printf(propagate_err,
+ " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n",
+ tdata->thr_uid, tdata->cnt_summed.curobjs,
+ tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs,
+ tdata->cnt_summed.accumbytes,
+ (tdata->thread_name != NULL) ? " " : "",
+ (tdata->thread_name != NULL) ? tdata->thread_name : ""))
+ return (tdata);
+ return (NULL);
+}
+
+#ifdef JEMALLOC_JET
+#undef prof_dump_header
+#define prof_dump_header JEMALLOC_N(prof_dump_header_impl)
+#endif
+static bool
+prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all)
+{
+ bool ret;
+
+ if (prof_dump_printf(propagate_err,
+ "heap_v2/%"FMTu64"\n"
+ " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
+ ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs,
+ cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes))
+ return (true);
+
+ malloc_mutex_lock(&tdatas_mtx);
+ ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
+ (void *)&propagate_err) != NULL);
+ malloc_mutex_unlock(&tdatas_mtx);
+ return (ret);
+}
+#ifdef JEMALLOC_JET
+#undef prof_dump_header
+#define prof_dump_header JEMALLOC_N(prof_dump_header)
+prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl);
+#endif
+
+/* gctx->lock is held. */
static bool
-prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, const prof_bt_t *bt,
- prof_ctx_list_t *ctx_ql)
+prof_dump_gctx(bool propagate_err, prof_gctx_t *gctx, const prof_bt_t *bt,
+ prof_gctx_tree_t *gctxs)
{
bool ret;
unsigned i;
cassert(config_prof);
- /*
- * Current statistics can sum to 0 as a result of unmerged per thread
- * statistics. Additionally, interval- and growth-triggered dumps can
- * occur between the time a ctx is created and when its statistics are
- * filled in. Avoid dumping any ctx that is an artifact of either
- * implementation detail.
- */
- malloc_mutex_lock(ctx->lock);
- if ((opt_prof_accum == false && ctx->cnt_summed.curobjs == 0) ||
- (opt_prof_accum && ctx->cnt_summed.accumobjs == 0)) {
- assert(ctx->cnt_summed.curobjs == 0);
- assert(ctx->cnt_summed.curbytes == 0);
- assert(ctx->cnt_summed.accumobjs == 0);
- assert(ctx->cnt_summed.accumbytes == 0);
+ /* Avoid dumping any gctx that has no useful data. */
+ if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
+ (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) {
+ assert(gctx->cnt_summed.curobjs == 0);
+ assert(gctx->cnt_summed.curbytes == 0);
+ assert(gctx->cnt_summed.accumobjs == 0);
+ assert(gctx->cnt_summed.accumbytes == 0);
ret = false;
goto label_return;
}
- if (prof_dump_printf(propagate_err, "%"PRId64": %"PRId64
- " [%"PRIu64": %"PRIu64"] @",
- ctx->cnt_summed.curobjs, ctx->cnt_summed.curbytes,
- ctx->cnt_summed.accumobjs, ctx->cnt_summed.accumbytes)) {
+ if (prof_dump_printf(propagate_err, "@")) {
ret = true;
goto label_return;
}
-
for (i = 0; i < bt->len; i++) {
- if (prof_dump_printf(propagate_err, " %#"PRIxPTR,
+ if (prof_dump_printf(propagate_err, " %#"FMTxPTR,
(uintptr_t)bt->vec[i])) {
ret = true;
goto label_return;
}
}
- if (prof_dump_write(propagate_err, "\n")) {
+ if (prof_dump_printf(propagate_err,
+ "\n"
+ " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
+ gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes,
+ gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) {
+ ret = true;
+ goto label_return;
+ }
+
+ if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter,
+ (void *)&propagate_err) != NULL) {
ret = true;
goto label_return;
}
ret = false;
label_return:
- prof_dump_ctx_cleanup_locked(ctx, ctx_ql);
- malloc_mutex_unlock(ctx->lock);
return (ret);
}
+JEMALLOC_FORMAT_PRINTF(1, 2)
+static int
+prof_open_maps(const char *format, ...)
+{
+ int mfd;
+ va_list ap;
+ char filename[PATH_MAX + 1];
+
+ va_start(ap, format);
+ malloc_vsnprintf(filename, sizeof(filename), format, ap);
+ va_end(ap);
+ mfd = open(filename, O_RDONLY);
+
+ return (mfd);
+}
+
static bool
prof_dump_maps(bool propagate_err)
{
bool ret;
int mfd;
- char filename[PATH_MAX + 1];
cassert(config_prof);
#ifdef __FreeBSD__
- malloc_snprintf(filename, sizeof(filename), "/proc/curproc/map");
+ mfd = prof_open_maps("/proc/curproc/map");
#else
- malloc_snprintf(filename, sizeof(filename), "/proc/%d/maps",
- (int)getpid());
+ {
+ int pid = getpid();
+
+ mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
+ if (mfd == -1)
+ mfd = prof_open_maps("/proc/%d/maps", pid);
+ }
#endif
- mfd = open(filename, O_RDONLY);
if (mfd != -1) {
ssize_t nread;
@@ -977,51 +1427,85 @@ label_return:
}
static void
-prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_nctx,
+prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
const char *filename)
{
if (cnt_all->curbytes != 0) {
- malloc_printf("<jemalloc>: Leak summary: %"PRId64" byte%s, %"
- PRId64" object%s, %zu context%s\n",
+ malloc_printf("<jemalloc>: Leak summary: %"FMTu64" byte%s, %"
+ FMTu64" object%s, %zu context%s\n",
cnt_all->curbytes, (cnt_all->curbytes != 1) ? "s" : "",
cnt_all->curobjs, (cnt_all->curobjs != 1) ? "s" : "",
- leak_nctx, (leak_nctx != 1) ? "s" : "");
+ leak_ngctx, (leak_ngctx != 1) ? "s" : "");
malloc_printf(
- "<jemalloc>: Run pprof on \"%s\" for leak detail\n",
+ "<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
filename);
}
}
+static prof_gctx_t *
+prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
+{
+ prof_gctx_t *ret;
+ bool propagate_err = *(bool *)arg;
+
+ malloc_mutex_lock(gctx->lock);
+
+ if (prof_dump_gctx(propagate_err, gctx, &gctx->bt, gctxs)) {
+ ret = gctx;
+ goto label_return;
+ }
+
+ ret = NULL;
+label_return:
+ malloc_mutex_unlock(gctx->lock);
+ return (ret);
+}
+
static bool
-prof_dump(bool propagate_err, const char *filename, bool leakcheck)
+prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
{
- prof_tdata_t *prof_tdata;
+ prof_tdata_t *tdata;
prof_cnt_t cnt_all;
size_t tabind;
union {
- prof_ctx_t *p;
+ prof_gctx_t *p;
void *v;
- } ctx;
- size_t leak_nctx;
- prof_ctx_list_t ctx_ql;
+ } gctx;
+ size_t leak_ngctx;
+ prof_gctx_tree_t gctxs;
cassert(config_prof);
- prof_tdata = prof_tdata_get(false);
- if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL)
return (true);
malloc_mutex_lock(&prof_dump_mtx);
+ prof_enter(tsd, tdata);
- /* Merge per thread profile stats, and sum them in cnt_all. */
+ /*
+ * Put gctx's in limbo and clear their counters in preparation for
+ * summing.
+ */
+ gctx_tree_new(&gctxs);
+ for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);)
+ prof_dump_gctx_prep(gctx.p, &gctxs);
+
+ /*
+ * Iterate over tdatas, and for the non-expired ones snapshot their tctx
+ * stats and merge them into the associated gctx's.
+ */
memset(&cnt_all, 0, sizeof(prof_cnt_t));
- leak_nctx = 0;
- ql_new(&ctx_ql);
- prof_enter(prof_tdata);
- for (tabind = 0; ckh_iter(&bt2ctx, &tabind, NULL, &ctx.v) == false;)
- prof_dump_ctx_prep(ctx.p, &cnt_all, &leak_nctx, &ctx_ql);
- prof_leave(prof_tdata);
+ malloc_mutex_lock(&tdatas_mtx);
+ tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, (void *)&cnt_all);
+ malloc_mutex_unlock(&tdatas_mtx);
+
+ /* Merge tctx stats into gctx's. */
+ leak_ngctx = 0;
+ gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter, (void *)&leak_ngctx);
+
+ prof_leave(tsd, tdata);
/* Create dump file. */
if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1)
@@ -1031,11 +1515,10 @@ prof_dump(bool propagate_err, const char *filename, bool leakcheck)
if (prof_dump_header(propagate_err, &cnt_all))
goto label_write_error;
- /* Dump per ctx profile stats. */
- while ((ctx.p = ql_first(&ctx_ql)) != NULL) {
- if (prof_dump_ctx(propagate_err, ctx.p, ctx.p->bt, &ctx_ql))
- goto label_write_error;
- }
+ /* Dump per gctx profile stats. */
+ if (gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter,
+ (void *)&propagate_err) != NULL)
+ goto label_write_error;
/* Dump /proc/<pid>/maps if possible. */
if (prof_dump_maps(propagate_err))
@@ -1044,17 +1527,17 @@ prof_dump(bool propagate_err, const char *filename, bool leakcheck)
if (prof_dump_close(propagate_err))
goto label_open_close_error;
+ prof_gctx_finish(tsd, &gctxs);
malloc_mutex_unlock(&prof_dump_mtx);
if (leakcheck)
- prof_leakcheck(&cnt_all, leak_nctx, filename);
+ prof_leakcheck(&cnt_all, leak_ngctx, filename);
return (false);
label_write_error:
prof_dump_close(propagate_err);
label_open_close_error:
- while ((ctx.p = ql_first(&ctx_ql)) != NULL)
- prof_dump_ctx_cleanup(ctx.p, &ctx_ql);
+ prof_gctx_finish(tsd, &gctxs);
malloc_mutex_unlock(&prof_dump_mtx);
return (true);
}
@@ -1062,7 +1545,7 @@ label_open_close_error:
#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
static void
-prof_dump_filename(char *filename, char v, int64_t vseq)
+prof_dump_filename(char *filename, char v, uint64_t vseq)
{
cassert(config_prof);
@@ -1070,12 +1553,12 @@ prof_dump_filename(char *filename, char v, int64_t vseq)
if (vseq != VSEQ_INVALID) {
/* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
- "%s.%d.%"PRIu64".%c%"PRId64".heap",
+ "%s.%d.%"FMTu64".%c%"FMTu64".heap",
opt_prof_prefix, (int)getpid(), prof_dump_seq, v, vseq);
} else {
/* "<prefix>.<pid>.<seq>.<v>.heap" */
malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
- "%s.%d.%"PRIu64".%c.heap",
+ "%s.%d.%"FMTu64".%c.heap",
opt_prof_prefix, (int)getpid(), prof_dump_seq, v);
}
prof_dump_seq++;
@@ -1084,57 +1567,63 @@ prof_dump_filename(char *filename, char v, int64_t vseq)
static void
prof_fdump(void)
{
+ tsd_t *tsd;
char filename[DUMP_FILENAME_BUFSIZE];
cassert(config_prof);
+ assert(opt_prof_final);
+ assert(opt_prof_prefix[0] != '\0');
- if (prof_booted == false)
+ if (!prof_booted)
return;
+ tsd = tsd_fetch();
- if (opt_prof_final && opt_prof_prefix[0] != '\0') {
- malloc_mutex_lock(&prof_dump_seq_mtx);
- prof_dump_filename(filename, 'f', VSEQ_INVALID);
- malloc_mutex_unlock(&prof_dump_seq_mtx);
- prof_dump(false, filename, opt_prof_leak);
- }
+ malloc_mutex_lock(&prof_dump_seq_mtx);
+ prof_dump_filename(filename, 'f', VSEQ_INVALID);
+ malloc_mutex_unlock(&prof_dump_seq_mtx);
+ prof_dump(tsd, false, filename, opt_prof_leak);
}
void
prof_idump(void)
{
- prof_tdata_t *prof_tdata;
- char filename[PATH_MAX + 1];
+ tsd_t *tsd;
+ prof_tdata_t *tdata;
cassert(config_prof);
- if (prof_booted == false)
+ if (!prof_booted)
return;
- prof_tdata = prof_tdata_get(false);
- if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
+ tsd = tsd_fetch();
+ tdata = prof_tdata_get(tsd, false);
+ if (tdata == NULL)
return;
- if (prof_tdata->enq) {
- prof_tdata->enq_idump = true;
+ if (tdata->enq) {
+ tdata->enq_idump = true;
return;
}
if (opt_prof_prefix[0] != '\0') {
+ char filename[PATH_MAX + 1];
malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename, 'i', prof_dump_iseq);
prof_dump_iseq++;
malloc_mutex_unlock(&prof_dump_seq_mtx);
- prof_dump(false, filename, false);
+ prof_dump(tsd, false, filename, false);
}
}
bool
prof_mdump(const char *filename)
{
+ tsd_t *tsd;
char filename_buf[DUMP_FILENAME_BUFSIZE];
cassert(config_prof);
- if (opt_prof == false || prof_booted == false)
+ if (!opt_prof || !prof_booted)
return (true);
+ tsd = tsd_fetch();
if (filename == NULL) {
/* No filename specified, so automatically generate one. */
@@ -1146,33 +1635,35 @@ prof_mdump(const char *filename)
malloc_mutex_unlock(&prof_dump_seq_mtx);
filename = filename_buf;
}
- return (prof_dump(true, filename, false));
+ return (prof_dump(tsd, true, filename, false));
}
void
prof_gdump(void)
{
- prof_tdata_t *prof_tdata;
- char filename[DUMP_FILENAME_BUFSIZE];
+ tsd_t *tsd;
+ prof_tdata_t *tdata;
cassert(config_prof);
- if (prof_booted == false)
+ if (!prof_booted)
return;
- prof_tdata = prof_tdata_get(false);
- if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
+ tsd = tsd_fetch();
+ tdata = prof_tdata_get(tsd, false);
+ if (tdata == NULL)
return;
- if (prof_tdata->enq) {
- prof_tdata->enq_gdump = true;
+ if (tdata->enq) {
+ tdata->enq_gdump = true;
return;
}
if (opt_prof_prefix[0] != '\0') {
+ char filename[DUMP_FILENAME_BUFSIZE];
malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename, 'u', prof_dump_useq);
prof_dump_useq++;
malloc_mutex_unlock(&prof_dump_seq_mtx);
- prof_dump(false, filename, false);
+ prof_dump(tsd, false, filename, false);
}
}
@@ -1199,88 +1690,375 @@ prof_bt_keycomp(const void *k1, const void *k2)
return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
}
-prof_tdata_t *
-prof_tdata_init(void)
+JEMALLOC_INLINE_C uint64_t
+prof_thr_uid_alloc(void)
+{
+ uint64_t thr_uid;
+
+ malloc_mutex_lock(&next_thr_uid_mtx);
+ thr_uid = next_thr_uid;
+ next_thr_uid++;
+ malloc_mutex_unlock(&next_thr_uid_mtx);
+
+ return (thr_uid);
+}
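prof_thr_uid_alloc() hands out unique thread IDs by incrementing a counter under next_thr_uid_mtx. For comparison only — this is not what the patch does — the same guarantee can be expressed with a C11 atomic counter; the demo_* names are illustrative:

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint64_t demo_next_thr_uid;

    /* Each call returns a distinct, monotonically assigned id, with no mutex. */
    static uint64_t
    demo_thr_uid_alloc(void)
    {
        return atomic_fetch_add_explicit(&demo_next_thr_uid, 1,
            memory_order_relaxed);
    }

The mutex-based version above has the advantage of not requiring C11 atomics from the toolchain.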
+
+static prof_tdata_t *
+prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
+ char *thread_name, bool active)
{
- prof_tdata_t *prof_tdata;
+ prof_tdata_t *tdata;
+ tcache_t *tcache;
cassert(config_prof);
/* Initialize an empty cache for this thread. */
- prof_tdata = (prof_tdata_t *)imalloc(sizeof(prof_tdata_t));
- if (prof_tdata == NULL)
+ tcache = tcache_get(tsd, true);
+ tdata = (prof_tdata_t *)iallocztm(tsd, sizeof(prof_tdata_t), false,
+ tcache, true, NULL);
+ if (tdata == NULL)
return (NULL);
- if (ckh_new(&prof_tdata->bt2cnt, PROF_CKH_MINITEMS,
+ tdata->lock = prof_tdata_mutex_choose(thr_uid);
+ tdata->thr_uid = thr_uid;
+ tdata->thr_discrim = thr_discrim;
+ tdata->thread_name = thread_name;
+ tdata->attached = true;
+ tdata->expired = false;
+ tdata->tctx_uid_next = 0;
+
+ if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS,
prof_bt_hash, prof_bt_keycomp)) {
- idalloc(prof_tdata);
+ idalloctm(tsd, tdata, tcache, true);
return (NULL);
}
- ql_new(&prof_tdata->lru_ql);
- prof_tdata->vec = imalloc(sizeof(void *) * PROF_BT_MAX);
- if (prof_tdata->vec == NULL) {
- ckh_delete(&prof_tdata->bt2cnt);
- idalloc(prof_tdata);
- return (NULL);
- }
+ tdata->prng_state = (uint64_t)(uintptr_t)tdata;
+ prof_sample_threshold_update(tdata);
- prof_tdata->prng_state = 0;
- prof_tdata->threshold = 0;
- prof_tdata->accum = 0;
+ tdata->enq = false;
+ tdata->enq_idump = false;
+ tdata->enq_gdump = false;
- prof_tdata->enq = false;
- prof_tdata->enq_idump = false;
- prof_tdata->enq_gdump = false;
+ tdata->dumping = false;
+ tdata->active = active;
- prof_tdata_tsd_set(&prof_tdata);
+ malloc_mutex_lock(&tdatas_mtx);
+ tdata_tree_insert(&tdatas, tdata);
+ malloc_mutex_unlock(&tdatas_mtx);
- return (prof_tdata);
+ return (tdata);
}
-void
-prof_tdata_cleanup(void *arg)
+prof_tdata_t *
+prof_tdata_init(tsd_t *tsd)
{
- prof_thr_cnt_t *cnt;
- prof_tdata_t *prof_tdata = *(prof_tdata_t **)arg;
- cassert(config_prof);
+ return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(), 0, NULL,
+ prof_thread_active_init_get()));
+}
- if (prof_tdata == PROF_TDATA_STATE_REINCARNATED) {
- /*
- * Another destructor deallocated memory after this destructor
- * was called. Reset prof_tdata to PROF_TDATA_STATE_PURGATORY
- * in order to receive another callback.
- */
- prof_tdata = PROF_TDATA_STATE_PURGATORY;
- prof_tdata_tsd_set(&prof_tdata);
- } else if (prof_tdata == PROF_TDATA_STATE_PURGATORY) {
- /*
- * The previous time this destructor was called, we set the key
- * to PROF_TDATA_STATE_PURGATORY so that other destructors
- * wouldn't cause re-creation of the prof_tdata. This time, do
- * nothing, so that the destructor will not be called again.
- */
- } else if (prof_tdata != NULL) {
- /*
- * Delete the hash table. All of its contents can still be
- * iterated over via the LRU.
- */
- ckh_delete(&prof_tdata->bt2cnt);
+/* tdata->lock must be held. */
+static bool
+prof_tdata_should_destroy(prof_tdata_t *tdata, bool even_if_attached)
+{
+
+ if (tdata->attached && !even_if_attached)
+ return (false);
+ if (ckh_count(&tdata->bt2tctx) != 0)
+ return (false);
+ return (true);
+}
+
+/* tdatas_mtx must be held. */
+static void
+prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
+ bool even_if_attached)
+{
+ tcache_t *tcache;
+
+ assert(prof_tdata_should_destroy(tdata, even_if_attached));
+ assert(tsd_prof_tdata_get(tsd) != tdata);
+
+ tdata_tree_remove(&tdatas, tdata);
+
+ tcache = tcache_get(tsd, false);
+ if (tdata->thread_name != NULL)
+ idalloctm(tsd, tdata->thread_name, tcache, true);
+ ckh_delete(tsd, &tdata->bt2tctx);
+ idalloctm(tsd, tdata, tcache, true);
+}
+
+static void
+prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached)
+{
+
+ malloc_mutex_lock(&tdatas_mtx);
+ prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
+ malloc_mutex_unlock(&tdatas_mtx);
+}
+
+static void
+prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
+{
+ bool destroy_tdata;
+
+ malloc_mutex_lock(tdata->lock);
+ if (tdata->attached) {
+ destroy_tdata = prof_tdata_should_destroy(tdata, true);
/*
- * Iteratively merge cnt's into the global stats and delete
- * them.
+ * Only detach if !destroy_tdata, because detaching would allow
+ * another thread to win the race to destroy tdata.
*/
- while ((cnt = ql_last(&prof_tdata->lru_ql, lru_link)) != NULL) {
- ql_remove(&prof_tdata->lru_ql, cnt, lru_link);
- prof_ctx_merge(cnt->ctx, cnt);
- idalloc(cnt);
- }
- idalloc(prof_tdata->vec);
- idalloc(prof_tdata);
- prof_tdata = PROF_TDATA_STATE_PURGATORY;
- prof_tdata_tsd_set(&prof_tdata);
+ if (!destroy_tdata)
+ tdata->attached = false;
+ tsd_prof_tdata_set(tsd, NULL);
+ } else
+ destroy_tdata = false;
+ malloc_mutex_unlock(tdata->lock);
+ if (destroy_tdata)
+ prof_tdata_destroy(tsd, tdata, true);
+}
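[Note: prof_tdata_detach() decides under tdata->lock whether this thread gets to destroy the tdata, and only performs the destruction after dropping the lock; it also leaves `attached` set when it intends to destroy, so no other thread can race in. A condensed sketch of that decide-under-lock, act-outside-lock shape, with a hypothetical object type and plain pthreads:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct obj {
        pthread_mutex_t lock;
        bool attached;
        int live_entries;    /* stand-in for ckh_count(&tdata->bt2tctx) */
    };

    static bool
    obj_should_destroy(struct obj *o)
    {
        return (o->live_entries == 0);
    }

    static void
    obj_detach(struct obj *o)
    {
        bool destroy;

        pthread_mutex_lock(&o->lock);
        if (o->attached) {
            destroy = obj_should_destroy(o);
            /* Keep `attached` set if we will destroy, so nobody else races in. */
            if (!destroy)
                o->attached = false;
        } else
            destroy = false;
        pthread_mutex_unlock(&o->lock);

        if (destroy) {
            /* In prof.c this is prof_tdata_destroy(), taken under tdatas_mtx. */
            pthread_mutex_destroy(&o->lock);
            free(o);
        }
    }
]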
+
+prof_tdata_t *
+prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
+{
+ uint64_t thr_uid = tdata->thr_uid;
+ uint64_t thr_discrim = tdata->thr_discrim + 1;
+ char *thread_name = (tdata->thread_name != NULL) ?
+ prof_thread_name_alloc(tsd, tdata->thread_name) : NULL;
+ bool active = tdata->active;
+
+ prof_tdata_detach(tsd, tdata);
+ return (prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
+ active));
+}
+
+static bool
+prof_tdata_expire(prof_tdata_t *tdata)
+{
+ bool destroy_tdata;
+
+ malloc_mutex_lock(tdata->lock);
+ if (!tdata->expired) {
+ tdata->expired = true;
+ destroy_tdata = tdata->attached ? false :
+ prof_tdata_should_destroy(tdata, false);
+ } else
+ destroy_tdata = false;
+ malloc_mutex_unlock(tdata->lock);
+
+ return (destroy_tdata);
+}
+
+static prof_tdata_t *
+prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
+{
+
+ return (prof_tdata_expire(tdata) ? tdata : NULL);
+}
+
+void
+prof_reset(tsd_t *tsd, size_t lg_sample)
+{
+ prof_tdata_t *next;
+
+ assert(lg_sample < (sizeof(uint64_t) << 3));
+
+ malloc_mutex_lock(&prof_dump_mtx);
+ malloc_mutex_lock(&tdatas_mtx);
+
+ lg_prof_sample = lg_sample;
+
+ next = NULL;
+ do {
+ prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
+ prof_tdata_reset_iter, NULL);
+ if (to_destroy != NULL) {
+ next = tdata_tree_next(&tdatas, to_destroy);
+ prof_tdata_destroy_locked(tsd, to_destroy, false);
+ } else
+ next = NULL;
+ } while (next != NULL);
+
+ malloc_mutex_unlock(&tdatas_mtx);
+ malloc_mutex_unlock(&prof_dump_mtx);
+}
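[Note: prof_reset() walks the tdatas tree while destroying some of its nodes; the key move is fetching the successor with tdata_tree_next() before removing the current node. The same idea on a plain singly linked list, as an illustration only:

    #include <stdbool.h>
    #include <stdlib.h>

    struct node {
        struct node *next;
        bool expired;
    };

    /* Remove and free every expired node; safe because `next` is saved first. */
    static void
    list_reset(struct node **headp)
    {
        struct node **linkp = headp;

        while (*linkp != NULL) {
            struct node *cur = *linkp;
            struct node *next = cur->next;    /* grab successor before freeing */

            if (cur->expired) {
                *linkp = next;
                free(cur);
            } else
                linkp = &cur->next;
        }
    }
]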
+
+void
+prof_tdata_cleanup(tsd_t *tsd)
+{
+ prof_tdata_t *tdata;
+
+ if (!config_prof)
+ return;
+
+ tdata = tsd_prof_tdata_get(tsd);
+ if (tdata != NULL)
+ prof_tdata_detach(tsd, tdata);
+}
+
+bool
+prof_active_get(void)
+{
+ bool prof_active_current;
+
+ malloc_mutex_lock(&prof_active_mtx);
+ prof_active_current = prof_active;
+ malloc_mutex_unlock(&prof_active_mtx);
+ return (prof_active_current);
+}
+
+bool
+prof_active_set(bool active)
+{
+ bool prof_active_old;
+
+ malloc_mutex_lock(&prof_active_mtx);
+ prof_active_old = prof_active;
+ prof_active = active;
+ malloc_mutex_unlock(&prof_active_mtx);
+ return (prof_active_old);
+}
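[Note: prof_active_get()/prof_active_set() above, and the prof_gdump_* and prof_thread_active_init_* pairs further down, all follow the same read-under-lock / swap-under-lock pattern, with the setter returning the previous value so the mallctl layer can report it. A generic sketch with plain pthreads:

    #include <pthread.h>
    #include <stdbool.h>

    static bool flag_val;
    static pthread_mutex_t flag_mtx = PTHREAD_MUTEX_INITIALIZER;

    static bool
    flag_get(void)
    {
        bool v;

        pthread_mutex_lock(&flag_mtx);
        v = flag_val;
        pthread_mutex_unlock(&flag_mtx);
        return (v);
    }

    /* Set the flag and return the old value. */
    static bool
    flag_set(bool v)
    {
        bool old;

        pthread_mutex_lock(&flag_mtx);
        old = flag_val;
        flag_val = v;
        pthread_mutex_unlock(&flag_mtx);
        return (old);
    }
]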
+
+const char *
+prof_thread_name_get(void)
+{
+ tsd_t *tsd;
+ prof_tdata_t *tdata;
+
+ tsd = tsd_fetch();
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL)
+ return ("");
+ return (tdata->thread_name != NULL ? tdata->thread_name : "");
+}
+
+static char *
+prof_thread_name_alloc(tsd_t *tsd, const char *thread_name)
+{
+ char *ret;
+ size_t size;
+
+ if (thread_name == NULL)
+ return (NULL);
+
+ size = strlen(thread_name) + 1;
+ if (size == 1)
+ return ("");
+
+ ret = iallocztm(tsd, size, false, tcache_get(tsd, true), true, NULL);
+ if (ret == NULL)
+ return (NULL);
+ memcpy(ret, thread_name, size);
+ return (ret);
+}
+
+int
+prof_thread_name_set(tsd_t *tsd, const char *thread_name)
+{
+ prof_tdata_t *tdata;
+ unsigned i;
+ char *s;
+
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL)
+ return (EAGAIN);
+
+ /* Validate input. */
+ if (thread_name == NULL)
+ return (EFAULT);
+ for (i = 0; thread_name[i] != '\0'; i++) {
+ char c = thread_name[i];
+ if (!isgraph(c) && !isblank(c))
+ return (EFAULT);
}
+
+ s = prof_thread_name_alloc(tsd, thread_name);
+ if (s == NULL)
+ return (EAGAIN);
+
+ if (tdata->thread_name != NULL) {
+ idalloctm(tsd, tdata->thread_name, tcache_get(tsd, false),
+ true);
+ tdata->thread_name = NULL;
+ }
+ if (strlen(s) > 0)
+ tdata->thread_name = s;
+ return (0);
+}
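[Note: prof_thread_name_set() is reached from the mallctl namespace. Assuming the usual 4.x control name "thread.prof.name" and the je_ symbol prefix used in this tree (both assumptions, neither shown in this hunk), a caller would label its thread roughly like this:

    #include <jemalloc/jemalloc.h>

    /* Label the calling thread in heap profile dumps; returns 0 on success. */
    static int
    label_this_thread(const char *name)
    {
        /* For string-valued mallctls, newp points at the char pointer itself. */
        return (je_mallctl("thread.prof.name", NULL, NULL, (void *)&name,
            sizeof(const char *)));
    }
]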
+
+bool
+prof_thread_active_get(void)
+{
+ tsd_t *tsd;
+ prof_tdata_t *tdata;
+
+ tsd = tsd_fetch();
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL)
+ return (false);
+ return (tdata->active);
+}
+
+bool
+prof_thread_active_set(bool active)
+{
+ tsd_t *tsd;
+ prof_tdata_t *tdata;
+
+ tsd = tsd_fetch();
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL)
+ return (true);
+ tdata->active = active;
+ return (false);
+}
+
+bool
+prof_thread_active_init_get(void)
+{
+ bool active_init;
+
+ malloc_mutex_lock(&prof_thread_active_init_mtx);
+ active_init = prof_thread_active_init;
+ malloc_mutex_unlock(&prof_thread_active_init_mtx);
+ return (active_init);
+}
+
+bool
+prof_thread_active_init_set(bool active_init)
+{
+ bool active_init_old;
+
+ malloc_mutex_lock(&prof_thread_active_init_mtx);
+ active_init_old = prof_thread_active_init;
+ prof_thread_active_init = active_init;
+ malloc_mutex_unlock(&prof_thread_active_init_mtx);
+ return (active_init_old);
+}
+
+bool
+prof_gdump_get(void)
+{
+ bool prof_gdump_current;
+
+ malloc_mutex_lock(&prof_gdump_mtx);
+ prof_gdump_current = prof_gdump_val;
+ malloc_mutex_unlock(&prof_gdump_mtx);
+ return (prof_gdump_current);
+}
+
+bool
+prof_gdump_set(bool gdump)
+{
+ bool prof_gdump_old;
+
+ malloc_mutex_lock(&prof_gdump_mtx);
+ prof_gdump_old = prof_gdump_val;
+ prof_gdump_val = gdump;
+ malloc_mutex_unlock(&prof_gdump_mtx);
+ return (prof_gdump_old);
}
void
@@ -1300,11 +2078,11 @@ prof_boot1(void)
cassert(config_prof);
/*
- * opt_prof and prof_promote must be in their final state before any
- * arenas are initialized, so this function must be executed early.
+ * opt_prof must be in its final state before any arenas are
+ * initialized, so this function must be executed early.
*/
- if (opt_prof_leak && opt_prof == false) {
+ if (opt_prof_leak && !opt_prof) {
/*
* Enable opt_prof, but in such a way that profiles are never
* automatically dumped.
@@ -1317,8 +2095,6 @@ prof_boot1(void)
opt_lg_prof_interval);
}
}
-
- prof_promote = (opt_prof && opt_lg_prof_sample > LG_PAGE);
}
bool
@@ -1328,36 +2104,65 @@ prof_boot2(void)
cassert(config_prof);
if (opt_prof) {
+ tsd_t *tsd;
unsigned i;
- if (ckh_new(&bt2ctx, PROF_CKH_MINITEMS, prof_bt_hash,
+ lg_prof_sample = opt_lg_prof_sample;
+
+ prof_active = opt_prof_active;
+ if (malloc_mutex_init(&prof_active_mtx))
+ return (true);
+
+ prof_gdump_val = opt_prof_gdump;
+ if (malloc_mutex_init(&prof_gdump_mtx))
+ return (true);
+
+ prof_thread_active_init = opt_prof_thread_active_init;
+ if (malloc_mutex_init(&prof_thread_active_init_mtx))
+ return (true);
+
+ tsd = tsd_fetch();
+ if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
prof_bt_keycomp))
return (true);
- if (malloc_mutex_init(&bt2ctx_mtx))
+ if (malloc_mutex_init(&bt2gctx_mtx))
+ return (true);
+
+ tdata_tree_new(&tdatas);
+ if (malloc_mutex_init(&tdatas_mtx))
+ return (true);
+
+ next_thr_uid = 0;
+ if (malloc_mutex_init(&next_thr_uid_mtx))
return (true);
- if (prof_tdata_tsd_boot()) {
- malloc_write(
- "<jemalloc>: Error in pthread_key_create()\n");
- abort();
- }
if (malloc_mutex_init(&prof_dump_seq_mtx))
return (true);
if (malloc_mutex_init(&prof_dump_mtx))
return (true);
- if (atexit(prof_fdump) != 0) {
+ if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
+ atexit(prof_fdump) != 0) {
malloc_write("<jemalloc>: Error in atexit()\n");
if (opt_abort)
abort();
}
- ctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS *
+ gctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS *
sizeof(malloc_mutex_t));
- if (ctx_locks == NULL)
+ if (gctx_locks == NULL)
return (true);
for (i = 0; i < PROF_NCTX_LOCKS; i++) {
- if (malloc_mutex_init(&ctx_locks[i]))
+ if (malloc_mutex_init(&gctx_locks[i]))
+ return (true);
+ }
+
+ tdata_locks = (malloc_mutex_t *)base_alloc(PROF_NTDATA_LOCKS *
+ sizeof(malloc_mutex_t));
+ if (tdata_locks == NULL)
+ return (true);
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
+ if (malloc_mutex_init(&tdata_locks[i]))
return (true);
}
}
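[Note: prof_boot2() now sizes two arrays of mutexes (gctx_locks, tdata_locks) that are later selected by hashing an id (the selection helpers are not in this hunk), so unrelated gctx/tdata objects rarely contend on the same lock. A minimal striped-lock sketch with a fixed power-of-two stripe count, using plain pthreads and illustrative names:

    #include <pthread.h>
    #include <stdint.h>

    #define NLOCKS 64    /* must be a power of two */
    static pthread_mutex_t stripe_locks[NLOCKS];

    static int
    stripes_init(void)
    {
        unsigned i;

        for (i = 0; i < NLOCKS; i++) {
            if (pthread_mutex_init(&stripe_locks[i], NULL) != 0)
                return (-1);
        }
        return (0);
    }

    /* Map an id onto one of the stripes. */
    static pthread_mutex_t *
    stripe_choose(uint64_t id)
    {
        return (&stripe_locks[id & (NLOCKS - 1)]);
    }
]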
@@ -1382,10 +2187,14 @@ prof_prefork(void)
if (opt_prof) {
unsigned i;
- malloc_mutex_prefork(&bt2ctx_mtx);
+ malloc_mutex_prefork(&tdatas_mtx);
+ malloc_mutex_prefork(&bt2gctx_mtx);
+ malloc_mutex_prefork(&next_thr_uid_mtx);
malloc_mutex_prefork(&prof_dump_seq_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++)
- malloc_mutex_prefork(&ctx_locks[i]);
+ malloc_mutex_prefork(&gctx_locks[i]);
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++)
+ malloc_mutex_prefork(&tdata_locks[i]);
}
}
@@ -1396,10 +2205,14 @@ prof_postfork_parent(void)
if (opt_prof) {
unsigned i;
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++)
+ malloc_mutex_postfork_parent(&tdata_locks[i]);
for (i = 0; i < PROF_NCTX_LOCKS; i++)
- malloc_mutex_postfork_parent(&ctx_locks[i]);
+ malloc_mutex_postfork_parent(&gctx_locks[i]);
malloc_mutex_postfork_parent(&prof_dump_seq_mtx);
- malloc_mutex_postfork_parent(&bt2ctx_mtx);
+ malloc_mutex_postfork_parent(&next_thr_uid_mtx);
+ malloc_mutex_postfork_parent(&bt2gctx_mtx);
+ malloc_mutex_postfork_parent(&tdatas_mtx);
}
}
@@ -1410,10 +2223,14 @@ prof_postfork_child(void)
if (opt_prof) {
unsigned i;
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++)
+ malloc_mutex_postfork_child(&tdata_locks[i]);
for (i = 0; i < PROF_NCTX_LOCKS; i++)
- malloc_mutex_postfork_child(&ctx_locks[i]);
+ malloc_mutex_postfork_child(&gctx_locks[i]);
malloc_mutex_postfork_child(&prof_dump_seq_mtx);
- malloc_mutex_postfork_child(&bt2ctx_mtx);
+ malloc_mutex_postfork_child(&next_thr_uid_mtx);
+ malloc_mutex_postfork_child(&bt2gctx_mtx);
+ malloc_mutex_postfork_child(&tdatas_mtx);
}
}
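[Note: the prefork/postfork hunks keep a strict discipline: every mutex acquired in prof_prefork() is released in prof_postfork_parent()/prof_postfork_child() in the reverse order, so the child never inherits a lock held by a thread that no longer exists. Such handlers are typically registered through pthread_atfork() during allocator init (that wiring is outside these hunks); a reduced, self-contained sketch of the idea:

    #include <pthread.h>

    static pthread_mutex_t mtx_a = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t mtx_b = PTHREAD_MUTEX_INITIALIZER;

    /* Acquire in a fixed order before fork(); release in reverse afterwards. */
    static void
    prefork(void)
    {
        pthread_mutex_lock(&mtx_a);
        pthread_mutex_lock(&mtx_b);
    }

    static void
    postfork_parent(void)
    {
        pthread_mutex_unlock(&mtx_b);
        pthread_mutex_unlock(&mtx_a);
    }

    static void
    postfork_child(void)
    {
        /* The forking thread owns these locks in the child, so it may unlock them. */
        pthread_mutex_unlock(&mtx_b);
        pthread_mutex_unlock(&mtx_a);
    }

    static int
    install_fork_handlers(void)
    {
        return (pthread_atfork(prefork, postfork_parent, postfork_child));
    }
]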
diff --git a/deps/jemalloc/src/quarantine.c b/deps/jemalloc/src/quarantine.c
index 543151164..6c43dfcaa 100644
--- a/deps/jemalloc/src/quarantine.c
+++ b/deps/jemalloc/src/quarantine.c
@@ -2,7 +2,7 @@
#include "jemalloc/internal/jemalloc_internal.h"
/*
- * quarantine pointers close to NULL are used to encode state information that
+ * Quarantine pointers close to NULL are used to encode state information that
* is used for cleaning up during thread shutdown.
*/
#define QUARANTINE_STATE_REINCARNATED ((quarantine_t *)(uintptr_t)1)
@@ -10,26 +10,25 @@
#define QUARANTINE_STATE_MAX QUARANTINE_STATE_PURGATORY
/******************************************************************************/
-/* Data. */
-
-malloc_tsd_data(, quarantine, quarantine_t *, NULL)
-
-/******************************************************************************/
/* Function prototypes for non-inline static functions. */
-static quarantine_t *quarantine_grow(quarantine_t *quarantine);
-static void quarantine_drain_one(quarantine_t *quarantine);
-static void quarantine_drain(quarantine_t *quarantine, size_t upper_bound);
+static quarantine_t *quarantine_grow(tsd_t *tsd, quarantine_t *quarantine);
+static void quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine);
+static void quarantine_drain(tsd_t *tsd, quarantine_t *quarantine,
+ size_t upper_bound);
/******************************************************************************/
-quarantine_t *
-quarantine_init(size_t lg_maxobjs)
+static quarantine_t *
+quarantine_init(tsd_t *tsd, size_t lg_maxobjs)
{
quarantine_t *quarantine;
- quarantine = (quarantine_t *)imalloc(offsetof(quarantine_t, objs) +
- ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)));
+ assert(tsd_nominal(tsd));
+
+ quarantine = (quarantine_t *)iallocztm(tsd, offsetof(quarantine_t, objs)
+ + ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)), false,
+ tcache_get(tsd, true), true, NULL);
if (quarantine == NULL)
return (NULL);
quarantine->curbytes = 0;
@@ -37,19 +36,36 @@ quarantine_init(size_t lg_maxobjs)
quarantine->first = 0;
quarantine->lg_maxobjs = lg_maxobjs;
- quarantine_tsd_set(&quarantine);
-
return (quarantine);
}
+void
+quarantine_alloc_hook_work(tsd_t *tsd)
+{
+ quarantine_t *quarantine;
+
+ if (!tsd_nominal(tsd))
+ return;
+
+ quarantine = quarantine_init(tsd, LG_MAXOBJS_INIT);
+ /*
+ * Check again whether quarantine has been initialized, because
+ * quarantine_init() may have triggered recursive initialization.
+ */
+ if (tsd_quarantine_get(tsd) == NULL)
+ tsd_quarantine_set(tsd, quarantine);
+ else
+ idalloctm(tsd, quarantine, tcache_get(tsd, false), true);
+}
+
static quarantine_t *
-quarantine_grow(quarantine_t *quarantine)
+quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
{
quarantine_t *ret;
- ret = quarantine_init(quarantine->lg_maxobjs + 1);
+ ret = quarantine_init(tsd, quarantine->lg_maxobjs + 1);
if (ret == NULL) {
- quarantine_drain_one(quarantine);
+ quarantine_drain_one(tsd, quarantine);
return (quarantine);
}
@@ -71,17 +87,18 @@ quarantine_grow(quarantine_t *quarantine)
memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
sizeof(quarantine_obj_t));
}
- idalloc(quarantine);
+ idalloctm(tsd, quarantine, tcache_get(tsd, false), true);
+ tsd_quarantine_set(tsd, ret);
return (ret);
}
static void
-quarantine_drain_one(quarantine_t *quarantine)
+quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine)
{
quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
assert(obj->usize == isalloc(obj->ptr, config_prof));
- idalloc(obj->ptr);
+ idalloctm(tsd, obj->ptr, NULL, false);
quarantine->curbytes -= obj->usize;
quarantine->curobjs--;
quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
@@ -89,15 +106,15 @@ quarantine_drain_one(quarantine_t *quarantine)
}
static void
-quarantine_drain(quarantine_t *quarantine, size_t upper_bound)
+quarantine_drain(tsd_t *tsd, quarantine_t *quarantine, size_t upper_bound)
{
while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0)
- quarantine_drain_one(quarantine);
+ quarantine_drain_one(tsd, quarantine);
}
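[Note: the quarantine is a FIFO ring buffer whose capacity is a power of two (1 << lg_maxobjs), so quarantine_drain_one() above advances the head index with a mask instead of a modulo. A tiny standalone ring with the same indexing, as an illustration:

    #include <stddef.h>

    #define LG_MAXOBJS 3    /* capacity = 8 slots */
    #define NSLOTS ((size_t)1 << LG_MAXOBJS)

    struct ring {
        size_t first;    /* index of oldest element */
        size_t count;
        void *objs[NSLOTS];
    };

    static void
    ring_push(struct ring *r, void *p)
    {
        /* Caller ensures the ring is not full (count < NSLOTS). */
        r->objs[(r->first + r->count) & (NSLOTS - 1)] = p;
        r->count++;
    }

    static void *
    ring_pop_oldest(struct ring *r)
    {
        void *p = r->objs[r->first];

        r->first = (r->first + 1) & (NSLOTS - 1);    /* wrap with a mask */
        r->count--;
        return (p);
    }
]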
void
-quarantine(void *ptr)
+quarantine(tsd_t *tsd, void *ptr)
{
quarantine_t *quarantine;
size_t usize = isalloc(ptr, config_prof);
@@ -105,17 +122,8 @@ quarantine(void *ptr)
cassert(config_fill);
assert(opt_quarantine);
- quarantine = *quarantine_tsd_get();
- if ((uintptr_t)quarantine <= (uintptr_t)QUARANTINE_STATE_MAX) {
- if (quarantine == QUARANTINE_STATE_PURGATORY) {
- /*
- * Make a note that quarantine() was called after
- * quarantine_cleanup() was called.
- */
- quarantine = QUARANTINE_STATE_REINCARNATED;
- quarantine_tsd_set(&quarantine);
- }
- idalloc(ptr);
+ if ((quarantine = tsd_quarantine_get(tsd)) == NULL) {
+ idalloctm(tsd, ptr, NULL, false);
return;
}
/*
@@ -125,11 +133,11 @@ quarantine(void *ptr)
if (quarantine->curbytes + usize > opt_quarantine) {
size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
- usize : 0;
- quarantine_drain(quarantine, upper_bound);
+ quarantine_drain(tsd, quarantine, upper_bound);
}
/* Grow the quarantine ring buffer if it's full. */
if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
- quarantine = quarantine_grow(quarantine);
+ quarantine = quarantine_grow(tsd, quarantine);
/* quarantine_grow() must free a slot if it fails to grow. */
assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs));
/* Append ptr if its size doesn't exceed the quarantine size. */
@@ -141,12 +149,12 @@ quarantine(void *ptr)
obj->usize = usize;
quarantine->curbytes += usize;
quarantine->curobjs++;
- if (config_fill && opt_junk) {
+ if (config_fill && unlikely(opt_junk_free)) {
/*
* Only do redzone validation if Valgrind isn't in
* operation.
*/
- if ((config_valgrind == false || opt_valgrind == false)
+ if ((!config_valgrind || likely(!in_valgrind))
&& usize <= SMALL_MAXCLASS)
arena_quarantine_junk_small(ptr, usize);
else
@@ -154,46 +162,22 @@ quarantine(void *ptr)
}
} else {
assert(quarantine->curbytes == 0);
- idalloc(ptr);
+ idalloctm(tsd, ptr, NULL, false);
}
}
void
-quarantine_cleanup(void *arg)
-{
- quarantine_t *quarantine = *(quarantine_t **)arg;
-
- if (quarantine == QUARANTINE_STATE_REINCARNATED) {
- /*
- * Another destructor deallocated memory after this destructor
- * was called. Reset quarantine to QUARANTINE_STATE_PURGATORY
- * in order to receive another callback.
- */
- quarantine = QUARANTINE_STATE_PURGATORY;
- quarantine_tsd_set(&quarantine);
- } else if (quarantine == QUARANTINE_STATE_PURGATORY) {
- /*
- * The previous time this destructor was called, we set the key
- * to QUARANTINE_STATE_PURGATORY so that other destructors
- * wouldn't cause re-creation of the quarantine. This time, do
- * nothing, so that the destructor will not be called again.
- */
- } else if (quarantine != NULL) {
- quarantine_drain(quarantine, 0);
- idalloc(quarantine);
- quarantine = QUARANTINE_STATE_PURGATORY;
- quarantine_tsd_set(&quarantine);
- }
-}
-
-bool
-quarantine_boot(void)
+quarantine_cleanup(tsd_t *tsd)
{
+ quarantine_t *quarantine;
- cassert(config_fill);
-
- if (quarantine_tsd_boot())
- return (true);
+ if (!config_fill)
+ return;
- return (false);
+ quarantine = tsd_quarantine_get(tsd);
+ if (quarantine != NULL) {
+ quarantine_drain(tsd, quarantine, 0);
+ idalloctm(tsd, quarantine, tcache_get(tsd, false), true);
+ tsd_quarantine_set(tsd, NULL);
+ }
}
diff --git a/deps/jemalloc/src/rtree.c b/deps/jemalloc/src/rtree.c
index 205957ac4..af0d97e75 100644
--- a/deps/jemalloc/src/rtree.c
+++ b/deps/jemalloc/src/rtree.c
@@ -1,73 +1,74 @@
#define JEMALLOC_RTREE_C_
#include "jemalloc/internal/jemalloc_internal.h"
-rtree_t *
-rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc)
+static unsigned
+hmin(unsigned ha, unsigned hb)
{
- rtree_t *ret;
- unsigned bits_per_level, bits_in_leaf, height, i;
+
+ return (ha < hb ? ha : hb);
+}
+
+/* Only the most significant bits of keys passed to rtree_[gs]et() are used. */
+bool
+rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
+ rtree_node_dalloc_t *dalloc)
+{
+ unsigned bits_in_leaf, height, i;
assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3));
- bits_per_level = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(void *)))) - 1;
- bits_in_leaf = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(uint8_t)))) - 1;
+ bits_in_leaf = (bits % RTREE_BITS_PER_LEVEL) == 0 ? RTREE_BITS_PER_LEVEL
+ : (bits % RTREE_BITS_PER_LEVEL);
if (bits > bits_in_leaf) {
- height = 1 + (bits - bits_in_leaf) / bits_per_level;
- if ((height-1) * bits_per_level + bits_in_leaf != bits)
+ height = 1 + (bits - bits_in_leaf) / RTREE_BITS_PER_LEVEL;
+ if ((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf != bits)
height++;
- } else {
+ } else
height = 1;
+ assert((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf == bits);
+
+ rtree->alloc = alloc;
+ rtree->dalloc = dalloc;
+ rtree->height = height;
+
+ /* Root level. */
+ rtree->levels[0].subtree = NULL;
+ rtree->levels[0].bits = (height > 1) ? RTREE_BITS_PER_LEVEL :
+ bits_in_leaf;
+ rtree->levels[0].cumbits = rtree->levels[0].bits;
+ /* Interior levels. */
+ for (i = 1; i < height-1; i++) {
+ rtree->levels[i].subtree = NULL;
+ rtree->levels[i].bits = RTREE_BITS_PER_LEVEL;
+ rtree->levels[i].cumbits = rtree->levels[i-1].cumbits +
+ RTREE_BITS_PER_LEVEL;
}
- assert((height-1) * bits_per_level + bits_in_leaf >= bits);
-
- ret = (rtree_t*)alloc(offsetof(rtree_t, level2bits) +
- (sizeof(unsigned) * height));
- if (ret == NULL)
- return (NULL);
- memset(ret, 0, offsetof(rtree_t, level2bits) + (sizeof(unsigned) *
- height));
-
- ret->alloc = alloc;
- ret->dalloc = dalloc;
- if (malloc_mutex_init(&ret->mutex)) {
- if (dalloc != NULL)
- dalloc(ret);
- return (NULL);
- }
- ret->height = height;
+ /* Leaf level. */
if (height > 1) {
- if ((height-1) * bits_per_level + bits_in_leaf > bits) {
- ret->level2bits[0] = (bits - bits_in_leaf) %
- bits_per_level;
- } else
- ret->level2bits[0] = bits_per_level;
- for (i = 1; i < height-1; i++)
- ret->level2bits[i] = bits_per_level;
- ret->level2bits[height-1] = bits_in_leaf;
- } else
- ret->level2bits[0] = bits;
+ rtree->levels[height-1].subtree = NULL;
+ rtree->levels[height-1].bits = bits_in_leaf;
+ rtree->levels[height-1].cumbits = bits;
+ }
- ret->root = (void**)alloc(sizeof(void *) << ret->level2bits[0]);
- if (ret->root == NULL) {
- if (dalloc != NULL)
- dalloc(ret);
- return (NULL);
+ /* Compute lookup table to be used by rtree_start_level(). */
+ for (i = 0; i < RTREE_HEIGHT_MAX; i++) {
+ rtree->start_level[i] = hmin(RTREE_HEIGHT_MAX - 1 - i, height -
+ 1);
}
- memset(ret->root, 0, sizeof(void *) << ret->level2bits[0]);
- return (ret);
+ return (false);
}
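[Note: worked example of the new level geometry in rtree_new(). Assuming RTREE_BITS_PER_LEVEL is 4 (the constant's value is not in this hunk) and bits = 34: bits_in_leaf = 34 % 4 = 2, height = 1 + (34 - 2) / 4 = 9, and (9 - 1) * 4 + 2 == 34, so every non-leaf level consumes 4 key bits and the leaf consumes the remaining 2. A mirror of the computation, runnable on its own:

    #include <assert.h>

    #define BITS_PER_LEVEL 4    /* assumed value, for illustration only */

    static unsigned
    rtree_height_example(unsigned bits)
    {
        unsigned bits_in_leaf = (bits % BITS_PER_LEVEL == 0) ?
            BITS_PER_LEVEL : bits % BITS_PER_LEVEL;
        unsigned height;

        if (bits > bits_in_leaf) {
            height = 1 + (bits - bits_in_leaf) / BITS_PER_LEVEL;
            if ((height - 1) * BITS_PER_LEVEL + bits_in_leaf != bits)
                height++;
        } else
            height = 1;
        assert((height - 1) * BITS_PER_LEVEL + bits_in_leaf == bits);
        return (height);    /* e.g. bits = 34 -> leaf takes 2 bits, height = 9 */
    }
]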
static void
-rtree_delete_subtree(rtree_t *rtree, void **node, unsigned level)
+rtree_delete_subtree(rtree_t *rtree, rtree_node_elm_t *node, unsigned level)
{
- if (level < rtree->height - 1) {
+ if (level + 1 < rtree->height) {
size_t nchildren, i;
- nchildren = ZU(1) << rtree->level2bits[level];
+ nchildren = ZU(1) << rtree->levels[level].bits;
for (i = 0; i < nchildren; i++) {
- void **child = (void **)node[i];
+ rtree_node_elm_t *child = node[i].child;
if (child != NULL)
rtree_delete_subtree(rtree, child, level + 1);
}
@@ -78,28 +79,49 @@ rtree_delete_subtree(rtree_t *rtree, void **node, unsigned level)
void
rtree_delete(rtree_t *rtree)
{
+ unsigned i;
- rtree_delete_subtree(rtree, rtree->root, 0);
- rtree->dalloc(rtree);
+ for (i = 0; i < rtree->height; i++) {
+ rtree_node_elm_t *subtree = rtree->levels[i].subtree;
+ if (subtree != NULL)
+ rtree_delete_subtree(rtree, subtree, i);
+ }
}
-void
-rtree_prefork(rtree_t *rtree)
+static rtree_node_elm_t *
+rtree_node_init(rtree_t *rtree, unsigned level, rtree_node_elm_t **elmp)
{
+ rtree_node_elm_t *node;
+
+ if (atomic_cas_p((void **)elmp, NULL, RTREE_NODE_INITIALIZING)) {
+ /*
+ * Another thread is already in the process of initializing.
+ * Spin-wait until initialization is complete.
+ */
+ do {
+ CPU_SPINWAIT;
+ node = atomic_read_p((void **)elmp);
+ } while (node == RTREE_NODE_INITIALIZING);
+ } else {
+ node = rtree->alloc(ZU(1) << rtree->levels[level].bits);
+ if (node == NULL)
+ return (NULL);
+ atomic_write_p((void **)elmp, node);
+ }
- malloc_mutex_prefork(&rtree->mutex);
+ return (node);
}
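[Note: rtree_node_init() above is the new lock-free lazy initialization: exactly one thread wins the CAS from NULL to a sentinel and allocates, everyone else spins until the real pointer is published. The same idiom with C11 atomics (the jemalloc atomic_* wrappers are not reproduced here, and unlike the patch this sketch gives the slot back on allocation failure):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define INITIALIZING ((void *)(uintptr_t)1)    /* sentinel; never a valid node */

    static void *
    lazy_node_init(_Atomic(void *) *slotp, size_t size)
    {
        void *expected = NULL;

        if (atomic_compare_exchange_strong(slotp, &expected, INITIALIZING)) {
            /* This thread won the race: allocate and publish the node. */
            void *node = calloc(1, size);
            if (node == NULL) {
                atomic_store(slotp, NULL);
                return (NULL);
            }
            atomic_store(slotp, node);
            return (node);
        }
        /* Another thread is initializing: spin until the real pointer appears. */
        while ((expected = atomic_load(slotp)) == INITIALIZING)
            ;    /* CPU_SPINWAIT in jemalloc */
        return (expected);
    }
]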
-void
-rtree_postfork_parent(rtree_t *rtree)
+rtree_node_elm_t *
+rtree_subtree_read_hard(rtree_t *rtree, unsigned level)
{
- malloc_mutex_postfork_parent(&rtree->mutex);
+ return (rtree_node_init(rtree, level, &rtree->levels[level].subtree));
}
-void
-rtree_postfork_child(rtree_t *rtree)
+rtree_node_elm_t *
+rtree_child_read_hard(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level)
{
- malloc_mutex_postfork_child(&rtree->mutex);
+ return (rtree_node_init(rtree, level, &elm->child));
}
diff --git a/deps/jemalloc/src/stats.c b/deps/jemalloc/src/stats.c
index bef2ab33c..154c3e74c 100644
--- a/deps/jemalloc/src/stats.c
+++ b/deps/jemalloc/src/stats.c
@@ -6,31 +6,22 @@
xmallctl(n, v, &sz, NULL, 0); \
} while (0)
-#define CTL_I_GET(n, v, t) do { \
+#define CTL_M2_GET(n, i, v, t) do { \
size_t mib[6]; \
size_t miblen = sizeof(mib) / sizeof(size_t); \
size_t sz = sizeof(t); \
xmallctlnametomib(n, mib, &miblen); \
- mib[2] = i; \
+ mib[2] = (i); \
xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)
-#define CTL_J_GET(n, v, t) do { \
+#define CTL_M2_M4_GET(n, i, j, v, t) do { \
size_t mib[6]; \
size_t miblen = sizeof(mib) / sizeof(size_t); \
size_t sz = sizeof(t); \
xmallctlnametomib(n, mib, &miblen); \
- mib[2] = j; \
- xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
-} while (0)
-
-#define CTL_IJ_GET(n, v, t) do { \
- size_t mib[6]; \
- size_t miblen = sizeof(mib) / sizeof(size_t); \
- size_t sz = sizeof(t); \
- xmallctlnametomib(n, mib, &miblen); \
- mib[2] = i; \
- mib[4] = j; \
+ mib[2] = (i); \
+ mib[4] = (j); \
xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)
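[Note: the renamed CTL_M2_GET/CTL_M2_M4_GET macros simply fill MIB slots 2 (and 4) with the arena/bin indices before the by-MIB call. For readers unfamiliar with the MIB interface, a rough public-API equivalent of CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t), using the je_-prefixed exported functions as in this tree:

    #include <jemalloc/jemalloc.h>

    static size_t
    arena_pdirty(unsigned i)
    {
        size_t mib[6];
        size_t miblen = sizeof(mib) / sizeof(size_t);
        size_t pdirty = 0;
        size_t sz = sizeof(pdirty);

        je_mallctlnametomib("stats.arenas.0.pdirty", mib, &miblen);
        mib[2] = (size_t)i;    /* overwrite the "0" component with the arena index */
        je_mallctlbymib(mib, miblen, &pdirty, &sz, NULL, 0);
        return (pdirty);
    }
]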
@@ -48,8 +39,10 @@ static void stats_arena_bins_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned i);
static void stats_arena_lruns_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned i);
+static void stats_arena_hchunks_print(
+ void (*write_cb)(void *, const char *), void *cbopaque, unsigned i);
static void stats_arena_print(void (*write_cb)(void *, const char *),
- void *cbopaque, unsigned i, bool bins, bool large);
+ void *cbopaque, unsigned i, bool bins, bool large, bool huge);
/******************************************************************************/
@@ -58,100 +51,109 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned i)
{
size_t page;
- bool config_tcache;
- unsigned nbins, j, gap_start;
+ bool config_tcache, in_gap;
+ unsigned nbins, j;
CTL_GET("arenas.page", &page, size_t);
CTL_GET("config.tcache", &config_tcache, bool);
if (config_tcache) {
malloc_cprintf(write_cb, cbopaque,
- "bins: bin size regs pgs allocated nmalloc"
- " ndalloc nrequests nfills nflushes"
- " newruns reruns curruns\n");
+ "bins: size ind allocated nmalloc"
+ " ndalloc nrequests curregs curruns regs"
+ " pgs util nfills nflushes newruns"
+ " reruns\n");
} else {
malloc_cprintf(write_cb, cbopaque,
- "bins: bin size regs pgs allocated nmalloc"
- " ndalloc newruns reruns curruns\n");
+ "bins: size ind allocated nmalloc"
+ " ndalloc nrequests curregs curruns regs"
+ " pgs util newruns reruns\n");
}
CTL_GET("arenas.nbins", &nbins, unsigned);
- for (j = 0, gap_start = UINT_MAX; j < nbins; j++) {
+ for (j = 0, in_gap = false; j < nbins; j++) {
uint64_t nruns;
- CTL_IJ_GET("stats.arenas.0.bins.0.nruns", &nruns, uint64_t);
- if (nruns == 0) {
- if (gap_start == UINT_MAX)
- gap_start = j;
- } else {
- size_t reg_size, run_size, allocated;
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nruns", i, j, &nruns,
+ uint64_t);
+ if (nruns == 0)
+ in_gap = true;
+ else {
+ size_t reg_size, run_size, curregs, availregs, milli;
+ size_t curruns;
uint32_t nregs;
uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
uint64_t reruns;
- size_t curruns;
+ char util[6]; /* "x.yyy". */
- if (gap_start != UINT_MAX) {
- if (j > gap_start + 1) {
- /* Gap of more than one size class. */
- malloc_cprintf(write_cb, cbopaque,
- "[%u..%u]\n", gap_start,
- j - 1);
- } else {
- /* Gap of one size class. */
- malloc_cprintf(write_cb, cbopaque,
- "[%u]\n", gap_start);
- }
- gap_start = UINT_MAX;
+ if (in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ " ---\n");
+ in_gap = false;
}
- CTL_J_GET("arenas.bin.0.size", &reg_size, size_t);
- CTL_J_GET("arenas.bin.0.nregs", &nregs, uint32_t);
- CTL_J_GET("arenas.bin.0.run_size", &run_size, size_t);
- CTL_IJ_GET("stats.arenas.0.bins.0.allocated",
- &allocated, size_t);
- CTL_IJ_GET("stats.arenas.0.bins.0.nmalloc",
+ CTL_M2_GET("arenas.bin.0.size", j, &reg_size, size_t);
+ CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t);
+ CTL_M2_GET("arenas.bin.0.run_size", j, &run_size,
+ size_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j,
&nmalloc, uint64_t);
- CTL_IJ_GET("stats.arenas.0.bins.0.ndalloc",
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j,
&ndalloc, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j,
+ &curregs, size_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j,
+ &nrequests, uint64_t);
if (config_tcache) {
- CTL_IJ_GET("stats.arenas.0.bins.0.nrequests",
- &nrequests, uint64_t);
- CTL_IJ_GET("stats.arenas.0.bins.0.nfills",
- &nfills, uint64_t);
- CTL_IJ_GET("stats.arenas.0.bins.0.nflushes",
- &nflushes, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i,
+ j, &nfills, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes",
+ i, j, &nflushes, uint64_t);
}
- CTL_IJ_GET("stats.arenas.0.bins.0.nreruns", &reruns,
- uint64_t);
- CTL_IJ_GET("stats.arenas.0.bins.0.curruns", &curruns,
- size_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nreruns", i, j,
+ &reruns, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.curruns", i, j,
+ &curruns, size_t);
+
+ availregs = nregs * curruns;
+ milli = (availregs != 0) ? (1000 * curregs) / availregs
+ : 1000;
+ assert(milli <= 1000);
+ if (milli < 10) {
+ malloc_snprintf(util, sizeof(util),
+ "0.00%zu", milli);
+ } else if (milli < 100) {
+ malloc_snprintf(util, sizeof(util), "0.0%zu",
+ milli);
+ } else if (milli < 1000) {
+ malloc_snprintf(util, sizeof(util), "0.%zu",
+ milli);
+ } else
+ malloc_snprintf(util, sizeof(util), "1");
+
if (config_tcache) {
malloc_cprintf(write_cb, cbopaque,
- "%13u %5zu %4u %3zu %12zu %12"PRIu64
- " %12"PRIu64" %12"PRIu64" %12"PRIu64
- " %12"PRIu64" %12"PRIu64" %12"PRIu64
- " %12zu\n",
- j, reg_size, nregs, run_size / page,
- allocated, nmalloc, ndalloc, nrequests,
- nfills, nflushes, nruns, reruns, curruns);
+ "%20zu %3u %12zu %12"FMTu64
+ " %12"FMTu64" %12"FMTu64" %12zu"
+ " %12zu %4u %3zu %-5s %12"FMTu64
+ " %12"FMTu64" %12"FMTu64" %12"FMTu64"\n",
+ reg_size, j, curregs * reg_size, nmalloc,
+ ndalloc, nrequests, curregs, curruns, nregs,
+ run_size / page, util, nfills, nflushes,
+ nruns, reruns);
} else {
malloc_cprintf(write_cb, cbopaque,
- "%13u %5zu %4u %3zu %12zu %12"PRIu64
- " %12"PRIu64" %12"PRIu64" %12"PRIu64
- " %12zu\n",
- j, reg_size, nregs, run_size / page,
- allocated, nmalloc, ndalloc, nruns, reruns,
- curruns);
+ "%20zu %3u %12zu %12"FMTu64
+ " %12"FMTu64" %12"FMTu64" %12zu"
+ " %12zu %4u %3zu %-5s %12"FMTu64
+ " %12"FMTu64"\n",
+ reg_size, j, curregs * reg_size, nmalloc,
+ ndalloc, nrequests, curregs, curruns, nregs,
+ run_size / page, util, nruns, reruns);
}
}
}
- if (gap_start != UINT_MAX) {
- if (j > gap_start + 1) {
- /* Gap of more than one size class. */
- malloc_cprintf(write_cb, cbopaque, "[%u..%u]\n",
- gap_start, j - 1);
- } else {
- /* Gap of one size class. */
- malloc_cprintf(write_cb, cbopaque, "[%u]\n", gap_start);
- }
+ if (in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ " ---\n");
}
}
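[Note: the new per-bin "util" column is computed in thousandths: availregs = nregs * curruns and milli = 1000 * curregs / availregs, then formatted as "x.yyy". For example, a bin with nregs = 64, curruns = 10 and curregs = 512 gives availregs = 640 and milli = 1000 * 512 / 640 = 800, which the "0.%zu" branch prints as "0.800", i.e. 80% of the available regions in the bin's runs are in use.]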
@@ -159,110 +161,199 @@ static void
stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned i)
{
- size_t page, nlruns, j;
- ssize_t gap_start;
-
- CTL_GET("arenas.page", &page, size_t);
+ unsigned nbins, nlruns, j;
+ bool in_gap;
malloc_cprintf(write_cb, cbopaque,
- "large: size pages nmalloc ndalloc nrequests"
- " curruns\n");
- CTL_GET("arenas.nlruns", &nlruns, size_t);
- for (j = 0, gap_start = -1; j < nlruns; j++) {
+ "large: size ind allocated nmalloc ndalloc"
+ " nrequests curruns\n");
+ CTL_GET("arenas.nbins", &nbins, unsigned);
+ CTL_GET("arenas.nlruns", &nlruns, unsigned);
+ for (j = 0, in_gap = false; j < nlruns; j++) {
uint64_t nmalloc, ndalloc, nrequests;
size_t run_size, curruns;
- CTL_IJ_GET("stats.arenas.0.lruns.0.nmalloc", &nmalloc,
+ CTL_M2_M4_GET("stats.arenas.0.lruns.0.nmalloc", i, j, &nmalloc,
uint64_t);
- CTL_IJ_GET("stats.arenas.0.lruns.0.ndalloc", &ndalloc,
+ CTL_M2_M4_GET("stats.arenas.0.lruns.0.ndalloc", i, j, &ndalloc,
uint64_t);
- CTL_IJ_GET("stats.arenas.0.lruns.0.nrequests", &nrequests,
- uint64_t);
- if (nrequests == 0) {
- if (gap_start == -1)
- gap_start = j;
- } else {
- CTL_J_GET("arenas.lrun.0.size", &run_size, size_t);
- CTL_IJ_GET("stats.arenas.0.lruns.0.curruns", &curruns,
+ CTL_M2_M4_GET("stats.arenas.0.lruns.0.nrequests", i, j,
+ &nrequests, uint64_t);
+ if (nrequests == 0)
+ in_gap = true;
+ else {
+ CTL_M2_GET("arenas.lrun.0.size", j, &run_size, size_t);
+ CTL_M2_M4_GET("stats.arenas.0.lruns.0.curruns", i, j,
+ &curruns, size_t);
+ if (in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ " ---\n");
+ in_gap = false;
+ }
+ malloc_cprintf(write_cb, cbopaque,
+ "%20zu %3u %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64" %12zu\n",
+ run_size, nbins + j, curruns * run_size, nmalloc,
+ ndalloc, nrequests, curruns);
+ }
+ }
+ if (in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ " ---\n");
+ }
+}
+
+static void
+stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
+ void *cbopaque, unsigned i)
+{
+ unsigned nbins, nlruns, nhchunks, j;
+ bool in_gap;
+
+ malloc_cprintf(write_cb, cbopaque,
+ "huge: size ind allocated nmalloc ndalloc"
+ " nrequests curhchunks\n");
+ CTL_GET("arenas.nbins", &nbins, unsigned);
+ CTL_GET("arenas.nlruns", &nlruns, unsigned);
+ CTL_GET("arenas.nhchunks", &nhchunks, unsigned);
+ for (j = 0, in_gap = false; j < nhchunks; j++) {
+ uint64_t nmalloc, ndalloc, nrequests;
+ size_t hchunk_size, curhchunks;
+
+ CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nmalloc", i, j,
+ &nmalloc, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.hchunks.0.ndalloc", i, j,
+ &ndalloc, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nrequests", i, j,
+ &nrequests, uint64_t);
+ if (nrequests == 0)
+ in_gap = true;
+ else {
+ CTL_M2_GET("arenas.hchunk.0.size", j, &hchunk_size,
size_t);
- if (gap_start != -1) {
- malloc_cprintf(write_cb, cbopaque, "[%zu]\n",
- j - gap_start);
- gap_start = -1;
+ CTL_M2_M4_GET("stats.arenas.0.hchunks.0.curhchunks", i,
+ j, &curhchunks, size_t);
+ if (in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ " ---\n");
+ in_gap = false;
}
malloc_cprintf(write_cb, cbopaque,
- "%13zu %5zu %12"PRIu64" %12"PRIu64" %12"PRIu64
- " %12zu\n",
- run_size, run_size / page, nmalloc, ndalloc,
- nrequests, curruns);
+ "%20zu %3u %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64" %12zu\n",
+ hchunk_size, nbins + nlruns + j,
+ curhchunks * hchunk_size, nmalloc, ndalloc,
+ nrequests, curhchunks);
}
}
- if (gap_start != -1)
- malloc_cprintf(write_cb, cbopaque, "[%zu]\n", j - gap_start);
+ if (in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ " ---\n");
+ }
}
static void
stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
- unsigned i, bool bins, bool large)
+ unsigned i, bool bins, bool large, bool huge)
{
unsigned nthreads;
const char *dss;
+ ssize_t lg_dirty_mult;
size_t page, pactive, pdirty, mapped;
+ size_t metadata_mapped, metadata_allocated;
uint64_t npurge, nmadvise, purged;
size_t small_allocated;
uint64_t small_nmalloc, small_ndalloc, small_nrequests;
size_t large_allocated;
uint64_t large_nmalloc, large_ndalloc, large_nrequests;
+ size_t huge_allocated;
+ uint64_t huge_nmalloc, huge_ndalloc, huge_nrequests;
CTL_GET("arenas.page", &page, size_t);
- CTL_I_GET("stats.arenas.0.nthreads", &nthreads, unsigned);
+ CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned);
malloc_cprintf(write_cb, cbopaque,
"assigned threads: %u\n", nthreads);
- CTL_I_GET("stats.arenas.0.dss", &dss, const char *);
+ CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *);
malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n",
dss);
- CTL_I_GET("stats.arenas.0.pactive", &pactive, size_t);
- CTL_I_GET("stats.arenas.0.pdirty", &pdirty, size_t);
- CTL_I_GET("stats.arenas.0.npurge", &npurge, uint64_t);
- CTL_I_GET("stats.arenas.0.nmadvise", &nmadvise, uint64_t);
- CTL_I_GET("stats.arenas.0.purged", &purged, uint64_t);
+ CTL_M2_GET("stats.arenas.0.lg_dirty_mult", i, &lg_dirty_mult, ssize_t);
+ if (lg_dirty_mult >= 0) {
+ malloc_cprintf(write_cb, cbopaque,
+ "min active:dirty page ratio: %u:1\n",
+ (1U << lg_dirty_mult));
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "min active:dirty page ratio: N/A\n");
+ }
+ CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t);
+ CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t);
+ CTL_M2_GET("stats.arenas.0.npurge", i, &npurge, uint64_t);
+ CTL_M2_GET("stats.arenas.0.nmadvise", i, &nmadvise, uint64_t);
+ CTL_M2_GET("stats.arenas.0.purged", i, &purged, uint64_t);
malloc_cprintf(write_cb, cbopaque,
- "dirty pages: %zu:%zu active:dirty, %"PRIu64" sweep%s,"
- " %"PRIu64" madvise%s, %"PRIu64" purged\n",
- pactive, pdirty, npurge, npurge == 1 ? "" : "s",
- nmadvise, nmadvise == 1 ? "" : "s", purged);
+ "dirty pages: %zu:%zu active:dirty, %"FMTu64" sweep%s, %"FMTu64
+ " madvise%s, %"FMTu64" purged\n", pactive, pdirty, npurge, npurge ==
+ 1 ? "" : "s", nmadvise, nmadvise == 1 ? "" : "s", purged);
malloc_cprintf(write_cb, cbopaque,
- " allocated nmalloc ndalloc nrequests\n");
- CTL_I_GET("stats.arenas.0.small.allocated", &small_allocated, size_t);
- CTL_I_GET("stats.arenas.0.small.nmalloc", &small_nmalloc, uint64_t);
- CTL_I_GET("stats.arenas.0.small.ndalloc", &small_ndalloc, uint64_t);
- CTL_I_GET("stats.arenas.0.small.nrequests", &small_nrequests, uint64_t);
+ " allocated nmalloc ndalloc"
+ " nrequests\n");
+ CTL_M2_GET("stats.arenas.0.small.allocated", i, &small_allocated,
+ size_t);
+ CTL_M2_GET("stats.arenas.0.small.nmalloc", i, &small_nmalloc, uint64_t);
+ CTL_M2_GET("stats.arenas.0.small.ndalloc", i, &small_ndalloc, uint64_t);
+ CTL_M2_GET("stats.arenas.0.small.nrequests", i, &small_nrequests,
+ uint64_t);
malloc_cprintf(write_cb, cbopaque,
- "small: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
+ "small: %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64"\n",
small_allocated, small_nmalloc, small_ndalloc, small_nrequests);
- CTL_I_GET("stats.arenas.0.large.allocated", &large_allocated, size_t);
- CTL_I_GET("stats.arenas.0.large.nmalloc", &large_nmalloc, uint64_t);
- CTL_I_GET("stats.arenas.0.large.ndalloc", &large_ndalloc, uint64_t);
- CTL_I_GET("stats.arenas.0.large.nrequests", &large_nrequests, uint64_t);
+ CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated,
+ size_t);
+ CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t);
+ CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t);
+ CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests,
+ uint64_t);
malloc_cprintf(write_cb, cbopaque,
- "large: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
+ "large: %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64"\n",
large_allocated, large_nmalloc, large_ndalloc, large_nrequests);
+ CTL_M2_GET("stats.arenas.0.huge.allocated", i, &huge_allocated, size_t);
+ CTL_M2_GET("stats.arenas.0.huge.nmalloc", i, &huge_nmalloc, uint64_t);
+ CTL_M2_GET("stats.arenas.0.huge.ndalloc", i, &huge_ndalloc, uint64_t);
+ CTL_M2_GET("stats.arenas.0.huge.nrequests", i, &huge_nrequests,
+ uint64_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "huge: %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64"\n",
+ huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests);
+ malloc_cprintf(write_cb, cbopaque,
+ "total: %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64"\n",
+ small_allocated + large_allocated + huge_allocated,
+ small_nmalloc + large_nmalloc + huge_nmalloc,
+ small_ndalloc + large_ndalloc + huge_ndalloc,
+ small_nrequests + large_nrequests + huge_nrequests);
+ malloc_cprintf(write_cb, cbopaque,
+ "active: %12zu\n", pactive * page);
+ CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t);
malloc_cprintf(write_cb, cbopaque,
- "total: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
- small_allocated + large_allocated,
- small_nmalloc + large_nmalloc,
- small_ndalloc + large_ndalloc,
- small_nrequests + large_nrequests);
- malloc_cprintf(write_cb, cbopaque, "active: %12zu\n", pactive * page);
- CTL_I_GET("stats.arenas.0.mapped", &mapped, size_t);
- malloc_cprintf(write_cb, cbopaque, "mapped: %12zu\n", mapped);
+ "mapped: %12zu\n", mapped);
+ CTL_M2_GET("stats.arenas.0.metadata.mapped", i, &metadata_mapped,
+ size_t);
+ CTL_M2_GET("stats.arenas.0.metadata.allocated", i, &metadata_allocated,
+ size_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "metadata: mapped: %zu, allocated: %zu\n",
+ metadata_mapped, metadata_allocated);
if (bins)
stats_arena_bins_print(write_cb, cbopaque, i);
if (large)
stats_arena_lruns_print(write_cb, cbopaque, i);
+ if (huge)
+ stats_arena_hchunks_print(write_cb, cbopaque, i);
}
void
@@ -277,6 +368,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
bool unmerged = true;
bool bins = true;
bool large = true;
+ bool huge = true;
/*
* Refresh stats, in case mallctl() was called by the application.
@@ -319,6 +411,9 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
case 'l':
large = false;
break;
+ case 'h':
+ huge = false;
+ break;
default:;
}
}
@@ -327,7 +422,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
malloc_cprintf(write_cb, cbopaque,
"___ Begin jemalloc statistics ___\n");
if (general) {
- int err;
const char *cpv;
bool bv;
unsigned uv;
@@ -346,26 +440,40 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
bv ? "enabled" : "disabled");
#define OPT_WRITE_BOOL(n) \
- if ((err = je_mallctl("opt."#n, &bv, &bsz, NULL, 0)) \
- == 0) { \
+ if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %s\n", bv ? "true" : "false"); \
}
+#define OPT_WRITE_BOOL_MUTABLE(n, m) { \
+ bool bv2; \
+ if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0 && \
+ je_mallctl(#m, &bv2, &bsz, NULL, 0) == 0) { \
+ malloc_cprintf(write_cb, cbopaque, \
+ " opt."#n": %s ("#m": %s)\n", bv ? "true" \
+ : "false", bv2 ? "true" : "false"); \
+ } \
+}
#define OPT_WRITE_SIZE_T(n) \
- if ((err = je_mallctl("opt."#n, &sv, &ssz, NULL, 0)) \
- == 0) { \
+ if (je_mallctl("opt."#n, &sv, &ssz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zu\n", sv); \
}
#define OPT_WRITE_SSIZE_T(n) \
- if ((err = je_mallctl("opt."#n, &ssv, &sssz, NULL, 0)) \
- == 0) { \
+ if (je_mallctl("opt."#n, &ssv, &sssz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zd\n", ssv); \
}
+#define OPT_WRITE_SSIZE_T_MUTABLE(n, m) { \
+ ssize_t ssv2; \
+ if (je_mallctl("opt."#n, &ssv, &sssz, NULL, 0) == 0 && \
+ je_mallctl(#m, &ssv2, &sssz, NULL, 0) == 0) { \
+ malloc_cprintf(write_cb, cbopaque, \
+ " opt."#n": %zd ("#m": %zd)\n", \
+ ssv, ssv2); \
+ } \
+}
#define OPT_WRITE_CHAR_P(n) \
- if ((err = je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0)) \
- == 0) { \
+ if (je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": \"%s\"\n", cpv); \
}
@@ -376,9 +484,9 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
OPT_WRITE_SIZE_T(lg_chunk)
OPT_WRITE_CHAR_P(dss)
OPT_WRITE_SIZE_T(narenas)
- OPT_WRITE_SSIZE_T(lg_dirty_mult)
+ OPT_WRITE_SSIZE_T_MUTABLE(lg_dirty_mult, arenas.lg_dirty_mult)
OPT_WRITE_BOOL(stats_print)
- OPT_WRITE_BOOL(junk)
+ OPT_WRITE_CHAR_P(junk)
OPT_WRITE_SIZE_T(quarantine)
OPT_WRITE_BOOL(redzone)
OPT_WRITE_BOOL(zero)
@@ -389,7 +497,9 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
OPT_WRITE_SSIZE_T(lg_tcache_max)
OPT_WRITE_BOOL(prof)
OPT_WRITE_CHAR_P(prof_prefix)
- OPT_WRITE_BOOL(prof_active)
+ OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active)
+ OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init,
+ prof.thread_active_init)
OPT_WRITE_SSIZE_T(lg_prof_sample)
OPT_WRITE_BOOL(prof_accum)
OPT_WRITE_SSIZE_T(lg_prof_interval)
@@ -398,6 +508,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
OPT_WRITE_BOOL(prof_leak)
#undef OPT_WRITE_BOOL
+#undef OPT_WRITE_BOOL_MUTABLE
#undef OPT_WRITE_SIZE_T
#undef OPT_WRITE_SSIZE_T
#undef OPT_WRITE_CHAR_P
@@ -411,12 +522,13 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
sizeof(void *));
CTL_GET("arenas.quantum", &sv, size_t);
- malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv);
+ malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n",
+ sv);
CTL_GET("arenas.page", &sv, size_t);
malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);
- CTL_GET("opt.lg_dirty_mult", &ssv, ssize_t);
+ CTL_GET("arenas.lg_dirty_mult", &ssv, ssize_t);
if (ssv >= 0) {
malloc_cprintf(write_cb, cbopaque,
"Min active:dirty page ratio per arena: %u:1\n",
@@ -425,22 +537,20 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
malloc_cprintf(write_cb, cbopaque,
"Min active:dirty page ratio per arena: N/A\n");
}
- if ((err = je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0))
- == 0) {
+ if (je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0) == 0) {
malloc_cprintf(write_cb, cbopaque,
"Maximum thread-cached size class: %zu\n", sv);
}
- if ((err = je_mallctl("opt.prof", &bv, &bsz, NULL, 0)) == 0 &&
- bv) {
- CTL_GET("opt.lg_prof_sample", &sv, size_t);
+ if (je_mallctl("opt.prof", &bv, &bsz, NULL, 0) == 0 && bv) {
+ CTL_GET("prof.lg_sample", &sv, size_t);
malloc_cprintf(write_cb, cbopaque,
- "Average profile sample interval: %"PRIu64
+ "Average profile sample interval: %"FMTu64
" (2^%zu)\n", (((uint64_t)1U) << sv), sv);
CTL_GET("opt.lg_prof_interval", &ssv, ssize_t);
if (ssv >= 0) {
malloc_cprintf(write_cb, cbopaque,
- "Average profile dump interval: %"PRIu64
+ "Average profile dump interval: %"FMTu64
" (2^%zd)\n",
(((uint64_t)1U) << ssv), ssv);
} else {
@@ -449,47 +559,27 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
}
}
CTL_GET("opt.lg_chunk", &sv, size_t);
- malloc_cprintf(write_cb, cbopaque, "Chunk size: %zu (2^%zu)\n",
- (ZU(1) << sv), sv);
+ malloc_cprintf(write_cb, cbopaque,
+ "Chunk size: %zu (2^%zu)\n", (ZU(1) << sv), sv);
}
if (config_stats) {
size_t *cactive;
- size_t allocated, active, mapped;
- size_t chunks_current, chunks_high;
- uint64_t chunks_total;
- size_t huge_allocated;
- uint64_t huge_nmalloc, huge_ndalloc;
+ size_t allocated, active, metadata, resident, mapped;
CTL_GET("stats.cactive", &cactive, size_t *);
CTL_GET("stats.allocated", &allocated, size_t);
CTL_GET("stats.active", &active, size_t);
+ CTL_GET("stats.metadata", &metadata, size_t);
+ CTL_GET("stats.resident", &resident, size_t);
CTL_GET("stats.mapped", &mapped, size_t);
malloc_cprintf(write_cb, cbopaque,
- "Allocated: %zu, active: %zu, mapped: %zu\n",
- allocated, active, mapped);
- malloc_cprintf(write_cb, cbopaque,
- "Current active ceiling: %zu\n", atomic_read_z(cactive));
-
- /* Print chunk stats. */
- CTL_GET("stats.chunks.total", &chunks_total, uint64_t);
- CTL_GET("stats.chunks.high", &chunks_high, size_t);
- CTL_GET("stats.chunks.current", &chunks_current, size_t);
- malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
- "highchunks curchunks\n");
- malloc_cprintf(write_cb, cbopaque,
- " %13"PRIu64" %12zu %12zu\n",
- chunks_total, chunks_high, chunks_current);
-
- /* Print huge stats. */
- CTL_GET("stats.huge.nmalloc", &huge_nmalloc, uint64_t);
- CTL_GET("stats.huge.ndalloc", &huge_ndalloc, uint64_t);
- CTL_GET("stats.huge.allocated", &huge_allocated, size_t);
- malloc_cprintf(write_cb, cbopaque,
- "huge: nmalloc ndalloc allocated\n");
+ "Allocated: %zu, active: %zu, metadata: %zu,"
+ " resident: %zu, mapped: %zu\n",
+ allocated, active, metadata, resident, mapped);
malloc_cprintf(write_cb, cbopaque,
- " %12"PRIu64" %12"PRIu64" %12zu\n",
- huge_nmalloc, huge_ndalloc, huge_allocated);
+ "Current active ceiling: %zu\n",
+ atomic_read_z(cactive));
if (merged) {
unsigned narenas;
@@ -508,12 +598,12 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
ninitialized++;
}
- if (ninitialized > 1 || unmerged == false) {
+ if (ninitialized > 1 || !unmerged) {
/* Print merged arena stats. */
malloc_cprintf(write_cb, cbopaque,
"\nMerged arenas stats:\n");
stats_arena_print(write_cb, cbopaque,
- narenas, bins, large);
+ narenas, bins, large, huge);
}
}
}
@@ -539,7 +629,8 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
cbopaque,
"\narenas[%u]:\n", i);
stats_arena_print(write_cb,
- cbopaque, i, bins, large);
+ cbopaque, i, bins, large,
+ huge);
}
}
}
diff --git a/deps/jemalloc/src/tcache.c b/deps/jemalloc/src/tcache.c
index 6de92960b..fdafd0c62 100644
--- a/deps/jemalloc/src/tcache.c
+++ b/deps/jemalloc/src/tcache.c
@@ -4,9 +4,6 @@
/******************************************************************************/
/* Data. */
-malloc_tsd_data(, tcache, tcache_t *, NULL)
-malloc_tsd_data(, tcache_enabled, tcache_enabled_t, tcache_enabled_default)
-
bool opt_tcache = true;
ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
@@ -16,6 +13,14 @@ static unsigned stack_nelms; /* Total stack elms per tcache. */
size_t nhbins;
size_t tcache_maxclass;
+tcaches_t *tcaches;
+
+/* Index of first element within tcaches that has never been used. */
+static unsigned tcaches_past;
+
+/* Head of singly linked list tracking available tcaches elements. */
+static tcaches_t *tcaches_avail;
+
/******************************************************************************/
size_t tcache_salloc(const void *ptr)
@@ -25,9 +30,9 @@ size_t tcache_salloc(const void *ptr)
}
void
-tcache_event_hard(tcache_t *tcache)
+tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
{
- size_t binind = tcache->next_gc_bin;
+ szind_t binind = tcache->next_gc_bin;
tcache_bin_t *tbin = &tcache->tbins[binind];
tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
@@ -36,11 +41,12 @@ tcache_event_hard(tcache_t *tcache)
* Flush (ceiling) 3/4 of the objects below the low water mark.
*/
if (binind < NBINS) {
- tcache_bin_flush_small(tbin, binind, tbin->ncached -
- tbin->low_water + (tbin->low_water >> 2), tcache);
+ tcache_bin_flush_small(tsd, tcache, tbin, binind,
+ tbin->ncached - tbin->low_water + (tbin->low_water
+ >> 2));
} else {
- tcache_bin_flush_large(tbin, binind, tbin->ncached -
- tbin->low_water + (tbin->low_water >> 2), tcache);
+ tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
+ - tbin->low_water + (tbin->low_water >> 2), tcache);
}
/*
* Reduce fill count by 2X. Limit lg_fill_div such that the
@@ -65,12 +71,13 @@ tcache_event_hard(tcache_t *tcache)
}
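[Note: on the GC path in tcache_event_hard() above, the flush argument passed to tcache_bin_flush_small()/_large() is `rem`, the number of objects to keep, so the amount flushed works out to roughly three quarters of the low-water mark. For example, with ncached = 20 and low_water = 8: rem = 20 - 8 + (8 >> 2) = 14, so 6 of the 8 objects that sat unused below the low-water mark are returned to the arena.]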
void *
-tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
+tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+ tcache_bin_t *tbin, szind_t binind)
{
void *ret;
- arena_tcache_fill_small(tcache->arena, tbin, binind,
- config_prof ? tcache->prof_accumbytes : 0);
+ arena_tcache_fill_small(arena, tbin, binind, config_prof ?
+ tcache->prof_accumbytes : 0);
if (config_prof)
tcache->prof_accumbytes = 0;
ret = tcache_alloc_easy(tbin);
@@ -79,9 +86,10 @@ tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
}
void
-tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
- tcache_t *tcache)
+tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
+ szind_t binind, unsigned rem)
{
+ arena_t *arena;
void *ptr;
unsigned i, nflush, ndeferred;
bool merged_stats = false;
@@ -89,22 +97,24 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
assert(binind < NBINS);
assert(rem <= tbin->ncached);
+ arena = arena_choose(tsd, NULL);
+ assert(arena != NULL);
for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
/* Lock the arena bin associated with the first object. */
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
tbin->avail[0]);
- arena_t *arena = chunk->arena;
- arena_bin_t *bin = &arena->bins[binind];
+ arena_t *bin_arena = extent_node_arena_get(&chunk->node);
+ arena_bin_t *bin = &bin_arena->bins[binind];
- if (config_prof && arena == tcache->arena) {
+ if (config_prof && bin_arena == arena) {
if (arena_prof_accum(arena, tcache->prof_accumbytes))
prof_idump();
tcache->prof_accumbytes = 0;
}
malloc_mutex_lock(&bin->lock);
- if (config_stats && arena == tcache->arena) {
- assert(merged_stats == false);
+ if (config_stats && bin_arena == arena) {
+ assert(!merged_stats);
merged_stats = true;
bin->stats.nflushes++;
bin->stats.nrequests += tbin->tstats.nrequests;
@@ -115,17 +125,13 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
ptr = tbin->avail[i];
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (chunk->arena == arena) {
+ if (extent_node_arena_get(&chunk->node) == bin_arena) {
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> LG_PAGE;
- arena_chunk_map_t *mapelm =
- arena_mapp_get(chunk, pageind);
- if (config_fill && opt_junk) {
- arena_alloc_junk_small(ptr,
- &arena_bin_info[binind], true);
- }
- arena_dalloc_bin_locked(arena, chunk, ptr,
- mapelm);
+ arena_chunk_map_bits_t *bitselm =
+ arena_bitselm_get(chunk, pageind);
+ arena_dalloc_bin_junked_locked(bin_arena, chunk,
+ ptr, bitselm);
} else {
/*
* This object was allocated via a different
@@ -139,12 +145,12 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
}
malloc_mutex_unlock(&bin->lock);
}
- if (config_stats && merged_stats == false) {
+ if (config_stats && !merged_stats) {
/*
* The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now.
*/
- arena_bin_t *bin = &tcache->arena->bins[binind];
+ arena_bin_t *bin = &arena->bins[binind];
malloc_mutex_lock(&bin->lock);
bin->stats.nflushes++;
bin->stats.nrequests += tbin->tstats.nrequests;
@@ -160,9 +166,10 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
}
void
-tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
- tcache_t *tcache)
+tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
+ unsigned rem, tcache_t *tcache)
{
+ arena_t *arena;
void *ptr;
unsigned i, nflush, ndeferred;
bool merged_stats = false;
@@ -170,17 +177,19 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
assert(binind < nhbins);
assert(rem <= tbin->ncached);
+ arena = arena_choose(tsd, NULL);
+ assert(arena != NULL);
for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
/* Lock the arena associated with the first object. */
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
tbin->avail[0]);
- arena_t *arena = chunk->arena;
+ arena_t *locked_arena = extent_node_arena_get(&chunk->node);
UNUSED bool idump;
if (config_prof)
idump = false;
- malloc_mutex_lock(&arena->lock);
- if ((config_prof || config_stats) && arena == tcache->arena) {
+ malloc_mutex_lock(&locked_arena->lock);
+ if ((config_prof || config_stats) && locked_arena == arena) {
if (config_prof) {
idump = arena_prof_accum_locked(arena,
tcache->prof_accumbytes);
@@ -200,9 +209,11 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
ptr = tbin->avail[i];
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (chunk->arena == arena)
- arena_dalloc_large_locked(arena, chunk, ptr);
- else {
+ if (extent_node_arena_get(&chunk->node) ==
+ locked_arena) {
+ arena_dalloc_large_junked_locked(locked_arena,
+ chunk, ptr);
+ } else {
/*
* This object was allocated via a different
* arena than the one that is currently locked.
@@ -213,16 +224,15 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
ndeferred++;
}
}
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(&locked_arena->lock);
if (config_prof && idump)
prof_idump();
}
- if (config_stats && merged_stats == false) {
+ if (config_stats && !merged_stats) {
/*
* The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now.
*/
- arena_t *arena = tcache->arena;
malloc_mutex_lock(&arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[binind - NBINS].nrequests +=
@@ -249,24 +259,58 @@ tcache_arena_associate(tcache_t *tcache, arena_t *arena)
ql_tail_insert(&arena->tcache_ql, tcache, link);
malloc_mutex_unlock(&arena->lock);
}
- tcache->arena = arena;
}
void
-tcache_arena_dissociate(tcache_t *tcache)
+tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena, arena_t *newarena)
+{
+
+ tcache_arena_dissociate(tcache, oldarena);
+ tcache_arena_associate(tcache, newarena);
+}
+
+void
+tcache_arena_dissociate(tcache_t *tcache, arena_t *arena)
{
if (config_stats) {
/* Unlink from list of extant tcaches. */
- malloc_mutex_lock(&tcache->arena->lock);
- ql_remove(&tcache->arena->tcache_ql, tcache, link);
- tcache_stats_merge(tcache, tcache->arena);
- malloc_mutex_unlock(&tcache->arena->lock);
+ malloc_mutex_lock(&arena->lock);
+ if (config_debug) {
+ bool in_ql = false;
+ tcache_t *iter;
+ ql_foreach(iter, &arena->tcache_ql, link) {
+ if (iter == tcache) {
+ in_ql = true;
+ break;
+ }
+ }
+ assert(in_ql);
+ }
+ ql_remove(&arena->tcache_ql, tcache, link);
+ tcache_stats_merge(tcache, arena);
+ malloc_mutex_unlock(&arena->lock);
}
}
tcache_t *
-tcache_create(arena_t *arena)
+tcache_get_hard(tsd_t *tsd)
+{
+ arena_t *arena;
+
+ if (!tcache_enabled_get()) {
+ if (tsd_nominal(tsd))
+ tcache_enabled_set(false); /* Memoize. */
+ return (NULL);
+ }
+ arena = arena_choose(tsd, NULL);
+ if (unlikely(arena == NULL))
+ return (NULL);
+ return (tcache_create(tsd, arena));
+}
+
+tcache_t *
+tcache_create(tsd_t *tsd, arena_t *arena)
{
tcache_t *tcache;
size_t size, stack_offset;
@@ -277,23 +321,10 @@ tcache_create(arena_t *arena)
size = PTR_CEILING(size);
stack_offset = size;
size += stack_nelms * sizeof(void *);
- /*
- * Round up to the nearest multiple of the cacheline size, in order to
- * avoid the possibility of false cacheline sharing.
- *
- * That this works relies on the same logic as in ipalloc(), but we
- * cannot directly call ipalloc() here due to tcache bootstrapping
- * issues.
- */
- size = (size + CACHELINE_MASK) & (-CACHELINE);
-
- if (size <= SMALL_MAXCLASS)
- tcache = (tcache_t *)arena_malloc_small(arena, size, true);
- else if (size <= tcache_maxclass)
- tcache = (tcache_t *)arena_malloc_large(arena, size, true);
- else
- tcache = (tcache_t *)icalloct(size, false, arena);
+ /* Avoid false cacheline sharing. */
+ size = sa2u(size, CACHELINE);
+ tcache = ipallocztm(tsd, size, CACHELINE, true, false, true, a0get());
if (tcache == NULL)
return (NULL);
@@ -307,25 +338,23 @@ tcache_create(arena_t *arena)
stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
}
- tcache_tsd_set(&tcache);
-
return (tcache);
}
-void
-tcache_destroy(tcache_t *tcache)
+static void
+tcache_destroy(tsd_t *tsd, tcache_t *tcache)
{
+ arena_t *arena;
unsigned i;
- size_t tcache_size;
- tcache_arena_dissociate(tcache);
+ arena = arena_choose(tsd, NULL);
+ tcache_arena_dissociate(tcache, arena);
for (i = 0; i < NBINS; i++) {
tcache_bin_t *tbin = &tcache->tbins[i];
- tcache_bin_flush_small(tbin, i, 0, tcache);
+ tcache_bin_flush_small(tsd, tcache, tbin, i, 0);
if (config_stats && tbin->tstats.nrequests != 0) {
- arena_t *arena = tcache->arena;
arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(&bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
@@ -335,10 +364,9 @@ tcache_destroy(tcache_t *tcache)
for (; i < nhbins; i++) {
tcache_bin_t *tbin = &tcache->tbins[i];
- tcache_bin_flush_large(tbin, i, 0, tcache);
+ tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
if (config_stats && tbin->tstats.nrequests != 0) {
- arena_t *arena = tcache->arena;
malloc_mutex_lock(&arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[i - NBINS].nrequests +=
@@ -348,57 +376,33 @@ tcache_destroy(tcache_t *tcache)
}
if (config_prof && tcache->prof_accumbytes > 0 &&
- arena_prof_accum(tcache->arena, tcache->prof_accumbytes))
+ arena_prof_accum(arena, tcache->prof_accumbytes))
prof_idump();
- tcache_size = arena_salloc(tcache, false);
- if (tcache_size <= SMALL_MAXCLASS) {
- arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
- arena_t *arena = chunk->arena;
- size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
- LG_PAGE;
- arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
-
- arena_dalloc_bin(arena, chunk, tcache, pageind, mapelm);
- } else if (tcache_size <= tcache_maxclass) {
- arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
- arena_t *arena = chunk->arena;
-
- arena_dalloc_large(arena, chunk, tcache);
- } else
- idalloct(tcache, false);
+ idalloctm(tsd, tcache, false, true);
}
void
-tcache_thread_cleanup(void *arg)
+tcache_cleanup(tsd_t *tsd)
{
- tcache_t *tcache = *(tcache_t **)arg;
+ tcache_t *tcache;
- if (tcache == TCACHE_STATE_DISABLED) {
- /* Do nothing. */
- } else if (tcache == TCACHE_STATE_REINCARNATED) {
- /*
- * Another destructor called an allocator function after this
- * destructor was called. Reset tcache to
- * TCACHE_STATE_PURGATORY in order to receive another callback.
- */
- tcache = TCACHE_STATE_PURGATORY;
- tcache_tsd_set(&tcache);
- } else if (tcache == TCACHE_STATE_PURGATORY) {
- /*
- * The previous time this destructor was called, we set the key
- * to TCACHE_STATE_PURGATORY so that other destructors wouldn't
- * cause re-creation of the tcache. This time, do nothing, so
- * that the destructor will not be called again.
- */
- } else if (tcache != NULL) {
- assert(tcache != TCACHE_STATE_PURGATORY);
- tcache_destroy(tcache);
- tcache = TCACHE_STATE_PURGATORY;
- tcache_tsd_set(&tcache);
+ if (!config_tcache)
+ return;
+
+ if ((tcache = tsd_tcache_get(tsd)) != NULL) {
+ tcache_destroy(tsd, tcache);
+ tsd_tcache_set(tsd, NULL);
}
}
+void
+tcache_enabled_cleanup(tsd_t *tsd)
+{
+
+ /* Do nothing. */
+}
+
/* Caller must own arena->lock. */
void
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
@@ -427,22 +431,82 @@ tcache_stats_merge(tcache_t *tcache, arena_t *arena)
}
bool
-tcache_boot0(void)
+tcaches_create(tsd_t *tsd, unsigned *r_ind)
+{
+ tcache_t *tcache;
+ tcaches_t *elm;
+
+ if (tcaches == NULL) {
+ tcaches = base_alloc(sizeof(tcache_t *) *
+ (MALLOCX_TCACHE_MAX+1));
+ if (tcaches == NULL)
+ return (true);
+ }
+
+ if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX)
+ return (true);
+ tcache = tcache_create(tsd, a0get());
+ if (tcache == NULL)
+ return (true);
+
+ if (tcaches_avail != NULL) {
+ elm = tcaches_avail;
+ tcaches_avail = tcaches_avail->next;
+ elm->tcache = tcache;
+ *r_ind = elm - tcaches;
+ } else {
+ elm = &tcaches[tcaches_past];
+ elm->tcache = tcache;
+ *r_ind = tcaches_past;
+ tcaches_past++;
+ }
+
+ return (false);
+}
+
+static void
+tcaches_elm_flush(tsd_t *tsd, tcaches_t *elm)
+{
+
+ if (elm->tcache == NULL)
+ return;
+ tcache_destroy(tsd, elm->tcache);
+ elm->tcache = NULL;
+}
+
+void
+tcaches_flush(tsd_t *tsd, unsigned ind)
+{
+
+ tcaches_elm_flush(tsd, &tcaches[ind]);
+}
+
+void
+tcaches_destroy(tsd_t *tsd, unsigned ind)
+{
+ tcaches_t *elm = &tcaches[ind];
+ tcaches_elm_flush(tsd, elm);
+ elm->next = tcaches_avail;
+ tcaches_avail = elm;
+}
+
+bool
+tcache_boot(void)
{
unsigned i;
/*
- * If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is
+ * If necessary, clamp opt_lg_tcache_max, now that large_maxclass is
* known.
*/
if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
tcache_maxclass = SMALL_MAXCLASS;
- else if ((1U << opt_lg_tcache_max) > arena_maxclass)
- tcache_maxclass = arena_maxclass;
+ else if ((1U << opt_lg_tcache_max) > large_maxclass)
+ tcache_maxclass = large_maxclass;
else
tcache_maxclass = (1U << opt_lg_tcache_max);
- nhbins = NBINS + (tcache_maxclass >> LG_PAGE);
+ nhbins = size2index(tcache_maxclass) + 1;
/* Initialize tcache_bin_info. */
tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
@@ -451,7 +515,11 @@ tcache_boot0(void)
return (true);
stack_nelms = 0;
for (i = 0; i < NBINS; i++) {
- if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) {
+ if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
+ tcache_bin_info[i].ncached_max =
+ TCACHE_NSLOTS_SMALL_MIN;
+ } else if ((arena_bin_info[i].nregs << 1) <=
+ TCACHE_NSLOTS_SMALL_MAX) {
tcache_bin_info[i].ncached_max =
(arena_bin_info[i].nregs << 1);
} else {
@@ -467,13 +535,3 @@ tcache_boot0(void)
return (false);
}
-
-bool
-tcache_boot1(void)
-{
-
- if (tcache_tsd_boot() || tcache_enabled_tsd_boot())
- return (true);
-
- return (false);
-}
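
The new tcaches_create()/tcaches_destroy() pair above keeps explicitly managed caches in a flat array, advancing a high-water index (tcaches_past) for fresh entries and reusing retired slots through a free list threaded via the element's next field. The following is a minimal standalone sketch of that slot-reuse pattern only; the names, payload type, and SLOTS_MAX bound are illustrative, not jemalloc's.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define SLOTS_MAX 8

typedef struct slot_s slot_t;
struct slot_s {
	void	*payload;	/* Valid while the slot is live. */
	slot_t	*next;		/* Free-list link while the slot is retired. */
};

static slot_t	slots[SLOTS_MAX];
static slot_t	*slots_avail;	/* Retired slots, reused before fresh ones. */
static unsigned	slots_past;	/* Index of the first never-used slot. */

/* Returns false on success (writing the slot index to *r_ind), true when full. */
static bool
slot_acquire(void *payload, unsigned *r_ind)
{
	slot_t *elm;

	if (slots_avail != NULL) {
		elm = slots_avail;
		slots_avail = slots_avail->next;
		*r_ind = (unsigned)(elm - slots);
	} else if (slots_past < SLOTS_MAX) {
		elm = &slots[slots_past];
		*r_ind = slots_past;
		slots_past++;
	} else
		return (true);
	elm->payload = payload;
	return (false);
}

static void
slot_release(unsigned ind)
{
	slot_t *elm = &slots[ind];

	elm->payload = NULL;
	elm->next = slots_avail;	/* Push onto the free list. */
	slots_avail = elm;
}

int
main(void)
{
	unsigned a, b;
	int x, y;

	if (slot_acquire(&x, &a) || slot_acquire(&y, &b))
		return (1);
	slot_release(a);
	if (slot_acquire(&y, &a))
		return (1);
	printf("indices: a=%u b=%u\n", a, b);	/* a reuses the retired slot. */
	return (0);
}
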
diff --git a/deps/jemalloc/src/tsd.c b/deps/jemalloc/src/tsd.c
index 700caabfe..9ffe9afef 100644
--- a/deps/jemalloc/src/tsd.c
+++ b/deps/jemalloc/src/tsd.c
@@ -7,21 +7,22 @@
static unsigned ncleanups;
static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX];
+malloc_tsd_data(, , tsd_t, TSD_INITIALIZER)
+
/******************************************************************************/
void *
malloc_tsd_malloc(size_t size)
{
- /* Avoid choose_arena() in order to dodge bootstrapping issues. */
- return (arena_malloc(arenas[0], size, false, false));
+ return (a0malloc(CACHELINE_CEILING(size)));
}
void
malloc_tsd_dalloc(void *wrapper)
{
- idalloct(wrapper, false);
+ a0dalloc(wrapper);
}
void
@@ -67,10 +68,61 @@ malloc_tsd_cleanup_register(bool (*f)(void))
}
void
-malloc_tsd_boot(void)
+tsd_cleanup(void *arg)
+{
+ tsd_t *tsd = (tsd_t *)arg;
+
+ switch (tsd->state) {
+ case tsd_state_uninitialized:
+ /* Do nothing. */
+ break;
+ case tsd_state_nominal:
+#define O(n, t) \
+ n##_cleanup(tsd);
+MALLOC_TSD
+#undef O
+ tsd->state = tsd_state_purgatory;
+ tsd_set(tsd);
+ break;
+ case tsd_state_purgatory:
+ /*
+ * The previous time this destructor was called, we set the
+ * state to tsd_state_purgatory so that other destructors
+ * wouldn't cause re-creation of the tsd. This time, do
+ * nothing, and do not request another callback.
+ */
+ break;
+ case tsd_state_reincarnated:
+ /*
+ * Another destructor deallocated memory after this destructor
+ * was called. Reset state to tsd_state_purgatory and request
+ * another callback.
+ */
+ tsd->state = tsd_state_purgatory;
+ tsd_set(tsd);
+ break;
+ default:
+ not_reached();
+ }
+}
+
+bool
+malloc_tsd_boot0(void)
{
ncleanups = 0;
+ if (tsd_boot0())
+ return (true);
+ *tsd_arenas_cache_bypassp_get(tsd_fetch()) = true;
+ return (false);
+}
+
+void
+malloc_tsd_boot1(void)
+{
+
+ tsd_boot1();
+ *tsd_arenas_cache_bypassp_get(tsd_fetch()) = false;
}
#ifdef _WIN32
@@ -102,7 +154,7 @@ _tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
# pragma section(".CRT$XLY",long,read)
#endif
JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used)
-static const BOOL (WINAPI *tls_callback)(HINSTANCE hinstDLL,
+static BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL,
DWORD fdwReason, LPVOID lpvReserved) = _tls_callback;
#endif
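
The purgatory/reincarnated states handled by tsd_cleanup() above exist because POSIX re-runs TSD destructors when a destructor leaves a non-NULL value behind. A minimal standalone sketch of that re-arming trick (not jemalloc's code; build with -pthread): the destructor frees the real data on the first pass, re-arms itself with a sentinel so it gets one more callback, and does nothing on the second pass.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_key_t key;
static char purgatory;	/* Sentinel; only its address is ever used. */

static void
cleanup(void *arg)
{

	if (arg == (void *)&purgatory) {
		/* Second pass: already cleaned up; do not re-arm. */
		return;
	}
	free(arg);	/* First pass: release the thread's data. */
	/* Re-arm so this destructor runs once more before the thread is gone. */
	pthread_setspecific(key, (void *)&purgatory);
}

static void *
thread_main(void *unused)
{

	(void)unused;
	pthread_setspecific(key, malloc(64));
	return (NULL);
}

int
main(void)
{
	pthread_t thd;

	pthread_key_create(&key, cleanup);
	pthread_create(&thd, NULL, thread_main, NULL);
	pthread_join(thd, NULL);
	return (0);
}
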
diff --git a/deps/jemalloc/src/util.c b/deps/jemalloc/src/util.c
index 93a19fd16..4cb0d6c1e 100644
--- a/deps/jemalloc/src/util.c
+++ b/deps/jemalloc/src/util.c
@@ -81,10 +81,10 @@ buferror(int err, char *buf, size_t buflen)
{
#ifdef _WIN32
- FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, GetLastError(), 0,
+ FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0,
(LPSTR)buf, buflen, NULL);
return (0);
-#elif defined(_GNU_SOURCE)
+#elif defined(__GLIBC__) && defined(_GNU_SOURCE)
char *b = strerror_r(err, buf, buflen);
if (b != buf) {
strncpy(buf, b, buflen);
@@ -100,7 +100,7 @@ uintmax_t
malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base)
{
uintmax_t ret, digit;
- int b;
+ unsigned b;
bool neg;
const char *p, *ns;
@@ -266,7 +266,7 @@ d2s(intmax_t x, char sign, char *s, size_t *slen_p)
sign = '-';
switch (sign) {
case '-':
- if (neg == false)
+ if (!neg)
break;
/* Fall through. */
case ' ':
@@ -329,7 +329,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
/* Left padding. */ \
size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \
(size_t)width - slen : 0); \
- if (left_justify == false && pad_len != 0) { \
+ if (!left_justify && pad_len != 0) { \
size_t j; \
for (j = 0; j < pad_len; j++) \
APPEND_C(' '); \
@@ -381,7 +381,9 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
case 'p': /* Synthetic; used for %p. */ \
val = va_arg(ap, uintptr_t); \
break; \
- default: not_reached(); \
+ default: \
+ not_reached(); \
+ val = 0; \
} \
} while (0)
@@ -404,19 +406,19 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
while (true) {
switch (*f) {
case '#':
- assert(alt_form == false);
+ assert(!alt_form);
alt_form = true;
break;
case '-':
- assert(left_justify == false);
+ assert(!left_justify);
left_justify = true;
break;
case ' ':
- assert(plus_space == false);
+ assert(!plus_space);
plus_space = true;
break;
case '+':
- assert(plus_plus == false);
+ assert(!plus_plus);
plus_plus = true;
break;
default: goto label_width;
@@ -548,7 +550,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
assert(len == '?' || len == 'l');
assert_not_implemented(len != 'l');
s = va_arg(ap, char *);
- slen = (prec < 0) ? strlen(s) : prec;
+ slen = (prec < 0) ? strlen(s) : (size_t)prec;
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
@@ -584,7 +586,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
return (ret);
}
-JEMALLOC_ATTR(format(printf, 3, 4))
+JEMALLOC_FORMAT_PRINTF(3, 4)
int
malloc_snprintf(char *str, size_t size, const char *format, ...)
{
@@ -623,7 +625,7 @@ malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
* Print to a callback function in such a way as to (hopefully) avoid memory
* allocation.
*/
-JEMALLOC_ATTR(format(printf, 3, 4))
+JEMALLOC_FORMAT_PRINTF(3, 4)
void
malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
const char *format, ...)
@@ -636,7 +638,7 @@ malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
}
/* Print to stderr in such a way as to avoid memory allocation. */
-JEMALLOC_ATTR(format(printf, 1, 2))
+JEMALLOC_FORMAT_PRINTF(1, 2)
void
malloc_printf(const char *format, ...)
{
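
The three hunks above swap a raw GCC format(printf) attribute for a JEMALLOC_FORMAT_PRINTF() macro, so the annotation can expand to nothing on compilers that lack it while still enabling format/argument checking where available. A standalone sketch of the underlying idea; the FORMAT_PRINTF wrapper name here is invented for illustration, not jemalloc's macro.

#include <stdarg.h>
#include <stdio.h>

#if defined(__GNUC__) || defined(__clang__)
#  define FORMAT_PRINTF(fmt_idx, arg_idx) \
	__attribute__((format(printf, fmt_idx, arg_idx)))
#else
#  define FORMAT_PRINTF(fmt_idx, arg_idx)
#endif

static void log_line(const char *format, ...) FORMAT_PRINTF(1, 2);

static void
log_line(const char *format, ...)
{
	va_list ap;

	va_start(ap, format);
	vfprintf(stderr, format, ap);
	va_end(ap);
}

int
main(void)
{

	log_line("%d widgets remaining\n", 7);	/* A bad format now warns. */
	return (0);
}
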
diff --git a/deps/jemalloc/src/valgrind.c b/deps/jemalloc/src/valgrind.c
new file mode 100644
index 000000000..8e7ef3a2e
--- /dev/null
+++ b/deps/jemalloc/src/valgrind.c
@@ -0,0 +1,34 @@
+#include "jemalloc/internal/jemalloc_internal.h"
+#ifndef JEMALLOC_VALGRIND
+# error "This source file is for Valgrind integration."
+#endif
+
+#include <valgrind/memcheck.h>
+
+void
+valgrind_make_mem_noaccess(void *ptr, size_t usize)
+{
+
+ VALGRIND_MAKE_MEM_NOACCESS(ptr, usize);
+}
+
+void
+valgrind_make_mem_undefined(void *ptr, size_t usize)
+{
+
+ VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize);
+}
+
+void
+valgrind_make_mem_defined(void *ptr, size_t usize)
+{
+
+ VALGRIND_MAKE_MEM_DEFINED(ptr, usize);
+}
+
+void
+valgrind_freelike_block(void *ptr, size_t usize)
+{
+
+ VALGRIND_FREELIKE_BLOCK(ptr, usize);
+}
diff --git a/deps/jemalloc/src/zone.c b/deps/jemalloc/src/zone.c
index e0302ef4e..12e1734a9 100644
--- a/deps/jemalloc/src/zone.c
+++ b/deps/jemalloc/src/zone.c
@@ -176,6 +176,7 @@ register_zone(void)
* register jemalloc's.
*/
malloc_zone_t *default_zone = malloc_default_zone();
+ malloc_zone_t *purgeable_zone = NULL;
if (!default_zone->zone_name ||
strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) {
return;
@@ -237,22 +238,37 @@ register_zone(void)
* run time.
*/
if (malloc_default_purgeable_zone != NULL)
- malloc_default_purgeable_zone();
+ purgeable_zone = malloc_default_purgeable_zone();
/* Register the custom zone. At this point it won't be the default. */
malloc_zone_register(&zone);
- /*
- * Unregister and reregister the default zone. On OSX >= 10.6,
- * unregistering takes the last registered zone and places it at the
- * location of the specified zone. Unregistering the default zone thus
- * makes the last registered one the default. On OSX < 10.6,
- * unregistering shifts all registered zones. The first registered zone
- * then becomes the default.
- */
do {
default_zone = malloc_default_zone();
+ /*
+ * Unregister and reregister the default zone. On OSX >= 10.6,
+ * unregistering takes the last registered zone and places it
+ * at the location of the specified zone. Unregistering the
+ * default zone thus makes the last registered one the default.
+ * On OSX < 10.6, unregistering shifts all registered zones.
+ * The first registered zone then becomes the default.
+ */
malloc_zone_unregister(default_zone);
malloc_zone_register(default_zone);
+ /*
+ * On OSX 10.6, having the default purgeable zone appear before
+ * the default zone makes some things crash because it thinks it
+ * owns the default zone allocated pointers. We thus
+ * unregister/re-register it in order to ensure it's always
+ * after the default zone. On OSX < 10.6, there is no purgeable
+ * zone, so this does nothing. On OSX >= 10.6, unregistering
+ * replaces the purgeable zone with the last registered zone
+ * above, i.e. the default zone. Registering it again then puts
+ * it at the end, obviously after the default zone.
+ */
+ if (purgeable_zone) {
+ malloc_zone_unregister(purgeable_zone);
+ malloc_zone_register(purgeable_zone);
+ }
} while (malloc_default_zone() != &zone);
}
diff --git a/deps/jemalloc/test/include/test/btalloc.h b/deps/jemalloc/test/include/test/btalloc.h
new file mode 100644
index 000000000..c3f9d4df7
--- /dev/null
+++ b/deps/jemalloc/test/include/test/btalloc.h
@@ -0,0 +1,31 @@
+/* btalloc() provides a mechanism for allocating via permuted backtraces. */
+void *btalloc(size_t size, unsigned bits);
+
+#define btalloc_n_proto(n) \
+void *btalloc_##n(size_t size, unsigned bits);
+btalloc_n_proto(0)
+btalloc_n_proto(1)
+
+#define btalloc_n_gen(n) \
+void * \
+btalloc_##n(size_t size, unsigned bits) \
+{ \
+ void *p; \
+ \
+ if (bits == 0) \
+ p = mallocx(size, 0); \
+ else { \
+ switch (bits & 0x1U) { \
+ case 0: \
+ p = (btalloc_0(size, bits >> 1)); \
+ break; \
+ case 1: \
+ p = (btalloc_1(size, bits >> 1)); \
+ break; \
+ default: not_reached(); \
+ } \
+ } \
+ /* Intentionally sabotage tail call optimization. */ \
+ assert_ptr_not_null(p, "Unexpected mallocx() failure"); \
+ return (p); \
+}
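
btalloc_n_gen() above stamps out pairs of functions that recurse into each other according to the bits of an integer, so the same allocation call can be reached through many distinct backtraces (useful for exercising heap profiling). A standalone sketch of the same bit-driven dispatch, printing the path instead of allocating; all names here are illustrative.

#include <stdio.h>

static void walk_0(unsigned bits);
static void walk_1(unsigned bits);

/* Shared body: bit 0 selects the next callee, the rest are passed down. */
#define WALK_BODY(n) \
	do { \
		if (bits == 0) { \
			printf("leaf reached via walk_%d\n", (n)); \
			return; \
		} \
		if ((bits & 0x1U) == 0) \
			walk_0(bits >> 1); \
		else \
			walk_1(bits >> 1); \
	} while (0)

static void
walk_0(unsigned bits)
{

	WALK_BODY(0);
}

static void
walk_1(unsigned bits)
{

	WALK_BODY(1);
}

int
main(void)
{

	walk_0(5);	/* Branch sequence 1, 0, 1 before hitting the leaf. */
	return (0);
}
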
diff --git a/deps/jemalloc/test/include/test/jemalloc_test.h.in b/deps/jemalloc/test/include/test/jemalloc_test.h.in
index 730a55dba..455569da4 100644
--- a/deps/jemalloc/test/include/test/jemalloc_test.h.in
+++ b/deps/jemalloc/test/include/test/jemalloc_test.h.in
@@ -1,13 +1,21 @@
+#include <limits.h>
+#ifndef SIZE_T_MAX
+# define SIZE_T_MAX SIZE_MAX
+#endif
#include <stdlib.h>
#include <stdarg.h>
#include <stdbool.h>
#include <errno.h>
-#include <inttypes.h>
#include <math.h>
#include <string.h>
+#ifdef _WIN32
+# include "msvc_compat/strings.h"
+#endif
+#include <sys/time.h>
#ifdef _WIN32
# include <windows.h>
+# include "msvc_compat/windows_extra.h"
#else
# include <pthread.h>
#endif
@@ -132,10 +140,12 @@
/*
* Common test utilities.
*/
+#include "test/btalloc.h"
#include "test/math.h"
#include "test/mtx.h"
#include "test/mq.h"
#include "test/test.h"
+#include "test/timer.h"
#include "test/thd.h"
#define MEXP 19937
#include "test/SFMT.h"
diff --git a/deps/jemalloc/test/include/test/jemalloc_test_defs.h.in b/deps/jemalloc/test/include/test/jemalloc_test_defs.h.in
index 18a9773d7..5cc8532a3 100644
--- a/deps/jemalloc/test/include/test/jemalloc_test_defs.h.in
+++ b/deps/jemalloc/test/include/test/jemalloc_test_defs.h.in
@@ -1,5 +1,9 @@
#include "jemalloc/internal/jemalloc_internal_defs.h"
+#include "jemalloc/internal/jemalloc_internal_decls.h"
-/* For use by SFMT. */
+/*
+ * For use by SFMT. configure.ac doesn't actually define HAVE_SSE2 because its
+ * dependencies are notoriously unportable in practice.
+ */
#undef HAVE_SSE2
#undef HAVE_ALTIVEC
diff --git a/deps/jemalloc/test/include/test/math.h b/deps/jemalloc/test/include/test/math.h
index a862ed7db..b057b29a1 100644
--- a/deps/jemalloc/test/include/test/math.h
+++ b/deps/jemalloc/test/include/test/math.h
@@ -299,7 +299,7 @@ pt_chi2(double p, double df, double ln_gamma_df_2)
/*
* Given a value p in [0..1] and Gamma distribution shape and scale parameters,
- * compute the upper limit on the definite integeral from [0..z] that satisfies
+ * compute the upper limit on the definite integral from [0..z] that satisfies
* p.
*/
JEMALLOC_INLINE double
diff --git a/deps/jemalloc/test/include/test/mq.h b/deps/jemalloc/test/include/test/mq.h
index 11188653c..7c4df4931 100644
--- a/deps/jemalloc/test/include/test/mq.h
+++ b/deps/jemalloc/test/include/test/mq.h
@@ -1,3 +1,5 @@
+void mq_nanosleep(unsigned ns);
+
/*
* Simple templated message queue implementation that relies on only mutexes for
* synchronization (which reduces portability issues). Given the following
@@ -75,26 +77,23 @@ a_attr a_mq_msg_type * \
a_prefix##get(a_mq_type *mq) \
{ \
a_mq_msg_type *msg; \
- struct timespec timeout; \
+ unsigned ns; \
\
msg = a_prefix##tryget(mq); \
if (msg != NULL) \
return (msg); \
\
- timeout.tv_sec = 0; \
- timeout.tv_nsec = 1; \
+ ns = 1; \
while (true) { \
- nanosleep(&timeout, NULL); \
+ mq_nanosleep(ns); \
msg = a_prefix##tryget(mq); \
if (msg != NULL) \
return (msg); \
- if (timeout.tv_sec == 0) { \
+ if (ns < 1000*1000*1000) { \
/* Double sleep time, up to max 1 second. */ \
- timeout.tv_nsec <<= 1; \
- if (timeout.tv_nsec >= 1000*1000*1000) { \
- timeout.tv_sec = 1; \
- timeout.tv_nsec = 0; \
- } \
+ ns <<= 1; \
+ if (ns > 1000*1000*1000) \
+ ns = 1000*1000*1000; \
} \
} \
} \
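
The hunk above routes all sleeping through an mq_nanosleep(ns) helper (defined in test/src/mq.c, outside this diff) so Windows builds can substitute their own primitive, while the caller keeps the simple double-up-to-one-second backoff. One plausible POSIX-only shape for such a helper, shown purely as a sketch of the conversion from a nanosecond count to a timespec:

#include <assert.h>
#include <stdio.h>
#include <time.h>

static void
mq_nanosleep_sketch(unsigned ns)
{
	struct timespec timeout;

	assert(ns <= 1000*1000*1000);
	timeout.tv_sec = ns / (1000*1000*1000);
	timeout.tv_nsec = ns % (1000*1000*1000);
	nanosleep(&timeout, NULL);
}

int
main(void)
{
	unsigned ns;

	/* Exercise the same doubling schedule as the polling loop above. */
	for (ns = 1; ns < 1000; ns <<= 1) {
		printf("sleeping %u ns\n", ns);
		mq_nanosleep_sketch(ns);
	}
	return (0);
}
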
diff --git a/deps/jemalloc/test/include/test/test.h b/deps/jemalloc/test/include/test/test.h
index a32ec07c4..3cf901fc4 100644
--- a/deps/jemalloc/test/include/test/test.h
+++ b/deps/jemalloc/test/include/test/test.h
@@ -1,6 +1,6 @@
#define ASSERT_BUFSIZE 256
-#define assert_cmp(t, a, b, cmp, neg_cmp, pri, fmt...) do { \
+#define assert_cmp(t, a, b, cmp, neg_cmp, pri, ...) do { \
t a_ = (a); \
t b_ = (b); \
if (!(a_ cmp b_)) { \
@@ -12,205 +12,205 @@
"%"pri" "#neg_cmp" %"pri": ", \
__func__, __FILE__, __LINE__, \
#a, #b, a_, b_); \
- malloc_snprintf(message, sizeof(message), fmt); \
+ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
p_test_fail(prefix, message); \
} \
} while (0)
-#define assert_ptr_eq(a, b, fmt...) assert_cmp(void *, a, b, ==, \
- !=, "p", fmt)
-#define assert_ptr_ne(a, b, fmt...) assert_cmp(void *, a, b, !=, \
- ==, "p", fmt)
-#define assert_ptr_null(a, fmt...) assert_cmp(void *, a, NULL, ==, \
- !=, "p", fmt)
-#define assert_ptr_not_null(a, fmt...) assert_cmp(void *, a, NULL, !=, \
- ==, "p", fmt)
+#define assert_ptr_eq(a, b, ...) assert_cmp(void *, a, b, ==, \
+ !=, "p", __VA_ARGS__)
+#define assert_ptr_ne(a, b, ...) assert_cmp(void *, a, b, !=, \
+ ==, "p", __VA_ARGS__)
+#define assert_ptr_null(a, ...) assert_cmp(void *, a, NULL, ==, \
+ !=, "p", __VA_ARGS__)
+#define assert_ptr_not_null(a, ...) assert_cmp(void *, a, NULL, !=, \
+ ==, "p", __VA_ARGS__)
-#define assert_c_eq(a, b, fmt...) assert_cmp(char, a, b, ==, !=, "c", fmt)
-#define assert_c_ne(a, b, fmt...) assert_cmp(char, a, b, !=, ==, "c", fmt)
-#define assert_c_lt(a, b, fmt...) assert_cmp(char, a, b, <, >=, "c", fmt)
-#define assert_c_le(a, b, fmt...) assert_cmp(char, a, b, <=, >, "c", fmt)
-#define assert_c_ge(a, b, fmt...) assert_cmp(char, a, b, >=, <, "c", fmt)
-#define assert_c_gt(a, b, fmt...) assert_cmp(char, a, b, >, <=, "c", fmt)
+#define assert_c_eq(a, b, ...) assert_cmp(char, a, b, ==, !=, "c", __VA_ARGS__)
+#define assert_c_ne(a, b, ...) assert_cmp(char, a, b, !=, ==, "c", __VA_ARGS__)
+#define assert_c_lt(a, b, ...) assert_cmp(char, a, b, <, >=, "c", __VA_ARGS__)
+#define assert_c_le(a, b, ...) assert_cmp(char, a, b, <=, >, "c", __VA_ARGS__)
+#define assert_c_ge(a, b, ...) assert_cmp(char, a, b, >=, <, "c", __VA_ARGS__)
+#define assert_c_gt(a, b, ...) assert_cmp(char, a, b, >, <=, "c", __VA_ARGS__)
-#define assert_x_eq(a, b, fmt...) assert_cmp(int, a, b, ==, !=, "#x", fmt)
-#define assert_x_ne(a, b, fmt...) assert_cmp(int, a, b, !=, ==, "#x", fmt)
-#define assert_x_lt(a, b, fmt...) assert_cmp(int, a, b, <, >=, "#x", fmt)
-#define assert_x_le(a, b, fmt...) assert_cmp(int, a, b, <=, >, "#x", fmt)
-#define assert_x_ge(a, b, fmt...) assert_cmp(int, a, b, >=, <, "#x", fmt)
-#define assert_x_gt(a, b, fmt...) assert_cmp(int, a, b, >, <=, "#x", fmt)
+#define assert_x_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "#x", __VA_ARGS__)
+#define assert_x_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "#x", __VA_ARGS__)
+#define assert_x_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "#x", __VA_ARGS__)
+#define assert_x_le(a, b, ...) assert_cmp(int, a, b, <=, >, "#x", __VA_ARGS__)
+#define assert_x_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "#x", __VA_ARGS__)
+#define assert_x_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "#x", __VA_ARGS__)
-#define assert_d_eq(a, b, fmt...) assert_cmp(int, a, b, ==, !=, "d", fmt)
-#define assert_d_ne(a, b, fmt...) assert_cmp(int, a, b, !=, ==, "d", fmt)
-#define assert_d_lt(a, b, fmt...) assert_cmp(int, a, b, <, >=, "d", fmt)
-#define assert_d_le(a, b, fmt...) assert_cmp(int, a, b, <=, >, "d", fmt)
-#define assert_d_ge(a, b, fmt...) assert_cmp(int, a, b, >=, <, "d", fmt)
-#define assert_d_gt(a, b, fmt...) assert_cmp(int, a, b, >, <=, "d", fmt)
+#define assert_d_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "d", __VA_ARGS__)
+#define assert_d_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "d", __VA_ARGS__)
+#define assert_d_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "d", __VA_ARGS__)
+#define assert_d_le(a, b, ...) assert_cmp(int, a, b, <=, >, "d", __VA_ARGS__)
+#define assert_d_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "d", __VA_ARGS__)
+#define assert_d_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "d", __VA_ARGS__)
-#define assert_u_eq(a, b, fmt...) assert_cmp(int, a, b, ==, !=, "u", fmt)
-#define assert_u_ne(a, b, fmt...) assert_cmp(int, a, b, !=, ==, "u", fmt)
-#define assert_u_lt(a, b, fmt...) assert_cmp(int, a, b, <, >=, "u", fmt)
-#define assert_u_le(a, b, fmt...) assert_cmp(int, a, b, <=, >, "u", fmt)
-#define assert_u_ge(a, b, fmt...) assert_cmp(int, a, b, >=, <, "u", fmt)
-#define assert_u_gt(a, b, fmt...) assert_cmp(int, a, b, >, <=, "u", fmt)
+#define assert_u_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "u", __VA_ARGS__)
+#define assert_u_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "u", __VA_ARGS__)
+#define assert_u_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "u", __VA_ARGS__)
+#define assert_u_le(a, b, ...) assert_cmp(int, a, b, <=, >, "u", __VA_ARGS__)
+#define assert_u_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "u", __VA_ARGS__)
+#define assert_u_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "u", __VA_ARGS__)
-#define assert_ld_eq(a, b, fmt...) assert_cmp(long, a, b, ==, \
- !=, "ld", fmt)
-#define assert_ld_ne(a, b, fmt...) assert_cmp(long, a, b, !=, \
- ==, "ld", fmt)
-#define assert_ld_lt(a, b, fmt...) assert_cmp(long, a, b, <, \
- >=, "ld", fmt)
-#define assert_ld_le(a, b, fmt...) assert_cmp(long, a, b, <=, \
- >, "ld", fmt)
-#define assert_ld_ge(a, b, fmt...) assert_cmp(long, a, b, >=, \
- <, "ld", fmt)
-#define assert_ld_gt(a, b, fmt...) assert_cmp(long, a, b, >, \
- <=, "ld", fmt)
+#define assert_ld_eq(a, b, ...) assert_cmp(long, a, b, ==, \
+ !=, "ld", __VA_ARGS__)
+#define assert_ld_ne(a, b, ...) assert_cmp(long, a, b, !=, \
+ ==, "ld", __VA_ARGS__)
+#define assert_ld_lt(a, b, ...) assert_cmp(long, a, b, <, \
+ >=, "ld", __VA_ARGS__)
+#define assert_ld_le(a, b, ...) assert_cmp(long, a, b, <=, \
+ >, "ld", __VA_ARGS__)
+#define assert_ld_ge(a, b, ...) assert_cmp(long, a, b, >=, \
+ <, "ld", __VA_ARGS__)
+#define assert_ld_gt(a, b, ...) assert_cmp(long, a, b, >, \
+ <=, "ld", __VA_ARGS__)
-#define assert_lu_eq(a, b, fmt...) assert_cmp(unsigned long, \
- a, b, ==, !=, "lu", fmt)
-#define assert_lu_ne(a, b, fmt...) assert_cmp(unsigned long, \
- a, b, !=, ==, "lu", fmt)
-#define assert_lu_lt(a, b, fmt...) assert_cmp(unsigned long, \
- a, b, <, >=, "lu", fmt)
-#define assert_lu_le(a, b, fmt...) assert_cmp(unsigned long, \
- a, b, <=, >, "lu", fmt)
-#define assert_lu_ge(a, b, fmt...) assert_cmp(unsigned long, \
- a, b, >=, <, "lu", fmt)
-#define assert_lu_gt(a, b, fmt...) assert_cmp(unsigned long, \
- a, b, >, <=, "lu", fmt)
+#define assert_lu_eq(a, b, ...) assert_cmp(unsigned long, \
+ a, b, ==, !=, "lu", __VA_ARGS__)
+#define assert_lu_ne(a, b, ...) assert_cmp(unsigned long, \
+ a, b, !=, ==, "lu", __VA_ARGS__)
+#define assert_lu_lt(a, b, ...) assert_cmp(unsigned long, \
+ a, b, <, >=, "lu", __VA_ARGS__)
+#define assert_lu_le(a, b, ...) assert_cmp(unsigned long, \
+ a, b, <=, >, "lu", __VA_ARGS__)
+#define assert_lu_ge(a, b, ...) assert_cmp(unsigned long, \
+ a, b, >=, <, "lu", __VA_ARGS__)
+#define assert_lu_gt(a, b, ...) assert_cmp(unsigned long, \
+ a, b, >, <=, "lu", __VA_ARGS__)
-#define assert_qd_eq(a, b, fmt...) assert_cmp(long long, a, b, ==, \
- !=, "qd", fmt)
-#define assert_qd_ne(a, b, fmt...) assert_cmp(long long, a, b, !=, \
- ==, "qd", fmt)
-#define assert_qd_lt(a, b, fmt...) assert_cmp(long long, a, b, <, \
- >=, "qd", fmt)
-#define assert_qd_le(a, b, fmt...) assert_cmp(long long, a, b, <=, \
- >, "qd", fmt)
-#define assert_qd_ge(a, b, fmt...) assert_cmp(long long, a, b, >=, \
- <, "qd", fmt)
-#define assert_qd_gt(a, b, fmt...) assert_cmp(long long, a, b, >, \
- <=, "qd", fmt)
+#define assert_qd_eq(a, b, ...) assert_cmp(long long, a, b, ==, \
+ !=, "qd", __VA_ARGS__)
+#define assert_qd_ne(a, b, ...) assert_cmp(long long, a, b, !=, \
+ ==, "qd", __VA_ARGS__)
+#define assert_qd_lt(a, b, ...) assert_cmp(long long, a, b, <, \
+ >=, "qd", __VA_ARGS__)
+#define assert_qd_le(a, b, ...) assert_cmp(long long, a, b, <=, \
+ >, "qd", __VA_ARGS__)
+#define assert_qd_ge(a, b, ...) assert_cmp(long long, a, b, >=, \
+ <, "qd", __VA_ARGS__)
+#define assert_qd_gt(a, b, ...) assert_cmp(long long, a, b, >, \
+ <=, "qd", __VA_ARGS__)
-#define assert_qu_eq(a, b, fmt...) assert_cmp(unsigned long long, \
- a, b, ==, !=, "qu", fmt)
-#define assert_qu_ne(a, b, fmt...) assert_cmp(unsigned long long, \
- a, b, !=, ==, "qu", fmt)
-#define assert_qu_lt(a, b, fmt...) assert_cmp(unsigned long long, \
- a, b, <, >=, "qu", fmt)
-#define assert_qu_le(a, b, fmt...) assert_cmp(unsigned long long, \
- a, b, <=, >, "qu", fmt)
-#define assert_qu_ge(a, b, fmt...) assert_cmp(unsigned long long, \
- a, b, >=, <, "qu", fmt)
-#define assert_qu_gt(a, b, fmt...) assert_cmp(unsigned long long, \
- a, b, >, <=, "qu", fmt)
+#define assert_qu_eq(a, b, ...) assert_cmp(unsigned long long, \
+ a, b, ==, !=, "qu", __VA_ARGS__)
+#define assert_qu_ne(a, b, ...) assert_cmp(unsigned long long, \
+ a, b, !=, ==, "qu", __VA_ARGS__)
+#define assert_qu_lt(a, b, ...) assert_cmp(unsigned long long, \
+ a, b, <, >=, "qu", __VA_ARGS__)
+#define assert_qu_le(a, b, ...) assert_cmp(unsigned long long, \
+ a, b, <=, >, "qu", __VA_ARGS__)
+#define assert_qu_ge(a, b, ...) assert_cmp(unsigned long long, \
+ a, b, >=, <, "qu", __VA_ARGS__)
+#define assert_qu_gt(a, b, ...) assert_cmp(unsigned long long, \
+ a, b, >, <=, "qu", __VA_ARGS__)
-#define assert_jd_eq(a, b, fmt...) assert_cmp(intmax_t, a, b, ==, \
- !=, "jd", fmt)
-#define assert_jd_ne(a, b, fmt...) assert_cmp(intmax_t, a, b, !=, \
- ==, "jd", fmt)
-#define assert_jd_lt(a, b, fmt...) assert_cmp(intmax_t, a, b, <, \
- >=, "jd", fmt)
-#define assert_jd_le(a, b, fmt...) assert_cmp(intmax_t, a, b, <=, \
- >, "jd", fmt)
-#define assert_jd_ge(a, b, fmt...) assert_cmp(intmax_t, a, b, >=, \
- <, "jd", fmt)
-#define assert_jd_gt(a, b, fmt...) assert_cmp(intmax_t, a, b, >, \
- <=, "jd", fmt)
+#define assert_jd_eq(a, b, ...) assert_cmp(intmax_t, a, b, ==, \
+ !=, "jd", __VA_ARGS__)
+#define assert_jd_ne(a, b, ...) assert_cmp(intmax_t, a, b, !=, \
+ ==, "jd", __VA_ARGS__)
+#define assert_jd_lt(a, b, ...) assert_cmp(intmax_t, a, b, <, \
+ >=, "jd", __VA_ARGS__)
+#define assert_jd_le(a, b, ...) assert_cmp(intmax_t, a, b, <=, \
+ >, "jd", __VA_ARGS__)
+#define assert_jd_ge(a, b, ...) assert_cmp(intmax_t, a, b, >=, \
+ <, "jd", __VA_ARGS__)
+#define assert_jd_gt(a, b, ...) assert_cmp(intmax_t, a, b, >, \
+ <=, "jd", __VA_ARGS__)
-#define assert_ju_eq(a, b, fmt...) assert_cmp(uintmax_t, a, b, ==, \
- !=, "ju", fmt)
-#define assert_ju_ne(a, b, fmt...) assert_cmp(uintmax_t, a, b, !=, \
- ==, "ju", fmt)
-#define assert_ju_lt(a, b, fmt...) assert_cmp(uintmax_t, a, b, <, \
- >=, "ju", fmt)
-#define assert_ju_le(a, b, fmt...) assert_cmp(uintmax_t, a, b, <=, \
- >, "ju", fmt)
-#define assert_ju_ge(a, b, fmt...) assert_cmp(uintmax_t, a, b, >=, \
- <, "ju", fmt)
-#define assert_ju_gt(a, b, fmt...) assert_cmp(uintmax_t, a, b, >, \
- <=, "ju", fmt)
+#define assert_ju_eq(a, b, ...) assert_cmp(uintmax_t, a, b, ==, \
+ !=, "ju", __VA_ARGS__)
+#define assert_ju_ne(a, b, ...) assert_cmp(uintmax_t, a, b, !=, \
+ ==, "ju", __VA_ARGS__)
+#define assert_ju_lt(a, b, ...) assert_cmp(uintmax_t, a, b, <, \
+ >=, "ju", __VA_ARGS__)
+#define assert_ju_le(a, b, ...) assert_cmp(uintmax_t, a, b, <=, \
+ >, "ju", __VA_ARGS__)
+#define assert_ju_ge(a, b, ...) assert_cmp(uintmax_t, a, b, >=, \
+ <, "ju", __VA_ARGS__)
+#define assert_ju_gt(a, b, ...) assert_cmp(uintmax_t, a, b, >, \
+ <=, "ju", __VA_ARGS__)
-#define assert_zd_eq(a, b, fmt...) assert_cmp(ssize_t, a, b, ==, \
- !=, "zd", fmt)
-#define assert_zd_ne(a, b, fmt...) assert_cmp(ssize_t, a, b, !=, \
- ==, "zd", fmt)
-#define assert_zd_lt(a, b, fmt...) assert_cmp(ssize_t, a, b, <, \
- >=, "zd", fmt)
-#define assert_zd_le(a, b, fmt...) assert_cmp(ssize_t, a, b, <=, \
- >, "zd", fmt)
-#define assert_zd_ge(a, b, fmt...) assert_cmp(ssize_t, a, b, >=, \
- <, "zd", fmt)
-#define assert_zd_gt(a, b, fmt...) assert_cmp(ssize_t, a, b, >, \
- <=, "zd", fmt)
+#define assert_zd_eq(a, b, ...) assert_cmp(ssize_t, a, b, ==, \
+ !=, "zd", __VA_ARGS__)
+#define assert_zd_ne(a, b, ...) assert_cmp(ssize_t, a, b, !=, \
+ ==, "zd", __VA_ARGS__)
+#define assert_zd_lt(a, b, ...) assert_cmp(ssize_t, a, b, <, \
+ >=, "zd", __VA_ARGS__)
+#define assert_zd_le(a, b, ...) assert_cmp(ssize_t, a, b, <=, \
+ >, "zd", __VA_ARGS__)
+#define assert_zd_ge(a, b, ...) assert_cmp(ssize_t, a, b, >=, \
+ <, "zd", __VA_ARGS__)
+#define assert_zd_gt(a, b, ...) assert_cmp(ssize_t, a, b, >, \
+ <=, "zd", __VA_ARGS__)
-#define assert_zu_eq(a, b, fmt...) assert_cmp(size_t, a, b, ==, \
- !=, "zu", fmt)
-#define assert_zu_ne(a, b, fmt...) assert_cmp(size_t, a, b, !=, \
- ==, "zu", fmt)
-#define assert_zu_lt(a, b, fmt...) assert_cmp(size_t, a, b, <, \
- >=, "zu", fmt)
-#define assert_zu_le(a, b, fmt...) assert_cmp(size_t, a, b, <=, \
- >, "zu", fmt)
-#define assert_zu_ge(a, b, fmt...) assert_cmp(size_t, a, b, >=, \
- <, "zu", fmt)
-#define assert_zu_gt(a, b, fmt...) assert_cmp(size_t, a, b, >, \
- <=, "zu", fmt)
+#define assert_zu_eq(a, b, ...) assert_cmp(size_t, a, b, ==, \
+ !=, "zu", __VA_ARGS__)
+#define assert_zu_ne(a, b, ...) assert_cmp(size_t, a, b, !=, \
+ ==, "zu", __VA_ARGS__)
+#define assert_zu_lt(a, b, ...) assert_cmp(size_t, a, b, <, \
+ >=, "zu", __VA_ARGS__)
+#define assert_zu_le(a, b, ...) assert_cmp(size_t, a, b, <=, \
+ >, "zu", __VA_ARGS__)
+#define assert_zu_ge(a, b, ...) assert_cmp(size_t, a, b, >=, \
+ <, "zu", __VA_ARGS__)
+#define assert_zu_gt(a, b, ...) assert_cmp(size_t, a, b, >, \
+ <=, "zu", __VA_ARGS__)
-#define assert_d32_eq(a, b, fmt...) assert_cmp(int32_t, a, b, ==, \
- !=, PRId32, fmt)
-#define assert_d32_ne(a, b, fmt...) assert_cmp(int32_t, a, b, !=, \
- ==, PRId32, fmt)
-#define assert_d32_lt(a, b, fmt...) assert_cmp(int32_t, a, b, <, \
- >=, PRId32, fmt)
-#define assert_d32_le(a, b, fmt...) assert_cmp(int32_t, a, b, <=, \
- >, PRId32, fmt)
-#define assert_d32_ge(a, b, fmt...) assert_cmp(int32_t, a, b, >=, \
- <, PRId32, fmt)
-#define assert_d32_gt(a, b, fmt...) assert_cmp(int32_t, a, b, >, \
- <=, PRId32, fmt)
+#define assert_d32_eq(a, b, ...) assert_cmp(int32_t, a, b, ==, \
+ !=, FMTd32, __VA_ARGS__)
+#define assert_d32_ne(a, b, ...) assert_cmp(int32_t, a, b, !=, \
+ ==, FMTd32, __VA_ARGS__)
+#define assert_d32_lt(a, b, ...) assert_cmp(int32_t, a, b, <, \
+ >=, FMTd32, __VA_ARGS__)
+#define assert_d32_le(a, b, ...) assert_cmp(int32_t, a, b, <=, \
+ >, FMTd32, __VA_ARGS__)
+#define assert_d32_ge(a, b, ...) assert_cmp(int32_t, a, b, >=, \
+ <, FMTd32, __VA_ARGS__)
+#define assert_d32_gt(a, b, ...) assert_cmp(int32_t, a, b, >, \
+ <=, FMTd32, __VA_ARGS__)
-#define assert_u32_eq(a, b, fmt...) assert_cmp(uint32_t, a, b, ==, \
- !=, PRIu32, fmt)
-#define assert_u32_ne(a, b, fmt...) assert_cmp(uint32_t, a, b, !=, \
- ==, PRIu32, fmt)
-#define assert_u32_lt(a, b, fmt...) assert_cmp(uint32_t, a, b, <, \
- >=, PRIu32, fmt)
-#define assert_u32_le(a, b, fmt...) assert_cmp(uint32_t, a, b, <=, \
- >, PRIu32, fmt)
-#define assert_u32_ge(a, b, fmt...) assert_cmp(uint32_t, a, b, >=, \
- <, PRIu32, fmt)
-#define assert_u32_gt(a, b, fmt...) assert_cmp(uint32_t, a, b, >, \
- <=, PRIu32, fmt)
+#define assert_u32_eq(a, b, ...) assert_cmp(uint32_t, a, b, ==, \
+ !=, FMTu32, __VA_ARGS__)
+#define assert_u32_ne(a, b, ...) assert_cmp(uint32_t, a, b, !=, \
+ ==, FMTu32, __VA_ARGS__)
+#define assert_u32_lt(a, b, ...) assert_cmp(uint32_t, a, b, <, \
+ >=, FMTu32, __VA_ARGS__)
+#define assert_u32_le(a, b, ...) assert_cmp(uint32_t, a, b, <=, \
+ >, FMTu32, __VA_ARGS__)
+#define assert_u32_ge(a, b, ...) assert_cmp(uint32_t, a, b, >=, \
+ <, FMTu32, __VA_ARGS__)
+#define assert_u32_gt(a, b, ...) assert_cmp(uint32_t, a, b, >, \
+ <=, FMTu32, __VA_ARGS__)
-#define assert_d64_eq(a, b, fmt...) assert_cmp(int64_t, a, b, ==, \
- !=, PRId64, fmt)
-#define assert_d64_ne(a, b, fmt...) assert_cmp(int64_t, a, b, !=, \
- ==, PRId64, fmt)
-#define assert_d64_lt(a, b, fmt...) assert_cmp(int64_t, a, b, <, \
- >=, PRId64, fmt)
-#define assert_d64_le(a, b, fmt...) assert_cmp(int64_t, a, b, <=, \
- >, PRId64, fmt)
-#define assert_d64_ge(a, b, fmt...) assert_cmp(int64_t, a, b, >=, \
- <, PRId64, fmt)
-#define assert_d64_gt(a, b, fmt...) assert_cmp(int64_t, a, b, >, \
- <=, PRId64, fmt)
+#define assert_d64_eq(a, b, ...) assert_cmp(int64_t, a, b, ==, \
+ !=, FMTd64, __VA_ARGS__)
+#define assert_d64_ne(a, b, ...) assert_cmp(int64_t, a, b, !=, \
+ ==, FMTd64, __VA_ARGS__)
+#define assert_d64_lt(a, b, ...) assert_cmp(int64_t, a, b, <, \
+ >=, FMTd64, __VA_ARGS__)
+#define assert_d64_le(a, b, ...) assert_cmp(int64_t, a, b, <=, \
+ >, FMTd64, __VA_ARGS__)
+#define assert_d64_ge(a, b, ...) assert_cmp(int64_t, a, b, >=, \
+ <, FMTd64, __VA_ARGS__)
+#define assert_d64_gt(a, b, ...) assert_cmp(int64_t, a, b, >, \
+ <=, FMTd64, __VA_ARGS__)
-#define assert_u64_eq(a, b, fmt...) assert_cmp(uint64_t, a, b, ==, \
- !=, PRIu64, fmt)
-#define assert_u64_ne(a, b, fmt...) assert_cmp(uint64_t, a, b, !=, \
- ==, PRIu64, fmt)
-#define assert_u64_lt(a, b, fmt...) assert_cmp(uint64_t, a, b, <, \
- >=, PRIu64, fmt)
-#define assert_u64_le(a, b, fmt...) assert_cmp(uint64_t, a, b, <=, \
- >, PRIu64, fmt)
-#define assert_u64_ge(a, b, fmt...) assert_cmp(uint64_t, a, b, >=, \
- <, PRIu64, fmt)
-#define assert_u64_gt(a, b, fmt...) assert_cmp(uint64_t, a, b, >, \
- <=, PRIu64, fmt)
+#define assert_u64_eq(a, b, ...) assert_cmp(uint64_t, a, b, ==, \
+ !=, FMTu64, __VA_ARGS__)
+#define assert_u64_ne(a, b, ...) assert_cmp(uint64_t, a, b, !=, \
+ ==, FMTu64, __VA_ARGS__)
+#define assert_u64_lt(a, b, ...) assert_cmp(uint64_t, a, b, <, \
+ >=, FMTu64, __VA_ARGS__)
+#define assert_u64_le(a, b, ...) assert_cmp(uint64_t, a, b, <=, \
+ >, FMTu64, __VA_ARGS__)
+#define assert_u64_ge(a, b, ...) assert_cmp(uint64_t, a, b, >=, \
+ <, FMTu64, __VA_ARGS__)
+#define assert_u64_gt(a, b, ...) assert_cmp(uint64_t, a, b, >, \
+ <=, FMTu64, __VA_ARGS__)
-#define assert_b_eq(a, b, fmt...) do { \
+#define assert_b_eq(a, b, ...) do { \
bool a_ = (a); \
bool b_ = (b); \
if (!(a_ == b_)) { \
@@ -222,11 +222,11 @@
__func__, __FILE__, __LINE__, \
#a, #b, a_ ? "true" : "false", \
b_ ? "true" : "false"); \
- malloc_snprintf(message, sizeof(message), fmt); \
+ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
p_test_fail(prefix, message); \
} \
} while (0)
-#define assert_b_ne(a, b, fmt...) do { \
+#define assert_b_ne(a, b, ...) do { \
bool a_ = (a); \
bool b_ = (b); \
if (!(a_ != b_)) { \
@@ -238,14 +238,14 @@
__func__, __FILE__, __LINE__, \
#a, #b, a_ ? "true" : "false", \
b_ ? "true" : "false"); \
- malloc_snprintf(message, sizeof(message), fmt); \
+ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
p_test_fail(prefix, message); \
} \
} while (0)
-#define assert_true(a, fmt...) assert_b_eq(a, true, fmt)
-#define assert_false(a, fmt...) assert_b_eq(a, false, fmt)
+#define assert_true(a, ...) assert_b_eq(a, true, __VA_ARGS__)
+#define assert_false(a, ...) assert_b_eq(a, false, __VA_ARGS__)
-#define assert_str_eq(a, b, fmt...) do { \
+#define assert_str_eq(a, b, ...) do { \
if (strcmp((a), (b))) { \
char prefix[ASSERT_BUFSIZE]; \
char message[ASSERT_BUFSIZE]; \
@@ -254,11 +254,11 @@
"(%s) same as (%s) --> " \
"\"%s\" differs from \"%s\": ", \
__func__, __FILE__, __LINE__, #a, #b, a, b); \
- malloc_snprintf(message, sizeof(message), fmt); \
+ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
p_test_fail(prefix, message); \
} \
} while (0)
-#define assert_str_ne(a, b, fmt...) do { \
+#define assert_str_ne(a, b, ...) do { \
if (!strcmp((a), (b))) { \
char prefix[ASSERT_BUFSIZE]; \
char message[ASSERT_BUFSIZE]; \
@@ -267,18 +267,18 @@
"(%s) differs from (%s) --> " \
"\"%s\" same as \"%s\": ", \
__func__, __FILE__, __LINE__, #a, #b, a, b); \
- malloc_snprintf(message, sizeof(message), fmt); \
+ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
p_test_fail(prefix, message); \
} \
} while (0)
-#define assert_not_reached(fmt...) do { \
+#define assert_not_reached(...) do { \
char prefix[ASSERT_BUFSIZE]; \
char message[ASSERT_BUFSIZE]; \
malloc_snprintf(prefix, sizeof(prefix), \
"%s:%s:%d: Unreachable code reached: ", \
__func__, __FILE__, __LINE__); \
- malloc_snprintf(message, sizeof(message), fmt); \
+ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
p_test_fail(prefix, message); \
} while (0)
@@ -308,8 +308,8 @@ label_test_end: \
p_test_fini(); \
}
-#define test(tests...) \
- p_test(tests, NULL)
+#define test(...) \
+ p_test(__VA_ARGS__, NULL)
#define test_skip_if(e) do { \
if (e) { \
@@ -319,11 +319,11 @@ label_test_end: \
} \
} while (0)
-void test_skip(const char *format, ...) JEMALLOC_ATTR(format(printf, 1, 2));
-void test_fail(const char *format, ...) JEMALLOC_ATTR(format(printf, 1, 2));
+void test_skip(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
+void test_fail(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
/* For private use by macros. */
-test_status_t p_test(test_t* t, ...);
+test_status_t p_test(test_t *t, ...);
void p_test_init(const char *name);
void p_test_fini(void);
void p_test_fail(const char *prefix, const char *message);
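
The sweeping rename above is a pure portability change: the GNU extension of naming the variadic macro parameter (fmt...) becomes standard C99 __VA_ARGS__. A minimal standalone illustration of the portable form, unrelated to the assertion machinery itself (log_msg is a made-up example macro):

#include <stddef.h>
#include <stdio.h>

#define log_msg(level, ...) \
	fprintf(stderr, "[" level "] " __VA_ARGS__)

int
main(void)
{

	log_msg("warn", "slot %d low on space (%zu bytes free)\n", 3,
	    (size_t)128);
	return (0);
}
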
diff --git a/deps/jemalloc/test/include/test/thd.h b/deps/jemalloc/test/include/test/thd.h
index f941d7a75..47a51262e 100644
--- a/deps/jemalloc/test/include/test/thd.h
+++ b/deps/jemalloc/test/include/test/thd.h
@@ -1,4 +1,4 @@
-/* Abstraction layer for threading in tests */
+/* Abstraction layer for threading in tests. */
#ifdef _WIN32
typedef HANDLE thd_t;
#else
diff --git a/deps/jemalloc/test/include/test/timer.h b/deps/jemalloc/test/include/test/timer.h
new file mode 100644
index 000000000..a7fefdfd1
--- /dev/null
+++ b/deps/jemalloc/test/include/test/timer.h
@@ -0,0 +1,26 @@
+/* Simple timer, for use in benchmark reporting. */
+
+#include <unistd.h>
+#include <sys/time.h>
+
+#define JEMALLOC_CLOCK_GETTIME defined(_POSIX_MONOTONIC_CLOCK) \
+ && _POSIX_MONOTONIC_CLOCK >= 0
+
+typedef struct {
+#ifdef _WIN32
+ FILETIME ft0;
+ FILETIME ft1;
+#elif JEMALLOC_CLOCK_GETTIME
+ struct timespec ts0;
+ struct timespec ts1;
+ int clock_id;
+#else
+ struct timeval tv0;
+ struct timeval tv1;
+#endif
+} timedelta_t;
+
+void timer_start(timedelta_t *timer);
+void timer_stop(timedelta_t *timer);
+uint64_t timer_usec(const timedelta_t *timer);
+void timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen);
diff --git a/deps/jemalloc/test/integration/MALLOCX_ARENA.c b/deps/jemalloc/test/integration/MALLOCX_ARENA.c
index 71cf6f255..30c203ae6 100644
--- a/deps/jemalloc/test/integration/MALLOCX_ARENA.c
+++ b/deps/jemalloc/test/integration/MALLOCX_ARENA.c
@@ -2,6 +2,14 @@
#define NTHREADS 10
+static bool have_dss =
+#ifdef JEMALLOC_DSS
+ true
+#else
+ false
+#endif
+ ;
+
void *
thd_start(void *arg)
{
@@ -18,13 +26,16 @@ thd_start(void *arg)
size_t mib[3];
size_t miblen = sizeof(mib) / sizeof(size_t);
const char *dss_precs[] = {"disabled", "primary", "secondary"};
- const char *dss = dss_precs[thread_ind %
- (sizeof(dss_precs)/sizeof(char*))];
+ unsigned prec_ind = thread_ind %
+ (sizeof(dss_precs)/sizeof(char*));
+ const char *dss = dss_precs[prec_ind];
+ int expected_err = (have_dss || prec_ind == 0) ? 0 : EFAULT;
assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0,
"Error in mallctlnametomib()");
mib[1] = arena_ind;
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&dss,
- sizeof(const char *)), 0, "Error in mallctlbymib()");
+ sizeof(const char *)), expected_err,
+ "Error in mallctlbymib()");
}
p = mallocx(1, MALLOCX_ARENA(arena_ind));
@@ -34,7 +45,7 @@ thd_start(void *arg)
return (NULL);
}
-TEST_BEGIN(test_ALLOCM_ARENA)
+TEST_BEGIN(test_MALLOCX_ARENA)
{
thd_t thds[NTHREADS];
unsigned i;
@@ -54,5 +65,5 @@ main(void)
{
return (test(
- test_ALLOCM_ARENA));
+ test_MALLOCX_ARENA));
}
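
The test above resolves "arena.0.dss" once with mallctlnametomib() and then patches the arena index into the MIB before each mallctlbymib() call. A minimal sketch of that lookup pattern for a program linked against jemalloc; it assumes the 4.x mallctl API and a default (unprefixed) build, and only reads arena 0's dss precedence rather than setting it.

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	size_t mib[3];
	size_t miblen = sizeof(mib) / sizeof(size_t);
	const char *dss;
	size_t sz = sizeof(dss);

	if (mallctlnametomib("arena.0.dss", mib, &miblen) != 0)
		return (1);
	mib[1] = 0;	/* Patch the arena index into the resolved MIB. */
	if (mallctlbymib(mib, miblen, (void *)&dss, &sz, NULL, 0) != 0)
		return (1);
	printf("arena 0 dss precedence: %s\n", dss);
	return (0);
}
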
diff --git a/deps/jemalloc/test/integration/allocm.c b/deps/jemalloc/test/integration/allocm.c
deleted file mode 100644
index 7b4ea0c2c..000000000
--- a/deps/jemalloc/test/integration/allocm.c
+++ /dev/null
@@ -1,107 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#define CHUNK 0x400000
-#define MAXALIGN (((size_t)1) << 25)
-#define NITER 4
-
-TEST_BEGIN(test_basic)
-{
- size_t nsz, rsz, sz;
- void *p;
-
- sz = 42;
- nsz = 0;
- assert_d_eq(nallocm(&nsz, sz, 0), ALLOCM_SUCCESS,
- "Unexpected nallocm() error");
- rsz = 0;
- assert_d_eq(allocm(&p, &rsz, sz, 0), ALLOCM_SUCCESS,
- "Unexpected allocm() error");
- assert_zu_ge(rsz, sz, "Real size smaller than expected");
- assert_zu_eq(nsz, rsz, "nallocm()/allocm() rsize mismatch");
- assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
- "Unexpected dallocm() error");
-
- assert_d_eq(allocm(&p, NULL, sz, 0), ALLOCM_SUCCESS,
- "Unexpected allocm() error");
- assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
- "Unexpected dallocm() error");
-
- nsz = 0;
- assert_d_eq(nallocm(&nsz, sz, ALLOCM_ZERO), ALLOCM_SUCCESS,
- "Unexpected nallocm() error");
- rsz = 0;
- assert_d_eq(allocm(&p, &rsz, sz, ALLOCM_ZERO), ALLOCM_SUCCESS,
- "Unexpected allocm() error");
- assert_zu_eq(nsz, rsz, "nallocm()/allocm() rsize mismatch");
- assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
- "Unexpected dallocm() error");
-}
-TEST_END
-
-TEST_BEGIN(test_alignment_and_size)
-{
- int r;
- size_t nsz, rsz, sz, alignment, total;
- unsigned i;
- void *ps[NITER];
-
- for (i = 0; i < NITER; i++)
- ps[i] = NULL;
-
- for (alignment = 8;
- alignment <= MAXALIGN;
- alignment <<= 1) {
- total = 0;
- for (sz = 1;
- sz < 3 * alignment && sz < (1U << 31);
- sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
- for (i = 0; i < NITER; i++) {
- nsz = 0;
- r = nallocm(&nsz, sz, ALLOCM_ALIGN(alignment) |
- ALLOCM_ZERO);
- assert_d_eq(r, ALLOCM_SUCCESS,
- "nallocm() error for alignment=%zu, "
- "size=%zu (%#zx): %d",
- alignment, sz, sz, r);
- rsz = 0;
- r = allocm(&ps[i], &rsz, sz,
- ALLOCM_ALIGN(alignment) | ALLOCM_ZERO);
- assert_d_eq(r, ALLOCM_SUCCESS,
- "allocm() error for alignment=%zu, "
- "size=%zu (%#zx): %d",
- alignment, sz, sz, r);
- assert_zu_ge(rsz, sz,
- "Real size smaller than expected for "
- "alignment=%zu, size=%zu", alignment, sz);
- assert_zu_eq(nsz, rsz,
- "nallocm()/allocm() rsize mismatch for "
- "alignment=%zu, size=%zu", alignment, sz);
- assert_ptr_null(
- (void *)((uintptr_t)ps[i] & (alignment-1)),
- "%p inadequately aligned for"
- " alignment=%zu, size=%zu", ps[i],
- alignment, sz);
- sallocm(ps[i], &rsz, 0);
- total += rsz;
- if (total >= (MAXALIGN << 1))
- break;
- }
- for (i = 0; i < NITER; i++) {
- if (ps[i] != NULL) {
- dallocm(ps[i], 0);
- ps[i] = NULL;
- }
- }
- }
- }
-}
-TEST_END
-
-int
-main(void)
-{
-
- return (test(
- test_basic,
- test_alignment_and_size));
-}
diff --git a/deps/jemalloc/test/integration/chunk.c b/deps/jemalloc/test/integration/chunk.c
new file mode 100644
index 000000000..af1c9a53e
--- /dev/null
+++ b/deps/jemalloc/test/integration/chunk.c
@@ -0,0 +1,276 @@
+#include "test/jemalloc_test.h"
+
+#ifdef JEMALLOC_FILL
+const char *malloc_conf = "junk:false";
+#endif
+
+static chunk_hooks_t orig_hooks;
+static chunk_hooks_t old_hooks;
+
+static bool do_dalloc = true;
+static bool do_decommit;
+
+static bool did_alloc;
+static bool did_dalloc;
+static bool did_commit;
+static bool did_decommit;
+static bool did_purge;
+static bool did_split;
+static bool did_merge;
+
+#if 0
+# define TRACE_HOOK(fmt, ...) malloc_printf(fmt, __VA_ARGS__)
+#else
+# define TRACE_HOOK(fmt, ...)
+#endif
+
+void *
+chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
+ bool *commit, unsigned arena_ind)
+{
+
+ TRACE_HOOK("%s(new_addr=%p, size=%zu, alignment=%zu, *zero=%s, "
+ "*commit=%s, arena_ind=%u)\n", __func__, new_addr, size, alignment,
+ *zero ? "true" : "false", *commit ? "true" : "false", arena_ind);
+ did_alloc = true;
+ return (old_hooks.alloc(new_addr, size, alignment, zero, commit,
+ arena_ind));
+}
+
+bool
+chunk_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind)
+{
+
+ TRACE_HOOK("%s(chunk=%p, size=%zu, committed=%s, arena_ind=%u)\n",
+ __func__, chunk, size, committed ? "true" : "false", arena_ind);
+ did_dalloc = true;
+ if (!do_dalloc)
+ return (true);
+ return (old_hooks.dalloc(chunk, size, committed, arena_ind));
+}
+
+bool
+chunk_commit(void *chunk, size_t size, size_t offset, size_t length,
+ unsigned arena_ind)
+{
+ bool err;
+
+ TRACE_HOOK("%s(chunk=%p, size=%zu, offset=%zu, length=%zu, "
+ "arena_ind=%u)\n", __func__, chunk, size, offset, length,
+ arena_ind);
+ err = old_hooks.commit(chunk, size, offset, length, arena_ind);
+ did_commit = !err;
+ return (err);
+}
+
+bool
+chunk_decommit(void *chunk, size_t size, size_t offset, size_t length,
+ unsigned arena_ind)
+{
+ bool err;
+
+ TRACE_HOOK("%s(chunk=%p, size=%zu, offset=%zu, length=%zu, "
+ "arena_ind=%u)\n", __func__, chunk, size, offset, length,
+ arena_ind);
+ if (!do_decommit)
+ return (true);
+ err = old_hooks.decommit(chunk, size, offset, length, arena_ind);
+ did_decommit = !err;
+ return (err);
+}
+
+bool
+chunk_purge(void *chunk, size_t size, size_t offset, size_t length,
+ unsigned arena_ind)
+{
+
+ TRACE_HOOK("%s(chunk=%p, size=%zu, offset=%zu, length=%zu "
+ "arena_ind=%u)\n", __func__, chunk, size, offset, length,
+ arena_ind);
+ did_purge = true;
+ return (old_hooks.purge(chunk, size, offset, length, arena_ind));
+}
+
+bool
+chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b,
+ bool committed, unsigned arena_ind)
+{
+
+ TRACE_HOOK("%s(chunk=%p, size=%zu, size_a=%zu, size_b=%zu, "
+ "committed=%s, arena_ind=%u)\n", __func__, chunk, size, size_a,
+ size_b, committed ? "true" : "false", arena_ind);
+ did_split = true;
+ return (old_hooks.split(chunk, size, size_a, size_b, committed,
+ arena_ind));
+}
+
+bool
+chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
+ bool committed, unsigned arena_ind)
+{
+
+ TRACE_HOOK("%s(chunk_a=%p, size_a=%zu, chunk_b=%p size_b=%zu, "
+ "committed=%s, arena_ind=%u)\n", __func__, chunk_a, size_a, chunk_b,
+ size_b, committed ? "true" : "false", arena_ind);
+ did_merge = true;
+ return (old_hooks.merge(chunk_a, size_a, chunk_b, size_b,
+ committed, arena_ind));
+}
+
+TEST_BEGIN(test_chunk)
+{
+ void *p;
+ size_t old_size, new_size, large0, large1, huge0, huge1, huge2, sz;
+ chunk_hooks_t new_hooks = {
+ chunk_alloc,
+ chunk_dalloc,
+ chunk_commit,
+ chunk_decommit,
+ chunk_purge,
+ chunk_split,
+ chunk_merge
+ };
+ bool xallocx_success_a, xallocx_success_b, xallocx_success_c;
+
+ /* Install custom chunk hooks. */
+ old_size = sizeof(chunk_hooks_t);
+ new_size = sizeof(chunk_hooks_t);
+ assert_d_eq(mallctl("arena.0.chunk_hooks", &old_hooks, &old_size,
+ &new_hooks, new_size), 0, "Unexpected chunk_hooks error");
+ orig_hooks = old_hooks;
+ assert_ptr_ne(old_hooks.alloc, chunk_alloc, "Unexpected alloc error");
+ assert_ptr_ne(old_hooks.dalloc, chunk_dalloc,
+ "Unexpected dalloc error");
+ assert_ptr_ne(old_hooks.commit, chunk_commit,
+ "Unexpected commit error");
+ assert_ptr_ne(old_hooks.decommit, chunk_decommit,
+ "Unexpected decommit error");
+ assert_ptr_ne(old_hooks.purge, chunk_purge, "Unexpected purge error");
+ assert_ptr_ne(old_hooks.split, chunk_split, "Unexpected split error");
+ assert_ptr_ne(old_hooks.merge, chunk_merge, "Unexpected merge error");
+
+ /* Get large size classes. */
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0,
+ "Unexpected arenas.lrun.0.size failure");
+ assert_d_eq(mallctl("arenas.lrun.1.size", &large1, &sz, NULL, 0), 0,
+ "Unexpected arenas.lrun.1.size failure");
+
+ /* Get huge size classes. */
+ assert_d_eq(mallctl("arenas.hchunk.0.size", &huge0, &sz, NULL, 0), 0,
+ "Unexpected arenas.hchunk.0.size failure");
+ assert_d_eq(mallctl("arenas.hchunk.1.size", &huge1, &sz, NULL, 0), 0,
+ "Unexpected arenas.hchunk.1.size failure");
+ assert_d_eq(mallctl("arenas.hchunk.2.size", &huge2, &sz, NULL, 0), 0,
+ "Unexpected arenas.hchunk.2.size failure");
+
+ /* Test dalloc/decommit/purge cascade. */
+ do_dalloc = false;
+ do_decommit = false;
+ p = mallocx(huge0 * 2, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ did_dalloc = false;
+ did_decommit = false;
+ did_purge = false;
+ did_split = false;
+ xallocx_success_a = (xallocx(p, huge0, 0, 0) == huge0);
+ assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected arena.0.purge error");
+ if (xallocx_success_a) {
+ assert_true(did_dalloc, "Expected dalloc");
+ assert_false(did_decommit, "Unexpected decommit");
+ assert_true(did_purge, "Expected purge");
+ }
+ assert_true(did_split, "Expected split");
+ dallocx(p, 0);
+ do_dalloc = true;
+
+ /* Test decommit/commit and observe split/merge. */
+ do_dalloc = false;
+ do_decommit = true;
+ p = mallocx(huge0 * 2, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ did_decommit = false;
+ did_commit = false;
+ did_split = false;
+ did_merge = false;
+ xallocx_success_b = (xallocx(p, huge0, 0, 0) == huge0);
+ assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected arena.0.purge error");
+ if (xallocx_success_b)
+ assert_true(did_split, "Expected split");
+ xallocx_success_c = (xallocx(p, huge0 * 2, 0, 0) == huge0 * 2);
+ assert_b_eq(did_decommit, did_commit, "Expected decommit/commit match");
+ if (xallocx_success_b && xallocx_success_c)
+ assert_true(did_merge, "Expected merge");
+ dallocx(p, 0);
+ do_dalloc = true;
+ do_decommit = false;
+
+ /* Test purge for partial-chunk huge allocations. */
+ if (huge0 * 2 > huge2) {
+ /*
+ * There are at least four size classes per doubling, so a
+ * successful xallocx() from size=huge2 to size=huge1 is
+ * guaranteed to leave trailing purgeable memory.
+ */
+ p = mallocx(huge2, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ did_purge = false;
+ assert_zu_eq(xallocx(p, huge1, 0, 0), huge1,
+ "Unexpected xallocx() failure");
+ assert_true(did_purge, "Expected purge");
+ dallocx(p, 0);
+ }
+
+ /* Test decommit for large allocations. */
+ do_decommit = true;
+ p = mallocx(large1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected arena.0.purge error");
+ did_decommit = false;
+ assert_zu_eq(xallocx(p, large0, 0, 0), large0,
+ "Unexpected xallocx() failure");
+ assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected arena.0.purge error");
+ did_commit = false;
+ assert_zu_eq(xallocx(p, large1, 0, 0), large1,
+ "Unexpected xallocx() failure");
+ assert_b_eq(did_decommit, did_commit, "Expected decommit/commit match");
+ dallocx(p, 0);
+ do_decommit = false;
+
+ /* Make sure non-huge allocation succeeds. */
+ p = mallocx(42, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ dallocx(p, 0);
+
+ /* Restore chunk hooks. */
+ assert_d_eq(mallctl("arena.0.chunk_hooks", NULL, NULL, &old_hooks,
+ new_size), 0, "Unexpected chunk_hooks error");
+ assert_d_eq(mallctl("arena.0.chunk_hooks", &old_hooks, &old_size,
+ NULL, 0), 0, "Unexpected chunk_hooks error");
+ assert_ptr_eq(old_hooks.alloc, orig_hooks.alloc,
+ "Unexpected alloc error");
+ assert_ptr_eq(old_hooks.dalloc, orig_hooks.dalloc,
+ "Unexpected dalloc error");
+ assert_ptr_eq(old_hooks.commit, orig_hooks.commit,
+ "Unexpected commit error");
+ assert_ptr_eq(old_hooks.decommit, orig_hooks.decommit,
+ "Unexpected decommit error");
+ assert_ptr_eq(old_hooks.purge, orig_hooks.purge,
+ "Unexpected purge error");
+ assert_ptr_eq(old_hooks.split, orig_hooks.split,
+ "Unexpected split error");
+ assert_ptr_eq(old_hooks.merge, orig_hooks.merge,
+ "Unexpected merge error");
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(test_chunk));
+}
diff --git a/deps/jemalloc/test/integration/mallocx.c b/deps/jemalloc/test/integration/mallocx.c
index 123e041fa..6253175d6 100644
--- a/deps/jemalloc/test/integration/mallocx.c
+++ b/deps/jemalloc/test/integration/mallocx.c
@@ -1,40 +1,122 @@
#include "test/jemalloc_test.h"
-#define CHUNK 0x400000
-#define MAXALIGN (((size_t)1) << 25)
-#define NITER 4
+static unsigned
+get_nsizes_impl(const char *cmd)
+{
+ unsigned ret;
+ size_t z;
+
+ z = sizeof(unsigned);
+ assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0,
+ "Unexpected mallctl(\"%s\", ...) failure", cmd);
+
+ return (ret);
+}
+
+static unsigned
+get_nhuge(void)
+{
+
+ return (get_nsizes_impl("arenas.nhchunks"));
+}
+
+static size_t
+get_size_impl(const char *cmd, size_t ind)
+{
+ size_t ret;
+ size_t z;
+ size_t mib[4];
+ size_t miblen = 4;
+
+ z = sizeof(size_t);
+ assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
+ 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
+ mib[2] = ind;
+ z = sizeof(size_t);
+ assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0),
+ 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
+
+ return (ret);
+}
+
+static size_t
+get_huge_size(size_t ind)
+{
+
+ return (get_size_impl("arenas.hchunk.0.size", ind));
+}
+
+TEST_BEGIN(test_oom)
+{
+ size_t hugemax, size, alignment;
+
+ hugemax = get_huge_size(get_nhuge()-1);
+
+ /*
+ * It should be impossible to allocate two objects that each consume
+ * more than half the virtual address space.
+ */
+ {
+ void *p;
+
+ p = mallocx(hugemax, 0);
+ if (p != NULL) {
+ assert_ptr_null(mallocx(hugemax, 0),
+ "Expected OOM for mallocx(size=%#zx, 0)", hugemax);
+ dallocx(p, 0);
+ }
+ }
+
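+	/* With size and alignment each half the address space, the request cannot be satisfied. */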
+#if LG_SIZEOF_PTR == 3
+ size = ZU(0x8000000000000000);
+ alignment = ZU(0x8000000000000000);
+#else
+ size = ZU(0x80000000);
+ alignment = ZU(0x80000000);
+#endif
+ assert_ptr_null(mallocx(size, MALLOCX_ALIGN(alignment)),
+ "Expected OOM for mallocx(size=%#zx, MALLOCX_ALIGN(%#zx)", size,
+ alignment);
+}
+TEST_END
TEST_BEGIN(test_basic)
{
- size_t nsz, rsz, sz;
- void *p;
-
- sz = 42;
- nsz = nallocx(sz, 0);
- assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
- p = mallocx(sz, 0);
- assert_ptr_not_null(p, "Unexpected mallocx() error");
- rsz = sallocx(p, 0);
- assert_zu_ge(rsz, sz, "Real size smaller than expected");
- assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
- dallocx(p, 0);
-
- p = mallocx(sz, 0);
- assert_ptr_not_null(p, "Unexpected mallocx() error");
- dallocx(p, 0);
-
- nsz = nallocx(sz, MALLOCX_ZERO);
- assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
- p = mallocx(sz, MALLOCX_ZERO);
- assert_ptr_not_null(p, "Unexpected mallocx() error");
- rsz = sallocx(p, 0);
- assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
- dallocx(p, 0);
+#define MAXSZ (((size_t)1) << 26)
+ size_t sz;
+
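+	/* nallocx(sz, 0) + 1 advances sz to the first size of the next size class. */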
+ for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) {
+ size_t nsz, rsz;
+ void *p;
+ nsz = nallocx(sz, 0);
+ assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
+ p = mallocx(sz, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ rsz = sallocx(p, 0);
+ assert_zu_ge(rsz, sz, "Real size smaller than expected");
+ assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
+ dallocx(p, 0);
+
+ p = mallocx(sz, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ dallocx(p, 0);
+
+ nsz = nallocx(sz, MALLOCX_ZERO);
+ assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
+ p = mallocx(sz, MALLOCX_ZERO);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ rsz = sallocx(p, 0);
+ assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
+ dallocx(p, 0);
+ }
+#undef MAXSZ
}
TEST_END
TEST_BEGIN(test_alignment_and_size)
{
+#define MAXALIGN (((size_t)1) << 25)
+#define NITER 4
size_t nsz, rsz, sz, alignment, total;
unsigned i;
void *ps[NITER];
@@ -84,6 +166,8 @@ TEST_BEGIN(test_alignment_and_size)
}
}
}
+#undef MAXALIGN
+#undef NITER
}
TEST_END
@@ -92,6 +176,7 @@ main(void)
{
return (test(
+ test_oom,
test_basic,
test_alignment_and_size));
}
diff --git a/deps/jemalloc/test/integration/mremap.c b/deps/jemalloc/test/integration/mremap.c
deleted file mode 100644
index a7fb7ef0a..000000000
--- a/deps/jemalloc/test/integration/mremap.c
+++ /dev/null
@@ -1,45 +0,0 @@
-#include "test/jemalloc_test.h"
-
-TEST_BEGIN(test_mremap)
-{
- int err;
- size_t sz, lg_chunk, chunksize, i;
- char *p, *q;
-
- sz = sizeof(lg_chunk);
- err = mallctl("opt.lg_chunk", &lg_chunk, &sz, NULL, 0);
- assert_d_eq(err, 0, "Error in mallctl(): %s", strerror(err));
- chunksize = ((size_t)1U) << lg_chunk;
-
- p = (char *)malloc(chunksize);
- assert_ptr_not_null(p, "malloc(%zu) --> %p", chunksize, p);
- memset(p, 'a', chunksize);
-
- q = (char *)realloc(p, chunksize * 2);
- assert_ptr_not_null(q, "realloc(%p, %zu) --> %p", p, chunksize * 2,
- q);
- for (i = 0; i < chunksize; i++) {
- assert_c_eq(q[i], 'a',
- "realloc() should preserve existing bytes across copies");
- }
-
- p = q;
-
- q = (char *)realloc(p, chunksize);
- assert_ptr_not_null(q, "realloc(%p, %zu) --> %p", p, chunksize, q);
- for (i = 0; i < chunksize; i++) {
- assert_c_eq(q[i], 'a',
- "realloc() should preserve existing bytes across copies");
- }
-
- free(q);
-}
-TEST_END
-
-int
-main(void)
-{
-
- return (test(
- test_mremap));
-}
diff --git a/deps/jemalloc/test/integration/overflow.c b/deps/jemalloc/test/integration/overflow.c
new file mode 100644
index 000000000..303d9b2d3
--- /dev/null
+++ b/deps/jemalloc/test/integration/overflow.c
@@ -0,0 +1,49 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_overflow)
+{
+ unsigned nhchunks;
+ size_t mib[4];
+ size_t sz, miblen, max_size_class;
+ void *p;
+
+ sz = sizeof(unsigned);
+ assert_d_eq(mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0), 0,
+ "Unexpected mallctl() error");
+
+ miblen = sizeof(mib) / sizeof(size_t);
+ assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() error");
+ mib[2] = nhchunks - 1;
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctlbymib(mib, miblen, &max_size_class, &sz, NULL, 0), 0,
+ "Unexpected mallctlbymib() error");
+
+ assert_ptr_null(malloc(max_size_class + 1),
+ "Expected OOM due to over-sized allocation request");
+ assert_ptr_null(malloc(SIZE_T_MAX),
+ "Expected OOM due to over-sized allocation request");
+
+ assert_ptr_null(calloc(1, max_size_class + 1),
+ "Expected OOM due to over-sized allocation request");
+ assert_ptr_null(calloc(1, SIZE_T_MAX),
+ "Expected OOM due to over-sized allocation request");
+
+ p = malloc(1);
+ assert_ptr_not_null(p, "Unexpected malloc() OOM");
+ assert_ptr_null(realloc(p, max_size_class + 1),
+ "Expected OOM due to over-sized allocation request");
+ assert_ptr_null(realloc(p, SIZE_T_MAX),
+ "Expected OOM due to over-sized allocation request");
+ free(p);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_overflow));
+}
diff --git a/deps/jemalloc/test/integration/rallocm.c b/deps/jemalloc/test/integration/rallocm.c
deleted file mode 100644
index 33c11bb7c..000000000
--- a/deps/jemalloc/test/integration/rallocm.c
+++ /dev/null
@@ -1,111 +0,0 @@
-#include "test/jemalloc_test.h"
-
-TEST_BEGIN(test_same_size)
-{
- void *p, *q;
- size_t sz, tsz;
-
- assert_d_eq(allocm(&p, &sz, 42, 0), ALLOCM_SUCCESS,
- "Unexpected allocm() error");
-
- q = p;
- assert_d_eq(rallocm(&q, &tsz, sz, 0, ALLOCM_NO_MOVE), ALLOCM_SUCCESS,
- "Unexpected rallocm() error");
- assert_ptr_eq(q, p, "Unexpected object move");
- assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
-
- assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
- "Unexpected dallocm() error");
-}
-TEST_END
-
-TEST_BEGIN(test_extra_no_move)
-{
- void *p, *q;
- size_t sz, tsz;
-
- assert_d_eq(allocm(&p, &sz, 42, 0), ALLOCM_SUCCESS,
- "Unexpected allocm() error");
-
- q = p;
- assert_d_eq(rallocm(&q, &tsz, sz, sz-42, ALLOCM_NO_MOVE),
- ALLOCM_SUCCESS, "Unexpected rallocm() error");
- assert_ptr_eq(q, p, "Unexpected object move");
- assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
-
- assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
- "Unexpected dallocm() error");
-}
-TEST_END
-
-TEST_BEGIN(test_no_move_fail)
-{
- void *p, *q;
- size_t sz, tsz;
-
- assert_d_eq(allocm(&p, &sz, 42, 0), ALLOCM_SUCCESS,
- "Unexpected allocm() error");
-
- q = p;
- assert_d_eq(rallocm(&q, &tsz, sz + 5, 0, ALLOCM_NO_MOVE),
- ALLOCM_ERR_NOT_MOVED, "Unexpected rallocm() result");
- assert_ptr_eq(q, p, "Unexpected object move");
- assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
-
- assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
- "Unexpected dallocm() error");
-}
-TEST_END
-
-TEST_BEGIN(test_grow_and_shrink)
-{
- void *p, *q;
- size_t tsz;
-#define NCYCLES 3
- unsigned i, j;
-#define NSZS 2500
- size_t szs[NSZS];
-#define MAXSZ ZU(12 * 1024 * 1024)
-
- assert_d_eq(allocm(&p, &szs[0], 1, 0), ALLOCM_SUCCESS,
- "Unexpected allocm() error");
-
- for (i = 0; i < NCYCLES; i++) {
- for (j = 1; j < NSZS && szs[j-1] < MAXSZ; j++) {
- q = p;
- assert_d_eq(rallocm(&q, &szs[j], szs[j-1]+1, 0, 0),
- ALLOCM_SUCCESS,
- "Unexpected rallocm() error for size=%zu-->%zu",
- szs[j-1], szs[j-1]+1);
- assert_zu_ne(szs[j], szs[j-1]+1,
- "Expected size to at least: %zu", szs[j-1]+1);
- p = q;
- }
-
- for (j--; j > 0; j--) {
- q = p;
- assert_d_eq(rallocm(&q, &tsz, szs[j-1], 0, 0),
- ALLOCM_SUCCESS,
- "Unexpected rallocm() error for size=%zu-->%zu",
- szs[j], szs[j-1]);
- assert_zu_eq(tsz, szs[j-1],
- "Expected size=%zu, got size=%zu", szs[j-1], tsz);
- p = q;
- }
- }
-
- assert_d_eq(dallocm(p, 0), ALLOCM_SUCCESS,
- "Unexpected dallocm() error");
-}
-TEST_END
-
-int
-main(void)
-{
-
- return (test(
- test_same_size,
- test_extra_no_move,
- test_no_move_fail,
- test_grow_and_shrink));
-}
diff --git a/deps/jemalloc/test/integration/rallocx.c b/deps/jemalloc/test/integration/rallocx.c
index ee21aedff..be1b27b73 100644
--- a/deps/jemalloc/test/integration/rallocx.c
+++ b/deps/jemalloc/test/integration/rallocx.c
@@ -22,7 +22,7 @@ TEST_BEGIN(test_grow_and_shrink)
szs[j-1], szs[j-1]+1);
szs[j] = sallocx(q, 0);
assert_zu_ne(szs[j], szs[j-1]+1,
- "Expected size to at least: %zu", szs[j-1]+1);
+ "Expected size to be at least: %zu", szs[j-1]+1);
p = q;
}
@@ -55,8 +55,9 @@ validate_fill(const void *p, uint8_t c, size_t offset, size_t len)
for (i = 0; i < len; i++) {
uint8_t b = buf[offset+i];
if (b != c) {
- test_fail("Allocation at %p contains %#x rather than "
- "%#x at offset %zu", p, b, c, offset+i);
+ test_fail("Allocation at %p (len=%zu) contains %#x "
+ "rather than %#x at offset %zu", p, len, b, c,
+ offset+i);
ret = true;
}
}
@@ -95,7 +96,8 @@ TEST_BEGIN(test_zero)
"Expected zeroed memory");
}
if (psz != qsz) {
- memset(q+psz, FILL_BYTE, qsz-psz);
+ memset((void *)((uintptr_t)q+psz), FILL_BYTE,
+ qsz-psz);
psz = qsz;
}
p = q;
@@ -159,8 +161,9 @@ TEST_BEGIN(test_lg_align_and_zero)
} else {
assert_false(validate_fill(q, 0, 0, MAX_VALIDATE),
"Expected zeroed memory");
- assert_false(validate_fill(q+sz-MAX_VALIDATE, 0, 0,
- MAX_VALIDATE), "Expected zeroed memory");
+ assert_false(validate_fill(
+ (void *)((uintptr_t)q+sz-MAX_VALIDATE),
+ 0, 0, MAX_VALIDATE), "Expected zeroed memory");
}
p = q;
}
diff --git a/deps/jemalloc/test/integration/sdallocx.c b/deps/jemalloc/test/integration/sdallocx.c
new file mode 100644
index 000000000..b84817d76
--- /dev/null
+++ b/deps/jemalloc/test/integration/sdallocx.c
@@ -0,0 +1,57 @@
+#include "test/jemalloc_test.h"
+
+#define MAXALIGN (((size_t)1) << 25)
+#define NITER 4
+
+TEST_BEGIN(test_basic)
+{
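+	/* sdallocx() is passed the size that was originally requested from mallocx(). */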
+ void *ptr = mallocx(64, 0);
+ sdallocx(ptr, 64, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_alignment_and_size)
+{
+ size_t nsz, sz, alignment, total;
+ unsigned i;
+ void *ps[NITER];
+
+ for (i = 0; i < NITER; i++)
+ ps[i] = NULL;
+
+ for (alignment = 8;
+ alignment <= MAXALIGN;
+ alignment <<= 1) {
+ total = 0;
+ for (sz = 1;
+ sz < 3 * alignment && sz < (1U << 31);
+ sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
+ for (i = 0; i < NITER; i++) {
+ nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
+ MALLOCX_ZERO);
+ ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) |
+ MALLOCX_ZERO);
+ total += nsz;
+ if (total >= (MAXALIGN << 1))
+ break;
+ }
+ for (i = 0; i < NITER; i++) {
+ if (ps[i] != NULL) {
+ sdallocx(ps[i], sz,
+ MALLOCX_ALIGN(alignment));
+ ps[i] = NULL;
+ }
+ }
+ }
+ }
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_basic,
+ test_alignment_and_size));
+}
diff --git a/deps/jemalloc/test/integration/xallocx.c b/deps/jemalloc/test/integration/xallocx.c
index ab4cf945e..373625219 100644
--- a/deps/jemalloc/test/integration/xallocx.c
+++ b/deps/jemalloc/test/integration/xallocx.c
@@ -48,6 +48,411 @@ TEST_BEGIN(test_no_move_fail)
}
TEST_END
+static unsigned
+get_nsizes_impl(const char *cmd)
+{
+ unsigned ret;
+ size_t z;
+
+ z = sizeof(unsigned);
+ assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0,
+ "Unexpected mallctl(\"%s\", ...) failure", cmd);
+
+ return (ret);
+}
+
+static unsigned
+get_nsmall(void)
+{
+
+ return (get_nsizes_impl("arenas.nbins"));
+}
+
+static unsigned
+get_nlarge(void)
+{
+
+ return (get_nsizes_impl("arenas.nlruns"));
+}
+
+static unsigned
+get_nhuge(void)
+{
+
+ return (get_nsizes_impl("arenas.nhchunks"));
+}
+
+static size_t
+get_size_impl(const char *cmd, size_t ind)
+{
+ size_t ret;
+ size_t z;
+ size_t mib[4];
+ size_t miblen = 4;
+
+ z = sizeof(size_t);
+ assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
+ 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
+ mib[2] = ind;
+ z = sizeof(size_t);
+ assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0),
+ 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
+
+ return (ret);
+}
+
+static size_t
+get_small_size(size_t ind)
+{
+
+ return (get_size_impl("arenas.bin.0.size", ind));
+}
+
+static size_t
+get_large_size(size_t ind)
+{
+
+ return (get_size_impl("arenas.lrun.0.size", ind));
+}
+
+static size_t
+get_huge_size(size_t ind)
+{
+
+ return (get_size_impl("arenas.hchunk.0.size", ind));
+}
+
+TEST_BEGIN(test_size)
+{
+ size_t small0, hugemax;
+ void *p;
+
+ /* Get size classes. */
+ small0 = get_small_size(0);
+ hugemax = get_huge_size(get_nhuge()-1);
+
+ p = mallocx(small0, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+ /* Test smallest supported size. */
+ assert_zu_eq(xallocx(p, 1, 0, 0), small0,
+ "Unexpected xallocx() behavior");
+
+ /* Test largest supported size. */
+ assert_zu_le(xallocx(p, hugemax, 0, 0), hugemax,
+ "Unexpected xallocx() behavior");
+
+ /* Test size overflow. */
+ assert_zu_le(xallocx(p, hugemax+1, 0, 0), hugemax,
+ "Unexpected xallocx() behavior");
+ assert_zu_le(xallocx(p, SIZE_T_MAX, 0, 0), hugemax,
+ "Unexpected xallocx() behavior");
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_size_extra_overflow)
+{
+ size_t small0, hugemax;
+ void *p;
+
+ /* Get size classes. */
+ small0 = get_small_size(0);
+ hugemax = get_huge_size(get_nhuge()-1);
+
+ p = mallocx(small0, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+ /* Test overflows that can be resolved by clamping extra. */
+ assert_zu_le(xallocx(p, hugemax-1, 2, 0), hugemax,
+ "Unexpected xallocx() behavior");
+ assert_zu_le(xallocx(p, hugemax, 1, 0), hugemax,
+ "Unexpected xallocx() behavior");
+
+ /* Test overflow such that hugemax-size underflows. */
+ assert_zu_le(xallocx(p, hugemax+1, 2, 0), hugemax,
+ "Unexpected xallocx() behavior");
+ assert_zu_le(xallocx(p, hugemax+2, 3, 0), hugemax,
+ "Unexpected xallocx() behavior");
+ assert_zu_le(xallocx(p, SIZE_T_MAX-2, 2, 0), hugemax,
+ "Unexpected xallocx() behavior");
+ assert_zu_le(xallocx(p, SIZE_T_MAX-1, 1, 0), hugemax,
+ "Unexpected xallocx() behavior");
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_extra_small)
+{
+ size_t small0, small1, hugemax;
+ void *p;
+
+ /* Get size classes. */
+ small0 = get_small_size(0);
+ small1 = get_small_size(1);
+ hugemax = get_huge_size(get_nhuge()-1);
+
+ p = mallocx(small0, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+ assert_zu_eq(xallocx(p, small1, 0, 0), small0,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_eq(xallocx(p, small1, 0, 0), small0,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_eq(xallocx(p, small0, small1 - small0, 0), small0,
+ "Unexpected xallocx() behavior");
+
+ /* Test size+extra overflow. */
+ assert_zu_eq(xallocx(p, small0, hugemax - small0 + 1, 0), small0,
+ "Unexpected xallocx() behavior");
+ assert_zu_eq(xallocx(p, small0, SIZE_T_MAX - small0, 0), small0,
+ "Unexpected xallocx() behavior");
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_extra_large)
+{
+ size_t smallmax, large0, large1, large2, huge0, hugemax;
+ void *p;
+
+ /* Get size classes. */
+ smallmax = get_small_size(get_nsmall()-1);
+ large0 = get_large_size(0);
+ large1 = get_large_size(1);
+ large2 = get_large_size(2);
+ huge0 = get_huge_size(0);
+ hugemax = get_huge_size(get_nhuge()-1);
+
+ p = mallocx(large2, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+ assert_zu_eq(xallocx(p, large2, 0, 0), large2,
+ "Unexpected xallocx() behavior");
+ /* Test size decrease with zero extra. */
+ assert_zu_eq(xallocx(p, large0, 0, 0), large0,
+ "Unexpected xallocx() behavior");
+ assert_zu_eq(xallocx(p, smallmax, 0, 0), large0,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_eq(xallocx(p, large2, 0, 0), large2,
+ "Unexpected xallocx() behavior");
+ /* Test size decrease with non-zero extra. */
+ assert_zu_eq(xallocx(p, large0, large2 - large0, 0), large2,
+ "Unexpected xallocx() behavior");
+ assert_zu_eq(xallocx(p, large1, large2 - large1, 0), large2,
+ "Unexpected xallocx() behavior");
+ assert_zu_eq(xallocx(p, large0, large1 - large0, 0), large1,
+ "Unexpected xallocx() behavior");
+ assert_zu_eq(xallocx(p, smallmax, large0 - smallmax, 0), large0,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_eq(xallocx(p, large0, 0, 0), large0,
+ "Unexpected xallocx() behavior");
+ /* Test size increase with zero extra. */
+ assert_zu_eq(xallocx(p, large2, 0, 0), large2,
+ "Unexpected xallocx() behavior");
+ assert_zu_eq(xallocx(p, huge0, 0, 0), large2,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_eq(xallocx(p, large0, 0, 0), large0,
+ "Unexpected xallocx() behavior");
+ /* Test size increase with non-zero extra. */
+ assert_zu_lt(xallocx(p, large0, huge0 - large0, 0), huge0,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_eq(xallocx(p, large0, 0, 0), large0,
+ "Unexpected xallocx() behavior");
+ /* Test size increase with non-zero extra. */
+ assert_zu_eq(xallocx(p, large0, large2 - large0, 0), large2,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_eq(xallocx(p, large2, 0, 0), large2,
+ "Unexpected xallocx() behavior");
+ /* Test size+extra overflow. */
+ assert_zu_lt(xallocx(p, large2, hugemax - large2 + 1, 0), huge0,
+ "Unexpected xallocx() behavior");
+
+ dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_extra_huge)
+{
+ size_t largemax, huge0, huge1, huge2, hugemax;
+ void *p;
+
+ /* Get size classes. */
+ largemax = get_large_size(get_nlarge()-1);
+ huge0 = get_huge_size(0);
+ huge1 = get_huge_size(1);
+ huge2 = get_huge_size(2);
+ hugemax = get_huge_size(get_nhuge()-1);
+
+ p = mallocx(huge2, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+ assert_zu_eq(xallocx(p, huge2, 0, 0), huge2,
+ "Unexpected xallocx() behavior");
+ /* Test size decrease with zero extra. */
+ assert_zu_ge(xallocx(p, huge0, 0, 0), huge0,
+ "Unexpected xallocx() behavior");
+ assert_zu_ge(xallocx(p, largemax, 0, 0), huge0,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_eq(xallocx(p, huge2, 0, 0), huge2,
+ "Unexpected xallocx() behavior");
+ /* Test size decrease with non-zero extra. */
+ assert_zu_eq(xallocx(p, huge0, huge2 - huge0, 0), huge2,
+ "Unexpected xallocx() behavior");
+ assert_zu_eq(xallocx(p, huge1, huge2 - huge1, 0), huge2,
+ "Unexpected xallocx() behavior");
+ assert_zu_eq(xallocx(p, huge0, huge1 - huge0, 0), huge1,
+ "Unexpected xallocx() behavior");
+ assert_zu_ge(xallocx(p, largemax, huge0 - largemax, 0), huge0,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_ge(xallocx(p, huge0, 0, 0), huge0,
+ "Unexpected xallocx() behavior");
+ /* Test size increase with zero extra. */
+ assert_zu_le(xallocx(p, huge2, 0, 0), huge2,
+ "Unexpected xallocx() behavior");
+ assert_zu_le(xallocx(p, hugemax+1, 0, 0), huge2,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_ge(xallocx(p, huge0, 0, 0), huge0,
+ "Unexpected xallocx() behavior");
+ /* Test size increase with non-zero extra. */
+ assert_zu_le(xallocx(p, huge0, SIZE_T_MAX - huge0, 0), hugemax,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_ge(xallocx(p, huge0, 0, 0), huge0,
+ "Unexpected xallocx() behavior");
+ /* Test size increase with non-zero extra. */
+ assert_zu_le(xallocx(p, huge0, huge2 - huge0, 0), huge2,
+ "Unexpected xallocx() behavior");
+
+ assert_zu_eq(xallocx(p, huge2, 0, 0), huge2,
+ "Unexpected xallocx() behavior");
+ /* Test size+extra overflow. */
+ assert_zu_le(xallocx(p, huge2, hugemax - huge2 + 1, 0), hugemax,
+ "Unexpected xallocx() behavior");
+
+ dallocx(p, 0);
+}
+TEST_END
+
+static void
+print_filled_extents(const void *p, uint8_t c, size_t len)
+{
+ const uint8_t *pc = (const uint8_t *)p;
+ size_t i, range0;
+ uint8_t c0;
+
+ malloc_printf(" p=%p, c=%#x, len=%zu:", p, c, len);
+ range0 = 0;
+ c0 = pc[0];
+ for (i = 0; i < len; i++) {
+ if (pc[i] != c0) {
+ malloc_printf(" %#x[%zu..%zu)", c0, range0, i);
+ range0 = i;
+ c0 = pc[i];
+ }
+ }
+ malloc_printf(" %#x[%zu..%zu)\n", c0, range0, i);
+}
+
+static bool
+validate_fill(const void *p, uint8_t c, size_t offset, size_t len)
+{
+ const uint8_t *pc = (const uint8_t *)p;
+ bool err;
+ size_t i;
+
+ for (i = offset, err = false; i < offset+len; i++) {
+ if (pc[i] != c)
+ err = true;
+ }
+
+ if (err)
+ print_filled_extents(p, c, offset + len);
+
+ return (err);
+}
+
+static void
+test_zero(size_t szmin, size_t szmax)
+{
+ size_t sz, nsz;
+ void *p;
+#define FILL_BYTE 0x7aU
+
+ sz = szmax;
+ p = mallocx(sz, MALLOCX_ZERO);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
+ assert_false(validate_fill(p, 0x00, 0, sz), "Memory not filled: sz=%zu",
+ sz);
+
+ /*
+ * Fill with non-zero so that non-debug builds are more likely to detect
+ * errors.
+ */
+ memset(p, FILL_BYTE, sz);
+ assert_false(validate_fill(p, FILL_BYTE, 0, sz),
+ "Memory not filled: sz=%zu", sz);
+
+ /* Shrink in place so that we can expect growing in place to succeed. */
+ sz = szmin;
+ assert_zu_eq(xallocx(p, sz, 0, MALLOCX_ZERO), sz,
+ "Unexpected xallocx() error");
+ assert_false(validate_fill(p, FILL_BYTE, 0, sz),
+ "Memory not filled: sz=%zu", sz);
+
+ for (sz = szmin; sz < szmax; sz = nsz) {
+ nsz = nallocx(sz+1, MALLOCX_ZERO);
+ assert_zu_eq(xallocx(p, sz+1, 0, MALLOCX_ZERO), nsz,
+ "Unexpected xallocx() failure");
+ assert_false(validate_fill(p, FILL_BYTE, 0, sz),
+ "Memory not filled: sz=%zu", sz);
+ assert_false(validate_fill(p, 0x00, sz, nsz-sz),
+ "Memory not filled: sz=%zu, nsz-sz=%zu", sz, nsz-sz);
+ memset((void *)((uintptr_t)p + sz), FILL_BYTE, nsz-sz);
+ assert_false(validate_fill(p, FILL_BYTE, 0, nsz),
+ "Memory not filled: nsz=%zu", nsz);
+ }
+
+ dallocx(p, 0);
+}
+
+TEST_BEGIN(test_zero_large)
+{
+ size_t large0, largemax;
+
+ /* Get size classes. */
+ large0 = get_large_size(0);
+ largemax = get_large_size(get_nlarge()-1);
+
+ test_zero(large0, largemax);
+}
+TEST_END
+
+TEST_BEGIN(test_zero_huge)
+{
+ size_t huge0, huge1;
+
+ /* Get size classes. */
+ huge0 = get_huge_size(0);
+ huge1 = get_huge_size(1);
+
+ test_zero(huge1, huge0 * 2);
+}
+TEST_END
+
int
main(void)
{
@@ -55,5 +460,12 @@ main(void)
return (test(
test_same_size,
test_extra_no_move,
- test_no_move_fail));
+ test_no_move_fail,
+ test_size,
+ test_size_extra_overflow,
+ test_extra_small,
+ test_extra_large,
+ test_extra_huge,
+ test_zero_large,
+ test_zero_huge));
}
diff --git a/deps/jemalloc/test/src/SFMT.c b/deps/jemalloc/test/src/SFMT.c
index e6f8deecb..80cabe05e 100644
--- a/deps/jemalloc/test/src/SFMT.c
+++ b/deps/jemalloc/test/src/SFMT.c
@@ -463,11 +463,11 @@ uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit) {
above = 0xffffffffU - (0xffffffffU % limit);
while (1) {
- ret = gen_rand32(ctx);
- if (ret < above) {
- ret %= limit;
- break;
- }
+ ret = gen_rand32(ctx);
+ if (ret < above) {
+ ret %= limit;
+ break;
+ }
}
return ret;
}
@@ -511,13 +511,13 @@ uint64_t gen_rand64(sfmt_t *ctx) {
uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit) {
uint64_t ret, above;
- above = 0xffffffffffffffffLLU - (0xffffffffffffffffLLU % limit);
+ above = KQU(0xffffffffffffffff) - (KQU(0xffffffffffffffff) % limit);
while (1) {
- ret = gen_rand64(ctx);
- if (ret < above) {
- ret %= limit;
- break;
- }
+ ret = gen_rand64(ctx);
+ if (ret < above) {
+ ret %= limit;
+ break;
+ }
}
return ret;
}
diff --git a/deps/jemalloc/test/src/btalloc.c b/deps/jemalloc/test/src/btalloc.c
new file mode 100644
index 000000000..9a253d978
--- /dev/null
+++ b/deps/jemalloc/test/src/btalloc.c
@@ -0,0 +1,8 @@
+#include "test/jemalloc_test.h"
+
+void *
+btalloc(size_t size, unsigned bits)
+{
+
+ return (btalloc_0(size, bits));
+}
diff --git a/deps/jemalloc/test/src/btalloc_0.c b/deps/jemalloc/test/src/btalloc_0.c
new file mode 100644
index 000000000..77d8904ea
--- /dev/null
+++ b/deps/jemalloc/test/src/btalloc_0.c
@@ -0,0 +1,3 @@
+#include "test/jemalloc_test.h"
+
+btalloc_n_gen(0)
diff --git a/deps/jemalloc/test/src/btalloc_1.c b/deps/jemalloc/test/src/btalloc_1.c
new file mode 100644
index 000000000..4c126c309
--- /dev/null
+++ b/deps/jemalloc/test/src/btalloc_1.c
@@ -0,0 +1,3 @@
+#include "test/jemalloc_test.h"
+
+btalloc_n_gen(1)
diff --git a/deps/jemalloc/test/src/mq.c b/deps/jemalloc/test/src/mq.c
new file mode 100644
index 000000000..40b31c15c
--- /dev/null
+++ b/deps/jemalloc/test/src/mq.c
@@ -0,0 +1,29 @@
+#include "test/jemalloc_test.h"
+
+/*
+ * Sleep for approximately ns nanoseconds. Neither a lower nor an upper bound
+ * on sleep time is guaranteed.
+ */
+void
+mq_nanosleep(unsigned ns)
+{
+
+ assert(ns <= 1000*1000*1000);
+
+#ifdef _WIN32
+ Sleep(ns / 1000);
+#else
+ {
+ struct timespec timeout;
+
+ if (ns < 1000*1000*1000) {
+ timeout.tv_sec = 0;
+ timeout.tv_nsec = ns;
+ } else {
+ timeout.tv_sec = 1;
+ timeout.tv_nsec = 0;
+ }
+ nanosleep(&timeout, NULL);
+ }
+#endif
+}
diff --git a/deps/jemalloc/test/src/mtx.c b/deps/jemalloc/test/src/mtx.c
index 41b95d59d..73bd02f6d 100644
--- a/deps/jemalloc/test/src/mtx.c
+++ b/deps/jemalloc/test/src/mtx.c
@@ -1,5 +1,9 @@
#include "test/jemalloc_test.h"
+#ifndef _CRT_SPINCOUNT
+#define _CRT_SPINCOUNT 4000
+#endif
+
bool
mtx_init(mtx_t *mtx)
{
diff --git a/deps/jemalloc/test/src/test.c b/deps/jemalloc/test/src/test.c
index 528d85831..8173614cf 100644
--- a/deps/jemalloc/test/src/test.c
+++ b/deps/jemalloc/test/src/test.c
@@ -5,7 +5,7 @@ static test_status_t test_counts[test_status_count] = {0, 0, 0};
static test_status_t test_status = test_status_pass;
static const char * test_name = "";
-JEMALLOC_ATTR(format(printf, 1, 2))
+JEMALLOC_FORMAT_PRINTF(1, 2)
void
test_skip(const char *format, ...)
{
@@ -18,7 +18,7 @@ test_skip(const char *format, ...)
test_status = test_status_skip;
}
-JEMALLOC_ATTR(format(printf, 1, 2))
+JEMALLOC_FORMAT_PRINTF(1, 2)
void
test_fail(const char *format, ...)
{
@@ -61,13 +61,26 @@ p_test_fini(void)
}
test_status_t
-p_test(test_t* t, ...)
+p_test(test_t *t, ...)
{
- test_status_t ret = test_status_pass;
+ test_status_t ret;
va_list ap;
+ /*
+ * Make sure initialization occurs prior to running tests. Tests are
+ * special because they may use internal facilities prior to triggering
+ * initialization as a side effect of calling into the public API. This
+ * is a final safety that works even if jemalloc_constructor() doesn't
+ * run, as for MSVC builds.
+ */
+ if (nallocx(1, 0) == 0) {
+ malloc_printf("Initialization error");
+ return (test_status_fail);
+ }
+
+ ret = test_status_pass;
va_start(ap, t);
- for (; t != NULL; t = va_arg(ap, test_t*)) {
+ for (; t != NULL; t = va_arg(ap, test_t *)) {
t();
if (test_status > ret)
ret = test_status;
diff --git a/deps/jemalloc/test/src/thd.c b/deps/jemalloc/test/src/thd.c
index 233242a16..c9d006586 100644
--- a/deps/jemalloc/test/src/thd.c
+++ b/deps/jemalloc/test/src/thd.c
@@ -14,7 +14,11 @@ void
thd_join(thd_t thd, void **ret)
{
- WaitForSingleObject(thd, INFINITE);
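+	/* Propagate the thread's exit code through *ret when requested. */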
+ if (WaitForSingleObject(thd, INFINITE) == WAIT_OBJECT_0 && ret) {
+ DWORD exit_code;
+ GetExitCodeThread(thd, (LPDWORD) &exit_code);
+ *ret = (void *)(uintptr_t)exit_code;
+ }
}
#else
diff --git a/deps/jemalloc/test/src/timer.c b/deps/jemalloc/test/src/timer.c
new file mode 100644
index 000000000..0c93abaf9
--- /dev/null
+++ b/deps/jemalloc/test/src/timer.c
@@ -0,0 +1,85 @@
+#include "test/jemalloc_test.h"
+
+void
+timer_start(timedelta_t *timer)
+{
+
+#ifdef _WIN32
+ GetSystemTimeAsFileTime(&timer->ft0);
+#elif JEMALLOC_CLOCK_GETTIME
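+	/* Prefer a monotonic clock when available; otherwise fall back to CLOCK_REALTIME. */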
+ if (sysconf(_SC_MONOTONIC_CLOCK) <= 0)
+ timer->clock_id = CLOCK_REALTIME;
+ else
+ timer->clock_id = CLOCK_MONOTONIC;
+ clock_gettime(timer->clock_id, &timer->ts0);
+#else
+ gettimeofday(&timer->tv0, NULL);
+#endif
+}
+
+void
+timer_stop(timedelta_t *timer)
+{
+
+#ifdef _WIN32
+	GetSystemTimeAsFileTime(&timer->ft1);
+#elif JEMALLOC_CLOCK_GETTIME
+ clock_gettime(timer->clock_id, &timer->ts1);
+#else
+ gettimeofday(&timer->tv1, NULL);
+#endif
+}
+
+uint64_t
+timer_usec(const timedelta_t *timer)
+{
+
+#ifdef _WIN32
+ uint64_t t0, t1;
+ t0 = (((uint64_t)timer->ft0.dwHighDateTime) << 32) |
+ timer->ft0.dwLowDateTime;
+ t1 = (((uint64_t)timer->ft1.dwHighDateTime) << 32) |
+ timer->ft1.dwLowDateTime;
+ return ((t1 - t0) / 10);
+#elif JEMALLOC_CLOCK_GETTIME
+ return (((timer->ts1.tv_sec - timer->ts0.tv_sec) * 1000000) +
+ (timer->ts1.tv_nsec - timer->ts0.tv_nsec) / 1000);
+#else
+ return (((timer->tv1.tv_sec - timer->tv0.tv_sec) * 1000000) +
+ timer->tv1.tv_usec - timer->tv0.tv_usec);
+#endif
+}
+
+void
+timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen)
+{
+ uint64_t t0 = timer_usec(a);
+ uint64_t t1 = timer_usec(b);
+ uint64_t mult;
+ unsigned i = 0;
+ unsigned j;
+ int n;
+
+ /* Whole. */
+ n = malloc_snprintf(&buf[i], buflen-i, "%"FMTu64, t0 / t1);
+ i += n;
+ if (i >= buflen)
+ return;
+ mult = 1;
+ for (j = 0; j < n; j++)
+ mult *= 10;
+
+ /* Decimal. */
+ n = malloc_snprintf(&buf[i], buflen-i, ".");
+ i += n;
+
+ /* Fraction. */
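+	/* Emit one digit per iteration; round only the final digit. */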
+ while (i < buflen-1) {
+ uint64_t round = (i+1 == buflen-1 && ((t0 * mult * 10 / t1) % 10
+ >= 5)) ? 1 : 0;
+ n = malloc_snprintf(&buf[i], buflen-i,
+ "%"FMTu64, (t0 * mult / t1) % 10 + round);
+ i += n;
+ mult *= 10;
+ }
+}
diff --git a/deps/jemalloc/test/stress/microbench.c b/deps/jemalloc/test/stress/microbench.c
new file mode 100644
index 000000000..ee39fea7f
--- /dev/null
+++ b/deps/jemalloc/test/stress/microbench.c
@@ -0,0 +1,181 @@
+#include "test/jemalloc_test.h"
+
+JEMALLOC_INLINE_C void
+time_func(timedelta_t *timer, uint64_t nwarmup, uint64_t niter, void (*func)(void))
+{
+ uint64_t i;
+
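+	/* Run nwarmup untimed warm-up iterations, then time niter iterations of func. */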
+ for (i = 0; i < nwarmup; i++)
+ func();
+ timer_start(timer);
+ for (i = 0; i < niter; i++)
+ func();
+ timer_stop(timer);
+}
+
+void
+compare_funcs(uint64_t nwarmup, uint64_t niter, const char *name_a,
+    void (*func_a)(void), const char *name_b, void (*func_b)(void))
+{
+ timedelta_t timer_a, timer_b;
+ char ratio_buf[6];
+ void *p;
+
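+	/* Dummy allocation, presumably so allocator initialization is not charged to the timed loops. */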
+ p = mallocx(1, 0);
+ if (p == NULL) {
+ test_fail("Unexpected mallocx() failure");
+ return;
+ }
+
+ time_func(&timer_a, nwarmup, niter, func_a);
+ time_func(&timer_b, nwarmup, niter, func_b);
+
+ timer_ratio(&timer_a, &timer_b, ratio_buf, sizeof(ratio_buf));
+ malloc_printf("%"FMTu64" iterations, %s=%"FMTu64"us, "
+ "%s=%"FMTu64"us, ratio=1:%s\n",
+ niter, name_a, timer_usec(&timer_a), name_b, timer_usec(&timer_b),
+ ratio_buf);
+
+ dallocx(p, 0);
+}
+
+static void
+malloc_free(void)
+{
+ /* The compiler can optimize away free(malloc(1))! */
+ void *p = malloc(1);
+ if (p == NULL) {
+ test_fail("Unexpected malloc() failure");
+ return;
+ }
+ free(p);
+}
+
+static void
+mallocx_free(void)
+{
+ void *p = mallocx(1, 0);
+ if (p == NULL) {
+ test_fail("Unexpected mallocx() failure");
+ return;
+ }
+ free(p);
+}
+
+TEST_BEGIN(test_malloc_vs_mallocx)
+{
+
+ compare_funcs(10*1000*1000, 100*1000*1000, "malloc",
+ malloc_free, "mallocx", mallocx_free);
+}
+TEST_END
+
+static void
+malloc_dallocx(void)
+{
+ void *p = malloc(1);
+ if (p == NULL) {
+ test_fail("Unexpected malloc() failure");
+ return;
+ }
+ dallocx(p, 0);
+}
+
+static void
+malloc_sdallocx(void)
+{
+ void *p = malloc(1);
+ if (p == NULL) {
+ test_fail("Unexpected malloc() failure");
+ return;
+ }
+ sdallocx(p, 1, 0);
+}
+
+TEST_BEGIN(test_free_vs_dallocx)
+{
+
+ compare_funcs(10*1000*1000, 100*1000*1000, "free", malloc_free,
+ "dallocx", malloc_dallocx);
+}
+TEST_END
+
+TEST_BEGIN(test_dallocx_vs_sdallocx)
+{
+
+ compare_funcs(10*1000*1000, 100*1000*1000, "dallocx", malloc_dallocx,
+ "sdallocx", malloc_sdallocx);
+}
+TEST_END
+
+static void
+malloc_mus_free(void)
+{
+ void *p;
+
+ p = malloc(1);
+ if (p == NULL) {
+ test_fail("Unexpected malloc() failure");
+ return;
+ }
+ malloc_usable_size(p);
+ free(p);
+}
+
+static void
+malloc_sallocx_free(void)
+{
+ void *p;
+
+ p = malloc(1);
+ if (p == NULL) {
+ test_fail("Unexpected malloc() failure");
+ return;
+ }
+ if (sallocx(p, 0) < 1)
+ test_fail("Unexpected sallocx() failure");
+ free(p);
+}
+
+TEST_BEGIN(test_mus_vs_sallocx)
+{
+
+ compare_funcs(10*1000*1000, 100*1000*1000, "malloc_usable_size",
+ malloc_mus_free, "sallocx", malloc_sallocx_free);
+}
+TEST_END
+
+static void
+malloc_nallocx_free(void)
+{
+ void *p;
+
+ p = malloc(1);
+ if (p == NULL) {
+ test_fail("Unexpected malloc() failure");
+ return;
+ }
+ if (nallocx(1, 0) < 1)
+ test_fail("Unexpected nallocx() failure");
+ free(p);
+}
+
+TEST_BEGIN(test_sallocx_vs_nallocx)
+{
+
+ compare_funcs(10*1000*1000, 100*1000*1000, "sallocx",
+ malloc_sallocx_free, "nallocx", malloc_nallocx_free);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_malloc_vs_mallocx,
+ test_free_vs_dallocx,
+ test_dallocx_vs_sdallocx,
+ test_mus_vs_sallocx,
+ test_sallocx_vs_nallocx));
+}
diff --git a/deps/jemalloc/test/unit/SFMT.c b/deps/jemalloc/test/unit/SFMT.c
index c57bd68df..ba4be8702 100644
--- a/deps/jemalloc/test/unit/SFMT.c
+++ b/deps/jemalloc/test/unit/SFMT.c
@@ -445,1008 +445,1008 @@ static const uint32_t init_by_array_32_expected[] = {
2750138839U, 3518055702U, 733072558U, 4169325400U, 788493625U
};
static const uint64_t init_gen_rand_64_expected[] = {
- QU(16924766246869039260LLU), QU( 8201438687333352714LLU),
- QU( 2265290287015001750LLU), QU(18397264611805473832LLU),
- QU( 3375255223302384358LLU), QU( 6345559975416828796LLU),
- QU(18229739242790328073LLU), QU( 7596792742098800905LLU),
- QU( 255338647169685981LLU), QU( 2052747240048610300LLU),
- QU(18328151576097299343LLU), QU(12472905421133796567LLU),
- QU(11315245349717600863LLU), QU(16594110197775871209LLU),
- QU(15708751964632456450LLU), QU(10452031272054632535LLU),
- QU(11097646720811454386LLU), QU( 4556090668445745441LLU),
- QU(17116187693090663106LLU), QU(14931526836144510645LLU),
- QU( 9190752218020552591LLU), QU( 9625800285771901401LLU),
- QU(13995141077659972832LLU), QU( 5194209094927829625LLU),
- QU( 4156788379151063303LLU), QU( 8523452593770139494LLU),
- QU(14082382103049296727LLU), QU( 2462601863986088483LLU),
- QU( 3030583461592840678LLU), QU( 5221622077872827681LLU),
- QU( 3084210671228981236LLU), QU(13956758381389953823LLU),
- QU(13503889856213423831LLU), QU(15696904024189836170LLU),
- QU( 4612584152877036206LLU), QU( 6231135538447867881LLU),
- QU(10172457294158869468LLU), QU( 6452258628466708150LLU),
- QU(14044432824917330221LLU), QU( 370168364480044279LLU),
- QU(10102144686427193359LLU), QU( 667870489994776076LLU),
- QU( 2732271956925885858LLU), QU(18027788905977284151LLU),
- QU(15009842788582923859LLU), QU( 7136357960180199542LLU),
- QU(15901736243475578127LLU), QU(16951293785352615701LLU),
- QU(10551492125243691632LLU), QU(17668869969146434804LLU),
- QU(13646002971174390445LLU), QU( 9804471050759613248LLU),
- QU( 5511670439655935493LLU), QU(18103342091070400926LLU),
- QU(17224512747665137533LLU), QU(15534627482992618168LLU),
- QU( 1423813266186582647LLU), QU(15821176807932930024LLU),
- QU( 30323369733607156LLU), QU(11599382494723479403LLU),
- QU( 653856076586810062LLU), QU( 3176437395144899659LLU),
- QU(14028076268147963917LLU), QU(16156398271809666195LLU),
- QU( 3166955484848201676LLU), QU( 5746805620136919390LLU),
- QU(17297845208891256593LLU), QU(11691653183226428483LLU),
- QU(17900026146506981577LLU), QU(15387382115755971042LLU),
- QU(16923567681040845943LLU), QU( 8039057517199388606LLU),
- QU(11748409241468629263LLU), QU( 794358245539076095LLU),
- QU(13438501964693401242LLU), QU(14036803236515618962LLU),
- QU( 5252311215205424721LLU), QU(17806589612915509081LLU),
- QU( 6802767092397596006LLU), QU(14212120431184557140LLU),
- QU( 1072951366761385712LLU), QU(13098491780722836296LLU),
- QU( 9466676828710797353LLU), QU(12673056849042830081LLU),
- QU(12763726623645357580LLU), QU(16468961652999309493LLU),
- QU(15305979875636438926LLU), QU(17444713151223449734LLU),
- QU( 5692214267627883674LLU), QU(13049589139196151505LLU),
- QU( 880115207831670745LLU), QU( 1776529075789695498LLU),
- QU(16695225897801466485LLU), QU(10666901778795346845LLU),
- QU( 6164389346722833869LLU), QU( 2863817793264300475LLU),
- QU( 9464049921886304754LLU), QU( 3993566636740015468LLU),
- QU( 9983749692528514136LLU), QU(16375286075057755211LLU),
- QU(16042643417005440820LLU), QU(11445419662923489877LLU),
- QU( 7999038846885158836LLU), QU( 6721913661721511535LLU),
- QU( 5363052654139357320LLU), QU( 1817788761173584205LLU),
- QU(13290974386445856444LLU), QU( 4650350818937984680LLU),
- QU( 8219183528102484836LLU), QU( 1569862923500819899LLU),
- QU( 4189359732136641860LLU), QU(14202822961683148583LLU),
- QU( 4457498315309429058LLU), QU(13089067387019074834LLU),
- QU(11075517153328927293LLU), QU(10277016248336668389LLU),
- QU( 7070509725324401122LLU), QU(17808892017780289380LLU),
- QU(13143367339909287349LLU), QU( 1377743745360085151LLU),
- QU( 5749341807421286485LLU), QU(14832814616770931325LLU),
- QU( 7688820635324359492LLU), QU(10960474011539770045LLU),
- QU( 81970066653179790LLU), QU(12619476072607878022LLU),
- QU( 4419566616271201744LLU), QU(15147917311750568503LLU),
- QU( 5549739182852706345LLU), QU( 7308198397975204770LLU),
- QU(13580425496671289278LLU), QU(17070764785210130301LLU),
- QU( 8202832846285604405LLU), QU( 6873046287640887249LLU),
- QU( 6927424434308206114LLU), QU( 6139014645937224874LLU),
- QU(10290373645978487639LLU), QU(15904261291701523804LLU),
- QU( 9628743442057826883LLU), QU(18383429096255546714LLU),
- QU( 4977413265753686967LLU), QU( 7714317492425012869LLU),
- QU( 9025232586309926193LLU), QU(14627338359776709107LLU),
- QU(14759849896467790763LLU), QU(10931129435864423252LLU),
- QU( 4588456988775014359LLU), QU(10699388531797056724LLU),
- QU( 468652268869238792LLU), QU( 5755943035328078086LLU),
- QU( 2102437379988580216LLU), QU( 9986312786506674028LLU),
- QU( 2654207180040945604LLU), QU( 8726634790559960062LLU),
- QU( 100497234871808137LLU), QU( 2800137176951425819LLU),
- QU( 6076627612918553487LLU), QU( 5780186919186152796LLU),
- QU( 8179183595769929098LLU), QU( 6009426283716221169LLU),
- QU( 2796662551397449358LLU), QU( 1756961367041986764LLU),
- QU( 6972897917355606205LLU), QU(14524774345368968243LLU),
- QU( 2773529684745706940LLU), QU( 4853632376213075959LLU),
- QU( 4198177923731358102LLU), QU( 8271224913084139776LLU),
- QU( 2741753121611092226LLU), QU(16782366145996731181LLU),
- QU(15426125238972640790LLU), QU(13595497100671260342LLU),
- QU( 3173531022836259898LLU), QU( 6573264560319511662LLU),
- QU(18041111951511157441LLU), QU( 2351433581833135952LLU),
- QU( 3113255578908173487LLU), QU( 1739371330877858784LLU),
- QU(16046126562789165480LLU), QU( 8072101652214192925LLU),
- QU(15267091584090664910LLU), QU( 9309579200403648940LLU),
- QU( 5218892439752408722LLU), QU(14492477246004337115LLU),
- QU(17431037586679770619LLU), QU( 7385248135963250480LLU),
- QU( 9580144956565560660LLU), QU( 4919546228040008720LLU),
- QU(15261542469145035584LLU), QU(18233297270822253102LLU),
- QU( 5453248417992302857LLU), QU( 9309519155931460285LLU),
- QU(10342813012345291756LLU), QU(15676085186784762381LLU),
- QU(15912092950691300645LLU), QU( 9371053121499003195LLU),
- QU( 9897186478226866746LLU), QU(14061858287188196327LLU),
- QU( 122575971620788119LLU), QU(12146750969116317754LLU),
- QU( 4438317272813245201LLU), QU( 8332576791009527119LLU),
- QU(13907785691786542057LLU), QU(10374194887283287467LLU),
- QU( 2098798755649059566LLU), QU( 3416235197748288894LLU),
- QU( 8688269957320773484LLU), QU( 7503964602397371571LLU),
- QU(16724977015147478236LLU), QU( 9461512855439858184LLU),
- QU(13259049744534534727LLU), QU( 3583094952542899294LLU),
- QU( 8764245731305528292LLU), QU(13240823595462088985LLU),
- QU(13716141617617910448LLU), QU(18114969519935960955LLU),
- QU( 2297553615798302206LLU), QU( 4585521442944663362LLU),
- QU(17776858680630198686LLU), QU( 4685873229192163363LLU),
- QU( 152558080671135627LLU), QU(15424900540842670088LLU),
- QU(13229630297130024108LLU), QU(17530268788245718717LLU),
- QU(16675633913065714144LLU), QU( 3158912717897568068LLU),
- QU(15399132185380087288LLU), QU( 7401418744515677872LLU),
- QU(13135412922344398535LLU), QU( 6385314346100509511LLU),
- QU(13962867001134161139LLU), QU(10272780155442671999LLU),
- QU(12894856086597769142LLU), QU(13340877795287554994LLU),
- QU(12913630602094607396LLU), QU(12543167911119793857LLU),
- QU(17343570372251873096LLU), QU(10959487764494150545LLU),
- QU( 6966737953093821128LLU), QU(13780699135496988601LLU),
- QU( 4405070719380142046LLU), QU(14923788365607284982LLU),
- QU( 2869487678905148380LLU), QU( 6416272754197188403LLU),
- QU(15017380475943612591LLU), QU( 1995636220918429487LLU),
- QU( 3402016804620122716LLU), QU(15800188663407057080LLU),
- QU(11362369990390932882LLU), QU(15262183501637986147LLU),
- QU(10239175385387371494LLU), QU( 9352042420365748334LLU),
- QU( 1682457034285119875LLU), QU( 1724710651376289644LLU),
- QU( 2038157098893817966LLU), QU( 9897825558324608773LLU),
- QU( 1477666236519164736LLU), QU(16835397314511233640LLU),
- QU(10370866327005346508LLU), QU(10157504370660621982LLU),
- QU(12113904045335882069LLU), QU(13326444439742783008LLU),
- QU(11302769043000765804LLU), QU(13594979923955228484LLU),
- QU(11779351762613475968LLU), QU( 3786101619539298383LLU),
- QU( 8021122969180846063LLU), QU(15745904401162500495LLU),
- QU(10762168465993897267LLU), QU(13552058957896319026LLU),
- QU(11200228655252462013LLU), QU( 5035370357337441226LLU),
- QU( 7593918984545500013LLU), QU( 5418554918361528700LLU),
- QU( 4858270799405446371LLU), QU( 9974659566876282544LLU),
- QU(18227595922273957859LLU), QU( 2772778443635656220LLU),
- QU(14285143053182085385LLU), QU( 9939700992429600469LLU),
- QU(12756185904545598068LLU), QU( 2020783375367345262LLU),
- QU( 57026775058331227LLU), QU( 950827867930065454LLU),
- QU( 6602279670145371217LLU), QU( 2291171535443566929LLU),
- QU( 5832380724425010313LLU), QU( 1220343904715982285LLU),
- QU(17045542598598037633LLU), QU(15460481779702820971LLU),
- QU(13948388779949365130LLU), QU(13975040175430829518LLU),
- QU(17477538238425541763LLU), QU(11104663041851745725LLU),
- QU(15860992957141157587LLU), QU(14529434633012950138LLU),
- QU( 2504838019075394203LLU), QU( 7512113882611121886LLU),
- QU( 4859973559980886617LLU), QU( 1258601555703250219LLU),
- QU(15594548157514316394LLU), QU( 4516730171963773048LLU),
- QU(11380103193905031983LLU), QU( 6809282239982353344LLU),
- QU(18045256930420065002LLU), QU( 2453702683108791859LLU),
- QU( 977214582986981460LLU), QU( 2006410402232713466LLU),
- QU( 6192236267216378358LLU), QU( 3429468402195675253LLU),
- QU(18146933153017348921LLU), QU(17369978576367231139LLU),
- QU( 1246940717230386603LLU), QU(11335758870083327110LLU),
- QU(14166488801730353682LLU), QU( 9008573127269635732LLU),
- QU(10776025389820643815LLU), QU(15087605441903942962LLU),
- QU( 1359542462712147922LLU), QU(13898874411226454206LLU),
- QU(17911176066536804411LLU), QU( 9435590428600085274LLU),
- QU( 294488509967864007LLU), QU( 8890111397567922046LLU),
- QU( 7987823476034328778LLU), QU(13263827582440967651LLU),
- QU( 7503774813106751573LLU), QU(14974747296185646837LLU),
- QU( 8504765037032103375LLU), QU(17340303357444536213LLU),
- QU( 7704610912964485743LLU), QU( 8107533670327205061LLU),
- QU( 9062969835083315985LLU), QU(16968963142126734184LLU),
- QU(12958041214190810180LLU), QU( 2720170147759570200LLU),
- QU( 2986358963942189566LLU), QU(14884226322219356580LLU),
- QU( 286224325144368520LLU), QU(11313800433154279797LLU),
- QU(18366849528439673248LLU), QU(17899725929482368789LLU),
- QU( 3730004284609106799LLU), QU( 1654474302052767205LLU),
- QU( 5006698007047077032LLU), QU( 8196893913601182838LLU),
- QU(15214541774425211640LLU), QU(17391346045606626073LLU),
- QU( 8369003584076969089LLU), QU( 3939046733368550293LLU),
- QU(10178639720308707785LLU), QU( 2180248669304388697LLU),
- QU( 62894391300126322LLU), QU( 9205708961736223191LLU),
- QU( 6837431058165360438LLU), QU( 3150743890848308214LLU),
- QU(17849330658111464583LLU), QU(12214815643135450865LLU),
- QU(13410713840519603402LLU), QU( 3200778126692046802LLU),
- QU(13354780043041779313LLU), QU( 800850022756886036LLU),
- QU(15660052933953067433LLU), QU( 6572823544154375676LLU),
- QU(11030281857015819266LLU), QU(12682241941471433835LLU),
- QU(11654136407300274693LLU), QU( 4517795492388641109LLU),
- QU( 9757017371504524244LLU), QU(17833043400781889277LLU),
- QU(12685085201747792227LLU), QU(10408057728835019573LLU),
- QU( 98370418513455221LLU), QU( 6732663555696848598LLU),
- QU(13248530959948529780LLU), QU( 3530441401230622826LLU),
- QU(18188251992895660615LLU), QU( 1847918354186383756LLU),
- QU( 1127392190402660921LLU), QU(11293734643143819463LLU),
- QU( 3015506344578682982LLU), QU(13852645444071153329LLU),
- QU( 2121359659091349142LLU), QU( 1294604376116677694LLU),
- QU( 5616576231286352318LLU), QU( 7112502442954235625LLU),
- QU(11676228199551561689LLU), QU(12925182803007305359LLU),
- QU( 7852375518160493082LLU), QU( 1136513130539296154LLU),
- QU( 5636923900916593195LLU), QU( 3221077517612607747LLU),
- QU(17784790465798152513LLU), QU( 3554210049056995938LLU),
- QU(17476839685878225874LLU), QU( 3206836372585575732LLU),
- QU( 2765333945644823430LLU), QU(10080070903718799528LLU),
- QU( 5412370818878286353LLU), QU( 9689685887726257728LLU),
- QU( 8236117509123533998LLU), QU( 1951139137165040214LLU),
- QU( 4492205209227980349LLU), QU(16541291230861602967LLU),
- QU( 1424371548301437940LLU), QU( 9117562079669206794LLU),
- QU(14374681563251691625LLU), QU(13873164030199921303LLU),
- QU( 6680317946770936731LLU), QU(15586334026918276214LLU),
- QU(10896213950976109802LLU), QU( 9506261949596413689LLU),
- QU( 9903949574308040616LLU), QU( 6038397344557204470LLU),
- QU( 174601465422373648LLU), QU(15946141191338238030LLU),
- QU(17142225620992044937LLU), QU( 7552030283784477064LLU),
- QU( 2947372384532947997LLU), QU( 510797021688197711LLU),
- QU( 4962499439249363461LLU), QU( 23770320158385357LLU),
- QU( 959774499105138124LLU), QU( 1468396011518788276LLU),
- QU( 2015698006852312308LLU), QU( 4149400718489980136LLU),
- QU( 5992916099522371188LLU), QU(10819182935265531076LLU),
- QU(16189787999192351131LLU), QU( 342833961790261950LLU),
- QU(12470830319550495336LLU), QU(18128495041912812501LLU),
- QU( 1193600899723524337LLU), QU( 9056793666590079770LLU),
- QU( 2154021227041669041LLU), QU( 4963570213951235735LLU),
- QU( 4865075960209211409LLU), QU( 2097724599039942963LLU),
- QU( 2024080278583179845LLU), QU(11527054549196576736LLU),
- QU(10650256084182390252LLU), QU( 4808408648695766755LLU),
- QU( 1642839215013788844LLU), QU(10607187948250398390LLU),
- QU( 7076868166085913508LLU), QU( 730522571106887032LLU),
- QU(12500579240208524895LLU), QU( 4484390097311355324LLU),
- QU(15145801330700623870LLU), QU( 8055827661392944028LLU),
- QU( 5865092976832712268LLU), QU(15159212508053625143LLU),
- QU( 3560964582876483341LLU), QU( 4070052741344438280LLU),
- QU( 6032585709886855634LLU), QU(15643262320904604873LLU),
- QU( 2565119772293371111LLU), QU( 318314293065348260LLU),
- QU(15047458749141511872LLU), QU( 7772788389811528730LLU),
- QU( 7081187494343801976LLU), QU( 6465136009467253947LLU),
- QU(10425940692543362069LLU), QU( 554608190318339115LLU),
- QU(14796699860302125214LLU), QU( 1638153134431111443LLU),
- QU(10336967447052276248LLU), QU( 8412308070396592958LLU),
- QU( 4004557277152051226LLU), QU( 8143598997278774834LLU),
- QU(16413323996508783221LLU), QU(13139418758033994949LLU),
- QU( 9772709138335006667LLU), QU( 2818167159287157659LLU),
- QU(17091740573832523669LLU), QU(14629199013130751608LLU),
- QU(18268322711500338185LLU), QU( 8290963415675493063LLU),
- QU( 8830864907452542588LLU), QU( 1614839084637494849LLU),
- QU(14855358500870422231LLU), QU( 3472996748392519937LLU),
- QU(15317151166268877716LLU), QU( 5825895018698400362LLU),
- QU(16730208429367544129LLU), QU(10481156578141202800LLU),
- QU( 4746166512382823750LLU), QU(12720876014472464998LLU),
- QU( 8825177124486735972LLU), QU(13733447296837467838LLU),
- QU( 6412293741681359625LLU), QU( 8313213138756135033LLU),
- QU(11421481194803712517LLU), QU( 7997007691544174032LLU),
- QU( 6812963847917605930LLU), QU( 9683091901227558641LLU),
- QU(14703594165860324713LLU), QU( 1775476144519618309LLU),
- QU( 2724283288516469519LLU), QU( 717642555185856868LLU),
- QU( 8736402192215092346LLU), QU(11878800336431381021LLU),
- QU( 4348816066017061293LLU), QU( 6115112756583631307LLU),
- QU( 9176597239667142976LLU), QU(12615622714894259204LLU),
- QU(10283406711301385987LLU), QU( 5111762509485379420LLU),
- QU( 3118290051198688449LLU), QU( 7345123071632232145LLU),
- QU( 9176423451688682359LLU), QU( 4843865456157868971LLU),
- QU(12008036363752566088LLU), QU(12058837181919397720LLU),
- QU( 2145073958457347366LLU), QU( 1526504881672818067LLU),
- QU( 3488830105567134848LLU), QU(13208362960674805143LLU),
- QU( 4077549672899572192LLU), QU( 7770995684693818365LLU),
- QU( 1398532341546313593LLU), QU(12711859908703927840LLU),
- QU( 1417561172594446813LLU), QU(17045191024194170604LLU),
- QU( 4101933177604931713LLU), QU(14708428834203480320LLU),
- QU(17447509264469407724LLU), QU(14314821973983434255LLU),
- QU(17990472271061617265LLU), QU( 5087756685841673942LLU),
- QU(12797820586893859939LLU), QU( 1778128952671092879LLU),
- QU( 3535918530508665898LLU), QU( 9035729701042481301LLU),
- QU(14808661568277079962LLU), QU(14587345077537747914LLU),
- QU(11920080002323122708LLU), QU( 6426515805197278753LLU),
- QU( 3295612216725984831LLU), QU(11040722532100876120LLU),
- QU(12305952936387598754LLU), QU(16097391899742004253LLU),
- QU( 4908537335606182208LLU), QU(12446674552196795504LLU),
- QU(16010497855816895177LLU), QU( 9194378874788615551LLU),
- QU( 3382957529567613384LLU), QU( 5154647600754974077LLU),
- QU( 9801822865328396141LLU), QU( 9023662173919288143LLU),
- QU(17623115353825147868LLU), QU( 8238115767443015816LLU),
- QU(15811444159859002560LLU), QU( 9085612528904059661LLU),
- QU( 6888601089398614254LLU), QU( 258252992894160189LLU),
- QU( 6704363880792428622LLU), QU( 6114966032147235763LLU),
- QU(11075393882690261875LLU), QU( 8797664238933620407LLU),
- QU( 5901892006476726920LLU), QU( 5309780159285518958LLU),
- QU(14940808387240817367LLU), QU(14642032021449656698LLU),
- QU( 9808256672068504139LLU), QU( 3670135111380607658LLU),
- QU(11211211097845960152LLU), QU( 1474304506716695808LLU),
- QU(15843166204506876239LLU), QU( 7661051252471780561LLU),
- QU(10170905502249418476LLU), QU( 7801416045582028589LLU),
- QU( 2763981484737053050LLU), QU( 9491377905499253054LLU),
- QU(16201395896336915095LLU), QU( 9256513756442782198LLU),
- QU( 5411283157972456034LLU), QU( 5059433122288321676LLU),
- QU( 4327408006721123357LLU), QU( 9278544078834433377LLU),
- QU( 7601527110882281612LLU), QU(11848295896975505251LLU),
- QU(12096998801094735560LLU), QU(14773480339823506413LLU),
- QU(15586227433895802149LLU), QU(12786541257830242872LLU),
- QU( 6904692985140503067LLU), QU( 5309011515263103959LLU),
- QU(12105257191179371066LLU), QU(14654380212442225037LLU),
- QU( 2556774974190695009LLU), QU( 4461297399927600261LLU),
- QU(14888225660915118646LLU), QU(14915459341148291824LLU),
- QU( 2738802166252327631LLU), QU( 6047155789239131512LLU),
- QU(12920545353217010338LLU), QU(10697617257007840205LLU),
- QU( 2751585253158203504LLU), QU(13252729159780047496LLU),
- QU(14700326134672815469LLU), QU(14082527904374600529LLU),
- QU(16852962273496542070LLU), QU(17446675504235853907LLU),
- QU(15019600398527572311LLU), QU(12312781346344081551LLU),
- QU(14524667935039810450LLU), QU( 5634005663377195738LLU),
- QU(11375574739525000569LLU), QU( 2423665396433260040LLU),
- QU( 5222836914796015410LLU), QU( 4397666386492647387LLU),
- QU( 4619294441691707638LLU), QU( 665088602354770716LLU),
- QU(13246495665281593610LLU), QU( 6564144270549729409LLU),
- QU(10223216188145661688LLU), QU( 3961556907299230585LLU),
- QU(11543262515492439914LLU), QU(16118031437285993790LLU),
- QU( 7143417964520166465LLU), QU(13295053515909486772LLU),
- QU( 40434666004899675LLU), QU(17127804194038347164LLU),
- QU( 8599165966560586269LLU), QU( 8214016749011284903LLU),
- QU(13725130352140465239LLU), QU( 5467254474431726291LLU),
- QU( 7748584297438219877LLU), QU(16933551114829772472LLU),
- QU( 2169618439506799400LLU), QU( 2169787627665113463LLU),
- QU(17314493571267943764LLU), QU(18053575102911354912LLU),
- QU(11928303275378476973LLU), QU(11593850925061715550LLU),
- QU(17782269923473589362LLU), QU( 3280235307704747039LLU),
- QU( 6145343578598685149LLU), QU(17080117031114086090LLU),
- QU(18066839902983594755LLU), QU( 6517508430331020706LLU),
- QU( 8092908893950411541LLU), QU(12558378233386153732LLU),
- QU( 4476532167973132976LLU), QU(16081642430367025016LLU),
- QU( 4233154094369139361LLU), QU( 8693630486693161027LLU),
- QU(11244959343027742285LLU), QU(12273503967768513508LLU),
- QU(14108978636385284876LLU), QU( 7242414665378826984LLU),
- QU( 6561316938846562432LLU), QU( 8601038474994665795LLU),
- QU(17532942353612365904LLU), QU(17940076637020912186LLU),
- QU( 7340260368823171304LLU), QU( 7061807613916067905LLU),
- QU(10561734935039519326LLU), QU(17990796503724650862LLU),
- QU( 6208732943911827159LLU), QU( 359077562804090617LLU),
- QU(14177751537784403113LLU), QU(10659599444915362902LLU),
- QU(15081727220615085833LLU), QU(13417573895659757486LLU),
- QU(15513842342017811524LLU), QU(11814141516204288231LLU),
- QU( 1827312513875101814LLU), QU( 2804611699894603103LLU),
- QU(17116500469975602763LLU), QU(12270191815211952087LLU),
- QU(12256358467786024988LLU), QU(18435021722453971267LLU),
- QU( 671330264390865618LLU), QU( 476504300460286050LLU),
- QU(16465470901027093441LLU), QU( 4047724406247136402LLU),
- QU( 1322305451411883346LLU), QU( 1388308688834322280LLU),
- QU( 7303989085269758176LLU), QU( 9323792664765233642LLU),
- QU( 4542762575316368936LLU), QU(17342696132794337618LLU),
- QU( 4588025054768498379LLU), QU(13415475057390330804LLU),
- QU(17880279491733405570LLU), QU(10610553400618620353LLU),
- QU( 3180842072658960139LLU), QU(13002966655454270120LLU),
- QU( 1665301181064982826LLU), QU( 7083673946791258979LLU),
- QU( 190522247122496820LLU), QU(17388280237250677740LLU),
- QU( 8430770379923642945LLU), QU(12987180971921668584LLU),
- QU( 2311086108365390642LLU), QU( 2870984383579822345LLU),
- QU(14014682609164653318LLU), QU(14467187293062251484LLU),
- QU( 192186361147413298LLU), QU(15171951713531796524LLU),
- QU( 9900305495015948728LLU), QU(17958004775615466344LLU),
- QU(14346380954498606514LLU), QU(18040047357617407096LLU),
- QU( 5035237584833424532LLU), QU(15089555460613972287LLU),
- QU( 4131411873749729831LLU), QU( 1329013581168250330LLU),
- QU(10095353333051193949LLU), QU(10749518561022462716LLU),
- QU( 9050611429810755847LLU), QU(15022028840236655649LLU),
- QU( 8775554279239748298LLU), QU(13105754025489230502LLU),
- QU(15471300118574167585LLU), QU( 89864764002355628LLU),
- QU( 8776416323420466637LLU), QU( 5280258630612040891LLU),
- QU( 2719174488591862912LLU), QU( 7599309137399661994LLU),
- QU(15012887256778039979LLU), QU(14062981725630928925LLU),
- QU(12038536286991689603LLU), QU( 7089756544681775245LLU),
- QU(10376661532744718039LLU), QU( 1265198725901533130LLU),
- QU(13807996727081142408LLU), QU( 2935019626765036403LLU),
- QU( 7651672460680700141LLU), QU( 3644093016200370795LLU),
- QU( 2840982578090080674LLU), QU(17956262740157449201LLU),
- QU(18267979450492880548LLU), QU(11799503659796848070LLU),
- QU( 9942537025669672388LLU), QU(11886606816406990297LLU),
- QU( 5488594946437447576LLU), QU( 7226714353282744302LLU),
- QU( 3784851653123877043LLU), QU( 878018453244803041LLU),
- QU(12110022586268616085LLU), QU( 734072179404675123LLU),
- QU(11869573627998248542LLU), QU( 469150421297783998LLU),
- QU( 260151124912803804LLU), QU(11639179410120968649LLU),
- QU( 9318165193840846253LLU), QU(12795671722734758075LLU),
- QU(15318410297267253933LLU), QU( 691524703570062620LLU),
- QU( 5837129010576994601LLU), QU(15045963859726941052LLU),
- QU( 5850056944932238169LLU), QU(12017434144750943807LLU),
- QU( 7447139064928956574LLU), QU( 3101711812658245019LLU),
- QU(16052940704474982954LLU), QU(18195745945986994042LLU),
- QU( 8932252132785575659LLU), QU(13390817488106794834LLU),
- QU(11582771836502517453LLU), QU( 4964411326683611686LLU),
- QU( 2195093981702694011LLU), QU(14145229538389675669LLU),
- QU(16459605532062271798LLU), QU( 866316924816482864LLU),
- QU( 4593041209937286377LLU), QU( 8415491391910972138LLU),
- QU( 4171236715600528969LLU), QU(16637569303336782889LLU),
- QU( 2002011073439212680LLU), QU(17695124661097601411LLU),
- QU( 4627687053598611702LLU), QU( 7895831936020190403LLU),
- QU( 8455951300917267802LLU), QU( 2923861649108534854LLU),
- QU( 8344557563927786255LLU), QU( 6408671940373352556LLU),
- QU(12210227354536675772LLU), QU(14294804157294222295LLU),
- QU(10103022425071085127LLU), QU(10092959489504123771LLU),
- QU( 6554774405376736268LLU), QU(12629917718410641774LLU),
- QU( 6260933257596067126LLU), QU( 2460827021439369673LLU),
- QU( 2541962996717103668LLU), QU( 597377203127351475LLU),
- QU( 5316984203117315309LLU), QU( 4811211393563241961LLU),
- QU(13119698597255811641LLU), QU( 8048691512862388981LLU),
- QU(10216818971194073842LLU), QU( 4612229970165291764LLU),
- QU(10000980798419974770LLU), QU( 6877640812402540687LLU),
- QU( 1488727563290436992LLU), QU( 2227774069895697318LLU),
- QU(11237754507523316593LLU), QU(13478948605382290972LLU),
- QU( 1963583846976858124LLU), QU( 5512309205269276457LLU),
- QU( 3972770164717652347LLU), QU( 3841751276198975037LLU),
- QU(10283343042181903117LLU), QU( 8564001259792872199LLU),
- QU(16472187244722489221LLU), QU( 8953493499268945921LLU),
- QU( 3518747340357279580LLU), QU( 4003157546223963073LLU),
- QU( 3270305958289814590LLU), QU( 3966704458129482496LLU),
- QU( 8122141865926661939LLU), QU(14627734748099506653LLU),
- QU(13064426990862560568LLU), QU( 2414079187889870829LLU),
- QU( 5378461209354225306LLU), QU(10841985740128255566LLU),
- QU( 538582442885401738LLU), QU( 7535089183482905946LLU),
- QU(16117559957598879095LLU), QU( 8477890721414539741LLU),
- QU( 1459127491209533386LLU), QU(17035126360733620462LLU),
- QU( 8517668552872379126LLU), QU(10292151468337355014LLU),
- QU(17081267732745344157LLU), QU(13751455337946087178LLU),
- QU(14026945459523832966LLU), QU( 6653278775061723516LLU),
- QU(10619085543856390441LLU), QU( 2196343631481122885LLU),
- QU(10045966074702826136LLU), QU(10082317330452718282LLU),
- QU( 5920859259504831242LLU), QU( 9951879073426540617LLU),
- QU( 7074696649151414158LLU), QU(15808193543879464318LLU),
- QU( 7385247772746953374LLU), QU( 3192003544283864292LLU),
- QU(18153684490917593847LLU), QU(12423498260668568905LLU),
- QU(10957758099756378169LLU), QU(11488762179911016040LLU),
- QU( 2099931186465333782LLU), QU(11180979581250294432LLU),
- QU( 8098916250668367933LLU), QU( 3529200436790763465LLU),
- QU(12988418908674681745LLU), QU( 6147567275954808580LLU),
- QU( 3207503344604030989LLU), QU(10761592604898615360LLU),
- QU( 229854861031893504LLU), QU( 8809853962667144291LLU),
- QU(13957364469005693860LLU), QU( 7634287665224495886LLU),
- QU(12353487366976556874LLU), QU( 1134423796317152034LLU),
- QU( 2088992471334107068LLU), QU( 7393372127190799698LLU),
- QU( 1845367839871058391LLU), QU( 207922563987322884LLU),
- QU(11960870813159944976LLU), QU(12182120053317317363LLU),
- QU(17307358132571709283LLU), QU(13871081155552824936LLU),
- QU(18304446751741566262LLU), QU( 7178705220184302849LLU),
- QU(10929605677758824425LLU), QU(16446976977835806844LLU),
- QU(13723874412159769044LLU), QU( 6942854352100915216LLU),
- QU( 1726308474365729390LLU), QU( 2150078766445323155LLU),
- QU(15345558947919656626LLU), QU(12145453828874527201LLU),
- QU( 2054448620739726849LLU), QU( 2740102003352628137LLU),
- QU(11294462163577610655LLU), QU( 756164283387413743LLU),
- QU(17841144758438810880LLU), QU(10802406021185415861LLU),
- QU( 8716455530476737846LLU), QU( 6321788834517649606LLU),
- QU(14681322910577468426LLU), QU(17330043563884336387LLU),
- QU(12701802180050071614LLU), QU(14695105111079727151LLU),
- QU( 5112098511654172830LLU), QU( 4957505496794139973LLU),
- QU( 8270979451952045982LLU), QU(12307685939199120969LLU),
- QU(12425799408953443032LLU), QU( 8376410143634796588LLU),
- QU(16621778679680060464LLU), QU( 3580497854566660073LLU),
- QU( 1122515747803382416LLU), QU( 857664980960597599LLU),
- QU( 6343640119895925918LLU), QU(12878473260854462891LLU),
- QU(10036813920765722626LLU), QU(14451335468363173812LLU),
- QU( 5476809692401102807LLU), QU(16442255173514366342LLU),
- QU(13060203194757167104LLU), QU(14354124071243177715LLU),
- QU(15961249405696125227LLU), QU(13703893649690872584LLU),
- QU( 363907326340340064LLU), QU( 6247455540491754842LLU),
- QU(12242249332757832361LLU), QU( 156065475679796717LLU),
- QU( 9351116235749732355LLU), QU( 4590350628677701405LLU),
- QU( 1671195940982350389LLU), QU(13501398458898451905LLU),
- QU( 6526341991225002255LLU), QU( 1689782913778157592LLU),
- QU( 7439222350869010334LLU), QU(13975150263226478308LLU),
- QU(11411961169932682710LLU), QU(17204271834833847277LLU),
- QU( 541534742544435367LLU), QU( 6591191931218949684LLU),
- QU( 2645454775478232486LLU), QU( 4322857481256485321LLU),
- QU( 8477416487553065110LLU), QU(12902505428548435048LLU),
- QU( 971445777981341415LLU), QU(14995104682744976712LLU),
- QU( 4243341648807158063LLU), QU( 8695061252721927661LLU),
- QU( 5028202003270177222LLU), QU( 2289257340915567840LLU),
- QU(13870416345121866007LLU), QU(13994481698072092233LLU),
- QU( 6912785400753196481LLU), QU( 2278309315841980139LLU),
- QU( 4329765449648304839LLU), QU( 5963108095785485298LLU),
- QU( 4880024847478722478LLU), QU(16015608779890240947LLU),
- QU( 1866679034261393544LLU), QU( 914821179919731519LLU),
- QU( 9643404035648760131LLU), QU( 2418114953615593915LLU),
- QU( 944756836073702374LLU), QU(15186388048737296834LLU),
- QU( 7723355336128442206LLU), QU( 7500747479679599691LLU),
- QU(18013961306453293634LLU), QU( 2315274808095756456LLU),
- QU(13655308255424029566LLU), QU(17203800273561677098LLU),
- QU( 1382158694422087756LLU), QU( 5090390250309588976LLU),
- QU( 517170818384213989LLU), QU( 1612709252627729621LLU),
- QU( 1330118955572449606LLU), QU( 300922478056709885LLU),
- QU(18115693291289091987LLU), QU(13491407109725238321LLU),
- QU(15293714633593827320LLU), QU( 5151539373053314504LLU),
- QU( 5951523243743139207LLU), QU(14459112015249527975LLU),
- QU( 5456113959000700739LLU), QU( 3877918438464873016LLU),
- QU(12534071654260163555LLU), QU(15871678376893555041LLU),
- QU(11005484805712025549LLU), QU(16353066973143374252LLU),
- QU( 4358331472063256685LLU), QU( 8268349332210859288LLU),
- QU(12485161590939658075LLU), QU(13955993592854471343LLU),
- QU( 5911446886848367039LLU), QU(14925834086813706974LLU),
- QU( 6590362597857994805LLU), QU( 1280544923533661875LLU),
- QU( 1637756018947988164LLU), QU( 4734090064512686329LLU),
- QU(16693705263131485912LLU), QU( 6834882340494360958LLU),
- QU( 8120732176159658505LLU), QU( 2244371958905329346LLU),
- QU(10447499707729734021LLU), QU( 7318742361446942194LLU),
- QU( 8032857516355555296LLU), QU(14023605983059313116LLU),
- QU( 1032336061815461376LLU), QU( 9840995337876562612LLU),
- QU( 9869256223029203587LLU), QU(12227975697177267636LLU),
- QU(12728115115844186033LLU), QU( 7752058479783205470LLU),
- QU( 729733219713393087LLU), QU(12954017801239007622LLU)
+ KQU(16924766246869039260), KQU( 8201438687333352714),
+ KQU( 2265290287015001750), KQU(18397264611805473832),
+ KQU( 3375255223302384358), KQU( 6345559975416828796),
+ KQU(18229739242790328073), KQU( 7596792742098800905),
+ KQU( 255338647169685981), KQU( 2052747240048610300),
+ KQU(18328151576097299343), KQU(12472905421133796567),
+ KQU(11315245349717600863), KQU(16594110197775871209),
+ KQU(15708751964632456450), KQU(10452031272054632535),
+ KQU(11097646720811454386), KQU( 4556090668445745441),
+ KQU(17116187693090663106), KQU(14931526836144510645),
+ KQU( 9190752218020552591), KQU( 9625800285771901401),
+ KQU(13995141077659972832), KQU( 5194209094927829625),
+ KQU( 4156788379151063303), KQU( 8523452593770139494),
+ KQU(14082382103049296727), KQU( 2462601863986088483),
+ KQU( 3030583461592840678), KQU( 5221622077872827681),
+ KQU( 3084210671228981236), KQU(13956758381389953823),
+ KQU(13503889856213423831), KQU(15696904024189836170),
+ KQU( 4612584152877036206), KQU( 6231135538447867881),
+ KQU(10172457294158869468), KQU( 6452258628466708150),
+ KQU(14044432824917330221), KQU( 370168364480044279),
+ KQU(10102144686427193359), KQU( 667870489994776076),
+ KQU( 2732271956925885858), KQU(18027788905977284151),
+ KQU(15009842788582923859), KQU( 7136357960180199542),
+ KQU(15901736243475578127), KQU(16951293785352615701),
+ KQU(10551492125243691632), KQU(17668869969146434804),
+ KQU(13646002971174390445), KQU( 9804471050759613248),
+ KQU( 5511670439655935493), KQU(18103342091070400926),
+ KQU(17224512747665137533), KQU(15534627482992618168),
+ KQU( 1423813266186582647), KQU(15821176807932930024),
+ KQU( 30323369733607156), KQU(11599382494723479403),
+ KQU( 653856076586810062), KQU( 3176437395144899659),
+ KQU(14028076268147963917), KQU(16156398271809666195),
+ KQU( 3166955484848201676), KQU( 5746805620136919390),
+ KQU(17297845208891256593), KQU(11691653183226428483),
+ KQU(17900026146506981577), KQU(15387382115755971042),
+ KQU(16923567681040845943), KQU( 8039057517199388606),
+ KQU(11748409241468629263), KQU( 794358245539076095),
+ KQU(13438501964693401242), KQU(14036803236515618962),
+ KQU( 5252311215205424721), KQU(17806589612915509081),
+ KQU( 6802767092397596006), KQU(14212120431184557140),
+ KQU( 1072951366761385712), KQU(13098491780722836296),
+ KQU( 9466676828710797353), KQU(12673056849042830081),
+ KQU(12763726623645357580), KQU(16468961652999309493),
+ KQU(15305979875636438926), KQU(17444713151223449734),
+ KQU( 5692214267627883674), KQU(13049589139196151505),
+ KQU( 880115207831670745), KQU( 1776529075789695498),
+ KQU(16695225897801466485), KQU(10666901778795346845),
+ KQU( 6164389346722833869), KQU( 2863817793264300475),
+ KQU( 9464049921886304754), KQU( 3993566636740015468),
+ KQU( 9983749692528514136), KQU(16375286075057755211),
+ KQU(16042643417005440820), KQU(11445419662923489877),
+ KQU( 7999038846885158836), KQU( 6721913661721511535),
+ KQU( 5363052654139357320), KQU( 1817788761173584205),
+ KQU(13290974386445856444), KQU( 4650350818937984680),
+ KQU( 8219183528102484836), KQU( 1569862923500819899),
+ KQU( 4189359732136641860), KQU(14202822961683148583),
+ KQU( 4457498315309429058), KQU(13089067387019074834),
+ KQU(11075517153328927293), KQU(10277016248336668389),
+ KQU( 7070509725324401122), KQU(17808892017780289380),
+ KQU(13143367339909287349), KQU( 1377743745360085151),
+ KQU( 5749341807421286485), KQU(14832814616770931325),
+ KQU( 7688820635324359492), KQU(10960474011539770045),
+ KQU( 81970066653179790), KQU(12619476072607878022),
+ KQU( 4419566616271201744), KQU(15147917311750568503),
+ KQU( 5549739182852706345), KQU( 7308198397975204770),
+ KQU(13580425496671289278), KQU(17070764785210130301),
+ KQU( 8202832846285604405), KQU( 6873046287640887249),
+ KQU( 6927424434308206114), KQU( 6139014645937224874),
+ KQU(10290373645978487639), KQU(15904261291701523804),
+ KQU( 9628743442057826883), KQU(18383429096255546714),
+ KQU( 4977413265753686967), KQU( 7714317492425012869),
+ KQU( 9025232586309926193), KQU(14627338359776709107),
+ KQU(14759849896467790763), KQU(10931129435864423252),
+ KQU( 4588456988775014359), KQU(10699388531797056724),
+ KQU( 468652268869238792), KQU( 5755943035328078086),
+ KQU( 2102437379988580216), KQU( 9986312786506674028),
+ KQU( 2654207180040945604), KQU( 8726634790559960062),
+ KQU( 100497234871808137), KQU( 2800137176951425819),
+ KQU( 6076627612918553487), KQU( 5780186919186152796),
+ KQU( 8179183595769929098), KQU( 6009426283716221169),
+ KQU( 2796662551397449358), KQU( 1756961367041986764),
+ KQU( 6972897917355606205), KQU(14524774345368968243),
+ KQU( 2773529684745706940), KQU( 4853632376213075959),
+ KQU( 4198177923731358102), KQU( 8271224913084139776),
+ KQU( 2741753121611092226), KQU(16782366145996731181),
+ KQU(15426125238972640790), KQU(13595497100671260342),
+ KQU( 3173531022836259898), KQU( 6573264560319511662),
+ KQU(18041111951511157441), KQU( 2351433581833135952),
+ KQU( 3113255578908173487), KQU( 1739371330877858784),
+ KQU(16046126562789165480), KQU( 8072101652214192925),
+ KQU(15267091584090664910), KQU( 9309579200403648940),
+ KQU( 5218892439752408722), KQU(14492477246004337115),
+ KQU(17431037586679770619), KQU( 7385248135963250480),
+ KQU( 9580144956565560660), KQU( 4919546228040008720),
+ KQU(15261542469145035584), KQU(18233297270822253102),
+ KQU( 5453248417992302857), KQU( 9309519155931460285),
+ KQU(10342813012345291756), KQU(15676085186784762381),
+ KQU(15912092950691300645), KQU( 9371053121499003195),
+ KQU( 9897186478226866746), KQU(14061858287188196327),
+ KQU( 122575971620788119), KQU(12146750969116317754),
+ KQU( 4438317272813245201), KQU( 8332576791009527119),
+ KQU(13907785691786542057), KQU(10374194887283287467),
+ KQU( 2098798755649059566), KQU( 3416235197748288894),
+ KQU( 8688269957320773484), KQU( 7503964602397371571),
+ KQU(16724977015147478236), KQU( 9461512855439858184),
+ KQU(13259049744534534727), KQU( 3583094952542899294),
+ KQU( 8764245731305528292), KQU(13240823595462088985),
+ KQU(13716141617617910448), KQU(18114969519935960955),
+ KQU( 2297553615798302206), KQU( 4585521442944663362),
+ KQU(17776858680630198686), KQU( 4685873229192163363),
+ KQU( 152558080671135627), KQU(15424900540842670088),
+ KQU(13229630297130024108), KQU(17530268788245718717),
+ KQU(16675633913065714144), KQU( 3158912717897568068),
+ KQU(15399132185380087288), KQU( 7401418744515677872),
+ KQU(13135412922344398535), KQU( 6385314346100509511),
+ KQU(13962867001134161139), KQU(10272780155442671999),
+ KQU(12894856086597769142), KQU(13340877795287554994),
+ KQU(12913630602094607396), KQU(12543167911119793857),
+ KQU(17343570372251873096), KQU(10959487764494150545),
+ KQU( 6966737953093821128), KQU(13780699135496988601),
+ KQU( 4405070719380142046), KQU(14923788365607284982),
+ KQU( 2869487678905148380), KQU( 6416272754197188403),
+ KQU(15017380475943612591), KQU( 1995636220918429487),
+ KQU( 3402016804620122716), KQU(15800188663407057080),
+ KQU(11362369990390932882), KQU(15262183501637986147),
+ KQU(10239175385387371494), KQU( 9352042420365748334),
+ KQU( 1682457034285119875), KQU( 1724710651376289644),
+ KQU( 2038157098893817966), KQU( 9897825558324608773),
+ KQU( 1477666236519164736), KQU(16835397314511233640),
+ KQU(10370866327005346508), KQU(10157504370660621982),
+ KQU(12113904045335882069), KQU(13326444439742783008),
+ KQU(11302769043000765804), KQU(13594979923955228484),
+ KQU(11779351762613475968), KQU( 3786101619539298383),
+ KQU( 8021122969180846063), KQU(15745904401162500495),
+ KQU(10762168465993897267), KQU(13552058957896319026),
+ KQU(11200228655252462013), KQU( 5035370357337441226),
+ KQU( 7593918984545500013), KQU( 5418554918361528700),
+ KQU( 4858270799405446371), KQU( 9974659566876282544),
+ KQU(18227595922273957859), KQU( 2772778443635656220),
+ KQU(14285143053182085385), KQU( 9939700992429600469),
+ KQU(12756185904545598068), KQU( 2020783375367345262),
+ KQU( 57026775058331227), KQU( 950827867930065454),
+ KQU( 6602279670145371217), KQU( 2291171535443566929),
+ KQU( 5832380724425010313), KQU( 1220343904715982285),
+ KQU(17045542598598037633), KQU(15460481779702820971),
+ KQU(13948388779949365130), KQU(13975040175430829518),
+ KQU(17477538238425541763), KQU(11104663041851745725),
+ KQU(15860992957141157587), KQU(14529434633012950138),
+ KQU( 2504838019075394203), KQU( 7512113882611121886),
+ KQU( 4859973559980886617), KQU( 1258601555703250219),
+ KQU(15594548157514316394), KQU( 4516730171963773048),
+ KQU(11380103193905031983), KQU( 6809282239982353344),
+ KQU(18045256930420065002), KQU( 2453702683108791859),
+ KQU( 977214582986981460), KQU( 2006410402232713466),
+ KQU( 6192236267216378358), KQU( 3429468402195675253),
+ KQU(18146933153017348921), KQU(17369978576367231139),
+ KQU( 1246940717230386603), KQU(11335758870083327110),
+ KQU(14166488801730353682), KQU( 9008573127269635732),
+ KQU(10776025389820643815), KQU(15087605441903942962),
+ KQU( 1359542462712147922), KQU(13898874411226454206),
+ KQU(17911176066536804411), KQU( 9435590428600085274),
+ KQU( 294488509967864007), KQU( 8890111397567922046),
+ KQU( 7987823476034328778), KQU(13263827582440967651),
+ KQU( 7503774813106751573), KQU(14974747296185646837),
+ KQU( 8504765037032103375), KQU(17340303357444536213),
+ KQU( 7704610912964485743), KQU( 8107533670327205061),
+ KQU( 9062969835083315985), KQU(16968963142126734184),
+ KQU(12958041214190810180), KQU( 2720170147759570200),
+ KQU( 2986358963942189566), KQU(14884226322219356580),
+ KQU( 286224325144368520), KQU(11313800433154279797),
+ KQU(18366849528439673248), KQU(17899725929482368789),
+ KQU( 3730004284609106799), KQU( 1654474302052767205),
+ KQU( 5006698007047077032), KQU( 8196893913601182838),
+ KQU(15214541774425211640), KQU(17391346045606626073),
+ KQU( 8369003584076969089), KQU( 3939046733368550293),
+ KQU(10178639720308707785), KQU( 2180248669304388697),
+ KQU( 62894391300126322), KQU( 9205708961736223191),
+ KQU( 6837431058165360438), KQU( 3150743890848308214),
+ KQU(17849330658111464583), KQU(12214815643135450865),
+ KQU(13410713840519603402), KQU( 3200778126692046802),
+ KQU(13354780043041779313), KQU( 800850022756886036),
+ KQU(15660052933953067433), KQU( 6572823544154375676),
+ KQU(11030281857015819266), KQU(12682241941471433835),
+ KQU(11654136407300274693), KQU( 4517795492388641109),
+ KQU( 9757017371504524244), KQU(17833043400781889277),
+ KQU(12685085201747792227), KQU(10408057728835019573),
+ KQU( 98370418513455221), KQU( 6732663555696848598),
+ KQU(13248530959948529780), KQU( 3530441401230622826),
+ KQU(18188251992895660615), KQU( 1847918354186383756),
+ KQU( 1127392190402660921), KQU(11293734643143819463),
+ KQU( 3015506344578682982), KQU(13852645444071153329),
+ KQU( 2121359659091349142), KQU( 1294604376116677694),
+ KQU( 5616576231286352318), KQU( 7112502442954235625),
+ KQU(11676228199551561689), KQU(12925182803007305359),
+ KQU( 7852375518160493082), KQU( 1136513130539296154),
+ KQU( 5636923900916593195), KQU( 3221077517612607747),
+ KQU(17784790465798152513), KQU( 3554210049056995938),
+ KQU(17476839685878225874), KQU( 3206836372585575732),
+ KQU( 2765333945644823430), KQU(10080070903718799528),
+ KQU( 5412370818878286353), KQU( 9689685887726257728),
+ KQU( 8236117509123533998), KQU( 1951139137165040214),
+ KQU( 4492205209227980349), KQU(16541291230861602967),
+ KQU( 1424371548301437940), KQU( 9117562079669206794),
+ KQU(14374681563251691625), KQU(13873164030199921303),
+ KQU( 6680317946770936731), KQU(15586334026918276214),
+ KQU(10896213950976109802), KQU( 9506261949596413689),
+ KQU( 9903949574308040616), KQU( 6038397344557204470),
+ KQU( 174601465422373648), KQU(15946141191338238030),
+ KQU(17142225620992044937), KQU( 7552030283784477064),
+ KQU( 2947372384532947997), KQU( 510797021688197711),
+ KQU( 4962499439249363461), KQU( 23770320158385357),
+ KQU( 959774499105138124), KQU( 1468396011518788276),
+ KQU( 2015698006852312308), KQU( 4149400718489980136),
+ KQU( 5992916099522371188), KQU(10819182935265531076),
+ KQU(16189787999192351131), KQU( 342833961790261950),
+ KQU(12470830319550495336), KQU(18128495041912812501),
+ KQU( 1193600899723524337), KQU( 9056793666590079770),
+ KQU( 2154021227041669041), KQU( 4963570213951235735),
+ KQU( 4865075960209211409), KQU( 2097724599039942963),
+ KQU( 2024080278583179845), KQU(11527054549196576736),
+ KQU(10650256084182390252), KQU( 4808408648695766755),
+ KQU( 1642839215013788844), KQU(10607187948250398390),
+ KQU( 7076868166085913508), KQU( 730522571106887032),
+ KQU(12500579240208524895), KQU( 4484390097311355324),
+ KQU(15145801330700623870), KQU( 8055827661392944028),
+ KQU( 5865092976832712268), KQU(15159212508053625143),
+ KQU( 3560964582876483341), KQU( 4070052741344438280),
+ KQU( 6032585709886855634), KQU(15643262320904604873),
+ KQU( 2565119772293371111), KQU( 318314293065348260),
+ KQU(15047458749141511872), KQU( 7772788389811528730),
+ KQU( 7081187494343801976), KQU( 6465136009467253947),
+ KQU(10425940692543362069), KQU( 554608190318339115),
+ KQU(14796699860302125214), KQU( 1638153134431111443),
+ KQU(10336967447052276248), KQU( 8412308070396592958),
+ KQU( 4004557277152051226), KQU( 8143598997278774834),
+ KQU(16413323996508783221), KQU(13139418758033994949),
+ KQU( 9772709138335006667), KQU( 2818167159287157659),
+ KQU(17091740573832523669), KQU(14629199013130751608),
+ KQU(18268322711500338185), KQU( 8290963415675493063),
+ KQU( 8830864907452542588), KQU( 1614839084637494849),
+ KQU(14855358500870422231), KQU( 3472996748392519937),
+ KQU(15317151166268877716), KQU( 5825895018698400362),
+ KQU(16730208429367544129), KQU(10481156578141202800),
+ KQU( 4746166512382823750), KQU(12720876014472464998),
+ KQU( 8825177124486735972), KQU(13733447296837467838),
+ KQU( 6412293741681359625), KQU( 8313213138756135033),
+ KQU(11421481194803712517), KQU( 7997007691544174032),
+ KQU( 6812963847917605930), KQU( 9683091901227558641),
+ KQU(14703594165860324713), KQU( 1775476144519618309),
+ KQU( 2724283288516469519), KQU( 717642555185856868),
+ KQU( 8736402192215092346), KQU(11878800336431381021),
+ KQU( 4348816066017061293), KQU( 6115112756583631307),
+ KQU( 9176597239667142976), KQU(12615622714894259204),
+ KQU(10283406711301385987), KQU( 5111762509485379420),
+ KQU( 3118290051198688449), KQU( 7345123071632232145),
+ KQU( 9176423451688682359), KQU( 4843865456157868971),
+ KQU(12008036363752566088), KQU(12058837181919397720),
+ KQU( 2145073958457347366), KQU( 1526504881672818067),
+ KQU( 3488830105567134848), KQU(13208362960674805143),
+ KQU( 4077549672899572192), KQU( 7770995684693818365),
+ KQU( 1398532341546313593), KQU(12711859908703927840),
+ KQU( 1417561172594446813), KQU(17045191024194170604),
+ KQU( 4101933177604931713), KQU(14708428834203480320),
+ KQU(17447509264469407724), KQU(14314821973983434255),
+ KQU(17990472271061617265), KQU( 5087756685841673942),
+ KQU(12797820586893859939), KQU( 1778128952671092879),
+ KQU( 3535918530508665898), KQU( 9035729701042481301),
+ KQU(14808661568277079962), KQU(14587345077537747914),
+ KQU(11920080002323122708), KQU( 6426515805197278753),
+ KQU( 3295612216725984831), KQU(11040722532100876120),
+ KQU(12305952936387598754), KQU(16097391899742004253),
+ KQU( 4908537335606182208), KQU(12446674552196795504),
+ KQU(16010497855816895177), KQU( 9194378874788615551),
+ KQU( 3382957529567613384), KQU( 5154647600754974077),
+ KQU( 9801822865328396141), KQU( 9023662173919288143),
+ KQU(17623115353825147868), KQU( 8238115767443015816),
+ KQU(15811444159859002560), KQU( 9085612528904059661),
+ KQU( 6888601089398614254), KQU( 258252992894160189),
+ KQU( 6704363880792428622), KQU( 6114966032147235763),
+ KQU(11075393882690261875), KQU( 8797664238933620407),
+ KQU( 5901892006476726920), KQU( 5309780159285518958),
+ KQU(14940808387240817367), KQU(14642032021449656698),
+ KQU( 9808256672068504139), KQU( 3670135111380607658),
+ KQU(11211211097845960152), KQU( 1474304506716695808),
+ KQU(15843166204506876239), KQU( 7661051252471780561),
+ KQU(10170905502249418476), KQU( 7801416045582028589),
+ KQU( 2763981484737053050), KQU( 9491377905499253054),
+ KQU(16201395896336915095), KQU( 9256513756442782198),
+ KQU( 5411283157972456034), KQU( 5059433122288321676),
+ KQU( 4327408006721123357), KQU( 9278544078834433377),
+ KQU( 7601527110882281612), KQU(11848295896975505251),
+ KQU(12096998801094735560), KQU(14773480339823506413),
+ KQU(15586227433895802149), KQU(12786541257830242872),
+ KQU( 6904692985140503067), KQU( 5309011515263103959),
+ KQU(12105257191179371066), KQU(14654380212442225037),
+ KQU( 2556774974190695009), KQU( 4461297399927600261),
+ KQU(14888225660915118646), KQU(14915459341148291824),
+ KQU( 2738802166252327631), KQU( 6047155789239131512),
+ KQU(12920545353217010338), KQU(10697617257007840205),
+ KQU( 2751585253158203504), KQU(13252729159780047496),
+ KQU(14700326134672815469), KQU(14082527904374600529),
+ KQU(16852962273496542070), KQU(17446675504235853907),
+ KQU(15019600398527572311), KQU(12312781346344081551),
+ KQU(14524667935039810450), KQU( 5634005663377195738),
+ KQU(11375574739525000569), KQU( 2423665396433260040),
+ KQU( 5222836914796015410), KQU( 4397666386492647387),
+ KQU( 4619294441691707638), KQU( 665088602354770716),
+ KQU(13246495665281593610), KQU( 6564144270549729409),
+ KQU(10223216188145661688), KQU( 3961556907299230585),
+ KQU(11543262515492439914), KQU(16118031437285993790),
+ KQU( 7143417964520166465), KQU(13295053515909486772),
+ KQU( 40434666004899675), KQU(17127804194038347164),
+ KQU( 8599165966560586269), KQU( 8214016749011284903),
+ KQU(13725130352140465239), KQU( 5467254474431726291),
+ KQU( 7748584297438219877), KQU(16933551114829772472),
+ KQU( 2169618439506799400), KQU( 2169787627665113463),
+ KQU(17314493571267943764), KQU(18053575102911354912),
+ KQU(11928303275378476973), KQU(11593850925061715550),
+ KQU(17782269923473589362), KQU( 3280235307704747039),
+ KQU( 6145343578598685149), KQU(17080117031114086090),
+ KQU(18066839902983594755), KQU( 6517508430331020706),
+ KQU( 8092908893950411541), KQU(12558378233386153732),
+ KQU( 4476532167973132976), KQU(16081642430367025016),
+ KQU( 4233154094369139361), KQU( 8693630486693161027),
+ KQU(11244959343027742285), KQU(12273503967768513508),
+ KQU(14108978636385284876), KQU( 7242414665378826984),
+ KQU( 6561316938846562432), KQU( 8601038474994665795),
+ KQU(17532942353612365904), KQU(17940076637020912186),
+ KQU( 7340260368823171304), KQU( 7061807613916067905),
+ KQU(10561734935039519326), KQU(17990796503724650862),
+ KQU( 6208732943911827159), KQU( 359077562804090617),
+ KQU(14177751537784403113), KQU(10659599444915362902),
+ KQU(15081727220615085833), KQU(13417573895659757486),
+ KQU(15513842342017811524), KQU(11814141516204288231),
+ KQU( 1827312513875101814), KQU( 2804611699894603103),
+ KQU(17116500469975602763), KQU(12270191815211952087),
+ KQU(12256358467786024988), KQU(18435021722453971267),
+ KQU( 671330264390865618), KQU( 476504300460286050),
+ KQU(16465470901027093441), KQU( 4047724406247136402),
+ KQU( 1322305451411883346), KQU( 1388308688834322280),
+ KQU( 7303989085269758176), KQU( 9323792664765233642),
+ KQU( 4542762575316368936), KQU(17342696132794337618),
+ KQU( 4588025054768498379), KQU(13415475057390330804),
+ KQU(17880279491733405570), KQU(10610553400618620353),
+ KQU( 3180842072658960139), KQU(13002966655454270120),
+ KQU( 1665301181064982826), KQU( 7083673946791258979),
+ KQU( 190522247122496820), KQU(17388280237250677740),
+ KQU( 8430770379923642945), KQU(12987180971921668584),
+ KQU( 2311086108365390642), KQU( 2870984383579822345),
+ KQU(14014682609164653318), KQU(14467187293062251484),
+ KQU( 192186361147413298), KQU(15171951713531796524),
+ KQU( 9900305495015948728), KQU(17958004775615466344),
+ KQU(14346380954498606514), KQU(18040047357617407096),
+ KQU( 5035237584833424532), KQU(15089555460613972287),
+ KQU( 4131411873749729831), KQU( 1329013581168250330),
+ KQU(10095353333051193949), KQU(10749518561022462716),
+ KQU( 9050611429810755847), KQU(15022028840236655649),
+ KQU( 8775554279239748298), KQU(13105754025489230502),
+ KQU(15471300118574167585), KQU( 89864764002355628),
+ KQU( 8776416323420466637), KQU( 5280258630612040891),
+ KQU( 2719174488591862912), KQU( 7599309137399661994),
+ KQU(15012887256778039979), KQU(14062981725630928925),
+ KQU(12038536286991689603), KQU( 7089756544681775245),
+ KQU(10376661532744718039), KQU( 1265198725901533130),
+ KQU(13807996727081142408), KQU( 2935019626765036403),
+ KQU( 7651672460680700141), KQU( 3644093016200370795),
+ KQU( 2840982578090080674), KQU(17956262740157449201),
+ KQU(18267979450492880548), KQU(11799503659796848070),
+ KQU( 9942537025669672388), KQU(11886606816406990297),
+ KQU( 5488594946437447576), KQU( 7226714353282744302),
+ KQU( 3784851653123877043), KQU( 878018453244803041),
+ KQU(12110022586268616085), KQU( 734072179404675123),
+ KQU(11869573627998248542), KQU( 469150421297783998),
+ KQU( 260151124912803804), KQU(11639179410120968649),
+ KQU( 9318165193840846253), KQU(12795671722734758075),
+ KQU(15318410297267253933), KQU( 691524703570062620),
+ KQU( 5837129010576994601), KQU(15045963859726941052),
+ KQU( 5850056944932238169), KQU(12017434144750943807),
+ KQU( 7447139064928956574), KQU( 3101711812658245019),
+ KQU(16052940704474982954), KQU(18195745945986994042),
+ KQU( 8932252132785575659), KQU(13390817488106794834),
+ KQU(11582771836502517453), KQU( 4964411326683611686),
+ KQU( 2195093981702694011), KQU(14145229538389675669),
+ KQU(16459605532062271798), KQU( 866316924816482864),
+ KQU( 4593041209937286377), KQU( 8415491391910972138),
+ KQU( 4171236715600528969), KQU(16637569303336782889),
+ KQU( 2002011073439212680), KQU(17695124661097601411),
+ KQU( 4627687053598611702), KQU( 7895831936020190403),
+ KQU( 8455951300917267802), KQU( 2923861649108534854),
+ KQU( 8344557563927786255), KQU( 6408671940373352556),
+ KQU(12210227354536675772), KQU(14294804157294222295),
+ KQU(10103022425071085127), KQU(10092959489504123771),
+ KQU( 6554774405376736268), KQU(12629917718410641774),
+ KQU( 6260933257596067126), KQU( 2460827021439369673),
+ KQU( 2541962996717103668), KQU( 597377203127351475),
+ KQU( 5316984203117315309), KQU( 4811211393563241961),
+ KQU(13119698597255811641), KQU( 8048691512862388981),
+ KQU(10216818971194073842), KQU( 4612229970165291764),
+ KQU(10000980798419974770), KQU( 6877640812402540687),
+ KQU( 1488727563290436992), KQU( 2227774069895697318),
+ KQU(11237754507523316593), KQU(13478948605382290972),
+ KQU( 1963583846976858124), KQU( 5512309205269276457),
+ KQU( 3972770164717652347), KQU( 3841751276198975037),
+ KQU(10283343042181903117), KQU( 8564001259792872199),
+ KQU(16472187244722489221), KQU( 8953493499268945921),
+ KQU( 3518747340357279580), KQU( 4003157546223963073),
+ KQU( 3270305958289814590), KQU( 3966704458129482496),
+ KQU( 8122141865926661939), KQU(14627734748099506653),
+ KQU(13064426990862560568), KQU( 2414079187889870829),
+ KQU( 5378461209354225306), KQU(10841985740128255566),
+ KQU( 538582442885401738), KQU( 7535089183482905946),
+ KQU(16117559957598879095), KQU( 8477890721414539741),
+ KQU( 1459127491209533386), KQU(17035126360733620462),
+ KQU( 8517668552872379126), KQU(10292151468337355014),
+ KQU(17081267732745344157), KQU(13751455337946087178),
+ KQU(14026945459523832966), KQU( 6653278775061723516),
+ KQU(10619085543856390441), KQU( 2196343631481122885),
+ KQU(10045966074702826136), KQU(10082317330452718282),
+ KQU( 5920859259504831242), KQU( 9951879073426540617),
+ KQU( 7074696649151414158), KQU(15808193543879464318),
+ KQU( 7385247772746953374), KQU( 3192003544283864292),
+ KQU(18153684490917593847), KQU(12423498260668568905),
+ KQU(10957758099756378169), KQU(11488762179911016040),
+ KQU( 2099931186465333782), KQU(11180979581250294432),
+ KQU( 8098916250668367933), KQU( 3529200436790763465),
+ KQU(12988418908674681745), KQU( 6147567275954808580),
+ KQU( 3207503344604030989), KQU(10761592604898615360),
+ KQU( 229854861031893504), KQU( 8809853962667144291),
+ KQU(13957364469005693860), KQU( 7634287665224495886),
+ KQU(12353487366976556874), KQU( 1134423796317152034),
+ KQU( 2088992471334107068), KQU( 7393372127190799698),
+ KQU( 1845367839871058391), KQU( 207922563987322884),
+ KQU(11960870813159944976), KQU(12182120053317317363),
+ KQU(17307358132571709283), KQU(13871081155552824936),
+ KQU(18304446751741566262), KQU( 7178705220184302849),
+ KQU(10929605677758824425), KQU(16446976977835806844),
+ KQU(13723874412159769044), KQU( 6942854352100915216),
+ KQU( 1726308474365729390), KQU( 2150078766445323155),
+ KQU(15345558947919656626), KQU(12145453828874527201),
+ KQU( 2054448620739726849), KQU( 2740102003352628137),
+ KQU(11294462163577610655), KQU( 756164283387413743),
+ KQU(17841144758438810880), KQU(10802406021185415861),
+ KQU( 8716455530476737846), KQU( 6321788834517649606),
+ KQU(14681322910577468426), KQU(17330043563884336387),
+ KQU(12701802180050071614), KQU(14695105111079727151),
+ KQU( 5112098511654172830), KQU( 4957505496794139973),
+ KQU( 8270979451952045982), KQU(12307685939199120969),
+ KQU(12425799408953443032), KQU( 8376410143634796588),
+ KQU(16621778679680060464), KQU( 3580497854566660073),
+ KQU( 1122515747803382416), KQU( 857664980960597599),
+ KQU( 6343640119895925918), KQU(12878473260854462891),
+ KQU(10036813920765722626), KQU(14451335468363173812),
+ KQU( 5476809692401102807), KQU(16442255173514366342),
+ KQU(13060203194757167104), KQU(14354124071243177715),
+ KQU(15961249405696125227), KQU(13703893649690872584),
+ KQU( 363907326340340064), KQU( 6247455540491754842),
+ KQU(12242249332757832361), KQU( 156065475679796717),
+ KQU( 9351116235749732355), KQU( 4590350628677701405),
+ KQU( 1671195940982350389), KQU(13501398458898451905),
+ KQU( 6526341991225002255), KQU( 1689782913778157592),
+ KQU( 7439222350869010334), KQU(13975150263226478308),
+ KQU(11411961169932682710), KQU(17204271834833847277),
+ KQU( 541534742544435367), KQU( 6591191931218949684),
+ KQU( 2645454775478232486), KQU( 4322857481256485321),
+ KQU( 8477416487553065110), KQU(12902505428548435048),
+ KQU( 971445777981341415), KQU(14995104682744976712),
+ KQU( 4243341648807158063), KQU( 8695061252721927661),
+ KQU( 5028202003270177222), KQU( 2289257340915567840),
+ KQU(13870416345121866007), KQU(13994481698072092233),
+ KQU( 6912785400753196481), KQU( 2278309315841980139),
+ KQU( 4329765449648304839), KQU( 5963108095785485298),
+ KQU( 4880024847478722478), KQU(16015608779890240947),
+ KQU( 1866679034261393544), KQU( 914821179919731519),
+ KQU( 9643404035648760131), KQU( 2418114953615593915),
+ KQU( 944756836073702374), KQU(15186388048737296834),
+ KQU( 7723355336128442206), KQU( 7500747479679599691),
+ KQU(18013961306453293634), KQU( 2315274808095756456),
+ KQU(13655308255424029566), KQU(17203800273561677098),
+ KQU( 1382158694422087756), KQU( 5090390250309588976),
+ KQU( 517170818384213989), KQU( 1612709252627729621),
+ KQU( 1330118955572449606), KQU( 300922478056709885),
+ KQU(18115693291289091987), KQU(13491407109725238321),
+ KQU(15293714633593827320), KQU( 5151539373053314504),
+ KQU( 5951523243743139207), KQU(14459112015249527975),
+ KQU( 5456113959000700739), KQU( 3877918438464873016),
+ KQU(12534071654260163555), KQU(15871678376893555041),
+ KQU(11005484805712025549), KQU(16353066973143374252),
+ KQU( 4358331472063256685), KQU( 8268349332210859288),
+ KQU(12485161590939658075), KQU(13955993592854471343),
+ KQU( 5911446886848367039), KQU(14925834086813706974),
+ KQU( 6590362597857994805), KQU( 1280544923533661875),
+ KQU( 1637756018947988164), KQU( 4734090064512686329),
+ KQU(16693705263131485912), KQU( 6834882340494360958),
+ KQU( 8120732176159658505), KQU( 2244371958905329346),
+ KQU(10447499707729734021), KQU( 7318742361446942194),
+ KQU( 8032857516355555296), KQU(14023605983059313116),
+ KQU( 1032336061815461376), KQU( 9840995337876562612),
+ KQU( 9869256223029203587), KQU(12227975697177267636),
+ KQU(12728115115844186033), KQU( 7752058479783205470),
+ KQU( 729733219713393087), KQU(12954017801239007622)
};
static const uint64_t init_by_array_64_expected[] = {
- QU( 2100341266307895239LLU), QU( 8344256300489757943LLU),
- QU(15687933285484243894LLU), QU( 8268620370277076319LLU),
- QU(12371852309826545459LLU), QU( 8800491541730110238LLU),
- QU(18113268950100835773LLU), QU( 2886823658884438119LLU),
- QU( 3293667307248180724LLU), QU( 9307928143300172731LLU),
- QU( 7688082017574293629LLU), QU( 900986224735166665LLU),
- QU( 9977972710722265039LLU), QU( 6008205004994830552LLU),
- QU( 546909104521689292LLU), QU( 7428471521869107594LLU),
- QU(14777563419314721179LLU), QU(16116143076567350053LLU),
- QU( 5322685342003142329LLU), QU( 4200427048445863473LLU),
- QU( 4693092150132559146LLU), QU(13671425863759338582LLU),
- QU( 6747117460737639916LLU), QU( 4732666080236551150LLU),
- QU( 5912839950611941263LLU), QU( 3903717554504704909LLU),
- QU( 2615667650256786818LLU), QU(10844129913887006352LLU),
- QU(13786467861810997820LLU), QU(14267853002994021570LLU),
- QU(13767807302847237439LLU), QU(16407963253707224617LLU),
- QU( 4802498363698583497LLU), QU( 2523802839317209764LLU),
- QU( 3822579397797475589LLU), QU( 8950320572212130610LLU),
- QU( 3745623504978342534LLU), QU(16092609066068482806LLU),
- QU( 9817016950274642398LLU), QU(10591660660323829098LLU),
- QU(11751606650792815920LLU), QU( 5122873818577122211LLU),
- QU(17209553764913936624LLU), QU( 6249057709284380343LLU),
- QU(15088791264695071830LLU), QU(15344673071709851930LLU),
- QU( 4345751415293646084LLU), QU( 2542865750703067928LLU),
- QU(13520525127852368784LLU), QU(18294188662880997241LLU),
- QU( 3871781938044881523LLU), QU( 2873487268122812184LLU),
- QU(15099676759482679005LLU), QU(15442599127239350490LLU),
- QU( 6311893274367710888LLU), QU( 3286118760484672933LLU),
- QU( 4146067961333542189LLU), QU(13303942567897208770LLU),
- QU( 8196013722255630418LLU), QU( 4437815439340979989LLU),
- QU(15433791533450605135LLU), QU( 4254828956815687049LLU),
- QU( 1310903207708286015LLU), QU(10529182764462398549LLU),
- QU(14900231311660638810LLU), QU( 9727017277104609793LLU),
- QU( 1821308310948199033LLU), QU(11628861435066772084LLU),
- QU( 9469019138491546924LLU), QU( 3145812670532604988LLU),
- QU( 9938468915045491919LLU), QU( 1562447430672662142LLU),
- QU(13963995266697989134LLU), QU( 3356884357625028695LLU),
- QU( 4499850304584309747LLU), QU( 8456825817023658122LLU),
- QU(10859039922814285279LLU), QU( 8099512337972526555LLU),
- QU( 348006375109672149LLU), QU(11919893998241688603LLU),
- QU( 1104199577402948826LLU), QU(16689191854356060289LLU),
- QU(10992552041730168078LLU), QU( 7243733172705465836LLU),
- QU( 5668075606180319560LLU), QU(18182847037333286970LLU),
- QU( 4290215357664631322LLU), QU( 4061414220791828613LLU),
- QU(13006291061652989604LLU), QU( 7140491178917128798LLU),
- QU(12703446217663283481LLU), QU( 5500220597564558267LLU),
- QU(10330551509971296358LLU), QU(15958554768648714492LLU),
- QU( 5174555954515360045LLU), QU( 1731318837687577735LLU),
- QU( 3557700801048354857LLU), QU(13764012341928616198LLU),
- QU(13115166194379119043LLU), QU( 7989321021560255519LLU),
- QU( 2103584280905877040LLU), QU( 9230788662155228488LLU),
- QU(16396629323325547654LLU), QU( 657926409811318051LLU),
- QU(15046700264391400727LLU), QU( 5120132858771880830LLU),
- QU( 7934160097989028561LLU), QU( 6963121488531976245LLU),
- QU(17412329602621742089LLU), QU(15144843053931774092LLU),
- QU(17204176651763054532LLU), QU(13166595387554065870LLU),
- QU( 8590377810513960213LLU), QU( 5834365135373991938LLU),
- QU( 7640913007182226243LLU), QU( 3479394703859418425LLU),
- QU(16402784452644521040LLU), QU( 4993979809687083980LLU),
- QU(13254522168097688865LLU), QU(15643659095244365219LLU),
- QU( 5881437660538424982LLU), QU(11174892200618987379LLU),
- QU( 254409966159711077LLU), QU(17158413043140549909LLU),
- QU( 3638048789290376272LLU), QU( 1376816930299489190LLU),
- QU( 4622462095217761923LLU), QU(15086407973010263515LLU),
- QU(13253971772784692238LLU), QU( 5270549043541649236LLU),
- QU(11182714186805411604LLU), QU(12283846437495577140LLU),
- QU( 5297647149908953219LLU), QU(10047451738316836654LLU),
- QU( 4938228100367874746LLU), QU(12328523025304077923LLU),
- QU( 3601049438595312361LLU), QU( 9313624118352733770LLU),
- QU(13322966086117661798LLU), QU(16660005705644029394LLU),
- QU(11337677526988872373LLU), QU(13869299102574417795LLU),
- QU(15642043183045645437LLU), QU( 3021755569085880019LLU),
- QU( 4979741767761188161LLU), QU(13679979092079279587LLU),
- QU( 3344685842861071743LLU), QU(13947960059899588104LLU),
- QU( 305806934293368007LLU), QU( 5749173929201650029LLU),
- QU(11123724852118844098LLU), QU(15128987688788879802LLU),
- QU(15251651211024665009LLU), QU( 7689925933816577776LLU),
- QU(16732804392695859449LLU), QU(17087345401014078468LLU),
- QU(14315108589159048871LLU), QU( 4820700266619778917LLU),
- QU(16709637539357958441LLU), QU( 4936227875177351374LLU),
- QU( 2137907697912987247LLU), QU(11628565601408395420LLU),
- QU( 2333250549241556786LLU), QU( 5711200379577778637LLU),
- QU( 5170680131529031729LLU), QU(12620392043061335164LLU),
- QU( 95363390101096078LLU), QU( 5487981914081709462LLU),
- QU( 1763109823981838620LLU), QU( 3395861271473224396LLU),
- QU( 1300496844282213595LLU), QU( 6894316212820232902LLU),
- QU(10673859651135576674LLU), QU( 5911839658857903252LLU),
- QU(17407110743387299102LLU), QU( 8257427154623140385LLU),
- QU(11389003026741800267LLU), QU( 4070043211095013717LLU),
- QU(11663806997145259025LLU), QU(15265598950648798210LLU),
- QU( 630585789434030934LLU), QU( 3524446529213587334LLU),
- QU( 7186424168495184211LLU), QU(10806585451386379021LLU),
- QU(11120017753500499273LLU), QU( 1586837651387701301LLU),
- QU(17530454400954415544LLU), QU( 9991670045077880430LLU),
- QU( 7550997268990730180LLU), QU( 8640249196597379304LLU),
- QU( 3522203892786893823LLU), QU(10401116549878854788LLU),
- QU(13690285544733124852LLU), QU( 8295785675455774586LLU),
- QU(15535716172155117603LLU), QU( 3112108583723722511LLU),
- QU(17633179955339271113LLU), QU(18154208056063759375LLU),
- QU( 1866409236285815666LLU), QU(13326075895396412882LLU),
- QU( 8756261842948020025LLU), QU( 6281852999868439131LLU),
- QU(15087653361275292858LLU), QU(10333923911152949397LLU),
- QU( 5265567645757408500LLU), QU(12728041843210352184LLU),
- QU( 6347959327507828759LLU), QU( 154112802625564758LLU),
- QU(18235228308679780218LLU), QU( 3253805274673352418LLU),
- QU( 4849171610689031197LLU), QU(17948529398340432518LLU),
- QU(13803510475637409167LLU), QU(13506570190409883095LLU),
- QU(15870801273282960805LLU), QU( 8451286481299170773LLU),
- QU( 9562190620034457541LLU), QU( 8518905387449138364LLU),
- QU(12681306401363385655LLU), QU( 3788073690559762558LLU),
- QU( 5256820289573487769LLU), QU( 2752021372314875467LLU),
- QU( 6354035166862520716LLU), QU( 4328956378309739069LLU),
- QU( 449087441228269600LLU), QU( 5533508742653090868LLU),
- QU( 1260389420404746988LLU), QU(18175394473289055097LLU),
- QU( 1535467109660399420LLU), QU( 8818894282874061442LLU),
- QU(12140873243824811213LLU), QU(15031386653823014946LLU),
- QU( 1286028221456149232LLU), QU( 6329608889367858784LLU),
- QU( 9419654354945132725LLU), QU( 6094576547061672379LLU),
- QU(17706217251847450255LLU), QU( 1733495073065878126LLU),
- QU(16918923754607552663LLU), QU( 8881949849954945044LLU),
- QU(12938977706896313891LLU), QU(14043628638299793407LLU),
- QU(18393874581723718233LLU), QU( 6886318534846892044LLU),
- QU(14577870878038334081LLU), QU(13541558383439414119LLU),
- QU(13570472158807588273LLU), QU(18300760537910283361LLU),
- QU( 818368572800609205LLU), QU( 1417000585112573219LLU),
- QU(12337533143867683655LLU), QU(12433180994702314480LLU),
- QU( 778190005829189083LLU), QU(13667356216206524711LLU),
- QU( 9866149895295225230LLU), QU(11043240490417111999LLU),
- QU( 1123933826541378598LLU), QU( 6469631933605123610LLU),
- QU(14508554074431980040LLU), QU(13918931242962026714LLU),
- QU( 2870785929342348285LLU), QU(14786362626740736974LLU),
- QU(13176680060902695786LLU), QU( 9591778613541679456LLU),
- QU( 9097662885117436706LLU), QU( 749262234240924947LLU),
- QU( 1944844067793307093LLU), QU( 4339214904577487742LLU),
- QU( 8009584152961946551LLU), QU(16073159501225501777LLU),
- QU( 3335870590499306217LLU), QU(17088312653151202847LLU),
- QU( 3108893142681931848LLU), QU(16636841767202792021LLU),
- QU(10423316431118400637LLU), QU( 8008357368674443506LLU),
- QU(11340015231914677875LLU), QU(17687896501594936090LLU),
- QU(15173627921763199958LLU), QU( 542569482243721959LLU),
- QU(15071714982769812975LLU), QU( 4466624872151386956LLU),
- QU( 1901780715602332461LLU), QU( 9822227742154351098LLU),
- QU( 1479332892928648780LLU), QU( 6981611948382474400LLU),
- QU( 7620824924456077376LLU), QU(14095973329429406782LLU),
- QU( 7902744005696185404LLU), QU(15830577219375036920LLU),
- QU(10287076667317764416LLU), QU(12334872764071724025LLU),
- QU( 4419302088133544331LLU), QU(14455842851266090520LLU),
- QU(12488077416504654222LLU), QU( 7953892017701886766LLU),
- QU( 6331484925529519007LLU), QU( 4902145853785030022LLU),
- QU(17010159216096443073LLU), QU(11945354668653886087LLU),
- QU(15112022728645230829LLU), QU(17363484484522986742LLU),
- QU( 4423497825896692887LLU), QU( 8155489510809067471LLU),
- QU( 258966605622576285LLU), QU( 5462958075742020534LLU),
- QU( 6763710214913276228LLU), QU( 2368935183451109054LLU),
- QU(14209506165246453811LLU), QU( 2646257040978514881LLU),
- QU( 3776001911922207672LLU), QU( 1419304601390147631LLU),
- QU(14987366598022458284LLU), QU( 3977770701065815721LLU),
- QU( 730820417451838898LLU), QU( 3982991703612885327LLU),
- QU( 2803544519671388477LLU), QU(17067667221114424649LLU),
- QU( 2922555119737867166LLU), QU( 1989477584121460932LLU),
- QU(15020387605892337354LLU), QU( 9293277796427533547LLU),
- QU(10722181424063557247LLU), QU(16704542332047511651LLU),
- QU( 5008286236142089514LLU), QU(16174732308747382540LLU),
- QU(17597019485798338402LLU), QU(13081745199110622093LLU),
- QU( 8850305883842258115LLU), QU(12723629125624589005LLU),
- QU( 8140566453402805978LLU), QU(15356684607680935061LLU),
- QU(14222190387342648650LLU), QU(11134610460665975178LLU),
- QU( 1259799058620984266LLU), QU(13281656268025610041LLU),
- QU( 298262561068153992LLU), QU(12277871700239212922LLU),
- QU(13911297774719779438LLU), QU(16556727962761474934LLU),
- QU(17903010316654728010LLU), QU( 9682617699648434744LLU),
- QU(14757681836838592850LLU), QU( 1327242446558524473LLU),
- QU(11126645098780572792LLU), QU( 1883602329313221774LLU),
- QU( 2543897783922776873LLU), QU(15029168513767772842LLU),
- QU(12710270651039129878LLU), QU(16118202956069604504LLU),
- QU(15010759372168680524LLU), QU( 2296827082251923948LLU),
- QU(10793729742623518101LLU), QU(13829764151845413046LLU),
- QU(17769301223184451213LLU), QU( 3118268169210783372LLU),
- QU(17626204544105123127LLU), QU( 7416718488974352644LLU),
- QU(10450751996212925994LLU), QU( 9352529519128770586LLU),
- QU( 259347569641110140LLU), QU( 8048588892269692697LLU),
- QU( 1774414152306494058LLU), QU(10669548347214355622LLU),
- QU(13061992253816795081LLU), QU(18432677803063861659LLU),
- QU( 8879191055593984333LLU), QU(12433753195199268041LLU),
- QU(14919392415439730602LLU), QU( 6612848378595332963LLU),
- QU( 6320986812036143628LLU), QU(10465592420226092859LLU),
- QU( 4196009278962570808LLU), QU( 3747816564473572224LLU),
- QU(17941203486133732898LLU), QU( 2350310037040505198LLU),
- QU( 5811779859134370113LLU), QU(10492109599506195126LLU),
- QU( 7699650690179541274LLU), QU( 1954338494306022961LLU),
- QU(14095816969027231152LLU), QU( 5841346919964852061LLU),
- QU(14945969510148214735LLU), QU( 3680200305887550992LLU),
- QU( 6218047466131695792LLU), QU( 8242165745175775096LLU),
- QU(11021371934053307357LLU), QU( 1265099502753169797LLU),
- QU( 4644347436111321718LLU), QU( 3609296916782832859LLU),
- QU( 8109807992218521571LLU), QU(18387884215648662020LLU),
- QU(14656324896296392902LLU), QU(17386819091238216751LLU),
- QU(17788300878582317152LLU), QU( 7919446259742399591LLU),
- QU( 4466613134576358004LLU), QU(12928181023667938509LLU),
- QU(13147446154454932030LLU), QU(16552129038252734620LLU),
- QU( 8395299403738822450LLU), QU(11313817655275361164LLU),
- QU( 434258809499511718LLU), QU( 2074882104954788676LLU),
- QU( 7929892178759395518LLU), QU( 9006461629105745388LLU),
- QU( 5176475650000323086LLU), QU(11128357033468341069LLU),
- QU(12026158851559118955LLU), QU(14699716249471156500LLU),
- QU( 448982497120206757LLU), QU( 4156475356685519900LLU),
- QU( 6063816103417215727LLU), QU(10073289387954971479LLU),
- QU( 8174466846138590962LLU), QU( 2675777452363449006LLU),
- QU( 9090685420572474281LLU), QU( 6659652652765562060LLU),
- QU(12923120304018106621LLU), QU(11117480560334526775LLU),
- QU( 937910473424587511LLU), QU( 1838692113502346645LLU),
- QU(11133914074648726180LLU), QU( 7922600945143884053LLU),
- QU(13435287702700959550LLU), QU( 5287964921251123332LLU),
- QU(11354875374575318947LLU), QU(17955724760748238133LLU),
- QU(13728617396297106512LLU), QU( 4107449660118101255LLU),
- QU( 1210269794886589623LLU), QU(11408687205733456282LLU),
- QU( 4538354710392677887LLU), QU(13566803319341319267LLU),
- QU(17870798107734050771LLU), QU( 3354318982568089135LLU),
- QU( 9034450839405133651LLU), QU(13087431795753424314LLU),
- QU( 950333102820688239LLU), QU( 1968360654535604116LLU),
- QU(16840551645563314995LLU), QU( 8867501803892924995LLU),
- QU(11395388644490626845LLU), QU( 1529815836300732204LLU),
- QU(13330848522996608842LLU), QU( 1813432878817504265LLU),
- QU( 2336867432693429560LLU), QU(15192805445973385902LLU),
- QU( 2528593071076407877LLU), QU( 128459777936689248LLU),
- QU( 9976345382867214866LLU), QU( 6208885766767996043LLU),
- QU(14982349522273141706LLU), QU( 3099654362410737822LLU),
- QU(13776700761947297661LLU), QU( 8806185470684925550LLU),
- QU( 8151717890410585321LLU), QU( 640860591588072925LLU),
- QU(14592096303937307465LLU), QU( 9056472419613564846LLU),
- QU(14861544647742266352LLU), QU(12703771500398470216LLU),
- QU( 3142372800384138465LLU), QU( 6201105606917248196LLU),
- QU(18337516409359270184LLU), QU(15042268695665115339LLU),
- QU(15188246541383283846LLU), QU(12800028693090114519LLU),
- QU( 5992859621101493472LLU), QU(18278043971816803521LLU),
- QU( 9002773075219424560LLU), QU( 7325707116943598353LLU),
- QU( 7930571931248040822LLU), QU( 5645275869617023448LLU),
- QU( 7266107455295958487LLU), QU( 4363664528273524411LLU),
- QU(14313875763787479809LLU), QU(17059695613553486802LLU),
- QU( 9247761425889940932LLU), QU(13704726459237593128LLU),
- QU( 2701312427328909832LLU), QU(17235532008287243115LLU),
- QU(14093147761491729538LLU), QU( 6247352273768386516LLU),
- QU( 8268710048153268415LLU), QU( 7985295214477182083LLU),
- QU(15624495190888896807LLU), QU( 3772753430045262788LLU),
- QU( 9133991620474991698LLU), QU( 5665791943316256028LLU),
- QU( 7551996832462193473LLU), QU(13163729206798953877LLU),
- QU( 9263532074153846374LLU), QU( 1015460703698618353LLU),
- QU(17929874696989519390LLU), QU(18257884721466153847LLU),
- QU(16271867543011222991LLU), QU( 3905971519021791941LLU),
- QU(16814488397137052085LLU), QU( 1321197685504621613LLU),
- QU( 2870359191894002181LLU), QU(14317282970323395450LLU),
- QU(13663920845511074366LLU), QU( 2052463995796539594LLU),
- QU(14126345686431444337LLU), QU( 1727572121947022534LLU),
- QU(17793552254485594241LLU), QU( 6738857418849205750LLU),
- QU( 1282987123157442952LLU), QU(16655480021581159251LLU),
- QU( 6784587032080183866LLU), QU(14726758805359965162LLU),
- QU( 7577995933961987349LLU), QU(12539609320311114036LLU),
- QU(10789773033385439494LLU), QU( 8517001497411158227LLU),
- QU(10075543932136339710LLU), QU(14838152340938811081LLU),
- QU( 9560840631794044194LLU), QU(17445736541454117475LLU),
- QU(10633026464336393186LLU), QU(15705729708242246293LLU),
- QU( 1117517596891411098LLU), QU( 4305657943415886942LLU),
- QU( 4948856840533979263LLU), QU(16071681989041789593LLU),
- QU(13723031429272486527LLU), QU( 7639567622306509462LLU),
- QU(12670424537483090390LLU), QU( 9715223453097197134LLU),
- QU( 5457173389992686394LLU), QU( 289857129276135145LLU),
- QU(17048610270521972512LLU), QU( 692768013309835485LLU),
- QU(14823232360546632057LLU), QU(18218002361317895936LLU),
- QU( 3281724260212650204LLU), QU(16453957266549513795LLU),
- QU( 8592711109774511881LLU), QU( 929825123473369579LLU),
- QU(15966784769764367791LLU), QU( 9627344291450607588LLU),
- QU(10849555504977813287LLU), QU( 9234566913936339275LLU),
- QU( 6413807690366911210LLU), QU(10862389016184219267LLU),
- QU(13842504799335374048LLU), QU( 1531994113376881174LLU),
- QU( 2081314867544364459LLU), QU(16430628791616959932LLU),
- QU( 8314714038654394368LLU), QU( 9155473892098431813LLU),
- QU(12577843786670475704LLU), QU( 4399161106452401017LLU),
- QU( 1668083091682623186LLU), QU( 1741383777203714216LLU),
- QU( 2162597285417794374LLU), QU(15841980159165218736LLU),
- QU( 1971354603551467079LLU), QU( 1206714764913205968LLU),
- QU( 4790860439591272330LLU), QU(14699375615594055799LLU),
- QU( 8374423871657449988LLU), QU(10950685736472937738LLU),
- QU( 697344331343267176LLU), QU(10084998763118059810LLU),
- QU(12897369539795983124LLU), QU(12351260292144383605LLU),
- QU( 1268810970176811234LLU), QU( 7406287800414582768LLU),
- QU( 516169557043807831LLU), QU( 5077568278710520380LLU),
- QU( 3828791738309039304LLU), QU( 7721974069946943610LLU),
- QU( 3534670260981096460LLU), QU( 4865792189600584891LLU),
- QU(16892578493734337298LLU), QU( 9161499464278042590LLU),
- QU(11976149624067055931LLU), QU(13219479887277343990LLU),
- QU(14161556738111500680LLU), QU(14670715255011223056LLU),
- QU( 4671205678403576558LLU), QU(12633022931454259781LLU),
- QU(14821376219869187646LLU), QU( 751181776484317028LLU),
- QU( 2192211308839047070LLU), QU(11787306362361245189LLU),
- QU(10672375120744095707LLU), QU( 4601972328345244467LLU),
- QU(15457217788831125879LLU), QU( 8464345256775460809LLU),
- QU(10191938789487159478LLU), QU( 6184348739615197613LLU),
- QU(11425436778806882100LLU), QU( 2739227089124319793LLU),
- QU( 461464518456000551LLU), QU( 4689850170029177442LLU),
- QU( 6120307814374078625LLU), QU(11153579230681708671LLU),
- QU( 7891721473905347926LLU), QU(10281646937824872400LLU),
- QU( 3026099648191332248LLU), QU( 8666750296953273818LLU),
- QU(14978499698844363232LLU), QU(13303395102890132065LLU),
- QU( 8182358205292864080LLU), QU(10560547713972971291LLU),
- QU(11981635489418959093LLU), QU( 3134621354935288409LLU),
- QU(11580681977404383968LLU), QU(14205530317404088650LLU),
- QU( 5997789011854923157LLU), QU(13659151593432238041LLU),
- QU(11664332114338865086LLU), QU( 7490351383220929386LLU),
- QU( 7189290499881530378LLU), QU(15039262734271020220LLU),
- QU( 2057217285976980055LLU), QU( 555570804905355739LLU),
- QU(11235311968348555110LLU), QU(13824557146269603217LLU),
- QU(16906788840653099693LLU), QU( 7222878245455661677LLU),
- QU( 5245139444332423756LLU), QU( 4723748462805674292LLU),
- QU(12216509815698568612LLU), QU(17402362976648951187LLU),
- QU(17389614836810366768LLU), QU( 4880936484146667711LLU),
- QU( 9085007839292639880LLU), QU(13837353458498535449LLU),
- QU(11914419854360366677LLU), QU(16595890135313864103LLU),
- QU( 6313969847197627222LLU), QU(18296909792163910431LLU),
- QU(10041780113382084042LLU), QU( 2499478551172884794LLU),
- QU(11057894246241189489LLU), QU( 9742243032389068555LLU),
- QU(12838934582673196228LLU), QU(13437023235248490367LLU),
- QU(13372420669446163240LLU), QU( 6752564244716909224LLU),
- QU( 7157333073400313737LLU), QU(12230281516370654308LLU),
- QU( 1182884552219419117LLU), QU( 2955125381312499218LLU),
- QU(10308827097079443249LLU), QU( 1337648572986534958LLU),
- QU(16378788590020343939LLU), QU( 108619126514420935LLU),
- QU( 3990981009621629188LLU), QU( 5460953070230946410LLU),
- QU( 9703328329366531883LLU), QU(13166631489188077236LLU),
- QU( 1104768831213675170LLU), QU( 3447930458553877908LLU),
- QU( 8067172487769945676LLU), QU( 5445802098190775347LLU),
- QU( 3244840981648973873LLU), QU(17314668322981950060LLU),
- QU( 5006812527827763807LLU), QU(18158695070225526260LLU),
- QU( 2824536478852417853LLU), QU(13974775809127519886LLU),
- QU( 9814362769074067392LLU), QU(17276205156374862128LLU),
- QU(11361680725379306967LLU), QU( 3422581970382012542LLU),
- QU(11003189603753241266LLU), QU(11194292945277862261LLU),
- QU( 6839623313908521348LLU), QU(11935326462707324634LLU),
- QU( 1611456788685878444LLU), QU(13112620989475558907LLU),
- QU( 517659108904450427LLU), QU(13558114318574407624LLU),
- QU(15699089742731633077LLU), QU( 4988979278862685458LLU),
- QU( 8111373583056521297LLU), QU( 3891258746615399627LLU),
- QU( 8137298251469718086LLU), QU(12748663295624701649LLU),
- QU( 4389835683495292062LLU), QU( 5775217872128831729LLU),
- QU( 9462091896405534927LLU), QU( 8498124108820263989LLU),
- QU( 8059131278842839525LLU), QU(10503167994254090892LLU),
- QU(11613153541070396656LLU), QU(18069248738504647790LLU),
- QU( 570657419109768508LLU), QU( 3950574167771159665LLU),
- QU( 5514655599604313077LLU), QU( 2908460854428484165LLU),
- QU(10777722615935663114LLU), QU(12007363304839279486LLU),
- QU( 9800646187569484767LLU), QU( 8795423564889864287LLU),
- QU(14257396680131028419LLU), QU( 6405465117315096498LLU),
- QU( 7939411072208774878LLU), QU(17577572378528990006LLU),
- QU(14785873806715994850LLU), QU(16770572680854747390LLU),
- QU(18127549474419396481LLU), QU(11637013449455757750LLU),
- QU(14371851933996761086LLU), QU( 3601181063650110280LLU),
- QU( 4126442845019316144LLU), QU(10198287239244320669LLU),
- QU(18000169628555379659LLU), QU(18392482400739978269LLU),
- QU( 6219919037686919957LLU), QU( 3610085377719446052LLU),
- QU( 2513925039981776336LLU), QU(16679413537926716955LLU),
- QU(12903302131714909434LLU), QU( 5581145789762985009LLU),
- QU(12325955044293303233LLU), QU(17216111180742141204LLU),
- QU( 6321919595276545740LLU), QU( 3507521147216174501LLU),
- QU( 9659194593319481840LLU), QU(11473976005975358326LLU),
- QU(14742730101435987026LLU), QU( 492845897709954780LLU),
- QU(16976371186162599676LLU), QU(17712703422837648655LLU),
- QU( 9881254778587061697LLU), QU( 8413223156302299551LLU),
- QU( 1563841828254089168LLU), QU( 9996032758786671975LLU),
- QU( 138877700583772667LLU), QU(13003043368574995989LLU),
- QU( 4390573668650456587LLU), QU( 8610287390568126755LLU),
- QU(15126904974266642199LLU), QU( 6703637238986057662LLU),
- QU( 2873075592956810157LLU), QU( 6035080933946049418LLU),
- QU(13382846581202353014LLU), QU( 7303971031814642463LLU),
- QU(18418024405307444267LLU), QU( 5847096731675404647LLU),
- QU( 4035880699639842500LLU), QU(11525348625112218478LLU),
- QU( 3041162365459574102LLU), QU( 2604734487727986558LLU),
- QU(15526341771636983145LLU), QU(14556052310697370254LLU),
- QU(12997787077930808155LLU), QU( 9601806501755554499LLU),
- QU(11349677952521423389LLU), QU(14956777807644899350LLU),
- QU(16559736957742852721LLU), QU(12360828274778140726LLU),
- QU( 6685373272009662513LLU), QU(16932258748055324130LLU),
- QU(15918051131954158508LLU), QU( 1692312913140790144LLU),
- QU( 546653826801637367LLU), QU( 5341587076045986652LLU),
- QU(14975057236342585662LLU), QU(12374976357340622412LLU),
- QU(10328833995181940552LLU), QU(12831807101710443149LLU),
- QU(10548514914382545716LLU), QU( 2217806727199715993LLU),
- QU(12627067369242845138LLU), QU( 4598965364035438158LLU),
- QU( 150923352751318171LLU), QU(14274109544442257283LLU),
- QU( 4696661475093863031LLU), QU( 1505764114384654516LLU),
- QU(10699185831891495147LLU), QU( 2392353847713620519LLU),
- QU( 3652870166711788383LLU), QU( 8640653276221911108LLU),
- QU( 3894077592275889704LLU), QU( 4918592872135964845LLU),
- QU(16379121273281400789LLU), QU(12058465483591683656LLU),
- QU(11250106829302924945LLU), QU( 1147537556296983005LLU),
- QU( 6376342756004613268LLU), QU(14967128191709280506LLU),
- QU(18007449949790627628LLU), QU( 9497178279316537841LLU),
- QU( 7920174844809394893LLU), QU(10037752595255719907LLU),
- QU(15875342784985217697LLU), QU(15311615921712850696LLU),
- QU( 9552902652110992950LLU), QU(14054979450099721140LLU),
- QU( 5998709773566417349LLU), QU(18027910339276320187LLU),
- QU( 8223099053868585554LLU), QU( 7842270354824999767LLU),
- QU( 4896315688770080292LLU), QU(12969320296569787895LLU),
- QU( 2674321489185759961LLU), QU( 4053615936864718439LLU),
- QU(11349775270588617578LLU), QU( 4743019256284553975LLU),
- QU( 5602100217469723769LLU), QU(14398995691411527813LLU),
- QU( 7412170493796825470LLU), QU( 836262406131744846LLU),
- QU( 8231086633845153022LLU), QU( 5161377920438552287LLU),
- QU( 8828731196169924949LLU), QU(16211142246465502680LLU),
- QU( 3307990879253687818LLU), QU( 5193405406899782022LLU),
- QU( 8510842117467566693LLU), QU( 6070955181022405365LLU),
- QU(14482950231361409799LLU), QU(12585159371331138077LLU),
- QU( 3511537678933588148LLU), QU( 2041849474531116417LLU),
- QU(10944936685095345792LLU), QU(18303116923079107729LLU),
- QU( 2720566371239725320LLU), QU( 4958672473562397622LLU),
- QU( 3032326668253243412LLU), QU(13689418691726908338LLU),
- QU( 1895205511728843996LLU), QU( 8146303515271990527LLU),
- QU(16507343500056113480LLU), QU( 473996939105902919LLU),
- QU( 9897686885246881481LLU), QU(14606433762712790575LLU),
- QU( 6732796251605566368LLU), QU( 1399778120855368916LLU),
- QU( 935023885182833777LLU), QU(16066282816186753477LLU),
- QU( 7291270991820612055LLU), QU(17530230393129853844LLU),
- QU(10223493623477451366LLU), QU(15841725630495676683LLU),
- QU(17379567246435515824LLU), QU( 8588251429375561971LLU),
- QU(18339511210887206423LLU), QU(17349587430725976100LLU),
- QU(12244876521394838088LLU), QU( 6382187714147161259LLU),
- QU(12335807181848950831LLU), QU(16948885622305460665LLU),
- QU(13755097796371520506LLU), QU(14806740373324947801LLU),
- QU( 4828699633859287703LLU), QU( 8209879281452301604LLU),
- QU(12435716669553736437LLU), QU(13970976859588452131LLU),
- QU( 6233960842566773148LLU), QU(12507096267900505759LLU),
- QU( 1198713114381279421LLU), QU(14989862731124149015LLU),
- QU(15932189508707978949LLU), QU( 2526406641432708722LLU),
- QU( 29187427817271982LLU), QU( 1499802773054556353LLU),
- QU(10816638187021897173LLU), QU( 5436139270839738132LLU),
- QU( 6659882287036010082LLU), QU( 2154048955317173697LLU),
- QU(10887317019333757642LLU), QU(16281091802634424955LLU),
- QU(10754549879915384901LLU), QU(10760611745769249815LLU),
- QU( 2161505946972504002LLU), QU( 5243132808986265107LLU),
- QU(10129852179873415416LLU), QU( 710339480008649081LLU),
- QU( 7802129453068808528LLU), QU(17967213567178907213LLU),
- QU(15730859124668605599LLU), QU(13058356168962376502LLU),
- QU( 3701224985413645909LLU), QU(14464065869149109264LLU),
- QU( 9959272418844311646LLU), QU(10157426099515958752LLU),
- QU(14013736814538268528LLU), QU(17797456992065653951LLU),
- QU(17418878140257344806LLU), QU(15457429073540561521LLU),
- QU( 2184426881360949378LLU), QU( 2062193041154712416LLU),
- QU( 8553463347406931661LLU), QU( 4913057625202871854LLU),
- QU( 2668943682126618425LLU), QU(17064444737891172288LLU),
- QU( 4997115903913298637LLU), QU(12019402608892327416LLU),
- QU(17603584559765897352LLU), QU(11367529582073647975LLU),
- QU( 8211476043518436050LLU), QU( 8676849804070323674LLU),
- QU(18431829230394475730LLU), QU(10490177861361247904LLU),
- QU( 9508720602025651349LLU), QU( 7409627448555722700LLU),
- QU( 5804047018862729008LLU), QU(11943858176893142594LLU),
- QU(11908095418933847092LLU), QU( 5415449345715887652LLU),
- QU( 1554022699166156407LLU), QU( 9073322106406017161LLU),
- QU( 7080630967969047082LLU), QU(18049736940860732943LLU),
- QU(12748714242594196794LLU), QU( 1226992415735156741LLU),
- QU(17900981019609531193LLU), QU(11720739744008710999LLU),
- QU( 3006400683394775434LLU), QU(11347974011751996028LLU),
- QU( 3316999628257954608LLU), QU( 8384484563557639101LLU),
- QU(18117794685961729767LLU), QU( 1900145025596618194LLU),
- QU(17459527840632892676LLU), QU( 5634784101865710994LLU),
- QU( 7918619300292897158LLU), QU( 3146577625026301350LLU),
- QU( 9955212856499068767LLU), QU( 1873995843681746975LLU),
- QU( 1561487759967972194LLU), QU( 8322718804375878474LLU),
- QU(11300284215327028366LLU), QU( 4667391032508998982LLU),
- QU( 9820104494306625580LLU), QU(17922397968599970610LLU),
- QU( 1784690461886786712LLU), QU(14940365084341346821LLU),
- QU( 5348719575594186181LLU), QU(10720419084507855261LLU),
- QU(14210394354145143274LLU), QU( 2426468692164000131LLU),
- QU(16271062114607059202LLU), QU(14851904092357070247LLU),
- QU( 6524493015693121897LLU), QU( 9825473835127138531LLU),
- QU(14222500616268569578LLU), QU(15521484052007487468LLU),
- QU(14462579404124614699LLU), QU(11012375590820665520LLU),
- QU(11625327350536084927LLU), QU(14452017765243785417LLU),
- QU( 9989342263518766305LLU), QU( 3640105471101803790LLU),
- QU( 4749866455897513242LLU), QU(13963064946736312044LLU),
- QU(10007416591973223791LLU), QU(18314132234717431115LLU),
- QU( 3286596588617483450LLU), QU( 7726163455370818765LLU),
- QU( 7575454721115379328LLU), QU( 5308331576437663422LLU),
- QU(18288821894903530934LLU), QU( 8028405805410554106LLU),
- QU(15744019832103296628LLU), QU( 149765559630932100LLU),
- QU( 6137705557200071977LLU), QU(14513416315434803615LLU),
- QU(11665702820128984473LLU), QU( 218926670505601386LLU),
- QU( 6868675028717769519LLU), QU(15282016569441512302LLU),
- QU( 5707000497782960236LLU), QU( 6671120586555079567LLU),
- QU( 2194098052618985448LLU), QU(16849577895477330978LLU),
- QU(12957148471017466283LLU), QU( 1997805535404859393LLU),
- QU( 1180721060263860490LLU), QU(13206391310193756958LLU),
- QU(12980208674461861797LLU), QU( 3825967775058875366LLU),
- QU(17543433670782042631LLU), QU( 1518339070120322730LLU),
- QU(16344584340890991669LLU), QU( 2611327165318529819LLU),
- QU(11265022723283422529LLU), QU( 4001552800373196817LLU),
- QU(14509595890079346161LLU), QU( 3528717165416234562LLU),
- QU(18153222571501914072LLU), QU( 9387182977209744425LLU),
- QU(10064342315985580021LLU), QU(11373678413215253977LLU),
- QU( 2308457853228798099LLU), QU( 9729042942839545302LLU),
- QU( 7833785471140127746LLU), QU( 6351049900319844436LLU),
- QU(14454610627133496067LLU), QU(12533175683634819111LLU),
- QU(15570163926716513029LLU), QU(13356980519185762498LLU)
+ KQU( 2100341266307895239), KQU( 8344256300489757943),
+ KQU(15687933285484243894), KQU( 8268620370277076319),
+ KQU(12371852309826545459), KQU( 8800491541730110238),
+ KQU(18113268950100835773), KQU( 2886823658884438119),
+ KQU( 3293667307248180724), KQU( 9307928143300172731),
+ KQU( 7688082017574293629), KQU( 900986224735166665),
+ KQU( 9977972710722265039), KQU( 6008205004994830552),
+ KQU( 546909104521689292), KQU( 7428471521869107594),
+ KQU(14777563419314721179), KQU(16116143076567350053),
+ KQU( 5322685342003142329), KQU( 4200427048445863473),
+ KQU( 4693092150132559146), KQU(13671425863759338582),
+ KQU( 6747117460737639916), KQU( 4732666080236551150),
+ KQU( 5912839950611941263), KQU( 3903717554504704909),
+ KQU( 2615667650256786818), KQU(10844129913887006352),
+ KQU(13786467861810997820), KQU(14267853002994021570),
+ KQU(13767807302847237439), KQU(16407963253707224617),
+ KQU( 4802498363698583497), KQU( 2523802839317209764),
+ KQU( 3822579397797475589), KQU( 8950320572212130610),
+ KQU( 3745623504978342534), KQU(16092609066068482806),
+ KQU( 9817016950274642398), KQU(10591660660323829098),
+ KQU(11751606650792815920), KQU( 5122873818577122211),
+ KQU(17209553764913936624), KQU( 6249057709284380343),
+ KQU(15088791264695071830), KQU(15344673071709851930),
+ KQU( 4345751415293646084), KQU( 2542865750703067928),
+ KQU(13520525127852368784), KQU(18294188662880997241),
+ KQU( 3871781938044881523), KQU( 2873487268122812184),
+ KQU(15099676759482679005), KQU(15442599127239350490),
+ KQU( 6311893274367710888), KQU( 3286118760484672933),
+ KQU( 4146067961333542189), KQU(13303942567897208770),
+ KQU( 8196013722255630418), KQU( 4437815439340979989),
+ KQU(15433791533450605135), KQU( 4254828956815687049),
+ KQU( 1310903207708286015), KQU(10529182764462398549),
+ KQU(14900231311660638810), KQU( 9727017277104609793),
+ KQU( 1821308310948199033), KQU(11628861435066772084),
+ KQU( 9469019138491546924), KQU( 3145812670532604988),
+ KQU( 9938468915045491919), KQU( 1562447430672662142),
+ KQU(13963995266697989134), KQU( 3356884357625028695),
+ KQU( 4499850304584309747), KQU( 8456825817023658122),
+ KQU(10859039922814285279), KQU( 8099512337972526555),
+ KQU( 348006375109672149), KQU(11919893998241688603),
+ KQU( 1104199577402948826), KQU(16689191854356060289),
+ KQU(10992552041730168078), KQU( 7243733172705465836),
+ KQU( 5668075606180319560), KQU(18182847037333286970),
+ KQU( 4290215357664631322), KQU( 4061414220791828613),
+ KQU(13006291061652989604), KQU( 7140491178917128798),
+ KQU(12703446217663283481), KQU( 5500220597564558267),
+ KQU(10330551509971296358), KQU(15958554768648714492),
+ KQU( 5174555954515360045), KQU( 1731318837687577735),
+ KQU( 3557700801048354857), KQU(13764012341928616198),
+ KQU(13115166194379119043), KQU( 7989321021560255519),
+ KQU( 2103584280905877040), KQU( 9230788662155228488),
+ KQU(16396629323325547654), KQU( 657926409811318051),
+ KQU(15046700264391400727), KQU( 5120132858771880830),
+ KQU( 7934160097989028561), KQU( 6963121488531976245),
+ KQU(17412329602621742089), KQU(15144843053931774092),
+ KQU(17204176651763054532), KQU(13166595387554065870),
+ KQU( 8590377810513960213), KQU( 5834365135373991938),
+ KQU( 7640913007182226243), KQU( 3479394703859418425),
+ KQU(16402784452644521040), KQU( 4993979809687083980),
+ KQU(13254522168097688865), KQU(15643659095244365219),
+ KQU( 5881437660538424982), KQU(11174892200618987379),
+ KQU( 254409966159711077), KQU(17158413043140549909),
+ KQU( 3638048789290376272), KQU( 1376816930299489190),
+ KQU( 4622462095217761923), KQU(15086407973010263515),
+ KQU(13253971772784692238), KQU( 5270549043541649236),
+ KQU(11182714186805411604), KQU(12283846437495577140),
+ KQU( 5297647149908953219), KQU(10047451738316836654),
+ KQU( 4938228100367874746), KQU(12328523025304077923),
+ KQU( 3601049438595312361), KQU( 9313624118352733770),
+ KQU(13322966086117661798), KQU(16660005705644029394),
+ KQU(11337677526988872373), KQU(13869299102574417795),
+ KQU(15642043183045645437), KQU( 3021755569085880019),
+ KQU( 4979741767761188161), KQU(13679979092079279587),
+ KQU( 3344685842861071743), KQU(13947960059899588104),
+ KQU( 305806934293368007), KQU( 5749173929201650029),
+ KQU(11123724852118844098), KQU(15128987688788879802),
+ KQU(15251651211024665009), KQU( 7689925933816577776),
+ KQU(16732804392695859449), KQU(17087345401014078468),
+ KQU(14315108589159048871), KQU( 4820700266619778917),
+ KQU(16709637539357958441), KQU( 4936227875177351374),
+ KQU( 2137907697912987247), KQU(11628565601408395420),
+ KQU( 2333250549241556786), KQU( 5711200379577778637),
+ KQU( 5170680131529031729), KQU(12620392043061335164),
+ KQU( 95363390101096078), KQU( 5487981914081709462),
+ KQU( 1763109823981838620), KQU( 3395861271473224396),
+ KQU( 1300496844282213595), KQU( 6894316212820232902),
+ KQU(10673859651135576674), KQU( 5911839658857903252),
+ KQU(17407110743387299102), KQU( 8257427154623140385),
+ KQU(11389003026741800267), KQU( 4070043211095013717),
+ KQU(11663806997145259025), KQU(15265598950648798210),
+ KQU( 630585789434030934), KQU( 3524446529213587334),
+ KQU( 7186424168495184211), KQU(10806585451386379021),
+ KQU(11120017753500499273), KQU( 1586837651387701301),
+ KQU(17530454400954415544), KQU( 9991670045077880430),
+ KQU( 7550997268990730180), KQU( 8640249196597379304),
+ KQU( 3522203892786893823), KQU(10401116549878854788),
+ KQU(13690285544733124852), KQU( 8295785675455774586),
+ KQU(15535716172155117603), KQU( 3112108583723722511),
+ KQU(17633179955339271113), KQU(18154208056063759375),
+ KQU( 1866409236285815666), KQU(13326075895396412882),
+ KQU( 8756261842948020025), KQU( 6281852999868439131),
+ KQU(15087653361275292858), KQU(10333923911152949397),
+ KQU( 5265567645757408500), KQU(12728041843210352184),
+ KQU( 6347959327507828759), KQU( 154112802625564758),
+ KQU(18235228308679780218), KQU( 3253805274673352418),
+ KQU( 4849171610689031197), KQU(17948529398340432518),
+ KQU(13803510475637409167), KQU(13506570190409883095),
+ KQU(15870801273282960805), KQU( 8451286481299170773),
+ KQU( 9562190620034457541), KQU( 8518905387449138364),
+ KQU(12681306401363385655), KQU( 3788073690559762558),
+ KQU( 5256820289573487769), KQU( 2752021372314875467),
+ KQU( 6354035166862520716), KQU( 4328956378309739069),
+ KQU( 449087441228269600), KQU( 5533508742653090868),
+ KQU( 1260389420404746988), KQU(18175394473289055097),
+ KQU( 1535467109660399420), KQU( 8818894282874061442),
+ KQU(12140873243824811213), KQU(15031386653823014946),
+ KQU( 1286028221456149232), KQU( 6329608889367858784),
+ KQU( 9419654354945132725), KQU( 6094576547061672379),
+ KQU(17706217251847450255), KQU( 1733495073065878126),
+ KQU(16918923754607552663), KQU( 8881949849954945044),
+ KQU(12938977706896313891), KQU(14043628638299793407),
+ KQU(18393874581723718233), KQU( 6886318534846892044),
+ KQU(14577870878038334081), KQU(13541558383439414119),
+ KQU(13570472158807588273), KQU(18300760537910283361),
+ KQU( 818368572800609205), KQU( 1417000585112573219),
+ KQU(12337533143867683655), KQU(12433180994702314480),
+ KQU( 778190005829189083), KQU(13667356216206524711),
+ KQU( 9866149895295225230), KQU(11043240490417111999),
+ KQU( 1123933826541378598), KQU( 6469631933605123610),
+ KQU(14508554074431980040), KQU(13918931242962026714),
+ KQU( 2870785929342348285), KQU(14786362626740736974),
+ KQU(13176680060902695786), KQU( 9591778613541679456),
+ KQU( 9097662885117436706), KQU( 749262234240924947),
+ KQU( 1944844067793307093), KQU( 4339214904577487742),
+ KQU( 8009584152961946551), KQU(16073159501225501777),
+ KQU( 3335870590499306217), KQU(17088312653151202847),
+ KQU( 3108893142681931848), KQU(16636841767202792021),
+ KQU(10423316431118400637), KQU( 8008357368674443506),
+ KQU(11340015231914677875), KQU(17687896501594936090),
+ KQU(15173627921763199958), KQU( 542569482243721959),
+ KQU(15071714982769812975), KQU( 4466624872151386956),
+ KQU( 1901780715602332461), KQU( 9822227742154351098),
+ KQU( 1479332892928648780), KQU( 6981611948382474400),
+ KQU( 7620824924456077376), KQU(14095973329429406782),
+ KQU( 7902744005696185404), KQU(15830577219375036920),
+ KQU(10287076667317764416), KQU(12334872764071724025),
+ KQU( 4419302088133544331), KQU(14455842851266090520),
+ KQU(12488077416504654222), KQU( 7953892017701886766),
+ KQU( 6331484925529519007), KQU( 4902145853785030022),
+ KQU(17010159216096443073), KQU(11945354668653886087),
+ KQU(15112022728645230829), KQU(17363484484522986742),
+ KQU( 4423497825896692887), KQU( 8155489510809067471),
+ KQU( 258966605622576285), KQU( 5462958075742020534),
+ KQU( 6763710214913276228), KQU( 2368935183451109054),
+ KQU(14209506165246453811), KQU( 2646257040978514881),
+ KQU( 3776001911922207672), KQU( 1419304601390147631),
+ KQU(14987366598022458284), KQU( 3977770701065815721),
+ KQU( 730820417451838898), KQU( 3982991703612885327),
+ KQU( 2803544519671388477), KQU(17067667221114424649),
+ KQU( 2922555119737867166), KQU( 1989477584121460932),
+ KQU(15020387605892337354), KQU( 9293277796427533547),
+ KQU(10722181424063557247), KQU(16704542332047511651),
+ KQU( 5008286236142089514), KQU(16174732308747382540),
+ KQU(17597019485798338402), KQU(13081745199110622093),
+ KQU( 8850305883842258115), KQU(12723629125624589005),
+ KQU( 8140566453402805978), KQU(15356684607680935061),
+ KQU(14222190387342648650), KQU(11134610460665975178),
+ KQU( 1259799058620984266), KQU(13281656268025610041),
+ KQU( 298262561068153992), KQU(12277871700239212922),
+ KQU(13911297774719779438), KQU(16556727962761474934),
+ KQU(17903010316654728010), KQU( 9682617699648434744),
+ KQU(14757681836838592850), KQU( 1327242446558524473),
+ KQU(11126645098780572792), KQU( 1883602329313221774),
+ KQU( 2543897783922776873), KQU(15029168513767772842),
+ KQU(12710270651039129878), KQU(16118202956069604504),
+ KQU(15010759372168680524), KQU( 2296827082251923948),
+ KQU(10793729742623518101), KQU(13829764151845413046),
+ KQU(17769301223184451213), KQU( 3118268169210783372),
+ KQU(17626204544105123127), KQU( 7416718488974352644),
+ KQU(10450751996212925994), KQU( 9352529519128770586),
+ KQU( 259347569641110140), KQU( 8048588892269692697),
+ KQU( 1774414152306494058), KQU(10669548347214355622),
+ KQU(13061992253816795081), KQU(18432677803063861659),
+ KQU( 8879191055593984333), KQU(12433753195199268041),
+ KQU(14919392415439730602), KQU( 6612848378595332963),
+ KQU( 6320986812036143628), KQU(10465592420226092859),
+ KQU( 4196009278962570808), KQU( 3747816564473572224),
+ KQU(17941203486133732898), KQU( 2350310037040505198),
+ KQU( 5811779859134370113), KQU(10492109599506195126),
+ KQU( 7699650690179541274), KQU( 1954338494306022961),
+ KQU(14095816969027231152), KQU( 5841346919964852061),
+ KQU(14945969510148214735), KQU( 3680200305887550992),
+ KQU( 6218047466131695792), KQU( 8242165745175775096),
+ KQU(11021371934053307357), KQU( 1265099502753169797),
+ KQU( 4644347436111321718), KQU( 3609296916782832859),
+ KQU( 8109807992218521571), KQU(18387884215648662020),
+ KQU(14656324896296392902), KQU(17386819091238216751),
+ KQU(17788300878582317152), KQU( 7919446259742399591),
+ KQU( 4466613134576358004), KQU(12928181023667938509),
+ KQU(13147446154454932030), KQU(16552129038252734620),
+ KQU( 8395299403738822450), KQU(11313817655275361164),
+ KQU( 434258809499511718), KQU( 2074882104954788676),
+ KQU( 7929892178759395518), KQU( 9006461629105745388),
+ KQU( 5176475650000323086), KQU(11128357033468341069),
+ KQU(12026158851559118955), KQU(14699716249471156500),
+ KQU( 448982497120206757), KQU( 4156475356685519900),
+ KQU( 6063816103417215727), KQU(10073289387954971479),
+ KQU( 8174466846138590962), KQU( 2675777452363449006),
+ KQU( 9090685420572474281), KQU( 6659652652765562060),
+ KQU(12923120304018106621), KQU(11117480560334526775),
+ KQU( 937910473424587511), KQU( 1838692113502346645),
+ KQU(11133914074648726180), KQU( 7922600945143884053),
+ KQU(13435287702700959550), KQU( 5287964921251123332),
+ KQU(11354875374575318947), KQU(17955724760748238133),
+ KQU(13728617396297106512), KQU( 4107449660118101255),
+ KQU( 1210269794886589623), KQU(11408687205733456282),
+ KQU( 4538354710392677887), KQU(13566803319341319267),
+ KQU(17870798107734050771), KQU( 3354318982568089135),
+ KQU( 9034450839405133651), KQU(13087431795753424314),
+ KQU( 950333102820688239), KQU( 1968360654535604116),
+ KQU(16840551645563314995), KQU( 8867501803892924995),
+ KQU(11395388644490626845), KQU( 1529815836300732204),
+ KQU(13330848522996608842), KQU( 1813432878817504265),
+ KQU( 2336867432693429560), KQU(15192805445973385902),
+ KQU( 2528593071076407877), KQU( 128459777936689248),
+ KQU( 9976345382867214866), KQU( 6208885766767996043),
+ KQU(14982349522273141706), KQU( 3099654362410737822),
+ KQU(13776700761947297661), KQU( 8806185470684925550),
+ KQU( 8151717890410585321), KQU( 640860591588072925),
+ KQU(14592096303937307465), KQU( 9056472419613564846),
+ KQU(14861544647742266352), KQU(12703771500398470216),
+ KQU( 3142372800384138465), KQU( 6201105606917248196),
+ KQU(18337516409359270184), KQU(15042268695665115339),
+ KQU(15188246541383283846), KQU(12800028693090114519),
+ KQU( 5992859621101493472), KQU(18278043971816803521),
+ KQU( 9002773075219424560), KQU( 7325707116943598353),
+ KQU( 7930571931248040822), KQU( 5645275869617023448),
+ KQU( 7266107455295958487), KQU( 4363664528273524411),
+ KQU(14313875763787479809), KQU(17059695613553486802),
+ KQU( 9247761425889940932), KQU(13704726459237593128),
+ KQU( 2701312427328909832), KQU(17235532008287243115),
+ KQU(14093147761491729538), KQU( 6247352273768386516),
+ KQU( 8268710048153268415), KQU( 7985295214477182083),
+ KQU(15624495190888896807), KQU( 3772753430045262788),
+ KQU( 9133991620474991698), KQU( 5665791943316256028),
+ KQU( 7551996832462193473), KQU(13163729206798953877),
+ KQU( 9263532074153846374), KQU( 1015460703698618353),
+ KQU(17929874696989519390), KQU(18257884721466153847),
+ KQU(16271867543011222991), KQU( 3905971519021791941),
+ KQU(16814488397137052085), KQU( 1321197685504621613),
+ KQU( 2870359191894002181), KQU(14317282970323395450),
+ KQU(13663920845511074366), KQU( 2052463995796539594),
+ KQU(14126345686431444337), KQU( 1727572121947022534),
+ KQU(17793552254485594241), KQU( 6738857418849205750),
+ KQU( 1282987123157442952), KQU(16655480021581159251),
+ KQU( 6784587032080183866), KQU(14726758805359965162),
+ KQU( 7577995933961987349), KQU(12539609320311114036),
+ KQU(10789773033385439494), KQU( 8517001497411158227),
+ KQU(10075543932136339710), KQU(14838152340938811081),
+ KQU( 9560840631794044194), KQU(17445736541454117475),
+ KQU(10633026464336393186), KQU(15705729708242246293),
+ KQU( 1117517596891411098), KQU( 4305657943415886942),
+ KQU( 4948856840533979263), KQU(16071681989041789593),
+ KQU(13723031429272486527), KQU( 7639567622306509462),
+ KQU(12670424537483090390), KQU( 9715223453097197134),
+ KQU( 5457173389992686394), KQU( 289857129276135145),
+ KQU(17048610270521972512), KQU( 692768013309835485),
+ KQU(14823232360546632057), KQU(18218002361317895936),
+ KQU( 3281724260212650204), KQU(16453957266549513795),
+ KQU( 8592711109774511881), KQU( 929825123473369579),
+ KQU(15966784769764367791), KQU( 9627344291450607588),
+ KQU(10849555504977813287), KQU( 9234566913936339275),
+ KQU( 6413807690366911210), KQU(10862389016184219267),
+ KQU(13842504799335374048), KQU( 1531994113376881174),
+ KQU( 2081314867544364459), KQU(16430628791616959932),
+ KQU( 8314714038654394368), KQU( 9155473892098431813),
+ KQU(12577843786670475704), KQU( 4399161106452401017),
+ KQU( 1668083091682623186), KQU( 1741383777203714216),
+ KQU( 2162597285417794374), KQU(15841980159165218736),
+ KQU( 1971354603551467079), KQU( 1206714764913205968),
+ KQU( 4790860439591272330), KQU(14699375615594055799),
+ KQU( 8374423871657449988), KQU(10950685736472937738),
+ KQU( 697344331343267176), KQU(10084998763118059810),
+ KQU(12897369539795983124), KQU(12351260292144383605),
+ KQU( 1268810970176811234), KQU( 7406287800414582768),
+ KQU( 516169557043807831), KQU( 5077568278710520380),
+ KQU( 3828791738309039304), KQU( 7721974069946943610),
+ KQU( 3534670260981096460), KQU( 4865792189600584891),
+ KQU(16892578493734337298), KQU( 9161499464278042590),
+ KQU(11976149624067055931), KQU(13219479887277343990),
+ KQU(14161556738111500680), KQU(14670715255011223056),
+ KQU( 4671205678403576558), KQU(12633022931454259781),
+ KQU(14821376219869187646), KQU( 751181776484317028),
+ KQU( 2192211308839047070), KQU(11787306362361245189),
+ KQU(10672375120744095707), KQU( 4601972328345244467),
+ KQU(15457217788831125879), KQU( 8464345256775460809),
+ KQU(10191938789487159478), KQU( 6184348739615197613),
+ KQU(11425436778806882100), KQU( 2739227089124319793),
+ KQU( 461464518456000551), KQU( 4689850170029177442),
+ KQU( 6120307814374078625), KQU(11153579230681708671),
+ KQU( 7891721473905347926), KQU(10281646937824872400),
+ KQU( 3026099648191332248), KQU( 8666750296953273818),
+ KQU(14978499698844363232), KQU(13303395102890132065),
+ KQU( 8182358205292864080), KQU(10560547713972971291),
+ KQU(11981635489418959093), KQU( 3134621354935288409),
+ KQU(11580681977404383968), KQU(14205530317404088650),
+ KQU( 5997789011854923157), KQU(13659151593432238041),
+ KQU(11664332114338865086), KQU( 7490351383220929386),
+ KQU( 7189290499881530378), KQU(15039262734271020220),
+ KQU( 2057217285976980055), KQU( 555570804905355739),
+ KQU(11235311968348555110), KQU(13824557146269603217),
+ KQU(16906788840653099693), KQU( 7222878245455661677),
+ KQU( 5245139444332423756), KQU( 4723748462805674292),
+ KQU(12216509815698568612), KQU(17402362976648951187),
+ KQU(17389614836810366768), KQU( 4880936484146667711),
+ KQU( 9085007839292639880), KQU(13837353458498535449),
+ KQU(11914419854360366677), KQU(16595890135313864103),
+ KQU( 6313969847197627222), KQU(18296909792163910431),
+ KQU(10041780113382084042), KQU( 2499478551172884794),
+ KQU(11057894246241189489), KQU( 9742243032389068555),
+ KQU(12838934582673196228), KQU(13437023235248490367),
+ KQU(13372420669446163240), KQU( 6752564244716909224),
+ KQU( 7157333073400313737), KQU(12230281516370654308),
+ KQU( 1182884552219419117), KQU( 2955125381312499218),
+ KQU(10308827097079443249), KQU( 1337648572986534958),
+ KQU(16378788590020343939), KQU( 108619126514420935),
+ KQU( 3990981009621629188), KQU( 5460953070230946410),
+ KQU( 9703328329366531883), KQU(13166631489188077236),
+ KQU( 1104768831213675170), KQU( 3447930458553877908),
+ KQU( 8067172487769945676), KQU( 5445802098190775347),
+ KQU( 3244840981648973873), KQU(17314668322981950060),
+ KQU( 5006812527827763807), KQU(18158695070225526260),
+ KQU( 2824536478852417853), KQU(13974775809127519886),
+ KQU( 9814362769074067392), KQU(17276205156374862128),
+ KQU(11361680725379306967), KQU( 3422581970382012542),
+ KQU(11003189603753241266), KQU(11194292945277862261),
+ KQU( 6839623313908521348), KQU(11935326462707324634),
+ KQU( 1611456788685878444), KQU(13112620989475558907),
+ KQU( 517659108904450427), KQU(13558114318574407624),
+ KQU(15699089742731633077), KQU( 4988979278862685458),
+ KQU( 8111373583056521297), KQU( 3891258746615399627),
+ KQU( 8137298251469718086), KQU(12748663295624701649),
+ KQU( 4389835683495292062), KQU( 5775217872128831729),
+ KQU( 9462091896405534927), KQU( 8498124108820263989),
+ KQU( 8059131278842839525), KQU(10503167994254090892),
+ KQU(11613153541070396656), KQU(18069248738504647790),
+ KQU( 570657419109768508), KQU( 3950574167771159665),
+ KQU( 5514655599604313077), KQU( 2908460854428484165),
+ KQU(10777722615935663114), KQU(12007363304839279486),
+ KQU( 9800646187569484767), KQU( 8795423564889864287),
+ KQU(14257396680131028419), KQU( 6405465117315096498),
+ KQU( 7939411072208774878), KQU(17577572378528990006),
+ KQU(14785873806715994850), KQU(16770572680854747390),
+ KQU(18127549474419396481), KQU(11637013449455757750),
+ KQU(14371851933996761086), KQU( 3601181063650110280),
+ KQU( 4126442845019316144), KQU(10198287239244320669),
+ KQU(18000169628555379659), KQU(18392482400739978269),
+ KQU( 6219919037686919957), KQU( 3610085377719446052),
+ KQU( 2513925039981776336), KQU(16679413537926716955),
+ KQU(12903302131714909434), KQU( 5581145789762985009),
+ KQU(12325955044293303233), KQU(17216111180742141204),
+ KQU( 6321919595276545740), KQU( 3507521147216174501),
+ KQU( 9659194593319481840), KQU(11473976005975358326),
+ KQU(14742730101435987026), KQU( 492845897709954780),
+ KQU(16976371186162599676), KQU(17712703422837648655),
+ KQU( 9881254778587061697), KQU( 8413223156302299551),
+ KQU( 1563841828254089168), KQU( 9996032758786671975),
+ KQU( 138877700583772667), KQU(13003043368574995989),
+ KQU( 4390573668650456587), KQU( 8610287390568126755),
+ KQU(15126904974266642199), KQU( 6703637238986057662),
+ KQU( 2873075592956810157), KQU( 6035080933946049418),
+ KQU(13382846581202353014), KQU( 7303971031814642463),
+ KQU(18418024405307444267), KQU( 5847096731675404647),
+ KQU( 4035880699639842500), KQU(11525348625112218478),
+ KQU( 3041162365459574102), KQU( 2604734487727986558),
+ KQU(15526341771636983145), KQU(14556052310697370254),
+ KQU(12997787077930808155), KQU( 9601806501755554499),
+ KQU(11349677952521423389), KQU(14956777807644899350),
+ KQU(16559736957742852721), KQU(12360828274778140726),
+ KQU( 6685373272009662513), KQU(16932258748055324130),
+ KQU(15918051131954158508), KQU( 1692312913140790144),
+ KQU( 546653826801637367), KQU( 5341587076045986652),
+ KQU(14975057236342585662), KQU(12374976357340622412),
+ KQU(10328833995181940552), KQU(12831807101710443149),
+ KQU(10548514914382545716), KQU( 2217806727199715993),
+ KQU(12627067369242845138), KQU( 4598965364035438158),
+ KQU( 150923352751318171), KQU(14274109544442257283),
+ KQU( 4696661475093863031), KQU( 1505764114384654516),
+ KQU(10699185831891495147), KQU( 2392353847713620519),
+ KQU( 3652870166711788383), KQU( 8640653276221911108),
+ KQU( 3894077592275889704), KQU( 4918592872135964845),
+ KQU(16379121273281400789), KQU(12058465483591683656),
+ KQU(11250106829302924945), KQU( 1147537556296983005),
+ KQU( 6376342756004613268), KQU(14967128191709280506),
+ KQU(18007449949790627628), KQU( 9497178279316537841),
+ KQU( 7920174844809394893), KQU(10037752595255719907),
+ KQU(15875342784985217697), KQU(15311615921712850696),
+ KQU( 9552902652110992950), KQU(14054979450099721140),
+ KQU( 5998709773566417349), KQU(18027910339276320187),
+ KQU( 8223099053868585554), KQU( 7842270354824999767),
+ KQU( 4896315688770080292), KQU(12969320296569787895),
+ KQU( 2674321489185759961), KQU( 4053615936864718439),
+ KQU(11349775270588617578), KQU( 4743019256284553975),
+ KQU( 5602100217469723769), KQU(14398995691411527813),
+ KQU( 7412170493796825470), KQU( 836262406131744846),
+ KQU( 8231086633845153022), KQU( 5161377920438552287),
+ KQU( 8828731196169924949), KQU(16211142246465502680),
+ KQU( 3307990879253687818), KQU( 5193405406899782022),
+ KQU( 8510842117467566693), KQU( 6070955181022405365),
+ KQU(14482950231361409799), KQU(12585159371331138077),
+ KQU( 3511537678933588148), KQU( 2041849474531116417),
+ KQU(10944936685095345792), KQU(18303116923079107729),
+ KQU( 2720566371239725320), KQU( 4958672473562397622),
+ KQU( 3032326668253243412), KQU(13689418691726908338),
+ KQU( 1895205511728843996), KQU( 8146303515271990527),
+ KQU(16507343500056113480), KQU( 473996939105902919),
+ KQU( 9897686885246881481), KQU(14606433762712790575),
+ KQU( 6732796251605566368), KQU( 1399778120855368916),
+ KQU( 935023885182833777), KQU(16066282816186753477),
+ KQU( 7291270991820612055), KQU(17530230393129853844),
+ KQU(10223493623477451366), KQU(15841725630495676683),
+ KQU(17379567246435515824), KQU( 8588251429375561971),
+ KQU(18339511210887206423), KQU(17349587430725976100),
+ KQU(12244876521394838088), KQU( 6382187714147161259),
+ KQU(12335807181848950831), KQU(16948885622305460665),
+ KQU(13755097796371520506), KQU(14806740373324947801),
+ KQU( 4828699633859287703), KQU( 8209879281452301604),
+ KQU(12435716669553736437), KQU(13970976859588452131),
+ KQU( 6233960842566773148), KQU(12507096267900505759),
+ KQU( 1198713114381279421), KQU(14989862731124149015),
+ KQU(15932189508707978949), KQU( 2526406641432708722),
+ KQU( 29187427817271982), KQU( 1499802773054556353),
+ KQU(10816638187021897173), KQU( 5436139270839738132),
+ KQU( 6659882287036010082), KQU( 2154048955317173697),
+ KQU(10887317019333757642), KQU(16281091802634424955),
+ KQU(10754549879915384901), KQU(10760611745769249815),
+ KQU( 2161505946972504002), KQU( 5243132808986265107),
+ KQU(10129852179873415416), KQU( 710339480008649081),
+ KQU( 7802129453068808528), KQU(17967213567178907213),
+ KQU(15730859124668605599), KQU(13058356168962376502),
+ KQU( 3701224985413645909), KQU(14464065869149109264),
+ KQU( 9959272418844311646), KQU(10157426099515958752),
+ KQU(14013736814538268528), KQU(17797456992065653951),
+ KQU(17418878140257344806), KQU(15457429073540561521),
+ KQU( 2184426881360949378), KQU( 2062193041154712416),
+ KQU( 8553463347406931661), KQU( 4913057625202871854),
+ KQU( 2668943682126618425), KQU(17064444737891172288),
+ KQU( 4997115903913298637), KQU(12019402608892327416),
+ KQU(17603584559765897352), KQU(11367529582073647975),
+ KQU( 8211476043518436050), KQU( 8676849804070323674),
+ KQU(18431829230394475730), KQU(10490177861361247904),
+ KQU( 9508720602025651349), KQU( 7409627448555722700),
+ KQU( 5804047018862729008), KQU(11943858176893142594),
+ KQU(11908095418933847092), KQU( 5415449345715887652),
+ KQU( 1554022699166156407), KQU( 9073322106406017161),
+ KQU( 7080630967969047082), KQU(18049736940860732943),
+ KQU(12748714242594196794), KQU( 1226992415735156741),
+ KQU(17900981019609531193), KQU(11720739744008710999),
+ KQU( 3006400683394775434), KQU(11347974011751996028),
+ KQU( 3316999628257954608), KQU( 8384484563557639101),
+ KQU(18117794685961729767), KQU( 1900145025596618194),
+ KQU(17459527840632892676), KQU( 5634784101865710994),
+ KQU( 7918619300292897158), KQU( 3146577625026301350),
+ KQU( 9955212856499068767), KQU( 1873995843681746975),
+ KQU( 1561487759967972194), KQU( 8322718804375878474),
+ KQU(11300284215327028366), KQU( 4667391032508998982),
+ KQU( 9820104494306625580), KQU(17922397968599970610),
+ KQU( 1784690461886786712), KQU(14940365084341346821),
+ KQU( 5348719575594186181), KQU(10720419084507855261),
+ KQU(14210394354145143274), KQU( 2426468692164000131),
+ KQU(16271062114607059202), KQU(14851904092357070247),
+ KQU( 6524493015693121897), KQU( 9825473835127138531),
+ KQU(14222500616268569578), KQU(15521484052007487468),
+ KQU(14462579404124614699), KQU(11012375590820665520),
+ KQU(11625327350536084927), KQU(14452017765243785417),
+ KQU( 9989342263518766305), KQU( 3640105471101803790),
+ KQU( 4749866455897513242), KQU(13963064946736312044),
+ KQU(10007416591973223791), KQU(18314132234717431115),
+ KQU( 3286596588617483450), KQU( 7726163455370818765),
+ KQU( 7575454721115379328), KQU( 5308331576437663422),
+ KQU(18288821894903530934), KQU( 8028405805410554106),
+ KQU(15744019832103296628), KQU( 149765559630932100),
+ KQU( 6137705557200071977), KQU(14513416315434803615),
+ KQU(11665702820128984473), KQU( 218926670505601386),
+ KQU( 6868675028717769519), KQU(15282016569441512302),
+ KQU( 5707000497782960236), KQU( 6671120586555079567),
+ KQU( 2194098052618985448), KQU(16849577895477330978),
+ KQU(12957148471017466283), KQU( 1997805535404859393),
+ KQU( 1180721060263860490), KQU(13206391310193756958),
+ KQU(12980208674461861797), KQU( 3825967775058875366),
+ KQU(17543433670782042631), KQU( 1518339070120322730),
+ KQU(16344584340890991669), KQU( 2611327165318529819),
+ KQU(11265022723283422529), KQU( 4001552800373196817),
+ KQU(14509595890079346161), KQU( 3528717165416234562),
+ KQU(18153222571501914072), KQU( 9387182977209744425),
+ KQU(10064342315985580021), KQU(11373678413215253977),
+ KQU( 2308457853228798099), KQU( 9729042942839545302),
+ KQU( 7833785471140127746), KQU( 6351049900319844436),
+ KQU(14454610627133496067), KQU(12533175683634819111),
+ KQU(15570163926716513029), KQU(13356980519185762498)
};
TEST_BEGIN(test_gen_rand_32)
@@ -1543,13 +1543,13 @@ TEST_BEGIN(test_gen_rand_64)
}
r = gen_rand64(ctx);
assert_u64_eq(r, array64[i],
- "Mismatch at array64[%d]=%"PRIx64", gen=%"PRIx64, i,
+ "Mismatch at array64[%d]=%"FMTx64", gen=%"FMTx64, i,
array64[i], r);
}
for (i = 0; i < COUNT_2; i++) {
r = gen_rand64(ctx);
assert_u64_eq(r, array64_2[i],
- "Mismatch at array64_2[%d]=%"PRIx64" gen=%"PRIx64"", i,
+ "Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64"", i,
array64_2[i], r);
}
fini_gen_rand(ctx);
@@ -1580,13 +1580,13 @@ TEST_BEGIN(test_by_array_64)
}
r = gen_rand64(ctx);
assert_u64_eq(r, array64[i],
- "Mismatch at array64[%d]=%"PRIx64" gen=%"PRIx64, i,
+ "Mismatch at array64[%d]=%"FMTx64" gen=%"FMTx64, i,
array64[i], r);
}
for (i = 0; i < COUNT_2; i++) {
r = gen_rand64(ctx);
assert_u64_eq(r, array64_2[i],
- "Mismatch at array64_2[%d]=%"PRIx64" gen=%"PRIx64, i,
+ "Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64, i,
array64_2[i], r);
}
fini_gen_rand(ctx);
diff --git a/deps/jemalloc/test/unit/atomic.c b/deps/jemalloc/test/unit/atomic.c
new file mode 100644
index 000000000..bdd74f659
--- /dev/null
+++ b/deps/jemalloc/test/unit/atomic.c
@@ -0,0 +1,122 @@
+#include "test/jemalloc_test.h"
+
+#define TEST_STRUCT(p, t) \
+struct p##_test_s { \
+ t accum0; \
+ t x; \
+ t s; \
+}; \
+typedef struct p##_test_s p##_test_t;
+
+#define TEST_BODY(p, t, tc, ta, FMT) do { \
+ const p##_test_t tests[] = { \
+ {(t)-1, (t)-1, (t)-2}, \
+ {(t)-1, (t) 0, (t)-2}, \
+ {(t)-1, (t) 1, (t)-2}, \
+ \
+ {(t) 0, (t)-1, (t)-2}, \
+ {(t) 0, (t) 0, (t)-2}, \
+ {(t) 0, (t) 1, (t)-2}, \
+ \
+ {(t) 1, (t)-1, (t)-2}, \
+ {(t) 1, (t) 0, (t)-2}, \
+ {(t) 1, (t) 1, (t)-2}, \
+ \
+ {(t)0, (t)-(1 << 22), (t)-2}, \
+ {(t)0, (t)(1 << 22), (t)-2}, \
+ {(t)(1 << 22), (t)-(1 << 22), (t)-2}, \
+ {(t)(1 << 22), (t)(1 << 22), (t)-2} \
+ }; \
+ unsigned i; \
+ \
+ for (i = 0; i < sizeof(tests)/sizeof(p##_test_t); i++) { \
+ bool err; \
+ t accum = tests[i].accum0; \
+ assert_##ta##_eq(atomic_read_##p(&accum), \
+ tests[i].accum0, \
+ "Erroneous read, i=%u", i); \
+ \
+ assert_##ta##_eq(atomic_add_##p(&accum, tests[i].x), \
+ (t)((tc)tests[i].accum0 + (tc)tests[i].x), \
+ "i=%u, accum=%"FMT", x=%"FMT, \
+ i, tests[i].accum0, tests[i].x); \
+ assert_##ta##_eq(atomic_read_##p(&accum), accum, \
+ "Erroneous add, i=%u", i); \
+ \
+ accum = tests[i].accum0; \
+ assert_##ta##_eq(atomic_sub_##p(&accum, tests[i].x), \
+ (t)((tc)tests[i].accum0 - (tc)tests[i].x), \
+ "i=%u, accum=%"FMT", x=%"FMT, \
+ i, tests[i].accum0, tests[i].x); \
+ assert_##ta##_eq(atomic_read_##p(&accum), accum, \
+ "Erroneous sub, i=%u", i); \
+ \
+ accum = tests[i].accum0; \
+ err = atomic_cas_##p(&accum, tests[i].x, tests[i].s); \
+ assert_b_eq(err, tests[i].accum0 != tests[i].x, \
+ "Erroneous cas success/failure result"); \
+ assert_##ta##_eq(accum, err ? tests[i].accum0 : \
+ tests[i].s, "Erroneous cas effect, i=%u", i); \
+ \
+ accum = tests[i].accum0; \
+ atomic_write_##p(&accum, tests[i].s); \
+ assert_##ta##_eq(accum, tests[i].s, \
+ "Erroneous write, i=%u", i); \
+ } \
+} while (0)
+
+TEST_STRUCT(uint64, uint64_t)
+TEST_BEGIN(test_atomic_uint64)
+{
+
+#if !(LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
+ test_skip("64-bit atomic operations not supported");
+#else
+ TEST_BODY(uint64, uint64_t, uint64_t, u64, FMTx64);
+#endif
+}
+TEST_END
+
+TEST_STRUCT(uint32, uint32_t)
+TEST_BEGIN(test_atomic_uint32)
+{
+
+ TEST_BODY(uint32, uint32_t, uint32_t, u32, "#"FMTx32);
+}
+TEST_END
+
+TEST_STRUCT(p, void *)
+TEST_BEGIN(test_atomic_p)
+{
+
+ TEST_BODY(p, void *, uintptr_t, ptr, "p");
+}
+TEST_END
+
+TEST_STRUCT(z, size_t)
+TEST_BEGIN(test_atomic_z)
+{
+
+ TEST_BODY(z, size_t, size_t, zu, "#zx");
+}
+TEST_END
+
+TEST_STRUCT(u, unsigned)
+TEST_BEGIN(test_atomic_u)
+{
+
+ TEST_BODY(u, unsigned, unsigned, u, "#x");
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_atomic_uint64,
+ test_atomic_uint32,
+ test_atomic_p,
+ test_atomic_z,
+ test_atomic_u));
+}
diff --git a/deps/jemalloc/test/unit/bitmap.c b/deps/jemalloc/test/unit/bitmap.c
index 8086b8885..7da583d85 100644
--- a/deps/jemalloc/test/unit/bitmap.c
+++ b/deps/jemalloc/test/unit/bitmap.c
@@ -1,17 +1,11 @@
#include "test/jemalloc_test.h"
-#if (LG_BITMAP_MAXBITS > 12)
-# define MAXBITS 4500
-#else
-# define MAXBITS (1U << LG_BITMAP_MAXBITS)
-#endif
-
TEST_BEGIN(test_bitmap_size)
{
size_t i, prev_size;
prev_size = 0;
- for (i = 1; i <= MAXBITS; i++) {
+ for (i = 1; i <= BITMAP_MAXBITS; i++) {
size_t size = bitmap_size(i);
assert_true(size >= prev_size,
"Bitmap size is smaller than expected");
@@ -24,12 +18,12 @@ TEST_BEGIN(test_bitmap_init)
{
size_t i;
- for (i = 1; i <= MAXBITS; i++) {
+ for (i = 1; i <= BITMAP_MAXBITS; i++) {
bitmap_info_t binfo;
bitmap_info_init(&binfo, i);
{
size_t j;
- bitmap_t *bitmap = malloc(sizeof(bitmap_t) *
+ bitmap_t *bitmap = (bitmap_t *)malloc(sizeof(bitmap_t) *
bitmap_info_ngroups(&binfo));
bitmap_init(bitmap, &binfo);
@@ -47,12 +41,12 @@ TEST_BEGIN(test_bitmap_set)
{
size_t i;
- for (i = 1; i <= MAXBITS; i++) {
+ for (i = 1; i <= BITMAP_MAXBITS; i++) {
bitmap_info_t binfo;
bitmap_info_init(&binfo, i);
{
size_t j;
- bitmap_t *bitmap = malloc(sizeof(bitmap_t) *
+ bitmap_t *bitmap = (bitmap_t *)malloc(sizeof(bitmap_t) *
bitmap_info_ngroups(&binfo));
bitmap_init(bitmap, &binfo);
@@ -70,12 +64,12 @@ TEST_BEGIN(test_bitmap_unset)
{
size_t i;
- for (i = 1; i <= MAXBITS; i++) {
+ for (i = 1; i <= BITMAP_MAXBITS; i++) {
bitmap_info_t binfo;
bitmap_info_init(&binfo, i);
{
size_t j;
- bitmap_t *bitmap = malloc(sizeof(bitmap_t) *
+ bitmap_t *bitmap = (bitmap_t *)malloc(sizeof(bitmap_t) *
bitmap_info_ngroups(&binfo));
bitmap_init(bitmap, &binfo);
@@ -99,12 +93,12 @@ TEST_BEGIN(test_bitmap_sfu)
{
size_t i;
- for (i = 1; i <= MAXBITS; i++) {
+ for (i = 1; i <= BITMAP_MAXBITS; i++) {
bitmap_info_t binfo;
bitmap_info_init(&binfo, i);
{
ssize_t j;
- bitmap_t *bitmap = malloc(sizeof(bitmap_t) *
+ bitmap_t *bitmap = (bitmap_t *)malloc(sizeof(bitmap_t) *
bitmap_info_ngroups(&binfo));
bitmap_init(bitmap, &binfo);
diff --git a/deps/jemalloc/test/unit/ckh.c b/deps/jemalloc/test/unit/ckh.c
index b214c279a..b11759599 100644
--- a/deps/jemalloc/test/unit/ckh.c
+++ b/deps/jemalloc/test/unit/ckh.c
@@ -2,20 +2,24 @@
TEST_BEGIN(test_new_delete)
{
+ tsd_t *tsd;
ckh_t ckh;
- assert_false(ckh_new(&ckh, 2, ckh_string_hash, ckh_string_keycomp),
- "Unexpected ckh_new() error");
- ckh_delete(&ckh);
+ tsd = tsd_fetch();
- assert_false(ckh_new(&ckh, 3, ckh_pointer_hash, ckh_pointer_keycomp),
+ assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, ckh_string_keycomp),
"Unexpected ckh_new() error");
- ckh_delete(&ckh);
+ ckh_delete(tsd, &ckh);
+
+ assert_false(ckh_new(tsd, &ckh, 3, ckh_pointer_hash,
+ ckh_pointer_keycomp), "Unexpected ckh_new() error");
+ ckh_delete(tsd, &ckh);
}
TEST_END
TEST_BEGIN(test_count_insert_search_remove)
{
+ tsd_t *tsd;
ckh_t ckh;
const char *strs[] = {
"a string",
@@ -26,7 +30,9 @@ TEST_BEGIN(test_count_insert_search_remove)
const char *missing = "A string not in the hash table.";
size_t i;
- assert_false(ckh_new(&ckh, 2, ckh_string_hash, ckh_string_keycomp),
+ tsd = tsd_fetch();
+
+ assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, ckh_string_keycomp),
"Unexpected ckh_new() error");
assert_zu_eq(ckh_count(&ckh), 0,
"ckh_count() should return %zu, but it returned %zu", ZU(0),
@@ -34,7 +40,7 @@ TEST_BEGIN(test_count_insert_search_remove)
/* Insert. */
for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
- ckh_insert(&ckh, strs[i], strs[i]);
+ ckh_insert(tsd, &ckh, strs[i], strs[i]);
assert_zu_eq(ckh_count(&ckh), i+1,
"ckh_count() should return %zu, but it returned %zu", i+1,
ckh_count(&ckh));
@@ -58,10 +64,10 @@ TEST_BEGIN(test_count_insert_search_remove)
ks = (i & 1) ? strs[i] : (const char *)NULL;
vs = (i & 2) ? strs[i] : (const char *)NULL;
- assert_ptr_eq((void *)ks, (void *)k.s,
- "Key mismatch, i=%zu", i);
- assert_ptr_eq((void *)vs, (void *)v.s,
- "Value mismatch, i=%zu", i);
+ assert_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu",
+ i);
+ assert_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu",
+ i);
}
assert_true(ckh_search(&ckh, missing, NULL, NULL),
"Unexpected ckh_search() success");
@@ -79,36 +85,39 @@ TEST_BEGIN(test_count_insert_search_remove)
vp = (i & 2) ? &v.p : NULL;
k.p = NULL;
v.p = NULL;
- assert_false(ckh_remove(&ckh, strs[i], kp, vp),
+ assert_false(ckh_remove(tsd, &ckh, strs[i], kp, vp),
"Unexpected ckh_remove() error");
ks = (i & 1) ? strs[i] : (const char *)NULL;
vs = (i & 2) ? strs[i] : (const char *)NULL;
- assert_ptr_eq((void *)ks, (void *)k.s,
- "Key mismatch, i=%zu", i);
- assert_ptr_eq((void *)vs, (void *)v.s,
- "Value mismatch, i=%zu", i);
+ assert_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu",
+ i);
+ assert_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu",
+ i);
assert_zu_eq(ckh_count(&ckh),
sizeof(strs)/sizeof(const char *) - i - 1,
"ckh_count() should return %zu, but it returned %zu",
- sizeof(strs)/sizeof(const char *) - i - 1,
+ sizeof(strs)/sizeof(const char *) - i - 1,
ckh_count(&ckh));
}
- ckh_delete(&ckh);
+ ckh_delete(tsd, &ckh);
}
TEST_END
TEST_BEGIN(test_insert_iter_remove)
{
#define NITEMS ZU(1000)
+ tsd_t *tsd;
ckh_t ckh;
void **p[NITEMS];
void *q, *r;
size_t i;
- assert_false(ckh_new(&ckh, 2, ckh_pointer_hash, ckh_pointer_keycomp),
- "Unexpected ckh_new() error");
+ tsd = tsd_fetch();
+
+ assert_false(ckh_new(tsd, &ckh, 2, ckh_pointer_hash,
+ ckh_pointer_keycomp), "Unexpected ckh_new() error");
for (i = 0; i < NITEMS; i++) {
p[i] = mallocx(i+1, 0);
@@ -119,7 +128,7 @@ TEST_BEGIN(test_insert_iter_remove)
size_t j;
for (j = i; j < NITEMS; j++) {
- assert_false(ckh_insert(&ckh, p[j], p[j]),
+ assert_false(ckh_insert(tsd, &ckh, p[j], p[j]),
"Unexpected ckh_insert() failure");
assert_false(ckh_search(&ckh, p[j], &q, &r),
"Unexpected ckh_search() failure");
@@ -134,13 +143,13 @@ TEST_BEGIN(test_insert_iter_remove)
for (j = i + 1; j < NITEMS; j++) {
assert_false(ckh_search(&ckh, p[j], NULL, NULL),
"Unexpected ckh_search() failure");
- assert_false(ckh_remove(&ckh, p[j], &q, &r),
+ assert_false(ckh_remove(tsd, &ckh, p[j], &q, &r),
"Unexpected ckh_remove() failure");
assert_ptr_eq(p[j], q, "Key pointer mismatch");
assert_ptr_eq(p[j], r, "Value pointer mismatch");
assert_true(ckh_search(&ckh, p[j], NULL, NULL),
"Unexpected ckh_search() success");
- assert_true(ckh_remove(&ckh, p[j], &q, &r),
+ assert_true(ckh_remove(tsd, &ckh, p[j], &q, &r),
"Unexpected ckh_remove() success");
}
@@ -150,8 +159,7 @@ TEST_BEGIN(test_insert_iter_remove)
memset(seen, 0, sizeof(seen));
- for (tabind = 0; ckh_iter(&ckh, &tabind, &q, &r) ==
- false;) {
+ for (tabind = 0; !ckh_iter(&ckh, &tabind, &q, &r);) {
size_t k;
assert_ptr_eq(q, r, "Key and val not equal");
@@ -176,21 +184,21 @@ TEST_BEGIN(test_insert_iter_remove)
for (i = 0; i < NITEMS; i++) {
assert_false(ckh_search(&ckh, p[i], NULL, NULL),
"Unexpected ckh_search() failure");
- assert_false(ckh_remove(&ckh, p[i], &q, &r),
+ assert_false(ckh_remove(tsd, &ckh, p[i], &q, &r),
"Unexpected ckh_remove() failure");
assert_ptr_eq(p[i], q, "Key pointer mismatch");
assert_ptr_eq(p[i], r, "Value pointer mismatch");
assert_true(ckh_search(&ckh, p[i], NULL, NULL),
"Unexpected ckh_search() success");
- assert_true(ckh_remove(&ckh, p[i], &q, &r),
+ assert_true(ckh_remove(tsd, &ckh, p[i], &q, &r),
"Unexpected ckh_remove() success");
dallocx(p[i], 0);
}
assert_zu_eq(ckh_count(&ckh), 0,
- "ckh_count() should return %zu, but it returned %zu", ZU(0),
- ckh_count(&ckh));
- ckh_delete(&ckh);
+ "ckh_count() should return %zu, but it returned %zu",
+ ZU(0), ckh_count(&ckh));
+ ckh_delete(tsd, &ckh);
#undef NITEMS
}
TEST_END
diff --git a/deps/jemalloc/test/unit/hash.c b/deps/jemalloc/test/unit/hash.c
index abb394ac0..77a8cede9 100644
--- a/deps/jemalloc/test/unit/hash.c
+++ b/deps/jemalloc/test/unit/hash.c
@@ -64,8 +64,8 @@ hash_variant_verify(hash_variant_t variant)
{
const size_t hashbytes = hash_variant_bits(variant) / 8;
uint8_t key[256];
- uint8_t hashes[hashbytes * 256];
- uint8_t final[hashbytes];
+ VARIABLE_ARRAY(uint8_t, hashes, hashbytes * 256);
+ VARIABLE_ARRAY(uint8_t, final, hashbytes);
unsigned i;
uint32_t computed, expected;
diff --git a/deps/jemalloc/test/unit/junk.c b/deps/jemalloc/test/unit/junk.c
index 85bbf9e2b..b23dd1e95 100644
--- a/deps/jemalloc/test/unit/junk.c
+++ b/deps/jemalloc/test/unit/junk.c
@@ -1,14 +1,26 @@
#include "test/jemalloc_test.h"
#ifdef JEMALLOC_FILL
+# ifndef JEMALLOC_TEST_JUNK_OPT
+# define JEMALLOC_TEST_JUNK_OPT "junk:true"
+# endif
const char *malloc_conf =
- "abort:false,junk:true,zero:false,redzone:true,quarantine:0";
+ "abort:false,zero:false,redzone:true,quarantine:0," JEMALLOC_TEST_JUNK_OPT;
#endif
static arena_dalloc_junk_small_t *arena_dalloc_junk_small_orig;
static arena_dalloc_junk_large_t *arena_dalloc_junk_large_orig;
static huge_dalloc_junk_t *huge_dalloc_junk_orig;
-static void *most_recently_junked;
+static void *watch_for_junking;
+static bool saw_junking;
+
+static void
+watch_junking(void *p)
+{
+
+ watch_for_junking = p;
+ saw_junking = false;
+}
static void
arena_dalloc_junk_small_intercept(void *ptr, arena_bin_info_t *bin_info)
@@ -21,7 +33,8 @@ arena_dalloc_junk_small_intercept(void *ptr, arena_bin_info_t *bin_info)
"Missing junk fill for byte %zu/%zu of deallocated region",
i, bin_info->reg_size);
}
- most_recently_junked = ptr;
+ if (ptr == watch_for_junking)
+ saw_junking = true;
}
static void
@@ -35,7 +48,8 @@ arena_dalloc_junk_large_intercept(void *ptr, size_t usize)
"Missing junk fill for byte %zu/%zu of deallocated region",
i, usize);
}
- most_recently_junked = ptr;
+ if (ptr == watch_for_junking)
+ saw_junking = true;
}
static void
@@ -48,7 +62,8 @@ huge_dalloc_junk_intercept(void *ptr, size_t usize)
* enough that it doesn't make sense to duplicate the decision logic in
* test code, so don't actually check that the region is junk-filled.
*/
- most_recently_junked = ptr;
+ if (ptr == watch_for_junking)
+ saw_junking = true;
}
static void
@@ -57,12 +72,14 @@ test_junk(size_t sz_min, size_t sz_max)
char *s;
size_t sz_prev, sz, i;
- arena_dalloc_junk_small_orig = arena_dalloc_junk_small;
- arena_dalloc_junk_small = arena_dalloc_junk_small_intercept;
- arena_dalloc_junk_large_orig = arena_dalloc_junk_large;
- arena_dalloc_junk_large = arena_dalloc_junk_large_intercept;
- huge_dalloc_junk_orig = huge_dalloc_junk;
- huge_dalloc_junk = huge_dalloc_junk_intercept;
+ if (opt_junk_free) {
+ arena_dalloc_junk_small_orig = arena_dalloc_junk_small;
+ arena_dalloc_junk_small = arena_dalloc_junk_small_intercept;
+ arena_dalloc_junk_large_orig = arena_dalloc_junk_large;
+ arena_dalloc_junk_large = arena_dalloc_junk_large_intercept;
+ huge_dalloc_junk_orig = huge_dalloc_junk;
+ huge_dalloc_junk = huge_dalloc_junk_intercept;
+ }
sz_prev = 0;
s = (char *)mallocx(sz_min, 0);
@@ -80,34 +97,35 @@ test_junk(size_t sz_min, size_t sz_max)
}
for (i = sz_prev; i < sz; i++) {
- assert_c_eq(s[i], 0xa5,
- "Newly allocated byte %zu/%zu isn't junk-filled",
- i, sz);
+ if (opt_junk_alloc) {
+ assert_c_eq(s[i], 0xa5,
+ "Newly allocated byte %zu/%zu isn't "
+ "junk-filled", i, sz);
+ }
s[i] = 'a';
}
if (xallocx(s, sz+1, 0, 0) == sz) {
- void *junked = (void *)s;
-
+ watch_junking(s);
s = (char *)rallocx(s, sz+1, 0);
assert_ptr_not_null((void *)s,
"Unexpected rallocx() failure");
- if (!config_mremap || sz+1 <= arena_maxclass) {
- assert_ptr_eq(most_recently_junked, junked,
- "Expected region of size %zu to be "
- "junk-filled",
- sz);
- }
+ assert_true(!opt_junk_free || saw_junking,
+ "Expected region of size %zu to be junk-filled",
+ sz);
}
}
+ watch_junking(s);
dallocx(s, 0);
- assert_ptr_eq(most_recently_junked, (void *)s,
+ assert_true(!opt_junk_free || saw_junking,
"Expected region of size %zu to be junk-filled", sz);
- arena_dalloc_junk_small = arena_dalloc_junk_small_orig;
- arena_dalloc_junk_large = arena_dalloc_junk_large_orig;
- huge_dalloc_junk = huge_dalloc_junk_orig;
+ if (opt_junk_free) {
+ arena_dalloc_junk_small = arena_dalloc_junk_small_orig;
+ arena_dalloc_junk_large = arena_dalloc_junk_large_orig;
+ huge_dalloc_junk = huge_dalloc_junk_orig;
+ }
}
TEST_BEGIN(test_junk_small)
@@ -122,7 +140,7 @@ TEST_BEGIN(test_junk_large)
{
test_skip_if(!config_fill);
- test_junk(SMALL_MAXCLASS+1, arena_maxclass);
+ test_junk(SMALL_MAXCLASS+1, large_maxclass);
}
TEST_END
@@ -130,20 +148,32 @@ TEST_BEGIN(test_junk_huge)
{
test_skip_if(!config_fill);
- test_junk(arena_maxclass+1, chunksize*2);
+ test_junk(large_maxclass+1, chunksize*2);
}
TEST_END
arena_ralloc_junk_large_t *arena_ralloc_junk_large_orig;
static void *most_recently_trimmed;
+static size_t
+shrink_size(size_t size)
+{
+ size_t shrink_size;
+
+ for (shrink_size = size - 1; nallocx(shrink_size, 0) == size;
+ shrink_size--)
+ ; /* Do nothing. */
+
+ return (shrink_size);
+}
+
static void
arena_ralloc_junk_large_intercept(void *ptr, size_t old_usize, size_t usize)
{
arena_ralloc_junk_large_orig(ptr, old_usize, usize);
- assert_zu_eq(old_usize, arena_maxclass, "Unexpected old_usize");
- assert_zu_eq(usize, arena_maxclass-PAGE, "Unexpected usize");
+ assert_zu_eq(old_usize, large_maxclass, "Unexpected old_usize");
+ assert_zu_eq(usize, shrink_size(large_maxclass), "Unexpected usize");
most_recently_trimmed = ptr;
}
@@ -151,13 +181,13 @@ TEST_BEGIN(test_junk_large_ralloc_shrink)
{
void *p1, *p2;
- p1 = mallocx(arena_maxclass, 0);
+ p1 = mallocx(large_maxclass, 0);
assert_ptr_not_null(p1, "Unexpected mallocx() failure");
arena_ralloc_junk_large_orig = arena_ralloc_junk_large;
arena_ralloc_junk_large = arena_ralloc_junk_large_intercept;
- p2 = rallocx(p1, arena_maxclass-PAGE, 0);
+ p2 = rallocx(p1, shrink_size(large_maxclass), 0);
assert_ptr_eq(p1, p2, "Unexpected move during shrink");
arena_ralloc_junk_large = arena_ralloc_junk_large_orig;
@@ -183,6 +213,7 @@ TEST_BEGIN(test_junk_redzone)
arena_redzone_corruption_t *arena_redzone_corruption_orig;
test_skip_if(!config_fill);
+ test_skip_if(!opt_junk_alloc || !opt_junk_free);
arena_redzone_corruption_orig = arena_redzone_corruption;
arena_redzone_corruption = arena_redzone_corruption_replacement;
@@ -213,6 +244,7 @@ int
main(void)
{
+ assert(!config_fill || opt_junk_alloc || opt_junk_free);
return (test(
test_junk_small,
test_junk_large,
diff --git a/deps/jemalloc/test/unit/junk_alloc.c b/deps/jemalloc/test/unit/junk_alloc.c
new file mode 100644
index 000000000..8db3331d2
--- /dev/null
+++ b/deps/jemalloc/test/unit/junk_alloc.c
@@ -0,0 +1,3 @@
+#define JEMALLOC_TEST_JUNK_OPT "junk:alloc"
+#include "junk.c"
+#undef JEMALLOC_TEST_JUNK_OPT
diff --git a/deps/jemalloc/test/unit/junk_free.c b/deps/jemalloc/test/unit/junk_free.c
new file mode 100644
index 000000000..482a61d07
--- /dev/null
+++ b/deps/jemalloc/test/unit/junk_free.c
@@ -0,0 +1,3 @@
+#define JEMALLOC_TEST_JUNK_OPT "junk:free"
+#include "junk.c"
+#undef JEMALLOC_TEST_JUNK_OPT
diff --git a/deps/jemalloc/test/unit/lg_chunk.c b/deps/jemalloc/test/unit/lg_chunk.c
new file mode 100644
index 000000000..7e5df3814
--- /dev/null
+++ b/deps/jemalloc/test/unit/lg_chunk.c
@@ -0,0 +1,26 @@
+#include "test/jemalloc_test.h"
+
+/*
+ * Make sure that opt.lg_chunk clamping is sufficient. In practice, this test
+ * program will fail a debug assertion during initialization and abort (rather
+ * than the test soft-failing) if clamping is insufficient.
+ */
+const char *malloc_conf = "lg_chunk:0";
+
+TEST_BEGIN(test_lg_chunk_clamp)
+{
+ void *p;
+
+ p = mallocx(1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ dallocx(p, 0);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_lg_chunk_clamp));
+}
diff --git a/deps/jemalloc/test/unit/mallctl.c b/deps/jemalloc/test/unit/mallctl.c
index 31fb81057..31e354ca7 100644
--- a/deps/jemalloc/test/unit/mallctl.c
+++ b/deps/jemalloc/test/unit/mallctl.c
@@ -126,11 +126,10 @@ TEST_BEGIN(test_mallctl_config)
assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \
} while (0)
+ TEST_MALLCTL_CONFIG(cache_oblivious);
TEST_MALLCTL_CONFIG(debug);
- TEST_MALLCTL_CONFIG(dss);
TEST_MALLCTL_CONFIG(fill);
TEST_MALLCTL_CONFIG(lazy_lock);
- TEST_MALLCTL_CONFIG(mremap);
TEST_MALLCTL_CONFIG(munmap);
TEST_MALLCTL_CONFIG(prof);
TEST_MALLCTL_CONFIG(prof_libgcc);
@@ -166,12 +165,11 @@ TEST_BEGIN(test_mallctl_opt)
TEST_MALLCTL_OPT(size_t, narenas, always);
TEST_MALLCTL_OPT(ssize_t, lg_dirty_mult, always);
TEST_MALLCTL_OPT(bool, stats_print, always);
- TEST_MALLCTL_OPT(bool, junk, fill);
+ TEST_MALLCTL_OPT(const char *, junk, fill);
TEST_MALLCTL_OPT(size_t, quarantine, fill);
TEST_MALLCTL_OPT(bool, redzone, fill);
TEST_MALLCTL_OPT(bool, zero, fill);
TEST_MALLCTL_OPT(bool, utrace, utrace);
- TEST_MALLCTL_OPT(bool, valgrind, valgrind);
TEST_MALLCTL_OPT(bool, xmalloc, xmalloc);
TEST_MALLCTL_OPT(bool, tcache, tcache);
TEST_MALLCTL_OPT(size_t, lg_tcache_max, tcache);
@@ -214,6 +212,126 @@ TEST_BEGIN(test_manpage_example)
}
TEST_END
+TEST_BEGIN(test_tcache_none)
+{
+ void *p0, *q, *p1;
+
+ test_skip_if(!config_tcache);
+
+ /* Allocate p and q. */
+ p0 = mallocx(42, 0);
+ assert_ptr_not_null(p0, "Unexpected mallocx() failure");
+ q = mallocx(42, 0);
+ assert_ptr_not_null(q, "Unexpected mallocx() failure");
+
+ /* Deallocate p and q, but bypass the tcache for q. */
+ dallocx(p0, 0);
+ dallocx(q, MALLOCX_TCACHE_NONE);
+
+ /* Make sure that tcache-based allocation returns p, not q. */
+ p1 = mallocx(42, 0);
+ assert_ptr_not_null(p1, "Unexpected mallocx() failure");
+ assert_ptr_eq(p0, p1, "Expected tcache to allocate cached region");
+
+ /* Clean up. */
+ dallocx(p1, MALLOCX_TCACHE_NONE);
+}
+TEST_END
+
+TEST_BEGIN(test_tcache)
+{
+#define NTCACHES 10
+ unsigned tis[NTCACHES];
+ void *ps[NTCACHES];
+ void *qs[NTCACHES];
+ unsigned i;
+ size_t sz, psz, qsz;
+
+ test_skip_if(!config_tcache);
+
+ psz = 42;
+ qsz = nallocx(psz, 0) + 1;
+
+ /* Create tcaches. */
+ for (i = 0; i < NTCACHES; i++) {
+ sz = sizeof(unsigned);
+ assert_d_eq(mallctl("tcache.create", &tis[i], &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure, i=%u", i);
+ }
+
+ /* Exercise tcache ID recycling. */
+ for (i = 0; i < NTCACHES; i++) {
+ assert_d_eq(mallctl("tcache.destroy", NULL, NULL, &tis[i],
+ sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
+ i);
+ }
+ for (i = 0; i < NTCACHES; i++) {
+ sz = sizeof(unsigned);
+ assert_d_eq(mallctl("tcache.create", &tis[i], &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure, i=%u", i);
+ }
+
+ /* Flush empty tcaches. */
+ for (i = 0; i < NTCACHES; i++) {
+ assert_d_eq(mallctl("tcache.flush", NULL, NULL, &tis[i],
+ sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
+ i);
+ }
+
+ /* Cache some allocations. */
+ for (i = 0; i < NTCACHES; i++) {
+ ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i]));
+ assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u",
+ i);
+ dallocx(ps[i], MALLOCX_TCACHE(tis[i]));
+
+ qs[i] = mallocx(qsz, MALLOCX_TCACHE(tis[i]));
+ assert_ptr_not_null(qs[i], "Unexpected mallocx() failure, i=%u",
+ i);
+ dallocx(qs[i], MALLOCX_TCACHE(tis[i]));
+ }
+
+ /* Verify that tcaches allocate cached regions. */
+ for (i = 0; i < NTCACHES; i++) {
+ void *p0 = ps[i];
+ ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i]));
+ assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u",
+ i);
+ assert_ptr_eq(ps[i], p0,
+ "Expected mallocx() to allocate cached region, i=%u", i);
+ }
+
+ /* Verify that reallocation uses cached regions. */
+ for (i = 0; i < NTCACHES; i++) {
+ void *q0 = qs[i];
+ qs[i] = rallocx(ps[i], qsz, MALLOCX_TCACHE(tis[i]));
+ assert_ptr_not_null(qs[i], "Unexpected rallocx() failure, i=%u",
+ i);
+ assert_ptr_eq(qs[i], q0,
+ "Expected rallocx() to allocate cached region, i=%u", i);
+ /* Avoid undefined behavior in case of test failure. */
+ if (qs[i] == NULL)
+ qs[i] = ps[i];
+ }
+ for (i = 0; i < NTCACHES; i++)
+ dallocx(qs[i], MALLOCX_TCACHE(tis[i]));
+
+ /* Flush some non-empty tcaches. */
+ for (i = 0; i < NTCACHES/2; i++) {
+ assert_d_eq(mallctl("tcache.flush", NULL, NULL, &tis[i],
+ sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
+ i);
+ }
+
+ /* Destroy tcaches. */
+ for (i = 0; i < NTCACHES; i++) {
+ assert_d_eq(mallctl("tcache.destroy", NULL, NULL, &tis[i],
+ sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
+ i);
+ }
+}
+TEST_END
+
TEST_BEGIN(test_thread_arena)
{
unsigned arena_old, arena_new, narenas;
@@ -231,6 +349,38 @@ TEST_BEGIN(test_thread_arena)
}
TEST_END
+TEST_BEGIN(test_arena_i_lg_dirty_mult)
+{
+ ssize_t lg_dirty_mult, orig_lg_dirty_mult, prev_lg_dirty_mult;
+ size_t sz = sizeof(ssize_t);
+
+ assert_d_eq(mallctl("arena.0.lg_dirty_mult", &orig_lg_dirty_mult, &sz,
+ NULL, 0), 0, "Unexpected mallctl() failure");
+
+ lg_dirty_mult = -2;
+ assert_d_eq(mallctl("arena.0.lg_dirty_mult", NULL, NULL,
+ &lg_dirty_mult, sizeof(ssize_t)), EFAULT,
+ "Unexpected mallctl() success");
+
+ lg_dirty_mult = (sizeof(size_t) << 3);
+ assert_d_eq(mallctl("arena.0.lg_dirty_mult", NULL, NULL,
+ &lg_dirty_mult, sizeof(ssize_t)), EFAULT,
+ "Unexpected mallctl() success");
+
+ for (prev_lg_dirty_mult = orig_lg_dirty_mult, lg_dirty_mult = -1;
+ lg_dirty_mult < (ssize_t)(sizeof(size_t) << 3); prev_lg_dirty_mult
+ = lg_dirty_mult, lg_dirty_mult++) {
+ ssize_t old_lg_dirty_mult;
+
+ assert_d_eq(mallctl("arena.0.lg_dirty_mult", &old_lg_dirty_mult,
+ &sz, &lg_dirty_mult, sizeof(ssize_t)), 0,
+ "Unexpected mallctl() failure");
+ assert_zd_eq(old_lg_dirty_mult, prev_lg_dirty_mult,
+ "Unexpected old arena.0.lg_dirty_mult");
+ }
+}
+TEST_END
+
TEST_BEGIN(test_arena_i_purge)
{
unsigned narenas;
@@ -255,27 +405,41 @@ TEST_BEGIN(test_arena_i_dss)
{
const char *dss_prec_old, *dss_prec_new;
size_t sz = sizeof(dss_prec_old);
+ size_t mib[3];
+ size_t miblen;
- dss_prec_new = "primary";
- assert_d_eq(mallctl("arena.0.dss", &dss_prec_old, &sz, &dss_prec_new,
+ miblen = sizeof(mib)/sizeof(size_t);
+ assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() error");
+
+ dss_prec_new = "disabled";
+ assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, &dss_prec_new,
sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure");
assert_str_ne(dss_prec_old, "primary",
"Unexpected default for dss precedence");
- assert_d_eq(mallctl("arena.0.dss", &dss_prec_new, &sz, &dss_prec_old,
+ assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_new, &sz, &dss_prec_old,
sizeof(dss_prec_old)), 0, "Unexpected mallctl() failure");
-}
-TEST_END
-TEST_BEGIN(test_arenas_purge)
-{
- unsigned arena = 0;
+ assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ assert_str_ne(dss_prec_old, "primary",
+ "Unexpected value for dss precedence");
- assert_d_eq(mallctl("arenas.purge", NULL, NULL, &arena, sizeof(arena)),
- 0, "Unexpected mallctl() failure");
+ mib[1] = narenas_total_get();
+ dss_prec_new = "disabled";
+ assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, &dss_prec_new,
+ sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure");
+ assert_str_ne(dss_prec_old, "primary",
+ "Unexpected default for dss precedence");
- assert_d_eq(mallctl("arenas.purge", NULL, NULL, NULL, 0), 0,
+ assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_new, &sz, &dss_prec_old,
+ sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure");
+
+ assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
+ assert_str_ne(dss_prec_old, "primary",
+ "Unexpected value for dss precedence");
}
TEST_END
@@ -287,7 +451,7 @@ TEST_BEGIN(test_arenas_initialized)
assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
{
- bool initialized[narenas];
+ VARIABLE_ARRAY(bool, initialized, narenas);
sz = narenas * sizeof(bool);
assert_d_eq(mallctl("arenas.initialized", initialized, &sz,
@@ -296,6 +460,38 @@ TEST_BEGIN(test_arenas_initialized)
}
TEST_END
+TEST_BEGIN(test_arenas_lg_dirty_mult)
+{
+ ssize_t lg_dirty_mult, orig_lg_dirty_mult, prev_lg_dirty_mult;
+ size_t sz = sizeof(ssize_t);
+
+ assert_d_eq(mallctl("arenas.lg_dirty_mult", &orig_lg_dirty_mult, &sz,
+ NULL, 0), 0, "Unexpected mallctl() failure");
+
+ lg_dirty_mult = -2;
+ assert_d_eq(mallctl("arenas.lg_dirty_mult", NULL, NULL,
+ &lg_dirty_mult, sizeof(ssize_t)), EFAULT,
+ "Unexpected mallctl() success");
+
+ lg_dirty_mult = (sizeof(size_t) << 3);
+ assert_d_eq(mallctl("arenas.lg_dirty_mult", NULL, NULL,
+ &lg_dirty_mult, sizeof(ssize_t)), EFAULT,
+ "Unexpected mallctl() success");
+
+ for (prev_lg_dirty_mult = orig_lg_dirty_mult, lg_dirty_mult = -1;
+ lg_dirty_mult < (ssize_t)(sizeof(size_t) << 3); prev_lg_dirty_mult =
+ lg_dirty_mult, lg_dirty_mult++) {
+ ssize_t old_lg_dirty_mult;
+
+ assert_d_eq(mallctl("arenas.lg_dirty_mult", &old_lg_dirty_mult,
+ &sz, &lg_dirty_mult, sizeof(ssize_t)), 0,
+ "Unexpected mallctl() failure");
+ assert_zd_eq(old_lg_dirty_mult, prev_lg_dirty_mult,
+ "Unexpected old arenas.lg_dirty_mult");
+ }
+}
+TEST_END
+
TEST_BEGIN(test_arenas_constants)
{
@@ -310,7 +506,8 @@ TEST_BEGIN(test_arenas_constants)
TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM);
TEST_ARENAS_CONSTANT(size_t, page, PAGE);
TEST_ARENAS_CONSTANT(unsigned, nbins, NBINS);
- TEST_ARENAS_CONSTANT(size_t, nlruns, nlclasses);
+ TEST_ARENAS_CONSTANT(unsigned, nlruns, nlclasses);
+ TEST_ARENAS_CONSTANT(unsigned, nhchunks, nhclasses);
#undef TEST_ARENAS_CONSTANT
}
@@ -346,12 +543,29 @@ TEST_BEGIN(test_arenas_lrun_constants)
assert_zu_eq(name, expected, "Incorrect "#name" size"); \
} while (0)
- TEST_ARENAS_LRUN_CONSTANT(size_t, size, (1 << LG_PAGE));
+ TEST_ARENAS_LRUN_CONSTANT(size_t, size, LARGE_MINCLASS);
#undef TEST_ARENAS_LRUN_CONSTANT
}
TEST_END
+TEST_BEGIN(test_arenas_hchunk_constants)
+{
+
+#define TEST_ARENAS_HCHUNK_CONSTANT(t, name, expected) do { \
+ t name; \
+ size_t sz = sizeof(t); \
+ assert_d_eq(mallctl("arenas.hchunk.0."#name, &name, &sz, NULL, \
+ 0), 0, "Unexpected mallctl() failure"); \
+ assert_zu_eq(name, expected, "Incorrect "#name" size"); \
+} while (0)
+
+ TEST_ARENAS_HCHUNK_CONSTANT(size_t, size, chunksize);
+
+#undef TEST_ARENAS_HCHUNK_CONSTANT
+}
+TEST_END
+
TEST_BEGIN(test_arenas_extend)
{
unsigned narenas_before, arena, narenas_after;
@@ -402,14 +616,18 @@ main(void)
test_mallctl_config,
test_mallctl_opt,
test_manpage_example,
+ test_tcache_none,
+ test_tcache,
test_thread_arena,
+ test_arena_i_lg_dirty_mult,
test_arena_i_purge,
test_arena_i_dss,
- test_arenas_purge,
test_arenas_initialized,
+ test_arenas_lg_dirty_mult,
test_arenas_constants,
test_arenas_bin_constants,
test_arenas_lrun_constants,
+ test_arenas_hchunk_constants,
test_arenas_extend,
test_stats_arenas));
}
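
A stand-alone sketch (not part of the diff, assuming an unprefixed jemalloc 4.x build) of the explicit thread-cache API the new mallctl tests exercise: create a cache, allocate through it with MALLOCX_TCACHE(), then flush and destroy it.

#include <jemalloc/jemalloc.h>

int
main(void)
{
    unsigned tci;
    size_t sz = sizeof(tci);
    void *p;

    /* Create an explicit thread cache. */
    if (mallctl("tcache.create", &tci, &sz, NULL, 0) != 0)
        return (1);

    /* Allocate and deallocate through that cache rather than the default. */
    p = mallocx(42, MALLOCX_TCACHE(tci));
    if (p != NULL)
        dallocx(p, MALLOCX_TCACHE(tci));

    /* Flush cached regions, then retire the cache ID. */
    (void)mallctl("tcache.flush", NULL, NULL, &tci, sizeof(tci));
    (void)mallctl("tcache.destroy", NULL, NULL, &tci, sizeof(tci));
    return (0);
}
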
diff --git a/deps/jemalloc/test/unit/math.c b/deps/jemalloc/test/unit/math.c
index a1b288ea1..ebec77a62 100644
--- a/deps/jemalloc/test/unit/math.c
+++ b/deps/jemalloc/test/unit/math.c
@@ -3,6 +3,12 @@
#define MAX_REL_ERR 1.0e-9
#define MAX_ABS_ERR 1.0e-9
+#include <float.h>
+
+#ifndef INFINITY
+#define INFINITY (DBL_MAX + DBL_MAX)
+#endif
+
static bool
double_eq_rel(double a, double b, double max_rel_err, double max_abs_err)
{
diff --git a/deps/jemalloc/test/unit/mq.c b/deps/jemalloc/test/unit/mq.c
index f57e96af1..bde2a480b 100644
--- a/deps/jemalloc/test/unit/mq.c
+++ b/deps/jemalloc/test/unit/mq.c
@@ -54,7 +54,7 @@ thd_sender_start(void *arg)
mq_msg_t *msg;
void *p;
p = mallocx(sizeof(mq_msg_t), 0);
- assert_ptr_not_null(p, "Unexpected allocm() failure");
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
msg = (mq_msg_t *)p;
mq_put(mq, msg);
}
@@ -85,6 +85,7 @@ TEST_END
int
main(void)
{
+
return (test(
test_mq_basic,
test_mq_threaded));
diff --git a/deps/jemalloc/test/unit/prof_accum.c b/deps/jemalloc/test/unit/prof_accum.c
index 050a8a7ee..fd229e0fd 100644
--- a/deps/jemalloc/test/unit/prof_accum.c
+++ b/deps/jemalloc/test/unit/prof_accum.c
@@ -1,4 +1,9 @@
-#include "prof_accum.h"
+#include "test/jemalloc_test.h"
+
+#define NTHREADS 4
+#define NALLOCS_PER_THREAD 50
+#define DUMP_INTERVAL 1
+#define BT_COUNT_CHECK_INTERVAL 5
#ifdef JEMALLOC_PROF
const char *malloc_conf =
@@ -20,7 +25,7 @@ static void *
alloc_from_permuted_backtrace(unsigned thd_ind, unsigned iteration)
{
- return (alloc_0(thd_ind*NALLOCS_PER_THREAD + iteration));
+ return (btalloc(1, thd_ind*NALLOCS_PER_THREAD + iteration));
}
static void *
diff --git a/deps/jemalloc/test/unit/prof_accum.h b/deps/jemalloc/test/unit/prof_accum.h
deleted file mode 100644
index 109d86b59..000000000
--- a/deps/jemalloc/test/unit/prof_accum.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#define NTHREADS 4
-#define NALLOCS_PER_THREAD 50
-#define DUMP_INTERVAL 1
-#define BT_COUNT_CHECK_INTERVAL 5
-
-#define alloc_n_proto(n) \
-void *alloc_##n(unsigned bits);
-alloc_n_proto(0)
-alloc_n_proto(1)
-
-#define alloc_n_gen(n) \
-void * \
-alloc_##n(unsigned bits) \
-{ \
- void *p; \
- \
- if (bits == 0) \
- p = mallocx(1, 0); \
- else { \
- switch (bits & 0x1U) { \
- case 0: \
- p = (alloc_0(bits >> 1)); \
- break; \
- case 1: \
- p = (alloc_1(bits >> 1)); \
- break; \
- default: not_reached(); \
- } \
- } \
- /* Intentionally sabotage tail call optimization. */ \
- assert_ptr_not_null(p, "Unexpected mallocx() failure"); \
- return (p); \
-}
diff --git a/deps/jemalloc/test/unit/prof_accum_a.c b/deps/jemalloc/test/unit/prof_accum_a.c
deleted file mode 100644
index 42ad521d8..000000000
--- a/deps/jemalloc/test/unit/prof_accum_a.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "prof_accum.h"
-
-alloc_n_gen(0)
diff --git a/deps/jemalloc/test/unit/prof_accum_b.c b/deps/jemalloc/test/unit/prof_accum_b.c
deleted file mode 100644
index 60d9dab6a..000000000
--- a/deps/jemalloc/test/unit/prof_accum_b.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "prof_accum.h"
-
-alloc_n_gen(1)
diff --git a/deps/jemalloc/test/unit/prof_active.c b/deps/jemalloc/test/unit/prof_active.c
new file mode 100644
index 000000000..814909572
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_active.c
@@ -0,0 +1,136 @@
+#include "test/jemalloc_test.h"
+
+#ifdef JEMALLOC_PROF
+const char *malloc_conf =
+ "prof:true,prof_thread_active_init:false,lg_prof_sample:0";
+#endif
+
+static void
+mallctl_bool_get(const char *name, bool expected, const char *func, int line)
+{
+ bool old;
+ size_t sz;
+
+ sz = sizeof(old);
+ assert_d_eq(mallctl(name, &old, &sz, NULL, 0), 0,
+ "%s():%d: Unexpected mallctl failure reading %s", func, line, name);
+ assert_b_eq(old, expected, "%s():%d: Unexpected %s value", func, line,
+ name);
+}
+
+static void
+mallctl_bool_set(const char *name, bool old_expected, bool val_new,
+ const char *func, int line)
+{
+ bool old;
+ size_t sz;
+
+ sz = sizeof(old);
+ assert_d_eq(mallctl(name, &old, &sz, &val_new, sizeof(val_new)), 0,
+ "%s():%d: Unexpected mallctl failure reading/writing %s", func,
+ line, name);
+ assert_b_eq(old, old_expected, "%s():%d: Unexpected %s value", func,
+ line, name);
+}
+
+static void
+mallctl_prof_active_get_impl(bool prof_active_old_expected, const char *func,
+ int line)
+{
+
+ mallctl_bool_get("prof.active", prof_active_old_expected, func, line);
+}
+#define mallctl_prof_active_get(a) \
+ mallctl_prof_active_get_impl(a, __func__, __LINE__)
+
+static void
+mallctl_prof_active_set_impl(bool prof_active_old_expected,
+ bool prof_active_new, const char *func, int line)
+{
+
+ mallctl_bool_set("prof.active", prof_active_old_expected,
+ prof_active_new, func, line);
+}
+#define mallctl_prof_active_set(a, b) \
+ mallctl_prof_active_set_impl(a, b, __func__, __LINE__)
+
+static void
+mallctl_thread_prof_active_get_impl(bool thread_prof_active_old_expected,
+ const char *func, int line)
+{
+
+ mallctl_bool_get("thread.prof.active", thread_prof_active_old_expected,
+ func, line);
+}
+#define mallctl_thread_prof_active_get(a) \
+ mallctl_thread_prof_active_get_impl(a, __func__, __LINE__)
+
+static void
+mallctl_thread_prof_active_set_impl(bool thread_prof_active_old_expected,
+ bool thread_prof_active_new, const char *func, int line)
+{
+
+ mallctl_bool_set("thread.prof.active", thread_prof_active_old_expected,
+ thread_prof_active_new, func, line);
+}
+#define mallctl_thread_prof_active_set(a, b) \
+ mallctl_thread_prof_active_set_impl(a, b, __func__, __LINE__)
+
+static void
+prof_sampling_probe_impl(bool expect_sample, const char *func, int line)
+{
+ void *p;
+ size_t expected_backtraces = expect_sample ? 1 : 0;
+
+ assert_zu_eq(prof_bt_count(), 0, "%s():%d: Expected 0 backtraces", func,
+ line);
+ p = mallocx(1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ assert_zu_eq(prof_bt_count(), expected_backtraces,
+ "%s():%d: Unexpected backtrace count", func, line);
+ dallocx(p, 0);
+}
+#define prof_sampling_probe(a) \
+ prof_sampling_probe_impl(a, __func__, __LINE__)
+
+TEST_BEGIN(test_prof_active)
+{
+
+ test_skip_if(!config_prof);
+
+ mallctl_prof_active_get(true);
+ mallctl_thread_prof_active_get(false);
+
+ mallctl_prof_active_set(true, true);
+ mallctl_thread_prof_active_set(false, false);
+ /* prof.active, !thread.prof.active. */
+ prof_sampling_probe(false);
+
+ mallctl_prof_active_set(true, false);
+ mallctl_thread_prof_active_set(false, false);
+ /* !prof.active, !thread.prof.active. */
+ prof_sampling_probe(false);
+
+ mallctl_prof_active_set(false, false);
+ mallctl_thread_prof_active_set(false, true);
+ /* !prof.active, thread.prof.active. */
+ prof_sampling_probe(false);
+
+ mallctl_prof_active_set(false, true);
+ mallctl_thread_prof_active_set(true, true);
+ /* prof.active, thread.prof.active. */
+ prof_sampling_probe(true);
+
+ /* Restore settings. */
+ mallctl_prof_active_set(true, true);
+ mallctl_thread_prof_active_set(true, false);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_prof_active));
+}
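
For orientation, a minimal sketch (not part of the diff) of the two switches test_prof_active toggles: allocations are sampled only while both prof.active and thread.prof.active are true; it assumes a profiling-enabled build run with MALLOC_CONF=prof:true.

#include <stdbool.h>
#include <jemalloc/jemalloc.h>

static void
set_bool(const char *name, bool val)
{

    /* Errors are ignored here for brevity; the unit test asserts on them. */
    (void)mallctl(name, NULL, NULL, &val, sizeof(val));
}

int
main(void)
{

    set_bool("prof.active", true);
    set_bool("thread.prof.active", true);  /* This thread is now sampled. */

    set_bool("thread.prof.active", false); /* Sampling off for this thread only. */
    return (0);
}
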
diff --git a/deps/jemalloc/test/unit/prof_gdump.c b/deps/jemalloc/test/unit/prof_gdump.c
index a00b1054f..a0e6ee921 100644
--- a/deps/jemalloc/test/unit/prof_gdump.c
+++ b/deps/jemalloc/test/unit/prof_gdump.c
@@ -21,8 +21,9 @@ prof_dump_open_intercept(bool propagate_err, const char *filename)
TEST_BEGIN(test_gdump)
{
- bool active;
- void *p, *q;
+ bool active, gdump, gdump_old;
+ void *p, *q, *r, *s;
+ size_t sz;
test_skip_if(!config_prof);
@@ -42,8 +43,32 @@ TEST_BEGIN(test_gdump)
assert_ptr_not_null(q, "Unexpected mallocx() failure");
assert_true(did_prof_dump_open, "Expected a profile dump");
+ gdump = false;
+ sz = sizeof(gdump_old);
+ assert_d_eq(mallctl("prof.gdump", &gdump_old, &sz, &gdump,
+ sizeof(gdump)), 0,
+ "Unexpected mallctl failure while disabling prof.gdump");
+ assert(gdump_old);
+ did_prof_dump_open = false;
+ r = mallocx(chunksize, 0);
+ assert_ptr_not_null(r, "Unexpected mallocx() failure");
+ assert_false(did_prof_dump_open, "Unexpected profile dump");
+
+ gdump = true;
+ sz = sizeof(gdump_old);
+ assert_d_eq(mallctl("prof.gdump", &gdump_old, &sz, &gdump,
+ sizeof(gdump)), 0,
+ "Unexpected mallctl failure while enabling prof.gdump");
+ assert(!gdump_old);
+ did_prof_dump_open = false;
+ s = mallocx(chunksize, 0);
+ assert_ptr_not_null(s, "Unexpected mallocx() failure");
+ assert_true(did_prof_dump_open, "Expected a profile dump");
+
dallocx(p, 0);
dallocx(q, 0);
+ dallocx(r, 0);
+ dallocx(s, 0);
}
TEST_END
diff --git a/deps/jemalloc/test/unit/prof_reset.c b/deps/jemalloc/test/unit/prof_reset.c
new file mode 100644
index 000000000..69983e5e5
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_reset.c
@@ -0,0 +1,302 @@
+#include "test/jemalloc_test.h"
+
+#ifdef JEMALLOC_PROF
+const char *malloc_conf =
+ "prof:true,prof_active:false,lg_prof_sample:0";
+#endif
+
+static int
+prof_dump_open_intercept(bool propagate_err, const char *filename)
+{
+ int fd;
+
+ fd = open("/dev/null", O_WRONLY);
+ assert_d_ne(fd, -1, "Unexpected open() failure");
+
+ return (fd);
+}
+
+static void
+set_prof_active(bool active)
+{
+
+ assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
+ 0, "Unexpected mallctl failure");
+}
+
+static size_t
+get_lg_prof_sample(void)
+{
+ size_t lg_prof_sample;
+ size_t sz = sizeof(size_t);
+
+ assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz, NULL, 0), 0,
+ "Unexpected mallctl failure while reading profiling sample rate");
+ return (lg_prof_sample);
+}
+
+static void
+do_prof_reset(size_t lg_prof_sample)
+{
+ assert_d_eq(mallctl("prof.reset", NULL, NULL,
+ &lg_prof_sample, sizeof(size_t)), 0,
+ "Unexpected mallctl failure while resetting profile data");
+ assert_zu_eq(lg_prof_sample, get_lg_prof_sample(),
+ "Expected profile sample rate change");
+}
+
+TEST_BEGIN(test_prof_reset_basic)
+{
+ size_t lg_prof_sample_orig, lg_prof_sample, lg_prof_sample_next;
+ size_t sz;
+ unsigned i;
+
+ test_skip_if(!config_prof);
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("opt.lg_prof_sample", &lg_prof_sample_orig, &sz,
+ NULL, 0), 0,
+ "Unexpected mallctl failure while reading profiling sample rate");
+ assert_zu_eq(lg_prof_sample_orig, 0,
+ "Unexpected profiling sample rate");
+ lg_prof_sample = get_lg_prof_sample();
+ assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
+ "Unexpected disagreement between \"opt.lg_prof_sample\" and "
+ "\"prof.lg_sample\"");
+
+ /* Test simple resets. */
+ for (i = 0; i < 2; i++) {
+ assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl failure while resetting profile data");
+ lg_prof_sample = get_lg_prof_sample();
+ assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
+ "Unexpected profile sample rate change");
+ }
+
+ /* Test resets with prof.lg_sample changes. */
+ lg_prof_sample_next = 1;
+ for (i = 0; i < 2; i++) {
+ do_prof_reset(lg_prof_sample_next);
+ lg_prof_sample = get_lg_prof_sample();
+ assert_zu_eq(lg_prof_sample, lg_prof_sample_next,
+ "Expected profile sample rate change");
+ lg_prof_sample_next = lg_prof_sample_orig;
+ }
+
+ /* Make sure the test code restored prof.lg_sample. */
+ lg_prof_sample = get_lg_prof_sample();
+ assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
+ "Unexpected disagreement between \"opt.lg_prof_sample\" and "
+ "\"prof.lg_sample\"");
+}
+TEST_END
+
+bool prof_dump_header_intercepted = false;
+prof_cnt_t cnt_all_copy = {0, 0, 0, 0};
+static bool
+prof_dump_header_intercept(bool propagate_err, const prof_cnt_t *cnt_all)
+{
+
+ prof_dump_header_intercepted = true;
+ memcpy(&cnt_all_copy, cnt_all, sizeof(prof_cnt_t));
+
+ return (false);
+}
+
+TEST_BEGIN(test_prof_reset_cleanup)
+{
+ void *p;
+ prof_dump_header_t *prof_dump_header_orig;
+
+ test_skip_if(!config_prof);
+
+ set_prof_active(true);
+
+ assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
+ p = mallocx(1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");
+
+ prof_dump_header_orig = prof_dump_header;
+ prof_dump_header = prof_dump_header_intercept;
+ assert_false(prof_dump_header_intercepted, "Unexpected intercept");
+
+ assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
+ 0, "Unexpected error while dumping heap profile");
+ assert_true(prof_dump_header_intercepted, "Expected intercept");
+ assert_u64_eq(cnt_all_copy.curobjs, 1, "Expected 1 allocation");
+
+ assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
+ "Unexpected error while resetting heap profile data");
+ assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
+ 0, "Unexpected error while dumping heap profile");
+ assert_u64_eq(cnt_all_copy.curobjs, 0, "Expected 0 allocations");
+ assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");
+
+ prof_dump_header = prof_dump_header_orig;
+
+ dallocx(p, 0);
+ assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
+
+ set_prof_active(false);
+}
+TEST_END
+
+#define NTHREADS 4
+#define NALLOCS_PER_THREAD (1U << 13)
+#define OBJ_RING_BUF_COUNT 1531
+#define RESET_INTERVAL (1U << 10)
+#define DUMP_INTERVAL 3677
+static void *
+thd_start(void *varg)
+{
+ unsigned thd_ind = *(unsigned *)varg;
+ unsigned i;
+ void *objs[OBJ_RING_BUF_COUNT];
+
+ memset(objs, 0, sizeof(objs));
+
+ for (i = 0; i < NALLOCS_PER_THREAD; i++) {
+ if (i % RESET_INTERVAL == 0) {
+ assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0),
+ 0, "Unexpected error while resetting heap profile "
+ "data");
+ }
+
+ if (i % DUMP_INTERVAL == 0) {
+ assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
+ 0, "Unexpected error while dumping heap profile");
+ }
+
+ {
+ void **pp = &objs[i % OBJ_RING_BUF_COUNT];
+ if (*pp != NULL) {
+ dallocx(*pp, 0);
+ *pp = NULL;
+ }
+ *pp = btalloc(1, thd_ind*NALLOCS_PER_THREAD + i);
+ assert_ptr_not_null(*pp,
+ "Unexpected btalloc() failure");
+ }
+ }
+
+ /* Clean up any remaining objects. */
+ for (i = 0; i < OBJ_RING_BUF_COUNT; i++) {
+ void **pp = &objs[i % OBJ_RING_BUF_COUNT];
+ if (*pp != NULL) {
+ dallocx(*pp, 0);
+ *pp = NULL;
+ }
+ }
+
+ return (NULL);
+}
+
+TEST_BEGIN(test_prof_reset)
+{
+ size_t lg_prof_sample_orig;
+ thd_t thds[NTHREADS];
+ unsigned thd_args[NTHREADS];
+ unsigned i;
+ size_t bt_count, tdata_count;
+
+ test_skip_if(!config_prof);
+
+ bt_count = prof_bt_count();
+ assert_zu_eq(bt_count, 0,
+ "Unexpected pre-existing tdata structures");
+ tdata_count = prof_tdata_count();
+
+ lg_prof_sample_orig = get_lg_prof_sample();
+ do_prof_reset(5);
+
+ set_prof_active(true);
+
+ for (i = 0; i < NTHREADS; i++) {
+ thd_args[i] = i;
+ thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
+ }
+ for (i = 0; i < NTHREADS; i++)
+ thd_join(thds[i], NULL);
+
+ assert_zu_eq(prof_bt_count(), bt_count,
+ "Unexpected bactrace count change");
+ assert_zu_eq(prof_tdata_count(), tdata_count,
+ "Unexpected remaining tdata structures");
+
+ set_prof_active(false);
+
+ do_prof_reset(lg_prof_sample_orig);
+}
+TEST_END
+#undef NTHREADS
+#undef NALLOCS_PER_THREAD
+#undef OBJ_RING_BUF_COUNT
+#undef RESET_INTERVAL
+#undef DUMP_INTERVAL
+
+/* Test sampling at the same allocation site across resets. */
+#define NITER 10
+TEST_BEGIN(test_xallocx)
+{
+ size_t lg_prof_sample_orig;
+ unsigned i;
+ void *ptrs[NITER];
+
+ test_skip_if(!config_prof);
+
+ lg_prof_sample_orig = get_lg_prof_sample();
+ set_prof_active(true);
+
+ /* Reset profiling. */
+ do_prof_reset(0);
+
+ for (i = 0; i < NITER; i++) {
+ void *p;
+ size_t sz, nsz;
+
+ /* Reset profiling. */
+ do_prof_reset(0);
+
+ /* Allocate small object (which will be promoted). */
+ p = ptrs[i] = mallocx(1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ /* Reset profiling. */
+ do_prof_reset(0);
+
+ /* Perform successful xallocx(). */
+ sz = sallocx(p, 0);
+ assert_zu_eq(xallocx(p, sz, 0, 0), sz,
+ "Unexpected xallocx() failure");
+
+ /* Perform unsuccessful xallocx(). */
+ nsz = nallocx(sz+1, 0);
+ assert_zu_eq(xallocx(p, nsz, 0, 0), sz,
+ "Unexpected xallocx() success");
+ }
+
+ for (i = 0; i < NITER; i++) {
+ /* dallocx. */
+ dallocx(ptrs[i], 0);
+ }
+
+ set_prof_active(false);
+ do_prof_reset(lg_prof_sample_orig);
+}
+TEST_END
+#undef NITER
+
+int
+main(void)
+{
+
+ /* Intercept dumping prior to running any tests. */
+ prof_dump_open = prof_dump_open_intercept;
+
+ return (test(
+ test_prof_reset_basic,
+ test_prof_reset_cleanup,
+ test_prof_reset,
+ test_xallocx));
+}
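
A minimal sketch (not part of the diff) of the prof.reset control tested above: writing a size_t lg_prof_sample both discards accumulated profile data and changes the average sample interval to 2^lg bytes, while a NULL write only discards; a profiling-enabled build is assumed.

#include <jemalloc/jemalloc.h>

int
main(void)
{
    size_t lg_sample = 5;

    /* Reset profile data and switch to an average sample interval of 2^5 bytes. */
    (void)mallctl("prof.reset", NULL, NULL, &lg_sample, sizeof(lg_sample));

    /* Reset again, leaving the sample interval unchanged. */
    (void)mallctl("prof.reset", NULL, NULL, NULL, 0);
    return (0);
}
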
diff --git a/deps/jemalloc/test/unit/prof_thread_name.c b/deps/jemalloc/test/unit/prof_thread_name.c
new file mode 100644
index 000000000..f501158d7
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_thread_name.c
@@ -0,0 +1,129 @@
+#include "test/jemalloc_test.h"
+
+#ifdef JEMALLOC_PROF
+const char *malloc_conf = "prof:true,prof_active:false";
+#endif
+
+static void
+mallctl_thread_name_get_impl(const char *thread_name_expected, const char *func,
+ int line)
+{
+ const char *thread_name_old;
+ size_t sz;
+
+ sz = sizeof(thread_name_old);
+ assert_d_eq(mallctl("thread.prof.name", &thread_name_old, &sz, NULL, 0),
+ 0, "%s():%d: Unexpected mallctl failure reading thread.prof.name",
+ func, line);
+ assert_str_eq(thread_name_old, thread_name_expected,
+ "%s():%d: Unexpected thread.prof.name value", func, line);
+}
+#define mallctl_thread_name_get(a) \
+ mallctl_thread_name_get_impl(a, __func__, __LINE__)
+
+static void
+mallctl_thread_name_set_impl(const char *thread_name, const char *func,
+ int line)
+{
+
+ assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name,
+ sizeof(thread_name)), 0,
+ "%s():%d: Unexpected mallctl failure reading thread.prof.name",
+ func, line);
+ mallctl_thread_name_get_impl(thread_name, func, line);
+}
+#define mallctl_thread_name_set(a) \
+ mallctl_thread_name_set_impl(a, __func__, __LINE__)
+
+TEST_BEGIN(test_prof_thread_name_validation)
+{
+ const char *thread_name;
+
+ test_skip_if(!config_prof);
+
+ mallctl_thread_name_get("");
+ mallctl_thread_name_set("hi there");
+
+ /* NULL input shouldn't be allowed. */
+ thread_name = NULL;
+ assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name,
+ sizeof(thread_name)), EFAULT,
+ "Unexpected mallctl result writing \"%s\" to thread.prof.name",
+ thread_name);
+
+ /* '\n' shouldn't be allowed. */
+ thread_name = "hi\nthere";
+ assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name,
+ sizeof(thread_name)), EFAULT,
+ "Unexpected mallctl result writing \"%s\" to thread.prof.name",
+ thread_name);
+
+ /* Simultaneous read/write shouldn't be allowed. */
+ {
+ const char *thread_name_old;
+ size_t sz;
+
+ sz = sizeof(thread_name_old);
+ assert_d_eq(mallctl("thread.prof.name", &thread_name_old, &sz,
+ &thread_name, sizeof(thread_name)), EPERM,
+ "Unexpected mallctl result writing \"%s\" to "
+ "thread.prof.name", thread_name);
+ }
+
+ mallctl_thread_name_set("");
+}
+TEST_END
+
+#define NTHREADS 4
+#define NRESET 25
+static void *
+thd_start(void *varg)
+{
+ unsigned thd_ind = *(unsigned *)varg;
+ char thread_name[16] = "";
+ unsigned i;
+
+ malloc_snprintf(thread_name, sizeof(thread_name), "thread %u", thd_ind);
+
+ mallctl_thread_name_get("");
+ mallctl_thread_name_set(thread_name);
+
+ for (i = 0; i < NRESET; i++) {
+ assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
+ "Unexpected error while resetting heap profile data");
+ mallctl_thread_name_get(thread_name);
+ }
+
+ mallctl_thread_name_set(thread_name);
+ mallctl_thread_name_set("");
+
+ return (NULL);
+}
+
+TEST_BEGIN(test_prof_thread_name_threaded)
+{
+ thd_t thds[NTHREADS];
+ unsigned thd_args[NTHREADS];
+ unsigned i;
+
+ test_skip_if(!config_prof);
+
+ for (i = 0; i < NTHREADS; i++) {
+ thd_args[i] = i;
+ thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
+ }
+ for (i = 0; i < NTHREADS; i++)
+ thd_join(thds[i], NULL);
+}
+TEST_END
+#undef NTHREADS
+#undef NRESET
+
+int
+main(void)
+{
+
+ return (test(
+ test_prof_thread_name_validation,
+ test_prof_thread_name_threaded));
+}
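
A minimal sketch (not part of the diff) of the thread.prof.name control these tests cover: the value is written and read as a const char *, and the test above asserts that NULL and names containing '\n' are rejected; a profiling-enabled build is assumed.

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
    const char *name = "worker 0";
    const char *cur;
    size_t sz = sizeof(cur);

    /* Label this thread in heap profile dumps. */
    (void)mallctl("thread.prof.name", NULL, NULL, &name, sizeof(name));

    /* Read the label back. */
    if (mallctl("thread.prof.name", &cur, &sz, NULL, 0) == 0)
        printf("thread name: %s\n", cur);
    return (0);
}
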
diff --git a/deps/jemalloc/test/unit/rb.c b/deps/jemalloc/test/unit/rb.c
index b737485a7..b38eb0e33 100644
--- a/deps/jemalloc/test/unit/rb.c
+++ b/deps/jemalloc/test/unit/rb.c
@@ -5,7 +5,7 @@
for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0; \
rbp_bh_t != &(a_rbt)->rbt_nil; \
rbp_bh_t = rbtn_left_get(a_type, a_field, rbp_bh_t)) { \
- if (rbtn_red_get(a_type, a_field, rbp_bh_t) == false) { \
+ if (!rbtn_red_get(a_type, a_field, rbp_bh_t)) { \
(r_height)++; \
} \
} \
@@ -49,6 +49,7 @@ TEST_BEGIN(test_rb_empty)
tree_new(&tree);
+ assert_true(tree_empty(&tree), "Tree should be empty");
assert_ptr_null(tree_first(&tree), "Unexpected node");
assert_ptr_null(tree_last(&tree), "Unexpected node");
@@ -74,7 +75,7 @@ tree_recurse(node_t *node, unsigned black_height, unsigned black_depth,
node_t *left_node = rbtn_left_get(node_t, link, node);
node_t *right_node = rbtn_right_get(node_t, link, node);
- if (rbtn_red_get(node_t, link, node) == false)
+ if (!rbtn_red_get(node_t, link, node))
black_depth++;
/* Red nodes must be interleaved with black nodes. */
@@ -265,6 +266,8 @@ TEST_BEGIN(test_rb_random)
assert_u_eq(tree_iterate_reverse(&tree), k+1,
"Unexpected node iteration count");
+ assert_false(tree_empty(&tree),
+ "Tree should not be empty");
assert_ptr_not_null(tree_first(&tree),
"Tree should not be empty");
assert_ptr_not_null(tree_last(&tree),
diff --git a/deps/jemalloc/test/unit/rtree.c b/deps/jemalloc/test/unit/rtree.c
index 5463055fe..b54b3e86f 100644
--- a/deps/jemalloc/test/unit/rtree.c
+++ b/deps/jemalloc/test/unit/rtree.c
@@ -1,14 +1,30 @@
#include "test/jemalloc_test.h"
+static rtree_node_elm_t *
+node_alloc(size_t nelms)
+{
+
+ return ((rtree_node_elm_t *)calloc(nelms, sizeof(rtree_node_elm_t)));
+}
+
+static void
+node_dalloc(rtree_node_elm_t *node)
+{
+
+ free(node);
+}
+
TEST_BEGIN(test_rtree_get_empty)
{
unsigned i;
for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
- rtree_t *rtree = rtree_new(i, imalloc, idalloc);
- assert_u_eq(rtree_get(rtree, 0), 0,
+ rtree_t rtree;
+ assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc),
+ "Unexpected rtree_new() failure");
+ assert_ptr_null(rtree_get(&rtree, 0, false),
"rtree_get() should return NULL for empty tree");
- rtree_delete(rtree);
+ rtree_delete(&rtree);
}
}
TEST_END
@@ -16,19 +32,24 @@ TEST_END
TEST_BEGIN(test_rtree_extrema)
{
unsigned i;
+ extent_node_t node_a, node_b;
for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
- rtree_t *rtree = rtree_new(i, imalloc, idalloc);
+ rtree_t rtree;
+ assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc),
+ "Unexpected rtree_new() failure");
- rtree_set(rtree, 0, 1);
- assert_u_eq(rtree_get(rtree, 0), 1,
+ assert_false(rtree_set(&rtree, 0, &node_a),
+ "Unexpected rtree_set() failure");
+ assert_ptr_eq(rtree_get(&rtree, 0, true), &node_a,
"rtree_get() should return previously set value");
- rtree_set(rtree, ~((uintptr_t)0), 1);
- assert_u_eq(rtree_get(rtree, ~((uintptr_t)0)), 1,
+ assert_false(rtree_set(&rtree, ~((uintptr_t)0), &node_b),
+ "Unexpected rtree_set() failure");
+ assert_ptr_eq(rtree_get(&rtree, ~((uintptr_t)0), true), &node_b,
"rtree_get() should return previously set value");
- rtree_delete(rtree);
+ rtree_delete(&rtree);
}
}
TEST_END
@@ -40,26 +61,32 @@ TEST_BEGIN(test_rtree_bits)
for (i = 1; i < (sizeof(uintptr_t) << 3); i++) {
uintptr_t keys[] = {0, 1,
(((uintptr_t)1) << (sizeof(uintptr_t)*8-i)) - 1};
- rtree_t *rtree = rtree_new(i, imalloc, idalloc);
+ extent_node_t node;
+ rtree_t rtree;
+
+ assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc),
+ "Unexpected rtree_new() failure");
for (j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) {
- rtree_set(rtree, keys[j], 1);
+ assert_false(rtree_set(&rtree, keys[j], &node),
+ "Unexpected rtree_set() failure");
for (k = 0; k < sizeof(keys)/sizeof(uintptr_t); k++) {
- assert_u_eq(rtree_get(rtree, keys[k]), 1,
- "rtree_get() should return previously set "
- "value and ignore insignificant key bits; "
- "i=%u, j=%u, k=%u, set key=%#"PRIxPTR", "
- "get key=%#"PRIxPTR, i, j, k, keys[j],
- keys[k]);
+ assert_ptr_eq(rtree_get(&rtree, keys[k], true),
+ &node, "rtree_get() should return "
+ "previously set value and ignore "
+ "insignificant key bits; i=%u, j=%u, k=%u, "
+ "set key=%#"FMTxPTR", get key=%#"FMTxPTR, i,
+ j, k, keys[j], keys[k]);
}
- assert_u_eq(rtree_get(rtree,
- (((uintptr_t)1) << (sizeof(uintptr_t)*8-i))), 0,
+ assert_ptr_null(rtree_get(&rtree,
+ (((uintptr_t)1) << (sizeof(uintptr_t)*8-i)), false),
"Only leftmost rtree leaf should be set; "
"i=%u, j=%u", i, j);
- rtree_set(rtree, keys[j], 0);
+ assert_false(rtree_set(&rtree, keys[j], NULL),
+ "Unexpected rtree_set() failure");
}
- rtree_delete(rtree);
+ rtree_delete(&rtree);
}
}
TEST_END
@@ -68,37 +95,43 @@ TEST_BEGIN(test_rtree_random)
{
unsigned i;
sfmt_t *sfmt;
-#define NSET 100
+#define NSET 16
#define SEED 42
sfmt = init_gen_rand(SEED);
for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
- rtree_t *rtree = rtree_new(i, imalloc, idalloc);
uintptr_t keys[NSET];
+ extent_node_t node;
unsigned j;
+ rtree_t rtree;
+
+ assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc),
+ "Unexpected rtree_new() failure");
for (j = 0; j < NSET; j++) {
keys[j] = (uintptr_t)gen_rand64(sfmt);
- rtree_set(rtree, keys[j], 1);
- assert_u_eq(rtree_get(rtree, keys[j]), 1,
+ assert_false(rtree_set(&rtree, keys[j], &node),
+ "Unexpected rtree_set() failure");
+ assert_ptr_eq(rtree_get(&rtree, keys[j], true), &node,
"rtree_get() should return previously set value");
}
for (j = 0; j < NSET; j++) {
- assert_u_eq(rtree_get(rtree, keys[j]), 1,
+ assert_ptr_eq(rtree_get(&rtree, keys[j], true), &node,
"rtree_get() should return previously set value");
}
for (j = 0; j < NSET; j++) {
- rtree_set(rtree, keys[j], 0);
- assert_u_eq(rtree_get(rtree, keys[j]), 0,
+ assert_false(rtree_set(&rtree, keys[j], NULL),
+ "Unexpected rtree_set() failure");
+ assert_ptr_null(rtree_get(&rtree, keys[j], true),
"rtree_get() should return previously set value");
}
for (j = 0; j < NSET; j++) {
- assert_u_eq(rtree_get(rtree, keys[j]), 0,
+ assert_ptr_null(rtree_get(&rtree, keys[j], true),
"rtree_get() should return previously set value");
}
- rtree_delete(rtree);
+ rtree_delete(&rtree);
}
fini_gen_rand(sfmt);
#undef NSET
diff --git a/deps/jemalloc/test/unit/size_classes.c b/deps/jemalloc/test/unit/size_classes.c
new file mode 100644
index 000000000..d3aaebd77
--- /dev/null
+++ b/deps/jemalloc/test/unit/size_classes.c
@@ -0,0 +1,89 @@
+#include "test/jemalloc_test.h"
+
+static size_t
+get_max_size_class(void)
+{
+ unsigned nhchunks;
+ size_t mib[4];
+ size_t sz, miblen, max_size_class;
+
+ sz = sizeof(unsigned);
+ assert_d_eq(mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0), 0,
+ "Unexpected mallctl() error");
+
+ miblen = sizeof(mib) / sizeof(size_t);
+ assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() error");
+ mib[2] = nhchunks - 1;
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctlbymib(mib, miblen, &max_size_class, &sz, NULL, 0), 0,
+ "Unexpected mallctlbymib() error");
+
+ return (max_size_class);
+}
+
+TEST_BEGIN(test_size_classes)
+{
+ size_t size_class, max_size_class;
+ szind_t index, max_index;
+
+ max_size_class = get_max_size_class();
+ max_index = size2index(max_size_class);
+
+ for (index = 0, size_class = index2size(index); index < max_index ||
+ size_class < max_size_class; index++, size_class =
+ index2size(index)) {
+ assert_true(index < max_index,
+ "Loop conditionals should be equivalent; index=%u, "
+ "size_class=%zu (%#zx)", index, size_class, size_class);
+ assert_true(size_class < max_size_class,
+ "Loop conditionals should be equivalent; index=%u, "
+ "size_class=%zu (%#zx)", index, size_class, size_class);
+
+ assert_u_eq(index, size2index(size_class),
+ "size2index() does not reverse index2size(): index=%u -->"
+ " size_class=%zu --> index=%u --> size_class=%zu", index,
+ size_class, size2index(size_class),
+ index2size(size2index(size_class)));
+ assert_zu_eq(size_class, index2size(size2index(size_class)),
+ "index2size() does not reverse size2index(): index=%u -->"
+ " size_class=%zu --> index=%u --> size_class=%zu", index,
+ size_class, size2index(size_class),
+ index2size(size2index(size_class)));
+
+ assert_u_eq(index+1, size2index(size_class+1),
+ "Next size_class does not round up properly");
+
+ assert_zu_eq(size_class, (index > 0) ?
+ s2u(index2size(index-1)+1) : s2u(1),
+ "s2u() does not round up to size class");
+ assert_zu_eq(size_class, s2u(size_class-1),
+ "s2u() does not round up to size class");
+ assert_zu_eq(size_class, s2u(size_class),
+ "s2u() does not compute same size class");
+ assert_zu_eq(s2u(size_class+1), index2size(index+1),
+ "s2u() does not round up to next size class");
+ }
+
+ assert_u_eq(index, size2index(index2size(index)),
+ "size2index() does not reverse index2size()");
+ assert_zu_eq(max_size_class, index2size(size2index(max_size_class)),
+ "index2size() does not reverse size2index()");
+
+ assert_zu_eq(size_class, s2u(index2size(index-1)+1),
+ "s2u() does not round up to size class");
+ assert_zu_eq(size_class, s2u(size_class-1),
+ "s2u() does not round up to size class");
+ assert_zu_eq(size_class, s2u(size_class),
+ "s2u() does not compute same size class");
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_size_classes));
+}
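
size2index()/index2size() and s2u() are internal, but the same rounding is visible through the public API; a minimal sketch (not part of the diff) using nallocx(), which reports the size class a request would be rounded up to.

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
    size_t req;

    /* Each request size maps onto the smallest size class that can hold it. */
    for (req = 1; req <= 5000; req += 499)
        printf("request %4zu -> size class %4zu\n", req, nallocx(req, 0));
    return (0);
}
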
diff --git a/deps/jemalloc/test/unit/stats.c b/deps/jemalloc/test/unit/stats.c
index 03a55c7fd..8e4bc631e 100644
--- a/deps/jemalloc/test/unit/stats.c
+++ b/deps/jemalloc/test/unit/stats.c
@@ -3,7 +3,7 @@
TEST_BEGIN(test_stats_summary)
{
size_t *cactive;
- size_t sz, allocated, active, mapped;
+ size_t sz, allocated, active, resident, mapped;
int expected = config_stats ? 0 : ENOENT;
sz = sizeof(cactive);
@@ -15,6 +15,8 @@ TEST_BEGIN(test_stats_summary)
expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("stats.active", &active, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.resident", &resident, &sz, NULL, 0),
+ expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("stats.mapped", &mapped, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
@@ -23,34 +25,10 @@ TEST_BEGIN(test_stats_summary)
"active should be no larger than cactive");
assert_zu_le(allocated, active,
"allocated should be no larger than active");
- assert_zu_le(active, mapped,
- "active should be no larger than mapped");
- }
-}
-TEST_END
-
-TEST_BEGIN(test_stats_chunks)
-{
- size_t current, high;
- uint64_t total;
- size_t sz;
- int expected = config_stats ? 0 : ENOENT;
-
- sz = sizeof(size_t);
- assert_d_eq(mallctl("stats.chunks.current", &current, &sz, NULL, 0),
- expected, "Unexpected mallctl() result");
- sz = sizeof(uint64_t);
- assert_d_eq(mallctl("stats.chunks.total", &total, &sz, NULL, 0),
- expected, "Unexpected mallctl() result");
- sz = sizeof(size_t);
- assert_d_eq(mallctl("stats.chunks.high", &high, &sz, NULL, 0), expected,
- "Unexpected mallctl() result");
-
- if (config_stats) {
- assert_zu_le(current, high,
- "current should be no larger than high");
- assert_u64_le((uint64_t)high, total,
- "high should be no larger than total");
+ assert_zu_lt(active, resident,
+ "active should be less than resident");
+ assert_zu_lt(active, mapped,
+ "active should be less than mapped");
}
}
TEST_END
@@ -60,30 +38,34 @@ TEST_BEGIN(test_stats_huge)
void *p;
uint64_t epoch;
size_t allocated;
- uint64_t nmalloc, ndalloc;
+ uint64_t nmalloc, ndalloc, nrequests;
size_t sz;
int expected = config_stats ? 0 : ENOENT;
- p = mallocx(arena_maxclass+1, 0);
+ p = mallocx(large_maxclass+1, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
"Unexpected mallctl() failure");
sz = sizeof(size_t);
- assert_d_eq(mallctl("stats.huge.allocated", &allocated, &sz, NULL, 0),
- expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.huge.allocated", &allocated, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(uint64_t);
- assert_d_eq(mallctl("stats.huge.nmalloc", &nmalloc, &sz, NULL, 0),
- expected, "Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.huge.ndalloc", &ndalloc, &sz, NULL, 0),
- expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz, NULL,
+ 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz, NULL,
+ 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.huge.nrequests", &nrequests, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
if (config_stats) {
assert_zu_gt(allocated, 0,
"allocated should be greater than zero");
assert_u64_ge(nmalloc, ndalloc,
"nmalloc should be at least as large as ndalloc");
+ assert_u64_le(nmalloc, nrequests,
+ "nmalloc should no larger than nrequests");
}
dallocx(p, 0);
@@ -93,7 +75,7 @@ TEST_END
TEST_BEGIN(test_stats_arenas_summary)
{
unsigned arena;
- void *small, *large;
+ void *little, *large, *huge;
uint64_t epoch;
size_t sz;
int expected = config_stats ? 0 : ENOENT;
@@ -104,10 +86,12 @@ TEST_BEGIN(test_stats_arenas_summary)
assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
0, "Unexpected mallctl() failure");
- small = mallocx(SMALL_MAXCLASS, 0);
- assert_ptr_not_null(small, "Unexpected mallocx() failure");
- large = mallocx(arena_maxclass, 0);
+ little = mallocx(SMALL_MAXCLASS, 0);
+ assert_ptr_not_null(little, "Unexpected mallocx() failure");
+ large = mallocx(large_maxclass, 0);
assert_ptr_not_null(large, "Unexpected mallocx() failure");
+ huge = mallocx(chunksize, 0);
+ assert_ptr_not_null(huge, "Unexpected mallocx() failure");
assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl() failure");
@@ -133,8 +117,9 @@ TEST_BEGIN(test_stats_arenas_summary)
"nmadvise should be no greater than purged");
}
- dallocx(small, 0);
+ dallocx(little, 0);
dallocx(large, 0);
+ dallocx(huge, 0);
}
TEST_END
@@ -215,7 +200,7 @@ TEST_BEGIN(test_stats_arenas_large)
assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
0, "Unexpected mallctl() failure");
- p = mallocx(arena_maxclass, 0);
+ p = mallocx(large_maxclass, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
@@ -247,11 +232,51 @@ TEST_BEGIN(test_stats_arenas_large)
}
TEST_END
+TEST_BEGIN(test_stats_arenas_huge)
+{
+ unsigned arena;
+ void *p;
+ size_t sz, allocated;
+ uint64_t epoch, nmalloc, ndalloc;
+ int expected = config_stats ? 0 : ENOENT;
+
+ arena = 0;
+ assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+ 0, "Unexpected mallctl() failure");
+
+ p = mallocx(chunksize, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+ "Unexpected mallctl() failure");
+
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.arenas.0.huge.allocated", &allocated, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+
+ if (config_stats) {
+ assert_zu_gt(allocated, 0,
+ "allocated should be greater than zero");
+ assert_zu_gt(nmalloc, 0,
+ "nmalloc should be greater than zero");
+ assert_zu_ge(nmalloc, ndalloc,
+ "nmalloc should be at least as large as ndalloc");
+ }
+
+ dallocx(p, 0);
+}
+TEST_END
+
TEST_BEGIN(test_stats_arenas_bins)
{
unsigned arena;
void *p;
- size_t sz, allocated, curruns;
+ size_t sz, curruns, curregs;
uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes;
uint64_t nruns, nreruns;
int expected = config_stats ? 0 : ENOENT;
@@ -269,9 +294,6 @@ TEST_BEGIN(test_stats_arenas_bins)
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
"Unexpected mallctl() failure");
- sz = sizeof(size_t);
- assert_d_eq(mallctl("stats.arenas.0.bins.0.allocated", &allocated, &sz,
- NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(uint64_t);
assert_d_eq(mallctl("stats.arenas.0.bins.0.nmalloc", &nmalloc, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
@@ -279,7 +301,11 @@ TEST_BEGIN(test_stats_arenas_bins)
NULL, 0), expected, "Unexpected mallctl() result");
assert_d_eq(mallctl("stats.arenas.0.bins.0.nrequests", &nrequests, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.curregs", &curregs, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ sz = sizeof(uint64_t);
assert_d_eq(mallctl("stats.arenas.0.bins.0.nfills", &nfills, &sz,
NULL, 0), config_tcache ? expected : ENOENT,
"Unexpected mallctl() result");
@@ -296,14 +322,14 @@ TEST_BEGIN(test_stats_arenas_bins)
NULL, 0), expected, "Unexpected mallctl() result");
if (config_stats) {
- assert_zu_gt(allocated, 0,
- "allocated should be greater than zero");
assert_u64_gt(nmalloc, 0,
"nmalloc should be greater than zero");
assert_u64_ge(nmalloc, ndalloc,
"nmalloc should be at least as large as ndalloc");
assert_u64_gt(nrequests, 0,
"nrequests should be greater than zero");
+ assert_zu_gt(curregs, 0,
+ "allocated should be greater than zero");
if (config_tcache) {
assert_u64_gt(nfills, 0,
"At least one fill should have occurred");
@@ -332,7 +358,7 @@ TEST_BEGIN(test_stats_arenas_lruns)
assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
0, "Unexpected mallctl() failure");
- p = mallocx(SMALL_MAXCLASS+1, 0);
+ p = mallocx(LARGE_MINCLASS, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
@@ -364,17 +390,58 @@ TEST_BEGIN(test_stats_arenas_lruns)
}
TEST_END
+TEST_BEGIN(test_stats_arenas_hchunks)
+{
+ unsigned arena;
+ void *p;
+ uint64_t epoch, nmalloc, ndalloc;
+ size_t curhchunks, sz;
+ int expected = config_stats ? 0 : ENOENT;
+
+ arena = 0;
+ assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+ 0, "Unexpected mallctl() failure");
+
+ p = mallocx(chunksize, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+ "Unexpected mallctl() failure");
+
+ sz = sizeof(uint64_t);
+ assert_d_eq(mallctl("stats.arenas.0.hchunks.0.nmalloc", &nmalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.hchunks.0.ndalloc", &ndalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ sz = sizeof(size_t);
+ assert_d_eq(mallctl("stats.arenas.0.hchunks.0.curhchunks", &curhchunks,
+ &sz, NULL, 0), expected, "Unexpected mallctl() result");
+
+ if (config_stats) {
+ assert_u64_gt(nmalloc, 0,
+ "nmalloc should be greater than zero");
+ assert_u64_ge(nmalloc, ndalloc,
+ "nmalloc should be at least as large as ndalloc");
+ assert_u64_gt(curhchunks, 0,
+ "At least one chunk should be currently allocated");
+ }
+
+ dallocx(p, 0);
+}
+TEST_END
+
int
main(void)
{
return (test(
test_stats_summary,
- test_stats_chunks,
test_stats_huge,
test_stats_arenas_summary,
test_stats_arenas_small,
test_stats_arenas_large,
+ test_stats_arenas_huge,
test_stats_arenas_bins,
- test_stats_arenas_lruns));
+ test_stats_arenas_lruns,
+ test_stats_arenas_hchunks));
}
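
A minimal sketch (not part of the diff) of the read pattern these stats tests rely on: write the epoch to refresh jemalloc's statistics snapshot, then read the new totals such as stats.resident and the per-arena huge counters; a build with --enable-stats is assumed.

#include <stdio.h>
#include <stdint.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
    uint64_t epoch = 1;
    size_t resident, huge_allocated, sz;

    /* Refresh the statistics snapshot. */
    (void)mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));

    sz = sizeof(size_t);
    if (mallctl("stats.resident", &resident, &sz, NULL, 0) == 0)
        printf("resident: %zu\n", resident);
    if (mallctl("stats.arenas.0.huge.allocated", &huge_allocated, &sz,
        NULL, 0) == 0)
        printf("arena 0 huge allocated: %zu\n", huge_allocated);
    return (0);
}
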
diff --git a/deps/jemalloc/test/unit/tsd.c b/deps/jemalloc/test/unit/tsd.c
index f421c1a3c..8be787fda 100644
--- a/deps/jemalloc/test/unit/tsd.c
+++ b/deps/jemalloc/test/unit/tsd.c
@@ -6,29 +6,64 @@ typedef unsigned int data_t;
static bool data_cleanup_executed;
+malloc_tsd_types(data_, data_t)
+malloc_tsd_protos(, data_, data_t)
+
void
data_cleanup(void *arg)
{
data_t *data = (data_t *)arg;
- assert_x_eq(*data, THREAD_DATA,
- "Argument passed into cleanup function should match tsd value");
+ if (!data_cleanup_executed) {
+ assert_x_eq(*data, THREAD_DATA,
+ "Argument passed into cleanup function should match tsd "
+ "value");
+ }
data_cleanup_executed = true;
+
+ /*
+ * Allocate during cleanup for two rounds, in order to ensure that
+ * jemalloc's internal tsd reinitialization happens.
+ */
+ switch (*data) {
+ case THREAD_DATA:
+ *data = 1;
+ data_tsd_set(data);
+ break;
+ case 1:
+ *data = 2;
+ data_tsd_set(data);
+ break;
+ case 2:
+ return;
+ default:
+ not_reached();
+ }
+
+ {
+ void *p = mallocx(1, 0);
+ assert_ptr_not_null(p, "Unexpeced mallocx() failure");
+ dallocx(p, 0);
+ }
}
-malloc_tsd_protos(, data, data_t)
-malloc_tsd_externs(data, data_t)
+malloc_tsd_externs(data_, data_t)
#define DATA_INIT 0x12345678
-malloc_tsd_data(, data, data_t, DATA_INIT)
-malloc_tsd_funcs(, data, data_t, DATA_INIT, data_cleanup)
+malloc_tsd_data(, data_, data_t, DATA_INIT)
+malloc_tsd_funcs(, data_, data_t, DATA_INIT, data_cleanup)
static void *
thd_start(void *arg)
{
data_t d = (data_t)(uintptr_t)arg;
+ void *p;
+
assert_x_eq(*data_tsd_get(), DATA_INIT,
"Initial tsd get should return initialization value");
+ p = malloc(1);
+ assert_ptr_not_null(p, "Unexpected malloc() failure");
+
data_tsd_set(&d);
assert_x_eq(*data_tsd_get(), d,
"After tsd set, tsd get should return value that was set");
@@ -37,6 +72,7 @@ thd_start(void *arg)
assert_x_eq(*data_tsd_get(), (data_t)(uintptr_t)arg,
"Resetting local data should have no effect on tsd");
+ free(p);
return (NULL);
}
diff --git a/deps/jemalloc/test/unit/util.c b/deps/jemalloc/test/unit/util.c
index dc3cfe8a9..8ab39a458 100644
--- a/deps/jemalloc/test/unit/util.c
+++ b/deps/jemalloc/test/unit/util.c
@@ -52,8 +52,8 @@ TEST_BEGIN(test_malloc_strtoumax)
const char *expected_errno_name;
uintmax_t expected_x;
};
-#define ERR(e) e, #e
-#define UMAX(x) ((uintmax_t)x##ULL)
+#define ERR(e) e, #e
+#define KUMAX(x) ((uintmax_t)x##ULL)
struct test_s tests[] = {
{"0", "0", -1, ERR(EINVAL), UINTMAX_MAX},
{"0", "0", 1, ERR(EINVAL), UINTMAX_MAX},
@@ -64,51 +64,51 @@ TEST_BEGIN(test_malloc_strtoumax)
{"++3", "++3", 0, ERR(EINVAL), UINTMAX_MAX},
{"-", "-", 0, ERR(EINVAL), UINTMAX_MAX},
- {"42", "", 0, ERR(0), UMAX(42)},
- {"+42", "", 0, ERR(0), UMAX(42)},
- {"-42", "", 0, ERR(0), UMAX(-42)},
- {"042", "", 0, ERR(0), UMAX(042)},
- {"+042", "", 0, ERR(0), UMAX(042)},
- {"-042", "", 0, ERR(0), UMAX(-042)},
- {"0x42", "", 0, ERR(0), UMAX(0x42)},
- {"+0x42", "", 0, ERR(0), UMAX(0x42)},
- {"-0x42", "", 0, ERR(0), UMAX(-0x42)},
-
- {"0", "", 0, ERR(0), UMAX(0)},
- {"1", "", 0, ERR(0), UMAX(1)},
-
- {"42", "", 0, ERR(0), UMAX(42)},
- {" 42", "", 0, ERR(0), UMAX(42)},
- {"42 ", " ", 0, ERR(0), UMAX(42)},
- {"0x", "x", 0, ERR(0), UMAX(0)},
- {"42x", "x", 0, ERR(0), UMAX(42)},
-
- {"07", "", 0, ERR(0), UMAX(7)},
- {"010", "", 0, ERR(0), UMAX(8)},
- {"08", "8", 0, ERR(0), UMAX(0)},
- {"0_", "_", 0, ERR(0), UMAX(0)},
-
- {"0x", "x", 0, ERR(0), UMAX(0)},
- {"0X", "X", 0, ERR(0), UMAX(0)},
- {"0xg", "xg", 0, ERR(0), UMAX(0)},
- {"0XA", "", 0, ERR(0), UMAX(10)},
-
- {"010", "", 10, ERR(0), UMAX(10)},
- {"0x3", "x3", 10, ERR(0), UMAX(0)},
-
- {"12", "2", 2, ERR(0), UMAX(1)},
- {"78", "8", 8, ERR(0), UMAX(7)},
- {"9a", "a", 10, ERR(0), UMAX(9)},
- {"9A", "A", 10, ERR(0), UMAX(9)},
- {"fg", "g", 16, ERR(0), UMAX(15)},
- {"FG", "G", 16, ERR(0), UMAX(15)},
- {"0xfg", "g", 16, ERR(0), UMAX(15)},
- {"0XFG", "G", 16, ERR(0), UMAX(15)},
- {"z_", "_", 36, ERR(0), UMAX(35)},
- {"Z_", "_", 36, ERR(0), UMAX(35)}
+ {"42", "", 0, ERR(0), KUMAX(42)},
+ {"+42", "", 0, ERR(0), KUMAX(42)},
+ {"-42", "", 0, ERR(0), KUMAX(-42)},
+ {"042", "", 0, ERR(0), KUMAX(042)},
+ {"+042", "", 0, ERR(0), KUMAX(042)},
+ {"-042", "", 0, ERR(0), KUMAX(-042)},
+ {"0x42", "", 0, ERR(0), KUMAX(0x42)},
+ {"+0x42", "", 0, ERR(0), KUMAX(0x42)},
+ {"-0x42", "", 0, ERR(0), KUMAX(-0x42)},
+
+ {"0", "", 0, ERR(0), KUMAX(0)},
+ {"1", "", 0, ERR(0), KUMAX(1)},
+
+ {"42", "", 0, ERR(0), KUMAX(42)},
+ {" 42", "", 0, ERR(0), KUMAX(42)},
+ {"42 ", " ", 0, ERR(0), KUMAX(42)},
+ {"0x", "x", 0, ERR(0), KUMAX(0)},
+ {"42x", "x", 0, ERR(0), KUMAX(42)},
+
+ {"07", "", 0, ERR(0), KUMAX(7)},
+ {"010", "", 0, ERR(0), KUMAX(8)},
+ {"08", "8", 0, ERR(0), KUMAX(0)},
+ {"0_", "_", 0, ERR(0), KUMAX(0)},
+
+ {"0x", "x", 0, ERR(0), KUMAX(0)},
+ {"0X", "X", 0, ERR(0), KUMAX(0)},
+ {"0xg", "xg", 0, ERR(0), KUMAX(0)},
+ {"0XA", "", 0, ERR(0), KUMAX(10)},
+
+ {"010", "", 10, ERR(0), KUMAX(10)},
+ {"0x3", "x3", 10, ERR(0), KUMAX(0)},
+
+ {"12", "2", 2, ERR(0), KUMAX(1)},
+ {"78", "8", 8, ERR(0), KUMAX(7)},
+ {"9a", "a", 10, ERR(0), KUMAX(9)},
+ {"9A", "A", 10, ERR(0), KUMAX(9)},
+ {"fg", "g", 16, ERR(0), KUMAX(15)},
+ {"FG", "G", 16, ERR(0), KUMAX(15)},
+ {"0xfg", "g", 16, ERR(0), KUMAX(15)},
+ {"0XFG", "G", 16, ERR(0), KUMAX(15)},
+ {"z_", "_", 36, ERR(0), KUMAX(35)},
+ {"Z_", "_", 36, ERR(0), KUMAX(35)}
};
#undef ERR
-#undef UMAX
+#undef KUMAX
unsigned i;
for (i = 0; i < sizeof(tests)/sizeof(struct test_s); i++) {
@@ -141,8 +141,8 @@ TEST_BEGIN(test_malloc_snprintf_truncated)
char buf[BUFLEN];
int result;
size_t len;
-#define TEST(expected_str_untruncated, fmt...) do { \
- result = malloc_snprintf(buf, len, fmt); \
+#define TEST(expected_str_untruncated, ...) do { \
+ result = malloc_snprintf(buf, len, __VA_ARGS__); \
assert_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0, \
"Unexpected string inequality (\"%s\" vs \"%s\")", \
buf, expected_str_untruncated); \
@@ -173,8 +173,8 @@ TEST_BEGIN(test_malloc_snprintf)
#define BUFLEN 128
char buf[BUFLEN];
int result;
-#define TEST(expected_str, fmt...) do { \
- result = malloc_snprintf(buf, sizeof(buf), fmt); \
+#define TEST(expected_str, ...) do { \
+ result = malloc_snprintf(buf, sizeof(buf), __VA_ARGS__); \
assert_str_eq(buf, expected_str, "Unexpected output"); \
assert_d_eq(result, strlen(expected_str), "Unexpected result"); \
} while (0)
diff --git a/deps/jemalloc/test/unit/zero.c b/deps/jemalloc/test/unit/zero.c
index 65a8f0c9c..93afc2b87 100644
--- a/deps/jemalloc/test/unit/zero.c
+++ b/deps/jemalloc/test/unit/zero.c
@@ -55,7 +55,7 @@ TEST_BEGIN(test_zero_large)
{
test_skip_if(!config_fill);
- test_zero(SMALL_MAXCLASS+1, arena_maxclass);
+ test_zero(SMALL_MAXCLASS+1, large_maxclass);
}
TEST_END
@@ -63,7 +63,7 @@ TEST_BEGIN(test_zero_huge)
{
test_skip_if(!config_fill);
- test_zero(arena_maxclass+1, chunksize*2);
+ test_zero(large_maxclass+1, chunksize*2);
}
TEST_END