author     antirez <antirez@gmail.com>    2017-01-30 09:58:34 +0100
committer  antirez <antirez@gmail.com>    2017-02-12 16:07:13 +0100
commit     7178cac031843f63133a494fbf566569b0285eb0 (patch)
tree       2d3d6495b8793ed06c9565ff9e7a0d01e17a96bb
parent     33fad43c0f1baab702876a0c8e14e6f51b38380b (diff)
download   redis-7178cac031843f63133a494fbf566569b0285eb0.tar.gz
Revert "Jemalloc updated to 4.4.0."
This reverts commit 153f2f00ea5c74cbd63d92a261d31c42df8dce21. Jemalloc 4.4.0 is apparently causing deadlocks on certain systems; see for example https://github.com/antirez/redis/issues/3799. As a cautionary step we are reverting the commit and releasing a new stable Redis version.
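For readers who want to reproduce or verify the revert on a local checkout, a minimal sketch follows. The commands are illustrative and not part of the commit; the runtime check assumes a default build that links the bundled jemalloc and a locally running redis-server instance.

    # Revert the jemalloc 4.4.0 upgrade by the hash cited in the message above.
    git revert 153f2f00ea5c74cbd63d92a261d31c42df8dce21

    # Rebuild and confirm which allocator version the server reports at runtime.
    make distclean && make
    ./src/redis-cli INFO memory | grep mem_allocator    # expected: mem_allocator:jemalloc-4.0.3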
-rw-r--r--  deps/jemalloc/.appveyor.yml  28
-rw-r--r--  deps/jemalloc/.gitignore  16
-rw-r--r--  deps/jemalloc/.travis.yml  29
-rw-r--r--  deps/jemalloc/COPYING  4
-rw-r--r--  deps/jemalloc/ChangeLog  220
-rw-r--r--  deps/jemalloc/INSTALL  26
-rw-r--r--  deps/jemalloc/Makefile.in  128
-rw-r--r--  deps/jemalloc/README  2
-rw-r--r--  deps/jemalloc/VERSION  2
-rw-r--r--  deps/jemalloc/bin/jeprof.in  131
-rwxr-xr-x  deps/jemalloc/config.guess (renamed from deps/jemalloc/build-aux/config.guess)  174
-rwxr-xr-x  deps/jemalloc/config.sub (renamed from deps/jemalloc/build-aux/config.sub)  76
-rwxr-xr-x  deps/jemalloc/configure  1387
-rw-r--r--  deps/jemalloc/configure.ac  436
-rw-r--r--  deps/jemalloc/doc/html.xsl.in  1
-rw-r--r--  deps/jemalloc/doc/jemalloc.3  859
-rw-r--r--  deps/jemalloc/doc/jemalloc.html  1507
-rw-r--r--  deps/jemalloc/doc/jemalloc.xml.in  544
-rw-r--r--  deps/jemalloc/doc/stylesheet.xsl  7
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/arena.h  582
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/assert.h  45
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/atomic.h  4
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/base.h  11
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/bitmap.h  76
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/chunk.h  38
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/chunk_dss.h  10
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/chunk_mmap.h  4
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/ckh.h  6
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/ctl.h  29
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/extent.h  43
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/hash.h  33
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/huge.h  21
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in  440
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h  11
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in  73
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/mb.h  10
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/mutex.h  62
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/nstime.h  48
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/pages.h  7
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/ph.h  345
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/private_symbols.txt  252
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/prng.h  195
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/prof.h  86
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/rb.h  208
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/rtree.h  160
-rwxr-xr-x  deps/jemalloc/include/jemalloc/internal/size_classes.sh  50
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/smoothstep.h  246
-rwxr-xr-x  deps/jemalloc/include/jemalloc/internal/smoothstep.sh  115
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/spin.h  51
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/stats.h  14
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/tcache.h  159
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/ticker.h  75
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/tsd.h  164
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/util.h  214
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/valgrind.h  40
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/witness.h  266
-rw-r--r--  deps/jemalloc/include/jemalloc/jemalloc_defs.h.in  8
-rw-r--r--  deps/jemalloc/include/jemalloc/jemalloc_macros.h.in  61
-rw-r--r--  deps/jemalloc/include/msvc_compat/strings.h  30
-rw-r--r--  deps/jemalloc/include/msvc_compat/windows_extra.h  22
-rwxr-xr-x  deps/jemalloc/install-sh (renamed from deps/jemalloc/build-aux/install-sh)  0
-rw-r--r--  deps/jemalloc/jemalloc.pc.in  2
-rw-r--r--  deps/jemalloc/msvc/ReadMe.txt  24
-rw-r--r--  deps/jemalloc/msvc/jemalloc_vc2015.sln  63
-rw-r--r--  deps/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj  402
-rw-r--r--  deps/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters  272
-rwxr-xr-x  deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.cpp  89
-rw-r--r--  deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.h  3
-rw-r--r--  deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj  327
-rw-r--r--  deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj.filters  26
-rw-r--r--  deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads_main.cpp  12
-rw-r--r--  deps/jemalloc/src/arena.c  2021
-rw-r--r--  deps/jemalloc/src/base.c  73
-rw-r--r--  deps/jemalloc/src/bitmap.c  59
-rw-r--r--  deps/jemalloc/src/chunk.c  426
-rw-r--r--  deps/jemalloc/src/chunk_dss.c  182
-rw-r--r--  deps/jemalloc/src/chunk_mmap.c  18
-rw-r--r--  deps/jemalloc/src/ckh.c  43
-rw-r--r--  deps/jemalloc/src/ctl.c  761
-rw-r--r--  deps/jemalloc/src/extent.c  70
-rw-r--r--  deps/jemalloc/src/huge.c  238
-rw-r--r--  deps/jemalloc/src/jemalloc.c  1518
-rw-r--r--  deps/jemalloc/src/mutex.c  23
-rw-r--r--  deps/jemalloc/src/nstime.c  194
-rw-r--r--  deps/jemalloc/src/pages.c  177
-rw-r--r--  deps/jemalloc/src/prng.c  2
-rw-r--r--  deps/jemalloc/src/prof.c  664
-rw-r--r--  deps/jemalloc/src/quarantine.c  50
-rw-r--r--  deps/jemalloc/src/rtree.c  9
-rw-r--r--  deps/jemalloc/src/spin.c  2
-rw-r--r-- [-rwxr-xr-x]  deps/jemalloc/src/stats.c  1220
-rw-r--r-- [-rwxr-xr-x]  deps/jemalloc/src/tcache.c  170
-rw-r--r--  deps/jemalloc/src/ticker.c  2
-rw-r--r--  deps/jemalloc/src/tsd.c  28
-rw-r--r-- [-rwxr-xr-x]  deps/jemalloc/src/util.c  42
-rw-r--r--  deps/jemalloc/src/witness.c  136
-rw-r--r--  deps/jemalloc/src/zone.c  198
-rw-r--r--  deps/jemalloc/test/include/test/jemalloc_test.h.in  80
-rw-r--r--  deps/jemalloc/test/include/test/mtx.h  2
-rw-r--r--  deps/jemalloc/test/include/test/test.h  4
-rw-r--r--  deps/jemalloc/test/include/test/timer.h  19
-rw-r--r-- [-rwxr-xr-x]  deps/jemalloc/test/integration/MALLOCX_ARENA.c  4
-rw-r--r--  deps/jemalloc/test/integration/aligned_alloc.c  20
-rw-r--r-- [-rwxr-xr-x]  deps/jemalloc/test/integration/allocated.c  17
-rw-r--r--  deps/jemalloc/test/integration/chunk.c  98
-rw-r--r-- [-rwxr-xr-x]  deps/jemalloc/test/integration/mallocx.c  106
-rw-r--r-- [-rwxr-xr-x]  deps/jemalloc/test/integration/overflow.c  8
-rw-r--r--  deps/jemalloc/test/integration/posix_memalign.c  20
-rw-r--r-- [-rwxr-xr-x]  deps/jemalloc/test/integration/rallocx.c  86
-rw-r--r--  deps/jemalloc/test/integration/sdallocx.c  4
-rw-r--r-- [-rwxr-xr-x]  deps/jemalloc/test/integration/thread_arena.c  10
-rw-r--r-- [-rwxr-xr-x]  deps/jemalloc/test/integration/thread_tcache_enabled.c  39
-rw-r--r-- [-rwxr-xr-x]  deps/jemalloc/test/integration/xallocx.c  120
-rw-r--r--  deps/jemalloc/test/src/mtx.c  7
-rw-r--r--  deps/jemalloc/test/src/test.c  56
-rw-r--r--  deps/jemalloc/test/src/timer.c  45
-rw-r--r--  deps/jemalloc/test/stress/microbench.c  3
-rw-r--r--  deps/jemalloc/test/unit/a0.c  19
-rwxr-xr-x  deps/jemalloc/test/unit/arena_reset.c  159
-rw-r--r--  deps/jemalloc/test/unit/bitmap.c  26
-rw-r--r--  deps/jemalloc/test/unit/ckh.c  8
-rwxr-xr-x  deps/jemalloc/test/unit/decay.c  374
-rw-r--r--  deps/jemalloc/test/unit/fork.c  64
-rw-r--r--  deps/jemalloc/test/unit/hash.c  36
-rw-r--r--  deps/jemalloc/test/unit/junk.c  17
-rw-r--r--  deps/jemalloc/test/unit/junk_alloc.c  2
-rw-r--r--  deps/jemalloc/test/unit/junk_free.c  2
-rw-r--r-- [-rwxr-xr-x]  deps/jemalloc/test/unit/mallctl.c  319
-rw-r--r--  deps/jemalloc/test/unit/math.c  4
-rw-r--r--  deps/jemalloc/test/unit/nstime.c  227
-rw-r--r--  deps/jemalloc/test/unit/pack.c  206
-rw-r--r--  deps/jemalloc/test/unit/pages.c  27
-rw-r--r--  deps/jemalloc/test/unit/ph.c  290
-rw-r--r--  deps/jemalloc/test/unit/prng.c  263
-rw-r--r-- [-rwxr-xr-x]  deps/jemalloc/test/unit/prof_accum.c  5
-rw-r--r-- [-rwxr-xr-x]  deps/jemalloc/test/unit/prof_active.c  5
-rw-r--r-- [-rwxr-xr-x]  deps/jemalloc/test/unit/prof_gdump.c  13
-rw-r--r-- [-rwxr-xr-x]  deps/jemalloc/test/unit/prof_idump.c  5
-rw-r--r-- [-rwxr-xr-x]  deps/jemalloc/test/unit/prof_reset.c  16
-rw-r--r-- [-rwxr-xr-x]  deps/jemalloc/test/unit/prof_thread_name.c  22
-rw-r--r--  deps/jemalloc/test/unit/rb.c  60
-rw-r--r--  deps/jemalloc/test/unit/run_quantize.c  149
-rw-r--r-- [-rwxr-xr-x]  deps/jemalloc/test/unit/size_classes.c  105
-rw-r--r--  deps/jemalloc/test/unit/smoothstep.c  106
-rw-r--r-- [-rwxr-xr-x]  deps/jemalloc/test/unit/stats.c  241
-rw-r--r--  deps/jemalloc/test/unit/ticker.c  76
-rw-r--r--  deps/jemalloc/test/unit/tsd.c  13
-rw-r--r--  deps/jemalloc/test/unit/util.c  89
-rw-r--r--  deps/jemalloc/test/unit/witness.c  278
-rw-r--r--  deps/jemalloc/test/unit/zero.c  16
150 files changed, 6256 insertions, 17171 deletions
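For reference, a per-file summary like the one above can be regenerated from a Redis checkout with plain git, using the parent and commit hashes listed in the header; this is an illustrative sketch, not output captured from this page.

    # --stat prints per-file line counts; --summary adds renames and mode changes.
    git diff --stat --summary 33fad43c0f1b..7178cac03184 -- deps/jemalloc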
diff --git a/deps/jemalloc/.appveyor.yml b/deps/jemalloc/.appveyor.yml
deleted file mode 100644
index ddd5c5711..000000000
--- a/deps/jemalloc/.appveyor.yml
+++ /dev/null
@@ -1,28 +0,0 @@
-version: '{build}'
-
-environment:
- matrix:
- - MSYSTEM: MINGW64
- CPU: x86_64
- MSVC: amd64
- - MSYSTEM: MINGW32
- CPU: i686
- MSVC: x86
- - MSYSTEM: MINGW64
- CPU: x86_64
- - MSYSTEM: MINGW32
- CPU: i686
-
-install:
- - set PATH=c:\msys64\%MSYSTEM%\bin;c:\msys64\usr\bin;%PATH%
- - if defined MSVC call "c:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %MSVC%
- - if defined MSVC pacman --noconfirm -Rsc mingw-w64-%CPU%-gcc gcc
- - pacman --noconfirm -Suy mingw-w64-%CPU%-make
-
-build_script:
- - bash -c "autoconf"
- - bash -c "./configure"
- - mingw32-make -j3
- - file lib/jemalloc.dll
- - mingw32-make -j3 tests
- - mingw32-make -k check
diff --git a/deps/jemalloc/.gitignore b/deps/jemalloc/.gitignore
index 08278d087..d0e393619 100644
--- a/deps/jemalloc/.gitignore
+++ b/deps/jemalloc/.gitignore
@@ -73,19 +73,3 @@ test/include/test/jemalloc_test_defs.h
/test/unit/*.out
/VERSION
-
-*.pdb
-*.sdf
-*.opendb
-*.opensdf
-*.cachefile
-*.suo
-*.user
-*.sln.docstates
-*.tmp
-/msvc/Win32/
-/msvc/x64/
-/msvc/projects/*/*/Debug*/
-/msvc/projects/*/*/Release*/
-/msvc/projects/*/*/Win32/
-/msvc/projects/*/*/x64/
diff --git a/deps/jemalloc/.travis.yml b/deps/jemalloc/.travis.yml
deleted file mode 100644
index 1fed4f8e6..000000000
--- a/deps/jemalloc/.travis.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-language: c
-
-matrix:
- include:
- - os: linux
- compiler: gcc
- - os: linux
- compiler: gcc
- env:
- - EXTRA_FLAGS=-m32
- addons:
- apt:
- packages:
- - gcc-multilib
- - os: osx
- compiler: clang
- - os: osx
- compiler: clang
- env:
- - EXTRA_FLAGS=-m32
-
-before_script:
- - autoconf
- - ./configure${EXTRA_FLAGS:+ CC="$CC $EXTRA_FLAGS"}
- - make -j3
- - make -j3 tests
-
-script:
- - make check
diff --git a/deps/jemalloc/COPYING b/deps/jemalloc/COPYING
index 104b1f8b0..611968cda 100644
--- a/deps/jemalloc/COPYING
+++ b/deps/jemalloc/COPYING
@@ -1,10 +1,10 @@
Unless otherwise specified, files in the jemalloc source distribution are
subject to the following license:
--------------------------------------------------------------------------------
-Copyright (C) 2002-2016 Jason Evans <jasone@canonware.com>.
+Copyright (C) 2002-2015 Jason Evans <jasone@canonware.com>.
All rights reserved.
Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved.
-Copyright (C) 2009-2016 Facebook, Inc. All rights reserved.
+Copyright (C) 2009-2015 Facebook, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
diff --git a/deps/jemalloc/ChangeLog b/deps/jemalloc/ChangeLog
index f75edd933..e3b0a5190 100644
--- a/deps/jemalloc/ChangeLog
+++ b/deps/jemalloc/ChangeLog
@@ -4,226 +4,6 @@ brevity. Much more detail can be found in the git revision history:
https://github.com/jemalloc/jemalloc
-* 4.4.0 (December 3, 2016)
-
- New features:
- - Add configure support for *-*-linux-android. (@cferris1000, @jasone)
- - Add the --disable-syscall configure option, for use on systems that place
- security-motivated limitations on syscall(2). (@jasone)
- - Add support for Debian GNU/kFreeBSD. (@thesam)
-
- Optimizations:
- - Add extent serial numbers and use them where appropriate as a sort key that
- is higher priority than address, so that the allocation policy prefers older
- extents. This tends to improve locality (decrease fragmentation) when
- memory grows downward. (@jasone)
- - Refactor madvise(2) configuration so that MADV_FREE is detected and utilized
- on Linux 4.5 and newer. (@jasone)
- - Mark partially purged arena chunks as non-huge-page. This improves
- interaction with Linux's transparent huge page functionality. (@jasone)
-
- Bug fixes:
- - Fix size class computations for edge conditions involving extremely large
- allocations. This regression was first released in 4.0.0. (@jasone,
- @ingvarha)
- - Remove overly restrictive assertions related to the cactive statistic. This
- regression was first released in 4.1.0. (@jasone)
- - Implement a more reliable detection scheme for os_unfair_lock on macOS.
- (@jszakmeister)
-
-* 4.3.1 (November 7, 2016)
-
- Bug fixes:
- - Fix a severe virtual memory leak. This regression was first released in
- 4.3.0. (@interwq, @jasone)
- - Refactor atomic and prng APIs to restore support for 32-bit platforms that
- use pre-C11 toolchains, e.g. FreeBSD's mips. (@jasone)
-
-* 4.3.0 (November 4, 2016)
-
- This is the first release that passes the test suite for multiple Windows
- configurations, thanks in large part to @glandium setting up continuous
- integration via AppVeyor (and Travis CI for Linux and OS X).
-
- New features:
- - Add "J" (JSON) support to malloc_stats_print(). (@jasone)
- - Add Cray compiler support. (@ronawho)
-
- Optimizations:
- - Add/use adaptive spinning for bootstrapping and radix tree node
- initialization. (@jasone)
-
- Bug fixes:
- - Fix large allocation to search starting in the optimal size class heap,
- which can substantially reduce virtual memory churn and fragmentation. This
- regression was first released in 4.0.0. (@mjp41, @jasone)
- - Fix stats.arenas.<i>.nthreads accounting. (@interwq)
- - Fix and simplify decay-based purging. (@jasone)
- - Make DSS (sbrk(2)-related) operations lockless, which resolves potential
- deadlocks during thread exit. (@jasone)
- - Fix over-sized allocation of radix tree leaf nodes. (@mjp41, @ogaun,
- @jasone)
- - Fix over-sized allocation of arena_t (plus associated stats) data
- structures. (@jasone, @interwq)
- - Fix EXTRA_CFLAGS to not affect configuration. (@jasone)
- - Fix a Valgrind integration bug. (@ronawho)
- - Disallow 0x5a junk filling when running in Valgrind. (@jasone)
- - Fix a file descriptor leak on Linux. This regression was first released in
- 4.2.0. (@vsarunas, @jasone)
- - Fix static linking of jemalloc with glibc. (@djwatson)
- - Use syscall(2) rather than {open,read,close}(2) during boot on Linux. This
- works around other libraries' system call wrappers performing reentrant
- allocation. (@kspinka, @Whissi, @jasone)
- - Fix OS X default zone replacement to work with OS X 10.12. (@glandium,
- @jasone)
- - Fix cached memory management to avoid needless commit/decommit operations
- during purging, which resolves permanent virtual memory map fragmentation
- issues on Windows. (@mjp41, @jasone)
- - Fix TSD fetches to avoid (recursive) allocation. This is relevant to
- non-TLS and Windows configurations. (@jasone)
- - Fix malloc_conf overriding to work on Windows. (@jasone)
- - Forcibly disable lazy-lock on Windows (was forcibly *enabled*). (@jasone)
-
-* 4.2.1 (June 8, 2016)
-
- Bug fixes:
- - Fix bootstrapping issues for configurations that require allocation during
- tsd initialization (e.g. --disable-tls). (@cferris1000, @jasone)
- - Fix gettimeofday() version of nstime_update(). (@ronawho)
- - Fix Valgrind regressions in calloc() and chunk_alloc_wrapper(). (@ronawho)
- - Fix potential VM map fragmentation regression. (@jasone)
- - Fix opt_zero-triggered in-place huge reallocation zeroing. (@jasone)
- - Fix heap profiling context leaks in reallocation edge cases. (@jasone)
-
-* 4.2.0 (May 12, 2016)
-
- New features:
- - Add the arena.<i>.reset mallctl, which makes it possible to discard all of
- an arena's allocations in a single operation. (@jasone)
- - Add the stats.retained and stats.arenas.<i>.retained statistics. (@jasone)
- - Add the --with-version configure option. (@jasone)
- - Support --with-lg-page values larger than actual page size. (@jasone)
-
- Optimizations:
- - Use pairing heaps rather than red-black trees for various hot data
- structures. (@djwatson, @jasone)
- - Streamline fast paths of rtree operations. (@jasone)
- - Optimize the fast paths of calloc() and [m,d,sd]allocx(). (@jasone)
- - Decommit unused virtual memory if the OS does not overcommit. (@jasone)
- - Specify MAP_NORESERVE on Linux if [heuristic] overcommit is active, in order
- to avoid unfortunate interactions during fork(2). (@jasone)
-
- Bug fixes:
- - Fix chunk accounting related to triggering gdump profiles. (@jasone)
- - Link against librt for clock_gettime(2) if glibc < 2.17. (@jasone)
- - Scale leak report summary according to sampling probability. (@jasone)
-
-* 4.1.1 (May 3, 2016)
-
- This bugfix release resolves a variety of mostly minor issues, though the
- bitmap fix is critical for 64-bit Windows.
-
- Bug fixes:
- - Fix the linear scan version of bitmap_sfu() to shift by the proper amount
- even when sizeof(long) is not the same as sizeof(void *), as on 64-bit
- Windows. (@jasone)
- - Fix hashing functions to avoid unaligned memory accesses (and resulting
- crashes). This is relevant at least to some ARM-based platforms.
- (@rkmisra)
- - Fix fork()-related lock rank ordering reversals. These reversals were
- unlikely to cause deadlocks in practice except when heap profiling was
- enabled and active. (@jasone)
- - Fix various chunk leaks in OOM code paths. (@jasone)
- - Fix malloc_stats_print() to print opt.narenas correctly. (@jasone)
- - Fix MSVC-specific build/test issues. (@rustyx, @yuslepukhin)
- - Fix a variety of test failures that were due to test fragility rather than
- core bugs. (@jasone)
-
-* 4.1.0 (February 28, 2016)
-
- This release is primarily about optimizations, but it also incorporates a lot
- of portability-motivated refactoring and enhancements. Many people worked on
- this release, to an extent that even with the omission here of minor changes
- (see git revision history), and of the people who reported and diagnosed
- issues, so much of the work was contributed that starting with this release,
- changes are annotated with author credits to help reflect the collaborative
- effort involved.
-
- New features:
- - Implement decay-based unused dirty page purging, a major optimization with
- mallctl API impact. This is an alternative to the existing ratio-based
- unused dirty page purging, and is intended to eventually become the sole
- purging mechanism. New mallctls:
- + opt.purge
- + opt.decay_time
- + arena.<i>.decay
- + arena.<i>.decay_time
- + arenas.decay_time
- + stats.arenas.<i>.decay_time
- (@jasone, @cevans87)
- - Add --with-malloc-conf, which makes it possible to embed a default
- options string during configuration. This was motivated by the desire to
- specify --with-malloc-conf=purge:decay , since the default must remain
- purge:ratio until the 5.0.0 release. (@jasone)
- - Add MS Visual Studio 2015 support. (@rustyx, @yuslepukhin)
- - Make *allocx() size class overflow behavior defined. The maximum
- size class is now less than PTRDIFF_MAX to protect applications against
- numerical overflow, and all allocation functions are guaranteed to indicate
- errors rather than potentially crashing if the request size exceeds the
- maximum size class. (@jasone)
- - jeprof:
- + Add raw heap profile support. (@jasone)
- + Add --retain and --exclude for backtrace symbol filtering. (@jasone)
-
- Optimizations:
- - Optimize the fast path to combine various bootstrapping and configuration
- checks and execute more streamlined code in the common case. (@interwq)
- - Use linear scan for small bitmaps (used for small object tracking). In
- addition to speeding up bitmap operations on 64-bit systems, this reduces
- allocator metadata overhead by approximately 0.2%. (@djwatson)
- - Separate arena_avail trees, which substantially speeds up run tree
- operations. (@djwatson)
- - Use memoization (boot-time-computed table) for run quantization. Separate
- arena_avail trees reduced the importance of this optimization. (@jasone)
- - Attempt mmap-based in-place huge reallocation. This can dramatically speed
- up incremental huge reallocation. (@jasone)
-
- Incompatible changes:
- - Make opt.narenas unsigned rather than size_t. (@jasone)
-
- Bug fixes:
- - Fix stats.cactive accounting regression. (@rustyx, @jasone)
- - Handle unaligned keys in hash(). This caused problems for some ARM systems.
- (@jasone, @cferris1000)
- - Refactor arenas array. In addition to fixing a fork-related deadlock, this
- makes arena lookups faster and simpler. (@jasone)
- - Move retained memory allocation out of the default chunk allocation
- function, to a location that gets executed even if the application installs
- a custom chunk allocation function. This resolves a virtual memory leak.
- (@buchgr)
- - Fix a potential tsd cleanup leak. (@cferris1000, @jasone)
- - Fix run quantization. In practice this bug had no impact unless
- applications requested memory with alignment exceeding one page.
- (@jasone, @djwatson)
- - Fix LinuxThreads-specific bootstrapping deadlock. (Cosmin Paraschiv)
- - jeprof:
- + Don't discard curl options if timeout is not defined. (@djwatson)
- + Detect failed profile fetches. (@djwatson)
- - Fix stats.arenas.<i>.{dss,lg_dirty_mult,decay_time,pactive,pdirty} for
- --disable-stats case. (@jasone)
-
-* 4.0.4 (October 24, 2015)
-
- This bugfix release fixes another xallocx() regression. No other regressions
- have come to light in over a month, so this is likely a good starting point
- for people who prefer to wait for "dot one" releases with all the major issues
- shaken out.
-
- Bug fixes:
- - Fix xallocx(..., MALLOCX_ZERO to zero the last full trailing page of large
- allocations that have been randomly assigned an offset of 0 when
- --enable-cache-oblivious configure option is enabled.
-
* 4.0.3 (September 24, 2015)
This bugfix release continues the trend of xallocx() and heap profiling fixes.
diff --git a/deps/jemalloc/INSTALL b/deps/jemalloc/INSTALL
index cce3ed711..8d3968745 100644
--- a/deps/jemalloc/INSTALL
+++ b/deps/jemalloc/INSTALL
@@ -35,10 +35,6 @@ any of the following arguments (not a definitive list) to 'configure':
will cause files to be installed into /usr/local/include, /usr/local/lib,
and /usr/local/man.
---with-version=<major>.<minor>.<bugfix>-<nrev>-g<gid>
- Use the specified version string rather than trying to generate one (if in
- a git repository) or use existing the VERSION file (if present).
-
--with-rpath=<colon-separated-rpath>
Embed one or more library paths, so that libjemalloc can find the libraries
it is linked to. This works only on ELF-based systems.
@@ -88,14 +84,6 @@ any of the following arguments (not a definitive list) to 'configure':
versions of jemalloc can coexist in the same installation directory. For
example, libjemalloc.so.0 becomes libjemalloc<suffix>.so.0.
---with-malloc-conf=<malloc_conf>
- Embed <malloc_conf> as a run-time options string that is processed prior to
- the malloc_conf global variable, the /etc/malloc.conf symlink, and the
- MALLOC_CONF environment variable. For example, to change the default chunk
- size to 256 KiB:
-
- --with-malloc-conf=lg_chunk:18
-
--disable-cc-silence
Disable code that silences non-useful compiler warnings. This is mainly
useful during development when auditing the set of warnings that are being
@@ -206,11 +194,6 @@ any of the following arguments (not a definitive list) to 'configure':
most extreme case increases physical memory usage for the 16 KiB size class
to 20 KiB.
---disable-syscall
- Disable use of syscall(2) rather than {open,read,write,close}(2). This is
- intended as a workaround for systems that place security limitations on
- syscall(2).
-
--with-xslroot=<path>
Specify where to find DocBook XSL stylesheets when building the
documentation.
@@ -332,15 +315,6 @@ LDFLAGS="?"
PATH="?"
'configure' uses this to find programs.
-In some cases it may be necessary to work around configuration results that do
-not match reality. For example, Linux 4.5 added support for the MADV_FREE flag
-to madvise(2), which can cause problems if building on a host with MADV_FREE
-support and deploying to a target without. To work around this, use a cache
-file to override the relevant configuration variable defined in configure.ac,
-e.g.:
-
- echo "je_cv_madv_free=no" > config.cache && ./configure -C
-
=== Advanced compilation =======================================================
To build only parts of jemalloc, use the following targets:
diff --git a/deps/jemalloc/Makefile.in b/deps/jemalloc/Makefile.in
index c70536391..1ac6f2926 100644
--- a/deps/jemalloc/Makefile.in
+++ b/deps/jemalloc/Makefile.in
@@ -24,11 +24,11 @@ abs_objroot := @abs_objroot@
# Build parameters.
CPPFLAGS := @CPPFLAGS@ -I$(srcroot)include -I$(objroot)include
-EXTRA_CFLAGS := @EXTRA_CFLAGS@
-CFLAGS := @CFLAGS@ $(EXTRA_CFLAGS)
+CFLAGS := @CFLAGS@
LDFLAGS := @LDFLAGS@
EXTRA_LDFLAGS := @EXTRA_LDFLAGS@
LIBS := @LIBS@
+TESTLIBS := @TESTLIBS@
RPATH_EXTRA := @RPATH_EXTRA@
SO := @so@
IMPORTLIB := @importlib@
@@ -53,19 +53,15 @@ enable_prof := @enable_prof@
enable_valgrind := @enable_valgrind@
enable_zone_allocator := @enable_zone_allocator@
MALLOC_CONF := @JEMALLOC_CPREFIX@MALLOC_CONF
-link_whole_archive := @link_whole_archive@
DSO_LDFLAGS = @DSO_LDFLAGS@
SOREV = @SOREV@
PIC_CFLAGS = @PIC_CFLAGS@
CTARGET = @CTARGET@
LDTARGET = @LDTARGET@
-TEST_LD_MODE = @TEST_LD_MODE@
MKLIB = @MKLIB@
AR = @AR@
ARFLAGS = @ARFLAGS@
CC_MM = @CC_MM@
-LM := @LM@
-INSTALL = @INSTALL@
ifeq (macho, $(ABI))
TEST_LIBRARY_PATH := DYLD_FALLBACK_LIBRARY_PATH="$(objroot)lib"
@@ -82,34 +78,15 @@ LIBJEMALLOC := $(LIBPREFIX)jemalloc$(install_suffix)
# Lists of files.
BINS := $(objroot)bin/jemalloc-config $(objroot)bin/jemalloc.sh $(objroot)bin/jeprof
C_HDRS := $(objroot)include/jemalloc/jemalloc$(install_suffix).h
-C_SRCS := $(srcroot)src/jemalloc.c \
- $(srcroot)src/arena.c \
- $(srcroot)src/atomic.c \
- $(srcroot)src/base.c \
- $(srcroot)src/bitmap.c \
- $(srcroot)src/chunk.c \
- $(srcroot)src/chunk_dss.c \
- $(srcroot)src/chunk_mmap.c \
- $(srcroot)src/ckh.c \
- $(srcroot)src/ctl.c \
- $(srcroot)src/extent.c \
- $(srcroot)src/hash.c \
- $(srcroot)src/huge.c \
- $(srcroot)src/mb.c \
- $(srcroot)src/mutex.c \
- $(srcroot)src/nstime.c \
- $(srcroot)src/pages.c \
- $(srcroot)src/prng.c \
- $(srcroot)src/prof.c \
- $(srcroot)src/quarantine.c \
- $(srcroot)src/rtree.c \
- $(srcroot)src/stats.c \
- $(srcroot)src/spin.c \
- $(srcroot)src/tcache.c \
- $(srcroot)src/ticker.c \
- $(srcroot)src/tsd.c \
- $(srcroot)src/util.c \
- $(srcroot)src/witness.c
+C_SRCS := $(srcroot)src/jemalloc.c $(srcroot)src/arena.c \
+ $(srcroot)src/atomic.c $(srcroot)src/base.c $(srcroot)src/bitmap.c \
+ $(srcroot)src/chunk.c $(srcroot)src/chunk_dss.c \
+ $(srcroot)src/chunk_mmap.c $(srcroot)src/ckh.c $(srcroot)src/ctl.c \
+ $(srcroot)src/extent.c $(srcroot)src/hash.c $(srcroot)src/huge.c \
+ $(srcroot)src/mb.c $(srcroot)src/mutex.c $(srcroot)src/pages.c \
+ $(srcroot)src/prof.c $(srcroot)src/quarantine.c $(srcroot)src/rtree.c \
+ $(srcroot)src/stats.c $(srcroot)src/tcache.c $(srcroot)src/util.c \
+ $(srcroot)src/tsd.c
ifeq ($(enable_valgrind), 1)
C_SRCS += $(srcroot)src/valgrind.c
endif
@@ -128,11 +105,6 @@ DSOS := $(objroot)lib/$(LIBJEMALLOC).$(SOREV)
ifneq ($(SOREV),$(SO))
DSOS += $(objroot)lib/$(LIBJEMALLOC).$(SO)
endif
-ifeq (1, $(link_whole_archive))
-LJEMALLOC := -Wl,--whole-archive -L$(objroot)lib -l$(LIBJEMALLOC) -Wl,--no-whole-archive
-else
-LJEMALLOC := $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
-endif
PC := $(objroot)jemalloc.pc
MAN3 := $(objroot)doc/jemalloc$(install_suffix).3
DOCS_XML := $(objroot)doc/jemalloc$(install_suffix).xml
@@ -144,19 +116,10 @@ C_TESTLIB_SRCS := $(srcroot)test/src/btalloc.c $(srcroot)test/src/btalloc_0.c \
$(srcroot)test/src/mtx.c $(srcroot)test/src/mq.c \
$(srcroot)test/src/SFMT.c $(srcroot)test/src/test.c \
$(srcroot)test/src/thd.c $(srcroot)test/src/timer.c
-ifeq (1, $(link_whole_archive))
-C_UTIL_INTEGRATION_SRCS :=
-else
-C_UTIL_INTEGRATION_SRCS := $(srcroot)src/nstime.c $(srcroot)src/util.c
-endif
-TESTS_UNIT := \
- $(srcroot)test/unit/a0.c \
- $(srcroot)test/unit/arena_reset.c \
- $(srcroot)test/unit/atomic.c \
+C_UTIL_INTEGRATION_SRCS := $(srcroot)src/util.c
+TESTS_UNIT := $(srcroot)test/unit/atomic.c \
$(srcroot)test/unit/bitmap.c \
$(srcroot)test/unit/ckh.c \
- $(srcroot)test/unit/decay.c \
- $(srcroot)test/unit/fork.c \
$(srcroot)test/unit/hash.c \
$(srcroot)test/unit/junk.c \
$(srcroot)test/unit/junk_alloc.c \
@@ -166,10 +129,6 @@ TESTS_UNIT := \
$(srcroot)test/unit/math.c \
$(srcroot)test/unit/mq.c \
$(srcroot)test/unit/mtx.c \
- $(srcroot)test/unit/pack.c \
- $(srcroot)test/unit/pages.c \
- $(srcroot)test/unit/ph.c \
- $(srcroot)test/unit/prng.c \
$(srcroot)test/unit/prof_accum.c \
$(srcroot)test/unit/prof_active.c \
$(srcroot)test/unit/prof_gdump.c \
@@ -181,16 +140,11 @@ TESTS_UNIT := \
$(srcroot)test/unit/quarantine.c \
$(srcroot)test/unit/rb.c \
$(srcroot)test/unit/rtree.c \
- $(srcroot)test/unit/run_quantize.c \
$(srcroot)test/unit/SFMT.c \
$(srcroot)test/unit/size_classes.c \
- $(srcroot)test/unit/smoothstep.c \
$(srcroot)test/unit/stats.c \
- $(srcroot)test/unit/ticker.c \
- $(srcroot)test/unit/nstime.c \
$(srcroot)test/unit/tsd.c \
$(srcroot)test/unit/util.c \
- $(srcroot)test/unit/witness.c \
$(srcroot)test/unit/zero.c
TESTS_INTEGRATION := $(srcroot)test/integration/aligned_alloc.c \
$(srcroot)test/integration/allocated.c \
@@ -312,69 +266,69 @@ $(STATIC_LIBS):
$(objroot)test/unit/%$(EXE): $(objroot)test/unit/%.$(O) $(TESTS_UNIT_LINK_OBJS) $(C_JET_OBJS) $(C_TESTLIB_UNIT_OBJS)
@mkdir -p $(@D)
- $(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LDFLAGS) $(filter-out -lm,$(LIBS)) $(LM) $(EXTRA_LDFLAGS)
+ $(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LDFLAGS) $(filter-out -lm,$(LIBS)) -lm $(TESTLIBS) $(EXTRA_LDFLAGS)
$(objroot)test/integration/%$(EXE): $(objroot)test/integration/%.$(O) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
@mkdir -p $(@D)
- $(CC) $(TEST_LD_MODE) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LJEMALLOC) $(LDFLAGS) $(filter-out -lm,$(filter -lrt -lpthread,$(LIBS))) $(LM) $(EXTRA_LDFLAGS)
+ $(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(filter -lpthread,$(LIBS))) -lm $(TESTLIBS) $(EXTRA_LDFLAGS)
$(objroot)test/stress/%$(EXE): $(objroot)test/stress/%.$(O) $(C_JET_OBJS) $(C_TESTLIB_STRESS_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
@mkdir -p $(@D)
- $(CC) $(TEST_LD_MODE) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(LIBS)) $(LM) $(EXTRA_LDFLAGS)
+ $(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(LIBS)) -lm $(TESTLIBS) $(EXTRA_LDFLAGS)
build_lib_shared: $(DSOS)
build_lib_static: $(STATIC_LIBS)
build_lib: build_lib_shared build_lib_static
install_bin:
- $(INSTALL) -d $(BINDIR)
+ install -d $(BINDIR)
@for b in $(BINS); do \
- echo "$(INSTALL) -m 755 $$b $(BINDIR)"; \
- $(INSTALL) -m 755 $$b $(BINDIR); \
+ echo "install -m 755 $$b $(BINDIR)"; \
+ install -m 755 $$b $(BINDIR); \
done
install_include:
- $(INSTALL) -d $(INCLUDEDIR)/jemalloc
+ install -d $(INCLUDEDIR)/jemalloc
@for h in $(C_HDRS); do \
- echo "$(INSTALL) -m 644 $$h $(INCLUDEDIR)/jemalloc"; \
- $(INSTALL) -m 644 $$h $(INCLUDEDIR)/jemalloc; \
+ echo "install -m 644 $$h $(INCLUDEDIR)/jemalloc"; \
+ install -m 644 $$h $(INCLUDEDIR)/jemalloc; \
done
install_lib_shared: $(DSOS)
- $(INSTALL) -d $(LIBDIR)
- $(INSTALL) -m 755 $(objroot)lib/$(LIBJEMALLOC).$(SOREV) $(LIBDIR)
+ install -d $(LIBDIR)
+ install -m 755 $(objroot)lib/$(LIBJEMALLOC).$(SOREV) $(LIBDIR)
ifneq ($(SOREV),$(SO))
ln -sf $(LIBJEMALLOC).$(SOREV) $(LIBDIR)/$(LIBJEMALLOC).$(SO)
endif
install_lib_static: $(STATIC_LIBS)
- $(INSTALL) -d $(LIBDIR)
+ install -d $(LIBDIR)
@for l in $(STATIC_LIBS); do \
- echo "$(INSTALL) -m 755 $$l $(LIBDIR)"; \
- $(INSTALL) -m 755 $$l $(LIBDIR); \
+ echo "install -m 755 $$l $(LIBDIR)"; \
+ install -m 755 $$l $(LIBDIR); \
done
install_lib_pc: $(PC)
- $(INSTALL) -d $(LIBDIR)/pkgconfig
+ install -d $(LIBDIR)/pkgconfig
@for l in $(PC); do \
- echo "$(INSTALL) -m 644 $$l $(LIBDIR)/pkgconfig"; \
- $(INSTALL) -m 644 $$l $(LIBDIR)/pkgconfig; \
+ echo "install -m 644 $$l $(LIBDIR)/pkgconfig"; \
+ install -m 644 $$l $(LIBDIR)/pkgconfig; \
done
install_lib: install_lib_shared install_lib_static install_lib_pc
install_doc_html:
- $(INSTALL) -d $(DATADIR)/doc/jemalloc$(install_suffix)
+ install -d $(DATADIR)/doc/jemalloc$(install_suffix)
@for d in $(DOCS_HTML); do \
- echo "$(INSTALL) -m 644 $$d $(DATADIR)/doc/jemalloc$(install_suffix)"; \
- $(INSTALL) -m 644 $$d $(DATADIR)/doc/jemalloc$(install_suffix); \
+ echo "install -m 644 $$d $(DATADIR)/doc/jemalloc$(install_suffix)"; \
+ install -m 644 $$d $(DATADIR)/doc/jemalloc$(install_suffix); \
done
install_doc_man:
- $(INSTALL) -d $(MANDIR)/man3
+ install -d $(MANDIR)/man3
@for d in $(DOCS_MAN3); do \
- echo "$(INSTALL) -m 644 $$d $(MANDIR)/man3"; \
- $(INSTALL) -m 644 $$d $(MANDIR)/man3; \
+ echo "install -m 644 $$d $(MANDIR)/man3"; \
+ install -m 644 $$d $(MANDIR)/man3; \
done
install_doc: install_doc_html install_doc_man
@@ -395,22 +349,18 @@ stress_dir:
check_dir: check_unit_dir check_integration_dir
check_unit: tests_unit check_unit_dir
- $(MALLOC_CONF)="purge:ratio" $(SHELL) $(objroot)test/test.sh $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%)
- $(MALLOC_CONF)="purge:decay" $(SHELL) $(objroot)test/test.sh $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%)
+ $(SHELL) $(objroot)test/test.sh $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%)
check_integration_prof: tests_integration check_integration_dir
ifeq ($(enable_prof), 1)
$(MALLOC_CONF)="prof:true" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
$(MALLOC_CONF)="prof:true,prof_active:false" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
endif
-check_integration_decay: tests_integration check_integration_dir
- $(MALLOC_CONF)="purge:decay,decay_time:-1" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
- $(MALLOC_CONF)="purge:decay,decay_time:0" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
- $(MALLOC_CONF)="purge:decay" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
check_integration: tests_integration check_integration_dir
$(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
stress: tests_stress stress_dir
$(SHELL) $(objroot)test/test.sh $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%)
-check: check_unit check_integration check_integration_decay check_integration_prof
+check: tests check_dir check_integration_prof
+ $(SHELL) $(objroot)test/test.sh $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
ifeq ($(enable_code_coverage), 1)
coverage_unit: check_unit
diff --git a/deps/jemalloc/README b/deps/jemalloc/README
index 5ff24a9ef..9b268f422 100644
--- a/deps/jemalloc/README
+++ b/deps/jemalloc/README
@@ -17,4 +17,4 @@ jemalloc.
The ChangeLog file contains a brief summary of changes for each release.
-URL: http://jemalloc.net/
+URL: http://www.canonware.com/jemalloc/
diff --git a/deps/jemalloc/VERSION b/deps/jemalloc/VERSION
index 810bd6d4c..f1f9f1c61 100644
--- a/deps/jemalloc/VERSION
+++ b/deps/jemalloc/VERSION
@@ -1 +1 @@
-4.4.0-0-gf1f76357313e7dcad7262f17a48ff0a2e005fcdc
+4.0.3-0-ge9192eacf8935e29fc62fddc2701f7942b1cc02c
diff --git a/deps/jemalloc/bin/jeprof.in b/deps/jemalloc/bin/jeprof.in
index 42087fcec..e7178078a 100644
--- a/deps/jemalloc/bin/jeprof.in
+++ b/deps/jemalloc/bin/jeprof.in
@@ -95,7 +95,7 @@ my @EVINCE = ("evince"); # could also be xpdf or perhaps acroread
my @KCACHEGRIND = ("kcachegrind");
my @PS2PDF = ("ps2pdf");
# These are used for dynamic profiles
-my @URL_FETCHER = ("curl", "-s", "--fail");
+my @URL_FETCHER = ("curl", "-s");
# These are the web pages that servers need to support for dynamic profiles
my $HEAP_PAGE = "/pprof/heap";
@@ -223,14 +223,12 @@ Call-graph Options:
--nodefraction=<f> Hide nodes below <f>*total [default=.005]
--edgefraction=<f> Hide edges below <f>*total [default=.001]
--maxdegree=<n> Max incoming/outgoing edges per node [default=8]
- --focus=<regexp> Focus on backtraces with nodes matching <regexp>
+ --focus=<regexp> Focus on nodes matching <regexp>
--thread=<n> Show profile for thread <n>
- --ignore=<regexp> Ignore backtraces with nodes matching <regexp>
+ --ignore=<regexp> Ignore nodes matching <regexp>
--scale=<n> Set GV scaling [default=0]
--heapcheck Make nodes with non-0 object counts
(i.e. direct leak generators) more visible
- --retain=<regexp> Retain only nodes that match <regexp>
- --exclude=<regexp> Exclude all nodes that match <regexp>
Miscellaneous:
--tools=<prefix or binary:fullpath>[,...] \$PATH for object tool pathnames
@@ -341,8 +339,6 @@ sub Init() {
$main::opt_ignore = '';
$main::opt_scale = 0;
$main::opt_heapcheck = 0;
- $main::opt_retain = '';
- $main::opt_exclude = '';
$main::opt_seconds = 30;
$main::opt_lib = "";
@@ -414,8 +410,6 @@ sub Init() {
"ignore=s" => \$main::opt_ignore,
"scale=i" => \$main::opt_scale,
"heapcheck" => \$main::opt_heapcheck,
- "retain=s" => \$main::opt_retain,
- "exclude=s" => \$main::opt_exclude,
"inuse_space!" => \$main::opt_inuse_space,
"inuse_objects!" => \$main::opt_inuse_objects,
"alloc_space!" => \$main::opt_alloc_space,
@@ -1166,21 +1160,8 @@ sub PrintSymbolizedProfile {
}
print '---', "\n";
- my $profile_marker;
- if ($main::profile_type eq 'heap') {
- $HEAP_PAGE =~ m,[^/]+$,; # matches everything after the last slash
- $profile_marker = $&;
- } elsif ($main::profile_type eq 'growth') {
- $GROWTH_PAGE =~ m,[^/]+$,; # matches everything after the last slash
- $profile_marker = $&;
- } elsif ($main::profile_type eq 'contention') {
- $CONTENTION_PAGE =~ m,[^/]+$,; # matches everything after the last slash
- $profile_marker = $&;
- } else { # elsif ($main::profile_type eq 'cpu')
- $PROFILE_PAGE =~ m,[^/]+$,; # matches everything after the last slash
- $profile_marker = $&;
- }
-
+ $PROFILE_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ my $profile_marker = $&;
print '--- ', $profile_marker, "\n";
if (defined($main::collected_profile)) {
# if used with remote fetch, simply dump the collected profile to output.
@@ -1190,12 +1171,6 @@ sub PrintSymbolizedProfile {
}
close(SRC);
} else {
- # --raw/http: For everything to work correctly for non-remote profiles, we
- # would need to extend PrintProfileData() to handle all possible profile
- # types, re-enable the code that is currently disabled in ReadCPUProfile()
- # and FixCallerAddresses(), and remove the remote profile dumping code in
- # the block above.
- die "--raw/http: jeprof can only dump remote profiles for --raw\n";
# dump a cpu-format profile to standard out
PrintProfileData($profile);
}
@@ -2846,43 +2821,6 @@ sub ExtractCalls {
return $calls;
}
-sub FilterFrames {
- my $symbols = shift;
- my $profile = shift;
-
- if ($main::opt_retain eq '' && $main::opt_exclude eq '') {
- return $profile;
- }
-
- my $result = {};
- foreach my $k (keys(%{$profile})) {
- my $count = $profile->{$k};
- my @addrs = split(/\n/, $k);
- my @path = ();
- foreach my $a (@addrs) {
- my $sym;
- if (exists($symbols->{$a})) {
- $sym = $symbols->{$a}->[0];
- } else {
- $sym = $a;
- }
- if ($main::opt_retain ne '' && $sym !~ m/$main::opt_retain/) {
- next;
- }
- if ($main::opt_exclude ne '' && $sym =~ m/$main::opt_exclude/) {
- next;
- }
- push(@path, $a);
- }
- if (scalar(@path) > 0) {
- my $reduced_path = join("\n", @path);
- AddEntry($result, $reduced_path, $count);
- }
- }
-
- return $result;
-}
-
sub RemoveUninterestingFrames {
my $symbols = shift;
my $profile = shift;
@@ -3027,9 +2965,6 @@ sub RemoveUninterestingFrames {
my $reduced_path = join("\n", @path);
AddEntry($result, $reduced_path, $count);
}
-
- $result = FilterFrames($symbols, $result);
-
return $result;
}
@@ -3339,7 +3274,7 @@ sub ResolveRedirectionForCurl {
# Add a timeout flat to URL_FETCHER. Returns a new list.
sub AddFetchTimeout {
my $timeout = shift;
- my @fetcher = @_;
+ my @fetcher = shift;
if (defined($timeout)) {
if (join(" ", @fetcher) =~ m/\bcurl -s/) {
push(@fetcher, "--max-time", sprintf("%d", $timeout));
@@ -3385,27 +3320,6 @@ sub ReadSymbols {
return $map;
}
-sub URLEncode {
- my $str = shift;
- $str =~ s/([^A-Za-z0-9\-_.!~*'()])/ sprintf "%%%02x", ord $1 /eg;
- return $str;
-}
-
-sub AppendSymbolFilterParams {
- my $url = shift;
- my @params = ();
- if ($main::opt_retain ne '') {
- push(@params, sprintf("retain=%s", URLEncode($main::opt_retain)));
- }
- if ($main::opt_exclude ne '') {
- push(@params, sprintf("exclude=%s", URLEncode($main::opt_exclude)));
- }
- if (scalar @params > 0) {
- $url = sprintf("%s?%s", $url, join("&", @params));
- }
- return $url;
-}
-
# Fetches and processes symbols to prepare them for use in the profile output
# code. If the optional 'symbol_map' arg is not given, fetches symbols from
# $SYMBOL_PAGE for all PC values found in profile. Otherwise, the raw symbols
@@ -3430,11 +3344,9 @@ sub FetchSymbols {
my $command_line;
if (join(" ", @URL_FETCHER) =~ m/\bcurl -s/) {
$url = ResolveRedirectionForCurl($url);
- $url = AppendSymbolFilterParams($url);
$command_line = ShellEscape(@URL_FETCHER, "-d", "\@$main::tmpfile_sym",
$url);
} else {
- $url = AppendSymbolFilterParams($url);
$command_line = (ShellEscape(@URL_FETCHER, "--post", $url)
. " < " . ShellEscape($main::tmpfile_sym));
}
@@ -3515,22 +3427,12 @@ sub FetchDynamicProfile {
}
$url .= sprintf("seconds=%d", $main::opt_seconds);
$fetch_timeout = $main::opt_seconds * 1.01 + 60;
- # Set $profile_type for consumption by PrintSymbolizedProfile.
- $main::profile_type = 'cpu';
} else {
# For non-CPU profiles, we add a type-extension to
# the target profile file name.
my $suffix = $path;
$suffix =~ s,/,.,g;
$profile_file .= $suffix;
- # Set $profile_type for consumption by PrintSymbolizedProfile.
- if ($path =~ m/$HEAP_PAGE/) {
- $main::profile_type = 'heap';
- } elsif ($path =~ m/$GROWTH_PAGE/) {
- $main::profile_type = 'growth';
- } elsif ($path =~ m/$CONTENTION_PAGE/) {
- $main::profile_type = 'contention';
- }
}
my $profile_dir = $ENV{"JEPROF_TMPDIR"} || ($ENV{HOME} . "/jeprof");
@@ -3828,8 +3730,6 @@ sub ReadProfile {
my $symbol_marker = $&;
$PROFILE_PAGE =~ m,[^/]+$,; # matches everything after the last slash
my $profile_marker = $&;
- $HEAP_PAGE =~ m,[^/]+$,; # matches everything after the last slash
- my $heap_marker = $&;
# Look at first line to see if it is a heap or a CPU profile.
# CPU profile may start with no header at all, and just binary data
@@ -3856,13 +3756,7 @@ sub ReadProfile {
$header = ReadProfileHeader(*PROFILE) || "";
}
- if ($header =~ m/^--- *($heap_marker|$growth_marker)/o) {
- # Skip "--- ..." line for profile types that have their own headers.
- $header = ReadProfileHeader(*PROFILE) || "";
- }
-
$main::profile_type = '';
-
if ($header =~ m/^heap profile:.*$growth_marker/o) {
$main::profile_type = 'growth';
$result = ReadHeapProfile($prog, *PROFILE, $header);
@@ -3914,9 +3808,9 @@ sub ReadProfile {
# independent implementation.
sub FixCallerAddresses {
my $stack = shift;
- # --raw/http: Always subtract one from pc's, because PrintSymbolizedProfile()
- # dumps unadjusted profiles.
- {
+ if ($main::use_symbolized_profile) {
+ return $stack;
+ } else {
$stack =~ /(\s)/;
my $delimiter = $1;
my @addrs = split(' ', $stack);
@@ -3984,7 +3878,12 @@ sub ReadCPUProfile {
for (my $j = 0; $j < $d; $j++) {
my $pc = $slots->get($i+$j);
# Subtract one from caller pc so we map back to call instr.
- $pc--;
+ # However, don't do this if we're reading a symbolized profile
+ # file, in which case the subtract-one was done when the file
+ # was written.
+ if ($j > 0 && !$main::use_symbolized_profile) {
+ $pc--;
+ }
$pc = sprintf("%0*x", $address_length, $pc);
$pcs->{$pc} = 1;
push @k, $pc;
diff --git a/deps/jemalloc/build-aux/config.guess b/deps/jemalloc/config.guess
index 2e9ad7fe8..1f5c50c0d 100755
--- a/deps/jemalloc/build-aux/config.guess
+++ b/deps/jemalloc/config.guess
@@ -1,8 +1,8 @@
#! /bin/sh
# Attempt to guess a canonical system name.
-# Copyright 1992-2016 Free Software Foundation, Inc.
+# Copyright 1992-2014 Free Software Foundation, Inc.
-timestamp='2016-10-02'
+timestamp='2014-03-23'
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
@@ -24,12 +24,12 @@ timestamp='2016-10-02'
# program. This Exception is an additional permission under section 7
# of the GNU General Public License, version 3 ("GPLv3").
#
-# Originally written by Per Bothner; maintained since 2000 by Ben Elliston.
+# Originally written by Per Bothner.
#
# You can get the latest version of this script from:
-# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess
+# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
#
-# Please send patches to <config-patches@gnu.org>.
+# Please send patches with a ChangeLog entry to config-patches@gnu.org.
me=`echo "$0" | sed -e 's,.*/,,'`
@@ -50,7 +50,7 @@ version="\
GNU config.guess ($timestamp)
Originally written by Per Bothner.
-Copyright 1992-2016 Free Software Foundation, Inc.
+Copyright 1992-2014 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -168,29 +168,19 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
# Note: NetBSD doesn't particularly care about the vendor
# portion of the name. We always set it to "unknown".
sysctl="sysctl -n hw.machine_arch"
- UNAME_MACHINE_ARCH=`(uname -p 2>/dev/null || \
- /sbin/$sysctl 2>/dev/null || \
- /usr/sbin/$sysctl 2>/dev/null || \
- echo unknown)`
+ UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \
+ /usr/sbin/$sysctl 2>/dev/null || echo unknown)`
case "${UNAME_MACHINE_ARCH}" in
armeb) machine=armeb-unknown ;;
arm*) machine=arm-unknown ;;
sh3el) machine=shl-unknown ;;
sh3eb) machine=sh-unknown ;;
sh5el) machine=sh5le-unknown ;;
- earmv*)
- arch=`echo ${UNAME_MACHINE_ARCH} | sed -e 's,^e\(armv[0-9]\).*$,\1,'`
- endian=`echo ${UNAME_MACHINE_ARCH} | sed -ne 's,^.*\(eb\)$,\1,p'`
- machine=${arch}${endian}-unknown
- ;;
*) machine=${UNAME_MACHINE_ARCH}-unknown ;;
esac
# The Operating System including object format, if it has switched
- # to ELF recently (or will in the future) and ABI.
+ # to ELF recently, or will in the future.
case "${UNAME_MACHINE_ARCH}" in
- earm*)
- os=netbsdelf
- ;;
arm*|i386|m68k|ns32k|sh3*|sparc|vax)
eval $set_cc_for_build
if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
@@ -207,13 +197,6 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
os=netbsd
;;
esac
- # Determine ABI tags.
- case "${UNAME_MACHINE_ARCH}" in
- earm*)
- expr='s/^earmv[0-9]/-eabi/;s/eb$//'
- abi=`echo ${UNAME_MACHINE_ARCH} | sed -e "$expr"`
- ;;
- esac
# The OS release
# Debian GNU/NetBSD machines have a different userland, and
# thus, need a distinct triplet. However, they do not need
@@ -224,13 +207,13 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
release='-gnu'
;;
*)
- release=`echo ${UNAME_RELEASE} | sed -e 's/[-_].*//' | cut -d. -f1,2`
+ release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'`
;;
esac
# Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM:
# contains redundant information, the shorter form:
# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
- echo "${machine}-${os}${release}${abi}"
+ echo "${machine}-${os}${release}"
exit ;;
*:Bitrig:*:*)
UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'`
@@ -240,10 +223,6 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'`
echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE}
exit ;;
- *:LibertyBSD:*:*)
- UNAME_MACHINE_ARCH=`arch | sed 's/^.*BSD\.//'`
- echo ${UNAME_MACHINE_ARCH}-unknown-libertybsd${UNAME_RELEASE}
- exit ;;
*:ekkoBSD:*:*)
echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE}
exit ;;
@@ -256,9 +235,6 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
*:MirBSD:*:*)
echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE}
exit ;;
- *:Sortix:*:*)
- echo ${UNAME_MACHINE}-unknown-sortix
- exit ;;
alpha:OSF1:*:*)
case $UNAME_RELEASE in
*4.0)
@@ -275,42 +251,42 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1`
case "$ALPHA_CPU_TYPE" in
"EV4 (21064)")
- UNAME_MACHINE=alpha ;;
+ UNAME_MACHINE="alpha" ;;
"EV4.5 (21064)")
- UNAME_MACHINE=alpha ;;
+ UNAME_MACHINE="alpha" ;;
"LCA4 (21066/21068)")
- UNAME_MACHINE=alpha ;;
+ UNAME_MACHINE="alpha" ;;
"EV5 (21164)")
- UNAME_MACHINE=alphaev5 ;;
+ UNAME_MACHINE="alphaev5" ;;
"EV5.6 (21164A)")
- UNAME_MACHINE=alphaev56 ;;
+ UNAME_MACHINE="alphaev56" ;;
"EV5.6 (21164PC)")
- UNAME_MACHINE=alphapca56 ;;
+ UNAME_MACHINE="alphapca56" ;;
"EV5.7 (21164PC)")
- UNAME_MACHINE=alphapca57 ;;
+ UNAME_MACHINE="alphapca57" ;;
"EV6 (21264)")
- UNAME_MACHINE=alphaev6 ;;
+ UNAME_MACHINE="alphaev6" ;;
"EV6.7 (21264A)")
- UNAME_MACHINE=alphaev67 ;;
+ UNAME_MACHINE="alphaev67" ;;
"EV6.8CB (21264C)")
- UNAME_MACHINE=alphaev68 ;;
+ UNAME_MACHINE="alphaev68" ;;
"EV6.8AL (21264B)")
- UNAME_MACHINE=alphaev68 ;;
+ UNAME_MACHINE="alphaev68" ;;
"EV6.8CX (21264D)")
- UNAME_MACHINE=alphaev68 ;;
+ UNAME_MACHINE="alphaev68" ;;
"EV6.9A (21264/EV69A)")
- UNAME_MACHINE=alphaev69 ;;
+ UNAME_MACHINE="alphaev69" ;;
"EV7 (21364)")
- UNAME_MACHINE=alphaev7 ;;
+ UNAME_MACHINE="alphaev7" ;;
"EV7.9 (21364A)")
- UNAME_MACHINE=alphaev79 ;;
+ UNAME_MACHINE="alphaev79" ;;
esac
# A Pn.n version is a patched version.
# A Vn.n version is a released version.
# A Tn.n version is a released field test version.
# A Xn.n version is an unreleased experimental baselevel.
# 1.2 uses "1.2" for uname -r.
- echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz`
+ echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
# Reset EXIT trap before exiting to avoid spurious non-zero exit code.
exitcode=$?
trap '' 0
@@ -383,16 +359,16 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
exit ;;
i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*)
eval $set_cc_for_build
- SUN_ARCH=i386
+ SUN_ARCH="i386"
# If there is a compiler, see if it is configured for 64-bit objects.
# Note that the Sun cc does not turn __LP64__ into 1 like gcc does.
# This test works for both compilers.
- if [ "$CC_FOR_BUILD" != no_compiler_found ]; then
+ if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \
- (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \
+ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
grep IS_64BIT_ARCH >/dev/null
then
- SUN_ARCH=x86_64
+ SUN_ARCH="x86_64"
fi
fi
echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
@@ -417,7 +393,7 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
exit ;;
sun*:*:4.2BSD:*)
UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null`
- test "x${UNAME_RELEASE}" = x && UNAME_RELEASE=3
+ test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3
case "`/bin/arch`" in
sun3)
echo m68k-sun-sunos${UNAME_RELEASE}
@@ -603,9 +579,8 @@ EOF
else
IBM_ARCH=powerpc
fi
- if [ -x /usr/bin/lslpp ] ; then
- IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc |
- awk -F: '{ print $3 }' | sed s/[0-9]*$/0/`
+ if [ -x /usr/bin/oslevel ] ; then
+ IBM_REV=`/usr/bin/oslevel`
else
IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
fi
@@ -642,13 +617,13 @@ EOF
sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
case "${sc_cpu_version}" in
- 523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0
- 528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1
+ 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0
+ 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1
532) # CPU_PA_RISC2_0
case "${sc_kernel_bits}" in
- 32) HP_ARCH=hppa2.0n ;;
- 64) HP_ARCH=hppa2.0w ;;
- '') HP_ARCH=hppa2.0 ;; # HP-UX 10.20
+ 32) HP_ARCH="hppa2.0n" ;;
+ 64) HP_ARCH="hppa2.0w" ;;
+ '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20
esac ;;
esac
fi
@@ -687,11 +662,11 @@ EOF
exit (0);
}
EOF
- (CCOPTS="" $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy`
+ (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy`
test -z "$HP_ARCH" && HP_ARCH=hppa
fi ;;
esac
- if [ ${HP_ARCH} = hppa2.0w ]
+ if [ ${HP_ARCH} = "hppa2.0w" ]
then
eval $set_cc_for_build
@@ -704,12 +679,12 @@ EOF
# $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess
# => hppa64-hp-hpux11.23
- if echo __LP64__ | (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) |
+ if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) |
grep -q __LP64__
then
- HP_ARCH=hppa2.0w
+ HP_ARCH="hppa2.0w"
else
- HP_ARCH=hppa64
+ HP_ARCH="hppa64"
fi
fi
echo ${HP_ARCH}-hp-hpux${HPUX_REV}
@@ -814,14 +789,14 @@ EOF
echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
exit ;;
F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
- FUJITSU_PROC=`uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz`
- FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'`
+ FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+ FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
exit ;;
5000:UNIX_System_V:4.*:*)
- FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'`
- FUJITSU_REL=`echo ${UNAME_RELEASE} | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/'`
+ FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+ FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'`
echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
exit ;;
i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
@@ -903,7 +878,7 @@ EOF
exit ;;
*:GNU/*:*:*)
# other systems with GNU libc and userland
- echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]"``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC}
+ echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC}
exit ;;
i*86:Minix:*:*)
echo ${UNAME_MACHINE}-pc-minix
@@ -926,7 +901,7 @@ EOF
EV68*) UNAME_MACHINE=alphaev68 ;;
esac
objdump --private-headers /bin/sh | grep -q ld.so.1
- if test "$?" = 0 ; then LIBC=gnulibc1 ; fi
+ if test "$?" = 0 ; then LIBC="gnulibc1" ; fi
echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
arc:Linux:*:* | arceb:Linux:*:*)
@@ -957,9 +932,6 @@ EOF
crisv32:Linux:*:*)
echo ${UNAME_MACHINE}-axis-linux-${LIBC}
exit ;;
- e2k:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
- exit ;;
frv:Linux:*:*)
echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
@@ -972,9 +944,6 @@ EOF
ia64:Linux:*:*)
echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
- k1om:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
- exit ;;
m32r*:Linux:*:*)
echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
@@ -1000,9 +969,6 @@ EOF
eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'`
test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; }
;;
- mips64el:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
- exit ;;
openrisc*:Linux:*:*)
echo or1k-unknown-linux-${LIBC}
exit ;;
@@ -1035,9 +1001,6 @@ EOF
ppcle:Linux:*:*)
echo powerpcle-unknown-linux-${LIBC}
exit ;;
- riscv32:Linux:*:* | riscv64:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
- exit ;;
s390:Linux:*:* | s390x:Linux:*:*)
echo ${UNAME_MACHINE}-ibm-linux-${LIBC}
exit ;;
@@ -1057,7 +1020,7 @@ EOF
echo ${UNAME_MACHINE}-dec-linux-${LIBC}
exit ;;
x86_64:Linux:*:*)
- echo ${UNAME_MACHINE}-pc-linux-${LIBC}
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
xtensa*:Linux:*:*)
echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
@@ -1136,7 +1099,7 @@ EOF
# uname -m prints for DJGPP always 'pc', but it prints nothing about
# the processor, so we play safe by assuming i586.
# Note: whatever this is, it MUST be the same as what config.sub
- # prints for the "djgpp" host, or else GDB configure will decide that
+ # prints for the "djgpp" host, or else GDB configury will decide that
# this is a cross-build.
echo i586-pc-msdosdjgpp
exit ;;
@@ -1285,9 +1248,6 @@ EOF
SX-8R:SUPER-UX:*:*)
echo sx8r-nec-superux${UNAME_RELEASE}
exit ;;
- SX-ACE:SUPER-UX:*:*)
- echo sxace-nec-superux${UNAME_RELEASE}
- exit ;;
Power*:Rhapsody:*:*)
echo powerpc-apple-rhapsody${UNAME_RELEASE}
exit ;;
@@ -1301,9 +1261,9 @@ EOF
UNAME_PROCESSOR=powerpc
fi
if test `echo "$UNAME_RELEASE" | sed -e 's/\..*//'` -le 10 ; then
- if [ "$CC_FOR_BUILD" != no_compiler_found ]; then
+ if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \
- (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \
+ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
grep IS_64BIT_ARCH >/dev/null
then
case $UNAME_PROCESSOR in
@@ -1325,7 +1285,7 @@ EOF
exit ;;
*:procnto*:*:* | *:QNX:[0123456789]*:*)
UNAME_PROCESSOR=`uname -p`
- if test "$UNAME_PROCESSOR" = x86; then
+ if test "$UNAME_PROCESSOR" = "x86"; then
UNAME_PROCESSOR=i386
UNAME_MACHINE=pc
fi
@@ -1356,7 +1316,7 @@ EOF
# "uname -m" is not consistent, so use $cputype instead. 386
# is converted to i386 for consistency with other x86
# operating systems.
- if test "$cputype" = 386; then
+ if test "$cputype" = "386"; then
UNAME_MACHINE=i386
else
UNAME_MACHINE="$cputype"
@@ -1398,7 +1358,7 @@ EOF
echo i386-pc-xenix
exit ;;
i*86:skyos:*:*)
- echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE} | sed -e 's/ .*$//'`
+ echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//'
exit ;;
i*86:rdos:*:*)
echo ${UNAME_MACHINE}-pc-rdos
@@ -1409,25 +1369,23 @@ EOF
x86_64:VMkernel:*:*)
echo ${UNAME_MACHINE}-unknown-esx
exit ;;
- amd64:Isilon\ OneFS:*:*)
- echo x86_64-unknown-onefs
- exit ;;
esac
cat >&2 <<EOF
$0: unable to guess system type
-This script (version $timestamp), has failed to recognize the
-operating system you are using. If your script is old, overwrite
-config.guess and config.sub with the latest versions from:
+This script, last modified $timestamp, has failed to recognize
+the operating system you are using. It is advised that you
+download the most up to date version of the config scripts from
- http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess
+ http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
and
- http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub
+ http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
-If $0 has already been updated, send the following data and any
-information you think might be pertinent to config-patches@gnu.org to
-provide the necessary information to handle your system.
+If the version you run ($0) is already up to date, please
+send the following data and any information you think might be
+pertinent to <config-patches@gnu.org> in order to provide the needed
+information to handle your system.
config.guess timestamp = $timestamp
diff --git a/deps/jemalloc/build-aux/config.sub b/deps/jemalloc/config.sub
index dd2ca93c6..0ccff7706 100755
--- a/deps/jemalloc/build-aux/config.sub
+++ b/deps/jemalloc/config.sub
@@ -1,8 +1,8 @@
#! /bin/sh
# Configuration validation subroutine script.
-# Copyright 1992-2016 Free Software Foundation, Inc.
+# Copyright 1992-2014 Free Software Foundation, Inc.
-timestamp='2016-11-04'
+timestamp='2014-05-01'
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
@@ -25,7 +25,7 @@ timestamp='2016-11-04'
# of the GNU General Public License, version 3 ("GPLv3").
-# Please send patches to <config-patches@gnu.org>.
+# Please send patches with a ChangeLog entry to config-patches@gnu.org.
#
# Configuration subroutine to validate and canonicalize a configuration type.
# Supply the specified configuration type as an argument.
@@ -33,7 +33,7 @@ timestamp='2016-11-04'
# Otherwise, we print the canonical config type on stdout and succeed.
# You can get the latest version of this script from:
-# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub
+# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
# This file is supposed to be the same for all GNU packages
# and recognize all the CPU types, system types and aliases
@@ -53,7 +53,8 @@ timestamp='2016-11-04'
me=`echo "$0" | sed -e 's,.*/,,'`
usage="\
-Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS
+Usage: $0 [OPTION] CPU-MFR-OPSYS
+ $0 [OPTION] ALIAS
Canonicalize a configuration name.
@@ -67,7 +68,7 @@ Report bugs and patches to <config-patches@gnu.org>."
version="\
GNU config.sub ($timestamp)
-Copyright 1992-2016 Free Software Foundation, Inc.
+Copyright 1992-2014 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -116,8 +117,8 @@ maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
case $maybe_os in
nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \
linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \
- knetbsd*-gnu* | netbsd*-gnu* | netbsd*-eabi* | \
- kopensolaris*-gnu* | cloudabi*-eabi* | \
+ knetbsd*-gnu* | netbsd*-gnu* | \
+ kopensolaris*-gnu* | \
storm-chaos* | os2-emx* | rtmk-nova*)
os=-$maybe_os
basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
@@ -254,13 +255,12 @@ case $basic_machine in
| arc | arceb \
| arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \
| avr | avr32 \
- | ba \
| be32 | be64 \
| bfin \
| c4x | c8051 | clipper \
| d10v | d30v | dlx | dsp16xx \
- | e2k | epiphany \
- | fido | fr30 | frv | ft32 \
+ | epiphany \
+ | fido | fr30 | frv \
| h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
| hexagon \
| i370 | i860 | i960 | ia64 \
@@ -301,12 +301,10 @@ case $basic_machine in
| open8 | or1k | or1knd | or32 \
| pdp10 | pdp11 | pj | pjl \
| powerpc | powerpc64 | powerpc64le | powerpcle \
- | pru \
| pyramid \
- | riscv32 | riscv64 \
| rl78 | rx \
| score \
- | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[234]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
+ | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
| sh64 | sh64le \
| sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \
| sparcv8 | sparcv9 | sparcv9b | sparcv9v \
@@ -314,7 +312,6 @@ case $basic_machine in
| tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \
| ubicom32 \
| v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \
- | visium \
| we32k \
| x86 | xc16x | xstormy16 | xtensa \
| z8k | z80)
@@ -329,9 +326,6 @@ case $basic_machine in
c6x)
basic_machine=tic6x-unknown
;;
- leon|leon[3-9])
- basic_machine=sparc-$basic_machine
- ;;
m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip)
basic_machine=$basic_machine-unknown
os=-none
@@ -377,13 +371,12 @@ case $basic_machine in
| alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \
| arm-* | armbe-* | armle-* | armeb-* | armv*-* \
| avr-* | avr32-* \
- | ba-* \
| be32-* | be64-* \
| bfin-* | bs2000-* \
| c[123]* | c30-* | [cjt]90-* | c4x-* \
| c8051-* | clipper-* | craynv-* | cydra-* \
| d10v-* | d30v-* | dlx-* \
- | e2k-* | elxsi-* \
+ | elxsi-* \
| f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \
| h8300-* | h8500-* \
| hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
@@ -429,15 +422,13 @@ case $basic_machine in
| orion-* \
| pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
| powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \
- | pru-* \
| pyramid-* \
- | riscv32-* | riscv64-* \
| rl78-* | romp-* | rs6000-* | rx-* \
| sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
| shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
| sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \
| sparclite-* \
- | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx*-* \
+ | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \
| tahoe-* \
| tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \
| tile*-* \
@@ -445,7 +436,6 @@ case $basic_machine in
| ubicom32-* \
| v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \
| vax-* \
- | visium-* \
| we32k-* \
| x86-* | x86_64-* | xc16x-* | xps100-* \
| xstormy16-* | xtensa*-* \
@@ -522,9 +512,6 @@ case $basic_machine in
basic_machine=i386-pc
os=-aros
;;
- asmjs)
- basic_machine=asmjs-unknown
- ;;
aux)
basic_machine=m68k-apple
os=-aux
@@ -645,14 +632,6 @@ case $basic_machine in
basic_machine=m68k-bull
os=-sysv3
;;
- e500v[12])
- basic_machine=powerpc-unknown
- os=$os"spe"
- ;;
- e500v[12]-*)
- basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
- os=$os"spe"
- ;;
ebmon29k)
basic_machine=a29k-amd
os=-ebmon
@@ -794,9 +773,6 @@ case $basic_machine in
basic_machine=m68k-isi
os=-sysv
;;
- leon-*|leon[3-9]-*)
- basic_machine=sparc-`echo $basic_machine | sed 's/-.*//'`
- ;;
m68knommu)
basic_machine=m68k-unknown
os=-linux
@@ -852,10 +828,6 @@ case $basic_machine in
basic_machine=powerpc-unknown
os=-morphos
;;
- moxiebox)
- basic_machine=moxie-unknown
- os=-moxiebox
- ;;
msdos)
basic_machine=i386-pc
os=-msdos
@@ -1032,7 +1004,7 @@ case $basic_machine in
ppc-* | ppcbe-*)
basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
;;
- ppcle | powerpclittle)
+ ppcle | powerpclittle | ppc-le | powerpc-little)
basic_machine=powerpcle-unknown
;;
ppcle-* | powerpclittle-*)
@@ -1042,7 +1014,7 @@ case $basic_machine in
;;
ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'`
;;
- ppc64le | powerpc64little)
+ ppc64le | powerpc64little | ppc64-le | powerpc64-little)
basic_machine=powerpc64le-unknown
;;
ppc64le-* | powerpc64little-*)
@@ -1388,28 +1360,27 @@ case $os in
| -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \
| -sym* | -kopensolaris* | -plan9* \
| -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
- | -aos* | -aros* | -cloudabi* | -sortix* \
+ | -aos* | -aros* \
| -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
| -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
| -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \
- | -bitrig* | -openbsd* | -solidbsd* | -libertybsd* \
+ | -bitrig* | -openbsd* | -solidbsd* \
| -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \
| -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
| -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
| -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
| -chorusos* | -chorusrdb* | -cegcc* \
| -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
- | -midipix* | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \
+ | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \
| -linux-newlib* | -linux-musl* | -linux-uclibc* \
- | -uxpv* | -beos* | -mpeix* | -udk* | -moxiebox* \
+ | -uxpv* | -beos* | -mpeix* | -udk* \
| -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \
| -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
| -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \
| -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
| -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \
| -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \
- | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* \
- | -onefs* | -tirtos* | -phoenix* | -fuchsia*)
+ | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* | -tirtos*)
# Remember, each alternative MUST END IN *, to match a version number.
;;
-qnx*)
@@ -1433,6 +1404,9 @@ case $os in
-mac*)
os=`echo $os | sed -e 's|mac|macos|'`
;;
+ # Apple iOS
+ -ios*)
+ ;;
-linux-dietlibc)
os=-linux-dietlibc
;;
@@ -1541,8 +1515,6 @@ case $os in
;;
-nacl*)
;;
- -ios)
- ;;
-none)
;;
*)
diff --git a/deps/jemalloc/configure b/deps/jemalloc/configure
index d7391524d..8c56c92a1 100755
--- a/deps/jemalloc/configure
+++ b/deps/jemalloc/configure
@@ -628,6 +628,7 @@ cfghdrs_in
enable_zone_allocator
enable_tls
enable_lazy_lock
+TESTLIBS
jemalloc_version_gid
jemalloc_version_nrev
jemalloc_version_bugfix
@@ -657,19 +658,16 @@ INSTALL_SCRIPT
INSTALL_PROGRAM
enable_autogen
RPATH_EXTRA
-LM
CC_MM
AROUT
ARFLAGS
MKLIB
-TEST_LD_MODE
LDTARGET
CTARGET
PIC_CFLAGS
SOREV
EXTRA_LDFLAGS
DSO_LDFLAGS
-link_whole_archive
libprefix
exe
a
@@ -691,7 +689,6 @@ build
EGREP
GREP
CPP
-EXTRA_CFLAGS
OBJEXT
EXEEXT
ac_ct_CC
@@ -732,7 +729,6 @@ infodir
docdir
oldincludedir
includedir
-runstatedir
localstatedir
sharedstatedir
sysconfdir
@@ -764,7 +760,6 @@ with_jemalloc_prefix
with_export
with_private_namespace
with_install_suffix
-with_malloc_conf
enable_cc_silence
enable_debug
enable_ivsalloc
@@ -786,8 +781,6 @@ with_lg_quantum
with_lg_page
with_lg_page_sizes
with_lg_size_class_group
-with_version
-enable_syscall
enable_lazy_lock
enable_tls
enable_zone_allocator
@@ -839,7 +832,6 @@ datadir='${datarootdir}'
sysconfdir='${prefix}/etc'
sharedstatedir='${prefix}/com'
localstatedir='${prefix}/var'
-runstatedir='${localstatedir}/run'
includedir='${prefix}/include'
oldincludedir='/usr/include'
docdir='${datarootdir}/doc/${PACKAGE}'
@@ -1092,15 +1084,6 @@ do
| -silent | --silent | --silen | --sile | --sil)
silent=yes ;;
- -runstatedir | --runstatedir | --runstatedi | --runstated \
- | --runstate | --runstat | --runsta | --runst | --runs \
- | --run | --ru | --r)
- ac_prev=runstatedir ;;
- -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \
- | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \
- | --run=* | --ru=* | --r=*)
- runstatedir=$ac_optarg ;;
-
-sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
ac_prev=sbindir ;;
-sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
@@ -1238,7 +1221,7 @@ fi
for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \
datadir sysconfdir sharedstatedir localstatedir includedir \
oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
- libdir localedir mandir runstatedir
+ libdir localedir mandir
do
eval ac_val=\$$ac_var
# Remove trailing slashes.
@@ -1391,7 +1374,6 @@ Fine tuning of the installation directories:
--sysconfdir=DIR read-only single-machine data [PREFIX/etc]
--sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com]
--localstatedir=DIR modifiable single-machine data [PREFIX/var]
- --runstatedir=DIR modifiable per-process data [LOCALSTATEDIR/run]
--libdir=DIR object code libraries [EPREFIX/lib]
--includedir=DIR C header files [PREFIX/include]
--oldincludedir=DIR C header files for non-gcc [/usr/include]
@@ -1443,7 +1425,6 @@ Optional Features:
--disable-cache-oblivious
Disable support for cache-oblivious allocation
alignment
- --disable-syscall Disable use of syscall(2)
--enable-lazy-lock Enable lazy locking (only lock when multi-threaded)
--disable-tls Disable thread-local storage (__thread keyword)
--disable-zone-allocator
@@ -1462,8 +1443,6 @@ Optional Packages:
Prefix to prepend to all library-private APIs
--with-install-suffix=<suffix>
Suffix to append to all installed files
- --with-malloc-conf=<malloc_conf>
- config.malloc_conf options string
--with-static-libunwind=<libunwind.a>
Path to static libunwind library; use rather than
dynamically linking
@@ -1477,8 +1456,6 @@ Optional Packages:
Base 2 logs of system page sizes to support
--with-lg-size-class-group=<lg-size-class-group>
Base 2 log of size classes per doubling
- --with-version=<major>.<minor>.<bugfix>-<nrev>-g<gid>
- Version string
Some influential environment variables:
CC C compiler command
@@ -2507,36 +2484,6 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
-ac_aux_dir=
-for ac_dir in build-aux "$srcdir"/build-aux; do
- if test -f "$ac_dir/install-sh"; then
- ac_aux_dir=$ac_dir
- ac_install_sh="$ac_aux_dir/install-sh -c"
- break
- elif test -f "$ac_dir/install.sh"; then
- ac_aux_dir=$ac_dir
- ac_install_sh="$ac_aux_dir/install.sh -c"
- break
- elif test -f "$ac_dir/shtool"; then
- ac_aux_dir=$ac_dir
- ac_install_sh="$ac_aux_dir/shtool install -c"
- break
- fi
-done
-if test -z "$ac_aux_dir"; then
- as_fn_error $? "cannot find install-sh, install.sh, or shtool in build-aux \"$srcdir\"/build-aux" "$LINENO" 5
-fi
-
-# These three variables are undocumented and unsupported,
-# and are intended to be withdrawn in a future Autoconf release.
-# They can cause serious problems if a builder's source tree is in a directory
-# whose full name contains unusual characters.
-ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var.
-ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var.
-ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var.
-
-
-
@@ -3443,7 +3390,6 @@ ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
ac_compiler_gnu=$ac_cv_c_compiler_gnu
-
if test "x$GCC" != "xyes" ; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler is MSVC" >&5
@@ -3477,125 +3423,10 @@ fi
$as_echo "$je_cv_msvc" >&6; }
fi
-je_cv_cray_prgenv_wrapper=""
-if test "x${PE_ENV}" != "x" ; then
- case "${CC}" in
- CC|cc)
- je_cv_cray_prgenv_wrapper="yes"
- ;;
- *)
- ;;
- esac
-fi
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler is cray" >&5
-$as_echo_n "checking whether compiler is cray... " >&6; }
-if ${je_cv_cray+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-int
-main ()
-{
-
-#ifndef _CRAYC
- int fail-1;
-#endif
-
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
- je_cv_cray=yes
-else
- je_cv_cray=no
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_cray" >&5
-$as_echo "$je_cv_cray" >&6; }
-
-if test "x${je_cv_cray}" = "xyes" ; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether cray compiler version is 8.4" >&5
-$as_echo_n "checking whether cray compiler version is 8.4... " >&6; }
-if ${je_cv_cray_84+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-int
-main ()
-{
-
-#if !(_RELEASE_MAJOR == 8 && _RELEASE_MINOR == 4)
- int fail-1;
-#endif
-
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
- je_cv_cray_84=yes
-else
- je_cv_cray_84=no
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_cray_84" >&5
-$as_echo "$je_cv_cray_84" >&6; }
-fi
-
if test "x$CFLAGS" = "x" ; then
no_CFLAGS="yes"
if test "x$GCC" = "xyes" ; then
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -std=gnu11" >&5
-$as_echo_n "checking whether compiler supports -std=gnu11... " >&6; }
-TCFLAGS="${CFLAGS}"
-if test "x${CFLAGS}" = "x" ; then
- CFLAGS="-std=gnu11"
-else
- CFLAGS="${CFLAGS} -std=gnu11"
-fi
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-
-int
-main ()
-{
-
- return 0;
-
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
- je_cv_cflags_appended=-std=gnu11
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
-$as_echo "yes" >&6; }
-else
- je_cv_cflags_appended=
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
- CFLAGS="${TCFLAGS}"
-
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-
- if test "x$je_cv_cflags_appended" = "x-std=gnu11" ; then
- cat >>confdefs.h <<_ACEOF
-#define JEMALLOC_HAS_RESTRICT 1
-_ACEOF
-
- else
-
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -std=gnu99" >&5
$as_echo_n "checking whether compiler supports -std=gnu99... " >&6; }
TCFLAGS="${CFLAGS}"
@@ -3631,12 +3462,11 @@ $as_echo "no" >&6; }
fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
- if test "x$je_cv_cflags_appended" = "x-std=gnu99" ; then
- cat >>confdefs.h <<_ACEOF
+ if test "x$je_cv_cflags_appended" = "x-std=gnu99" ; then
+ cat >>confdefs.h <<_ACEOF
#define JEMALLOC_HAS_RESTRICT 1
_ACEOF
- fi
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wall" >&5
@@ -3711,78 +3541,6 @@ fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wshorten-64-to-32" >&5
-$as_echo_n "checking whether compiler supports -Wshorten-64-to-32... " >&6; }
-TCFLAGS="${CFLAGS}"
-if test "x${CFLAGS}" = "x" ; then
- CFLAGS="-Wshorten-64-to-32"
-else
- CFLAGS="${CFLAGS} -Wshorten-64-to-32"
-fi
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-
-int
-main ()
-{
-
- return 0;
-
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
- je_cv_cflags_appended=-Wshorten-64-to-32
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
-$as_echo "yes" >&6; }
-else
- je_cv_cflags_appended=
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
- CFLAGS="${TCFLAGS}"
-
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wsign-compare" >&5
-$as_echo_n "checking whether compiler supports -Wsign-compare... " >&6; }
-TCFLAGS="${CFLAGS}"
-if test "x${CFLAGS}" = "x" ; then
- CFLAGS="-Wsign-compare"
-else
- CFLAGS="${CFLAGS} -Wsign-compare"
-fi
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-
-int
-main ()
-{
-
- return 0;
-
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
- je_cv_cflags_appended=-Wsign-compare
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
-$as_echo "yes" >&6; }
-else
- je_cv_cflags_appended=
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
- CFLAGS="${TCFLAGS}"
-
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-
-
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -pipe" >&5
$as_echo_n "checking whether compiler supports -pipe... " >&6; }
TCFLAGS="${CFLAGS}"
@@ -4002,90 +3760,16 @@ rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
CPPFLAGS="$CPPFLAGS -I${srcdir}/include/msvc_compat"
fi
- if test "x$je_cv_cray" = "xyes" ; then
- if test "x$je_cv_cray_84" = "xyes" ; then
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -hipa2" >&5
-$as_echo_n "checking whether compiler supports -hipa2... " >&6; }
-TCFLAGS="${CFLAGS}"
-if test "x${CFLAGS}" = "x" ; then
- CFLAGS="-hipa2"
-else
- CFLAGS="${CFLAGS} -hipa2"
-fi
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-
-int
-main ()
-{
-
- return 0;
-
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
- je_cv_cflags_appended=-hipa2
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
-$as_echo "yes" >&6; }
-else
- je_cv_cflags_appended=
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
- CFLAGS="${TCFLAGS}"
-
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -hnognu" >&5
-$as_echo_n "checking whether compiler supports -hnognu... " >&6; }
-TCFLAGS="${CFLAGS}"
-if test "x${CFLAGS}" = "x" ; then
- CFLAGS="-hnognu"
-else
- CFLAGS="${CFLAGS} -hnognu"
-fi
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-
-int
-main ()
-{
-
- return 0;
-
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
- je_cv_cflags_appended=-hnognu
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
-$as_echo "yes" >&6; }
-else
- je_cv_cflags_appended=
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
- CFLAGS="${TCFLAGS}"
-
fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+if test "x$EXTRA_CFLAGS" != "x" ; then
- fi
- if test "x$enable_cc_silence" != "xno" ; then
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -hnomessage=128" >&5
-$as_echo_n "checking whether compiler supports -hnomessage=128... " >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports $EXTRA_CFLAGS" >&5
+$as_echo_n "checking whether compiler supports $EXTRA_CFLAGS... " >&6; }
TCFLAGS="${CFLAGS}"
if test "x${CFLAGS}" = "x" ; then
- CFLAGS="-hnomessage=128"
+ CFLAGS="$EXTRA_CFLAGS"
else
- CFLAGS="${CFLAGS} -hnomessage=128"
+ CFLAGS="${CFLAGS} $EXTRA_CFLAGS"
fi
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
@@ -4102,7 +3786,7 @@ main ()
}
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
- je_cv_cflags_appended=-hnomessage=128
+ je_cv_cflags_appended=$EXTRA_CFLAGS
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
else
@@ -4114,46 +3798,7 @@ $as_echo "no" >&6; }
fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -hnomessage=1357" >&5
-$as_echo_n "checking whether compiler supports -hnomessage=1357... " >&6; }
-TCFLAGS="${CFLAGS}"
-if test "x${CFLAGS}" = "x" ; then
- CFLAGS="-hnomessage=1357"
-else
- CFLAGS="${CFLAGS} -hnomessage=1357"
fi
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-
-int
-main ()
-{
-
- return 0;
-
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
- je_cv_cflags_appended=-hnomessage=1357
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
-$as_echo "yes" >&6; }
-else
- je_cv_cflags_appended=
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
- CFLAGS="${TCFLAGS}"
-
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-
- fi
- fi
-fi
-
ac_ext=c
ac_cpp='$CPP $CPPFLAGS'
ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
@@ -4786,12 +4431,7 @@ if test "x${je_cv_msvc}" = "xyes" -a "x${ac_cv_header_inttypes_h}" = "xno"; then
CPPFLAGS="$CPPFLAGS -I${srcdir}/include/msvc_compat/C99"
fi
-if test "x${je_cv_msvc}" = "xyes" ; then
- LG_SIZEOF_PTR=LG_SIZEOF_PTR_WIN
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: Using a predefined value for sizeof(void *): 4 for 32-bit, 8 for 64-bit" >&5
-$as_echo "Using a predefined value for sizeof(void *): 4 for 32-bit, 8 for 64-bit" >&6; }
-else
- # The cast to long int works around a bug in the HP C Compiler
+# The cast to long int works around a bug in the HP C Compiler
# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
# This bug is HP SR number 8606223364.
@@ -4824,13 +4464,12 @@ cat >>confdefs.h <<_ACEOF
_ACEOF
- if test "x${ac_cv_sizeof_void_p}" = "x8" ; then
- LG_SIZEOF_PTR=3
- elif test "x${ac_cv_sizeof_void_p}" = "x4" ; then
- LG_SIZEOF_PTR=2
- else
- as_fn_error $? "Unsupported pointer size: ${ac_cv_sizeof_void_p}" "$LINENO" 5
- fi
+if test "x${ac_cv_sizeof_void_p}" = "x8" ; then
+ LG_SIZEOF_PTR=3
+elif test "x${ac_cv_sizeof_void_p}" = "x4" ; then
+ LG_SIZEOF_PTR=2
+else
+ as_fn_error $? "Unsupported pointer size: ${ac_cv_sizeof_void_p}" "$LINENO" 5
fi
cat >>confdefs.h <<_ACEOF
#define LG_SIZEOF_PTR $LG_SIZEOF_PTR
@@ -4931,51 +4570,6 @@ _ACEOF
# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
# This bug is HP SR number 8606223364.
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of long long" >&5
-$as_echo_n "checking size of long long... " >&6; }
-if ${ac_cv_sizeof_long_long+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long long))" "ac_cv_sizeof_long_long" "$ac_includes_default"; then :
-
-else
- if test "$ac_cv_type_long_long" = yes; then
- { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-as_fn_error 77 "cannot compute sizeof (long long)
-See \`config.log' for more details" "$LINENO" 5; }
- else
- ac_cv_sizeof_long_long=0
- fi
-fi
-
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_long_long" >&5
-$as_echo "$ac_cv_sizeof_long_long" >&6; }
-
-
-
-cat >>confdefs.h <<_ACEOF
-#define SIZEOF_LONG_LONG $ac_cv_sizeof_long_long
-_ACEOF
-
-
-if test "x${ac_cv_sizeof_long_long}" = "x8" ; then
- LG_SIZEOF_LONG_LONG=3
-elif test "x${ac_cv_sizeof_long_long}" = "x4" ; then
- LG_SIZEOF_LONG_LONG=2
-else
- as_fn_error $? "Unsupported long long size: ${ac_cv_sizeof_long_long}" "$LINENO" 5
-fi
-cat >>confdefs.h <<_ACEOF
-#define LG_SIZEOF_LONG_LONG $LG_SIZEOF_LONG_LONG
-_ACEOF
-
-
-# The cast to long int works around a bug in the HP C Compiler
-# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
-# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
-# This bug is HP SR number 8606223364.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of intmax_t" >&5
$as_echo_n "checking size of intmax_t... " >&6; }
if ${ac_cv_sizeof_intmax_t+:} false; then :
@@ -5019,6 +4613,35 @@ cat >>confdefs.h <<_ACEOF
_ACEOF
+ac_aux_dir=
+for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do
+ if test -f "$ac_dir/install-sh"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install-sh -c"
+ break
+ elif test -f "$ac_dir/install.sh"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install.sh -c"
+ break
+ elif test -f "$ac_dir/shtool"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/shtool install -c"
+ break
+ fi
+done
+if test -z "$ac_aux_dir"; then
+ as_fn_error $? "cannot find install-sh, install.sh, or shtool in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" "$LINENO" 5
+fi
+
+# These three variables are undocumented and unsupported,
+# and are intended to be withdrawn in a future Autoconf release.
+# They can cause serious problems if a builder's source tree is in a directory
+# whose full name contains unusual characters.
+ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var.
+ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var.
+ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var.
+
+
# Make sure we can run config.sub.
$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 ||
as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5
@@ -5093,45 +4716,7 @@ case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac
CPU_SPINWAIT=""
case "${host_cpu}" in
i686|x86_64)
- if test "x${je_cv_msvc}" = "xyes" ; then
- if ${je_cv_pause_msvc+:} false; then :
- $as_echo_n "(cached) " >&6
-else
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pause instruction MSVC is compilable" >&5
-$as_echo_n "checking whether pause instruction MSVC is compilable... " >&6; }
-if ${je_cv_pause_msvc+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-int
-main ()
-{
-_mm_pause(); return 0;
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_link "$LINENO"; then :
- je_cv_pause_msvc=yes
-else
- je_cv_pause_msvc=no
-fi
-rm -f core conftest.err conftest.$ac_objext \
- conftest$ac_exeext conftest.$ac_ext
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pause_msvc" >&5
-$as_echo "$je_cv_pause_msvc" >&6; }
-
-fi
-
- if test "x${je_cv_pause_msvc}" = "xyes" ; then
- CPU_SPINWAIT='_mm_pause()'
- fi
- else
- if ${je_cv_pause+:} false; then :
+ if ${je_cv_pause+:} false; then :
$as_echo_n "(cached) " >&6
else
@@ -5164,9 +4749,8 @@ $as_echo "$je_cv_pause" >&6; }
fi
- if test "x${je_cv_pause}" = "xyes" ; then
- CPU_SPINWAIT='__asm__ volatile("pause")'
- fi
+ if test "x${je_cv_pause}" = "xyes" ; then
+ CPU_SPINWAIT='__asm__ volatile("pause")'
fi
;;
powerpc)
@@ -5190,27 +4774,17 @@ o="$ac_objext"
a="a"
exe="$ac_exeext"
libprefix="lib"
-link_whole_archive="0"
DSO_LDFLAGS='-shared -Wl,-soname,$(@F)'
RPATH='-Wl,-rpath,$(1)'
SOREV="${so}.${rev}"
PIC_CFLAGS='-fPIC -DPIC'
CTARGET='-o $@'
LDTARGET='-o $@'
-TEST_LD_MODE=
EXTRA_LDFLAGS=
ARFLAGS='crus'
AROUT=' $@'
CC_MM=1
-if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then
- TEST_LD_MODE='-dynamic'
-fi
-
-if test "x${je_cv_cray}" = "xyes" ; then
- CC_MM=
-fi
-
@@ -5307,12 +4881,14 @@ else
fi
-CFLAGS="$CFLAGS"
default_munmap="1"
maps_coalesce="1"
case "${host}" in
*-*-darwin* | *-*-ios*)
+ CFLAGS="$CFLAGS"
abi="macho"
+ $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h
+
RPATH=""
LD_PRELOAD_VAR="DYLD_INSERT_LIBRARIES"
so="dylib"
@@ -5323,41 +4899,38 @@ case "${host}" in
sbrk_deprecated="1"
;;
*-*-freebsd*)
+ CFLAGS="$CFLAGS"
abi="elf"
- $as_echo "#define JEMALLOC_SYSCTL_VM_OVERCOMMIT " >>confdefs.h
+ $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h
force_lazy_lock="1"
;;
*-*-dragonfly*)
+ CFLAGS="$CFLAGS"
abi="elf"
+ $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h
+
;;
*-*-openbsd*)
+ CFLAGS="$CFLAGS"
abi="elf"
+ $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h
+
force_tls="0"
;;
*-*-bitrig*)
+ CFLAGS="$CFLAGS"
abi="elf"
- ;;
- *-*-linux-android)
- CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE"
- abi="elf"
- $as_echo "#define JEMALLOC_HAS_ALLOCA_H 1" >>confdefs.h
-
- $as_echo "#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY " >>confdefs.h
-
- $as_echo "#define JEMALLOC_THREADED_INIT " >>confdefs.h
-
- $as_echo "#define JEMALLOC_C11ATOMICS 1" >>confdefs.h
+ $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h
- force_tls="0"
- default_munmap="0"
;;
- *-*-linux* | *-*-kfreebsd*)
- CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE"
+ *-*-linux*)
+ CFLAGS="$CFLAGS"
+ CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE"
abi="elf"
$as_echo "#define JEMALLOC_HAS_ALLOCA_H 1" >>confdefs.h
- $as_echo "#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY " >>confdefs.h
+ $as_echo "#define JEMALLOC_PURGE_MADVISE_DONTNEED " >>confdefs.h
$as_echo "#define JEMALLOC_THREADED_INIT " >>confdefs.h
@@ -5385,16 +4958,21 @@ main ()
}
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
- abi="elf"
+ CFLAGS="$CFLAGS"; abi="elf"
else
abi="aout"
fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $abi" >&5
$as_echo "$abi" >&6; }
+ $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h
+
;;
*-*-solaris2*)
+ CFLAGS="$CFLAGS"
abi="elf"
+ $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h
+
RPATH='-Wl,-R,$(1)'
CPPFLAGS="$CPPFLAGS -D_POSIX_PTHREAD_SEMANTICS"
LIBS="$LIBS -lposix4 -lsocket -lnsl"
@@ -5410,6 +4988,7 @@ $as_echo "$abi" >&6; }
*-*-mingw* | *-*-cygwin*)
abi="pecoff"
force_tls="0"
+ force_lazy_lock="1"
maps_coalesce="0"
RPATH=""
so="dll"
@@ -5426,7 +5005,6 @@ $as_echo "$abi" >&6; }
else
importlib="${so}"
DSO_LDFLAGS="-shared"
- link_whole_archive="1"
fi
a="lib"
libprefix=""
@@ -5508,73 +5086,6 @@ _ACEOF
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing log" >&5
-$as_echo_n "checking for library containing log... " >&6; }
-if ${ac_cv_search_log+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- ac_func_search_save_LIBS=$LIBS
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-/* Override any GCC internal prototype to avoid an error.
- Use char because int might match the return type of a GCC
- builtin and then its argument prototype would still apply. */
-#ifdef __cplusplus
-extern "C"
-#endif
-char log ();
-int
-main ()
-{
-return log ();
- ;
- return 0;
-}
-_ACEOF
-for ac_lib in '' m; do
- if test -z "$ac_lib"; then
- ac_res="none required"
- else
- ac_res=-l$ac_lib
- LIBS="-l$ac_lib $ac_func_search_save_LIBS"
- fi
- if ac_fn_c_try_link "$LINENO"; then :
- ac_cv_search_log=$ac_res
-fi
-rm -f core conftest.err conftest.$ac_objext \
- conftest$ac_exeext
- if ${ac_cv_search_log+:} false; then :
- break
-fi
-done
-if ${ac_cv_search_log+:} false; then :
-
-else
- ac_cv_search_log=no
-fi
-rm conftest.$ac_ext
-LIBS=$ac_func_search_save_LIBS
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_log" >&5
-$as_echo "$ac_cv_search_log" >&6; }
-ac_res=$ac_cv_search_log
-if test "$ac_res" != no; then :
- test "$ac_res" = "none required" || LIBS="$ac_res $LIBS"
-
-else
- as_fn_error $? "Missing math functions" "$LINENO" 5
-fi
-
-if test "x$ac_cv_search_log" != "xnone required" ; then
- LM="$ac_cv_search_log"
-else
- LM=
-fi
-
-
-
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether __attribute__ syntax is compilable" >&5
$as_echo_n "checking whether __attribute__ syntax is compilable... " >&6; }
if ${je_cv_attribute+:} false; then :
@@ -5682,42 +5193,6 @@ fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5
-$as_echo_n "checking whether compiler supports -herror_on_warning... " >&6; }
-TCFLAGS="${CFLAGS}"
-if test "x${CFLAGS}" = "x" ; then
- CFLAGS="-herror_on_warning"
-else
- CFLAGS="${CFLAGS} -herror_on_warning"
-fi
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-
-int
-main ()
-{
-
- return 0;
-
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
- je_cv_cflags_appended=-herror_on_warning
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
-$as_echo "yes" >&6; }
-else
- je_cv_cflags_appended=
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
- CFLAGS="${TCFLAGS}"
-
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-
-
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether tls_model attribute is compilable" >&5
$as_echo_n "checking whether tls_model attribute is compilable... " >&6; }
if ${je_cv_tls_model+:} false; then :
@@ -5793,42 +5268,6 @@ fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5
-$as_echo_n "checking whether compiler supports -herror_on_warning... " >&6; }
-TCFLAGS="${CFLAGS}"
-if test "x${CFLAGS}" = "x" ; then
- CFLAGS="-herror_on_warning"
-else
- CFLAGS="${CFLAGS} -herror_on_warning"
-fi
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-
-int
-main ()
-{
-
- return 0;
-
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
- je_cv_cflags_appended=-herror_on_warning
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
-$as_echo "yes" >&6; }
-else
- je_cv_cflags_appended=
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
- CFLAGS="${TCFLAGS}"
-
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-
-
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether alloc_size attribute is compilable" >&5
$as_echo_n "checking whether alloc_size attribute is compilable... " >&6; }
if ${je_cv_alloc_size+:} false; then :
@@ -5899,42 +5338,6 @@ fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5
-$as_echo_n "checking whether compiler supports -herror_on_warning... " >&6; }
-TCFLAGS="${CFLAGS}"
-if test "x${CFLAGS}" = "x" ; then
- CFLAGS="-herror_on_warning"
-else
- CFLAGS="${CFLAGS} -herror_on_warning"
-fi
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-
-int
-main ()
-{
-
- return 0;
-
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
- je_cv_cflags_appended=-herror_on_warning
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
-$as_echo "yes" >&6; }
-else
- je_cv_cflags_appended=
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
- CFLAGS="${TCFLAGS}"
-
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-
-
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether format(gnu_printf, ...) attribute is compilable" >&5
$as_echo_n "checking whether format(gnu_printf, ...) attribute is compilable... " >&6; }
if ${je_cv_format_gnu_printf+:} false; then :
@@ -6005,42 +5408,6 @@ fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5
-$as_echo_n "checking whether compiler supports -herror_on_warning... " >&6; }
-TCFLAGS="${CFLAGS}"
-if test "x${CFLAGS}" = "x" ; then
- CFLAGS="-herror_on_warning"
-else
- CFLAGS="${CFLAGS} -herror_on_warning"
-fi
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-
-int
-main ()
-{
-
- return 0;
-
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
- je_cv_cflags_appended=-herror_on_warning
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
-$as_echo "yes" >&6; }
-else
- je_cv_cflags_appended=
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
- CFLAGS="${TCFLAGS}"
-
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-
-
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether format(printf, ...) attribute is compilable" >&5
$as_echo_n "checking whether format(printf, ...) attribute is compilable... " >&6; }
if ${je_cv_format_printf+:} false; then :
@@ -6560,21 +5927,6 @@ fi
install_suffix="$INSTALL_SUFFIX"
-
-# Check whether --with-malloc_conf was given.
-if test "${with_malloc_conf+set}" = set; then :
- withval=$with_malloc_conf; JEMALLOC_CONFIG_MALLOC_CONF="$with_malloc_conf"
-else
- JEMALLOC_CONFIG_MALLOC_CONF=""
-
-fi
-
-config_malloc_conf="$JEMALLOC_CONFIG_MALLOC_CONF"
-cat >>confdefs.h <<_ACEOF
-#define JEMALLOC_CONFIG_MALLOC_CONF "$config_malloc_conf"
-_ACEOF
-
-
je_="je_"
@@ -7143,8 +6495,8 @@ $as_echo_n "checking configured backtracing method... " >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $backtrace_method" >&5
$as_echo "$backtrace_method" >&6; }
if test "x$enable_prof" = "x1" ; then
- if test "x$LM" != "x" ; then
- LIBS="$LIBS $LM"
+ if test "x$abi" != "xpecoff"; then
+ LIBS="$LIBS -lm"
fi
$as_echo "#define JEMALLOC_PROF " >>confdefs.h
@@ -7393,52 +6745,6 @@ fi
-
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program using __builtin_unreachable is compilable" >&5
-$as_echo_n "checking whether a program using __builtin_unreachable is compilable... " >&6; }
-if ${je_cv_gcc_builtin_unreachable+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-void foo (void) {
- __builtin_unreachable();
-}
-
-int
-main ()
-{
-
- {
- foo();
- }
-
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_link "$LINENO"; then :
- je_cv_gcc_builtin_unreachable=yes
-else
- je_cv_gcc_builtin_unreachable=no
-fi
-rm -f core conftest.err conftest.$ac_objext \
- conftest$ac_exeext conftest.$ac_ext
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_gcc_builtin_unreachable" >&5
-$as_echo "$je_cv_gcc_builtin_unreachable" >&6; }
-
-if test "x${je_cv_gcc_builtin_unreachable}" = "xyes" ; then
- $as_echo "#define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable" >>confdefs.h
-
-else
- $as_echo "#define JEMALLOC_INTERNAL_UNREACHABLE abort" >>confdefs.h
-
-fi
-
-
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program using __builtin_ffsl is compilable" >&5
$as_echo_n "checking whether a program using __builtin_ffsl is compilable... " >&6; }
if ${je_cv_gcc_builtin_ffsl+:} false; then :
@@ -7476,8 +6782,6 @@ fi
$as_echo "$je_cv_gcc_builtin_ffsl" >&6; }
if test "x${je_cv_gcc_builtin_ffsl}" = "xyes" ; then
- $as_echo "#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll" >>confdefs.h
-
$as_echo "#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl" >>confdefs.h
$as_echo "#define JEMALLOC_INTERNAL_FFS __builtin_ffs" >>confdefs.h
@@ -7521,8 +6825,6 @@ fi
$as_echo "$je_cv_function_ffsl" >&6; }
if test "x${je_cv_function_ffsl}" = "xyes" ; then
- $as_echo "#define JEMALLOC_INTERNAL_FFSLL ffsll" >>confdefs.h
-
$as_echo "#define JEMALLOC_INTERNAL_FFSL ffsl" >>confdefs.h
$as_echo "#define JEMALLOC_INTERNAL_FFS ffs" >>confdefs.h
@@ -7611,7 +6913,7 @@ main ()
if (f == NULL) {
return 1;
}
- fprintf(f, "%d", result);
+ fprintf(f, "%d\n", result);
fclose(f);
return 0;
@@ -7662,6 +6964,7 @@ else
LG_SIZE_CLASS_GROUP="2"
fi
+
if test ! -e "${objroot}VERSION" ; then
if test ! -e "${srcroot}VERSION" ; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: Missing VERSION file, and unable to generate it; creating bogus VERSION" >&5
@@ -7799,46 +7102,12 @@ fi
fi
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthread_atfork(3) is compilable" >&5
-$as_echo_n "checking whether pthread_atfork(3) is compilable... " >&6; }
-if ${je_cv_pthread_atfork+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-#include <pthread.h>
-
-int
-main ()
-{
-
- pthread_atfork((void *)0, (void *)0, (void *)0);
-
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_link "$LINENO"; then :
- je_cv_pthread_atfork=yes
-else
- je_cv_pthread_atfork=no
-fi
-rm -f core conftest.err conftest.$ac_objext \
- conftest$ac_exeext conftest.$ac_ext
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pthread_atfork" >&5
-$as_echo "$je_cv_pthread_atfork" >&6; }
-
- if test "x${je_cv_pthread_atfork}" = "xyes" ; then
- $as_echo "#define JEMALLOC_HAVE_PTHREAD_ATFORK " >>confdefs.h
-
- fi
fi
CPPFLAGS="$CPPFLAGS -D_REENTRANT"
+SAVED_LIBS="${LIBS}"
+LIBS=
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing clock_gettime" >&5
$as_echo_n "checking for library containing clock_gettime... " >&6; }
if ${ac_cv_search_clock_gettime+:} false; then :
@@ -7892,321 +7161,11 @@ $as_echo "$ac_cv_search_clock_gettime" >&6; }
ac_res=$ac_cv_search_clock_gettime
if test "$ac_res" != no; then :
test "$ac_res" = "none required" || LIBS="$ac_res $LIBS"
-
-fi
-
-
-if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then
- if test "$ac_cv_search_clock_gettime" != "-lrt"; then
- SAVED_CFLAGS="${CFLAGS}"
-
- unset ac_cv_search_clock_gettime
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -dynamic" >&5
-$as_echo_n "checking whether compiler supports -dynamic... " >&6; }
-TCFLAGS="${CFLAGS}"
-if test "x${CFLAGS}" = "x" ; then
- CFLAGS="-dynamic"
-else
- CFLAGS="${CFLAGS} -dynamic"
-fi
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-
-int
-main ()
-{
-
- return 0;
-
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
- je_cv_cflags_appended=-dynamic
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
-$as_echo "yes" >&6; }
-else
- je_cv_cflags_appended=
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
- CFLAGS="${TCFLAGS}"
-
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing clock_gettime" >&5
-$as_echo_n "checking for library containing clock_gettime... " >&6; }
-if ${ac_cv_search_clock_gettime+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- ac_func_search_save_LIBS=$LIBS
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-/* Override any GCC internal prototype to avoid an error.
- Use char because int might match the return type of a GCC
- builtin and then its argument prototype would still apply. */
-#ifdef __cplusplus
-extern "C"
-#endif
-char clock_gettime ();
-int
-main ()
-{
-return clock_gettime ();
- ;
- return 0;
-}
-_ACEOF
-for ac_lib in '' rt; do
- if test -z "$ac_lib"; then
- ac_res="none required"
- else
- ac_res=-l$ac_lib
- LIBS="-l$ac_lib $ac_func_search_save_LIBS"
- fi
- if ac_fn_c_try_link "$LINENO"; then :
- ac_cv_search_clock_gettime=$ac_res
-fi
-rm -f core conftest.err conftest.$ac_objext \
- conftest$ac_exeext
- if ${ac_cv_search_clock_gettime+:} false; then :
- break
-fi
-done
-if ${ac_cv_search_clock_gettime+:} false; then :
-
-else
- ac_cv_search_clock_gettime=no
-fi
-rm conftest.$ac_ext
-LIBS=$ac_func_search_save_LIBS
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_clock_gettime" >&5
-$as_echo "$ac_cv_search_clock_gettime" >&6; }
-ac_res=$ac_cv_search_clock_gettime
-if test "$ac_res" != no; then :
- test "$ac_res" = "none required" || LIBS="$ac_res $LIBS"
-
-fi
-
-
- CFLAGS="${SAVED_CFLAGS}"
- fi
-fi
-
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is compilable" >&5
-$as_echo_n "checking whether clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is compilable... " >&6; }
-if ${je_cv_clock_monotonic_coarse+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-#include <time.h>
-
-int
-main ()
-{
-
- struct timespec ts;
-
- clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
-
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_link "$LINENO"; then :
- je_cv_clock_monotonic_coarse=yes
-else
- je_cv_clock_monotonic_coarse=no
+ TESTLIBS="${LIBS}"
fi
-rm -f core conftest.err conftest.$ac_objext \
- conftest$ac_exeext conftest.$ac_ext
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_clock_monotonic_coarse" >&5
-$as_echo "$je_cv_clock_monotonic_coarse" >&6; }
-
-if test "x${je_cv_clock_monotonic_coarse}" = "xyes" ; then
- $as_echo "#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE 1" >>confdefs.h
-
-fi
-
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether clock_gettime(CLOCK_MONOTONIC, ...) is compilable" >&5
-$as_echo_n "checking whether clock_gettime(CLOCK_MONOTONIC, ...) is compilable... " >&6; }
-if ${je_cv_clock_monotonic+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-#include <unistd.h>
-#include <time.h>
-
-int
-main ()
-{
-
- struct timespec ts;
-
- clock_gettime(CLOCK_MONOTONIC, &ts);
-#if !defined(_POSIX_MONOTONIC_CLOCK) || _POSIX_MONOTONIC_CLOCK < 0
-# error _POSIX_MONOTONIC_CLOCK missing/invalid
-#endif
-
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_link "$LINENO"; then :
- je_cv_clock_monotonic=yes
-else
- je_cv_clock_monotonic=no
-fi
-rm -f core conftest.err conftest.$ac_objext \
- conftest$ac_exeext conftest.$ac_ext
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_clock_monotonic" >&5
-$as_echo "$je_cv_clock_monotonic" >&6; }
-
-if test "x${je_cv_clock_monotonic}" = "xyes" ; then
- $as_echo "#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1" >>confdefs.h
-
-fi
-
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether mach_absolute_time() is compilable" >&5
-$as_echo_n "checking whether mach_absolute_time() is compilable... " >&6; }
-if ${je_cv_mach_absolute_time+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-#include <mach/mach_time.h>
-
-int
-main ()
-{
-
- mach_absolute_time();
-
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_link "$LINENO"; then :
- je_cv_mach_absolute_time=yes
-else
- je_cv_mach_absolute_time=no
-fi
-rm -f core conftest.err conftest.$ac_objext \
- conftest$ac_exeext conftest.$ac_ext
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_mach_absolute_time" >&5
-$as_echo "$je_cv_mach_absolute_time" >&6; }
-
-if test "x${je_cv_mach_absolute_time}" = "xyes" ; then
- $as_echo "#define JEMALLOC_HAVE_MACH_ABSOLUTE_TIME 1" >>confdefs.h
-
-fi
-
-# Check whether --enable-syscall was given.
-if test "${enable_syscall+set}" = set; then :
- enableval=$enable_syscall; if test "x$enable_syscall" = "xno" ; then
- enable_syscall="0"
-else
- enable_syscall="1"
-fi
-
-else
- enable_syscall="1"
-
-fi
-
-if test "x$enable_syscall" = "x1" ; then
- SAVED_CFLAGS="${CFLAGS}"
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5
-$as_echo_n "checking whether compiler supports -Werror... " >&6; }
-TCFLAGS="${CFLAGS}"
-if test "x${CFLAGS}" = "x" ; then
- CFLAGS="-Werror"
-else
- CFLAGS="${CFLAGS} -Werror"
-fi
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-
-int
-main ()
-{
-
- return 0;
-
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
- je_cv_cflags_appended=-Werror
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
-$as_echo "yes" >&6; }
-else
- je_cv_cflags_appended=
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
- CFLAGS="${TCFLAGS}"
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether syscall(2) is compilable" >&5
-$as_echo_n "checking whether syscall(2) is compilable... " >&6; }
-if ${je_cv_syscall+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-#include <sys/syscall.h>
-#include <unistd.h>
-
-int
-main ()
-{
-
- syscall(SYS_write, 2, "hello", 5);
-
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_link "$LINENO"; then :
- je_cv_syscall=yes
-else
- je_cv_syscall=no
-fi
-rm -f core conftest.err conftest.$ac_objext \
- conftest$ac_exeext conftest.$ac_ext
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_syscall" >&5
-$as_echo "$je_cv_syscall" >&6; }
-
- CFLAGS="${SAVED_CFLAGS}"
- if test "x$je_cv_syscall" = "xyes" ; then
- $as_echo "#define JEMALLOC_USE_SYSCALL " >>confdefs.h
-
- fi
-fi
+LIBS="${SAVED_LIBS}"
ac_fn_c_check_func "$LINENO" "secure_getenv" "ac_cv_func_secure_getenv"
if test "x$ac_cv_func_secure_getenv" = xyes; then :
@@ -8274,19 +7233,10 @@ else
fi
-if test "x${enable_lazy_lock}" = "x" ; then
- if test "x${force_lazy_lock}" = "x1" ; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing lazy-lock to avoid allocator/threading bootstrap issues" >&5
+if test "x$enable_lazy_lock" = "x" -a "x${force_lazy_lock}" = "x1" ; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing lazy-lock to avoid allocator/threading bootstrap issues" >&5
$as_echo "Forcing lazy-lock to avoid allocator/threading bootstrap issues" >&6; }
- enable_lazy_lock="1"
- else
- enable_lazy_lock="0"
- fi
-fi
-if test "x${enable_lazy_lock}" = "x1" -a "x${abi}" = "xpecoff" ; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing no lazy-lock because thread creation monitoring is unimplemented" >&5
-$as_echo "Forcing no lazy-lock because thread creation monitoring is unimplemented" >&6; }
- enable_lazy_lock="0"
+ enable_lazy_lock="1"
fi
if test "x$enable_lazy_lock" = "x1" ; then
if test "x$abi" != "xpecoff" ; then
@@ -8356,6 +7306,8 @@ fi
fi
$as_echo "#define JEMALLOC_LAZY_LOCK " >>confdefs.h
+else
+ enable_lazy_lock="0"
fi
@@ -8592,7 +7544,9 @@ int
main ()
{
- madvise((void *)0, 0, 0);
+ {
+ madvise((void *)0, 0, 0);
+ }
;
return 0;
@@ -8612,118 +7566,6 @@ $as_echo "$je_cv_madvise" >&6; }
if test "x${je_cv_madvise}" = "xyes" ; then
$as_echo "#define JEMALLOC_HAVE_MADVISE " >>confdefs.h
-
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether madvise(..., MADV_FREE) is compilable" >&5
-$as_echo_n "checking whether madvise(..., MADV_FREE) is compilable... " >&6; }
-if ${je_cv_madv_free+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-#include <sys/mman.h>
-
-int
-main ()
-{
-
- madvise((void *)0, 0, MADV_FREE);
-
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_link "$LINENO"; then :
- je_cv_madv_free=yes
-else
- je_cv_madv_free=no
-fi
-rm -f core conftest.err conftest.$ac_objext \
- conftest$ac_exeext conftest.$ac_ext
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_madv_free" >&5
-$as_echo "$je_cv_madv_free" >&6; }
-
- if test "x${je_cv_madv_free}" = "xyes" ; then
- $as_echo "#define JEMALLOC_PURGE_MADVISE_FREE " >>confdefs.h
-
- fi
-
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether madvise(..., MADV_DONTNEED) is compilable" >&5
-$as_echo_n "checking whether madvise(..., MADV_DONTNEED) is compilable... " >&6; }
-if ${je_cv_madv_dontneed+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-#include <sys/mman.h>
-
-int
-main ()
-{
-
- madvise((void *)0, 0, MADV_DONTNEED);
-
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_link "$LINENO"; then :
- je_cv_madv_dontneed=yes
-else
- je_cv_madv_dontneed=no
-fi
-rm -f core conftest.err conftest.$ac_objext \
- conftest$ac_exeext conftest.$ac_ext
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_madv_dontneed" >&5
-$as_echo "$je_cv_madv_dontneed" >&6; }
-
- if test "x${je_cv_madv_dontneed}" = "xyes" ; then
- $as_echo "#define JEMALLOC_PURGE_MADVISE_DONTNEED " >>confdefs.h
-
- fi
-
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether madvise(..., MADV_[NO]HUGEPAGE) is compilable" >&5
-$as_echo_n "checking whether madvise(..., MADV_[NO]HUGEPAGE) is compilable... " >&6; }
-if ${je_cv_thp+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-#include <sys/mman.h>
-
-int
-main ()
-{
-
- madvise((void *)0, 0, MADV_HUGEPAGE);
- madvise((void *)0, 0, MADV_NOHUGEPAGE);
-
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_link "$LINENO"; then :
- je_cv_thp=yes
-else
- je_cv_thp=no
-fi
-rm -f core conftest.err conftest.$ac_objext \
- conftest$ac_exeext conftest.$ac_ext
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_thp" >&5
-$as_echo "$je_cv_thp" >&6; }
-
- if test "x${je_cv_thp}" = "xyes" ; then
- $as_echo "#define JEMALLOC_THP " >>confdefs.h
-
- fi
fi
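
The block removed above is the 4.4.0-era probing for specific madvise(2) advice values (MADV_FREE, MADV_DONTNEED, MADV_[NO]HUGEPAGE); after the revert only the generic madvise(2) check remains and the purge flags are set per platform in configure.ac. As background, a hedged sketch of how an allocator hands unused dirty pages back with these flags; the function name is illustrative and this is not jemalloc's actual purge path:

    #include <sys/mman.h>
    #include <stddef.h>

    /* Sketch: release a page-aligned, unused range to the kernel.
     * MADV_FREE reclaims lazily (contents survive until memory pressure);
     * MADV_DONTNEED drops the pages eagerly. */
    static void
    purge_pages(void *addr, size_t len)
    {
    #if defined(MADV_FREE)
        madvise(addr, len, MADV_FREE);
    #elif defined(MADV_DONTNEED)
        madvise(addr, len, MADV_DONTNEED);
    #endif
    }
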
@@ -8866,51 +7708,6 @@ fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether Darwin os_unfair_lock_*() is compilable" >&5
-$as_echo_n "checking whether Darwin os_unfair_lock_*() is compilable... " >&6; }
-if ${je_cv_os_unfair_lock+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-#include <os/lock.h>
-#include <AvailabilityMacros.h>
-
-int
-main ()
-{
-
- #if MAC_OS_X_VERSION_MIN_REQUIRED < 101200
- #error "os_unfair_lock is not supported"
- #else
- os_unfair_lock lock = OS_UNFAIR_LOCK_INIT;
- os_unfair_lock_lock(&lock);
- os_unfair_lock_unlock(&lock);
- #endif
-
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_c_try_link "$LINENO"; then :
- je_cv_os_unfair_lock=yes
-else
- je_cv_os_unfair_lock=no
-fi
-rm -f core conftest.err conftest.$ac_objext \
- conftest$ac_exeext conftest.$ac_ext
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_os_unfair_lock" >&5
-$as_echo "$je_cv_os_unfair_lock" >&6; }
-
-if test "x${je_cv_os_unfair_lock}" = "xyes" ; then
- $as_echo "#define JEMALLOC_OS_UNFAIR_LOCK " >>confdefs.h
-
-fi
-
-
-
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether Darwin OSSpin*() is compilable" >&5
$as_echo_n "checking whether Darwin OSSpin*() is compilable... " >&6; }
if ${je_cv_osspin+:} false; then :
@@ -10813,8 +9610,6 @@ $as_echo "CONFIG : ${CONFIG}" >&6; }
$as_echo "CC : ${CC}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CFLAGS : ${CFLAGS}" >&5
$as_echo "CFLAGS : ${CFLAGS}" >&6; }
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: EXTRA_CFLAGS : ${EXTRA_CFLAGS}" >&5
-$as_echo "EXTRA_CFLAGS : ${EXTRA_CFLAGS}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CPPFLAGS : ${CPPFLAGS}" >&5
$as_echo "CPPFLAGS : ${CPPFLAGS}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: LDFLAGS : ${LDFLAGS}" >&5
@@ -10823,6 +9618,8 @@ $as_echo "LDFLAGS : ${LDFLAGS}" >&6; }
$as_echo "EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: LIBS : ${LIBS}" >&5
$as_echo "LIBS : ${LIBS}" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: TESTLIBS : ${TESTLIBS}" >&5
+$as_echo "TESTLIBS : ${TESTLIBS}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: RPATH_EXTRA : ${RPATH_EXTRA}" >&5
$as_echo "RPATH_EXTRA : ${RPATH_EXTRA}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5
@@ -10865,8 +9662,6 @@ $as_echo "JEMALLOC_PRIVATE_NAMESPACE" >&6; }
$as_echo " : ${JEMALLOC_PRIVATE_NAMESPACE}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: install_suffix : ${install_suffix}" >&5
$as_echo "install_suffix : ${install_suffix}" >&6; }
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: malloc_conf : ${config_malloc_conf}" >&5
-$as_echo "malloc_conf : ${config_malloc_conf}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: autogen : ${enable_autogen}" >&5
$as_echo "autogen : ${enable_autogen}" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: cc-silence : ${enable_cc_silence}" >&5
diff --git a/deps/jemalloc/configure.ac b/deps/jemalloc/configure.ac
index 9573c3020..7a1290e0d 100644
--- a/deps/jemalloc/configure.ac
+++ b/deps/jemalloc/configure.ac
@@ -1,8 +1,6 @@
dnl Process this file with autoconf to produce a configure script.
AC_INIT([Makefile.in])
-AC_CONFIG_AUX_DIR([build-aux])
-
dnl ============================================================================
dnl Custom macro definitions.
@@ -118,7 +116,6 @@ dnl If CFLAGS isn't defined, set CFLAGS to something reasonable. Otherwise,
dnl just prevent autoconf from molesting CFLAGS.
CFLAGS=$CFLAGS
AC_PROG_CC
-
if test "x$GCC" != "xyes" ; then
AC_CACHE_CHECK([whether compiler is MSVC],
[je_cv_msvc],
@@ -132,58 +129,15 @@ if test "x$GCC" != "xyes" ; then
[je_cv_msvc=no])])
fi
-dnl check if a cray prgenv wrapper compiler is being used
-je_cv_cray_prgenv_wrapper=""
-if test "x${PE_ENV}" != "x" ; then
- case "${CC}" in
- CC|cc)
- je_cv_cray_prgenv_wrapper="yes"
- ;;
- *)
- ;;
- esac
-fi
-
-AC_CACHE_CHECK([whether compiler is cray],
- [je_cv_cray],
- [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],
- [
-#ifndef _CRAYC
- int fail[-1];
-#endif
-])],
- [je_cv_cray=yes],
- [je_cv_cray=no])])
-
-if test "x${je_cv_cray}" = "xyes" ; then
- AC_CACHE_CHECK([whether cray compiler version is 8.4],
- [je_cv_cray_84],
- [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],
- [
-#if !(_RELEASE_MAJOR == 8 && _RELEASE_MINOR == 4)
- int fail[-1];
-#endif
-])],
- [je_cv_cray_84=yes],
- [je_cv_cray_84=no])])
-fi
-
if test "x$CFLAGS" = "x" ; then
no_CFLAGS="yes"
if test "x$GCC" = "xyes" ; then
- JE_CFLAGS_APPEND([-std=gnu11])
- if test "x$je_cv_cflags_appended" = "x-std=gnu11" ; then
+ JE_CFLAGS_APPEND([-std=gnu99])
+ if test "x$je_cv_cflags_appended" = "x-std=gnu99" ; then
AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT])
- else
- JE_CFLAGS_APPEND([-std=gnu99])
- if test "x$je_cv_cflags_appended" = "x-std=gnu99" ; then
- AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT])
- fi
fi
JE_CFLAGS_APPEND([-Wall])
JE_CFLAGS_APPEND([-Werror=declaration-after-statement])
- JE_CFLAGS_APPEND([-Wshorten-64-to-32])
- JE_CFLAGS_APPEND([-Wsign-compare])
JE_CFLAGS_APPEND([-pipe])
JE_CFLAGS_APPEND([-g3])
elif test "x$je_cv_msvc" = "xyes" ; then
@@ -194,21 +148,11 @@ if test "x$CFLAGS" = "x" ; then
JE_CFLAGS_APPEND([-FS])
CPPFLAGS="$CPPFLAGS -I${srcdir}/include/msvc_compat"
fi
- if test "x$je_cv_cray" = "xyes" ; then
- dnl cray compiler 8.4 has an inlining bug
- if test "x$je_cv_cray_84" = "xyes" ; then
- JE_CFLAGS_APPEND([-hipa2])
- JE_CFLAGS_APPEND([-hnognu])
- fi
- if test "x$enable_cc_silence" != "xno" ; then
- dnl ignore unreachable code warning
- JE_CFLAGS_APPEND([-hnomessage=128])
- dnl ignore redefinition of "malloc", "free", etc warning
- JE_CFLAGS_APPEND([-hnomessage=1357])
- fi
- fi
fi
-AC_SUBST([EXTRA_CFLAGS])
+dnl Append EXTRA_CFLAGS to CFLAGS, if defined.
+if test "x$EXTRA_CFLAGS" != "x" ; then
+ JE_CFLAGS_APPEND([$EXTRA_CFLAGS])
+fi
AC_PROG_CPP
AC_C_BIGENDIAN([ac_cv_big_endian=1], [ac_cv_big_endian=0])
@@ -220,18 +164,13 @@ if test "x${je_cv_msvc}" = "xyes" -a "x${ac_cv_header_inttypes_h}" = "xno"; then
CPPFLAGS="$CPPFLAGS -I${srcdir}/include/msvc_compat/C99"
fi
-if test "x${je_cv_msvc}" = "xyes" ; then
- LG_SIZEOF_PTR=LG_SIZEOF_PTR_WIN
- AC_MSG_RESULT([Using a predefined value for sizeof(void *): 4 for 32-bit, 8 for 64-bit])
+AC_CHECK_SIZEOF([void *])
+if test "x${ac_cv_sizeof_void_p}" = "x8" ; then
+ LG_SIZEOF_PTR=3
+elif test "x${ac_cv_sizeof_void_p}" = "x4" ; then
+ LG_SIZEOF_PTR=2
else
- AC_CHECK_SIZEOF([void *])
- if test "x${ac_cv_sizeof_void_p}" = "x8" ; then
- LG_SIZEOF_PTR=3
- elif test "x${ac_cv_sizeof_void_p}" = "x4" ; then
- LG_SIZEOF_PTR=2
- else
- AC_MSG_ERROR([Unsupported pointer size: ${ac_cv_sizeof_void_p}])
- fi
+ AC_MSG_ERROR([Unsupported pointer size: ${ac_cv_sizeof_void_p}])
fi
AC_DEFINE_UNQUOTED([LG_SIZEOF_PTR], [$LG_SIZEOF_PTR])
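
LG_SIZEOF_PTR is simply the base-2 logarithm of sizeof(void *), i.e. 3 on 64-bit and 2 on 32-bit targets; the reverted hunk computes it via AC_CHECK_SIZEOF again instead of the MSVC-specific shortcut. An equivalent compile-time view of the same mapping (a sketch, not code from the tree):

    #include <stdint.h>

    /* log2(sizeof(void *)): 2 means 4-byte pointers, 3 means 8-byte pointers. */
    #if UINTPTR_MAX == UINT64_MAX
    #  define LG_SIZEOF_PTR 3
    #elif UINTPTR_MAX == UINT32_MAX
    #  define LG_SIZEOF_PTR 2
    #else
    #  error "Unsupported pointer size"
    #endif

    /* Fails to compile if the mapping is ever wrong. */
    typedef char lg_sizeof_ptr_check[(1 << LG_SIZEOF_PTR) == sizeof(void *) ? 1 : -1];
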
@@ -255,16 +194,6 @@ else
fi
AC_DEFINE_UNQUOTED([LG_SIZEOF_LONG], [$LG_SIZEOF_LONG])
-AC_CHECK_SIZEOF([long long])
-if test "x${ac_cv_sizeof_long_long}" = "x8" ; then
- LG_SIZEOF_LONG_LONG=3
-elif test "x${ac_cv_sizeof_long_long}" = "x4" ; then
- LG_SIZEOF_LONG_LONG=2
-else
- AC_MSG_ERROR([Unsupported long long size: ${ac_cv_sizeof_long_long}])
-fi
-AC_DEFINE_UNQUOTED([LG_SIZEOF_LONG_LONG], [$LG_SIZEOF_LONG_LONG])
-
AC_CHECK_SIZEOF([intmax_t])
if test "x${ac_cv_sizeof_intmax_t}" = "x16" ; then
LG_SIZEOF_INTMAX_T=4
@@ -282,22 +211,12 @@ dnl CPU-specific settings.
CPU_SPINWAIT=""
case "${host_cpu}" in
i686|x86_64)
- if test "x${je_cv_msvc}" = "xyes" ; then
- AC_CACHE_VAL([je_cv_pause_msvc],
- [JE_COMPILABLE([pause instruction MSVC], [],
- [[_mm_pause(); return 0;]],
- [je_cv_pause_msvc])])
- if test "x${je_cv_pause_msvc}" = "xyes" ; then
- CPU_SPINWAIT='_mm_pause()'
- fi
- else
- AC_CACHE_VAL([je_cv_pause],
- [JE_COMPILABLE([pause instruction], [],
- [[__asm__ volatile("pause"); return 0;]],
- [je_cv_pause])])
- if test "x${je_cv_pause}" = "xyes" ; then
- CPU_SPINWAIT='__asm__ volatile("pause")'
- fi
+ AC_CACHE_VAL([je_cv_pause],
+ [JE_COMPILABLE([pause instruction], [],
+ [[__asm__ volatile("pause"); return 0;]],
+ [je_cv_pause])])
+ if test "x${je_cv_pause}" = "xyes" ; then
+ CPU_SPINWAIT='__asm__ volatile("pause")'
fi
;;
powerpc)
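
CPU_SPINWAIT expands to a pause hint executed inside spin loops; the revert drops the MSVC _mm_pause() branch and keeps only the inline-assembly form for i686/x86_64. A hedged sketch of how such a hint is typically used (the lock type and function are illustrative, not jemalloc's mutex code):

    #include <stdatomic.h>

    #if defined(__i386__) || defined(__x86_64__)
    #  define CPU_SPINWAIT __asm__ volatile("pause")
    #else
    #  define CPU_SPINWAIT
    #endif

    /* Test-and-set spin lock; the pause eases pipeline and power cost while waiting. */
    static void
    spin_lock(atomic_flag *lock)
    {
        while (atomic_flag_test_and_set_explicit(lock, memory_order_acquire))
            CPU_SPINWAIT;
    }
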
@@ -315,27 +234,17 @@ o="$ac_objext"
a="a"
exe="$ac_exeext"
libprefix="lib"
-link_whole_archive="0"
DSO_LDFLAGS='-shared -Wl,-soname,$(@F)'
RPATH='-Wl,-rpath,$(1)'
SOREV="${so}.${rev}"
PIC_CFLAGS='-fPIC -DPIC'
CTARGET='-o $@'
LDTARGET='-o $@'
-TEST_LD_MODE=
EXTRA_LDFLAGS=
ARFLAGS='crus'
AROUT=' $@'
CC_MM=1
-if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then
- TEST_LD_MODE='-dynamic'
-fi
-
-if test "x${je_cv_cray}" = "xyes" ; then
- CC_MM=
-fi
-
AN_MAKEVAR([AR], [AC_PROG_AR])
AN_PROGRAM([ar], [AC_PROG_AR])
AC_DEFUN([AC_PROG_AR], [AC_CHECK_TOOL(AR, ar, :)])
@@ -348,12 +257,13 @@ dnl
dnl Define cpp macros in CPPFLAGS, rather than doing AC_DEFINE(macro), since the
dnl definitions need to be seen before any headers are included, which is a pain
dnl to make happen otherwise.
-CFLAGS="$CFLAGS"
default_munmap="1"
maps_coalesce="1"
case "${host}" in
*-*-darwin* | *-*-ios*)
+ CFLAGS="$CFLAGS"
abi="macho"
+ AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
RPATH=""
LD_PRELOAD_VAR="DYLD_INSERT_LIBRARIES"
so="dylib"
@@ -364,37 +274,33 @@ case "${host}" in
sbrk_deprecated="1"
;;
*-*-freebsd*)
+ CFLAGS="$CFLAGS"
abi="elf"
- AC_DEFINE([JEMALLOC_SYSCTL_VM_OVERCOMMIT], [ ])
+ AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
force_lazy_lock="1"
;;
*-*-dragonfly*)
+ CFLAGS="$CFLAGS"
abi="elf"
+ AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
;;
*-*-openbsd*)
+ CFLAGS="$CFLAGS"
abi="elf"
+ AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
force_tls="0"
;;
*-*-bitrig*)
+ CFLAGS="$CFLAGS"
abi="elf"
+ AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
;;
- *-*-linux-android)
- dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE.
+ *-*-linux*)
+ CFLAGS="$CFLAGS"
CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE"
abi="elf"
AC_DEFINE([JEMALLOC_HAS_ALLOCA_H])
- AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ])
- AC_DEFINE([JEMALLOC_THREADED_INIT], [ ])
- AC_DEFINE([JEMALLOC_C11ATOMICS])
- force_tls="0"
- default_munmap="0"
- ;;
- *-*-linux* | *-*-kfreebsd*)
- dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE.
- CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE"
- abi="elf"
- AC_DEFINE([JEMALLOC_HAS_ALLOCA_H])
- AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ])
+ AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED], [ ])
AC_DEFINE([JEMALLOC_THREADED_INIT], [ ])
AC_DEFINE([JEMALLOC_USE_CXX_THROW], [ ])
default_munmap="0"
@@ -408,12 +314,15 @@ case "${host}" in
#error aout
#endif
]])],
- [abi="elf"],
+ [CFLAGS="$CFLAGS"; abi="elf"],
[abi="aout"])
AC_MSG_RESULT([$abi])
+ AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
;;
*-*-solaris2*)
+ CFLAGS="$CFLAGS"
abi="elf"
+ AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
RPATH='-Wl,-R,$(1)'
dnl Solaris needs this for sigwait().
CPPFLAGS="$CPPFLAGS -D_POSIX_PTHREAD_SEMANTICS"
@@ -432,6 +341,7 @@ case "${host}" in
*-*-mingw* | *-*-cygwin*)
abi="pecoff"
force_tls="0"
+ force_lazy_lock="1"
maps_coalesce="0"
RPATH=""
so="dll"
@@ -448,7 +358,6 @@ case "${host}" in
else
importlib="${so}"
DSO_LDFLAGS="-shared"
- link_whole_archive="1"
fi
a="lib"
libprefix=""
@@ -486,28 +395,17 @@ AC_SUBST([o])
AC_SUBST([a])
AC_SUBST([exe])
AC_SUBST([libprefix])
-AC_SUBST([link_whole_archive])
AC_SUBST([DSO_LDFLAGS])
AC_SUBST([EXTRA_LDFLAGS])
AC_SUBST([SOREV])
AC_SUBST([PIC_CFLAGS])
AC_SUBST([CTARGET])
AC_SUBST([LDTARGET])
-AC_SUBST([TEST_LD_MODE])
AC_SUBST([MKLIB])
AC_SUBST([ARFLAGS])
AC_SUBST([AROUT])
AC_SUBST([CC_MM])
-dnl Determine whether libm must be linked to use e.g. log(3).
-AC_SEARCH_LIBS([log], [m], , [AC_MSG_ERROR([Missing math functions])])
-if test "x$ac_cv_search_log" != "xnone required" ; then
- LM="$ac_cv_search_log"
-else
- LM=
-fi
-AC_SUBST(LM)
-
JE_COMPILABLE([__attribute__ syntax],
[static __attribute__((unused)) void foo(void){}],
[],
@@ -521,7 +419,6 @@ fi
dnl Check for tls_model attribute support (clang 3.0 still lacks support).
SAVED_CFLAGS="${CFLAGS}"
JE_CFLAGS_APPEND([-Werror])
-JE_CFLAGS_APPEND([-herror_on_warning])
JE_COMPILABLE([tls_model attribute], [],
[static __thread int
__attribute__((tls_model("initial-exec"), unused)) foo;
@@ -537,7 +434,6 @@ fi
dnl Check for alloc_size attribute support.
SAVED_CFLAGS="${CFLAGS}"
JE_CFLAGS_APPEND([-Werror])
-JE_CFLAGS_APPEND([-herror_on_warning])
JE_COMPILABLE([alloc_size attribute], [#include <stdlib.h>],
[void *foo(size_t size) __attribute__((alloc_size(1)));],
[je_cv_alloc_size])
@@ -548,7 +444,6 @@ fi
dnl Check for format(gnu_printf, ...) attribute support.
SAVED_CFLAGS="${CFLAGS}"
JE_CFLAGS_APPEND([-Werror])
-JE_CFLAGS_APPEND([-herror_on_warning])
JE_COMPILABLE([format(gnu_printf, ...) attribute], [#include <stdlib.h>],
[void *foo(const char *format, ...) __attribute__((format(gnu_printf, 1, 2)));],
[je_cv_format_gnu_printf])
@@ -559,7 +454,6 @@ fi
dnl Check for format(printf, ...) attribute support.
SAVED_CFLAGS="${CFLAGS}"
JE_CFLAGS_APPEND([-Werror])
-JE_CFLAGS_APPEND([-herror_on_warning])
JE_COMPILABLE([format(printf, ...) attribute], [#include <stdlib.h>],
[void *foo(const char *format, ...) __attribute__((format(printf, 1, 2)));],
[je_cv_format_printf])
@@ -681,15 +575,6 @@ AC_ARG_WITH([install_suffix],
install_suffix="$INSTALL_SUFFIX"
AC_SUBST([install_suffix])
-dnl Specify default malloc_conf.
-AC_ARG_WITH([malloc_conf],
- [AS_HELP_STRING([--with-malloc-conf=<malloc_conf>], [config.malloc_conf options string])],
- [JEMALLOC_CONFIG_MALLOC_CONF="$with_malloc_conf"],
- [JEMALLOC_CONFIG_MALLOC_CONF=""]
-)
-config_malloc_conf="$JEMALLOC_CONFIG_MALLOC_CONF"
-AC_DEFINE_UNQUOTED([JEMALLOC_CONFIG_MALLOC_CONF], ["$config_malloc_conf"])
-
dnl Substitute @je_@ in jemalloc_protos.h.in, primarily to make generation of
dnl jemalloc_protos_jet.h easy.
je_="je_"
@@ -954,9 +839,9 @@ fi
AC_MSG_CHECKING([configured backtracing method])
AC_MSG_RESULT([$backtrace_method])
if test "x$enable_prof" = "x1" ; then
- dnl Heap profiling uses the log(3) function.
- if test "x$LM" != "x" ; then
- LIBS="$LIBS $LM"
+ if test "x$abi" != "xpecoff"; then
+ dnl Heap profiling uses the log(3) function.
+ LIBS="$LIBS -lm"
fi
AC_DEFINE([JEMALLOC_PROF], [ ])
@@ -1125,28 +1010,11 @@ if test "x$enable_cache_oblivious" = "x1" ; then
fi
AC_SUBST([enable_cache_oblivious])
-
-
-JE_COMPILABLE([a program using __builtin_unreachable], [
-void foo (void) {
- __builtin_unreachable();
-}
-], [
- {
- foo();
- }
-], [je_cv_gcc_builtin_unreachable])
-if test "x${je_cv_gcc_builtin_unreachable}" = "xyes" ; then
- AC_DEFINE([JEMALLOC_INTERNAL_UNREACHABLE], [__builtin_unreachable])
-else
- AC_DEFINE([JEMALLOC_INTERNAL_UNREACHABLE], [abort])
-fi
-
dnl ============================================================================
dnl Check for __builtin_ffsl(), then ffsl(3), and fail if neither are found.
dnl One of those two functions should (theoretically) exist on all platforms
dnl that jemalloc currently has a chance of functioning on without modification.
-dnl We additionally assume ffs[ll]() or __builtin_ffs[ll]() are defined if
+dnl We additionally assume ffs() or __builtin_ffs() are defined if
dnl ffsl() or __builtin_ffsl() are defined, respectively.
JE_COMPILABLE([a program using __builtin_ffsl], [
#include <stdio.h>
@@ -1159,7 +1027,6 @@ JE_COMPILABLE([a program using __builtin_ffsl], [
}
], [je_cv_gcc_builtin_ffsl])
if test "x${je_cv_gcc_builtin_ffsl}" = "xyes" ; then
- AC_DEFINE([JEMALLOC_INTERNAL_FFSLL], [__builtin_ffsll])
AC_DEFINE([JEMALLOC_INTERNAL_FFSL], [__builtin_ffsl])
AC_DEFINE([JEMALLOC_INTERNAL_FFS], [__builtin_ffs])
else
@@ -1174,7 +1041,6 @@ else
}
], [je_cv_function_ffsl])
if test "x${je_cv_function_ffsl}" = "xyes" ; then
- AC_DEFINE([JEMALLOC_INTERNAL_FFSLL], [ffsll])
AC_DEFINE([JEMALLOC_INTERNAL_FFSL], [ffsl])
AC_DEFINE([JEMALLOC_INTERNAL_FFS], [ffs])
else
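
The removed JEMALLOC_INTERNAL_FFSLL definitions are the 64-bit variant introduced in 4.4.0; 4.0.3 only needs ffsl()/ffs(). Both report the 1-based index of the least significant set bit (0 when no bit is set), which is what the bitmap code relies on. A small illustration (a sketch, not from the tree):

    #include <stdio.h>

    int
    main(void)
    {
        /* 0x18 == 0b11000: the lowest set bit is bit 4 (1-based). */
        printf("%d\n", __builtin_ffsl(0x18L)); /* prints 4 */
        printf("%d\n", __builtin_ffsl(0L));    /* prints 0: no bit set */
        return 0;
    }
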
@@ -1234,7 +1100,7 @@ if test "x$LG_PAGE" = "xdetect"; then
if (f == NULL) {
return 1;
}
- fprintf(f, "%d", result);
+ fprintf(f, "%d\n", result);
fclose(f);
return 0;
@@ -1267,36 +1133,27 @@ dnl ============================================================================
dnl jemalloc configuration.
dnl
-AC_ARG_WITH([version],
- [AS_HELP_STRING([--with-version=<major>.<minor>.<bugfix>-<nrev>-g<gid>],
- [Version string])],
- [
- echo "${with_version}" | grep ['^[0-9]\+\.[0-9]\+\.[0-9]\+-[0-9]\+-g[0-9a-f]\+$'] 2>&1 1>/dev/null
- if test $? -ne 0 ; then
- AC_MSG_ERROR([${with_version} does not match <major>.<minor>.<bugfix>-<nrev>-g<gid>])
- fi
- echo "$with_version" > "${objroot}VERSION"
- ], [
- dnl Set VERSION if source directory is inside a git repository.
- if test "x`test ! \"${srcroot}\" && cd \"${srcroot}\"; git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then
- dnl Pattern globs aren't powerful enough to match both single- and
- dnl double-digit version numbers, so iterate over patterns to support up
- dnl to version 99.99.99 without any accidental matches.
- for pattern in ['[0-9].[0-9].[0-9]' '[0-9].[0-9].[0-9][0-9]' \
- '[0-9].[0-9][0-9].[0-9]' '[0-9].[0-9][0-9].[0-9][0-9]' \
- '[0-9][0-9].[0-9].[0-9]' '[0-9][0-9].[0-9].[0-9][0-9]' \
- '[0-9][0-9].[0-9][0-9].[0-9]' \
- '[0-9][0-9].[0-9][0-9].[0-9][0-9]']; do
- (test ! "${srcroot}" && cd "${srcroot}"; git describe --long --abbrev=40 --match="${pattern}") > "${objroot}VERSION.tmp" 2>/dev/null
- if test $? -eq 0 ; then
- mv "${objroot}VERSION.tmp" "${objroot}VERSION"
- break
- fi
- done
+dnl Set VERSION if source directory is inside a git repository.
+if test "x`test ! \"${srcroot}\" && cd \"${srcroot}\"; git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then
+ dnl Pattern globs aren't powerful enough to match both single- and
+ dnl double-digit version numbers, so iterate over patterns to support up to
+ dnl version 99.99.99 without any accidental matches.
+ rm -f "${objroot}VERSION"
+ for pattern in ['[0-9].[0-9].[0-9]' '[0-9].[0-9].[0-9][0-9]' \
+ '[0-9].[0-9][0-9].[0-9]' '[0-9].[0-9][0-9].[0-9][0-9]' \
+ '[0-9][0-9].[0-9].[0-9]' '[0-9][0-9].[0-9].[0-9][0-9]' \
+ '[0-9][0-9].[0-9][0-9].[0-9]' \
+ '[0-9][0-9].[0-9][0-9].[0-9][0-9]']; do
+ if test ! -e "${objroot}VERSION" ; then
+ (test ! "${srcroot}" && cd "${srcroot}"; git describe --long --abbrev=40 --match="${pattern}") > "${objroot}VERSION.tmp" 2>/dev/null
+ if test $? -eq 0 ; then
+ mv "${objroot}VERSION.tmp" "${objroot}VERSION"
+ break
+ fi
fi
- rm -f "${objroot}VERSION.tmp"
- ])
-
+ done
+fi
+rm -f "${objroot}VERSION.tmp"
if test ! -e "${objroot}VERSION" ; then
if test ! -e "${srcroot}VERSION" ; then
AC_MSG_RESULT(
@@ -1329,101 +1186,17 @@ if test "x$abi" != "xpecoff" ; then
AC_CHECK_LIB([pthread], [pthread_create], [LIBS="$LIBS -lpthread"],
[AC_SEARCH_LIBS([pthread_create], , ,
AC_MSG_ERROR([libpthread is missing]))])
- JE_COMPILABLE([pthread_atfork(3)], [
-#include <pthread.h>
-], [
- pthread_atfork((void *)0, (void *)0, (void *)0);
-], [je_cv_pthread_atfork])
- if test "x${je_cv_pthread_atfork}" = "xyes" ; then
- AC_DEFINE([JEMALLOC_HAVE_PTHREAD_ATFORK], [ ])
- fi
fi
CPPFLAGS="$CPPFLAGS -D_REENTRANT"
-dnl Check whether clock_gettime(2) is in libc or librt.
-AC_SEARCH_LIBS([clock_gettime], [rt])
-
-dnl Cray wrapper compiler often adds `-lrt` when using `-static`. Check with
-dnl `-dynamic` as well in case a user tries to dynamically link in jemalloc
-if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then
- if test "$ac_cv_search_clock_gettime" != "-lrt"; then
- SAVED_CFLAGS="${CFLAGS}"
-
- unset ac_cv_search_clock_gettime
- JE_CFLAGS_APPEND([-dynamic])
- AC_SEARCH_LIBS([clock_gettime], [rt])
-
- CFLAGS="${SAVED_CFLAGS}"
- fi
-fi
-
-dnl check for CLOCK_MONOTONIC_COARSE (Linux-specific).
-JE_COMPILABLE([clock_gettime(CLOCK_MONOTONIC_COARSE, ...)], [
-#include <time.h>
-], [
- struct timespec ts;
-
- clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
-], [je_cv_clock_monotonic_coarse])
-if test "x${je_cv_clock_monotonic_coarse}" = "xyes" ; then
- AC_DEFINE([JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE])
-fi
-
-dnl check for CLOCK_MONOTONIC.
-JE_COMPILABLE([clock_gettime(CLOCK_MONOTONIC, ...)], [
-#include <unistd.h>
-#include <time.h>
-], [
- struct timespec ts;
-
- clock_gettime(CLOCK_MONOTONIC, &ts);
-#if !defined(_POSIX_MONOTONIC_CLOCK) || _POSIX_MONOTONIC_CLOCK < 0
-# error _POSIX_MONOTONIC_CLOCK missing/invalid
-#endif
-], [je_cv_clock_monotonic])
-if test "x${je_cv_clock_monotonic}" = "xyes" ; then
- AC_DEFINE([JEMALLOC_HAVE_CLOCK_MONOTONIC])
-fi
-
-dnl Check for mach_absolute_time().
-JE_COMPILABLE([mach_absolute_time()], [
-#include <mach/mach_time.h>
-], [
- mach_absolute_time();
-], [je_cv_mach_absolute_time])
-if test "x${je_cv_mach_absolute_time}" = "xyes" ; then
- AC_DEFINE([JEMALLOC_HAVE_MACH_ABSOLUTE_TIME])
-fi
-
-dnl Use syscall(2) (if available) by default.
-AC_ARG_ENABLE([syscall],
- [AS_HELP_STRING([--disable-syscall], [Disable use of syscall(2)])],
-[if test "x$enable_syscall" = "xno" ; then
- enable_syscall="0"
-else
- enable_syscall="1"
-fi
-],
-[enable_syscall="1"]
-)
-if test "x$enable_syscall" = "x1" ; then
- dnl Check if syscall(2) is usable. Treat warnings as errors, so that e.g. OS
- dnl X 10.12's deprecation warning prevents use.
- SAVED_CFLAGS="${CFLAGS}"
- JE_CFLAGS_APPEND([-Werror])
- JE_COMPILABLE([syscall(2)], [
-#include <sys/syscall.h>
-#include <unistd.h>
-], [
- syscall(SYS_write, 2, "hello", 5);
-],
- [je_cv_syscall])
- CFLAGS="${SAVED_CFLAGS}"
- if test "x$je_cv_syscall" = "xyes" ; then
- AC_DEFINE([JEMALLOC_USE_SYSCALL], [ ])
- fi
-fi
+dnl Check whether clock_gettime(2) is in libc or librt. This function is only
+dnl used in test code, so save the result to TESTLIBS to avoid polluting LIBS.
+SAVED_LIBS="${LIBS}"
+LIBS=
+AC_SEARCH_LIBS([clock_gettime], [rt], [TESTLIBS="${LIBS}"])
+AC_SUBST([TESTLIBS])
+LIBS="${SAVED_LIBS}"
dnl Check if the GNU-specific secure_getenv function exists.
AC_CHECK_FUNC([secure_getenv],
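
The restored block above checks whether clock_gettime(2) needs librt and stashes any required -lrt in TESTLIBS instead of LIBS, since only the test programs call it. A hedged example of the kind of call the tests make, linked with $(TESTLIBS) (empty or -lrt depending on the platform); CLOCK_MONOTONIC is used here purely for illustration:

    #include <stdio.h>
    #include <time.h>

    int
    main(void)
    {
        struct timespec ts;

        /* Needs -lrt on older glibc; on modern systems it lives in libc. */
        if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0)
            return 1;
        printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
        return 0;
    }
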
@@ -1479,17 +1252,9 @@ fi
],
[enable_lazy_lock=""]
)
-if test "x${enable_lazy_lock}" = "x" ; then
- if test "x${force_lazy_lock}" = "x1" ; then
- AC_MSG_RESULT([Forcing lazy-lock to avoid allocator/threading bootstrap issues])
- enable_lazy_lock="1"
- else
- enable_lazy_lock="0"
- fi
-fi
-if test "x${enable_lazy_lock}" = "x1" -a "x${abi}" = "xpecoff" ; then
- AC_MSG_RESULT([Forcing no lazy-lock because thread creation monitoring is unimplemented])
- enable_lazy_lock="0"
+if test "x$enable_lazy_lock" = "x" -a "x${force_lazy_lock}" = "x1" ; then
+ AC_MSG_RESULT([Forcing lazy-lock to avoid allocator/threading bootstrap issues])
+ enable_lazy_lock="1"
fi
if test "x$enable_lazy_lock" = "x1" ; then
if test "x$abi" != "xpecoff" ; then
@@ -1500,6 +1265,8 @@ if test "x$enable_lazy_lock" = "x1" ; then
])
fi
AC_DEFINE([JEMALLOC_LAZY_LOCK], [ ])
+else
+ enable_lazy_lock="0"
fi
AC_SUBST([enable_lazy_lock])
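
JEMALLOC_LAZY_LOCK lets the allocator skip mutex operations until the process actually becomes multithreaded; jemalloc detects that moment by interposing pthread_create() through dlsym(RTLD_NEXT, ...), which is why the option depends on a working dlsym and stays unavailable on pecoff. A heavily simplified sketch of the idea (names and details are illustrative, not the real implementation):

    #define _GNU_SOURCE
    #include <dlfcn.h>
    #include <pthread.h>
    #include <stdbool.h>

    typedef int (*pthread_create_t)(pthread_t *, const pthread_attr_t *,
        void *(*)(void *), void *);

    static bool isthreaded = false;   /* mutexes stay no-ops while false */
    static pthread_create_t real_pthread_create;

    /* Interposed wrapper: flip the flag the first time a thread is spawned. */
    int
    pthread_create(pthread_t *thread, const pthread_attr_t *attr,
        void *(*start)(void *), void *arg)
    {
        if (real_pthread_create == NULL)
            real_pthread_create = (pthread_create_t)dlsym(RTLD_NEXT, "pthread_create");
        isthreaded = true;
        return real_pthread_create(thread, attr, start, arg);
    }
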
@@ -1622,41 +1389,12 @@ dnl Check for madvise(2).
JE_COMPILABLE([madvise(2)], [
#include <sys/mman.h>
], [
- madvise((void *)0, 0, 0);
+ {
+ madvise((void *)0, 0, 0);
+ }
], [je_cv_madvise])
if test "x${je_cv_madvise}" = "xyes" ; then
AC_DEFINE([JEMALLOC_HAVE_MADVISE], [ ])
-
- dnl Check for madvise(..., MADV_FREE).
- JE_COMPILABLE([madvise(..., MADV_FREE)], [
-#include <sys/mman.h>
-], [
- madvise((void *)0, 0, MADV_FREE);
-], [je_cv_madv_free])
- if test "x${je_cv_madv_free}" = "xyes" ; then
- AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
- fi
-
- dnl Check for madvise(..., MADV_DONTNEED).
- JE_COMPILABLE([madvise(..., MADV_DONTNEED)], [
-#include <sys/mman.h>
-], [
- madvise((void *)0, 0, MADV_DONTNEED);
-], [je_cv_madv_dontneed])
- if test "x${je_cv_madv_dontneed}" = "xyes" ; then
- AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED], [ ])
- fi
-
- dnl Check for madvise(..., MADV_[NO]HUGEPAGE).
- JE_COMPILABLE([madvise(..., MADV_[[NO]]HUGEPAGE)], [
-#include <sys/mman.h>
-], [
- madvise((void *)0, 0, MADV_HUGEPAGE);
- madvise((void *)0, 0, MADV_NOHUGEPAGE);
-], [je_cv_thp])
- if test "x${je_cv_thp}" = "xyes" ; then
- AC_DEFINE([JEMALLOC_THP], [ ])
- fi
fi
dnl ============================================================================
@@ -1717,25 +1455,6 @@ if test "x${je_cv_builtin_clz}" = "xyes" ; then
fi
dnl ============================================================================
-dnl Check for os_unfair_lock operations as provided on Darwin.
-
-JE_COMPILABLE([Darwin os_unfair_lock_*()], [
-#include <os/lock.h>
-#include <AvailabilityMacros.h>
-], [
- #if MAC_OS_X_VERSION_MIN_REQUIRED < 101200
- #error "os_unfair_lock is not supported"
- #else
- os_unfair_lock lock = OS_UNFAIR_LOCK_INIT;
- os_unfair_lock_lock(&lock);
- os_unfair_lock_unlock(&lock);
- #endif
-], [je_cv_os_unfair_lock])
-if test "x${je_cv_os_unfair_lock}" = "xyes" ; then
- AC_DEFINE([JEMALLOC_OS_UNFAIR_LOCK], [ ])
-fi
-
-dnl ============================================================================
dnl Check for spinlock(3) operations as provided on Darwin.
JE_COMPILABLE([Darwin OSSpin*()], [
@@ -1979,11 +1698,11 @@ AC_MSG_RESULT([])
AC_MSG_RESULT([CONFIG : ${CONFIG}])
AC_MSG_RESULT([CC : ${CC}])
AC_MSG_RESULT([CFLAGS : ${CFLAGS}])
-AC_MSG_RESULT([EXTRA_CFLAGS : ${EXTRA_CFLAGS}])
AC_MSG_RESULT([CPPFLAGS : ${CPPFLAGS}])
AC_MSG_RESULT([LDFLAGS : ${LDFLAGS}])
AC_MSG_RESULT([EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}])
AC_MSG_RESULT([LIBS : ${LIBS}])
+AC_MSG_RESULT([TESTLIBS : ${TESTLIBS}])
AC_MSG_RESULT([RPATH_EXTRA : ${RPATH_EXTRA}])
AC_MSG_RESULT([])
AC_MSG_RESULT([XSLTPROC : ${XSLTPROC}])
@@ -2005,7 +1724,6 @@ AC_MSG_RESULT([JEMALLOC_PREFIX : ${JEMALLOC_PREFIX}])
AC_MSG_RESULT([JEMALLOC_PRIVATE_NAMESPACE])
AC_MSG_RESULT([ : ${JEMALLOC_PRIVATE_NAMESPACE}])
AC_MSG_RESULT([install_suffix : ${install_suffix}])
-AC_MSG_RESULT([malloc_conf : ${config_malloc_conf}])
AC_MSG_RESULT([autogen : ${enable_autogen}])
AC_MSG_RESULT([cc-silence : ${enable_cc_silence}])
AC_MSG_RESULT([debug : ${enable_debug}])
diff --git a/deps/jemalloc/doc/html.xsl.in b/deps/jemalloc/doc/html.xsl.in
index ec4fa6552..a91d9746f 100644
--- a/deps/jemalloc/doc/html.xsl.in
+++ b/deps/jemalloc/doc/html.xsl.in
@@ -1,5 +1,4 @@
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:import href="@XSLROOT@/html/docbook.xsl"/>
<xsl:import href="@abs_srcroot@doc/stylesheet.xsl"/>
- <xsl:output method="xml" encoding="utf-8"/>
</xsl:stylesheet>
diff --git a/deps/jemalloc/doc/jemalloc.3 b/deps/jemalloc/doc/jemalloc.3
index 3709f6692..2e6b2c0e8 100644
--- a/deps/jemalloc/doc/jemalloc.3
+++ b/deps/jemalloc/doc/jemalloc.3
@@ -1,13 +1,13 @@
'\" t
.\" Title: JEMALLOC
.\" Author: Jason Evans
-.\" Generator: DocBook XSL Stylesheets v1.79.1 <http://docbook.sf.net/>
-.\" Date: 12/03/2016
+.\" Generator: DocBook XSL Stylesheets v1.78.1 <http://docbook.sf.net/>
+.\" Date: 09/24/2015
.\" Manual: User Manual
-.\" Source: jemalloc 4.4.0-0-gf1f76357313e7dcad7262f17a48ff0a2e005fcdc
+.\" Source: jemalloc 4.0.3-0-ge9192eacf8935e29fc62fddc2701f7942b1cc02c
.\" Language: English
.\"
-.TH "JEMALLOC" "3" "12/03/2016" "jemalloc 4.4.0-0-gf1f76357313e" "User Manual"
+.TH "JEMALLOC" "3" "09/24/2015" "jemalloc 4.0.3-0-ge9192eacf893" "User Manual"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
@@ -31,7 +31,7 @@
jemalloc \- general purpose memory allocation functions
.SH "LIBRARY"
.PP
-This manual describes jemalloc 4\&.4\&.0\-0\-gf1f76357313e7dcad7262f17a48ff0a2e005fcdc\&. More information can be found at the
+This manual describes jemalloc 4\&.0\&.3\-0\-ge9192eacf8935e29fc62fddc2701f7942b1cc02c\&. More information can be found at the
\m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&.
.SH "SYNOPSIS"
.sp
@@ -86,26 +86,26 @@ const char *\fImalloc_conf\fR;
.SS "Standard API"
.PP
The
-malloc()
+\fBmalloc\fR\fB\fR
function allocates
\fIsize\fR
bytes of uninitialized memory\&. The allocated space is suitably aligned (after possible pointer coercion) for storage of any type of object\&.
.PP
The
-calloc()
+\fBcalloc\fR\fB\fR
function allocates space for
\fInumber\fR
objects, each
\fIsize\fR
bytes in length\&. The result is identical to calling
-malloc()
+\fBmalloc\fR\fB\fR
with an argument of
\fInumber\fR
*
\fIsize\fR, with the exception that the allocated memory is explicitly initialized to zero bytes\&.
.PP
The
-posix_memalign()
+\fBposix_memalign\fR\fB\fR
function allocates
\fIsize\fR
bytes of memory such that the allocation\*(Aqs base address is a multiple of
@@ -116,7 +116,7 @@ must be a power of 2 at least as large as
sizeof(\fBvoid *\fR)\&.
.PP
The
-aligned_alloc()
+\fBaligned_alloc\fR\fB\fR
function allocates
\fIsize\fR
bytes of memory such that the allocation\*(Aqs base address is a multiple of
@@ -128,7 +128,7 @@ is not an integral multiple of
\fIalignment\fR\&.
.PP
The
-realloc()
+\fBrealloc\fR\fB\fR
function changes the size of the previously allocated memory referenced by
\fIptr\fR
to
@@ -136,19 +136,19 @@ to
bytes\&. The contents of the memory are unchanged up to the lesser of the new and old sizes\&. If the new size is larger, the contents of the newly allocated portion of the memory are undefined\&. Upon success, the memory referenced by
\fIptr\fR
is freed and a pointer to the newly allocated memory is returned\&. Note that
-realloc()
+\fBrealloc\fR\fB\fR
may move the memory allocation, resulting in a different return value than
\fIptr\fR\&. If
\fIptr\fR
is
\fBNULL\fR, the
-realloc()
+\fBrealloc\fR\fB\fR
function behaves identically to
-malloc()
+\fBmalloc\fR\fB\fR
for the specified size\&.
.PP
The
-free()
+\fBfree\fR\fB\fR
function causes the allocated memory referenced by
\fIptr\fR
to be made available for future allocations\&. If
@@ -158,13 +158,13 @@ is
.SS "Non\-standard API"
.PP
The
-mallocx(),
-rallocx(),
-xallocx(),
-sallocx(),
-dallocx(),
-sdallocx(), and
-nallocx()
+\fBmallocx\fR\fB\fR,
+\fBrallocx\fR\fB\fR,
+\fBxallocx\fR\fB\fR,
+\fBsallocx\fR\fB\fR,
+\fBdallocx\fR\fB\fR,
+\fBsdallocx\fR\fB\fR, and
+\fBnallocx\fR\fB\fR
functions all have a
\fIflags\fR
argument that can be used to specify options\&. The functions only check the options that are contextually relevant\&. Use bitwise or (|) operations to specify one or more of the following:
@@ -196,7 +196,7 @@ Initialize newly allocated memory to contain zero bytes\&. In the growing reallo
.RS 4
Use the thread\-specific cache (tcache) specified by the identifier
\fItc\fR, which must have been acquired via the
-tcache\&.create
+"tcache\&.create"
mallctl\&. This macro does not validate that
\fItc\fR
specifies a valid identifier\&.
@@ -223,16 +223,16 @@ specifies an arena index in the valid range\&.
.RE
.PP
The
-mallocx()
+\fBmallocx\fR\fB\fR
function allocates at least
\fIsize\fR
bytes of memory, and returns a pointer to the base address of the allocation\&. Behavior is undefined if
\fIsize\fR
is
-\fB0\fR\&.
+\fB0\fR, or if request size overflows due to size class and/or alignment constraints\&.
.PP
The
-rallocx()
+\fBrallocx\fR\fB\fR
function resizes the allocation at
\fIptr\fR
to be at least
@@ -240,10 +240,10 @@ to be at least
bytes, and returns a pointer to the base address of the resulting allocation, which may or may not have moved from its original location\&. Behavior is undefined if
\fIsize\fR
is
-\fB0\fR\&.
+\fB0\fR, or if request size overflows due to size class and/or alignment constraints\&.
.PP
The
-xallocx()
+\fBxallocx\fR\fB\fR
function resizes the allocation at
\fIptr\fR
in place to be at least
@@ -259,42 +259,40 @@ is
(\fIsize\fR + \fIextra\fR > \fBSIZE_T_MAX\fR)\&.
.PP
The
-sallocx()
+\fBsallocx\fR\fB\fR
function returns the real size of the allocation at
\fIptr\fR\&.
.PP
The
-dallocx()
+\fBdallocx\fR\fB\fR
function causes the memory referenced by
\fIptr\fR
to be made available for future allocations\&.
.PP
The
-sdallocx()
+\fBsdallocx\fR\fB\fR
function is an extension of
-dallocx()
+\fBdallocx\fR\fB\fR
with a
\fIsize\fR
parameter to allow the caller to pass in the allocation size as an optimization\&. The minimum valid input size is the original requested size of the allocation, and the maximum valid input size is the corresponding value returned by
-nallocx()
+\fBnallocx\fR\fB\fR
or
-sallocx()\&.
+\fBsallocx\fR\fB\fR\&.
.PP
The
-nallocx()
+\fBnallocx\fR\fB\fR
function allocates no memory, but it performs the same size computation as the
-mallocx()
+\fBmallocx\fR\fB\fR
function, and returns the real size of the allocation that would result from the equivalent
-mallocx()
-function call, or
-\fB0\fR
-if the inputs exceed the maximum supported size class and/or alignment\&. Behavior is undefined if
+\fBmallocx\fR\fB\fR
+function call\&. Behavior is undefined if
\fIsize\fR
is
-\fB0\fR\&.
+\fB0\fR, or if request size overflows due to size class and/or alignment constraints\&.
.PP
The
-mallctl()
+\fBmallctl\fR\fB\fR
function provides a general interface for introspecting the memory allocator, as well as setting modifiable parameters and triggering actions\&. The period\-separated
\fIname\fR
argument specifies a location in a tree\-structured namespace; see the
@@ -313,12 +311,10 @@ and
\fB0\fR\&.
.PP
The
-mallctlnametomib()
-function provides a way to avoid repeated name lookups for applications that repeatedly query the same portion of the namespace, by translating a name to a
-\(lqManagement Information Base\(rq
-(MIB) that can be passed repeatedly to
-mallctlbymib()\&. Upon successful return from
-mallctlnametomib(),
+\fBmallctlnametomib\fR\fB\fR
+function provides a way to avoid repeated name lookups for applications that repeatedly query the same portion of the namespace, by translating a name to a \(lqManagement Information Base\(rq (MIB) that can be passed repeatedly to
+\fBmallctlbymib\fR\fB\fR\&. Upon successful return from
+\fBmallctlnametomib\fR\fB\fR,
\fImibp\fR
contains an array of
\fI*miblenp\fR
@@ -330,7 +326,7 @@ and the input value of
\fI*miblenp\fR\&. Thus it is possible to pass a
\fI*miblenp\fR
that is smaller than the number of period\-separated name components, which results in a partial MIB that can be used as the basis for constructing a complete MIB\&. For name components that are integers (e\&.g\&. the 2 in
-arenas\&.bin\&.2\&.size), the corresponding MIB component will always be that integer\&. Therefore, it is legitimate to construct code like the following:
+"arenas\&.bin\&.2\&.size"), the corresponding MIB component will always be that integer\&. Therefore, it is legitimate to construct code like the following:
.sp
.if n \{\
.RS 4
@@ -350,7 +346,7 @@ for (i = 0; i < nbins; i++) {
mib[2] = i;
len = sizeof(bin_size);
- mallctlbymib(mib, miblen, (void *)&bin_size, &len, NULL, 0);
+ mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
/* Do something with bin_size\&.\&.\&. */
}
.fi
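
The fragment above is the man page's own loop over bin sizes using a precomputed MIB; the revert merely drops the (void *) cast that 4.4.0 added to the example. For readers following along, a hedged, self-contained version of the full flow the snippet assumes (arena and bin mallctl names as documented for jemalloc 4.x):

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
        unsigned nbins, i;
        size_t mib[4], miblen = 4, len;

        /* Read the number of bin size classes. */
        len = sizeof(nbins);
        mallctl("arenas.nbins", &nbins, &len, NULL, 0);

        /* Translate the name once, then vary the bin index component. */
        mallctlnametomib("arenas.bin.0.size", mib, &miblen);
        for (i = 0; i < nbins; i++) {
            size_t bin_size;

            mib[2] = i;
            len = sizeof(bin_size);
            mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
            printf("bin %u: %zu bytes\n", i, bin_size);
        }
        return 0;
    }
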
@@ -359,87 +355,67 @@ for (i = 0; i < nbins; i++) {
.\}
.PP
The
-malloc_stats_print()
-function writes summary statistics via the
+\fBmalloc_stats_print\fR\fB\fR
+function writes human\-readable summary statistics via the
\fIwrite_cb\fR
callback function pointer and
\fIcbopaque\fR
data passed to
\fIwrite_cb\fR, or
-malloc_message()
+\fBmalloc_message\fR\fB\fR
if
\fIwrite_cb\fR
is
-\fBNULL\fR\&. The statistics are presented in human\-readable form unless
-\(lqJ\(rq
-is specified as a character within the
-\fIopts\fR
-string, in which case the statistics are presented in
-\m[blue]\fBJSON format\fR\m[]\&\s-2\u[2]\d\s+2\&. This function can be called repeatedly\&. General information that never changes during execution can be omitted by specifying
-\(lqg\(rq
-as a character within the
+\fBNULL\fR\&. This function can be called repeatedly\&. General information that never changes during execution can be omitted by specifying "g" as a character within the
\fIopts\fR
string\&. Note that
-malloc_message()
+\fBmalloc_message\fR\fB\fR
uses the
-mallctl*()
+\fBmallctl*\fR\fB\fR
functions internally, so inconsistent statistics can be reported if multiple threads use these functions simultaneously\&. If
\fB\-\-enable\-stats\fR
-is specified during configuration,
-\(lqm\(rq
-and
-\(lqa\(rq
-can be specified to omit merged arena and per arena statistics, respectively;
-\(lqb\(rq,
-\(lql\(rq, and
-\(lqh\(rq
-can be specified to omit per size class statistics for bins, large objects, and huge objects, respectively\&. Unrecognized characters are silently ignored\&. Note that thread caching may prevent some statistics from being completely up to date, since extra locking would be required to merge counters that track thread cache operations\&.
+is specified during configuration, \(lqm\(rq and \(lqa\(rq can be specified to omit merged arena and per arena statistics, respectively; \(lqb\(rq, \(lql\(rq, and \(lqh\(rq can be specified to omit per size class statistics for bins, large objects, and huge objects, respectively\&. Unrecognized characters are silently ignored\&. Note that thread caching may prevent some statistics from being completely up to date, since extra locking would be required to merge counters that track thread cache operations\&.
.PP
The
-malloc_usable_size()
+\fBmalloc_usable_size\fR\fB\fR
function returns the usable size of the allocation pointed to by
\fIptr\fR\&. The return value may be larger than the size that was requested during allocation\&. The
-malloc_usable_size()
+\fBmalloc_usable_size\fR\fB\fR
function is not a mechanism for in\-place
-realloc(); rather it is provided solely as a tool for introspection purposes\&. Any discrepancy between the requested allocation size and the size reported by
-malloc_usable_size()
+\fBrealloc\fR\fB\fR; rather it is provided solely as a tool for introspection purposes\&. Any discrepancy between the requested allocation size and the size reported by
+\fBmalloc_usable_size\fR\fB\fR
should not be depended on, since such behavior is entirely implementation\-dependent\&.
.SH "TUNING"
.PP
Once, when the first call is made to one of the memory allocation routines, the allocator initializes its internals based in part on various options that can be specified at compile\- or run\-time\&.
.PP
-The string specified via
-\fB\-\-with\-malloc\-conf\fR, the string pointed to by the global variable
-\fImalloc_conf\fR, the
-\(lqname\(rq
-of the file referenced by the symbolic link named
+The string pointed to by the global variable
+\fImalloc_conf\fR, the \(lqname\(rq of the file referenced by the symbolic link named
/etc/malloc\&.conf, and the value of the environment variable
\fBMALLOC_CONF\fR, will be interpreted, in that order, from left to right as options\&. Note that
\fImalloc_conf\fR
may be read before
-main()
+\fBmain\fR\fB\fR
is entered, so the declaration of
\fImalloc_conf\fR
should specify an initializer that contains the final value to be read by jemalloc\&.
-\fB\-\-with\-malloc\-conf\fR
-and
\fImalloc_conf\fR
-are compile\-time mechanisms, whereas
+is a compile\-time setting, whereas
/etc/malloc\&.conf
and
\fBMALLOC_CONF\fR
can be safely set any time prior to program invocation\&.
.PP
An options string is a comma\-separated list of option:value pairs\&. There is one key corresponding to each
-opt\&.*
+"opt\&.*"
mallctl (see the
MALLCTL NAMESPACE
section for options documentation)\&. For example,
abort:true,narenas:1
sets the
-opt\&.abort
+"opt\&.abort"
and
-opt\&.narenas
+"opt\&.narenas"
options\&. Some options have boolean values (true/false), others have integer values (base 8, 10, or 16, depending on prefix), and yet others have raw string values\&.
.SH "IMPLEMENTATION NOTES"
.PP
@@ -460,26 +436,29 @@ In addition to multiple arenas, unless
\fB\-\-disable\-tcache\fR
is specified during configuration, this allocator supports thread\-specific caching for small and large objects, in order to make it possible to completely avoid synchronization for most allocation requests\&. Such caching allows very fast allocation in the common case, but it increases memory usage and fragmentation, since a bounded number of objects can remain allocated in each thread cache\&.
.PP
-Memory is conceptually broken into equal\-sized chunks, where the chunk size is a power of two that is greater than the page size\&. Chunks are always aligned to multiples of the chunk size\&. This alignment makes it possible to find metadata for user objects very quickly\&. User objects are broken into three categories according to size: small, large, and huge\&. Multiple small and large objects can reside within a single chunk, whereas huge objects each have one or more chunks backing them\&. Each chunk that contains small and/or large objects tracks its contents as runs of contiguous pages (unused, backing a set of small objects, or backing one large object)\&. The combination of chunk alignment and chunk page maps makes it possible to determine all metadata regarding small and large allocations in constant time\&.
+Memory is conceptually broken into equal\-sized chunks, where the chunk size is a power of two that is greater than the page size\&. Chunks are always aligned to multiples of the chunk size\&. This alignment makes it possible to find metadata for user objects very quickly\&.
+.PP
+User objects are broken into three categories according to size: small, large, and huge\&. Small and large objects are managed entirely by arenas; huge objects are additionally aggregated in a single data structure that is shared by all threads\&. Huge objects are typically used by applications infrequently enough that this single data structure is not a scalability issue\&.
+.PP
+Each chunk that is managed by an arena tracks its contents as runs of contiguous pages (unused, backing a set of small objects, or backing one large object)\&. The combination of chunk alignment and chunk page maps makes it possible to determine all metadata regarding small and large allocations in constant time\&.
.PP
Small objects are managed in groups by page runs\&. Each run maintains a bitmap to track which regions are in use\&. Allocation requests that are no more than half the quantum (8 or 16, depending on architecture) are rounded up to the nearest power of two that is at least
sizeof(\fBdouble\fR)\&. All other object size classes are multiples of the quantum, spaced such that there are four size classes for each doubling in size, which limits internal fragmentation to approximately 20% for all but the smallest size classes\&. Small size classes are smaller than four times the page size, large size classes are smaller than the chunk size (see the
-opt\&.lg_chunk
-option), and huge size classes extend from the chunk size up to the largest size class that does not exceed
-\fBPTRDIFF_MAX\fR\&.
+"opt\&.lg_chunk"
+option), and huge size classes extend from the chunk size up to one size class less than the full address space size\&.
.PP
Allocations are packed tightly together, which can be an issue for multi\-threaded applications\&. If you need to assure that allocations do not suffer from cacheline sharing, round your allocation requests up to the nearest multiple of the cacheline size, or specify cacheline alignment when allocating\&.
.PP
The
-realloc(),
-rallocx(), and
-xallocx()
+\fBrealloc\fR\fB\fR,
+\fBrallocx\fR\fB\fR, and
+\fBxallocx\fR\fB\fR
functions may resize allocations without moving them under limited circumstances\&. Unlike the
-*allocx()
+\fB*allocx\fR\fB\fR
API, the standard API does not officially round up the usable size of an allocation to the nearest size class, so technically it is necessary to call
-realloc()
+\fBrealloc\fR\fB\fR
to grow e\&.g\&. a 9\-byte allocation to 16 bytes, or shrink a 16\-byte allocation to 9 bytes\&. Growth and shrinkage trivially succeeds in place as long as the pre\-size and post\-size both round up to the same size class\&. No other API guarantees are made regarding in\-place resizing, but the current implementation also tries to resize large and huge allocations in place, as long as the pre\-size and post\-size are both large or both huge\&. In such cases shrinkage always succeeds for large size classes, but for huge size classes the chunk allocator must support splitting (see
-arena\&.<i>\&.chunk_hooks)\&. Growth only succeeds if the trailing memory is currently available, and additionally for huge size classes the chunk allocator must support merging\&.
+"arena\&.<i>\&.chunk_hooks")\&. Growth only succeeds if the trailing memory is currently available, and additionally for huge size classes the chunk allocator must support merging\&.
.PP
Assuming 2 MiB chunks, 4 KiB pages, and a 16\-byte quantum on a 64\-bit system, the size classes in each category are as shown in
Table 1\&.
@@ -523,8 +502,6 @@ l r l
^ r l
^ r l
^ r l
-^ r l
-^ r l
^ r l.
T{
Small
@@ -652,22 +629,12 @@ T}
T}:T{
\&.\&.\&.
T}
-:T{
-512 PiB
-T}:T{
-[2560 PiB, 3 EiB, 3584 PiB, 4 EiB]
-T}
-:T{
-1 EiB
-T}:T{
-[5 EiB, 6 EiB, 7 EiB]
-T}
.TE
.sp 1
.SH "MALLCTL NAMESPACE"
.PP
The following names are defined in the namespace accessible via the
-mallctl*()
+\fBmallctl*\fR\fB\fR
functions\&. Value types are specified in parentheses, their readable/writable statuses are encoded as
rw,
r\-,
@@ -677,118 +644,111 @@ r\-,
or
<j>
indicates an integer component, where the integer varies from 0 to some upper value that must be determined via introspection\&. In the case of
-stats\&.arenas\&.<i>\&.*,
+"stats\&.arenas\&.<i>\&.*",
<i>
equal to
-arenas\&.narenas
+"arenas\&.narenas"
can be used to access the summation of statistics from all arenas\&. Take special note of the
-epoch
+"epoch"
mallctl, which controls refreshing of cached dynamic statistics\&.
.PP
-version (\fBconst char *\fR) r\-
+"version" (\fBconst char *\fR) r\-
.RS 4
Return the jemalloc version string\&.
.RE
.PP
-epoch (\fBuint64_t\fR) rw
+"epoch" (\fBuint64_t\fR) rw
.RS 4
If a value is passed in, refresh the data from which the
-mallctl*()
+\fBmallctl*\fR\fB\fR
functions report values, and increment the epoch\&. Return the current epoch\&. This is useful for detecting whether another thread caused a refresh\&.
.RE
.PP
-config\&.cache_oblivious (\fBbool\fR) r\-
+"config\&.cache_oblivious" (\fBbool\fR) r\-
.RS 4
\fB\-\-enable\-cache\-oblivious\fR
was specified during build configuration\&.
.RE
.PP
-config\&.debug (\fBbool\fR) r\-
+"config\&.debug" (\fBbool\fR) r\-
.RS 4
\fB\-\-enable\-debug\fR
was specified during build configuration\&.
.RE
.PP
-config\&.fill (\fBbool\fR) r\-
+"config\&.fill" (\fBbool\fR) r\-
.RS 4
\fB\-\-enable\-fill\fR
was specified during build configuration\&.
.RE
.PP
-config\&.lazy_lock (\fBbool\fR) r\-
+"config\&.lazy_lock" (\fBbool\fR) r\-
.RS 4
\fB\-\-enable\-lazy\-lock\fR
was specified during build configuration\&.
.RE
.PP
-config\&.malloc_conf (\fBconst char *\fR) r\-
-.RS 4
-Embedded configure\-time\-specified run\-time options string, empty unless
-\fB\-\-with\-malloc\-conf\fR
-was specified during build configuration\&.
-.RE
-.PP
-config\&.munmap (\fBbool\fR) r\-
+"config\&.munmap" (\fBbool\fR) r\-
.RS 4
\fB\-\-enable\-munmap\fR
was specified during build configuration\&.
.RE
.PP
-config\&.prof (\fBbool\fR) r\-
+"config\&.prof" (\fBbool\fR) r\-
.RS 4
\fB\-\-enable\-prof\fR
was specified during build configuration\&.
.RE
.PP
-config\&.prof_libgcc (\fBbool\fR) r\-
+"config\&.prof_libgcc" (\fBbool\fR) r\-
.RS 4
\fB\-\-disable\-prof\-libgcc\fR
was not specified during build configuration\&.
.RE
.PP
-config\&.prof_libunwind (\fBbool\fR) r\-
+"config\&.prof_libunwind" (\fBbool\fR) r\-
.RS 4
\fB\-\-enable\-prof\-libunwind\fR
was specified during build configuration\&.
.RE
.PP
-config\&.stats (\fBbool\fR) r\-
+"config\&.stats" (\fBbool\fR) r\-
.RS 4
\fB\-\-enable\-stats\fR
was specified during build configuration\&.
.RE
.PP
-config\&.tcache (\fBbool\fR) r\-
+"config\&.tcache" (\fBbool\fR) r\-
.RS 4
\fB\-\-disable\-tcache\fR
was not specified during build configuration\&.
.RE
.PP
-config\&.tls (\fBbool\fR) r\-
+"config\&.tls" (\fBbool\fR) r\-
.RS 4
\fB\-\-disable\-tls\fR
was not specified during build configuration\&.
.RE
.PP
-config\&.utrace (\fBbool\fR) r\-
+"config\&.utrace" (\fBbool\fR) r\-
.RS 4
\fB\-\-enable\-utrace\fR
was specified during build configuration\&.
.RE
.PP
-config\&.valgrind (\fBbool\fR) r\-
+"config\&.valgrind" (\fBbool\fR) r\-
.RS 4
\fB\-\-enable\-valgrind\fR
was specified during build configuration\&.
.RE
.PP
-config\&.xmalloc (\fBbool\fR) r\-
+"config\&.xmalloc" (\fBbool\fR) r\-
.RS 4
\fB\-\-enable\-xmalloc\fR
was specified during build configuration\&.
.RE
.PP
-opt\&.abort (\fBbool\fR) r\-
+"opt\&.abort" (\fBbool\fR) r\-
.RS 4
Abort\-on\-warning enabled/disabled\&. If true, most warnings are fatal\&. The process will call
\fBabort\fR(3)
@@ -797,132 +757,97 @@ in these cases\&. This option is disabled by default unless
is specified during configuration, in which case it is enabled by default\&.
.RE
.PP
-opt\&.dss (\fBconst char *\fR) r\-
+"opt\&.dss" (\fBconst char *\fR) r\-
.RS 4
dss (\fBsbrk\fR(2)) allocation precedence as related to
\fBmmap\fR(2)
allocation\&. The following settings are supported if
\fBsbrk\fR(2)
-is supported by the operating system:
-\(lqdisabled\(rq,
-\(lqprimary\(rq, and
-\(lqsecondary\(rq; otherwise only
-\(lqdisabled\(rq
-is supported\&. The default is
-\(lqsecondary\(rq
-if
+is supported by the operating system: \(lqdisabled\(rq, \(lqprimary\(rq, and \(lqsecondary\(rq; otherwise only \(lqdisabled\(rq is supported\&. The default is \(lqsecondary\(rq if
\fBsbrk\fR(2)
-is supported by the operating system;
-\(lqdisabled\(rq
-otherwise\&.
+is supported by the operating system; \(lqdisabled\(rq otherwise\&.
.RE
.PP
-opt\&.lg_chunk (\fBsize_t\fR) r\-
+"opt\&.lg_chunk" (\fBsize_t\fR) r\-
.RS 4
Virtual memory chunk size (log base 2)\&. If a chunk size outside the supported size range is specified, the size is silently clipped to the minimum/maximum supported size\&. The default chunk size is 2 MiB (2^21)\&.
.RE
.PP
-opt\&.narenas (\fBunsigned\fR) r\-
+"opt\&.narenas" (\fBsize_t\fR) r\-
.RS 4
Maximum number of arenas to use for automatic multiplexing of threads and arenas\&. The default is four times the number of CPUs, or one if there is a single CPU\&.
.RE
.PP
-opt\&.purge (\fBconst char *\fR) r\-
-.RS 4
-Purge mode is \(lqratio\(rq (default) or \(lqdecay\(rq\&. See
-opt\&.lg_dirty_mult
-for details of the ratio mode\&. See
-opt\&.decay_time
-for details of the decay mode\&.
-.RE
-.PP
-opt\&.lg_dirty_mult (\fBssize_t\fR) r\-
+"opt\&.lg_dirty_mult" (\fBssize_t\fR) r\-
.RS 4
Per\-arena minimum ratio (log base 2) of active to dirty pages\&. Some dirty unused pages may be allowed to accumulate, within the limit set by the ratio (or one chunk worth of dirty pages, whichever is greater), before informing the kernel about some of those pages via
\fBmadvise\fR(2)
or a similar system call\&. This provides the kernel with sufficient information to recycle dirty pages if physical memory becomes scarce and the pages remain unused\&. The default minimum ratio is 8:1 (2^3:1); an option value of \-1 will disable dirty page purging\&. See
-arenas\&.lg_dirty_mult
+"arenas\&.lg_dirty_mult"
and
-arena\&.<i>\&.lg_dirty_mult
+"arena\&.<i>\&.lg_dirty_mult"
for related dynamic control options\&.
.RE
.PP
-opt\&.decay_time (\fBssize_t\fR) r\-
-.RS 4
-Approximate time in seconds from the creation of a set of unused dirty pages until an equivalent set of unused dirty pages is purged and/or reused\&. The pages are incrementally purged according to a sigmoidal decay curve that starts and ends with zero purge rate\&. A decay time of 0 causes all unused dirty pages to be purged immediately upon creation\&. A decay time of \-1 disables purging\&. The default decay time is 10 seconds\&. See
-arenas\&.decay_time
-and
-arena\&.<i>\&.decay_time
-for related dynamic control options\&.
-.RE
-.PP
-opt\&.stats_print (\fBbool\fR) r\-
+"opt\&.stats_print" (\fBbool\fR) r\-
.RS 4
Enable/disable statistics printing at exit\&. If enabled, the
-malloc_stats_print()
+\fBmalloc_stats_print\fR\fB\fR
function is called at program exit via an
\fBatexit\fR(3)
function\&. If
\fB\-\-enable\-stats\fR
is specified during configuration, this has the potential to cause deadlock for a multi\-threaded process that exits while one or more threads are executing in the memory allocation functions\&. Furthermore,
-atexit()
+\fBatexit\fR\fB\fR
may allocate memory during application initialization and then deadlock internally when jemalloc in turn calls
-atexit(), so this option is not universally usable (though the application can register its own
-atexit()
+\fBatexit\fR\fB\fR, so this option is not universally usable (though the application can register its own
+\fBatexit\fR\fB\fR
function with equivalent functionality)\&. Therefore, this option should only be used with care; it is primarily intended as a performance tuning aid during application development\&. This option is disabled by default\&.
.RE
.PP
-opt\&.junk (\fBconst char *\fR) r\- [\fB\-\-enable\-fill\fR]
+"opt\&.junk" (\fBconst char *\fR) r\- [\fB\-\-enable\-fill\fR]
.RS 4
-Junk filling\&. If set to
-\(lqalloc\(rq, each byte of uninitialized allocated memory will be initialized to
-0xa5\&. If set to
-\(lqfree\(rq, all deallocated memory will be initialized to
-0x5a\&. If set to
-\(lqtrue\(rq, both allocated and deallocated memory will be initialized, and if set to
-\(lqfalse\(rq, junk filling be disabled entirely\&. This is intended for debugging and will impact performance negatively\&. This option is
-\(lqfalse\(rq
-by default unless
+Junk filling\&. If set to "alloc", each byte of uninitialized allocated memory will be initialized to
+0xa5\&. If set to "free", all deallocated memory will be initialized to
+0x5a\&. If set to "true", both allocated and deallocated memory will be initialized, and if set to "false", junk filling be disabled entirely\&. This is intended for debugging and will impact performance negatively\&. This option is "false" by default unless
\fB\-\-enable\-debug\fR
-is specified during configuration, in which case it is
-\(lqtrue\(rq
-by default unless running inside
-\m[blue]\fBValgrind\fR\m[]\&\s-2\u[3]\d\s+2\&.
+is specified during configuration, in which case it is "true" by default unless running inside
+\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2\&.
.RE
.PP
-opt\&.quarantine (\fBsize_t\fR) r\- [\fB\-\-enable\-fill\fR]
+"opt\&.quarantine" (\fBsize_t\fR) r\- [\fB\-\-enable\-fill\fR]
.RS 4
Per thread quarantine size in bytes\&. If non\-zero, each thread maintains a FIFO object quarantine that stores up to the specified number of bytes of memory\&. The quarantined memory is not freed until it is released from quarantine, though it is immediately junk\-filled if the
-opt\&.junk
+"opt\&.junk"
option is enabled\&. This feature is of particular use in combination with
-\m[blue]\fBValgrind\fR\m[]\&\s-2\u[3]\d\s+2, which can detect attempts to access quarantined objects\&. This is intended for debugging and will impact performance negatively\&. The default quarantine size is 0 unless running inside Valgrind, in which case the default is 16 MiB\&.
+\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2, which can detect attempts to access quarantined objects\&. This is intended for debugging and will impact performance negatively\&. The default quarantine size is 0 unless running inside Valgrind, in which case the default is 16 MiB\&.
.RE
.PP
-opt\&.redzone (\fBbool\fR) r\- [\fB\-\-enable\-fill\fR]
+"opt\&.redzone" (\fBbool\fR) r\- [\fB\-\-enable\-fill\fR]
.RS 4
Redzones enabled/disabled\&. If enabled, small allocations have redzones before and after them\&. Furthermore, if the
-opt\&.junk
+"opt\&.junk"
option is enabled, the redzones are checked for corruption during deallocation\&. However, the primary intended purpose of this feature is to be used in combination with
-\m[blue]\fBValgrind\fR\m[]\&\s-2\u[3]\d\s+2, which needs redzones in order to do effective buffer overflow/underflow detection\&. This option is intended for debugging and will impact performance negatively\&. This option is disabled by default unless running inside Valgrind\&.
+\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2, which needs redzones in order to do effective buffer overflow/underflow detection\&. This option is intended for debugging and will impact performance negatively\&. This option is disabled by default unless running inside Valgrind\&.
.RE
.PP
-opt\&.zero (\fBbool\fR) r\- [\fB\-\-enable\-fill\fR]
+"opt\&.zero" (\fBbool\fR) r\- [\fB\-\-enable\-fill\fR]
.RS 4
Zero filling enabled/disabled\&. If enabled, each byte of uninitialized allocated memory will be initialized to 0\&. Note that this initialization only happens once for each byte, so
-realloc()
+\fBrealloc\fR\fB\fR
and
-rallocx()
+\fBrallocx\fR\fB\fR
calls do not zero memory that was previously allocated\&. This is intended for debugging and will impact performance negatively\&. This option is disabled by default\&.
.RE
.PP
-opt\&.utrace (\fBbool\fR) r\- [\fB\-\-enable\-utrace\fR]
+"opt\&.utrace" (\fBbool\fR) r\- [\fB\-\-enable\-utrace\fR]
.RS 4
Allocation tracing based on
\fButrace\fR(2)
enabled/disabled\&. This option is disabled by default\&.
.RE
.PP
-opt\&.xmalloc (\fBbool\fR) r\- [\fB\-\-enable\-xmalloc\fR]
+"opt\&.xmalloc" (\fBbool\fR) r\- [\fB\-\-enable\-xmalloc\fR]
.RS 4
Abort\-on\-out\-of\-memory enabled/disabled\&. If enabled, rather than returning failure for any allocation function, display a diagnostic message on
\fBSTDERR_FILENO\fR
@@ -942,94 +867,92 @@ malloc_conf = "xmalloc:true";
This option is disabled by default\&.
.RE
.PP
-opt\&.tcache (\fBbool\fR) r\- [\fB\-\-enable\-tcache\fR]
+"opt\&.tcache" (\fBbool\fR) r\- [\fB\-\-enable\-tcache\fR]
.RS 4
Thread\-specific caching (tcache) enabled/disabled\&. When there are multiple threads, each thread uses a tcache for objects up to a certain size\&. Thread\-specific caching allows many allocations to be satisfied without performing any thread synchronization, at the cost of increased memory use\&. See the
-opt\&.lg_tcache_max
+"opt\&.lg_tcache_max"
option for related tuning information\&. This option is enabled by default unless running inside
-\m[blue]\fBValgrind\fR\m[]\&\s-2\u[3]\d\s+2, in which case it is forcefully disabled\&.
+\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2, in which case it is forcefully disabled\&.
.RE
.PP
-opt\&.lg_tcache_max (\fBsize_t\fR) r\- [\fB\-\-enable\-tcache\fR]
+"opt\&.lg_tcache_max" (\fBsize_t\fR) r\- [\fB\-\-enable\-tcache\fR]
.RS 4
Maximum size class (log base 2) to cache in the thread\-specific cache (tcache)\&. At a minimum, all small size classes are cached, and at a maximum all large size classes are cached\&. The default maximum is 32 KiB (2^15)\&.
.RE
.PP
-opt\&.prof (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
+"opt\&.prof" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Memory profiling enabled/disabled\&. If enabled, profile memory allocation activity\&. See the
-opt\&.prof_active
+"opt\&.prof_active"
option for on\-the\-fly activation/deactivation\&. See the
-opt\&.lg_prof_sample
+"opt\&.lg_prof_sample"
option for probabilistic sampling control\&. See the
-opt\&.prof_accum
+"opt\&.prof_accum"
option for control of cumulative sample reporting\&. See the
-opt\&.lg_prof_interval
+"opt\&.lg_prof_interval"
option for information on interval\-triggered profile dumping, the
-opt\&.prof_gdump
+"opt\&.prof_gdump"
option for information on high\-water\-triggered profile dumping, and the
-opt\&.prof_final
+"opt\&.prof_final"
option for final profile dumping\&. Profile output is compatible with the
\fBjeprof\fR
command, which is based on the
\fBpprof\fR
that is developed as part of the
-\m[blue]\fBgperftools package\fR\m[]\&\s-2\u[4]\d\s+2\&. See
-HEAP PROFILE FORMAT
-for heap profile format documentation\&.
+\m[blue]\fBgperftools package\fR\m[]\&\s-2\u[3]\d\s+2\&.
.RE
.PP
-opt\&.prof_prefix (\fBconst char *\fR) r\- [\fB\-\-enable\-prof\fR]
+"opt\&.prof_prefix" (\fBconst char *\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Filename prefix for profile dumps\&. If the prefix is set to the empty string, no automatic dumps will occur; this is primarily useful for disabling the automatic final heap dump (which also disables leak reporting, if enabled)\&. The default prefix is
jeprof\&.
.RE
.PP
-opt\&.prof_active (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
+"opt\&.prof_active" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Profiling activated/deactivated\&. This is a secondary control mechanism that makes it possible to start the application with profiling enabled (see the
-opt\&.prof
+"opt\&.prof"
option) but inactive, then toggle profiling at any time during program execution with the
-prof\&.active
+"prof\&.active"
mallctl\&. This option is enabled by default\&.
.RE
.PP
-opt\&.prof_thread_active_init (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
+"opt\&.prof_thread_active_init" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Initial setting for
-thread\&.prof\&.active
+"thread\&.prof\&.active"
in newly created threads\&. The initial setting for newly created threads can also be changed during execution via the
-prof\&.thread_active_init
+"prof\&.thread_active_init"
mallctl\&. This option is enabled by default\&.
.RE
.PP
-opt\&.lg_prof_sample (\fBsize_t\fR) r\- [\fB\-\-enable\-prof\fR]
+"opt\&.lg_prof_sample" (\fBsize_t\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Average interval (log base 2) between allocation samples, as measured in bytes of allocation activity\&. Increasing the sampling interval decreases profile fidelity, but also decreases the computational overhead\&. The default sample interval is 512 KiB (2^19 B)\&.
.RE
.PP
-opt\&.prof_accum (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
+"opt\&.prof_accum" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Reporting of cumulative object/byte counts in profile dumps enabled/disabled\&. If this option is enabled, every unique backtrace must be stored for the duration of execution\&. Depending on the application, this can impose a large memory overhead, and the cumulative counts are not always of interest\&. This option is disabled by default\&.
.RE
.PP
-opt\&.lg_prof_interval (\fBssize_t\fR) r\- [\fB\-\-enable\-prof\fR]
+"opt\&.lg_prof_interval" (\fBssize_t\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Average interval (log base 2) between memory profile dumps, as measured in bytes of allocation activity\&. The actual interval between dumps may be sporadic because decentralized allocation counters are used to avoid synchronization bottlenecks\&. Profiles are dumped to files named according to the pattern
<prefix>\&.<pid>\&.<seq>\&.i<iseq>\&.heap, where
<prefix>
is controlled by the
-opt\&.prof_prefix
+"opt\&.prof_prefix"
option\&. By default, interval\-triggered profile dumping is disabled (encoded as \-1)\&.
.RE
.PP
-opt\&.prof_gdump (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
+"opt\&.prof_gdump" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Set the initial state of
-prof\&.gdump, which when enabled triggers a memory profile dump every time the total virtual memory exceeds the previous maximum\&. This option is disabled by default\&.
+"prof\&.gdump", which when enabled triggers a memory profile dump every time the total virtual memory exceeds the previous maximum\&. This option is disabled by default\&.
.RE
.PP
-opt\&.prof_final (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
+"opt\&.prof_final" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Use an
\fBatexit\fR(3)
@@ -1037,150 +960,127 @@ function to dump final memory usage to a file named according to the pattern
<prefix>\&.<pid>\&.<seq>\&.f\&.heap, where
<prefix>
is controlled by the
-opt\&.prof_prefix
+"opt\&.prof_prefix"
option\&. Note that
-atexit()
+\fBatexit\fR\fB\fR
may allocate memory during application initialization and then deadlock internally when jemalloc in turn calls
-atexit(), so this option is not universally usable (though the application can register its own
-atexit()
+\fBatexit\fR\fB\fR, so this option is not universally usable (though the application can register its own
+\fBatexit\fR\fB\fR
function with equivalent functionality)\&. This option is disabled by default\&.
.RE
.PP
-opt\&.prof_leak (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
+"opt\&.prof_leak" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Leak reporting enabled/disabled\&. If enabled, use an
\fBatexit\fR(3)
function to report memory leaks detected by allocation sampling\&. See the
-opt\&.prof
+"opt\&.prof"
option for information on analyzing heap profile output\&. This option is disabled by default\&.
.RE
.PP
-thread\&.arena (\fBunsigned\fR) rw
+"thread\&.arena" (\fBunsigned\fR) rw
.RS 4
Get or set the arena associated with the calling thread\&. If the specified arena was not initialized beforehand (see the
-arenas\&.initialized
+"arenas\&.initialized"
mallctl), it will be automatically initialized as a side effect of calling this interface\&.
.RE
.PP
-thread\&.allocated (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"thread\&.allocated" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Get the total number of bytes ever allocated by the calling thread\&. This counter has the potential to wrap around; it is up to the application to appropriately interpret the counter in such cases\&.
.RE
.PP
-thread\&.allocatedp (\fBuint64_t *\fR) r\- [\fB\-\-enable\-stats\fR]
+"thread\&.allocatedp" (\fBuint64_t *\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Get a pointer to the value that is returned by the
-thread\&.allocated
+"thread\&.allocated"
mallctl\&. This is useful for avoiding the overhead of repeated
-mallctl*()
+\fBmallctl*\fR\fB\fR
calls\&.
.RE
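.PP
A minimal sketch of this pattern, with error handling mostly elided, fetches the pointer once and dereferences it on subsequent reads:
.sp
.if n \{\
.RS 4
.\}
.nf
#include <jemalloc/jemalloc.h>

uint64_t *allocatedp;
size_t sz = sizeof(allocatedp);
if (mallctl("thread.allocatedp", &allocatedp, &sz, NULL, 0) == 0) {
        /* Later reads avoid further mallctl calls. */
        uint64_t bytes = *allocatedp;
}
.fi
.if n \{\
.RE
.\}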
.PP
-thread\&.deallocated (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"thread\&.deallocated" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Get the total number of bytes ever deallocated by the calling thread\&. This counter has the potential to wrap around; it is up to the application to appropriately interpret the counter in such cases\&.
.RE
.PP
-thread\&.deallocatedp (\fBuint64_t *\fR) r\- [\fB\-\-enable\-stats\fR]
+"thread\&.deallocatedp" (\fBuint64_t *\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Get a pointer to the value that is returned by the
-thread\&.deallocated
+"thread\&.deallocated"
mallctl\&. This is useful for avoiding the overhead of repeated
-mallctl*()
+\fBmallctl*\fR\fB\fR
calls\&.
.RE
.PP
-thread\&.tcache\&.enabled (\fBbool\fR) rw [\fB\-\-enable\-tcache\fR]
+"thread\&.tcache\&.enabled" (\fBbool\fR) rw [\fB\-\-enable\-tcache\fR]
.RS 4
Enable/disable calling thread\*(Aqs tcache\&. The tcache is implicitly flushed as a side effect of becoming disabled (see
-thread\&.tcache\&.flush)\&.
+"thread\&.tcache\&.flush")\&.
.RE
.PP
-thread\&.tcache\&.flush (\fBvoid\fR) \-\- [\fB\-\-enable\-tcache\fR]
+"thread\&.tcache\&.flush" (\fBvoid\fR) \-\- [\fB\-\-enable\-tcache\fR]
.RS 4
Flush calling thread\*(Aqs thread\-specific cache (tcache)\&. This interface releases all cached objects and internal data structures associated with the calling thread\*(Aqs tcache\&. Ordinarily, this interface need not be called, since automatic periodic incremental garbage collection occurs, and the thread cache is automatically discarded when a thread exits\&. However, garbage collection is triggered by allocation activity, so it is possible for a thread that stops allocating/deallocating to retain its cache indefinitely, in which case the developer may find manual flushing useful\&.
.RE
.PP
-thread\&.prof\&.name (\fBconst char *\fR) r\- or \-w [\fB\-\-enable\-prof\fR]
+"thread\&.prof\&.name" (\fBconst char *\fR) r\- or \-w [\fB\-\-enable\-prof\fR]
.RS 4
-Get/set the descriptive name associated with the calling thread in memory profile dumps\&. An internal copy of the name string is created, so the input string need not be maintained after this interface completes execution\&. The output string of this interface should be copied for non\-ephemeral uses, because multiple implementation details can cause asynchronous string deallocation\&. Furthermore, each invocation of this interface can only read or write; simultaneous read/write is not supported due to string lifetime limitations\&. The name string must be nil\-terminated and comprised only of characters in the sets recognized by
+Get/set the descriptive name associated with the calling thread in memory profile dumps\&. An internal copy of the name string is created, so the input string need not be maintained after this interface completes execution\&. The output string of this interface should be copied for non\-ephemeral uses, because multiple implementation details can cause asynchronous string deallocation\&. Furthermore, each invocation of this interface can only read or write; simultaneous read/write is not supported due to string lifetime limitations\&. The name string must be nil\-terminated and comprised only of characters in the sets recognized by
\fBisgraph\fR(3)
and
\fBisblank\fR(3)\&.
.RE
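.PP
Setting the name amounts to writing a pointer to a nil\-terminated string, as in the following sketch (the label is hypothetical, and <jemalloc/jemalloc\&.h> is assumed to be included):
.sp
.if n \{\
.RS 4
.\}
.nf
const char *name = "worker";
mallctl("thread.prof.name", NULL, NULL, &name, sizeof(name));
.fi
.if n \{\
.RE
.\}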
.PP
-thread\&.prof\&.active (\fBbool\fR) rw [\fB\-\-enable\-prof\fR]
+"thread\&.prof\&.active" (\fBbool\fR) rw [\fB\-\-enable\-prof\fR]
.RS 4
Control whether sampling is currently active for the calling thread\&. This is an activation mechanism in addition to
-prof\&.active; both must be active for the calling thread to sample\&. This flag is enabled by default\&.
+"prof\&.active"; both must be active for the calling thread to sample\&. This flag is enabled by default\&.
.RE
.PP
-tcache\&.create (\fBunsigned\fR) r\- [\fB\-\-enable\-tcache\fR]
+"tcache\&.create" (\fBunsigned\fR) r\- [\fB\-\-enable\-tcache\fR]
.RS 4
Create an explicit thread\-specific cache (tcache) and return an identifier that can be passed to the
\fBMALLOCX_TCACHE(\fR\fB\fItc\fR\fR\fB)\fR
macro to explicitly use the specified cache rather than the automatically managed one that is used by default\&. Each explicit cache can be used by only one thread at a time; the application must ensure that this constraint holds\&.
.RE
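.PP
For illustration, creating an explicit tcache and routing a single allocation through it might look like the following sketch (the request size is arbitrary):
.sp
.if n \{\
.RS 4
.\}
.nf
#include <jemalloc/jemalloc.h>

unsigned tc;
size_t sz = sizeof(tc);
if (mallctl("tcache.create", &tc, &sz, NULL, 0) == 0) {
        void *p = mallocx(4096, MALLOCX_TCACHE(tc));
        /* Use p from this thread only, then free via the same cache. */
        dallocx(p, MALLOCX_TCACHE(tc));
}
.fi
.if n \{\
.RE
.\}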
.PP
-tcache\&.flush (\fBunsigned\fR) \-w [\fB\-\-enable\-tcache\fR]
+"tcache\&.flush" (\fBunsigned\fR) \-w [\fB\-\-enable\-tcache\fR]
.RS 4
Flush the specified thread\-specific cache (tcache)\&. The same considerations apply to this interface as to
-thread\&.tcache\&.flush, except that the tcache will never be automatically discarded\&.
+"thread\&.tcache\&.flush", except that the tcache will never be automatically be discarded\&.
.RE
.PP
-tcache\&.destroy (\fBunsigned\fR) \-w [\fB\-\-enable\-tcache\fR]
+"tcache\&.destroy" (\fBunsigned\fR) \-w [\fB\-\-enable\-tcache\fR]
.RS 4
Flush the specified thread\-specific cache (tcache) and make the identifier available for use during a future tcache creation\&.
.RE
.PP
-arena\&.<i>\&.purge (\fBvoid\fR) \-\-
+"arena\&.<i>\&.purge" (\fBvoid\fR) \-\-
.RS 4
-Purge all unused dirty pages for arena <i>, or for all arenas if <i> equals
-arenas\&.narenas\&.
-.RE
-.PP
-arena\&.<i>\&.decay (\fBvoid\fR) \-\-
-.RS 4
-Trigger decay\-based purging of unused dirty pages for arena <i>, or for all arenas if <i> equals
-arenas\&.narenas\&. The proportion of unused dirty pages to be purged depends on the current time; see
-opt\&.decay_time
-for details\&.
+Purge unused dirty pages for arena <i>, or for all arenas if <i> equals
+"arenas\&.narenas"\&.
.RE
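.PP
Because the arena index is embedded in the name, a caller typically formats the string at run time\&. The sketch below purges all arenas by using the
"arenas\&.narenas"
value as <i>; the buffer size is arbitrary:
.sp
.if n \{\
.RS 4
.\}
.nf
#include <stdio.h>
#include <jemalloc/jemalloc.h>

unsigned narenas;
size_t sz = sizeof(narenas);
char name[64];
if (mallctl("arenas.narenas", &narenas, &sz, NULL, 0) == 0) {
        snprintf(name, sizeof(name), "arena.%u.purge", narenas);
        mallctl(name, NULL, NULL, NULL, 0);
}
.fi
.if n \{\
.RE
.\}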
.PP
-arena\&.<i>\&.reset (\fBvoid\fR) \-\-
-.RS 4
-Discard all of the arena\*(Aqs extant allocations\&. This interface can only be used with arenas created via
-arenas\&.extend\&. None of the arena\*(Aqs discarded/cached allocations may accessed afterward\&. As part of this requirement, all thread caches which were used to allocate/deallocate in conjunction with the arena must be flushed beforehand\&. This interface cannot be used if running inside Valgrind, nor if the
-quarantine
-size is non\-zero\&.
-.RE
-.PP
-arena\&.<i>\&.dss (\fBconst char *\fR) rw
+"arena\&.<i>\&.dss" (\fBconst char *\fR) rw
.RS 4
Set the precedence of dss allocation as related to mmap allocation for arena <i>, or for all arenas if <i> equals
-arenas\&.narenas\&. See
-opt\&.dss
+"arenas\&.narenas"\&. See
+"opt\&.dss"
for supported settings\&.
.RE
.PP
-arena\&.<i>\&.lg_dirty_mult (\fBssize_t\fR) rw
+"arena\&.<i>\&.lg_dirty_mult" (\fBssize_t\fR) rw
.RS 4
Current per\-arena minimum ratio (log base 2) of active to dirty pages for arena <i>\&. Each time this interface is set and the ratio is increased, pages are synchronously purged as necessary to impose the new ratio\&. See
-opt\&.lg_dirty_mult
-for additional information\&.
-.RE
-.PP
-arena\&.<i>\&.decay_time (\fBssize_t\fR) rw
-.RS 4
-Current per\-arena approximate time in seconds from the creation of a set of unused dirty pages until an equivalent set of unused dirty pages is purged and/or reused\&. Each time this interface is set, all currently unused dirty pages are considered to have fully decayed, which causes immediate purging of all unused dirty pages unless the decay time is set to \-1 (i\&.e\&. purging disabled)\&. See
-opt\&.decay_time
+"opt\&.lg_dirty_mult"
for additional information\&.
.RE
.PP
-arena\&.<i>\&.chunk_hooks (\fBchunk_hooks_t\fR) rw
+"arena\&.<i>\&.chunk_hooks" (\fBchunk_hooks_t\fR) rw
.RS 4
Get or set the chunk management hook functions for arena <i>\&. The functions must be capable of operating on all extant chunks associated with arena <i>, usually by passing unknown chunks to the replaced functions\&. In practice, it is feasible to control allocation for arenas created via
-arenas\&.extend
+"arenas\&.extend"
such that all chunks originate from an application\-supplied chunk allocator (by setting custom chunk hook functions just after arena creation), but the automatically created arenas may have already created chunks prior to the application having an opportunity to take over chunk allocation\&.
.sp
.if n \{\
@@ -1249,7 +1149,7 @@ is not
on success or
\fBNULL\fR
on error\&. Committed memory may be committed in absolute terms as on a system that does not overcommit, or in implicit terms as on a system that overcommits and satisfies physical memory needs on demand via soft page faults\&. Note that replacing the default chunk allocation function makes the arena\*(Aqs
-arena\&.<i>\&.dss
+"arena\&.<i>\&.dss"
setting irrelevant\&.
.HP \w'typedef\ bool\ (chunk_dalloc_t)('u
.BI "typedef bool (chunk_dalloc_t)(void\ *" "chunk" ", size_t\ " "size" ", bool\ " "committed" ", unsigned\ " "arena_ind" ");"
@@ -1396,504 +1296,407 @@ into one contiguous chunk, operating on
\fIarena_ind\fR, returning false upon success\&. If the function returns true, this indicates that the chunks remain distinct mappings and therefore should continue to be operated on independently\&.
.RE
.PP
-arenas\&.narenas (\fBunsigned\fR) r\-
+"arenas\&.narenas" (\fBunsigned\fR) r\-
.RS 4
Current limit on number of arenas\&.
.RE
.PP
-arenas\&.initialized (\fBbool *\fR) r\-
+"arenas\&.initialized" (\fBbool *\fR) r\-
.RS 4
An array of
-arenas\&.narenas
+"arenas\&.narenas"
booleans\&. Each boolean indicates whether the corresponding arena is initialized\&.
.RE
.PP
-arenas\&.lg_dirty_mult (\fBssize_t\fR) rw
+"arenas\&.lg_dirty_mult" (\fBssize_t\fR) rw
.RS 4
Current default per\-arena minimum ratio (log base 2) of active to dirty pages, used to initialize
-arena\&.<i>\&.lg_dirty_mult
+"arena\&.<i>\&.lg_dirty_mult"
during arena creation\&. See
-opt\&.lg_dirty_mult
+"opt\&.lg_dirty_mult"
for additional information\&.
.RE
.PP
-arenas\&.decay_time (\fBssize_t\fR) rw
-.RS 4
-Current default per\-arena approximate time in seconds from the creation of a set of unused dirty pages until an equivalent set of unused dirty pages is purged and/or reused, used to initialize
-arena\&.<i>\&.decay_time
-during arena creation\&. See
-opt\&.decay_time
-for additional information\&.
-.RE
-.PP
-arenas\&.quantum (\fBsize_t\fR) r\-
+"arenas\&.quantum" (\fBsize_t\fR) r\-
.RS 4
Quantum size\&.
.RE
.PP
-arenas\&.page (\fBsize_t\fR) r\-
+"arenas\&.page" (\fBsize_t\fR) r\-
.RS 4
Page size\&.
.RE
.PP
-arenas\&.tcache_max (\fBsize_t\fR) r\- [\fB\-\-enable\-tcache\fR]
+"arenas\&.tcache_max" (\fBsize_t\fR) r\- [\fB\-\-enable\-tcache\fR]
.RS 4
Maximum thread\-cached size class\&.
.RE
.PP
-arenas\&.nbins (\fBunsigned\fR) r\-
+"arenas\&.nbins" (\fBunsigned\fR) r\-
.RS 4
Number of bin size classes\&.
.RE
.PP
-arenas\&.nhbins (\fBunsigned\fR) r\- [\fB\-\-enable\-tcache\fR]
+"arenas\&.nhbins" (\fBunsigned\fR) r\- [\fB\-\-enable\-tcache\fR]
.RS 4
Total number of thread cache bin size classes\&.
.RE
.PP
-arenas\&.bin\&.<i>\&.size (\fBsize_t\fR) r\-
+"arenas\&.bin\&.<i>\&.size" (\fBsize_t\fR) r\-
.RS 4
Maximum size supported by size class\&.
.RE
.PP
-arenas\&.bin\&.<i>\&.nregs (\fBuint32_t\fR) r\-
+"arenas\&.bin\&.<i>\&.nregs" (\fBuint32_t\fR) r\-
.RS 4
Number of regions per page run\&.
.RE
.PP
-arenas\&.bin\&.<i>\&.run_size (\fBsize_t\fR) r\-
+"arenas\&.bin\&.<i>\&.run_size" (\fBsize_t\fR) r\-
.RS 4
Number of bytes per page run\&.
.RE
.PP
-arenas\&.nlruns (\fBunsigned\fR) r\-
+"arenas\&.nlruns" (\fBunsigned\fR) r\-
.RS 4
Total number of large size classes\&.
.RE
.PP
-arenas\&.lrun\&.<i>\&.size (\fBsize_t\fR) r\-
+"arenas\&.lrun\&.<i>\&.size" (\fBsize_t\fR) r\-
.RS 4
Maximum size supported by this large size class\&.
.RE
.PP
-arenas\&.nhchunks (\fBunsigned\fR) r\-
+"arenas\&.nhchunks" (\fBunsigned\fR) r\-
.RS 4
Total number of huge size classes\&.
.RE
.PP
-arenas\&.hchunk\&.<i>\&.size (\fBsize_t\fR) r\-
+"arenas\&.hchunk\&.<i>\&.size" (\fBsize_t\fR) r\-
.RS 4
Maximum size supported by this huge size class\&.
.RE
.PP
-arenas\&.extend (\fBunsigned\fR) r\-
+"arenas\&.extend" (\fBunsigned\fR) r\-
.RS 4
Extend the array of arenas by appending a new arena, and returning the new arena index\&.
.RE
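.PP
A sketch of creating an arena and, as one possible follow\-up, binding the calling thread to it via
"thread\&.arena":
.sp
.if n \{\
.RS 4
.\}
.nf
#include <jemalloc/jemalloc.h>

unsigned ind;
size_t sz = sizeof(ind);
if (mallctl("arenas.extend", &ind, &sz, NULL, 0) == 0) {
        /* Route this thread's subsequent allocations to the new arena. */
        mallctl("thread.arena", NULL, NULL, &ind, sizeof(ind));
}
.fi
.if n \{\
.RE
.\}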
.PP
-prof\&.thread_active_init (\fBbool\fR) rw [\fB\-\-enable\-prof\fR]
+"prof\&.thread_active_init" (\fBbool\fR) rw [\fB\-\-enable\-prof\fR]
.RS 4
Control the initial setting for
-thread\&.prof\&.active
+"thread\&.prof\&.active"
in newly created threads\&. See the
-opt\&.prof_thread_active_init
+"opt\&.prof_thread_active_init"
option for additional information\&.
.RE
.PP
-prof\&.active (\fBbool\fR) rw [\fB\-\-enable\-prof\fR]
+"prof\&.active" (\fBbool\fR) rw [\fB\-\-enable\-prof\fR]
.RS 4
Control whether sampling is currently active\&. See the
-opt\&.prof_active
+"opt\&.prof_active"
option for additional information, as well as the interrelated
-thread\&.prof\&.active
+"thread\&.prof\&.active"
mallctl\&.
.RE
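.PP
Toggling sampling at run time reduces to writing a boolean; for example, to pause it process\-wide:
.sp
.if n \{\
.RS 4
.\}
.nf
#include <stdbool.h>
#include <jemalloc/jemalloc.h>

bool active = false;
mallctl("prof.active", NULL, NULL, &active, sizeof(active));
.fi
.if n \{\
.RE
.\}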
.PP
-prof\&.dump (\fBconst char *\fR) \-w [\fB\-\-enable\-prof\fR]
+"prof\&.dump" (\fBconst char *\fR) \-w [\fB\-\-enable\-prof\fR]
.RS 4
Dump a memory profile to the specified file, or if NULL is specified, to a file according to the pattern
<prefix>\&.<pid>\&.<seq>\&.m<mseq>\&.heap, where
<prefix>
is controlled by the
-opt\&.prof_prefix
+"opt\&.prof_prefix"
option\&.
.RE
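.PP
Dumping to an explicit file is a write of a string pointer; the path below is hypothetical, and <jemalloc/jemalloc\&.h> is assumed to be included:
.sp
.if n \{\
.RS 4
.\}
.nf
const char *fname = "app.heap";
mallctl("prof.dump", NULL, NULL, &fname, sizeof(fname));
.fi
.if n \{\
.RE
.\}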
.PP
-prof\&.gdump (\fBbool\fR) rw [\fB\-\-enable\-prof\fR]
+"prof\&.gdump" (\fBbool\fR) rw [\fB\-\-enable\-prof\fR]
.RS 4
When enabled, trigger a memory profile dump every time the total virtual memory exceeds the previous maximum\&. Profiles are dumped to files named according to the pattern
<prefix>\&.<pid>\&.<seq>\&.u<useq>\&.heap, where
<prefix>
is controlled by the
-opt\&.prof_prefix
+"opt\&.prof_prefix"
option\&.
.RE
.PP
-prof\&.reset (\fBsize_t\fR) \-w [\fB\-\-enable\-prof\fR]
+"prof\&.reset" (\fBsize_t\fR) \-w [\fB\-\-enable\-prof\fR]
.RS 4
Reset all memory profile statistics, and optionally update the sample rate (see
-opt\&.lg_prof_sample
+"opt\&.lg_prof_sample"
and
-prof\&.lg_sample)\&.
+"prof\&.lg_sample")\&.
.RE
.PP
-prof\&.lg_sample (\fBsize_t\fR) r\- [\fB\-\-enable\-prof\fR]
+"prof\&.lg_sample" (\fBsize_t\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
Get the current sample rate (see
-opt\&.lg_prof_sample)\&.
+"opt\&.lg_prof_sample")\&.
.RE
.PP
-prof\&.interval (\fBuint64_t\fR) r\- [\fB\-\-enable\-prof\fR]
+"prof\&.interval" (\fBuint64_t\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
-Average number of bytes allocated between interval\-based profile dumps\&. See the
-opt\&.lg_prof_interval
+Average number of bytes allocated between interval\-based profile dumps\&. See the
+"opt\&.lg_prof_interval"
option for additional information\&.
.RE
.PP
-stats\&.cactive (\fBsize_t *\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.cactive" (\fBsize_t *\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Pointer to a counter that contains an approximate count of the current number of bytes in active pages\&. The estimate may be high, but never low, because each arena rounds up when computing its contribution to the counter\&. Note that the
-epoch
+"epoch"
mallctl has no bearing on this counter\&. Furthermore, counter consistency is maintained via atomic operations, so it is necessary to use an atomic operation in order to guarantee a consistent read when dereferencing the pointer\&.
.RE
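.PP
A sketch of fetching the counter pointer; as noted above, a plain dereference is only an approximate snapshot, and an atomic load should be used where a consistent value is required:
.sp
.if n \{\
.RS 4
.\}
.nf
#include <jemalloc/jemalloc.h>

size_t *cactive;
size_t sz = sizeof(cactive);
if (mallctl("stats.cactive", &cactive, &sz, NULL, 0) == 0) {
        /* Approximate snapshot only. */
        size_t approx_active = *cactive;
}
.fi
.if n \{\
.RE
.\}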
.PP
-stats\&.allocated (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Total number of bytes allocated by the application\&.
.RE
.PP
-stats\&.active (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.active" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Total number of bytes in active pages allocated by the application\&. This is a multiple of the page size, and greater than or equal to
-stats\&.allocated\&. This does not include
-stats\&.arenas\&.<i>\&.pdirty, nor pages entirely devoted to allocator metadata\&.
+"stats\&.allocated"\&. This does not include
+"stats\&.arenas\&.<i>\&.pdirty", nor pages entirely devoted to allocator metadata\&.
.RE
.PP
-stats\&.metadata (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.metadata" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Total number of bytes dedicated to metadata, which comprise base allocations used for bootstrap\-sensitive internal allocator data structures, arena chunk headers (see
-stats\&.arenas\&.<i>\&.metadata\&.mapped), and internal allocations (see
-stats\&.arenas\&.<i>\&.metadata\&.allocated)\&.
+"stats\&.arenas\&.<i>\&.metadata\&.mapped"), and internal allocations (see
+"stats\&.arenas\&.<i>\&.metadata\&.allocated")\&.
.RE
.PP
-stats\&.resident (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.resident" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Maximum number of bytes in physically resident data pages mapped by the allocator, comprising all pages dedicated to allocator metadata, pages backing active allocations, and unused dirty pages\&. This is a maximum rather than precise because pages may not actually be physically resident if they correspond to demand\-zeroed virtual memory that has not yet been touched\&. This is a multiple of the page size, and is larger than
-stats\&.active\&.
+"stats\&.active"\&.
.RE
.PP
-stats\&.mapped (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.mapped" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Total number of bytes in active chunks mapped by the allocator\&. This is a multiple of the chunk size, and is larger than
-stats\&.active\&. This does not include inactive chunks, even those that contain unused dirty pages, which means that there is no strict ordering between this and
-stats\&.resident\&.
-.RE
-.PP
-stats\&.retained (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
-.RS 4
-Total number of bytes in virtual memory mappings that were retained rather than being returned to the operating system via e\&.g\&.
-\fBmunmap\fR(2)\&. Retained virtual memory is typically untouched, decommitted, or purged, so it has no strongly associated physical memory (see
-chunk hooks
-for details)\&. Retained memory is excluded from mapped memory statistics, e\&.g\&.
-stats\&.mapped\&.
+"stats\&.active"\&. This does not include inactive chunks, even those that contain unused dirty pages, which means that there is no strict ordering between this and
+"stats\&.resident"\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.dss (\fBconst char *\fR) r\-
+"stats\&.arenas\&.<i>\&.dss" (\fBconst char *\fR) r\-
.RS 4
dss (\fBsbrk\fR(2)) allocation precedence as related to
\fBmmap\fR(2)
allocation\&. See
-opt\&.dss
+"opt\&.dss"
for details\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.lg_dirty_mult (\fBssize_t\fR) r\-
+"stats\&.arenas\&.<i>\&.lg_dirty_mult" (\fBssize_t\fR) r\-
.RS 4
Minimum ratio (log base 2) of active to dirty pages\&. See
-opt\&.lg_dirty_mult
+"opt\&.lg_dirty_mult"
for details\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.decay_time (\fBssize_t\fR) r\-
-.RS 4
-Approximate time in seconds from the creation of a set of unused dirty pages until an equivalent set of unused dirty pages is purged and/or reused\&. See
-opt\&.decay_time
-for details\&.
-.RE
-.PP
-stats\&.arenas\&.<i>\&.nthreads (\fBunsigned\fR) r\-
+"stats\&.arenas\&.<i>\&.nthreads" (\fBunsigned\fR) r\-
.RS 4
Number of threads currently assigned to arena\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.pactive (\fBsize_t\fR) r\-
+"stats\&.arenas\&.<i>\&.pactive" (\fBsize_t\fR) r\-
.RS 4
Number of pages in active runs\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.pdirty (\fBsize_t\fR) r\-
+"stats\&.arenas\&.<i>\&.pdirty" (\fBsize_t\fR) r\-
.RS 4
Number of pages within unused runs that are potentially dirty, and for which
-madvise\fI\&.\&.\&.\fR \fI\fBMADV_DONTNEED\fR\fR
+\fBmadvise\fR\fB\fI\&.\&.\&.\fR\fR\fB \fR\fB\fI\fBMADV_DONTNEED\fR\fR\fR
or similar has not been called\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.mapped (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.mapped" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Number of mapped bytes\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.retained (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
-.RS 4
-Number of retained bytes\&. See
-stats\&.retained
-for details\&.
-.RE
-.PP
-stats\&.arenas\&.<i>\&.metadata\&.mapped (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.metadata\&.mapped" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Number of mapped bytes in arena chunk headers, which track the states of the non\-metadata pages\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.metadata\&.allocated (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.metadata\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Number of bytes dedicated to internal allocations\&. Internal allocations differ from application\-originated allocations in that they are for internal use, and that they are omitted from heap profiles\&. This statistic is reported separately from
-stats\&.metadata
+"stats\&.metadata"
and
-stats\&.arenas\&.<i>\&.metadata\&.mapped
+"stats\&.arenas\&.<i>\&.metadata\&.mapped"
because it overlaps with e\&.g\&. the
-stats\&.allocated
+"stats\&.allocated"
and
-stats\&.active
+"stats\&.active"
statistics, whereas the other metadata statistics do not\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.npurge (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.npurge" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Number of dirty page purge sweeps performed\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.nmadvise (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.nmadvise" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Number of
-madvise\fI\&.\&.\&.\fR \fI\fBMADV_DONTNEED\fR\fR
+\fBmadvise\fR\fB\fI\&.\&.\&.\fR\fR\fB \fR\fB\fI\fBMADV_DONTNEED\fR\fR\fR
or similar calls made to purge dirty pages\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.purged (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.purged" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Number of pages purged\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.small\&.allocated (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.small\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Number of bytes currently allocated by small objects\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.small\&.nmalloc (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.small\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of allocation requests served by small bins\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.small\&.ndalloc (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.small\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of small objects returned to bins\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.small\&.nrequests (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.small\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of small allocation requests\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.large\&.allocated (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.large\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Number of bytes currently allocated by large objects\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.large\&.nmalloc (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.large\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of large allocation requests served directly by the arena\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.large\&.ndalloc (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.large\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of large deallocation requests served directly by the arena\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.large\&.nrequests (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.large\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of large allocation requests\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.huge\&.allocated (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.huge\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Number of bytes currently allocated by huge objects\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.huge\&.nmalloc (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.huge\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of huge allocation requests served directly by the arena\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.huge\&.ndalloc (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.huge\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of huge deallocation requests served directly by the arena\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.huge\&.nrequests (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.huge\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of huge allocation requests\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.bins\&.<j>\&.nmalloc (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of allocations served by bin\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.bins\&.<j>\&.ndalloc (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.bins\&.<j>\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of allocations returned to bin\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.bins\&.<j>\&.nrequests (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of allocation requests\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.bins\&.<j>\&.curregs (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.bins\&.<j>\&.curregs" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Current number of regions for this size class\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.bins\&.<j>\&.nfills (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR \fB\-\-enable\-tcache\fR]
+"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nfills" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR \fB\-\-enable\-tcache\fR]
.RS 4
Cumulative number of tcache fills\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.bins\&.<j>\&.nflushes (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR \fB\-\-enable\-tcache\fR]
+"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nflushes" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR \fB\-\-enable\-tcache\fR]
.RS 4
Cumulative number of tcache flushes\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.bins\&.<j>\&.nruns (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nruns" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of runs created\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.bins\&.<j>\&.nreruns (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nreruns" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of times the current run from which to allocate changed\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.bins\&.<j>\&.curruns (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.bins\&.<j>\&.curruns" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Current number of runs\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.lruns\&.<j>\&.nmalloc (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.lruns\&.<j>\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of allocation requests for this size class served directly by the arena\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.lruns\&.<j>\&.ndalloc (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.lruns\&.<j>\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of deallocation requests for this size class served directly by the arena\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.lruns\&.<j>\&.nrequests (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.lruns\&.<j>\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of allocation requests for this size class\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.lruns\&.<j>\&.curruns (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.lruns\&.<j>\&.curruns" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Current number of runs for this size class\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.hchunks\&.<j>\&.nmalloc (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.hchunks\&.<j>\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of allocation requests for this size class served directly by the arena\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.hchunks\&.<j>\&.ndalloc (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.hchunks\&.<j>\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of deallocation requests for this size class served directly by the arena\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.hchunks\&.<j>\&.nrequests (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.hchunks\&.<j>\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Cumulative number of allocation requests for this size class\&.
.RE
.PP
-stats\&.arenas\&.<i>\&.hchunks\&.<j>\&.curhchunks (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+"stats\&.arenas\&.<i>\&.hchunks\&.<j>\&.curhchunks" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Current number of huge allocations for this size class\&.
.RE
-.SH "HEAP PROFILE FORMAT"
-.PP
-Although the heap profiling functionality was originally designed to be compatible with the
-\fBpprof\fR
-command that is developed as part of the
-\m[blue]\fBgperftools package\fR\m[]\&\s-2\u[4]\d\s+2, the addition of per thread heap profiling functionality required a different heap profile format\&. The
-\fBjeprof\fR
-command is derived from
-\fBpprof\fR, with enhancements to support the heap profile format described here\&.
-.PP
-In the following hypothetical heap profile,
-\fB[\&.\&.\&.]\fR
-indicates elision for the sake of compactness\&.
-.sp
-.if n \{\
-.RS 4
-.\}
-.nf
-heap_v2/524288
- t*: 28106: 56637512 [0: 0]
- [\&.\&.\&.]
- t3: 352: 16777344 [0: 0]
- [\&.\&.\&.]
- t99: 17754: 29341640 [0: 0]
- [\&.\&.\&.]
-@ 0x5f86da8 0x5f5a1dc [\&.\&.\&.] 0x29e4d4e 0xa200316 0xabb2988 [\&.\&.\&.]
- t*: 13: 6688 [0: 0]
- t3: 12: 6496 [0: ]
- t99: 1: 192 [0: 0]
-[\&.\&.\&.]
-
-MAPPED_LIBRARIES:
-[\&.\&.\&.]
-.fi
-.if n \{\
-.RE
-.\}
-.sp
-The following matches the above heap profile, but most tokens are replaced with
-\fB<description>\fR
-to indicate descriptions of the corresponding fields\&.
-.sp
-.if n \{\
-.RS 4
-.\}
-.nf
-<heap_profile_format_version>/<mean_sample_interval>
- <aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
- [\&.\&.\&.]
- <thread_3_aggregate>: <curobjs>: <curbytes>[<cumobjs>: <cumbytes>]
- [\&.\&.\&.]
- <thread_99_aggregate>: <curobjs>: <curbytes>[<cumobjs>: <cumbytes>]
- [\&.\&.\&.]
-@ <top_frame> <frame> [\&.\&.\&.] <frame> <frame> <frame> [\&.\&.\&.]
- <backtrace_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
- <backtrace_thread_3>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
- <backtrace_thread_99>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
-[\&.\&.\&.]
-
-MAPPED_LIBRARIES:
-</proc/<pid>/maps>
-.fi
-.if n \{\
-.RE
-.\}
.SH "DEBUGGING MALLOC PROBLEMS"
.PP
When debugging, it is a good idea to configure/build jemalloc with the
@@ -1902,16 +1705,14 @@ and
\fB\-\-enable\-fill\fR
options, and recompile the program with suitable options and symbols for debugger support\&. When so configured, jemalloc incorporates a wide variety of run\-time assertions that catch application errors such as double\-free, write\-after\-free, etc\&.
.PP
-Programs often accidentally depend on
-\(lquninitialized\(rq
-memory actually being filled with zero bytes\&. Junk filling (see the
-opt\&.junk
+Programs often accidentally depend on \(lquninitialized\(rq memory actually being filled with zero bytes\&. Junk filling (see the
+"opt\&.junk"
option) tends to expose such bugs in the form of obviously incorrect results and/or coredumps\&. Conversely, zero filling (see the
-opt\&.zero
+"opt\&.zero"
option) eliminates the symptoms of such bugs\&. Between these two options, it is usually possible to quickly detect, diagnose, and eliminate such bugs\&.
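.PP
One way to bake such a setting into a debugging build is the
malloc_conf
symbol mentioned elsewhere in this page; the option chosen below is illustrative only:
.sp
.if n \{\
.RS 4
.\}
.nf
/* Enable junk filling by default; "zero:true" is the complementary choice. */
const char *malloc_conf = "junk:true";
.fi
.if n \{\
.RE
.\}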
.PP
This implementation does not provide much detail about the problems it detects, because the performance impact for storing such information would be prohibitive\&. However, jemalloc does integrate with the most excellent
-\m[blue]\fBValgrind\fR\m[]\&\s-2\u[3]\d\s+2
+\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2
tool if the
\fB\-\-enable\-valgrind\fR
configuration option is enabled\&.
@@ -1919,7 +1720,7 @@ configuration option is enabled\&.
.PP
If any of the memory allocation/deallocation functions detect an error or warning condition, a message will be printed to file descriptor
\fBSTDERR_FILENO\fR\&. Errors will result in the process dumping core\&. If the
-opt\&.abort
+"opt\&.abort"
option is set, most warnings are treated as errors\&.
.PP
The
@@ -1927,23 +1728,22 @@ The
variable allows the programmer to override the function which emits the text strings forming the errors and warnings if for some reason the
\fBSTDERR_FILENO\fR
file descriptor is not suitable for this\&.
-malloc_message()
+\fBmalloc_message\fR\fB\fR
takes the
\fIcbopaque\fR
pointer argument that is
\fBNULL\fR
unless overridden by the arguments in a call to
-malloc_stats_print(), followed by a string pointer\&. Please note that doing anything which tries to allocate memory in this function is likely to result in a crash or deadlock\&.
+\fBmalloc_stats_print\fR\fB\fR, followed by a string pointer\&. Please note that doing anything which tries to allocate memory in this function is likely to result in a crash or deadlock\&.
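.PP
Assuming jemalloc was built without a symbol prefix, an override might be sketched as follows (the function names are hypothetical); note that the callback writes directly to a file descriptor rather than allocating:
.sp
.if n \{\
.RS 4
.\}
.nf
#include <string.h>
#include <unistd.h>
#include <jemalloc/jemalloc.h>

static void
my_write_cb(void *cbopaque, const char *s)
{
        /* Must not allocate; emit the message verbatim. */
        (void)cbopaque;
        (void)write(STDERR_FILENO, s, strlen(s));
}

void
install_write_cb(void)
{
        malloc_message = my_write_cb;
}
.fi
.if n \{\
.RE
.\}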
.PP
-All messages are prefixed by
-\(lq<jemalloc>: \(rq\&.
+All messages are prefixed by \(lq<jemalloc>:\(rq\&.
.SH "RETURN VALUES"
.SS "Standard API"
.PP
The
-malloc()
+\fBmalloc\fR\fB\fR
and
-calloc()
+\fBcalloc\fR\fB\fR
functions return a pointer to the allocated memory if successful; otherwise a
\fBNULL\fR
pointer is returned and
@@ -1952,9 +1752,9 @@ is set to
ENOMEM\&.
.PP
The
-posix_memalign()
+\fBposix_memalign\fR\fB\fR
function returns the value 0 if successful; otherwise it returns an error value\&. The
-posix_memalign()
+\fBposix_memalign\fR\fB\fR
function will fail if:
.PP
EINVAL
@@ -1971,13 +1771,13 @@ Memory allocation error\&.
.RE
.PP
The
-aligned_alloc()
+\fBaligned_alloc\fR\fB\fR
function returns a pointer to the allocated memory if successful; otherwise a
\fBNULL\fR
pointer is returned and
\fIerrno\fR
is set\&. The
-aligned_alloc()
+\fBaligned_alloc\fR\fB\fR
function will fail if:
.PP
EINVAL
@@ -1993,7 +1793,7 @@ Memory allocation error\&.
.RE
.PP
The
-realloc()
+\fBrealloc\fR\fB\fR
function returns a pointer, possibly identical to
\fIptr\fR, to the allocated memory if successful; otherwise a
\fBNULL\fR
@@ -2002,44 +1802,44 @@ pointer is returned, and
is set to
ENOMEM
if the error was the result of an allocation failure\&. The
-realloc()
+\fBrealloc\fR\fB\fR
function always leaves the original buffer intact when an error occurs\&.
.PP
The
-free()
+\fBfree\fR\fB\fR
function returns no value\&.
.SS "Non\-standard API"
.PP
The
-mallocx()
+\fBmallocx\fR\fB\fR
and
-rallocx()
+\fBrallocx\fR\fB\fR
functions return a pointer to the allocated memory if successful; otherwise a
\fBNULL\fR
pointer is returned to indicate insufficient contiguous memory was available to service the allocation request\&.
.PP
The
-xallocx()
+\fBxallocx\fR\fB\fR
function returns the real size of the resulting resized allocation pointed to by
\fIptr\fR, which is a value less than
\fIsize\fR
if the allocation could not be adequately grown in place\&.
.PP
The
-sallocx()
+\fBsallocx\fR\fB\fR
function returns the real size of the allocation pointed to by
\fIptr\fR\&.
.PP
The
-nallocx()
+\fBnallocx\fR\fB\fR
function returns the real size that would result from a successful equivalent
-mallocx()
+\fBmallocx\fR\fB\fR
function call, or zero if insufficient memory is available to perform the size computation\&.
.PP
The
-mallctl(),
-mallctlnametomib(), and
-mallctlbymib()
+\fBmallctl\fR\fB\fR,
+\fBmallctlnametomib\fR\fB\fR, and
+\fBmallctlbymib\fR\fB\fR
functions return 0 on success; otherwise they return an error value\&. The functions will fail if:
.PP
EINVAL
@@ -2074,12 +1874,12 @@ A memory allocation failure occurred\&.
EFAULT
.RS 4
An interface with side effects failed in some way not directly related to
-mallctl*()
+\fBmallctl*\fR\fB\fR
read/write processing\&.
.RE
.PP
The
-malloc_usable_size()
+\fBmalloc_usable_size\fR\fB\fR
function returns the usable size of the allocation pointed to by
\fIptr\fR\&.
.SH "ENVIRONMENT"
@@ -2129,14 +1929,14 @@ malloc_conf = "lg_chunk:24";
.SH "STANDARDS"
.PP
The
-malloc(),
-calloc(),
-realloc(), and
-free()
+\fBmalloc\fR\fB\fR,
+\fBcalloc\fR\fB\fR,
+\fBrealloc\fR\fB\fR, and
+\fBfree\fR\fB\fR
functions conform to ISO/IEC 9899:1990 (\(lqISO C90\(rq)\&.
.PP
The
-posix_memalign()
+\fBposix_memalign\fR\fB\fR
function conforms to IEEE Std 1003\&.1\-2001 (\(lqPOSIX\&.1\(rq)\&.
.SH "AUTHOR"
.PP
@@ -2147,19 +1947,14 @@ function conforms to IEEE Std 1003\&.1\-2001 (\(lqPOSIX\&.1\(rq)\&.
.IP " 1." 4
jemalloc website
.RS 4
-\%http://jemalloc.net/
+\%http://www.canonware.com/jemalloc/
.RE
.IP " 2." 4
-JSON format
-.RS 4
-\%http://www.json.org/
-.RE
-.IP " 3." 4
Valgrind
.RS 4
\%http://valgrind.org/
.RE
-.IP " 4." 4
+.IP " 3." 4
gperftools package
.RS 4
\%http://code.google.com/p/gperftools/
diff --git a/deps/jemalloc/doc/jemalloc.html b/deps/jemalloc/doc/jemalloc.html
index db2504f6e..7b8e2be8c 100644
--- a/deps/jemalloc/doc/jemalloc.html
+++ b/deps/jemalloc/doc/jemalloc.html
@@ -1,29 +1,28 @@
-<?xml version="1.0" encoding="utf-8"?>
-<html><head><title>JEMALLOC</title><meta name="generator" content="DocBook XSL Stylesheets V1.79.1"/></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="refentry"><a name="idm45291902356496"/><div class="titlepage"/><div class="refnamediv"><h2>Name</h2><p>jemalloc — general purpose memory allocation functions</p></div><div class="refsect1"><a name="library"/><h2>LIBRARY</h2><p>This manual describes jemalloc 4.4.0-0-gf1f76357313e7dcad7262f17a48ff0a2e005fcdc. More information
- can be found at the <a class="ulink" href="http://jemalloc.net/" target="_top">jemalloc website</a>.</p></div><div class="refsynopsisdiv"><h2>SYNOPSIS</h2><div class="funcsynopsis"><pre class="funcsynopsisinfo">#include &lt;<code class="filename">jemalloc/jemalloc.h</code>&gt;</pre><div class="refsect2"><a name="idm45291899537440"/><h3>Standard API</h3><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">malloc</b>(</code></td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">calloc</b>(</code></td><td>size_t <var class="pdparam">number</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">posix_memalign</b>(</code></td><td>void **<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">alignment</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">aligned_alloc</b>(</code></td><td>size_t <var class="pdparam">alignment</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">realloc</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void <b class="fsfunc">free</b>(</code></td><td>void *<var class="pdparam">ptr</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div><div class="refsect2"><a name="idm45291903783248"/><h3>Non-standard API</h3><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">mallocx</b>(</code></td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">rallocx</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; 
cellpadding: 0;"><tr><td><code class="funcdef">size_t <b class="fsfunc">xallocx</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">extra</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">size_t <b class="fsfunc">sallocx</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void <b class="fsfunc">dallocx</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void <b class="fsfunc">sdallocx</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">size_t <b class="fsfunc">nallocx</b>(</code></td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">mallctl</b>(</code></td><td>const char *<var class="pdparam">name</var>, </td></tr><tr><td> </td><td>void *<var class="pdparam">oldp</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">oldlenp</var>, </td></tr><tr><td> </td><td>void *<var class="pdparam">newp</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">newlen</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">mallctlnametomib</b>(</code></td><td>const char *<var class="pdparam">name</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">mibp</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">miblenp</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">mallctlbymib</b>(</code></td><td>const size_t *<var class="pdparam">mib</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">miblen</var>, </td></tr><tr><td> </td><td>void *<var class="pdparam">oldp</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">oldlenp</var>, </td></tr><tr><td> </td><td>void *<var class="pdparam">newp</var>, 
</td></tr><tr><td> </td><td>size_t <var class="pdparam">newlen</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void <b class="fsfunc">malloc_stats_print</b>(</code></td><td>void <var class="pdparam">(*write_cb)</var>
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>JEMALLOC</title><meta name="generator" content="DocBook XSL Stylesheets V1.78.1"></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="refentry"><a name="idp45223136"></a><div class="titlepage"></div><div class="refnamediv"><h2>Name</h2><p>jemalloc &#8212; general purpose memory allocation functions</p></div><div class="refsect1"><a name="library"></a><h2>LIBRARY</h2><p>This manual describes jemalloc 4.0.3-0-ge9192eacf8935e29fc62fddc2701f7942b1cc02c. More information
+ can be found at the <a class="ulink" href="http://www.canonware.com/jemalloc/" target="_top">jemalloc website</a>.</p></div><div class="refsynopsisdiv"><h2>SYNOPSIS</h2><div class="funcsynopsis"><pre class="funcsynopsisinfo">#include &lt;<code class="filename">jemalloc/jemalloc.h</code>&gt;</pre><div class="refsect2"><a name="idp44244480"></a><h3>Standard API</h3><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">malloc</b>(</code></td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">calloc</b>(</code></td><td>size_t <var class="pdparam">number</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">posix_memalign</b>(</code></td><td>void **<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">alignment</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">aligned_alloc</b>(</code></td><td>size_t <var class="pdparam">alignment</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">realloc</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void <b class="fsfunc">free</b>(</code></td><td>void *<var class="pdparam">ptr</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div><div class="refsect2"><a name="idp46062768"></a><h3>Non-standard API</h3><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">mallocx</b>(</code></td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void *<b class="fsfunc">rallocx</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 
0; cellpadding: 0;"><tr><td><code class="funcdef">size_t <b class="fsfunc">xallocx</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">extra</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">size_t <b class="fsfunc">sallocx</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void <b class="fsfunc">dallocx</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void <b class="fsfunc">sdallocx</b>(</code></td><td>void *<var class="pdparam">ptr</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">size_t <b class="fsfunc">nallocx</b>(</code></td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>int <var class="pdparam">flags</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">mallctl</b>(</code></td><td>const char *<var class="pdparam">name</var>, </td></tr><tr><td> </td><td>void *<var class="pdparam">oldp</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">oldlenp</var>, </td></tr><tr><td> </td><td>void *<var class="pdparam">newp</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">newlen</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">mallctlnametomib</b>(</code></td><td>const char *<var class="pdparam">name</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">mibp</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">miblenp</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">int <b class="fsfunc">mallctlbymib</b>(</code></td><td>const size_t *<var class="pdparam">mib</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">miblen</var>, </td></tr><tr><td> </td><td>void *<var class="pdparam">oldp</var>, </td></tr><tr><td> </td><td>size_t *<var class="pdparam">oldlenp</var>, </td></tr><tr><td> </td><td>void *<var class="pdparam">newp</var>, 
</td></tr><tr><td> </td><td>size_t <var class="pdparam">newlen</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void <b class="fsfunc">malloc_stats_print</b>(</code></td><td>void <var class="pdparam">(*write_cb)</var>
<code>(</code>void *, const char *<code>)</code>
- , </td></tr><tr><td> </td><td>void *<var class="pdparam">cbopaque</var>, </td></tr><tr><td> </td><td>const char *<var class="pdparam">opts</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">size_t <b class="fsfunc">malloc_usable_size</b>(</code></td><td>const void *<var class="pdparam">ptr</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void <b class="fsfunc">(*malloc_message)</b>(</code></td><td>void *<var class="pdparam">cbopaque</var>, </td></tr><tr><td> </td><td>const char *<var class="pdparam">s</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><p><span class="type">const char *</span><code class="varname">malloc_conf</code>;</p></div></div></div><div class="refsect1"><a name="description"/><h2>DESCRIPTION</h2><div class="refsect2"><a name="idm45291898401136"/><h3>Standard API</h3><p>The <code class="function">malloc()</code> function allocates
+ , </td></tr><tr><td> </td><td>void *<var class="pdparam">cbopaque</var>, </td></tr><tr><td> </td><td>const char *<var class="pdparam">opts</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">size_t <b class="fsfunc">malloc_usable_size</b>(</code></td><td>const void *<var class="pdparam">ptr</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">void <b class="fsfunc">(*malloc_message)</b>(</code></td><td>void *<var class="pdparam">cbopaque</var>, </td></tr><tr><td> </td><td>const char *<var class="pdparam">s</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div><p><span class="type">const char *</span><code class="varname">malloc_conf</code>;</p></div></div></div><div class="refsect1"><a name="description"></a><h2>DESCRIPTION</h2><div class="refsect2"><a name="idp46115952"></a><h3>Standard API</h3><p>The <code class="function">malloc</code>(<em class="parameter"><code></code></em>) function allocates
<em class="parameter"><code>size</code></em> bytes of uninitialized memory. The allocated
space is suitably aligned (after possible pointer coercion) for storage
- of any type of object.</p><p>The <code class="function">calloc()</code> function allocates
+ of any type of object.</p><p>The <code class="function">calloc</code>(<em class="parameter"><code></code></em>) function allocates
space for <em class="parameter"><code>number</code></em> objects, each
<em class="parameter"><code>size</code></em> bytes in length. The result is identical to
- calling <code class="function">malloc()</code> with an argument of
+ calling <code class="function">malloc</code>(<em class="parameter"><code></code></em>) with an argument of
<em class="parameter"><code>number</code></em> * <em class="parameter"><code>size</code></em>, with the
exception that the allocated memory is explicitly initialized to zero
- bytes.</p><p>The <code class="function">posix_memalign()</code> function
+ bytes.</p><p>The <code class="function">posix_memalign</code>(<em class="parameter"><code></code></em>) function
allocates <em class="parameter"><code>size</code></em> bytes of memory such that the
allocation's base address is a multiple of
<em class="parameter"><code>alignment</code></em>, and returns the allocation in the value
pointed to by <em class="parameter"><code>ptr</code></em>. The requested
<em class="parameter"><code>alignment</code></em> must be a power of 2 at least as large as
- <code class="code">sizeof(<span class="type">void *</span>)</code>.</p><p>The <code class="function">aligned_alloc()</code> function
+ <code class="code">sizeof(<span class="type">void *</span>)</code>.</p><p>The <code class="function">aligned_alloc</code>(<em class="parameter"><code></code></em>) function
allocates <em class="parameter"><code>size</code></em> bytes of memory such that the
allocation's base address is a multiple of
<em class="parameter"><code>alignment</code></em>. The requested
<em class="parameter"><code>alignment</code></em> must be a power of 2. Behavior is
undefined if <em class="parameter"><code>size</code></em> is not an integral multiple of
- <em class="parameter"><code>alignment</code></em>.</p><p>The <code class="function">realloc()</code> function changes the
+ <em class="parameter"><code>alignment</code></em>.</p><p>The <code class="function">realloc</code>(<em class="parameter"><code></code></em>) function changes the
size of the previously allocated memory referenced by
<em class="parameter"><code>ptr</code></em> to <em class="parameter"><code>size</code></em> bytes. The
contents of the memory are unchanged up to the lesser of the new and old
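For illustration only (an assumed fragment, not part of the manual text in this diff), the alignment interface described above can be exercised as follows, with the 4096-byte alignment and 1024-byte size chosen arbitrarily and the header from the synopsis assumed to be included:
<pre class="programlisting">
void *buf = NULL;

/* Base address will be a multiple of 4096, a power of 2 &gt;= sizeof(void *). */
if (posix_memalign(&amp;buf, 4096, 1024) == 0) {
	/* ... use buf ... */
	free(buf);
}</pre>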
@@ -31,65 +30,69 @@
portion of the memory are undefined. Upon success, the memory referenced
by <em class="parameter"><code>ptr</code></em> is freed and a pointer to the newly
allocated memory is returned. Note that
- <code class="function">realloc()</code> may move the memory allocation,
+ <code class="function">realloc</code>(<em class="parameter"><code></code></em>) may move the memory allocation,
resulting in a different return value than <em class="parameter"><code>ptr</code></em>.
If <em class="parameter"><code>ptr</code></em> is <code class="constant">NULL</code>, the
- <code class="function">realloc()</code> function behaves identically to
- <code class="function">malloc()</code> for the specified size.</p><p>The <code class="function">free()</code> function causes the
+ <code class="function">realloc</code>(<em class="parameter"><code></code></em>) function behaves identically to
+ <code class="function">malloc</code>(<em class="parameter"><code></code></em>) for the specified size.</p><p>The <code class="function">free</code>(<em class="parameter"><code></code></em>) function causes the
allocated memory referenced by <em class="parameter"><code>ptr</code></em> to be made
available for future allocations. If <em class="parameter"><code>ptr</code></em> is
- <code class="constant">NULL</code>, no action occurs.</p></div><div class="refsect2"><a name="idm45291898376160"/><h3>Non-standard API</h3><p>The <code class="function">mallocx()</code>,
- <code class="function">rallocx()</code>,
- <code class="function">xallocx()</code>,
- <code class="function">sallocx()</code>,
- <code class="function">dallocx()</code>,
- <code class="function">sdallocx()</code>, and
- <code class="function">nallocx()</code> functions all have a
+ <code class="constant">NULL</code>, no action occurs.</p></div><div class="refsect2"><a name="idp46144704"></a><h3>Non-standard API</h3><p>The <code class="function">mallocx</code>(<em class="parameter"><code></code></em>),
+ <code class="function">rallocx</code>(<em class="parameter"><code></code></em>),
+ <code class="function">xallocx</code>(<em class="parameter"><code></code></em>),
+ <code class="function">sallocx</code>(<em class="parameter"><code></code></em>),
+ <code class="function">dallocx</code>(<em class="parameter"><code></code></em>),
+ <code class="function">sdallocx</code>(<em class="parameter"><code></code></em>), and
+ <code class="function">nallocx</code>(<em class="parameter"><code></code></em>) functions all have a
<em class="parameter"><code>flags</code></em> argument that can be used to specify
options. The functions only check the options that are contextually
relevant. Use bitwise or (<code class="code">|</code>) operations to
specify one or more of the following:
- </p><div class="variablelist"><dl class="variablelist"><dt><a name="MALLOCX_LG_ALIGN"/><span class="term"><code class="constant">MALLOCX_LG_ALIGN(<em class="parameter"><code>la</code></em>)
+ </p><div class="variablelist"><dl class="variablelist"><dt><a name="MALLOCX_LG_ALIGN"></a><span class="term"><code class="constant">MALLOCX_LG_ALIGN(<em class="parameter"><code>la</code></em>)
</code></span></dt><dd><p>Align the memory allocation to start at an address
that is a multiple of <code class="code">(1 &lt;&lt;
<em class="parameter"><code>la</code></em>)</code>. This macro does not validate
that <em class="parameter"><code>la</code></em> is within the valid
- range.</p></dd><dt><a name="MALLOCX_ALIGN"/><span class="term"><code class="constant">MALLOCX_ALIGN(<em class="parameter"><code>a</code></em>)
+ range.</p></dd><dt><a name="MALLOCX_ALIGN"></a><span class="term"><code class="constant">MALLOCX_ALIGN(<em class="parameter"><code>a</code></em>)
</code></span></dt><dd><p>Align the memory allocation to start at an address
that is a multiple of <em class="parameter"><code>a</code></em>, where
<em class="parameter"><code>a</code></em> is a power of two. This macro does not
validate that <em class="parameter"><code>a</code></em> is a power of 2.
- </p></dd><dt><a name="MALLOCX_ZERO"/><span class="term"><code class="constant">MALLOCX_ZERO</code></span></dt><dd><p>Initialize newly allocated memory to contain zero
+ </p></dd><dt><a name="MALLOCX_ZERO"></a><span class="term"><code class="constant">MALLOCX_ZERO</code></span></dt><dd><p>Initialize newly allocated memory to contain zero
bytes. In the growing reallocation case, the real size prior to
reallocation defines the boundary between untouched bytes and those
that are initialized to contain zero bytes. If this macro is
- absent, newly allocated memory is uninitialized.</p></dd><dt><a name="MALLOCX_TCACHE"/><span class="term"><code class="constant">MALLOCX_TCACHE(<em class="parameter"><code>tc</code></em>)
+ absent, newly allocated memory is uninitialized.</p></dd><dt><a name="MALLOCX_TCACHE"></a><span class="term"><code class="constant">MALLOCX_TCACHE(<em class="parameter"><code>tc</code></em>)
</code></span></dt><dd><p>Use the thread-specific cache (tcache) specified by
the identifier <em class="parameter"><code>tc</code></em>, which must have been
- acquired via the <a class="link" href="#tcache.create"><quote><code class="mallctl">tcache.create</code></quote></a>
+ acquired via the <a class="link" href="#tcache.create">
+ "<code class="mallctl">tcache.create</code>"
+ </a>
mallctl. This macro does not validate that
<em class="parameter"><code>tc</code></em> specifies a valid
- identifier.</p></dd><dt><a name="MALLOC_TCACHE_NONE"/><span class="term"><code class="constant">MALLOCX_TCACHE_NONE</code></span></dt><dd><p>Do not use a thread-specific cache (tcache). Unless
+ identifier.</p></dd><dt><a name="MALLOC_TCACHE_NONE"></a><span class="term"><code class="constant">MALLOCX_TCACHE_NONE</code></span></dt><dd><p>Do not use a thread-specific cache (tcache). Unless
<code class="constant">MALLOCX_TCACHE(<em class="parameter"><code>tc</code></em>)</code> or
<code class="constant">MALLOCX_TCACHE_NONE</code> is specified, an
automatically managed tcache will be used under many circumstances.
This macro cannot be used in the same <em class="parameter"><code>flags</code></em>
argument as
- <code class="constant">MALLOCX_TCACHE(<em class="parameter"><code>tc</code></em>)</code>.</p></dd><dt><a name="MALLOCX_ARENA"/><span class="term"><code class="constant">MALLOCX_ARENA(<em class="parameter"><code>a</code></em>)
+ <code class="constant">MALLOCX_TCACHE(<em class="parameter"><code>tc</code></em>)</code>.</p></dd><dt><a name="MALLOCX_ARENA"></a><span class="term"><code class="constant">MALLOCX_ARENA(<em class="parameter"><code>a</code></em>)
</code></span></dt><dd><p>Use the arena specified by the index
<em class="parameter"><code>a</code></em>. This macro has no effect for regions that
were allocated via an arena other than the one specified. This
macro does not validate that <em class="parameter"><code>a</code></em> specifies an
arena index in the valid range.</p></dd></dl></div><p>
- </p><p>The <code class="function">mallocx()</code> function allocates at
+ </p><p>The <code class="function">mallocx</code>(<em class="parameter"><code></code></em>) function allocates at
least <em class="parameter"><code>size</code></em> bytes of memory, and returns a pointer
to the base address of the allocation. Behavior is undefined if
- <em class="parameter"><code>size</code></em> is <code class="constant">0</code>.</p><p>The <code class="function">rallocx()</code> function resizes the
+ <em class="parameter"><code>size</code></em> is <code class="constant">0</code>, or if request size
+ overflows due to size class and/or alignment constraints.</p><p>The <code class="function">rallocx</code>(<em class="parameter"><code></code></em>) function resizes the
allocation at <em class="parameter"><code>ptr</code></em> to be at least
<em class="parameter"><code>size</code></em> bytes, and returns a pointer to the base
address of the resulting allocation, which may or may not have moved from
its original location. Behavior is undefined if
- <em class="parameter"><code>size</code></em> is <code class="constant">0</code>.</p><p>The <code class="function">xallocx()</code> function resizes the
+ <em class="parameter"><code>size</code></em> is <code class="constant">0</code>, or if request size
+ overflows due to size class and/or alignment constraints.</p><p>The <code class="function">xallocx</code>(<em class="parameter"><code></code></em>) function resizes the
allocation at <em class="parameter"><code>ptr</code></em> in place to be at least
<em class="parameter"><code>size</code></em> bytes, and returns the real size of the
allocation. If <em class="parameter"><code>extra</code></em> is non-zero, an attempt is
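As an assumed illustration of combining the flags documented above (not text taken from the manual), the fragment below requests zeroed, 64-byte-aligned memory with mallocx and resizes it with rallocx:
<pre class="programlisting">
void *p, *q;

/* 256 zero-filled bytes aligned to a 64-byte boundary. */
p = mallocx(256, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
if (p != NULL) {
	/* Grow to at least 512 bytes; the allocation may move. */
	q = rallocx(p, 512, MALLOCX_ALIGN(64));
	if (q != NULL)
		p = q;
	dallocx(p, 0);
}</pre>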
@@ -98,24 +101,24 @@
the extra byte(s) will not by itself result in failure to resize.
Behavior is undefined if <em class="parameter"><code>size</code></em> is
<code class="constant">0</code>, or if <code class="code">(<em class="parameter"><code>size</code></em> + <em class="parameter"><code>extra</code></em>
- &gt; <code class="constant">SIZE_T_MAX</code>)</code>.</p><p>The <code class="function">sallocx()</code> function returns the
- real size of the allocation at <em class="parameter"><code>ptr</code></em>.</p><p>The <code class="function">dallocx()</code> function causes the
+ &gt; <code class="constant">SIZE_T_MAX</code>)</code>.</p><p>The <code class="function">sallocx</code>(<em class="parameter"><code></code></em>) function returns the
+ real size of the allocation at <em class="parameter"><code>ptr</code></em>.</p><p>The <code class="function">dallocx</code>(<em class="parameter"><code></code></em>) function causes the
memory referenced by <em class="parameter"><code>ptr</code></em> to be made available for
- future allocations.</p><p>The <code class="function">sdallocx()</code> function is an
- extension of <code class="function">dallocx()</code> with a
+ future allocations.</p><p>The <code class="function">sdallocx</code>(<em class="parameter"><code></code></em>) function is an
+ extension of <code class="function">dallocx</code>(<em class="parameter"><code></code></em>) with a
<em class="parameter"><code>size</code></em> parameter to allow the caller to pass in the
allocation size as an optimization. The minimum valid input size is the
original requested size of the allocation, and the maximum valid input
size is the corresponding value returned by
- <code class="function">nallocx()</code> or
- <code class="function">sallocx()</code>.</p><p>The <code class="function">nallocx()</code> function allocates no
+ <code class="function">nallocx</code>(<em class="parameter"><code></code></em>) or
+ <code class="function">sallocx</code>(<em class="parameter"><code></code></em>).</p><p>The <code class="function">nallocx</code>(<em class="parameter"><code></code></em>) function allocates no
memory, but it performs the same size computation as the
- <code class="function">mallocx()</code> function, and returns the real
+ <code class="function">mallocx</code>(<em class="parameter"><code></code></em>) function, and returns the real
size of the allocation that would result from the equivalent
- <code class="function">mallocx()</code> function call, or
- <code class="constant">0</code> if the inputs exceed the maximum supported size
- class and/or alignment. Behavior is undefined if
- <em class="parameter"><code>size</code></em> is <code class="constant">0</code>.</p><p>The <code class="function">mallctl()</code> function provides a
+ <code class="function">mallocx</code>(<em class="parameter"><code></code></em>) function call. Behavior is
+ undefined if <em class="parameter"><code>size</code></em> is <code class="constant">0</code>, or if
+ request size overflows due to size class and/or alignment
+ constraints.</p><p>The <code class="function">mallctl</code>(<em class="parameter"><code></code></em>) function provides a
general interface for introspecting the memory allocator, as well as
setting modifiable parameters and triggering actions. The
period-separated <em class="parameter"><code>name</code></em> argument specifies a
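A short assumed sketch of the sized deallocation described above; any size from the original request up to the value reported by nallocx is a valid hint for sdallocx:
<pre class="programlisting">
size_t req = 100;	/* arbitrary example size */
void *p = mallocx(req, 0);

if (p != NULL)
	sdallocx(p, nallocx(req, 0), 0);</pre>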
@@ -127,12 +130,12 @@
write a value, pass a pointer to the value via
<em class="parameter"><code>newp</code></em>, and its length via
<em class="parameter"><code>newlen</code></em>; otherwise pass <code class="constant">NULL</code>
- and <code class="constant">0</code>.</p><p>The <code class="function">mallctlnametomib()</code> function
+ and <code class="constant">0</code>.</p><p>The <code class="function">mallctlnametomib</code>(<em class="parameter"><code></code></em>) function
provides a way to avoid repeated name lookups for applications that
repeatedly query the same portion of the namespace, by translating a name
- to a <span class="quote">“<span class="quote">Management Information Base</span>”</span> (MIB) that can be passed
- repeatedly to <code class="function">mallctlbymib()</code>. Upon
- successful return from <code class="function">mallctlnametomib()</code>,
+ to a &#8220;Management Information Base&#8221; (MIB) that can be passed
+ repeatedly to <code class="function">mallctlbymib</code>(<em class="parameter"><code></code></em>). Upon
+ successful return from <code class="function">mallctlnametomib</code>(<em class="parameter"><code></code></em>),
<em class="parameter"><code>mibp</code></em> contains an array of
<em class="parameter"><code>*miblenp</code></em> integers, where
<em class="parameter"><code>*miblenp</code></em> is the lesser of the number of components
@@ -142,7 +145,9 @@
period-separated name components, which results in a partial MIB that can
be used as the basis for constructing a complete MIB. For name
components that are integers (e.g. the 2 in
- <a class="link" href="#arenas.bin.i.size"><quote><code class="mallctl">arenas.bin.2.size</code></quote></a>),
+ <a class="link" href="#arenas.bin.i.size">
+ "<code class="mallctl">arenas.bin.2.size</code>"
+ </a>),
the corresponding MIB component will always be that integer. Therefore,
it is legitimate to construct code like the following: </p><pre class="programlisting">
unsigned nbins, i;
@@ -159,62 +164,65 @@ for (i = 0; i &lt; nbins; i++) {
mib[2] = i;
len = sizeof(bin_size);
- mallctlbymib(mib, miblen, (void *)&amp;bin_size, &amp;len, NULL, 0);
+ mallctlbymib(mib, miblen, &amp;bin_size, &amp;len, NULL, 0);
/* Do something with bin_size... */
-}</pre><p>The <code class="function">malloc_stats_print()</code> function writes
- summary statistics via the <em class="parameter"><code>write_cb</code></em> callback
- function pointer and <em class="parameter"><code>cbopaque</code></em> data passed to
- <em class="parameter"><code>write_cb</code></em>, or <code class="function">malloc_message()</code>
- if <em class="parameter"><code>write_cb</code></em> is <code class="constant">NULL</code>. The
- statistics are presented in human-readable form unless <span class="quote">“<span class="quote">J</span>”</span> is
- specified as a character within the <em class="parameter"><code>opts</code></em> string, in
- which case the statistics are presented in <a class="ulink" href="http://www.json.org/" target="_top">JSON format</a>. This function can be
- called repeatedly. General information that never changes during
- execution can be omitted by specifying <span class="quote">“<span class="quote">g</span>”</span> as a character
+}</pre><p>The <code class="function">malloc_stats_print</code>(<em class="parameter"><code></code></em>) function
+ writes human-readable summary statistics via the
+ <em class="parameter"><code>write_cb</code></em> callback function pointer and
+ <em class="parameter"><code>cbopaque</code></em> data passed to
+ <em class="parameter"><code>write_cb</code></em>, or
+ <code class="function">malloc_message</code>(<em class="parameter"><code></code></em>) if
+ <em class="parameter"><code>write_cb</code></em> is <code class="constant">NULL</code>. This
+ function can be called repeatedly. General information that never
+ changes during execution can be omitted by specifying "g" as a character
within the <em class="parameter"><code>opts</code></em> string. Note that
- <code class="function">malloc_message()</code> uses the
- <code class="function">mallctl*()</code> functions internally, so inconsistent
- statistics can be reported if multiple threads use these functions
- simultaneously. If <code class="option">--enable-stats</code> is specified during
- configuration, <span class="quote">“<span class="quote">m</span>”</span> and <span class="quote">“<span class="quote">a</span>”</span> can be specified to
- omit merged arena and per arena statistics, respectively;
- <span class="quote">“<span class="quote">b</span>”</span>, <span class="quote">“<span class="quote">l</span>”</span>, and <span class="quote">“<span class="quote">h</span>”</span> can be specified
- to omit per size class statistics for bins, large objects, and huge
- objects, respectively. Unrecognized characters are silently ignored.
- Note that thread caching may prevent some statistics from being completely
- up to date, since extra locking would be required to merge counters that
- track thread cache operations.</p><p>The <code class="function">malloc_usable_size()</code> function
+ <code class="function">malloc_message</code>(<em class="parameter"><code></code></em>) uses the
+ <code class="function">mallctl*</code>(<em class="parameter"><code></code></em>) functions internally, so
+ inconsistent statistics can be reported if multiple threads use these
+ functions simultaneously. If <code class="option">--enable-stats</code> is
+ specified during configuration, &#8220;m&#8221; and &#8220;a&#8221; can
+ be specified to omit merged arena and per arena statistics, respectively;
+ &#8220;b&#8221;, &#8220;l&#8221;, and &#8220;h&#8221; can be specified to
+ omit per size class statistics for bins, large objects, and huge objects,
+ respectively. Unrecognized characters are silently ignored. Note that
+ thread caching may prevent some statistics from being completely up to
+ date, since extra locking would be required to merge counters that track
+ thread cache operations.
+ </p><p>The <code class="function">malloc_usable_size</code>(<em class="parameter"><code></code></em>) function
returns the usable size of the allocation pointed to by
<em class="parameter"><code>ptr</code></em>. The return value may be larger than the size
that was requested during allocation. The
- <code class="function">malloc_usable_size()</code> function is not a
- mechanism for in-place <code class="function">realloc()</code>; rather
+ <code class="function">malloc_usable_size</code>(<em class="parameter"><code></code></em>) function is not a
+ mechanism for in-place <code class="function">realloc</code>(<em class="parameter"><code></code></em>); rather
it is provided solely as a tool for introspection purposes. Any
discrepancy between the requested allocation size and the size reported
- by <code class="function">malloc_usable_size()</code> should not be
+ by <code class="function">malloc_usable_size</code>(<em class="parameter"><code></code></em>) should not be
depended on, since such behavior is entirely implementation-dependent.
- </p></div></div><div class="refsect1"><a name="tuning"/><h2>TUNING</h2><p>Once, when the first call is made to one of the memory allocation
+ </p></div></div><div class="refsect1"><a name="tuning"></a><h2>TUNING</h2><p>Once, when the first call is made to one of the memory allocation
routines, the allocator initializes its internals based in part on various
- options that can be specified at compile- or run-time.</p><p>The string specified via <code class="option">--with-malloc-conf</code>, the
- string pointed to by the global variable <code class="varname">malloc_conf</code>, the
- <span class="quote">“<span class="quote">name</span>â€</span> of the file referenced by the symbolic link named
- <code class="filename">/etc/malloc.conf</code>, and the value of the
+ options that can be specified at compile- or run-time.</p><p>The string pointed to by the global variable
+ <code class="varname">malloc_conf</code>, the &#8220;name&#8221; of the file
+ referenced by the symbolic link named <code class="filename">/etc/malloc.conf</code>, and the value of the
environment variable <code class="envar">MALLOC_CONF</code>, will be interpreted, in
that order, from left to right as options. Note that
<code class="varname">malloc_conf</code> may be read before
- <code class="function">main()</code> is entered, so the declaration of
+ <code class="function">main</code>(<em class="parameter"><code></code></em>) is entered, so the declaration of
<code class="varname">malloc_conf</code> should specify an initializer that contains
- the final value to be read by jemalloc. <code class="option">--with-malloc-conf</code>
- and <code class="varname">malloc_conf</code> are compile-time mechanisms, whereas
- <code class="filename">/etc/malloc.conf</code> and
- <code class="envar">MALLOC_CONF</code> can be safely set any time prior to program
- invocation.</p><p>An options string is a comma-separated list of option:value pairs.
- There is one key corresponding to each <a class="link" href="#opt.abort"><quote><code class="mallctl">opt.*</code></quote></a> mallctl (see the <a class="xref" href="#mallctl_namespace" title="MALLCTL NAMESPACE">MALLCTL NAMESPACE</a> section for options
+ the final value to be read by jemalloc. <code class="varname">malloc_conf</code> is
+ a compile-time setting, whereas <code class="filename">/etc/malloc.conf</code> and <code class="envar">MALLOC_CONF</code>
+ can be safely set any time prior to program invocation.</p><p>An options string is a comma-separated list of option:value pairs.
+ There is one key corresponding to each <a class="link" href="#opt.abort">
+ "<code class="mallctl">opt.*</code>"
+ </a> mallctl (see the <a class="xref" href="#mallctl_namespace" title="MALLCTL NAMESPACE">MALLCTL NAMESPACE</a> section for options
documentation). For example, <code class="literal">abort:true,narenas:1</code> sets
- the <a class="link" href="#opt.abort"><quote><code class="mallctl">opt.abort</code></quote></a> and <a class="link" href="#opt.narenas"><quote><code class="mallctl">opt.narenas</code></quote></a> options. Some
+ the <a class="link" href="#opt.abort">
+ "<code class="mallctl">opt.abort</code>"
+ </a> and <a class="link" href="#opt.narenas">
+ "<code class="mallctl">opt.narenas</code>"
+ </a> options. Some
options have boolean values (true/false), others have integer values (base
8, 10, or 16, depending on prefix), and yet others have raw string
- values.</p></div><div class="refsect1"><a name="implementation_notes"/><h2>IMPLEMENTATION NOTES</h2><p>Traditionally, allocators have used
+ values.</p></div><div class="refsect1"><a name="implementation_notes"></a><h2>IMPLEMENTATION NOTES</h2><p>Traditionally, allocators have used
<span class="citerefentry"><span class="refentrytitle">sbrk</span>(2)</span> to obtain memory, which is
suboptimal for several reasons, including race conditions, increased
fragmentation, and artificial limitations on maximum usable memory. If
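An assumed example of the compile-time mechanism described in the TUNING text above; the option values are illustrative only, and the MALLOC_CONF environment variable can equally be set before the program is started:
<pre class="programlisting">
/* Compiled into the application; read before main() is entered. */
const char *malloc_conf = "narenas:1,stats_print:true";</pre>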
@@ -238,18 +246,19 @@ for (i = 0; i &lt; nbins; i++) {
order to make it possible to completely avoid synchronization for most
allocation requests. Such caching allows very fast allocation in the
common case, but it increases memory usage and fragmentation, since a
- bounded number of objects can remain allocated in each thread cache.</p><p>Memory is conceptually broken into equal-sized chunks, where the chunk
- size is a power of two that is greater than the page size. Chunks are
- always aligned to multiples of the chunk size. This alignment makes it
- possible to find metadata for user objects very quickly. User objects are
- broken into three categories according to size: small, large, and huge.
- Multiple small and large objects can reside within a single chunk, whereas
- huge objects each have one or more chunks backing them. Each chunk that
- contains small and/or large objects tracks its contents as runs of
+ bounded number of objects can remain allocated in each thread cache.</p><p>Memory is conceptually broken into equal-sized chunks, where the
+ chunk size is a power of two that is greater than the page size. Chunks
+ are always aligned to multiples of the chunk size. This alignment makes it
+ possible to find metadata for user objects very quickly.</p><p>User objects are broken into three categories according to size:
+ small, large, and huge. Small and large objects are managed entirely by
+ arenas; huge objects are additionally aggregated in a single data structure
+ that is shared by all threads. Huge objects are typically used by
+ applications infrequently enough that this single data structure is not a
+ scalability issue.</p><p>Each chunk that is managed by an arena tracks its contents as runs of
contiguous pages (unused, backing a set of small objects, or backing one
- large object). The combination of chunk alignment and chunk page maps makes
- it possible to determine all metadata regarding small and large allocations
- in constant time.</p><p>Small objects are managed in groups by page runs. Each run maintains
+ large object). The combination of chunk alignment and chunk page maps
+ makes it possible to determine all metadata regarding small and large
+ allocations in constant time.</p><p>Small objects are managed in groups by page runs. Each run maintains
a bitmap to track which regions are in use. Allocation requests that are no
more than half the quantum (8 or 16, depending on architecture) are rounded
up to the nearest power of two that is at least <code class="code">sizeof(<span class="type">double</span>)</code>. All other object size
@@ -257,20 +266,22 @@ for (i = 0; i &lt; nbins; i++) {
classes for each doubling in size, which limits internal fragmentation to
approximately 20% for all but the smallest size classes. Small size classes
are smaller than four times the page size, large size classes are smaller
- than the chunk size (see the <a class="link" href="#opt.lg_chunk"><quote><code class="mallctl">opt.lg_chunk</code></quote></a> option), and
- huge size classes extend from the chunk size up to the largest size class
- that does not exceed <code class="constant">PTRDIFF_MAX</code>.</p><p>Allocations are packed tightly together, which can be an issue for
+ than the chunk size (see the <a class="link" href="#opt.lg_chunk">
+ "<code class="mallctl">opt.lg_chunk</code>"
+ </a> option), and
+ huge size classes extend from the chunk size up to one size class less than
+ the full address space size.</p><p>Allocations are packed tightly together, which can be an issue for
multi-threaded applications. If you need to assure that allocations do not
suffer from cacheline sharing, round your allocation requests up to the
nearest multiple of the cacheline size, or specify cacheline alignment when
- allocating.</p><p>The <code class="function">realloc()</code>,
- <code class="function">rallocx()</code>, and
- <code class="function">xallocx()</code> functions may resize allocations
+ allocating.</p><p>The <code class="function">realloc</code>(<em class="parameter"><code></code></em>),
+ <code class="function">rallocx</code>(<em class="parameter"><code></code></em>), and
+ <code class="function">xallocx</code>(<em class="parameter"><code></code></em>) functions may resize allocations
without moving them under limited circumstances. Unlike the
- <code class="function">*allocx()</code> API, the standard API does not
+ <code class="function">*allocx</code>(<em class="parameter"><code></code></em>) API, the standard API does not
officially round up the usable size of an allocation to the nearest size
class, so technically it is necessary to call
- <code class="function">realloc()</code> to grow e.g. a 9-byte allocation to
+ <code class="function">realloc</code>(<em class="parameter"><code></code></em>) to grow e.g. a 9-byte allocation to
16 bytes, or shrink a 16-byte allocation to 9 bytes. Growth and shrinkage
trivially succeeds in place as long as the pre-size and post-size both round
up to the same size class. No other API guarantees are made regarding
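An assumed fragment showing the in-place resize behavior just described: xallocx never moves the allocation, so the caller checks the returned real size and falls back to a moving resize if growth did not fit:
<pre class="programlisting">
void *p = mallocx(4096, 0);
size_t real;

if (p != NULL) {
	/* Try to grow in place to at least 8192 bytes, with no extra space. */
	real = xallocx(p, 8192, 0, 0);
	if (real &lt; 8192) {
		/* In-place growth failed; rallocx() would be needed and may move p. */
	}
	dallocx(p, 0);
}</pre>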
@@ -278,111 +289,147 @@ for (i = 0; i &lt; nbins; i++) {
and huge allocations in place, as long as the pre-size and post-size are
both large or both huge. In such cases shrinkage always succeeds for large
size classes, but for huge size classes the chunk allocator must support
- splitting (see <a class="link" href="#arena.i.chunk_hooks"><quote><code class="mallctl">arena.&lt;i&gt;.chunk_hooks</code></quote></a>).
+ splitting (see <a class="link" href="#arena.i.chunk_hooks">
+ "<code class="mallctl">arena.&lt;i&gt;.chunk_hooks</code>"
+ </a>).
Growth only succeeds if the trailing memory is currently available, and
additionally for huge size classes the chunk allocator must support
merging.</p><p>Assuming 2 MiB chunks, 4 KiB pages, and a 16-byte quantum on a
- 64-bit system, the size classes in each category are as shown in <a class="xref" href="#size_classes" title="Table 1. Size classes">Table 1</a>.</p><div class="table"><a name="size_classes"/><p class="title"><b>Table 1. Size classes</b></p><div class="table-contents"><table class="table" summary="Size classes" border="1"><colgroup><col align="left" class="c1"/><col align="right" class="c2"/><col align="left" class="c3"/></colgroup><thead><tr><th align="left">Category</th><th align="right">Spacing</th><th align="left">Size</th></tr></thead><tbody><tr><td rowspan="9" align="left">Small</td><td align="right">lg</td><td align="left">[8]</td></tr><tr><td align="right">16</td><td align="left">[16, 32, 48, 64, 80, 96, 112, 128]</td></tr><tr><td align="right">32</td><td align="left">[160, 192, 224, 256]</td></tr><tr><td align="right">64</td><td align="left">[320, 384, 448, 512]</td></tr><tr><td align="right">128</td><td align="left">[640, 768, 896, 1024]</td></tr><tr><td align="right">256</td><td align="left">[1280, 1536, 1792, 2048]</td></tr><tr><td align="right">512</td><td align="left">[2560, 3072, 3584, 4096]</td></tr><tr><td align="right">1 KiB</td><td align="left">[5 KiB, 6 KiB, 7 KiB, 8 KiB]</td></tr><tr><td align="right">2 KiB</td><td align="left">[10 KiB, 12 KiB, 14 KiB]</td></tr><tr><td rowspan="8" align="left">Large</td><td align="right">2 KiB</td><td align="left">[16 KiB]</td></tr><tr><td align="right">4 KiB</td><td align="left">[20 KiB, 24 KiB, 28 KiB, 32 KiB]</td></tr><tr><td align="right">8 KiB</td><td align="left">[40 KiB, 48 KiB, 54 KiB, 64 KiB]</td></tr><tr><td align="right">16 KiB</td><td align="left">[80 KiB, 96 KiB, 112 KiB, 128 KiB]</td></tr><tr><td align="right">32 KiB</td><td align="left">[160 KiB, 192 KiB, 224 KiB, 256 KiB]</td></tr><tr><td align="right">64 KiB</td><td align="left">[320 KiB, 384 KiB, 448 KiB, 512 KiB]</td></tr><tr><td align="right">128 KiB</td><td align="left">[640 KiB, 768 KiB, 896 KiB, 1 MiB]</td></tr><tr><td align="right">256 KiB</td><td align="left">[1280 KiB, 1536 KiB, 1792 KiB]</td></tr><tr><td rowspan="9" align="left">Huge</td><td align="right">256 KiB</td><td align="left">[2 MiB]</td></tr><tr><td align="right">512 KiB</td><td align="left">[2560 KiB, 3 MiB, 3584 KiB, 4 MiB]</td></tr><tr><td align="right">1 MiB</td><td align="left">[5 MiB, 6 MiB, 7 MiB, 8 MiB]</td></tr><tr><td align="right">2 MiB</td><td align="left">[10 MiB, 12 MiB, 14 MiB, 16 MiB]</td></tr><tr><td align="right">4 MiB</td><td align="left">[20 MiB, 24 MiB, 28 MiB, 32 MiB]</td></tr><tr><td align="right">8 MiB</td><td align="left">[40 MiB, 48 MiB, 56 MiB, 64 MiB]</td></tr><tr><td align="right">...</td><td align="left">...</td></tr><tr><td align="right">512 PiB</td><td align="left">[2560 PiB, 3 EiB, 3584 PiB, 4 EiB]</td></tr><tr><td align="right">1 EiB</td><td align="left">[5 EiB, 6 EiB, 7 EiB]</td></tr></tbody></table></div></div><br class="table-break"/></div><div class="refsect1"><a name="mallctl_namespace"/><h2>MALLCTL NAMESPACE</h2><p>The following names are defined in the namespace accessible via the
- <code class="function">mallctl*()</code> functions. Value types are
+ 64-bit system, the size classes in each category are as shown in <a class="xref" href="#size_classes" title="Table 1. Size classes">Table 1</a>.</p><div class="table"><a name="size_classes"></a><p class="title"><b>Table 1. Size classes</b></p><div class="table-contents"><table summary="Size classes" border="1"><colgroup><col align="left" class="c1"><col align="right" class="c2"><col align="left" class="c3"></colgroup><thead><tr><th align="left">Category</th><th align="right">Spacing</th><th align="left">Size</th></tr></thead><tbody><tr><td rowspan="9" align="left">Small</td><td align="right">lg</td><td align="left">[8]</td></tr><tr><td align="right">16</td><td align="left">[16, 32, 48, 64, 80, 96, 112, 128]</td></tr><tr><td align="right">32</td><td align="left">[160, 192, 224, 256]</td></tr><tr><td align="right">64</td><td align="left">[320, 384, 448, 512]</td></tr><tr><td align="right">128</td><td align="left">[640, 768, 896, 1024]</td></tr><tr><td align="right">256</td><td align="left">[1280, 1536, 1792, 2048]</td></tr><tr><td align="right">512</td><td align="left">[2560, 3072, 3584, 4096]</td></tr><tr><td align="right">1 KiB</td><td align="left">[5 KiB, 6 KiB, 7 KiB, 8 KiB]</td></tr><tr><td align="right">2 KiB</td><td align="left">[10 KiB, 12 KiB, 14 KiB]</td></tr><tr><td rowspan="8" align="left">Large</td><td align="right">2 KiB</td><td align="left">[16 KiB]</td></tr><tr><td align="right">4 KiB</td><td align="left">[20 KiB, 24 KiB, 28 KiB, 32 KiB]</td></tr><tr><td align="right">8 KiB</td><td align="left">[40 KiB, 48 KiB, 54 KiB, 64 KiB]</td></tr><tr><td align="right">16 KiB</td><td align="left">[80 KiB, 96 KiB, 112 KiB, 128 KiB]</td></tr><tr><td align="right">32 KiB</td><td align="left">[160 KiB, 192 KiB, 224 KiB, 256 KiB]</td></tr><tr><td align="right">64 KiB</td><td align="left">[320 KiB, 384 KiB, 448 KiB, 512 KiB]</td></tr><tr><td align="right">128 KiB</td><td align="left">[640 KiB, 768 KiB, 896 KiB, 1 MiB]</td></tr><tr><td align="right">256 KiB</td><td align="left">[1280 KiB, 1536 KiB, 1792 KiB]</td></tr><tr><td rowspan="7" align="left">Huge</td><td align="right">256 KiB</td><td align="left">[2 MiB]</td></tr><tr><td align="right">512 KiB</td><td align="left">[2560 KiB, 3 MiB, 3584 KiB, 4 MiB]</td></tr><tr><td align="right">1 MiB</td><td align="left">[5 MiB, 6 MiB, 7 MiB, 8 MiB]</td></tr><tr><td align="right">2 MiB</td><td align="left">[10 MiB, 12 MiB, 14 MiB, 16 MiB]</td></tr><tr><td align="right">4 MiB</td><td align="left">[20 MiB, 24 MiB, 28 MiB, 32 MiB]</td></tr><tr><td align="right">8 MiB</td><td align="left">[40 MiB, 48 MiB, 56 MiB, 64 MiB]</td></tr><tr><td align="right">...</td><td align="left">...</td></tr></tbody></table></div></div><br class="table-break"></div><div class="refsect1"><a name="mallctl_namespace"></a><h2>MALLCTL NAMESPACE</h2><p>The following names are defined in the namespace accessible via the
+ <code class="function">mallctl*</code>(<em class="parameter"><code></code></em>) functions. Value types are
specified in parentheses, their readable/writable statuses are encoded as
<code class="literal">rw</code>, <code class="literal">r-</code>, <code class="literal">-w</code>, or
<code class="literal">--</code>, and required build configuration flags follow, if
any. A name element encoded as <code class="literal">&lt;i&gt;</code> or
<code class="literal">&lt;j&gt;</code> indicates an integer component, where the
integer varies from 0 to some upper value that must be determined via
- introspection. In the case of <quote><code class="mallctl">stats.arenas.&lt;i&gt;.*</code></quote>,
- <code class="literal">&lt;i&gt;</code> equal to <a class="link" href="#arenas.narenas"><quote><code class="mallctl">arenas.narenas</code></quote></a> can be
+ introspection. In the case of
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.*</code>"
+ ,
+ <code class="literal">&lt;i&gt;</code> equal to <a class="link" href="#arenas.narenas">
+ "<code class="mallctl">arenas.narenas</code>"
+ </a> can be
used to access the summation of statistics from all arenas. Take special
- note of the <a class="link" href="#epoch"><quote><code class="mallctl">epoch</code></quote></a> mallctl,
- which controls refreshing of cached dynamic statistics.</p><div class="variablelist"><dl class="variablelist"><dt><a name="version"/><span class="term">
- <quote><code class="mallctl">version</code></quote>
+ note of the <a class="link" href="#epoch">
+ "<code class="mallctl">epoch</code>"
+ </a> mallctl,
+ which controls refreshing of cached dynamic statistics.</p><div class="variablelist"><dl class="variablelist"><dt><a name="version"></a><span class="term">
+
+ "<code class="mallctl">version</code>"
+
(<span class="type">const char *</span>)
<code class="literal">r-</code>
- </span></dt><dd><p>Return the jemalloc version string.</p></dd><dt><a name="epoch"/><span class="term">
- <quote><code class="mallctl">epoch</code></quote>
+ </span></dt><dd><p>Return the jemalloc version string.</p></dd><dt><a name="epoch"></a><span class="term">
+
+ "<code class="mallctl">epoch</code>"
+
(<span class="type">uint64_t</span>)
<code class="literal">rw</code>
</span></dt><dd><p>If a value is passed in, refresh the data from which
- the <code class="function">mallctl*()</code> functions report values,
+ the <code class="function">mallctl*</code>(<em class="parameter"><code></code></em>) functions report values,
and increment the epoch. Return the current epoch. This is useful for
- detecting whether another thread caused a refresh.</p></dd><dt><a name="config.cache_oblivious"/><span class="term">
- <quote><code class="mallctl">config.cache_oblivious</code></quote>
+ detecting whether another thread caused a refresh.</p></dd><dt><a name="config.cache_oblivious"></a><span class="term">
+
+ "<code class="mallctl">config.cache_oblivious</code>"
+
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-cache-oblivious</code> was specified
- during build configuration.</p></dd><dt><a name="config.debug"/><span class="term">
- <quote><code class="mallctl">config.debug</code></quote>
+ during build configuration.</p></dd><dt><a name="config.debug"></a><span class="term">
+
+ "<code class="mallctl">config.debug</code>"
+
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-debug</code> was specified during
- build configuration.</p></dd><dt><a name="config.fill"/><span class="term">
- <quote><code class="mallctl">config.fill</code></quote>
+ build configuration.</p></dd><dt><a name="config.fill"></a><span class="term">
+
+ "<code class="mallctl">config.fill</code>"
+
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-fill</code> was specified during
- build configuration.</p></dd><dt><a name="config.lazy_lock"/><span class="term">
- <quote><code class="mallctl">config.lazy_lock</code></quote>
+ build configuration.</p></dd><dt><a name="config.lazy_lock"></a><span class="term">
+
+ "<code class="mallctl">config.lazy_lock</code>"
+
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-lazy-lock</code> was specified
- during build configuration.</p></dd><dt><a name="config.malloc_conf"/><span class="term">
- <quote><code class="mallctl">config.malloc_conf</code></quote>
- (<span class="type">const char *</span>)
- <code class="literal">r-</code>
- </span></dt><dd><p>Embedded configure-time-specified run-time options
- string, empty unless <code class="option">--with-malloc-conf</code> was specified
- during build configuration.</p></dd><dt><a name="config.munmap"/><span class="term">
- <quote><code class="mallctl">config.munmap</code></quote>
+ during build configuration.</p></dd><dt><a name="config.munmap"></a><span class="term">
+
+ "<code class="mallctl">config.munmap</code>"
+
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-munmap</code> was specified during
- build configuration.</p></dd><dt><a name="config.prof"/><span class="term">
- <quote><code class="mallctl">config.prof</code></quote>
+ build configuration.</p></dd><dt><a name="config.prof"></a><span class="term">
+
+ "<code class="mallctl">config.prof</code>"
+
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-prof</code> was specified during
- build configuration.</p></dd><dt><a name="config.prof_libgcc"/><span class="term">
- <quote><code class="mallctl">config.prof_libgcc</code></quote>
+ build configuration.</p></dd><dt><a name="config.prof_libgcc"></a><span class="term">
+
+ "<code class="mallctl">config.prof_libgcc</code>"
+
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--disable-prof-libgcc</code> was not
- specified during build configuration.</p></dd><dt><a name="config.prof_libunwind"/><span class="term">
- <quote><code class="mallctl">config.prof_libunwind</code></quote>
+ specified during build configuration.</p></dd><dt><a name="config.prof_libunwind"></a><span class="term">
+
+ "<code class="mallctl">config.prof_libunwind</code>"
+
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-prof-libunwind</code> was specified
- during build configuration.</p></dd><dt><a name="config.stats"/><span class="term">
- <quote><code class="mallctl">config.stats</code></quote>
+ during build configuration.</p></dd><dt><a name="config.stats"></a><span class="term">
+
+ "<code class="mallctl">config.stats</code>"
+
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-stats</code> was specified during
- build configuration.</p></dd><dt><a name="config.tcache"/><span class="term">
- <quote><code class="mallctl">config.tcache</code></quote>
+ build configuration.</p></dd><dt><a name="config.tcache"></a><span class="term">
+
+ "<code class="mallctl">config.tcache</code>"
+
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--disable-tcache</code> was not specified
- during build configuration.</p></dd><dt><a name="config.tls"/><span class="term">
- <quote><code class="mallctl">config.tls</code></quote>
+ during build configuration.</p></dd><dt><a name="config.tls"></a><span class="term">
+
+ "<code class="mallctl">config.tls</code>"
+
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--disable-tls</code> was not specified during
- build configuration.</p></dd><dt><a name="config.utrace"/><span class="term">
- <quote><code class="mallctl">config.utrace</code></quote>
+ build configuration.</p></dd><dt><a name="config.utrace"></a><span class="term">
+
+ "<code class="mallctl">config.utrace</code>"
+
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-utrace</code> was specified during
- build configuration.</p></dd><dt><a name="config.valgrind"/><span class="term">
- <quote><code class="mallctl">config.valgrind</code></quote>
+ build configuration.</p></dd><dt><a name="config.valgrind"></a><span class="term">
+
+ "<code class="mallctl">config.valgrind</code>"
+
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-valgrind</code> was specified during
- build configuration.</p></dd><dt><a name="config.xmalloc"/><span class="term">
- <quote><code class="mallctl">config.xmalloc</code></quote>
+ build configuration.</p></dd><dt><a name="config.xmalloc"></a><span class="term">
+
+ "<code class="mallctl">config.xmalloc</code>"
+
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p><code class="option">--enable-xmalloc</code> was specified during
- build configuration.</p></dd><dt><a name="opt.abort"/><span class="term">
- <quote><code class="mallctl">opt.abort</code></quote>
+ build configuration.</p></dd><dt><a name="opt.abort"></a><span class="term">
+
+ "<code class="mallctl">opt.abort</code>"
+
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Abort-on-warning enabled/disabled. If true, most
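As an assumed usage note for the read-only entries listed above, the version string can be fetched through the same mallctl interface:
<pre class="programlisting">
const char *ver;
size_t len = sizeof(ver);

if (mallctl("version", (void *)&amp;ver, &amp;len, NULL, 0) == 0) {
	/* ver now points to the jemalloc version string. */
}</pre>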
@@ -390,42 +437,43 @@ for (i = 0; i &lt; nbins; i++) {
<span class="citerefentry"><span class="refentrytitle">abort</span>(3)</span> in these cases. This option is
disabled by default unless <code class="option">--enable-debug</code> is
specified during configuration, in which case it is enabled by default.
- </p></dd><dt><a name="opt.dss"/><span class="term">
- <quote><code class="mallctl">opt.dss</code></quote>
+ </p></dd><dt><a name="opt.dss"></a><span class="term">
+
+ "<code class="mallctl">opt.dss</code>"
+
(<span class="type">const char *</span>)
<code class="literal">r-</code>
</span></dt><dd><p>dss (<span class="citerefentry"><span class="refentrytitle">sbrk</span>(2)</span>) allocation precedence as
related to <span class="citerefentry"><span class="refentrytitle">mmap</span>(2)</span> allocation. The following
settings are supported if
<span class="citerefentry"><span class="refentrytitle">sbrk</span>(2)</span> is supported by the operating
- system: <span class="quote">“<span class="quote">disabled</span>”</span>, <span class="quote">“<span class="quote">primary</span>”</span>, and
- <span class="quote">“<span class="quote">secondary</span>”</span>; otherwise only <span class="quote">“<span class="quote">disabled</span>”</span> is
- supported. The default is <span class="quote">“<span class="quote">secondary</span>”</span> if
+ system: &#8220;disabled&#8221;, &#8220;primary&#8221;, and
+ &#8220;secondary&#8221;; otherwise only &#8220;disabled&#8221; is
+ supported. The default is &#8220;secondary&#8221; if
<span class="citerefentry"><span class="refentrytitle">sbrk</span>(2)</span> is supported by the operating
- system; <span class="quote">“<span class="quote">disabled</span>”</span> otherwise.
- </p></dd><dt><a name="opt.lg_chunk"/><span class="term">
- <quote><code class="mallctl">opt.lg_chunk</code></quote>
+ system; &#8220;disabled&#8221; otherwise.
+ </p></dd><dt><a name="opt.lg_chunk"></a><span class="term">
+
+ "<code class="mallctl">opt.lg_chunk</code>"
+
(<span class="type">size_t</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Virtual memory chunk size (log base 2). If a chunk
size outside the supported size range is specified, the size is
silently clipped to the minimum/maximum supported size. The default
chunk size is 2 MiB (2^21).
- </p></dd><dt><a name="opt.narenas"/><span class="term">
- <quote><code class="mallctl">opt.narenas</code></quote>
- (<span class="type">unsigned</span>)
+ </p></dd><dt><a name="opt.narenas"></a><span class="term">
+
+ "<code class="mallctl">opt.narenas</code>"
+
+ (<span class="type">size_t</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Maximum number of arenas to use for automatic
multiplexing of threads and arenas. The default is four times the
- number of CPUs, or one if there is a single CPU.</p></dd><dt><a name="opt.purge"/><span class="term">
- <quote><code class="mallctl">opt.purge</code></quote>
- (<span class="type">const char *</span>)
- <code class="literal">r-</code>
- </span></dt><dd><p>Purge mode is “ratio” (default) or
- “decay”. See <a class="link" href="#opt.lg_dirty_mult"><quote><code class="mallctl">opt.lg_dirty_mult</code></quote></a>
- for details of the ratio mode. See <a class="link" href="#opt.decay_time"><quote><code class="mallctl">opt.decay_time</code></quote></a> for
- details of the decay mode.</p></dd><dt><a name="opt.lg_dirty_mult"/><span class="term">
- <quote><code class="mallctl">opt.lg_dirty_mult</code></quote>
+ number of CPUs, or one if there is a single CPU.</p></dd><dt><a name="opt.lg_dirty_mult"></a><span class="term">
+
+ "<code class="mallctl">opt.lg_dirty_mult</code>"
+
(<span class="type">ssize_t</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Per-arena minimum ratio (log base 2) of active to dirty
@@ -436,57 +484,53 @@ for (i = 0; i &lt; nbins; i++) {
provides the kernel with sufficient information to recycle dirty pages
if physical memory becomes scarce and the pages remain unused. The
default minimum ratio is 8:1 (2^3:1); an option value of -1 will
- disable dirty page purging. See <a class="link" href="#arenas.lg_dirty_mult"><quote><code class="mallctl">arenas.lg_dirty_mult</code></quote></a>
- and <a class="link" href="#arena.i.lg_dirty_mult"><quote><code class="mallctl">arena.&lt;i&gt;.lg_dirty_mult</code></quote></a>
- for related dynamic control options.</p></dd><dt><a name="opt.decay_time"/><span class="term">
- <quote><code class="mallctl">opt.decay_time</code></quote>
- (<span class="type">ssize_t</span>)
- <code class="literal">r-</code>
- </span></dt><dd><p>Approximate time in seconds from the creation of a set
- of unused dirty pages until an equivalent set of unused dirty pages is
- purged and/or reused. The pages are incrementally purged according to a
- sigmoidal decay curve that starts and ends with zero purge rate. A
- decay time of 0 causes all unused dirty pages to be purged immediately
- upon creation. A decay time of -1 disables purging. The default decay
- time is 10 seconds. See <a class="link" href="#arenas.decay_time"><quote><code class="mallctl">arenas.decay_time</code></quote></a>
- and <a class="link" href="#arena.i.decay_time"><quote><code class="mallctl">arena.&lt;i&gt;.decay_time</code></quote></a>
- for related dynamic control options.
- </p></dd><dt><a name="opt.stats_print"/><span class="term">
- <quote><code class="mallctl">opt.stats_print</code></quote>
+ disable dirty page purging. See <a class="link" href="#arenas.lg_dirty_mult">
+ "<code class="mallctl">arenas.lg_dirty_mult</code>"
+ </a>
+ and <a class="link" href="#arena.i.lg_dirty_mult">
+ "<code class="mallctl">arena.&lt;i&gt;.lg_dirty_mult</code>"
+ </a>
+ for related dynamic control options.</p></dd><dt><a name="opt.stats_print"></a><span class="term">
+
+ "<code class="mallctl">opt.stats_print</code>"
+
(<span class="type">bool</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Enable/disable statistics printing at exit. If
- enabled, the <code class="function">malloc_stats_print()</code>
+ enabled, the <code class="function">malloc_stats_print</code>(<em class="parameter"><code></code></em>)
function is called at program exit via an
<span class="citerefentry"><span class="refentrytitle">atexit</span>(3)</span> function. If
<code class="option">--enable-stats</code> is specified during configuration, this
has the potential to cause deadlock for a multi-threaded process that
exits while one or more threads are executing in the memory allocation
- functions. Furthermore, <code class="function">atexit()</code> may
+ functions. Furthermore, <code class="function">atexit</code>(<em class="parameter"><code></code></em>) may
allocate memory during application initialization and then deadlock
internally when jemalloc in turn calls
- <code class="function">atexit()</code>, so this option is not
- universally usable (though the application can register its own
- <code class="function">atexit()</code> function with equivalent
+ <code class="function">atexit</code>(<em class="parameter"><code></code></em>), so this option is not
+ universally usable (though the application can register its own
+ <code class="function">atexit</code>(<em class="parameter"><code></code></em>) function with equivalent
functionality). Therefore, this option should only be used with care;
it is primarily intended as a performance tuning aid during application
- development. This option is disabled by default.</p></dd><dt><a name="opt.junk"/><span class="term">
- <quote><code class="mallctl">opt.junk</code></quote>
+ development. This option is disabled by default.</p></dd><dt><a name="opt.junk"></a><span class="term">
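If waiting for the atexit() hook is undesirable, the same report can be produced on demand; a minimal sketch, assuming an unprefixed build:
<pre class="programlisting">
#include <jemalloc/jemalloc.h>

void dump_allocator_stats(void) {
	/* NULL write callback and opaque pointer select the default
	 * output (stderr); NULL opts requests the full report. */
	malloc_stats_print(NULL, NULL, NULL);
}</pre>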
+
+ "<code class="mallctl">opt.junk</code>"
+
(<span class="type">const char *</span>)
<code class="literal">r-</code>
[<code class="option">--enable-fill</code>]
- </span></dt><dd><p>Junk filling. If set to <span class="quote">“<span class="quote">alloc</span>”</span>, each byte
- of uninitialized allocated memory will be initialized to
- <code class="literal">0xa5</code>. If set to <span class="quote">“<span class="quote">free</span>”</span>, all deallocated
- memory will be initialized to <code class="literal">0x5a</code>. If set to
- <span class="quote">“<span class="quote">true</span>”</span>, both allocated and deallocated memory will be
- initialized, and if set to <span class="quote">“<span class="quote">false</span>”</span>, junk filling will be
- disabled entirely. This is intended for debugging and will impact
- performance negatively. This option is <span class="quote">“<span class="quote">false</span>”</span> by default
- unless <code class="option">--enable-debug</code> is specified during
- configuration, in which case it is <span class="quote">“<span class="quote">true</span>”</span> by default unless
- running inside <a class="ulink" href="http://valgrind.org/" target="_top">Valgrind</a>.</p></dd><dt><a name="opt.quarantine"/><span class="term">
- <quote><code class="mallctl">opt.quarantine</code></quote>
+ </span></dt><dd><p>Junk filling. If set to "alloc", each byte of
+ uninitialized allocated memory will be initialized to
+ <code class="literal">0xa5</code>. If set to "free", all deallocated memory will
+ be initialized to <code class="literal">0x5a</code>. If set to "true", both
+ allocated and deallocated memory will be initialized, and if set to
+ "false", junk filling be disabled entirely. This is intended for
+ debugging and will impact performance negatively. This option is
+ "false" by default unless <code class="option">--enable-debug</code> is specified
+ during configuration, in which case it is "true" by default unless
+ running inside <a class="ulink" href="http://valgrind.org/" target="_top">Valgrind</a>.</p></dd><dt><a name="opt.quarantine"></a><span class="term">
+
+ "<code class="mallctl">opt.quarantine</code>"
+
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-fill</code>]
@@ -494,46 +538,58 @@ for (i = 0; i &lt; nbins; i++) {
thread maintains a FIFO object quarantine that stores up to the
specified number of bytes of memory. The quarantined memory is not
freed until it is released from quarantine, though it is immediately
- junk-filled if the <a class="link" href="#opt.junk"><quote><code class="mallctl">opt.junk</code></quote></a> option is
+ junk-filled if the <a class="link" href="#opt.junk">
+ "<code class="mallctl">opt.junk</code>"
+ </a> option is
enabled. This feature is of particular use in combination with <a class="ulink" href="http://valgrind.org/" target="_top">Valgrind</a>, which can detect attempts
to access quarantined objects. This is intended for debugging and will
impact performance negatively. The default quarantine size is 0 unless
running inside Valgrind, in which case the default is 16
- MiB.</p></dd><dt><a name="opt.redzone"/><span class="term">
- <quote><code class="mallctl">opt.redzone</code></quote>
+ MiB.</p></dd><dt><a name="opt.redzone"></a><span class="term">
+
+ "<code class="mallctl">opt.redzone</code>"
+
(<span class="type">bool</span>)
<code class="literal">r-</code>
[<code class="option">--enable-fill</code>]
</span></dt><dd><p>Redzones enabled/disabled. If enabled, small
allocations have redzones before and after them. Furthermore, if the
- <a class="link" href="#opt.junk"><quote><code class="mallctl">opt.junk</code></quote></a> option is
+ <a class="link" href="#opt.junk">
+ "<code class="mallctl">opt.junk</code>"
+ </a> option is
enabled, the redzones are checked for corruption during deallocation.
However, the primary intended purpose of this feature is to be used in
combination with <a class="ulink" href="http://valgrind.org/" target="_top">Valgrind</a>,
which needs redzones in order to do effective buffer overflow/underflow
detection. This option is intended for debugging and will impact
performance negatively. This option is disabled by
- default unless running inside Valgrind.</p></dd><dt><a name="opt.zero"/><span class="term">
- <quote><code class="mallctl">opt.zero</code></quote>
+ default unless running inside Valgrind.</p></dd><dt><a name="opt.zero"></a><span class="term">
+
+ "<code class="mallctl">opt.zero</code>"
+
(<span class="type">bool</span>)
<code class="literal">r-</code>
[<code class="option">--enable-fill</code>]
</span></dt><dd><p>Zero filling enabled/disabled. If enabled, each byte
of uninitialized allocated memory will be initialized to 0. Note that
this initialization only happens once for each byte, so
- <code class="function">realloc()</code> and
- <code class="function">rallocx()</code> calls do not zero memory that
+ <code class="function">realloc</code>(<em class="parameter"><code></code></em>) and
+ <code class="function">rallocx</code>(<em class="parameter"><code></code></em>) calls do not zero memory that
was previously allocated. This is intended for debugging and will
impact performance negatively. This option is disabled by default.
- </p></dd><dt><a name="opt.utrace"/><span class="term">
- <quote><code class="mallctl">opt.utrace</code></quote>
+ </p></dd><dt><a name="opt.utrace"></a><span class="term">
+
+ "<code class="mallctl">opt.utrace</code>"
+
(<span class="type">bool</span>)
<code class="literal">r-</code>
[<code class="option">--enable-utrace</code>]
</span></dt><dd><p>Allocation tracing based on
<span class="citerefentry"><span class="refentrytitle">utrace</span>(2)</span> enabled/disabled. This option
- is disabled by default.</p></dd><dt><a name="opt.xmalloc"/><span class="term">
- <quote><code class="mallctl">opt.xmalloc</code></quote>
+ is disabled by default.</p></dd><dt><a name="opt.xmalloc"></a><span class="term">
+
+ "<code class="mallctl">opt.xmalloc</code>"
+
(<span class="type">bool</span>)
<code class="literal">r-</code>
[<code class="option">--enable-xmalloc</code>]
@@ -546,8 +602,10 @@ for (i = 0; i &lt; nbins; i++) {
including the following in the source code:
</p><pre class="programlisting">
malloc_conf = "xmalloc:true";</pre><p>
- This option is disabled by default.</p></dd><dt><a name="opt.tcache"/><span class="term">
- <quote><code class="mallctl">opt.tcache</code></quote>
+ This option is disabled by default.</p></dd><dt><a name="opt.tcache"></a><span class="term">
+
+ "<code class="mallctl">opt.tcache</code>"
+
(<span class="type">bool</span>)
<code class="literal">r-</code>
[<code class="option">--enable-tcache</code>]
@@ -555,36 +613,55 @@ malloc_conf = "xmalloc:true";</pre><p>
there are multiple threads, each thread uses a tcache for objects up to
a certain size. Thread-specific caching allows many allocations to be
satisfied without performing any thread synchronization, at the cost of
- increased memory use. See the <a class="link" href="#opt.lg_tcache_max"><quote><code class="mallctl">opt.lg_tcache_max</code></quote></a>
+ increased memory use. See the <a class="link" href="#opt.lg_tcache_max">
+ "<code class="mallctl">opt.lg_tcache_max</code>"
+ </a>
option for related tuning information. This option is enabled by
default unless running inside <a class="ulink" href="http://valgrind.org/" target="_top">Valgrind</a>, in which case it is
- forcefully disabled.</p></dd><dt><a name="opt.lg_tcache_max"/><span class="term">
- <quote><code class="mallctl">opt.lg_tcache_max</code></quote>
+ forcefully disabled.</p></dd><dt><a name="opt.lg_tcache_max"></a><span class="term">
+
+ "<code class="mallctl">opt.lg_tcache_max</code>"
+
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-tcache</code>]
</span></dt><dd><p>Maximum size class (log base 2) to cache in the
thread-specific cache (tcache). At a minimum, all small size classes
are cached, and at a maximum all large size classes are cached. The
- default maximum is 32 KiB (2^15).</p></dd><dt><a name="opt.prof"/><span class="term">
- <quote><code class="mallctl">opt.prof</code></quote>
+ default maximum is 32 KiB (2^15).</p></dd><dt><a name="opt.prof"></a><span class="term">
+
+ "<code class="mallctl">opt.prof</code>"
+
(<span class="type">bool</span>)
<code class="literal">r-</code>
[<code class="option">--enable-prof</code>]
</span></dt><dd><p>Memory profiling enabled/disabled. If enabled, profile
- memory allocation activity. See the <a class="link" href="#opt.prof_active"><quote><code class="mallctl">opt.prof_active</code></quote></a>
- option for on-the-fly activation/deactivation. See the <a class="link" href="#opt.lg_prof_sample"><quote><code class="mallctl">opt.lg_prof_sample</code></quote></a>
- option for probabilistic sampling control. See the <a class="link" href="#opt.prof_accum"><quote><code class="mallctl">opt.prof_accum</code></quote></a>
- option for control of cumulative sample reporting. See the <a class="link" href="#opt.lg_prof_interval"><quote><code class="mallctl">opt.lg_prof_interval</code></quote></a>
- option for information on interval-triggered profile dumping, the <a class="link" href="#opt.prof_gdump"><quote><code class="mallctl">opt.prof_gdump</code></quote></a>
+ memory allocation activity. See the <a class="link" href="#opt.prof_active">
+ "<code class="mallctl">opt.prof_active</code>"
+ </a>
+ option for on-the-fly activation/deactivation. See the <a class="link" href="#opt.lg_prof_sample">
+ "<code class="mallctl">opt.lg_prof_sample</code>"
+ </a>
+ option for probabilistic sampling control. See the <a class="link" href="#opt.prof_accum">
+ "<code class="mallctl">opt.prof_accum</code>"
+ </a>
+ option for control of cumulative sample reporting. See the <a class="link" href="#opt.lg_prof_interval">
+ "<code class="mallctl">opt.lg_prof_interval</code>"
+ </a>
+ option for information on interval-triggered profile dumping, the <a class="link" href="#opt.prof_gdump">
+ "<code class="mallctl">opt.prof_gdump</code>"
+ </a>
option for information on high-water-triggered profile dumping, and the
- <a class="link" href="#opt.prof_final"><quote><code class="mallctl">opt.prof_final</code></quote></a>
+ <a class="link" href="#opt.prof_final">
+ "<code class="mallctl">opt.prof_final</code>"
+ </a>
option for final profile dumping. Profile output is compatible with
the <span class="command"><strong>jeprof</strong></span> command, which is based on the
<span class="command"><strong>pprof</strong></span> that is developed as part of the <a class="ulink" href="http://code.google.com/p/gperftools/" target="_top">gperftools
- package</a>. See <a class="link" href="#heap_profile_format" title="HEAP PROFILE FORMAT">HEAP PROFILE
- FORMAT</a> for heap profile format documentation.</p></dd><dt><a name="opt.prof_prefix"/><span class="term">
- <quote><code class="mallctl">opt.prof_prefix</code></quote>
+ package</a>.</p></dd><dt><a name="opt.prof_prefix"></a><span class="term">
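As a sketch, profiling can be requested through the same compile-time mechanism shown for opt.xmalloc above; this assumes a build configured with --enable-prof, and the prefix value is only an example.
<pre class="programlisting">
/* Dump files follow the <prefix>.<pid>.<seq>.*.heap pattern and can be
 * examined with jeprof. */
malloc_conf = "prof:true,prof_prefix:jeprof.out";</pre>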
+
+ "<code class="mallctl">opt.prof_prefix</code>"
+
(<span class="type">const char *</span>)
<code class="literal">r-</code>
[<code class="option">--enable-prof</code>]
@@ -592,26 +669,40 @@ malloc_conf = "xmalloc:true";</pre><p>
set to the empty string, no automatic dumps will occur; this is
primarily useful for disabling the automatic final heap dump (which
also disables leak reporting, if enabled). The default prefix is
- <code class="filename">jeprof</code>.</p></dd><dt><a name="opt.prof_active"/><span class="term">
- <quote><code class="mallctl">opt.prof_active</code></quote>
+ <code class="filename">jeprof</code>.</p></dd><dt><a name="opt.prof_active"></a><span class="term">
+
+ "<code class="mallctl">opt.prof_active</code>"
+
(<span class="type">bool</span>)
<code class="literal">r-</code>
[<code class="option">--enable-prof</code>]
</span></dt><dd><p>Profiling activated/deactivated. This is a secondary
control mechanism that makes it possible to start the application with
- profiling enabled (see the <a class="link" href="#opt.prof"><quote><code class="mallctl">opt.prof</code></quote></a> option) but
+ profiling enabled (see the <a class="link" href="#opt.prof">
+ "<code class="mallctl">opt.prof</code>"
+ </a> option) but
inactive, then toggle profiling at any time during program execution
- with the <a class="link" href="#prof.active"><quote><code class="mallctl">prof.active</code></quote></a> mallctl.
- This option is enabled by default.</p></dd><dt><a name="opt.prof_thread_active_init"/><span class="term">
- <quote><code class="mallctl">opt.prof_thread_active_init</code></quote>
+ with the <a class="link" href="#prof.active">
+ "<code class="mallctl">prof.active</code>"
+ </a> mallctl.
+ This option is enabled by default.</p></dd><dt><a name="opt.prof_thread_active_init"></a><span class="term">
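A minimal sketch of that runtime toggle, assuming --enable-prof and an unprefixed build:
<pre class="programlisting">
#include <stdbool.h>
#include <jemalloc/jemalloc.h>

/* Enable sampling only around the region of interest. */
void set_profiling_active(bool active) {
	mallctl("prof.active", NULL, NULL, &active, sizeof(active));
}</pre>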
+
+ "<code class="mallctl">opt.prof_thread_active_init</code>"
+
(<span class="type">bool</span>)
<code class="literal">r-</code>
[<code class="option">--enable-prof</code>]
- </span></dt><dd><p>Initial setting for <a class="link" href="#thread.prof.active"><quote><code class="mallctl">thread.prof.active</code></quote></a>
+ </span></dt><dd><p>Initial setting for <a class="link" href="#thread.prof.active">
+ "<code class="mallctl">thread.prof.active</code>"
+ </a>
in newly created threads. The initial setting for newly created threads
- can also be changed during execution via the <a class="link" href="#prof.thread_active_init"><quote><code class="mallctl">prof.thread_active_init</code></quote></a>
- mallctl. This option is enabled by default.</p></dd><dt><a name="opt.lg_prof_sample"/><span class="term">
- <quote><code class="mallctl">opt.lg_prof_sample</code></quote>
+ can also be changed during execution via the <a class="link" href="#prof.thread_active_init">
+ "<code class="mallctl">prof.thread_active_init</code>"
+ </a>
+ mallctl. This option is enabled by default.</p></dd><dt><a name="opt.lg_prof_sample"></a><span class="term">
+
+ "<code class="mallctl">opt.lg_prof_sample</code>"
+
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-prof</code>]
@@ -619,8 +710,10 @@ malloc_conf = "xmalloc:true";</pre><p>
samples, as measured in bytes of allocation activity. Increasing the
sampling interval decreases profile fidelity, but also decreases the
computational overhead. The default sample interval is 512 KiB (2^19
- B).</p></dd><dt><a name="opt.prof_accum"/><span class="term">
- <quote><code class="mallctl">opt.prof_accum</code></quote>
+ B).</p></dd><dt><a name="opt.prof_accum"></a><span class="term">
+
+ "<code class="mallctl">opt.prof_accum</code>"
+
(<span class="type">bool</span>)
<code class="literal">r-</code>
[<code class="option">--enable-prof</code>]
@@ -629,8 +722,10 @@ malloc_conf = "xmalloc:true";</pre><p>
backtrace must be stored for the duration of execution. Depending on
the application, this can impose a large memory overhead, and the
cumulative counts are not always of interest. This option is disabled
- by default.</p></dd><dt><a name="opt.lg_prof_interval"/><span class="term">
- <quote><code class="mallctl">opt.lg_prof_interval</code></quote>
+ by default.</p></dd><dt><a name="opt.lg_prof_interval"></a><span class="term">
+
+ "<code class="mallctl">opt.lg_prof_interval</code>"
+
(<span class="type">ssize_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-prof</code>]
@@ -641,19 +736,27 @@ malloc_conf = "xmalloc:true";</pre><p>
dumped to files named according to the pattern
<code class="filename">&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.i&lt;iseq&gt;.heap</code>,
where <code class="literal">&lt;prefix&gt;</code> is controlled by the
- <a class="link" href="#opt.prof_prefix"><quote><code class="mallctl">opt.prof_prefix</code></quote></a>
+ <a class="link" href="#opt.prof_prefix">
+ "<code class="mallctl">opt.prof_prefix</code>"
+ </a>
option. By default, interval-triggered profile dumping is disabled
(encoded as -1).
- </p></dd><dt><a name="opt.prof_gdump"/><span class="term">
- <quote><code class="mallctl">opt.prof_gdump</code></quote>
+ </p></dd><dt><a name="opt.prof_gdump"></a><span class="term">
+
+ "<code class="mallctl">opt.prof_gdump</code>"
+
(<span class="type">bool</span>)
<code class="literal">r-</code>
[<code class="option">--enable-prof</code>]
- </span></dt><dd><p>Set the initial state of <a class="link" href="#prof.gdump"><quote><code class="mallctl">prof.gdump</code></quote></a>, which when
+ </span></dt><dd><p>Set the initial state of <a class="link" href="#prof.gdump">
+ "<code class="mallctl">prof.gdump</code>"
+ </a>, which when
enabled triggers a memory profile dump every time the total virtual
memory exceeds the previous maximum. This option is disabled by
- default.</p></dd><dt><a name="opt.prof_final"/><span class="term">
- <quote><code class="mallctl">opt.prof_final</code></quote>
+ default.</p></dd><dt><a name="opt.prof_final"></a><span class="term">
+
+ "<code class="mallctl">opt.prof_final</code>"
+
(<span class="type">bool</span>)
<code class="literal">r-</code>
[<code class="option">--enable-prof</code>]
@@ -661,73 +764,101 @@ malloc_conf = "xmalloc:true";</pre><p>
<span class="citerefentry"><span class="refentrytitle">atexit</span>(3)</span> function to dump final memory
usage to a file named according to the pattern
<code class="filename">&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.f.heap</code>,
- where <code class="literal">&lt;prefix&gt;</code> is controlled by the <a class="link" href="#opt.prof_prefix"><quote><code class="mallctl">opt.prof_prefix</code></quote></a>
- option. Note that <code class="function">atexit()</code> may allocate
+ where <code class="literal">&lt;prefix&gt;</code> is controlled by the <a class="link" href="#opt.prof_prefix">
+ "<code class="mallctl">opt.prof_prefix</code>"
+ </a>
+ option. Note that <code class="function">atexit</code>(<em class="parameter"><code></code></em>) may allocate
memory during application initialization and then deadlock internally
- when jemalloc in turn calls <code class="function">atexit()</code>, so
- this option is not universally usable (though the application can
- register its own <code class="function">atexit()</code> function with
+ when jemalloc in turn calls <code class="function">atexit</code>(<em class="parameter"><code></code></em>), so
+ this option is not universally usable (though the application can
+ register its own <code class="function">atexit</code>(<em class="parameter"><code></code></em>) function with
equivalent functionality). This option is disabled by
- default.</p></dd><dt><a name="opt.prof_leak"/><span class="term">
- <quote><code class="mallctl">opt.prof_leak</code></quote>
+ default.</p></dd><dt><a name="opt.prof_leak"></a><span class="term">
+
+ "<code class="mallctl">opt.prof_leak</code>"
+
(<span class="type">bool</span>)
<code class="literal">r-</code>
[<code class="option">--enable-prof</code>]
</span></dt><dd><p>Leak reporting enabled/disabled. If enabled, use an
<span class="citerefentry"><span class="refentrytitle">atexit</span>(3)</span> function to report memory leaks
detected by allocation sampling. See the
- <a class="link" href="#opt.prof"><quote><code class="mallctl">opt.prof</code></quote></a> option for
+ <a class="link" href="#opt.prof">
+ "<code class="mallctl">opt.prof</code>"
+ </a> option for
information on analyzing heap profile output. This option is disabled
- by default.</p></dd><dt><a name="thread.arena"/><span class="term">
- <quote><code class="mallctl">thread.arena</code></quote>
+ by default.</p></dd><dt><a name="thread.arena"></a><span class="term">
+
+ "<code class="mallctl">thread.arena</code>"
+
(<span class="type">unsigned</span>)
<code class="literal">rw</code>
</span></dt><dd><p>Get or set the arena associated with the calling
thread. If the specified arena was not initialized beforehand (see the
- <a class="link" href="#arenas.initialized"><quote><code class="mallctl">arenas.initialized</code></quote></a>
+ <a class="link" href="#arenas.initialized">
+ "<code class="mallctl">arenas.initialized</code>"
+ </a>
mallctl), it will be automatically initialized as a side effect of
- calling this interface.</p></dd><dt><a name="thread.allocated"/><span class="term">
- <quote><code class="mallctl">thread.allocated</code></quote>
+ calling this interface.</p></dd><dt><a name="thread.allocated"></a><span class="term">
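For example, a thread can be re-associated with a specific arena by writing the arena index; a hedged sketch, assuming an unprefixed build:
<pre class="programlisting">
#include <jemalloc/jemalloc.h>

/* Writing an index re-associates the calling thread with that arena;
 * passing a non-NULL oldp instead reads the current association. */
int pin_thread_to_arena(unsigned arena) {
	return mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena));
}</pre>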
+
+ "<code class="mallctl">thread.allocated</code>"
+
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Get the total number of bytes ever allocated by the
calling thread. This counter has the potential to wrap around; it is
up to the application to appropriately interpret the counter in such
- cases.</p></dd><dt><a name="thread.allocatedp"/><span class="term">
- <quote><code class="mallctl">thread.allocatedp</code></quote>
+ cases.</p></dd><dt><a name="thread.allocatedp"></a><span class="term">
+
+ "<code class="mallctl">thread.allocatedp</code>"
+
(<span class="type">uint64_t *</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
 </span></dt><dd><p>Get a pointer to the value that is returned by the
- <a class="link" href="#thread.allocated"><quote><code class="mallctl">thread.allocated</code></quote></a>
+ <a class="link" href="#thread.allocated">
+ "<code class="mallctl">thread.allocated</code>"
+ </a>
mallctl. This is useful for avoiding the overhead of repeated
- <code class="function">mallctl*()</code> calls.</p></dd><dt><a name="thread.deallocated"/><span class="term">
- <quote><code class="mallctl">thread.deallocated</code></quote>
+ <code class="function">mallctl*</code>(<em class="parameter"><code></code></em>) calls.</p></dd><dt><a name="thread.deallocated"></a><span class="term">
+
+ "<code class="mallctl">thread.deallocated</code>"
+
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Get the total number of bytes ever deallocated by the
calling thread. This counter has the potential to wrap around; it is
up to the application to appropriately interpret the counter in such
- cases.</p></dd><dt><a name="thread.deallocatedp"/><span class="term">
- <quote><code class="mallctl">thread.deallocatedp</code></quote>
+ cases.</p></dd><dt><a name="thread.deallocatedp"></a><span class="term">
+
+ "<code class="mallctl">thread.deallocatedp</code>"
+
(<span class="type">uint64_t *</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
 </span></dt><dd><p>Get a pointer to the value that is returned by the
- <a class="link" href="#thread.deallocated"><quote><code class="mallctl">thread.deallocated</code></quote></a>
+ <a class="link" href="#thread.deallocated">
+ "<code class="mallctl">thread.deallocated</code>"
+ </a>
mallctl. This is useful for avoiding the overhead of repeated
- <code class="function">mallctl*()</code> calls.</p></dd><dt><a name="thread.tcache.enabled"/><span class="term">
- <quote><code class="mallctl">thread.tcache.enabled</code></quote>
+ <code class="function">mallctl*</code>(<em class="parameter"><code></code></em>) calls.</p></dd><dt><a name="thread.tcache.enabled"></a><span class="term">
+
+ "<code class="mallctl">thread.tcache.enabled</code>"
+
(<span class="type">bool</span>)
<code class="literal">rw</code>
[<code class="option">--enable-tcache</code>]
</span></dt><dd><p>Enable/disable calling thread's tcache. The tcache is
implicitly flushed as a side effect of becoming
- disabled (see <a class="link" href="#thread.tcache.flush"><quote><code class="mallctl">thread.tcache.flush</code></quote></a>).
- </p></dd><dt><a name="thread.tcache.flush"/><span class="term">
- <quote><code class="mallctl">thread.tcache.flush</code></quote>
+ disabled (see <a class="link" href="#thread.tcache.flush">
+ "<code class="mallctl">thread.tcache.flush</code>"
+ </a>).
+ </p></dd><dt><a name="thread.tcache.flush"></a><span class="term">
+
+ "<code class="mallctl">thread.tcache.flush</code>"
+
(<span class="type">void</span>)
<code class="literal">--</code>
[<code class="option">--enable-tcache</code>]
@@ -739,8 +870,10 @@ malloc_conf = "xmalloc:true";</pre><p>
a thread exits. However, garbage collection is triggered by allocation
activity, so it is possible for a thread that stops
allocating/deallocating to retain its cache indefinitely, in which case
- the developer may find manual flushing useful.</p></dd><dt><a name="thread.prof.name"/><span class="term">
- <quote><code class="mallctl">thread.prof.name</code></quote>
+ the developer may find manual flushing useful.</p></dd><dt><a name="thread.prof.name"></a><span class="term">
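Manual flushing is a single mallctl call with no input or output; for instance:
<pre class="programlisting">
#include <jemalloc/jemalloc.h>

/* Return the calling thread's cached objects to the arenas. */
void flush_calling_thread_tcache(void) {
	mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
}</pre>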
+
+ "<code class="mallctl">thread.prof.name</code>"
+
(<span class="type">const char *</span>)
<code class="literal">r-</code> or
<code class="literal">-w</code>
@@ -753,19 +886,25 @@ malloc_conf = "xmalloc:true";</pre><p>
can cause asynchronous string deallocation. Furthermore, each
invocation of this interface can only read or write; simultaneous
read/write is not supported due to string lifetime limitations. The
- name string must be nil-terminated and comprised only of characters in
- the sets recognized
+ name string must be nil-terminated and comprised only of characters in the
+ sets recognized
by <span class="citerefentry"><span class="refentrytitle">isgraph</span>(3)</span> and
- <span class="citerefentry"><span class="refentrytitle">isblank</span>(3)</span>.</p></dd><dt><a name="thread.prof.active"/><span class="term">
- <quote><code class="mallctl">thread.prof.active</code></quote>
+ <span class="citerefentry"><span class="refentrytitle">isblank</span>(3)</span>.</p></dd><dt><a name="thread.prof.active"></a><span class="term">
+
+ "<code class="mallctl">thread.prof.active</code>"
+
(<span class="type">bool</span>)
<code class="literal">rw</code>
[<code class="option">--enable-prof</code>]
</span></dt><dd><p>Control whether sampling is currently active for the
- calling thread. This is an activation mechanism in addition to <a class="link" href="#prof.active"><quote><code class="mallctl">prof.active</code></quote></a>; both must
+ calling thread. This is an activation mechanism in addition to <a class="link" href="#prof.active">
+ "<code class="mallctl">prof.active</code>"
+ </a>; both must
be active for the calling thread to sample. This flag is enabled by
- default.</p></dd><dt><a name="tcache.create"/><span class="term">
- <quote><code class="mallctl">tcache.create</code></quote>
+ default.</p></dd><dt><a name="tcache.create"></a><span class="term">
+
+ "<code class="mallctl">tcache.create</code>"
+
(<span class="type">unsigned</span>)
<code class="literal">r-</code>
[<code class="option">--enable-tcache</code>]
@@ -775,81 +914,76 @@ malloc_conf = "xmalloc:true";</pre><p>
automatically managed one that is used by default. Each explicit cache
can be used by only one thread at a time; the application must assure
that this constraint holds.
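A minimal sketch of creating an explicit cache and routing allocations through it, assuming a build with --enable-tcache; the matching dallocx() call should pass the same flag.
<pre class="programlisting">
#include <stddef.h>
#include <jemalloc/jemalloc.h>

void *alloc_with_private_tcache(size_t size, unsigned *tc_out) {
	unsigned tc;
	size_t sz = sizeof(tc);

	if (mallctl("tcache.create", &tc, &sz, NULL, 0) != 0)
		return NULL;
	*tc_out = tc;
	/* MALLOCX_TCACHE() directs this allocation to the explicit cache. */
	return mallocx(size, MALLOCX_TCACHE(tc));
}</pre>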
- </p></dd><dt><a name="tcache.flush"/><span class="term">
- <quote><code class="mallctl">tcache.flush</code></quote>
+ </p></dd><dt><a name="tcache.flush"></a><span class="term">
+
+ "<code class="mallctl">tcache.flush</code>"
+
(<span class="type">unsigned</span>)
<code class="literal">-w</code>
[<code class="option">--enable-tcache</code>]
</span></dt><dd><p>Flush the specified thread-specific cache (tcache). The
- same considerations apply to this interface as to <a class="link" href="#thread.tcache.flush"><quote><code class="mallctl">thread.tcache.flush</code></quote></a>,
- except that the tcache will never be automatically discarded.
- </p></dd><dt><a name="tcache.destroy"/><span class="term">
- <quote><code class="mallctl">tcache.destroy</code></quote>
+ same considerations apply to this interface as to <a class="link" href="#thread.tcache.flush">
+ "<code class="mallctl">thread.tcache.flush</code>"
+ </a>,
+ except that the tcache will never be automatically discarded.
+ </p></dd><dt><a name="tcache.destroy"></a><span class="term">
+
+ "<code class="mallctl">tcache.destroy</code>"
+
(<span class="type">unsigned</span>)
<code class="literal">-w</code>
[<code class="option">--enable-tcache</code>]
</span></dt><dd><p>Flush the specified thread-specific cache (tcache) and
make the identifier available for use during a future tcache creation.
- </p></dd><dt><a name="arena.i.purge"/><span class="term">
- <quote><code class="mallctl">arena.&lt;i&gt;.purge</code></quote>
- (<span class="type">void</span>)
- <code class="literal">--</code>
- </span></dt><dd><p>Purge all unused dirty pages for arena &lt;i&gt;, or for
- all arenas if &lt;i&gt; equals <a class="link" href="#arenas.narenas"><quote><code class="mallctl">arenas.narenas</code></quote></a>.
- </p></dd><dt><a name="arena.i.decay"/><span class="term">
- <quote><code class="mallctl">arena.&lt;i&gt;.decay</code></quote>
+ </p></dd><dt><a name="arena.i.purge"></a><span class="term">
+
+ "<code class="mallctl">arena.&lt;i&gt;.purge</code>"
+
(<span class="type">void</span>)
<code class="literal">--</code>
- </span></dt><dd><p>Trigger decay-based purging of unused dirty pages for
- arena &lt;i&gt;, or for all arenas if &lt;i&gt; equals <a class="link" href="#arenas.narenas"><quote><code class="mallctl">arenas.narenas</code></quote></a>.
- The proportion of unused dirty pages to be purged depends on the current
- time; see <a class="link" href="#opt.decay_time"><quote><code class="mallctl">opt.decay_time</code></quote></a> for
- details.</p></dd><dt><a name="arena.i.reset"/><span class="term">
- <quote><code class="mallctl">arena.&lt;i&gt;.reset</code></quote>
- (<span class="type">void</span>)
- <code class="literal">--</code>
- </span></dt><dd><p>Discard all of the arena's extant allocations. This
- interface can only be used with arenas created via <a class="link" href="#arenas.extend"><quote><code class="mallctl">arenas.extend</code></quote></a>. None
- of the arena's discarded/cached allocations may be accessed afterward. As
- part of this requirement, all thread caches which were used to
- allocate/deallocate in conjunction with the arena must be flushed
- beforehand. This interface cannot be used if running inside Valgrind,
- nor if the <a class="link" href="#opt.quarantine">quarantine</a> size is
- non-zero.</p></dd><dt><a name="arena.i.dss"/><span class="term">
- <quote><code class="mallctl">arena.&lt;i&gt;.dss</code></quote>
+ </span></dt><dd><p>Purge unused dirty pages for arena &lt;i&gt;, or for
+ all arenas if &lt;i&gt; equals <a class="link" href="#arenas.narenas">
+ "<code class="mallctl">arenas.narenas</code>"
+ </a>.
+ </p></dd><dt><a name="arena.i.dss"></a><span class="term">
+
+ "<code class="mallctl">arena.&lt;i&gt;.dss</code>"
+
(<span class="type">const char *</span>)
<code class="literal">rw</code>
</span></dt><dd><p>Set the precedence of dss allocation as related to mmap
allocation for arena &lt;i&gt;, or for all arenas if &lt;i&gt; equals
- <a class="link" href="#arenas.narenas"><quote><code class="mallctl">arenas.narenas</code></quote></a>. See
- <a class="link" href="#opt.dss"><quote><code class="mallctl">opt.dss</code></quote></a> for supported
- settings.</p></dd><dt><a name="arena.i.lg_dirty_mult"/><span class="term">
- <quote><code class="mallctl">arena.&lt;i&gt;.lg_dirty_mult</code></quote>
+ <a class="link" href="#arenas.narenas">
+ "<code class="mallctl">arenas.narenas</code>"
+ </a>. See
+ <a class="link" href="#opt.dss">
+ "<code class="mallctl">opt.dss</code>"
+ </a> for supported
+ settings.</p></dd><dt><a name="arena.i.lg_dirty_mult"></a><span class="term">
+
+ "<code class="mallctl">arena.&lt;i&gt;.lg_dirty_mult</code>"
+
(<span class="type">ssize_t</span>)
<code class="literal">rw</code>
</span></dt><dd><p>Current per-arena minimum ratio (log base 2) of active
to dirty pages for arena &lt;i&gt;. Each time this interface is set and
the ratio is increased, pages are synchronously purged as necessary to
- impose the new ratio. See <a class="link" href="#opt.lg_dirty_mult"><quote><code class="mallctl">opt.lg_dirty_mult</code></quote></a>
- for additional information.</p></dd><dt><a name="arena.i.decay_time"/><span class="term">
- <quote><code class="mallctl">arena.&lt;i&gt;.decay_time</code></quote>
- (<span class="type">ssize_t</span>)
- <code class="literal">rw</code>
- </span></dt><dd><p>Current per-arena approximate time in seconds from the
- creation of a set of unused dirty pages until an equivalent set of
- unused dirty pages is purged and/or reused. Each time this interface is
- set, all currently unused dirty pages are considered to have fully
- decayed, which causes immediate purging of all unused dirty pages unless
- the decay time is set to -1 (i.e. purging disabled). See <a class="link" href="#opt.decay_time"><quote><code class="mallctl">opt.decay_time</code></quote></a> for
- additional information.</p></dd><dt><a name="arena.i.chunk_hooks"/><span class="term">
- <quote><code class="mallctl">arena.&lt;i&gt;.chunk_hooks</code></quote>
+ impose the new ratio. See <a class="link" href="#opt.lg_dirty_mult">
+ "<code class="mallctl">opt.lg_dirty_mult</code>"
+ </a>
+ for additional information.</p></dd><dt><a name="arena.i.chunk_hooks"></a><span class="term">
+
+ "<code class="mallctl">arena.&lt;i&gt;.chunk_hooks</code>"
+
(<span class="type">chunk_hooks_t</span>)
<code class="literal">rw</code>
</span></dt><dd><p>Get or set the chunk management hook functions for arena
&lt;i&gt;. The functions must be capable of operating on all extant
chunks associated with arena &lt;i&gt;, usually by passing unknown
chunks to the replaced functions. In practice, it is feasible to
- control allocation for arenas created via <a class="link" href="#arenas.extend"><quote><code class="mallctl">arenas.extend</code></quote></a> such
+ control allocation for arenas created via <a class="link" href="#arenas.extend">
+ "<code class="mallctl">arenas.extend</code>"
+ </a> such
that all chunks originate from an application-supplied chunk allocator
(by setting custom chunk hook functions just after arena creation), but
the automatically created arenas may have already created chunks prior
@@ -875,7 +1009,7 @@ typedef struct {
operations can also be opted out of, but this is mainly intended to
support platforms on which virtual memory mappings provided by the
operating system kernel do not automatically coalesce and split, e.g.
- Windows.</p><div class="funcsynopsis"><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">typedef void *<b class="fsfunc">(chunk_alloc_t)</b>(</code></td><td>void *<var class="pdparam">chunk</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">alignment</var>, </td></tr><tr><td> </td><td>bool *<var class="pdparam">zero</var>, </td></tr><tr><td> </td><td>bool *<var class="pdparam">commit</var>, </td></tr><tr><td> </td><td>unsigned <var class="pdparam">arena_ind</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div><div class="literallayout"><p/></div><p>A chunk allocation function conforms to the
+ Windows.</p><div class="funcsynopsis"><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">typedef void *<b class="fsfunc">(chunk_alloc_t)</b>(</code></td><td>void *<var class="pdparam">chunk</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">alignment</var>, </td></tr><tr><td> </td><td>bool *<var class="pdparam">zero</var>, </td></tr><tr><td> </td><td>bool *<var class="pdparam">commit</var>, </td></tr><tr><td> </td><td>unsigned <var class="pdparam">arena_ind</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div><div class="literallayout"><p></p></div><p>A chunk allocation function conforms to the
<span class="type">chunk_alloc_t</span> type and upon success returns a pointer to
<em class="parameter"><code>size</code></em> bytes of mapped memory on behalf of arena
<em class="parameter"><code>arena_ind</code></em> such that the chunk's base address is a
@@ -896,8 +1030,10 @@ typedef struct {
in absolute terms as on a system that does not overcommit, or in
implicit terms as on a system that overcommits and satisfies physical
memory needs on demand via soft page faults. Note that replacing the
- default chunk allocation function makes the arena's <a class="link" href="#arena.i.dss"><quote><code class="mallctl">arena.&lt;i&gt;.dss</code></quote></a>
- setting irrelevant.</p><div class="funcsynopsis"><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">typedef bool <b class="fsfunc">(chunk_dalloc_t)</b>(</code></td><td>void *<var class="pdparam">chunk</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>bool <var class="pdparam">committed</var>, </td></tr><tr><td> </td><td>unsigned <var class="pdparam">arena_ind</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div><div class="literallayout"><p/></div><p>
+ default chunk allocation function makes the arena's <a class="link" href="#arena.i.dss">
+ "<code class="mallctl">arena.&lt;i&gt;.dss</code>"
+ </a>
+ setting irrelevant.</p><div class="funcsynopsis"><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">typedef bool <b class="fsfunc">(chunk_dalloc_t)</b>(</code></td><td>void *<var class="pdparam">chunk</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>bool <var class="pdparam">committed</var>, </td></tr><tr><td> </td><td>unsigned <var class="pdparam">arena_ind</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div><div class="literallayout"><p></p></div><p>
A chunk deallocation function conforms to the
<span class="type">chunk_dalloc_t</span> type and deallocates a
<em class="parameter"><code>chunk</code></em> of given <em class="parameter"><code>size</code></em> with
@@ -906,7 +1042,7 @@ typedef struct {
success. If the function returns true, this indicates opt-out from
deallocation; the virtual memory mapping associated with the chunk
remains mapped, in the same commit state, and available for future use,
- in which case it will be automatically retained for later reuse.</p><div class="funcsynopsis"><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">typedef bool <b class="fsfunc">(chunk_commit_t)</b>(</code></td><td>void *<var class="pdparam">chunk</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">offset</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">length</var>, </td></tr><tr><td> </td><td>unsigned <var class="pdparam">arena_ind</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div><div class="literallayout"><p/></div><p>A chunk commit function conforms to the
+ in which case it will be automatically retained for later reuse.</p><div class="funcsynopsis"><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">typedef bool <b class="fsfunc">(chunk_commit_t)</b>(</code></td><td>void *<var class="pdparam">chunk</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">offset</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">length</var>, </td></tr><tr><td> </td><td>unsigned <var class="pdparam">arena_ind</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div><div class="literallayout"><p></p></div><p>A chunk commit function conforms to the
<span class="type">chunk_commit_t</span> type and commits zeroed physical memory to
back pages within a <em class="parameter"><code>chunk</code></em> of given
<em class="parameter"><code>size</code></em> at <em class="parameter"><code>offset</code></em> bytes,
@@ -916,7 +1052,7 @@ typedef struct {
does not overcommit, or in implicit terms as on a system that
overcommits and satisfies physical memory needs on demand via soft page
faults. If the function returns true, this indicates insufficient
- physical memory to satisfy the request.</p><div class="funcsynopsis"><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">typedef bool <b class="fsfunc">(chunk_decommit_t)</b>(</code></td><td>void *<var class="pdparam">chunk</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">offset</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">length</var>, </td></tr><tr><td> </td><td>unsigned <var class="pdparam">arena_ind</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div><div class="literallayout"><p/></div><p>A chunk decommit function conforms to the
+ physical memory to satisfy the request.</p><div class="funcsynopsis"><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">typedef bool <b class="fsfunc">(chunk_decommit_t)</b>(</code></td><td>void *<var class="pdparam">chunk</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">offset</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">length</var>, </td></tr><tr><td> </td><td>unsigned <var class="pdparam">arena_ind</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div><div class="literallayout"><p></p></div><p>A chunk decommit function conforms to the
<span class="type">chunk_decommit_t</span> type and decommits any physical memory
that is backing pages within a <em class="parameter"><code>chunk</code></em> of given
<em class="parameter"><code>size</code></em> at <em class="parameter"><code>offset</code></em> bytes,
@@ -925,14 +1061,14 @@ typedef struct {
case the pages will be committed via the chunk commit function before
being reused. If the function returns true, this indicates opt-out from
decommit; the memory remains committed and available for future use, in
- which case it will be automatically retained for later reuse.</p><div class="funcsynopsis"><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">typedef bool <b class="fsfunc">(chunk_purge_t)</b>(</code></td><td>void *<var class="pdparam">chunk</var>, </td></tr><tr><td> </td><td>size_t<var class="pdparam">size</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">offset</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">length</var>, </td></tr><tr><td> </td><td>unsigned <var class="pdparam">arena_ind</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div><div class="literallayout"><p/></div><p>A chunk purge function conforms to the <span class="type">chunk_purge_t</span>
+ which case it will be automatically retained for later reuse.</p><div class="funcsynopsis"><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">typedef bool <b class="fsfunc">(chunk_purge_t)</b>(</code></td><td>void *<var class="pdparam">chunk</var>, </td></tr><tr><td> </td><td>size_t<var class="pdparam">size</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">offset</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">length</var>, </td></tr><tr><td> </td><td>unsigned <var class="pdparam">arena_ind</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div><div class="literallayout"><p></p></div><p>A chunk purge function conforms to the <span class="type">chunk_purge_t</span>
type and optionally discards physical pages within the virtual memory
mapping associated with <em class="parameter"><code>chunk</code></em> of given
<em class="parameter"><code>size</code></em> at <em class="parameter"><code>offset</code></em> bytes,
extending for <em class="parameter"><code>length</code></em> on behalf of arena
<em class="parameter"><code>arena_ind</code></em>, returning false if pages within the
purged virtual memory range will be zero-filled the next time they are
- accessed.</p><div class="funcsynopsis"><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">typedef bool <b class="fsfunc">(chunk_split_t)</b>(</code></td><td>void *<var class="pdparam">chunk</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size_a</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size_b</var>, </td></tr><tr><td> </td><td>bool <var class="pdparam">committed</var>, </td></tr><tr><td> </td><td>unsigned <var class="pdparam">arena_ind</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div><div class="literallayout"><p/></div><p>A chunk split function conforms to the <span class="type">chunk_split_t</span>
+ accessed.</p><div class="funcsynopsis"><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">typedef bool <b class="fsfunc">(chunk_split_t)</b>(</code></td><td>void *<var class="pdparam">chunk</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size_a</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size_b</var>, </td></tr><tr><td> </td><td>bool <var class="pdparam">committed</var>, </td></tr><tr><td> </td><td>unsigned <var class="pdparam">arena_ind</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div><div class="literallayout"><p></p></div><p>A chunk split function conforms to the <span class="type">chunk_split_t</span>
type and optionally splits <em class="parameter"><code>chunk</code></em> of given
<em class="parameter"><code>size</code></em> into two adjacent chunks, the first of
<em class="parameter"><code>size_a</code></em> bytes, and the second of
@@ -941,7 +1077,7 @@ typedef struct {
behalf of arena <em class="parameter"><code>arena_ind</code></em>, returning false upon
success. If the function returns true, this indicates that the chunk
remains unsplit and therefore should continue to be operated on as a
- whole.</p><div class="funcsynopsis"><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">typedef bool <b class="fsfunc">(chunk_merge_t)</b>(</code></td><td>void *<var class="pdparam">chunk_a</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size_a</var>, </td></tr><tr><td> </td><td>void *<var class="pdparam">chunk_b</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size_b</var>, </td></tr><tr><td> </td><td>bool <var class="pdparam">committed</var>, </td></tr><tr><td> </td><td>unsigned <var class="pdparam">arena_ind</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div><div class="literallayout"><p/></div><p>A chunk merge function conforms to the <span class="type">chunk_merge_t</span>
+ whole.</p><div class="funcsynopsis"><table border="0" class="funcprototype-table" summary="Function synopsis" style="cellspacing: 0; cellpadding: 0;"><tr><td><code class="funcdef">typedef bool <b class="fsfunc">(chunk_merge_t)</b>(</code></td><td>void *<var class="pdparam">chunk_a</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size_a</var>, </td></tr><tr><td> </td><td>void *<var class="pdparam">chunk_b</var>, </td></tr><tr><td> </td><td>size_t <var class="pdparam">size_b</var>, </td></tr><tr><td> </td><td>bool <var class="pdparam">committed</var>, </td></tr><tr><td> </td><td>unsigned <var class="pdparam">arena_ind</var><code>)</code>;</td></tr></table><div class="funcprototype-spacer"> </div></div><div class="literallayout"><p></p></div><p>A chunk merge function conforms to the <span class="type">chunk_merge_t</span>
type and optionally merges adjacent chunks,
<em class="parameter"><code>chunk_a</code></em> of given <em class="parameter"><code>size_a</code></em>
and <em class="parameter"><code>chunk_b</code></em> of given
@@ -950,106 +1086,150 @@ typedef struct {
behalf of arena <em class="parameter"><code>arena_ind</code></em>, returning false upon
success. If the function returns true, this indicates that the chunks
remain distinct mappings and therefore should continue to be operated on
- independently.</p></dd><dt><a name="arenas.narenas"/><span class="term">
- <quote><code class="mallctl">arenas.narenas</code></quote>
+ independently.</p></dd><dt><a name="arenas.narenas"></a><span class="term">
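Installing replacement hooks is a single write of a chunk_hooks_t; the my_chunk_* functions below are hypothetical placeholders for implementations of the typedefs described above.
<pre class="programlisting">
#include <stdio.h>
#include <jemalloc/jemalloc.h>

/* my_chunk_alloc, my_chunk_dalloc, ... are assumed to be defined
 * elsewhere and to match chunk_alloc_t, chunk_dalloc_t, etc. */
void install_chunk_hooks(unsigned arena_ind) {
	chunk_hooks_t hooks = {
		my_chunk_alloc, my_chunk_dalloc, my_chunk_commit,
		my_chunk_decommit, my_chunk_purge, my_chunk_split,
		my_chunk_merge
	};
	char cmd[64];

	snprintf(cmd, sizeof(cmd), "arena.%u.chunk_hooks", arena_ind);
	mallctl(cmd, NULL, NULL, &hooks, sizeof(hooks));
}</pre>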
+
+ "<code class="mallctl">arenas.narenas</code>"
+
(<span class="type">unsigned</span>)
<code class="literal">r-</code>
- </span></dt><dd><p>Current limit on number of arenas.</p></dd><dt><a name="arenas.initialized"/><span class="term">
- <quote><code class="mallctl">arenas.initialized</code></quote>
+ </span></dt><dd><p>Current limit on number of arenas.</p></dd><dt><a name="arenas.initialized"></a><span class="term">
+
+ "<code class="mallctl">arenas.initialized</code>"
+
(<span class="type">bool *</span>)
<code class="literal">r-</code>
- </span></dt><dd><p>An array of <a class="link" href="#arenas.narenas"><quote><code class="mallctl">arenas.narenas</code></quote></a>
+ </span></dt><dd><p>An array of <a class="link" href="#arenas.narenas">
+ "<code class="mallctl">arenas.narenas</code>"
+ </a>
booleans. Each boolean indicates whether the corresponding arena is
- initialized.</p></dd><dt><a name="arenas.lg_dirty_mult"/><span class="term">
- <quote><code class="mallctl">arenas.lg_dirty_mult</code></quote>
+ initialized.</p></dd><dt><a name="arenas.lg_dirty_mult"></a><span class="term">
+
+ "<code class="mallctl">arenas.lg_dirty_mult</code>"
+
(<span class="type">ssize_t</span>)
<code class="literal">rw</code>
</span></dt><dd><p>Current default per-arena minimum ratio (log base 2) of
- active to dirty pages, used to initialize <a class="link" href="#arena.i.lg_dirty_mult"><quote><code class="mallctl">arena.&lt;i&gt;.lg_dirty_mult</code></quote></a>
- during arena creation. See <a class="link" href="#opt.lg_dirty_mult"><quote><code class="mallctl">opt.lg_dirty_mult</code></quote></a>
- for additional information.</p></dd><dt><a name="arenas.decay_time"/><span class="term">
- <quote><code class="mallctl">arenas.decay_time</code></quote>
- (<span class="type">ssize_t</span>)
- <code class="literal">rw</code>
- </span></dt><dd><p>Current default per-arena approximate time in seconds
- from the creation of a set of unused dirty pages until an equivalent set
- of unused dirty pages is purged and/or reused, used to initialize <a class="link" href="#arena.i.decay_time"><quote><code class="mallctl">arena.&lt;i&gt;.decay_time</code></quote></a>
- during arena creation. See <a class="link" href="#opt.decay_time"><quote><code class="mallctl">opt.decay_time</code></quote></a> for
- additional information.</p></dd><dt><a name="arenas.quantum"/><span class="term">
- <quote><code class="mallctl">arenas.quantum</code></quote>
+ active to dirty pages, used to initialize <a class="link" href="#arena.i.lg_dirty_mult">
+ "<code class="mallctl">arena.&lt;i&gt;.lg_dirty_mult</code>"
+ </a>
+ during arena creation. See <a class="link" href="#opt.lg_dirty_mult">
+ "<code class="mallctl">opt.lg_dirty_mult</code>"
+ </a>
+ for additional information.</p></dd><dt><a name="arenas.quantum"></a><span class="term">
+
+ "<code class="mallctl">arenas.quantum</code>"
+
(<span class="type">size_t</span>)
<code class="literal">r-</code>
- </span></dt><dd><p>Quantum size.</p></dd><dt><a name="arenas.page"/><span class="term">
- <quote><code class="mallctl">arenas.page</code></quote>
+ </span></dt><dd><p>Quantum size.</p></dd><dt><a name="arenas.page"></a><span class="term">
+
+ "<code class="mallctl">arenas.page</code>"
+
(<span class="type">size_t</span>)
<code class="literal">r-</code>
- </span></dt><dd><p>Page size.</p></dd><dt><a name="arenas.tcache_max"/><span class="term">
- <quote><code class="mallctl">arenas.tcache_max</code></quote>
+ </span></dt><dd><p>Page size.</p></dd><dt><a name="arenas.tcache_max"></a><span class="term">
+
+ "<code class="mallctl">arenas.tcache_max</code>"
+
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-tcache</code>]
- </span></dt><dd><p>Maximum thread-cached size class.</p></dd><dt><a name="arenas.nbins"/><span class="term">
- <quote><code class="mallctl">arenas.nbins</code></quote>
+ </span></dt><dd><p>Maximum thread-cached size class.</p></dd><dt><a name="arenas.nbins"></a><span class="term">
+
+ "<code class="mallctl">arenas.nbins</code>"
+
(<span class="type">unsigned</span>)
<code class="literal">r-</code>
- </span></dt><dd><p>Number of bin size classes.</p></dd><dt><a name="arenas.nhbins"/><span class="term">
- <quote><code class="mallctl">arenas.nhbins</code></quote>
+ </span></dt><dd><p>Number of bin size classes.</p></dd><dt><a name="arenas.nhbins"></a><span class="term">
+
+ "<code class="mallctl">arenas.nhbins</code>"
+
(<span class="type">unsigned</span>)
<code class="literal">r-</code>
[<code class="option">--enable-tcache</code>]
</span></dt><dd><p>Total number of thread cache bin size
- classes.</p></dd><dt><a name="arenas.bin.i.size"/><span class="term">
- <quote><code class="mallctl">arenas.bin.&lt;i&gt;.size</code></quote>
+ classes.</p></dd><dt><a name="arenas.bin.i.size"></a><span class="term">
+
+ "<code class="mallctl">arenas.bin.&lt;i&gt;.size</code>"
+
(<span class="type">size_t</span>)
<code class="literal">r-</code>
- </span></dt><dd><p>Maximum size supported by size class.</p></dd><dt><a name="arenas.bin.i.nregs"/><span class="term">
- <quote><code class="mallctl">arenas.bin.&lt;i&gt;.nregs</code></quote>
+ </span></dt><dd><p>Maximum size supported by size class.</p></dd><dt><a name="arenas.bin.i.nregs"></a><span class="term">
+
+ "<code class="mallctl">arenas.bin.&lt;i&gt;.nregs</code>"
+
(<span class="type">uint32_t</span>)
<code class="literal">r-</code>
- </span></dt><dd><p>Number of regions per page run.</p></dd><dt><a name="arenas.bin.i.run_size"/><span class="term">
- <quote><code class="mallctl">arenas.bin.&lt;i&gt;.run_size</code></quote>
+ </span></dt><dd><p>Number of regions per page run.</p></dd><dt><a name="arenas.bin.i.run_size"></a><span class="term">
+
+ "<code class="mallctl">arenas.bin.&lt;i&gt;.run_size</code>"
+
(<span class="type">size_t</span>)
<code class="literal">r-</code>
- </span></dt><dd><p>Number of bytes per page run.</p></dd><dt><a name="arenas.nlruns"/><span class="term">
- <quote><code class="mallctl">arenas.nlruns</code></quote>
+ </span></dt><dd><p>Number of bytes per page run.</p></dd><dt><a name="arenas.nlruns"></a><span class="term">
+
+ "<code class="mallctl">arenas.nlruns</code>"
+
(<span class="type">unsigned</span>)
<code class="literal">r-</code>
- </span></dt><dd><p>Total number of large size classes.</p></dd><dt><a name="arenas.lrun.i.size"/><span class="term">
- <quote><code class="mallctl">arenas.lrun.&lt;i&gt;.size</code></quote>
+ </span></dt><dd><p>Total number of large size classes.</p></dd><dt><a name="arenas.lrun.i.size"></a><span class="term">
+
+ "<code class="mallctl">arenas.lrun.&lt;i&gt;.size</code>"
+
(<span class="type">size_t</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Maximum size supported by this large size
- class.</p></dd><dt><a name="arenas.nhchunks"/><span class="term">
- <quote><code class="mallctl">arenas.nhchunks</code></quote>
+ class.</p></dd><dt><a name="arenas.nhchunks"></a><span class="term">
+
+ "<code class="mallctl">arenas.nhchunks</code>"
+
(<span class="type">unsigned</span>)
<code class="literal">r-</code>
- </span></dt><dd><p>Total number of huge size classes.</p></dd><dt><a name="arenas.hchunk.i.size"/><span class="term">
- <quote><code class="mallctl">arenas.hchunk.&lt;i&gt;.size</code></quote>
+ </span></dt><dd><p>Total number of huge size classes.</p></dd><dt><a name="arenas.hchunk.i.size"></a><span class="term">
+
+ "<code class="mallctl">arenas.hchunk.&lt;i&gt;.size</code>"
+
(<span class="type">size_t</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Maximum size supported by this huge size
- class.</p></dd><dt><a name="arenas.extend"/><span class="term">
- <quote><code class="mallctl">arenas.extend</code></quote>
+ class.</p></dd><dt><a name="arenas.extend"></a><span class="term">
+
+ "<code class="mallctl">arenas.extend</code>"
+
(<span class="type">unsigned</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Extend the array of arenas by appending a new arena,
- and returning the new arena index.</p></dd><dt><a name="prof.thread_active_init"/><span class="term">
- <quote><code class="mallctl">prof.thread_active_init</code></quote>
+ and returning the new arena index.</p></dd><dt><a name="prof.thread_active_init"></a><span class="term">
+
+ "<code class="mallctl">prof.thread_active_init</code>"
+
(<span class="type">bool</span>)
<code class="literal">rw</code>
[<code class="option">--enable-prof</code>]
- </span></dt><dd><p>Control the initial setting for <a class="link" href="#thread.prof.active"><quote><code class="mallctl">thread.prof.active</code></quote></a>
- in newly created threads. See the <a class="link" href="#opt.prof_thread_active_init"><quote><code class="mallctl">opt.prof_thread_active_init</code></quote></a>
- option for additional information.</p></dd><dt><a name="prof.active"/><span class="term">
- <quote><code class="mallctl">prof.active</code></quote>
+ </span></dt><dd><p>Control the initial setting for <a class="link" href="#thread.prof.active">
+ "<code class="mallctl">thread.prof.active</code>"
+ </a>
+ in newly created threads. See the <a class="link" href="#opt.prof_thread_active_init">
+ "<code class="mallctl">opt.prof_thread_active_init</code>"
+ </a>
+ option for additional information.</p></dd><dt><a name="prof.active"></a><span class="term">
+
+ "<code class="mallctl">prof.active</code>"
+
(<span class="type">bool</span>)
<code class="literal">rw</code>
[<code class="option">--enable-prof</code>]
</span></dt><dd><p>Control whether sampling is currently active. See the
- <a class="link" href="#opt.prof_active"><quote><code class="mallctl">opt.prof_active</code></quote></a>
- option for additional information, as well as the interrelated <a class="link" href="#thread.prof.active"><quote><code class="mallctl">thread.prof.active</code></quote></a>
- mallctl.</p></dd><dt><a name="prof.dump"/><span class="term">
- <quote><code class="mallctl">prof.dump</code></quote>
+ <a class="link" href="#opt.prof_active">
+ "<code class="mallctl">opt.prof_active</code>"
+ </a>
+ option for additional information, as well as the interrelated <a class="link" href="#thread.prof.active">
+ "<code class="mallctl">thread.prof.active</code>"
+ </a>
+ mallctl.</p></dd><dt><a name="prof.dump"></a><span class="term">
+
+ "<code class="mallctl">prof.dump</code>"
+
(<span class="type">const char *</span>)
<code class="literal">-w</code>
[<code class="option">--enable-prof</code>]
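The arena-related mallctls documented in the hunk above are normally driven from C through mallctl(). The following sketch is illustrative only and is not part of the manual page being diffed; it assumes jemalloc 4.x's unprefixed API (builds configured with a function prefix, such as Redis's bundled je_-prefixed jemalloc, would prefix every name). It creates a fresh arena via "arenas.extend" and allocates from it with MALLOCX_ARENA():

/* Hedged sketch: create an arena via "arenas.extend" and allocate from it.
 * Assumes jemalloc 4.x with no function prefix; error handling abbreviated. */
#include <jemalloc/jemalloc.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    unsigned arena_ind;
    size_t sz = sizeof(arena_ind);
    int err;

    /* "arenas.extend" (unsigned, r-): append a new arena, return its index. */
    err = mallctl("arenas.extend", &arena_ind, &sz, NULL, 0);
    if (err != 0) {
        fprintf(stderr, "arenas.extend: %s\n", strerror(err));
        return 1;
    }

    /* Allocate 4 KiB from the new arena, bypassing the thread cache. */
    void *p = mallocx(4096, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
    if (p != NULL)
        dallocx(p, MALLOCX_TCACHE_NONE);
    return 0;
}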
@@ -1057,9 +1237,13 @@ typedef struct {
is specified, to a file according to the pattern
<code class="filename">&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.m&lt;mseq&gt;.heap</code>,
where <code class="literal">&lt;prefix&gt;</code> is controlled by the
- <a class="link" href="#opt.prof_prefix"><quote><code class="mallctl">opt.prof_prefix</code></quote></a>
- option.</p></dd><dt><a name="prof.gdump"/><span class="term">
- <quote><code class="mallctl">prof.gdump</code></quote>
+ <a class="link" href="#opt.prof_prefix">
+ "<code class="mallctl">opt.prof_prefix</code>"
+ </a>
+ option.</p></dd><dt><a name="prof.gdump"></a><span class="term">
+
+ "<code class="mallctl">prof.gdump</code>"
+
(<span class="type">bool</span>)
<code class="literal">rw</code>
[<code class="option">--enable-prof</code>]
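The profiling mallctls above ("prof.thread_active_init", "prof.active", "prof.dump") are typically exercised as in the sketch below; it is illustrative only and assumes a build configured with --enable-prof, started with profiling enabled (e.g. MALLOC_CONF=prof:true), and the unprefixed API:

#include <jemalloc/jemalloc.h>
#include <stdbool.h>

/* Sketch: toggle sampling around a workload and force a heap dump. */
static void profile_workload(void (*workload)(void)) {
    bool active = true;

    /* "prof.active" (bool, rw): start sampling allocations. */
    mallctl("prof.active", NULL, NULL, &active, sizeof(active));

    workload();

    /* "prof.dump" (const char *, -w): passing NULL for the new value uses
     * the <prefix>.<pid>.<seq>.m<mseq>.heap naming pattern. */
    mallctl("prof.dump", NULL, NULL, NULL, 0);

    active = false;
    mallctl("prof.active", NULL, NULL, &active, sizeof(active));
}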
@@ -1067,67 +1251,103 @@ typedef struct {
the total virtual memory exceeds the previous maximum. Profiles are
dumped to files named according to the pattern
<code class="filename">&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.u&lt;useq&gt;.heap</code>,
- where <code class="literal">&lt;prefix&gt;</code> is controlled by the <a class="link" href="#opt.prof_prefix"><quote><code class="mallctl">opt.prof_prefix</code></quote></a>
- option.</p></dd><dt><a name="prof.reset"/><span class="term">
- <quote><code class="mallctl">prof.reset</code></quote>
+ where <code class="literal">&lt;prefix&gt;</code> is controlled by the <a class="link" href="#opt.prof_prefix">
+ "<code class="mallctl">opt.prof_prefix</code>"
+ </a>
+ option.</p></dd><dt><a name="prof.reset"></a><span class="term">
+
+ "<code class="mallctl">prof.reset</code>"
+
(<span class="type">size_t</span>)
<code class="literal">-w</code>
[<code class="option">--enable-prof</code>]
</span></dt><dd><p>Reset all memory profile statistics, and optionally
- update the sample rate (see <a class="link" href="#opt.lg_prof_sample"><quote><code class="mallctl">opt.lg_prof_sample</code></quote></a>
- and <a class="link" href="#prof.lg_sample"><quote><code class="mallctl">prof.lg_sample</code></quote></a>).
- </p></dd><dt><a name="prof.lg_sample"/><span class="term">
- <quote><code class="mallctl">prof.lg_sample</code></quote>
+ update the sample rate (see <a class="link" href="#opt.lg_prof_sample">
+ "<code class="mallctl">opt.lg_prof_sample</code>"
+ </a>
+ and <a class="link" href="#prof.lg_sample">
+ "<code class="mallctl">prof.lg_sample</code>"
+ </a>).
+ </p></dd><dt><a name="prof.lg_sample"></a><span class="term">
+
+ "<code class="mallctl">prof.lg_sample</code>"
+
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-prof</code>]
- </span></dt><dd><p>Get the current sample rate (see <a class="link" href="#opt.lg_prof_sample"><quote><code class="mallctl">opt.lg_prof_sample</code></quote></a>).
- </p></dd><dt><a name="prof.interval"/><span class="term">
- <quote><code class="mallctl">prof.interval</code></quote>
+ </span></dt><dd><p>Get the current sample rate (see <a class="link" href="#opt.lg_prof_sample">
+ "<code class="mallctl">opt.lg_prof_sample</code>"
+ </a>).
+ </p></dd><dt><a name="prof.interval"></a><span class="term">
+
+ "<code class="mallctl">prof.interval</code>"
+
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-prof</code>]
</span></dt><dd><p>Average number of bytes allocated between
- interval-based profile dumps. See the
- <a class="link" href="#opt.lg_prof_interval"><quote><code class="mallctl">opt.lg_prof_interval</code></quote></a>
- option for additional information.</p></dd><dt><a name="stats.cactive"/><span class="term">
- <quote><code class="mallctl">stats.cactive</code></quote>
+ interval-based profile dumps. See the
+ <a class="link" href="#opt.lg_prof_interval">
+ "<code class="mallctl">opt.lg_prof_interval</code>"
+ </a>
+ option for additional information.</p></dd><dt><a name="stats.cactive"></a><span class="term">
+
+ "<code class="mallctl">stats.cactive</code>"
+
(<span class="type">size_t *</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Pointer to a counter that contains an approximate count
of the current number of bytes in active pages. The estimate may be
high, but never low, because each arena rounds up when computing its
- contribution to the counter. Note that the <a class="link" href="#epoch"><quote><code class="mallctl">epoch</code></quote></a> mallctl has no bearing
+ contribution to the counter. Note that the <a class="link" href="#epoch">
+ "<code class="mallctl">epoch</code>"
+ </a> mallctl has no bearing
on this counter. Furthermore, counter consistency is maintained via
atomic operations, so it is necessary to use an atomic operation in
order to guarantee a consistent read when dereferencing the pointer.
- </p></dd><dt><a name="stats.allocated"/><span class="term">
- <quote><code class="mallctl">stats.allocated</code></quote>
+ </p></dd><dt><a name="stats.allocated"></a><span class="term">
+
+ "<code class="mallctl">stats.allocated</code>"
+
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Total number of bytes allocated by the
- application.</p></dd><dt><a name="stats.active"/><span class="term">
- <quote><code class="mallctl">stats.active</code></quote>
+ application.</p></dd><dt><a name="stats.active"></a><span class="term">
+
+ "<code class="mallctl">stats.active</code>"
+
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Total number of bytes in active pages allocated by the
application. This is a multiple of the page size, and greater than or
- equal to <a class="link" href="#stats.allocated"><quote><code class="mallctl">stats.allocated</code></quote></a>.
+ equal to <a class="link" href="#stats.allocated">
+ "<code class="mallctl">stats.allocated</code>"
+ </a>.
This does not include <a class="link" href="#stats.arenas.i.pdirty">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.pdirty</code></quote></a>, nor pages
- entirely devoted to allocator metadata.</p></dd><dt><a name="stats.metadata"/><span class="term">
- <quote><code class="mallctl">stats.metadata</code></quote>
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.pdirty</code>"
+ </a>, nor pages
+ entirely devoted to allocator metadata.</p></dd><dt><a name="stats.metadata"></a><span class="term">
+
+ "<code class="mallctl">stats.metadata</code>"
+
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Total number of bytes dedicated to metadata, which
comprise base allocations used for bootstrap-sensitive internal
- allocator data structures, arena chunk headers (see <a class="link" href="#stats.arenas.i.metadata.mapped"><quote><code class="mallctl">stats.arenas.&lt;i&gt;.metadata.mapped</code></quote></a>),
- and internal allocations (see <a class="link" href="#stats.arenas.i.metadata.allocated"><quote><code class="mallctl">stats.arenas.&lt;i&gt;.metadata.allocated</code></quote></a>).</p></dd><dt><a name="stats.resident"/><span class="term">
- <quote><code class="mallctl">stats.resident</code></quote>
+ allocator data structures, arena chunk headers (see <a class="link" href="#stats.arenas.i.metadata.mapped">
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.metadata.mapped</code>"
+ </a>),
+ and internal allocations (see <a class="link" href="#stats.arenas.i.metadata.allocated">
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.metadata.allocated</code>"
+ </a>).</p></dd><dt><a name="stats.resident"></a><span class="term">
+
+ "<code class="mallctl">stats.resident</code>"
+
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
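Because the stats.* values above are only refreshed when the epoch mallctl is written, the usual reading pattern looks like the following sketch (same unprefixed jemalloc 4.x assumption as the earlier examples):

#include <jemalloc/jemalloc.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch: refresh and read the global allocation statistics. */
static void print_memory_stats(void) {
    uint64_t epoch = 1;
    size_t sz = sizeof(epoch);

    /* Writing "epoch" refreshes the cached statistics snapshot. */
    mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));

    size_t allocated, active, metadata, resident, mapped;
    sz = sizeof(size_t);
    if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0 &&
        mallctl("stats.active", &active, &sz, NULL, 0) == 0 &&
        mallctl("stats.metadata", &metadata, &sz, NULL, 0) == 0 &&
        mallctl("stats.resident", &resident, &sz, NULL, 0) == 0 &&
        mallctl("stats.mapped", &mapped, &sz, NULL, 0) == 0) {
        printf("allocated %zu, active %zu, metadata %zu, resident %zu, mapped %zu\n",
            allocated, active, metadata, resident, mapped);
    }
}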
@@ -1137,393 +1357,436 @@ typedef struct {
pages. This is a maximum rather than precise because pages may not
actually be physically resident if they correspond to demand-zeroed
virtual memory that has not yet been touched. This is a multiple of the
- page size, and is larger than <a class="link" href="#stats.active"><quote><code class="mallctl">stats.active</code></quote></a>.</p></dd><dt><a name="stats.mapped"/><span class="term">
- <quote><code class="mallctl">stats.mapped</code></quote>
+ page size, and is larger than <a class="link" href="#stats.active">
+ "<code class="mallctl">stats.active</code>"
+ </a>.</p></dd><dt><a name="stats.mapped"></a><span class="term">
+
+ "<code class="mallctl">stats.mapped</code>"
+
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Total number of bytes in active chunks mapped by the
allocator. This is a multiple of the chunk size, and is larger than
- <a class="link" href="#stats.active"><quote><code class="mallctl">stats.active</code></quote></a>.
+ <a class="link" href="#stats.active">
+ "<code class="mallctl">stats.active</code>"
+ </a>.
This does not include inactive chunks, even those that contain unused
dirty pages, which means that there is no strict ordering between this
- and <a class="link" href="#stats.resident"><quote><code class="mallctl">stats.resident</code></quote></a>.</p></dd><dt><a name="stats.retained"/><span class="term">
- <quote><code class="mallctl">stats.retained</code></quote>
- (<span class="type">size_t</span>)
- <code class="literal">r-</code>
- [<code class="option">--enable-stats</code>]
- </span></dt><dd><p>Total number of bytes in virtual memory mappings that
- were retained rather than being returned to the operating system via
- e.g. <span class="citerefentry"><span class="refentrytitle">munmap</span>(2)</span>. Retained virtual memory is
- typically untouched, decommitted, or purged, so it has no strongly
- associated physical memory (see <a class="link" href="#arena.i.chunk_hooks">chunk hooks</a> for details). Retained
- memory is excluded from mapped memory statistics, e.g. <a class="link" href="#stats.mapped"><quote><code class="mallctl">stats.mapped</code></quote></a>.
- </p></dd><dt><a name="stats.arenas.i.dss"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.dss</code></quote>
+ and <a class="link" href="#stats.resident">
+ "<code class="mallctl">stats.resident</code>"
+ </a>.</p></dd><dt><a name="stats.arenas.i.dss"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.dss</code>"
+
(<span class="type">const char *</span>)
<code class="literal">r-</code>
</span></dt><dd><p>dss (<span class="citerefentry"><span class="refentrytitle">sbrk</span>(2)</span>) allocation precedence as
- related to <span class="citerefentry"><span class="refentrytitle">mmap</span>(2)</span> allocation. See <a class="link" href="#opt.dss"><quote><code class="mallctl">opt.dss</code></quote></a> for details.
- </p></dd><dt><a name="stats.arenas.i.lg_dirty_mult"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.lg_dirty_mult</code></quote>
+ related to <span class="citerefentry"><span class="refentrytitle">mmap</span>(2)</span> allocation. See <a class="link" href="#opt.dss">
+ "<code class="mallctl">opt.dss</code>"
+ </a> for details.
+ </p></dd><dt><a name="stats.arenas.i.lg_dirty_mult"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.lg_dirty_mult</code>"
+
(<span class="type">ssize_t</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Minimum ratio (log base 2) of active to dirty pages.
- See <a class="link" href="#opt.lg_dirty_mult"><quote><code class="mallctl">opt.lg_dirty_mult</code></quote></a>
- for details.</p></dd><dt><a name="stats.arenas.i.decay_time"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.decay_time</code></quote>
- (<span class="type">ssize_t</span>)
- <code class="literal">r-</code>
- </span></dt><dd><p>Approximate time in seconds from the creation of a set
- of unused dirty pages until an equivalent set of unused dirty pages is
- purged and/or reused. See <a class="link" href="#opt.decay_time"><quote><code class="mallctl">opt.decay_time</code></quote></a>
- for details.</p></dd><dt><a name="stats.arenas.i.nthreads"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.nthreads</code></quote>
+ See <a class="link" href="#opt.lg_dirty_mult">
+ "<code class="mallctl">opt.lg_dirty_mult</code>"
+ </a>
+ for details.</p></dd><dt><a name="stats.arenas.i.nthreads"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.nthreads</code>"
+
(<span class="type">unsigned</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Number of threads currently assigned to
- arena.</p></dd><dt><a name="stats.arenas.i.pactive"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.pactive</code></quote>
+ arena.</p></dd><dt><a name="stats.arenas.i.pactive"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.pactive</code>"
+
(<span class="type">size_t</span>)
<code class="literal">r-</code>
- </span></dt><dd><p>Number of pages in active runs.</p></dd><dt><a name="stats.arenas.i.pdirty"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.pdirty</code></quote>
+ </span></dt><dd><p>Number of pages in active runs.</p></dd><dt><a name="stats.arenas.i.pdirty"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.pdirty</code>"
+
(<span class="type">size_t</span>)
<code class="literal">r-</code>
</span></dt><dd><p>Number of pages within unused runs that are potentially
- dirty, and for which <code class="function">madvise<em class="parameter"><code>...</code></em>
- <em class="parameter"><code><code class="constant">MADV_DONTNEED</code></code></em></code> or
- similar has not been called.</p></dd><dt><a name="stats.arenas.i.mapped"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.mapped</code></quote>
- (<span class="type">size_t</span>)
- <code class="literal">r-</code>
- [<code class="option">--enable-stats</code>]
- </span></dt><dd><p>Number of mapped bytes.</p></dd><dt><a name="stats.arenas.i.retained"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.retained</code></quote>
+ dirty, and for which <code class="function">madvise</code>(<em class="parameter"><code>...</code></em>,
+ <em class="parameter"><code><code class="constant">MADV_DONTNEED</code></code></em>) or
+ similar has not been called.</p></dd><dt><a name="stats.arenas.i.mapped"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.mapped</code>"
+
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
- </span></dt><dd><p>Number of retained bytes. See <a class="link" href="#stats.retained"><quote><code class="mallctl">stats.retained</code></quote></a> for
- details.</p></dd><dt><a name="stats.arenas.i.metadata.mapped"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.metadata.mapped</code></quote>
+ </span></dt><dd><p>Number of mapped bytes.</p></dd><dt><a name="stats.arenas.i.metadata.mapped"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.metadata.mapped</code>"
+
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Number of mapped bytes in arena chunk headers, which
- track the states of the non-metadata pages.</p></dd><dt><a name="stats.arenas.i.metadata.allocated"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.metadata.allocated</code></quote>
+ track the states of the non-metadata pages.</p></dd><dt><a name="stats.arenas.i.metadata.allocated"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.metadata.allocated</code>"
+
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Number of bytes dedicated to internal allocations.
Internal allocations differ from application-originated allocations in
that they are for internal use, and that they are omitted from heap
- profiles. This statistic is reported separately from <a class="link" href="#stats.metadata"><quote><code class="mallctl">stats.metadata</code></quote></a> and
- <a class="link" href="#stats.arenas.i.metadata.mapped"><quote><code class="mallctl">stats.arenas.&lt;i&gt;.metadata.mapped</code></quote></a>
- because it overlaps with e.g. the <a class="link" href="#stats.allocated"><quote><code class="mallctl">stats.allocated</code></quote></a> and
- <a class="link" href="#stats.active"><quote><code class="mallctl">stats.active</code></quote></a>
+ profiles. This statistic is reported separately from <a class="link" href="#stats.metadata">
+ "<code class="mallctl">stats.metadata</code>"
+ </a> and
+ <a class="link" href="#stats.arenas.i.metadata.mapped">
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.metadata.mapped</code>"
+ </a>
+ because it overlaps with e.g. the <a class="link" href="#stats.allocated">
+ "<code class="mallctl">stats.allocated</code>"
+ </a> and
+ <a class="link" href="#stats.active">
+ "<code class="mallctl">stats.active</code>"
+ </a>
statistics, whereas the other metadata statistics do
- not.</p></dd><dt><a name="stats.arenas.i.npurge"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.npurge</code></quote>
+ not.</p></dd><dt><a name="stats.arenas.i.npurge"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.npurge</code>"
+
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Number of dirty page purge sweeps performed.
- </p></dd><dt><a name="stats.arenas.i.nmadvise"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.nmadvise</code></quote>
+ </p></dd><dt><a name="stats.arenas.i.nmadvise"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.nmadvise</code>"
+
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
- </span></dt><dd><p>Number of <code class="function">madvise<em class="parameter"><code>...</code></em>
- <em class="parameter"><code><code class="constant">MADV_DONTNEED</code></code></em></code> or
- similar calls made to purge dirty pages.</p></dd><dt><a name="stats.arenas.i.purged"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.purged</code></quote>
+ </span></dt><dd><p>Number of <code class="function">madvise</code>(<em class="parameter"><code>...</code></em>,
+ <em class="parameter"><code><code class="constant">MADV_DONTNEED</code></code></em>) or
+ similar calls made to purge dirty pages.</p></dd><dt><a name="stats.arenas.i.purged"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.purged</code>"
+
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
- </span></dt><dd><p>Number of pages purged.</p></dd><dt><a name="stats.arenas.i.small.allocated"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.small.allocated</code></quote>
+ </span></dt><dd><p>Number of pages purged.</p></dd><dt><a name="stats.arenas.i.small.allocated"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.small.allocated</code>"
+
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Number of bytes currently allocated by small objects.
- </p></dd><dt><a name="stats.arenas.i.small.nmalloc"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.small.nmalloc</code></quote>
+ </p></dd><dt><a name="stats.arenas.i.small.nmalloc"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.small.nmalloc</code>"
+
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of allocation requests served by
- small bins.</p></dd><dt><a name="stats.arenas.i.small.ndalloc"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.small.ndalloc</code></quote>
+ small bins.</p></dd><dt><a name="stats.arenas.i.small.ndalloc"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.small.ndalloc</code>"
+
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of small objects returned to bins.
- </p></dd><dt><a name="stats.arenas.i.small.nrequests"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.small.nrequests</code></quote>
+ </p></dd><dt><a name="stats.arenas.i.small.nrequests"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.small.nrequests</code>"
+
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of small allocation requests.
- </p></dd><dt><a name="stats.arenas.i.large.allocated"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.large.allocated</code></quote>
+ </p></dd><dt><a name="stats.arenas.i.large.allocated"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.large.allocated</code>"
+
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Number of bytes currently allocated by large objects.
- </p></dd><dt><a name="stats.arenas.i.large.nmalloc"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.large.nmalloc</code></quote>
+ </p></dd><dt><a name="stats.arenas.i.large.nmalloc"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.large.nmalloc</code>"
+
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of large allocation requests served
- directly by the arena.</p></dd><dt><a name="stats.arenas.i.large.ndalloc"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.large.ndalloc</code></quote>
+ directly by the arena.</p></dd><dt><a name="stats.arenas.i.large.ndalloc"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.large.ndalloc</code>"
+
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of large deallocation requests served
- directly by the arena.</p></dd><dt><a name="stats.arenas.i.large.nrequests"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.large.nrequests</code></quote>
+ directly by the arena.</p></dd><dt><a name="stats.arenas.i.large.nrequests"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.large.nrequests</code>"
+
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of large allocation requests.
- </p></dd><dt><a name="stats.arenas.i.huge.allocated"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.huge.allocated</code></quote>
+ </p></dd><dt><a name="stats.arenas.i.huge.allocated"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.huge.allocated</code>"
+
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Number of bytes currently allocated by huge objects.
- </p></dd><dt><a name="stats.arenas.i.huge.nmalloc"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.huge.nmalloc</code></quote>
+ </p></dd><dt><a name="stats.arenas.i.huge.nmalloc"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.huge.nmalloc</code>"
+
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of huge allocation requests served
- directly by the arena.</p></dd><dt><a name="stats.arenas.i.huge.ndalloc"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.huge.ndalloc</code></quote>
+ directly by the arena.</p></dd><dt><a name="stats.arenas.i.huge.ndalloc"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.huge.ndalloc</code>"
+
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of huge deallocation requests served
- directly by the arena.</p></dd><dt><a name="stats.arenas.i.huge.nrequests"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.huge.nrequests</code></quote>
+ directly by the arena.</p></dd><dt><a name="stats.arenas.i.huge.nrequests"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.huge.nrequests</code>"
+
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of huge allocation requests.
- </p></dd><dt><a name="stats.arenas.i.bins.j.nmalloc"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nmalloc</code></quote>
+ </p></dd><dt><a name="stats.arenas.i.bins.j.nmalloc"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nmalloc</code>"
+
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of allocations served by bin.
- </p></dd><dt><a name="stats.arenas.i.bins.j.ndalloc"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.ndalloc</code></quote>
+ </p></dd><dt><a name="stats.arenas.i.bins.j.ndalloc"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.ndalloc</code>"
+
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of allocations returned to bin.
- </p></dd><dt><a name="stats.arenas.i.bins.j.nrequests"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nrequests</code></quote>
+ </p></dd><dt><a name="stats.arenas.i.bins.j.nrequests"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nrequests</code>"
+
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of allocation
- requests.</p></dd><dt><a name="stats.arenas.i.bins.j.curregs"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.curregs</code></quote>
+ requests.</p></dd><dt><a name="stats.arenas.i.bins.j.curregs"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.curregs</code>"
+
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Current number of regions for this size
- class.</p></dd><dt><a name="stats.arenas.i.bins.j.nfills"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nfills</code></quote>
+ class.</p></dd><dt><a name="stats.arenas.i.bins.j.nfills"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nfills</code>"
+
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code> <code class="option">--enable-tcache</code>]
- </span></dt><dd><p>Cumulative number of tcache fills.</p></dd><dt><a name="stats.arenas.i.bins.j.nflushes"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nflushes</code></quote>
+ </span></dt><dd><p>Cumulative number of tcache fills.</p></dd><dt><a name="stats.arenas.i.bins.j.nflushes"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nflushes</code>"
+
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code> <code class="option">--enable-tcache</code>]
- </span></dt><dd><p>Cumulative number of tcache flushes.</p></dd><dt><a name="stats.arenas.i.bins.j.nruns"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nruns</code></quote>
+ </span></dt><dd><p>Cumulative number of tcache flushes.</p></dd><dt><a name="stats.arenas.i.bins.j.nruns"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nruns</code>"
+
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
- </span></dt><dd><p>Cumulative number of runs created.</p></dd><dt><a name="stats.arenas.i.bins.j.nreruns"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nreruns</code></quote>
+ </span></dt><dd><p>Cumulative number of runs created.</p></dd><dt><a name="stats.arenas.i.bins.j.nreruns"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nreruns</code>"
+
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of times the current run from which
- to allocate changed.</p></dd><dt><a name="stats.arenas.i.bins.j.curruns"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.curruns</code></quote>
+ to allocate changed.</p></dd><dt><a name="stats.arenas.i.bins.j.curruns"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.curruns</code>"
+
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
- </span></dt><dd><p>Current number of runs.</p></dd><dt><a name="stats.arenas.i.lruns.j.nmalloc"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.nmalloc</code></quote>
+ </span></dt><dd><p>Current number of runs.</p></dd><dt><a name="stats.arenas.i.lruns.j.nmalloc"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.nmalloc</code>"
+
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of allocation requests for this size
- class served directly by the arena.</p></dd><dt><a name="stats.arenas.i.lruns.j.ndalloc"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.ndalloc</code></quote>
+ class served directly by the arena.</p></dd><dt><a name="stats.arenas.i.lruns.j.ndalloc"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.ndalloc</code>"
+
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of deallocation requests for this
- size class served directly by the arena.</p></dd><dt><a name="stats.arenas.i.lruns.j.nrequests"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.nrequests</code></quote>
+ size class served directly by the arena.</p></dd><dt><a name="stats.arenas.i.lruns.j.nrequests"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.nrequests</code>"
+
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of allocation requests for this size
- class.</p></dd><dt><a name="stats.arenas.i.lruns.j.curruns"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.curruns</code></quote>
+ class.</p></dd><dt><a name="stats.arenas.i.lruns.j.curruns"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.lruns.&lt;j&gt;.curruns</code>"
+
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Current number of runs for this size class.
- </p></dd><dt><a name="stats.arenas.i.hchunks.j.nmalloc"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.nmalloc</code></quote>
+ </p></dd><dt><a name="stats.arenas.i.hchunks.j.nmalloc"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.nmalloc</code>"
+
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of allocation requests for this size
- class served directly by the arena.</p></dd><dt><a name="stats.arenas.i.hchunks.j.ndalloc"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.ndalloc</code></quote>
+ class served directly by the arena.</p></dd><dt><a name="stats.arenas.i.hchunks.j.ndalloc"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.ndalloc</code>"
+
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of deallocation requests for this
- size class served directly by the arena.</p></dd><dt><a name="stats.arenas.i.hchunks.j.nrequests"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.nrequests</code></quote>
+ size class served directly by the arena.</p></dd><dt><a name="stats.arenas.i.hchunks.j.nrequests"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.nrequests</code>"
+
(<span class="type">uint64_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Cumulative number of allocation requests for this size
- class.</p></dd><dt><a name="stats.arenas.i.hchunks.j.curhchunks"/><span class="term">
- <quote><code class="mallctl">stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.curhchunks</code></quote>
+ class.</p></dd><dt><a name="stats.arenas.i.hchunks.j.curhchunks"></a><span class="term">
+
+ "<code class="mallctl">stats.arenas.&lt;i&gt;.hchunks.&lt;j&gt;.curhchunks</code>"
+
(<span class="type">size_t</span>)
<code class="literal">r-</code>
[<code class="option">--enable-stats</code>]
</span></dt><dd><p>Current number of huge allocations for this size class.
- </p></dd></dl></div></div><div class="refsect1"><a name="heap_profile_format"/><h2>HEAP PROFILE FORMAT</h2><p>Although the heap profiling functionality was originally designed to
- be compatible with the
- <span class="command"><strong>pprof</strong></span> command that is developed as part of the <a class="ulink" href="http://code.google.com/p/gperftools/" target="_top">gperftools
- package</a>, the addition of per thread heap profiling functionality
- required a different heap profile format. The <span class="command"><strong>jeprof</strong></span>
- command is derived from <span class="command"><strong>pprof</strong></span>, with enhancements to
- support the heap profile format described here.</p><p>In the following hypothetical heap profile, <code class="constant">[...]</code>
- indicates elision for the sake of compactness. </p><pre class="programlisting">
-heap_v2/524288
- t*: 28106: 56637512 [0: 0]
- [...]
- t3: 352: 16777344 [0: 0]
- [...]
- t99: 17754: 29341640 [0: 0]
- [...]
-@ 0x5f86da8 0x5f5a1dc [...] 0x29e4d4e 0xa200316 0xabb2988 [...]
- t*: 13: 6688 [0: 0]
- t3: 12: 6496 [0: 0]
- t99: 1: 192 [0: 0]
-[...]
-
-MAPPED_LIBRARIES:
-[...]</pre><p> The following matches the above heap profile, but most
-tokens are replaced with <code class="constant">&lt;description&gt;</code> to indicate
-descriptions of the corresponding fields. </p><pre class="programlisting">
-&lt;heap_profile_format_version&gt;/&lt;mean_sample_interval&gt;
- &lt;aggregate&gt;: &lt;curobjs&gt;: &lt;curbytes&gt; [&lt;cumobjs&gt;: &lt;cumbytes&gt;]
- [...]
- &lt;thread_3_aggregate&gt;: &lt;curobjs&gt;: &lt;curbytes&gt;[&lt;cumobjs&gt;: &lt;cumbytes&gt;]
- [...]
- &lt;thread_99_aggregate&gt;: &lt;curobjs&gt;: &lt;curbytes&gt;[&lt;cumobjs&gt;: &lt;cumbytes&gt;]
- [...]
-@ &lt;top_frame&gt; &lt;frame&gt; [...] &lt;frame&gt; &lt;frame&gt; &lt;frame&gt; [...]
- &lt;backtrace_aggregate&gt;: &lt;curobjs&gt;: &lt;curbytes&gt; [&lt;cumobjs&gt;: &lt;cumbytes&gt;]
- &lt;backtrace_thread_3&gt;: &lt;curobjs&gt;: &lt;curbytes&gt; [&lt;cumobjs&gt;: &lt;cumbytes&gt;]
- &lt;backtrace_thread_99&gt;: &lt;curobjs&gt;: &lt;curbytes&gt; [&lt;cumobjs&gt;: &lt;cumbytes&gt;]
-[...]
-
-MAPPED_LIBRARIES:
-&lt;/proc/&lt;pid&gt;/maps&gt;</pre></div><div class="refsect1"><a name="debugging_malloc_problems"/><h2>DEBUGGING MALLOC PROBLEMS</h2><p>When debugging, it is a good idea to configure/build jemalloc with
+ </p></dd></dl></div></div><div class="refsect1"><a name="debugging_malloc_problems"></a><h2>DEBUGGING MALLOC PROBLEMS</h2><p>When debugging, it is a good idea to configure/build jemalloc with
the <code class="option">--enable-debug</code> and <code class="option">--enable-fill</code>
options, and recompile the program with suitable options and symbols for
debugger support. When so configured, jemalloc incorporates a wide variety
of run-time assertions that catch application errors such as double-free,
- write-after-free, etc.</p><p>Programs often accidentally depend on <span class="quote">“<span class="quote">uninitialized</span>”</span>
+ write-after-free, etc.</p><p>Programs often accidentally depend on &#8220;uninitialized&#8221;
memory actually being filled with zero bytes. Junk filling
- (see the <a class="link" href="#opt.junk"><quote><code class="mallctl">opt.junk</code></quote></a>
+ (see the <a class="link" href="#opt.junk">
+ "<code class="mallctl">opt.junk</code>"
+ </a>
option) tends to expose such bugs in the form of obviously incorrect
results and/or coredumps. Conversely, zero
- filling (see the <a class="link" href="#opt.zero"><quote><code class="mallctl">opt.zero</code></quote></a> option) eliminates
+ filling (see the <a class="link" href="#opt.zero">
+ "<code class="mallctl">opt.zero</code>"
+ </a> option) eliminates
the symptoms of such bugs. Between these two options, it is usually
possible to quickly detect, diagnose, and eliminate such bugs.</p><p>This implementation does not provide much detail about the problems
it detects, because the performance impact for storing such information
would be prohibitive. However, jemalloc does integrate with the most
excellent <a class="ulink" href="http://valgrind.org/" target="_top">Valgrind</a> tool if the
- <code class="option">--enable-valgrind</code> configuration option is enabled.</p></div><div class="refsect1"><a name="diagnostic_messages"/><h2>DIAGNOSTIC MESSAGES</h2><p>If any of the memory allocation/deallocation functions detect an
+ <code class="option">--enable-valgrind</code> configuration option is enabled.</p></div><div class="refsect1"><a name="diagnostic_messages"></a><h2>DIAGNOSTIC MESSAGES</h2><p>If any of the memory allocation/deallocation functions detect an
error or warning condition, a message will be printed to file descriptor
<code class="constant">STDERR_FILENO</code>. Errors will result in the process
- dumping core. If the <a class="link" href="#opt.abort"><quote><code class="mallctl">opt.abort</code></quote></a> option is set, most
+ dumping core. If the <a class="link" href="#opt.abort">
+ "<code class="mallctl">opt.abort</code>"
+ </a> option is set, most
warnings are treated as errors.</p><p>The <code class="varname">malloc_message</code> variable allows the programmer
to override the function which emits the text strings forming the errors
and warnings if for some reason the <code class="constant">STDERR_FILENO</code> file
descriptor is not suitable for this.
- <code class="function">malloc_message()</code> takes the
+ <code class="function">malloc_message</code>(<em class="parameter"><code></code></em>) takes the
<em class="parameter"><code>cbopaque</code></em> pointer argument that is
<code class="constant">NULL</code> unless overridden by the arguments in a call to
- <code class="function">malloc_stats_print()</code>, followed by a string
+ <code class="function">malloc_stats_print</code>(<em class="parameter"><code></code></em>), followed by a string
pointer. Please note that doing anything which tries to allocate memory in
this function is likely to result in a crash or deadlock.</p><p>All messages are prefixed by
- <span class="quote">“<span class="quote"><code class="computeroutput">&lt;jemalloc&gt;: </code></span>”</span>.</p></div><div class="refsect1"><a name="return_values"/><h2>RETURN VALUES</h2><div class="refsect2"><a name="idm45291897515152"/><h3>Standard API</h3><p>The <code class="function">malloc()</code> and
- <code class="function">calloc()</code> functions return a pointer to the
+ &#8220;<code class="computeroutput">&lt;jemalloc&gt;: </code>&#8221;.</p></div><div class="refsect1"><a name="return_values"></a><h2>RETURN VALUES</h2><div class="refsect2"><a name="idp46949776"></a><h3>Standard API</h3><p>The <code class="function">malloc</code>(<em class="parameter"><code></code></em>) and
+ <code class="function">calloc</code>(<em class="parameter"><code></code></em>) functions return a pointer to the
allocated memory if successful; otherwise a <code class="constant">NULL</code>
pointer is returned and <code class="varname">errno</code> is set to
- <span class="errorname">ENOMEM</span>.</p><p>The <code class="function">posix_memalign()</code> function
+ <span class="errorname">ENOMEM</span>.</p><p>The <code class="function">posix_memalign</code>(<em class="parameter"><code></code></em>) function
returns the value 0 if successful; otherwise it returns an error value.
- The <code class="function">posix_memalign()</code> function will fail
+ The <code class="function">posix_memalign</code>(<em class="parameter"><code></code></em>) function will fail
if:
</p><div class="variablelist"><dl class="variablelist"><dt><span class="term"><span class="errorname">EINVAL</span></span></dt><dd><p>The <em class="parameter"><code>alignment</code></em> parameter is
not a power of 2 at least as large as
<code class="code">sizeof(<span class="type">void *</span>)</code>.
</p></dd><dt><span class="term"><span class="errorname">ENOMEM</span></span></dt><dd><p>Memory allocation error.</p></dd></dl></div><p>
- </p><p>The <code class="function">aligned_alloc()</code> function returns
+ </p><p>The <code class="function">aligned_alloc</code>(<em class="parameter"><code></code></em>) function returns
a pointer to the allocated memory if successful; otherwise a
<code class="constant">NULL</code> pointer is returned and
<code class="varname">errno</code> is set. The
- <code class="function">aligned_alloc()</code> function will fail if:
+ <code class="function">aligned_alloc</code>(<em class="parameter"><code></code></em>) function will fail if:
</p><div class="variablelist"><dl class="variablelist"><dt><span class="term"><span class="errorname">EINVAL</span></span></dt><dd><p>The <em class="parameter"><code>alignment</code></em> parameter is
not a power of 2.
</p></dd><dt><span class="term"><span class="errorname">ENOMEM</span></span></dt><dd><p>Memory allocation error.</p></dd></dl></div><p>
- </p><p>The <code class="function">realloc()</code> function returns a
+ </p><p>The <code class="function">realloc</code>(<em class="parameter"><code></code></em>) function returns a
pointer, possibly identical to <em class="parameter"><code>ptr</code></em>, to the
allocated memory if successful; otherwise a <code class="constant">NULL</code>
pointer is returned, and <code class="varname">errno</code> is set to
<span class="errorname">ENOMEM</span> if the error was the result of an
- allocation failure. The <code class="function">realloc()</code>
+ allocation failure. The <code class="function">realloc</code>(<em class="parameter"><code></code></em>)
function always leaves the original buffer intact when an error occurs.
- </p><p>The <code class="function">free()</code> function returns no
- value.</p></div><div class="refsect2"><a name="idm45291897493664"/><h3>Non-standard API</h3><p>The <code class="function">mallocx()</code> and
- <code class="function">rallocx()</code> functions return a pointer to
+ </p><p>The <code class="function">free</code>(<em class="parameter"><code></code></em>) function returns no
+ value.</p></div><div class="refsect2"><a name="idp46974576"></a><h3>Non-standard API</h3><p>The <code class="function">mallocx</code>(<em class="parameter"><code></code></em>) and
+ <code class="function">rallocx</code>(<em class="parameter"><code></code></em>) functions return a pointer to
the allocated memory if successful; otherwise a <code class="constant">NULL</code>
pointer is returned to indicate insufficient contiguous memory was
- available to service the allocation request. </p><p>The <code class="function">xallocx()</code> function returns the
+ available to service the allocation request. </p><p>The <code class="function">xallocx</code>(<em class="parameter"><code></code></em>) function returns the
real size of the resulting resized allocation pointed to by
<em class="parameter"><code>ptr</code></em>, which is a value less than
<em class="parameter"><code>size</code></em> if the allocation could not be adequately
- grown in place. </p><p>The <code class="function">sallocx()</code> function returns the
+ grown in place. </p><p>The <code class="function">sallocx</code>(<em class="parameter"><code></code></em>) function returns the
real size of the allocation pointed to by <em class="parameter"><code>ptr</code></em>.
- </p><p>The <code class="function">nallocx()</code> returns the real size
+ </p><p>The <code class="function">nallocx</code>(<em class="parameter"><code></code></em>) returns the real size
that would result from a successful equivalent
- <code class="function">mallocx()</code> function call, or zero if
- insufficient memory is available to perform the size computation. </p><p>The <code class="function">mallctl()</code>,
- <code class="function">mallctlnametomib()</code>, and
- <code class="function">mallctlbymib()</code> functions return 0 on
+ <code class="function">mallocx</code>(<em class="parameter"><code></code></em>) function call, or zero if
+ insufficient memory is available to perform the size computation. </p><p>The <code class="function">mallctl</code>(<em class="parameter"><code></code></em>),
+ <code class="function">mallctlnametomib</code>(<em class="parameter"><code></code></em>), and
+ <code class="function">mallctlbymib</code>(<em class="parameter"><code></code></em>) functions return 0 on
success; otherwise they return an error value. The functions will fail
if:
</p><div class="variablelist"><dl class="variablelist"><dt><span class="term"><span class="errorname">EINVAL</span></span></dt><dd><p><em class="parameter"><code>newp</code></em> is not
@@ -1535,28 +1798,28 @@ MAPPED_LIBRARIES:
value.</p></dd><dt><span class="term"><span class="errorname">EPERM</span></span></dt><dd><p>Attempt to read or write void value, or attempt to
write read-only value.</p></dd><dt><span class="term"><span class="errorname">EAGAIN</span></span></dt><dd><p>A memory allocation failure
occurred.</p></dd><dt><span class="term"><span class="errorname">EFAULT</span></span></dt><dd><p>An interface with side effects failed in some way
- not directly related to <code class="function">mallctl*()</code>
+ not directly related to <code class="function">mallctl*</code>(<em class="parameter"><code></code></em>)
read/write processing.</p></dd></dl></div><p>
- </p><p>The <code class="function">malloc_usable_size()</code> function
+ </p><p>The <code class="function">malloc_usable_size</code>(<em class="parameter"><code></code></em>) function
returns the usable size of the allocation pointed to by
- <em class="parameter"><code>ptr</code></em>. </p></div></div><div class="refsect1"><a name="environment"/><h2>ENVIRONMENT</h2><p>The following environment variable affects the execution of the
+ <em class="parameter"><code>ptr</code></em>. </p></div></div><div class="refsect1"><a name="environment"></a><h2>ENVIRONMENT</h2><p>The following environment variable affects the execution of the
allocation functions:
</p><div class="variablelist"><dl class="variablelist"><dt><span class="term"><code class="envar">MALLOC_CONF</code></span></dt><dd><p>If the environment variable
<code class="envar">MALLOC_CONF</code> is set, the characters it contains
will be interpreted as options.</p></dd></dl></div><p>
- </p></div><div class="refsect1"><a name="examples"/><h2>EXAMPLES</h2><p>To dump core whenever a problem occurs:
+ </p></div><div class="refsect1"><a name="examples"></a><h2>EXAMPLES</h2><p>To dump core whenever a problem occurs:
</p><pre class="screen">ln -s 'abort:true' /etc/malloc.conf</pre><p>
</p><p>To specify in the source a chunk size that is 16 MiB:
</p><pre class="programlisting">
-malloc_conf = "lg_chunk:24";</pre></div><div class="refsect1"><a name="see_also"/><h2>SEE ALSO</h2><p><span class="citerefentry"><span class="refentrytitle">madvise</span>(2)</span>,
+malloc_conf = "lg_chunk:24";</pre></div><div class="refsect1"><a name="see_also"></a><h2>SEE ALSO</h2><p><span class="citerefentry"><span class="refentrytitle">madvise</span>(2)</span>,
<span class="citerefentry"><span class="refentrytitle">mmap</span>(2)</span>,
<span class="citerefentry"><span class="refentrytitle">sbrk</span>(2)</span>,
<span class="citerefentry"><span class="refentrytitle">utrace</span>(2)</span>,
<span class="citerefentry"><span class="refentrytitle">alloca</span>(3)</span>,
<span class="citerefentry"><span class="refentrytitle">atexit</span>(3)</span>,
- <span class="citerefentry"><span class="refentrytitle">getpagesize</span>(3)</span></p></div><div class="refsect1"><a name="standards"/><h2>STANDARDS</h2><p>The <code class="function">malloc()</code>,
- <code class="function">calloc()</code>,
- <code class="function">realloc()</code>, and
- <code class="function">free()</code> functions conform to ISO/IEC
- 9899:1990 (<span class="quote">“<span class="quote">ISO C90</span>”</span>).</p><p>The <code class="function">posix_memalign()</code> function conforms
- to IEEE Std 1003.1-2001 (<span class="quote">“<span class="quote">POSIX.1</span>”</span>).</p></div></div></body></html> \ No newline at end of file
+ <span class="citerefentry"><span class="refentrytitle">getpagesize</span>(3)</span></p></div><div class="refsect1"><a name="standards"></a><h2>STANDARDS</h2><p>The <code class="function">malloc</code>(<em class="parameter"><code></code></em>),
+ <code class="function">calloc</code>(<em class="parameter"><code></code></em>),
+ <code class="function">realloc</code>(<em class="parameter"><code></code></em>), and
+ <code class="function">free</code>(<em class="parameter"><code></code></em>) functions conform to ISO/IEC
+ 9899:1990 (&#8220;ISO C90&#8221;).</p><p>The <code class="function">posix_memalign</code>(<em class="parameter"><code></code></em>) function conforms
+ to IEEE Std 1003.1-2001 (&#8220;POSIX.1&#8221;).</p></div></div></body></html>
diff --git a/deps/jemalloc/doc/jemalloc.xml.in b/deps/jemalloc/doc/jemalloc.xml.in
index d9c83452d..8fc774b18 100644
--- a/deps/jemalloc/doc/jemalloc.xml.in
+++ b/deps/jemalloc/doc/jemalloc.xml.in
@@ -52,7 +52,7 @@
<title>LIBRARY</title>
<para>This manual describes jemalloc @jemalloc_version@. More information
can be found at the <ulink
- url="http://jemalloc.net/">jemalloc website</ulink>.</para>
+ url="http://www.canonware.com/jemalloc/">jemalloc website</ulink>.</para>
</refsect1>
<refsynopsisdiv>
<title>SYNOPSIS</title>
@@ -180,20 +180,20 @@
<refsect2>
<title>Standard API</title>
- <para>The <function>malloc()</function> function allocates
+ <para>The <function>malloc<parameter/></function> function allocates
<parameter>size</parameter> bytes of uninitialized memory. The allocated
space is suitably aligned (after possible pointer coercion) for storage
of any type of object.</para>
- <para>The <function>calloc()</function> function allocates
+ <para>The <function>calloc<parameter/></function> function allocates
space for <parameter>number</parameter> objects, each
<parameter>size</parameter> bytes in length. The result is identical to
- calling <function>malloc()</function> with an argument of
+ calling <function>malloc<parameter/></function> with an argument of
<parameter>number</parameter> * <parameter>size</parameter>, with the
exception that the allocated memory is explicitly initialized to zero
bytes.</para>
- <para>The <function>posix_memalign()</function> function
+ <para>The <function>posix_memalign<parameter/></function> function
allocates <parameter>size</parameter> bytes of memory such that the
allocation's base address is a multiple of
<parameter>alignment</parameter>, and returns the allocation in the value
@@ -201,7 +201,7 @@
<parameter>alignment</parameter> must be a power of 2 at least as large as
<code language="C">sizeof(<type>void *</type>)</code>.</para>
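
As an aside to the paragraph above, a minimal sketch of such a call (illustrative only, not part of this patch):

    #include <stdlib.h>
    #include <stdio.h>

    int main(void) {
        void *buf;
        /* 1024 bytes whose base address is a multiple of 64, a power of
         * two at least as large as sizeof(void *). */
        if (posix_memalign(&buf, 64, 1024) != 0) {
            fprintf(stderr, "posix_memalign failed\n");
            return 1;
        }
        free(buf);
        return 0;
    }
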
- <para>The <function>aligned_alloc()</function> function
+ <para>The <function>aligned_alloc<parameter/></function> function
allocates <parameter>size</parameter> bytes of memory such that the
allocation's base address is a multiple of
<parameter>alignment</parameter>. The requested
@@ -209,7 +209,7 @@
undefined if <parameter>size</parameter> is not an integral multiple of
<parameter>alignment</parameter>.</para>
- <para>The <function>realloc()</function> function changes the
+ <para>The <function>realloc<parameter/></function> function changes the
size of the previously allocated memory referenced by
<parameter>ptr</parameter> to <parameter>size</parameter> bytes. The
contents of the memory are unchanged up to the lesser of the new and old
@@ -217,26 +217,26 @@
portion of the memory are undefined. Upon success, the memory referenced
by <parameter>ptr</parameter> is freed and a pointer to the newly
allocated memory is returned. Note that
- <function>realloc()</function> may move the memory allocation,
+ <function>realloc<parameter/></function> may move the memory allocation,
resulting in a different return value than <parameter>ptr</parameter>.
If <parameter>ptr</parameter> is <constant>NULL</constant>, the
- <function>realloc()</function> function behaves identically to
- <function>malloc()</function> for the specified size.</para>
+ <function>realloc<parameter/></function> function behaves identically to
+ <function>malloc<parameter/></function> for the specified size.</para>
- <para>The <function>free()</function> function causes the
+ <para>The <function>free<parameter/></function> function causes the
allocated memory referenced by <parameter>ptr</parameter> to be made
available for future allocations. If <parameter>ptr</parameter> is
<constant>NULL</constant>, no action occurs.</para>
</refsect2>
<refsect2>
<title>Non-standard API</title>
- <para>The <function>mallocx()</function>,
- <function>rallocx()</function>,
- <function>xallocx()</function>,
- <function>sallocx()</function>,
- <function>dallocx()</function>,
- <function>sdallocx()</function>, and
- <function>nallocx()</function> functions all have a
+ <para>The <function>mallocx<parameter/></function>,
+ <function>rallocx<parameter/></function>,
+ <function>xallocx<parameter/></function>,
+ <function>sallocx<parameter/></function>,
+ <function>dallocx<parameter/></function>,
+ <function>sdallocx<parameter/></function>, and
+ <function>nallocx<parameter/></function> functions all have a
<parameter>flags</parameter> argument that can be used to specify
options. The functions only check the options that are contextually
relevant. Use bitwise or (<code language="C">|</code>) operations to
@@ -307,19 +307,21 @@
</variablelist>
</para>
- <para>The <function>mallocx()</function> function allocates at
+ <para>The <function>mallocx<parameter/></function> function allocates at
least <parameter>size</parameter> bytes of memory, and returns a pointer
to the base address of the allocation. Behavior is undefined if
- <parameter>size</parameter> is <constant>0</constant>.</para>
+ <parameter>size</parameter> is <constant>0</constant>, or if request size
+ overflows due to size class and/or alignment constraints.</para>
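
For context, a small sketch of combining flags with bitwise or, as described above (illustrative only; it assumes a jemalloc build whose public API is exported without a je_ prefix and that <jemalloc/jemalloc.h> is on the include path):

    #include <jemalloc/jemalloc.h>

    int main(void) {
        /* At least 100 bytes, zero-initialized and 4096-byte aligned. */
        void *p = mallocx(100, MALLOCX_ALIGN(4096) | MALLOCX_ZERO);
        if (p == NULL)
            return 1;
        dallocx(p, 0);
        return 0;
    }
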
- <para>The <function>rallocx()</function> function resizes the
+ <para>The <function>rallocx<parameter/></function> function resizes the
allocation at <parameter>ptr</parameter> to be at least
<parameter>size</parameter> bytes, and returns a pointer to the base
address of the resulting allocation, which may or may not have moved from
its original location. Behavior is undefined if
- <parameter>size</parameter> is <constant>0</constant>.</para>
+ <parameter>size</parameter> is <constant>0</constant>, or if request size
+ overflows due to size class and/or alignment constraints.</para>
- <para>The <function>xallocx()</function> function resizes the
+ <para>The <function>xallocx<parameter/></function> function resizes the
allocation at <parameter>ptr</parameter> in place to be at least
<parameter>size</parameter> bytes, and returns the real size of the
allocation. If <parameter>extra</parameter> is non-zero, an attempt is
@@ -332,32 +334,32 @@
language="C">(<parameter>size</parameter> + <parameter>extra</parameter>
&gt; <constant>SIZE_T_MAX</constant>)</code>.</para>
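
A hedged sketch of the grow-in-place-then-fall-back pattern that the xallocx() description suggests (illustrative only; same jemalloc-header assumptions as above):

    #include <jemalloc/jemalloc.h>

    /* Try to grow an allocation to 4096 usable bytes without moving it. */
    static void *grow_to_4k(void *p) {
        if (xallocx(p, 4096, 0, 0) >= 4096)
            return p;  /* grown in place */
        /* rallocx may move the allocation; on failure it returns NULL and
         * leaves the original allocation untouched. */
        return rallocx(p, 4096, 0);
    }
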
- <para>The <function>sallocx()</function> function returns the
+ <para>The <function>sallocx<parameter/></function> function returns the
real size of the allocation at <parameter>ptr</parameter>.</para>
- <para>The <function>dallocx()</function> function causes the
+ <para>The <function>dallocx<parameter/></function> function causes the
memory referenced by <parameter>ptr</parameter> to be made available for
future allocations.</para>
- <para>The <function>sdallocx()</function> function is an
- extension of <function>dallocx()</function> with a
+ <para>The <function>sdallocx<parameter/></function> function is an
+ extension of <function>dallocx<parameter/></function> with a
<parameter>size</parameter> parameter to allow the caller to pass in the
allocation size as an optimization. The minimum valid input size is the
original requested size of the allocation, and the maximum valid input
size is the corresponding value returned by
- <function>nallocx()</function> or
- <function>sallocx()</function>.</para>
+ <function>nallocx<parameter/></function> or
+ <function>sallocx<parameter/></function>.</para>
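
A minimal sketch of the sdallocx() size-hint pattern described above (illustrative only):

    #include <jemalloc/jemalloc.h>

    int main(void) {
        size_t sz = 100;
        void *p = mallocx(sz, 0);
        if (p == NULL)
            return 1;
        /* The original request size is a valid size hint; it lets the
         * deallocation path skip a size lookup. */
        sdallocx(p, sz, 0);
        return 0;
    }
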
- <para>The <function>nallocx()</function> function allocates no
+ <para>The <function>nallocx<parameter/></function> function allocates no
memory, but it performs the same size computation as the
- <function>mallocx()</function> function, and returns the real
+ <function>mallocx<parameter/></function> function, and returns the real
size of the allocation that would result from the equivalent
- <function>mallocx()</function> function call, or
- <constant>0</constant> if the inputs exceed the maximum supported size
- class and/or alignment. Behavior is undefined if
- <parameter>size</parameter> is <constant>0</constant>.</para>
+ <function>mallocx<parameter/></function> function call. Behavior is
+ undefined if <parameter>size</parameter> is <constant>0</constant>, or if
+ request size overflows due to size class and/or alignment
+ constraints.</para>
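
And a sketch of using nallocx() to compute a size class without allocating (illustrative only):

    #include <jemalloc/jemalloc.h>
    #include <stdio.h>

    int main(void) {
        /* Usable size that a 100-byte, 64-byte-aligned request would be
         * rounded up to; no memory is allocated. */
        size_t real = nallocx(100, MALLOCX_ALIGN(64));
        printf("100-byte request -> %zu usable bytes\n", real);
        return 0;
    }
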
- <para>The <function>mallctl()</function> function provides a
+ <para>The <function>mallctl<parameter/></function> function provides a
general interface for introspecting the memory allocator, as well as
setting modifiable parameters and triggering actions. The
period-separated <parameter>name</parameter> argument specifies a
@@ -372,12 +374,12 @@
<parameter>newlen</parameter>; otherwise pass <constant>NULL</constant>
and <constant>0</constant>.</para>
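
For reference, a minimal read-only mallctl() query might look like the following (illustrative only; stats.allocated additionally assumes --enable-stats):

    #include <jemalloc/jemalloc.h>
    #include <stdio.h>

    int main(void) {
        size_t allocated, sz = sizeof(allocated);
        /* Read-only: pass NULL/0 for newp/newlen. */
        if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
            printf("allocated: %zu bytes\n", allocated);
        return 0;
    }
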
- <para>The <function>mallctlnametomib()</function> function
+ <para>The <function>mallctlnametomib<parameter/></function> function
provides a way to avoid repeated name lookups for applications that
repeatedly query the same portion of the namespace, by translating a name
- to a <quote>Management Information Base</quote> (MIB) that can be passed
- repeatedly to <function>mallctlbymib()</function>. Upon
- successful return from <function>mallctlnametomib()</function>,
+ to a &ldquo;Management Information Base&rdquo; (MIB) that can be passed
+ repeatedly to <function>mallctlbymib<parameter/></function>. Upon
+ successful return from <function>mallctlnametomib<parameter/></function>,
<parameter>mibp</parameter> contains an array of
<parameter>*miblenp</parameter> integers, where
<parameter>*miblenp</parameter> is the lesser of the number of components
@@ -406,44 +408,43 @@ for (i = 0; i < nbins; i++) {
mib[2] = i;
len = sizeof(bin_size);
- mallctlbymib(mib, miblen, (void *)&bin_size, &len, NULL, 0);
+ mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
/* Do something with bin_size... */
}]]></programlisting></para>
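
The hunk above shows only the tail of the document's example; a self-contained sketch of the same name-to-MIB pattern (written here for illustration rather than quoted from the file) is:

    #include <jemalloc/jemalloc.h>

    static void walk_bin_sizes(void) {
        unsigned nbins, i;
        size_t mib[4];
        size_t len, miblen = sizeof(mib) / sizeof(mib[0]);

        len = sizeof(nbins);
        mallctl("arenas.nbins", &nbins, &len, NULL, 0);

        /* Translate the name once, then vary the index component. */
        mallctlnametomib("arenas.bin.0.size", mib, &miblen);
        for (i = 0; i < nbins; i++) {
            size_t bin_size;

            mib[2] = i;
            len = sizeof(bin_size);
            mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
            /* Do something with bin_size... */
        }
    }
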
- <para>The <function>malloc_stats_print()</function> function writes
- summary statistics via the <parameter>write_cb</parameter> callback
- function pointer and <parameter>cbopaque</parameter> data passed to
- <parameter>write_cb</parameter>, or <function>malloc_message()</function>
- if <parameter>write_cb</parameter> is <constant>NULL</constant>. The
- statistics are presented in human-readable form unless <quote>J</quote> is
- specified as a character within the <parameter>opts</parameter> string, in
- which case the statistics are presented in <ulink
- url="http://www.json.org/">JSON format</ulink>. This function can be
- called repeatedly. General information that never changes during
- execution can be omitted by specifying <quote>g</quote> as a character
+ <para>The <function>malloc_stats_print<parameter/></function> function
+ writes human-readable summary statistics via the
+ <parameter>write_cb</parameter> callback function pointer and
+ <parameter>cbopaque</parameter> data passed to
+ <parameter>write_cb</parameter>, or
+ <function>malloc_message<parameter/></function> if
+ <parameter>write_cb</parameter> is <constant>NULL</constant>. This
+ function can be called repeatedly. General information that never
+ changes during execution can be omitted by specifying "g" as a character
within the <parameter>opts</parameter> string. Note that
- <function>malloc_message()</function> uses the
- <function>mallctl*()</function> functions internally, so inconsistent
- statistics can be reported if multiple threads use these functions
- simultaneously. If <option>--enable-stats</option> is specified during
- configuration, <quote>m</quote> and <quote>a</quote> can be specified to
- omit merged arena and per arena statistics, respectively;
- <quote>b</quote>, <quote>l</quote>, and <quote>h</quote> can be specified
- to omit per size class statistics for bins, large objects, and huge
- objects, respectively. Unrecognized characters are silently ignored.
- Note that thread caching may prevent some statistics from being completely
- up to date, since extra locking would be required to merge counters that
- track thread cache operations.</para>
-
- <para>The <function>malloc_usable_size()</function> function
+ <function>malloc_message<parameter/></function> uses the
+ <function>mallctl*<parameter/></function> functions internally, so
+ inconsistent statistics can be reported if multiple threads use these
+ functions simultaneously. If <option>--enable-stats</option> is
+ specified during configuration, &ldquo;m&rdquo; and &ldquo;a&rdquo; can
+ be specified to omit merged arena and per arena statistics, respectively;
+ &ldquo;b&rdquo;, &ldquo;l&rdquo;, and &ldquo;h&rdquo; can be specified to
+ omit per size class statistics for bins, large objects, and huge objects,
+ respectively. Unrecognized characters are silently ignored. Note that
+ thread caching may prevent some statistics from being completely up to
+ date, since extra locking would be required to merge counters that track
+ thread cache operations.
+ </para>
+
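
A hedged one-liner for the malloc_stats_print() behavior described above (illustrative only):

    #include <jemalloc/jemalloc.h>

    int main(void) {
        /* A NULL write_cb routes output through malloc_message; "g" omits
         * general information that never changes during execution. */
        malloc_stats_print(NULL, NULL, "g");
        return 0;
    }
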
+ <para>The <function>malloc_usable_size<parameter/></function> function
returns the usable size of the allocation pointed to by
<parameter>ptr</parameter>. The return value may be larger than the size
that was requested during allocation. The
- <function>malloc_usable_size()</function> function is not a
- mechanism for in-place <function>realloc()</function>; rather
+ <function>malloc_usable_size<parameter/></function> function is not a
+ mechanism for in-place <function>realloc<parameter/></function>; rather
it is provided solely as a tool for introspection purposes. Any
discrepancy between the requested allocation size and the size reported
- by <function>malloc_usable_size()</function> should not be
+ by <function>malloc_usable_size<parameter/></function> should not be
depended on, since such behavior is entirely implementation-dependent.
</para>
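
A short sketch of the introspection-only use the paragraph above allows (illustrative; the printed value depends on the size classes in effect and must not be relied upon):

    #include <jemalloc/jemalloc.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void) {
        void *p = malloc(9);
        /* Typically reports the size class (e.g. 16), not the 9 bytes
         * requested; purely informational. */
        printf("usable size: %zu\n", malloc_usable_size(p));
        free(p);
        return 0;
    }
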
</refsect2>
@@ -454,20 +455,19 @@ for (i = 0; i < nbins; i++) {
routines, the allocator initializes its internals based in part on various
options that can be specified at compile- or run-time.</para>
- <para>The string specified via <option>--with-malloc-conf</option>, the
- string pointed to by the global variable <varname>malloc_conf</varname>, the
- <quote>name</quote> of the file referenced by the symbolic link named
- <filename class="symlink">/etc/malloc.conf</filename>, and the value of the
+ <para>The string pointed to by the global variable
+ <varname>malloc_conf</varname>, the &ldquo;name&rdquo; of the file
+ referenced by the symbolic link named <filename
+ class="symlink">/etc/malloc.conf</filename>, and the value of the
environment variable <envar>MALLOC_CONF</envar>, will be interpreted, in
that order, from left to right as options. Note that
<varname>malloc_conf</varname> may be read before
- <function>main()</function> is entered, so the declaration of
+ <function>main<parameter/></function> is entered, so the declaration of
<varname>malloc_conf</varname> should specify an initializer that contains
- the final value to be read by jemalloc. <option>--with-malloc-conf</option>
- and <varname>malloc_conf</varname> are compile-time mechanisms, whereas
- <filename class="symlink">/etc/malloc.conf</filename> and
- <envar>MALLOC_CONF</envar> can be safely set any time prior to program
- invocation.</para>
+ the final value to be read by jemalloc. <varname>malloc_conf</varname> is
+ a compile-time setting, whereas <filename
+ class="symlink">/etc/malloc.conf</filename> and <envar>MALLOC_CONF</envar>
+ can be safely set any time prior to program invocation.</para>
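
For context, defining the compile-time options string described above might look like the following (illustrative only; with a prefixed build the symbol would be je_malloc_conf instead):

    #include <jemalloc/jemalloc.h>

    /* Read by jemalloc possibly before main() is entered, so it must be a
     * constant initializer rather than something assigned at run time. */
    const char *malloc_conf = "lg_chunk:24,narenas:4";
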
<para>An options string is a comma-separated list of option:value pairs.
There is one key corresponding to each <link
@@ -517,18 +517,23 @@ for (i = 0; i < nbins; i++) {
common case, but it increases memory usage and fragmentation, since a
bounded number of objects can remain allocated in each thread cache.</para>
- <para>Memory is conceptually broken into equal-sized chunks, where the chunk
- size is a power of two that is greater than the page size. Chunks are
- always aligned to multiples of the chunk size. This alignment makes it
- possible to find metadata for user objects very quickly. User objects are
- broken into three categories according to size: small, large, and huge.
- Multiple small and large objects can reside within a single chunk, whereas
- huge objects each have one or more chunks backing them. Each chunk that
- contains small and/or large objects tracks its contents as runs of
+ <para>Memory is conceptually broken into equal-sized chunks, where the
+ chunk size is a power of two that is greater than the page size. Chunks
+ are always aligned to multiples of the chunk size. This alignment makes it
+ possible to find metadata for user objects very quickly.</para>
+
+ <para>User objects are broken into three categories according to size:
+ small, large, and huge. Small and large objects are managed entirely by
+ arenas; huge objects are additionally aggregated in a single data structure
+ that is shared by all threads. Huge objects are typically used by
+ applications infrequently enough that this single data structure is not a
+ scalability issue.</para>
+
+ <para>Each chunk that is managed by an arena tracks its contents as runs of
contiguous pages (unused, backing a set of small objects, or backing one
- large object). The combination of chunk alignment and chunk page maps makes
- it possible to determine all metadata regarding small and large allocations
- in constant time.</para>
+ large object). The combination of chunk alignment and chunk page maps
+ makes it possible to determine all metadata regarding small and large
+ allocations in constant time.</para>
<para>Small objects are managed in groups by page runs. Each run maintains
a bitmap to track which regions are in use. Allocation requests that are no
@@ -541,8 +546,8 @@ for (i = 0; i < nbins; i++) {
are smaller than four times the page size, large size classes are smaller
than the chunk size (see the <link
linkend="opt.lg_chunk"><mallctl>opt.lg_chunk</mallctl></link> option), and
- huge size classes extend from the chunk size up to the largest size class
- that does not exceed <constant>PTRDIFF_MAX</constant>.</para>
+ huge size classes extend from the chunk size up to one size class less than
+ the full address space size.</para>
<para>Allocations are packed tightly together, which can be an issue for
multi-threaded applications. If you need to assure that allocations do not
@@ -550,14 +555,14 @@ for (i = 0; i < nbins; i++) {
nearest multiple of the cacheline size, or specify cacheline alignment when
allocating.</para>
- <para>The <function>realloc()</function>,
- <function>rallocx()</function>, and
- <function>xallocx()</function> functions may resize allocations
+ <para>The <function>realloc<parameter/></function>,
+ <function>rallocx<parameter/></function>, and
+ <function>xallocx<parameter/></function> functions may resize allocations
without moving them under limited circumstances. Unlike the
- <function>*allocx()</function> API, the standard API does not
+ <function>*allocx<parameter/></function> API, the standard API does not
officially round up the usable size of an allocation to the nearest size
class, so technically it is necessary to call
- <function>realloc()</function> to grow e.g. a 9-byte allocation to
+ <function>realloc<parameter/></function> to grow e.g. a 9-byte allocation to
16 bytes, or shrink a 16-byte allocation to 9 bytes. Growth and shrinkage
trivially succeeds in place as long as the pre-size and post-size both round
up to the same size class. No other API guarantees are made regarding
@@ -660,7 +665,7 @@ for (i = 0; i < nbins; i++) {
<entry>[1280 KiB, 1536 KiB, 1792 KiB]</entry>
</row>
<row>
- <entry morerows="8">Huge</entry>
+ <entry morerows="6">Huge</entry>
<entry>256 KiB</entry>
<entry>[2 MiB]</entry>
</row>
@@ -688,14 +693,6 @@ for (i = 0; i < nbins; i++) {
<entry>...</entry>
<entry>...</entry>
</row>
- <row>
- <entry>512 PiB</entry>
- <entry>[2560 PiB, 3 EiB, 3584 PiB, 4 EiB]</entry>
- </row>
- <row>
- <entry>1 EiB</entry>
- <entry>[5 EiB, 6 EiB, 7 EiB]</entry>
- </row>
</tbody>
</tgroup>
</table>
@@ -703,7 +700,7 @@ for (i = 0; i < nbins; i++) {
<refsect1 id="mallctl_namespace">
<title>MALLCTL NAMESPACE</title>
<para>The following names are defined in the namespace accessible via the
- <function>mallctl*()</function> functions. Value types are
+ <function>mallctl*<parameter/></function> functions. Value types are
specified in parentheses, their readable/writable statuses are encoded as
<literal>rw</literal>, <literal>r-</literal>, <literal>-w</literal>, or
<literal>--</literal>, and required build configuration flags follow, if
@@ -734,7 +731,7 @@ for (i = 0; i < nbins; i++) {
<literal>rw</literal>
</term>
<listitem><para>If a value is passed in, refresh the data from which
- the <function>mallctl*()</function> functions report values,
+ the <function>mallctl*<parameter/></function> functions report values,
and increment the epoch. Return the current epoch. This is useful for
detecting whether another thread caused a refresh.</para></listitem>
</varlistentry>
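
A sketch of the epoch refresh described above (illustrative only):

    #include <jemalloc/jemalloc.h>
    #include <stdint.h>

    static void refresh_stats(void) {
        uint64_t epoch = 1;
        size_t sz = sizeof(epoch);
        /* Writing any value refreshes the data reported by the stats.*
         * mallctls and increments the epoch. */
        mallctl("epoch", &epoch, &sz, &epoch, sz);
    }
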
@@ -779,17 +776,6 @@ for (i = 0; i < nbins; i++) {
during build configuration.</para></listitem>
</varlistentry>
- <varlistentry id="config.malloc_conf">
- <term>
- <mallctl>config.malloc_conf</mallctl>
- (<type>const char *</type>)
- <literal>r-</literal>
- </term>
- <listitem><para>Embedded configure-time-specified run-time options
- string, empty unless <option>--with-malloc-conf</option> was specified
- during build configuration.</para></listitem>
- </varlistentry>
-
<varlistentry id="config.munmap">
<term>
<mallctl>config.munmap</mallctl>
@@ -918,12 +904,12 @@ for (i = 0; i < nbins; i++) {
settings are supported if
<citerefentry><refentrytitle>sbrk</refentrytitle>
<manvolnum>2</manvolnum></citerefentry> is supported by the operating
- system: <quote>disabled</quote>, <quote>primary</quote>, and
- <quote>secondary</quote>; otherwise only <quote>disabled</quote> is
- supported. The default is <quote>secondary</quote> if
+ system: &ldquo;disabled&rdquo;, &ldquo;primary&rdquo;, and
+ &ldquo;secondary&rdquo;; otherwise only &ldquo;disabled&rdquo; is
+ supported. The default is &ldquo;secondary&rdquo; if
<citerefentry><refentrytitle>sbrk</refentrytitle>
<manvolnum>2</manvolnum></citerefentry> is supported by the operating
- system; <quote>disabled</quote> otherwise.
+ system; &ldquo;disabled&rdquo; otherwise.
</para></listitem>
</varlistentry>
@@ -943,7 +929,7 @@ for (i = 0; i < nbins; i++) {
<varlistentry id="opt.narenas">
<term>
<mallctl>opt.narenas</mallctl>
- (<type>unsigned</type>)
+ (<type>size_t</type>)
<literal>r-</literal>
</term>
<listitem><para>Maximum number of arenas to use for automatic
@@ -951,20 +937,6 @@ for (i = 0; i < nbins; i++) {
number of CPUs, or one if there is a single CPU.</para></listitem>
</varlistentry>
- <varlistentry id="opt.purge">
- <term>
- <mallctl>opt.purge</mallctl>
- (<type>const char *</type>)
- <literal>r-</literal>
- </term>
- <listitem><para>Purge mode is &ldquo;ratio&rdquo; (default) or
- &ldquo;decay&rdquo;. See <link
- linkend="opt.lg_dirty_mult"><mallctl>opt.lg_dirty_mult</mallctl></link>
- for details of the ratio mode. See <link
- linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link> for
- details of the decay mode.</para></listitem>
- </varlistentry>
-
<varlistentry id="opt.lg_dirty_mult">
<term>
<mallctl>opt.lg_dirty_mult</mallctl>
@@ -987,26 +959,6 @@ for (i = 0; i < nbins; i++) {
for related dynamic control options.</para></listitem>
</varlistentry>
- <varlistentry id="opt.decay_time">
- <term>
- <mallctl>opt.decay_time</mallctl>
- (<type>ssize_t</type>)
- <literal>r-</literal>
- </term>
- <listitem><para>Approximate time in seconds from the creation of a set
- of unused dirty pages until an equivalent set of unused dirty pages is
- purged and/or reused. The pages are incrementally purged according to a
- sigmoidal decay curve that starts and ends with zero purge rate. A
- decay time of 0 causes all unused dirty pages to be purged immediately
- upon creation. A decay time of -1 disables purging. The default decay
- time is 10 seconds. See <link
- linkend="arenas.decay_time"><mallctl>arenas.decay_time</mallctl></link>
- and <link
- linkend="arena.i.decay_time"><mallctl>arena.&lt;i&gt;.decay_time</mallctl></link>
- for related dynamic control options.
- </para></listitem>
- </varlistentry>
-
<varlistentry id="opt.stats_print">
<term>
<mallctl>opt.stats_print</mallctl>
@@ -1014,19 +966,19 @@ for (i = 0; i < nbins; i++) {
<literal>r-</literal>
</term>
<listitem><para>Enable/disable statistics printing at exit. If
- enabled, the <function>malloc_stats_print()</function>
+ enabled, the <function>malloc_stats_print<parameter/></function>
function is called at program exit via an
<citerefentry><refentrytitle>atexit</refentrytitle>
<manvolnum>3</manvolnum></citerefentry> function. If
<option>--enable-stats</option> is specified during configuration, this
has the potential to cause deadlock for a multi-threaded process that
exits while one or more threads are executing in the memory allocation
- functions. Furthermore, <function>atexit()</function> may
+ functions. Furthermore, <function>atexit<parameter/></function> may
allocate memory during application initialization and then deadlock
internally when jemalloc in turn calls
- <function>atexit()</function>, so this option is not
- universally usable (though the application can register its own
- <function>atexit()</function> function with equivalent
+ <function>atexit<parameter/></function>, so this option is not
+   universally usable (though the application can register its own
+ <function>atexit<parameter/></function> function with equivalent
functionality). Therefore, this option should only be used with care;
it is primarily intended as a performance tuning aid during application
development. This option is disabled by default.</para></listitem>
@@ -1039,16 +991,15 @@ for (i = 0; i < nbins; i++) {
<literal>r-</literal>
[<option>--enable-fill</option>]
</term>
- <listitem><para>Junk filling. If set to <quote>alloc</quote>, each byte
- of uninitialized allocated memory will be initialized to
- <literal>0xa5</literal>. If set to <quote>free</quote>, all deallocated
- memory will be initialized to <literal>0x5a</literal>. If set to
- <quote>true</quote>, both allocated and deallocated memory will be
- initialized, and if set to <quote>false</quote>, junk filling be
- disabled entirely. This is intended for debugging and will impact
- performance negatively. This option is <quote>false</quote> by default
- unless <option>--enable-debug</option> is specified during
- configuration, in which case it is <quote>true</quote> by default unless
+ <listitem><para>Junk filling. If set to "alloc", each byte of
+ uninitialized allocated memory will be initialized to
+ <literal>0xa5</literal>. If set to "free", all deallocated memory will
+ be initialized to <literal>0x5a</literal>. If set to "true", both
+ allocated and deallocated memory will be initialized, and if set to
+   "false", junk filling will be disabled entirely. This is intended for
+ debugging and will impact performance negatively. This option is
+ "false" by default unless <option>--enable-debug</option> is specified
+ during configuration, in which case it is "true" by default unless
running inside <ulink
url="http://valgrind.org/">Valgrind</ulink>.</para></listitem>
</varlistentry>
@@ -1103,8 +1054,8 @@ for (i = 0; i < nbins; i++) {
<listitem><para>Zero filling enabled/disabled. If enabled, each byte
of uninitialized allocated memory will be initialized to 0. Note that
this initialization only happens once for each byte, so
- <function>realloc()</function> and
- <function>rallocx()</function> calls do not zero memory that
+ <function>realloc<parameter/></function> and
+ <function>rallocx<parameter/></function> calls do not zero memory that
was previously allocated. This is intended for debugging and will
impact performance negatively. This option is disabled by default.
</para></listitem>
@@ -1199,8 +1150,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
the <command>jeprof</command> command, which is based on the
<command>pprof</command> that is developed as part of the <ulink
url="http://code.google.com/p/gperftools/">gperftools
- package</ulink>. See <link linkend="heap_profile_format">HEAP PROFILE
- FORMAT</link> for heap profile format documentation.</para></listitem>
+ package</ulink>.</para></listitem>
</varlistentry>
<varlistentry id="opt.prof_prefix">
@@ -1327,11 +1277,11 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.f.heap</filename>,
where <literal>&lt;prefix&gt;</literal> is controlled by the <link
linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
- option. Note that <function>atexit()</function> may allocate
+ option. Note that <function>atexit<parameter/></function> may allocate
memory during application initialization and then deadlock internally
- when jemalloc in turn calls <function>atexit()</function>, so
- this option is not universally usable (though the application can
- register its own <function>atexit()</function> function with
+ when jemalloc in turn calls <function>atexit<parameter/></function>, so
+   this option is not universally usable (though the application can
+ register its own <function>atexit<parameter/></function> function with
equivalent functionality). This option is disabled by
default.</para></listitem>
</varlistentry>
@@ -1390,7 +1340,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<link
linkend="thread.allocated"><mallctl>thread.allocated</mallctl></link>
mallctl. This is useful for avoiding the overhead of repeated
- <function>mallctl*()</function> calls.</para></listitem>
+ <function>mallctl*<parameter/></function> calls.</para></listitem>
</varlistentry>
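
A hedged sketch of the avoid-repeated-mallctl pattern referred to above, using the companion thread.allocatedp name (illustrative only; assumes --enable-stats):

    #include <jemalloc/jemalloc.h>
    #include <inttypes.h>
    #include <stdio.h>

    static void report_thread_allocated(void) {
        uint64_t *allocatedp;
        size_t sz = sizeof(allocatedp);
        /* Fetch the pointer once; afterwards it can be dereferenced
         * directly instead of issuing a mallctl call per query. */
        if (mallctl("thread.allocatedp", (void *)&allocatedp, &sz, NULL, 0) == 0)
            printf("thread allocated: %" PRIu64 " bytes\n", *allocatedp);
    }
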
<varlistentry id="thread.deallocated">
@@ -1417,7 +1367,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<link
linkend="thread.deallocated"><mallctl>thread.deallocated</mallctl></link>
mallctl. This is useful for avoiding the overhead of repeated
- <function>mallctl*()</function> calls.</para></listitem>
+ <function>mallctl*<parameter/></function> calls.</para></listitem>
</varlistentry>
<varlistentry id="thread.tcache.enabled">
@@ -1468,8 +1418,8 @@ malloc_conf = "xmalloc:true";]]></programlisting>
can cause asynchronous string deallocation. Furthermore, each
invocation of this interface can only read or write; simultaneous
read/write is not supported due to string lifetime limitations. The
- name string must be nil-terminated and comprised only of characters in
- the sets recognized
+   name string must be nil-terminated and comprised only of characters in the
+ sets recognized
by <citerefentry><refentrytitle>isgraph</refentrytitle>
<manvolnum>3</manvolnum></citerefentry> and
<citerefentry><refentrytitle>isblank</refentrytitle>
@@ -1517,7 +1467,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
<listitem><para>Flush the specified thread-specific cache (tcache). The
same considerations apply to this interface as to <link
linkend="thread.tcache.flush"><mallctl>thread.tcache.flush</mallctl></link>,
- except that the tcache will never be automatically discarded.
+   except that the tcache will never be automatically discarded.
</para></listitem>
</varlistentry>
@@ -1539,44 +1489,12 @@ malloc_conf = "xmalloc:true";]]></programlisting>
(<type>void</type>)
<literal>--</literal>
</term>
- <listitem><para>Purge all unused dirty pages for arena &lt;i&gt;, or for
+ <listitem><para>Purge unused dirty pages for arena &lt;i&gt;, or for
all arenas if &lt;i&gt; equals <link
linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>.
</para></listitem>
</varlistentry>
- <varlistentry id="arena.i.decay">
- <term>
- <mallctl>arena.&lt;i&gt;.decay</mallctl>
- (<type>void</type>)
- <literal>--</literal>
- </term>
- <listitem><para>Trigger decay-based purging of unused dirty pages for
- arena &lt;i&gt;, or for all arenas if &lt;i&gt; equals <link
- linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>.
- The proportion of unused dirty pages to be purged depends on the current
- time; see <link
- linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link> for
- details.</para></listitem>
- </varlistentry>
-
- <varlistentry id="arena.i.reset">
- <term>
- <mallctl>arena.&lt;i&gt;.reset</mallctl>
- (<type>void</type>)
- <literal>--</literal>
- </term>
- <listitem><para>Discard all of the arena's extant allocations. This
- interface can only be used with arenas created via <link
- linkend="arenas.extend"><mallctl>arenas.extend</mallctl></link>. None
-   of the arena's discarded/cached allocations may be accessed afterward. As
- part of this requirement, all thread caches which were used to
- allocate/deallocate in conjunction with the arena must be flushed
- beforehand. This interface cannot be used if running inside Valgrind,
- nor if the <link linkend="opt.quarantine">quarantine</link> size is
- non-zero.</para></listitem>
- </varlistentry>
-
<varlistentry id="arena.i.dss">
<term>
<mallctl>arena.&lt;i&gt;.dss</mallctl>
@@ -1605,22 +1523,6 @@ malloc_conf = "xmalloc:true";]]></programlisting>
for additional information.</para></listitem>
</varlistentry>
- <varlistentry id="arena.i.decay_time">
- <term>
- <mallctl>arena.&lt;i&gt;.decay_time</mallctl>
- (<type>ssize_t</type>)
- <literal>rw</literal>
- </term>
- <listitem><para>Current per-arena approximate time in seconds from the
- creation of a set of unused dirty pages until an equivalent set of
- unused dirty pages is purged and/or reused. Each time this interface is
- set, all currently unused dirty pages are considered to have fully
- decayed, which causes immediate purging of all unused dirty pages unless
- the decay time is set to -1 (i.e. purging disabled). See <link
- linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link> for
- additional information.</para></listitem>
- </varlistentry>
-
<varlistentry id="arena.i.chunk_hooks">
<term>
<mallctl>arena.&lt;i&gt;.chunk_hooks</mallctl>
@@ -1855,21 +1757,6 @@ typedef struct {
for additional information.</para></listitem>
</varlistentry>
- <varlistentry id="arenas.decay_time">
- <term>
- <mallctl>arenas.decay_time</mallctl>
- (<type>ssize_t</type>)
- <literal>rw</literal>
- </term>
- <listitem><para>Current default per-arena approximate time in seconds
- from the creation of a set of unused dirty pages until an equivalent set
- of unused dirty pages is purged and/or reused, used to initialize <link
- linkend="arena.i.decay_time"><mallctl>arena.&lt;i&gt;.decay_time</mallctl></link>
- during arena creation. See <link
- linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link> for
- additional information.</para></listitem>
- </varlistentry>
-
<varlistentry id="arenas.quantum">
<term>
<mallctl>arenas.quantum</mallctl>
@@ -2089,7 +1976,7 @@ typedef struct {
[<option>--enable-prof</option>]
</term>
<listitem><para>Average number of bytes allocated between
- interval-based profile dumps. See the
+   interval-based profile dumps. See the
<link
linkend="opt.lg_prof_interval"><mallctl>opt.lg_prof_interval</mallctl></link>
option for additional information.</para></listitem>
@@ -2188,25 +2075,6 @@ typedef struct {
linkend="stats.resident"><mallctl>stats.resident</mallctl></link>.</para></listitem>
</varlistentry>
- <varlistentry id="stats.retained">
- <term>
- <mallctl>stats.retained</mallctl>
- (<type>size_t</type>)
- <literal>r-</literal>
- [<option>--enable-stats</option>]
- </term>
- <listitem><para>Total number of bytes in virtual memory mappings that
- were retained rather than being returned to the operating system via
- e.g. <citerefentry><refentrytitle>munmap</refentrytitle>
- <manvolnum>2</manvolnum></citerefentry>. Retained virtual memory is
- typically untouched, decommitted, or purged, so it has no strongly
- associated physical memory (see <link
- linkend="arena.i.chunk_hooks">chunk hooks</link> for details). Retained
- memory is excluded from mapped memory statistics, e.g. <link
- linkend="stats.mapped"><mallctl>stats.mapped</mallctl></link>.
- </para></listitem>
- </varlistentry>
-
<varlistentry id="stats.arenas.i.dss">
<term>
<mallctl>stats.arenas.&lt;i&gt;.dss</mallctl>
@@ -2233,19 +2101,6 @@ typedef struct {
for details.</para></listitem>
</varlistentry>
- <varlistentry id="stats.arenas.i.decay_time">
- <term>
- <mallctl>stats.arenas.&lt;i&gt;.decay_time</mallctl>
- (<type>ssize_t</type>)
- <literal>r-</literal>
- </term>
- <listitem><para>Approximate time in seconds from the creation of a set
- of unused dirty pages until an equivalent set of unused dirty pages is
- purged and/or reused. See <link
- linkend="opt.decay_time"><mallctl>opt.decay_time</mallctl></link>
- for details.</para></listitem>
- </varlistentry>
-
<varlistentry id="stats.arenas.i.nthreads">
<term>
<mallctl>stats.arenas.&lt;i&gt;.nthreads</mallctl>
@@ -2287,18 +2142,6 @@ typedef struct {
<listitem><para>Number of mapped bytes.</para></listitem>
</varlistentry>
- <varlistentry id="stats.arenas.i.retained">
- <term>
- <mallctl>stats.arenas.&lt;i&gt;.retained</mallctl>
- (<type>size_t</type>)
- <literal>r-</literal>
- [<option>--enable-stats</option>]
- </term>
- <listitem><para>Number of retained bytes. See <link
- linkend="stats.retained"><mallctl>stats.retained</mallctl></link> for
- details.</para></listitem>
- </varlistentry>
-
<varlistentry id="stats.arenas.i.metadata.mapped">
<term>
<mallctl>stats.arenas.&lt;i&gt;.metadata.mapped</mallctl>
@@ -2680,53 +2523,6 @@ typedef struct {
</varlistentry>
</variablelist>
</refsect1>
- <refsect1 id="heap_profile_format">
- <title>HEAP PROFILE FORMAT</title>
- <para>Although the heap profiling functionality was originally designed to
- be compatible with the
- <command>pprof</command> command that is developed as part of the <ulink
- url="http://code.google.com/p/gperftools/">gperftools
- package</ulink>, the addition of per thread heap profiling functionality
- required a different heap profile format. The <command>jeprof</command>
- command is derived from <command>pprof</command>, with enhancements to
- support the heap profile format described here.</para>
-
- <para>In the following hypothetical heap profile, <constant>[...]</constant>
- indicates elision for the sake of compactness. <programlisting><![CDATA[
-heap_v2/524288
- t*: 28106: 56637512 [0: 0]
- [...]
- t3: 352: 16777344 [0: 0]
- [...]
- t99: 17754: 29341640 [0: 0]
- [...]
-@ 0x5f86da8 0x5f5a1dc [...] 0x29e4d4e 0xa200316 0xabb2988 [...]
- t*: 13: 6688 [0: 0]
-  t3: 12: 6496 [0: 0]
- t99: 1: 192 [0: 0]
-[...]
-
-MAPPED_LIBRARIES:
-[...]]]></programlisting> The following matches the above heap profile, but most
-tokens are replaced with <constant>&lt;description&gt;</constant> to indicate
-descriptions of the corresponding fields. <programlisting><![CDATA[
-<heap_profile_format_version>/<mean_sample_interval>
- <aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
- [...]
- <thread_3_aggregate>: <curobjs>: <curbytes>[<cumobjs>: <cumbytes>]
- [...]
- <thread_99_aggregate>: <curobjs>: <curbytes>[<cumobjs>: <cumbytes>]
- [...]
-@ <top_frame> <frame> [...] <frame> <frame> <frame> [...]
- <backtrace_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
- <backtrace_thread_3>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
- <backtrace_thread_99>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
-[...]
-
-MAPPED_LIBRARIES:
-</proc/<pid>/maps>]]></programlisting></para>
- </refsect1>
-
<refsect1 id="debugging_malloc_problems">
<title>DEBUGGING MALLOC PROBLEMS</title>
<para>When debugging, it is a good idea to configure/build jemalloc with
@@ -2736,7 +2532,7 @@ MAPPED_LIBRARIES:
of run-time assertions that catch application errors such as double-free,
write-after-free, etc.</para>
- <para>Programs often accidentally depend on <quote>uninitialized</quote>
+ <para>Programs often accidentally depend on &ldquo;uninitialized&rdquo;
memory actually being filled with zero bytes. Junk filling
(see the <link linkend="opt.junk"><mallctl>opt.junk</mallctl></link>
option) tends to expose such bugs in the form of obviously incorrect
@@ -2765,29 +2561,29 @@ MAPPED_LIBRARIES:
to override the function which emits the text strings forming the errors
and warnings if for some reason the <constant>STDERR_FILENO</constant> file
descriptor is not suitable for this.
- <function>malloc_message()</function> takes the
+ <function>malloc_message<parameter/></function> takes the
<parameter>cbopaque</parameter> pointer argument that is
<constant>NULL</constant> unless overridden by the arguments in a call to
- <function>malloc_stats_print()</function>, followed by a string
+ <function>malloc_stats_print<parameter/></function>, followed by a string
pointer. Please note that doing anything which tries to allocate memory in
this function is likely to result in a crash or deadlock.</para>
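
A minimal sketch of overriding the output hook as described above (illustrative only; note the replacement must not allocate, and with a prefixed build the symbol would be je_malloc_message):

    #include <jemalloc/jemalloc.h>
    #include <string.h>
    #include <unistd.h>

    /* Emit jemalloc diagnostics on file descriptor 2 explicitly; any
     * non-allocating sink would do. */
    static void my_write_cb(void *cbopaque, const char *s) {
        (void)cbopaque;
        (void)write(2, s, strlen(s));
    }

    static void install_write_cb(void) {
        malloc_message = my_write_cb;
    }
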
<para>All messages are prefixed by
- <quote><computeroutput>&lt;jemalloc&gt;: </computeroutput></quote>.</para>
+ &ldquo;<computeroutput>&lt;jemalloc&gt;: </computeroutput>&rdquo;.</para>
</refsect1>
<refsect1 id="return_values">
<title>RETURN VALUES</title>
<refsect2>
<title>Standard API</title>
- <para>The <function>malloc()</function> and
- <function>calloc()</function> functions return a pointer to the
+ <para>The <function>malloc<parameter/></function> and
+ <function>calloc<parameter/></function> functions return a pointer to the
allocated memory if successful; otherwise a <constant>NULL</constant>
pointer is returned and <varname>errno</varname> is set to
<errorname>ENOMEM</errorname>.</para>
- <para>The <function>posix_memalign()</function> function
+ <para>The <function>posix_memalign<parameter/></function> function
returns the value 0 if successful; otherwise it returns an error value.
- The <function>posix_memalign()</function> function will fail
+ The <function>posix_memalign<parameter/></function> function will fail
if:
<variablelist>
<varlistentry>
@@ -2806,11 +2602,11 @@ MAPPED_LIBRARIES:
</variablelist>
</para>
- <para>The <function>aligned_alloc()</function> function returns
+ <para>The <function>aligned_alloc<parameter/></function> function returns
a pointer to the allocated memory if successful; otherwise a
<constant>NULL</constant> pointer is returned and
<varname>errno</varname> is set. The
- <function>aligned_alloc()</function> function will fail if:
+ <function>aligned_alloc<parameter/></function> function will fail if:
<variablelist>
<varlistentry>
<term><errorname>EINVAL</errorname></term>
@@ -2827,44 +2623,44 @@ MAPPED_LIBRARIES:
</variablelist>
</para>
- <para>The <function>realloc()</function> function returns a
+ <para>The <function>realloc<parameter/></function> function returns a
pointer, possibly identical to <parameter>ptr</parameter>, to the
allocated memory if successful; otherwise a <constant>NULL</constant>
pointer is returned, and <varname>errno</varname> is set to
<errorname>ENOMEM</errorname> if the error was the result of an
- allocation failure. The <function>realloc()</function>
+ allocation failure. The <function>realloc<parameter/></function>
function always leaves the original buffer intact when an error occurs.
</para>
- <para>The <function>free()</function> function returns no
+ <para>The <function>free<parameter/></function> function returns no
value.</para>
</refsect2>
<refsect2>
<title>Non-standard API</title>
- <para>The <function>mallocx()</function> and
- <function>rallocx()</function> functions return a pointer to
+ <para>The <function>mallocx<parameter/></function> and
+ <function>rallocx<parameter/></function> functions return a pointer to
the allocated memory if successful; otherwise a <constant>NULL</constant>
pointer is returned to indicate insufficient contiguous memory was
available to service the allocation request. </para>
- <para>The <function>xallocx()</function> function returns the
+ <para>The <function>xallocx<parameter/></function> function returns the
real size of the resulting resized allocation pointed to by
<parameter>ptr</parameter>, which is a value less than
<parameter>size</parameter> if the allocation could not be adequately
grown in place. </para>
- <para>The <function>sallocx()</function> function returns the
+ <para>The <function>sallocx<parameter/></function> function returns the
real size of the allocation pointed to by <parameter>ptr</parameter>.
</para>
- <para>The <function>nallocx()</function> returns the real size
+ <para>The <function>nallocx<parameter/></function> returns the real size
that would result from a successful equivalent
- <function>mallocx()</function> function call, or zero if
+ <function>mallocx<parameter/></function> function call, or zero if
insufficient memory is available to perform the size computation. </para>
- <para>The <function>mallctl()</function>,
- <function>mallctlnametomib()</function>, and
- <function>mallctlbymib()</function> functions return 0 on
+ <para>The <function>mallctl<parameter/></function>,
+ <function>mallctlnametomib<parameter/></function>, and
+ <function>mallctlbymib<parameter/></function> functions return 0 on
success; otherwise they return an error value. The functions will fail
if:
<variablelist>
@@ -2900,13 +2696,13 @@ MAPPED_LIBRARIES:
<term><errorname>EFAULT</errorname></term>
<listitem><para>An interface with side effects failed in some way
- not directly related to <function>mallctl*()</function>
+ not directly related to <function>mallctl*<parameter/></function>
read/write processing.</para></listitem>
</varlistentry>
</variablelist>
</para>
- <para>The <function>malloc_usable_size()</function> function
+ <para>The <function>malloc_usable_size<parameter/></function> function
returns the usable size of the allocation pointed to by
<parameter>ptr</parameter>. </para>
</refsect2>
@@ -2954,13 +2750,13 @@ malloc_conf = "lg_chunk:24";]]></programlisting></para>
</refsect1>
<refsect1 id="standards">
<title>STANDARDS</title>
- <para>The <function>malloc()</function>,
- <function>calloc()</function>,
- <function>realloc()</function>, and
- <function>free()</function> functions conform to ISO/IEC
- 9899:1990 (<quote>ISO C90</quote>).</para>
-
- <para>The <function>posix_memalign()</function> function conforms
- to IEEE Std 1003.1-2001 (<quote>POSIX.1</quote>).</para>
+ <para>The <function>malloc<parameter/></function>,
+ <function>calloc<parameter/></function>,
+ <function>realloc<parameter/></function>, and
+ <function>free<parameter/></function> functions conform to ISO/IEC
+ 9899:1990 (&ldquo;ISO C90&rdquo;).</para>
+
+ <para>The <function>posix_memalign<parameter/></function> function conforms
+ to IEEE Std 1003.1-2001 (&ldquo;POSIX.1&rdquo;).</para>
</refsect1>
</refentry>
diff --git a/deps/jemalloc/doc/stylesheet.xsl b/deps/jemalloc/doc/stylesheet.xsl
index 619365d82..4e334a86f 100644
--- a/deps/jemalloc/doc/stylesheet.xsl
+++ b/deps/jemalloc/doc/stylesheet.xsl
@@ -1,10 +1,7 @@
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:param name="funcsynopsis.style">ansi</xsl:param>
- <xsl:param name="function.parens" select="0"/>
- <xsl:template match="function">
- <xsl:call-template name="inline.monoseq"/>
- </xsl:template>
+ <xsl:param name="function.parens" select="1"/>
<xsl:template match="mallctl">
- <quote><xsl:call-template name="inline.monoseq"/></quote>
+ "<xsl:call-template name="inline.monoseq"/>"
</xsl:template>
</xsl:stylesheet>
diff --git a/deps/jemalloc/include/jemalloc/internal/arena.h b/deps/jemalloc/include/jemalloc/internal/arena.h
index ce4e6029e..12c617979 100644
--- a/deps/jemalloc/include/jemalloc/internal/arena.h
+++ b/deps/jemalloc/include/jemalloc/internal/arena.h
@@ -23,29 +23,14 @@
*/
#define LG_DIRTY_MULT_DEFAULT 3
-typedef enum {
- purge_mode_ratio = 0,
- purge_mode_decay = 1,
-
- purge_mode_limit = 2
-} purge_mode_t;
-#define PURGE_DEFAULT purge_mode_ratio
-/* Default decay time in seconds. */
-#define DECAY_TIME_DEFAULT 10
-/* Number of event ticks between time checks. */
-#define DECAY_NTICKS_PER_UPDATE 1000
-
typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t;
-typedef struct arena_avail_links_s arena_avail_links_t;
typedef struct arena_run_s arena_run_t;
typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t;
typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t;
typedef struct arena_chunk_s arena_chunk_t;
typedef struct arena_bin_info_s arena_bin_info_t;
-typedef struct arena_decay_s arena_decay_t;
typedef struct arena_bin_s arena_bin_t;
typedef struct arena_s arena_t;
-typedef struct arena_tdata_s arena_tdata_t;
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
@@ -155,13 +140,13 @@ struct arena_runs_dirty_link_s {
*/
struct arena_chunk_map_misc_s {
/*
- * Linkage for run heaps. There are two disjoint uses:
+ * Linkage for run trees. There are two disjoint uses:
*
- * 1) arena_t's runs_avail heaps.
+ * 1) arena_t's runs_avail tree.
* 2) arena_run_t conceptually uses this linkage for in-use non-full
* runs, rather than directly embedding linkage.
*/
- phn(arena_chunk_map_misc_t) ph_link;
+ rb_node(arena_chunk_map_misc_t) rb_link;
union {
/* Linkage for list of dirty runs. */
@@ -169,15 +154,16 @@ struct arena_chunk_map_misc_s {
/* Profile counters, used for large object runs. */
union {
- void *prof_tctx_pun;
- prof_tctx_t *prof_tctx;
+ void *prof_tctx_pun;
+ prof_tctx_t *prof_tctx;
};
/* Small region run metadata. */
arena_run_t run;
};
};
-typedef ph(arena_chunk_map_misc_t) arena_run_heap_t;
+typedef rb_tree(arena_chunk_map_misc_t) arena_avail_tree_t;
+typedef rb_tree(arena_chunk_map_misc_t) arena_run_tree_t;
#endif /* JEMALLOC_ARENA_STRUCTS_A */
#ifdef JEMALLOC_ARENA_STRUCTS_B
@@ -191,14 +177,6 @@ struct arena_chunk_s {
extent_node_t node;
/*
- * True if memory could be backed by transparent huge pages. This is
- * only directly relevant to Linux, since it is the only supported
- * platform on which jemalloc interacts with explicit transparent huge
- * page controls.
- */
- bool hugepage;
-
- /*
* Map of pages within chunk that keeps track of free/large/small. The
* first map_bias entries are omitted, since the chunk header does not
* need to be tracked in the map. This omission saves a header page
@@ -242,71 +220,28 @@ struct arena_chunk_s {
*/
struct arena_bin_info_s {
/* Size of regions in a run for this bin's size class. */
- size_t reg_size;
+ size_t reg_size;
/* Redzone size. */
- size_t redzone_size;
+ size_t redzone_size;
/* Interval between regions (reg_size + (redzone_size << 1)). */
- size_t reg_interval;
+ size_t reg_interval;
/* Total size of a run for this bin's size class. */
- size_t run_size;
+ size_t run_size;
/* Total number of regions in a run for this bin's size class. */
- uint32_t nregs;
+ uint32_t nregs;
/*
* Metadata used to manipulate bitmaps for runs associated with this
* bin.
*/
- bitmap_info_t bitmap_info;
+ bitmap_info_t bitmap_info;
/* Offset of first region in a run for this bin's size class. */
- uint32_t reg0_offset;
-};
-
-struct arena_decay_s {
- /*
- * Approximate time in seconds from the creation of a set of unused
- * dirty pages until an equivalent set of unused dirty pages is purged
- * and/or reused.
- */
- ssize_t time;
- /* time / SMOOTHSTEP_NSTEPS. */
- nstime_t interval;
- /*
- * Time at which the current decay interval logically started. We do
- * not actually advance to a new epoch until sometime after it starts
- * because of scheduling and computation delays, and it is even possible
- * to completely skip epochs. In all cases, during epoch advancement we
- * merge all relevant activity into the most recently recorded epoch.
- */
- nstime_t epoch;
- /* Deadline randomness generator. */
- uint64_t jitter_state;
- /*
- * Deadline for current epoch. This is the sum of interval and per
- * epoch jitter which is a uniform random variable in [0..interval).
- * Epochs always advance by precise multiples of interval, but we
- * randomize the deadline to reduce the likelihood of arenas purging in
- * lockstep.
- */
- nstime_t deadline;
- /*
- * Number of dirty pages at beginning of current epoch. During epoch
- * advancement we use the delta between arena->decay.ndirty and
- * arena->ndirty to determine how many dirty pages, if any, were
- * generated.
- */
- size_t ndirty;
- /*
- * Trailing log of how many unused dirty pages were generated during
- * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
- * element is the most recent epoch. Corresponding epoch times are
- * relative to epoch.
- */
- size_t backlog[SMOOTHSTEP_NSTEPS];
+ uint32_t reg0_offset;
};
struct arena_bin_s {
@@ -316,25 +251,25 @@ struct arena_bin_s {
* which may be acquired while holding one or more bin locks, but not
* vise versa.
*/
- malloc_mutex_t lock;
+ malloc_mutex_t lock;
/*
* Current run being used to service allocations of this bin's size
* class.
*/
- arena_run_t *runcur;
+ arena_run_t *runcur;
/*
- * Heap of non-full runs. This heap is used when looking for an
+ * Tree of non-full runs. This tree is used when looking for an
* existing run when runcur is no longer usable. We choose the
* non-full run that is lowest in memory; this policy tends to keep
* objects packed well, and it can also help reduce the number of
* almost-empty chunks.
*/
- arena_run_heap_t runs;
+ arena_run_tree_t runs;
/* Bin statistics. */
- malloc_bin_stats_t stats;
+ malloc_bin_stats_t stats;
};
struct arena_s {
@@ -342,23 +277,15 @@ struct arena_s {
unsigned ind;
/*
- * Number of threads currently assigned to this arena, synchronized via
- * atomic operations. Each thread has two distinct assignments, one for
- * application-serving allocation, and the other for internal metadata
- * allocation. Internal metadata must not be allocated from arenas
- * created via the arenas.extend mallctl, because the arena.<i>.reset
- * mallctl indiscriminately discards all allocations for the affected
- * arena.
- *
- * 0: Application allocation.
- * 1: Internal metadata allocation.
+ * Number of threads currently assigned to this arena. This field is
+ * protected by arenas_lock.
*/
- unsigned nthreads[2];
+ unsigned nthreads;
/*
* There are three classes of arena operations from a locking
* perspective:
- * 1) Thread assignment (modifies nthreads) is synchronized via atomics.
+ * 1) Thread assignment (modifies nthreads) is protected by arenas_lock.
* 2) Bin-related operations are protected by bin locks.
* 3) Chunk- and run-related operations are protected by this mutex.
*/
@@ -378,16 +305,10 @@ struct arena_s {
* PRNG state for cache index randomization of large allocation base
* pointers.
*/
- size_t offset_state;
+ uint64_t offset_state;
dss_prec_t dss_prec;
- /* Extant arena chunks. */
- ql_head(extent_node_t) achunks;
-
- /* Extent serial number generator state. */
- size_t extent_sn_next;
-
/*
* In order to avoid rapid chunk allocation/deallocation when an arena
* oscillates right on the cusp of needing a new chunk, cache the most
@@ -403,7 +324,7 @@ struct arena_s {
/* Minimum ratio (log base 2) of nactive:ndirty. */
ssize_t lg_dirty_mult;
- /* True if a thread is currently executing arena_purge_to_limit(). */
+ /* True if a thread is currently executing arena_purge(). */
bool purging;
/* Number of pages in active runs and huge regions. */
@@ -418,6 +339,12 @@ struct arena_s {
size_t ndirty;
/*
+ * Size/address-ordered tree of this arena's available runs. The tree
+ * is used for first-best-fit run allocation.
+ */
+ arena_avail_tree_t runs_avail;
+
+ /*
* Unused dirty memory this arena manages. Dirty memory is conceptually
* tracked as an arbitrarily interleaved LRU of dirty runs and cached
* chunks, but the list linkage is actually semi-duplicated in order to
@@ -448,9 +375,6 @@ struct arena_s {
arena_runs_dirty_link_t runs_dirty;
extent_node_t chunks_cache;
- /* Decay-based purging state. */
- arena_decay_t decay;
-
/* Extant huge allocations. */
ql_head(extent_node_t) huge;
/* Synchronizes all huge allocation/update/deallocation. */
@@ -463,9 +387,9 @@ struct arena_s {
* orderings are needed, which is why there are two trees with the same
* contents.
*/
- extent_tree_t chunks_szsnad_cached;
+ extent_tree_t chunks_szad_cached;
extent_tree_t chunks_ad_cached;
- extent_tree_t chunks_szsnad_retained;
+ extent_tree_t chunks_szad_retained;
extent_tree_t chunks_ad_retained;
malloc_mutex_t chunks_mtx;
@@ -478,19 +402,6 @@ struct arena_s {
/* bins is used to store trees of free regions. */
arena_bin_t bins[NBINS];
-
- /*
- * Size-segregated address-ordered heaps of this arena's available runs,
- * used for first-best-fit run allocation. Runs are quantized, i.e.
- * they reside in the last heap which corresponds to a size class less
- * than or equal to the run size.
- */
- arena_run_heap_t runs_avail[NPSIZES];
-};
-
-/* Used in conjunction with tsd for fast arena-related context lookup. */
-struct arena_tdata_s {
- ticker_t decay_ticker;
};
#endif /* JEMALLOC_ARENA_STRUCTS_B */
@@ -506,10 +417,7 @@ static const size_t large_pad =
#endif
;
-extern purge_mode_t opt_purge;
-extern const char *purge_mode_names[];
extern ssize_t opt_lg_dirty_mult;
-extern ssize_t opt_decay_time;
extern arena_bin_info_t arena_bin_info[NBINS];
@@ -520,37 +428,27 @@ extern size_t large_maxclass; /* Max large size class. */
extern unsigned nlclasses; /* Number of large size classes. */
extern unsigned nhclasses; /* Number of huge size classes. */
-#ifdef JEMALLOC_JET
-typedef size_t (run_quantize_t)(size_t);
-extern run_quantize_t *run_quantize_floor;
-extern run_quantize_t *run_quantize_ceil;
-#endif
void arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node,
bool cache);
void arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node,
bool cache);
-extent_node_t *arena_node_alloc(tsdn_t *tsdn, arena_t *arena);
-void arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node);
-void *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
- size_t alignment, size_t *sn, bool *zero);
-void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk,
- size_t usize, size_t sn);
-void arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena,
- void *chunk, size_t oldsize, size_t usize);
-void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,
- void *chunk, size_t oldsize, size_t usize, size_t sn);
-bool arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
- void *chunk, size_t oldsize, size_t usize, bool *zero);
-ssize_t arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena);
-bool arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena,
- ssize_t lg_dirty_mult);
-ssize_t arena_decay_time_get(tsdn_t *tsdn, arena_t *arena);
-bool arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time);
-void arena_purge(tsdn_t *tsdn, arena_t *arena, bool all);
-void arena_maybe_purge(tsdn_t *tsdn, arena_t *arena);
-void arena_reset(tsd_t *tsd, arena_t *arena);
-void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena,
- tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
+extent_node_t *arena_node_alloc(arena_t *arena);
+void arena_node_dalloc(arena_t *arena, extent_node_t *node);
+void *arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
+ bool *zero);
+void arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize);
+void arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk,
+ size_t oldsize, size_t usize);
+void arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk,
+ size_t oldsize, size_t usize);
+bool arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk,
+ size_t oldsize, size_t usize, bool *zero);
+ssize_t arena_lg_dirty_mult_get(arena_t *arena);
+bool arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult);
+void arena_maybe_purge(arena_t *arena);
+void arena_purge_all(arena_t *arena);
+void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
+ szind_t binind, uint64_t prof_accumbytes);
void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
bool zero);
#ifdef JEMALLOC_JET
@@ -563,100 +461,75 @@ extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
#endif
void arena_quarantine_junk_small(void *ptr, size_t usize);
-void *arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t ind,
- bool zero);
-void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
- szind_t ind, bool zero);
-void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
+void *arena_malloc_small(arena_t *arena, size_t size, bool zero);
+void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
+void *arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize,
size_t alignment, bool zero, tcache_t *tcache);
-void arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size);
-void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
- arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm);
-void arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- void *ptr, size_t pageind, arena_chunk_map_bits_t *bitselm);
-void arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- void *ptr, size_t pageind);
+void arena_prof_promoted(const void *ptr, size_t size);
+void arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, arena_chunk_map_bits_t *bitselm);
+void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ size_t pageind, arena_chunk_map_bits_t *bitselm);
+void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ size_t pageind);
#ifdef JEMALLOC_JET
typedef void (arena_dalloc_junk_large_t)(void *, size_t);
extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
#else
void arena_dalloc_junk_large(void *ptr, size_t usize);
#endif
-void arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
- arena_chunk_t *chunk, void *ptr);
-void arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+void arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
void *ptr);
+void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
#ifdef JEMALLOC_JET
typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
#endif
-bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
- size_t size, size_t extra, bool zero);
+bool arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
+ size_t extra, bool zero);
void *arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
size_t size, size_t alignment, bool zero, tcache_t *tcache);
-dss_prec_t arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena);
-bool arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec);
+dss_prec_t arena_dss_prec_get(arena_t *arena);
+bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
ssize_t arena_lg_dirty_mult_default_get(void);
bool arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult);
-ssize_t arena_decay_time_default_get(void);
-bool arena_decay_time_default_set(ssize_t decay_time);
-void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
- unsigned *nthreads, const char **dss, ssize_t *lg_dirty_mult,
- ssize_t *decay_time, size_t *nactive, size_t *ndirty);
-void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
- const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
- size_t *nactive, size_t *ndirty, arena_stats_t *astats,
- malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
- malloc_huge_stats_t *hstats);
-unsigned arena_nthreads_get(arena_t *arena, bool internal);
-void arena_nthreads_inc(arena_t *arena, bool internal);
-void arena_nthreads_dec(arena_t *arena, bool internal);
-size_t arena_extent_sn_next(arena_t *arena);
-arena_t *arena_new(tsdn_t *tsdn, unsigned ind);
-void arena_boot(void);
-void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
-void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
-void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
-void arena_prefork3(tsdn_t *tsdn, arena_t *arena);
-void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
-void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);
+void arena_stats_merge(arena_t *arena, const char **dss,
+ ssize_t *lg_dirty_mult, size_t *nactive, size_t *ndirty,
+ arena_stats_t *astats, malloc_bin_stats_t *bstats,
+ malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats);
+arena_t *arena_new(unsigned ind);
+bool arena_boot(void);
+void arena_prefork(arena_t *arena);
+void arena_postfork_parent(arena_t *arena);
+void arena_postfork_child(arena_t *arena);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
-arena_chunk_map_bits_t *arena_bitselm_get_mutable(arena_chunk_t *chunk,
+arena_chunk_map_bits_t *arena_bitselm_get(arena_chunk_t *chunk,
size_t pageind);
-const arena_chunk_map_bits_t *arena_bitselm_get_const(
- const arena_chunk_t *chunk, size_t pageind);
-arena_chunk_map_misc_t *arena_miscelm_get_mutable(arena_chunk_t *chunk,
+arena_chunk_map_misc_t *arena_miscelm_get(arena_chunk_t *chunk,
size_t pageind);
-const arena_chunk_map_misc_t *arena_miscelm_get_const(
- const arena_chunk_t *chunk, size_t pageind);
-size_t arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm);
-void *arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm);
+size_t arena_miscelm_to_pageind(arena_chunk_map_misc_t *miscelm);
+void *arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm);
arena_chunk_map_misc_t *arena_rd_to_miscelm(arena_runs_dirty_link_t *rd);
arena_chunk_map_misc_t *arena_run_to_miscelm(arena_run_t *run);
-size_t *arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind);
-const size_t *arena_mapbitsp_get_const(const arena_chunk_t *chunk,
- size_t pageind);
-size_t arena_mapbitsp_read(const size_t *mapbitsp);
-size_t arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind);
+size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbitsp_read(size_t *mapbitsp);
+size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_size_decode(size_t mapbits);
-size_t arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk,
- size_t pageind);
-size_t arena_mapbits_large_size_get(const arena_chunk_t *chunk,
- size_t pageind);
-size_t arena_mapbits_small_runind_get(const arena_chunk_t *chunk,
+size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
size_t pageind);
-szind_t arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_decommitted_get(const arena_chunk_t *chunk,
- size_t pageind);
-size_t arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
+szind_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
size_t arena_mapbits_size_encode(size_t size);
void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
@@ -676,31 +549,27 @@ void arena_metadata_allocated_sub(arena_t *arena, size_t size);
size_t arena_metadata_allocated_get(arena_t *arena);
bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
-bool arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes);
+bool arena_prof_accum(arena_t *arena, uint64_t accumbytes);
szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
-size_t arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
+unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
const void *ptr);
-prof_tctx_t *arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
-void arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
- prof_tctx_t *tctx);
-void arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
+prof_tctx_t *arena_prof_tctx_get(const void *ptr);
+void arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
+void arena_prof_tctx_reset(const void *ptr, size_t usize,
const void *old_ptr, prof_tctx_t *old_tctx);
-void arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks);
-void arena_decay_tick(tsdn_t *tsdn, arena_t *arena);
-void *arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
- bool zero, tcache_t *tcache, bool slow_path);
+void *arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
+ tcache_t *tcache);
arena_t *arena_aalloc(const void *ptr);
-size_t arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote);
-void arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path);
-void arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
- bool slow_path);
+size_t arena_salloc(const void *ptr, bool demote);
+void arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
+void arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
# ifdef JEMALLOC_ARENA_INLINE_A
JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t *
-arena_bitselm_get_mutable(arena_chunk_t *chunk, size_t pageind)
+arena_bitselm_get(arena_chunk_t *chunk, size_t pageind)
{
assert(pageind >= map_bias);
@@ -709,15 +578,8 @@ arena_bitselm_get_mutable(arena_chunk_t *chunk, size_t pageind)
return (&chunk->map_bits[pageind-map_bias]);
}
-JEMALLOC_ALWAYS_INLINE const arena_chunk_map_bits_t *
-arena_bitselm_get_const(const arena_chunk_t *chunk, size_t pageind)
-{
-
- return (arena_bitselm_get_mutable((arena_chunk_t *)chunk, pageind));
-}
-
JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
-arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind)
+arena_miscelm_get(arena_chunk_t *chunk, size_t pageind)
{
assert(pageind >= map_bias);
@@ -727,15 +589,8 @@ arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind)
(uintptr_t)map_misc_offset) + pageind-map_bias);
}
-JEMALLOC_ALWAYS_INLINE const arena_chunk_map_misc_t *
-arena_miscelm_get_const(const arena_chunk_t *chunk, size_t pageind)
-{
-
- return (arena_miscelm_get_mutable((arena_chunk_t *)chunk, pageind));
-}
-
JEMALLOC_ALWAYS_INLINE size_t
-arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm)
+arena_miscelm_to_pageind(arena_chunk_map_misc_t *miscelm)
{
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
size_t pageind = ((uintptr_t)miscelm - ((uintptr_t)chunk +
@@ -748,7 +603,7 @@ arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm)
}
JEMALLOC_ALWAYS_INLINE void *
-arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm)
+arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm)
{
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
size_t pageind = arena_miscelm_to_pageind(miscelm);
@@ -781,31 +636,24 @@ arena_run_to_miscelm(arena_run_t *run)
}
JEMALLOC_ALWAYS_INLINE size_t *
-arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind)
+arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
{
- return (&arena_bitselm_get_mutable(chunk, pageind)->bits);
-}
-
-JEMALLOC_ALWAYS_INLINE const size_t *
-arena_mapbitsp_get_const(const arena_chunk_t *chunk, size_t pageind)
-{
-
- return (arena_mapbitsp_get_mutable((arena_chunk_t *)chunk, pageind));
+ return (&arena_bitselm_get(chunk, pageind)->bits);
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbitsp_read(const size_t *mapbitsp)
+arena_mapbitsp_read(size_t *mapbitsp)
{
return (*mapbitsp);
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
{
- return (arena_mapbitsp_read(arena_mapbitsp_get_const(chunk, pageind)));
+ return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind)));
}
JEMALLOC_ALWAYS_INLINE size_t
@@ -825,7 +673,7 @@ arena_mapbits_size_decode(size_t mapbits)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -835,7 +683,7 @@ arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_large_size_get(const arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -846,7 +694,7 @@ arena_mapbits_large_size_get(const arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_small_runind_get(const arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -857,7 +705,7 @@ arena_mapbits_small_runind_get(const arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE szind_t
-arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
szind_t binind;
@@ -869,7 +717,7 @@ arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -880,7 +728,7 @@ arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -891,7 +739,7 @@ arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_decommitted_get(const arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -902,7 +750,7 @@ arena_mapbits_decommitted_get(const arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -911,7 +759,7 @@ arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -947,7 +795,7 @@ JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
{
- size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
+ size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
@@ -961,7 +809,7 @@ JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
size_t size)
{
- size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
+ size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
size_t mapbits = arena_mapbitsp_read(mapbitsp);
assert((size & PAGE_MASK) == 0);
@@ -973,7 +821,7 @@ arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t flags)
{
- size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
+ size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((flags & CHUNK_MAP_UNZEROED) == flags);
arena_mapbitsp_write(mapbitsp, flags);
@@ -983,7 +831,7 @@ JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
{
- size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
+ size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
@@ -998,7 +846,7 @@ JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
szind_t binind)
{
- size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
+ size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
size_t mapbits = arena_mapbitsp_read(mapbitsp);
assert(binind <= BININD_INVALID);
@@ -1012,7 +860,7 @@ JEMALLOC_ALWAYS_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
szind_t binind, size_t flags)
{
- size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
+ size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert(binind < BININD_INVALID);
assert(pageind - runind >= map_bias);
@@ -1069,7 +917,7 @@ arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
}
JEMALLOC_INLINE bool
-arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes)
+arena_prof_accum(arena_t *arena, uint64_t accumbytes)
{
cassert(config_prof);
@@ -1080,9 +928,9 @@ arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes)
{
bool ret;
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
ret = arena_prof_accum_impl(arena, accumbytes);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
return (ret);
}
}
@@ -1100,12 +948,12 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
size_t pageind;
size_t actual_mapbits;
size_t rpages_ind;
- const arena_run_t *run;
+ arena_run_t *run;
arena_bin_t *bin;
szind_t run_binind, actual_binind;
arena_bin_info_t *bin_info;
- const arena_chunk_map_misc_t *miscelm;
- const void *rpages;
+ arena_chunk_map_misc_t *miscelm;
+ void *rpages;
assert(binind != BININD_INVALID);
assert(binind < NBINS);
@@ -1118,11 +966,11 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
rpages_ind = pageind - arena_mapbits_small_runind_get(chunk,
pageind);
- miscelm = arena_miscelm_get_const(chunk, rpages_ind);
+ miscelm = arena_miscelm_get(chunk, rpages_ind);
run = &miscelm->run;
run_binind = run->binind;
bin = &arena->bins[run_binind];
- actual_binind = (szind_t)(bin - arena->bins);
+ actual_binind = bin - arena->bins;
assert(run_binind == actual_binind);
bin_info = &arena_bin_info[actual_binind];
rpages = arena_miscelm_to_rpages(miscelm);
@@ -1139,15 +987,16 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
JEMALLOC_INLINE szind_t
arena_bin_index(arena_t *arena, arena_bin_t *bin)
{
- szind_t binind = (szind_t)(bin - arena->bins);
+ szind_t binind = bin - arena->bins;
assert(binind < NBINS);
return (binind);
}
-JEMALLOC_INLINE size_t
+JEMALLOC_INLINE unsigned
arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
{
- size_t diff, interval, shift, regind;
+ unsigned shift, diff, regind;
+ size_t interval;
arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
void *rpages = arena_miscelm_to_rpages(miscelm);
@@ -1162,12 +1011,12 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
* Avoid doing division with a variable divisor if possible. Using
* actual division here can reduce allocator throughput by over 20%!
*/
- diff = (size_t)((uintptr_t)ptr - (uintptr_t)rpages -
+ diff = (unsigned)((uintptr_t)ptr - (uintptr_t)rpages -
bin_info->reg0_offset);
/* Rescale (factor powers of 2 out of the numerator and denominator). */
interval = bin_info->reg_interval;
- shift = ffs_zu(interval) - 1;
+ shift = jemalloc_ffs(interval) - 1;
diff >>= shift;
interval >>= shift;
@@ -1189,9 +1038,9 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
* divide by 0, and 1 and 2 are both powers of two, which are
* handled above.
*/
-#define SIZE_INV_SHIFT ((sizeof(size_t) << 3) - LG_RUN_MAXREGS)
-#define SIZE_INV(s) (((ZU(1) << SIZE_INV_SHIFT) / (s)) + 1)
- static const size_t interval_invs[] = {
+#define SIZE_INV_SHIFT ((sizeof(unsigned) << 3) - LG_RUN_MAXREGS)
+#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s)) + 1)
+ static const unsigned interval_invs[] = {
SIZE_INV(3),
SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
@@ -1202,8 +1051,8 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
};
- if (likely(interval <= ((sizeof(interval_invs) / sizeof(size_t))
- + 2))) {
+ if (likely(interval <= ((sizeof(interval_invs) /
+ sizeof(unsigned)) + 2))) {
regind = (diff * interval_invs[interval - 3]) >>
SIZE_INV_SHIFT;
} else
@@ -1218,7 +1067,7 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
}
JEMALLOC_INLINE prof_tctx_t *
-arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
+arena_prof_tctx_get(const void *ptr)
{
prof_tctx_t *ret;
arena_chunk_t *chunk;
@@ -1234,19 +1083,18 @@ arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
ret = (prof_tctx_t *)(uintptr_t)1U;
else {
- arena_chunk_map_misc_t *elm =
- arena_miscelm_get_mutable(chunk, pageind);
+ arena_chunk_map_misc_t *elm = arena_miscelm_get(chunk,
+ pageind);
ret = atomic_read_p(&elm->prof_tctx_pun);
}
} else
- ret = huge_prof_tctx_get(tsdn, ptr);
+ ret = huge_prof_tctx_get(ptr);
return (ret);
}
JEMALLOC_INLINE void
-arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
- prof_tctx_t *tctx)
+arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
{
arena_chunk_t *chunk;
@@ -1265,7 +1113,7 @@ arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
assert(arena_mapbits_large_get(chunk, pageind) != 0);
- elm = arena_miscelm_get_mutable(chunk, pageind);
+ elm = arena_miscelm_get(chunk, pageind);
atomic_write_p(&elm->prof_tctx_pun, tctx);
} else {
/*
@@ -1277,12 +1125,12 @@ arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
assert(arena_mapbits_large_get(chunk, pageind) == 0);
}
} else
- huge_prof_tctx_set(tsdn, ptr, tctx);
+ huge_prof_tctx_set(ptr, tctx);
}
JEMALLOC_INLINE void
-arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
- const void *old_ptr, prof_tctx_t *old_tctx)
+arena_prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
+ prof_tctx_t *old_tctx)
{
cassert(config_prof);
@@ -1301,59 +1149,43 @@ arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
0);
assert(arena_mapbits_large_get(chunk, pageind) != 0);
- elm = arena_miscelm_get_mutable(chunk, pageind);
+ elm = arena_miscelm_get(chunk, pageind);
atomic_write_p(&elm->prof_tctx_pun,
(prof_tctx_t *)(uintptr_t)1U);
} else
- huge_prof_tctx_reset(tsdn, ptr);
+ huge_prof_tctx_reset(ptr);
}
}
-JEMALLOC_ALWAYS_INLINE void
-arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks)
-{
- tsd_t *tsd;
- ticker_t *decay_ticker;
-
- if (unlikely(tsdn_null(tsdn)))
- return;
- tsd = tsdn_tsd(tsdn);
- decay_ticker = decay_ticker_get(tsd, arena->ind);
- if (unlikely(decay_ticker == NULL))
- return;
- if (unlikely(ticker_ticks(decay_ticker, nticks)))
- arena_purge(tsdn, arena, false);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-arena_decay_tick(tsdn_t *tsdn, arena_t *arena)
-{
-
- arena_decay_ticks(tsdn, arena, 1);
-}
-
JEMALLOC_ALWAYS_INLINE void *
-arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
- tcache_t *tcache, bool slow_path)
+arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
+ tcache_t *tcache)
{
- assert(!tsdn_null(tsdn) || tcache == NULL);
assert(size != 0);
- if (likely(tcache != NULL)) {
- if (likely(size <= SMALL_MAXCLASS)) {
- return (tcache_alloc_small(tsdn_tsd(tsdn), arena,
- tcache, size, ind, zero, slow_path));
- }
- if (likely(size <= tcache_maxclass)) {
- return (tcache_alloc_large(tsdn_tsd(tsdn), arena,
- tcache, size, ind, zero, slow_path));
- }
- /* (size > tcache_maxclass) case falls through. */
- assert(size > tcache_maxclass);
- }
+ arena = arena_choose(tsd, arena);
+ if (unlikely(arena == NULL))
+ return (NULL);
- return (arena_malloc_hard(tsdn, arena, size, ind, zero));
+ if (likely(size <= SMALL_MAXCLASS)) {
+ if (likely(tcache != NULL)) {
+ return (tcache_alloc_small(tsd, arena, tcache, size,
+ zero));
+ } else
+ return (arena_malloc_small(arena, size, zero));
+ } else if (likely(size <= large_maxclass)) {
+ /*
+ * Initialize tcache after checking size in order to avoid
+ * infinite recursion during tcache initialization.
+ */
+ if (likely(tcache != NULL) && size <= tcache_maxclass) {
+ return (tcache_alloc_large(tsd, arena, tcache, size,
+ zero));
+ } else
+ return (arena_malloc_large(arena, size, zero));
+ } else
+ return (huge_malloc(tsd, arena, size, zero, tcache));
}
JEMALLOC_ALWAYS_INLINE arena_t *
@@ -1370,7 +1202,7 @@ arena_aalloc(const void *ptr)
/* Return the size of the allocation pointed to by ptr. */
JEMALLOC_ALWAYS_INLINE size_t
-arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote)
+arena_salloc(const void *ptr, bool demote)
{
size_t ret;
arena_chunk_t *chunk;
@@ -1413,18 +1245,17 @@ arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote)
ret = index2size(binind);
}
} else
- ret = huge_salloc(tsdn, ptr);
+ ret = huge_salloc(ptr);
return (ret);
}
JEMALLOC_ALWAYS_INLINE void
-arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path)
+arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
arena_chunk_t *chunk;
size_t pageind, mapbits;
- assert(!tsdn_null(tsdn) || tcache == NULL);
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
@@ -1437,12 +1268,10 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path)
if (likely(tcache != NULL)) {
szind_t binind = arena_ptr_small_binind_get(ptr,
mapbits);
- tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
- binind, slow_path);
+ tcache_dalloc_small(tsd, tcache, ptr, binind);
} else {
- arena_dalloc_small(tsdn,
- extent_node_arena_get(&chunk->node), chunk,
- ptr, pageind);
+ arena_dalloc_small(extent_node_arena_get(
+ &chunk->node), chunk, ptr, pageind);
}
} else {
size_t size = arena_mapbits_large_size_get(chunk,
@@ -1453,33 +1282,28 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path)
if (likely(tcache != NULL) && size - large_pad <=
tcache_maxclass) {
- tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
- size - large_pad, slow_path);
+ tcache_dalloc_large(tsd, tcache, ptr, size -
+ large_pad);
} else {
- arena_dalloc_large(tsdn,
- extent_node_arena_get(&chunk->node), chunk,
- ptr);
+ arena_dalloc_large(extent_node_arena_get(
+ &chunk->node), chunk, ptr);
}
}
} else
- huge_dalloc(tsdn, ptr);
+ huge_dalloc(tsd, ptr, tcache);
}
JEMALLOC_ALWAYS_INLINE void
-arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
- bool slow_path)
+arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
{
arena_chunk_t *chunk;
- assert(!tsdn_null(tsdn) || tcache == NULL);
-
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (likely(chunk != ptr)) {
if (config_prof && opt_prof) {
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
LG_PAGE;
- assert(arena_mapbits_allocated_get(chunk, pageind) !=
- 0);
+ assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
if (arena_mapbits_large_get(chunk, pageind) != 0) {
/*
* Make sure to use promoted size, not request
@@ -1489,36 +1313,32 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
pageind) - large_pad;
}
}
- assert(s2u(size) == s2u(arena_salloc(tsdn, ptr, false)));
+ assert(s2u(size) == s2u(arena_salloc(ptr, false)));
if (likely(size <= SMALL_MAXCLASS)) {
/* Small allocation. */
if (likely(tcache != NULL)) {
szind_t binind = size2index(size);
- tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
- binind, slow_path);
+ tcache_dalloc_small(tsd, tcache, ptr, binind);
} else {
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> LG_PAGE;
- arena_dalloc_small(tsdn,
- extent_node_arena_get(&chunk->node), chunk,
- ptr, pageind);
+ arena_dalloc_small(extent_node_arena_get(
+ &chunk->node), chunk, ptr, pageind);
}
} else {
assert(config_cache_oblivious || ((uintptr_t)ptr &
PAGE_MASK) == 0);
- if (likely(tcache != NULL) && size <= tcache_maxclass) {
- tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
- size, slow_path);
- } else {
- arena_dalloc_large(tsdn,
- extent_node_arena_get(&chunk->node), chunk,
- ptr);
+ if (likely(tcache != NULL) && size <= tcache_maxclass)
+ tcache_dalloc_large(tsd, tcache, ptr, size);
+ else {
+ arena_dalloc_large(extent_node_arena_get(
+ &chunk->node), chunk, ptr);
}
}
} else
- huge_dalloc(tsdn, ptr);
+ huge_dalloc(tsd, ptr, tcache);
}
# endif /* JEMALLOC_ARENA_INLINE_B */
#endif
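
For illustration only (this is not part of the patch): the reverted arena_run_regind() above avoids dividing by a variable interval. Powers of two are shifted out of both operands, and the remaining small interval is replaced by a precomputed fixed-point inverse, so the region index reduces to a multiply and a shift. A minimal stand-alone sketch of that trick, with hypothetical constants and names:

/* Illustrative sketch only; constants and helper names are hypothetical. */
#include <assert.h>
#include <strings.h>	/* ffs() */

#define LG_RUN_MAXREGS	11
#define SIZE_INV_SHIFT	((sizeof(unsigned) << 3) - LG_RUN_MAXREGS)
#define SIZE_INV(s)	(((1U << SIZE_INV_SHIFT) / (s)) + 1)

/* Precomputed fixed-point inverses for the odd intervals 3..7. */
static const unsigned interval_invs[] = {
	SIZE_INV(3), SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7)
};

static unsigned
regind_example(unsigned diff, unsigned interval)
{
	/* Factor powers of two out of the numerator and denominator. */
	unsigned shift = (unsigned)ffs((int)interval) - 1;

	diff >>= shift;
	interval >>= shift;
	if (interval == 1)
		return (diff);	/* Power-of-two interval: pure shift. */
	/* diff / interval, computed via the precomputed inverse. */
	return ((diff * interval_invs[interval - 3]) >> SIZE_INV_SHIFT);
}

int
main(void)
{
	/* Region 7 of a run whose regions are spaced 48 bytes apart. */
	assert(regind_example(7 * 48, 48) == 7);
	return (0);
}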
diff --git a/deps/jemalloc/include/jemalloc/internal/assert.h b/deps/jemalloc/include/jemalloc/internal/assert.h
deleted file mode 100644
index 6f8f7eb93..000000000
--- a/deps/jemalloc/include/jemalloc/internal/assert.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Define a custom assert() in order to reduce the chances of deadlock during
- * assertion failure.
- */
-#ifndef assert
-#define assert(e) do { \
- if (unlikely(config_debug && !(e))) { \
- malloc_printf( \
- "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
- __FILE__, __LINE__, #e); \
- abort(); \
- } \
-} while (0)
-#endif
-
-#ifndef not_reached
-#define not_reached() do { \
- if (config_debug) { \
- malloc_printf( \
- "<jemalloc>: %s:%d: Unreachable code reached\n", \
- __FILE__, __LINE__); \
- abort(); \
- } \
- unreachable(); \
-} while (0)
-#endif
-
-#ifndef not_implemented
-#define not_implemented() do { \
- if (config_debug) { \
- malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
- __FILE__, __LINE__); \
- abort(); \
- } \
-} while (0)
-#endif
-
-#ifndef assert_not_implemented
-#define assert_not_implemented(e) do { \
- if (unlikely(config_debug && !(e))) \
- not_implemented(); \
-} while (0)
-#endif
-
-
diff --git a/deps/jemalloc/include/jemalloc/internal/atomic.h b/deps/jemalloc/include/jemalloc/internal/atomic.h
index 3f15ea149..a9aad35d1 100644
--- a/deps/jemalloc/include/jemalloc/internal/atomic.h
+++ b/deps/jemalloc/include/jemalloc/internal/atomic.h
@@ -28,8 +28,8 @@
* callers.
*
* <t> atomic_read_<t>(<t> *p) { return (*p); }
- * <t> atomic_add_<t>(<t> *p, <t> x) { return (*p += x); }
- * <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p -= x); }
+ * <t> atomic_add_<t>(<t> *p, <t> x) { return (*p + x); }
+ * <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p - x); }
* bool atomic_cas_<t>(<t> *p, <t> c, <t> s)
* {
* if (*p != c)
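
As a hedged aside rather than patch content: both comment variants above describe atomic_add_<t>() as returning the post-addition value (the 4.4.0 wording spells it *p += x, the restored wording *p + x). In portable C11 such a helper can be sketched as follows; the name is hypothetical.

#include <stdatomic.h>
#include <stdint.h>

static inline uint64_t
atomic_add_u64_example(_Atomic uint64_t *p, uint64_t x)
{
	/* fetch_add returns the old value, so add x to yield the new one. */
	return (atomic_fetch_add(p, x) + x);
}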
diff --git a/deps/jemalloc/include/jemalloc/internal/base.h b/deps/jemalloc/include/jemalloc/internal/base.h
index d6b81e162..39e46ee44 100644
--- a/deps/jemalloc/include/jemalloc/internal/base.h
+++ b/deps/jemalloc/include/jemalloc/internal/base.h
@@ -9,13 +9,12 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-void *base_alloc(tsdn_t *tsdn, size_t size);
-void base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
- size_t *mapped);
+void *base_alloc(size_t size);
+void base_stats_get(size_t *allocated, size_t *resident, size_t *mapped);
bool base_boot(void);
-void base_prefork(tsdn_t *tsdn);
-void base_postfork_parent(tsdn_t *tsdn);
-void base_postfork_child(tsdn_t *tsdn);
+void base_prefork(void);
+void base_postfork_parent(void);
+void base_postfork_child(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/deps/jemalloc/include/jemalloc/internal/bitmap.h b/deps/jemalloc/include/jemalloc/internal/bitmap.h
index 36f38b59c..fcc6005c7 100644
--- a/deps/jemalloc/include/jemalloc/internal/bitmap.h
+++ b/deps/jemalloc/include/jemalloc/internal/bitmap.h
@@ -15,15 +15,6 @@ typedef unsigned long bitmap_t;
#define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS)
#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)
-/*
- * Do some analysis on how big the bitmap is before we use a tree. For a brute
- * force linear search, if we would have to call ffs_lu() more than 2^3 times,
- * use a tree instead.
- */
-#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3
-# define USE_TREE
-#endif
-
/* Number of groups required to store a given number of bits. */
#define BITMAP_BITS2GROUPS(nbits) \
((nbits + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)
@@ -57,8 +48,6 @@ typedef unsigned long bitmap_t;
/*
* Maximum number of groups required to support LG_BITMAP_MAXBITS.
*/
-#ifdef USE_TREE
-
#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS
# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS)
#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2
@@ -76,12 +65,6 @@ typedef unsigned long bitmap_t;
(LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
+ !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
-#else /* USE_TREE */
-
-#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS)
-
-#endif /* USE_TREE */
-
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
@@ -95,7 +78,6 @@ struct bitmap_info_s {
/* Logical number of bits in bitmap (stored at bottom level). */
size_t nbits;
-#ifdef USE_TREE
/* Number of levels necessary for nbits. */
unsigned nlevels;
@@ -104,10 +86,6 @@ struct bitmap_info_s {
* bottom to top (e.g. the bottom level is stored in levels[0]).
*/
bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
-#else /* USE_TREE */
- /* Number of groups necessary for nbits. */
- size_t ngroups;
-#endif /* USE_TREE */
};
#endif /* JEMALLOC_H_STRUCTS */
@@ -115,8 +93,9 @@ struct bitmap_info_s {
#ifdef JEMALLOC_H_EXTERNS
void bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
+size_t bitmap_info_ngroups(const bitmap_info_t *binfo);
+size_t bitmap_size(size_t nbits);
void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo);
-size_t bitmap_size(const bitmap_info_t *binfo);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
@@ -134,20 +113,10 @@ void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
JEMALLOC_INLINE bool
bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
-#ifdef USE_TREE
- size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
+ unsigned rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
bitmap_t rg = bitmap[rgoff];
/* The bitmap is full iff the root group is 0. */
return (rg == 0);
-#else
- size_t i;
-
- for (i = 0; i < binfo->ngroups; i++) {
- if (bitmap[i] != 0)
- return (false);
- }
- return (true);
-#endif
}
JEMALLOC_INLINE bool
@@ -159,7 +128,7 @@ bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
assert(bit < binfo->nbits);
goff = bit >> LG_BITMAP_GROUP_NBITS;
g = bitmap[goff];
- return (!(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))));
+ return (!(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))));
}
JEMALLOC_INLINE void
@@ -174,11 +143,10 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
goff = bit >> LG_BITMAP_GROUP_NBITS;
gp = &bitmap[goff];
g = *gp;
- assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
- g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
+ assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
+ g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
assert(bitmap_get(bitmap, binfo, bit));
-#ifdef USE_TREE
/* Propagate group state transitions up the tree. */
if (g == 0) {
unsigned i;
@@ -187,14 +155,13 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
goff = bit >> LG_BITMAP_GROUP_NBITS;
gp = &bitmap[binfo->levels[i].group_offset + goff];
g = *gp;
- assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
- g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
+ assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
+ g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
if (g != 0)
break;
}
}
-#endif
}
/* sfu: set first unset. */
@@ -207,24 +174,15 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
assert(!bitmap_full(bitmap, binfo));
-#ifdef USE_TREE
i = binfo->nlevels - 1;
g = bitmap[binfo->levels[i].group_offset];
- bit = ffs_lu(g) - 1;
+ bit = jemalloc_ffsl(g) - 1;
while (i > 0) {
i--;
g = bitmap[binfo->levels[i].group_offset + bit];
- bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1);
+ bit = (bit << LG_BITMAP_GROUP_NBITS) + (jemalloc_ffsl(g) - 1);
}
-#else
- i = 0;
- g = bitmap[0];
- while ((bit = ffs_lu(g)) == 0) {
- i++;
- g = bitmap[i];
- }
- bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
-#endif
+
bitmap_set(bitmap, binfo, bit);
return (bit);
}
@@ -235,7 +193,7 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
size_t goff;
bitmap_t *gp;
bitmap_t g;
- UNUSED bool propagate;
+ bool propagate;
assert(bit < binfo->nbits);
assert(bitmap_get(bitmap, binfo, bit));
@@ -243,11 +201,10 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
gp = &bitmap[goff];
g = *gp;
propagate = (g == 0);
- assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
- g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
+ assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
+ g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
assert(!bitmap_get(bitmap, binfo, bit));
-#ifdef USE_TREE
/* Propagate group state transitions up the tree. */
if (propagate) {
unsigned i;
@@ -257,15 +214,14 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
gp = &bitmap[binfo->levels[i].group_offset + goff];
g = *gp;
propagate = (g == 0);
- assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)))
+ assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)))
== 0);
- g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
+ g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
if (!propagate)
break;
}
}
-#endif /* USE_TREE */
}
#endif
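
Illustrative sketch, not taken from the patch: the 4.4.0 hunks removed above made the tree of summary groups optional and, for small bitmaps, fell back to a flat linear scan with ffs_lu() instead of walking levels. A stand-alone version of that flat set-first-unset scan, where a 1 bit means the slot is still unset and all names are hypothetical:

#include <limits.h>
#include <stddef.h>
#include <strings.h>	/* ffsl() */

#define GROUP_NBITS	(sizeof(unsigned long) * CHAR_BIT)

static size_t
flat_sfu_example(unsigned long *groups, size_t ngroups)
{
	size_t i;

	for (i = 0; i < ngroups; i++) {
		/* The cast mirrors ffs_lu(); good enough for a sketch. */
		int b = ffsl((long)groups[i]);

		if (b != 0) {
			/* Clear the bit, i.e. mark the slot as now set. */
			groups[i] &= ~(1UL << (b - 1));
			return (i * GROUP_NBITS + (size_t)(b - 1));
		}
	}
	return (ngroups * GROUP_NBITS);	/* Bitmap is full. */
}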
diff --git a/deps/jemalloc/include/jemalloc/internal/chunk.h b/deps/jemalloc/include/jemalloc/internal/chunk.h
index 50b9904b0..5d1938353 100644
--- a/deps/jemalloc/include/jemalloc/internal/chunk.h
+++ b/deps/jemalloc/include/jemalloc/internal/chunk.h
@@ -48,30 +48,32 @@ extern size_t chunk_npages;
extern const chunk_hooks_t chunk_hooks_default;
-chunk_hooks_t chunk_hooks_get(tsdn_t *tsdn, arena_t *arena);
-chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena,
+chunk_hooks_t chunk_hooks_get(arena_t *arena);
+chunk_hooks_t chunk_hooks_set(arena_t *arena,
const chunk_hooks_t *chunk_hooks);
-bool chunk_register(tsdn_t *tsdn, const void *chunk,
- const extent_node_t *node);
+bool chunk_register(const void *chunk, const extent_node_t *node);
void chunk_deregister(const void *chunk, const extent_node_t *node);
void *chunk_alloc_base(size_t size);
-void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
- size_t *sn, bool *zero, bool *commit, bool dalloc_node);
-void *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
- size_t *sn, bool *zero, bool *commit);
-void chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn,
- bool committed);
-void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn,
- bool zeroed, bool committed);
-bool chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset,
+void *chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *new_addr, size_t size, size_t alignment, bool *zero,
+ bool dalloc_node);
+void *chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit);
+void chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *chunk, size_t size, bool committed);
+void chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *chunk, size_t size, bool zeroed, bool committed);
+void chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *chunk, size_t size, bool committed);
+bool chunk_purge_arena(arena_t *arena, void *chunk, size_t offset,
size_t length);
+bool chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *chunk, size_t size, size_t offset, size_t length);
bool chunk_boot(void);
+void chunk_prefork(void);
+void chunk_postfork_parent(void);
+void chunk_postfork_child(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/deps/jemalloc/include/jemalloc/internal/chunk_dss.h b/deps/jemalloc/include/jemalloc/internal/chunk_dss.h
index da8511ba0..388f46be0 100644
--- a/deps/jemalloc/include/jemalloc/internal/chunk_dss.h
+++ b/deps/jemalloc/include/jemalloc/internal/chunk_dss.h
@@ -23,11 +23,13 @@ extern const char *dss_prec_names[];
dss_prec_t chunk_dss_prec_get(void);
bool chunk_dss_prec_set(dss_prec_t dss_prec);
-void *chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
- size_t size, size_t alignment, bool *zero, bool *commit);
+void *chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit);
bool chunk_in_dss(void *chunk);
-bool chunk_dss_mergeable(void *chunk_a, void *chunk_b);
-void chunk_dss_boot(void);
+bool chunk_dss_boot(void);
+void chunk_dss_prefork(void);
+void chunk_dss_postfork_parent(void);
+void chunk_dss_postfork_child(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/deps/jemalloc/include/jemalloc/internal/chunk_mmap.h b/deps/jemalloc/include/jemalloc/internal/chunk_mmap.h
index 6f2d0ac2e..7d8014c58 100644
--- a/deps/jemalloc/include/jemalloc/internal/chunk_mmap.h
+++ b/deps/jemalloc/include/jemalloc/internal/chunk_mmap.h
@@ -9,8 +9,8 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-void *chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment,
- bool *zero, bool *commit);
+void *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero,
+ bool *commit);
bool chunk_dalloc_mmap(void *chunk, size_t size);
#endif /* JEMALLOC_H_EXTERNS */
diff --git a/deps/jemalloc/include/jemalloc/internal/ckh.h b/deps/jemalloc/include/jemalloc/internal/ckh.h
index f75ad90b7..75c1c979f 100644
--- a/deps/jemalloc/include/jemalloc/internal/ckh.h
+++ b/deps/jemalloc/include/jemalloc/internal/ckh.h
@@ -40,7 +40,9 @@ struct ckh_s {
#endif
/* Used for pseudo-random number generation. */
- uint64_t prng_state;
+#define CKH_A 1103515241
+#define CKH_C 12347
+ uint32_t prng_state;
/* Total number of items. */
size_t count;
@@ -72,7 +74,7 @@ bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
void **data);
-bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
+bool ckh_search(ckh_t *ckh, const void *seachkey, void **key, void **data);
void ckh_string_hash(const void *key, size_t r_hash[2]);
bool ckh_string_keycomp(const void *k1, const void *k2);
void ckh_pointer_hash(const void *key, size_t r_hash[2]);
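
Another aside, not from the patch: the restored ckh.h keeps a 32-bit pseudo-random state together with the linear congruential constants CKH_A and CKH_C. One step of such a generator can be sketched as below; how ckh.c actually advances and consumes the state may differ.

#include <stdint.h>

#define CKH_A	1103515241
#define CKH_C	12347

static inline uint32_t
ckh_prng_next_example(uint32_t *state)
{
	/* state' = state * A + C, modulo 2^32 via unsigned wraparound. */
	*state = *state * (uint32_t)CKH_A + (uint32_t)CKH_C;
	return (*state);
}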
diff --git a/deps/jemalloc/include/jemalloc/internal/ctl.h b/deps/jemalloc/include/jemalloc/internal/ctl.h
index af0f6d7c5..751c14b5b 100644
--- a/deps/jemalloc/include/jemalloc/internal/ctl.h
+++ b/deps/jemalloc/include/jemalloc/internal/ctl.h
@@ -21,14 +21,13 @@ struct ctl_named_node_s {
/* If (nchildren == 0), this is a terminal node. */
unsigned nchildren;
const ctl_node_t *children;
- int (*ctl)(tsd_t *, const size_t *, size_t, void *,
- size_t *, void *, size_t);
+ int (*ctl)(const size_t *, size_t, void *, size_t *,
+ void *, size_t);
};
struct ctl_indexed_node_s {
struct ctl_node_s node;
- const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t,
- size_t);
+ const ctl_named_node_t *(*index)(const size_t *, size_t, size_t);
};
struct ctl_arena_stats_s {
@@ -36,12 +35,8 @@ struct ctl_arena_stats_s {
unsigned nthreads;
const char *dss;
ssize_t lg_dirty_mult;
- ssize_t decay_time;
size_t pactive;
size_t pdirty;
-
- /* The remainder are only populated if config_stats is true. */
-
arena_stats_t astats;
/* Aggregate stats for small size classes, based on bin stats. */
@@ -61,7 +56,6 @@ struct ctl_stats_s {
size_t metadata;
size_t resident;
size_t mapped;
- size_t retained;
unsigned narenas;
ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
};
@@ -70,17 +64,16 @@ struct ctl_stats_s {
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen);
-int ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp,
- size_t *miblenp);
+int ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
+ size_t newlen);
+int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp);
-int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen);
+int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen);
bool ctl_boot(void);
-void ctl_prefork(tsdn_t *tsdn);
-void ctl_postfork_parent(tsdn_t *tsdn);
-void ctl_postfork_child(tsdn_t *tsdn);
+void ctl_prefork(void);
+void ctl_postfork_parent(void);
+void ctl_postfork_child(void);
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
diff --git a/deps/jemalloc/include/jemalloc/internal/extent.h b/deps/jemalloc/include/jemalloc/internal/extent.h
index 168ffe643..386d50ef4 100644
--- a/deps/jemalloc/include/jemalloc/internal/extent.h
+++ b/deps/jemalloc/include/jemalloc/internal/extent.h
@@ -19,20 +19,6 @@ struct extent_node_s {
size_t en_size;
/*
- * Serial number (potentially non-unique).
- *
- * In principle serial numbers can wrap around on 32-bit systems if
- * JEMALLOC_MUNMAP is defined, but as long as comparison functions fall
- * back on address comparison for equal serial numbers, stable (if
- * imperfect) ordering is maintained.
- *
- * Serial numbers may not be unique even in the absence of wrap-around,
- * e.g. when splitting an extent and assigning the same serial number to
- * both resulting adjacent extents.
- */
- size_t en_sn;
-
- /*
* The zeroed flag is used by chunk recycling code to track whether
* memory is zero-filled.
*/
@@ -59,10 +45,10 @@ struct extent_node_s {
qr(extent_node_t) cc_link;
union {
- /* Linkage for the size/sn/address-ordered tree. */
- rb_node(extent_node_t) szsnad_link;
+ /* Linkage for the size/address-ordered tree. */
+ rb_node(extent_node_t) szad_link;
- /* Linkage for arena's achunks, huge, and node_cache lists. */
+ /* Linkage for arena's huge and node_cache lists. */
ql_elm(extent_node_t) ql_link;
};
@@ -75,7 +61,7 @@ typedef rb_tree(extent_node_t) extent_tree_t;
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-rb_proto(, extent_tree_szsnad_, extent_tree_t, extent_node_t)
+rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t)
rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
@@ -87,7 +73,6 @@ rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
arena_t *extent_node_arena_get(const extent_node_t *node);
void *extent_node_addr_get(const extent_node_t *node);
size_t extent_node_size_get(const extent_node_t *node);
-size_t extent_node_sn_get(const extent_node_t *node);
bool extent_node_zeroed_get(const extent_node_t *node);
bool extent_node_committed_get(const extent_node_t *node);
bool extent_node_achunk_get(const extent_node_t *node);
@@ -95,13 +80,12 @@ prof_tctx_t *extent_node_prof_tctx_get(const extent_node_t *node);
void extent_node_arena_set(extent_node_t *node, arena_t *arena);
void extent_node_addr_set(extent_node_t *node, void *addr);
void extent_node_size_set(extent_node_t *node, size_t size);
-void extent_node_sn_set(extent_node_t *node, size_t sn);
void extent_node_zeroed_set(extent_node_t *node, bool zeroed);
void extent_node_committed_set(extent_node_t *node, bool committed);
void extent_node_achunk_set(extent_node_t *node, bool achunk);
void extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx);
void extent_node_init(extent_node_t *node, arena_t *arena, void *addr,
- size_t size, size_t sn, bool zeroed, bool committed);
+ size_t size, bool zeroed, bool committed);
void extent_node_dirty_linkage_init(extent_node_t *node);
void extent_node_dirty_insert(extent_node_t *node,
arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty);
@@ -130,13 +114,6 @@ extent_node_size_get(const extent_node_t *node)
return (node->en_size);
}
-JEMALLOC_INLINE size_t
-extent_node_sn_get(const extent_node_t *node)
-{
-
- return (node->en_sn);
-}
-
JEMALLOC_INLINE bool
extent_node_zeroed_get(const extent_node_t *node)
{
@@ -188,13 +165,6 @@ extent_node_size_set(extent_node_t *node, size_t size)
}
JEMALLOC_INLINE void
-extent_node_sn_set(extent_node_t *node, size_t sn)
-{
-
- node->en_sn = sn;
-}
-
-JEMALLOC_INLINE void
extent_node_zeroed_set(extent_node_t *node, bool zeroed)
{
@@ -224,13 +194,12 @@ extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx)
JEMALLOC_INLINE void
extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size,
- size_t sn, bool zeroed, bool committed)
+ bool zeroed, bool committed)
{
extent_node_arena_set(node, arena);
extent_node_addr_set(node, addr);
extent_node_size_set(node, size);
- extent_node_sn_set(node, sn);
extent_node_zeroed_set(node, zeroed);
extent_node_committed_set(node, committed);
extent_node_achunk_set(node, false);
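
A further illustration, again not part of the patch: with the serial number (en_sn) dropped, the cached and retained chunk trees above go back to a (size, address) ordering. A comparator in that restored style could look roughly like this; the struct and function are reduced stand-ins, not jemalloc's own code.

#include <stddef.h>
#include <stdint.h>

struct extent_node_example {
	void	*en_addr;
	size_t	en_size;
};

static int
extent_szad_comp_example(const struct extent_node_example *a,
    const struct extent_node_example *b)
{
	/* Primary key: size. */
	if (a->en_size < b->en_size)
		return (-1);
	if (a->en_size > b->en_size)
		return (1);
	/* Secondary key: address, giving a stable total order. */
	if ((uintptr_t)a->en_addr < (uintptr_t)b->en_addr)
		return (-1);
	return ((uintptr_t)a->en_addr > (uintptr_t)b->en_addr);
}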
diff --git a/deps/jemalloc/include/jemalloc/internal/hash.h b/deps/jemalloc/include/jemalloc/internal/hash.h
index 1ff2d9a05..bcead337a 100644
--- a/deps/jemalloc/include/jemalloc/internal/hash.h
+++ b/deps/jemalloc/include/jemalloc/internal/hash.h
@@ -1,6 +1,6 @@
/*
* The following hash function is based on MurmurHash3, placed into the public
- * domain by Austin Appleby. See https://github.com/aappleby/smhasher for
+ * domain by Austin Appleby. See http://code.google.com/p/smhasher/ for
* details.
*/
/******************************************************************************/
@@ -49,14 +49,6 @@ JEMALLOC_INLINE uint32_t
hash_get_block_32(const uint32_t *p, int i)
{
- /* Handle unaligned read. */
- if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
- uint32_t ret;
-
- memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
- return (ret);
- }
-
return (p[i]);
}
@@ -64,14 +56,6 @@ JEMALLOC_INLINE uint64_t
hash_get_block_64(const uint64_t *p, int i)
{
- /* Handle unaligned read. */
- if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
- uint64_t ret;
-
- memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
- return (ret);
- }
-
return (p[i]);
}
@@ -337,18 +321,13 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
JEMALLOC_INLINE void
hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
{
-
- assert(len <= INT_MAX); /* Unfortunate implementation limitation. */
-
#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
- hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash);
+ hash_x64_128(key, len, seed, (uint64_t *)r_hash);
#else
- {
- uint64_t hashes[2];
- hash_x86_128(key, (int)len, seed, hashes);
- r_hash[0] = (size_t)hashes[0];
- r_hash[1] = (size_t)hashes[1];
- }
+ uint64_t hashes[2];
+ hash_x86_128(key, len, seed, hashes);
+ r_hash[0] = (size_t)hashes[0];
+ r_hash[1] = (size_t)hashes[1];
#endif
}
#endif
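
Note: the reverted hash() above takes len as a size_t without the INT_MAX guard and forwards it uncast to the MurmurHash3 kernels. A minimal sketch of a caller; the key and seed are arbitrary illustrative values.

	const char key[] = "example-key";
	size_t r[2];

	hash(key, sizeof(key) - 1, 0x9747b28cU, r);
	/* r[0]/r[1] hold the two halves of the 128-bit digest (truncated to
	 * size_t on builds with 32-bit pointers). */
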
diff --git a/deps/jemalloc/include/jemalloc/internal/huge.h b/deps/jemalloc/include/jemalloc/internal/huge.h
index 22184d9bb..ece7af980 100644
--- a/deps/jemalloc/include/jemalloc/internal/huge.h
+++ b/deps/jemalloc/include/jemalloc/internal/huge.h
@@ -9,23 +9,24 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-void *huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
-void *huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
- size_t alignment, bool zero);
-bool huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
- size_t usize_min, size_t usize_max, bool zero);
+void *huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
+ tcache_t *tcache);
+void *huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
+ bool zero, tcache_t *tcache);
+bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
+ size_t usize_max, bool zero);
void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
size_t usize, size_t alignment, bool zero, tcache_t *tcache);
#ifdef JEMALLOC_JET
typedef void (huge_dalloc_junk_t)(void *, size_t);
extern huge_dalloc_junk_t *huge_dalloc_junk;
#endif
-void huge_dalloc(tsdn_t *tsdn, void *ptr);
+void huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
arena_t *huge_aalloc(const void *ptr);
-size_t huge_salloc(tsdn_t *tsdn, const void *ptr);
-prof_tctx_t *huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
-void huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx);
-void huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr);
+size_t huge_salloc(const void *ptr);
+prof_tctx_t *huge_prof_tctx_get(const void *ptr);
+void huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
+void huge_prof_tctx_reset(const void *ptr);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
index e7ace7d8c..8536a3eda 100644
--- a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
@@ -49,7 +49,6 @@ static const bool config_lazy_lock =
false
#endif
;
-static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
static const bool config_prof =
#ifdef JEMALLOC_PROF
true
@@ -161,10 +160,7 @@ static const bool config_cache_oblivious =
#include <malloc/malloc.h>
#endif
-#include "jemalloc/internal/ph.h"
-#ifndef __PGI
#define RB_COMPACT
-#endif
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"
@@ -187,9 +183,6 @@ static const bool config_cache_oblivious =
#include "jemalloc/internal/jemalloc_internal_macros.h"
-/* Page size index type. */
-typedef unsigned pszind_t;
-
/* Size class index type. */
typedef unsigned szind_t;
@@ -239,7 +232,7 @@ typedef unsigned szind_t;
# ifdef __alpha__
# define LG_QUANTUM 4
# endif
-# if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__))
+# if (defined(__sparc64__) || defined(__sparcv9))
# define LG_QUANTUM 4
# endif
# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
@@ -263,9 +256,6 @@ typedef unsigned szind_t;
# ifdef __powerpc__
# define LG_QUANTUM 4
# endif
-# ifdef __riscv__
-# define LG_QUANTUM 4
-# endif
# ifdef __s390__
# define LG_QUANTUM 4
# endif
@@ -327,17 +317,13 @@ typedef unsigned szind_t;
#define PAGE ((size_t)(1U << LG_PAGE))
#define PAGE_MASK ((size_t)(PAGE - 1))
-/* Return the page base address for the page containing address a. */
-#define PAGE_ADDR2BASE(a) \
- ((void *)((uintptr_t)(a) & ~PAGE_MASK))
-
/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
(((s) + PAGE_MASK) & ~PAGE_MASK)
/* Return the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2BASE(a, alignment) \
- ((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))
+ ((void *)((uintptr_t)(a) & (-(alignment))))
/* Return the offset between a and the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
@@ -345,7 +331,7 @@ typedef unsigned szind_t;
/* Return the smallest alignment multiple that is >= s. */
#define ALIGNMENT_CEILING(s, alignment) \
- (((s) + (alignment - 1)) & ((~(alignment)) + 1))
+ (((s) + (alignment - 1)) & (-(alignment)))
/* Declare a variable-length array. */
#if __STDC_VERSION__ < 199901L
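
Note: the two hunks above restore (-(alignment)) in place of ((~(alignment)) + 1). For the unsigned, power-of-two alignments these macros are used with, both expressions yield the same two's-complement mask, so the revert is behavior-neutral here. A worked example with illustrative values, assuming a 64-bit size_t:

	/* alignment = 64: -(size_t)64 == ~(size_t)64 + 1 == 0xffff...ffc0 */
	ALIGNMENT_ADDR2BASE((void *)0x1234567b, 64)	/* -> 0x12345640 */
	ALIGNMENT_CEILING(100, 64)	/* -> (100 + 63) & ~63 == 128 */
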
@@ -365,19 +351,14 @@ typedef unsigned szind_t;
# define VARIABLE_ARRAY(type, name, count) type name[(count)]
#endif
-#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/prng.h"
-#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
-#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
-#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
@@ -398,19 +379,14 @@ typedef unsigned szind_t;
/******************************************************************************/
#define JEMALLOC_H_STRUCTS
-#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/prng.h"
-#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
-#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
-#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
@@ -446,28 +422,14 @@ extern bool opt_redzone;
extern bool opt_utrace;
extern bool opt_xmalloc;
extern bool opt_zero;
-extern unsigned opt_narenas;
+extern size_t opt_narenas;
extern bool in_valgrind;
/* Number of CPUs. */
-extern unsigned ncpus;
-
-/* Number of arenas used for automatic multiplexing of threads and arenas. */
-extern unsigned narenas_auto;
+extern unsigned ncpus;
/*
- * Arenas that are used to service external requests. Not all elements of the
- * arenas array are necessarily used; arenas are created lazily as needed.
- */
-extern arena_t **arenas;
-
-/*
- * pind2sz_tab encodes the same information as could be computed by
- * pind2sz_compute().
- */
-extern size_t const pind2sz_tab[NPSIZES];
-/*
* index2size_tab encodes the same information as could be computed (at
* unacceptable cost in some code paths) by index2size_compute().
*/
@@ -485,35 +447,31 @@ void a0dalloc(void *ptr);
void *bootstrap_malloc(size_t size);
void *bootstrap_calloc(size_t num, size_t size);
void bootstrap_free(void *ptr);
+arena_t *arenas_extend(unsigned ind);
+arena_t *arena_init(unsigned ind);
unsigned narenas_total_get(void);
-arena_t *arena_init(tsdn_t *tsdn, unsigned ind);
-arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
-arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
+arena_t *arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing);
+arena_t *arena_choose_hard(tsd_t *tsd);
void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
+unsigned arena_nbound(unsigned ind);
void thread_allocated_cleanup(tsd_t *tsd);
void thread_deallocated_cleanup(tsd_t *tsd);
-void iarena_cleanup(tsd_t *tsd);
void arena_cleanup(tsd_t *tsd);
-void arenas_tdata_cleanup(tsd_t *tsd);
-void narenas_tdata_cleanup(tsd_t *tsd);
-void arenas_tdata_bypass_cleanup(tsd_t *tsd);
+void arenas_cache_cleanup(tsd_t *tsd);
+void narenas_cache_cleanup(tsd_t *tsd);
+void arenas_cache_bypass_cleanup(tsd_t *tsd);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);
-#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/prng.h"
-#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
-#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
-#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
@@ -534,21 +492,16 @@ void jemalloc_postfork_child(void);
/******************************************************************************/
#define JEMALLOC_H_INLINES
-#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/prng.h"
-#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
-#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
-#include "jemalloc/internal/tsd.h"
-#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
@@ -558,11 +511,6 @@ void jemalloc_postfork_child(void);
#include "jemalloc/internal/huge.h"
#ifndef JEMALLOC_ENABLE_INLINE
-pszind_t psz2ind(size_t psz);
-size_t pind2sz_compute(pszind_t pind);
-size_t pind2sz_lookup(pszind_t pind);
-size_t pind2sz(pszind_t pind);
-size_t psz2u(size_t psz);
szind_t size2index_compute(size_t size);
szind_t size2index_lookup(size_t size);
szind_t size2index(size_t size);
@@ -573,121 +521,39 @@ size_t s2u_compute(size_t size);
size_t s2u_lookup(size_t size);
size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment);
-arena_t *arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal);
arena_t *arena_choose(tsd_t *tsd, arena_t *arena);
-arena_t *arena_ichoose(tsd_t *tsd, arena_t *arena);
-arena_tdata_t *arena_tdata_get(tsd_t *tsd, unsigned ind,
+arena_t *arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
bool refresh_if_missing);
-arena_t *arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing);
-ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
-JEMALLOC_INLINE pszind_t
-psz2ind(size_t psz)
-{
-
- if (unlikely(psz > HUGE_MAXCLASS))
- return (NPSIZES);
- {
- pszind_t x = lg_floor((psz<<1)-1);
- pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x -
- (LG_SIZE_CLASS_GROUP + LG_PAGE);
- pszind_t grp = shift << LG_SIZE_CLASS_GROUP;
-
- pszind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
- LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
-
- size_t delta_inverse_mask = ZI(-1) << lg_delta;
- pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) &
- ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
-
- pszind_t ind = grp + mod;
- return (ind);
- }
-}
-
-JEMALLOC_INLINE size_t
-pind2sz_compute(pszind_t pind)
-{
-
- {
- size_t grp = pind >> LG_SIZE_CLASS_GROUP;
- size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
-
- size_t grp_size_mask = ~((!!grp)-1);
- size_t grp_size = ((ZU(1) << (LG_PAGE +
- (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
-
- size_t shift = (grp == 0) ? 1 : grp;
- size_t lg_delta = shift + (LG_PAGE-1);
- size_t mod_size = (mod+1) << lg_delta;
-
- size_t sz = grp_size + mod_size;
- return (sz);
- }
-}
-
-JEMALLOC_INLINE size_t
-pind2sz_lookup(pszind_t pind)
-{
- size_t ret = (size_t)pind2sz_tab[pind];
- assert(ret == pind2sz_compute(pind));
- return (ret);
-}
-
-JEMALLOC_INLINE size_t
-pind2sz(pszind_t pind)
-{
-
- assert(pind < NPSIZES);
- return (pind2sz_lookup(pind));
-}
-
-JEMALLOC_INLINE size_t
-psz2u(size_t psz)
-{
-
- if (unlikely(psz > HUGE_MAXCLASS))
- return (0);
- {
- size_t x = lg_floor((psz<<1)-1);
- size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
- LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
- size_t delta = ZU(1) << lg_delta;
- size_t delta_mask = delta - 1;
- size_t usize = (psz + delta_mask) & ~delta_mask;
- return (usize);
- }
-}
-
JEMALLOC_INLINE szind_t
size2index_compute(size_t size)
{
- if (unlikely(size > HUGE_MAXCLASS))
- return (NSIZES);
#if (NTBINS != 0)
if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
- szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
- szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
+ size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
+ size_t lg_ceil = lg_floor(pow2_ceil(size));
return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
}
#endif
{
- szind_t x = lg_floor((size<<1)-1);
- szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
+ size_t x = unlikely(ZI(size) < 0) ? ((size<<1) ?
+ (ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1))
+ : lg_floor((size<<1)-1);
+ size_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
- szind_t grp = shift << LG_SIZE_CLASS_GROUP;
+ size_t grp = shift << LG_SIZE_CLASS_GROUP;
- szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
+ size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
size_t delta_inverse_mask = ZI(-1) << lg_delta;
- szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
+ size_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
- szind_t index = NTBINS + grp + mod;
+ size_t index = NTBINS + grp + mod;
return (index);
}
}
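
Note: the reverted size2index_compute() above performs the same group/offset decomposition, just with size_t intermediates and an overflow check on (size << 1) instead of the HUGE_MAXCLASS guard. A worked example of the decomposition, assuming LG_QUANTUM = 4, LG_SIZE_CLASS_GROUP = 2 and NTBINS = 1 (a common x86-64 configuration):

	/* size = 96:
	 *   x        = lg_floor((96 << 1) - 1) = lg_floor(191) = 7
	 *   shift    = 7 - (2 + 4) = 1          grp = 1 << 2 = 4
	 *   lg_delta = 7 - 2 - 1 = 4            (classes spaced 16 bytes apart)
	 *   mod      = (((96 - 1) & ~15) >> 4) & 3 = 1
	 *   index    = NTBINS + grp + mod = 1 + 4 + 1 = 6
	 * i.e. 96 is the 7th class in the sequence 8, 16, 32, 48, 64, 80, 96. */
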
@@ -698,7 +564,8 @@ size2index_lookup(size_t size)
assert(size <= LOOKUP_MAXCLASS);
{
- szind_t ret = (size2index_tab[(size-1) >> LG_TINY_MIN]);
+ size_t ret = ((size_t)(size2index_tab[(size-1) >>
+ LG_TINY_MIN]));
assert(ret == size2index_compute(size));
return (ret);
}
@@ -761,18 +628,18 @@ JEMALLOC_ALWAYS_INLINE size_t
s2u_compute(size_t size)
{
- if (unlikely(size > HUGE_MAXCLASS))
- return (0);
#if (NTBINS > 0)
if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
- size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
+ size_t lg_ceil = lg_floor(pow2_ceil(size));
return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
(ZU(1) << lg_ceil));
}
#endif
{
- size_t x = lg_floor((size<<1)-1);
+ size_t x = unlikely(ZI(size) < 0) ? ((size<<1) ?
+ (ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1))
+ : lg_floor((size<<1)-1);
size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
size_t delta = ZU(1) << lg_delta;
@@ -856,16 +723,17 @@ sa2u(size_t size, size_t alignment)
return (usize);
}
- /* Huge size class. Beware of overflow. */
-
- if (unlikely(alignment > HUGE_MAXCLASS))
- return (0);
+ /* Huge size class. Beware of size_t overflow. */
/*
* We can't achieve subchunk alignment, so round up alignment to the
* minimum that can actually be supported.
*/
alignment = CHUNK_CEILING(alignment);
+ if (alignment == 0) {
+ /* size_t overflow. */
+ return (0);
+ }
/* Make sure result is a huge size class. */
if (size <= chunksize)
@@ -891,84 +759,45 @@ sa2u(size_t size, size_t alignment)
/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
-arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal)
+arena_choose(tsd_t *tsd, arena_t *arena)
{
arena_t *ret;
if (arena != NULL)
return (arena);
- ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
- if (unlikely(ret == NULL))
- ret = arena_choose_hard(tsd, internal);
+ if (unlikely((ret = tsd_arena_get(tsd)) == NULL))
+ ret = arena_choose_hard(tsd);
return (ret);
}
JEMALLOC_INLINE arena_t *
-arena_choose(tsd_t *tsd, arena_t *arena)
+arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
+ bool refresh_if_missing)
{
+ arena_t *arena;
+ arena_t **arenas_cache = tsd_arenas_cache_get(tsd);
- return (arena_choose_impl(tsd, arena, false));
-}
-
-JEMALLOC_INLINE arena_t *
-arena_ichoose(tsd_t *tsd, arena_t *arena)
-{
+ /* init_if_missing requires refresh_if_missing. */
+ assert(!init_if_missing || refresh_if_missing);
- return (arena_choose_impl(tsd, arena, true));
-}
-
-JEMALLOC_INLINE arena_tdata_t *
-arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing)
-{
- arena_tdata_t *tdata;
- arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
-
- if (unlikely(arenas_tdata == NULL)) {
- /* arenas_tdata hasn't been initialized yet. */
- return (arena_tdata_get_hard(tsd, ind));
+ if (unlikely(arenas_cache == NULL)) {
+ /* arenas_cache hasn't been initialized yet. */
+ return (arena_get_hard(tsd, ind, init_if_missing));
}
- if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
+ if (unlikely(ind >= tsd_narenas_cache_get(tsd))) {
/*
- * ind is invalid, cache is old (too small), or tdata to be
+ * ind is invalid, cache is old (too small), or arena to be
* initialized.
*/
- return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) :
- NULL);
- }
-
- tdata = &arenas_tdata[ind];
- if (likely(tdata != NULL) || !refresh_if_missing)
- return (tdata);
- return (arena_tdata_get_hard(tsd, ind));
-}
-
-JEMALLOC_INLINE arena_t *
-arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing)
-{
- arena_t *ret;
-
- assert(ind <= MALLOCX_ARENA_MAX);
-
- ret = arenas[ind];
- if (unlikely(ret == NULL)) {
- ret = atomic_read_p((void *)&arenas[ind]);
- if (init_if_missing && unlikely(ret == NULL))
- ret = arena_init(tsdn, ind);
+ return (refresh_if_missing ? arena_get_hard(tsd, ind,
+ init_if_missing) : NULL);
}
- return (ret);
-}
-
-JEMALLOC_INLINE ticker_t *
-decay_ticker_get(tsd_t *tsd, unsigned ind)
-{
- arena_tdata_t *tdata;
-
- tdata = arena_tdata_get(tsd, ind, true);
- if (unlikely(tdata == NULL))
- return (NULL);
- return (&tdata->decay_ticker);
+ arena = arenas_cache[ind];
+ if (likely(arena != NULL) || !refresh_if_missing)
+ return (arena);
+ return (arena_get_hard(tsd, ind, init_if_missing));
}
#endif
@@ -989,27 +818,27 @@ decay_ticker_get(tsd_t *tsd, unsigned ind)
#ifndef JEMALLOC_ENABLE_INLINE
arena_t *iaalloc(const void *ptr);
-size_t isalloc(tsdn_t *tsdn, const void *ptr, bool demote);
-void *iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero,
- tcache_t *tcache, bool is_metadata, arena_t *arena, bool slow_path);
-void *ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero,
- bool slow_path);
-void *ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
+size_t isalloc(const void *ptr, bool demote);
+void *iallocztm(tsd_t *tsd, size_t size, bool zero, tcache_t *tcache,
+ bool is_metadata, arena_t *arena);
+void *imalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena);
+void *imalloc(tsd_t *tsd, size_t size);
+void *icalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena);
+void *icalloc(tsd_t *tsd, size_t size);
+void *ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, bool is_metadata, arena_t *arena);
-void *ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
+void *ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, arena_t *arena);
void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
-size_t ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote);
+size_t ivsalloc(const void *ptr, bool demote);
size_t u2rz(size_t usize);
-size_t p2rz(tsdn_t *tsdn, const void *ptr);
-void idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata,
- bool slow_path);
+size_t p2rz(const void *ptr);
+void idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata);
+void idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache);
void idalloc(tsd_t *tsd, void *ptr);
-void iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path);
-void isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
- bool slow_path);
-void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache,
- bool slow_path);
+void iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
+void isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
+void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
void *iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero, tcache_t *tcache,
arena_t *arena);
@@ -1017,8 +846,8 @@ void *iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t alignment, bool zero, tcache_t *tcache, arena_t *arena);
void *iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t alignment, bool zero);
-bool ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
- size_t extra, size_t alignment, bool zero);
+bool ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra,
+ size_t alignment, bool zero);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
@@ -1033,85 +862,100 @@ iaalloc(const void *ptr)
/*
* Typical usage:
- * tsdn_t *tsdn = [...]
* void *ptr = [...]
- * size_t sz = isalloc(tsdn, ptr, config_prof);
+ * size_t sz = isalloc(ptr, config_prof);
*/
JEMALLOC_ALWAYS_INLINE size_t
-isalloc(tsdn_t *tsdn, const void *ptr, bool demote)
+isalloc(const void *ptr, bool demote)
{
assert(ptr != NULL);
/* Demotion only makes sense if config_prof is true. */
assert(config_prof || !demote);
- return (arena_salloc(tsdn, ptr, demote));
+ return (arena_salloc(ptr, demote));
}
JEMALLOC_ALWAYS_INLINE void *
-iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
- bool is_metadata, arena_t *arena, bool slow_path)
+iallocztm(tsd_t *tsd, size_t size, bool zero, tcache_t *tcache, bool is_metadata,
+ arena_t *arena)
{
void *ret;
assert(size != 0);
- assert(!is_metadata || tcache == NULL);
- assert(!is_metadata || arena == NULL || arena->ind < narenas_auto);
- ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
+ ret = arena_malloc(tsd, arena, size, zero, tcache);
if (config_stats && is_metadata && likely(ret != NULL)) {
- arena_metadata_allocated_add(iaalloc(ret),
- isalloc(tsdn, ret, config_prof));
+ arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
+ config_prof));
}
return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
-ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path)
+imalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena)
{
- return (iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd, true),
- false, NULL, slow_path));
+ return (iallocztm(tsd, size, false, tcache, false, arena));
}
JEMALLOC_ALWAYS_INLINE void *
-ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
+imalloc(tsd_t *tsd, size_t size)
+{
+
+ return (iallocztm(tsd, size, false, tcache_get(tsd, true), false, NULL));
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+icalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena)
+{
+
+ return (iallocztm(tsd, size, true, tcache, false, arena));
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+icalloc(tsd_t *tsd, size_t size)
+{
+
+ return (iallocztm(tsd, size, true, tcache_get(tsd, true), false, NULL));
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, bool is_metadata, arena_t *arena)
{
void *ret;
assert(usize != 0);
assert(usize == sa2u(usize, alignment));
- assert(!is_metadata || tcache == NULL);
- assert(!is_metadata || arena == NULL || arena->ind < narenas_auto);
- ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
+ ret = arena_palloc(tsd, arena, usize, alignment, zero, tcache);
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
if (config_stats && is_metadata && likely(ret != NULL)) {
- arena_metadata_allocated_add(iaalloc(ret), isalloc(tsdn, ret,
+ arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
config_prof));
}
return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
-ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
+ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, arena_t *arena)
{
- return (ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena));
+ return (ipallocztm(tsd, usize, alignment, zero, tcache, false, arena));
}
JEMALLOC_ALWAYS_INLINE void *
ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
{
- return (ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
- tcache_get(tsd, true), false, NULL));
+ return (ipallocztm(tsd, usize, alignment, zero, tcache_get(tsd,
+ NULL), false, NULL));
}
JEMALLOC_ALWAYS_INLINE size_t
-ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote)
+ivsalloc(const void *ptr, bool demote)
{
extent_node_t *node;
@@ -1123,7 +967,7 @@ ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote)
assert(extent_node_addr_get(node) == ptr ||
extent_node_achunk_get(node));
- return (isalloc(tsdn, ptr, demote));
+ return (isalloc(ptr, demote));
}
JEMALLOC_INLINE size_t
@@ -1141,62 +985,65 @@ u2rz(size_t usize)
}
JEMALLOC_INLINE size_t
-p2rz(tsdn_t *tsdn, const void *ptr)
+p2rz(const void *ptr)
{
- size_t usize = isalloc(tsdn, ptr, false);
+ size_t usize = isalloc(ptr, false);
return (u2rz(usize));
}
JEMALLOC_ALWAYS_INLINE void
-idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata,
- bool slow_path)
+idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata)
{
assert(ptr != NULL);
- assert(!is_metadata || tcache == NULL);
- assert(!is_metadata || iaalloc(ptr)->ind < narenas_auto);
if (config_stats && is_metadata) {
- arena_metadata_allocated_sub(iaalloc(ptr), isalloc(tsdn, ptr,
+ arena_metadata_allocated_sub(iaalloc(ptr), isalloc(ptr,
config_prof));
}
- arena_dalloc(tsdn, ptr, tcache, slow_path);
+ arena_dalloc(tsd, ptr, tcache);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache)
+{
+
+ idalloctm(tsd, ptr, tcache, false);
}
JEMALLOC_ALWAYS_INLINE void
idalloc(tsd_t *tsd, void *ptr)
{
- idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd, false), false, true);
+ idalloctm(tsd, ptr, tcache_get(tsd, false), false);
}
JEMALLOC_ALWAYS_INLINE void
-iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
+iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
- if (slow_path && config_fill && unlikely(opt_quarantine))
+ if (config_fill && unlikely(opt_quarantine))
quarantine(tsd, ptr);
else
- idalloctm(tsd_tsdn(tsd), ptr, tcache, false, slow_path);
+ idalloctm(tsd, ptr, tcache, false);
}
JEMALLOC_ALWAYS_INLINE void
-isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
- bool slow_path)
+isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
{
- arena_sdalloc(tsdn, ptr, size, tcache, slow_path);
+ arena_sdalloc(tsd, ptr, size, tcache);
}
JEMALLOC_ALWAYS_INLINE void
-isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, bool slow_path)
+isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
{
- if (slow_path && config_fill && unlikely(opt_quarantine))
+ if (config_fill && unlikely(opt_quarantine))
quarantine(tsd, ptr);
else
- isdalloct(tsd_tsdn(tsd), ptr, size, tcache, slow_path);
+ isdalloct(tsd, ptr, size, tcache);
}
JEMALLOC_ALWAYS_INLINE void *
@@ -1207,18 +1054,17 @@ iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t usize, copysize;
usize = sa2u(size + extra, alignment);
- if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
+ if (usize == 0)
return (NULL);
- p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache, arena);
+ p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
if (p == NULL) {
if (extra == 0)
return (NULL);
/* Try again, without extra this time. */
usize = sa2u(size, alignment);
- if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
+ if (usize == 0)
return (NULL);
- p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache,
- arena);
+ p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
if (p == NULL)
return (NULL);
}
@@ -1228,7 +1074,7 @@ iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(p, ptr, copysize);
- isqalloc(tsd, ptr, oldsize, tcache, true);
+ isqalloc(tsd, ptr, oldsize, tcache);
return (p);
}
@@ -1264,8 +1110,8 @@ iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
}
JEMALLOC_ALWAYS_INLINE bool
-ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
- size_t alignment, bool zero)
+ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment,
+ bool zero)
{
assert(ptr != NULL);
@@ -1277,7 +1123,7 @@ ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
return (true);
}
- return (arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero));
+ return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
}
#endif
diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
index c907d9109..a601d6ebb 100644
--- a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
+++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
@@ -17,18 +17,7 @@
# include <sys/uio.h>
# endif
# include <pthread.h>
-# ifdef JEMALLOC_OS_UNFAIR_LOCK
-# include <os/lock.h>
-# endif
-# ifdef JEMALLOC_GLIBC_MALLOC_HOOK
-# include <sched.h>
-# endif
# include <errno.h>
-# include <sys/time.h>
-# include <time.h>
-# ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
-# include <mach/mach_time.h>
-# endif
#endif
#include <sys/types.h>
diff --git a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in
index def4ba550..b0f8caaf8 100644
--- a/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in
+++ b/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in
@@ -56,9 +56,9 @@
#undef JEMALLOC_HAVE_BUILTIN_CLZ
/*
- * Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
+ * Defined if madvise(2) is available.
*/
-#undef JEMALLOC_OS_UNFAIR_LOCK
+#undef JEMALLOC_HAVE_MADVISE
/*
* Defined if OSSpin*() functions are available, as provided by Darwin, and
@@ -66,9 +66,6 @@
*/
#undef JEMALLOC_OSSPIN
-/* Defined if syscall(2) is usable. */
-#undef JEMALLOC_USE_SYSCALL
-
/*
* Defined if secure_getenv(3) is available.
*/
@@ -79,24 +76,6 @@
*/
#undef JEMALLOC_HAVE_ISSETUGID
-/* Defined if pthread_atfork(3) is available. */
-#undef JEMALLOC_HAVE_PTHREAD_ATFORK
-
-/*
- * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
- */
-#undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
-
-/*
- * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
- */
-#undef JEMALLOC_HAVE_CLOCK_MONOTONIC
-
-/*
- * Defined if mach_absolute_time() is available.
- */
-#undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
-
/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc
@@ -210,16 +189,9 @@
#undef JEMALLOC_TLS
/*
- * Used to mark unreachable code to quiet "end of non-void" compiler warnings.
- * Don't use this directly; instead use unreachable() from util.h
+ * ffs()/ffsl() functions to use for bitmapping. Don't use these directly;
+ * instead, use jemalloc_ffs() or jemalloc_ffsl() from util.h.
*/
-#undef JEMALLOC_INTERNAL_UNREACHABLE
-
-/*
- * ffs*() functions to use for bitmapping. Don't use these directly; instead,
- * use ffs_*() from util.h.
- */
-#undef JEMALLOC_INTERNAL_FFSLL
#undef JEMALLOC_INTERNAL_FFSL
#undef JEMALLOC_INTERNAL_FFS
@@ -242,34 +214,17 @@
#undef JEMALLOC_ZONE_VERSION
/*
- * Methods for determining whether the OS overcommits.
- * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
- * /proc/sys/vm.overcommit_memory file.
- * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
- */
-#undef JEMALLOC_SYSCTL_VM_OVERCOMMIT
-#undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
-
-/* Defined if madvise(2) is available. */
-#undef JEMALLOC_HAVE_MADVISE
-
-/*
* Methods for purging unused pages differ between operating systems.
*
- * madvise(..., MADV_FREE) : This marks pages as being unused, such that they
- * will be discarded rather than swapped out.
- * madvise(..., MADV_DONTNEED) : This immediately discards pages, such that
- * new pages will be demand-zeroed if the
- * address region is later touched.
+ * madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages,
+ * such that new pages will be demand-zeroed if
+ * the address region is later touched.
+ * madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being
+ * unused, such that they will be discarded rather
+ * than swapped out.
*/
-#undef JEMALLOC_PURGE_MADVISE_FREE
#undef JEMALLOC_PURGE_MADVISE_DONTNEED
-
-/*
- * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
- * arguments to madvise(2).
- */
-#undef JEMALLOC_THP
+#undef JEMALLOC_PURGE_MADVISE_FREE
/* Define if operating system has alloca.h header. */
#undef JEMALLOC_HAS_ALLOCA_H
@@ -286,9 +241,6 @@
/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#undef LG_SIZEOF_LONG
-/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
-#undef LG_SIZEOF_LONG_LONG
-
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#undef LG_SIZEOF_INTMAX_T
@@ -307,7 +259,4 @@
*/
#undef JEMALLOC_EXPORT
-/* config.malloc_conf options string. */
-#undef JEMALLOC_CONFIG_MALLOC_CONF
-
#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
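
Note: the restored comment above describes the purging split: MADV_DONTNEED on Linux discards pages immediately (later reads of an anonymous mapping are demand-zeroed), while MADV_FREE on FreeBSD/Darwin only marks them reclaimable. A self-contained sketch of that distinction, separate from jemalloc's own pages_purge() plumbing; purge_pages() is an illustrative name, not a jemalloc symbol.

#include <stddef.h>
#include <sys/mman.h>

/* Returns nonzero if the purged pages may still hold their previous
 * contents (i.e. the caller must treat them as unzeroed). */
static int
purge_pages(void *addr, size_t length)
{
#if defined(MADV_FREE)
	/* Lazy: reclaimed only under memory pressure; reads may still
	 * observe the old contents until then. */
	madvise(addr, length, MADV_FREE);
	return (1);
#elif defined(MADV_DONTNEED)
	/* Immediate: on success, later reads are demand-zeroed. */
	return (madvise(addr, length, MADV_DONTNEED) != 0);
#else
	return (1);
#endif
}
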
diff --git a/deps/jemalloc/include/jemalloc/internal/mb.h b/deps/jemalloc/include/jemalloc/internal/mb.h
index 5384728fd..3cfa78729 100644
--- a/deps/jemalloc/include/jemalloc/internal/mb.h
+++ b/deps/jemalloc/include/jemalloc/internal/mb.h
@@ -42,7 +42,7 @@ mb_write(void)
: /* Inputs. */
: "memory" /* Clobbers. */
);
-# else
+#else
/*
* This is hopefully enough to keep the compiler from reordering
* instructions around this one.
@@ -52,7 +52,7 @@ mb_write(void)
: /* Inputs. */
: "memory" /* Clobbers. */
);
-# endif
+#endif
}
#elif (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE void
@@ -104,9 +104,9 @@ mb_write(void)
{
malloc_mutex_t mtx;
- malloc_mutex_init(&mtx, "mb", WITNESS_RANK_OMIT);
- malloc_mutex_lock(TSDN_NULL, &mtx);
- malloc_mutex_unlock(TSDN_NULL, &mtx);
+ malloc_mutex_init(&mtx);
+ malloc_mutex_lock(&mtx);
+ malloc_mutex_unlock(&mtx);
}
#endif
#endif
diff --git a/deps/jemalloc/include/jemalloc/internal/mutex.h b/deps/jemalloc/include/jemalloc/internal/mutex.h
index b442d2d4e..f051f2917 100644
--- a/deps/jemalloc/include/jemalloc/internal/mutex.h
+++ b/deps/jemalloc/include/jemalloc/internal/mutex.h
@@ -5,25 +5,18 @@ typedef struct malloc_mutex_s malloc_mutex_t;
#ifdef _WIN32
# define MALLOC_MUTEX_INITIALIZER
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
-# define MALLOC_MUTEX_INITIALIZER \
- {OS_UNFAIR_LOCK_INIT, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_OSSPIN))
-# define MALLOC_MUTEX_INITIALIZER {0, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
+# define MALLOC_MUTEX_INITIALIZER {0}
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
-# define MALLOC_MUTEX_INITIALIZER \
- {PTHREAD_MUTEX_INITIALIZER, NULL, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
+# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL}
#else
# if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) && \
defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
-# define MALLOC_MUTEX_INITIALIZER \
- {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP, \
- WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
+# define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP}
# else
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
-# define MALLOC_MUTEX_INITIALIZER \
- {PTHREAD_MUTEX_INITIALIZER, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
+# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER}
# endif
#endif
@@ -38,8 +31,6 @@ struct malloc_mutex_s {
# else
CRITICAL_SECTION lock;
# endif
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
- os_unfair_lock lock;
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLock lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
@@ -48,7 +39,6 @@ struct malloc_mutex_s {
#else
pthread_mutex_t lock;
#endif
- witness_t witness;
};
#endif /* JEMALLOC_H_STRUCTS */
@@ -62,62 +52,52 @@ extern bool isthreaded;
# define isthreaded true
#endif
-bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
- witness_rank_t rank);
-void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
-void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
-void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
-bool malloc_mutex_boot(void);
+bool malloc_mutex_init(malloc_mutex_t *mutex);
+void malloc_mutex_prefork(malloc_mutex_t *mutex);
+void malloc_mutex_postfork_parent(malloc_mutex_t *mutex);
+void malloc_mutex_postfork_child(malloc_mutex_t *mutex);
+bool mutex_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
-void malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex);
-void malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex);
-void malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
-void malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
+void malloc_mutex_lock(malloc_mutex_t *mutex);
+void malloc_mutex_unlock(malloc_mutex_t *mutex);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
JEMALLOC_INLINE void
-malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex)
+malloc_mutex_lock(malloc_mutex_t *mutex)
{
if (isthreaded) {
- witness_assert_not_owner(tsdn, &mutex->witness);
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
AcquireSRWLockExclusive(&mutex->lock);
# else
EnterCriticalSection(&mutex->lock);
# endif
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
- os_unfair_lock_lock(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLockLock(&mutex->lock);
#else
pthread_mutex_lock(&mutex->lock);
#endif
- witness_lock(tsdn, &mutex->witness);
}
}
JEMALLOC_INLINE void
-malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex)
+malloc_mutex_unlock(malloc_mutex_t *mutex)
{
if (isthreaded) {
- witness_unlock(tsdn, &mutex->witness);
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
ReleaseSRWLockExclusive(&mutex->lock);
# else
LeaveCriticalSection(&mutex->lock);
# endif
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
- os_unfair_lock_unlock(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLockUnlock(&mutex->lock);
#else
@@ -125,22 +105,6 @@ malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex)
#endif
}
}
-
-JEMALLOC_INLINE void
-malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
-
- if (isthreaded)
- witness_assert_owner(tsdn, &mutex->witness);
-}
-
-JEMALLOC_INLINE void
-malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
-
- if (isthreaded)
- witness_assert_not_owner(tsdn, &mutex->witness);
-}
#endif
#endif /* JEMALLOC_H_INLINES */
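
Note: with the witness machinery gone, the reverted lock API above takes only the mutex. A hypothetical use of the restored interface; stats_mtx and stats_count are illustrative names, not jemalloc symbols.

static malloc_mutex_t	stats_mtx = MALLOC_MUTEX_INITIALIZER;
static uint64_t		stats_count;

static void
stats_bump(void)
{

	malloc_mutex_lock(&stats_mtx);
	stats_count++;
	malloc_mutex_unlock(&stats_mtx);
}
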
diff --git a/deps/jemalloc/include/jemalloc/internal/nstime.h b/deps/jemalloc/include/jemalloc/internal/nstime.h
deleted file mode 100644
index 93b27dc80..000000000
--- a/deps/jemalloc/include/jemalloc/internal/nstime.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-typedef struct nstime_s nstime_t;
-
-/* Maximum supported number of seconds (~584 years). */
-#define NSTIME_SEC_MAX KQU(18446744072)
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-struct nstime_s {
- uint64_t ns;
-};
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-void nstime_init(nstime_t *time, uint64_t ns);
-void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
-uint64_t nstime_ns(const nstime_t *time);
-uint64_t nstime_sec(const nstime_t *time);
-uint64_t nstime_nsec(const nstime_t *time);
-void nstime_copy(nstime_t *time, const nstime_t *source);
-int nstime_compare(const nstime_t *a, const nstime_t *b);
-void nstime_add(nstime_t *time, const nstime_t *addend);
-void nstime_subtract(nstime_t *time, const nstime_t *subtrahend);
-void nstime_imultiply(nstime_t *time, uint64_t multiplier);
-void nstime_idivide(nstime_t *time, uint64_t divisor);
-uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor);
-#ifdef JEMALLOC_JET
-typedef bool (nstime_monotonic_t)(void);
-extern nstime_monotonic_t *nstime_monotonic;
-typedef bool (nstime_update_t)(nstime_t *);
-extern nstime_update_t *nstime_update;
-#else
-bool nstime_monotonic(void);
-bool nstime_update(nstime_t *time);
-#endif
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
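
Note: the header deleted above carried the nanosecond-time helpers added for decay-based purging, which this revert removes as well. For reference, a sketch of how the removed arithmetic interface fit together, with arbitrary values:

	nstime_t a, b;

	nstime_init2(&a, 2, 500000000);	/* a = 2.5 s  */
	nstime_init2(&b, 0, 750000000);	/* b = 0.75 s */
	nstime_add(&a, &b);		/* a = 3.25 s */
	assert(nstime_compare(&a, &b) > 0);
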
diff --git a/deps/jemalloc/include/jemalloc/internal/pages.h b/deps/jemalloc/include/jemalloc/internal/pages.h
index 4ae9f156a..da7eb9686 100644
--- a/deps/jemalloc/include/jemalloc/internal/pages.h
+++ b/deps/jemalloc/include/jemalloc/internal/pages.h
@@ -9,16 +9,13 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-void *pages_map(void *addr, size_t size, bool *commit);
+void *pages_map(void *addr, size_t size);
void pages_unmap(void *addr, size_t size);
void *pages_trim(void *addr, size_t alloc_size, size_t leadsize,
- size_t size, bool *commit);
+ size_t size);
bool pages_commit(void *addr, size_t size);
bool pages_decommit(void *addr, size_t size);
bool pages_purge(void *addr, size_t size);
-bool pages_huge(void *addr, size_t size);
-bool pages_nohuge(void *addr, size_t size);
-void pages_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/deps/jemalloc/include/jemalloc/internal/ph.h b/deps/jemalloc/include/jemalloc/internal/ph.h
deleted file mode 100644
index 4f91c333f..000000000
--- a/deps/jemalloc/include/jemalloc/internal/ph.h
+++ /dev/null
@@ -1,345 +0,0 @@
-/*
- * A Pairing Heap implementation.
- *
- * "The Pairing Heap: A New Form of Self-Adjusting Heap"
- * https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf
- *
- * With auxiliary twopass list, described in a follow on paper.
- *
- * "Pairing Heaps: Experiments and Analysis"
- * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf
- *
- *******************************************************************************
- */
-
-#ifndef PH_H_
-#define PH_H_
-
-/* Node structure. */
-#define phn(a_type) \
-struct { \
- a_type *phn_prev; \
- a_type *phn_next; \
- a_type *phn_lchild; \
-}
-
-/* Root structure. */
-#define ph(a_type) \
-struct { \
- a_type *ph_root; \
-}
-
-/* Internal utility macros. */
-#define phn_lchild_get(a_type, a_field, a_phn) \
- (a_phn->a_field.phn_lchild)
-#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \
- a_phn->a_field.phn_lchild = a_lchild; \
-} while (0)
-
-#define phn_next_get(a_type, a_field, a_phn) \
- (a_phn->a_field.phn_next)
-#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \
- a_phn->a_field.phn_prev = a_prev; \
-} while (0)
-
-#define phn_prev_get(a_type, a_field, a_phn) \
- (a_phn->a_field.phn_prev)
-#define phn_next_set(a_type, a_field, a_phn, a_next) do { \
- a_phn->a_field.phn_next = a_next; \
-} while (0)
-
-#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \
- a_type *phn0child; \
- \
- assert(a_phn0 != NULL); \
- assert(a_phn1 != NULL); \
- assert(a_cmp(a_phn0, a_phn1) <= 0); \
- \
- phn_prev_set(a_type, a_field, a_phn1, a_phn0); \
- phn0child = phn_lchild_get(a_type, a_field, a_phn0); \
- phn_next_set(a_type, a_field, a_phn1, phn0child); \
- if (phn0child != NULL) \
- phn_prev_set(a_type, a_field, phn0child, a_phn1); \
- phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \
-} while (0)
-
-#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \
- if (a_phn0 == NULL) \
- r_phn = a_phn1; \
- else if (a_phn1 == NULL) \
- r_phn = a_phn0; \
- else if (a_cmp(a_phn0, a_phn1) < 0) { \
- phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \
- a_cmp); \
- r_phn = a_phn0; \
- } else { \
- phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \
- a_cmp); \
- r_phn = a_phn1; \
- } \
-} while (0)
-
-#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \
- a_type *head = NULL; \
- a_type *tail = NULL; \
- a_type *phn0 = a_phn; \
- a_type *phn1 = phn_next_get(a_type, a_field, phn0); \
- \
- /* \
- * Multipass merge, wherein the first two elements of a FIFO \
- * are repeatedly merged, and each result is appended to the \
- * singly linked FIFO, until the FIFO contains only a single \
- * element. We start with a sibling list but no reference to \
- * its tail, so we do a single pass over the sibling list to \
- * populate the FIFO. \
- */ \
- if (phn1 != NULL) { \
- a_type *phnrest = phn_next_get(a_type, a_field, phn1); \
- if (phnrest != NULL) \
- phn_prev_set(a_type, a_field, phnrest, NULL); \
- phn_prev_set(a_type, a_field, phn0, NULL); \
- phn_next_set(a_type, a_field, phn0, NULL); \
- phn_prev_set(a_type, a_field, phn1, NULL); \
- phn_next_set(a_type, a_field, phn1, NULL); \
- phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \
- head = tail = phn0; \
- phn0 = phnrest; \
- while (phn0 != NULL) { \
- phn1 = phn_next_get(a_type, a_field, phn0); \
- if (phn1 != NULL) { \
- phnrest = phn_next_get(a_type, a_field, \
- phn1); \
- if (phnrest != NULL) { \
- phn_prev_set(a_type, a_field, \
- phnrest, NULL); \
- } \
- phn_prev_set(a_type, a_field, phn0, \
- NULL); \
- phn_next_set(a_type, a_field, phn0, \
- NULL); \
- phn_prev_set(a_type, a_field, phn1, \
- NULL); \
- phn_next_set(a_type, a_field, phn1, \
- NULL); \
- phn_merge(a_type, a_field, phn0, phn1, \
- a_cmp, phn0); \
- phn_next_set(a_type, a_field, tail, \
- phn0); \
- tail = phn0; \
- phn0 = phnrest; \
- } else { \
- phn_next_set(a_type, a_field, tail, \
- phn0); \
- tail = phn0; \
- phn0 = NULL; \
- } \
- } \
- phn0 = head; \
- phn1 = phn_next_get(a_type, a_field, phn0); \
- if (phn1 != NULL) { \
- while (true) { \
- head = phn_next_get(a_type, a_field, \
- phn1); \
- assert(phn_prev_get(a_type, a_field, \
- phn0) == NULL); \
- phn_next_set(a_type, a_field, phn0, \
- NULL); \
- assert(phn_prev_get(a_type, a_field, \
- phn1) == NULL); \
- phn_next_set(a_type, a_field, phn1, \
- NULL); \
- phn_merge(a_type, a_field, phn0, phn1, \
- a_cmp, phn0); \
- if (head == NULL) \
- break; \
- phn_next_set(a_type, a_field, tail, \
- phn0); \
- tail = phn0; \
- phn0 = head; \
- phn1 = phn_next_get(a_type, a_field, \
- phn0); \
- } \
- } \
- } \
- r_phn = phn0; \
-} while (0)
-
-#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \
- a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \
- if (phn != NULL) { \
- phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \
- phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \
- phn_prev_set(a_type, a_field, phn, NULL); \
- ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \
- assert(phn_next_get(a_type, a_field, phn) == NULL); \
- phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \
- a_ph->ph_root); \
- } \
-} while (0)
-
-#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \
- a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \
- if (lchild == NULL) \
- r_phn = NULL; \
- else { \
- ph_merge_siblings(a_type, a_field, lchild, a_cmp, \
- r_phn); \
- } \
-} while (0)
-
-/*
- * The ph_proto() macro generates function prototypes that correspond to the
- * functions generated by an equivalently parameterized call to ph_gen().
- */
-#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \
-a_attr void a_prefix##new(a_ph_type *ph); \
-a_attr bool a_prefix##empty(a_ph_type *ph); \
-a_attr a_type *a_prefix##first(a_ph_type *ph); \
-a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \
-a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \
-a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn);
-
-/*
- * The ph_gen() macro generates a type-specific pairing heap implementation,
- * based on the above cpp macros.
- */
-#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \
-a_attr void \
-a_prefix##new(a_ph_type *ph) \
-{ \
- \
- memset(ph, 0, sizeof(ph(a_type))); \
-} \
-a_attr bool \
-a_prefix##empty(a_ph_type *ph) \
-{ \
- \
- return (ph->ph_root == NULL); \
-} \
-a_attr a_type * \
-a_prefix##first(a_ph_type *ph) \
-{ \
- \
- if (ph->ph_root == NULL) \
- return (NULL); \
- ph_merge_aux(a_type, a_field, ph, a_cmp); \
- return (ph->ph_root); \
-} \
-a_attr void \
-a_prefix##insert(a_ph_type *ph, a_type *phn) \
-{ \
- \
- memset(&phn->a_field, 0, sizeof(phn(a_type))); \
- \
- /* \
- * Treat the root as an aux list during insertion, and lazily \
- * merge during a_prefix##remove_first(). For elements that \
- * are inserted, then removed via a_prefix##remove() before the \
- * aux list is ever processed, this makes insert/remove \
- * constant-time, whereas eager merging would make insert \
- * O(log n). \
- */ \
- if (ph->ph_root == NULL) \
- ph->ph_root = phn; \
- else { \
- phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \
- a_field, ph->ph_root)); \
- if (phn_next_get(a_type, a_field, ph->ph_root) != \
- NULL) { \
- phn_prev_set(a_type, a_field, \
- phn_next_get(a_type, a_field, ph->ph_root), \
- phn); \
- } \
- phn_prev_set(a_type, a_field, phn, ph->ph_root); \
- phn_next_set(a_type, a_field, ph->ph_root, phn); \
- } \
-} \
-a_attr a_type * \
-a_prefix##remove_first(a_ph_type *ph) \
-{ \
- a_type *ret; \
- \
- if (ph->ph_root == NULL) \
- return (NULL); \
- ph_merge_aux(a_type, a_field, ph, a_cmp); \
- \
- ret = ph->ph_root; \
- \
- ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
- ph->ph_root); \
- \
- return (ret); \
-} \
-a_attr void \
-a_prefix##remove(a_ph_type *ph, a_type *phn) \
-{ \
- a_type *replace, *parent; \
- \
- /* \
- * We can delete from aux list without merging it, but we need \
- * to merge if we are dealing with the root node. \
- */ \
- if (ph->ph_root == phn) { \
- ph_merge_aux(a_type, a_field, ph, a_cmp); \
- if (ph->ph_root == phn) { \
- ph_merge_children(a_type, a_field, ph->ph_root, \
- a_cmp, ph->ph_root); \
- return; \
- } \
- } \
- \
- /* Get parent (if phn is leftmost child) before mutating. */ \
- if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \
- if (phn_lchild_get(a_type, a_field, parent) != phn) \
- parent = NULL; \
- } \
- /* Find a possible replacement node, and link to parent. */ \
- ph_merge_children(a_type, a_field, phn, a_cmp, replace); \
- /* Set next/prev for sibling linked list. */ \
- if (replace != NULL) { \
- if (parent != NULL) { \
- phn_prev_set(a_type, a_field, replace, parent); \
- phn_lchild_set(a_type, a_field, parent, \
- replace); \
- } else { \
- phn_prev_set(a_type, a_field, replace, \
- phn_prev_get(a_type, a_field, phn)); \
- if (phn_prev_get(a_type, a_field, phn) != \
- NULL) { \
- phn_next_set(a_type, a_field, \
- phn_prev_get(a_type, a_field, phn), \
- replace); \
- } \
- } \
- phn_next_set(a_type, a_field, replace, \
- phn_next_get(a_type, a_field, phn)); \
- if (phn_next_get(a_type, a_field, phn) != NULL) { \
- phn_prev_set(a_type, a_field, \
- phn_next_get(a_type, a_field, phn), \
- replace); \
- } \
- } else { \
- if (parent != NULL) { \
- a_type *next = phn_next_get(a_type, a_field, \
- phn); \
- phn_lchild_set(a_type, a_field, parent, next); \
- if (next != NULL) { \
- phn_prev_set(a_type, a_field, next, \
- parent); \
- } \
- } else { \
- assert(phn_prev_get(a_type, a_field, phn) != \
- NULL); \
- phn_next_set(a_type, a_field, \
- phn_prev_get(a_type, a_field, phn), \
- phn_next_get(a_type, a_field, phn)); \
- } \
- if (phn_next_get(a_type, a_field, phn) != NULL) { \
- phn_prev_set(a_type, a_field, \
- phn_next_get(a_type, a_field, phn), \
- phn_prev_get(a_type, a_field, phn)); \
- } \
- } \
-}
-
-#endif /* PH_H_ */
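
Note: the file deleted above generated a type-specific pairing heap from a node type, an intrusive link field (phn()) and a comparator; the revert drops it along with its include in jemalloc_internal.h.in. A minimal sketch of how the removed macros were instantiated; node_t, node_heap_t and node_cmp are illustrative names:

#include <assert.h>
#include <stdint.h>
#include <string.h>

typedef struct node_s node_t;
struct node_s {
	uint64_t	key;
	phn(node_t)	link;		/* intrusive heap linkage */
};
typedef ph(node_t) node_heap_t;

static int
node_cmp(const node_t *a, const node_t *b)
{

	return ((a->key > b->key) - (a->key < b->key));
}

/* Expands to static node_heap_new(), node_heap_empty(), node_heap_first(),
 * node_heap_insert(), node_heap_remove_first() and node_heap_remove(). */
ph_gen(static, node_heap_, node_heap_t, node_t, link, node_cmp)
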
diff --git a/deps/jemalloc/include/jemalloc/internal/private_symbols.txt b/deps/jemalloc/include/jemalloc/internal/private_symbols.txt
index c1c6c4090..a90021aa6 100644
--- a/deps/jemalloc/include/jemalloc/internal/private_symbols.txt
+++ b/deps/jemalloc/include/jemalloc/internal/private_symbols.txt
@@ -3,15 +3,12 @@ a0get
a0malloc
arena_aalloc
arena_alloc_junk_small
-arena_basic_stats_merge
arena_bin_index
arena_bin_info
-arena_bitselm_get_const
-arena_bitselm_get_mutable
+arena_bitselm_get
arena_boot
arena_choose
arena_choose_hard
-arena_choose_impl
arena_chunk_alloc_huge
arena_chunk_cache_maybe_insert
arena_chunk_cache_maybe_remove
@@ -28,25 +25,18 @@ arena_dalloc_junk_small
arena_dalloc_large
arena_dalloc_large_junked_locked
arena_dalloc_small
-arena_decay_tick
-arena_decay_ticks
-arena_decay_time_default_get
-arena_decay_time_default_set
-arena_decay_time_get
-arena_decay_time_set
arena_dss_prec_get
arena_dss_prec_set
-arena_extent_sn_next
arena_get
-arena_ichoose
+arena_get_hard
arena_init
arena_lg_dirty_mult_default_get
arena_lg_dirty_mult_default_set
arena_lg_dirty_mult_get
arena_lg_dirty_mult_set
arena_malloc
-arena_malloc_hard
arena_malloc_large
+arena_malloc_small
arena_mapbits_allocated_get
arena_mapbits_binind_get
arena_mapbits_decommitted_get
@@ -57,6 +47,9 @@ arena_mapbits_large_binind_set
arena_mapbits_large_get
arena_mapbits_large_set
arena_mapbits_large_size_get
+arena_mapbitsp_get
+arena_mapbitsp_read
+arena_mapbitsp_write
arena_mapbits_size_decode
arena_mapbits_size_encode
arena_mapbits_small_runind_get
@@ -65,33 +58,23 @@ arena_mapbits_unallocated_set
arena_mapbits_unallocated_size_get
arena_mapbits_unallocated_size_set
arena_mapbits_unzeroed_get
-arena_mapbitsp_get_const
-arena_mapbitsp_get_mutable
-arena_mapbitsp_read
-arena_mapbitsp_write
arena_maxrun
arena_maybe_purge
arena_metadata_allocated_add
arena_metadata_allocated_get
arena_metadata_allocated_sub
arena_migrate
-arena_miscelm_get_const
-arena_miscelm_get_mutable
+arena_miscelm_get
arena_miscelm_to_pageind
arena_miscelm_to_rpages
+arena_nbound
arena_new
arena_node_alloc
arena_node_dalloc
-arena_nthreads_dec
-arena_nthreads_get
-arena_nthreads_inc
arena_palloc
arena_postfork_child
arena_postfork_parent
-arena_prefork0
-arena_prefork1
-arena_prefork2
-arena_prefork3
+arena_prefork
arena_prof_accum
arena_prof_accum_impl
arena_prof_accum_locked
@@ -100,25 +83,21 @@ arena_prof_tctx_get
arena_prof_tctx_reset
arena_prof_tctx_set
arena_ptr_small_binind_get
-arena_purge
+arena_purge_all
arena_quarantine_junk_small
arena_ralloc
arena_ralloc_junk_large
arena_ralloc_no_move
arena_rd_to_miscelm
arena_redzone_corruption
-arena_reset
arena_run_regind
arena_run_to_miscelm
arena_salloc
+arenas_cache_bypass_cleanup
+arenas_cache_cleanup
arena_sdalloc
arena_stats_merge
arena_tcache_fill_small
-arena_tdata_get
-arena_tdata_get_hard
-arenas
-arenas_tdata_bypass_cleanup
-arenas_tdata_cleanup
atomic_add_p
atomic_add_u
atomic_add_uint32
@@ -134,11 +113,6 @@ atomic_sub_u
atomic_sub_uint32
atomic_sub_uint64
atomic_sub_z
-atomic_write_p
-atomic_write_u
-atomic_write_uint32
-atomic_write_uint64
-atomic_write_z
base_alloc
base_boot
base_postfork_child
@@ -148,6 +122,7 @@ base_stats_get
bitmap_full
bitmap_get
bitmap_info_init
+bitmap_info_ngroups
bitmap_init
bitmap_set
bitmap_sfu
@@ -164,25 +139,32 @@ chunk_alloc_dss
chunk_alloc_mmap
chunk_alloc_wrapper
chunk_boot
+chunk_dalloc_arena
chunk_dalloc_cache
chunk_dalloc_mmap
chunk_dalloc_wrapper
chunk_deregister
chunk_dss_boot
-chunk_dss_mergeable
+chunk_dss_postfork_child
+chunk_dss_postfork_parent
chunk_dss_prec_get
chunk_dss_prec_set
+chunk_dss_prefork
chunk_hooks_default
chunk_hooks_get
chunk_hooks_set
chunk_in_dss
chunk_lookup
chunk_npages
+chunk_postfork_child
+chunk_postfork_parent
+chunk_prefork
+chunk_purge_arena
chunk_purge_wrapper
chunk_register
-chunks_rtree
chunksize
chunksize_mask
+chunks_rtree
ckh_count
ckh_delete
ckh_insert
@@ -201,7 +183,6 @@ ctl_nametomib
ctl_postfork_child
ctl_postfork_parent
ctl_prefork
-decay_ticker_get
dss_prec_names
extent_node_achunk_get
extent_node_achunk_set
@@ -209,8 +190,6 @@ extent_node_addr_get
extent_node_addr_set
extent_node_arena_get
extent_node_arena_set
-extent_node_committed_get
-extent_node_committed_set
extent_node_dirty_insert
extent_node_dirty_linkage_init
extent_node_dirty_remove
@@ -219,12 +198,8 @@ extent_node_prof_tctx_get
extent_node_prof_tctx_set
extent_node_size_get
extent_node_size_set
-extent_node_sn_get
-extent_node_sn_set
extent_node_zeroed_get
extent_node_zeroed_set
-extent_tree_ad_destroy
-extent_tree_ad_destroy_recurse
extent_tree_ad_empty
extent_tree_ad_first
extent_tree_ad_insert
@@ -242,31 +217,23 @@ extent_tree_ad_reverse_iter
extent_tree_ad_reverse_iter_recurse
extent_tree_ad_reverse_iter_start
extent_tree_ad_search
-extent_tree_szsnad_destroy
-extent_tree_szsnad_destroy_recurse
-extent_tree_szsnad_empty
-extent_tree_szsnad_first
-extent_tree_szsnad_insert
-extent_tree_szsnad_iter
-extent_tree_szsnad_iter_recurse
-extent_tree_szsnad_iter_start
-extent_tree_szsnad_last
-extent_tree_szsnad_new
-extent_tree_szsnad_next
-extent_tree_szsnad_nsearch
-extent_tree_szsnad_prev
-extent_tree_szsnad_psearch
-extent_tree_szsnad_remove
-extent_tree_szsnad_reverse_iter
-extent_tree_szsnad_reverse_iter_recurse
-extent_tree_szsnad_reverse_iter_start
-extent_tree_szsnad_search
-ffs_llu
-ffs_lu
-ffs_u
-ffs_u32
-ffs_u64
-ffs_zu
+extent_tree_szad_empty
+extent_tree_szad_first
+extent_tree_szad_insert
+extent_tree_szad_iter
+extent_tree_szad_iter_recurse
+extent_tree_szad_iter_start
+extent_tree_szad_last
+extent_tree_szad_new
+extent_tree_szad_next
+extent_tree_szad_nsearch
+extent_tree_szad_prev
+extent_tree_szad_psearch
+extent_tree_szad_remove
+extent_tree_szad_reverse_iter
+extent_tree_szad_reverse_iter_recurse
+extent_tree_szad_reverse_iter_start
+extent_tree_szad_search
get_errno
hash
hash_fmix_32
@@ -290,16 +257,19 @@ huge_ralloc
huge_ralloc_no_move
huge_salloc
iaalloc
-ialloc
iallocztm
-iarena_cleanup
+icalloc
+icalloct
idalloc
+idalloct
idalloctm
-in_valgrind
+imalloc
+imalloct
index2size
index2size_compute
index2size_lookup
index2size_tab
+in_valgrind
ipalloc
ipalloct
ipallocztm
@@ -318,11 +288,7 @@ jemalloc_postfork_parent
jemalloc_prefork
large_maxclass
lg_floor
-lg_prof_sample
malloc_cprintf
-malloc_mutex_assert_not_owner
-malloc_mutex_assert_owner
-malloc_mutex_boot
malloc_mutex_init
malloc_mutex_lock
malloc_mutex_postfork_child
@@ -344,29 +310,12 @@ malloc_write
map_bias
map_misc_offset
mb_write
-narenas_auto
-narenas_tdata_cleanup
+mutex_boot
+narenas_cache_cleanup
narenas_total_get
ncpus
nhbins
-nhclasses
-nlclasses
-nstime_add
-nstime_compare
-nstime_copy
-nstime_divide
-nstime_idivide
-nstime_imultiply
-nstime_init
-nstime_init2
-nstime_monotonic
-nstime_ns
-nstime_nsec
-nstime_sec
-nstime_subtract
-nstime_update
opt_abort
-opt_decay_time
opt_dss
opt_junk
opt_junk_alloc
@@ -385,7 +334,6 @@ opt_prof_gdump
opt_prof_leak
opt_prof_prefix
opt_prof_thread_active_init
-opt_purge
opt_quarantine
opt_redzone
opt_stats_print
@@ -394,32 +342,13 @@ opt_utrace
opt_xmalloc
opt_zero
p2rz
-pages_boot
pages_commit
pages_decommit
-pages_huge
pages_map
-pages_nohuge
pages_purge
pages_trim
pages_unmap
-pind2sz
-pind2sz_compute
-pind2sz_lookup
-pind2sz_tab
-pow2_ceil_u32
-pow2_ceil_u64
-pow2_ceil_zu
-prng_lg_range_u32
-prng_lg_range_u64
-prng_lg_range_zu
-prng_range_u32
-prng_range_u64
-prng_range_zu
-prng_state_next_u32
-prng_state_next_u64
-prng_state_next_zu
-prof_active
+pow2_ceil
prof_active_get
prof_active_get_unlocked
prof_active_set
@@ -429,7 +358,6 @@ prof_backtrace
prof_boot0
prof_boot1
prof_boot2
-prof_bt_count
prof_dump_header
prof_dump_open
prof_free
@@ -447,8 +375,7 @@ prof_malloc_sample_object
prof_mdump
prof_postfork_child
prof_postfork_parent
-prof_prefork0
-prof_prefork1
+prof_prefork
prof_realloc
prof_reset
prof_sample_accum_update
@@ -457,7 +384,6 @@ prof_tctx_get
prof_tctx_reset
prof_tctx_set
prof_tdata_cleanup
-prof_tdata_count
prof_tdata_get
prof_tdata_init
prof_tdata_reinit
@@ -467,13 +393,11 @@ prof_thread_active_init_set
prof_thread_active_set
prof_thread_name_get
prof_thread_name_set
-psz2ind
-psz2u
-purge_mode_names
quarantine
quarantine_alloc_hook
quarantine_alloc_hook_work
quarantine_cleanup
+register_zone
rtree_child_read
rtree_child_read_hard
rtree_child_tryread
@@ -489,8 +413,6 @@ rtree_subtree_read_hard
rtree_subtree_tryread
rtree_val_read
rtree_val_write
-run_quantize_ceil
-run_quantize_floor
s2u
s2u_compute
s2u_lookup
@@ -500,8 +422,6 @@ size2index
size2index_compute
size2index_lookup
size2index_tab
-spin_adaptive
-spin_init
stats_cactive
stats_cactive_add
stats_cactive_get
@@ -511,6 +431,8 @@ tcache_alloc_easy
tcache_alloc_large
tcache_alloc_small
tcache_alloc_small_hard
+tcache_arena_associate
+tcache_arena_dissociate
tcache_arena_reassociate
tcache_bin_flush_large
tcache_bin_flush_small
@@ -529,103 +451,49 @@ tcache_flush
tcache_get
tcache_get_hard
tcache_maxclass
-tcache_salloc
-tcache_stats_merge
tcaches
+tcache_salloc
tcaches_create
tcaches_destroy
tcaches_flush
tcaches_get
+tcache_stats_merge
thread_allocated_cleanup
thread_deallocated_cleanup
-ticker_copy
-ticker_init
-ticker_read
-ticker_tick
-ticker_ticks
tsd_arena_get
tsd_arena_set
-tsd_arenap_get
-tsd_arenas_tdata_bypass_get
-tsd_arenas_tdata_bypass_set
-tsd_arenas_tdata_bypassp_get
-tsd_arenas_tdata_get
-tsd_arenas_tdata_set
-tsd_arenas_tdatap_get
tsd_boot
tsd_boot0
tsd_boot1
tsd_booted
-tsd_booted_get
tsd_cleanup
tsd_cleanup_wrapper
tsd_fetch
-tsd_fetch_impl
tsd_get
-tsd_get_allocates
-tsd_iarena_get
-tsd_iarena_set
-tsd_iarenap_get
+tsd_wrapper_get
+tsd_wrapper_set
tsd_initialized
tsd_init_check_recursion
tsd_init_finish
tsd_init_head
-tsd_narenas_tdata_get
-tsd_narenas_tdata_set
-tsd_narenas_tdatap_get
-tsd_wrapper_get
-tsd_wrapper_set
tsd_nominal
-tsd_prof_tdata_get
-tsd_prof_tdata_set
-tsd_prof_tdatap_get
tsd_quarantine_get
tsd_quarantine_set
-tsd_quarantinep_get
tsd_set
tsd_tcache_enabled_get
tsd_tcache_enabled_set
-tsd_tcache_enabledp_get
tsd_tcache_get
tsd_tcache_set
-tsd_tcachep_get
+tsd_tls
+tsd_tsd
+tsd_prof_tdata_get
+tsd_prof_tdata_set
tsd_thread_allocated_get
tsd_thread_allocated_set
-tsd_thread_allocatedp_get
tsd_thread_deallocated_get
tsd_thread_deallocated_set
-tsd_thread_deallocatedp_get
-tsd_tls
-tsd_tsd
-tsd_tsdn
-tsd_witness_fork_get
-tsd_witness_fork_set
-tsd_witness_forkp_get
-tsd_witnesses_get
-tsd_witnesses_set
-tsd_witnessesp_get
-tsdn_fetch
-tsdn_null
-tsdn_tsd
u2rz
valgrind_freelike_block
valgrind_make_mem_defined
valgrind_make_mem_noaccess
valgrind_make_mem_undefined
-witness_assert_lockless
-witness_assert_not_owner
-witness_assert_owner
-witness_fork_cleanup
-witness_init
-witness_lock
-witness_lock_error
-witness_lockless_error
-witness_not_owner_error
-witness_owner
-witness_owner_error
-witness_postfork_child
-witness_postfork_parent
-witness_prefork
-witness_unlock
-witnesses_cleanup
-zone_register
diff --git a/deps/jemalloc/include/jemalloc/internal/prng.h b/deps/jemalloc/include/jemalloc/internal/prng.h
index c2bda19c6..216d0ef47 100644
--- a/deps/jemalloc/include/jemalloc/internal/prng.h
+++ b/deps/jemalloc/include/jemalloc/internal/prng.h
@@ -18,13 +18,31 @@
* proportional to bit position. For example, the lowest bit has a cycle of 2,
* the next has a cycle of 4, etc. For this reason, we prefer to use the upper
* bits.
+ *
+ * Macro parameters:
+ * uint32_t r : Result.
+ * unsigned lg_range : (0..32], number of least significant bits to return.
+ * uint32_t state : Seed value.
+ * const uint32_t a, c : See above discussion.
*/
-
-#define PRNG_A_32 UINT32_C(1103515241)
-#define PRNG_C_32 UINT32_C(12347)
-
-#define PRNG_A_64 UINT64_C(6364136223846793005)
-#define PRNG_C_64 UINT64_C(1442695040888963407)
+#define prng32(r, lg_range, state, a, c) do { \
+ assert((lg_range) > 0); \
+ assert((lg_range) <= 32); \
+ \
+ r = (state * (a)) + (c); \
+ state = r; \
+ r >>= (32 - (lg_range)); \
+} while (false)
+
+/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */
+#define prng64(r, lg_range, state, a, c) do { \
+ assert((lg_range) > 0); \
+ assert((lg_range) <= 64); \
+ \
+ r = (state * (a)) + (c); \
+ state = r; \
+ r >>= (64 - (lg_range)); \
+} while (false)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
@@ -38,170 +56,5 @@
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
-#ifndef JEMALLOC_ENABLE_INLINE
-uint32_t prng_state_next_u32(uint32_t state);
-uint64_t prng_state_next_u64(uint64_t state);
-size_t prng_state_next_zu(size_t state);
-
-uint32_t prng_lg_range_u32(uint32_t *state, unsigned lg_range,
- bool atomic);
-uint64_t prng_lg_range_u64(uint64_t *state, unsigned lg_range);
-size_t prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic);
-
-uint32_t prng_range_u32(uint32_t *state, uint32_t range, bool atomic);
-uint64_t prng_range_u64(uint64_t *state, uint64_t range);
-size_t prng_range_zu(size_t *state, size_t range, bool atomic);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PRNG_C_))
-JEMALLOC_ALWAYS_INLINE uint32_t
-prng_state_next_u32(uint32_t state)
-{
-
- return ((state * PRNG_A_32) + PRNG_C_32);
-}
-
-JEMALLOC_ALWAYS_INLINE uint64_t
-prng_state_next_u64(uint64_t state)
-{
-
- return ((state * PRNG_A_64) + PRNG_C_64);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-prng_state_next_zu(size_t state)
-{
-
-#if LG_SIZEOF_PTR == 2
- return ((state * PRNG_A_32) + PRNG_C_32);
-#elif LG_SIZEOF_PTR == 3
- return ((state * PRNG_A_64) + PRNG_C_64);
-#else
-#error Unsupported pointer size
-#endif
-}
-
-JEMALLOC_ALWAYS_INLINE uint32_t
-prng_lg_range_u32(uint32_t *state, unsigned lg_range, bool atomic)
-{
- uint32_t ret, state1;
-
- assert(lg_range > 0);
- assert(lg_range <= 32);
-
- if (atomic) {
- uint32_t state0;
-
- do {
- state0 = atomic_read_uint32(state);
- state1 = prng_state_next_u32(state0);
- } while (atomic_cas_uint32(state, state0, state1));
- } else {
- state1 = prng_state_next_u32(*state);
- *state = state1;
- }
- ret = state1 >> (32 - lg_range);
-
- return (ret);
-}
-
-/* 64-bit atomic operations cannot be supported on all relevant platforms. */
-JEMALLOC_ALWAYS_INLINE uint64_t
-prng_lg_range_u64(uint64_t *state, unsigned lg_range)
-{
- uint64_t ret, state1;
-
- assert(lg_range > 0);
- assert(lg_range <= 64);
-
- state1 = prng_state_next_u64(*state);
- *state = state1;
- ret = state1 >> (64 - lg_range);
-
- return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic)
-{
- size_t ret, state1;
-
- assert(lg_range > 0);
- assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR));
-
- if (atomic) {
- size_t state0;
-
- do {
- state0 = atomic_read_z(state);
- state1 = prng_state_next_zu(state0);
- } while (atomic_cas_z(state, state0, state1));
- } else {
- state1 = prng_state_next_zu(*state);
- *state = state1;
- }
- ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);
-
- return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE uint32_t
-prng_range_u32(uint32_t *state, uint32_t range, bool atomic)
-{
- uint32_t ret;
- unsigned lg_range;
-
- assert(range > 1);
-
- /* Compute the ceiling of lg(range). */
- lg_range = ffs_u32(pow2_ceil_u32(range)) - 1;
-
- /* Generate a result in [0..range) via repeated trial. */
- do {
- ret = prng_lg_range_u32(state, lg_range, atomic);
- } while (ret >= range);
-
- return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE uint64_t
-prng_range_u64(uint64_t *state, uint64_t range)
-{
- uint64_t ret;
- unsigned lg_range;
-
- assert(range > 1);
-
- /* Compute the ceiling of lg(range). */
- lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
-
- /* Generate a result in [0..range) via repeated trial. */
- do {
- ret = prng_lg_range_u64(state, lg_range);
- } while (ret >= range);
-
- return (ret);
-}
-
-JEMALLOC_ALWAYS_INLINE size_t
-prng_range_zu(size_t *state, size_t range, bool atomic)
-{
- size_t ret;
- unsigned lg_range;
-
- assert(range > 1);
-
- /* Compute the ceiling of lg(range). */
- lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
-
- /* Generate a result in [0..range) via repeated trial. */
- do {
- ret = prng_lg_range_zu(state, lg_range, atomic);
- } while (ret >= range);
-
- return (ret);
-}
-#endif
-
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
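
Editor's note: the prng32()/prng64() macros reinstated above perform one linear-congruential step and keep the upper lg_range bits, and the removed prng_range_u32() helper layered rejection sampling on top to hit an arbitrary range. The following is a minimal standalone sketch of both ideas, not jemalloc code; the multiplier and increment constants are illustrative placeholders, not PRNG_A_32/PRNG_C_32, and lcg_bits()/lcg_range() are hypothetical names.

/*
 * Sketch only: LCG step, keep the high bits, then rejection-sample into
 * an arbitrary range. Constants are illustrative, not jemalloc's.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t
lcg_bits(uint32_t *state, unsigned lg_range)
{
	uint32_t r;

	assert(lg_range > 0 && lg_range <= 32);
	r = (*state * 1103515245U) + 12345U;	/* One LCG step. */
	*state = r;
	return (r >> (32 - lg_range));		/* Keep the upper bits. */
}

static uint32_t
lcg_range(uint32_t *state, uint32_t range)
{
	unsigned lg_range = 1;
	uint32_t r;

	assert(range > 1 && range <= (1U << 31));
	/* Smallest power of two that covers range. */
	while ((1U << lg_range) < range)
		lg_range++;
	/* Rejection sampling, as the removed prng_range_u32() did. */
	do {
		r = lcg_bits(state, lg_range);
	} while (r >= range);
	return (r);
}

int
main(void)
{
	uint32_t state = 42;

	printf("%u %u\n", lcg_range(&state, 10), lcg_range(&state, 10));
	return (0);
}

The low bits of an LCG have short cycles, which is exactly why both the macros above and this sketch shift the state down rather than masking it.
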
diff --git a/deps/jemalloc/include/jemalloc/internal/prof.h b/deps/jemalloc/include/jemalloc/internal/prof.h
index 8293b71ed..e5198c3e8 100644
--- a/deps/jemalloc/include/jemalloc/internal/prof.h
+++ b/deps/jemalloc/include/jemalloc/internal/prof.h
@@ -281,7 +281,7 @@ extern uint64_t prof_interval;
extern size_t lg_prof_sample;
void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
-void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
+void prof_malloc_sample_object(const void *ptr, size_t usize,
prof_tctx_t *tctx);
void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
void bt_init(prof_bt_t *bt, void **vec);
@@ -293,33 +293,32 @@ size_t prof_bt_count(void);
const prof_cnt_t *prof_cnt_all(void);
typedef int (prof_dump_open_t)(bool, const char *);
extern prof_dump_open_t *prof_dump_open;
-typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *);
+typedef bool (prof_dump_header_t)(bool, const prof_cnt_t *);
extern prof_dump_header_t *prof_dump_header;
#endif
-void prof_idump(tsdn_t *tsdn);
-bool prof_mdump(tsd_t *tsd, const char *filename);
-void prof_gdump(tsdn_t *tsdn);
+void prof_idump(void);
+bool prof_mdump(const char *filename);
+void prof_gdump(void);
prof_tdata_t *prof_tdata_init(tsd_t *tsd);
prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
void prof_reset(tsd_t *tsd, size_t lg_sample);
void prof_tdata_cleanup(tsd_t *tsd);
-bool prof_active_get(tsdn_t *tsdn);
-bool prof_active_set(tsdn_t *tsdn, bool active);
-const char *prof_thread_name_get(tsd_t *tsd);
+const char *prof_thread_name_get(void);
+bool prof_active_get(void);
+bool prof_active_set(bool active);
int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
-bool prof_thread_active_get(tsd_t *tsd);
-bool prof_thread_active_set(tsd_t *tsd, bool active);
-bool prof_thread_active_init_get(tsdn_t *tsdn);
-bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
-bool prof_gdump_get(tsdn_t *tsdn);
-bool prof_gdump_set(tsdn_t *tsdn, bool active);
+bool prof_thread_active_get(void);
+bool prof_thread_active_set(bool active);
+bool prof_thread_active_init_get(void);
+bool prof_thread_active_init_set(bool active_init);
+bool prof_gdump_get(void);
+bool prof_gdump_set(bool active);
void prof_boot0(void);
void prof_boot1(void);
-bool prof_boot2(tsd_t *tsd);
-void prof_prefork0(tsdn_t *tsdn);
-void prof_prefork1(tsdn_t *tsdn);
-void prof_postfork_parent(tsdn_t *tsdn);
-void prof_postfork_child(tsdn_t *tsdn);
+bool prof_boot2(void);
+void prof_prefork(void);
+void prof_postfork_parent(void);
+void prof_postfork_child(void);
void prof_sample_threshold_update(prof_tdata_t *tdata);
#endif /* JEMALLOC_H_EXTERNS */
@@ -330,17 +329,17 @@ void prof_sample_threshold_update(prof_tdata_t *tdata);
bool prof_active_get_unlocked(void);
bool prof_gdump_get_unlocked(void);
prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create);
-prof_tctx_t *prof_tctx_get(tsdn_t *tsdn, const void *ptr);
-void prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
- prof_tctx_t *tctx);
-void prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
- const void *old_ptr, prof_tctx_t *tctx);
bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit,
prof_tdata_t **tdata_out);
prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active,
bool update);
-void prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize,
+prof_tctx_t *prof_tctx_get(const void *ptr);
+void prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
+void prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
+ prof_tctx_t *tctx);
+void prof_malloc_sample_object(const void *ptr, size_t usize,
prof_tctx_t *tctx);
+void prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx);
void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize,
prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr,
size_t old_usize, prof_tctx_t *old_tctx);
@@ -398,34 +397,34 @@ prof_tdata_get(tsd_t *tsd, bool create)
}
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
-prof_tctx_get(tsdn_t *tsdn, const void *ptr)
+prof_tctx_get(const void *ptr)
{
cassert(config_prof);
assert(ptr != NULL);
- return (arena_prof_tctx_get(tsdn, ptr));
+ return (arena_prof_tctx_get(ptr));
}
JEMALLOC_ALWAYS_INLINE void
-prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx)
+prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
{
cassert(config_prof);
assert(ptr != NULL);
- arena_prof_tctx_set(tsdn, ptr, usize, tctx);
+ arena_prof_tctx_set(ptr, usize, tctx);
}
JEMALLOC_ALWAYS_INLINE void
-prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, const void *old_ptr,
+prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
prof_tctx_t *old_tctx)
{
cassert(config_prof);
assert(ptr != NULL);
- arena_prof_tctx_reset(tsdn, ptr, usize, old_ptr, old_tctx);
+ arena_prof_tctx_reset(ptr, usize, old_ptr, old_tctx);
}
JEMALLOC_ALWAYS_INLINE bool
@@ -437,16 +436,16 @@ prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
cassert(config_prof);
tdata = prof_tdata_get(tsd, true);
- if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX))
+ if ((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
tdata = NULL;
if (tdata_out != NULL)
*tdata_out = tdata;
- if (unlikely(tdata == NULL))
+ if (tdata == NULL)
return (true);
- if (likely(tdata->bytes_until_sample >= usize)) {
+ if (tdata->bytes_until_sample >= usize) {
if (update)
tdata->bytes_until_sample -= usize;
return (true);
@@ -480,17 +479,17 @@ prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update)
}
JEMALLOC_ALWAYS_INLINE void
-prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx)
+prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx)
{
cassert(config_prof);
assert(ptr != NULL);
- assert(usize == isalloc(tsdn, ptr, true));
+ assert(usize == isalloc(ptr, true));
if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
- prof_malloc_sample_object(tsdn, ptr, usize, tctx);
+ prof_malloc_sample_object(ptr, usize, tctx);
else
- prof_tctx_set(tsdn, ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
+ prof_tctx_set(ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
}
JEMALLOC_ALWAYS_INLINE void
@@ -504,7 +503,7 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
if (prof_active && !updated && ptr != NULL) {
- assert(usize == isalloc(tsd_tsdn(tsd), ptr, true));
+ assert(usize == isalloc(ptr, true));
if (prof_sample_accum_update(tsd, usize, true, NULL)) {
/*
* Don't sample. The usize passed to prof_alloc_prep()
@@ -513,7 +512,6 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
* though its actual usize was insufficient to cross the
* sample threshold.
*/
- prof_alloc_rollback(tsd, tctx, true);
tctx = (prof_tctx_t *)(uintptr_t)1U;
}
}
@@ -522,9 +520,9 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
if (unlikely(sampled))
- prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
+ prof_malloc_sample_object(ptr, usize, tctx);
else
- prof_tctx_reset(tsd_tsdn(tsd), ptr, usize, old_ptr, old_tctx);
+ prof_tctx_reset(ptr, usize, old_ptr, old_tctx);
if (unlikely(old_sampled))
prof_free_sampled_object(tsd, old_usize, old_tctx);
@@ -533,10 +531,10 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize)
{
- prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);
+ prof_tctx_t *tctx = prof_tctx_get(ptr);
cassert(config_prof);
- assert(usize == isalloc(tsd_tsdn(tsd), ptr, true));
+ assert(usize == isalloc(ptr, true));
if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
prof_free_sampled_object(tsd, usize, tctx);
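
Editor's note: the prof_malloc()/prof_realloc()/prof_free() inlines above tell sampled and unsampled allocations apart from the tctx pointer value alone: (uintptr_t)1U is a "not sampled" sentinel, anything greater is a real prof_tctx_t. A self-contained sketch of that sentinel-pointer pattern follows; tctx_t, TCTX_NOT_SAMPLED and record_alloc() are illustrative names, not jemalloc symbols.

/*
 * Sketch only: a single pointer carries both the "was this allocation
 * sampled?" flag and, when sampled, the context to update.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct tctx_s { unsigned long nmalloc; } tctx_t;

#define TCTX_NOT_SAMPLED	((tctx_t *)(uintptr_t)1U)

static void
record_alloc(tctx_t *tctx, size_t usize)
{
	if ((uintptr_t)tctx > (uintptr_t)1U) {
		/* Sampled: update the real context. */
		tctx->nmalloc++;
		printf("sampled %zu bytes\n", usize);
	} else {
		/* Sentinel: nothing to account. */
		printf("unsampled %zu bytes\n", usize);
	}
}

int
main(void)
{
	tctx_t ctx = {0};

	record_alloc(&ctx, 4096);		/* Real context. */
	record_alloc(TCTX_NOT_SAMPLED, 64);	/* Sentinel. */
	return (0);
}
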
diff --git a/deps/jemalloc/include/jemalloc/internal/rb.h b/deps/jemalloc/include/jemalloc/internal/rb.h
index 3770342f8..2ca8e5933 100644
--- a/deps/jemalloc/include/jemalloc/internal/rb.h
+++ b/deps/jemalloc/include/jemalloc/internal/rb.h
@@ -42,6 +42,7 @@ struct { \
#define rb_tree(a_type) \
struct { \
a_type *rbt_root; \
+ a_type rbt_nil; \
}
/* Left accessors. */
@@ -78,15 +79,6 @@ struct { \
(a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \
(a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \
} while (0)
-
-/* Node initializer. */
-#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
- /* Bookkeeping bit cannot be used by node pointer. */ \
- assert(((uintptr_t)(a_node) & 0x1) == 0); \
- rbtn_left_set(a_type, a_field, (a_node), NULL); \
- rbtn_right_set(a_type, a_field, (a_node), NULL); \
- rbtn_red_set(a_type, a_field, (a_node)); \
-} while (0)
#else
/* Right accessors. */
#define rbtn_right_get(a_type, a_field, a_node) \
@@ -107,26 +99,28 @@ struct { \
#define rbtn_black_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_red = false; \
} while (0)
+#endif
/* Node initializer. */
#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
- rbtn_left_set(a_type, a_field, (a_node), NULL); \
- rbtn_right_set(a_type, a_field, (a_node), NULL); \
+ rbtn_left_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \
+ rbtn_right_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \
rbtn_red_set(a_type, a_field, (a_node)); \
} while (0)
-#endif
/* Tree initializer. */
#define rb_new(a_type, a_field, a_rbt) do { \
- (a_rbt)->rbt_root = NULL; \
+ (a_rbt)->rbt_root = &(a_rbt)->rbt_nil; \
+ rbt_node_new(a_type, a_field, a_rbt, &(a_rbt)->rbt_nil); \
+ rbtn_black_set(a_type, a_field, &(a_rbt)->rbt_nil); \
} while (0)
/* Internal utility macros. */
#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \
(r_node) = (a_root); \
- if ((r_node) != NULL) { \
+ if ((r_node) != &(a_rbt)->rbt_nil) { \
for (; \
- rbtn_left_get(a_type, a_field, (r_node)) != NULL; \
+ rbtn_left_get(a_type, a_field, (r_node)) != &(a_rbt)->rbt_nil;\
(r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \
} \
} \
@@ -134,9 +128,10 @@ struct { \
#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \
(r_node) = (a_root); \
- if ((r_node) != NULL) { \
- for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL; \
- (r_node) = rbtn_right_get(a_type, a_field, (r_node))) { \
+ if ((r_node) != &(a_rbt)->rbt_nil) { \
+ for (; rbtn_right_get(a_type, a_field, (r_node)) != \
+ &(a_rbt)->rbt_nil; (r_node) = rbtn_right_get(a_type, a_field, \
+ (r_node))) { \
} \
} \
} while (0)
@@ -174,11 +169,11 @@ a_prefix##next(a_rbt_type *rbtree, a_type *node); \
a_attr a_type * \
a_prefix##prev(a_rbt_type *rbtree, a_type *node); \
a_attr a_type * \
-a_prefix##search(a_rbt_type *rbtree, const a_type *key); \
+a_prefix##search(a_rbt_type *rbtree, a_type *key); \
a_attr a_type * \
-a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key); \
+a_prefix##nsearch(a_rbt_type *rbtree, a_type *key); \
a_attr a_type * \
-a_prefix##psearch(a_rbt_type *rbtree, const a_type *key); \
+a_prefix##psearch(a_rbt_type *rbtree, a_type *key); \
a_attr void \
a_prefix##insert(a_rbt_type *rbtree, a_type *node); \
a_attr void \
@@ -188,10 +183,7 @@ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
a_rbt_type *, a_type *, void *), void *arg); \
a_attr a_type * \
a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
- a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \
-a_attr void \
-a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
- void *arg);
+ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg);
/*
* The rb_gen() macro generates a type-specific red-black tree implementation,
@@ -262,7 +254,7 @@ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
* last/first.
*
* static ex_node_t *
- * ex_search(ex_t *tree, const ex_node_t *key);
+ * ex_search(ex_t *tree, ex_node_t *key);
* Description: Search for node that matches key.
* Args:
* tree: Pointer to an initialized red-black tree object.
@@ -270,9 +262,9 @@ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
* Ret: Node in tree that matches key, or NULL if no match.
*
* static ex_node_t *
- * ex_nsearch(ex_t *tree, const ex_node_t *key);
+ * ex_nsearch(ex_t *tree, ex_node_t *key);
* static ex_node_t *
- * ex_psearch(ex_t *tree, const ex_node_t *key);
+ * ex_psearch(ex_t *tree, ex_node_t *key);
* Description: Search for node that matches key. If no match is found,
* return what would be key's successor/predecessor, were
* key in tree.
@@ -320,20 +312,6 @@ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
* arg : Opaque pointer passed to cb().
* Ret: NULL if iteration completed, or the non-NULL callback return value
* that caused termination of the iteration.
- *
- * static void
- * ex_destroy(ex_t *tree, void (*cb)(ex_node_t *, void *), void *arg);
- * Description: Iterate over the tree with post-order traversal, remove
- * each node, and run the callback if non-null. This is
- * used for destroying a tree without paying the cost to
- * rebalance it. The tree must not be otherwise altered
- * during traversal.
- * Args:
- * tree: Pointer to an initialized red-black tree object.
- * cb : Callback function, which, if non-null, is called for each node
- * during iteration. There is no way to stop iteration once it
- * has begun.
- * arg : Opaque pointer passed to cb().
*/
#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \
a_attr void \
@@ -342,30 +320,36 @@ a_prefix##new(a_rbt_type *rbtree) { \
} \
a_attr bool \
a_prefix##empty(a_rbt_type *rbtree) { \
- return (rbtree->rbt_root == NULL); \
+ return (rbtree->rbt_root == &rbtree->rbt_nil); \
} \
a_attr a_type * \
a_prefix##first(a_rbt_type *rbtree) { \
a_type *ret; \
rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
+ if (ret == &rbtree->rbt_nil) { \
+ ret = NULL; \
+ } \
return (ret); \
} \
a_attr a_type * \
a_prefix##last(a_rbt_type *rbtree) { \
a_type *ret; \
rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
+ if (ret == &rbtree->rbt_nil) { \
+ ret = NULL; \
+ } \
return (ret); \
} \
a_attr a_type * \
a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
a_type *ret; \
- if (rbtn_right_get(a_type, a_field, node) != NULL) { \
+ if (rbtn_right_get(a_type, a_field, node) != &rbtree->rbt_nil) { \
rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \
a_field, node), ret); \
} else { \
a_type *tnode = rbtree->rbt_root; \
- assert(tnode != NULL); \
- ret = NULL; \
+ assert(tnode != &rbtree->rbt_nil); \
+ ret = &rbtree->rbt_nil; \
while (true) { \
int cmp = (a_cmp)(node, tnode); \
if (cmp < 0) { \
@@ -376,21 +360,24 @@ a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
} else { \
break; \
} \
- assert(tnode != NULL); \
+ assert(tnode != &rbtree->rbt_nil); \
} \
} \
+ if (ret == &rbtree->rbt_nil) { \
+ ret = (NULL); \
+ } \
return (ret); \
} \
a_attr a_type * \
a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
a_type *ret; \
- if (rbtn_left_get(a_type, a_field, node) != NULL) { \
+ if (rbtn_left_get(a_type, a_field, node) != &rbtree->rbt_nil) { \
rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \
a_field, node), ret); \
} else { \
a_type *tnode = rbtree->rbt_root; \
- assert(tnode != NULL); \
- ret = NULL; \
+ assert(tnode != &rbtree->rbt_nil); \
+ ret = &rbtree->rbt_nil; \
while (true) { \
int cmp = (a_cmp)(node, tnode); \
if (cmp < 0) { \
@@ -401,17 +388,20 @@ a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
} else { \
break; \
} \
- assert(tnode != NULL); \
+ assert(tnode != &rbtree->rbt_nil); \
} \
} \
+ if (ret == &rbtree->rbt_nil) { \
+ ret = (NULL); \
+ } \
return (ret); \
} \
a_attr a_type * \
-a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \
+a_prefix##search(a_rbt_type *rbtree, a_type *key) { \
a_type *ret; \
int cmp; \
ret = rbtree->rbt_root; \
- while (ret != NULL \
+ while (ret != &rbtree->rbt_nil \
&& (cmp = (a_cmp)(key, ret)) != 0) { \
if (cmp < 0) { \
ret = rbtn_left_get(a_type, a_field, ret); \
@@ -419,14 +409,17 @@ a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \
ret = rbtn_right_get(a_type, a_field, ret); \
} \
} \
+ if (ret == &rbtree->rbt_nil) { \
+ ret = (NULL); \
+ } \
return (ret); \
} \
a_attr a_type * \
-a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \
+a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) { \
a_type *ret; \
a_type *tnode = rbtree->rbt_root; \
- ret = NULL; \
- while (tnode != NULL) { \
+ ret = &rbtree->rbt_nil; \
+ while (tnode != &rbtree->rbt_nil) { \
int cmp = (a_cmp)(key, tnode); \
if (cmp < 0) { \
ret = tnode; \
@@ -438,14 +431,17 @@ a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \
break; \
} \
} \
+ if (ret == &rbtree->rbt_nil) { \
+ ret = (NULL); \
+ } \
return (ret); \
} \
a_attr a_type * \
-a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \
+a_prefix##psearch(a_rbt_type *rbtree, a_type *key) { \
a_type *ret; \
a_type *tnode = rbtree->rbt_root; \
- ret = NULL; \
- while (tnode != NULL) { \
+ ret = &rbtree->rbt_nil; \
+ while (tnode != &rbtree->rbt_nil) { \
int cmp = (a_cmp)(key, tnode); \
if (cmp < 0) { \
tnode = rbtn_left_get(a_type, a_field, tnode); \
@@ -457,6 +453,9 @@ a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \
break; \
} \
} \
+ if (ret == &rbtree->rbt_nil) { \
+ ret = (NULL); \
+ } \
return (ret); \
} \
a_attr void \
@@ -468,7 +467,7 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
rbt_node_new(a_type, a_field, rbtree, node); \
/* Wind. */ \
path->node = rbtree->rbt_root; \
- for (pathp = path; pathp->node != NULL; pathp++) { \
+ for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \
int cmp = pathp->cmp = a_cmp(node, pathp->node); \
assert(cmp != 0); \
if (cmp < 0) { \
@@ -488,8 +487,7 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
rbtn_left_set(a_type, a_field, cnode, left); \
if (rbtn_red_get(a_type, a_field, left)) { \
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
- if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
- leftleft)) { \
+ if (rbtn_red_get(a_type, a_field, leftleft)) { \
/* Fix up 4-node. */ \
a_type *tnode; \
rbtn_black_set(a_type, a_field, leftleft); \
@@ -504,8 +502,7 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, cnode, right); \
if (rbtn_red_get(a_type, a_field, right)) { \
a_type *left = rbtn_left_get(a_type, a_field, cnode); \
- if (left != NULL && rbtn_red_get(a_type, a_field, \
- left)) { \
+ if (rbtn_red_get(a_type, a_field, left)) { \
/* Split 4-node. */ \
rbtn_black_set(a_type, a_field, left); \
rbtn_black_set(a_type, a_field, right); \
@@ -538,7 +535,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
/* Wind. */ \
nodep = NULL; /* Silence compiler warning. */ \
path->node = rbtree->rbt_root; \
- for (pathp = path; pathp->node != NULL; pathp++) { \
+ for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \
int cmp = pathp->cmp = a_cmp(node, pathp->node); \
if (cmp < 0) { \
pathp[1].node = rbtn_left_get(a_type, a_field, \
@@ -550,7 +547,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
/* Find node's successor, in preparation for swap. */ \
pathp->cmp = 1; \
nodep = pathp; \
- for (pathp++; pathp->node != NULL; \
+ for (pathp++; pathp->node != &rbtree->rbt_nil; \
pathp++) { \
pathp->cmp = -1; \
pathp[1].node = rbtn_left_get(a_type, a_field, \
@@ -593,7 +590,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
} \
} else { \
a_type *left = rbtn_left_get(a_type, a_field, node); \
- if (left != NULL) { \
+ if (left != &rbtree->rbt_nil) { \
/* node has no successor, but it has a left child. */\
/* Splice node out, without losing the left child. */\
assert(!rbtn_red_get(a_type, a_field, node)); \
@@ -613,32 +610,33 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
return; \
} else if (pathp == path) { \
/* The tree only contained one node. */ \
- rbtree->rbt_root = NULL; \
+ rbtree->rbt_root = &rbtree->rbt_nil; \
return; \
} \
} \
if (rbtn_red_get(a_type, a_field, pathp->node)) { \
/* Prune red node, which requires no fixup. */ \
assert(pathp[-1].cmp < 0); \
- rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \
+ rbtn_left_set(a_type, a_field, pathp[-1].node, \
+ &rbtree->rbt_nil); \
return; \
} \
/* The node to be pruned is black, so unwind until balance is */\
/* restored. */\
- pathp->node = NULL; \
+ pathp->node = &rbtree->rbt_nil; \
for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \
assert(pathp->cmp != 0); \
if (pathp->cmp < 0) { \
rbtn_left_set(a_type, a_field, pathp->node, \
pathp[1].node); \
+ assert(!rbtn_red_get(a_type, a_field, pathp[1].node)); \
if (rbtn_red_get(a_type, a_field, pathp->node)) { \
a_type *right = rbtn_right_get(a_type, a_field, \
pathp->node); \
a_type *rightleft = rbtn_left_get(a_type, a_field, \
right); \
a_type *tnode; \
- if (rightleft != NULL && rbtn_red_get(a_type, a_field, \
- rightleft)) { \
+ if (rbtn_red_get(a_type, a_field, rightleft)) { \
/* In the following diagrams, ||, //, and \\ */\
/* indicate the path to the removed node. */\
/* */\
@@ -681,8 +679,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
pathp->node); \
a_type *rightleft = rbtn_left_get(a_type, a_field, \
right); \
- if (rightleft != NULL && rbtn_red_get(a_type, a_field, \
- rightleft)) { \
+ if (rbtn_red_get(a_type, a_field, rightleft)) { \
/* || */\
/* pathp(b) */\
/* // \ */\
@@ -736,8 +733,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
left); \
a_type *leftrightleft = rbtn_left_get(a_type, a_field, \
leftright); \
- if (leftrightleft != NULL && rbtn_red_get(a_type, \
- a_field, leftrightleft)) { \
+ if (rbtn_red_get(a_type, a_field, leftrightleft)) { \
/* || */\
/* pathp(b) */\
/* / \\ */\
@@ -763,7 +759,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
/* (b) */\
/* / */\
/* (b) */\
- assert(leftright != NULL); \
+ assert(leftright != &rbtree->rbt_nil); \
rbtn_red_set(a_type, a_field, leftright); \
rbtn_rotate_right(a_type, a_field, pathp->node, \
tnode); \
@@ -786,8 +782,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
return; \
} else if (rbtn_red_get(a_type, a_field, pathp->node)) { \
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
- if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
- leftleft)) { \
+ if (rbtn_red_get(a_type, a_field, leftleft)) { \
/* || */\
/* pathp(r) */\
/* / \\ */\
@@ -825,8 +820,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
} \
} else { \
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
- if (leftleft != NULL && rbtn_red_get(a_type, a_field, \
- leftleft)) { \
+ if (rbtn_red_get(a_type, a_field, leftleft)) { \
/* || */\
/* pathp(b) */\
/* / \\ */\
@@ -872,13 +866,13 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
a_attr a_type * \
a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
- if (node == NULL) { \
- return (NULL); \
+ if (node == &rbtree->rbt_nil) { \
+ return (&rbtree->rbt_nil); \
} else { \
a_type *ret; \
if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \
- a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node, \
- arg)) != NULL) { \
+ a_field, node), cb, arg)) != &rbtree->rbt_nil \
+ || (ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
} \
return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
@@ -892,8 +886,8 @@ a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \
if (cmp < 0) { \
a_type *ret; \
if ((ret = a_prefix##iter_start(rbtree, start, \
- rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL || \
- (ret = cb(rbtree, node, arg)) != NULL) { \
+ rbtn_left_get(a_type, a_field, node), cb, arg)) != \
+ &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
} \
return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
@@ -920,18 +914,21 @@ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
} else { \
ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\
} \
+ if (ret == &rbtree->rbt_nil) { \
+ ret = NULL; \
+ } \
return (ret); \
} \
a_attr a_type * \
a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
- if (node == NULL) { \
- return (NULL); \
+ if (node == &rbtree->rbt_nil) { \
+ return (&rbtree->rbt_nil); \
} else { \
a_type *ret; \
if ((ret = a_prefix##reverse_iter_recurse(rbtree, \
- rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
- (ret = cb(rbtree, node, arg)) != NULL) { \
+ rbtn_right_get(a_type, a_field, node), cb, arg)) != \
+ &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
} \
return (a_prefix##reverse_iter_recurse(rbtree, \
@@ -946,8 +943,8 @@ a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \
if (cmp > 0) { \
a_type *ret; \
if ((ret = a_prefix##reverse_iter_start(rbtree, start, \
- rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
- (ret = cb(rbtree, node, arg)) != NULL) { \
+ rbtn_right_get(a_type, a_field, node), cb, arg)) != \
+ &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
} \
return (a_prefix##reverse_iter_recurse(rbtree, \
@@ -975,29 +972,10 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \
cb, arg); \
} \
- return (ret); \
-} \
-a_attr void \
-a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)( \
- a_type *, void *), void *arg) { \
- if (node == NULL) { \
- return; \
- } \
- a_prefix##destroy_recurse(rbtree, rbtn_left_get(a_type, a_field, \
- node), cb, arg); \
- rbtn_left_set(a_type, a_field, (node), NULL); \
- a_prefix##destroy_recurse(rbtree, rbtn_right_get(a_type, a_field, \
- node), cb, arg); \
- rbtn_right_set(a_type, a_field, (node), NULL); \
- if (cb) { \
- cb(node, arg); \
+ if (ret == &rbtree->rbt_nil) { \
+ ret = NULL; \
} \
-} \
-a_attr void \
-a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
- void *arg) { \
- a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \
- rbtree->rbt_root = NULL; \
+ return (ret); \
}
#endif /* RB_H_ */
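
Editor's note: the rb.h documentation restored above specifies that ex_nsearch() returns the key's successor and ex_psearch() its predecessor when no exact match exists. The sketch below is not the red-black tree implementation; it only demonstrates those search semantics on a plain sorted array so the contract is concrete, and nsearch()/psearch() here are illustrative stand-ins.

/*
 * Sketch only: search/nsearch/psearch semantics on a sorted array.
 */
#include <stdio.h>

static const int keys[] = {10, 20, 30, 40};
#define NKEYS (sizeof(keys) / sizeof(keys[0]))

static const int *
nsearch(int key)
{
	size_t i;

	for (i = 0; i < NKEYS; i++) {
		if (keys[i] >= key)
			return (&keys[i]);	/* Exact match or successor. */
	}
	return (NULL);
}

static const int *
psearch(int key)
{
	const int *ret = NULL;
	size_t i;

	for (i = 0; i < NKEYS; i++) {
		if (keys[i] > key)
			break;
		ret = &keys[i];			/* Exact match or predecessor. */
	}
	return (ret);
}

int
main(void)
{
	printf("nsearch(25) -> %d\n", *nsearch(25));	/* 30 */
	printf("psearch(25) -> %d\n", *psearch(25));	/* 20 */
	return (0);
}

The other change visible in this hunk is the return to an embedded rbt_nil sentinel: leaf checks compare against &rbtree->rbt_nil instead of NULL, which enlarges the tree struct by one node but removes the NULL guards that the 4.4.0 code needed before reading a child's color.
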
diff --git a/deps/jemalloc/include/jemalloc/internal/rtree.h b/deps/jemalloc/include/jemalloc/internal/rtree.h
index 8d0c584da..28ae9d1dd 100644
--- a/deps/jemalloc/include/jemalloc/internal/rtree.h
+++ b/deps/jemalloc/include/jemalloc/internal/rtree.h
@@ -15,10 +15,9 @@ typedef struct rtree_s rtree_t;
* machine address width.
*/
#define LG_RTREE_BITS_PER_LEVEL 4
-#define RTREE_BITS_PER_LEVEL (1U << LG_RTREE_BITS_PER_LEVEL)
-/* Maximum rtree height. */
+#define RTREE_BITS_PER_LEVEL (ZU(1) << LG_RTREE_BITS_PER_LEVEL)
#define RTREE_HEIGHT_MAX \
- ((1U << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL)
+ ((ZU(1) << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL)
/* Used for two-stage lock-free node initialization. */
#define RTREE_NODE_INITIALIZING ((rtree_node_elm_t *)0x1)
@@ -112,25 +111,22 @@ unsigned rtree_start_level(rtree_t *rtree, uintptr_t key);
uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level);
bool rtree_node_valid(rtree_node_elm_t *node);
-rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm,
- bool dependent);
+rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm);
rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm,
- unsigned level, bool dependent);
+ unsigned level);
extent_node_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm,
bool dependent);
void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm,
const extent_node_t *val);
-rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level,
- bool dependent);
-rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level,
- bool dependent);
+rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level);
+rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level);
extent_node_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent);
bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
-JEMALLOC_ALWAYS_INLINE unsigned
+JEMALLOC_INLINE unsigned
rtree_start_level(rtree_t *rtree, uintptr_t key)
{
unsigned start_level;
@@ -144,7 +140,7 @@ rtree_start_level(rtree_t *rtree, uintptr_t key)
return (start_level);
}
-JEMALLOC_ALWAYS_INLINE uintptr_t
+JEMALLOC_INLINE uintptr_t
rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
{
@@ -153,40 +149,37 @@ rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
rtree->levels[level].bits) - 1));
}
-JEMALLOC_ALWAYS_INLINE bool
+JEMALLOC_INLINE bool
rtree_node_valid(rtree_node_elm_t *node)
{
return ((uintptr_t)node > (uintptr_t)RTREE_NODE_INITIALIZING);
}
-JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
-rtree_child_tryread(rtree_node_elm_t *elm, bool dependent)
+JEMALLOC_INLINE rtree_node_elm_t *
+rtree_child_tryread(rtree_node_elm_t *elm)
{
rtree_node_elm_t *child;
	/* Double-checked read (first read may be stale). */
child = elm->child;
- if (!dependent && !rtree_node_valid(child))
+ if (!rtree_node_valid(child))
child = atomic_read_p(&elm->pun);
- assert(!dependent || child != NULL);
return (child);
}
-JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
-rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level,
- bool dependent)
+JEMALLOC_INLINE rtree_node_elm_t *
+rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level)
{
rtree_node_elm_t *child;
- child = rtree_child_tryread(elm, dependent);
- if (!dependent && unlikely(!rtree_node_valid(child)))
+ child = rtree_child_tryread(elm);
+ if (unlikely(!rtree_node_valid(child)))
child = rtree_child_read_hard(rtree, elm, level);
- assert(!dependent || child != NULL);
return (child);
}
-JEMALLOC_ALWAYS_INLINE extent_node_t *
+JEMALLOC_INLINE extent_node_t *
rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent)
{
@@ -215,119 +208,54 @@ rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val)
atomic_write_p(&elm->pun, val);
}
-JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
-rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent)
+JEMALLOC_INLINE rtree_node_elm_t *
+rtree_subtree_tryread(rtree_t *rtree, unsigned level)
{
rtree_node_elm_t *subtree;
	/* Double-checked read (first read may be stale). */
subtree = rtree->levels[level].subtree;
- if (!dependent && unlikely(!rtree_node_valid(subtree)))
+ if (!rtree_node_valid(subtree))
subtree = atomic_read_p(&rtree->levels[level].subtree_pun);
- assert(!dependent || subtree != NULL);
return (subtree);
}
-JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
-rtree_subtree_read(rtree_t *rtree, unsigned level, bool dependent)
+JEMALLOC_INLINE rtree_node_elm_t *
+rtree_subtree_read(rtree_t *rtree, unsigned level)
{
rtree_node_elm_t *subtree;
- subtree = rtree_subtree_tryread(rtree, level, dependent);
- if (!dependent && unlikely(!rtree_node_valid(subtree)))
+ subtree = rtree_subtree_tryread(rtree, level);
+ if (unlikely(!rtree_node_valid(subtree)))
subtree = rtree_subtree_read_hard(rtree, level);
- assert(!dependent || subtree != NULL);
return (subtree);
}
-JEMALLOC_ALWAYS_INLINE extent_node_t *
+JEMALLOC_INLINE extent_node_t *
rtree_get(rtree_t *rtree, uintptr_t key, bool dependent)
{
uintptr_t subkey;
- unsigned start_level;
- rtree_node_elm_t *node;
+ unsigned i, start_level;
+ rtree_node_elm_t *node, *child;
start_level = rtree_start_level(rtree, key);
- node = rtree_subtree_tryread(rtree, start_level, dependent);
-#define RTREE_GET_BIAS (RTREE_HEIGHT_MAX - rtree->height)
- switch (start_level + RTREE_GET_BIAS) {
-#define RTREE_GET_SUBTREE(level) \
- case level: \
- assert(level < (RTREE_HEIGHT_MAX-1)); \
- if (!dependent && unlikely(!rtree_node_valid(node))) \
- return (NULL); \
- subkey = rtree_subkey(rtree, key, level - \
- RTREE_GET_BIAS); \
- node = rtree_child_tryread(&node[subkey], dependent); \
- /* Fall through. */
-#define RTREE_GET_LEAF(level) \
- case level: \
- assert(level == (RTREE_HEIGHT_MAX-1)); \
- if (!dependent && unlikely(!rtree_node_valid(node))) \
- return (NULL); \
- subkey = rtree_subkey(rtree, key, level - \
- RTREE_GET_BIAS); \
- /* \
- * node is a leaf, so it contains values rather than \
- * child pointers. \
- */ \
- return (rtree_val_read(rtree, &node[subkey], \
- dependent));
-#if RTREE_HEIGHT_MAX > 1
- RTREE_GET_SUBTREE(0)
-#endif
-#if RTREE_HEIGHT_MAX > 2
- RTREE_GET_SUBTREE(1)
-#endif
-#if RTREE_HEIGHT_MAX > 3
- RTREE_GET_SUBTREE(2)
-#endif
-#if RTREE_HEIGHT_MAX > 4
- RTREE_GET_SUBTREE(3)
-#endif
-#if RTREE_HEIGHT_MAX > 5
- RTREE_GET_SUBTREE(4)
-#endif
-#if RTREE_HEIGHT_MAX > 6
- RTREE_GET_SUBTREE(5)
-#endif
-#if RTREE_HEIGHT_MAX > 7
- RTREE_GET_SUBTREE(6)
-#endif
-#if RTREE_HEIGHT_MAX > 8
- RTREE_GET_SUBTREE(7)
-#endif
-#if RTREE_HEIGHT_MAX > 9
- RTREE_GET_SUBTREE(8)
-#endif
-#if RTREE_HEIGHT_MAX > 10
- RTREE_GET_SUBTREE(9)
-#endif
-#if RTREE_HEIGHT_MAX > 11
- RTREE_GET_SUBTREE(10)
-#endif
-#if RTREE_HEIGHT_MAX > 12
- RTREE_GET_SUBTREE(11)
-#endif
-#if RTREE_HEIGHT_MAX > 13
- RTREE_GET_SUBTREE(12)
-#endif
-#if RTREE_HEIGHT_MAX > 14
- RTREE_GET_SUBTREE(13)
-#endif
-#if RTREE_HEIGHT_MAX > 15
- RTREE_GET_SUBTREE(14)
-#endif
-#if RTREE_HEIGHT_MAX > 16
-# error Unsupported RTREE_HEIGHT_MAX
-#endif
- RTREE_GET_LEAF(RTREE_HEIGHT_MAX-1)
-#undef RTREE_GET_SUBTREE
-#undef RTREE_GET_LEAF
- default: not_reached();
+ for (i = start_level, node = rtree_subtree_tryread(rtree, start_level);
+ /**/; i++, node = child) {
+ if (!dependent && unlikely(!rtree_node_valid(node)))
+ return (NULL);
+ subkey = rtree_subkey(rtree, key, i);
+ if (i == rtree->height - 1) {
+ /*
+ * node is a leaf, so it contains values rather than
+ * child pointers.
+ */
+ return (rtree_val_read(rtree, &node[subkey],
+ dependent));
+ }
+ assert(i < rtree->height - 1);
+ child = rtree_child_tryread(&node[subkey]);
}
-#undef RTREE_GET_BIAS
not_reached();
}
@@ -340,7 +268,7 @@ rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val)
start_level = rtree_start_level(rtree, key);
- node = rtree_subtree_read(rtree, start_level, false);
+ node = rtree_subtree_read(rtree, start_level);
if (node == NULL)
return (true);
for (i = start_level; /**/; i++, node = child) {
@@ -354,7 +282,7 @@ rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val)
return (false);
}
assert(i + 1 < rtree->height);
- child = rtree_child_read(rtree, &node[subkey], i, false);
+ child = rtree_child_read(rtree, &node[subkey], i);
if (child == NULL)
return (true);
}
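
Editor's note: the reverted rtree_get() walks the radix tree with a per-level loop instead of the unrolled switch, using rtree_subkey() to peel off RTREE_BITS_PER_LEVEL bits of the key at each step. Below is a toy, self-contained sketch of that lookup shape; the two-level layout, 4-bit subkeys and all names are assumptions for illustration, not jemalloc's.

/*
 * Sketch only: split a key into fixed-width subkeys, most significant
 * first, and index one table level per subkey until a leaf holds the
 * value, as the loop in rtree_get() above does.
 */
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LEVEL	4
#define FANOUT		(1U << BITS_PER_LEVEL)
#define HEIGHT		2			/* Covers an 8-bit key space. */
#define KEY_BITS	(BITS_PER_LEVEL * HEIGHT)

static unsigned
subkey(uintptr_t key, unsigned level)
{
	unsigned shift = KEY_BITS - (level + 1) * BITS_PER_LEVEL;

	return ((key >> shift) & (FANOUT - 1));
}

int
main(void)
{
	/* Two levels: interior pointers, then leaf values. */
	int leaves[FANOUT][FANOUT] = {{0}};
	int *interior[FANOUT] = {NULL};
	uintptr_t key = 0xab;			/* Subkeys 0xa then 0xb. */
	void *node;
	unsigned i;

	interior[subkey(key, 0)] = leaves[subkey(key, 0)];
	leaves[subkey(key, 0)][subkey(key, 1)] = 42;

	/* One level per iteration. */
	node = interior;
	for (i = 0; i < HEIGHT; i++) {
		unsigned sk = subkey(key, i);

		if (i == HEIGHT - 1) {
			printf("value = %d\n", ((int *)node)[sk]);
			break;
		}
		node = ((int **)node)[sk];
		if (node == NULL) {
			printf("unmapped key\n");
			break;
		}
	}
	return (0);
}
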
diff --git a/deps/jemalloc/include/jemalloc/internal/size_classes.sh b/deps/jemalloc/include/jemalloc/internal/size_classes.sh
index f6fbce4ef..fc82036d3 100755
--- a/deps/jemalloc/include/jemalloc/internal/size_classes.sh
+++ b/deps/jemalloc/include/jemalloc/internal/size_classes.sh
@@ -48,21 +48,6 @@ size_class() {
lg_p=$5
lg_kmax=$6
- if [ ${lg_delta} -ge ${lg_p} ] ; then
- psz="yes"
- else
- pow2 ${lg_p}; p=${pow2_result}
- pow2 ${lg_grp}; grp=${pow2_result}
- pow2 ${lg_delta}; delta=${pow2_result}
- sz=$((${grp} + ${delta} * ${ndelta}))
- npgs=$((${sz} / ${p}))
- if [ ${sz} -eq $((${npgs} * ${p})) ] ; then
- psz="yes"
- else
- psz="no"
- fi
- fi
-
lg ${ndelta}; lg_ndelta=${lg_result}; pow2 ${lg_ndelta}
if [ ${pow2_result} -lt ${ndelta} ] ; then
rem="yes"
@@ -89,15 +74,14 @@ size_class() {
else
lg_delta_lookup="no"
fi
- printf ' SC(%3d, %6d, %8d, %6d, %3s, %3s, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${psz} ${bin} ${lg_delta_lookup}
+ printf ' SC(%3d, %6d, %8d, %6d, %3s, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${bin} ${lg_delta_lookup}
# Defined upon return:
- # - psz ("yes" or "no")
- # - bin ("yes" or "no")
# - lg_delta_lookup (${lg_delta} or "no")
+ # - bin ("yes" or "no")
}
sep_line() {
- echo " \\"
+ echo " \\"
}
size_classes() {
@@ -111,13 +95,12 @@ size_classes() {
pow2 ${lg_g}; g=${pow2_result}
echo "#define SIZE_CLASSES \\"
- echo " /* index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup */ \\"
+ echo " /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \\"
ntbins=0
nlbins=0
lg_tiny_maxclass='"NA"'
nbins=0
- npsizes=0
# Tiny size classes.
ndelta=0
@@ -129,9 +112,6 @@ size_classes() {
if [ ${lg_delta_lookup} != "no" ] ; then
nlbins=$((${index} + 1))
fi
- if [ ${psz} = "yes" ] ; then
- npsizes=$((${npsizes} + 1))
- fi
if [ ${bin} != "no" ] ; then
nbins=$((${index} + 1))
fi
@@ -153,25 +133,19 @@ size_classes() {
index=$((${index} + 1))
lg_grp=$((${lg_grp} + 1))
lg_delta=$((${lg_delta} + 1))
- if [ ${psz} = "yes" ] ; then
- npsizes=$((${npsizes} + 1))
- fi
fi
while [ ${ndelta} -lt ${g} ] ; do
size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
index=$((${index} + 1))
ndelta=$((${ndelta} + 1))
- if [ ${psz} = "yes" ] ; then
- npsizes=$((${npsizes} + 1))
- fi
done
# All remaining groups.
lg_grp=$((${lg_grp} + ${lg_g}))
- while [ ${lg_grp} -lt $((${ptr_bits} - 1)) ] ; do
+ while [ ${lg_grp} -lt ${ptr_bits} ] ; do
sep_line
ndelta=1
- if [ ${lg_grp} -eq $((${ptr_bits} - 2)) ] ; then
+ if [ ${lg_grp} -eq $((${ptr_bits} - 1)) ] ; then
ndelta_limit=$((${g} - 1))
else
ndelta_limit=${g}
@@ -183,9 +157,6 @@ size_classes() {
# Final written value is correct:
lookup_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
fi
- if [ ${psz} = "yes" ] ; then
- npsizes=$((${npsizes} + 1))
- fi
if [ ${bin} != "no" ] ; then
nbins=$((${index} + 1))
# Final written value is correct:
@@ -212,7 +183,6 @@ size_classes() {
# - nlbins
# - nbins
# - nsizes
- # - npsizes
# - lg_tiny_maxclass
# - lookup_maxclass
# - small_maxclass
@@ -230,13 +200,13 @@ cat <<EOF
* be defined prior to inclusion, and it in turn defines:
*
* LG_SIZE_CLASS_GROUP: Lg of size class count for each size doubling.
- * SIZE_CLASSES: Complete table of SC(index, lg_grp, lg_delta, ndelta, psz,
- * bin, lg_delta_lookup) tuples.
+ * SIZE_CLASSES: Complete table of
+ * SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup)
+ * tuples.
* index: Size class index.
* lg_grp: Lg group base size (no deltas added).
* lg_delta: Lg delta to previous size class.
* ndelta: Delta multiplier. size == 1<<lg_grp + ndelta<<lg_delta
- * psz: 'yes' if a multiple of the page size, 'no' otherwise.
* bin: 'yes' if a small bin size class, 'no' otherwise.
* lg_delta_lookup: Same as lg_delta if a lookup table size class, 'no'
* otherwise.
@@ -244,7 +214,6 @@ cat <<EOF
* NLBINS: Number of bins supported by the lookup table.
* NBINS: Number of small size class bins.
* NSIZES: Number of size classes.
- * NPSIZES: Number of size classes that are a multiple of (1U << LG_PAGE).
* LG_TINY_MAXCLASS: Lg of maximum tiny size class.
* LOOKUP_MAXCLASS: Maximum size class included in lookup table.
* SMALL_MAXCLASS: Maximum small size class.
@@ -269,7 +238,6 @@ for lg_z in ${lg_zarr} ; do
echo "#define NLBINS ${nlbins}"
echo "#define NBINS ${nbins}"
echo "#define NSIZES ${nsizes}"
- echo "#define NPSIZES ${npsizes}"
echo "#define LG_TINY_MAXCLASS ${lg_tiny_maxclass}"
echo "#define LOOKUP_MAXCLASS ${lookup_maxclass}"
echo "#define SMALL_MAXCLASS ${small_maxclass}"
diff --git a/deps/jemalloc/include/jemalloc/internal/smoothstep.h b/deps/jemalloc/include/jemalloc/internal/smoothstep.h
deleted file mode 100644
index c5333ccad..000000000
--- a/deps/jemalloc/include/jemalloc/internal/smoothstep.h
+++ /dev/null
@@ -1,246 +0,0 @@
-/*
- * This file was generated by the following command:
- * sh smoothstep.sh smoother 200 24 3 15
- */
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-/*
- * This header defines a precomputed table based on the smoothstep family of
- * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
- * to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so
- * that floating point math can be avoided.
- *
- * 3 2
- * smoothstep(x) = -2x + 3x
- *
- * 5 4 3
- * smootherstep(x) = 6x - 15x + 10x
- *
- * 7 6 5 4
- * smootheststep(x) = -20x + 70x - 84x + 35x
- */
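
Editor's note: a short sketch (not jemalloc code) of how one entry of the deleted SMOOTHSTEP table below was derived from the comment above: y = smootherstep(x) = 6x^5 - 15x^4 + 10x^3, and h is y scaled to a fixed-point value with SMOOTHSTEP_BFP (24) fractional bits. Step 1 reproduces the table's first entry; the exact rounding of other entries is the generator script's concern.

/*
 * Sketch only: derive step 1 of the fixed-point smootherstep table.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	double x = 0.005;	/* Step 1 of SMOOTHSTEP_NSTEPS (200). */
	double y = 6*x*x*x*x*x - 15*x*x*x*x + 10*x*x*x;
	uint64_t h = (uint64_t)(y * (1 << 24));	/* 24 fractional bits. */

	/* Expect y ~= 0.000001240643750 and h == 0x14, as in STEP(1, ...). */
	printf("y = %.15f, h = 0x%016llx\n", y, (unsigned long long)h);
	return (0);
}
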
-
-#define SMOOTHSTEP_VARIANT "smoother"
-#define SMOOTHSTEP_NSTEPS 200
-#define SMOOTHSTEP_BFP 24
-#define SMOOTHSTEP \
- /* STEP(step, h, x, y) */ \
- STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \
- STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \
- STEP( 3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \
- STEP( 4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \
- STEP( 5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \
- STEP( 6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \
- STEP( 7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \
- STEP( 8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \
- STEP( 9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \
- STEP( 10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \
- STEP( 11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \
- STEP( 12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \
- STEP( 13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \
- STEP( 14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \
- STEP( 15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \
- STEP( 16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \
- STEP( 17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \
- STEP( 18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \
- STEP( 19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \
- STEP( 20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \
- STEP( 21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \
- STEP( 22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \
- STEP( 23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \
- STEP( 24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \
- STEP( 25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \
- STEP( 26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \
- STEP( 27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \
- STEP( 28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \
- STEP( 29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \
- STEP( 30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \
- STEP( 31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \
- STEP( 32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \
- STEP( 33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \
- STEP( 34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \
- STEP( 35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \
- STEP( 36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \
- STEP( 37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \
- STEP( 38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \
- STEP( 39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \
- STEP( 40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \
- STEP( 41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \
- STEP( 42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \
- STEP( 43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \
- STEP( 44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \
- STEP( 45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \
- STEP( 46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \
- STEP( 47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \
- STEP( 48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \
- STEP( 49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \
- STEP( 50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \
- STEP( 51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \
- STEP( 52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \
- STEP( 53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \
- STEP( 54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \
- STEP( 55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \
- STEP( 56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \
- STEP( 57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \
- STEP( 58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \
- STEP( 59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \
- STEP( 60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \
- STEP( 61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \
- STEP( 62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \
- STEP( 63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \
- STEP( 64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \
- STEP( 65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \
- STEP( 66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \
- STEP( 67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \
- STEP( 68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \
- STEP( 69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \
- STEP( 70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \
- STEP( 71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \
- STEP( 72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \
- STEP( 73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \
- STEP( 74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \
- STEP( 75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \
- STEP( 76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \
- STEP( 77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \
- STEP( 78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \
- STEP( 79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \
- STEP( 80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \
- STEP( 81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \
- STEP( 82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \
- STEP( 83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \
- STEP( 84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \
- STEP( 85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \
- STEP( 86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \
- STEP( 87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \
- STEP( 88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \
- STEP( 89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \
- STEP( 90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \
- STEP( 91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \
- STEP( 92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \
- STEP( 93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \
- STEP( 94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \
- STEP( 95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \
- STEP( 96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \
- STEP( 97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \
- STEP( 98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \
- STEP( 99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \
- STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \
- STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \
- STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \
- STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \
- STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \
- STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \
- STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \
- STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \
- STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \
- STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \
- STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \
- STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \
- STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \
- STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \
- STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \
- STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \
- STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \
- STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \
- STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \
- STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \
- STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \
- STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \
- STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \
- STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \
- STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \
- STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \
- STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \
- STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \
- STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \
- STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \
- STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \
- STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \
- STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \
- STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \
- STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \
- STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \
- STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \
- STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \
- STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \
- STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \
- STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \
- STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \
- STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \
- STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \
- STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \
- STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \
- STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \
- STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \
- STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \
- STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \
- STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \
- STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \
- STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \
- STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \
- STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \
- STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \
- STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \
- STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \
- STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \
- STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \
- STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \
- STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \
- STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \
- STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \
- STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \
- STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \
- STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \
- STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \
- STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \
- STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \
- STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \
- STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \
- STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \
- STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \
- STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \
- STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \
- STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \
- STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \
- STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \
- STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \
- STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \
- STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \
- STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \
- STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \
- STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \
- STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \
- STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \
- STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \
- STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \
- STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \
- STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \
- STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \
- STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \
- STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \
- STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \
- STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \
- STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \
- STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \
- STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \
- STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \
- STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
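Illustration (not part of the patch): each STEP(step, h, x, y) row above stores y in binary fixed point as h. Judging from the rows themselves, the removed table uses the quintic "smoother" curve with 200 steps and a 24-bit fixed point (the final row reaches exactly 0x1000000 = 2^24 at y = 1.0). A minimal standalone sketch that recomputes one row under those assumptions:

#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
	/* Recompute STEP(100): x = 100/200, y = smootherstep(x), h = y * 2^24. */
	double x = 100.0 / 200.0;
	double y = 6*x*x*x*x*x - 15*x*x*x*x + 10*x*x*x;
	uint64_t h = (uint64_t)(y * (double)(1UL << 24));

	/* Prints "0x0000000000800000 0.500 0.500000000000000", matching the
	 * STEP(100) row above. */
	printf("0x%016" PRIx64 " %.3f %.15f\n", h, x, y);
	return (0);
}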
diff --git a/deps/jemalloc/include/jemalloc/internal/smoothstep.sh b/deps/jemalloc/include/jemalloc/internal/smoothstep.sh
deleted file mode 100755
index 8124693f7..000000000
--- a/deps/jemalloc/include/jemalloc/internal/smoothstep.sh
+++ /dev/null
@@ -1,115 +0,0 @@
-#!/bin/sh
-#
-# Generate a discrete lookup table for a sigmoid function in the smoothstep
-# family (https://en.wikipedia.org/wiki/Smoothstep), where the lookup table
-# entries correspond to x in [1/nsteps, 2/nsteps, ..., nsteps/nsteps]. Encode
-# the entries using a binary fixed point representation.
-#
-# Usage: smoothstep.sh <variant> <nsteps> <bfp> <xprec> <yprec>
-#
-# <variant> is in {smooth, smoother, smoothest}.
-# <nsteps> must be greater than zero.
-# <bfp> must be in [0..62]; reasonable values are roughly [10..30].
-# <xprec> is x decimal precision.
-# <yprec> is y decimal precision.
-
-#set -x
-
-cmd="sh smoothstep.sh $*"
-variant=$1
-nsteps=$2
-bfp=$3
-xprec=$4
-yprec=$5
-
-case "${variant}" in
- smooth)
- ;;
- smoother)
- ;;
- smoothest)
- ;;
- *)
- echo "Unsupported variant"
- exit 1
- ;;
-esac
-
-smooth() {
- step=$1
- y=`echo ${yprec} k ${step} ${nsteps} / sx _2 lx 3 ^ '*' 3 lx 2 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
- h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
-}
-
-smoother() {
- step=$1
- y=`echo ${yprec} k ${step} ${nsteps} / sx 6 lx 5 ^ '*' _15 lx 4 ^ '*' + 10 lx 3 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
- h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
-}
-
-smoothest() {
- step=$1
- y=`echo ${yprec} k ${step} ${nsteps} / sx _20 lx 7 ^ '*' 70 lx 6 ^ '*' + _84 lx 5 ^ '*' + 35 lx 4 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
- h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
-}
-
-cat <<EOF
-/*
- * This file was generated by the following command:
- * $cmd
- */
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-/*
- * This header defines a precomputed table based on the smoothstep family of
- * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
- * to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so
- * that floating point math can be avoided.
- *
- * 3 2
- * smoothstep(x) = -2x + 3x
- *
- * 5 4 3
- * smootherstep(x) = 6x - 15x + 10x
- *
- * 7 6 5 4
- * smootheststep(x) = -20x + 70x - 84x + 35x
- */
-
-#define SMOOTHSTEP_VARIANT "${variant}"
-#define SMOOTHSTEP_NSTEPS ${nsteps}
-#define SMOOTHSTEP_BFP ${bfp}
-#define SMOOTHSTEP \\
- /* STEP(step, h, x, y) */ \\
-EOF
-
-s=1
-while [ $s -le $nsteps ] ; do
- $variant ${s}
- x=`echo ${xprec} k ${s} ${nsteps} / p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
- printf ' STEP(%4d, UINT64_C(0x%016x), %s, %s) \\\n' ${s} ${h} ${x} ${y}
-
- s=$((s+1))
-done
-echo
-
-cat <<EOF
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
-EOF
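A note on the generator removed above: for each step it pipes an RPN expression to dc(1) ("sx" stores x, "lx" reloads it, "^" exponentiates, "_" introduces a negative literal), so smoother() evaluates 6x^5 - 15x^4 + 10x^3 at yprec digits, and the second dc invocation scales y by 2^bfp to obtain the fixed point value h. Given the precision of the deleted smoothstep.h (200 steps, 24-bit fixed point, x to 3 and y to 15 decimal places), that header would have been produced by an invocation along the lines of "sh smoothstep.sh smoother 200 24 3 15"; the exact command is recorded in the header's generated comment, which is not shown here.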
diff --git a/deps/jemalloc/include/jemalloc/internal/spin.h b/deps/jemalloc/include/jemalloc/internal/spin.h
deleted file mode 100644
index 9ef5ceb92..000000000
--- a/deps/jemalloc/include/jemalloc/internal/spin.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-typedef struct spin_s spin_t;
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-struct spin_s {
- unsigned iteration;
-};
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-void spin_init(spin_t *spin);
-void spin_adaptive(spin_t *spin);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_SPIN_C_))
-JEMALLOC_INLINE void
-spin_init(spin_t *spin)
-{
-
- spin->iteration = 0;
-}
-
-JEMALLOC_INLINE void
-spin_adaptive(spin_t *spin)
-{
- volatile uint64_t i;
-
- for (i = 0; i < (KQU(1) << spin->iteration); i++)
- CPU_SPINWAIT;
-
- if (spin->iteration < 63)
- spin->iteration++;
-}
-
-#endif
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
-
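The removed spin_adaptive() implements exponential backoff: each call busy-waits for 2^iteration iterations of CPU_SPINWAIT (the architecture's pause hint) and then doubles the budget, capping the exponent at 63. A minimal, self-contained sketch of the usual calling pattern, using a C11 atomic flag as a stand-in lock (the lock itself is illustrative, not from this patch):

#include <stdatomic.h>
#include <stdint.h>

typedef struct { unsigned iteration; } spin_t;

static void
spin_init(spin_t *spin)
{
	spin->iteration = 0;
}

static void
spin_adaptive(spin_t *spin)
{
	volatile uint64_t i;

	/* Busy-wait 2^iteration times, then double the budget. */
	for (i = 0; i < (UINT64_C(1) << spin->iteration); i++)
		;	/* CPU_SPINWAIT (e.g. the x86 "pause" instruction) goes here. */
	if (spin->iteration < 63)
		spin->iteration++;
}

static void
lock_with_backoff(atomic_flag *flag)
{
	spin_t spin;

	spin_init(&spin);
	while (atomic_flag_test_and_set_explicit(flag, memory_order_acquire))
		spin_adaptive(&spin);	/* Back off before the next attempt. */
}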
diff --git a/deps/jemalloc/include/jemalloc/internal/stats.h b/deps/jemalloc/include/jemalloc/internal/stats.h
index 04e7dae14..c91dba99d 100644
--- a/deps/jemalloc/include/jemalloc/internal/stats.h
+++ b/deps/jemalloc/include/jemalloc/internal/stats.h
@@ -103,14 +103,6 @@ struct arena_stats_s {
size_t mapped;
/*
- * Number of bytes currently retained as a side effect of munmap() being
- * disabled/bypassed. Retained bytes are technically mapped (though
- * always decommitted or purged), but they are excluded from the mapped
- * statistic (above).
- */
- size_t retained;
-
- /*
* Total number of purge sweeps, total number of madvise calls made,
* and total pages purged in order to keep dirty unused memory under
* control.
@@ -176,9 +168,6 @@ JEMALLOC_INLINE void
stats_cactive_add(size_t size)
{
- assert(size > 0);
- assert((size & chunksize_mask) == 0);
-
atomic_add_z(&stats_cactive, size);
}
@@ -186,9 +175,6 @@ JEMALLOC_INLINE void
stats_cactive_sub(size_t size)
{
- assert(size > 0);
- assert((size & chunksize_mask) == 0);
-
atomic_sub_z(&stats_cactive, size);
}
#endif
diff --git a/deps/jemalloc/include/jemalloc/internal/tcache.h b/deps/jemalloc/include/jemalloc/internal/tcache.h
index 01ba062de..5079cd266 100644
--- a/deps/jemalloc/include/jemalloc/internal/tcache.h
+++ b/deps/jemalloc/include/jemalloc/internal/tcache.h
@@ -70,20 +70,13 @@ struct tcache_bin_s {
int low_water; /* Min # cached since last GC. */
unsigned lg_fill_div; /* Fill (ncached_max >> lg_fill_div). */
unsigned ncached; /* # of cached objects. */
- /*
- * To make use of adjacent cacheline prefetch, the items in the avail
- * stack goes to higher address for newer allocations. avail points
- * just above the available space, which means that
- * avail[-ncached, ... -1] are available items and the lowest item will
- * be allocated first.
- */
void **avail; /* Stack of available objects. */
};
struct tcache_s {
ql_elm(tcache_t) link; /* Used for aggregating stats. */
uint64_t prof_accumbytes;/* Cleared after arena_prof_accum(). */
- ticker_t gc_ticker; /* Drives incremental GC. */
+ unsigned ev_cnt; /* Event count since incremental GC. */
szind_t next_gc_bin; /* Next bin to GC. */
tcache_bin_t tbins[1]; /* Dynamically sized. */
/*
@@ -115,7 +108,7 @@ extern tcache_bin_info_t *tcache_bin_info;
* Number of tcache bins. There are NBINS small-object bins, plus 0 or more
* large-object bins.
*/
-extern unsigned nhbins;
+extern size_t nhbins;
/* Maximum cached size class. */
extern size_t tcache_maxclass;
@@ -130,25 +123,27 @@ extern size_t tcache_maxclass;
*/
extern tcaches_t *tcaches;
-size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
+size_t tcache_salloc(const void *ptr);
void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
-void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
- tcache_bin_t *tbin, szind_t binind, bool *tcache_success);
+void *tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+ tcache_bin_t *tbin, szind_t binind);
void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
szind_t binind, unsigned rem);
void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
unsigned rem, tcache_t *tcache);
-void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache,
- arena_t *oldarena, arena_t *newarena);
+void tcache_arena_associate(tcache_t *tcache, arena_t *arena);
+void tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena,
+ arena_t *newarena);
+void tcache_arena_dissociate(tcache_t *tcache, arena_t *arena);
tcache_t *tcache_get_hard(tsd_t *tsd);
-tcache_t *tcache_create(tsdn_t *tsdn, arena_t *arena);
+tcache_t *tcache_create(tsd_t *tsd, arena_t *arena);
void tcache_cleanup(tsd_t *tsd);
void tcache_enabled_cleanup(tsd_t *tsd);
-void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
+void tcache_stats_merge(tcache_t *tcache, arena_t *arena);
bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
void tcaches_flush(tsd_t *tsd, unsigned ind);
void tcaches_destroy(tsd_t *tsd, unsigned ind);
-bool tcache_boot(tsdn_t *tsdn);
+bool tcache_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
@@ -160,15 +155,15 @@ void tcache_flush(void);
bool tcache_enabled_get(void);
tcache_t *tcache_get(tsd_t *tsd, bool create);
void tcache_enabled_set(bool enabled);
-void *tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success);
+void *tcache_alloc_easy(tcache_bin_t *tbin);
void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
- size_t size, szind_t ind, bool zero, bool slow_path);
+ size_t size, bool zero);
void *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
- size_t size, szind_t ind, bool zero, bool slow_path);
+ size_t size, bool zero);
void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
- szind_t binind, bool slow_path);
+ szind_t binind);
void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
- size_t size, bool slow_path);
+ size_t size);
tcache_t *tcaches_get(tsd_t *tsd, unsigned ind);
#endif
@@ -245,74 +240,51 @@ tcache_event(tsd_t *tsd, tcache_t *tcache)
if (TCACHE_GC_INCR == 0)
return;
- if (unlikely(ticker_tick(&tcache->gc_ticker)))
+ tcache->ev_cnt++;
+ assert(tcache->ev_cnt <= TCACHE_GC_INCR);
+ if (unlikely(tcache->ev_cnt == TCACHE_GC_INCR))
tcache_event_hard(tsd, tcache);
}
JEMALLOC_ALWAYS_INLINE void *
-tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success)
+tcache_alloc_easy(tcache_bin_t *tbin)
{
void *ret;
if (unlikely(tbin->ncached == 0)) {
tbin->low_water = -1;
- *tcache_success = false;
return (NULL);
}
- /*
- * tcache_success (instead of ret) should be checked upon the return of
- * this function. We avoid checking (ret == NULL) because there is
- * never a null stored on the avail stack (which is unknown to the
- * compiler), and eagerly checking ret would cause pipeline stall
- * (waiting for the cacheline).
- */
- *tcache_success = true;
- ret = *(tbin->avail - tbin->ncached);
tbin->ncached--;
-
if (unlikely((int)tbin->ncached < tbin->low_water))
tbin->low_water = tbin->ncached;
-
+ ret = tbin->avail[tbin->ncached];
return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
- szind_t binind, bool zero, bool slow_path)
+ bool zero)
{
void *ret;
+ szind_t binind;
+ size_t usize;
tcache_bin_t *tbin;
- bool tcache_success;
- size_t usize JEMALLOC_CC_SILENCE_INIT(0);
+ binind = size2index(size);
assert(binind < NBINS);
tbin = &tcache->tbins[binind];
- ret = tcache_alloc_easy(tbin, &tcache_success);
- assert(tcache_success == (ret != NULL));
- if (unlikely(!tcache_success)) {
- bool tcache_hard_success;
- arena = arena_choose(tsd, arena);
- if (unlikely(arena == NULL))
- return (NULL);
-
- ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
- tbin, binind, &tcache_hard_success);
- if (tcache_hard_success == false)
+ usize = index2size(binind);
+ ret = tcache_alloc_easy(tbin);
+ if (unlikely(ret == NULL)) {
+ ret = tcache_alloc_small_hard(tsd, arena, tcache, tbin, binind);
+ if (ret == NULL)
return (NULL);
}
-
- assert(ret);
- /*
- * Only compute usize if required. The checks in the following if
- * statement are all static.
- */
- if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
- usize = index2size(binind);
- assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
- }
+ assert(tcache_salloc(ret) == usize);
if (likely(!zero)) {
- if (slow_path && config_fill) {
+ if (config_fill) {
if (unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret,
&arena_bin_info[binind], false);
@@ -320,7 +292,7 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
memset(ret, 0, usize);
}
} else {
- if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
+ if (config_fill && unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret, &arena_bin_info[binind],
true);
}
@@ -337,38 +309,28 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
- szind_t binind, bool zero, bool slow_path)
+ bool zero)
{
void *ret;
+ szind_t binind;
+ size_t usize;
tcache_bin_t *tbin;
- bool tcache_success;
+ binind = size2index(size);
+ usize = index2size(binind);
+ assert(usize <= tcache_maxclass);
assert(binind < nhbins);
tbin = &tcache->tbins[binind];
- ret = tcache_alloc_easy(tbin, &tcache_success);
- assert(tcache_success == (ret != NULL));
- if (unlikely(!tcache_success)) {
+ ret = tcache_alloc_easy(tbin);
+ if (unlikely(ret == NULL)) {
/*
* Only allocate one large object at a time, because it's quite
* expensive to create one and not use it.
*/
- arena = arena_choose(tsd, arena);
- if (unlikely(arena == NULL))
- return (NULL);
-
- ret = arena_malloc_large(tsd_tsdn(tsd), arena, binind, zero);
+ ret = arena_malloc_large(arena, usize, zero);
if (ret == NULL)
return (NULL);
} else {
- size_t usize JEMALLOC_CC_SILENCE_INIT(0);
-
- /* Only compute usize on demand */
- if (config_prof || (slow_path && config_fill) ||
- unlikely(zero)) {
- usize = index2size(binind);
- assert(usize <= tcache_maxclass);
- }
-
if (config_prof && usize == LARGE_MINCLASS) {
arena_chunk_t *chunk =
(arena_chunk_t *)CHUNK_ADDR2BASE(ret);
@@ -378,11 +340,10 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
BININD_INVALID);
}
if (likely(!zero)) {
- if (slow_path && config_fill) {
- if (unlikely(opt_junk_alloc)) {
- memset(ret, JEMALLOC_ALLOC_JUNK,
- usize);
- } else if (unlikely(opt_zero))
+ if (config_fill) {
+ if (unlikely(opt_junk_alloc))
+ memset(ret, 0xa5, usize);
+ else if (unlikely(opt_zero))
memset(ret, 0, usize);
}
} else
@@ -399,15 +360,14 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
}
JEMALLOC_ALWAYS_INLINE void
-tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
- bool slow_path)
+tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind)
{
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;
- assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS);
+ assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);
- if (slow_path && config_fill && unlikely(opt_junk_free))
+ if (config_fill && unlikely(opt_junk_free))
arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
tbin = &tcache->tbins[binind];
@@ -417,27 +377,26 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
(tbin_info->ncached_max >> 1));
}
assert(tbin->ncached < tbin_info->ncached_max);
+ tbin->avail[tbin->ncached] = ptr;
tbin->ncached++;
- *(tbin->avail - tbin->ncached) = ptr;
tcache_event(tsd, tcache);
}
JEMALLOC_ALWAYS_INLINE void
-tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
- bool slow_path)
+tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size)
{
szind_t binind;
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;
assert((size & PAGE_MASK) == 0);
- assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS);
- assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);
+ assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
+ assert(tcache_salloc(ptr) <= tcache_maxclass);
binind = size2index(size);
- if (slow_path && config_fill && unlikely(opt_junk_free))
+ if (config_fill && unlikely(opt_junk_free))
arena_dalloc_junk_large(ptr, size);
tbin = &tcache->tbins[binind];
@@ -447,8 +406,8 @@ tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
(tbin_info->ncached_max >> 1), tcache);
}
assert(tbin->ncached < tbin_info->ncached_max);
+ tbin->avail[tbin->ncached] = ptr;
tbin->ncached++;
- *(tbin->avail - tbin->ncached) = ptr;
tcache_event(tsd, tcache);
}
@@ -457,10 +416,8 @@ JEMALLOC_ALWAYS_INLINE tcache_t *
tcaches_get(tsd_t *tsd, unsigned ind)
{
tcaches_t *elm = &tcaches[ind];
- if (unlikely(elm->tcache == NULL)) {
- elm->tcache = tcache_create(tsd_tsdn(tsd), arena_choose(tsd,
- NULL));
- }
+ if (unlikely(elm->tcache == NULL))
+ elm->tcache = tcache_create(tsd, arena_choose(tsd, NULL));
return (elm->tcache);
}
#endif
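The hunk above also flips the direction of the per-bin avail stack: the removed 4.4.0 code points avail just past the storage and fills it downwards (*(avail - ncached), newest object at the lowest address, per the deleted cacheline-prefetch comment), while the restored code points avail at the base and indexes upwards (avail[ncached]). A standalone illustration of the two layouts against the same 8-slot array (illustrative only, not jemalloc code):

#include <assert.h>

int
main(void)
{
	void *slots[8];
	void **avail_old = slots;	/* restored layout: base pointer */
	void **avail_new = slots + 8;	/* removed layout: just past the top */
	unsigned ncached = 0;
	int a, b;

	/* Restored layout: write at avail[ncached], then increment. */
	avail_old[ncached] = &a;
	ncached++;
	ncached--;
	assert(avail_old[ncached] == &a);	/* popped from slots[0] */

	/* Removed layout: increment, then write at *(avail - ncached). */
	ncached++;
	*(avail_new - ncached) = &b;
	assert(*(avail_new - ncached) == &b);	/* lives in slots[7] */
	ncached--;
	return (0);
}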
diff --git a/deps/jemalloc/include/jemalloc/internal/ticker.h b/deps/jemalloc/include/jemalloc/internal/ticker.h
deleted file mode 100644
index 4696e56d2..000000000
--- a/deps/jemalloc/include/jemalloc/internal/ticker.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-typedef struct ticker_s ticker_t;
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-struct ticker_s {
- int32_t tick;
- int32_t nticks;
-};
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-void ticker_init(ticker_t *ticker, int32_t nticks);
-void ticker_copy(ticker_t *ticker, const ticker_t *other);
-int32_t ticker_read(const ticker_t *ticker);
-bool ticker_ticks(ticker_t *ticker, int32_t nticks);
-bool ticker_tick(ticker_t *ticker);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TICKER_C_))
-JEMALLOC_INLINE void
-ticker_init(ticker_t *ticker, int32_t nticks)
-{
-
- ticker->tick = nticks;
- ticker->nticks = nticks;
-}
-
-JEMALLOC_INLINE void
-ticker_copy(ticker_t *ticker, const ticker_t *other)
-{
-
- *ticker = *other;
-}
-
-JEMALLOC_INLINE int32_t
-ticker_read(const ticker_t *ticker)
-{
-
- return (ticker->tick);
-}
-
-JEMALLOC_INLINE bool
-ticker_ticks(ticker_t *ticker, int32_t nticks)
-{
-
- if (unlikely(ticker->tick < nticks)) {
- ticker->tick = ticker->nticks;
- return (true);
- }
- ticker->tick -= nticks;
- return(false);
-}
-
-JEMALLOC_INLINE bool
-ticker_tick(ticker_t *ticker)
-{
-
- return (ticker_ticks(ticker, 1));
-}
-#endif
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
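The removed ticker above is a simple countdown: ticker_tick() decrements and, once the count drops below one, reloads and returns true, so the caller's expensive path runs roughly once per nticks+1 calls; the restored tcache_event() gets the same effect by counting ev_cnt up to TCACHE_GC_INCR. A self-contained sketch of the pattern (the period value is illustrative, not jemalloc's actual TCACHE_GC_INCR):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NTICKS	228	/* illustrative period */

typedef struct { int32_t tick, nticks; } ticker_t;

static void
ticker_init(ticker_t *t, int32_t nticks)
{
	t->tick = nticks;
	t->nticks = nticks;
}

static bool
ticker_tick(ticker_t *t)
{
	/* Equivalent to the removed ticker_ticks(t, 1). */
	if (t->tick < 1) {
		t->tick = t->nticks;
		return (true);
	}
	t->tick--;
	return (false);
}

int
main(void)
{
	ticker_t t;
	int i, fired = 0;

	ticker_init(&t, NTICKS);
	for (i = 0; i < 10000; i++) {
		if (ticker_tick(&t))
			fired++;	/* tcache_event_hard() would run here. */
	}
	printf("%d\n", fired);	/* roughly 10000 / (NTICKS + 1) */
	return (0);
}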
diff --git a/deps/jemalloc/include/jemalloc/internal/tsd.h b/deps/jemalloc/include/jemalloc/internal/tsd.h
index 9055acafd..eed7aa013 100644
--- a/deps/jemalloc/include/jemalloc/internal/tsd.h
+++ b/deps/jemalloc/include/jemalloc/internal/tsd.h
@@ -13,9 +13,6 @@ typedef struct tsd_init_head_s tsd_init_head_t;
#endif
typedef struct tsd_s tsd_t;
-typedef struct tsdn_s tsdn_t;
-
-#define TSDN_NULL ((tsdn_t *)0)
typedef enum {
tsd_state_uninitialized,
@@ -47,8 +44,7 @@ typedef enum {
* The result is a set of generated functions, e.g.:
*
* bool example_tsd_boot(void) {...}
- * bool example_tsd_booted_get(void) {...}
- * example_t *example_tsd_get(bool init) {...}
+ * example_t *example_tsd_get() {...}
* void example_tsd_set(example_t *val) {...}
*
* Note that all of the functions deal in terms of (a_type *) rather than
@@ -102,10 +98,8 @@ a_attr void \
a_name##tsd_boot1(void); \
a_attr bool \
a_name##tsd_boot(void); \
-a_attr bool \
-a_name##tsd_booted_get(void); \
a_attr a_type * \
-a_name##tsd_get(bool init); \
+a_name##tsd_get(void); \
a_attr void \
a_name##tsd_set(a_type *val);
@@ -207,21 +201,9 @@ a_name##tsd_boot(void) \
\
return (a_name##tsd_boot0()); \
} \
-a_attr bool \
-a_name##tsd_booted_get(void) \
-{ \
- \
- return (a_name##tsd_booted); \
-} \
-a_attr bool \
-a_name##tsd_get_allocates(void) \
-{ \
- \
- return (false); \
-} \
/* Get/set. */ \
a_attr a_type * \
-a_name##tsd_get(bool init) \
+a_name##tsd_get(void) \
{ \
\
assert(a_name##tsd_booted); \
@@ -264,21 +246,9 @@ a_name##tsd_boot(void) \
\
return (a_name##tsd_boot0()); \
} \
-a_attr bool \
-a_name##tsd_booted_get(void) \
-{ \
- \
- return (a_name##tsd_booted); \
-} \
-a_attr bool \
-a_name##tsd_get_allocates(void) \
-{ \
- \
- return (false); \
-} \
/* Get/set. */ \
a_attr a_type * \
-a_name##tsd_get(bool init) \
+a_name##tsd_get(void) \
{ \
\
assert(a_name##tsd_booted); \
@@ -337,14 +307,14 @@ a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
} \
} \
a_attr a_name##tsd_wrapper_t * \
-a_name##tsd_wrapper_get(bool init) \
+a_name##tsd_wrapper_get(void) \
{ \
DWORD error = GetLastError(); \
a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
TlsGetValue(a_name##tsd_tsd); \
SetLastError(error); \
\
- if (init && unlikely(wrapper == NULL)) { \
+ if (unlikely(wrapper == NULL)) { \
wrapper = (a_name##tsd_wrapper_t *) \
malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
if (wrapper == NULL) { \
@@ -398,28 +368,14 @@ a_name##tsd_boot(void) \
a_name##tsd_boot1(); \
return (false); \
} \
-a_attr bool \
-a_name##tsd_booted_get(void) \
-{ \
- \
- return (a_name##tsd_booted); \
-} \
-a_attr bool \
-a_name##tsd_get_allocates(void) \
-{ \
- \
- return (true); \
-} \
/* Get/set. */ \
a_attr a_type * \
-a_name##tsd_get(bool init) \
+a_name##tsd_get(void) \
{ \
a_name##tsd_wrapper_t *wrapper; \
\
assert(a_name##tsd_booted); \
- wrapper = a_name##tsd_wrapper_get(init); \
- if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \
- return (NULL); \
+ wrapper = a_name##tsd_wrapper_get(); \
return (&wrapper->val); \
} \
a_attr void \
@@ -428,7 +384,7 @@ a_name##tsd_set(a_type *val) \
a_name##tsd_wrapper_t *wrapper; \
\
assert(a_name##tsd_booted); \
- wrapper = a_name##tsd_wrapper_get(true); \
+ wrapper = a_name##tsd_wrapper_get(); \
wrapper->val = *(val); \
if (a_cleanup != malloc_tsd_no_cleanup) \
wrapper->initialized = true; \
@@ -472,12 +428,12 @@ a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
} \
} \
a_attr a_name##tsd_wrapper_t * \
-a_name##tsd_wrapper_get(bool init) \
+a_name##tsd_wrapper_get(void) \
{ \
a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
pthread_getspecific(a_name##tsd_tsd); \
\
- if (init && unlikely(wrapper == NULL)) { \
+ if (unlikely(wrapper == NULL)) { \
tsd_init_block_t block; \
wrapper = tsd_init_check_recursion( \
&a_name##tsd_init_head, &block); \
@@ -534,28 +490,14 @@ a_name##tsd_boot(void) \
a_name##tsd_boot1(); \
return (false); \
} \
-a_attr bool \
-a_name##tsd_booted_get(void) \
-{ \
- \
- return (a_name##tsd_booted); \
-} \
-a_attr bool \
-a_name##tsd_get_allocates(void) \
-{ \
- \
- return (true); \
-} \
/* Get/set. */ \
a_attr a_type * \
-a_name##tsd_get(bool init) \
+a_name##tsd_get(void) \
{ \
a_name##tsd_wrapper_t *wrapper; \
\
assert(a_name##tsd_booted); \
- wrapper = a_name##tsd_wrapper_get(init); \
- if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \
- return (NULL); \
+ wrapper = a_name##tsd_wrapper_get(); \
return (&wrapper->val); \
} \
a_attr void \
@@ -564,7 +506,7 @@ a_name##tsd_set(a_type *val) \
a_name##tsd_wrapper_t *wrapper; \
\
assert(a_name##tsd_booted); \
- wrapper = a_name##tsd_wrapper_get(true); \
+ wrapper = a_name##tsd_wrapper_get(); \
wrapper->val = *(val); \
if (a_cleanup != malloc_tsd_no_cleanup) \
wrapper->initialized = true; \
@@ -594,15 +536,12 @@ struct tsd_init_head_s {
O(thread_allocated, uint64_t) \
O(thread_deallocated, uint64_t) \
O(prof_tdata, prof_tdata_t *) \
- O(iarena, arena_t *) \
O(arena, arena_t *) \
- O(arenas_tdata, arena_tdata_t *) \
- O(narenas_tdata, unsigned) \
- O(arenas_tdata_bypass, bool) \
+ O(arenas_cache, arena_t **) \
+ O(narenas_cache, unsigned) \
+ O(arenas_cache_bypass, bool) \
O(tcache_enabled, tcache_enabled_t) \
O(quarantine, quarantine_t *) \
- O(witnesses, witness_list_t) \
- O(witness_fork, bool) \
#define TSD_INITIALIZER { \
tsd_state_uninitialized, \
@@ -612,13 +551,10 @@ struct tsd_init_head_s {
NULL, \
NULL, \
NULL, \
- NULL, \
0, \
false, \
tcache_enabled_default, \
- NULL, \
- ql_head_initializer(witnesses), \
- false \
+ NULL \
}
struct tsd_s {
@@ -629,15 +565,6 @@ MALLOC_TSD
#undef O
};
-/*
- * Wrapper around tsd_t that makes it possible to avoid implicit conversion
- * between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be
- * explicitly converted to tsd_t, which is non-nullable.
- */
-struct tsdn_s {
- tsd_t tsd;
-};
-
static const tsd_t tsd_initializer = TSD_INITIALIZER;
malloc_tsd_types(, tsd_t)
@@ -650,7 +577,7 @@ void *malloc_tsd_malloc(size_t size);
void malloc_tsd_dalloc(void *wrapper);
void malloc_tsd_no_cleanup(void *arg);
void malloc_tsd_cleanup_register(bool (*f)(void));
-tsd_t *malloc_tsd_boot0(void);
+bool malloc_tsd_boot0(void);
void malloc_tsd_boot1(void);
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
@@ -667,9 +594,7 @@ void tsd_cleanup(void *arg);
#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), , tsd_t)
-tsd_t *tsd_fetch_impl(bool init);
tsd_t *tsd_fetch(void);
-tsdn_t *tsd_tsdn(tsd_t *tsd);
bool tsd_nominal(tsd_t *tsd);
#define O(n, t) \
t *tsd_##n##p_get(tsd_t *tsd); \
@@ -677,9 +602,6 @@ t tsd_##n##_get(tsd_t *tsd); \
void tsd_##n##_set(tsd_t *tsd, t n);
MALLOC_TSD
#undef O
-tsdn_t *tsdn_fetch(void);
-bool tsdn_null(const tsdn_t *tsdn);
-tsd_t *tsdn_tsd(tsdn_t *tsdn);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TSD_C_))
@@ -687,13 +609,9 @@ malloc_tsd_externs(, tsd_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, , tsd_t, tsd_initializer, tsd_cleanup)
JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_fetch_impl(bool init)
+tsd_fetch(void)
{
- tsd_t *tsd = tsd_get(init);
-
- if (!init && tsd_get_allocates() && tsd == NULL)
- return (NULL);
- assert(tsd != NULL);
+ tsd_t *tsd = tsd_get();
if (unlikely(tsd->state != tsd_state_nominal)) {
if (tsd->state == tsd_state_uninitialized) {
@@ -710,20 +628,6 @@ tsd_fetch_impl(bool init)
return (tsd);
}
-JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_fetch(void)
-{
-
- return (tsd_fetch_impl(true));
-}
-
-JEMALLOC_ALWAYS_INLINE tsdn_t *
-tsd_tsdn(tsd_t *tsd)
-{
-
- return ((tsdn_t *)tsd);
-}
-
JEMALLOC_INLINE bool
tsd_nominal(tsd_t *tsd)
{
@@ -755,32 +659,6 @@ tsd_##n##_set(tsd_t *tsd, t n) \
}
MALLOC_TSD
#undef O
-
-JEMALLOC_ALWAYS_INLINE tsdn_t *
-tsdn_fetch(void)
-{
-
- if (!tsd_booted_get())
- return (NULL);
-
- return (tsd_tsdn(tsd_fetch_impl(false)));
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-tsdn_null(const tsdn_t *tsdn)
-{
-
- return (tsdn == NULL);
-}
-
-JEMALLOC_ALWAYS_INLINE tsd_t *
-tsdn_tsd(tsdn_t *tsdn)
-{
-
- assert(!tsdn_null(tsdn));
-
- return (&tsdn->tsd);
-}
#endif
#endif /* JEMALLOC_H_INLINES */
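Much of the tsd.h churn above serves one pattern: the removed 4.4.0 code threads a nullable tsdn_t handle through internal APIs (so code can run before thread-specific data is booted), while the restored code passes tsd_t directly. A standalone sketch of the nullable-wrapper idea, with stand-in types rather than jemalloc's (illustrative only):

#include <assert.h>
#include <stddef.h>

typedef struct { int dummy; } tsd_t;		/* stand-in for the real tsd_t */
typedef struct { tsd_t tsd; } tsdn_t;		/* nullable wrapper, as removed above */

static tsdn_t *
tsd_tsdn(tsd_t *tsd)
{
	return ((tsdn_t *)tsd);	/* valid: tsd_t is the wrapper's first member */
}

static int
tsdn_null(const tsdn_t *tsdn)
{
	return (tsdn == NULL);
}

static tsd_t *
tsdn_tsd(tsdn_t *tsdn)
{
	assert(!tsdn_null(tsdn));
	return (&tsdn->tsd);
}

/* Callers that may run before TSD is available take tsdn_t and check for
 * NULL; callers that require live TSD take tsd_t and never check. */
static void
do_work(tsdn_t *tsdn)
{
	if (tsdn_null(tsdn))
		return;			/* e.g. skip lock-order bookkeeping */
	(void)tsdn_tsd(tsdn);		/* non-NULL here, safe to use */
}

int
main(void)
{
	tsd_t tsd = { 0 };

	do_work(NULL);			/* tolerated: nothing happens */
	do_work(tsd_tsdn(&tsd));	/* normal path */
	return (0);
}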
diff --git a/deps/jemalloc/include/jemalloc/internal/util.h b/deps/jemalloc/include/jemalloc/internal/util.h
index 4b56d652e..b2ea740fd 100644
--- a/deps/jemalloc/include/jemalloc/internal/util.h
+++ b/deps/jemalloc/include/jemalloc/internal/util.h
@@ -40,14 +40,6 @@
*/
#define MALLOC_PRINTF_BUFSIZE 4096
-/* Junk fill patterns. */
-#ifndef JEMALLOC_ALLOC_JUNK
-# define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5)
-#endif
-#ifndef JEMALLOC_FREE_JUNK
-# define JEMALLOC_FREE_JUNK ((uint8_t)0x5a)
-#endif
-
/*
* Wrap a cpp argument that contains commas such that it isn't broken up into
* multiple arguments.
@@ -65,21 +57,73 @@
# define JEMALLOC_CC_SILENCE_INIT(v)
#endif
+#define JEMALLOC_GNUC_PREREQ(major, minor) \
+ (!defined(__clang__) && \
+ (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor))))
+#ifndef __has_builtin
+# define __has_builtin(builtin) (0)
+#endif
+#define JEMALLOC_CLANG_HAS_BUILTIN(builtin) \
+ (defined(__clang__) && __has_builtin(builtin))
+
#ifdef __GNUC__
# define likely(x) __builtin_expect(!!(x), 1)
# define unlikely(x) __builtin_expect(!!(x), 0)
+# if JEMALLOC_GNUC_PREREQ(4, 6) || \
+ JEMALLOC_CLANG_HAS_BUILTIN(__builtin_unreachable)
+# define unreachable() __builtin_unreachable()
+# else
+# define unreachable()
+# endif
#else
# define likely(x) !!(x)
# define unlikely(x) !!(x)
+# define unreachable()
#endif
-#if !defined(JEMALLOC_INTERNAL_UNREACHABLE)
-# error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure
+/*
+ * Define a custom assert() in order to reduce the chances of deadlock during
+ * assertion failure.
+ */
+#ifndef assert
+#define assert(e) do { \
+ if (unlikely(config_debug && !(e))) { \
+ malloc_printf( \
+ "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
+ __FILE__, __LINE__, #e); \
+ abort(); \
+ } \
+} while (0)
#endif
-#define unreachable() JEMALLOC_INTERNAL_UNREACHABLE()
+#ifndef not_reached
+#define not_reached() do { \
+ if (config_debug) { \
+ malloc_printf( \
+ "<jemalloc>: %s:%d: Unreachable code reached\n", \
+ __FILE__, __LINE__); \
+ abort(); \
+ } \
+ unreachable(); \
+} while (0)
+#endif
-#include "jemalloc/internal/assert.h"
+#ifndef not_implemented
+#define not_implemented() do { \
+ if (config_debug) { \
+ malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
+ __FILE__, __LINE__); \
+ abort(); \
+ } \
+} while (0)
+#endif
+
+#ifndef assert_not_implemented
+#define assert_not_implemented(e) do { \
+ if (unlikely(config_debug && !(e))) \
+ not_implemented(); \
+} while (0)
+#endif
/* Use to assert a particular configuration, e.g., cassert(config_debug). */
#define cassert(c) do { \
@@ -104,9 +148,9 @@ void malloc_write(const char *s);
* malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
* point math.
*/
-size_t malloc_vsnprintf(char *str, size_t size, const char *format,
+int malloc_vsnprintf(char *str, size_t size, const char *format,
va_list ap);
-size_t malloc_snprintf(char *str, size_t size, const char *format, ...)
+int malloc_snprintf(char *str, size_t size, const char *format, ...)
JEMALLOC_FORMAT_PRINTF(3, 4);
void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
const char *format, va_list ap);
@@ -119,16 +163,10 @@ void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
-unsigned ffs_llu(unsigned long long bitmap);
-unsigned ffs_lu(unsigned long bitmap);
-unsigned ffs_u(unsigned bitmap);
-unsigned ffs_zu(size_t bitmap);
-unsigned ffs_u64(uint64_t bitmap);
-unsigned ffs_u32(uint32_t bitmap);
-uint64_t pow2_ceil_u64(uint64_t x);
-uint32_t pow2_ceil_u32(uint32_t x);
-size_t pow2_ceil_zu(size_t x);
-unsigned lg_floor(size_t x);
+int jemalloc_ffsl(long bitmap);
+int jemalloc_ffs(int bitmap);
+size_t pow2_ceil(size_t x);
+size_t lg_floor(size_t x);
void set_errno(int errnum);
int get_errno(void);
#endif
@@ -136,74 +174,27 @@ int get_errno(void);
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_))
/* Sanity check. */
-#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \
- || !defined(JEMALLOC_INTERNAL_FFS)
-# error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure
+#if !defined(JEMALLOC_INTERNAL_FFSL) || !defined(JEMALLOC_INTERNAL_FFS)
+# error Both JEMALLOC_INTERNAL_FFSL && JEMALLOC_INTERNAL_FFS should have been defined by configure
#endif
-JEMALLOC_ALWAYS_INLINE unsigned
-ffs_llu(unsigned long long bitmap)
-{
-
- return (JEMALLOC_INTERNAL_FFSLL(bitmap));
-}
-
-JEMALLOC_ALWAYS_INLINE unsigned
-ffs_lu(unsigned long bitmap)
+JEMALLOC_ALWAYS_INLINE int
+jemalloc_ffsl(long bitmap)
{
return (JEMALLOC_INTERNAL_FFSL(bitmap));
}
-JEMALLOC_ALWAYS_INLINE unsigned
-ffs_u(unsigned bitmap)
+JEMALLOC_ALWAYS_INLINE int
+jemalloc_ffs(int bitmap)
{
return (JEMALLOC_INTERNAL_FFS(bitmap));
}
-JEMALLOC_ALWAYS_INLINE unsigned
-ffs_zu(size_t bitmap)
-{
-
-#if LG_SIZEOF_PTR == LG_SIZEOF_INT
- return (ffs_u(bitmap));
-#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
- return (ffs_lu(bitmap));
-#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
- return (ffs_llu(bitmap));
-#else
-#error No implementation for size_t ffs()
-#endif
-}
-
-JEMALLOC_ALWAYS_INLINE unsigned
-ffs_u64(uint64_t bitmap)
-{
-
-#if LG_SIZEOF_LONG == 3
- return (ffs_lu(bitmap));
-#elif LG_SIZEOF_LONG_LONG == 3
- return (ffs_llu(bitmap));
-#else
-#error No implementation for 64-bit ffs()
-#endif
-}
-
-JEMALLOC_ALWAYS_INLINE unsigned
-ffs_u32(uint32_t bitmap)
-{
-
-#if LG_SIZEOF_INT == 2
- return (ffs_u(bitmap));
-#else
-#error No implementation for 32-bit ffs()
-#endif
- return (ffs_u(bitmap));
-}
-
-JEMALLOC_INLINE uint64_t
-pow2_ceil_u64(uint64_t x)
+/* Compute the smallest power of 2 that is >= x. */
+JEMALLOC_INLINE size_t
+pow2_ceil(size_t x)
{
x--;
@@ -212,39 +203,15 @@ pow2_ceil_u64(uint64_t x)
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
+#if (LG_SIZEOF_PTR == 3)
x |= x >> 32;
+#endif
x++;
return (x);
}
-JEMALLOC_INLINE uint32_t
-pow2_ceil_u32(uint32_t x)
-{
-
- x--;
- x |= x >> 1;
- x |= x >> 2;
- x |= x >> 4;
- x |= x >> 8;
- x |= x >> 16;
- x++;
- return (x);
-}
-
-/* Compute the smallest power of 2 that is >= x. */
-JEMALLOC_INLINE size_t
-pow2_ceil_zu(size_t x)
-{
-
-#if (LG_SIZEOF_PTR == 3)
- return (pow2_ceil_u64(x));
-#else
- return (pow2_ceil_u32(x));
-#endif
-}
-
#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
-JEMALLOC_INLINE unsigned
+JEMALLOC_INLINE size_t
lg_floor(size_t x)
{
size_t ret;
@@ -255,11 +222,10 @@ lg_floor(size_t x)
: "=r"(ret) // Outputs.
: "r"(x) // Inputs.
);
- assert(ret < UINT_MAX);
- return ((unsigned)ret);
+ return (ret);
}
#elif (defined(_MSC_VER))
-JEMALLOC_INLINE unsigned
+JEMALLOC_INLINE size_t
lg_floor(size_t x)
{
unsigned long ret;
@@ -271,13 +237,12 @@ lg_floor(size_t x)
#elif (LG_SIZEOF_PTR == 2)
_BitScanReverse(&ret, x);
#else
-# error "Unsupported type size for lg_floor()"
+# error "Unsupported type sizes for lg_floor()"
#endif
- assert(ret < UINT_MAX);
- return ((unsigned)ret);
+ return (ret);
}
#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
-JEMALLOC_INLINE unsigned
+JEMALLOC_INLINE size_t
lg_floor(size_t x)
{
@@ -288,11 +253,11 @@ lg_floor(size_t x)
#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG)
return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x));
#else
-# error "Unsupported type size for lg_floor()"
+# error "Unsupported type sizes for lg_floor()"
#endif
}
#else
-JEMALLOC_INLINE unsigned
+JEMALLOC_INLINE size_t
lg_floor(size_t x)
{
@@ -303,13 +268,20 @@ lg_floor(size_t x)
x |= (x >> 4);
x |= (x >> 8);
x |= (x >> 16);
-#if (LG_SIZEOF_PTR == 3)
+#if (LG_SIZEOF_PTR == 3 && LG_SIZEOF_PTR == LG_SIZEOF_LONG)
x |= (x >> 32);
-#endif
- if (x == SIZE_T_MAX)
- return ((8 << LG_SIZEOF_PTR) - 1);
+ if (x == KZU(0xffffffffffffffff))
+ return (63);
x++;
- return (ffs_zu(x) - 2);
+ return (jemalloc_ffsl(x) - 2);
+#elif (LG_SIZEOF_PTR == 2)
+ if (x == KZU(0xffffffff))
+ return (31);
+ x++;
+ return (jemalloc_ffs(x) - 2);
+#else
+# error "Unsupported type sizes for lg_floor()"
+#endif
}
#endif
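For reference, the helpers touched above are small bit tricks: pow2_ceil rounds up to the next power of two by smearing the high bit rightwards and adding one, and lg_floor is floor(log2(x)). A quick standalone check of the arithmetic (a re-statement for illustration, not the jemalloc build; the last assert uses a GCC/Clang builtin):

#include <assert.h>
#include <stdint.h>

static uint64_t
pow2_ceil_u64(uint64_t x)
{
	x--;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	x |= x >> 32;
	x++;
	return (x);
}

int
main(void)
{
	assert(pow2_ceil_u64(1) == 1);
	assert(pow2_ceil_u64(5) == 8);
	assert(pow2_ceil_u64(4096) == 4096);
	assert(pow2_ceil_u64(4097) == 8192);
	/* floor(log2(4097)) == 12, matching lg_floor's CLZ-based variant above. */
	assert(63 - __builtin_clzll(UINT64_C(4097)) == 12);
	return (0);
}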
diff --git a/deps/jemalloc/include/jemalloc/internal/valgrind.h b/deps/jemalloc/include/jemalloc/internal/valgrind.h
index 877a142b6..a3380df92 100644
--- a/deps/jemalloc/include/jemalloc/internal/valgrind.h
+++ b/deps/jemalloc/include/jemalloc/internal/valgrind.h
@@ -30,31 +30,17 @@
* calls must be embedded in macros rather than in functions so that when
* Valgrind reports errors, there are no extra stack frames in the backtraces.
*/
-#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do { \
- if (unlikely(in_valgrind && cond)) { \
- VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(tsdn, ptr), \
- zero); \
- } \
+#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \
+ if (unlikely(in_valgrind && cond)) \
+ VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \
} while (0)
-#define JEMALLOC_VALGRIND_REALLOC_MOVED_no(ptr, old_ptr) \
- (false)
-#define JEMALLOC_VALGRIND_REALLOC_MOVED_maybe(ptr, old_ptr) \
- ((ptr) != (old_ptr))
-#define JEMALLOC_VALGRIND_REALLOC_PTR_NULL_no(ptr) \
- (false)
-#define JEMALLOC_VALGRIND_REALLOC_PTR_NULL_maybe(ptr) \
- (ptr == NULL)
-#define JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_no(old_ptr) \
- (false)
-#define JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_maybe(old_ptr) \
- (old_ptr == NULL)
-#define JEMALLOC_VALGRIND_REALLOC(moved, tsdn, ptr, usize, ptr_null, \
- old_ptr, old_usize, old_rzsize, old_ptr_null, zero) do { \
+#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, ptr, usize, \
+ ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \
+ zero) do { \
if (unlikely(in_valgrind)) { \
- size_t rzsize = p2rz(tsdn, ptr); \
+ size_t rzsize = p2rz(ptr); \
\
- if (!JEMALLOC_VALGRIND_REALLOC_MOVED_##moved(ptr, \
- old_ptr)) { \
+ if (!maybe_moved || ptr == old_ptr) { \
VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
usize, rzsize); \
if (zero && old_usize < usize) { \
@@ -63,13 +49,11 @@
old_usize), usize - old_usize); \
} \
} else { \
- if (!JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_## \
- old_ptr_null(old_ptr)) { \
+ if (!old_ptr_maybe_null || old_ptr != NULL) { \
valgrind_freelike_block(old_ptr, \
old_rzsize); \
} \
- if (!JEMALLOC_VALGRIND_REALLOC_PTR_NULL_## \
- ptr_null(ptr)) { \
+ if (!ptr_maybe_null || ptr != NULL) { \
size_t copy_size = (old_usize < usize) \
? old_usize : usize; \
size_t tail_size = usize - copy_size; \
@@ -97,8 +81,8 @@
#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do {} while (0)
-#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do {} while (0)
-#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize, \
+#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0)
+#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, ptr, usize, \
ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \
zero) do {} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
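One detail worth calling out in the hunk above: the removed 4.4.0 JEMALLOC_VALGRIND_REALLOC resolves its "did the block move / may a pointer be NULL" checks at preprocessing time by pasting the caller-supplied no/maybe token onto helper macro names, whereas the restored version simply evaluates boolean arguments. A tiny standalone illustration of that token-pasting dispatch (simplified names, not jemalloc's actual macros):

#include <stdbool.h>
#include <stdio.h>

#define MOVED_no(ptr, old_ptr)		(false)
#define MOVED_maybe(ptr, old_ptr)	((ptr) != (old_ptr))
#define REALLOC_MOVED(moved, ptr, old_ptr)	MOVED_##moved(ptr, old_ptr)

int
main(void)
{
	int a, b;

	/* Expands to (false) and to ((&a) != (&b)) respectively. */
	printf("%d %d\n", REALLOC_MOVED(no, &a, &b),
	    REALLOC_MOVED(maybe, &a, &b));	/* prints "0 1" */
	return (0);
}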
diff --git a/deps/jemalloc/include/jemalloc/internal/witness.h b/deps/jemalloc/include/jemalloc/internal/witness.h
deleted file mode 100644
index cdf15d797..000000000
--- a/deps/jemalloc/include/jemalloc/internal/witness.h
+++ /dev/null
@@ -1,266 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-typedef struct witness_s witness_t;
-typedef unsigned witness_rank_t;
-typedef ql_head(witness_t) witness_list_t;
-typedef int witness_comp_t (const witness_t *, const witness_t *);
-
-/*
- * Lock ranks. Witnesses with rank WITNESS_RANK_OMIT are completely ignored by
- * the witness machinery.
- */
-#define WITNESS_RANK_OMIT 0U
-
-#define WITNESS_RANK_INIT 1U
-#define WITNESS_RANK_CTL 1U
-#define WITNESS_RANK_ARENAS 2U
-
-#define WITNESS_RANK_PROF_DUMP 3U
-#define WITNESS_RANK_PROF_BT2GCTX 4U
-#define WITNESS_RANK_PROF_TDATAS 5U
-#define WITNESS_RANK_PROF_TDATA 6U
-#define WITNESS_RANK_PROF_GCTX 7U
-
-#define WITNESS_RANK_ARENA 8U
-#define WITNESS_RANK_ARENA_CHUNKS 9U
-#define WITNESS_RANK_ARENA_NODE_CACHE 10
-
-#define WITNESS_RANK_BASE 11U
-
-#define WITNESS_RANK_LEAF 0xffffffffU
-#define WITNESS_RANK_ARENA_BIN WITNESS_RANK_LEAF
-#define WITNESS_RANK_ARENA_HUGE WITNESS_RANK_LEAF
-#define WITNESS_RANK_DSS WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF
-
-#define WITNESS_INITIALIZER(rank) {"initializer", rank, NULL, {NULL, NULL}}
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-struct witness_s {
- /* Name, used for printing lock order reversal messages. */
- const char *name;
-
- /*
- * Witness rank, where 0 is lowest and UINT_MAX is highest. Witnesses
- * must be acquired in order of increasing rank.
- */
- witness_rank_t rank;
-
- /*
- * If two witnesses are of equal rank and they have the samp comp
- * function pointer, it is called as a last attempt to differentiate
- * between witnesses of equal rank.
- */
- witness_comp_t *comp;
-
- /* Linkage for thread's currently owned locks. */
- ql_elm(witness_t) link;
-};
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-void witness_init(witness_t *witness, const char *name, witness_rank_t rank,
- witness_comp_t *comp);
-#ifdef JEMALLOC_JET
-typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *);
-extern witness_lock_error_t *witness_lock_error;
-#else
-void witness_lock_error(const witness_list_t *witnesses,
- const witness_t *witness);
-#endif
-#ifdef JEMALLOC_JET
-typedef void (witness_owner_error_t)(const witness_t *);
-extern witness_owner_error_t *witness_owner_error;
-#else
-void witness_owner_error(const witness_t *witness);
-#endif
-#ifdef JEMALLOC_JET
-typedef void (witness_not_owner_error_t)(const witness_t *);
-extern witness_not_owner_error_t *witness_not_owner_error;
-#else
-void witness_not_owner_error(const witness_t *witness);
-#endif
-#ifdef JEMALLOC_JET
-typedef void (witness_lockless_error_t)(const witness_list_t *);
-extern witness_lockless_error_t *witness_lockless_error;
-#else
-void witness_lockless_error(const witness_list_t *witnesses);
-#endif
-
-void witnesses_cleanup(tsd_t *tsd);
-void witness_fork_cleanup(tsd_t *tsd);
-void witness_prefork(tsd_t *tsd);
-void witness_postfork_parent(tsd_t *tsd);
-void witness_postfork_child(tsd_t *tsd);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-bool witness_owner(tsd_t *tsd, const witness_t *witness);
-void witness_assert_owner(tsdn_t *tsdn, const witness_t *witness);
-void witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness);
-void witness_assert_lockless(tsdn_t *tsdn);
-void witness_lock(tsdn_t *tsdn, witness_t *witness);
-void witness_unlock(tsdn_t *tsdn, witness_t *witness);
-#endif
-
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
-JEMALLOC_INLINE bool
-witness_owner(tsd_t *tsd, const witness_t *witness)
-{
- witness_list_t *witnesses;
- witness_t *w;
-
- witnesses = tsd_witnessesp_get(tsd);
- ql_foreach(w, witnesses, link) {
- if (w == witness)
- return (true);
- }
-
- return (false);
-}
-
-JEMALLOC_INLINE void
-witness_assert_owner(tsdn_t *tsdn, const witness_t *witness)
-{
- tsd_t *tsd;
-
- if (!config_debug)
- return;
-
- if (tsdn_null(tsdn))
- return;
- tsd = tsdn_tsd(tsdn);
- if (witness->rank == WITNESS_RANK_OMIT)
- return;
-
- if (witness_owner(tsd, witness))
- return;
- witness_owner_error(witness);
-}
-
-JEMALLOC_INLINE void
-witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness)
-{
- tsd_t *tsd;
- witness_list_t *witnesses;
- witness_t *w;
-
- if (!config_debug)
- return;
-
- if (tsdn_null(tsdn))
- return;
- tsd = tsdn_tsd(tsdn);
- if (witness->rank == WITNESS_RANK_OMIT)
- return;
-
- witnesses = tsd_witnessesp_get(tsd);
- ql_foreach(w, witnesses, link) {
- if (w == witness)
- witness_not_owner_error(witness);
- }
-}
-
-JEMALLOC_INLINE void
-witness_assert_lockless(tsdn_t *tsdn)
-{
- tsd_t *tsd;
- witness_list_t *witnesses;
- witness_t *w;
-
- if (!config_debug)
- return;
-
- if (tsdn_null(tsdn))
- return;
- tsd = tsdn_tsd(tsdn);
-
- witnesses = tsd_witnessesp_get(tsd);
- w = ql_last(witnesses, link);
- if (w != NULL)
- witness_lockless_error(witnesses);
-}
-
-JEMALLOC_INLINE void
-witness_lock(tsdn_t *tsdn, witness_t *witness)
-{
- tsd_t *tsd;
- witness_list_t *witnesses;
- witness_t *w;
-
- if (!config_debug)
- return;
-
- if (tsdn_null(tsdn))
- return;
- tsd = tsdn_tsd(tsdn);
- if (witness->rank == WITNESS_RANK_OMIT)
- return;
-
- witness_assert_not_owner(tsdn, witness);
-
- witnesses = tsd_witnessesp_get(tsd);
- w = ql_last(witnesses, link);
- if (w == NULL) {
- /* No other locks; do nothing. */
- } else if (tsd_witness_fork_get(tsd) && w->rank <= witness->rank) {
- /* Forking, and relaxed ranking satisfied. */
- } else if (w->rank > witness->rank) {
- /* Not forking, rank order reversal. */
- witness_lock_error(witnesses, witness);
- } else if (w->rank == witness->rank && (w->comp == NULL || w->comp !=
- witness->comp || w->comp(w, witness) > 0)) {
- /*
- * Missing/incompatible comparison function, or comparison
- * function indicates rank order reversal.
- */
- witness_lock_error(witnesses, witness);
- }
-
- ql_elm_new(witness, link);
- ql_tail_insert(witnesses, witness, link);
-}
-
-JEMALLOC_INLINE void
-witness_unlock(tsdn_t *tsdn, witness_t *witness)
-{
- tsd_t *tsd;
- witness_list_t *witnesses;
-
- if (!config_debug)
- return;
-
- if (tsdn_null(tsdn))
- return;
- tsd = tsdn_tsd(tsdn);
- if (witness->rank == WITNESS_RANK_OMIT)
- return;
-
- /*
- * Check whether owner before removal, rather than relying on
- * witness_assert_owner() to abort, so that unit tests can test this
- * function's failure mode without causing undefined behavior.
- */
- if (witness_owner(tsd, witness)) {
- witnesses = tsd_witnessesp_get(tsd);
- ql_remove(witnesses, witness, link);
- } else
- witness_assert_owner(tsdn, witness);
-}
-#endif
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
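The removed witness machinery enforces a lock-ordering discipline: every mutex carries a rank, and a thread may only acquire locks in non-decreasing rank order (equal ranks need a comparison function, and forking relaxes the rule). A deliberately simplified, standalone sketch of the core check, tracking only the highest rank held rather than the full per-thread list that witness_lock() above maintains:

#include <assert.h>

typedef unsigned witness_rank_t;

static __thread witness_rank_t held_max = 0;	/* 0: nothing held (GCC/Clang TLS) */

static void
witness_lock_check(witness_rank_t rank)
{
	/* A lower rank after a higher one is a lock order reversal. */
	assert(held_max <= rank);
	held_max = rank;
}

int
main(void)
{
	witness_lock_check(2);	/* e.g. an arenas-level lock */
	witness_lock_check(8);	/* e.g. an arena lock: increasing rank, fine */
	/* witness_lock_check(1) here would trip the assertion. */
	return (0);
}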
diff --git a/deps/jemalloc/include/jemalloc/jemalloc_defs.h.in b/deps/jemalloc/include/jemalloc/jemalloc_defs.h.in
index 6d89435c2..ab13c3758 100644
--- a/deps/jemalloc/include/jemalloc/jemalloc_defs.h.in
+++ b/deps/jemalloc/include/jemalloc/jemalloc_defs.h.in
@@ -33,13 +33,5 @@
*/
#undef JEMALLOC_USE_CXX_THROW
-#ifdef _MSC_VER
-# ifdef _WIN64
-# define LG_SIZEOF_PTR_WIN 3
-# else
-# define LG_SIZEOF_PTR_WIN 2
-# endif
-#endif
-
/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
#undef LG_SIZEOF_PTR
diff --git a/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in b/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in
index 129240ed9..7f64d9ff9 100644
--- a/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in
+++ b/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in
@@ -11,13 +11,12 @@
#define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@
#define JEMALLOC_VERSION_GID "@jemalloc_version_gid@"
-# define MALLOCX_LG_ALIGN(la) ((int)(la))
+# define MALLOCX_LG_ALIGN(la) (la)
# if LG_SIZEOF_PTR == 2
-# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1))
+# define MALLOCX_ALIGN(a) (ffs(a)-1)
# else
# define MALLOCX_ALIGN(a) \
- ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \
- ffs((int)(((size_t)(a))>>32))+31))
+ ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
# endif
# define MALLOCX_ZERO ((int)0x40)
/*
@@ -29,7 +28,7 @@
/*
* Bias arena index bits so that 0 encodes "use an automatically chosen arena".
*/
-# define MALLOCX_ARENA(a) ((((int)(a))+1) << 20)
+# define MALLOCX_ARENA(a) ((int)(((a)+1) << 20))
#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
# define JEMALLOC_CXX_THROW throw()
@@ -37,7 +36,32 @@
# define JEMALLOC_CXX_THROW
#endif
-#if _MSC_VER
+#ifdef JEMALLOC_HAVE_ATTR
+# define JEMALLOC_ATTR(s) __attribute__((s))
+# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
+# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
+# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s))
+# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2))
+# else
+# define JEMALLOC_ALLOC_SIZE(s)
+# define JEMALLOC_ALLOC_SIZE2(s1, s2)
+# endif
+# ifndef JEMALLOC_EXPORT
+# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
+# endif
+# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
+# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
+# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
+# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i))
+# else
+# define JEMALLOC_FORMAT_PRINTF(s, i)
+# endif
+# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
+# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow)
+# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
+# define JEMALLOC_RESTRICT_RETURN
+# define JEMALLOC_ALLOCATOR
+#elif _MSC_VER
# define JEMALLOC_ATTR(s)
# define JEMALLOC_ALIGNED(s) __declspec(align(s))
# define JEMALLOC_ALLOC_SIZE(s)
@@ -63,31 +87,6 @@
# else
# define JEMALLOC_ALLOCATOR
# endif
-#elif defined(JEMALLOC_HAVE_ATTR)
-# define JEMALLOC_ATTR(s) __attribute__((s))
-# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
-# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
-# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s))
-# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2))
-# else
-# define JEMALLOC_ALLOC_SIZE(s)
-# define JEMALLOC_ALLOC_SIZE2(s1, s2)
-# endif
-# ifndef JEMALLOC_EXPORT
-# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
-# endif
-# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
-# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
-# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
-# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i))
-# else
-# define JEMALLOC_FORMAT_PRINTF(s, i)
-# endif
-# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
-# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow)
-# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
-# define JEMALLOC_RESTRICT_RETURN
-# define JEMALLOC_ALLOCATOR
#else
# define JEMALLOC_ATTR(s)
# define JEMALLOC_ALIGNED(s)
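The jemalloc_macros.h.in hunks above revert the MALLOCX_* encodings to their older forms: MALLOCX_LG_ALIGN()/MALLOCX_ALIGN() pack lg2(alignment) into the low bits of the mallocx() flags word, and MALLOCX_ARENA() stores the arena index biased by one in the high bits so that 0 keeps meaning "automatically chosen arena". A small sketch of the encoding, simplified to power-of-two alignments no larger than INT_MAX; the decode helpers are hypothetical and not part of jemalloc's API:

    #include <stdio.h>
    #include <strings.h>  /* ffs() */

    #define MALLOCX_LG_ALIGN(la)  ((int)(la))
    /* a must be a power of two <= INT_MAX in this simplified form. */
    #define MALLOCX_ALIGN(a)      ((int)(ffs((int)(a)) - 1))
    #define MALLOCX_ARENA(a)      ((((int)(a)) + 1) << 20)

    /* Hypothetical decoders, for illustration only. */
    static int decode_lg_align(int flags) { return (flags & 0x3f); }
    static int decode_arena(int flags) { return (((flags >> 20) & 0xfff) - 1); }

    int
    main(void)
    {
            int flags = MALLOCX_ALIGN(4096) | MALLOCX_ARENA(2);

            /* Prints: lg_align=12 arena=2 */
            printf("lg_align=%d arena=%d\n", decode_lg_align(flags),
                decode_arena(flags));
            return (0);
    }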
diff --git a/deps/jemalloc/include/msvc_compat/strings.h b/deps/jemalloc/include/msvc_compat/strings.h
index a3ee25063..f01ffdd18 100644
--- a/deps/jemalloc/include/msvc_compat/strings.h
+++ b/deps/jemalloc/include/msvc_compat/strings.h
@@ -21,37 +21,7 @@ static __forceinline int ffs(int x)
return (ffsl(x));
}
-# ifdef _M_X64
-# pragma intrinsic(_BitScanForward64)
-# endif
-
-static __forceinline int ffsll(unsigned __int64 x)
-{
- unsigned long i;
-#ifdef _M_X64
- if (_BitScanForward64(&i, x))
- return (i + 1);
- return (0);
-#else
-// Fallback for 32-bit build where 64-bit version not available
-// assuming little endian
- union {
- unsigned __int64 ll;
- unsigned long l[2];
- } s;
-
- s.ll = x;
-
- if (_BitScanForward(&i, s.l[0]))
- return (i + 1);
- else if(_BitScanForward(&i, s.l[1]))
- return (i + 33);
- return (0);
-#endif
-}
-
#else
-# define ffsll(x) __builtin_ffsll(x)
# define ffsl(x) __builtin_ffsl(x)
# define ffs(x) __builtin_ffs(x)
#endif
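The removed strings.h fallback supplied ffsll() for MSVC, using _BitScanForward64 on x64 and, on 32-bit builds, scanning the low 32-bit word before the high one. A portable sketch of that split-and-scan idea (my_ffsll is a hypothetical name; the real header relies on compiler intrinsics):

    #include <assert.h>
    #include <stdint.h>
    #include <strings.h>  /* ffs() */

    static int
    my_ffsll(uint64_t x)
    {
            uint32_t lo = (uint32_t)x;
            uint32_t hi = (uint32_t)(x >> 32);

            if (lo != 0)
                    return (ffs((int)lo));        /* Bit positions 1..32. */
            if (hi != 0)
                    return (ffs((int)hi) + 32);   /* Bit positions 33..64. */
            return (0);                           /* No bits set. */
    }

    int
    main(void)
    {
            assert(my_ffsll(0) == 0);
            assert(my_ffsll(1) == 1);
            assert(my_ffsll(1ULL << 40) == 41);
            return (0);
    }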
diff --git a/deps/jemalloc/include/msvc_compat/windows_extra.h b/deps/jemalloc/include/msvc_compat/windows_extra.h
index 3008faa37..0c5e323ff 100644
--- a/deps/jemalloc/include/msvc_compat/windows_extra.h
+++ b/deps/jemalloc/include/msvc_compat/windows_extra.h
@@ -1,6 +1,26 @@
#ifndef MSVC_COMPAT_WINDOWS_EXTRA_H
#define MSVC_COMPAT_WINDOWS_EXTRA_H
-#include <errno.h>
+#ifndef ENOENT
+# define ENOENT ERROR_PATH_NOT_FOUND
+#endif
+#ifndef EINVAL
+# define EINVAL ERROR_BAD_ARGUMENTS
+#endif
+#ifndef EAGAIN
+# define EAGAIN ERROR_OUTOFMEMORY
+#endif
+#ifndef EPERM
+# define EPERM ERROR_WRITE_FAULT
+#endif
+#ifndef EFAULT
+# define EFAULT ERROR_INVALID_ADDRESS
+#endif
+#ifndef ENOMEM
+# define ENOMEM ERROR_NOT_ENOUGH_MEMORY
+#endif
+#ifndef ERANGE
+# define ERANGE ERROR_INVALID_DATA
+#endif
#endif /* MSVC_COMPAT_WINDOWS_EXTRA_H */
diff --git a/deps/jemalloc/build-aux/install-sh b/deps/jemalloc/install-sh
index ebc66913e..ebc66913e 100755
--- a/deps/jemalloc/build-aux/install-sh
+++ b/deps/jemalloc/install-sh
diff --git a/deps/jemalloc/jemalloc.pc.in b/deps/jemalloc/jemalloc.pc.in
index a318e8dd3..1a3ad9b34 100644
--- a/deps/jemalloc/jemalloc.pc.in
+++ b/deps/jemalloc/jemalloc.pc.in
@@ -6,7 +6,7 @@ install_suffix=@install_suffix@
Name: jemalloc
Description: A general purpose malloc(3) implementation that emphasizes fragmentation avoidance and scalable concurrency support.
-URL: http://jemalloc.net/
+URL: http://www.canonware.com/jemalloc
Version: @jemalloc_version@
Cflags: -I${includedir}
Libs: -L${libdir} -ljemalloc${install_suffix}
diff --git a/deps/jemalloc/msvc/ReadMe.txt b/deps/jemalloc/msvc/ReadMe.txt
deleted file mode 100644
index 77d567da0..000000000
--- a/deps/jemalloc/msvc/ReadMe.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-
-How to build jemalloc for Windows
-=================================
-
-1. Install Cygwin with at least the following packages:
- * autoconf
- * autogen
- * gawk
- * grep
- * sed
-
-2. Install Visual Studio 2015 with Visual C++
-
-3. Add Cygwin\bin to the PATH environment variable
-
-4. Open "VS2015 x86 Native Tools Command Prompt"
- (note: x86/x64 doesn't matter at this point)
-
-5. Generate header files:
- sh -c "CC=cl ./autogen.sh"
-
-6. Now the project can be opened and built in Visual Studio:
- msvc\jemalloc_vc2015.sln
-
diff --git a/deps/jemalloc/msvc/jemalloc_vc2015.sln b/deps/jemalloc/msvc/jemalloc_vc2015.sln
deleted file mode 100644
index aedd5e5ea..000000000
--- a/deps/jemalloc/msvc/jemalloc_vc2015.sln
+++ /dev/null
@@ -1,63 +0,0 @@
-
-Microsoft Visual Studio Solution File, Format Version 12.00
-# Visual Studio 14
-VisualStudioVersion = 14.0.24720.0
-MinimumVisualStudioVersion = 10.0.40219.1
-Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{70A99006-6DE9-472B-8F83-4CEE6C616DF3}"
- ProjectSection(SolutionItems) = preProject
- ReadMe.txt = ReadMe.txt
- EndProjectSection
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "jemalloc", "projects\vc2015\jemalloc\jemalloc.vcxproj", "{8D6BB292-9E1C-413D-9F98-4864BDC1514A}"
-EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test_threads", "projects\vc2015\test_threads\test_threads.vcxproj", "{09028CFD-4EB7-491D-869C-0708DB97ED44}"
-EndProject
-Global
- GlobalSection(SolutionConfigurationPlatforms) = preSolution
- Debug|x64 = Debug|x64
- Debug|x86 = Debug|x86
- Debug-static|x64 = Debug-static|x64
- Debug-static|x86 = Debug-static|x86
- Release|x64 = Release|x64
- Release|x86 = Release|x86
- Release-static|x64 = Release-static|x64
- Release-static|x86 = Release-static|x86
- EndGlobalSection
- GlobalSection(ProjectConfigurationPlatforms) = postSolution
- {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.ActiveCfg = Debug|x64
- {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.Build.0 = Debug|x64
- {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.ActiveCfg = Debug|Win32
- {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.Build.0 = Debug|Win32
- {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.ActiveCfg = Debug-static|x64
- {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.Build.0 = Debug-static|x64
- {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.ActiveCfg = Debug-static|Win32
- {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.Build.0 = Debug-static|Win32
- {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.ActiveCfg = Release|x64
- {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.Build.0 = Release|x64
- {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.ActiveCfg = Release|Win32
- {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.Build.0 = Release|Win32
- {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.ActiveCfg = Release-static|x64
- {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.Build.0 = Release-static|x64
- {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.ActiveCfg = Release-static|Win32
- {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.Build.0 = Release-static|Win32
- {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.ActiveCfg = Debug|x64
- {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.Build.0 = Debug|x64
- {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.ActiveCfg = Debug|Win32
- {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.Build.0 = Debug|Win32
- {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.ActiveCfg = Debug-static|x64
- {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.Build.0 = Debug-static|x64
- {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.ActiveCfg = Debug-static|Win32
- {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.Build.0 = Debug-static|Win32
- {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.ActiveCfg = Release|x64
- {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.Build.0 = Release|x64
- {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.ActiveCfg = Release|Win32
- {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.Build.0 = Release|Win32
- {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.ActiveCfg = Release-static|x64
- {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.Build.0 = Release-static|x64
- {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.ActiveCfg = Release-static|Win32
- {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.Build.0 = Release-static|Win32
- EndGlobalSection
- GlobalSection(SolutionProperties) = preSolution
- HideSolutionNode = FALSE
- EndGlobalSection
-EndGlobal
diff --git a/deps/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj b/deps/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj
deleted file mode 100644
index 8342ab3ab..000000000
--- a/deps/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj
+++ /dev/null
@@ -1,402 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
- <ItemGroup Label="ProjectConfigurations">
- <ProjectConfiguration Include="Debug-static|Win32">
- <Configuration>Debug-static</Configuration>
- <Platform>Win32</Platform>
- </ProjectConfiguration>
- <ProjectConfiguration Include="Debug-static|x64">
- <Configuration>Debug-static</Configuration>
- <Platform>x64</Platform>
- </ProjectConfiguration>
- <ProjectConfiguration Include="Debug|Win32">
- <Configuration>Debug</Configuration>
- <Platform>Win32</Platform>
- </ProjectConfiguration>
- <ProjectConfiguration Include="Release-static|Win32">
- <Configuration>Release-static</Configuration>
- <Platform>Win32</Platform>
- </ProjectConfiguration>
- <ProjectConfiguration Include="Release-static|x64">
- <Configuration>Release-static</Configuration>
- <Platform>x64</Platform>
- </ProjectConfiguration>
- <ProjectConfiguration Include="Release|Win32">
- <Configuration>Release</Configuration>
- <Platform>Win32</Platform>
- </ProjectConfiguration>
- <ProjectConfiguration Include="Debug|x64">
- <Configuration>Debug</Configuration>
- <Platform>x64</Platform>
- </ProjectConfiguration>
- <ProjectConfiguration Include="Release|x64">
- <Configuration>Release</Configuration>
- <Platform>x64</Platform>
- </ProjectConfiguration>
- </ItemGroup>
- <ItemGroup>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\arena.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\assert.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\atomic.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\base.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\bitmap.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk_dss.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk_mmap.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\ckh.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\ctl.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\extent.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\hash.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\huge.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_decls.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_defs.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_macros.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\mb.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\mutex.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\nstime.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\pages.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\ph.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\private_namespace.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\private_unnamespace.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\prng.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\prof.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\public_namespace.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\public_unnamespace.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\ql.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\qr.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\quarantine.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\rb.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\rtree.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\size_classes.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\smoothstep.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\spin.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\stats.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\tcache.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\ticker.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\tsd.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\util.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\witness.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_defs.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_macros.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_mangle.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_protos.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_protos_jet.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_rename.h" />
- <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_typedefs.h" />
- <ClInclude Include="..\..\..\..\include\msvc_compat\C99\stdbool.h" />
- <ClInclude Include="..\..\..\..\include\msvc_compat\C99\stdint.h" />
- <ClInclude Include="..\..\..\..\include\msvc_compat\strings.h" />
- <ClInclude Include="..\..\..\..\include\msvc_compat\windows_extra.h" />
- </ItemGroup>
- <ItemGroup>
- <ClCompile Include="..\..\..\..\src\arena.c" />
- <ClCompile Include="..\..\..\..\src\atomic.c" />
- <ClCompile Include="..\..\..\..\src\base.c" />
- <ClCompile Include="..\..\..\..\src\bitmap.c" />
- <ClCompile Include="..\..\..\..\src\chunk.c" />
- <ClCompile Include="..\..\..\..\src\chunk_dss.c" />
- <ClCompile Include="..\..\..\..\src\chunk_mmap.c" />
- <ClCompile Include="..\..\..\..\src\ckh.c" />
- <ClCompile Include="..\..\..\..\src\ctl.c" />
- <ClCompile Include="..\..\..\..\src\extent.c" />
- <ClCompile Include="..\..\..\..\src\hash.c" />
- <ClCompile Include="..\..\..\..\src\huge.c" />
- <ClCompile Include="..\..\..\..\src\jemalloc.c" />
- <ClCompile Include="..\..\..\..\src\mb.c" />
- <ClCompile Include="..\..\..\..\src\mutex.c" />
- <ClCompile Include="..\..\..\..\src\nstime.c" />
- <ClCompile Include="..\..\..\..\src\pages.c" />
- <ClCompile Include="..\..\..\..\src\prng.c" />
- <ClCompile Include="..\..\..\..\src\prof.c" />
- <ClCompile Include="..\..\..\..\src\quarantine.c" />
- <ClCompile Include="..\..\..\..\src\rtree.c" />
- <ClCompile Include="..\..\..\..\src\spin.c" />
- <ClCompile Include="..\..\..\..\src\stats.c" />
- <ClCompile Include="..\..\..\..\src\tcache.c" />
- <ClCompile Include="..\..\..\..\src\ticker.c" />
- <ClCompile Include="..\..\..\..\src\tsd.c" />
- <ClCompile Include="..\..\..\..\src\util.c" />
- <ClCompile Include="..\..\..\..\src\witness.c" />
- </ItemGroup>
- <PropertyGroup Label="Globals">
- <ProjectGuid>{8D6BB292-9E1C-413D-9F98-4864BDC1514A}</ProjectGuid>
- <Keyword>Win32Proj</Keyword>
- <RootNamespace>jemalloc</RootNamespace>
- <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
- </PropertyGroup>
- <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
- <ConfigurationType>DynamicLibrary</ConfigurationType>
- <UseDebugLibraries>true</UseDebugLibraries>
- <PlatformToolset>v140</PlatformToolset>
- <CharacterSet>MultiByte</CharacterSet>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="Configuration">
- <ConfigurationType>StaticLibrary</ConfigurationType>
- <UseDebugLibraries>true</UseDebugLibraries>
- <PlatformToolset>v140</PlatformToolset>
- <CharacterSet>MultiByte</CharacterSet>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
- <ConfigurationType>DynamicLibrary</ConfigurationType>
- <UseDebugLibraries>false</UseDebugLibraries>
- <PlatformToolset>v140</PlatformToolset>
- <WholeProgramOptimization>true</WholeProgramOptimization>
- <CharacterSet>MultiByte</CharacterSet>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="Configuration">
- <ConfigurationType>StaticLibrary</ConfigurationType>
- <UseDebugLibraries>false</UseDebugLibraries>
- <PlatformToolset>v140</PlatformToolset>
- <WholeProgramOptimization>true</WholeProgramOptimization>
- <CharacterSet>MultiByte</CharacterSet>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
- <ConfigurationType>DynamicLibrary</ConfigurationType>
- <UseDebugLibraries>true</UseDebugLibraries>
- <PlatformToolset>v140</PlatformToolset>
- <CharacterSet>MultiByte</CharacterSet>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="Configuration">
- <ConfigurationType>StaticLibrary</ConfigurationType>
- <UseDebugLibraries>true</UseDebugLibraries>
- <PlatformToolset>v140</PlatformToolset>
- <CharacterSet>MultiByte</CharacterSet>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
- <ConfigurationType>DynamicLibrary</ConfigurationType>
- <UseDebugLibraries>false</UseDebugLibraries>
- <PlatformToolset>v140</PlatformToolset>
- <WholeProgramOptimization>true</WholeProgramOptimization>
- <CharacterSet>MultiByte</CharacterSet>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="Configuration">
- <ConfigurationType>StaticLibrary</ConfigurationType>
- <UseDebugLibraries>false</UseDebugLibraries>
- <PlatformToolset>v140</PlatformToolset>
- <WholeProgramOptimization>true</WholeProgramOptimization>
- <CharacterSet>MultiByte</CharacterSet>
- </PropertyGroup>
- <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
- <ImportGroup Label="ExtensionSettings">
- </ImportGroup>
- <ImportGroup Label="Shared">
- </ImportGroup>
- <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
- </ImportGroup>
- <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="PropertySheets">
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
- </ImportGroup>
- <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
- </ImportGroup>
- <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="PropertySheets">
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
- </ImportGroup>
- <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
- </ImportGroup>
- <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="PropertySheets">
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
- </ImportGroup>
- <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
- </ImportGroup>
- <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="PropertySheets">
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
- </ImportGroup>
- <PropertyGroup Label="UserMacros" />
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
- <IntDir>$(Platform)\$(Configuration)\</IntDir>
- <TargetName>$(ProjectName)d</TargetName>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
- <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
- <IntDir>$(Platform)\$(Configuration)\</IntDir>
- <TargetName>$(ProjectName)-$(PlatformToolset)-$(Configuration)</TargetName>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
- <IntDir>$(Platform)\$(Configuration)\</IntDir>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
- <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
- <IntDir>$(Platform)\$(Configuration)\</IntDir>
- <TargetName>$(ProjectName)-$(PlatformToolset)-$(Configuration)</TargetName>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
- <IntDir>$(Platform)\$(Configuration)\</IntDir>
- <TargetName>$(ProjectName)d</TargetName>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
- <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
- <IntDir>$(Platform)\$(Configuration)\</IntDir>
- <TargetName>$(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration)</TargetName>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
- <IntDir>$(Platform)\$(Configuration)\</IntDir>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
- <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
- <IntDir>$(Platform)\$(Configuration)\</IntDir>
- <TargetName>$(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration)</TargetName>
- </PropertyGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- <ClCompile>
- <PrecompiledHeader>
- </PrecompiledHeader>
- <WarningLevel>Level3</WarningLevel>
- <Optimization>Disabled</Optimization>
- <PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
- <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
- </ClCompile>
- <Link>
- <SubSystem>Windows</SubSystem>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- </Link>
- </ItemDefinitionGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
- <ClCompile>
- <PrecompiledHeader>
- </PrecompiledHeader>
- <WarningLevel>Level3</WarningLevel>
- <Optimization>Disabled</Optimization>
- <PreprocessorDefinitions>JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
- <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
- <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
- </ClCompile>
- <Link>
- <SubSystem>Windows</SubSystem>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- </Link>
- </ItemDefinitionGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- <ClCompile>
- <PrecompiledHeader>
- </PrecompiledHeader>
- <WarningLevel>Level3</WarningLevel>
- <Optimization>Disabled</Optimization>
- <PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
- <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
- </ClCompile>
- <Link>
- <SubSystem>Windows</SubSystem>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- </Link>
- </ItemDefinitionGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
- <ClCompile>
- <PrecompiledHeader>
- </PrecompiledHeader>
- <WarningLevel>Level3</WarningLevel>
- <Optimization>Disabled</Optimization>
- <PreprocessorDefinitions>JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
- <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
- <DebugInformationFormat>OldStyle</DebugInformationFormat>
- <MinimalRebuild>false</MinimalRebuild>
- </ClCompile>
- <Link>
- <SubSystem>Windows</SubSystem>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- </Link>
- </ItemDefinitionGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- <ClCompile>
- <WarningLevel>Level3</WarningLevel>
- <PrecompiledHeader>
- </PrecompiledHeader>
- <Optimization>MaxSpeed</Optimization>
- <FunctionLevelLinking>true</FunctionLevelLinking>
- <IntrinsicFunctions>true</IntrinsicFunctions>
- <PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
- <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
- </ClCompile>
- <Link>
- <SubSystem>Windows</SubSystem>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- <EnableCOMDATFolding>true</EnableCOMDATFolding>
- <OptimizeReferences>true</OptimizeReferences>
- </Link>
- </ItemDefinitionGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
- <ClCompile>
- <WarningLevel>Level3</WarningLevel>
- <PrecompiledHeader>
- </PrecompiledHeader>
- <Optimization>MaxSpeed</Optimization>
- <FunctionLevelLinking>true</FunctionLevelLinking>
- <IntrinsicFunctions>true</IntrinsicFunctions>
- <PreprocessorDefinitions>_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
- <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
- <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
- </ClCompile>
- <Link>
- <SubSystem>Windows</SubSystem>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- <EnableCOMDATFolding>true</EnableCOMDATFolding>
- <OptimizeReferences>true</OptimizeReferences>
- </Link>
- </ItemDefinitionGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- <ClCompile>
- <WarningLevel>Level3</WarningLevel>
- <PrecompiledHeader>
- </PrecompiledHeader>
- <Optimization>MaxSpeed</Optimization>
- <FunctionLevelLinking>true</FunctionLevelLinking>
- <IntrinsicFunctions>true</IntrinsicFunctions>
- <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- <PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
- <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
- </ClCompile>
- <Link>
- <SubSystem>Windows</SubSystem>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- <EnableCOMDATFolding>true</EnableCOMDATFolding>
- <OptimizeReferences>true</OptimizeReferences>
- </Link>
- </ItemDefinitionGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
- <ClCompile>
- <WarningLevel>Level3</WarningLevel>
- <PrecompiledHeader>
- </PrecompiledHeader>
- <Optimization>MaxSpeed</Optimization>
- <FunctionLevelLinking>true</FunctionLevelLinking>
- <IntrinsicFunctions>true</IntrinsicFunctions>
- <PreprocessorDefinitions>_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
- <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
- <DebugInformationFormat>OldStyle</DebugInformationFormat>
- </ClCompile>
- <Link>
- <SubSystem>Windows</SubSystem>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- <EnableCOMDATFolding>true</EnableCOMDATFolding>
- <OptimizeReferences>true</OptimizeReferences>
- </Link>
- </ItemDefinitionGroup>
- <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
- <ImportGroup Label="ExtensionTargets">
- </ImportGroup>
-</Project> \ No newline at end of file
diff --git a/deps/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters b/deps/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters
deleted file mode 100644
index 37f0f02ae..000000000
--- a/deps/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters
+++ /dev/null
@@ -1,272 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
- <ItemGroup>
- <Filter Include="Source Files">
- <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
- <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
- </Filter>
- <Filter Include="Header Files">
- <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
- <Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
- </Filter>
- <Filter Include="Header Files\internal">
- <UniqueIdentifier>{5697dfa3-16cf-4932-b428-6e0ec6e9f98e}</UniqueIdentifier>
- </Filter>
- <Filter Include="Header Files\msvc_compat">
- <UniqueIdentifier>{0cbd2ca6-42a7-4f82-8517-d7e7a14fd986}</UniqueIdentifier>
- </Filter>
- <Filter Include="Header Files\msvc_compat\C99">
- <UniqueIdentifier>{0abe6f30-49b5-46dd-8aca-6e33363fa52c}</UniqueIdentifier>
- </Filter>
- </ItemGroup>
- <ItemGroup>
- <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc.h">
- <Filter>Header Files</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_defs.h">
- <Filter>Header Files</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_macros.h">
- <Filter>Header Files</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_mangle.h">
- <Filter>Header Files</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_protos.h">
- <Filter>Header Files</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_protos_jet.h">
- <Filter>Header Files</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_rename.h">
- <Filter>Header Files</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_typedefs.h">
- <Filter>Header Files</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\arena.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\assert.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\atomic.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\base.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\bitmap.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk_dss.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\chunk_mmap.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\ckh.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\ctl.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\extent.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\hash.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\huge.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_decls.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_defs.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\jemalloc_internal_macros.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\mb.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\mutex.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\nstime.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\pages.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\ph.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\private_namespace.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\private_unnamespace.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\prng.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\prof.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\public_namespace.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\public_unnamespace.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\ql.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\qr.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\quarantine.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\rb.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\rtree.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\size_classes.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\smoothstep.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\spin.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\stats.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\tcache.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\ticker.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\tsd.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\util.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\jemalloc\internal\witness.h">
- <Filter>Header Files\internal</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\msvc_compat\strings.h">
- <Filter>Header Files\msvc_compat</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\msvc_compat\windows_extra.h">
- <Filter>Header Files\msvc_compat</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\msvc_compat\C99\stdbool.h">
- <Filter>Header Files\msvc_compat\C99</Filter>
- </ClInclude>
- <ClInclude Include="..\..\..\..\include\msvc_compat\C99\stdint.h">
- <Filter>Header Files\msvc_compat\C99</Filter>
- </ClInclude>
- </ItemGroup>
- <ItemGroup>
- <ClCompile Include="..\..\..\..\src\arena.c">
- <Filter>Source Files</Filter>
- </ClCompile>
- <ClCompile Include="..\..\..\..\src\atomic.c">
- <Filter>Source Files</Filter>
- </ClCompile>
- <ClCompile Include="..\..\..\..\src\base.c">
- <Filter>Source Files</Filter>
- </ClCompile>
- <ClCompile Include="..\..\..\..\src\bitmap.c">
- <Filter>Source Files</Filter>
- </ClCompile>
- <ClCompile Include="..\..\..\..\src\chunk.c">
- <Filter>Source Files</Filter>
- </ClCompile>
- <ClCompile Include="..\..\..\..\src\chunk_dss.c">
- <Filter>Source Files</Filter>
- </ClCompile>
- <ClCompile Include="..\..\..\..\src\chunk_mmap.c">
- <Filter>Source Files</Filter>
- </ClCompile>
- <ClCompile Include="..\..\..\..\src\ckh.c">
- <Filter>Source Files</Filter>
- </ClCompile>
- <ClCompile Include="..\..\..\..\src\ctl.c">
- <Filter>Source Files</Filter>
- </ClCompile>
- <ClCompile Include="..\..\..\..\src\extent.c">
- <Filter>Source Files</Filter>
- </ClCompile>
- <ClCompile Include="..\..\..\..\src\hash.c">
- <Filter>Source Files</Filter>
- </ClCompile>
- <ClCompile Include="..\..\..\..\src\huge.c">
- <Filter>Source Files</Filter>
- </ClCompile>
- <ClCompile Include="..\..\..\..\src\jemalloc.c">
- <Filter>Source Files</Filter>
- </ClCompile>
- <ClCompile Include="..\..\..\..\src\mb.c">
- <Filter>Source Files</Filter>
- </ClCompile>
- <ClCompile Include="..\..\..\..\src\mutex.c">
- <Filter>Source Files</Filter>
- </ClCompile>
- <ClCompile Include="..\..\..\..\src\nstime.c">
- <Filter>Source Files</Filter>
- </ClCompile>
- <ClCompile Include="..\..\..\..\src\pages.c">
- <Filter>Source Files</Filter>
- </ClCompile>
- <ClCompile Include="..\..\..\..\src\prng.c">
- <Filter>Source Files</Filter>
- </ClCompile>
- <ClCompile Include="..\..\..\..\src\prof.c">
- <Filter>Source Files</Filter>
- </ClCompile>
- <ClCompile Include="..\..\..\..\src\quarantine.c">
- <Filter>Source Files</Filter>
- </ClCompile>
- <ClCompile Include="..\..\..\..\src\rtree.c">
- <Filter>Source Files</Filter>
- </ClCompile>
- <ClCompile Include="..\..\..\..\src\spin.c">
- <Filter>Source Files</Filter>
- </ClCompile>
- <ClCompile Include="..\..\..\..\src\stats.c">
- <Filter>Source Files</Filter>
- </ClCompile>
- <ClCompile Include="..\..\..\..\src\tcache.c">
- <Filter>Source Files</Filter>
- </ClCompile>
- <ClCompile Include="..\..\..\..\src\ticker.c">
- <Filter>Source Files</Filter>
- </ClCompile>
- <ClCompile Include="..\..\..\..\src\tsd.c">
- <Filter>Source Files</Filter>
- </ClCompile>
- <ClCompile Include="..\..\..\..\src\util.c">
- <Filter>Source Files</Filter>
- </ClCompile>
- <ClCompile Include="..\..\..\..\src\witness.c">
- <Filter>Source Files</Filter>
- </ClCompile>
- </ItemGroup>
-</Project>
diff --git a/deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.cpp b/deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.cpp
deleted file mode 100755
index a3d1a792a..000000000
--- a/deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.cpp
+++ /dev/null
@@ -1,89 +0,0 @@
-// jemalloc C++ threaded test
-// Author: Rustam Abdullaev
-// Public Domain
-
-#include <atomic>
-#include <functional>
-#include <future>
-#include <random>
-#include <thread>
-#include <vector>
-#include <stdio.h>
-#include <jemalloc/jemalloc.h>
-
-using std::vector;
-using std::thread;
-using std::uniform_int_distribution;
-using std::minstd_rand;
-
-int test_threads()
-{
- je_malloc_conf = "narenas:3";
- int narenas = 0;
- size_t sz = sizeof(narenas);
- je_mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0);
- if (narenas != 3) {
- printf("Error: unexpected number of arenas: %d\n", narenas);
- return 1;
- }
- static const int sizes[] = { 7, 16, 32, 60, 91, 100, 120, 144, 169, 199, 255, 400, 670, 900, 917, 1025, 3333, 5190, 13131, 49192, 99999, 123123, 255265, 2333111 };
- static const int numSizes = (int)(sizeof(sizes) / sizeof(sizes[0]));
- vector<thread> workers;
- static const int numThreads = narenas + 1, numAllocsMax = 25, numIter1 = 50, numIter2 = 50;
- je_malloc_stats_print(NULL, NULL, NULL);
- size_t allocated1;
- size_t sz1 = sizeof(allocated1);
- je_mallctl("stats.active", (void *)&allocated1, &sz1, NULL, 0);
- printf("\nPress Enter to start threads...\n");
- getchar();
- printf("Starting %d threads x %d x %d iterations...\n", numThreads, numIter1, numIter2);
- for (int i = 0; i < numThreads; i++) {
- workers.emplace_back([tid=i]() {
- uniform_int_distribution<int> sizeDist(0, numSizes - 1);
- minstd_rand rnd(tid * 17);
- uint8_t* ptrs[numAllocsMax];
- int ptrsz[numAllocsMax];
- for (int i = 0; i < numIter1; ++i) {
- thread t([&]() {
- for (int i = 0; i < numIter2; ++i) {
- const int numAllocs = numAllocsMax - sizeDist(rnd);
- for (int j = 0; j < numAllocs; j += 64) {
- const int x = sizeDist(rnd);
- const int sz = sizes[x];
- ptrsz[j] = sz;
- ptrs[j] = (uint8_t*)je_malloc(sz);
- if (!ptrs[j]) {
- printf("Unable to allocate %d bytes in thread %d, iter %d, alloc %d. %d\n", sz, tid, i, j, x);
- exit(1);
- }
- for (int k = 0; k < sz; k++)
- ptrs[j][k] = tid + k;
- }
- for (int j = 0; j < numAllocs; j += 64) {
- for (int k = 0, sz = ptrsz[j]; k < sz; k++)
- if (ptrs[j][k] != (uint8_t)(tid + k)) {
- printf("Memory error in thread %d, iter %d, alloc %d @ %d : %02X!=%02X\n", tid, i, j, k, ptrs[j][k], (uint8_t)(tid + k));
- exit(1);
- }
- je_free(ptrs[j]);
- }
- }
- });
- t.join();
- }
- });
- }
- for (thread& t : workers) {
- t.join();
- }
- je_malloc_stats_print(NULL, NULL, NULL);
- size_t allocated2;
- je_mallctl("stats.active", (void *)&allocated2, &sz1, NULL, 0);
- size_t leaked = allocated2 - allocated1;
- printf("\nDone. Leaked: %zd bytes\n", leaked);
- bool failed = leaked > 65536; // in case C++ runtime allocated something (e.g. iostream locale or facet)
- printf("\nTest %s!\n", (failed ? "FAILED" : "successful"));
- printf("\nPress Enter to continue...\n");
- getchar();
- return failed ? 1 : 0;
-}
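The deleted test_threads.cpp exercised jemalloc from multiple threads, configuring the arena count via je_malloc_conf and sampling "stats.active" through je_mallctl(). A minimal single-threaded sketch of that mallctl() query pattern, assuming a jemalloc build without the je_ symbol prefix and linked with -ljemalloc:

    #include <stdio.h>
    #include <stdlib.h>
    #include <jemalloc/jemalloc.h>

    /* Compile-time option string read by jemalloc at startup. */
    const char *malloc_conf = "narenas:3";

    int
    main(void)
    {
            unsigned narenas = 0;  /* "opt.narenas" is unsigned in jemalloc 4.x. */
            size_t active = 0, sz;

            sz = sizeof(narenas);
            if (mallctl("opt.narenas", &narenas, &sz, NULL, 0) != 0) {
                    fprintf(stderr, "mallctl(\"opt.narenas\") failed\n");
                    return (1);
            }

            void *p = malloc(1024);  /* Served by jemalloc when linked in. */
            sz = sizeof(active);
            mallctl("stats.active", &active, &sz, NULL, 0);
            printf("narenas=%u stats.active=%zu\n", narenas, active);
            free(p);
            return (0);
    }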
diff --git a/deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.h b/deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.h
deleted file mode 100644
index 64d0cdb33..000000000
--- a/deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.h
+++ /dev/null
@@ -1,3 +0,0 @@
-#pragma once
-
-int test_threads();
diff --git a/deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj b/deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj
deleted file mode 100644
index f5e9898f2..000000000
--- a/deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj
+++ /dev/null
@@ -1,327 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
- <ItemGroup Label="ProjectConfigurations">
- <ProjectConfiguration Include="Debug-static|Win32">
- <Configuration>Debug-static</Configuration>
- <Platform>Win32</Platform>
- </ProjectConfiguration>
- <ProjectConfiguration Include="Debug-static|x64">
- <Configuration>Debug-static</Configuration>
- <Platform>x64</Platform>
- </ProjectConfiguration>
- <ProjectConfiguration Include="Debug|Win32">
- <Configuration>Debug</Configuration>
- <Platform>Win32</Platform>
- </ProjectConfiguration>
- <ProjectConfiguration Include="Release-static|Win32">
- <Configuration>Release-static</Configuration>
- <Platform>Win32</Platform>
- </ProjectConfiguration>
- <ProjectConfiguration Include="Release-static|x64">
- <Configuration>Release-static</Configuration>
- <Platform>x64</Platform>
- </ProjectConfiguration>
- <ProjectConfiguration Include="Release|Win32">
- <Configuration>Release</Configuration>
- <Platform>Win32</Platform>
- </ProjectConfiguration>
- <ProjectConfiguration Include="Debug|x64">
- <Configuration>Debug</Configuration>
- <Platform>x64</Platform>
- </ProjectConfiguration>
- <ProjectConfiguration Include="Release|x64">
- <Configuration>Release</Configuration>
- <Platform>x64</Platform>
- </ProjectConfiguration>
- </ItemGroup>
- <PropertyGroup Label="Globals">
- <ProjectGuid>{09028CFD-4EB7-491D-869C-0708DB97ED44}</ProjectGuid>
- <Keyword>Win32Proj</Keyword>
- <RootNamespace>test_threads</RootNamespace>
- <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
- </PropertyGroup>
- <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
- <ConfigurationType>Application</ConfigurationType>
- <UseDebugLibraries>true</UseDebugLibraries>
- <PlatformToolset>v140</PlatformToolset>
- <CharacterSet>MultiByte</CharacterSet>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="Configuration">
- <ConfigurationType>Application</ConfigurationType>
- <UseDebugLibraries>true</UseDebugLibraries>
- <PlatformToolset>v140</PlatformToolset>
- <CharacterSet>MultiByte</CharacterSet>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
- <ConfigurationType>Application</ConfigurationType>
- <UseDebugLibraries>false</UseDebugLibraries>
- <PlatformToolset>v140</PlatformToolset>
- <WholeProgramOptimization>true</WholeProgramOptimization>
- <CharacterSet>MultiByte</CharacterSet>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="Configuration">
- <ConfigurationType>Application</ConfigurationType>
- <UseDebugLibraries>false</UseDebugLibraries>
- <PlatformToolset>v140</PlatformToolset>
- <WholeProgramOptimization>true</WholeProgramOptimization>
- <CharacterSet>MultiByte</CharacterSet>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
- <ConfigurationType>Application</ConfigurationType>
- <UseDebugLibraries>true</UseDebugLibraries>
- <PlatformToolset>v140</PlatformToolset>
- <CharacterSet>MultiByte</CharacterSet>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="Configuration">
- <ConfigurationType>Application</ConfigurationType>
- <UseDebugLibraries>true</UseDebugLibraries>
- <PlatformToolset>v140</PlatformToolset>
- <CharacterSet>MultiByte</CharacterSet>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
- <ConfigurationType>Application</ConfigurationType>
- <UseDebugLibraries>false</UseDebugLibraries>
- <PlatformToolset>v140</PlatformToolset>
- <WholeProgramOptimization>true</WholeProgramOptimization>
- <CharacterSet>MultiByte</CharacterSet>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="Configuration">
- <ConfigurationType>Application</ConfigurationType>
- <UseDebugLibraries>false</UseDebugLibraries>
- <PlatformToolset>v140</PlatformToolset>
- <WholeProgramOptimization>true</WholeProgramOptimization>
- <CharacterSet>MultiByte</CharacterSet>
- </PropertyGroup>
- <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
- <ImportGroup Label="ExtensionSettings">
- </ImportGroup>
- <ImportGroup Label="Shared">
- </ImportGroup>
- <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
- </ImportGroup>
- <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="PropertySheets">
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
- </ImportGroup>
- <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
- </ImportGroup>
- <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="PropertySheets">
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
- </ImportGroup>
- <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
- </ImportGroup>
- <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="PropertySheets">
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
- </ImportGroup>
- <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
- </ImportGroup>
- <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="PropertySheets">
- <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
- </ImportGroup>
- <PropertyGroup Label="UserMacros" />
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
- <IntDir>$(Platform)\$(Configuration)\</IntDir>
- <LinkIncremental>true</LinkIncremental>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
- <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
- <IntDir>$(Platform)\$(Configuration)\</IntDir>
- <LinkIncremental>true</LinkIncremental>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- <LinkIncremental>true</LinkIncremental>
- <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
- <LinkIncremental>true</LinkIncremental>
- <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
- <IntDir>$(Platform)\$(Configuration)\</IntDir>
- <LinkIncremental>false</LinkIncremental>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
- <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
- <IntDir>$(Platform)\$(Configuration)\</IntDir>
- <LinkIncremental>false</LinkIncremental>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
- <IntDir>$(Platform)\$(Configuration)\</IntDir>
- <LinkIncremental>false</LinkIncremental>
- </PropertyGroup>
- <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
- <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
- <IntDir>$(Platform)\$(Configuration)\</IntDir>
- <LinkIncremental>false</LinkIncremental>
- </PropertyGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
- <ClCompile>
- <PrecompiledHeader>
- </PrecompiledHeader>
- <WarningLevel>Level3</WarningLevel>
- <Optimization>Disabled</Optimization>
- <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- </ClCompile>
- <Link>
- <SubSystem>Console</SubSystem>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
- <AdditionalDependencies>jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
- </Link>
- </ItemDefinitionGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
- <ClCompile>
- <PrecompiledHeader>
- </PrecompiledHeader>
- <WarningLevel>Level3</WarningLevel>
- <Optimization>Disabled</Optimization>
- <PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
- </ClCompile>
- <Link>
- <SubSystem>Console</SubSystem>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
- <AdditionalDependencies>jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
- </Link>
- </ItemDefinitionGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
- <ClCompile>
- <PrecompiledHeader>
- </PrecompiledHeader>
- <WarningLevel>Level3</WarningLevel>
- <Optimization>Disabled</Optimization>
- <PreprocessorDefinitions>_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- </ClCompile>
- <Link>
- <SubSystem>Console</SubSystem>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- <AdditionalDependencies>jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
- <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
- </Link>
- </ItemDefinitionGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
- <ClCompile>
- <PrecompiledHeader>
- </PrecompiledHeader>
- <WarningLevel>Level3</WarningLevel>
- <Optimization>Disabled</Optimization>
- <PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
- </ClCompile>
- <Link>
- <SubSystem>Console</SubSystem>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- <AdditionalDependencies>jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
- <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
- </Link>
- </ItemDefinitionGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
- <ClCompile>
- <WarningLevel>Level3</WarningLevel>
- <PrecompiledHeader>
- </PrecompiledHeader>
- <Optimization>MaxSpeed</Optimization>
- <FunctionLevelLinking>true</FunctionLevelLinking>
- <IntrinsicFunctions>true</IntrinsicFunctions>
- <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- </ClCompile>
- <Link>
- <SubSystem>Console</SubSystem>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- <EnableCOMDATFolding>true</EnableCOMDATFolding>
- <OptimizeReferences>true</OptimizeReferences>
- <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
- <AdditionalDependencies>jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
- </Link>
- </ItemDefinitionGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
- <ClCompile>
- <WarningLevel>Level3</WarningLevel>
- <PrecompiledHeader>
- </PrecompiledHeader>
- <Optimization>MaxSpeed</Optimization>
- <FunctionLevelLinking>true</FunctionLevelLinking>
- <IntrinsicFunctions>true</IntrinsicFunctions>
- <PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
- </ClCompile>
- <Link>
- <SubSystem>Console</SubSystem>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- <EnableCOMDATFolding>true</EnableCOMDATFolding>
- <OptimizeReferences>true</OptimizeReferences>
- <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
- <AdditionalDependencies>jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
- </Link>
- </ItemDefinitionGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
- <ClCompile>
- <WarningLevel>Level3</WarningLevel>
- <PrecompiledHeader>
- </PrecompiledHeader>
- <Optimization>MaxSpeed</Optimization>
- <FunctionLevelLinking>true</FunctionLevelLinking>
- <IntrinsicFunctions>true</IntrinsicFunctions>
- <PreprocessorDefinitions>NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- </ClCompile>
- <Link>
- <SubSystem>Console</SubSystem>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- <EnableCOMDATFolding>true</EnableCOMDATFolding>
- <OptimizeReferences>true</OptimizeReferences>
- <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
- <AdditionalDependencies>jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
- </Link>
- </ItemDefinitionGroup>
- <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
- <ClCompile>
- <WarningLevel>Level3</WarningLevel>
- <PrecompiledHeader>
- </PrecompiledHeader>
- <Optimization>MaxSpeed</Optimization>
- <FunctionLevelLinking>true</FunctionLevelLinking>
- <IntrinsicFunctions>true</IntrinsicFunctions>
- <PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
- </ClCompile>
- <Link>
- <SubSystem>Console</SubSystem>
- <GenerateDebugInformation>true</GenerateDebugInformation>
- <EnableCOMDATFolding>true</EnableCOMDATFolding>
- <OptimizeReferences>true</OptimizeReferences>
- <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
- <AdditionalDependencies>jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
- </Link>
- </ItemDefinitionGroup>
- <ItemGroup>
- <ClCompile Include="test_threads.cpp" />
- <ClCompile Include="test_threads_main.cpp" />
- </ItemGroup>
- <ItemGroup>
- <ProjectReference Include="..\jemalloc\jemalloc.vcxproj">
- <Project>{8d6bb292-9e1c-413d-9f98-4864bdc1514a}</Project>
- </ProjectReference>
- </ItemGroup>
- <ItemGroup>
- <ClInclude Include="test_threads.h" />
- </ItemGroup>
- <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
- <ImportGroup Label="ExtensionTargets">
- </ImportGroup>
-</Project>
\ No newline at end of file
diff --git a/deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj.filters b/deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj.filters
deleted file mode 100644
index 4c2334073..000000000
--- a/deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj.filters
+++ /dev/null
@@ -1,26 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
- <ItemGroup>
- <Filter Include="Source Files">
- <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
- <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
- </Filter>
- <Filter Include="Header Files">
- <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
- <Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
- </Filter>
- </ItemGroup>
- <ItemGroup>
- <ClCompile Include="test_threads.cpp">
- <Filter>Source Files</Filter>
- </ClCompile>
- <ClCompile Include="test_threads_main.cpp">
- <Filter>Source Files</Filter>
- </ClCompile>
- </ItemGroup>
- <ItemGroup>
- <ClInclude Include="test_threads.h">
- <Filter>Header Files</Filter>
- </ClInclude>
- </ItemGroup>
-</Project>
\ No newline at end of file
diff --git a/deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads_main.cpp b/deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads_main.cpp
deleted file mode 100644
index ffd96e6ab..000000000
--- a/deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads_main.cpp
+++ /dev/null
@@ -1,12 +0,0 @@
-#include "test_threads.h"
-#include <future>
-#include <functional>
-#include <chrono>
-
-using namespace std::chrono_literals;
-
-int main(int argc, char** argv)
-{
- int rc = test_threads();
- return rc;
-}
diff --git a/deps/jemalloc/src/arena.c b/deps/jemalloc/src/arena.c
index 648a8da3a..3081519cc 100644
--- a/deps/jemalloc/src/arena.c
+++ b/deps/jemalloc/src/arena.c
@@ -4,23 +4,16 @@
/******************************************************************************/
/* Data. */
-purge_mode_t opt_purge = PURGE_DEFAULT;
-const char *purge_mode_names[] = {
- "ratio",
- "decay",
- "N/A"
-};
ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
static ssize_t lg_dirty_mult_default;
-ssize_t opt_decay_time = DECAY_TIME_DEFAULT;
-static ssize_t decay_time_default;
-
arena_bin_info_t arena_bin_info[NBINS];
size_t map_bias;
size_t map_misc_offset;
size_t arena_maxrun; /* Max run size for arenas. */
size_t large_maxclass; /* Max large size class. */
+static size_t small_maxrun; /* Max run size used for small size classes. */
+static bool *small_run_tab; /* Valid small run page multiples. */
unsigned nlclasses; /* Number of large size classes. */
unsigned nhclasses; /* Number of huge size classes. */
@@ -30,57 +23,60 @@ unsigned nhclasses; /* Number of huge size classes. */
* definition.
*/
-static void arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena,
- arena_chunk_t *chunk);
-static void arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena,
- size_t ndirty_limit);
-static void arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run,
- bool dirty, bool cleaned, bool decommitted);
-static void arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena,
- arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
-static void arena_bin_lower_run(arena_t *arena, arena_run_t *run,
- arena_bin_t *bin);
+static void arena_purge(arena_t *arena, bool all);
+static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
+ bool cleaned, bool decommitted);
+static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
+ arena_run_t *run, arena_bin_t *bin);
+static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
+ arena_run_t *run, arena_bin_t *bin);
/******************************************************************************/
-JEMALLOC_INLINE_C size_t
-arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
+#define CHUNK_MAP_KEY ((uintptr_t)0x1U)
+
+JEMALLOC_INLINE_C arena_chunk_map_misc_t *
+arena_miscelm_key_create(size_t size)
{
- arena_chunk_t *chunk;
- size_t pageind, mapbits;
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
- pageind = arena_miscelm_to_pageind(miscelm);
- mapbits = arena_mapbits_get(chunk, pageind);
- return (arena_mapbits_size_decode(mapbits));
+ return ((arena_chunk_map_misc_t *)(arena_mapbits_size_encode(size) |
+ CHUNK_MAP_KEY));
}
-JEMALLOC_INLINE_C const extent_node_t *
-arena_miscelm_extent_get(const arena_chunk_map_misc_t *miscelm)
+JEMALLOC_INLINE_C bool
+arena_miscelm_is_key(const arena_chunk_map_misc_t *miscelm)
{
- arena_chunk_t *chunk;
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
- return (&chunk->node);
+ return (((uintptr_t)miscelm & CHUNK_MAP_KEY) != 0);
}
-JEMALLOC_INLINE_C int
-arena_sn_comp(const arena_chunk_map_misc_t *a, const arena_chunk_map_misc_t *b)
+#undef CHUNK_MAP_KEY
+
+JEMALLOC_INLINE_C size_t
+arena_miscelm_key_size_get(const arena_chunk_map_misc_t *miscelm)
{
- size_t a_sn, b_sn;
- assert(a != NULL);
- assert(b != NULL);
+ assert(arena_miscelm_is_key(miscelm));
+
+ return (arena_mapbits_size_decode((uintptr_t)miscelm));
+}
- a_sn = extent_node_sn_get(arena_miscelm_extent_get(a));
- b_sn = extent_node_sn_get(arena_miscelm_extent_get(b));
+JEMALLOC_INLINE_C size_t
+arena_miscelm_size_get(arena_chunk_map_misc_t *miscelm)
+{
+ arena_chunk_t *chunk;
+ size_t pageind, mapbits;
- return ((a_sn > b_sn) - (a_sn < b_sn));
+ assert(!arena_miscelm_is_key(miscelm));
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
+ pageind = arena_miscelm_to_pageind(miscelm);
+ mapbits = arena_mapbits_get(chunk, pageind);
+ return (arena_mapbits_size_decode(mapbits));
}
JEMALLOC_INLINE_C int
-arena_ad_comp(const arena_chunk_map_misc_t *a,
- const arena_chunk_map_misc_t *b)
+arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
{
uintptr_t a_miscelm = (uintptr_t)a;
uintptr_t b_miscelm = (uintptr_t)b;
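
The CHUNK_MAP_KEY tagging restored in this hunk works by synthesizing a search key as a fake pointer: the desired size is encoded in the upper bits and the low bit is set, something a real (aligned) map element never has, so the comparator can tell keys and elements apart. The following standalone sketch shows the idea only; KEY_TAG, key_create(), is_key() and key_size_get() are illustrative names, and the simple shift-based encoding is an assumption, not the actual arena_mapbits_size_encode() format.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define KEY_TAG	((uintptr_t)0x1U)

static void *
key_create(size_t size)
{
	/* Real map elements are aligned, so their low bit is never set. */
	return ((void *)(((uintptr_t)size << 1) | KEY_TAG));
}

static bool
is_key(const void *elm)
{
	return (((uintptr_t)elm & KEY_TAG) != 0);
}

static size_t
key_size_get(const void *elm)
{
	assert(is_key(elm));
	return ((size_t)((uintptr_t)elm >> 1));
}

int
main(void)
{
	void *key = key_create(8192);
	printf("%d %zu\n", is_key(key), key_size_get(key)); /* prints: 1 8192 */
	return (0);
}
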
@@ -91,79 +87,74 @@ arena_ad_comp(const arena_chunk_map_misc_t *a,
return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
}
-JEMALLOC_INLINE_C int
-arena_snad_comp(const arena_chunk_map_misc_t *a,
- const arena_chunk_map_misc_t *b)
+/* Generate red-black tree functions. */
+rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t,
+ rb_link, arena_run_comp)
+
+static size_t
+run_quantize(size_t size)
{
- int ret;
+ size_t qsize;
- assert(a != NULL);
- assert(b != NULL);
+ assert(size != 0);
+ assert(size == PAGE_CEILING(size));
- ret = arena_sn_comp(a, b);
- if (ret != 0)
- return (ret);
+ /* Don't change sizes that are valid small run sizes. */
+ if (size <= small_maxrun && small_run_tab[size >> LG_PAGE])
+ return (size);
- ret = arena_ad_comp(a, b);
- return (ret);
+ /*
+ * Round down to the nearest run size that can actually be requested
+ * during normal large allocation. Add large_pad so that cache index
+ * randomization can offset the allocation from the page boundary.
+ */
+ qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad;
+ if (qsize <= SMALL_MAXCLASS + large_pad)
+ return (run_quantize(size - large_pad));
+ assert(qsize <= size);
+ return (qsize);
}
-/* Generate pairing heap functions. */
-ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
- ph_link, arena_snad_comp)
-
-#ifdef JEMALLOC_JET
-#undef run_quantize_floor
-#define run_quantize_floor JEMALLOC_N(n_run_quantize_floor)
-#endif
static size_t
-run_quantize_floor(size_t size)
+run_quantize_next(size_t size)
{
- size_t ret;
- pszind_t pind;
-
- assert(size > 0);
- assert(size <= HUGE_MAXCLASS);
- assert((size & PAGE_MASK) == 0);
+ size_t large_run_size_next;
assert(size != 0);
assert(size == PAGE_CEILING(size));
- pind = psz2ind(size - large_pad + 1);
- if (pind == 0) {
- /*
- * Avoid underflow. This short-circuit would also do the right
- * thing for all sizes in the range for which there are
- * PAGE-spaced size classes, but it's simplest to just handle
- * the one case that would cause erroneous results.
- */
- return (size);
+ /*
+ * Return the next quantized size greater than the input size.
+ * Quantized sizes comprise the union of run sizes that back small
+ * region runs, and run sizes that back large regions with no explicit
+ * alignment constraints.
+ */
+
+ if (size > SMALL_MAXCLASS) {
+ large_run_size_next = PAGE_CEILING(index2size(size2index(size -
+ large_pad) + 1) + large_pad);
+ } else
+ large_run_size_next = SIZE_T_MAX;
+ if (size >= small_maxrun)
+ return (large_run_size_next);
+
+ while (true) {
+ size += PAGE;
+ assert(size <= small_maxrun);
+ if (small_run_tab[size >> LG_PAGE]) {
+ if (large_run_size_next < size)
+ return (large_run_size_next);
+ return (size);
+ }
}
- ret = pind2sz(pind - 1) + large_pad;
- assert(ret <= size);
- return (ret);
}
-#ifdef JEMALLOC_JET
-#undef run_quantize_floor
-#define run_quantize_floor JEMALLOC_N(run_quantize_floor)
-run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor);
-#endif
-#ifdef JEMALLOC_JET
-#undef run_quantize_ceil
-#define run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil)
-#endif
static size_t
-run_quantize_ceil(size_t size)
+run_quantize_first(size_t size)
{
- size_t ret;
-
- assert(size > 0);
- assert(size <= HUGE_MAXCLASS);
- assert((size & PAGE_MASK) == 0);
+ size_t qsize = run_quantize(size);
- ret = run_quantize_floor(size);
- if (ret < size) {
+ if (qsize < size) {
/*
* Skip a quantization that may have an adequately large run,
* because under-sized runs may be mixed in. This only happens
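
The restored run_quantize() above has two cases: sizes that are already valid small-run sizes (per small_run_tab) pass through unchanged, and everything else is rounded down to the nearest run size reachable through normal large allocation. The sketch below reproduces only the small-run fast path; the 4 KiB page size, the table contents and the small_run_quantize() name are illustrative assumptions, and the large-run fallback is stubbed out.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define LG_PAGE	12
#define PAGE	((size_t)1 << LG_PAGE)

/* Valid small-run page multiples 0..4 (illustrative). */
static bool small_run_tab[] = { false, true, true, false, true };
static size_t small_maxrun = 4 * PAGE;

static size_t
small_run_quantize(size_t size)
{
	assert(size != 0 && (size & (PAGE - 1)) == 0);
	/* Don't change sizes that are valid small run sizes. */
	if (size <= small_maxrun && small_run_tab[size >> LG_PAGE])
		return (size);
	/* The real code falls through to large-run quantization here. */
	return (0);
}

int
main(void)
{
	printf("%zu\n", small_run_quantize(2 * PAGE)); /* 8192: valid small run */
	printf("%zu\n", small_run_quantize(3 * PAGE)); /* 0: not a valid small run */
	return (0);
}
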
@@ -172,50 +163,72 @@ run_quantize_ceil(size_t size)
* search would potentially find sufficiently aligned available
* memory somewhere lower.
*/
- ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad;
+ qsize = run_quantize_next(size);
+ }
+ return (qsize);
+}
+
+JEMALLOC_INLINE_C int
+arena_avail_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
+{
+ int ret;
+ uintptr_t a_miscelm = (uintptr_t)a;
+ size_t a_qsize = run_quantize(arena_miscelm_is_key(a) ?
+ arena_miscelm_key_size_get(a) : arena_miscelm_size_get(a));
+ size_t b_qsize = run_quantize(arena_miscelm_size_get(b));
+
+ /*
+ * Compare based on quantized size rather than size, in order to sort
+ * equally useful runs only by address.
+ */
+ ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
+ if (ret == 0) {
+ if (!arena_miscelm_is_key(a)) {
+ uintptr_t b_miscelm = (uintptr_t)b;
+
+ ret = (a_miscelm > b_miscelm) - (a_miscelm < b_miscelm);
+ } else {
+ /*
+ * Treat keys as if they are lower than anything else.
+ */
+ ret = -1;
+ }
}
+
return (ret);
}
-#ifdef JEMALLOC_JET
-#undef run_quantize_ceil
-#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
-run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil);
-#endif
+
+/* Generate red-black tree functions. */
+rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t,
+ arena_chunk_map_misc_t, rb_link, arena_avail_comp)
static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages)
{
- pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
- arena_miscelm_get_const(chunk, pageind))));
+
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
- assert((npages << LG_PAGE) < chunksize);
- assert(pind2sz(pind) <= chunksize);
- arena_run_heap_insert(&arena->runs_avail[pind],
- arena_miscelm_get_mutable(chunk, pageind));
+ arena_avail_tree_insert(&arena->runs_avail, arena_miscelm_get(chunk,
+ pageind));
}
static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages)
{
- pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
- arena_miscelm_get_const(chunk, pageind))));
+
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
- assert((npages << LG_PAGE) < chunksize);
- assert(pind2sz(pind) <= chunksize);
- arena_run_heap_remove(&arena->runs_avail[pind],
- arena_miscelm_get_mutable(chunk, pageind));
+ arena_avail_tree_remove(&arena->runs_avail, arena_miscelm_get(chunk,
+ pageind));
}
static void
arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages)
{
- arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
- pageind);
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
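
arena_avail_comp() above orders available runs first by quantized size and only then by address, using the branchless (a > b) - (a < b) three-way comparison, so equally useful runs end up sorted by address. A minimal standalone sketch of that two-level ordering follows; run_key_t, run_key_comp() and the sample values are illustrative, not jemalloc types.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	size_t qsize;   /* quantized run size */
	uintptr_t addr; /* run address, used as tie-breaker */
} run_key_t;

static int
run_key_comp(const run_key_t *a, const run_key_t *b)
{
	int ret = (a->qsize > b->qsize) - (a->qsize < b->qsize);
	if (ret == 0)
		ret = (a->addr > b->addr) - (a->addr < b->addr);
	return (ret);
}

int
main(void)
{
	run_key_t x = {8192, 0x1000}, y = {8192, 0x3000};
	printf("%d\n", run_key_comp(&x, &y)); /* -1: same size, lower address sorts first */
	return (0);
}
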
@@ -232,8 +245,7 @@ static void
arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages)
{
- arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
- pageind);
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
@@ -280,14 +292,14 @@ JEMALLOC_INLINE_C void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
void *ret;
- size_t regind;
+ unsigned regind;
arena_chunk_map_misc_t *miscelm;
void *rpages;
assert(run->nfree > 0);
assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));
- regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
+ regind = bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
miscelm = arena_run_to_miscelm(run);
rpages = arena_miscelm_to_rpages(miscelm);
ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
@@ -304,7 +316,7 @@ arena_run_reg_dalloc(arena_run_t *run, void *ptr)
size_t mapbits = arena_mapbits_get(chunk, pageind);
szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
- size_t regind = arena_run_regind(run, bin_info, ptr);
+ unsigned regind = arena_run_regind(run, bin_info, ptr);
assert(run->nfree < bin_info->nregs);
/* Freeing an interior pointer can cause assertion failure. */
@@ -352,30 +364,16 @@ arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
}
static void
-arena_nactive_add(arena_t *arena, size_t add_pages)
+arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages)
{
if (config_stats) {
- size_t cactive_add = CHUNK_CEILING((arena->nactive +
- add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
+ ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + add_pages
+ - sub_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
LG_PAGE);
- if (cactive_add != 0)
- stats_cactive_add(cactive_add);
- }
- arena->nactive += add_pages;
-}
-
-static void
-arena_nactive_sub(arena_t *arena, size_t sub_pages)
-{
-
- if (config_stats) {
- size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
- CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
- if (cactive_sub != 0)
- stats_cactive_sub(cactive_sub);
+ if (cactive_diff != 0)
+ stats_cactive_add(cactive_diff);
}
- arena->nactive -= sub_pages;
}
static void
@@ -396,7 +394,8 @@ arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
arena_avail_remove(arena, chunk, run_ind, total_pages);
if (flag_dirty != 0)
arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
- arena_nactive_add(arena, need_pages);
+ arena_cactive_update(arena, need_pages, 0);
+ arena->nactive += need_pages;
/* Keep track of trailing unused pages for later use. */
if (rem_pages > 0) {
@@ -568,8 +567,7 @@ arena_chunk_init_spare(arena_t *arena)
}
static bool
-arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- size_t sn, bool zero)
+arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero)
{
/*
@@ -578,67 +576,64 @@ arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
* of runs is tracked individually, and upon chunk deallocation the
* entire chunk is in a consistent commit state.
*/
- extent_node_init(&chunk->node, arena, chunk, chunksize, sn, zero, true);
+ extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
extent_node_achunk_set(&chunk->node, true);
- return (chunk_register(tsdn, chunk, &chunk->node));
+ return (chunk_register(chunk, &chunk->node));
}
static arena_chunk_t *
-arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
+arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ bool *zero, bool *commit)
{
arena_chunk_t *chunk;
- size_t sn;
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
- chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
- NULL, chunksize, chunksize, &sn, zero, commit);
+ chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_hooks, NULL,
+ chunksize, chunksize, zero, commit);
if (chunk != NULL && !*commit) {
/* Commit header. */
if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
LG_PAGE, arena->ind)) {
- chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
- (void *)chunk, chunksize, sn, *zero, *commit);
+ chunk_dalloc_wrapper(arena, chunk_hooks,
+ (void *)chunk, chunksize, *commit);
chunk = NULL;
}
}
- if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, sn,
- *zero)) {
+ if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) {
if (!*commit) {
/* Undo commit of header. */
chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
LG_PAGE, arena->ind);
}
- chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk,
- chunksize, sn, *zero, *commit);
+ chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk,
+ chunksize, *commit);
chunk = NULL;
}
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
return (chunk);
}
static arena_chunk_t *
-arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
- bool *commit)
+arena_chunk_alloc_internal(arena_t *arena, bool *zero, bool *commit)
{
arena_chunk_t *chunk;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
- size_t sn;
- chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
- chunksize, &sn, zero, commit, true);
+ chunk = chunk_alloc_cache(arena, &chunk_hooks, NULL, chunksize,
+ chunksize, zero, true);
if (chunk != NULL) {
- if (arena_chunk_register(tsdn, arena, chunk, sn, *zero)) {
- chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
- chunksize, sn, true);
+ if (arena_chunk_register(arena, chunk, *zero)) {
+ chunk_dalloc_cache(arena, &chunk_hooks, chunk,
+ chunksize, true);
return (NULL);
}
+ *commit = true;
}
if (chunk == NULL) {
- chunk = arena_chunk_alloc_internal_hard(tsdn, arena,
- &chunk_hooks, zero, commit);
+ chunk = arena_chunk_alloc_internal_hard(arena, &chunk_hooks,
+ zero, commit);
}
if (config_stats && chunk != NULL) {
@@ -650,7 +645,7 @@ arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
}
static arena_chunk_t *
-arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
+arena_chunk_init_hard(arena_t *arena)
{
arena_chunk_t *chunk;
bool zero, commit;
@@ -660,16 +655,14 @@ arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
zero = false;
commit = false;
- chunk = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit);
+ chunk = arena_chunk_alloc_internal(arena, &zero, &commit);
if (chunk == NULL)
return (NULL);
- chunk->hugepage = true;
-
/*
* Initialize the map to contain one maximal free untouched run. Mark
- * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed
- * or decommitted chunk.
+ * the pages as zeroed if chunk_alloc() returned a zeroed or decommitted
+ * chunk.
*/
flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
@@ -681,18 +674,17 @@ arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
*/
if (!zero) {
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
- (void *)arena_bitselm_get_const(chunk, map_bias+1),
- (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
- chunk_npages-1) -
- (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
+ (void *)arena_bitselm_get(chunk, map_bias+1),
+ (size_t)((uintptr_t) arena_bitselm_get(chunk,
+ chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk,
+ map_bias+1)));
for (i = map_bias+1; i < chunk_npages-1; i++)
arena_mapbits_internal_set(chunk, i, flag_unzeroed);
} else {
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
- *)arena_bitselm_get_const(chunk, map_bias+1),
- (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
- chunk_npages-1) -
- (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
+ *)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t)
+ arena_bitselm_get(chunk, chunk_npages-1) -
+ (uintptr_t)arena_bitselm_get(chunk, map_bias+1)));
if (config_debug) {
for (i = map_bias+1; i < chunk_npages-1; i++) {
assert(arena_mapbits_unzeroed_get(chunk, i) ==
@@ -707,85 +699,28 @@ arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
}
static arena_chunk_t *
-arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
+arena_chunk_alloc(arena_t *arena)
{
arena_chunk_t *chunk;
if (arena->spare != NULL)
chunk = arena_chunk_init_spare(arena);
else {
- chunk = arena_chunk_init_hard(tsdn, arena);
+ chunk = arena_chunk_init_hard(arena);
if (chunk == NULL)
return (NULL);
}
- ql_elm_new(&chunk->node, ql_link);
- ql_tail_insert(&arena->achunks, &chunk->node, ql_link);
+ /* Insert the run into the runs_avail tree. */
arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);
return (chunk);
}
static void
-arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
-{
- size_t sn, hugepage;
- bool committed;
- chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
-
- chunk_deregister(chunk, &chunk->node);
-
- sn = extent_node_sn_get(&chunk->node);
- hugepage = chunk->hugepage;
- committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
- if (!committed) {
- /*
- * Decommit the header. Mark the chunk as decommitted even if
- * header decommit fails, since treating a partially committed
- * chunk as committed has a high potential for causing later
- * access of decommitted memory.
- */
- chunk_hooks = chunk_hooks_get(tsdn, arena);
- chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
- arena->ind);
- }
- if (!hugepage) {
- /*
- * Convert chunk back to the default state, so that all
- * subsequent chunk allocations start out with chunks that can
- * be backed by transparent huge pages.
- */
- pages_huge(chunk, chunksize);
- }
-
- chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
- sn, committed);
-
- if (config_stats) {
- arena->stats.mapped -= chunksize;
- arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
- }
-}
-
-static void
-arena_spare_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *spare)
+arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
{
- assert(arena->spare != spare);
-
- if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
- arena_run_dirty_remove(arena, spare, map_bias,
- chunk_npages-map_bias);
- }
-
- arena_chunk_discard(tsdn, arena, spare);
-}
-
-static void
-arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
-{
- arena_chunk_t *spare;
-
assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
@@ -797,14 +732,49 @@ arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
arena_mapbits_decommitted_get(chunk, chunk_npages-1));
- /* Remove run from runs_avail, so that the arena does not use it. */
+ /*
+ * Remove run from the runs_avail tree, so that the arena does not use
+ * it.
+ */
arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);
- ql_remove(&arena->achunks, &chunk->node, ql_link);
- spare = arena->spare;
- arena->spare = chunk;
- if (spare != NULL)
- arena_spare_discard(tsdn, arena, spare);
+ if (arena->spare != NULL) {
+ arena_chunk_t *spare = arena->spare;
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+ bool committed;
+
+ arena->spare = chunk;
+ if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
+ arena_run_dirty_remove(arena, spare, map_bias,
+ chunk_npages-map_bias);
+ }
+
+ chunk_deregister(spare, &spare->node);
+
+ committed = (arena_mapbits_decommitted_get(spare, map_bias) ==
+ 0);
+ if (!committed) {
+ /*
+ * Decommit the header. Mark the chunk as decommitted
+ * even if header decommit fails, since treating a
+ * partially committed chunk as committed has a high
+ * potential for causing later access of decommitted
+ * memory.
+ */
+ chunk_hooks = chunk_hooks_get(arena);
+ chunk_hooks.decommit(spare, chunksize, 0, map_bias <<
+ LG_PAGE, arena->ind);
+ }
+
+ chunk_dalloc_cache(arena, &chunk_hooks, (void *)spare,
+ chunksize, committed);
+
+ if (config_stats) {
+ arena->stats.mapped -= chunksize;
+ arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
+ }
+ } else
+ arena->spare = chunk;
}
static void
@@ -847,17 +817,6 @@ arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
}
static void
-arena_huge_reset_stats_cancel(arena_t *arena, size_t usize)
-{
- szind_t index = size2index(usize) - nlclasses - NBINS;
-
- cassert(config_stats);
-
- arena->stats.ndalloc_huge++;
- arena->stats.hstats[index].ndalloc--;
-}
-
-static void
arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
{
szind_t index = size2index(usize) - nlclasses - NBINS;
@@ -888,240 +847,243 @@ arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
}
extent_node_t *
-arena_node_alloc(tsdn_t *tsdn, arena_t *arena)
+arena_node_alloc(arena_t *arena)
{
extent_node_t *node;
- malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
+ malloc_mutex_lock(&arena->node_cache_mtx);
node = ql_last(&arena->node_cache, ql_link);
if (node == NULL) {
- malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
- return (base_alloc(tsdn, sizeof(extent_node_t)));
+ malloc_mutex_unlock(&arena->node_cache_mtx);
+ return (base_alloc(sizeof(extent_node_t)));
}
ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
- malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
+ malloc_mutex_unlock(&arena->node_cache_mtx);
return (node);
}
void
-arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node)
+arena_node_dalloc(arena_t *arena, extent_node_t *node)
{
- malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
+ malloc_mutex_lock(&arena->node_cache_mtx);
ql_elm_new(node, ql_link);
ql_tail_insert(&arena->node_cache, node, ql_link);
- malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
+ malloc_mutex_unlock(&arena->node_cache_mtx);
}
static void *
-arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, size_t *sn,
- bool *zero, size_t csize)
+arena_chunk_alloc_huge_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ size_t usize, size_t alignment, bool *zero, size_t csize)
{
void *ret;
bool commit = true;
- ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
- alignment, sn, zero, &commit);
+ ret = chunk_alloc_wrapper(arena, chunk_hooks, NULL, csize, alignment,
+ zero, &commit);
if (ret == NULL) {
/* Revert optimistic stats updates. */
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
if (config_stats) {
arena_huge_malloc_stats_update_undo(arena, usize);
arena->stats.mapped -= usize;
}
- arena_nactive_sub(arena, usize >> LG_PAGE);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ arena->nactive -= (usize >> LG_PAGE);
+ malloc_mutex_unlock(&arena->lock);
}
return (ret);
}
void *
-arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
- size_t alignment, size_t *sn, bool *zero)
+arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
+ bool *zero)
{
void *ret;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
size_t csize = CHUNK_CEILING(usize);
- bool commit = true;
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
/* Optimistically update stats. */
if (config_stats) {
arena_huge_malloc_stats_update(arena, usize);
arena->stats.mapped += usize;
}
- arena_nactive_add(arena, usize >> LG_PAGE);
+ arena->nactive += (usize >> LG_PAGE);
- ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize,
- alignment, sn, zero, &commit, true);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ ret = chunk_alloc_cache(arena, &chunk_hooks, NULL, csize, alignment,
+ zero, true);
+ malloc_mutex_unlock(&arena->lock);
if (ret == NULL) {
- ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
- usize, alignment, sn, zero, csize);
+ ret = arena_chunk_alloc_huge_hard(arena, &chunk_hooks, usize,
+ alignment, zero, csize);
}
+ if (config_stats && ret != NULL)
+ stats_cactive_add(usize);
return (ret);
}
void
-arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize,
- size_t sn)
+arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize)
{
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
size_t csize;
csize = CHUNK_CEILING(usize);
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
if (config_stats) {
arena_huge_dalloc_stats_update(arena, usize);
arena->stats.mapped -= usize;
+ stats_cactive_sub(usize);
}
- arena_nactive_sub(arena, usize >> LG_PAGE);
+ arena->nactive -= (usize >> LG_PAGE);
- chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, sn, true);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ chunk_dalloc_cache(arena, &chunk_hooks, chunk, csize, true);
+ malloc_mutex_unlock(&arena->lock);
}
void
-arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk,
- size_t oldsize, size_t usize)
+arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize,
+ size_t usize)
{
assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
assert(oldsize != usize);
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
if (config_stats)
arena_huge_ralloc_stats_update(arena, oldsize, usize);
- if (oldsize < usize)
- arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
- else
- arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ if (oldsize < usize) {
+ size_t udiff = usize - oldsize;
+ arena->nactive += udiff >> LG_PAGE;
+ if (config_stats)
+ stats_cactive_add(udiff);
+ } else {
+ size_t udiff = oldsize - usize;
+ arena->nactive -= udiff >> LG_PAGE;
+ if (config_stats)
+ stats_cactive_sub(udiff);
+ }
+ malloc_mutex_unlock(&arena->lock);
}
void
-arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
- size_t oldsize, size_t usize, size_t sn)
+arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize,
+ size_t usize)
{
size_t udiff = oldsize - usize;
size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
if (config_stats) {
arena_huge_ralloc_stats_update(arena, oldsize, usize);
- if (cdiff != 0)
+ if (cdiff != 0) {
arena->stats.mapped -= cdiff;
+ stats_cactive_sub(udiff);
+ }
}
- arena_nactive_sub(arena, udiff >> LG_PAGE);
+ arena->nactive -= udiff >> LG_PAGE;
if (cdiff != 0) {
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
void *nchunk = (void *)((uintptr_t)chunk +
CHUNK_CEILING(usize));
- chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
- sn, true);
+ chunk_dalloc_cache(arena, &chunk_hooks, nchunk, cdiff, true);
}
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
}
static bool
-arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
- size_t *sn, bool *zero, void *nchunk, size_t udiff, size_t cdiff)
+arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *chunk, size_t oldsize, size_t usize, bool *zero, void *nchunk,
+ size_t udiff, size_t cdiff)
{
bool err;
bool commit = true;
- err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
- chunksize, sn, zero, &commit) == NULL);
+ err = (chunk_alloc_wrapper(arena, chunk_hooks, nchunk, cdiff, chunksize,
+ zero, &commit) == NULL);
if (err) {
/* Revert optimistic stats updates. */
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
if (config_stats) {
arena_huge_ralloc_stats_update_undo(arena, oldsize,
usize);
arena->stats.mapped -= cdiff;
}
- arena_nactive_sub(arena, udiff >> LG_PAGE);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ arena->nactive -= (udiff >> LG_PAGE);
+ malloc_mutex_unlock(&arena->lock);
} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
cdiff, true, arena->ind)) {
- chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
- *sn, *zero, true);
+ chunk_dalloc_arena(arena, chunk_hooks, nchunk, cdiff, *zero,
+ true);
err = true;
}
return (err);
}
bool
-arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
- size_t oldsize, size_t usize, bool *zero)
+arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
+ size_t usize, bool *zero)
{
bool err;
- chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
+ chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
size_t udiff = usize - oldsize;
size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
- size_t sn;
- bool commit = true;
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
/* Optimistically update stats. */
if (config_stats) {
arena_huge_ralloc_stats_update(arena, oldsize, usize);
arena->stats.mapped += cdiff;
}
- arena_nactive_add(arena, udiff >> LG_PAGE);
+ arena->nactive += (udiff >> LG_PAGE);
- err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
- chunksize, &sn, zero, &commit, true) == NULL);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ err = (chunk_alloc_cache(arena, &arena->chunk_hooks, nchunk, cdiff,
+ chunksize, zero, true) == NULL);
+ malloc_mutex_unlock(&arena->lock);
if (err) {
- err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena,
- &chunk_hooks, chunk, oldsize, usize, &sn, zero, nchunk,
- udiff, cdiff);
+ err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks,
+ chunk, oldsize, usize, zero, nchunk, udiff,
+ cdiff);
} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
cdiff, true, arena->ind)) {
- chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
- sn, *zero, true);
+ chunk_dalloc_arena(arena, &chunk_hooks, nchunk, cdiff, *zero,
+ true);
err = true;
}
+ if (config_stats && !err)
+ stats_cactive_add(udiff);
return (err);
}
/*
* Do first-best-fit run selection, i.e. select the lowest run that best fits.
- * Run sizes are indexed, so not all candidate runs are necessarily exactly the
- * same size.
+ * Run sizes are quantized, so not all candidate runs are necessarily exactly
+ * the same size.
*/
static arena_run_t *
arena_run_first_best_fit(arena_t *arena, size_t size)
{
- pszind_t pind, i;
-
- pind = psz2ind(run_quantize_ceil(size));
-
- for (i = pind; pind2sz(i) <= chunksize; i++) {
- arena_chunk_map_misc_t *miscelm = arena_run_heap_first(
- &arena->runs_avail[i]);
- if (miscelm != NULL)
- return (&miscelm->run);
- }
-
- return (NULL);
+ size_t search_size = run_quantize_first(size);
+ arena_chunk_map_misc_t *key = arena_miscelm_key_create(search_size);
+ arena_chunk_map_misc_t *miscelm =
+ arena_avail_tree_nsearch(&arena->runs_avail, key);
+ if (miscelm == NULL)
+ return (NULL);
+ return (&miscelm->run);
}
static arena_run_t *
arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{
- arena_run_t *run = arena_run_first_best_fit(arena, size);
+ arena_run_t *run = arena_run_first_best_fit(arena, s2u(size));
if (run != NULL) {
if (arena_run_split_large(arena, run, size, zero))
run = NULL;
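
First-best-fit selection, as described in the comment restored above, picks the lowest-addressed available run whose size is at least the request. The real code does this with arena_avail_tree_nsearch() on the runs_avail red-black tree keyed by (quantized size, address); in the sketch below a sorted array and a linear scan stand in for that tree, and avail_run_t, first_best_fit() and the sample runs are illustrative assumptions.

#include <stddef.h>
#include <stdio.h>

typedef struct {
	size_t qsize;
	unsigned addr;
} avail_run_t;

/* Sorted by (qsize, addr), as arena_avail_comp() would order them. */
static avail_run_t avail[] = {
	{4096, 0x9000}, {8192, 0x2000}, {8192, 0x7000}, {16384, 0x1000}
};

static const avail_run_t *
first_best_fit(size_t size)
{
	size_t i;

	for (i = 0; i < sizeof(avail) / sizeof(avail[0]); i++) {
		if (avail[i].qsize >= size)
			return (&avail[i]);
	}
	return (NULL);
}

int
main(void)
{
	const avail_run_t *run = first_best_fit(8192);
	printf("0x%x\n", run->addr); /* 0x2000: smallest fitting size, lowest address */
	return (0);
}
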
@@ -1130,7 +1092,7 @@ arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
}
static arena_run_t *
-arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
+arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
{
arena_chunk_t *chunk;
arena_run_t *run;
@@ -1146,9 +1108,9 @@ arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
/*
* No usable runs. Create a new chunk from which to allocate the run.
*/
- chunk = arena_chunk_alloc(tsdn, arena);
+ chunk = arena_chunk_alloc(arena);
if (chunk != NULL) {
- run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
+ run = &arena_miscelm_get(chunk, map_bias)->run;
if (arena_run_split_large(arena, run, size, zero))
run = NULL;
return (run);
@@ -1174,7 +1136,7 @@ arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
}
static arena_run_t *
-arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
+arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind)
{
arena_chunk_t *chunk;
arena_run_t *run;
@@ -1191,9 +1153,9 @@ arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
/*
* No usable runs. Create a new chunk from which to allocate the run.
*/
- chunk = arena_chunk_alloc(tsdn, arena);
+ chunk = arena_chunk_alloc(arena);
if (chunk != NULL) {
- run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
+ run = &arena_miscelm_get(chunk, map_bias)->run;
if (arena_run_split_small(arena, run, size, binind))
run = NULL;
return (run);
@@ -1216,239 +1178,42 @@ arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
}
ssize_t
-arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena)
+arena_lg_dirty_mult_get(arena_t *arena)
{
ssize_t lg_dirty_mult;
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
lg_dirty_mult = arena->lg_dirty_mult;
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
return (lg_dirty_mult);
}
bool
-arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult)
+arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult)
{
if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
return (true);
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
arena->lg_dirty_mult = lg_dirty_mult;
- arena_maybe_purge(tsdn, arena);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ arena_maybe_purge(arena);
+ malloc_mutex_unlock(&arena->lock);
return (false);
}
-static void
-arena_decay_deadline_init(arena_t *arena)
-{
-
- assert(opt_purge == purge_mode_decay);
-
- /*
- * Generate a new deadline that is uniformly random within the next
- * epoch after the current one.
- */
- nstime_copy(&arena->decay.deadline, &arena->decay.epoch);
- nstime_add(&arena->decay.deadline, &arena->decay.interval);
- if (arena->decay.time > 0) {
- nstime_t jitter;
-
- nstime_init(&jitter, prng_range_u64(&arena->decay.jitter_state,
- nstime_ns(&arena->decay.interval)));
- nstime_add(&arena->decay.deadline, &jitter);
- }
-}
-
-static bool
-arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
-{
-
- assert(opt_purge == purge_mode_decay);
-
- return (nstime_compare(&arena->decay.deadline, time) <= 0);
-}
-
-static size_t
-arena_decay_backlog_npages_limit(const arena_t *arena)
-{
- static const uint64_t h_steps[] = {
-#define STEP(step, h, x, y) \
- h,
- SMOOTHSTEP
-#undef STEP
- };
- uint64_t sum;
- size_t npages_limit_backlog;
- unsigned i;
-
- assert(opt_purge == purge_mode_decay);
-
- /*
- * For each element of decay_backlog, multiply by the corresponding
- * fixed-point smoothstep decay factor. Sum the products, then divide
- * to round down to the nearest whole number of pages.
- */
- sum = 0;
- for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
- sum += arena->decay.backlog[i] * h_steps[i];
- npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
-
- return (npages_limit_backlog);
-}
-
-static void
-arena_decay_backlog_update_last(arena_t *arena)
-{
- size_t ndirty_delta = (arena->ndirty > arena->decay.ndirty) ?
- arena->ndirty - arena->decay.ndirty : 0;
- arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
-}
-
-static void
-arena_decay_backlog_update(arena_t *arena, uint64_t nadvance_u64)
-{
-
- if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
- memset(arena->decay.backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
- sizeof(size_t));
- } else {
- size_t nadvance_z = (size_t)nadvance_u64;
-
- assert((uint64_t)nadvance_z == nadvance_u64);
-
- memmove(arena->decay.backlog, &arena->decay.backlog[nadvance_z],
- (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
- if (nadvance_z > 1) {
- memset(&arena->decay.backlog[SMOOTHSTEP_NSTEPS -
- nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
- }
- }
-
- arena_decay_backlog_update_last(arena);
-}
-
-static void
-arena_decay_epoch_advance_helper(arena_t *arena, const nstime_t *time)
-{
- uint64_t nadvance_u64;
- nstime_t delta;
-
- assert(opt_purge == purge_mode_decay);
- assert(arena_decay_deadline_reached(arena, time));
-
- nstime_copy(&delta, time);
- nstime_subtract(&delta, &arena->decay.epoch);
- nadvance_u64 = nstime_divide(&delta, &arena->decay.interval);
- assert(nadvance_u64 > 0);
-
- /* Add nadvance_u64 decay intervals to epoch. */
- nstime_copy(&delta, &arena->decay.interval);
- nstime_imultiply(&delta, nadvance_u64);
- nstime_add(&arena->decay.epoch, &delta);
-
- /* Set a new deadline. */
- arena_decay_deadline_init(arena);
-
- /* Update the backlog. */
- arena_decay_backlog_update(arena, nadvance_u64);
-}
-
-static void
-arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena)
-{
- size_t ndirty_limit = arena_decay_backlog_npages_limit(arena);
-
- if (arena->ndirty > ndirty_limit)
- arena_purge_to_limit(tsdn, arena, ndirty_limit);
- arena->decay.ndirty = arena->ndirty;
-}
-
-static void
-arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, const nstime_t *time)
-{
-
- arena_decay_epoch_advance_helper(arena, time);
- arena_decay_epoch_advance_purge(tsdn, arena);
-}
-
-static void
-arena_decay_init(arena_t *arena, ssize_t decay_time)
-{
-
- arena->decay.time = decay_time;
- if (decay_time > 0) {
- nstime_init2(&arena->decay.interval, decay_time, 0);
- nstime_idivide(&arena->decay.interval, SMOOTHSTEP_NSTEPS);
- }
-
- nstime_init(&arena->decay.epoch, 0);
- nstime_update(&arena->decay.epoch);
- arena->decay.jitter_state = (uint64_t)(uintptr_t)arena;
- arena_decay_deadline_init(arena);
- arena->decay.ndirty = arena->ndirty;
- memset(arena->decay.backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
-}
-
-static bool
-arena_decay_time_valid(ssize_t decay_time)
-{
-
- if (decay_time < -1)
- return (false);
- if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX)
- return (true);
- return (false);
-}
-
-ssize_t
-arena_decay_time_get(tsdn_t *tsdn, arena_t *arena)
-{
- ssize_t decay_time;
-
- malloc_mutex_lock(tsdn, &arena->lock);
- decay_time = arena->decay.time;
- malloc_mutex_unlock(tsdn, &arena->lock);
-
- return (decay_time);
-}
-
-bool
-arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time)
-{
-
- if (!arena_decay_time_valid(decay_time))
- return (true);
-
- malloc_mutex_lock(tsdn, &arena->lock);
- /*
- * Restart decay backlog from scratch, which may cause many dirty pages
- * to be immediately purged. It would conceptually be possible to map
- * the old backlog onto the new backlog, but there is no justification
- * for such complexity since decay_time changes are intended to be
- * infrequent, either between the {-1, 0, >0} states, or a one-time
- * arbitrary change during initial arena configuration.
- */
- arena_decay_init(arena, decay_time);
- arena_maybe_purge(tsdn, arena);
- malloc_mutex_unlock(tsdn, &arena->lock);
-
- return (false);
-}
-
-static void
-arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena)
+void
+arena_maybe_purge(arena_t *arena)
{
- assert(opt_purge == purge_mode_ratio);
-
/* Don't purge if the option is disabled. */
if (arena->lg_dirty_mult < 0)
return;
-
+ /* Don't recursively purge. */
+ if (arena->purging)
+ return;
/*
* Iterate, since preventing recursive purging could otherwise leave too
* many dirty pages.
@@ -1463,68 +1228,10 @@ arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena)
*/
if (arena->ndirty <= threshold)
return;
- arena_purge_to_limit(tsdn, arena, threshold);
+ arena_purge(arena, false);
}
}
-static void
-arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena)
-{
- nstime_t time;
-
- assert(opt_purge == purge_mode_decay);
-
- /* Purge all or nothing if the option is disabled. */
- if (arena->decay.time <= 0) {
- if (arena->decay.time == 0)
- arena_purge_to_limit(tsdn, arena, 0);
- return;
- }
-
- nstime_init(&time, 0);
- nstime_update(&time);
- if (unlikely(!nstime_monotonic() && nstime_compare(&arena->decay.epoch,
- &time) > 0)) {
- /*
- * Time went backwards. Move the epoch back in time and
- * generate a new deadline, with the expectation that time
- * typically flows forward for long enough periods of time that
- * epochs complete. Unfortunately, this strategy is susceptible
- * to clock jitter triggering premature epoch advances, but
- * clock jitter estimation and compensation isn't feasible here
- * because calls into this code are event-driven.
- */
- nstime_copy(&arena->decay.epoch, &time);
- arena_decay_deadline_init(arena);
- } else {
- /* Verify that time does not go backwards. */
- assert(nstime_compare(&arena->decay.epoch, &time) <= 0);
- }
-
- /*
- * If the deadline has been reached, advance to the current epoch and
- * purge to the new limit if necessary. Note that dirty pages created
- * during the current epoch are not subject to purge until a future
- * epoch, so as a result purging only happens during epoch advances.
- */
- if (arena_decay_deadline_reached(arena, &time))
- arena_decay_epoch_advance(tsdn, arena, &time);
-}
-
-void
-arena_maybe_purge(tsdn_t *tsdn, arena_t *arena)
-{
-
- /* Don't recursively purge. */
- if (arena->purging)
- return;
-
- if (opt_purge == purge_mode_ratio)
- arena_maybe_purge_ratio(tsdn, arena);
- else
- arena_maybe_purge_decay(tsdn, arena);
-}
-
static size_t
arena_dirty_count(arena_t *arena)
{
@@ -1560,15 +1267,35 @@ arena_dirty_count(arena_t *arena)
}
static size_t
-arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
+arena_compute_npurge(arena_t *arena, bool all)
+{
+ size_t npurge;
+
+ /*
+ * Compute the minimum number of pages that this thread should try to
+ * purge.
+ */
+ if (!all) {
+ size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
+ threshold = threshold < chunk_npages ? chunk_npages : threshold;
+
+ npurge = arena->ndirty - threshold;
+ } else
+ npurge = arena->ndirty;
+
+ return (npurge);
+}
+
+static size_t
+arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all,
+ size_t npurge, arena_runs_dirty_link_t *purge_runs_sentinel,
extent_node_t *purge_chunks_sentinel)
{
arena_runs_dirty_link_t *rdelm, *rdelm_next;
extent_node_t *chunkselm;
size_t nstashed = 0;
- /* Stash runs/chunks according to ndirty_limit. */
+ /* Stash at least npurge pages. */
for (rdelm = qr_next(&arena->runs_dirty, rd_link),
chunkselm = qr_next(&arena->chunks_cache, cc_link);
rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
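
arena_compute_npurge(), restored earlier in this hunk, derives the purge target from the active:dirty ratio: the arena tolerates nactive >> lg_dirty_mult dirty pages (never less than chunk_npages), and everything above that threshold is scheduled for purging. The worked sketch below mirrors that arithmetic with illustrative numbers; note that the caller (arena_maybe_purge) only purges when ndirty exceeds the threshold, so the subtraction cannot underflow.

#include <stddef.h>
#include <stdio.h>

static size_t
compute_npurge(size_t nactive, size_t ndirty, unsigned lg_dirty_mult,
    size_t chunk_npages, int all)
{
	size_t threshold, npurge;

	if (!all) {
		threshold = nactive >> lg_dirty_mult;
		threshold = threshold < chunk_npages ? chunk_npages : threshold;
		npurge = ndirty - threshold;
	} else
		npurge = ndirty;
	return (npurge);
}

int
main(void)
{
	/* 4096 active pages, 1024 dirty, 8:1 ratio (lg_dirty_mult = 3), 512-page chunks. */
	printf("%zu\n", compute_npurge(4096, 1024, 3, 512, 0)); /* 512 pages to purge */
	return (0);
}
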
@@ -1577,32 +1304,24 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
if (rdelm == &chunkselm->rd) {
extent_node_t *chunkselm_next;
- size_t sn;
- bool zero, commit;
+ bool zero;
UNUSED void *chunk;
- npages = extent_node_size_get(chunkselm) >> LG_PAGE;
- if (opt_purge == purge_mode_decay && arena->ndirty -
- (nstashed + npages) < ndirty_limit)
- break;
-
chunkselm_next = qr_next(chunkselm, cc_link);
/*
* Allocate. chunkselm remains valid due to the
* dalloc_node=false argument to chunk_alloc_cache().
*/
zero = false;
- commit = false;
- chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
+ chunk = chunk_alloc_cache(arena, chunk_hooks,
extent_node_addr_get(chunkselm),
- extent_node_size_get(chunkselm), chunksize, &sn,
- &zero, &commit, false);
+ extent_node_size_get(chunkselm), chunksize, &zero,
+ false);
assert(chunk == extent_node_addr_get(chunkselm));
assert(zero == extent_node_zeroed_get(chunkselm));
extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
purge_chunks_sentinel);
- assert(npages == (extent_node_size_get(chunkselm) >>
- LG_PAGE));
+ npages = extent_node_size_get(chunkselm) >> LG_PAGE;
chunkselm = chunkselm_next;
} else {
arena_chunk_t *chunk =
@@ -1615,9 +1334,6 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
arena_mapbits_unallocated_size_get(chunk, pageind);
npages = run_size >> LG_PAGE;
- if (opt_purge == purge_mode_decay && arena->ndirty -
- (nstashed + npages) < ndirty_limit)
- break;
assert(pageind + npages <= chunk_npages);
assert(arena_mapbits_dirty_get(chunk, pageind) ==
@@ -1628,7 +1344,7 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
* prior to allocation.
*/
if (chunk == arena->spare)
- arena_chunk_alloc(tsdn, arena);
+ arena_chunk_alloc(arena);
/* Temporarily allocate the free dirty run. */
arena_run_split_large(arena, run, run_size, false);
@@ -1643,8 +1359,7 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
}
nstashed += npages;
- if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
- ndirty_limit)
+ if (!all && nstashed >= npurge)
break;
}
@@ -1652,7 +1367,7 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
}
static size_t
-arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
arena_runs_dirty_link_t *purge_runs_sentinel,
extent_node_t *purge_chunks_sentinel)
{
@@ -1664,7 +1379,7 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
nmadvise = 0;
npurged = 0;
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
for (rdelm = qr_next(purge_runs_sentinel, rd_link),
chunkselm = qr_next(purge_chunks_sentinel, cc_link);
rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
@@ -1693,17 +1408,6 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
run_size = arena_mapbits_large_size_get(chunk, pageind);
npages = run_size >> LG_PAGE;
- /*
- * If this is the first run purged within chunk, mark
- * the chunk as non-huge. This will prevent all use of
- * transparent huge pages for this chunk until the chunk
- * as a whole is deallocated.
- */
- if (chunk->hugepage) {
- pages_nohuge(chunk, chunksize);
- chunk->hugepage = false;
- }
-
assert(pageind + npages <= chunk_npages);
assert(!arena_mapbits_decommitted_get(chunk, pageind));
assert(!arena_mapbits_decommitted_get(chunk,
@@ -1714,7 +1418,7 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
flag_unzeroed = 0;
flags = CHUNK_MAP_DECOMMITTED;
} else {
- flag_unzeroed = chunk_purge_wrapper(tsdn, arena,
+ flag_unzeroed = chunk_purge_wrapper(arena,
chunk_hooks, chunk, chunksize, pageind <<
LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
flags = flag_unzeroed;
@@ -1745,7 +1449,7 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
if (config_stats)
nmadvise++;
}
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
if (config_stats) {
arena->stats.nmadvise += nmadvise;
@@ -1756,7 +1460,7 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
}
static void
-arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
arena_runs_dirty_link_t *purge_runs_sentinel,
extent_node_t *purge_chunks_sentinel)
{
@@ -1773,14 +1477,13 @@ arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
cc_link);
void *addr = extent_node_addr_get(chunkselm);
size_t size = extent_node_size_get(chunkselm);
- size_t sn = extent_node_sn_get(chunkselm);
bool zeroed = extent_node_zeroed_get(chunkselm);
bool committed = extent_node_committed_get(chunkselm);
extent_node_dirty_remove(chunkselm);
- arena_node_dalloc(tsdn, arena, chunkselm);
+ arena_node_dalloc(arena, chunkselm);
chunkselm = chunkselm_next;
- chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
- size, sn, zeroed, committed);
+ chunk_dalloc_arena(arena, chunk_hooks, addr, size,
+ zeroed, committed);
} else {
arena_chunk_t *chunk =
(arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
@@ -1791,26 +1494,16 @@ arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
pageind) != 0);
arena_run_t *run = &miscelm->run;
qr_remove(rdelm, rd_link);
- arena_run_dalloc(tsdn, arena, run, false, true,
- decommitted);
+ arena_run_dalloc(arena, run, false, true, decommitted);
}
}
}
-/*
- * NB: ndirty_limit is interpreted differently depending on opt_purge:
- * - purge_mode_ratio: Purge as few dirty run/chunks as possible to reach the
- * desired state:
- * (arena->ndirty <= ndirty_limit)
- * - purge_mode_decay: Purge as many dirty runs/chunks as possible without
- * violating the invariant:
- * (arena->ndirty >= ndirty_limit)
- */
static void
-arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
+arena_purge(arena_t *arena, bool all)
{
- chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
- size_t npurge, npurged;
+ chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
+ size_t npurge, npurgeable, npurged;
arena_runs_dirty_link_t purge_runs_sentinel;
extent_node_t purge_chunks_sentinel;
@@ -1824,183 +1517,34 @@ arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
size_t ndirty = arena_dirty_count(arena);
assert(ndirty == arena->ndirty);
}
- assert(opt_purge != purge_mode_ratio || (arena->nactive >>
- arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);
+ assert((arena->nactive >> arena->lg_dirty_mult) < arena->ndirty || all);
+
+ if (config_stats)
+ arena->stats.npurge++;
+ npurge = arena_compute_npurge(arena, all);
qr_new(&purge_runs_sentinel, rd_link);
extent_node_dirty_linkage_init(&purge_chunks_sentinel);
- npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit,
- &purge_runs_sentinel, &purge_chunks_sentinel);
- if (npurge == 0)
- goto label_return;
- npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks,
+ npurgeable = arena_stash_dirty(arena, &chunk_hooks, all, npurge,
&purge_runs_sentinel, &purge_chunks_sentinel);
- assert(npurged == npurge);
- arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel,
+ assert(npurgeable >= npurge);
+ npurged = arena_purge_stashed(arena, &chunk_hooks, &purge_runs_sentinel,
+ &purge_chunks_sentinel);
+ assert(npurged == npurgeable);
+ arena_unstash_purged(arena, &chunk_hooks, &purge_runs_sentinel,
&purge_chunks_sentinel);
- if (config_stats)
- arena->stats.npurge++;
-
-label_return:
arena->purging = false;
}
void
-arena_purge(tsdn_t *tsdn, arena_t *arena, bool all)
+arena_purge_all(arena_t *arena)
{
- malloc_mutex_lock(tsdn, &arena->lock);
- if (all)
- arena_purge_to_limit(tsdn, arena, 0);
- else
- arena_maybe_purge(tsdn, arena);
- malloc_mutex_unlock(tsdn, &arena->lock);
-}
-
-static void
-arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
-{
- size_t pageind, npages;
-
- cassert(config_prof);
- assert(opt_prof);
-
- /*
- * Iterate over the allocated runs and remove profiled allocations from
- * the sample set.
- */
- for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
- if (arena_mapbits_allocated_get(chunk, pageind) != 0) {
- if (arena_mapbits_large_get(chunk, pageind) != 0) {
- void *ptr = (void *)((uintptr_t)chunk + (pageind
- << LG_PAGE));
- size_t usize = isalloc(tsd_tsdn(tsd), ptr,
- config_prof);
-
- prof_free(tsd, ptr, usize);
- npages = arena_mapbits_large_size_get(chunk,
- pageind) >> LG_PAGE;
- } else {
- /* Skip small run. */
- size_t binind = arena_mapbits_binind_get(chunk,
- pageind);
- arena_bin_info_t *bin_info =
- &arena_bin_info[binind];
- npages = bin_info->run_size >> LG_PAGE;
- }
- } else {
- /* Skip unallocated run. */
- npages = arena_mapbits_unallocated_size_get(chunk,
- pageind) >> LG_PAGE;
- }
- assert(pageind + npages <= chunk_npages);
- }
-}
-
-void
-arena_reset(tsd_t *tsd, arena_t *arena)
-{
- unsigned i;
- extent_node_t *node;
-
- /*
- * Locking in this function is unintuitive. The caller guarantees that
- * no concurrent operations are happening in this arena, but there are
- * still reasons that some locking is necessary:
- *
- * - Some of the functions in the transitive closure of calls assume
- * appropriate locks are held, and in some cases these locks are
- * temporarily dropped to avoid lock order reversal or deadlock due to
- * reentry.
- * - mallctl("epoch", ...) may concurrently refresh stats. While
- * strictly speaking this is a "concurrent operation", disallowing
- * stats refreshes would impose an inconvenient burden.
- */
-
- /* Remove large allocations from prof sample set. */
- if (config_prof && opt_prof) {
- ql_foreach(node, &arena->achunks, ql_link) {
- arena_achunk_prof_reset(tsd, arena,
- extent_node_addr_get(node));
- }
- }
-
- /* Reset curruns for large size classes. */
- if (config_stats) {
- for (i = 0; i < nlclasses; i++)
- arena->stats.lstats[i].curruns = 0;
- }
-
- /* Huge allocations. */
- malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
- for (node = ql_last(&arena->huge, ql_link); node != NULL; node =
- ql_last(&arena->huge, ql_link)) {
- void *ptr = extent_node_addr_get(node);
- size_t usize;
-
- malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
- if (config_stats || (config_prof && opt_prof))
- usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
- /* Remove huge allocation from prof sample set. */
- if (config_prof && opt_prof)
- prof_free(tsd, ptr, usize);
- huge_dalloc(tsd_tsdn(tsd), ptr);
- malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
- /* Cancel out unwanted effects on stats. */
- if (config_stats)
- arena_huge_reset_stats_cancel(arena, usize);
- }
- malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
-
- malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
-
- /* Bins. */
- for (i = 0; i < NBINS; i++) {
- arena_bin_t *bin = &arena->bins[i];
- malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
- bin->runcur = NULL;
- arena_run_heap_new(&bin->runs);
- if (config_stats) {
- bin->stats.curregs = 0;
- bin->stats.curruns = 0;
- }
- malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
- }
-
- /*
- * Re-initialize runs_dirty such that the chunks_cache and runs_dirty
- * chains directly correspond.
- */
- qr_new(&arena->runs_dirty, rd_link);
- for (node = qr_next(&arena->chunks_cache, cc_link);
- node != &arena->chunks_cache; node = qr_next(node, cc_link)) {
- qr_new(&node->rd, rd_link);
- qr_meld(&arena->runs_dirty, &node->rd, rd_link);
- }
-
- /* Arena chunks. */
- for (node = ql_last(&arena->achunks, ql_link); node != NULL; node =
- ql_last(&arena->achunks, ql_link)) {
- ql_remove(&arena->achunks, node, ql_link);
- arena_chunk_discard(tsd_tsdn(tsd), arena,
- extent_node_addr_get(node));
- }
-
- /* Spare. */
- if (arena->spare != NULL) {
- arena_chunk_discard(tsd_tsdn(tsd), arena, arena->spare);
- arena->spare = NULL;
- }
-
- assert(!arena->purging);
- arena->nactive = 0;
-
- for (i = 0; i < NPSIZES; i++)
- arena_run_heap_new(&arena->runs_avail[i]);
-
- malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
+ malloc_mutex_lock(&arena->lock);
+ arena_purge(arena, true);
+ malloc_mutex_unlock(&arena->lock);
}
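
Aside: the restored purge path is a three-phase pipeline -- arena_stash_dirty() moves dirty runs/chunks onto private sentinel lists while the arena lock is held, arena_purge_stashed() drops the lock around the (potentially slow) purge calls, and arena_unstash_purged() reacquires it to hand the memory back. A compilable skeleton of that locking shape, with hypothetical names and the lock drop flattened into the caller:

    #include <pthread.h>

    typedef struct {
        pthread_mutex_t lock;
        /* dirty-run bookkeeping elided */
    } arena_sketch_t;

    static void
    purge_all_sketch(arena_sketch_t *arena)
    {
        pthread_mutex_lock(&arena->lock);
        /* Phase 1: stash -- move dirty runs/chunks onto sentinel lists. */
        pthread_mutex_unlock(&arena->lock);
        /* Phase 2: purge -- madvise() each stashed run; may block, so unlocked. */
        pthread_mutex_lock(&arena->lock);
        /* Phase 3: unstash -- return purged runs/chunks to the arena. */
        pthread_mutex_unlock(&arena->lock);
    }

    int
    main(void)
    {
        arena_sketch_t arena;

        pthread_mutex_init(&arena.lock, NULL);
        purge_all_sketch(&arena);
        pthread_mutex_destroy(&arena.lock);
        return (0);
    }
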
static void
@@ -2116,9 +1660,21 @@ arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
return (size);
}
+static bool
+arena_run_decommit(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run)
+{
+ arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
+ size_t run_ind = arena_miscelm_to_pageind(miscelm);
+ size_t offset = run_ind << LG_PAGE;
+ size_t length = arena_run_size_get(arena, chunk, run, run_ind);
+
+ return (arena->chunk_hooks.decommit(chunk, chunksize, offset, length,
+ arena->ind));
+}
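
Aside: runs are addressed by page index within their chunk, so the offset and length passed to the decommit hook above are just page counts shifted by LG_PAGE. A standalone illustration of that address arithmetic, assuming the usual 4 KiB pages and 2 MiB chunks; the address is hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    #define LG_PAGE  12
    #define LG_CHUNK 21
    #define CHUNK_ADDR2BASE(p) ((uintptr_t)(p) & ~((1UL << LG_CHUNK) - 1))

    int
    main(void)
    {
        uintptr_t ptr = 0x7f1234605000;             /* hypothetical address */
        uintptr_t chunk = CHUNK_ADDR2BASE(ptr);     /* containing chunk base */
        size_t pageind = (ptr - chunk) >> LG_PAGE;  /* page index in chunk */
        size_t offset = pageind << LG_PAGE;         /* byte offset for the hook */

        printf("chunk %#lx, pageind %zu, offset %#zx\n",
            (unsigned long)chunk, pageind, offset);
        return (0);
    }
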
+
static void
-arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty,
- bool cleaned, bool decommitted)
+arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned,
+ bool decommitted)
{
arena_chunk_t *chunk;
arena_chunk_map_misc_t *miscelm;
@@ -2131,7 +1687,8 @@ arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty,
assert(run_ind < chunk_npages);
size = arena_run_size_get(arena, chunk, run, run_ind);
run_pages = (size >> LG_PAGE);
- arena_nactive_sub(arena, run_pages);
+ arena_cactive_update(arena, 0, run_pages);
+ arena->nactive -= run_pages;
/*
* The run is dirty if the caller claims to have dirtied it, as well as
@@ -2178,7 +1735,7 @@ arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty,
if (size == arena_maxrun) {
assert(run_ind == map_bias);
assert(run_pages == (arena_maxrun >> LG_PAGE));
- arena_chunk_dalloc(tsdn, arena, chunk);
+ arena_chunk_dalloc(arena, chunk);
}
/*
@@ -2189,12 +1746,21 @@ arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty,
* chances of spuriously crossing the dirty page purging threshold.
*/
if (dirty)
- arena_maybe_purge(tsdn, arena);
+ arena_maybe_purge(arena);
}
static void
-arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- arena_run_t *run, size_t oldsize, size_t newsize)
+arena_run_dalloc_decommit(arena_t *arena, arena_chunk_t *chunk,
+ arena_run_t *run)
+{
+ bool committed = arena_run_decommit(arena, chunk, run);
+
+ arena_run_dalloc(arena, run, committed, false, !committed);
+}
+
+static void
+arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+ size_t oldsize, size_t newsize)
{
arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
size_t pageind = arena_miscelm_to_pageind(miscelm);
@@ -2229,13 +1795,12 @@ arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
pageind+head_npages)));
- arena_run_dalloc(tsdn, arena, run, false, false, (flag_decommitted !=
- 0));
+ arena_run_dalloc(arena, run, false, false, (flag_decommitted != 0));
}
static void
-arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- arena_run_t *run, size_t oldsize, size_t newsize, bool dirty)
+arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+ size_t oldsize, size_t newsize, bool dirty)
{
arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
size_t pageind = arena_miscelm_to_pageind(miscelm);
@@ -2272,10 +1837,20 @@ arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
pageind+head_npages)));
- tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages);
+ tail_miscelm = arena_miscelm_get(chunk, pageind + head_npages);
tail_run = &tail_miscelm->run;
- arena_run_dalloc(tsdn, arena, tail_run, dirty, false, (flag_decommitted
- != 0));
+ arena_run_dalloc(arena, tail_run, dirty, false, (flag_decommitted !=
+ 0));
+}
+
+static arena_run_t *
+arena_bin_runs_first(arena_bin_t *bin)
+{
+ arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs);
+ if (miscelm != NULL)
+ return (&miscelm->run);
+
+ return (NULL);
}
static void
@@ -2283,25 +1858,35 @@ arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
{
arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
- arena_run_heap_insert(&bin->runs, miscelm);
+ assert(arena_run_tree_search(&bin->runs, miscelm) == NULL);
+
+ arena_run_tree_insert(&bin->runs, miscelm);
}
-static arena_run_t *
-arena_bin_nonfull_run_tryget(arena_bin_t *bin)
+static void
+arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
{
- arena_chunk_map_misc_t *miscelm;
+ arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
- miscelm = arena_run_heap_remove_first(&bin->runs);
- if (miscelm == NULL)
- return (NULL);
- if (config_stats)
- bin->stats.reruns++;
+ assert(arena_run_tree_search(&bin->runs, miscelm) != NULL);
- return (&miscelm->run);
+ arena_run_tree_remove(&bin->runs, miscelm);
+}
+
+static arena_run_t *
+arena_bin_nonfull_run_tryget(arena_bin_t *bin)
+{
+ arena_run_t *run = arena_bin_runs_first(bin);
+ if (run != NULL) {
+ arena_bin_runs_remove(bin, run);
+ if (config_stats)
+ bin->stats.reruns++;
+ }
+ return (run);
}
static arena_run_t *
-arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
+arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
{
arena_run_t *run;
szind_t binind;
@@ -2317,19 +1902,19 @@ arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
bin_info = &arena_bin_info[binind];
/* Allocate a new run. */
- malloc_mutex_unlock(tsdn, &bin->lock);
+ malloc_mutex_unlock(&bin->lock);
/******************************/
- malloc_mutex_lock(tsdn, &arena->lock);
- run = arena_run_alloc_small(tsdn, arena, bin_info->run_size, binind);
+ malloc_mutex_lock(&arena->lock);
+ run = arena_run_alloc_small(arena, bin_info->run_size, binind);
if (run != NULL) {
/* Initialize run internals. */
run->binind = binind;
run->nfree = bin_info->nregs;
bitmap_init(run->bitmap, &bin_info->bitmap_info);
}
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
/********************************/
- malloc_mutex_lock(tsdn, &bin->lock);
+ malloc_mutex_lock(&bin->lock);
if (run != NULL) {
if (config_stats) {
bin->stats.nruns++;
@@ -2352,7 +1937,7 @@ arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
static void *
-arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
+arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
{
szind_t binind;
arena_bin_info_t *bin_info;
@@ -2361,7 +1946,7 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
binind = arena_bin_index(arena, bin);
bin_info = &arena_bin_info[binind];
bin->runcur = NULL;
- run = arena_bin_nonfull_run_get(tsdn, arena, bin);
+ run = arena_bin_nonfull_run_get(arena, bin);
if (bin->runcur != NULL && bin->runcur->nfree > 0) {
/*
* Another thread updated runcur while this one ran without the
@@ -2382,11 +1967,10 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
* were just deallocated from the run.
*/
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- if (run->nfree == bin_info->nregs) {
- arena_dalloc_bin_run(tsdn, arena, chunk, run,
- bin);
- } else
- arena_bin_lower_run(arena, run, bin);
+ if (run->nfree == bin_info->nregs)
+ arena_dalloc_bin_run(arena, chunk, run, bin);
+ else
+ arena_bin_lower_run(arena, chunk, run, bin);
}
return (ret);
}
@@ -2402,18 +1986,18 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
}
void
-arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
- szind_t binind, uint64_t prof_accumbytes)
+arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind,
+ uint64_t prof_accumbytes)
{
unsigned i, nfill;
arena_bin_t *bin;
assert(tbin->ncached == 0);
- if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes))
- prof_idump(tsdn);
+ if (config_prof && arena_prof_accum(arena, prof_accumbytes))
+ prof_idump();
bin = &arena->bins[binind];
- malloc_mutex_lock(tsdn, &bin->lock);
+ malloc_mutex_lock(&bin->lock);
for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
tbin->lg_fill_div); i < nfill; i++) {
arena_run_t *run;
@@ -2421,15 +2005,16 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
if ((run = bin->runcur) != NULL && run->nfree > 0)
ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
else
- ptr = arena_bin_malloc_hard(tsdn, arena, bin);
+ ptr = arena_bin_malloc_hard(arena, bin);
if (ptr == NULL) {
/*
* OOM. tbin->avail isn't yet filled down to its first
* element, so the successful allocations (if any) must
- * be moved just before tbin->avail before bailing out.
+ * be moved to the base of tbin->avail before bailing
+ * out.
*/
if (i > 0) {
- memmove(tbin->avail - i, tbin->avail - nfill,
+ memmove(tbin->avail, &tbin->avail[nfill - i],
i * sizeof(void *));
}
break;
@@ -2439,7 +2024,7 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
true);
}
/* Insert such that low regions get used first. */
- *(tbin->avail - nfill + i) = ptr;
+ tbin->avail[nfill - 1 - i] = ptr;
}
if (config_stats) {
bin->stats.nmalloc += i;
@@ -2448,31 +2033,29 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
bin->stats.nfills++;
tbin->tstats.nrequests = 0;
}
- malloc_mutex_unlock(tsdn, &bin->lock);
+ malloc_mutex_unlock(&bin->lock);
tbin->ncached = i;
- arena_decay_tick(tsdn, arena);
}
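
Aside: the reverted fill loop above writes freshly allocated regions from the top of tbin->avail downward (avail[nfill - 1 - i]) so that the lowest-addressed regions are handed out first, and on OOM it slides the partial fill down to the base of the array. A toy, self-contained version of that indexing; the addresses are made up:

    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        void *avail[8] = {0};
        void *regions[3] = {(void *)0x1000, (void *)0x2000, (void *)0x3000};
        unsigned nfill = 8, i;

        for (i = 0; i < nfill; i++) {
            if (i == 3) {    /* simulate OOM after three regions */
                memmove(avail, &avail[nfill - i], i * sizeof(void *));
                break;
            }
            avail[nfill - 1 - i] = regions[i];    /* fill from the top down */
        }
        for (i = 0; i < 3; i++)
            printf("avail[%u] = %p\n", i, avail[i]);
        return (0);
    }
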
void
arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
{
- size_t redzone_size = bin_info->redzone_size;
-
if (zero) {
- memset((void *)((uintptr_t)ptr - redzone_size),
- JEMALLOC_ALLOC_JUNK, redzone_size);
- memset((void *)((uintptr_t)ptr + bin_info->reg_size),
- JEMALLOC_ALLOC_JUNK, redzone_size);
+ size_t redzone_size = bin_info->redzone_size;
+ memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
+ redzone_size);
+ memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
+ redzone_size);
} else {
- memset((void *)((uintptr_t)ptr - redzone_size),
- JEMALLOC_ALLOC_JUNK, bin_info->reg_interval);
+ memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
+ bin_info->reg_interval);
}
}
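
Aside: the revert goes back to literal junk bytes -- 0xa5 painted into redzones and newly allocated regions, 0x5a on free -- which arena_redzones_validate() later re-checks to catch out-of-bounds writes (the replaced code spelled the same values JEMALLOC_ALLOC_JUNK/JEMALLOC_FREE_JUNK). A minimal, self-contained illustration of the check:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define REDZONE 16
    #define REGION  32

    int
    main(void)
    {
        uint8_t buf[REDZONE + REGION + REDZONE];
        uint8_t *ptr = buf + REDZONE;    /* region, flanked by two redzones */
        size_t i;
        int corrupt = 0;

        memset(buf, 0xa5, sizeof(buf));  /* paint redzones (and region) */
        memset(ptr, 0, REGION);          /* application uses its region */
        ptr[REGION] = 0;                 /* simulated one-byte overflow */

        for (i = 0; i < REDZONE; i++) {
            if (*(ptr - 1 - i) != 0xa5 || ptr[REGION + i] != 0xa5)
                corrupt = 1;
        }
        printf("redzone %s\n", corrupt ? "corrupted" : "intact");
        return (0);
    }
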
#ifdef JEMALLOC_JET
#undef arena_redzone_corruption
-#define arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption)
+#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl)
#endif
static void
arena_redzone_corruption(void *ptr, size_t usize, bool after,
@@ -2487,7 +2070,7 @@ arena_redzone_corruption(void *ptr, size_t usize, bool after,
#undef arena_redzone_corruption
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
arena_redzone_corruption_t *arena_redzone_corruption =
- JEMALLOC_N(n_arena_redzone_corruption);
+ JEMALLOC_N(arena_redzone_corruption_impl);
#endif
static void
@@ -2502,22 +2085,22 @@ arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
for (i = 1; i <= redzone_size; i++) {
uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
- if (*byte != JEMALLOC_ALLOC_JUNK) {
+ if (*byte != 0xa5) {
error = true;
arena_redzone_corruption(ptr, size, false, i,
*byte);
if (reset)
- *byte = JEMALLOC_ALLOC_JUNK;
+ *byte = 0xa5;
}
}
for (i = 0; i < redzone_size; i++) {
uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
- if (*byte != JEMALLOC_ALLOC_JUNK) {
+ if (*byte != 0xa5) {
error = true;
arena_redzone_corruption(ptr, size, true, i,
*byte);
if (reset)
- *byte = JEMALLOC_ALLOC_JUNK;
+ *byte = 0xa5;
}
}
}
@@ -2528,7 +2111,7 @@ arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small
-#define arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small)
+#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl)
#endif
void
arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
@@ -2536,14 +2119,14 @@ arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
size_t redzone_size = bin_info->redzone_size;
arena_redzones_validate(ptr, bin_info, false);
- memset((void *)((uintptr_t)ptr - redzone_size), JEMALLOC_FREE_JUNK,
+ memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
bin_info->reg_interval);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
arena_dalloc_junk_small_t *arena_dalloc_junk_small =
- JEMALLOC_N(n_arena_dalloc_junk_small);
+ JEMALLOC_N(arena_dalloc_junk_small_impl);
#endif
void
@@ -2561,26 +2144,27 @@ arena_quarantine_junk_small(void *ptr, size_t usize)
arena_redzones_validate(ptr, bin_info, true);
}
-static void *
-arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
+void *
+arena_malloc_small(arena_t *arena, size_t size, bool zero)
{
void *ret;
arena_bin_t *bin;
- size_t usize;
arena_run_t *run;
+ szind_t binind;
+ binind = size2index(size);
assert(binind < NBINS);
bin = &arena->bins[binind];
- usize = index2size(binind);
+ size = index2size(binind);
- malloc_mutex_lock(tsdn, &bin->lock);
+ malloc_mutex_lock(&bin->lock);
if ((run = bin->runcur) != NULL && run->nfree > 0)
ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
else
- ret = arena_bin_malloc_hard(tsdn, arena, bin);
+ ret = arena_bin_malloc_hard(arena, bin);
if (ret == NULL) {
- malloc_mutex_unlock(tsdn, &bin->lock);
+ malloc_mutex_unlock(&bin->lock);
return (NULL);
}
@@ -2589,9 +2173,9 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
bin->stats.nrequests++;
bin->stats.curregs++;
}
- malloc_mutex_unlock(tsdn, &bin->lock);
- if (config_prof && !isthreaded && arena_prof_accum(tsdn, arena, usize))
- prof_idump(tsdn);
+ malloc_mutex_unlock(&bin->lock);
+ if (config_prof && !isthreaded && arena_prof_accum(arena, size))
+ prof_idump();
if (!zero) {
if (config_fill) {
@@ -2599,35 +2183,34 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
arena_alloc_junk_small(ret,
&arena_bin_info[binind], false);
} else if (unlikely(opt_zero))
- memset(ret, 0, usize);
+ memset(ret, 0, size);
}
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
} else {
if (config_fill && unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret, &arena_bin_info[binind],
true);
}
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
- memset(ret, 0, usize);
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+ memset(ret, 0, size);
}
- arena_decay_tick(tsdn, arena);
return (ret);
}
void *
-arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
+arena_malloc_large(arena_t *arena, size_t size, bool zero)
{
void *ret;
size_t usize;
uintptr_t random_offset;
arena_run_t *run;
arena_chunk_map_misc_t *miscelm;
- UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
+ UNUSED bool idump;
/* Large allocation. */
- usize = index2size(binind);
- malloc_mutex_lock(tsdn, &arena->lock);
+ usize = s2u(size);
+ malloc_mutex_lock(&arena->lock);
if (config_cache_oblivious) {
uint64_t r;
@@ -2636,21 +2219,22 @@ arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
* that is a multiple of the cacheline size, e.g. [0 .. 63) * 64
* for 4 KiB pages and 64-byte cachelines.
*/
- r = prng_lg_range_zu(&arena->offset_state, LG_PAGE -
- LG_CACHELINE, false);
+ prng64(r, LG_PAGE - LG_CACHELINE, arena->offset_state,
+ UINT64_C(6364136223846793009),
+ UINT64_C(1442695040888963409));
random_offset = ((uintptr_t)r) << LG_CACHELINE;
} else
random_offset = 0;
- run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero);
+ run = arena_run_alloc_large(arena, usize + large_pad, zero);
if (run == NULL) {
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
return (NULL);
}
miscelm = arena_run_to_miscelm(run);
ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
random_offset);
if (config_stats) {
- szind_t index = binind - NBINS;
+ szind_t index = size2index(usize) - NBINS;
arena->stats.nmalloc_large++;
arena->stats.nrequests_large++;
@@ -2661,45 +2245,25 @@ arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
}
if (config_prof)
idump = arena_prof_accum_locked(arena, usize);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
if (config_prof && idump)
- prof_idump(tsdn);
+ prof_idump();
if (!zero) {
if (config_fill) {
if (unlikely(opt_junk_alloc))
- memset(ret, JEMALLOC_ALLOC_JUNK, usize);
+ memset(ret, 0xa5, usize);
else if (unlikely(opt_zero))
memset(ret, 0, usize);
}
}
- arena_decay_tick(tsdn, arena);
return (ret);
}
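
Aside: with cache-oblivious large allocations, the pointer returned above is offset from the run base by a random whole number of cachelines within the first page, so large objects do not all land on the same cache index bits. A standalone sketch of that offset computation using the same LCG constants as the prng64() call above; the seed and names are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define LG_PAGE      12
    #define LG_CACHELINE 6

    static uint64_t state = 0x9e3779b97f4a7c15ULL;    /* arbitrary seed */

    static uintptr_t
    random_cacheline_offset(void)
    {
        uint64_t r;

        state = state * 6364136223846793009ULL + 1442695040888963409ULL;
        r = state >> (64 - (LG_PAGE - LG_CACHELINE));    /* r in [0 .. 64) */
        return ((uintptr_t)r << LG_CACHELINE);           /* multiple of 64 */
    }

    int
    main(void)
    {
        int i;

        for (i = 0; i < 4; i++)
            printf("offset = %#lx\n", (unsigned long)random_cacheline_offset());
        return (0);
    }
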
-void *
-arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
- bool zero)
-{
-
- assert(!tsdn_null(tsdn) || arena != NULL);
-
- if (likely(!tsdn_null(tsdn)))
- arena = arena_choose(tsdn_tsd(tsdn), arena);
- if (unlikely(arena == NULL))
- return (NULL);
-
- if (likely(size <= SMALL_MAXCLASS))
- return (arena_malloc_small(tsdn, arena, ind, zero));
- if (likely(size <= large_maxclass))
- return (arena_malloc_large(tsdn, arena, ind, zero));
- return (huge_malloc(tsdn, arena, index2size(ind), zero));
-}
-
/* Only handles large allocations that require more than page alignment. */
static void *
-arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
+arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
bool zero)
{
void *ret;
@@ -2709,21 +2273,19 @@ arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
arena_chunk_map_misc_t *miscelm;
void *rpages;
- assert(!tsdn_null(tsdn) || arena != NULL);
assert(usize == PAGE_CEILING(usize));
- if (likely(!tsdn_null(tsdn)))
- arena = arena_choose(tsdn_tsd(tsdn), arena);
+ arena = arena_choose(tsd, arena);
if (unlikely(arena == NULL))
return (NULL);
alignment = PAGE_CEILING(alignment);
alloc_size = usize + large_pad + alignment - PAGE;
- malloc_mutex_lock(tsdn, &arena->lock);
- run = arena_run_alloc_large(tsdn, arena, alloc_size, false);
+ malloc_mutex_lock(&arena->lock);
+ run = arena_run_alloc_large(arena, alloc_size, false);
if (run == NULL) {
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
return (NULL);
}
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
@@ -2738,16 +2300,16 @@ arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
arena_chunk_map_misc_t *head_miscelm = miscelm;
arena_run_t *head_run = run;
- miscelm = arena_miscelm_get_mutable(chunk,
+ miscelm = arena_miscelm_get(chunk,
arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
LG_PAGE));
run = &miscelm->run;
- arena_run_trim_head(tsdn, arena, chunk, head_run, alloc_size,
+ arena_run_trim_head(arena, chunk, head_run, alloc_size,
alloc_size - leadsize);
}
if (trailsize != 0) {
- arena_run_trim_tail(tsdn, arena, chunk, run, usize + large_pad +
+ arena_run_trim_tail(arena, chunk, run, usize + large_pad +
trailsize, usize + large_pad, false);
}
if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
@@ -2758,8 +2320,8 @@ arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
run_ind) != 0);
assert(decommitted); /* Cause of OOM. */
- arena_run_dalloc(tsdn, arena, run, dirty, false, decommitted);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ arena_run_dalloc(arena, run, dirty, false, decommitted);
+ malloc_mutex_unlock(&arena->lock);
return (NULL);
}
ret = arena_miscelm_to_rpages(miscelm);
@@ -2774,20 +2336,19 @@ arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
arena->stats.lstats[index].nrequests++;
arena->stats.lstats[index].curruns++;
}
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
if (config_fill && !zero) {
if (unlikely(opt_junk_alloc))
- memset(ret, JEMALLOC_ALLOC_JUNK, usize);
+ memset(ret, 0xa5, usize);
else if (unlikely(opt_zero))
memset(ret, 0, usize);
}
- arena_decay_tick(tsdn, arena);
return (ret);
}
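
Aside: for large allocations that need more than page alignment, the path above over-allocates by (alignment - PAGE), then trims a leading and a trailing piece off the run so the returned address is aligned (the real code also accounts for large_pad, omitted here). The arithmetic, standalone and with a hypothetical run address:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE 4096

    int
    main(void)
    {
        uintptr_t run_base = 0x7f0000001000;    /* hypothetical run address */
        size_t usize = 5 * PAGE, alignment = 4 * PAGE;
        size_t alloc_size = usize + alignment - PAGE;
        uintptr_t aligned = (run_base + alignment - 1) & ~((uintptr_t)alignment - 1);
        size_t leadsize = aligned - run_base;              /* trimmed off the head */
        size_t trailsize = alloc_size - leadsize - usize;  /* trimmed off the tail */

        printf("lead %zu pages, trail %zu pages, ret %#lx\n",
            leadsize / PAGE, trailsize / PAGE, (unsigned long)aligned);
        return (0);
    }
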
void *
-arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
+arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
bool zero, tcache_t *tcache)
{
void *ret;
@@ -2795,8 +2356,7 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
&& (usize & PAGE_MASK) == 0))) {
/* Small; alignment doesn't require special run placement. */
- ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
- tcache, true);
+ ret = arena_malloc(tsd, arena, usize, zero, tcache);
} else if (usize <= large_maxclass && alignment <= PAGE) {
/*
* Large; alignment doesn't require special run placement.
@@ -2804,25 +2364,25 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
* the base of the run, so do some bit manipulation to retrieve
* the base.
*/
- ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
- tcache, true);
+ ret = arena_malloc(tsd, arena, usize, zero, tcache);
if (config_cache_oblivious)
ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
} else {
if (likely(usize <= large_maxclass)) {
- ret = arena_palloc_large(tsdn, arena, usize, alignment,
+ ret = arena_palloc_large(tsd, arena, usize, alignment,
zero);
} else if (likely(alignment <= chunksize))
- ret = huge_malloc(tsdn, arena, usize, zero);
+ ret = huge_malloc(tsd, arena, usize, zero, tcache);
else {
- ret = huge_palloc(tsdn, arena, usize, alignment, zero);
+ ret = huge_palloc(tsd, arena, usize, alignment, zero,
+ tcache);
}
}
return (ret);
}
void
-arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size)
+arena_prof_promoted(const void *ptr, size_t size)
{
arena_chunk_t *chunk;
size_t pageind;
@@ -2831,8 +2391,8 @@ arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size)
cassert(config_prof);
assert(ptr != NULL);
assert(CHUNK_ADDR2BASE(ptr) != ptr);
- assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
- assert(isalloc(tsdn, ptr, true) == LARGE_MINCLASS);
+ assert(isalloc(ptr, false) == LARGE_MINCLASS);
+ assert(isalloc(ptr, true) == LARGE_MINCLASS);
assert(size <= SMALL_MAXCLASS);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
@@ -2841,8 +2401,8 @@ arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size)
assert(binind < NBINS);
arena_mapbits_large_binind_set(chunk, pageind, binind);
- assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
- assert(isalloc(tsdn, ptr, true) == size);
+ assert(isalloc(ptr, false) == LARGE_MINCLASS);
+ assert(isalloc(ptr, true) == size);
}
static void
@@ -2858,51 +2418,48 @@ arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
&chunk->node), bin);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
- /*
- * The following block's conditional is necessary because if the
- * run only contains one region, then it never gets inserted
- * into the non-full runs tree.
- */
if (bin_info->nregs != 1) {
- arena_chunk_map_misc_t *miscelm =
- arena_run_to_miscelm(run);
-
- arena_run_heap_remove(&bin->runs, miscelm);
+ /*
+ * This block's conditional is necessary because if the
+ * run only contains one region, then it never gets
+ * inserted into the non-full runs tree.
+ */
+ arena_bin_runs_remove(bin, run);
}
}
}
static void
-arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- arena_run_t *run, arena_bin_t *bin)
+arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+ arena_bin_t *bin)
{
assert(run != bin->runcur);
+ assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) ==
+ NULL);
- malloc_mutex_unlock(tsdn, &bin->lock);
+ malloc_mutex_unlock(&bin->lock);
/******************************/
- malloc_mutex_lock(tsdn, &arena->lock);
- arena_run_dalloc(tsdn, arena, run, true, false, false);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
+ arena_run_dalloc_decommit(arena, chunk, run);
+ malloc_mutex_unlock(&arena->lock);
/****************************/
- malloc_mutex_lock(tsdn, &bin->lock);
+ malloc_mutex_lock(&bin->lock);
if (config_stats)
bin->stats.curruns--;
}
static void
-arena_bin_lower_run(arena_t *arena, arena_run_t *run, arena_bin_t *bin)
+arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+ arena_bin_t *bin)
{
/*
- * Make sure that if bin->runcur is non-NULL, it refers to the
- * oldest/lowest non-full run. It is okay to NULL runcur out rather
- * than proactively keeping it pointing at the oldest/lowest non-full
- * run.
+ * Make sure that if bin->runcur is non-NULL, it refers to the lowest
+ * non-full run. It is okay to NULL runcur out rather than proactively
+ * keeping it pointing at the lowest non-full run.
*/
- if (bin->runcur != NULL &&
- arena_snad_comp(arena_run_to_miscelm(bin->runcur),
- arena_run_to_miscelm(run)) > 0) {
+ if ((uintptr_t)run < (uintptr_t)bin->runcur) {
/* Switch runcur. */
if (bin->runcur->nfree > 0)
arena_bin_runs_insert(bin, bin->runcur);
@@ -2914,8 +2471,8 @@ arena_bin_lower_run(arena_t *arena, arena_run_t *run, arena_bin_t *bin)
}
static void
-arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- void *ptr, arena_chunk_map_bits_t *bitselm, bool junked)
+arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ arena_chunk_map_bits_t *bitselm, bool junked)
{
size_t pageind, rpages_ind;
arena_run_t *run;
@@ -2925,7 +2482,7 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
- run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
+ run = &arena_miscelm_get(chunk, rpages_ind)->run;
binind = run->binind;
bin = &arena->bins[binind];
bin_info = &arena_bin_info[binind];
@@ -2936,9 +2493,9 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
arena_run_reg_dalloc(run, ptr);
if (run->nfree == bin_info->nregs) {
arena_dissociate_bin_run(chunk, run, bin);
- arena_dalloc_bin_run(tsdn, arena, chunk, run, bin);
+ arena_dalloc_bin_run(arena, chunk, run, bin);
} else if (run->nfree == 1 && run != bin->runcur)
- arena_bin_lower_run(arena, run, bin);
+ arena_bin_lower_run(arena, chunk, run, bin);
if (config_stats) {
bin->stats.ndalloc++;
@@ -2947,15 +2504,15 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
}
void
-arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
- arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm)
+arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ arena_chunk_map_bits_t *bitselm)
{
- arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, true);
+ arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, true);
}
void
-arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr,
+arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind, arena_chunk_map_bits_t *bitselm)
{
arena_run_t *run;
@@ -2963,16 +2520,16 @@ arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t rpages_ind;
rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
- run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
+ run = &arena_miscelm_get(chunk, rpages_ind)->run;
bin = &arena->bins[run->binind];
- malloc_mutex_lock(tsdn, &bin->lock);
- arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, false);
- malloc_mutex_unlock(tsdn, &bin->lock);
+ malloc_mutex_lock(&bin->lock);
+ arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false);
+ malloc_mutex_unlock(&bin->lock);
}
void
-arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- void *ptr, size_t pageind)
+arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ size_t pageind)
{
arena_chunk_map_bits_t *bitselm;
@@ -2981,36 +2538,34 @@ arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
pageind)) != BININD_INVALID);
}
- bitselm = arena_bitselm_get_mutable(chunk, pageind);
- arena_dalloc_bin(tsdn, arena, chunk, ptr, pageind, bitselm);
- arena_decay_tick(tsdn, arena);
+ bitselm = arena_bitselm_get(chunk, pageind);
+ arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
-#define arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large)
+#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl)
#endif
void
arena_dalloc_junk_large(void *ptr, size_t usize)
{
if (config_fill && unlikely(opt_junk_free))
- memset(ptr, JEMALLOC_FREE_JUNK, usize);
+ memset(ptr, 0x5a, usize);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
arena_dalloc_junk_large_t *arena_dalloc_junk_large =
- JEMALLOC_N(n_arena_dalloc_junk_large);
+ JEMALLOC_N(arena_dalloc_junk_large_impl);
#endif
static void
-arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena,
- arena_chunk_t *chunk, void *ptr, bool junked)
+arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, bool junked)
{
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
- pageind);
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
arena_run_t *run = &miscelm->run;
if (config_fill || config_stats) {
@@ -3029,35 +2584,32 @@ arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena,
}
}
- arena_run_dalloc(tsdn, arena, run, true, false, false);
+ arena_run_dalloc_decommit(arena, chunk, run);
}
void
-arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
- arena_chunk_t *chunk, void *ptr)
+arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
+ void *ptr)
{
- arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, true);
+ arena_dalloc_large_locked_impl(arena, chunk, ptr, true);
}
void
-arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- void *ptr)
+arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
{
- malloc_mutex_lock(tsdn, &arena->lock);
- arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, false);
- malloc_mutex_unlock(tsdn, &arena->lock);
- arena_decay_tick(tsdn, arena);
+ malloc_mutex_lock(&arena->lock);
+ arena_dalloc_large_locked_impl(arena, chunk, ptr, false);
+ malloc_mutex_unlock(&arena->lock);
}
static void
-arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- void *ptr, size_t oldsize, size_t size)
+arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ size_t oldsize, size_t size)
{
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
- pageind);
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
arena_run_t *run = &miscelm->run;
assert(size < oldsize);
@@ -3066,8 +2618,8 @@ arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
* Shrink the run, and make trailing pages available for other
* allocations.
*/
- malloc_mutex_lock(tsdn, &arena->lock);
- arena_run_trim_tail(tsdn, arena, chunk, run, oldsize + large_pad, size +
+ malloc_mutex_lock(&arena->lock);
+ arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size +
large_pad, true);
if (config_stats) {
szind_t oldindex = size2index(oldsize) - NBINS;
@@ -3085,12 +2637,12 @@ arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
arena->stats.lstats[index].nrequests++;
arena->stats.lstats[index].curruns++;
}
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
}
static bool
-arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
- void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
+arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
{
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t npages = (oldsize + large_pad) >> LG_PAGE;
@@ -3100,7 +2652,7 @@ arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
large_pad);
/* Try to extend the run. */
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
pageind+npages) != 0)
goto label_fail;
@@ -3123,7 +2675,7 @@ arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
if (splitsize == 0)
goto label_fail;
- run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run;
+ run = &arena_miscelm_get(chunk, pageind+npages)->run;
if (arena_run_split_large(arena, run, splitsize, zero))
goto label_fail;
@@ -3131,16 +2683,10 @@ arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
/*
* Zero the trailing bytes of the original allocation's
* last page, since they are in an indeterminate state.
- * There will always be trailing bytes, because ptr's
- * offset from the beginning of the run is a multiple of
- * CACHELINE in [0 .. PAGE).
*/
- void *zbase = (void *)((uintptr_t)ptr + oldsize);
- void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
- PAGE));
- size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
- assert(nzero > 0);
- memset(zbase, 0, nzero);
+ assert(PAGE_CEILING(oldsize) == oldsize);
+ memset((void *)((uintptr_t)ptr + oldsize), 0,
+ PAGE_CEILING((uintptr_t)ptr) - (uintptr_t)ptr);
}
size = oldsize + splitsize;
@@ -3180,24 +2726,24 @@ arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
arena->stats.lstats[index].nrequests++;
arena->stats.lstats[index].curruns++;
}
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
return (false);
}
label_fail:
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
return (true);
}
#ifdef JEMALLOC_JET
#undef arena_ralloc_junk_large
-#define arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large)
+#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl)
#endif
static void
arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
{
if (config_fill && unlikely(opt_junk_free)) {
- memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK,
+ memset((void *)((uintptr_t)ptr + usize), 0x5a,
old_usize - usize);
}
}
@@ -3205,7 +2751,7 @@ arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
#undef arena_ralloc_junk_large
#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
arena_ralloc_junk_large_t *arena_ralloc_junk_large =
- JEMALLOC_N(n_arena_ralloc_junk_large);
+ JEMALLOC_N(arena_ralloc_junk_large_impl);
#endif
/*
@@ -3213,7 +2759,7 @@ arena_ralloc_junk_large_t *arena_ralloc_junk_large =
* always fail if growing an object, and the following run is already in use.
*/
static bool
-arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
+arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min,
size_t usize_max, bool zero)
{
arena_chunk_t *chunk;
@@ -3228,16 +2774,15 @@ arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
arena = extent_node_arena_get(&chunk->node);
if (oldsize < usize_max) {
- bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr,
- oldsize, usize_min, usize_max, zero);
+ bool ret = arena_ralloc_large_grow(arena, chunk, ptr, oldsize,
+ usize_min, usize_max, zero);
if (config_fill && !ret && !zero) {
if (unlikely(opt_junk_alloc)) {
- memset((void *)((uintptr_t)ptr + oldsize),
- JEMALLOC_ALLOC_JUNK,
- isalloc(tsdn, ptr, config_prof) - oldsize);
+ memset((void *)((uintptr_t)ptr + oldsize), 0xa5,
+ isalloc(ptr, config_prof) - oldsize);
} else if (unlikely(opt_zero)) {
memset((void *)((uintptr_t)ptr + oldsize), 0,
- isalloc(tsdn, ptr, config_prof) - oldsize);
+ isalloc(ptr, config_prof) - oldsize);
}
}
return (ret);
@@ -3246,27 +2791,19 @@ arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
assert(oldsize > usize_max);
/* Fill before shrinking in order avoid a race. */
arena_ralloc_junk_large(ptr, oldsize, usize_max);
- arena_ralloc_large_shrink(tsdn, arena, chunk, ptr, oldsize, usize_max);
+ arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, usize_max);
return (false);
}
bool
-arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
- size_t extra, bool zero)
+arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
+ bool zero)
{
size_t usize_min, usize_max;
- /* Calls with non-zero extra had to clamp extra. */
- assert(extra == 0 || size + extra <= HUGE_MAXCLASS);
-
- if (unlikely(size > HUGE_MAXCLASS))
- return (true);
-
usize_min = s2u(size);
usize_max = s2u(size + extra);
if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
- arena_chunk_t *chunk;
-
/*
* Avoid moving the allocation if the size class can be left the
* same.
@@ -3274,39 +2811,37 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
if (oldsize <= SMALL_MAXCLASS) {
assert(arena_bin_info[size2index(oldsize)].reg_size ==
oldsize);
- if ((usize_max > SMALL_MAXCLASS ||
- size2index(usize_max) != size2index(oldsize)) &&
- (size > oldsize || usize_max < oldsize))
- return (true);
+ if ((usize_max <= SMALL_MAXCLASS &&
+ size2index(usize_max) == size2index(oldsize)) ||
+ (size <= oldsize && usize_max >= oldsize))
+ return (false);
} else {
- if (usize_max <= SMALL_MAXCLASS)
- return (true);
- if (arena_ralloc_large(tsdn, ptr, oldsize, usize_min,
- usize_max, zero))
- return (true);
+ if (usize_max > SMALL_MAXCLASS) {
+ if (!arena_ralloc_large(ptr, oldsize, usize_min,
+ usize_max, zero))
+ return (false);
+ }
}
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- arena_decay_tick(tsdn, extent_node_arena_get(&chunk->node));
- return (false);
+ /* Reallocation would require a move. */
+ return (true);
} else {
- return (huge_ralloc_no_move(tsdn, ptr, oldsize, usize_min,
- usize_max, zero));
+ return (huge_ralloc_no_move(ptr, oldsize, usize_min, usize_max,
+ zero));
}
}
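
Aside: the reverted arena_ralloc_no_move() above reports "no move needed" when the old and new requests land in the same small size class, since the existing region already fits. A toy size-class mapping that makes the check concrete; the real size2index() covers jemalloc's full class table, not just powers of two:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Toy classes: 8, 16, 32, 64, ... -- powers of two only, for illustration. */
    static unsigned
    toy_size2index(size_t size)
    {
        unsigned index = 0;
        size_t class = 8;

        while (class < size) {
            class <<= 1;
            index++;
        }
        return (index);
    }

    static bool
    ralloc_needs_move(size_t oldsize, size_t newsize)
    {
        return (toy_size2index(oldsize) != toy_size2index(newsize));
    }

    int
    main(void)
    {
        printf("24 -> 30 bytes: %s\n", ralloc_needs_move(24, 30) ? "move" : "in place");
        printf("24 -> 40 bytes: %s\n", ralloc_needs_move(24, 40) ? "move" : "in place");
        return (0);
    }
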
static void *
-arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
+arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
size_t alignment, bool zero, tcache_t *tcache)
{
if (alignment == 0)
- return (arena_malloc(tsdn, arena, usize, size2index(usize),
- zero, tcache, true));
+ return (arena_malloc(tsd, arena, usize, zero, tcache));
usize = sa2u(usize, alignment);
- if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
+ if (usize == 0)
return (NULL);
- return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
+ return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
}
void *
@@ -3317,15 +2852,14 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t usize;
usize = s2u(size);
- if (unlikely(usize == 0 || size > HUGE_MAXCLASS))
+ if (usize == 0)
return (NULL);
if (likely(usize <= large_maxclass)) {
size_t copysize;
/* Try to avoid moving the allocation. */
- if (!arena_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, 0,
- zero))
+ if (!arena_ralloc_no_move(ptr, oldsize, usize, 0, zero))
return (ptr);
/*
@@ -3333,8 +2867,8 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
* the object. In that case, fall back to allocating new space
* and copying.
*/
- ret = arena_ralloc_move_helper(tsd_tsdn(tsd), arena, usize,
- alignment, zero, tcache);
+ ret = arena_ralloc_move_helper(tsd, arena, usize, alignment,
+ zero, tcache);
if (ret == NULL)
return (NULL);
@@ -3346,7 +2880,7 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
copysize = (usize < oldsize) ? usize : oldsize;
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
memcpy(ret, ptr, copysize);
- isqalloc(tsd, ptr, oldsize, tcache, true);
+ isqalloc(tsd, ptr, oldsize, tcache);
} else {
ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
zero, tcache);
@@ -3355,25 +2889,25 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
}
dss_prec_t
-arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena)
+arena_dss_prec_get(arena_t *arena)
{
dss_prec_t ret;
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
ret = arena->dss_prec;
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
return (ret);
}
bool
-arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec)
+arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
{
if (!have_dss)
return (dss_prec != dss_prec_disabled);
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
arena->dss_prec = dss_prec;
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
return (false);
}
@@ -3388,76 +2922,27 @@ bool
arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
{
- if (opt_purge != purge_mode_ratio)
- return (true);
if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
return (true);
atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult);
return (false);
}
-ssize_t
-arena_decay_time_default_get(void)
-{
-
- return ((ssize_t)atomic_read_z((size_t *)&decay_time_default));
-}
-
-bool
-arena_decay_time_default_set(ssize_t decay_time)
-{
-
- if (opt_purge != purge_mode_decay)
- return (true);
- if (!arena_decay_time_valid(decay_time))
- return (true);
- atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time);
- return (false);
-}
-
-static void
-arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
- const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
- size_t *nactive, size_t *ndirty)
-{
-
- *nthreads += arena_nthreads_get(arena, false);
- *dss = dss_prec_names[arena->dss_prec];
- *lg_dirty_mult = arena->lg_dirty_mult;
- *decay_time = arena->decay.time;
- *nactive += arena->nactive;
- *ndirty += arena->ndirty;
-}
-
void
-arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
- const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
- size_t *nactive, size_t *ndirty)
-{
-
- malloc_mutex_lock(tsdn, &arena->lock);
- arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
- decay_time, nactive, ndirty);
- malloc_mutex_unlock(tsdn, &arena->lock);
-}
-
-void
-arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
- const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
+arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult,
size_t *nactive, size_t *ndirty, arena_stats_t *astats,
malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
malloc_huge_stats_t *hstats)
{
unsigned i;
- cassert(config_stats);
-
- malloc_mutex_lock(tsdn, &arena->lock);
- arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
- decay_time, nactive, ndirty);
+ malloc_mutex_lock(&arena->lock);
+ *dss = dss_prec_names[arena->dss_prec];
+ *lg_dirty_mult = arena->lg_dirty_mult;
+ *nactive += arena->nactive;
+ *ndirty += arena->ndirty;
astats->mapped += arena->stats.mapped;
- astats->retained += arena->stats.retained;
astats->npurge += arena->stats.npurge;
astats->nmadvise += arena->stats.nmadvise;
astats->purged += arena->stats.purged;
@@ -3483,12 +2968,12 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
}
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
- malloc_mutex_lock(tsdn, &bin->lock);
+ malloc_mutex_lock(&bin->lock);
bstats[i].nmalloc += bin->stats.nmalloc;
bstats[i].ndalloc += bin->stats.ndalloc;
bstats[i].nrequests += bin->stats.nrequests;
@@ -3500,61 +2985,33 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
bstats[i].nruns += bin->stats.nruns;
bstats[i].reruns += bin->stats.reruns;
bstats[i].curruns += bin->stats.curruns;
- malloc_mutex_unlock(tsdn, &bin->lock);
+ malloc_mutex_unlock(&bin->lock);
}
}
-unsigned
-arena_nthreads_get(arena_t *arena, bool internal)
-{
-
- return (atomic_read_u(&arena->nthreads[internal]));
-}
-
-void
-arena_nthreads_inc(arena_t *arena, bool internal)
-{
-
- atomic_add_u(&arena->nthreads[internal], 1);
-}
-
-void
-arena_nthreads_dec(arena_t *arena, bool internal)
-{
-
- atomic_sub_u(&arena->nthreads[internal], 1);
-}
-
-size_t
-arena_extent_sn_next(arena_t *arena)
-{
-
- return (atomic_add_z(&arena->extent_sn_next, 1) - 1);
-}
-
arena_t *
-arena_new(tsdn_t *tsdn, unsigned ind)
+arena_new(unsigned ind)
{
arena_t *arena;
unsigned i;
+ arena_bin_t *bin;
/*
* Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
* because there is no way to clean up if base_alloc() OOMs.
*/
if (config_stats) {
- arena = (arena_t *)base_alloc(tsdn,
- CACHELINE_CEILING(sizeof(arena_t)) +
- QUANTUM_CEILING((nlclasses * sizeof(malloc_large_stats_t)))
- + (nhclasses * sizeof(malloc_huge_stats_t)));
+ arena = (arena_t *)base_alloc(CACHELINE_CEILING(sizeof(arena_t))
+ + QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t) +
+ nhclasses) * sizeof(malloc_huge_stats_t));
} else
- arena = (arena_t *)base_alloc(tsdn, sizeof(arena_t));
+ arena = (arena_t *)base_alloc(sizeof(arena_t));
if (arena == NULL)
return (NULL);
arena->ind = ind;
- arena->nthreads[0] = arena->nthreads[1] = 0;
- if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA))
+ arena->nthreads = 0;
+ if (malloc_mutex_init(&arena->lock))
return (NULL);
if (config_stats) {
@@ -3584,15 +3041,11 @@ arena_new(tsdn_t *tsdn, unsigned ind)
* deterministic seed.
*/
arena->offset_state = config_debug ? ind :
- (size_t)(uintptr_t)arena;
+ (uint64_t)(uintptr_t)arena;
}
arena->dss_prec = chunk_dss_prec_get();
- ql_new(&arena->achunks);
-
- arena->extent_sn_next = 0;
-
arena->spare = NULL;
arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
@@ -3600,42 +3053,33 @@ arena_new(tsdn_t *tsdn, unsigned ind)
arena->nactive = 0;
arena->ndirty = 0;
- for (i = 0; i < NPSIZES; i++)
- arena_run_heap_new(&arena->runs_avail[i]);
-
+ arena_avail_tree_new(&arena->runs_avail);
qr_new(&arena->runs_dirty, rd_link);
qr_new(&arena->chunks_cache, cc_link);
- if (opt_purge == purge_mode_decay)
- arena_decay_init(arena, arena_decay_time_default_get());
-
ql_new(&arena->huge);
- if (malloc_mutex_init(&arena->huge_mtx, "arena_huge",
- WITNESS_RANK_ARENA_HUGE))
+ if (malloc_mutex_init(&arena->huge_mtx))
return (NULL);
- extent_tree_szsnad_new(&arena->chunks_szsnad_cached);
+ extent_tree_szad_new(&arena->chunks_szad_cached);
extent_tree_ad_new(&arena->chunks_ad_cached);
- extent_tree_szsnad_new(&arena->chunks_szsnad_retained);
+ extent_tree_szad_new(&arena->chunks_szad_retained);
extent_tree_ad_new(&arena->chunks_ad_retained);
- if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
- WITNESS_RANK_ARENA_CHUNKS))
+ if (malloc_mutex_init(&arena->chunks_mtx))
return (NULL);
ql_new(&arena->node_cache);
- if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache",
- WITNESS_RANK_ARENA_NODE_CACHE))
+ if (malloc_mutex_init(&arena->node_cache_mtx))
return (NULL);
arena->chunk_hooks = chunk_hooks_default;
/* Initialize bins. */
for (i = 0; i < NBINS; i++) {
- arena_bin_t *bin = &arena->bins[i];
- if (malloc_mutex_init(&bin->lock, "arena_bin",
- WITNESS_RANK_ARENA_BIN))
+ bin = &arena->bins[i];
+ if (malloc_mutex_init(&bin->lock))
return (NULL);
bin->runcur = NULL;
- arena_run_heap_new(&bin->runs);
+ arena_run_tree_new(&bin->runs);
if (config_stats)
memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
}
@@ -3667,7 +3111,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
* be twice as large in order to maintain alignment.
*/
if (config_fill && unlikely(opt_redzone)) {
- size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1);
+ size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) -
+ 1);
if (align_min <= REDZONE_MINSIZE) {
bin_info->redzone_size = REDZONE_MINSIZE;
pad_size = 0;
@@ -3687,19 +3132,18 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
* size).
*/
try_run_size = PAGE;
- try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
+ try_nregs = try_run_size / bin_info->reg_size;
do {
perfect_run_size = try_run_size;
perfect_nregs = try_nregs;
try_run_size += PAGE;
- try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
+ try_nregs = try_run_size / bin_info->reg_size;
} while (perfect_run_size != perfect_nregs * bin_info->reg_size);
assert(perfect_nregs <= RUN_MAXREGS);
actual_run_size = perfect_run_size;
- actual_nregs = (uint32_t)((actual_run_size - pad_size) /
- bin_info->reg_interval);
+ actual_nregs = (actual_run_size - pad_size) / bin_info->reg_interval;
/*
* Redzones can require enough padding that not even a single region can
@@ -3711,8 +3155,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
assert(config_fill && unlikely(opt_redzone));
actual_run_size += PAGE;
- actual_nregs = (uint32_t)((actual_run_size - pad_size) /
- bin_info->reg_interval);
+ actual_nregs = (actual_run_size - pad_size) /
+ bin_info->reg_interval;
}
/*
@@ -3720,8 +3164,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
*/
while (actual_run_size > arena_maxrun) {
actual_run_size -= PAGE;
- actual_nregs = (uint32_t)((actual_run_size - pad_size) /
- bin_info->reg_interval);
+ actual_nregs = (actual_run_size - pad_size) /
+ bin_info->reg_interval;
}
assert(actual_nregs > 0);
assert(actual_run_size == s2u(actual_run_size));
@@ -3729,8 +3173,11 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
/* Copy final settings. */
bin_info->run_size = actual_run_size;
bin_info->nregs = actual_nregs;
- bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs *
- bin_info->reg_interval) - pad_size + bin_info->redzone_size);
+ bin_info->reg0_offset = actual_run_size - (actual_nregs *
+ bin_info->reg_interval) - pad_size + bin_info->redzone_size;
+
+ if (actual_run_size > small_maxrun)
+ small_maxrun = actual_run_size;
assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
* bin_info->reg_interval) + pad_size == bin_info->run_size);
@@ -3747,7 +3194,7 @@ bin_info_init(void)
bin_info_run_size_calc(bin_info); \
bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
#define BIN_INFO_INIT_bin_no(index, size)
-#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
+#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
SIZE_CLASSES
#undef BIN_INFO_INIT_bin_yes
@@ -3755,13 +3202,38 @@ bin_info_init(void)
#undef SC
}
-void
+static bool
+small_run_size_init(void)
+{
+
+ assert(small_maxrun != 0);
+
+ small_run_tab = (bool *)base_alloc(sizeof(bool) * (small_maxrun >>
+ LG_PAGE));
+ if (small_run_tab == NULL)
+ return (true);
+
+#define TAB_INIT_bin_yes(index, size) { \
+ arena_bin_info_t *bin_info = &arena_bin_info[index]; \
+ small_run_tab[bin_info->run_size >> LG_PAGE] = true; \
+ }
+#define TAB_INIT_bin_no(index, size)
+#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
+ TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
+ SIZE_CLASSES
+#undef TAB_INIT_bin_yes
+#undef TAB_INIT_bin_no
+#undef SC
+
+ return (false);
+}
+
+bool
arena_boot(void)
{
unsigned i;
arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
- arena_decay_time_default_set(opt_decay_time);
/*
* Compute the header size such that it is large enough to contain the
@@ -3803,61 +3275,44 @@ arena_boot(void)
nhclasses = NSIZES - nlclasses - NBINS;
bin_info_init();
+ return (small_run_size_init());
}
void
-arena_prefork0(tsdn_t *tsdn, arena_t *arena)
-{
-
- malloc_mutex_prefork(tsdn, &arena->lock);
-}
-
-void
-arena_prefork1(tsdn_t *tsdn, arena_t *arena)
-{
-
- malloc_mutex_prefork(tsdn, &arena->chunks_mtx);
-}
-
-void
-arena_prefork2(tsdn_t *tsdn, arena_t *arena)
-{
-
- malloc_mutex_prefork(tsdn, &arena->node_cache_mtx);
-}
-
-void
-arena_prefork3(tsdn_t *tsdn, arena_t *arena)
+arena_prefork(arena_t *arena)
{
unsigned i;
+ malloc_mutex_prefork(&arena->lock);
+ malloc_mutex_prefork(&arena->huge_mtx);
+ malloc_mutex_prefork(&arena->chunks_mtx);
+ malloc_mutex_prefork(&arena->node_cache_mtx);
for (i = 0; i < NBINS; i++)
- malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
- malloc_mutex_prefork(tsdn, &arena->huge_mtx);
+ malloc_mutex_prefork(&arena->bins[i].lock);
}
void
-arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
+arena_postfork_parent(arena_t *arena)
{
unsigned i;
- malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx);
for (i = 0; i < NBINS; i++)
- malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
- malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx);
- malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx);
- malloc_mutex_postfork_parent(tsdn, &arena->lock);
+ malloc_mutex_postfork_parent(&arena->bins[i].lock);
+ malloc_mutex_postfork_parent(&arena->node_cache_mtx);
+ malloc_mutex_postfork_parent(&arena->chunks_mtx);
+ malloc_mutex_postfork_parent(&arena->huge_mtx);
+ malloc_mutex_postfork_parent(&arena->lock);
}
void
-arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
+arena_postfork_child(arena_t *arena)
{
unsigned i;
- malloc_mutex_postfork_child(tsdn, &arena->huge_mtx);
for (i = 0; i < NBINS; i++)
- malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
- malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx);
- malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx);
- malloc_mutex_postfork_child(tsdn, &arena->lock);
+ malloc_mutex_postfork_child(&arena->bins[i].lock);
+ malloc_mutex_postfork_child(&arena->node_cache_mtx);
+ malloc_mutex_postfork_child(&arena->chunks_mtx);
+ malloc_mutex_postfork_child(&arena->huge_mtx);
+ malloc_mutex_postfork_child(&arena->lock);
}
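
The bin_info_run_size_calc() hunks above restore the pre-4.4 search for a "perfect" run size: grow the candidate run one page at a time until the previous candidate divides evenly into regions, so a run wastes no space on a partial region. A minimal standalone sketch of that search, assuming 4 KiB pages and ignoring the redzone padding and the RUN_MAXREGS/arena_maxrun caps the real code applies afterwards:

    #include <stddef.h>

    #define PAGE ((size_t)4096)   /* assumption: 4 KiB pages */

    /*
     * Smallest multiple of PAGE that is also an exact multiple of reg_size,
     * i.e. the run size at which no space is left over for a partial region.
     */
    static size_t
    perfect_run_size(size_t reg_size)
    {
        size_t run_size = PAGE;

        while (run_size % reg_size != 0)
            run_size += PAGE;
        return (run_size);
    }

For a 48-byte class, for example, this settles on a 12 KiB run holding 256 regions.
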
diff --git a/deps/jemalloc/src/base.c b/deps/jemalloc/src/base.c
index 5681a3f36..7cdcfed86 100644
--- a/deps/jemalloc/src/base.c
+++ b/deps/jemalloc/src/base.c
@@ -5,8 +5,7 @@
/* Data. */
static malloc_mutex_t base_mtx;
-static size_t base_extent_sn_next;
-static extent_tree_t base_avail_szsnad;
+static extent_tree_t base_avail_szad;
static extent_node_t *base_nodes;
static size_t base_allocated;
static size_t base_resident;
@@ -14,13 +13,12 @@ static size_t base_mapped;
/******************************************************************************/
+/* base_mtx must be held. */
static extent_node_t *
-base_node_try_alloc(tsdn_t *tsdn)
+base_node_try_alloc(void)
{
extent_node_t *node;
- malloc_mutex_assert_owner(tsdn, &base_mtx);
-
if (base_nodes == NULL)
return (NULL);
node = base_nodes;
@@ -29,42 +27,33 @@ base_node_try_alloc(tsdn_t *tsdn)
return (node);
}
+/* base_mtx must be held. */
static void
-base_node_dalloc(tsdn_t *tsdn, extent_node_t *node)
+base_node_dalloc(extent_node_t *node)
{
- malloc_mutex_assert_owner(tsdn, &base_mtx);
-
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
*(extent_node_t **)node = base_nodes;
base_nodes = node;
}
-static void
-base_extent_node_init(extent_node_t *node, void *addr, size_t size)
-{
- size_t sn = atomic_add_z(&base_extent_sn_next, 1) - 1;
-
- extent_node_init(node, NULL, addr, size, sn, true, true);
-}
-
+/* base_mtx must be held. */
static extent_node_t *
-base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
+base_chunk_alloc(size_t minsize)
{
extent_node_t *node;
size_t csize, nsize;
void *addr;
- malloc_mutex_assert_owner(tsdn, &base_mtx);
assert(minsize != 0);
- node = base_node_try_alloc(tsdn);
+ node = base_node_try_alloc();
/* Allocate enough space to also carve a node out if necessary. */
nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
csize = CHUNK_CEILING(minsize + nsize);
addr = chunk_alloc_base(csize);
if (addr == NULL) {
if (node != NULL)
- base_node_dalloc(tsdn, node);
+ base_node_dalloc(node);
return (NULL);
}
base_mapped += csize;
@@ -77,7 +66,7 @@ base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
base_resident += PAGE_CEILING(nsize);
}
}
- base_extent_node_init(node, addr, csize);
+ extent_node_init(node, NULL, addr, csize, true, true);
return (node);
}
@@ -87,7 +76,7 @@ base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
* physical memory usage.
*/
void *
-base_alloc(tsdn_t *tsdn, size_t size)
+base_alloc(size_t size)
{
void *ret;
size_t csize, usize;
@@ -101,15 +90,15 @@ base_alloc(tsdn_t *tsdn, size_t size)
csize = CACHELINE_CEILING(size);
usize = s2u(csize);
- extent_node_init(&key, NULL, NULL, usize, 0, false, false);
- malloc_mutex_lock(tsdn, &base_mtx);
- node = extent_tree_szsnad_nsearch(&base_avail_szsnad, &key);
+ extent_node_init(&key, NULL, NULL, usize, false, false);
+ malloc_mutex_lock(&base_mtx);
+ node = extent_tree_szad_nsearch(&base_avail_szad, &key);
if (node != NULL) {
/* Use existing space. */
- extent_tree_szsnad_remove(&base_avail_szsnad, node);
+ extent_tree_szad_remove(&base_avail_szad, node);
} else {
/* Try to allocate more space. */
- node = base_chunk_alloc(tsdn, csize);
+ node = base_chunk_alloc(csize);
}
if (node == NULL) {
ret = NULL;
@@ -120,9 +109,9 @@ base_alloc(tsdn_t *tsdn, size_t size)
if (extent_node_size_get(node) > csize) {
extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
extent_node_size_set(node, extent_node_size_get(node) - csize);
- extent_tree_szsnad_insert(&base_avail_szsnad, node);
+ extent_tree_szad_insert(&base_avail_szad, node);
} else
- base_node_dalloc(tsdn, node);
+ base_node_dalloc(node);
if (config_stats) {
base_allocated += csize;
/*
@@ -134,54 +123,52 @@ base_alloc(tsdn_t *tsdn, size_t size)
}
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
label_return:
- malloc_mutex_unlock(tsdn, &base_mtx);
+ malloc_mutex_unlock(&base_mtx);
return (ret);
}
void
-base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
- size_t *mapped)
+base_stats_get(size_t *allocated, size_t *resident, size_t *mapped)
{
- malloc_mutex_lock(tsdn, &base_mtx);
+ malloc_mutex_lock(&base_mtx);
assert(base_allocated <= base_resident);
assert(base_resident <= base_mapped);
*allocated = base_allocated;
*resident = base_resident;
*mapped = base_mapped;
- malloc_mutex_unlock(tsdn, &base_mtx);
+ malloc_mutex_unlock(&base_mtx);
}
bool
base_boot(void)
{
- if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE))
+ if (malloc_mutex_init(&base_mtx))
return (true);
- base_extent_sn_next = 0;
- extent_tree_szsnad_new(&base_avail_szsnad);
+ extent_tree_szad_new(&base_avail_szad);
base_nodes = NULL;
return (false);
}
void
-base_prefork(tsdn_t *tsdn)
+base_prefork(void)
{
- malloc_mutex_prefork(tsdn, &base_mtx);
+ malloc_mutex_prefork(&base_mtx);
}
void
-base_postfork_parent(tsdn_t *tsdn)
+base_postfork_parent(void)
{
- malloc_mutex_postfork_parent(tsdn, &base_mtx);
+ malloc_mutex_postfork_parent(&base_mtx);
}
void
-base_postfork_child(tsdn_t *tsdn)
+base_postfork_child(void)
{
- malloc_mutex_postfork_child(tsdn, &base_mtx);
+ malloc_mutex_postfork_child(&base_mtx);
}
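
The base_prefork()/base_postfork_parent()/base_postfork_child() functions above exist so that base_mtx can be acquired before fork() and released on both sides of it, preventing the child from inheriting a mutex held by a thread that no longer exists. A minimal sketch of the pthread_atfork() pattern such hooks plug into, using a hypothetical mutex; jemalloc's real registration happens in its own fork handling, not here:

    #include <pthread.h>

    static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;

    static void prefork(void)         { pthread_mutex_lock(&mtx); }
    static void postfork_parent(void) { pthread_mutex_unlock(&mtx); }
    static void postfork_child(void)  { pthread_mutex_unlock(&mtx); }

    /*
     * Register once; fork() then runs prefork in the calling thread before
     * forking, and the matching postfork hook in the parent and the child.
     */
    static void
    install_fork_hooks(void)
    {
        pthread_atfork(prefork, postfork_parent, postfork_child);
    }
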
diff --git a/deps/jemalloc/src/bitmap.c b/deps/jemalloc/src/bitmap.c
index ac0f3b381..c733372b4 100644
--- a/deps/jemalloc/src/bitmap.c
+++ b/deps/jemalloc/src/bitmap.c
@@ -3,8 +3,6 @@
/******************************************************************************/
-#ifdef USE_TREE
-
void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
{
@@ -34,11 +32,20 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
binfo->nbits = nbits;
}
-static size_t
+size_t
bitmap_info_ngroups(const bitmap_info_t *binfo)
{
- return (binfo->levels[binfo->nlevels].group_offset);
+ return (binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP);
+}
+
+size_t
+bitmap_size(size_t nbits)
+{
+ bitmap_info_t binfo;
+
+ bitmap_info_init(&binfo, nbits);
+ return (bitmap_info_ngroups(&binfo));
}
void
@@ -54,7 +61,8 @@ bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
* correspond to the first logical bit in the group, so extra bits
* are the most significant bits of the last group.
*/
- memset(bitmap, 0xffU, bitmap_size(binfo));
+ memset(bitmap, 0xffU, binfo->levels[binfo->nlevels].group_offset <<
+ LG_SIZEOF_BITMAP);
extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
& BITMAP_GROUP_NBITS_MASK;
if (extra != 0)
@@ -68,44 +76,3 @@ bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
}
}
-
-#else /* USE_TREE */
-
-void
-bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
-{
-
- assert(nbits > 0);
- assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
-
- binfo->ngroups = BITMAP_BITS2GROUPS(nbits);
- binfo->nbits = nbits;
-}
-
-static size_t
-bitmap_info_ngroups(const bitmap_info_t *binfo)
-{
-
- return (binfo->ngroups);
-}
-
-void
-bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
-{
- size_t extra;
-
- memset(bitmap, 0xffU, bitmap_size(binfo));
- extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
- & BITMAP_GROUP_NBITS_MASK;
- if (extra != 0)
- bitmap[binfo->ngroups - 1] >>= extra;
-}
-
-#endif /* USE_TREE */
-
-size_t
-bitmap_size(const bitmap_info_t *binfo)
-{
-
- return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP);
-}
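
The bitmap.c hunk drops the flat (non-USE_TREE) variant and returns to deriving sizes from the multi-level tree. For reference, a standalone sketch of the flat variant's sizing and initialization as removed above, assuming one bit per region with a set bit meaning "free"; bits past nbits in the last group are cleared so a search can never hand them out:

    #include <limits.h>
    #include <stddef.h>
    #include <string.h>

    typedef unsigned long bitmap_group_t;
    #define GROUP_NBITS (sizeof(bitmap_group_t) * CHAR_BIT)

    static size_t
    flat_bitmap_ngroups(size_t nbits)
    {
        return ((nbits + GROUP_NBITS - 1) / GROUP_NBITS);
    }

    static void
    flat_bitmap_init(bitmap_group_t *bitmap, size_t nbits)
    {
        size_t ngroups = flat_bitmap_ngroups(nbits);
        size_t extra = (GROUP_NBITS - (nbits % GROUP_NBITS)) % GROUP_NBITS;

        /* All regions start free... */
        memset(bitmap, 0xff, ngroups * sizeof(bitmap_group_t));
        /* ...except the padding bits in the last group. */
        if (extra != 0)
            bitmap[ngroups - 1] >>= extra;
    }
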
diff --git a/deps/jemalloc/src/chunk.c b/deps/jemalloc/src/chunk.c
index c1c514a86..6ba1ca7a5 100644
--- a/deps/jemalloc/src/chunk.c
+++ b/deps/jemalloc/src/chunk.c
@@ -49,10 +49,9 @@ const chunk_hooks_t chunk_hooks_default = {
* definition.
*/
-static void chunk_record(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szsnad,
- extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, size_t sn,
- bool zeroed, bool committed);
+static void chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
+ void *chunk, size_t size, bool zeroed, bool committed);
/******************************************************************************/
@@ -64,23 +63,23 @@ chunk_hooks_get_locked(arena_t *arena)
}
chunk_hooks_t
-chunk_hooks_get(tsdn_t *tsdn, arena_t *arena)
+chunk_hooks_get(arena_t *arena)
{
chunk_hooks_t chunk_hooks;
- malloc_mutex_lock(tsdn, &arena->chunks_mtx);
+ malloc_mutex_lock(&arena->chunks_mtx);
chunk_hooks = chunk_hooks_get_locked(arena);
- malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+ malloc_mutex_unlock(&arena->chunks_mtx);
return (chunk_hooks);
}
chunk_hooks_t
-chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks)
+chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
{
chunk_hooks_t old_chunk_hooks;
- malloc_mutex_lock(tsdn, &arena->chunks_mtx);
+ malloc_mutex_lock(&arena->chunks_mtx);
old_chunk_hooks = arena->chunk_hooks;
/*
* Copy each field atomically so that it is impossible for readers to
@@ -105,14 +104,14 @@ chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks)
ATOMIC_COPY_HOOK(split);
ATOMIC_COPY_HOOK(merge);
#undef ATOMIC_COPY_HOOK
- malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+ malloc_mutex_unlock(&arena->chunks_mtx);
return (old_chunk_hooks);
}
static void
-chunk_hooks_assure_initialized_impl(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks, bool locked)
+chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ bool locked)
{
static const chunk_hooks_t uninitialized_hooks =
CHUNK_HOOKS_INITIALIZER;
@@ -120,28 +119,27 @@ chunk_hooks_assure_initialized_impl(tsdn_t *tsdn, arena_t *arena,
if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
0) {
*chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
- chunk_hooks_get(tsdn, arena);
+ chunk_hooks_get(arena);
}
}
static void
-chunk_hooks_assure_initialized_locked(tsdn_t *tsdn, arena_t *arena,
+chunk_hooks_assure_initialized_locked(arena_t *arena,
chunk_hooks_t *chunk_hooks)
{
- chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, true);
+ chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true);
}
static void
-chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena,
- chunk_hooks_t *chunk_hooks)
+chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks)
{
- chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, false);
+ chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false);
}
bool
-chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
+chunk_register(const void *chunk, const extent_node_t *node)
{
assert(extent_node_addr_get(node) == chunk);
@@ -161,7 +159,7 @@ chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
high = atomic_read_z(&highchunks);
}
if (cur > high && prof_gdump_get_unlocked())
- prof_gdump(tsdn);
+ prof_gdump();
}
return (false);
@@ -183,35 +181,33 @@ chunk_deregister(const void *chunk, const extent_node_t *node)
}
/*
- * Do first-best-fit chunk selection, i.e. select the oldest/lowest chunk that
- * best fits.
+ * Do first-best-fit chunk selection, i.e. select the lowest chunk that best
+ * fits.
*/
static extent_node_t *
-chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szsnad, size_t size)
+chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
+ extent_tree_t *chunks_ad, size_t size)
{
extent_node_t key;
assert(size == CHUNK_CEILING(size));
- extent_node_init(&key, arena, NULL, size, 0, false, false);
- return (extent_tree_szsnad_nsearch(chunks_szsnad, &key));
+ extent_node_init(&key, arena, NULL, size, false, false);
+ return (extent_tree_szad_nsearch(chunks_szad, &key));
}
static void *
-chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
- void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
- bool *commit, bool dalloc_node)
+chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
+ void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
+ bool dalloc_node)
{
void *ret;
extent_node_t *node;
size_t alloc_size, leadsize, trailsize;
bool zeroed, committed;
- assert(CHUNK_CEILING(size) == size);
- assert(alignment > 0);
assert(new_addr == NULL || alignment == chunksize);
- assert(CHUNK_ADDR2BASE(new_addr) == new_addr);
/*
* Cached chunks use the node linkage embedded in their headers, in
* which case dalloc_node is true, and new_addr is non-NULL because
@@ -219,23 +215,24 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
*/
assert(dalloc_node || new_addr != NULL);
- alloc_size = size + CHUNK_CEILING(alignment) - chunksize;
+ alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
/* Beware size_t wrap-around. */
if (alloc_size < size)
return (NULL);
- malloc_mutex_lock(tsdn, &arena->chunks_mtx);
- chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
+ malloc_mutex_lock(&arena->chunks_mtx);
+ chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
if (new_addr != NULL) {
extent_node_t key;
- extent_node_init(&key, arena, new_addr, alloc_size, 0, false,
+ extent_node_init(&key, arena, new_addr, alloc_size, false,
false);
node = extent_tree_ad_search(chunks_ad, &key);
} else {
- node = chunk_first_best_fit(arena, chunks_szsnad, alloc_size);
+ node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
+ alloc_size);
}
if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
size)) {
- malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+ malloc_mutex_unlock(&arena->chunks_mtx);
return (NULL);
}
leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
@@ -244,7 +241,6 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
assert(extent_node_size_get(node) >= leadsize + size);
trailsize = extent_node_size_get(node) - leadsize - size;
ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
- *sn = extent_node_sn_get(node);
zeroed = extent_node_zeroed_get(node);
if (zeroed)
*zero = true;
@@ -255,17 +251,17 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
if (leadsize != 0 &&
chunk_hooks->split(extent_node_addr_get(node),
extent_node_size_get(node), leadsize, size, false, arena->ind)) {
- malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+ malloc_mutex_unlock(&arena->chunks_mtx);
return (NULL);
}
/* Remove node from the tree. */
- extent_tree_szsnad_remove(chunks_szsnad, node);
+ extent_tree_szad_remove(chunks_szad, node);
extent_tree_ad_remove(chunks_ad, node);
arena_chunk_cache_maybe_remove(arena, node, cache);
if (leadsize != 0) {
/* Insert the leading space as a smaller chunk. */
extent_node_size_set(node, leadsize);
- extent_tree_szsnad_insert(chunks_szsnad, node);
+ extent_tree_szad_insert(chunks_szad, node);
extent_tree_ad_insert(chunks_ad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
node = NULL;
@@ -275,42 +271,41 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
if (chunk_hooks->split(ret, size + trailsize, size,
trailsize, false, arena->ind)) {
if (dalloc_node && node != NULL)
- arena_node_dalloc(tsdn, arena, node);
- malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
- chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad,
- chunks_ad, cache, ret, size + trailsize, *sn,
- zeroed, committed);
+ arena_node_dalloc(arena, node);
+ malloc_mutex_unlock(&arena->chunks_mtx);
+ chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad,
+ cache, ret, size + trailsize, zeroed, committed);
return (NULL);
}
/* Insert the trailing space as a smaller chunk. */
if (node == NULL) {
- node = arena_node_alloc(tsdn, arena);
+ node = arena_node_alloc(arena);
if (node == NULL) {
- malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
- chunk_record(tsdn, arena, chunk_hooks,
- chunks_szsnad, chunks_ad, cache, ret, size
- + trailsize, *sn, zeroed, committed);
+ malloc_mutex_unlock(&arena->chunks_mtx);
+ chunk_record(arena, chunk_hooks, chunks_szad,
+ chunks_ad, cache, ret, size + trailsize,
+ zeroed, committed);
return (NULL);
}
}
extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
- trailsize, *sn, zeroed, committed);
- extent_tree_szsnad_insert(chunks_szsnad, node);
+ trailsize, zeroed, committed);
+ extent_tree_szad_insert(chunks_szad, node);
extent_tree_ad_insert(chunks_ad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
node = NULL;
}
if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
- malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
- chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad, chunks_ad,
- cache, ret, size, *sn, zeroed, committed);
+ malloc_mutex_unlock(&arena->chunks_mtx);
+ chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache,
+ ret, size, zeroed, committed);
return (NULL);
}
- malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+ malloc_mutex_unlock(&arena->chunks_mtx);
assert(dalloc_node || node != NULL);
if (dalloc_node && node != NULL)
- arena_node_dalloc(tsdn, arena, node);
+ arena_node_dalloc(arena, node);
if (*zero) {
if (!zeroed)
memset(ret, 0, size);
@@ -318,11 +313,10 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
size_t i;
size_t *p = (size_t *)(uintptr_t)ret;
+ JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
for (i = 0; i < size / sizeof(size_t); i++)
assert(p[i] == 0);
}
- if (config_valgrind)
- JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
}
return (ret);
}
@@ -334,29 +328,39 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
* them if they are returned.
*/
static void *
-chunk_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
- size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec)
+chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
+ bool *zero, bool *commit, dss_prec_t dss_prec)
{
void *ret;
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
assert(size != 0);
assert((size & chunksize_mask) == 0);
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
+ /* Retained. */
+ if ((ret = chunk_recycle(arena, &chunk_hooks,
+ &arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
+ new_addr, size, alignment, zero, commit, true)) != NULL)
+ return (ret);
+
/* "primary" dss. */
if (have_dss && dss_prec == dss_prec_primary && (ret =
- chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
- commit)) != NULL)
- return (ret);
- /* mmap. */
- if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) !=
+ chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
NULL)
return (ret);
+ /*
+ * mmap. Requesting an address is not implemented for
+ * chunk_alloc_mmap(), so only call it if (new_addr == NULL).
+ */
+ if (new_addr == NULL && (ret = chunk_alloc_mmap(size, alignment, zero,
+ commit)) != NULL)
+ return (ret);
/* "secondary" dss. */
if (have_dss && dss_prec == dss_prec_secondary && (ret =
- chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
- commit)) != NULL)
+ chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
+ NULL)
return (ret);
/* All strategies for allocation failed. */
@@ -376,7 +380,7 @@ chunk_alloc_base(size_t size)
*/
zero = true;
commit = true;
- ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit);
+ ret = chunk_alloc_mmap(size, chunksize, &zero, &commit);
if (ret == NULL)
return (NULL);
if (config_valgrind)
@@ -386,33 +390,37 @@ chunk_alloc_base(size_t size)
}
void *
-chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
- bool *commit, bool dalloc_node)
+chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
+ size_t size, size_t alignment, bool *zero, bool dalloc_node)
{
void *ret;
+ bool commit;
assert(size != 0);
assert((size & chunksize_mask) == 0);
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
- ret = chunk_recycle(tsdn, arena, chunk_hooks,
- &arena->chunks_szsnad_cached, &arena->chunks_ad_cached, true,
- new_addr, size, alignment, sn, zero, commit, dalloc_node);
+ commit = true;
+ ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached,
+ &arena->chunks_ad_cached, true, new_addr, size, alignment, zero,
+ &commit, dalloc_node);
if (ret == NULL)
return (NULL);
+ assert(commit);
if (config_valgrind)
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
return (ret);
}
static arena_t *
-chunk_arena_get(tsdn_t *tsdn, unsigned arena_ind)
+chunk_arena_get(unsigned arena_ind)
{
arena_t *arena;
- arena = arena_get(tsdn, arena_ind, false);
+ /* Dodge tsd for a0 in order to avoid bootstrapping issues. */
+ arena = (arena_ind == 0) ? a0get() : arena_get(tsd_fetch(), arena_ind,
+ false, true);
/*
* The arena we're allocating on behalf of must have been initialized
* already.
@@ -422,12 +430,14 @@ chunk_arena_get(tsdn_t *tsdn, unsigned arena_ind)
}
static void *
-chunk_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
- size_t size, size_t alignment, bool *zero, bool *commit)
+chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
+ bool *commit, unsigned arena_ind)
{
void *ret;
+ arena_t *arena;
- ret = chunk_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
+ arena = chunk_arena_get(arena_ind);
+ ret = chunk_alloc_core(arena, new_addr, size, alignment, zero,
commit, arena->dss_prec);
if (ret == NULL)
return (NULL);
@@ -437,80 +447,26 @@ chunk_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
return (ret);
}
-static void *
-chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
- bool *commit, unsigned arena_ind)
-{
- tsdn_t *tsdn;
- arena_t *arena;
-
- tsdn = tsdn_fetch();
- arena = chunk_arena_get(tsdn, arena_ind);
-
- return (chunk_alloc_default_impl(tsdn, arena, new_addr, size, alignment,
- zero, commit));
-}
-
-static void *
-chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
- bool *commit)
-{
- void *ret;
-
- assert(size != 0);
- assert((size & chunksize_mask) == 0);
- assert(alignment != 0);
- assert((alignment & chunksize_mask) == 0);
-
- ret = chunk_recycle(tsdn, arena, chunk_hooks,
- &arena->chunks_szsnad_retained, &arena->chunks_ad_retained, false,
- new_addr, size, alignment, sn, zero, commit, true);
-
- if (config_stats && ret != NULL)
- arena->stats.retained -= size;
-
- return (ret);
-}
-
void *
-chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
- bool *commit)
+chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
+ size_t size, size_t alignment, bool *zero, bool *commit)
{
void *ret;
- chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
-
- ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
- alignment, sn, zero, commit);
- if (ret == NULL) {
- if (chunk_hooks->alloc == chunk_alloc_default) {
- /* Call directly to propagate tsdn. */
- ret = chunk_alloc_default_impl(tsdn, arena, new_addr,
- size, alignment, zero, commit);
- } else {
- ret = chunk_hooks->alloc(new_addr, size, alignment,
- zero, commit, arena->ind);
- }
-
- if (ret == NULL)
- return (NULL);
-
- *sn = arena_extent_sn_next(arena);
-
- if (config_valgrind && chunk_hooks->alloc !=
- chunk_alloc_default)
- JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
- }
-
+ chunk_hooks_assure_initialized(arena, chunk_hooks);
+ ret = chunk_hooks->alloc(new_addr, size, alignment, zero, commit,
+ arena->ind);
+ if (ret == NULL)
+ return (NULL);
+ if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
return (ret);
}
static void
-chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
- void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
+chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
+ extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
+ void *chunk, size_t size, bool zeroed, bool committed)
{
bool unzeroed;
extent_node_t *node, *prev;
@@ -520,9 +476,9 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
unzeroed = cache || !zeroed;
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
- malloc_mutex_lock(tsdn, &arena->chunks_mtx);
- chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
- extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, 0,
+ malloc_mutex_lock(&arena->chunks_mtx);
+ chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
+ extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
false, false);
node = extent_tree_ad_nsearch(chunks_ad, &key);
/* Try to coalesce forward. */
@@ -534,21 +490,19 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
/*
* Coalesce chunk with the following address range. This does
* not change the position within chunks_ad, so only
- * remove/insert from/into chunks_szsnad.
+ * remove/insert from/into chunks_szad.
*/
- extent_tree_szsnad_remove(chunks_szsnad, node);
+ extent_tree_szad_remove(chunks_szad, node);
arena_chunk_cache_maybe_remove(arena, node, cache);
extent_node_addr_set(node, chunk);
extent_node_size_set(node, size + extent_node_size_get(node));
- if (sn < extent_node_sn_get(node))
- extent_node_sn_set(node, sn);
extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
!unzeroed);
- extent_tree_szsnad_insert(chunks_szsnad, node);
+ extent_tree_szad_insert(chunks_szad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
} else {
/* Coalescing forward failed, so insert a new node. */
- node = arena_node_alloc(tsdn, arena);
+ node = arena_node_alloc(arena);
if (node == NULL) {
/*
* Node allocation failed, which is an exceedingly
@@ -557,15 +511,15 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
* a virtual memory leak.
*/
if (cache) {
- chunk_purge_wrapper(tsdn, arena, chunk_hooks,
- chunk, size, 0, size);
+ chunk_purge_wrapper(arena, chunk_hooks, chunk,
+ size, 0, size);
}
goto label_return;
}
- extent_node_init(node, arena, chunk, size, sn, !unzeroed,
+ extent_node_init(node, arena, chunk, size, !unzeroed,
committed);
extent_tree_ad_insert(chunks_ad, node);
- extent_tree_szsnad_insert(chunks_szsnad, node);
+ extent_tree_szad_insert(chunks_szad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
}
@@ -579,33 +533,31 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
/*
* Coalesce chunk with the previous address range. This does
* not change the position within chunks_ad, so only
- * remove/insert node from/into chunks_szsnad.
+ * remove/insert node from/into chunks_szad.
*/
- extent_tree_szsnad_remove(chunks_szsnad, prev);
+ extent_tree_szad_remove(chunks_szad, prev);
extent_tree_ad_remove(chunks_ad, prev);
arena_chunk_cache_maybe_remove(arena, prev, cache);
- extent_tree_szsnad_remove(chunks_szsnad, node);
+ extent_tree_szad_remove(chunks_szad, node);
arena_chunk_cache_maybe_remove(arena, node, cache);
extent_node_addr_set(node, extent_node_addr_get(prev));
extent_node_size_set(node, extent_node_size_get(prev) +
extent_node_size_get(node));
- if (extent_node_sn_get(prev) < extent_node_sn_get(node))
- extent_node_sn_set(node, extent_node_sn_get(prev));
extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
extent_node_zeroed_get(node));
- extent_tree_szsnad_insert(chunks_szsnad, node);
+ extent_tree_szad_insert(chunks_szad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
- arena_node_dalloc(tsdn, arena, prev);
+ arena_node_dalloc(arena, prev);
}
label_return:
- malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+ malloc_mutex_unlock(&arena->chunks_mtx);
}
void
-chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *chunk, size_t size, size_t sn, bool committed)
+chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
+ size_t size, bool committed)
{
assert(chunk != NULL);
@@ -613,49 +565,24 @@ chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
assert(size != 0);
assert((size & chunksize_mask) == 0);
- chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_cached,
- &arena->chunks_ad_cached, true, chunk, size, sn, false,
- committed);
- arena_maybe_purge(tsdn, arena);
-}
-
-static bool
-chunk_dalloc_default_impl(void *chunk, size_t size)
-{
-
- if (!have_dss || !chunk_in_dss(chunk))
- return (chunk_dalloc_mmap(chunk, size));
- return (true);
-}
-
-static bool
-chunk_dalloc_default(void *chunk, size_t size, bool committed,
- unsigned arena_ind)
-{
-
- return (chunk_dalloc_default_impl(chunk, size));
+ chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached,
+ &arena->chunks_ad_cached, true, chunk, size, false, committed);
+ arena_maybe_purge(arena);
}
void
-chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
+chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
+ size_t size, bool zeroed, bool committed)
{
- bool err;
assert(chunk != NULL);
assert(CHUNK_ADDR2BASE(chunk) == chunk);
assert(size != 0);
assert((size & chunksize_mask) == 0);
- chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
+ chunk_hooks_assure_initialized(arena, chunk_hooks);
/* Try to deallocate. */
- if (chunk_hooks->dalloc == chunk_dalloc_default) {
- /* Call directly to propagate tsdn. */
- err = chunk_dalloc_default_impl(chunk, size);
- } else
- err = chunk_hooks->dalloc(chunk, size, committed, arena->ind);
-
- if (!err)
+ if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind))
return;
/* Try to decommit; purge if that fails. */
if (committed) {
@@ -664,12 +591,29 @@ chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
}
zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
arena->ind);
- chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_retained,
- &arena->chunks_ad_retained, false, chunk, size, sn, zeroed,
- committed);
+ chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained,
+ &arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
+}
+
+static bool
+chunk_dalloc_default(void *chunk, size_t size, bool committed,
+ unsigned arena_ind)
+{
- if (config_stats)
- arena->stats.retained += size;
+ if (!have_dss || !chunk_in_dss(chunk))
+ return (chunk_dalloc_mmap(chunk, size));
+ return (true);
+}
+
+void
+chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
+ size_t size, bool committed)
+{
+
+ chunk_hooks_assure_initialized(arena, chunk_hooks);
+ chunk_hooks->dalloc(chunk, size, committed, arena->ind);
+ if (config_valgrind && chunk_hooks->dalloc != chunk_dalloc_default)
+ JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
}
static bool
@@ -690,9 +634,8 @@ chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
length));
}
-static bool
-chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
- unsigned arena_ind)
+bool
+chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
{
assert(chunk != NULL);
@@ -705,12 +648,21 @@ chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
length));
}
+static bool
+chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
+ unsigned arena_ind)
+{
+
+ return (chunk_purge_arena(chunk_arena_get(arena_ind), chunk, offset,
+ length));
+}
+
bool
-chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *chunk, size_t size, size_t offset, size_t length)
+chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
+ size_t size, size_t offset, size_t length)
{
- chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
+ chunk_hooks_assure_initialized(arena, chunk_hooks);
return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
}
@@ -725,30 +677,23 @@ chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
}
static bool
-chunk_merge_default_impl(void *chunk_a, void *chunk_b)
+chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
+ bool committed, unsigned arena_ind)
{
if (!maps_coalesce)
return (true);
- if (have_dss && !chunk_dss_mergeable(chunk_a, chunk_b))
+ if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b))
return (true);
return (false);
}
-static bool
-chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
- bool committed, unsigned arena_ind)
-{
-
- return (chunk_merge_default_impl(chunk_a, chunk_b));
-}
-
static rtree_node_elm_t *
chunks_rtree_node_alloc(size_t nelms)
{
- return ((rtree_node_elm_t *)base_alloc(TSDN_NULL, nelms *
+ return ((rtree_node_elm_t *)base_alloc(nelms *
sizeof(rtree_node_elm_t)));
}
@@ -771,7 +716,7 @@ chunk_boot(void)
* so pages_map will always take fast path.
*/
if (!opt_lg_chunk) {
- opt_lg_chunk = ffs_u((unsigned)info.dwAllocationGranularity)
+ opt_lg_chunk = jemalloc_ffs((int)info.dwAllocationGranularity)
- 1;
}
#else
@@ -785,11 +730,32 @@ chunk_boot(void)
chunksize_mask = chunksize - 1;
chunk_npages = (chunksize >> LG_PAGE);
- if (have_dss)
- chunk_dss_boot();
- if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
- opt_lg_chunk), chunks_rtree_node_alloc, NULL))
+ if (have_dss && chunk_dss_boot())
+ return (true);
+ if (rtree_new(&chunks_rtree, (ZU(1) << (LG_SIZEOF_PTR+3)) -
+ opt_lg_chunk, chunks_rtree_node_alloc, NULL))
return (true);
return (false);
}
+
+void
+chunk_prefork(void)
+{
+
+ chunk_dss_prefork();
+}
+
+void
+chunk_postfork_parent(void)
+{
+
+ chunk_dss_postfork_parent();
+}
+
+void
+chunk_postfork_child(void)
+{
+
+ chunk_dss_postfork_child();
+}
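
chunk_recycle() above carves an aligned request out of a recycled extent and reinserts the leading and trailing remainders into the size/address trees. The split arithmetic on its own, assuming a power-of-two alignment (the real code additionally drives the split/commit chunk hooks and unwinds on failure):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Round x up to a multiple of the power-of-two alignment a. */
    #define ALIGNMENT_CEILING(x, a) (((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

    static void
    split_extent(uintptr_t addr, size_t extent_size, size_t size,
        size_t alignment, uintptr_t *ret, size_t *leadsize, size_t *trailsize)
    {
        *ret = ALIGNMENT_CEILING(addr, alignment);
        *leadsize = (size_t)(*ret - addr);            /* unaligned head */
        assert(extent_size >= *leadsize + size);
        *trailsize = extent_size - *leadsize - size;  /* leftover tail */
    }
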
diff --git a/deps/jemalloc/src/chunk_dss.c b/deps/jemalloc/src/chunk_dss.c
index ee3f83888..61fc91696 100644
--- a/deps/jemalloc/src/chunk_dss.c
+++ b/deps/jemalloc/src/chunk_dss.c
@@ -10,19 +10,20 @@ const char *dss_prec_names[] = {
"N/A"
};
+/* Current dss precedence default, used when creating new arenas. */
+static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT;
+
/*
- * Current dss precedence default, used when creating new arenas. NB: This is
- * stored as unsigned rather than dss_prec_t because in principle there's no
- * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use
- * atomic operations to synchronize the setting.
+ * Protects sbrk() calls. This avoids malloc races among threads, though it
+ * does not protect against races with threads that call sbrk() directly.
*/
-static unsigned dss_prec_default = (unsigned)DSS_PREC_DEFAULT;
+static malloc_mutex_t dss_mtx;
/* Base address of the DSS. */
static void *dss_base;
-/* Atomic boolean indicating whether the DSS is exhausted. */
-static unsigned dss_exhausted;
-/* Atomic current upper limit on DSS addresses. */
+/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
+static void *dss_prev;
+/* Current upper limit on DSS addresses. */
static void *dss_max;
/******************************************************************************/
@@ -46,7 +47,9 @@ chunk_dss_prec_get(void)
if (!have_dss)
return (dss_prec_disabled);
- ret = (dss_prec_t)atomic_read_u(&dss_prec_default);
+ malloc_mutex_lock(&dss_mtx);
+ ret = dss_prec_default;
+ malloc_mutex_unlock(&dss_mtx);
return (ret);
}
@@ -56,46 +59,15 @@ chunk_dss_prec_set(dss_prec_t dss_prec)
if (!have_dss)
return (dss_prec != dss_prec_disabled);
- atomic_write_u(&dss_prec_default, (unsigned)dss_prec);
+ malloc_mutex_lock(&dss_mtx);
+ dss_prec_default = dss_prec;
+ malloc_mutex_unlock(&dss_mtx);
return (false);
}
-static void *
-chunk_dss_max_update(void *new_addr)
-{
- void *max_cur;
- spin_t spinner;
-
- /*
- * Get the current end of the DSS as max_cur and assure that dss_max is
- * up to date.
- */
- spin_init(&spinner);
- while (true) {
- void *max_prev = atomic_read_p(&dss_max);
-
- max_cur = chunk_dss_sbrk(0);
- if ((uintptr_t)max_prev > (uintptr_t)max_cur) {
- /*
- * Another thread optimistically updated dss_max. Wait
- * for it to finish.
- */
- spin_adaptive(&spinner);
- continue;
- }
- if (!atomic_cas_p(&dss_max, max_prev, max_cur))
- break;
- }
- /* Fixed new_addr can only be supported if it is at the edge of DSS. */
- if (new_addr != NULL && max_cur != new_addr)
- return (NULL);
-
- return (max_cur);
-}
-
void *
-chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
- size_t alignment, bool *zero, bool *commit)
+chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
+ bool *zero, bool *commit)
{
cassert(have_dss);
assert(size > 0 && (size & chunksize_mask) == 0);
@@ -108,20 +80,28 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
if ((intptr_t)size < 0)
return (NULL);
- if (!atomic_read_u(&dss_exhausted)) {
+ malloc_mutex_lock(&dss_mtx);
+ if (dss_prev != (void *)-1) {
+
/*
* The loop is necessary to recover from races with other
* threads that are using the DSS for something other than
* malloc.
*/
- while (true) {
- void *ret, *cpad, *max_cur, *dss_next, *dss_prev;
+ do {
+ void *ret, *cpad, *dss_next;
size_t gap_size, cpad_size;
intptr_t incr;
+ /* Avoid an unnecessary system call. */
+ if (new_addr != NULL && dss_max != new_addr)
+ break;
+
+ /* Get the current end of the DSS. */
+ dss_max = chunk_dss_sbrk(0);
- max_cur = chunk_dss_max_update(new_addr);
- if (max_cur == NULL)
- goto label_oom;
+ /* Make sure the earlier condition still holds. */
+ if (new_addr != NULL && dss_max != new_addr)
+ break;
/*
* Calculate how much padding is necessary to
@@ -140,29 +120,22 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
dss_next = (void *)((uintptr_t)ret + size);
if ((uintptr_t)ret < (uintptr_t)dss_max ||
- (uintptr_t)dss_next < (uintptr_t)dss_max)
- goto label_oom; /* Wrap-around. */
+ (uintptr_t)dss_next < (uintptr_t)dss_max) {
+ /* Wrap-around. */
+ malloc_mutex_unlock(&dss_mtx);
+ return (NULL);
+ }
incr = gap_size + cpad_size + size;
-
- /*
- * Optimistically update dss_max, and roll back below if
- * sbrk() fails. No other thread will try to extend the
- * DSS while dss_max is greater than the current DSS
- * max reported by sbrk(0).
- */
- if (atomic_cas_p(&dss_max, max_cur, dss_next))
- continue;
-
- /* Try to allocate. */
dss_prev = chunk_dss_sbrk(incr);
- if (dss_prev == max_cur) {
+ if (dss_prev == dss_max) {
/* Success. */
+ dss_max = dss_next;
+ malloc_mutex_unlock(&dss_mtx);
if (cpad_size != 0) {
chunk_hooks_t chunk_hooks =
CHUNK_HOOKS_INITIALIZER;
- chunk_dalloc_wrapper(tsdn, arena,
+ chunk_dalloc_wrapper(arena,
&chunk_hooks, cpad, cpad_size,
- arena_extent_sn_next(arena), false,
true);
}
if (*zero) {
@@ -174,65 +147,68 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
*commit = pages_decommit(ret, size);
return (ret);
}
-
- /*
- * Failure, whether due to OOM or a race with a raw
- * sbrk() call from outside the allocator. Try to roll
- * back optimistic dss_max update; if rollback fails,
- * it's due to another caller of this function having
- * succeeded since this invocation started, in which
- * case rollback is not necessary.
- */
- atomic_cas_p(&dss_max, dss_next, max_cur);
- if (dss_prev == (void *)-1) {
- /* OOM. */
- atomic_write_u(&dss_exhausted, (unsigned)true);
- goto label_oom;
- }
- }
+ } while (dss_prev != (void *)-1);
}
-label_oom:
- return (NULL);
-}
-
-static bool
-chunk_in_dss_helper(void *chunk, void *max)
-{
+ malloc_mutex_unlock(&dss_mtx);
- return ((uintptr_t)chunk >= (uintptr_t)dss_base && (uintptr_t)chunk <
- (uintptr_t)max);
+ return (NULL);
}
bool
chunk_in_dss(void *chunk)
{
+ bool ret;
cassert(have_dss);
- return (chunk_in_dss_helper(chunk, atomic_read_p(&dss_max)));
+ malloc_mutex_lock(&dss_mtx);
+ if ((uintptr_t)chunk >= (uintptr_t)dss_base
+ && (uintptr_t)chunk < (uintptr_t)dss_max)
+ ret = true;
+ else
+ ret = false;
+ malloc_mutex_unlock(&dss_mtx);
+
+ return (ret);
}
bool
-chunk_dss_mergeable(void *chunk_a, void *chunk_b)
+chunk_dss_boot(void)
{
- void *max;
cassert(have_dss);
- max = atomic_read_p(&dss_max);
- return (chunk_in_dss_helper(chunk_a, max) ==
- chunk_in_dss_helper(chunk_b, max));
+ if (malloc_mutex_init(&dss_mtx))
+ return (true);
+ dss_base = chunk_dss_sbrk(0);
+ dss_prev = dss_base;
+ dss_max = dss_base;
+
+ return (false);
}
void
-chunk_dss_boot(void)
+chunk_dss_prefork(void)
{
- cassert(have_dss);
+ if (have_dss)
+ malloc_mutex_prefork(&dss_mtx);
+}
- dss_base = chunk_dss_sbrk(0);
- dss_exhausted = (unsigned)(dss_base == (void *)-1);
- dss_max = dss_base;
+void
+chunk_dss_postfork_parent(void)
+{
+
+ if (have_dss)
+ malloc_mutex_postfork_parent(&dss_mtx);
+}
+
+void
+chunk_dss_postfork_child(void)
+{
+
+ if (have_dss)
+ malloc_mutex_postfork_child(&dss_mtx);
}
/******************************************************************************/
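
The chunk_dss.c revert goes back to serializing sbrk() behind dss_mtx instead of the lock-free scheme. Reduced to its core, the sbrk() path looks roughly like the sketch below, assuming a power-of-two alignment, a single caller, and none of the wrap-around checks or gap (cpad) recycling the real loop performs:

    #include <stdint.h>
    #include <unistd.h>

    static void *
    dss_alloc(size_t size, size_t alignment)
    {
        void *max = sbrk(0);            /* current end of the data segment */
        uintptr_t gap = (alignment - ((uintptr_t)max & (alignment - 1))) &
            (alignment - 1);            /* padding up to the next boundary */

        if (sbrk((intptr_t)(gap + size)) == (void *)-1)
            return (NULL);              /* DSS exhausted */
        return ((void *)((uintptr_t)max + gap));
    }
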
diff --git a/deps/jemalloc/src/chunk_mmap.c b/deps/jemalloc/src/chunk_mmap.c
index 73fc497af..b9ba74191 100644
--- a/deps/jemalloc/src/chunk_mmap.c
+++ b/deps/jemalloc/src/chunk_mmap.c
@@ -16,22 +16,23 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
do {
void *pages;
size_t leadsize;
- pages = pages_map(NULL, alloc_size, commit);
+ pages = pages_map(NULL, alloc_size);
if (pages == NULL)
return (NULL);
leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
(uintptr_t)pages;
- ret = pages_trim(pages, alloc_size, leadsize, size, commit);
+ ret = pages_trim(pages, alloc_size, leadsize, size);
} while (ret == NULL);
assert(ret != NULL);
*zero = true;
+ if (!*commit)
+ *commit = pages_decommit(ret, size);
return (ret);
}
void *
-chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
- bool *commit)
+chunk_alloc_mmap(size_t size, size_t alignment, bool *zero, bool *commit)
{
void *ret;
size_t offset;
@@ -52,10 +53,9 @@ chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
- ret = pages_map(new_addr, size, commit);
- if (ret == NULL || ret == new_addr)
- return (ret);
- assert(new_addr == NULL);
+ ret = pages_map(NULL, size);
+ if (ret == NULL)
+ return (NULL);
offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
if (offset != 0) {
pages_unmap(ret, size);
@@ -64,6 +64,8 @@ chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
assert(ret != NULL);
*zero = true;
+ if (!*commit)
+ *commit = pages_decommit(ret, size);
return (ret);
}
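
chunk_alloc_mmap() first tries a plain mapping and only falls back to the slow path when the returned address is not chunk-aligned. That slow path's over-map-and-trim trick, sketched for Linux (MAP_ANONYMOUS) with an assumed 4 KiB page and power-of-two alignment; the real code loops because the trim itself can fail on some platforms:

    #include <stdint.h>
    #include <sys/mman.h>

    #define PAGE ((size_t)4096)         /* assumption: 4 KiB pages */

    static void *
    mmap_aligned(size_t size, size_t alignment)
    {
        size_t alloc_size = size + alignment - PAGE;
        char *pages, *ret;
        size_t leadsize, trailsize;

        pages = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (pages == MAP_FAILED)
            return (NULL);
        ret = (char *)(((uintptr_t)pages + alignment - 1) &
            ~((uintptr_t)alignment - 1));
        leadsize = (size_t)(ret - pages);
        trailsize = alloc_size - leadsize - size;
        if (leadsize != 0)
            munmap(pages, leadsize);        /* drop the unaligned head */
        if (trailsize != 0)
            munmap(ret + size, trailsize);  /* drop the unused tail */
        return (ret);
    }
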
diff --git a/deps/jemalloc/src/ckh.c b/deps/jemalloc/src/ckh.c
index 159bd8ae1..53a1c1ef1 100644
--- a/deps/jemalloc/src/ckh.c
+++ b/deps/jemalloc/src/ckh.c
@@ -99,8 +99,7 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
* Cycle through the cells in the bucket, starting at a random position.
* The randomness avoids worst-case search overhead as buckets fill up.
*/
- offset = (unsigned)prng_lg_range_u64(&ckh->prng_state,
- LG_CKH_BUCKET_CELLS);
+ prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
@@ -142,8 +141,7 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
* were an item for which both hashes indicated the same
* bucket.
*/
- i = (unsigned)prng_lg_range_u64(&ckh->prng_state,
- LG_CKH_BUCKET_CELLS);
+ prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
assert(cell->key != NULL);
@@ -249,7 +247,8 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
{
bool ret;
ckhc_t *tab, *ttab;
- unsigned lg_prevbuckets, lg_curcells;
+ size_t lg_curcells;
+ unsigned lg_prevbuckets;
#ifdef CKH_COUNT
ckh->ngrows++;
@@ -267,12 +266,12 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
lg_curcells++;
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
- if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
+ if (usize == 0) {
ret = true;
goto label_return;
}
- tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE,
- true, NULL, true, arena_ichoose(tsd, NULL));
+ tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL,
+ true, NULL);
if (tab == NULL) {
ret = true;
goto label_return;
@@ -284,12 +283,12 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (!ckh_rebuild(ckh, tab)) {
- idalloctm(tsd_tsdn(tsd), tab, NULL, true, true);
+ idalloctm(tsd, tab, tcache_get(tsd, false), true);
break;
}
/* Rebuilding failed, so back out partially rebuilt table. */
- idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true);
+ idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
}
@@ -303,8 +302,8 @@ static void
ckh_shrink(tsd_t *tsd, ckh_t *ckh)
{
ckhc_t *tab, *ttab;
- size_t usize;
- unsigned lg_prevbuckets, lg_curcells;
+ size_t lg_curcells, usize;
+ unsigned lg_prevbuckets;
/*
* It is possible (though unlikely, given well behaved hashes) that the
@@ -313,10 +312,10 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
lg_prevbuckets = ckh->lg_curbuckets;
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
- if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
+ if (usize == 0)
return;
- tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL,
- true, arena_ichoose(tsd, NULL));
+ tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
+ NULL);
if (tab == NULL) {
/*
* An OOM error isn't worth propagating, since it doesn't
@@ -331,7 +330,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (!ckh_rebuild(ckh, tab)) {
- idalloctm(tsd_tsdn(tsd), tab, NULL, true, true);
+ idalloctm(tsd, tab, tcache_get(tsd, false), true);
#ifdef CKH_COUNT
ckh->nshrinks++;
#endif
@@ -339,7 +338,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
}
/* Rebuilding failed, so back out partially rebuilt table. */
- idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true);
+ idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
#ifdef CKH_COUNT
@@ -388,12 +387,12 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ckh->keycomp = keycomp;
usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
- if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
+ if (usize == 0) {
ret = true;
goto label_return;
}
- ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true,
- NULL, true, arena_ichoose(tsd, NULL));
+ ckh->tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
+ NULL);
if (ckh->tab == NULL) {
ret = true;
goto label_return;
@@ -422,9 +421,9 @@ ckh_delete(tsd_t *tsd, ckh_t *ckh)
(unsigned long long)ckh->nrelocs);
#endif
- idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true);
+ idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
if (config_debug)
- memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
+ memset(ckh, 0x5a, sizeof(ckh_t));
}
size_t
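
In ckh.c the revert swaps prng_lg_range_u64() back to the older prng32() macro for choosing the starting cell within a cuckoo-hash bucket; the random start is what keeps full buckets from always being probed in the same order. The idea, sketched with an ordinary 64-bit LCG; the constants and the 4-cells-per-bucket figure are illustrative assumptions, not jemalloc's exact parameters:

    #include <stdint.h>

    #define LG_BUCKET_CELLS 2           /* assumption: 4 cells per bucket */

    static unsigned
    random_cell_offset(uint64_t *state)
    {
        /* Knuth's MMIX LCG step; any decent PRNG works here. */
        *state = *state * 6364136223846793005ULL + 1442695040888963407ULL;
        return ((unsigned)(*state >> 32) & ((1U << LG_BUCKET_CELLS) - 1));
    }

Insertion then probes cells (offset + i) mod 4, mirroring the loop over (i + offset) in ckh_try_bucket_insert() above.
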
diff --git a/deps/jemalloc/src/ctl.c b/deps/jemalloc/src/ctl.c
index bc78b2055..3de8e602d 100644
--- a/deps/jemalloc/src/ctl.c
+++ b/deps/jemalloc/src/ctl.c
@@ -24,7 +24,7 @@ ctl_named_node(const ctl_node_t *node)
}
JEMALLOC_INLINE_C const ctl_named_node_t *
-ctl_named_children(const ctl_named_node_t *node, size_t index)
+ctl_named_children(const ctl_named_node_t *node, int index)
{
const ctl_named_node_t *children = ctl_named_node(node->children);
@@ -42,25 +42,25 @@ ctl_indexed_node(const ctl_node_t *node)
/* Function prototypes for non-inline static functions. */
#define CTL_PROTO(n) \
-static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
- void *oldp, size_t *oldlenp, void *newp, size_t newlen);
+static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen);
#define INDEX_PROTO(n) \
-static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \
- const size_t *mib, size_t miblen, size_t i);
+static const ctl_named_node_t *n##_index(const size_t *mib, \
+ size_t miblen, size_t i);
static bool ctl_arena_init(ctl_arena_stats_t *astats);
static void ctl_arena_clear(ctl_arena_stats_t *astats);
-static void ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats,
+static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats,
arena_t *arena);
static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
ctl_arena_stats_t *astats);
-static void ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i);
-static bool ctl_grow(tsdn_t *tsdn);
-static void ctl_refresh(tsdn_t *tsdn);
-static bool ctl_init(tsdn_t *tsdn);
-static int ctl_lookup(tsdn_t *tsdn, const char *name,
- ctl_node_t const **nodesp, size_t *mibp, size_t *depthp);
+static void ctl_arena_refresh(arena_t *arena, unsigned i);
+static bool ctl_grow(void);
+static void ctl_refresh(void);
+static bool ctl_init(void);
+static int ctl_lookup(const char *name, ctl_node_t const **nodesp,
+ size_t *mibp, size_t *depthp);
CTL_PROTO(version)
CTL_PROTO(epoch)
@@ -77,7 +77,6 @@ CTL_PROTO(config_cache_oblivious)
CTL_PROTO(config_debug)
CTL_PROTO(config_fill)
CTL_PROTO(config_lazy_lock)
-CTL_PROTO(config_malloc_conf)
CTL_PROTO(config_munmap)
CTL_PROTO(config_prof)
CTL_PROTO(config_prof_libgcc)
@@ -92,9 +91,7 @@ CTL_PROTO(opt_abort)
CTL_PROTO(opt_dss)
CTL_PROTO(opt_lg_chunk)
CTL_PROTO(opt_narenas)
-CTL_PROTO(opt_purge)
CTL_PROTO(opt_lg_dirty_mult)
-CTL_PROTO(opt_decay_time)
CTL_PROTO(opt_stats_print)
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
@@ -117,13 +114,10 @@ CTL_PROTO(opt_prof_accum)
CTL_PROTO(tcache_create)
CTL_PROTO(tcache_flush)
CTL_PROTO(tcache_destroy)
-static void arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all);
CTL_PROTO(arena_i_purge)
-CTL_PROTO(arena_i_decay)
-CTL_PROTO(arena_i_reset)
+static void arena_purge(unsigned arena_ind);
CTL_PROTO(arena_i_dss)
CTL_PROTO(arena_i_lg_dirty_mult)
-CTL_PROTO(arena_i_decay_time)
CTL_PROTO(arena_i_chunk_hooks)
INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size)
@@ -137,7 +131,6 @@ INDEX_PROTO(arenas_hchunk_i)
CTL_PROTO(arenas_narenas)
CTL_PROTO(arenas_initialized)
CTL_PROTO(arenas_lg_dirty_mult)
-CTL_PROTO(arenas_decay_time)
CTL_PROTO(arenas_quantum)
CTL_PROTO(arenas_page)
CTL_PROTO(arenas_tcache_max)
@@ -188,11 +181,9 @@ INDEX_PROTO(stats_arenas_i_hchunks_j)
CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_dss)
CTL_PROTO(stats_arenas_i_lg_dirty_mult)
-CTL_PROTO(stats_arenas_i_decay_time)
CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty)
CTL_PROTO(stats_arenas_i_mapped)
-CTL_PROTO(stats_arenas_i_retained)
CTL_PROTO(stats_arenas_i_npurge)
CTL_PROTO(stats_arenas_i_nmadvise)
CTL_PROTO(stats_arenas_i_purged)
@@ -205,7 +196,6 @@ CTL_PROTO(stats_active)
CTL_PROTO(stats_metadata)
CTL_PROTO(stats_resident)
CTL_PROTO(stats_mapped)
-CTL_PROTO(stats_retained)
/******************************************************************************/
/* mallctl tree. */
@@ -251,7 +241,6 @@ static const ctl_named_node_t config_node[] = {
{NAME("debug"), CTL(config_debug)},
{NAME("fill"), CTL(config_fill)},
{NAME("lazy_lock"), CTL(config_lazy_lock)},
- {NAME("malloc_conf"), CTL(config_malloc_conf)},
{NAME("munmap"), CTL(config_munmap)},
{NAME("prof"), CTL(config_prof)},
{NAME("prof_libgcc"), CTL(config_prof_libgcc)},
@@ -269,9 +258,7 @@ static const ctl_named_node_t opt_node[] = {
{NAME("dss"), CTL(opt_dss)},
{NAME("lg_chunk"), CTL(opt_lg_chunk)},
{NAME("narenas"), CTL(opt_narenas)},
- {NAME("purge"), CTL(opt_purge)},
{NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)},
- {NAME("decay_time"), CTL(opt_decay_time)},
{NAME("stats_print"), CTL(opt_stats_print)},
{NAME("junk"), CTL(opt_junk)},
{NAME("zero"), CTL(opt_zero)},
@@ -301,11 +288,8 @@ static const ctl_named_node_t tcache_node[] = {
static const ctl_named_node_t arena_i_node[] = {
{NAME("purge"), CTL(arena_i_purge)},
- {NAME("decay"), CTL(arena_i_decay)},
- {NAME("reset"), CTL(arena_i_reset)},
{NAME("dss"), CTL(arena_i_dss)},
{NAME("lg_dirty_mult"), CTL(arena_i_lg_dirty_mult)},
- {NAME("decay_time"), CTL(arena_i_decay_time)},
{NAME("chunk_hooks"), CTL(arena_i_chunk_hooks)}
};
static const ctl_named_node_t super_arena_i_node[] = {
@@ -355,7 +339,6 @@ static const ctl_named_node_t arenas_node[] = {
{NAME("narenas"), CTL(arenas_narenas)},
{NAME("initialized"), CTL(arenas_initialized)},
{NAME("lg_dirty_mult"), CTL(arenas_lg_dirty_mult)},
- {NAME("decay_time"), CTL(arenas_decay_time)},
{NAME("quantum"), CTL(arenas_quantum)},
{NAME("page"), CTL(arenas_page)},
{NAME("tcache_max"), CTL(arenas_tcache_max)},
@@ -456,11 +439,9 @@ static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
{NAME("dss"), CTL(stats_arenas_i_dss)},
{NAME("lg_dirty_mult"), CTL(stats_arenas_i_lg_dirty_mult)},
- {NAME("decay_time"), CTL(stats_arenas_i_decay_time)},
{NAME("pactive"), CTL(stats_arenas_i_pactive)},
{NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
{NAME("mapped"), CTL(stats_arenas_i_mapped)},
- {NAME("retained"), CTL(stats_arenas_i_retained)},
{NAME("npurge"), CTL(stats_arenas_i_npurge)},
{NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)},
{NAME("purged"), CTL(stats_arenas_i_purged)},
@@ -487,7 +468,6 @@ static const ctl_named_node_t stats_node[] = {
{NAME("metadata"), CTL(stats_metadata)},
{NAME("resident"), CTL(stats_resident)},
{NAME("mapped"), CTL(stats_mapped)},
- {NAME("retained"), CTL(stats_retained)},
{NAME("arenas"), CHILD(indexed, stats_arenas)}
};
@@ -539,10 +519,8 @@ static void
ctl_arena_clear(ctl_arena_stats_t *astats)
{
- astats->nthreads = 0;
astats->dss = dss_prec_names[dss_prec_limit];
astats->lg_dirty_mult = -1;
- astats->decay_time = -1;
astats->pactive = 0;
astats->pdirty = 0;
if (config_stats) {
@@ -560,27 +538,20 @@ ctl_arena_clear(ctl_arena_stats_t *astats)
}
static void
-ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats, arena_t *arena)
+ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
{
unsigned i;
- if (config_stats) {
- arena_stats_merge(tsdn, arena, &cstats->nthreads, &cstats->dss,
- &cstats->lg_dirty_mult, &cstats->decay_time,
- &cstats->pactive, &cstats->pdirty, &cstats->astats,
- cstats->bstats, cstats->lstats, cstats->hstats);
-
- for (i = 0; i < NBINS; i++) {
- cstats->allocated_small += cstats->bstats[i].curregs *
- index2size(i);
- cstats->nmalloc_small += cstats->bstats[i].nmalloc;
- cstats->ndalloc_small += cstats->bstats[i].ndalloc;
- cstats->nrequests_small += cstats->bstats[i].nrequests;
- }
- } else {
- arena_basic_stats_merge(tsdn, arena, &cstats->nthreads,
- &cstats->dss, &cstats->lg_dirty_mult, &cstats->decay_time,
- &cstats->pactive, &cstats->pdirty);
+ arena_stats_merge(arena, &cstats->dss, &cstats->lg_dirty_mult,
+ &cstats->pactive, &cstats->pdirty, &cstats->astats, cstats->bstats,
+ cstats->lstats, cstats->hstats);
+
+ for (i = 0; i < NBINS; i++) {
+ cstats->allocated_small += cstats->bstats[i].curregs *
+ index2size(i);
+ cstats->nmalloc_small += cstats->bstats[i].nmalloc;
+ cstats->ndalloc_small += cstats->bstats[i].ndalloc;
+ cstats->nrequests_small += cstats->bstats[i].nrequests;
}
}
@@ -589,91 +560,89 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
{
unsigned i;
- sstats->nthreads += astats->nthreads;
sstats->pactive += astats->pactive;
sstats->pdirty += astats->pdirty;
- if (config_stats) {
- sstats->astats.mapped += astats->astats.mapped;
- sstats->astats.retained += astats->astats.retained;
- sstats->astats.npurge += astats->astats.npurge;
- sstats->astats.nmadvise += astats->astats.nmadvise;
- sstats->astats.purged += astats->astats.purged;
-
- sstats->astats.metadata_mapped +=
- astats->astats.metadata_mapped;
- sstats->astats.metadata_allocated +=
- astats->astats.metadata_allocated;
-
- sstats->allocated_small += astats->allocated_small;
- sstats->nmalloc_small += astats->nmalloc_small;
- sstats->ndalloc_small += astats->ndalloc_small;
- sstats->nrequests_small += astats->nrequests_small;
-
- sstats->astats.allocated_large +=
- astats->astats.allocated_large;
- sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
- sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
- sstats->astats.nrequests_large +=
- astats->astats.nrequests_large;
-
- sstats->astats.allocated_huge += astats->astats.allocated_huge;
- sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge;
- sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge;
-
- for (i = 0; i < NBINS; i++) {
- sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
- sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
- sstats->bstats[i].nrequests +=
- astats->bstats[i].nrequests;
- sstats->bstats[i].curregs += astats->bstats[i].curregs;
- if (config_tcache) {
- sstats->bstats[i].nfills +=
- astats->bstats[i].nfills;
- sstats->bstats[i].nflushes +=
- astats->bstats[i].nflushes;
- }
- sstats->bstats[i].nruns += astats->bstats[i].nruns;
- sstats->bstats[i].reruns += astats->bstats[i].reruns;
- sstats->bstats[i].curruns += astats->bstats[i].curruns;
+ sstats->astats.mapped += astats->astats.mapped;
+ sstats->astats.npurge += astats->astats.npurge;
+ sstats->astats.nmadvise += astats->astats.nmadvise;
+ sstats->astats.purged += astats->astats.purged;
+
+ sstats->astats.metadata_mapped += astats->astats.metadata_mapped;
+ sstats->astats.metadata_allocated += astats->astats.metadata_allocated;
+
+ sstats->allocated_small += astats->allocated_small;
+ sstats->nmalloc_small += astats->nmalloc_small;
+ sstats->ndalloc_small += astats->ndalloc_small;
+ sstats->nrequests_small += astats->nrequests_small;
+
+ sstats->astats.allocated_large += astats->astats.allocated_large;
+ sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
+ sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
+ sstats->astats.nrequests_large += astats->astats.nrequests_large;
+
+ sstats->astats.allocated_huge += astats->astats.allocated_huge;
+ sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge;
+ sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge;
+
+ for (i = 0; i < NBINS; i++) {
+ sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
+ sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
+ sstats->bstats[i].nrequests += astats->bstats[i].nrequests;
+ sstats->bstats[i].curregs += astats->bstats[i].curregs;
+ if (config_tcache) {
+ sstats->bstats[i].nfills += astats->bstats[i].nfills;
+ sstats->bstats[i].nflushes +=
+ astats->bstats[i].nflushes;
}
+ sstats->bstats[i].nruns += astats->bstats[i].nruns;
+ sstats->bstats[i].reruns += astats->bstats[i].reruns;
+ sstats->bstats[i].curruns += astats->bstats[i].curruns;
+ }
- for (i = 0; i < nlclasses; i++) {
- sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
- sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
- sstats->lstats[i].nrequests +=
- astats->lstats[i].nrequests;
- sstats->lstats[i].curruns += astats->lstats[i].curruns;
- }
+ for (i = 0; i < nlclasses; i++) {
+ sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
+ sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
+ sstats->lstats[i].nrequests += astats->lstats[i].nrequests;
+ sstats->lstats[i].curruns += astats->lstats[i].curruns;
+ }
- for (i = 0; i < nhclasses; i++) {
- sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc;
- sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc;
- sstats->hstats[i].curhchunks +=
- astats->hstats[i].curhchunks;
- }
+ for (i = 0; i < nhclasses; i++) {
+ sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc;
+ sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc;
+ sstats->hstats[i].curhchunks += astats->hstats[i].curhchunks;
}
}
static void
-ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i)
+ctl_arena_refresh(arena_t *arena, unsigned i)
{
ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas];
ctl_arena_clear(astats);
- ctl_arena_stats_amerge(tsdn, astats, arena);
- /* Merge into sum stats as well. */
- ctl_arena_stats_smerge(sstats, astats);
+
+ sstats->nthreads += astats->nthreads;
+ if (config_stats) {
+ ctl_arena_stats_amerge(astats, arena);
+ /* Merge into sum stats as well. */
+ ctl_arena_stats_smerge(sstats, astats);
+ } else {
+ astats->pactive += arena->nactive;
+ astats->pdirty += arena->ndirty;
+ /* Merge into sum stats as well. */
+ sstats->pactive += arena->nactive;
+ sstats->pdirty += arena->ndirty;
+ }
}
static bool
-ctl_grow(tsdn_t *tsdn)
+ctl_grow(void)
{
ctl_arena_stats_t *astats;
/* Initialize new arena. */
- if (arena_init(tsdn, ctl_stats.narenas) == NULL)
+ if (arena_init(ctl_stats.narenas) == NULL)
return (true);
/* Allocate extended arena stats. */
@@ -708,32 +677,47 @@ ctl_grow(tsdn_t *tsdn)
}
static void
-ctl_refresh(tsdn_t *tsdn)
+ctl_refresh(void)
{
+ tsd_t *tsd;
unsigned i;
+ bool refreshed;
VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
/*
* Clear sum stats, since they will be merged into by
* ctl_arena_refresh().
*/
+ ctl_stats.arenas[ctl_stats.narenas].nthreads = 0;
ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);
- for (i = 0; i < ctl_stats.narenas; i++)
- tarenas[i] = arena_get(tsdn, i, false);
+ tsd = tsd_fetch();
+ for (i = 0, refreshed = false; i < ctl_stats.narenas; i++) {
+ tarenas[i] = arena_get(tsd, i, false, false);
+ if (tarenas[i] == NULL && !refreshed) {
+ tarenas[i] = arena_get(tsd, i, false, true);
+ refreshed = true;
+ }
+ }
+
+ for (i = 0; i < ctl_stats.narenas; i++) {
+ if (tarenas[i] != NULL)
+ ctl_stats.arenas[i].nthreads = arena_nbound(i);
+ else
+ ctl_stats.arenas[i].nthreads = 0;
+ }
for (i = 0; i < ctl_stats.narenas; i++) {
bool initialized = (tarenas[i] != NULL);
ctl_stats.arenas[i].initialized = initialized;
if (initialized)
- ctl_arena_refresh(tsdn, tarenas[i], i);
+ ctl_arena_refresh(tarenas[i], i);
}
if (config_stats) {
size_t base_allocated, base_resident, base_mapped;
- base_stats_get(tsdn, &base_allocated, &base_resident,
- &base_mapped);
+ base_stats_get(&base_allocated, &base_resident, &base_mapped);
ctl_stats.allocated =
ctl_stats.arenas[ctl_stats.narenas].allocated_small +
ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large +
@@ -750,19 +734,17 @@ ctl_refresh(tsdn_t *tsdn)
ctl_stats.arenas[ctl_stats.narenas].pdirty) << LG_PAGE);
ctl_stats.mapped = base_mapped +
ctl_stats.arenas[ctl_stats.narenas].astats.mapped;
- ctl_stats.retained =
- ctl_stats.arenas[ctl_stats.narenas].astats.retained;
}
ctl_epoch++;
}
static bool
-ctl_init(tsdn_t *tsdn)
+ctl_init(void)
{
bool ret;
- malloc_mutex_lock(tsdn, &ctl_mtx);
+ malloc_mutex_lock(&ctl_mtx);
if (!ctl_initialized) {
/*
* Allocate space for one extra arena stats element, which
@@ -804,19 +786,19 @@ ctl_init(tsdn_t *tsdn)
ctl_stats.arenas[ctl_stats.narenas].initialized = true;
ctl_epoch = 0;
- ctl_refresh(tsdn);
+ ctl_refresh();
ctl_initialized = true;
}
ret = false;
label_return:
- malloc_mutex_unlock(tsdn, &ctl_mtx);
+ malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
static int
-ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
- size_t *mibp, size_t *depthp)
+ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
+ size_t *depthp)
{
int ret;
const char *elm, *tdot, *dot;
@@ -868,7 +850,7 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
}
inode = ctl_indexed_node(node->children);
- node = inode->index(tsdn, mibp, *depthp, (size_t)index);
+ node = inode->index(mibp, *depthp, (size_t)index);
if (node == NULL) {
ret = ENOENT;
goto label_return;
@@ -912,8 +894,8 @@ label_return:
}
int
-ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
+ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
+ size_t newlen)
{
int ret;
size_t depth;
@@ -921,19 +903,19 @@ ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
size_t mib[CTL_MAX_DEPTH];
const ctl_named_node_t *node;
- if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) {
+ if (!ctl_initialized && ctl_init()) {
ret = EAGAIN;
goto label_return;
}
depth = CTL_MAX_DEPTH;
- ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth);
+ ret = ctl_lookup(name, nodes, mib, &depth);
if (ret != 0)
goto label_return;
node = ctl_named_node(nodes[depth-1]);
if (node != NULL && node->ctl)
- ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
+ ret = node->ctl(mib, depth, oldp, oldlenp, newp, newlen);
else {
/* The name refers to a partial path through the ctl tree. */
ret = ENOENT;
@@ -944,29 +926,29 @@ label_return:
}
int
-ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp, size_t *miblenp)
+ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp)
{
int ret;
- if (!ctl_initialized && ctl_init(tsdn)) {
+ if (!ctl_initialized && ctl_init()) {
ret = EAGAIN;
goto label_return;
}
- ret = ctl_lookup(tsdn, name, NULL, mibp, miblenp);
+ ret = ctl_lookup(name, NULL, mibp, miblenp);
label_return:
return(ret);
}
int
-ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
+ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
{
int ret;
const ctl_named_node_t *node;
size_t i;
- if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) {
+ if (!ctl_initialized && ctl_init()) {
ret = EAGAIN;
goto label_return;
}
@@ -978,7 +960,7 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
assert(node->nchildren > 0);
if (ctl_named_node(node->children) != NULL) {
/* Children are named. */
- if (node->nchildren <= (unsigned)mib[i]) {
+ if (node->nchildren <= mib[i]) {
ret = ENOENT;
goto label_return;
}
@@ -988,7 +970,7 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
/* Indexed element. */
inode = ctl_indexed_node(node->children);
- node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]);
+ node = inode->index(mib, miblen, mib[i]);
if (node == NULL) {
ret = ENOENT;
goto label_return;
@@ -998,7 +980,7 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
/* Call the ctl function. */
if (node && node->ctl)
- ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
+ ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen);
else {
/* Partial MIB. */
ret = ENOENT;
@@ -1012,7 +994,7 @@ bool
ctl_boot(void)
{
- if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL))
+ if (malloc_mutex_init(&ctl_mtx))
return (true);
ctl_initialized = false;
@@ -1021,24 +1003,24 @@ ctl_boot(void)
}
void
-ctl_prefork(tsdn_t *tsdn)
+ctl_prefork(void)
{
- malloc_mutex_prefork(tsdn, &ctl_mtx);
+ malloc_mutex_prefork(&ctl_mtx);
}
void
-ctl_postfork_parent(tsdn_t *tsdn)
+ctl_postfork_parent(void)
{
- malloc_mutex_postfork_parent(tsdn, &ctl_mtx);
+ malloc_mutex_postfork_parent(&ctl_mtx);
}
void
-ctl_postfork_child(tsdn_t *tsdn)
+ctl_postfork_child(void)
{
- malloc_mutex_postfork_child(tsdn, &ctl_mtx);
+ malloc_mutex_postfork_child(&ctl_mtx);
}
/******************************************************************************/
@@ -1095,8 +1077,8 @@ ctl_postfork_child(tsdn_t *tsdn)
*/
#define CTL_RO_CLGEN(c, l, n, v, t) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) \
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
+ void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
@@ -1104,7 +1086,7 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
if (!(c)) \
return (ENOENT); \
if (l) \
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
+ malloc_mutex_lock(&ctl_mtx); \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
@@ -1112,47 +1094,47 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
ret = 0; \
label_return: \
if (l) \
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
+ malloc_mutex_unlock(&ctl_mtx); \
return (ret); \
}
#define CTL_RO_CGEN(c, n, v, t) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) \
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
+ void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
\
if (!(c)) \
return (ENOENT); \
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
+ malloc_mutex_lock(&ctl_mtx); \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
+ malloc_mutex_unlock(&ctl_mtx); \
return (ret); \
}
#define CTL_RO_GEN(n, v, t) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) \
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
+ void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
\
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
+ malloc_mutex_lock(&ctl_mtx); \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
+ malloc_mutex_unlock(&ctl_mtx); \
return (ret); \
}
@@ -1162,8 +1144,8 @@ label_return: \
*/
#define CTL_RO_NL_CGEN(c, n, v, t) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) \
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
+ void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
@@ -1181,8 +1163,8 @@ label_return: \
#define CTL_RO_NL_GEN(n, v, t) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) \
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
+ void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
@@ -1198,15 +1180,17 @@ label_return: \
#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) \
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
+ void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
+ tsd_t *tsd; \
\
if (!(c)) \
return (ENOENT); \
READONLY(); \
+ tsd = tsd_fetch(); \
oldval = (m(tsd)); \
READ(oldval, t); \
\
@@ -1215,17 +1199,17 @@ label_return: \
return (ret); \
}
-#define CTL_RO_CONFIG_GEN(n, t) \
+#define CTL_RO_BOOL_CONFIG_GEN(n) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) \
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
+ void *newp, size_t newlen) \
{ \
int ret; \
- t oldval; \
+ bool oldval; \
\
READONLY(); \
oldval = n; \
- READ(oldval, t); \
+ READ(oldval, bool); \
\
ret = 0; \
label_return: \
@@ -1237,51 +1221,48 @@ label_return: \
CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
static int
-epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
+epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
{
int ret;
UNUSED uint64_t newval;
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_lock(&ctl_mtx);
WRITE(newval, uint64_t);
if (newp != NULL)
- ctl_refresh(tsd_tsdn(tsd));
+ ctl_refresh();
READ(ctl_epoch, uint64_t);
ret = 0;
label_return:
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
/******************************************************************************/
-CTL_RO_CONFIG_GEN(config_cache_oblivious, bool)
-CTL_RO_CONFIG_GEN(config_debug, bool)
-CTL_RO_CONFIG_GEN(config_fill, bool)
-CTL_RO_CONFIG_GEN(config_lazy_lock, bool)
-CTL_RO_CONFIG_GEN(config_malloc_conf, const char *)
-CTL_RO_CONFIG_GEN(config_munmap, bool)
-CTL_RO_CONFIG_GEN(config_prof, bool)
-CTL_RO_CONFIG_GEN(config_prof_libgcc, bool)
-CTL_RO_CONFIG_GEN(config_prof_libunwind, bool)
-CTL_RO_CONFIG_GEN(config_stats, bool)
-CTL_RO_CONFIG_GEN(config_tcache, bool)
-CTL_RO_CONFIG_GEN(config_tls, bool)
-CTL_RO_CONFIG_GEN(config_utrace, bool)
-CTL_RO_CONFIG_GEN(config_valgrind, bool)
-CTL_RO_CONFIG_GEN(config_xmalloc, bool)
+CTL_RO_BOOL_CONFIG_GEN(config_cache_oblivious)
+CTL_RO_BOOL_CONFIG_GEN(config_debug)
+CTL_RO_BOOL_CONFIG_GEN(config_fill)
+CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
+CTL_RO_BOOL_CONFIG_GEN(config_munmap)
+CTL_RO_BOOL_CONFIG_GEN(config_prof)
+CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
+CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind)
+CTL_RO_BOOL_CONFIG_GEN(config_stats)
+CTL_RO_BOOL_CONFIG_GEN(config_tcache)
+CTL_RO_BOOL_CONFIG_GEN(config_tls)
+CTL_RO_BOOL_CONFIG_GEN(config_utrace)
+CTL_RO_BOOL_CONFIG_GEN(config_valgrind)
+CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)
/******************************************************************************/
CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
-CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
-CTL_RO_NL_GEN(opt_purge, purge_mode_names[opt_purge], const char *)
+CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
-CTL_RO_NL_GEN(opt_decay_time, opt_decay_time, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
@@ -1306,18 +1287,20 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
/******************************************************************************/
static int
-thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
+thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
{
int ret;
+ tsd_t *tsd;
arena_t *oldarena;
unsigned newind, oldind;
+ tsd = tsd_fetch();
oldarena = arena_choose(tsd, NULL);
if (oldarena == NULL)
return (EAGAIN);
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_lock(&ctl_mtx);
newind = oldind = oldarena->ind;
WRITE(newind, unsigned);
READ(oldind, unsigned);
@@ -1331,7 +1314,7 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
}
/* Initialize arena if necessary. */
- newarena = arena_get(tsd_tsdn(tsd), newind, true);
+ newarena = arena_get(tsd, newind, true, true);
if (newarena == NULL) {
ret = EAGAIN;
goto label_return;
@@ -1341,15 +1324,15 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
if (config_tcache) {
tcache_t *tcache = tsd_tcache_get(tsd);
if (tcache != NULL) {
- tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
- oldarena, newarena);
+ tcache_arena_reassociate(tcache, oldarena,
+ newarena);
}
}
}
ret = 0;
label_return:
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
@@ -1363,8 +1346,8 @@ CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
tsd_thread_deallocatedp_get, uint64_t *)
static int
-thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen)
+thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
bool oldval;
@@ -1388,8 +1371,8 @@ label_return:
}
static int
-thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen)
+thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
@@ -1407,7 +1390,7 @@ label_return:
}
static int
-thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+thread_prof_name_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
@@ -1418,16 +1401,20 @@ thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
READ_XOR_WRITE();
if (newp != NULL) {
+ tsd_t *tsd;
+
if (newlen != sizeof(const char *)) {
ret = EINVAL;
goto label_return;
}
+ tsd = tsd_fetch();
+
if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
0)
goto label_return;
} else {
- const char *oldname = prof_thread_name_get(tsd);
+ const char *oldname = prof_thread_name_get();
READ(oldname, const char *);
}
@@ -1437,7 +1424,7 @@ label_return:
}
static int
-thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+thread_prof_active_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
@@ -1446,13 +1433,13 @@ thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
if (!config_prof)
return (ENOENT);
- oldval = prof_thread_active_get(tsd);
+ oldval = prof_thread_active_get();
if (newp != NULL) {
if (newlen != sizeof(bool)) {
ret = EINVAL;
goto label_return;
}
- if (prof_thread_active_set(tsd, *(bool *)newp)) {
+ if (prof_thread_active_set(*(bool *)newp)) {
ret = EAGAIN;
goto label_return;
}
@@ -1467,16 +1454,19 @@ label_return:
/******************************************************************************/
static int
-tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
+tcache_create_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
{
int ret;
+ tsd_t *tsd;
unsigned tcache_ind;
if (!config_tcache)
return (ENOENT);
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ tsd = tsd_fetch();
+
+ malloc_mutex_lock(&ctl_mtx);
READONLY();
if (tcaches_create(tsd, &tcache_ind)) {
ret = EFAULT;
@@ -1486,20 +1476,23 @@ tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
static int
-tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
+tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
{
int ret;
+ tsd_t *tsd;
unsigned tcache_ind;
if (!config_tcache)
return (ENOENT);
+ tsd = tsd_fetch();
+
WRITEONLY();
tcache_ind = UINT_MAX;
WRITE(tcache_ind, unsigned);
@@ -1515,15 +1508,18 @@ label_return:
}
static int
-tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+tcache_destroy_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
+ tsd_t *tsd;
unsigned tcache_ind;
if (!config_tcache)
return (ENOENT);
+ tsd = tsd_fetch();
+
WRITEONLY();
tcache_ind = UINT_MAX;
WRITE(tcache_ind, unsigned);
@@ -1540,105 +1536,48 @@ label_return:
/******************************************************************************/
+/* ctl_mutex must be held during execution of this function. */
static void
-arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all)
+arena_purge(unsigned arena_ind)
{
+ tsd_t *tsd;
+ unsigned i;
+ bool refreshed;
+ VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
- malloc_mutex_lock(tsdn, &ctl_mtx);
- {
- unsigned narenas = ctl_stats.narenas;
-
- if (arena_ind == narenas) {
- unsigned i;
- VARIABLE_ARRAY(arena_t *, tarenas, narenas);
-
- for (i = 0; i < narenas; i++)
- tarenas[i] = arena_get(tsdn, i, false);
-
- /*
- * No further need to hold ctl_mtx, since narenas and
- * tarenas contain everything needed below.
- */
- malloc_mutex_unlock(tsdn, &ctl_mtx);
-
- for (i = 0; i < narenas; i++) {
- if (tarenas[i] != NULL)
- arena_purge(tsdn, tarenas[i], all);
- }
- } else {
- arena_t *tarena;
-
- assert(arena_ind < narenas);
-
- tarena = arena_get(tsdn, arena_ind, false);
-
- /* No further need to hold ctl_mtx. */
- malloc_mutex_unlock(tsdn, &ctl_mtx);
-
- if (tarena != NULL)
- arena_purge(tsdn, tarena, all);
+ tsd = tsd_fetch();
+ for (i = 0, refreshed = false; i < ctl_stats.narenas; i++) {
+ tarenas[i] = arena_get(tsd, i, false, false);
+ if (tarenas[i] == NULL && !refreshed) {
+ tarenas[i] = arena_get(tsd, i, false, true);
+ refreshed = true;
}
}
-}
-
-static int
-arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
- int ret;
- READONLY();
- WRITEONLY();
- arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], true);
-
- ret = 0;
-label_return:
- return (ret);
-}
-
-static int
-arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
- int ret;
-
- READONLY();
- WRITEONLY();
- arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], false);
-
- ret = 0;
-label_return:
- return (ret);
+ if (arena_ind == ctl_stats.narenas) {
+ unsigned i;
+ for (i = 0; i < ctl_stats.narenas; i++) {
+ if (tarenas[i] != NULL)
+ arena_purge_all(tarenas[i]);
+ }
+ } else {
+ assert(arena_ind < ctl_stats.narenas);
+ if (tarenas[arena_ind] != NULL)
+ arena_purge_all(tarenas[arena_ind]);
+ }
}
static int
-arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
+arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
{
int ret;
- unsigned arena_ind;
- arena_t *arena;
READONLY();
WRITEONLY();
-
- if ((config_valgrind && unlikely(in_valgrind)) || (config_fill &&
- unlikely(opt_quarantine))) {
- ret = EFAULT;
- goto label_return;
- }
-
- arena_ind = (unsigned)mib[1];
- if (config_debug) {
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
- assert(arena_ind < ctl_stats.narenas);
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
- }
- assert(arena_ind >= opt_narenas);
-
- arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
-
- arena_reset(tsd, arena);
+ malloc_mutex_lock(&ctl_mtx);
+ arena_purge(mib[1]);
+ malloc_mutex_unlock(&ctl_mtx);
ret = 0;
label_return:
@@ -1646,16 +1585,16 @@ label_return:
}
static int
-arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
+arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
{
int ret;
const char *dss = NULL;
- unsigned arena_ind = (unsigned)mib[1];
+ unsigned arena_ind = mib[1];
dss_prec_t dss_prec_old = dss_prec_limit;
dss_prec_t dss_prec = dss_prec_limit;
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_lock(&ctl_mtx);
WRITE(dss, const char *);
if (dss != NULL) {
int i;
@@ -1676,13 +1615,13 @@ arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
}
if (arena_ind < ctl_stats.narenas) {
- arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
+ arena_t *arena = arena_get(tsd_fetch(), arena_ind, false, true);
if (arena == NULL || (dss_prec != dss_prec_limit &&
- arena_dss_prec_set(tsd_tsdn(tsd), arena, dss_prec))) {
+ arena_dss_prec_set(arena, dss_prec))) {
ret = EFAULT;
goto label_return;
}
- dss_prec_old = arena_dss_prec_get(tsd_tsdn(tsd), arena);
+ dss_prec_old = arena_dss_prec_get(arena);
} else {
if (dss_prec != dss_prec_limit &&
chunk_dss_prec_set(dss_prec)) {
@@ -1697,61 +1636,26 @@ arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
static int
-arena_i_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen)
-{
- int ret;
- unsigned arena_ind = (unsigned)mib[1];
- arena_t *arena;
-
- arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
- if (arena == NULL) {
- ret = EFAULT;
- goto label_return;
- }
-
- if (oldp != NULL && oldlenp != NULL) {
- size_t oldval = arena_lg_dirty_mult_get(tsd_tsdn(tsd), arena);
- READ(oldval, ssize_t);
- }
- if (newp != NULL) {
- if (newlen != sizeof(ssize_t)) {
- ret = EINVAL;
- goto label_return;
- }
- if (arena_lg_dirty_mult_set(tsd_tsdn(tsd), arena,
- *(ssize_t *)newp)) {
- ret = EFAULT;
- goto label_return;
- }
- }
-
- ret = 0;
-label_return:
- return (ret);
-}
-
-static int
-arena_i_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
- unsigned arena_ind = (unsigned)mib[1];
+ unsigned arena_ind = mib[1];
arena_t *arena;
- arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
+ arena = arena_get(tsd_fetch(), arena_ind, false, true);
if (arena == NULL) {
ret = EFAULT;
goto label_return;
}
if (oldp != NULL && oldlenp != NULL) {
- size_t oldval = arena_decay_time_get(tsd_tsdn(tsd), arena);
+ size_t oldval = arena_lg_dirty_mult_get(arena);
READ(oldval, ssize_t);
}
if (newp != NULL) {
@@ -1759,8 +1663,7 @@ arena_i_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = EINVAL;
goto label_return;
}
- if (arena_decay_time_set(tsd_tsdn(tsd), arena,
- *(ssize_t *)newp)) {
+ if (arena_lg_dirty_mult_set(arena, *(ssize_t *)newp)) {
ret = EFAULT;
goto label_return;
}
@@ -1772,25 +1675,24 @@ label_return:
}
static int
-arena_i_chunk_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen)
+arena_i_chunk_hooks_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
- unsigned arena_ind = (unsigned)mib[1];
+ unsigned arena_ind = mib[1];
arena_t *arena;
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_lock(&ctl_mtx);
if (arena_ind < narenas_total_get() && (arena =
- arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
+ arena_get(tsd_fetch(), arena_ind, false, true)) != NULL) {
if (newp != NULL) {
chunk_hooks_t old_chunk_hooks, new_chunk_hooks;
WRITE(new_chunk_hooks, chunk_hooks_t);
- old_chunk_hooks = chunk_hooks_set(tsd_tsdn(tsd), arena,
+ old_chunk_hooks = chunk_hooks_set(arena,
&new_chunk_hooks);
READ(old_chunk_hooks, chunk_hooks_t);
} else {
- chunk_hooks_t old_chunk_hooks =
- chunk_hooks_get(tsd_tsdn(tsd), arena);
+ chunk_hooks_t old_chunk_hooks = chunk_hooks_get(arena);
READ(old_chunk_hooks, chunk_hooks_t);
}
} else {
@@ -1799,16 +1701,16 @@ arena_i_chunk_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
}
ret = 0;
label_return:
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
static const ctl_named_node_t *
-arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
+arena_i_index(const size_t *mib, size_t miblen, size_t i)
{
- const ctl_named_node_t *ret;
+ const ctl_named_node_t * ret;
- malloc_mutex_lock(tsdn, &ctl_mtx);
+ malloc_mutex_lock(&ctl_mtx);
if (i > ctl_stats.narenas) {
ret = NULL;
goto label_return;
@@ -1816,20 +1718,20 @@ arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
ret = super_arena_i_node;
label_return:
- malloc_mutex_unlock(tsdn, &ctl_mtx);
+ malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
/******************************************************************************/
static int
-arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
unsigned narenas;
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_lock(&ctl_mtx);
READONLY();
if (*oldlenp != sizeof(unsigned)) {
ret = EINVAL;
@@ -1840,23 +1742,23 @@ arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
static int
-arenas_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
unsigned nread, i;
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_lock(&ctl_mtx);
READONLY();
if (*oldlenp != ctl_stats.narenas * sizeof(bool)) {
ret = EINVAL;
nread = (*oldlenp < ctl_stats.narenas * sizeof(bool))
- ? (unsigned)(*oldlenp / sizeof(bool)) : ctl_stats.narenas;
+ ? (*oldlenp / sizeof(bool)) : ctl_stats.narenas;
} else {
ret = 0;
nread = ctl_stats.narenas;
@@ -1866,13 +1768,13 @@ arenas_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
((bool *)oldp)[i] = ctl_stats.arenas[i].initialized;
label_return:
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
static int
-arenas_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen)
+arenas_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
@@ -1896,32 +1798,6 @@ label_return:
return (ret);
}
-static int
-arenas_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
- int ret;
-
- if (oldp != NULL && oldlenp != NULL) {
- size_t oldval = arena_decay_time_default_get();
- READ(oldval, ssize_t);
- }
- if (newp != NULL) {
- if (newlen != sizeof(ssize_t)) {
- ret = EINVAL;
- goto label_return;
- }
- if (arena_decay_time_default_set(*(ssize_t *)newp)) {
- ret = EFAULT;
- goto label_return;
- }
- }
-
- ret = 0;
-label_return:
- return (ret);
-}
-
CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
@@ -1931,7 +1807,7 @@ CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
static const ctl_named_node_t *
-arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
+arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
{
if (i > NBINS)
@@ -1940,9 +1816,9 @@ arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
}
CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned)
-CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+(szind_t)mib[2]), size_t)
+CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+mib[2]), size_t)
static const ctl_named_node_t *
-arenas_lrun_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
+arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
{
if (i > nlclasses)
@@ -1951,10 +1827,9 @@ arenas_lrun_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
}
CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned)
-CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+(szind_t)mib[2]),
- size_t)
+CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+mib[2]), size_t)
static const ctl_named_node_t *
-arenas_hchunk_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
+arenas_hchunk_i_index(const size_t *mib, size_t miblen, size_t i)
{
if (i > nhclasses)
@@ -1963,15 +1838,15 @@ arenas_hchunk_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
}
static int
-arenas_extend_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
+arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
{
int ret;
unsigned narenas;
- malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_lock(&ctl_mtx);
READONLY();
- if (ctl_grow(tsd_tsdn(tsd))) {
+ if (ctl_grow()) {
ret = EAGAIN;
goto label_return;
}
@@ -1980,15 +1855,15 @@ arenas_extend_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
- malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
/******************************************************************************/
static int
-prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen)
+prof_thread_active_init_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
bool oldval;
@@ -2001,10 +1876,9 @@ prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
ret = EINVAL;
goto label_return;
}
- oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
- *(bool *)newp);
+ oldval = prof_thread_active_init_set(*(bool *)newp);
} else
- oldval = prof_thread_active_init_get(tsd_tsdn(tsd));
+ oldval = prof_thread_active_init_get();
READ(oldval, bool);
ret = 0;
@@ -2013,8 +1887,8 @@ label_return:
}
static int
-prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
+prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
{
int ret;
bool oldval;
@@ -2027,9 +1901,9 @@ prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = EINVAL;
goto label_return;
}
- oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp);
+ oldval = prof_active_set(*(bool *)newp);
} else
- oldval = prof_active_get(tsd_tsdn(tsd));
+ oldval = prof_active_get();
READ(oldval, bool);
ret = 0;
@@ -2038,8 +1912,8 @@ label_return:
}
static int
-prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
+prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
{
int ret;
const char *filename = NULL;
@@ -2050,7 +1924,7 @@ prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
WRITEONLY();
WRITE(filename, const char *);
- if (prof_mdump(tsd, filename)) {
+ if (prof_mdump(filename)) {
ret = EFAULT;
goto label_return;
}
@@ -2061,8 +1935,8 @@ label_return:
}
static int
-prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
+prof_gdump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
{
int ret;
bool oldval;
@@ -2075,9 +1949,9 @@ prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = EINVAL;
goto label_return;
}
- oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
+ oldval = prof_gdump_set(*(bool *)newp);
} else
- oldval = prof_gdump_get(tsd_tsdn(tsd));
+ oldval = prof_gdump_get();
READ(oldval, bool);
ret = 0;
@@ -2086,11 +1960,12 @@ label_return:
}
static int
-prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
+prof_reset_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
{
int ret;
size_t lg_sample = lg_prof_sample;
+ tsd_t *tsd;
if (!config_prof)
return (ENOENT);
@@ -2100,6 +1975,8 @@ prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
if (lg_sample >= (sizeof(uint64_t) << 3))
lg_sample = (sizeof(uint64_t) << 3) - 1;
+ tsd = tsd_fetch();
+
prof_reset(tsd, lg_sample);
ret = 0;
@@ -2118,20 +1995,15 @@ CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t)
CTL_RO_CGEN(config_stats, stats_resident, ctl_stats.resident, size_t)
CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
-CTL_RO_CGEN(config_stats, stats_retained, ctl_stats.retained, size_t)
CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
CTL_RO_GEN(stats_arenas_i_lg_dirty_mult, ctl_stats.arenas[mib[2]].lg_dirty_mult,
ssize_t)
-CTL_RO_GEN(stats_arenas_i_decay_time, ctl_stats.arenas[mib[2]].decay_time,
- ssize_t)
CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
ctl_stats.arenas[mib[2]].astats.mapped, size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
- ctl_stats.arenas[mib[2]].astats.retained, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
@@ -2188,8 +2060,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)
static const ctl_named_node_t *
-stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
- size_t j)
+stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j)
{
if (j > NBINS)
@@ -2207,8 +2078,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
static const ctl_named_node_t *
-stats_arenas_i_lruns_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
- size_t j)
+stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
{
if (j > nlclasses)
@@ -2227,8 +2097,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks,
ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t)
static const ctl_named_node_t *
-stats_arenas_i_hchunks_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
- size_t j)
+stats_arenas_i_hchunks_j_index(const size_t *mib, size_t miblen, size_t j)
{
if (j > nhclasses)
@@ -2237,11 +2106,11 @@ stats_arenas_i_hchunks_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
}
static const ctl_named_node_t *
-stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
+stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
{
const ctl_named_node_t * ret;
- malloc_mutex_lock(tsdn, &ctl_mtx);
+ malloc_mutex_lock(&ctl_mtx);
if (i > ctl_stats.narenas || !ctl_stats.arenas[i].initialized) {
ret = NULL;
goto label_return;
@@ -2249,6 +2118,6 @@ stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
ret = super_stats_arenas_i_node;
label_return:
- malloc_mutex_unlock(tsdn, &ctl_mtx);
+ malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
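The ctl.c hunks above restore the pre-4.4 mallctl plumbing: each *_ctl handler loses its tsd_t/tsdn_t argument and calls tsd_fetch() itself where thread state is needed, while dot-separated mallctl names are still resolved against a static tree of named and indexed nodes. The following is a minimal standalone sketch of that name-to-node lookup pattern only; it is not jemalloc's implementation, and the tree contents, struct layout and helper names are hypothetical.

/*
 * Minimal sketch (hypothetical, not jemalloc's code) of resolving a
 * dot-separated control name such as "arenas.0.purge" against a tree of
 * named nodes, with numeric path components handled by an indexed node.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct node {
	const char *name;            /* NULL marks an indexed node. */
	const struct node *children;
	size_t nchildren;
	int leaf_id;                 /* >= 0 only for leaves. */
};

static const struct node arena_i_children[] = {
	{"purge", NULL, 0, 1},
	{"dss",   NULL, 0, 2},
};
/* One indexed child stands in for every "arenas.<i>" subtree. */
static const struct node arenas_children[] = {
	{NULL, arena_i_children, 2, -1},
};
static const struct node root_children[] = {
	{"epoch",  NULL, 0, 0},
	{"arenas", arenas_children, 1, -1},
};
static const struct node root = {"", root_children, 2, -1};

/* Resolve a dot-separated name; return the leaf id, or -1 on failure. */
static int
lookup(const char *name)
{
	char buf[128];
	const struct node *node = &root;
	char *elm, *save;

	strncpy(buf, name, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';
	for (elm = strtok_r(buf, ".", &save); elm != NULL;
	    elm = strtok_r(NULL, ".", &save)) {
		const struct node *next = NULL;
		size_t i;

		if (node->nchildren == 0)
			return (-1);
		if (node->children[0].name == NULL) {
			/* Indexed node: the component is a numeric index. */
			(void)strtoul(elm, NULL, 10);
			next = &node->children[0];
		} else {
			for (i = 0; i < node->nchildren; i++) {
				if (strcmp(node->children[i].name, elm) == 0) {
					next = &node->children[i];
					break;
				}
			}
		}
		if (next == NULL)
			return (-1);
		node = next;
	}
	return (node->leaf_id);
}

int
main(void)
{
	printf("%d\n", lookup("arenas.0.purge")); /* 1 */
	printf("%d\n", lookup("epoch"));          /* 0 */
	printf("%d\n", lookup("nope"));           /* -1 */
	return (0);
}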
diff --git a/deps/jemalloc/src/extent.c b/deps/jemalloc/src/extent.c
index 218156c60..13f94411c 100644
--- a/deps/jemalloc/src/extent.c
+++ b/deps/jemalloc/src/extent.c
@@ -3,48 +3,45 @@
/******************************************************************************/
-/*
- * Round down to the nearest chunk size that can actually be requested during
- * normal huge allocation.
- */
JEMALLOC_INLINE_C size_t
extent_quantize(size_t size)
{
- size_t ret;
- szind_t ind;
- assert(size > 0);
-
- ind = size2index(size + 1);
- if (ind == 0) {
- /* Avoid underflow. */
- return (index2size(0));
- }
- ret = index2size(ind - 1);
- assert(ret <= size);
- return (ret);
+ /*
+ * Round down to the nearest chunk size that can actually be requested
+ * during normal huge allocation.
+ */
+ return (index2size(size2index(size + 1) - 1));
}
JEMALLOC_INLINE_C int
-extent_sz_comp(const extent_node_t *a, const extent_node_t *b)
+extent_szad_comp(extent_node_t *a, extent_node_t *b)
{
+ int ret;
size_t a_qsize = extent_quantize(extent_node_size_get(a));
size_t b_qsize = extent_quantize(extent_node_size_get(b));
- return ((a_qsize > b_qsize) - (a_qsize < b_qsize));
-}
+ /*
+ * Compare based on quantized size rather than size, in order to sort
+ * equally useful extents only by address.
+ */
+ ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
+ if (ret == 0) {
+ uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
+ uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
-JEMALLOC_INLINE_C int
-extent_sn_comp(const extent_node_t *a, const extent_node_t *b)
-{
- size_t a_sn = extent_node_sn_get(a);
- size_t b_sn = extent_node_sn_get(b);
+ ret = (a_addr > b_addr) - (a_addr < b_addr);
+ }
- return ((a_sn > b_sn) - (a_sn < b_sn));
+ return (ret);
}
+/* Generate red-black tree functions. */
+rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, szad_link,
+ extent_szad_comp)
+
JEMALLOC_INLINE_C int
-extent_ad_comp(const extent_node_t *a, const extent_node_t *b)
+extent_ad_comp(extent_node_t *a, extent_node_t *b)
{
uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
@@ -52,26 +49,5 @@ extent_ad_comp(const extent_node_t *a, const extent_node_t *b)
return ((a_addr > b_addr) - (a_addr < b_addr));
}
-JEMALLOC_INLINE_C int
-extent_szsnad_comp(const extent_node_t *a, const extent_node_t *b)
-{
- int ret;
-
- ret = extent_sz_comp(a, b);
- if (ret != 0)
- return (ret);
-
- ret = extent_sn_comp(a, b);
- if (ret != 0)
- return (ret);
-
- ret = extent_ad_comp(a, b);
- return (ret);
-}
-
-/* Generate red-black tree functions. */
-rb_gen(, extent_tree_szsnad_, extent_tree_t, extent_node_t, szsnad_link,
- extent_szsnad_comp)
-
/* Generate red-black tree functions. */
rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp)
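The extent.c hunks above drop the serial-number ("sn") ordering added in jemalloc 4.4.0 and return to the older size/address ("szad") comparator: free extents are ordered by quantized size first and by address second, so equally useful extents are recycled lowest-address first. Below is a minimal standalone sketch of that ordering, assuming a simplified stand-in for extent_quantize(); it is an illustration of the comparator logic visible in the diff, not jemalloc's code.

/*
 * Sketch of szad ordering: sort by quantized size, break ties by address.
 * quantize() here just rounds down to a 4 KiB page, purely for illustration.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct extent {
	void  *addr;
	size_t size;
};

static size_t
quantize(size_t size)
{
	return (size & ~((size_t)4096 - 1));
}

static int
szad_comp(const void *ap, const void *bp)
{
	const struct extent *a = ap, *b = bp;
	size_t a_q = quantize(a->size), b_q = quantize(b->size);
	int ret = (a_q > b_q) - (a_q < b_q);

	if (ret == 0) {
		uintptr_t a_addr = (uintptr_t)a->addr;
		uintptr_t b_addr = (uintptr_t)b->addr;
		ret = (a_addr > b_addr) - (a_addr < b_addr);
	}
	return (ret);
}

int
main(void)
{
	struct extent ext[] = {
		{(void *)0x3000, 8192},
		{(void *)0x1000, 4096},
		{(void *)0x2000, 8192},
	};
	size_t i;

	qsort(ext, 3, sizeof(ext[0]), szad_comp);
	for (i = 0; i < 3; i++)
		printf("%p %zu\n", ext[i].addr, ext[i].size);
	/* Expected order: 0x1000/4096, 0x2000/8192, 0x3000/8192. */
	return (0);
}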
diff --git a/deps/jemalloc/src/huge.c b/deps/jemalloc/src/huge.c
index 8abd8c00c..1e9a66512 100644
--- a/deps/jemalloc/src/huge.c
+++ b/deps/jemalloc/src/huge.c
@@ -15,21 +15,12 @@ huge_node_get(const void *ptr)
}
static bool
-huge_node_set(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
+huge_node_set(const void *ptr, extent_node_t *node)
{
assert(extent_node_addr_get(node) == ptr);
assert(!extent_node_achunk_get(node));
- return (chunk_register(tsdn, ptr, node));
-}
-
-static void
-huge_node_reset(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
-{
- bool err;
-
- err = huge_node_set(tsdn, ptr, node);
- assert(!err);
+ return (chunk_register(ptr, node));
}
static void
@@ -40,39 +31,39 @@ huge_node_unset(const void *ptr, const extent_node_t *node)
}
void *
-huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero)
+huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
+ tcache_t *tcache)
{
+ size_t usize;
- assert(usize == s2u(usize));
+ usize = s2u(size);
+ if (usize == 0) {
+ /* size_t overflow. */
+ return (NULL);
+ }
- return (huge_palloc(tsdn, arena, usize, chunksize, zero));
+ return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache));
}
void *
-huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
- bool zero)
+huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
+ bool zero, tcache_t *tcache)
{
void *ret;
- size_t ausize;
- arena_t *iarena;
+ size_t usize;
extent_node_t *node;
- size_t sn;
bool is_zeroed;
/* Allocate one or more contiguous chunks for this request. */
- assert(!tsdn_null(tsdn) || arena != NULL);
-
- ausize = sa2u(usize, alignment);
- if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
+ usize = sa2u(size, alignment);
+ if (unlikely(usize == 0))
return (NULL);
- assert(ausize >= chunksize);
+ assert(usize >= chunksize);
/* Allocate an extent node with which to track the chunk. */
- iarena = (!tsdn_null(tsdn)) ? arena_ichoose(tsdn_tsd(tsdn), NULL) :
- a0get();
- node = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_node_t)),
- CACHELINE, false, NULL, true, iarena);
+ node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
+ CACHELINE, false, tcache, true, arena);
if (node == NULL)
return (NULL);
@@ -81,35 +72,33 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
* it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed = zero;
- if (likely(!tsdn_null(tsdn)))
- arena = arena_choose(tsdn_tsd(tsdn), arena);
- if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn,
- arena, usize, alignment, &sn, &is_zeroed)) == NULL) {
- idalloctm(tsdn, node, NULL, true, true);
+ arena = arena_choose(tsd, arena);
+ if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
+ size, alignment, &is_zeroed)) == NULL) {
+ idalloctm(tsd, node, tcache, true);
return (NULL);
}
- extent_node_init(node, arena, ret, usize, sn, is_zeroed, true);
+ extent_node_init(node, arena, ret, size, is_zeroed, true);
- if (huge_node_set(tsdn, ret, node)) {
- arena_chunk_dalloc_huge(tsdn, arena, ret, usize, sn);
- idalloctm(tsdn, node, NULL, true, true);
+ if (huge_node_set(ret, node)) {
+ arena_chunk_dalloc_huge(arena, ret, size);
+ idalloctm(tsd, node, tcache, true);
return (NULL);
}
/* Insert node into huge. */
- malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ malloc_mutex_lock(&arena->huge_mtx);
ql_elm_new(node, ql_link);
ql_tail_insert(&arena->huge, node, ql_link);
- malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+ malloc_mutex_unlock(&arena->huge_mtx);
if (zero || (config_fill && unlikely(opt_zero))) {
if (!is_zeroed)
- memset(ret, 0, usize);
+ memset(ret, 0, size);
} else if (config_fill && unlikely(opt_junk_alloc))
- memset(ret, JEMALLOC_ALLOC_JUNK, usize);
+ memset(ret, 0xa5, size);
- arena_decay_tick(tsdn, arena);
return (ret);
}
@@ -127,7 +116,7 @@ huge_dalloc_junk(void *ptr, size_t usize)
* unmapped.
*/
if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
- memset(ptr, JEMALLOC_FREE_JUNK, usize);
+ memset(ptr, 0x5a, usize);
}
}
#ifdef JEMALLOC_JET
@@ -137,8 +126,8 @@ huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif
static void
-huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
- size_t usize_min, size_t usize_max, bool zero)
+huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
+ size_t usize_max, bool zero)
{
size_t usize, usize_next;
extent_node_t *node;
@@ -162,28 +151,24 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
if (oldsize > usize) {
size_t sdiff = oldsize - usize;
if (config_fill && unlikely(opt_junk_free)) {
- memset((void *)((uintptr_t)ptr + usize),
- JEMALLOC_FREE_JUNK, sdiff);
+ memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
post_zeroed = false;
} else {
- post_zeroed = !chunk_purge_wrapper(tsdn, arena,
- &chunk_hooks, ptr, CHUNK_CEILING(oldsize), usize,
- sdiff);
+ post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
+ ptr, CHUNK_CEILING(oldsize), usize, sdiff);
}
} else
post_zeroed = pre_zeroed;
- malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ malloc_mutex_lock(&arena->huge_mtx);
/* Update the size of the huge allocation. */
- huge_node_unset(ptr, node);
assert(extent_node_size_get(node) != usize);
extent_node_size_set(node, usize);
- huge_node_reset(tsdn, ptr, node);
/* Update zeroed. */
extent_node_zeroed_set(node, post_zeroed);
- malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+ malloc_mutex_unlock(&arena->huge_mtx);
- arena_chunk_ralloc_huge_similar(tsdn, arena, ptr, oldsize, usize);
+ arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);
/* Fill if necessary (growing). */
if (oldsize < usize) {
@@ -193,15 +178,14 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
usize - oldsize);
}
} else if (config_fill && unlikely(opt_junk_alloc)) {
- memset((void *)((uintptr_t)ptr + oldsize),
- JEMALLOC_ALLOC_JUNK, usize - oldsize);
+ memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
+ oldsize);
}
}
}
static bool
-huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
- size_t usize)
+huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
{
extent_node_t *node;
arena_t *arena;
@@ -212,7 +196,7 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
pre_zeroed = extent_node_zeroed_get(node);
- chunk_hooks = chunk_hooks_get(tsdn, arena);
+ chunk_hooks = chunk_hooks_get(arena);
assert(oldsize > usize);
@@ -229,59 +213,53 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
sdiff);
post_zeroed = false;
} else {
- post_zeroed = !chunk_purge_wrapper(tsdn, arena,
- &chunk_hooks, CHUNK_ADDR2BASE((uintptr_t)ptr +
- usize), CHUNK_CEILING(oldsize),
+ post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
+ CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
+ CHUNK_CEILING(oldsize),
CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
}
} else
post_zeroed = pre_zeroed;
- malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ malloc_mutex_lock(&arena->huge_mtx);
/* Update the size of the huge allocation. */
- huge_node_unset(ptr, node);
extent_node_size_set(node, usize);
- huge_node_reset(tsdn, ptr, node);
/* Update zeroed. */
extent_node_zeroed_set(node, post_zeroed);
- malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+ malloc_mutex_unlock(&arena->huge_mtx);
/* Zap the excess chunks. */
- arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize,
- extent_node_sn_get(node));
+ arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);
return (false);
}
static bool
-huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
- size_t usize, bool zero) {
+huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero) {
extent_node_t *node;
arena_t *arena;
bool is_zeroed_subchunk, is_zeroed_chunk;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
- malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ malloc_mutex_lock(&arena->huge_mtx);
is_zeroed_subchunk = extent_node_zeroed_get(node);
- malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+ malloc_mutex_unlock(&arena->huge_mtx);
/*
- * Use is_zeroed_chunk to detect whether the trailing memory is zeroed,
- * update extent's zeroed field, and zero as necessary.
+ * Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so
+ * that it is possible to make correct junk/zero fill decisions below.
*/
- is_zeroed_chunk = false;
- if (arena_chunk_ralloc_huge_expand(tsdn, arena, ptr, oldsize, usize,
+ is_zeroed_chunk = zero;
+
+ if (arena_chunk_ralloc_huge_expand(arena, ptr, oldsize, usize,
&is_zeroed_chunk))
return (true);
- malloc_mutex_lock(tsdn, &arena->huge_mtx);
- huge_node_unset(ptr, node);
+ malloc_mutex_lock(&arena->huge_mtx);
+ /* Update the size of the huge allocation. */
extent_node_size_set(node, usize);
- extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
- is_zeroed_chunk);
- huge_node_reset(tsdn, ptr, node);
- malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+ malloc_mutex_unlock(&arena->huge_mtx);
if (zero || (config_fill && unlikely(opt_zero))) {
if (!is_zeroed_subchunk) {
@@ -294,21 +272,19 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
CHUNK_CEILING(oldsize));
}
} else if (config_fill && unlikely(opt_junk_alloc)) {
- memset((void *)((uintptr_t)ptr + oldsize), JEMALLOC_ALLOC_JUNK,
- usize - oldsize);
+ memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
+ oldsize);
}
return (false);
}
bool
-huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
+huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
size_t usize_max, bool zero)
{
assert(s2u(oldsize) == oldsize);
- /* The following should have been caught by callers. */
- assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS);
/* Both allocations must be huge to avoid a move. */
if (oldsize < chunksize || usize_max < chunksize)
@@ -316,18 +292,13 @@ huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
/* Attempt to expand the allocation in-place. */
- if (!huge_ralloc_no_move_expand(tsdn, ptr, oldsize, usize_max,
- zero)) {
- arena_decay_tick(tsdn, huge_aalloc(ptr));
+ if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max, zero))
return (false);
- }
/* Try again, this time with usize_min. */
if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
- CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(tsdn,
- ptr, oldsize, usize_min, zero)) {
- arena_decay_tick(tsdn, huge_aalloc(ptr));
+ CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(ptr,
+ oldsize, usize_min, zero))
return (false);
- }
}
/*
@@ -336,46 +307,36 @@ huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
*/
if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
&& CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
- huge_ralloc_no_move_similar(tsdn, ptr, oldsize, usize_min,
- usize_max, zero);
- arena_decay_tick(tsdn, huge_aalloc(ptr));
+ huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max,
+ zero);
return (false);
}
/* Attempt to shrink the allocation in-place. */
- if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) {
- if (!huge_ralloc_no_move_shrink(tsdn, ptr, oldsize,
- usize_max)) {
- arena_decay_tick(tsdn, huge_aalloc(ptr));
- return (false);
- }
- }
+ if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max))
+ return (huge_ralloc_no_move_shrink(ptr, oldsize, usize_max));
return (true);
}
static void *
-huge_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
- size_t alignment, bool zero)
+huge_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
+ size_t alignment, bool zero, tcache_t *tcache)
{
if (alignment <= chunksize)
- return (huge_malloc(tsdn, arena, usize, zero));
- return (huge_palloc(tsdn, arena, usize, alignment, zero));
+ return (huge_malloc(tsd, arena, usize, zero, tcache));
+ return (huge_palloc(tsd, arena, usize, alignment, zero, tcache));
}
void *
-huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
- size_t usize, size_t alignment, bool zero, tcache_t *tcache)
+huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
+ size_t alignment, bool zero, tcache_t *tcache)
{
void *ret;
size_t copysize;
- /* The following should have been caught by callers. */
- assert(usize > 0 && usize <= HUGE_MAXCLASS);
-
/* Try to avoid moving the allocation. */
- if (!huge_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, usize,
- zero))
+ if (!huge_ralloc_no_move(ptr, oldsize, usize, usize, zero))
return (ptr);
/*
@@ -383,19 +344,19 @@ huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
* different size class. In that case, fall back to allocating new
* space and copying.
*/
- ret = huge_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, alignment,
- zero);
+ ret = huge_ralloc_move_helper(tsd, arena, usize, alignment, zero,
+ tcache);
if (ret == NULL)
return (NULL);
copysize = (usize < oldsize) ? usize : oldsize;
memcpy(ret, ptr, copysize);
- isqalloc(tsd, ptr, oldsize, tcache, true);
+ isqalloc(tsd, ptr, oldsize, tcache);
return (ret);
}
void
-huge_dalloc(tsdn_t *tsdn, void *ptr)
+huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
extent_node_t *node;
arena_t *arena;
@@ -403,18 +364,15 @@ huge_dalloc(tsdn_t *tsdn, void *ptr)
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
huge_node_unset(ptr, node);
- malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ malloc_mutex_lock(&arena->huge_mtx);
ql_remove(&arena->huge, node, ql_link);
- malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+ malloc_mutex_unlock(&arena->huge_mtx);
huge_dalloc_junk(extent_node_addr_get(node),
extent_node_size_get(node));
- arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node),
- extent_node_addr_get(node), extent_node_size_get(node),
- extent_node_sn_get(node));
- idalloctm(tsdn, node, NULL, true, true);
-
- arena_decay_tick(tsdn, arena);
+ arena_chunk_dalloc_huge(extent_node_arena_get(node),
+ extent_node_addr_get(node), extent_node_size_get(node));
+ idalloctm(tsd, node, tcache, true);
}
arena_t *
@@ -425,7 +383,7 @@ huge_aalloc(const void *ptr)
}
size_t
-huge_salloc(tsdn_t *tsdn, const void *ptr)
+huge_salloc(const void *ptr)
{
size_t size;
extent_node_t *node;
@@ -433,15 +391,15 @@ huge_salloc(tsdn_t *tsdn, const void *ptr)
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
- malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ malloc_mutex_lock(&arena->huge_mtx);
size = extent_node_size_get(node);
- malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+ malloc_mutex_unlock(&arena->huge_mtx);
return (size);
}
prof_tctx_t *
-huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
+huge_prof_tctx_get(const void *ptr)
{
prof_tctx_t *tctx;
extent_node_t *node;
@@ -449,29 +407,29 @@ huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
- malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ malloc_mutex_lock(&arena->huge_mtx);
tctx = extent_node_prof_tctx_get(node);
- malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+ malloc_mutex_unlock(&arena->huge_mtx);
return (tctx);
}
void
-huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx)
+huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
{
extent_node_t *node;
arena_t *arena;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
- malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ malloc_mutex_lock(&arena->huge_mtx);
extent_node_prof_tctx_set(node, tctx);
- malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+ malloc_mutex_unlock(&arena->huge_mtx);
}
void
-huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr)
+huge_prof_tctx_reset(const void *ptr)
{
- huge_prof_tctx_set(tsdn, ptr, (prof_tctx_t *)(uintptr_t)1U);
+ huge_prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
}
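
The in-place paths above (expand, same-footprint resize, shrink) are all gated on chunk-granularity comparisons of the old and new sizes. A minimal sketch of that rounding, assuming CHUNK_CEILING()'s usual mask-based definition and a power-of-two chunk size (the helper name is hypothetical, not part of jemalloc):

#include <stddef.h>

/*
 * Round a size up to a multiple of the chunk size, as CHUNK_CEILING() does in
 * huge_ralloc_no_move() above. Comparing the old and new ceilings decides
 * whether a huge reallocation must map more chunks (expand), fits in the
 * chunks already mapped (similar), or can release trailing chunks (shrink).
 * For example, with a 4 MiB chunk, 5 MiB rounds up to 8 MiB.
 */
static size_t
chunk_ceiling(size_t s, size_t chunksize)
{
	/* Assumes chunksize is a power of two. */
	return ((s + chunksize - 1) & ~(chunksize - 1));
}
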
diff --git a/deps/jemalloc/src/jemalloc.c b/deps/jemalloc/src/jemalloc.c
index 07389ca2f..5a2d32406 100644
--- a/deps/jemalloc/src/jemalloc.c
+++ b/deps/jemalloc/src/jemalloc.c
@@ -5,11 +5,7 @@
/* Data. */
/* Runtime configuration options. */
-const char *je_malloc_conf
-#ifndef _WIN32
- JEMALLOC_ATTR(weak)
-#endif
- ;
+const char *je_malloc_conf JEMALLOC_ATTR(weak);
bool opt_abort =
#ifdef JEMALLOC_DEBUG
true
@@ -44,14 +40,14 @@ bool opt_redzone = false;
bool opt_utrace = false;
bool opt_xmalloc = false;
bool opt_zero = false;
-unsigned opt_narenas = 0;
+size_t opt_narenas = 0;
/* Initialized to true if the process is running inside Valgrind. */
bool in_valgrind;
unsigned ncpus;
-/* Protects arenas initialization. */
+/* Protects arenas initialization (arenas, narenas_total). */
static malloc_mutex_t arenas_lock;
/*
* Arenas that are used to service external requests. Not all elements of the
@@ -61,10 +57,10 @@ static malloc_mutex_t arenas_lock;
* arenas. arenas[narenas_auto..narenas_total) are only used if the application
* takes some action to create them and allocate from them.
*/
-arena_t **arenas;
-static unsigned narenas_total; /* Use narenas_total_*(). */
+static arena_t **arenas;
+static unsigned narenas_total;
static arena_t *a0; /* arenas[0]; read-only after initialization. */
-unsigned narenas_auto; /* Read-only after initialization. */
+static unsigned narenas_auto; /* Read-only after initialization. */
typedef enum {
malloc_init_uninitialized = 3,
@@ -74,37 +70,9 @@ typedef enum {
} malloc_init_t;
static malloc_init_t malloc_init_state = malloc_init_uninitialized;
-/* False should be the common case. Set to true to trigger initialization. */
-static bool malloc_slow = true;
-
-/* When malloc_slow is true, set the corresponding bits for sanity check. */
-enum {
- flag_opt_junk_alloc = (1U),
- flag_opt_junk_free = (1U << 1),
- flag_opt_quarantine = (1U << 2),
- flag_opt_zero = (1U << 3),
- flag_opt_utrace = (1U << 4),
- flag_in_valgrind = (1U << 5),
- flag_opt_xmalloc = (1U << 6)
-};
-static uint8_t malloc_slow_flags;
-
-JEMALLOC_ALIGNED(CACHELINE)
-const size_t pind2sz_tab[NPSIZES] = {
-#define PSZ_yes(lg_grp, ndelta, lg_delta) \
- (((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))),
-#define PSZ_no(lg_grp, ndelta, lg_delta)
-#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
- PSZ_##psz(lg_grp, ndelta, lg_delta)
- SIZE_CLASSES
-#undef PSZ_yes
-#undef PSZ_no
-#undef SC
-};
-
JEMALLOC_ALIGNED(CACHELINE)
const size_t index2size_tab[NSIZES] = {
-#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
+#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
SIZE_CLASSES
#undef SC
@@ -176,7 +144,7 @@ const uint8_t size2index_tab[] = {
#define S2B_11(i) S2B_10(i) S2B_10(i)
#endif
#define S2B_no(i)
-#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
+#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
S2B_##lg_delta_lookup(index)
SIZE_CLASSES
#undef S2B_3
@@ -227,7 +195,7 @@ _init_init_lock(void)
* really only matters early in the process creation, before any
* separate thread normally starts doing anything. */
if (!init_lock_initialized)
- malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT);
+ malloc_mutex_init(&init_lock);
init_lock_initialized = true;
}
@@ -322,10 +290,18 @@ malloc_init(void)
}
/*
- * The a0*() functions are used instead of i{d,}alloc() in situations that
+ * The a0*() functions are used instead of i[mcd]alloc() in situations that
* cannot tolerate TLS variable access.
*/
+arena_t *
+a0get(void)
+{
+
+ assert(a0 != NULL);
+ return (a0);
+}
+
static void *
a0ialloc(size_t size, bool zero, bool is_metadata)
{
@@ -333,22 +309,14 @@ a0ialloc(size_t size, bool zero, bool is_metadata)
if (unlikely(malloc_init_a0()))
return (NULL);
- return (iallocztm(TSDN_NULL, size, size2index(size), zero, NULL,
- is_metadata, arena_get(TSDN_NULL, 0, true), true));
+ return (iallocztm(NULL, size, zero, false, is_metadata, a0get()));
}
static void
a0idalloc(void *ptr, bool is_metadata)
{
- idalloctm(TSDN_NULL, ptr, false, is_metadata, true);
-}
-
-arena_t *
-a0get(void)
-{
-
- return (a0);
+ idalloctm(NULL, ptr, false, is_metadata);
}
void *
@@ -405,228 +373,224 @@ bootstrap_free(void *ptr)
a0idalloc(ptr, false);
}
-static void
-arena_set(unsigned ind, arena_t *arena)
-{
-
- atomic_write_p((void **)&arenas[ind], arena);
-}
-
-static void
-narenas_total_set(unsigned narenas)
-{
-
- atomic_write_u(&narenas_total, narenas);
-}
-
-static void
-narenas_total_inc(void)
-{
-
- atomic_add_u(&narenas_total, 1);
-}
-
-unsigned
-narenas_total_get(void)
-{
-
- return (atomic_read_u(&narenas_total));
-}
-
/* Create a new arena and insert it into the arenas array at index ind. */
static arena_t *
-arena_init_locked(tsdn_t *tsdn, unsigned ind)
+arena_init_locked(unsigned ind)
{
arena_t *arena;
- assert(ind <= narenas_total_get());
+ /* Expand arenas if necessary. */
+ assert(ind <= narenas_total);
if (ind > MALLOCX_ARENA_MAX)
return (NULL);
- if (ind == narenas_total_get())
- narenas_total_inc();
+ if (ind == narenas_total) {
+ unsigned narenas_new = narenas_total + 1;
+ arena_t **arenas_new =
+ (arena_t **)a0malloc(CACHELINE_CEILING(narenas_new *
+ sizeof(arena_t *)));
+ if (arenas_new == NULL)
+ return (NULL);
+ memcpy(arenas_new, arenas, narenas_total * sizeof(arena_t *));
+ arenas_new[ind] = NULL;
+ /*
+ * Deallocate only if arenas came from a0malloc() (not
+ * base_alloc()).
+ */
+ if (narenas_total != narenas_auto)
+ a0dalloc(arenas);
+ arenas = arenas_new;
+ narenas_total = narenas_new;
+ }
/*
* Another thread may have already initialized arenas[ind] if it's an
* auto arena.
*/
- arena = arena_get(tsdn, ind, false);
+ arena = arenas[ind];
if (arena != NULL) {
assert(ind < narenas_auto);
return (arena);
}
/* Actually initialize the arena. */
- arena = arena_new(tsdn, ind);
- arena_set(ind, arena);
+ arena = arenas[ind] = arena_new(ind);
return (arena);
}
arena_t *
-arena_init(tsdn_t *tsdn, unsigned ind)
+arena_init(unsigned ind)
{
arena_t *arena;
- malloc_mutex_lock(tsdn, &arenas_lock);
- arena = arena_init_locked(tsdn, ind);
- malloc_mutex_unlock(tsdn, &arenas_lock);
+ malloc_mutex_lock(&arenas_lock);
+ arena = arena_init_locked(ind);
+ malloc_mutex_unlock(&arenas_lock);
return (arena);
}
+unsigned
+narenas_total_get(void)
+{
+ unsigned narenas;
+
+ malloc_mutex_lock(&arenas_lock);
+ narenas = narenas_total;
+ malloc_mutex_unlock(&arenas_lock);
+
+ return (narenas);
+}
+
static void
-arena_bind(tsd_t *tsd, unsigned ind, bool internal)
+arena_bind_locked(tsd_t *tsd, unsigned ind)
{
arena_t *arena;
- if (!tsd_nominal(tsd))
- return;
-
- arena = arena_get(tsd_tsdn(tsd), ind, false);
- arena_nthreads_inc(arena, internal);
+ arena = arenas[ind];
+ arena->nthreads++;
- if (internal)
- tsd_iarena_set(tsd, arena);
- else
+ if (tsd_nominal(tsd))
tsd_arena_set(tsd, arena);
}
+static void
+arena_bind(tsd_t *tsd, unsigned ind)
+{
+
+ malloc_mutex_lock(&arenas_lock);
+ arena_bind_locked(tsd, ind);
+ malloc_mutex_unlock(&arenas_lock);
+}
+
void
arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
{
arena_t *oldarena, *newarena;
- oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
- newarena = arena_get(tsd_tsdn(tsd), newind, false);
- arena_nthreads_dec(oldarena, false);
- arena_nthreads_inc(newarena, false);
+ malloc_mutex_lock(&arenas_lock);
+ oldarena = arenas[oldind];
+ newarena = arenas[newind];
+ oldarena->nthreads--;
+ newarena->nthreads++;
+ malloc_mutex_unlock(&arenas_lock);
tsd_arena_set(tsd, newarena);
}
+unsigned
+arena_nbound(unsigned ind)
+{
+ unsigned nthreads;
+
+ malloc_mutex_lock(&arenas_lock);
+ nthreads = arenas[ind]->nthreads;
+ malloc_mutex_unlock(&arenas_lock);
+ return (nthreads);
+}
+
static void
-arena_unbind(tsd_t *tsd, unsigned ind, bool internal)
+arena_unbind(tsd_t *tsd, unsigned ind)
{
arena_t *arena;
- arena = arena_get(tsd_tsdn(tsd), ind, false);
- arena_nthreads_dec(arena, internal);
- if (internal)
- tsd_iarena_set(tsd, NULL);
- else
- tsd_arena_set(tsd, NULL);
+ malloc_mutex_lock(&arenas_lock);
+ arena = arenas[ind];
+ arena->nthreads--;
+ malloc_mutex_unlock(&arenas_lock);
+ tsd_arena_set(tsd, NULL);
}
-arena_tdata_t *
-arena_tdata_get_hard(tsd_t *tsd, unsigned ind)
+arena_t *
+arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing)
{
- arena_tdata_t *tdata, *arenas_tdata_old;
- arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
- unsigned narenas_tdata_old, i;
- unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
+ arena_t *arena;
+ arena_t **arenas_cache = tsd_arenas_cache_get(tsd);
+ unsigned narenas_cache = tsd_narenas_cache_get(tsd);
unsigned narenas_actual = narenas_total_get();
- /*
- * Dissociate old tdata array (and set up for deallocation upon return)
- * if it's too small.
- */
- if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
- arenas_tdata_old = arenas_tdata;
- narenas_tdata_old = narenas_tdata;
- arenas_tdata = NULL;
- narenas_tdata = 0;
- tsd_arenas_tdata_set(tsd, arenas_tdata);
- tsd_narenas_tdata_set(tsd, narenas_tdata);
- } else {
- arenas_tdata_old = NULL;
- narenas_tdata_old = 0;
- }
-
- /* Allocate tdata array if it's missing. */
- if (arenas_tdata == NULL) {
- bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
- narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;
-
- if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
- *arenas_tdata_bypassp = true;
- arenas_tdata = (arena_tdata_t *)a0malloc(
- sizeof(arena_tdata_t) * narenas_tdata);
- *arenas_tdata_bypassp = false;
+ /* Deallocate old cache if it's too small. */
+ if (arenas_cache != NULL && narenas_cache < narenas_actual) {
+ a0dalloc(arenas_cache);
+ arenas_cache = NULL;
+ narenas_cache = 0;
+ tsd_arenas_cache_set(tsd, arenas_cache);
+ tsd_narenas_cache_set(tsd, narenas_cache);
+ }
+
+ /* Allocate cache if it's missing. */
+ if (arenas_cache == NULL) {
+ bool *arenas_cache_bypassp = tsd_arenas_cache_bypassp_get(tsd);
+ assert(ind < narenas_actual || !init_if_missing);
+ narenas_cache = (ind < narenas_actual) ? narenas_actual : ind+1;
+
+ if (tsd_nominal(tsd) && !*arenas_cache_bypassp) {
+ *arenas_cache_bypassp = true;
+ arenas_cache = (arena_t **)a0malloc(sizeof(arena_t *) *
+ narenas_cache);
+ *arenas_cache_bypassp = false;
}
- if (arenas_tdata == NULL) {
- tdata = NULL;
- goto label_return;
+ if (arenas_cache == NULL) {
+ /*
+ * This function must always tell the truth, even if
+ * it's slow, so don't let OOM, thread cleanup (note
+ * tsd_nominal check), nor recursive allocation
+ * avoidance (note arenas_cache_bypass check) get in the
+ * way.
+ */
+ if (ind >= narenas_actual)
+ return (NULL);
+ malloc_mutex_lock(&arenas_lock);
+ arena = arenas[ind];
+ malloc_mutex_unlock(&arenas_lock);
+ return (arena);
}
- assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
- tsd_arenas_tdata_set(tsd, arenas_tdata);
- tsd_narenas_tdata_set(tsd, narenas_tdata);
+ assert(tsd_nominal(tsd) && !*arenas_cache_bypassp);
+ tsd_arenas_cache_set(tsd, arenas_cache);
+ tsd_narenas_cache_set(tsd, narenas_cache);
}
/*
- * Copy to tdata array. It's possible that the actual number of arenas
- * has increased since narenas_total_get() was called above, but that
- * causes no correctness issues unless two threads concurrently execute
- * the arenas.extend mallctl, which we trust mallctl synchronization to
+ * Copy to cache. It's possible that the actual number of arenas has
+ * increased since narenas_total_get() was called above, but that causes
+ * no correctness issues unless two threads concurrently execute the
+ * arenas.extend mallctl, which we trust mallctl synchronization to
* prevent.
*/
-
- /* Copy/initialize tickers. */
- for (i = 0; i < narenas_actual; i++) {
- if (i < narenas_tdata_old) {
- ticker_copy(&arenas_tdata[i].decay_ticker,
- &arenas_tdata_old[i].decay_ticker);
- } else {
- ticker_init(&arenas_tdata[i].decay_ticker,
- DECAY_NTICKS_PER_UPDATE);
- }
- }
- if (narenas_tdata > narenas_actual) {
- memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
- * (narenas_tdata - narenas_actual));
- }
-
- /* Read the refreshed tdata array. */
- tdata = &arenas_tdata[ind];
-label_return:
- if (arenas_tdata_old != NULL)
- a0dalloc(arenas_tdata_old);
- return (tdata);
+ malloc_mutex_lock(&arenas_lock);
+ memcpy(arenas_cache, arenas, sizeof(arena_t *) * narenas_actual);
+ malloc_mutex_unlock(&arenas_lock);
+ if (narenas_cache > narenas_actual) {
+ memset(&arenas_cache[narenas_actual], 0, sizeof(arena_t *) *
+ (narenas_cache - narenas_actual));
+ }
+
+ /* Read the refreshed cache, and init the arena if necessary. */
+ arena = arenas_cache[ind];
+ if (init_if_missing && arena == NULL)
+ arena = arenas_cache[ind] = arena_init(ind);
+ return (arena);
}
/* Slow path, called only by arena_choose(). */
arena_t *
-arena_choose_hard(tsd_t *tsd, bool internal)
+arena_choose_hard(tsd_t *tsd)
{
- arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);
+ arena_t *ret;
if (narenas_auto > 1) {
- unsigned i, j, choose[2], first_null;
-
- /*
- * Determine binding for both non-internal and internal
- * allocation.
- *
- * choose[0]: For application allocation.
- * choose[1]: For internal metadata allocation.
- */
-
- for (j = 0; j < 2; j++)
- choose[j] = 0;
+ unsigned i, choose, first_null;
+ choose = 0;
first_null = narenas_auto;
- malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
- assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
+ malloc_mutex_lock(&arenas_lock);
+ assert(a0get() != NULL);
for (i = 1; i < narenas_auto; i++) {
- if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
+ if (arenas[i] != NULL) {
/*
* Choose the first arena that has the lowest
* number of threads assigned to it.
*/
- for (j = 0; j < 2; j++) {
- if (arena_nthreads_get(arena_get(
- tsd_tsdn(tsd), i, false), !!j) <
- arena_nthreads_get(arena_get(
- tsd_tsdn(tsd), choose[j], false),
- !!j))
- choose[j] = i;
- }
+ if (arenas[i]->nthreads <
+ arenas[choose]->nthreads)
+ choose = i;
} else if (first_null == narenas_auto) {
/*
* Record the index of the first uninitialized
@@ -641,40 +605,27 @@ arena_choose_hard(tsd_t *tsd, bool internal)
}
}
- for (j = 0; j < 2; j++) {
- if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
- choose[j], false), !!j) == 0 || first_null ==
- narenas_auto) {
- /*
- * Use an unloaded arena, or the least loaded
- * arena if all arenas are already initialized.
- */
- if (!!j == internal) {
- ret = arena_get(tsd_tsdn(tsd),
- choose[j], false);
- }
- } else {
- arena_t *arena;
-
- /* Initialize a new arena. */
- choose[j] = first_null;
- arena = arena_init_locked(tsd_tsdn(tsd),
- choose[j]);
- if (arena == NULL) {
- malloc_mutex_unlock(tsd_tsdn(tsd),
- &arenas_lock);
- return (NULL);
- }
- if (!!j == internal)
- ret = arena;
+ if (arenas[choose]->nthreads == 0
+ || first_null == narenas_auto) {
+ /*
+ * Use an unloaded arena, or the least loaded arena if
+ * all arenas are already initialized.
+ */
+ ret = arenas[choose];
+ } else {
+ /* Initialize a new arena. */
+ choose = first_null;
+ ret = arena_init_locked(choose);
+ if (ret == NULL) {
+ malloc_mutex_unlock(&arenas_lock);
+ return (NULL);
}
- arena_bind(tsd, choose[j], !!j);
}
- malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
+ arena_bind_locked(tsd, choose);
+ malloc_mutex_unlock(&arenas_lock);
} else {
- ret = arena_get(tsd_tsdn(tsd), 0, false);
- arena_bind(tsd, 0, false);
- arena_bind(tsd, 0, true);
+ ret = a0get();
+ arena_bind(tsd, 0);
}
return (ret);
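
The selection policy restored in arena_choose_hard() above reads as: remember the least-loaded initialized arena and the first empty slot, reuse the least-loaded arena only if it is completely idle (or if every slot is already initialized), and otherwise initialize a fresh arena. A rough standalone sketch of that policy, with hypothetical types and without the locking and thread-binding details:

#include <stddef.h>

/* Illustrative stand-ins for jemalloc's arena bookkeeping. */
struct arena_slot {
	unsigned nthreads;	/* Threads currently bound to this arena. */
	int initialized;	/* Non-zero once the arena has been created. */
};

/*
 * Return the index of the arena a new thread should bind to. Slot 0 is
 * assumed to be initialized, mirroring arena 0 (a0) in jemalloc.
 */
static unsigned
choose_arena_index(const struct arena_slot *slots, unsigned narenas)
{
	unsigned i, choose = 0, first_null = narenas;

	for (i = 1; i < narenas; i++) {
		if (slots[i].initialized) {
			if (slots[i].nthreads < slots[choose].nthreads)
				choose = i;
		} else if (first_null == narenas)
			first_null = i;
	}
	/* Prefer an idle arena; otherwise create one in the first empty slot. */
	if (slots[choose].nthreads == 0 || first_null == narenas)
		return (choose);
	return (first_null);
}
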
@@ -695,49 +646,36 @@ thread_deallocated_cleanup(tsd_t *tsd)
}
void
-iarena_cleanup(tsd_t *tsd)
-{
- arena_t *iarena;
-
- iarena = tsd_iarena_get(tsd);
- if (iarena != NULL)
- arena_unbind(tsd, iarena->ind, true);
-}
-
-void
arena_cleanup(tsd_t *tsd)
{
arena_t *arena;
arena = tsd_arena_get(tsd);
if (arena != NULL)
- arena_unbind(tsd, arena->ind, false);
+ arena_unbind(tsd, arena->ind);
}
void
-arenas_tdata_cleanup(tsd_t *tsd)
+arenas_cache_cleanup(tsd_t *tsd)
{
- arena_tdata_t *arenas_tdata;
-
- /* Prevent tsd->arenas_tdata from being (re)created. */
- *tsd_arenas_tdata_bypassp_get(tsd) = true;
+ arena_t **arenas_cache;
- arenas_tdata = tsd_arenas_tdata_get(tsd);
- if (arenas_tdata != NULL) {
- tsd_arenas_tdata_set(tsd, NULL);
- a0dalloc(arenas_tdata);
+ arenas_cache = tsd_arenas_cache_get(tsd);
+ if (arenas_cache != NULL) {
+ tsd_arenas_cache_set(tsd, NULL);
+ a0dalloc(arenas_cache);
}
}
void
-narenas_tdata_cleanup(tsd_t *tsd)
+narenas_cache_cleanup(tsd_t *tsd)
{
/* Do nothing. */
}
void
-arenas_tdata_bypass_cleanup(tsd_t *tsd)
+arenas_cache_bypass_cleanup(tsd_t *tsd)
{
/* Do nothing. */
@@ -748,11 +686,8 @@ stats_print_atexit(void)
{
if (config_tcache && config_stats) {
- tsdn_t *tsdn;
unsigned narenas, i;
- tsdn = tsdn_fetch();
-
/*
* Merge stats from extant threads. This is racy, since
* individual threads do not lock when recording tcache stats
@@ -761,7 +696,7 @@ stats_print_atexit(void)
* continue to allocate.
*/
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
- arena_t *arena = arena_get(tsdn, i, false);
+ arena_t *arena = arenas[i];
if (arena != NULL) {
tcache_t *tcache;
@@ -771,11 +706,11 @@ stats_print_atexit(void)
* and bin locks in the opposite order,
* deadlocks may result.
*/
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
ql_foreach(tcache, &arena->tcache_ql, link) {
- tcache_stats_merge(tsdn, tcache, arena);
+ tcache_stats_merge(tcache, arena);
}
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
}
}
}
@@ -812,20 +747,6 @@ malloc_ncpus(void)
SYSTEM_INFO si;
GetSystemInfo(&si);
result = si.dwNumberOfProcessors;
-#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
- /*
- * glibc >= 2.6 has the CPU_COUNT macro.
- *
- * glibc's sysconf() uses isspace(). glibc allocates for the first time
- * *before* setting up the isspace tables. Therefore we need a
- * different method to get the number of CPUs.
- */
- {
- cpu_set_t set;
-
- pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
- result = CPU_COUNT(&set);
- }
#else
result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
@@ -918,26 +839,6 @@ malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
}
static void
-malloc_slow_flag_init(void)
-{
- /*
- * Combine the runtime options into malloc_slow for fast path. Called
- * after processing all the options.
- */
- malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
- | (opt_junk_free ? flag_opt_junk_free : 0)
- | (opt_quarantine ? flag_opt_quarantine : 0)
- | (opt_zero ? flag_opt_zero : 0)
- | (opt_utrace ? flag_opt_utrace : 0)
- | (opt_xmalloc ? flag_opt_xmalloc : 0);
-
- if (config_valgrind)
- malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0);
-
- malloc_slow = (malloc_slow_flags != 0);
-}
-
-static void
malloc_conf_init(void)
{
unsigned i;
@@ -963,13 +864,10 @@ malloc_conf_init(void)
opt_tcache = false;
}
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < 3; i++) {
/* Get runtime configuration. */
switch (i) {
case 0:
- opts = config_malloc_conf;
- break;
- case 1:
if (je_malloc_conf != NULL) {
/*
* Use options that were compiled into the
@@ -982,8 +880,8 @@ malloc_conf_init(void)
opts = buf;
}
break;
- case 2: {
- ssize_t linklen = 0;
+ case 1: {
+ int linklen = 0;
#ifndef _WIN32
int saved_errno = errno;
const char *linkname =
@@ -1009,7 +907,7 @@ malloc_conf_init(void)
buf[linklen] = '\0';
opts = buf;
break;
- } case 3: {
+ } case 2: {
const char *envname =
#ifdef JEMALLOC_PREFIX
JEMALLOC_CPREFIX"MALLOC_CONF"
@@ -1056,11 +954,7 @@ malloc_conf_init(void)
if (cont) \
continue; \
}
-#define CONF_MIN_no(um, min) false
-#define CONF_MIN_yes(um, min) ((um) < (min))
-#define CONF_MAX_no(um, max) false
-#define CONF_MAX_yes(um, max) ((um) > (max))
-#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \
+#define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \
if (CONF_MATCH(n)) { \
uintmax_t um; \
char *end; \
@@ -1073,35 +967,24 @@ malloc_conf_init(void)
"Invalid conf value", \
k, klen, v, vlen); \
} else if (clip) { \
- if (CONF_MIN_##check_min(um, \
- (min))) \
- o = (t)(min); \
- else if (CONF_MAX_##check_max( \
- um, (max))) \
- o = (t)(max); \
+ if ((min) != 0 && um < (min)) \
+ o = (min); \
+ else if (um > (max)) \
+ o = (max); \
else \
- o = (t)um; \
+ o = um; \
} else { \
- if (CONF_MIN_##check_min(um, \
- (min)) || \
- CONF_MAX_##check_max(um, \
- (max))) { \
+ if (((min) != 0 && um < (min)) \
+ || um > (max)) { \
malloc_conf_error( \
"Out-of-range " \
"conf value", \
k, klen, v, vlen); \
} else \
- o = (t)um; \
+ o = um; \
} \
continue; \
}
-#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \
- clip) \
- CONF_HANDLE_T_U(unsigned, o, n, min, max, \
- check_min, check_max, clip)
-#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \
- CONF_HANDLE_T_U(size_t, o, n, min, max, \
- check_min, check_max, clip)
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
if (CONF_MATCH(n)) { \
long l; \
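
In the reverted CONF_HANDLE_SIZE_T form shown in the hunk above, a clip argument of true silently pins out-of-range values to the [min, max] bounds (with min == 0 meaning "no lower bound"), while clip == false reports an out-of-range conf error instead. A standalone sketch of just the clipping branch (hypothetical helper, not part of jemalloc):

#include <inttypes.h>
#include <stddef.h>

/*
 * Clip a parsed option value to [min, max], as the clip == true branch of
 * CONF_HANDLE_SIZE_T does. A min of 0 disables the lower bound, matching the
 * macro's "(min) != 0 &&" guard.
 */
static size_t
conf_clip(uintmax_t um, size_t min, size_t max)
{
	if (min != 0 && um < (uintmax_t)min)
		return (min);
	if (um > (uintmax_t)max)
		return (max);
	return ((size_t)um);
}
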
@@ -1144,7 +1027,7 @@ malloc_conf_init(void)
*/
CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
- (sizeof(size_t) << 3) - 1, yes, yes, true)
+ (sizeof(size_t) << 3) - 1, true)
if (strncmp("dss", k, klen) == 0) {
int i;
bool match = false;
@@ -1169,47 +1052,17 @@ malloc_conf_init(void)
}
continue;
}
- CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
- UINT_MAX, yes, no, false)
- if (strncmp("purge", k, klen) == 0) {
- int i;
- bool match = false;
- for (i = 0; i < purge_mode_limit; i++) {
- if (strncmp(purge_mode_names[i], v,
- vlen) == 0) {
- opt_purge = (purge_mode_t)i;
- match = true;
- break;
- }
- }
- if (!match) {
- malloc_conf_error("Invalid conf value",
- k, klen, v, vlen);
- }
- continue;
- }
+ CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
+ SIZE_T_MAX, false)
CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
-1, (sizeof(size_t) << 3) - 1)
- CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1,
- NSTIME_SEC_MAX);
CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
if (config_fill) {
if (CONF_MATCH("junk")) {
if (CONF_MATCH_VALUE("true")) {
- if (config_valgrind &&
- unlikely(in_valgrind)) {
- malloc_conf_error(
- "Deallocation-time "
- "junk filling cannot "
- "be enabled while "
- "running inside "
- "Valgrind", k, klen, v,
- vlen);
- } else {
- opt_junk = "true";
- opt_junk_alloc = true;
- opt_junk_free = true;
- }
+ opt_junk = "true";
+ opt_junk_alloc = opt_junk_free =
+ true;
} else if (CONF_MATCH_VALUE("false")) {
opt_junk = "false";
opt_junk_alloc = opt_junk_free =
@@ -1219,20 +1072,9 @@ malloc_conf_init(void)
opt_junk_alloc = true;
opt_junk_free = false;
} else if (CONF_MATCH_VALUE("free")) {
- if (config_valgrind &&
- unlikely(in_valgrind)) {
- malloc_conf_error(
- "Deallocation-time "
- "junk filling cannot "
- "be enabled while "
- "running inside "
- "Valgrind", k, klen, v,
- vlen);
- } else {
- opt_junk = "free";
- opt_junk_alloc = false;
- opt_junk_free = true;
- }
+ opt_junk = "free";
+ opt_junk_alloc = false;
+ opt_junk_free = true;
} else {
malloc_conf_error(
"Invalid conf value", k,
@@ -1241,7 +1083,7 @@ malloc_conf_init(void)
continue;
}
CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
- 0, SIZE_T_MAX, no, no, false)
+ 0, SIZE_T_MAX, false)
CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
CONF_HANDLE_BOOL(opt_zero, "zero", true)
}
@@ -1278,8 +1120,8 @@ malloc_conf_init(void)
CONF_HANDLE_BOOL(opt_prof_thread_active_init,
"prof_thread_active_init", true)
CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
- "lg_prof_sample", 0, (sizeof(uint64_t) << 3)
- - 1, no, yes, true)
+ "lg_prof_sample", 0,
+ (sizeof(uint64_t) << 3) - 1, true)
CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
true)
CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
@@ -1295,14 +1137,7 @@ malloc_conf_init(void)
malloc_conf_error("Invalid conf pair", k, klen, v,
vlen);
#undef CONF_MATCH
-#undef CONF_MATCH_VALUE
#undef CONF_HANDLE_BOOL
-#undef CONF_MIN_no
-#undef CONF_MIN_yes
-#undef CONF_MAX_no
-#undef CONF_MAX_yes
-#undef CONF_HANDLE_T_U
-#undef CONF_HANDLE_UNSIGNED
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
@@ -1310,6 +1145,7 @@ malloc_conf_init(void)
}
}
+/* init_lock must be held. */
static bool
malloc_init_hard_needed(void)
{
@@ -1325,14 +1161,11 @@ malloc_init_hard_needed(void)
}
#ifdef JEMALLOC_THREADED_INIT
if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
- spin_t spinner;
-
/* Busy-wait until the initializing thread completes. */
- spin_init(&spinner);
do {
- malloc_mutex_unlock(TSDN_NULL, &init_lock);
- spin_adaptive(&spinner);
- malloc_mutex_lock(TSDN_NULL, &init_lock);
+ malloc_mutex_unlock(&init_lock);
+ CPU_SPINWAIT;
+ malloc_mutex_lock(&init_lock);
} while (!malloc_initialized());
return (false);
}
@@ -1340,8 +1173,9 @@ malloc_init_hard_needed(void)
return (true);
}
+/* init_lock must be held. */
static bool
-malloc_init_hard_a0_locked()
+malloc_init_hard_a0_locked(void)
{
malloc_initializer = INITIALIZER;
@@ -1357,7 +1191,6 @@ malloc_init_hard_a0_locked()
abort();
}
}
- pages_boot();
if (base_boot())
return (true);
if (chunk_boot())
@@ -1366,28 +1199,26 @@ malloc_init_hard_a0_locked()
return (true);
if (config_prof)
prof_boot1();
- arena_boot();
- if (config_tcache && tcache_boot(TSDN_NULL))
+ if (arena_boot())
+ return (true);
+ if (config_tcache && tcache_boot())
return (true);
- if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS))
+ if (malloc_mutex_init(&arenas_lock))
return (true);
/*
* Create enough scaffolding to allow recursive allocation in
* malloc_ncpus().
*/
- narenas_auto = 1;
- narenas_total_set(narenas_auto);
+ narenas_total = narenas_auto = 1;
arenas = &a0;
memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
/*
* Initialize one arena here. The rest are lazily created in
* arena_choose_hard().
*/
- if (arena_init(TSDN_NULL, 0) == NULL)
+ if (arena_init(0) == NULL)
return (true);
-
malloc_init_state = malloc_init_a0_initialized;
-
return (false);
}
@@ -1396,42 +1227,45 @@ malloc_init_hard_a0(void)
{
bool ret;
- malloc_mutex_lock(TSDN_NULL, &init_lock);
+ malloc_mutex_lock(&init_lock);
ret = malloc_init_hard_a0_locked();
- malloc_mutex_unlock(TSDN_NULL, &init_lock);
+ malloc_mutex_unlock(&init_lock);
return (ret);
}
-/* Initialize data structures which may trigger recursive allocation. */
-static bool
+/*
+ * Initialize data structures which may trigger recursive allocation.
+ *
+ * init_lock must be held.
+ */
+static void
malloc_init_hard_recursible(void)
{
malloc_init_state = malloc_init_recursible;
+ malloc_mutex_unlock(&init_lock);
ncpus = malloc_ncpus();
-#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
- && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
- !defined(__native_client__))
- /* LinuxThreads' pthread_atfork() allocates. */
+#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
+ && !defined(_WIN32) && !defined(__native_client__))
+ /* LinuxThreads's pthread_atfork() allocates. */
if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
jemalloc_postfork_child) != 0) {
malloc_write("<jemalloc>: Error in pthread_atfork()\n");
if (opt_abort)
abort();
- return (true);
}
#endif
-
- return (false);
+ malloc_mutex_lock(&init_lock);
}
+/* init_lock must be held. */
static bool
-malloc_init_hard_finish(tsdn_t *tsdn)
+malloc_init_hard_finish(void)
{
- if (malloc_mutex_boot())
+ if (mutex_boot())
return (true);
if (opt_narenas == 0) {
@@ -1446,69 +1280,68 @@ malloc_init_hard_finish(tsdn_t *tsdn)
}
narenas_auto = opt_narenas;
/*
- * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
+ * Make sure that the arenas array can be allocated. In practice, this
+ * limit is enough to allow the allocator to function, but the ctl
+ * machinery will fail to allocate memory at far lower limits.
*/
- if (narenas_auto > MALLOCX_ARENA_MAX) {
- narenas_auto = MALLOCX_ARENA_MAX;
+ if (narenas_auto > chunksize / sizeof(arena_t *)) {
+ narenas_auto = chunksize / sizeof(arena_t *);
malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
narenas_auto);
}
- narenas_total_set(narenas_auto);
+ narenas_total = narenas_auto;
/* Allocate and initialize arenas. */
- arenas = (arena_t **)base_alloc(tsdn, sizeof(arena_t *) *
- (MALLOCX_ARENA_MAX+1));
+ arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
if (arenas == NULL)
return (true);
+ /*
+ * Zero the array. In practice, this should always be pre-zeroed,
+ * since it was just mmap()ed, but let's be sure.
+ */
+ memset(arenas, 0, sizeof(arena_t *) * narenas_total);
/* Copy the pointer to the one arena that was already initialized. */
- arena_set(0, a0);
+ arenas[0] = a0;
malloc_init_state = malloc_init_initialized;
- malloc_slow_flag_init();
-
return (false);
}
static bool
malloc_init_hard(void)
{
- tsd_t *tsd;
#if defined(_WIN32) && _WIN32_WINNT < 0x0600
_init_init_lock();
#endif
- malloc_mutex_lock(TSDN_NULL, &init_lock);
+ malloc_mutex_lock(&init_lock);
if (!malloc_init_hard_needed()) {
- malloc_mutex_unlock(TSDN_NULL, &init_lock);
+ malloc_mutex_unlock(&init_lock);
return (false);
}
if (malloc_init_state != malloc_init_a0_initialized &&
malloc_init_hard_a0_locked()) {
- malloc_mutex_unlock(TSDN_NULL, &init_lock);
+ malloc_mutex_unlock(&init_lock);
return (true);
}
-
- malloc_mutex_unlock(TSDN_NULL, &init_lock);
- /* Recursive allocation relies on functional tsd. */
- tsd = malloc_tsd_boot0();
- if (tsd == NULL)
- return (true);
- if (malloc_init_hard_recursible())
+ if (malloc_tsd_boot0()) {
+ malloc_mutex_unlock(&init_lock);
return (true);
- malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
-
- if (config_prof && prof_boot2(tsd)) {
- malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
+ }
+ if (config_prof && prof_boot2()) {
+ malloc_mutex_unlock(&init_lock);
return (true);
}
- if (malloc_init_hard_finish(tsd_tsdn(tsd))) {
- malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
+ malloc_init_hard_recursible();
+
+ if (malloc_init_hard_finish()) {
+ malloc_mutex_unlock(&init_lock);
return (true);
}
- malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
+ malloc_mutex_unlock(&init_lock);
malloc_tsd_boot1();
return (false);
}
@@ -1522,104 +1355,61 @@ malloc_init_hard(void)
*/
static void *
-ialloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, bool zero,
- prof_tctx_t *tctx, bool slow_path)
+imalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
{
void *p;
if (tctx == NULL)
return (NULL);
if (usize <= SMALL_MAXCLASS) {
- szind_t ind_large = size2index(LARGE_MINCLASS);
- p = ialloc(tsd, LARGE_MINCLASS, ind_large, zero, slow_path);
+ p = imalloc(tsd, LARGE_MINCLASS);
if (p == NULL)
return (NULL);
- arena_prof_promoted(tsd_tsdn(tsd), p, usize);
+ arena_prof_promoted(p, usize);
} else
- p = ialloc(tsd, usize, ind, zero, slow_path);
+ p = imalloc(tsd, usize);
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
-ialloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool zero, bool slow_path)
+imalloc_prof(tsd_t *tsd, size_t usize)
{
void *p;
prof_tctx_t *tctx;
tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
- p = ialloc_prof_sample(tsd, usize, ind, zero, tctx, slow_path);
+ p = imalloc_prof_sample(tsd, usize, tctx);
else
- p = ialloc(tsd, usize, ind, zero, slow_path);
+ p = imalloc(tsd, usize);
if (unlikely(p == NULL)) {
prof_alloc_rollback(tsd, tctx, true);
return (NULL);
}
- prof_malloc(tsd_tsdn(tsd), p, usize, tctx);
+ prof_malloc(p, usize, tctx);
return (p);
}
-/*
- * ialloc_body() is inlined so that fast and slow paths are generated separately
- * with statically known slow_path.
- *
- * This function guarantees that *tsdn is non-NULL on success.
- */
JEMALLOC_ALWAYS_INLINE_C void *
-ialloc_body(size_t size, bool zero, tsdn_t **tsdn, size_t *usize,
- bool slow_path)
+imalloc_body(size_t size, tsd_t **tsd, size_t *usize)
{
- tsd_t *tsd;
- szind_t ind;
-
- if (slow_path && unlikely(malloc_init())) {
- *tsdn = NULL;
- return (NULL);
- }
-
- tsd = tsd_fetch();
- *tsdn = tsd_tsdn(tsd);
- witness_assert_lockless(tsd_tsdn(tsd));
- ind = size2index(size);
- if (unlikely(ind >= NSIZES))
+ if (unlikely(malloc_init()))
return (NULL);
+ *tsd = tsd_fetch();
- if (config_stats || (config_prof && opt_prof) || (slow_path &&
- config_valgrind && unlikely(in_valgrind))) {
- *usize = index2size(ind);
- assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
+ if (config_prof && opt_prof) {
+ *usize = s2u(size);
+ if (unlikely(*usize == 0))
+ return (NULL);
+ return (imalloc_prof(*tsd, *usize));
}
- if (config_prof && opt_prof)
- return (ialloc_prof(tsd, *usize, ind, zero, slow_path));
-
- return (ialloc(tsd, size, ind, zero, slow_path));
-}
-
-JEMALLOC_ALWAYS_INLINE_C void
-ialloc_post_check(void *ret, tsdn_t *tsdn, size_t usize, const char *func,
- bool update_errno, bool slow_path)
-{
-
- assert(!tsdn_null(tsdn) || ret == NULL);
-
- if (unlikely(ret == NULL)) {
- if (slow_path && config_xmalloc && unlikely(opt_xmalloc)) {
- malloc_printf("<jemalloc>: Error in %s(): out of "
- "memory\n", func);
- abort();
- }
- if (update_errno)
- set_errno(ENOMEM);
- }
- if (config_stats && likely(ret != NULL)) {
- assert(usize == isalloc(tsdn, ret, config_prof));
- *tsd_thread_allocatedp_get(tsdn_tsd(tsdn)) += usize;
- }
- witness_assert_lockless(tsdn);
+ if (config_stats || (config_valgrind && unlikely(in_valgrind)))
+ *usize = s2u(size);
+ return (imalloc(*tsd, size));
}
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
@@ -1628,22 +1418,27 @@ JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_malloc(size_t size)
{
void *ret;
- tsdn_t *tsdn;
+ tsd_t *tsd;
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
if (size == 0)
size = 1;
- if (likely(!malloc_slow)) {
- ret = ialloc_body(size, false, &tsdn, &usize, false);
- ialloc_post_check(ret, tsdn, usize, "malloc", true, false);
- } else {
- ret = ialloc_body(size, false, &tsdn, &usize, true);
- ialloc_post_check(ret, tsdn, usize, "malloc", true, true);
- UTRACE(0, size, ret);
- JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false);
+ ret = imalloc_body(size, &tsd, &usize);
+ if (unlikely(ret == NULL)) {
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
+ malloc_write("<jemalloc>: Error in malloc(): "
+ "out of memory\n");
+ abort();
+ }
+ set_errno(ENOMEM);
}
-
+ if (config_stats && likely(ret != NULL)) {
+ assert(usize == isalloc(ret, config_prof));
+ *tsd_thread_allocatedp_get(tsd) += usize;
+ }
+ UTRACE(0, size, ret);
+ JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
return (ret);
}
@@ -1660,7 +1455,7 @@ imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
if (p == NULL)
return (NULL);
- arena_prof_promoted(tsd_tsdn(tsd), p, usize);
+ arena_prof_promoted(p, usize);
} else
p = ipalloc(tsd, usize, alignment, false);
@@ -1682,7 +1477,7 @@ imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
prof_alloc_rollback(tsd, tctx, true);
return (NULL);
}
- prof_malloc(tsd_tsdn(tsd), p, usize, tctx);
+ prof_malloc(p, usize, tctx);
return (p);
}
@@ -1699,12 +1494,10 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
assert(min_alignment != 0);
if (unlikely(malloc_init())) {
- tsd = NULL;
result = NULL;
goto label_oom;
}
tsd = tsd_fetch();
- witness_assert_lockless(tsd_tsdn(tsd));
if (size == 0)
size = 1;
@@ -1722,7 +1515,7 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
}
usize = sa2u(size, alignment);
- if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
+ if (unlikely(usize == 0)) {
result = NULL;
goto label_oom;
}
@@ -1739,13 +1532,10 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
ret = 0;
label_return:
if (config_stats && likely(result != NULL)) {
- assert(usize == isalloc(tsd_tsdn(tsd), result, config_prof));
+ assert(usize == isalloc(result, config_prof));
*tsd_thread_allocatedp_get(tsd) += usize;
}
UTRACE(0, size, result);
- JEMALLOC_VALGRIND_MALLOC(result != NULL, tsd_tsdn(tsd), result, usize,
- false);
- witness_assert_lockless(tsd_tsdn(tsd));
return (ret);
label_oom:
assert(result == NULL);
@@ -1755,7 +1545,6 @@ label_oom:
abort();
}
ret = ENOMEM;
- witness_assert_lockless(tsd_tsdn(tsd));
goto label_return;
}
@@ -1763,10 +1552,9 @@ JEMALLOC_EXPORT int JEMALLOC_NOTHROW
JEMALLOC_ATTR(nonnull(1))
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
- int ret;
-
- ret = imemalign(memptr, alignment, size, sizeof(void *));
-
+ int ret = imemalign(memptr, alignment, size, sizeof(void *));
+ JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
+ config_prof), false);
return (ret);
}
@@ -1782,45 +1570,114 @@ je_aligned_alloc(size_t alignment, size_t size)
ret = NULL;
set_errno(err);
}
-
+ JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
+ false);
return (ret);
}
+static void *
+icalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
+{
+ void *p;
+
+ if (tctx == NULL)
+ return (NULL);
+ if (usize <= SMALL_MAXCLASS) {
+ p = icalloc(tsd, LARGE_MINCLASS);
+ if (p == NULL)
+ return (NULL);
+ arena_prof_promoted(p, usize);
+ } else
+ p = icalloc(tsd, usize);
+
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+icalloc_prof(tsd_t *tsd, size_t usize)
+{
+ void *p;
+ prof_tctx_t *tctx;
+
+ tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
+ if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
+ p = icalloc_prof_sample(tsd, usize, tctx);
+ else
+ p = icalloc(tsd, usize);
+ if (unlikely(p == NULL)) {
+ prof_alloc_rollback(tsd, tctx, true);
+ return (NULL);
+ }
+ prof_malloc(p, usize, tctx);
+
+ return (p);
+}
+
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
je_calloc(size_t num, size_t size)
{
void *ret;
- tsdn_t *tsdn;
+ tsd_t *tsd;
size_t num_size;
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
+ if (unlikely(malloc_init())) {
+ num_size = 0;
+ ret = NULL;
+ goto label_return;
+ }
+ tsd = tsd_fetch();
+
num_size = num * size;
if (unlikely(num_size == 0)) {
if (num == 0 || size == 0)
num_size = 1;
- else
- num_size = HUGE_MAXCLASS + 1; /* Trigger OOM. */
+ else {
+ ret = NULL;
+ goto label_return;
+ }
/*
* Try to avoid division here. We know that it isn't possible to
* overflow during multiplication if neither operand uses any of the
* most significant half of the bits in a size_t.
*/
} else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
- 2))) && (num_size / size != num)))
- num_size = HUGE_MAXCLASS + 1; /* size_t overflow. */
+ 2))) && (num_size / size != num))) {
+ /* size_t overflow. */
+ ret = NULL;
+ goto label_return;
+ }
- if (likely(!malloc_slow)) {
- ret = ialloc_body(num_size, true, &tsdn, &usize, false);
- ialloc_post_check(ret, tsdn, usize, "calloc", true, false);
+ if (config_prof && opt_prof) {
+ usize = s2u(num_size);
+ if (unlikely(usize == 0)) {
+ ret = NULL;
+ goto label_return;
+ }
+ ret = icalloc_prof(tsd, usize);
} else {
- ret = ialloc_body(num_size, true, &tsdn, &usize, true);
- ialloc_post_check(ret, tsdn, usize, "calloc", true, true);
- UTRACE(0, num_size, ret);
- JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, true);
+ if (config_stats || (config_valgrind && unlikely(in_valgrind)))
+ usize = s2u(num_size);
+ ret = icalloc(tsd, num_size);
}
+label_return:
+ if (unlikely(ret == NULL)) {
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
+ malloc_write("<jemalloc>: Error in calloc(): out of "
+ "memory\n");
+ abort();
+ }
+ set_errno(ENOMEM);
+ }
+ if (config_stats && likely(ret != NULL)) {
+ assert(usize == isalloc(ret, config_prof));
+ *tsd_thread_allocatedp_get(tsd) += usize;
+ }
+ UTRACE(0, num_size, ret);
+ JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
return (ret);
}
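
The overflow guard in je_calloc() above avoids a division on the common path: when neither num nor size touches the upper half of size_t's bits, their product cannot overflow, so the num_size / size check only runs when that cheap mask test fires. A self-contained sketch of the same check (hypothetical helper name; assumes 8-bit bytes):

#include <stddef.h>
#include <stdint.h>

/*
 * Return non-zero if num * size overflows size_t, storing the (possibly
 * wrapped) product in *prod. Mirrors the half-width shortcut used above:
 * SIZE_MAX << (sizeof(size_t) << 2) masks the upper half of the bits.
 */
static int
calloc_size_overflows(size_t num, size_t size, size_t *prod)
{
	const size_t high_half = SIZE_MAX << (sizeof(size_t) << 2);

	*prod = num * size;
	if (((num | size) & high_half) == 0)
		return (0);	/* Both operands are small; overflow impossible. */
	return (size != 0 && *prod / size != num);
}
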
@@ -1836,7 +1693,7 @@ irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
if (p == NULL)
return (NULL);
- arena_prof_promoted(tsd_tsdn(tsd), p, usize);
+ arena_prof_promoted(p, usize);
} else
p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
@@ -1851,7 +1708,7 @@ irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
prof_tctx_t *old_tctx, *tctx;
prof_active = prof_active_get_unlocked();
- old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
+ old_tctx = prof_tctx_get(old_ptr);
tctx = prof_alloc_prep(tsd, usize, prof_active, true);
if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
@@ -1868,41 +1725,32 @@ irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
}
JEMALLOC_INLINE_C void
-ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
+ifree(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
size_t usize;
UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
- witness_assert_lockless(tsd_tsdn(tsd));
-
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
if (config_prof && opt_prof) {
- usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
+ usize = isalloc(ptr, config_prof);
prof_free(tsd, ptr, usize);
} else if (config_stats || config_valgrind)
- usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
+ usize = isalloc(ptr, config_prof);
if (config_stats)
*tsd_thread_deallocatedp_get(tsd) += usize;
-
- if (likely(!slow_path))
- iqalloc(tsd, ptr, tcache, false);
- else {
- if (config_valgrind && unlikely(in_valgrind))
- rzsize = p2rz(tsd_tsdn(tsd), ptr);
- iqalloc(tsd, ptr, tcache, true);
- JEMALLOC_VALGRIND_FREE(ptr, rzsize);
- }
+ if (config_valgrind && unlikely(in_valgrind))
+ rzsize = p2rz(ptr);
+ iqalloc(tsd, ptr, tcache);
+ JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}
JEMALLOC_INLINE_C void
-isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path)
+isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache)
{
UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
- witness_assert_lockless(tsd_tsdn(tsd));
-
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
@@ -1911,8 +1759,8 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path)
if (config_stats)
*tsd_thread_deallocatedp_get(tsd) += usize;
if (config_valgrind && unlikely(in_valgrind))
- rzsize = p2rz(tsd_tsdn(tsd), ptr);
- isqalloc(tsd, ptr, usize, tcache, slow_path);
+ rzsize = p2rz(ptr);
+ isqalloc(tsd, ptr, usize, tcache);
JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}
@@ -1922,57 +1770,44 @@ JEMALLOC_ALLOC_SIZE(2)
je_realloc(void *ptr, size_t size)
{
void *ret;
- tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
+ tsd_t *tsd JEMALLOC_CC_SILENCE_INIT(NULL);
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
size_t old_usize = 0;
UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
if (unlikely(size == 0)) {
if (ptr != NULL) {
- tsd_t *tsd;
-
/* realloc(ptr, 0) is equivalent to free(ptr). */
UTRACE(ptr, 0, 0);
tsd = tsd_fetch();
- ifree(tsd, ptr, tcache_get(tsd, false), true);
+ ifree(tsd, ptr, tcache_get(tsd, false));
return (NULL);
}
size = 1;
}
if (likely(ptr != NULL)) {
- tsd_t *tsd;
-
assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
tsd = tsd_fetch();
- witness_assert_lockless(tsd_tsdn(tsd));
-
- old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
- if (config_valgrind && unlikely(in_valgrind)) {
- old_rzsize = config_prof ? p2rz(tsd_tsdn(tsd), ptr) :
- u2rz(old_usize);
- }
+ old_usize = isalloc(ptr, config_prof);
+ if (config_valgrind && unlikely(in_valgrind))
+ old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);
if (config_prof && opt_prof) {
usize = s2u(size);
- ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
- NULL : irealloc_prof(tsd, ptr, old_usize, usize);
+ ret = unlikely(usize == 0) ? NULL : irealloc_prof(tsd,
+ ptr, old_usize, usize);
} else {
if (config_stats || (config_valgrind &&
unlikely(in_valgrind)))
usize = s2u(size);
ret = iralloc(tsd, ptr, old_usize, size, 0, false);
}
- tsdn = tsd_tsdn(tsd);
} else {
/* realloc(NULL, size) is equivalent to malloc(size). */
- if (likely(!malloc_slow))
- ret = ialloc_body(size, false, &tsdn, &usize, false);
- else
- ret = ialloc_body(size, false, &tsdn, &usize, true);
- assert(!tsdn_null(tsdn) || ret == NULL);
+ ret = imalloc_body(size, &tsd, &usize);
}
if (unlikely(ret == NULL)) {
@@ -1984,17 +1819,13 @@ je_realloc(void *ptr, size_t size)
set_errno(ENOMEM);
}
if (config_stats && likely(ret != NULL)) {
- tsd_t *tsd;
-
- assert(usize == isalloc(tsdn, ret, config_prof));
- tsd = tsdn_tsd(tsdn);
+ assert(usize == isalloc(ret, config_prof));
*tsd_thread_allocatedp_get(tsd) += usize;
*tsd_thread_deallocatedp_get(tsd) += old_usize;
}
UTRACE(ptr, size, ret);
- JEMALLOC_VALGRIND_REALLOC(maybe, tsdn, ret, usize, maybe, ptr,
- old_usize, old_rzsize, maybe, false);
- witness_assert_lockless(tsdn);
+ JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize,
+ old_rzsize, true, false);
return (ret);
}
@@ -2005,12 +1836,7 @@ je_free(void *ptr)
UTRACE(ptr, 0, 0);
if (likely(ptr != NULL)) {
tsd_t *tsd = tsd_fetch();
- witness_assert_lockless(tsd_tsdn(tsd));
- if (likely(!malloc_slow))
- ifree(tsd, ptr, tcache_get(tsd, false), false);
- else
- ifree(tsd, ptr, tcache_get(tsd, false), true);
- witness_assert_lockless(tsd_tsdn(tsd));
+ ifree(tsd, ptr, tcache_get(tsd, false));
}
}
@@ -2031,6 +1857,7 @@ je_memalign(size_t alignment, size_t size)
void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
if (unlikely(imemalign(&ret, alignment, size, 1) != 0))
ret = NULL;
+ JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
return (ret);
}
#endif
@@ -2044,6 +1871,7 @@ je_valloc(size_t size)
void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
if (unlikely(imemalign(&ret, PAGE, size, 1) != 0))
ret = NULL;
+ JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
return (ret);
}
#endif
@@ -2073,29 +1901,6 @@ JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
je_memalign;
# endif
-
-#ifdef CPU_COUNT
-/*
- * To enable static linking with glibc, the libc specific malloc interface must
- * be implemented also, so none of glibc's malloc.o functions are added to the
- * link.
- */
-#define ALIAS(je_fn) __attribute__((alias (#je_fn), used))
-/* To force macro expansion of je_ prefix before stringification. */
-#define PREALIAS(je_fn) ALIAS(je_fn)
-void *__libc_malloc(size_t size) PREALIAS(je_malloc);
-void __libc_free(void* ptr) PREALIAS(je_free);
-void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
-void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
-void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
-void *__libc_valloc(size_t size) PREALIAS(je_valloc);
-int __posix_memalign(void** r, size_t a, size_t s)
- PREALIAS(je_posix_memalign);
-#undef PREALIAS
-#undef ALIAS
-
-#endif
-
#endif
/*
@@ -2107,7 +1912,7 @@ int __posix_memalign(void** r, size_t a, size_t s)
*/
JEMALLOC_ALWAYS_INLINE_C bool
-imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
+imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize,
size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
{
@@ -2118,8 +1923,7 @@ imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
*alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
*usize = sa2u(size, *alignment);
}
- if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
- return (true);
+ assert(*usize != 0);
*zero = MALLOCX_ZERO_GET(flags);
if ((flags & MALLOCX_TCACHE_MASK) != 0) {
if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
@@ -2130,7 +1934,7 @@ imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
*tcache = tcache_get(tsd, true);
if ((flags & MALLOCX_ARENA_MASK) != 0) {
unsigned arena_ind = MALLOCX_ARENA_GET(flags);
- *arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
+ *arena = arena_get(tsd, arena_ind, true, true);
if (unlikely(*arena == NULL))
return (true);
} else
@@ -2138,44 +1942,59 @@ imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
return (false);
}
+JEMALLOC_ALWAYS_INLINE_C bool
+imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
+ size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
+{
+
+ if (likely(flags == 0)) {
+ *usize = s2u(size);
+ assert(*usize != 0);
+ *alignment = 0;
+ *zero = false;
+ *tcache = tcache_get(tsd, true);
+ *arena = NULL;
+ return (false);
+ } else {
+ return (imallocx_flags_decode_hard(tsd, size, flags, usize,
+ alignment, zero, tcache, arena));
+ }
+}
+
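The decode helpers above unpack the flags word that callers of the public mallocx() family build from the MALLOCX_* macros: the low bits carry the requested log2 alignment, and the remaining bits select zeroing, thread-cache behavior, and an explicit arena. A hedged usage sketch against the public API (assuming jemalloc is installed with its standard <jemalloc/jemalloc.h> header):

#include <stdlib.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
    /* 4 KiB, 64-byte aligned, zeroed, bypassing the thread cache. */
    void *p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO |
        MALLOCX_TCACHE_NONE);
    if (p == NULL)
        return 1;

    /* Try to grow in place; xallocx() never moves the allocation. */
    size_t grown = xallocx(p, 8192, 0, 0);
    (void)grown;

    dallocx(p, MALLOCX_TCACHE_NONE);
    return 0;
}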
JEMALLOC_ALWAYS_INLINE_C void *
-imallocx_flags(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
- tcache_t *tcache, arena_t *arena, bool slow_path)
+imallocx_flags(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
+ tcache_t *tcache, arena_t *arena)
{
- szind_t ind;
if (unlikely(alignment != 0))
- return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
- ind = size2index(usize);
- assert(ind < NSIZES);
- return (iallocztm(tsdn, usize, ind, zero, tcache, false, arena,
- slow_path));
+ return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
+ if (unlikely(zero))
+ return (icalloct(tsd, usize, tcache, arena));
+ return (imalloct(tsd, usize, tcache, arena));
}
static void *
-imallocx_prof_sample(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
- tcache_t *tcache, arena_t *arena, bool slow_path)
+imallocx_prof_sample(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
+ tcache_t *tcache, arena_t *arena)
{
void *p;
if (usize <= SMALL_MAXCLASS) {
assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
- p = imallocx_flags(tsdn, LARGE_MINCLASS, alignment, zero,
- tcache, arena, slow_path);
+ p = imallocx_flags(tsd, LARGE_MINCLASS, alignment, zero, tcache,
+ arena);
if (p == NULL)
return (NULL);
- arena_prof_promoted(tsdn, p, usize);
- } else {
- p = imallocx_flags(tsdn, usize, alignment, zero, tcache, arena,
- slow_path);
- }
+ arena_prof_promoted(p, usize);
+ } else
+ p = imallocx_flags(tsd, usize, alignment, zero, tcache, arena);
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
-imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, bool slow_path)
+imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
{
void *p;
size_t alignment;
@@ -2188,27 +2007,25 @@ imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, bool slow_path)
&zero, &tcache, &arena)))
return (NULL);
tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true);
- if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
- p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero,
- tcache, arena, slow_path);
- } else if ((uintptr_t)tctx > (uintptr_t)1U) {
- p = imallocx_prof_sample(tsd_tsdn(tsd), *usize, alignment, zero,
- tcache, arena, slow_path);
+ if (likely((uintptr_t)tctx == (uintptr_t)1U))
+ p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
+ else if ((uintptr_t)tctx > (uintptr_t)1U) {
+ p = imallocx_prof_sample(tsd, *usize, alignment, zero, tcache,
+ arena);
} else
p = NULL;
if (unlikely(p == NULL)) {
prof_alloc_rollback(tsd, tctx, true);
return (NULL);
}
- prof_malloc(tsd_tsdn(tsd), p, *usize, tctx);
+ prof_malloc(p, *usize, tctx);
assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
-imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize,
- bool slow_path)
+imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
{
void *p;
size_t alignment;
@@ -2216,53 +2033,18 @@ imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize,
tcache_t *tcache;
arena_t *arena;
- if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
- &zero, &tcache, &arena)))
- return (NULL);
- p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero, tcache,
- arena, slow_path);
- assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
- return (p);
-}
-
-/* This function guarantees that *tsdn is non-NULL on success. */
-JEMALLOC_ALWAYS_INLINE_C void *
-imallocx_body(size_t size, int flags, tsdn_t **tsdn, size_t *usize,
- bool slow_path)
-{
- tsd_t *tsd;
-
- if (slow_path && unlikely(malloc_init())) {
- *tsdn = NULL;
- return (NULL);
- }
-
- tsd = tsd_fetch();
- *tsdn = tsd_tsdn(tsd);
- witness_assert_lockless(tsd_tsdn(tsd));
-
if (likely(flags == 0)) {
- szind_t ind = size2index(size);
- if (unlikely(ind >= NSIZES))
- return (NULL);
- if (config_stats || (config_prof && opt_prof) || (slow_path &&
- config_valgrind && unlikely(in_valgrind))) {
- *usize = index2size(ind);
- assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
- }
-
- if (config_prof && opt_prof) {
- return (ialloc_prof(tsd, *usize, ind, false,
- slow_path));
- }
-
- return (ialloc(tsd, size, ind, false, slow_path));
+ if (config_stats || (config_valgrind && unlikely(in_valgrind)))
+ *usize = s2u(size);
+ return (imalloc(tsd, size));
}
- if (config_prof && opt_prof)
- return (imallocx_prof(tsd, size, flags, usize, slow_path));
-
- return (imallocx_no_prof(tsd, size, flags, usize, slow_path));
+ if (unlikely(imallocx_flags_decode_hard(tsd, size, flags, usize,
+ &alignment, &zero, &tcache, &arena)))
+ return (NULL);
+ p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
+ assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
+ return (p);
}
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
@@ -2270,24 +2052,37 @@ void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_mallocx(size_t size, int flags)
{
- tsdn_t *tsdn;
+ tsd_t *tsd;
void *p;
size_t usize;
assert(size != 0);
- if (likely(!malloc_slow)) {
- p = imallocx_body(size, flags, &tsdn, &usize, false);
- ialloc_post_check(p, tsdn, usize, "mallocx", false, false);
- } else {
- p = imallocx_body(size, flags, &tsdn, &usize, true);
- ialloc_post_check(p, tsdn, usize, "mallocx", false, true);
- UTRACE(0, size, p);
- JEMALLOC_VALGRIND_MALLOC(p != NULL, tsdn, p, usize,
- MALLOCX_ZERO_GET(flags));
- }
+ if (unlikely(malloc_init()))
+ goto label_oom;
+ tsd = tsd_fetch();
+ if (config_prof && opt_prof)
+ p = imallocx_prof(tsd, size, flags, &usize);
+ else
+ p = imallocx_no_prof(tsd, size, flags, &usize);
+ if (unlikely(p == NULL))
+ goto label_oom;
+
+ if (config_stats) {
+ assert(usize == isalloc(p, config_prof));
+ *tsd_thread_allocatedp_get(tsd) += usize;
+ }
+ UTRACE(0, size, p);
+ JEMALLOC_VALGRIND_MALLOC(true, p, usize, MALLOCX_ZERO_GET(flags));
return (p);
+label_oom:
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
+ malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
+ abort();
+ }
+ UTRACE(0, size, 0);
+ return (NULL);
}
static void *
@@ -2304,7 +2099,7 @@ irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize,
zero, tcache, arena);
if (p == NULL)
return (NULL);
- arena_prof_promoted(tsd_tsdn(tsd), p, usize);
+ arena_prof_promoted(p, usize);
} else {
p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero,
tcache, arena);
@@ -2323,8 +2118,8 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
prof_tctx_t *old_tctx, *tctx;
prof_active = prof_active_get_unlocked();
- old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
- tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
+ old_tctx = prof_tctx_get(old_ptr);
+ tctx = prof_alloc_prep(tsd, *usize, prof_active, true);
if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
alignment, zero, tcache, arena, tctx);
@@ -2333,7 +2128,7 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
tcache, arena);
}
if (unlikely(p == NULL)) {
- prof_alloc_rollback(tsd, tctx, false);
+ prof_alloc_rollback(tsd, tctx, true);
return (NULL);
}
@@ -2346,9 +2141,9 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
* be the same as the current usize because of in-place large
* reallocation. Therefore, query the actual value of usize.
*/
- *usize = isalloc(tsd_tsdn(tsd), p, config_prof);
+ *usize = isalloc(p, config_prof);
}
- prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
+ prof_realloc(tsd, p, *usize, tctx, prof_active, true, old_ptr,
old_usize, old_tctx);
return (p);
@@ -2374,11 +2169,10 @@ je_rallocx(void *ptr, size_t size, int flags)
assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
tsd = tsd_fetch();
- witness_assert_lockless(tsd_tsdn(tsd));
if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
unsigned arena_ind = MALLOCX_ARENA_GET(flags);
- arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
+ arena = arena_get(tsd, arena_ind, true, true);
if (unlikely(arena == NULL))
goto label_oom;
} else
@@ -2392,14 +2186,13 @@ je_rallocx(void *ptr, size_t size, int flags)
} else
tcache = tcache_get(tsd, true);
- old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
+ old_usize = isalloc(ptr, config_prof);
if (config_valgrind && unlikely(in_valgrind))
old_rzsize = u2rz(old_usize);
if (config_prof && opt_prof) {
usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
- if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
- goto label_oom;
+ assert(usize != 0);
p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
zero, tcache, arena);
if (unlikely(p == NULL))
@@ -2410,7 +2203,7 @@ je_rallocx(void *ptr, size_t size, int flags)
if (unlikely(p == NULL))
goto label_oom;
if (config_stats || (config_valgrind && unlikely(in_valgrind)))
- usize = isalloc(tsd_tsdn(tsd), p, config_prof);
+ usize = isalloc(p, config_prof);
}
assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
@@ -2419,9 +2212,8 @@ je_rallocx(void *ptr, size_t size, int flags)
*tsd_thread_deallocatedp_get(tsd) += old_usize;
}
UTRACE(ptr, size, p);
- JEMALLOC_VALGRIND_REALLOC(maybe, tsd_tsdn(tsd), p, usize, no, ptr,
- old_usize, old_rzsize, no, zero);
- witness_assert_lockless(tsd_tsdn(tsd));
+ JEMALLOC_VALGRIND_REALLOC(true, p, usize, false, ptr, old_usize,
+ old_rzsize, false, zero);
return (p);
label_oom:
if (config_xmalloc && unlikely(opt_xmalloc)) {
@@ -2429,33 +2221,31 @@ label_oom:
abort();
}
UTRACE(ptr, size, 0);
- witness_assert_lockless(tsd_tsdn(tsd));
return (NULL);
}
JEMALLOC_ALWAYS_INLINE_C size_t
-ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
- size_t extra, size_t alignment, bool zero)
+ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
+ size_t alignment, bool zero)
{
size_t usize;
- if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero))
+ if (ixalloc(ptr, old_usize, size, extra, alignment, zero))
return (old_usize);
- usize = isalloc(tsdn, ptr, config_prof);
+ usize = isalloc(ptr, config_prof);
return (usize);
}
static size_t
-ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
- size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx)
+ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
+ size_t alignment, bool zero, prof_tctx_t *tctx)
{
size_t usize;
if (tctx == NULL)
return (old_usize);
- usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
- zero);
+ usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, zero);
return (usize);
}
@@ -2469,36 +2259,23 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
prof_tctx_t *old_tctx, *tctx;
prof_active = prof_active_get_unlocked();
- old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);
+ old_tctx = prof_tctx_get(ptr);
/*
* usize isn't knowable before ixalloc() returns when extra is non-zero.
* Therefore, compute its maximum possible value and use that in
* prof_alloc_prep() to decide whether to capture a backtrace.
* prof_realloc() will use the actual usize to decide whether to sample.
*/
- if (alignment == 0) {
- usize_max = s2u(size+extra);
- assert(usize_max > 0 && usize_max <= HUGE_MAXCLASS);
- } else {
- usize_max = sa2u(size+extra, alignment);
- if (unlikely(usize_max == 0 || usize_max > HUGE_MAXCLASS)) {
- /*
- * usize_max is out of range, and chances are that
- * allocation will fail, but use the maximum possible
- * value and carry on with prof_alloc_prep(), just in
- * case allocation succeeds.
- */
- usize_max = HUGE_MAXCLASS;
- }
- }
+ usize_max = (alignment == 0) ? s2u(size+extra) : sa2u(size+extra,
+ alignment);
+ assert(usize_max != 0);
tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
-
if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
- usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
- size, extra, alignment, zero, tctx);
+ usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
+ alignment, zero, tctx);
} else {
- usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
- extra, alignment, zero);
+ usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
+ zero);
}
if (usize == old_usize) {
prof_alloc_rollback(tsd, tctx, false);
@@ -2525,25 +2302,18 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
tsd = tsd_fetch();
- witness_assert_lockless(tsd_tsdn(tsd));
- old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
+ old_usize = isalloc(ptr, config_prof);
- /*
- * The API explicitly absolves itself of protecting against (size +
- * extra) numerical overflow, but we may need to clamp extra to avoid
- * exceeding HUGE_MAXCLASS.
- *
- * Ordinarily, size limit checking is handled deeper down, but here we
- * have to check as part of (size + extra) clamping, since we need the
- * clamped value in the above helper functions.
- */
- if (unlikely(size > HUGE_MAXCLASS)) {
- usize = old_usize;
- goto label_not_resized;
- }
- if (unlikely(HUGE_MAXCLASS - size < extra))
+ /* Clamp extra if necessary to avoid (size + extra) overflow. */
+ if (unlikely(size + extra > HUGE_MAXCLASS)) {
+ /* Check for size overflow. */
+ if (unlikely(size > HUGE_MAXCLASS)) {
+ usize = old_usize;
+ goto label_not_resized;
+ }
extra = HUGE_MAXCLASS - size;
+ }
if (config_valgrind && unlikely(in_valgrind))
old_rzsize = u2rz(old_usize);
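Both versions of the clamp above address the same point: xallocx() explicitly does not protect callers from (size + extra) overflowing, so extra is trimmed to whatever still fits under the largest supported size class before the sum is used. A standalone sketch of the clamp in its overflow-safe form (the HUGE_MAXCLASS value below is a placeholder, not the one jemalloc computes from its size classes):

#include <stdbool.h>
#include <stddef.h>

/* Placeholder limit; jemalloc derives the real value from its size classes. */
#define HUGE_MAXCLASS   ((size_t)1 << 62)

/*
 * Returns true when size alone already exceeds the limit (the "not resized"
 * early exit above); otherwise clamps *extra so size + *extra cannot overflow
 * or exceed the limit.
 */
bool
clamp_extra(size_t size, size_t *extra)
{
    if (size > HUGE_MAXCLASS)
        return true;
    if (*extra > HUGE_MAXCLASS - size)
        *extra = HUGE_MAXCLASS - size;
    return false;
}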
@@ -2552,8 +2322,8 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
alignment, zero);
} else {
- usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
- extra, alignment, zero);
+ usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
+ zero);
}
if (unlikely(usize == old_usize))
goto label_not_resized;
@@ -2562,11 +2332,10 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
*tsd_thread_allocatedp_get(tsd) += usize;
*tsd_thread_deallocatedp_get(tsd) += old_usize;
}
- JEMALLOC_VALGRIND_REALLOC(no, tsd_tsdn(tsd), ptr, usize, no, ptr,
- old_usize, old_rzsize, no, zero);
+ JEMALLOC_VALGRIND_REALLOC(false, ptr, usize, false, ptr, old_usize,
+ old_rzsize, false, zero);
label_not_resized:
UTRACE(ptr, size, ptr);
- witness_assert_lockless(tsd_tsdn(tsd));
return (usize);
}
@@ -2575,20 +2344,15 @@ JEMALLOC_ATTR(pure)
je_sallocx(const void *ptr, int flags)
{
size_t usize;
- tsdn_t *tsdn;
assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
- tsdn = tsdn_fetch();
- witness_assert_lockless(tsdn);
-
if (config_ivsalloc)
- usize = ivsalloc(tsdn, ptr, config_prof);
+ usize = ivsalloc(ptr, config_prof);
else
- usize = isalloc(tsdn, ptr, config_prof);
+ usize = isalloc(ptr, config_prof);
- witness_assert_lockless(tsdn);
return (usize);
}
@@ -2602,7 +2366,6 @@ je_dallocx(void *ptr, int flags)
assert(malloc_initialized() || IS_INITIALIZER);
tsd = tsd_fetch();
- witness_assert_lockless(tsd_tsdn(tsd));
if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
tcache = NULL;
@@ -2612,25 +2375,19 @@ je_dallocx(void *ptr, int flags)
tcache = tcache_get(tsd, false);
UTRACE(ptr, 0, 0);
- if (likely(!malloc_slow))
- ifree(tsd, ptr, tcache, false);
- else
- ifree(tsd, ptr, tcache, true);
- witness_assert_lockless(tsd_tsdn(tsd));
+ ifree(tsd_fetch(), ptr, tcache);
}
JEMALLOC_ALWAYS_INLINE_C size_t
-inallocx(tsdn_t *tsdn, size_t size, int flags)
+inallocx(size_t size, int flags)
{
size_t usize;
- witness_assert_lockless(tsdn);
-
if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
usize = s2u(size);
else
usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
- witness_assert_lockless(tsdn);
+ assert(usize != 0);
return (usize);
}
@@ -2643,11 +2400,10 @@ je_sdallocx(void *ptr, size_t size, int flags)
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
- tsd = tsd_fetch();
- usize = inallocx(tsd_tsdn(tsd), size, flags);
- assert(usize == isalloc(tsd_tsdn(tsd), ptr, config_prof));
+ usize = inallocx(size, flags);
+ assert(usize == isalloc(ptr, config_prof));
- witness_assert_lockless(tsd_tsdn(tsd));
+ tsd = tsd_fetch();
if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
tcache = NULL;
@@ -2657,116 +2413,75 @@ je_sdallocx(void *ptr, size_t size, int flags)
tcache = tcache_get(tsd, false);
UTRACE(ptr, 0, 0);
- if (likely(!malloc_slow))
- isfree(tsd, ptr, usize, tcache, false);
- else
- isfree(tsd, ptr, usize, tcache, true);
- witness_assert_lockless(tsd_tsdn(tsd));
+ isfree(tsd, ptr, usize, tcache);
}
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)
je_nallocx(size_t size, int flags)
{
- size_t usize;
- tsdn_t *tsdn;
assert(size != 0);
if (unlikely(malloc_init()))
return (0);
- tsdn = tsdn_fetch();
- witness_assert_lockless(tsdn);
-
- usize = inallocx(tsdn, size, flags);
- if (unlikely(usize > HUGE_MAXCLASS))
- return (0);
-
- witness_assert_lockless(tsdn);
- return (usize);
+ return (inallocx(size, flags));
}
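nallocx() reports the usable size a request would be rounded up to, and sdallocx() is the sized-deallocation entry point that lets the caller hand a size back so the allocator can avoid a metadata lookup; the assert in je_sdallocx() above checks that the passed size, once converted by inallocx(), matches what isalloc() reports. A hedged sketch of the public API:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
    size_t usable = nallocx(100, 0);    /* size class 100 rounds up to */
    void *p = mallocx(100, 0);
    if (p == NULL)
        return 1;

    printf("requested 100, usable %zu\n", usable);

    /* Sized deallocation: pass back the original request (or nallocx result). */
    sdallocx(p, 100, 0);
    return 0;
}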
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
size_t newlen)
{
- int ret;
- tsd_t *tsd;
if (unlikely(malloc_init()))
return (EAGAIN);
- tsd = tsd_fetch();
- witness_assert_lockless(tsd_tsdn(tsd));
- ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
- witness_assert_lockless(tsd_tsdn(tsd));
- return (ret);
+ return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{
- int ret;
- tsdn_t *tsdn;
if (unlikely(malloc_init()))
return (EAGAIN);
- tsdn = tsdn_fetch();
- witness_assert_lockless(tsdn);
- ret = ctl_nametomib(tsdn, name, mibp, miblenp);
- witness_assert_lockless(tsdn);
- return (ret);
+ return (ctl_nametomib(name, mibp, miblenp));
}
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
- int ret;
- tsd_t *tsd;
if (unlikely(malloc_init()))
return (EAGAIN);
- tsd = tsd_fetch();
- witness_assert_lockless(tsd_tsdn(tsd));
- ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
- witness_assert_lockless(tsd_tsdn(tsd));
- return (ret);
+ return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}
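mallctl() and its MIB-based variants are the control interface: a dotted name is resolved in the internal ctl tree, and old and new values are exchanged through caller-supplied buffers. A hedged usage sketch (the stats.* names are only available when jemalloc is built with statistics support):

#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
    const char *ver;
    size_t len = sizeof(ver);
    if (mallctl("version", &ver, &len, NULL, 0) == 0)
        printf("jemalloc %s\n", ver);

    /* Stats are cached; bump the epoch before reading them. */
    uint64_t epoch = 1;
    len = sizeof(epoch);
    mallctl("epoch", &epoch, &len, &epoch, sizeof(epoch));

    size_t allocated;
    len = sizeof(allocated);
    if (mallctl("stats.allocated", &allocated, &len, NULL, 0) == 0)
        printf("allocated: %zu bytes\n", allocated);
    return 0;
}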
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
const char *opts)
{
- tsdn_t *tsdn;
- tsdn = tsdn_fetch();
- witness_assert_lockless(tsdn);
stats_print(write_cb, cbopaque, opts);
- witness_assert_lockless(tsdn);
}
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{
size_t ret;
- tsdn_t *tsdn;
assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
- tsdn = tsdn_fetch();
- witness_assert_lockless(tsdn);
-
if (config_ivsalloc)
- ret = ivsalloc(tsdn, ptr, config_prof);
+ ret = ivsalloc(ptr, config_prof);
else
- ret = (ptr == NULL) ? 0 : isalloc(tsdn, ptr, config_prof);
+ ret = (ptr == NULL) ? 0 : isalloc(ptr, config_prof);
- witness_assert_lockless(tsdn);
return (ret);
}
@@ -2792,7 +2507,6 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
* to trigger the deadlock described above, but doing so would involve forking
* via a library constructor that runs before jemalloc's runs.
*/
-#ifndef JEMALLOC_JET
JEMALLOC_ATTR(constructor)
static void
jemalloc_constructor(void)
@@ -2800,7 +2514,6 @@ jemalloc_constructor(void)
malloc_init();
}
-#endif
#ifndef JEMALLOC_MUTEX_INIT_CB
void
@@ -2810,9 +2523,7 @@ JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
- tsd_t *tsd;
- unsigned i, j, narenas;
- arena_t *arena;
+ unsigned i;
#ifdef JEMALLOC_MUTEX_INIT_CB
if (!malloc_initialized())
@@ -2820,40 +2531,16 @@ _malloc_prefork(void)
#endif
assert(malloc_initialized());
- tsd = tsd_fetch();
-
- narenas = narenas_total_get();
-
- witness_prefork(tsd);
/* Acquire all mutexes in a safe order. */
- ctl_prefork(tsd_tsdn(tsd));
- malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
- prof_prefork0(tsd_tsdn(tsd));
- for (i = 0; i < 3; i++) {
- for (j = 0; j < narenas; j++) {
- if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
- NULL) {
- switch (i) {
- case 0:
- arena_prefork0(tsd_tsdn(tsd), arena);
- break;
- case 1:
- arena_prefork1(tsd_tsdn(tsd), arena);
- break;
- case 2:
- arena_prefork2(tsd_tsdn(tsd), arena);
- break;
- default: not_reached();
- }
- }
- }
- }
- base_prefork(tsd_tsdn(tsd));
- for (i = 0; i < narenas; i++) {
- if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
- arena_prefork3(tsd_tsdn(tsd), arena);
+ ctl_prefork();
+ prof_prefork();
+ malloc_mutex_prefork(&arenas_lock);
+ for (i = 0; i < narenas_total; i++) {
+ if (arenas[i] != NULL)
+ arena_prefork(arenas[i]);
}
- prof_prefork1(tsd_tsdn(tsd));
+ chunk_prefork();
+ base_prefork();
}
#ifndef JEMALLOC_MUTEX_INIT_CB
@@ -2864,8 +2551,7 @@ JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
- tsd_t *tsd;
- unsigned i, narenas;
+ unsigned i;
#ifdef JEMALLOC_MUTEX_INIT_CB
if (!malloc_initialized())
@@ -2873,77 +2559,35 @@ _malloc_postfork(void)
#endif
assert(malloc_initialized());
- tsd = tsd_fetch();
-
- witness_postfork_parent(tsd);
/* Release all mutexes, now that fork() has completed. */
- base_postfork_parent(tsd_tsdn(tsd));
- for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
- arena_t *arena;
-
- if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
- arena_postfork_parent(tsd_tsdn(tsd), arena);
+ base_postfork_parent();
+ chunk_postfork_parent();
+ for (i = 0; i < narenas_total; i++) {
+ if (arenas[i] != NULL)
+ arena_postfork_parent(arenas[i]);
}
- prof_postfork_parent(tsd_tsdn(tsd));
- malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
- ctl_postfork_parent(tsd_tsdn(tsd));
+ malloc_mutex_postfork_parent(&arenas_lock);
+ prof_postfork_parent();
+ ctl_postfork_parent();
}
void
jemalloc_postfork_child(void)
{
- tsd_t *tsd;
- unsigned i, narenas;
+ unsigned i;
assert(malloc_initialized());
- tsd = tsd_fetch();
-
- witness_postfork_child(tsd);
/* Release all mutexes, now that fork() has completed. */
- base_postfork_child(tsd_tsdn(tsd));
- for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
- arena_t *arena;
-
- if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
- arena_postfork_child(tsd_tsdn(tsd), arena);
+ base_postfork_child();
+ chunk_postfork_child();
+ for (i = 0; i < narenas_total; i++) {
+ if (arenas[i] != NULL)
+ arena_postfork_child(arenas[i]);
}
- prof_postfork_child(tsd_tsdn(tsd));
- malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
- ctl_postfork_child(tsd_tsdn(tsd));
+ malloc_mutex_postfork_child(&arenas_lock);
+ prof_postfork_child();
+ ctl_postfork_child();
}
/******************************************************************************/
-
-/* Helps the application decide if a pointer is worth re-allocating in order to reduce fragmentation.
- * returns 0 if the allocation is in the currently active run,
- * or when it is not causing any frag issue (large or huge bin)
- * returns the bin utilization and run utilization both in fixed point 16:16.
- * If the application decides to re-allocate it should use MALLOCX_TCACHE_NONE when doing so. */
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW
-je_get_defrag_hint(void* ptr, int *bin_util, int *run_util) {
- int defrag = 0;
- arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (likely(chunk != ptr)) { /* indication that this is not a HUGE alloc */
- size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- size_t mapbits = arena_mapbits_get(chunk, pageind);
- if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) { /* indication that this is not a LARGE alloc */
- arena_t *arena = extent_node_arena_get(&chunk->node);
- size_t rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
- arena_run_t *run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
- arena_bin_t *bin = &arena->bins[run->binind];
- tsd_t *tsd = tsd_fetch();
- malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
- /* runs that are in the same chunk in as the current chunk, are likely to be the next currun */
- if (chunk != (arena_chunk_t *)CHUNK_ADDR2BASE(bin->runcur)) {
- arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
- size_t availregs = bin_info->nregs * bin->stats.curruns;
- *bin_util = (bin->stats.curregs<<16) / availregs;
- *run_util = ((bin_info->nregs - run->nfree)<<16) / bin_info->nregs;
- defrag = 1;
- }
- malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
- }
- }
- return defrag;
-}
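The removed je_get_defrag_hint() is the Redis-specific helper that reports how full an allocation's bin and run are, each as a 16:16 fixed-point fraction, so the caller can judge whether reallocating the pointer would reduce fragmentation. A standalone sketch of that fixed-point encoding (the region counts are invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* Encode used/total as a 16:16 fixed-point utilization, as the hint does. */
static uint32_t
util_16_16(uint32_t used, uint32_t total)
{
    return (uint32_t)(((uint64_t)used << 16) / total);
}

int
main(void)
{
    /* e.g. 300 of 512 regions used in the bin, 20 of 64 in this run. */
    uint32_t bin_util = util_16_16(300, 512);
    uint32_t run_util = util_16_16(20, 64);

    printf("bin %.1f%%, run %.1f%%\n",
        bin_util * 100.0 / 65536.0, run_util * 100.0 / 65536.0);
    /* A mostly-full bin with a mostly-empty run suggests a defrag candidate. */
    return 0;
}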
diff --git a/deps/jemalloc/src/mutex.c b/deps/jemalloc/src/mutex.c
index 6333e73d6..2d47af976 100644
--- a/deps/jemalloc/src/mutex.c
+++ b/deps/jemalloc/src/mutex.c
@@ -69,7 +69,7 @@ JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
#endif
bool
-malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank)
+malloc_mutex_init(malloc_mutex_t *mutex)
{
#ifdef _WIN32
@@ -80,8 +80,6 @@ malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank)
_CRT_SPINCOUNT))
return (true);
# endif
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
- mutex->lock = OS_UNFAIR_LOCK_INIT;
#elif (defined(JEMALLOC_OSSPIN))
mutex->lock = 0;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
@@ -105,34 +103,31 @@ malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank)
}
pthread_mutexattr_destroy(&attr);
#endif
- if (config_debug)
- witness_init(&mutex->witness, name, rank, NULL);
return (false);
}
void
-malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex)
+malloc_mutex_prefork(malloc_mutex_t *mutex)
{
- malloc_mutex_lock(tsdn, mutex);
+ malloc_mutex_lock(mutex);
}
void
-malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex)
+malloc_mutex_postfork_parent(malloc_mutex_t *mutex)
{
- malloc_mutex_unlock(tsdn, mutex);
+ malloc_mutex_unlock(mutex);
}
void
-malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex)
+malloc_mutex_postfork_child(malloc_mutex_t *mutex)
{
#ifdef JEMALLOC_MUTEX_INIT_CB
- malloc_mutex_unlock(tsdn, mutex);
+ malloc_mutex_unlock(mutex);
#else
- if (malloc_mutex_init(mutex, mutex->witness.name,
- mutex->witness.rank)) {
+ if (malloc_mutex_init(mutex)) {
malloc_printf("<jemalloc>: Error re-initializing mutex in "
"child\n");
if (opt_abort)
@@ -142,7 +137,7 @@ malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex)
}
bool
-malloc_mutex_boot(void)
+mutex_boot(void)
{
#ifdef JEMALLOC_MUTEX_INIT_CB
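malloc_mutex_prefork() and the postfork pair above, together with _malloc_prefork()/_malloc_postfork() earlier in the patch, follow the standard pthread_atfork(3) discipline: acquire every allocator mutex before fork(), release them in the parent, and release or re-initialize them in the child so it never inherits a lock held by a thread that no longer exists there. A minimal standalone sketch of the pattern with a single mutex:

#include <pthread.h>
#include <sys/types.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void prefork(void)         { pthread_mutex_lock(&lock); }
static void postfork_parent(void) { pthread_mutex_unlock(&lock); }
static void
postfork_child(void)
{
    /* Re-initialize rather than unlock: the child gets a fresh mutex. */
    pthread_mutex_init(&lock, NULL);
}

int
main(void)
{
    pthread_atfork(prefork, postfork_parent, postfork_child);

    pid_t pid = fork();
    if (pid == 0) {
        pthread_mutex_lock(&lock);  /* safe: the child re-initialized it */
        pthread_mutex_unlock(&lock);
        _exit(0);
    }
    return 0;
}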
diff --git a/deps/jemalloc/src/nstime.c b/deps/jemalloc/src/nstime.c
deleted file mode 100644
index 0948e29fa..000000000
--- a/deps/jemalloc/src/nstime.c
+++ /dev/null
@@ -1,194 +0,0 @@
-#include "jemalloc/internal/jemalloc_internal.h"
-
-#define BILLION UINT64_C(1000000000)
-
-void
-nstime_init(nstime_t *time, uint64_t ns)
-{
-
- time->ns = ns;
-}
-
-void
-nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec)
-{
-
- time->ns = sec * BILLION + nsec;
-}
-
-uint64_t
-nstime_ns(const nstime_t *time)
-{
-
- return (time->ns);
-}
-
-uint64_t
-nstime_sec(const nstime_t *time)
-{
-
- return (time->ns / BILLION);
-}
-
-uint64_t
-nstime_nsec(const nstime_t *time)
-{
-
- return (time->ns % BILLION);
-}
-
-void
-nstime_copy(nstime_t *time, const nstime_t *source)
-{
-
- *time = *source;
-}
-
-int
-nstime_compare(const nstime_t *a, const nstime_t *b)
-{
-
- return ((a->ns > b->ns) - (a->ns < b->ns));
-}
-
-void
-nstime_add(nstime_t *time, const nstime_t *addend)
-{
-
- assert(UINT64_MAX - time->ns >= addend->ns);
-
- time->ns += addend->ns;
-}
-
-void
-nstime_subtract(nstime_t *time, const nstime_t *subtrahend)
-{
-
- assert(nstime_compare(time, subtrahend) >= 0);
-
- time->ns -= subtrahend->ns;
-}
-
-void
-nstime_imultiply(nstime_t *time, uint64_t multiplier)
-{
-
- assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) <<
- 2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns));
-
- time->ns *= multiplier;
-}
-
-void
-nstime_idivide(nstime_t *time, uint64_t divisor)
-{
-
- assert(divisor != 0);
-
- time->ns /= divisor;
-}
-
-uint64_t
-nstime_divide(const nstime_t *time, const nstime_t *divisor)
-{
-
- assert(divisor->ns != 0);
-
- return (time->ns / divisor->ns);
-}
-
-#ifdef _WIN32
-# define NSTIME_MONOTONIC true
-static void
-nstime_get(nstime_t *time)
-{
- FILETIME ft;
- uint64_t ticks_100ns;
-
- GetSystemTimeAsFileTime(&ft);
- ticks_100ns = (((uint64_t)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
-
- nstime_init(time, ticks_100ns * 100);
-}
-#elif JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
-# define NSTIME_MONOTONIC true
-static void
-nstime_get(nstime_t *time)
-{
- struct timespec ts;
-
- clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
- nstime_init2(time, ts.tv_sec, ts.tv_nsec);
-}
-#elif JEMALLOC_HAVE_CLOCK_MONOTONIC
-# define NSTIME_MONOTONIC true
-static void
-nstime_get(nstime_t *time)
-{
- struct timespec ts;
-
- clock_gettime(CLOCK_MONOTONIC, &ts);
- nstime_init2(time, ts.tv_sec, ts.tv_nsec);
-}
-#elif JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
-# define NSTIME_MONOTONIC true
-static void
-nstime_get(nstime_t *time)
-{
-
- nstime_init(time, mach_absolute_time());
-}
-#else
-# define NSTIME_MONOTONIC false
-static void
-nstime_get(nstime_t *time)
-{
- struct timeval tv;
-
- gettimeofday(&tv, NULL);
- nstime_init2(time, tv.tv_sec, tv.tv_usec * 1000);
-}
-#endif
-
-#ifdef JEMALLOC_JET
-#undef nstime_monotonic
-#define nstime_monotonic JEMALLOC_N(n_nstime_monotonic)
-#endif
-bool
-nstime_monotonic(void)
-{
-
- return (NSTIME_MONOTONIC);
-#undef NSTIME_MONOTONIC
-}
-#ifdef JEMALLOC_JET
-#undef nstime_monotonic
-#define nstime_monotonic JEMALLOC_N(nstime_monotonic)
-nstime_monotonic_t *nstime_monotonic = JEMALLOC_N(n_nstime_monotonic);
-#endif
-
-#ifdef JEMALLOC_JET
-#undef nstime_update
-#define nstime_update JEMALLOC_N(n_nstime_update)
-#endif
-bool
-nstime_update(nstime_t *time)
-{
- nstime_t old_time;
-
- nstime_copy(&old_time, time);
- nstime_get(time);
-
- /* Handle non-monotonic clocks. */
- if (unlikely(nstime_compare(&old_time, time) > 0)) {
- nstime_copy(time, &old_time);
- return (true);
- }
-
- return (false);
-}
-#ifdef JEMALLOC_JET
-#undef nstime_update
-#define nstime_update JEMALLOC_N(nstime_update)
-nstime_update_t *nstime_update = JEMALLOC_N(n_nstime_update);
-#endif
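The deleted nstime module wraps a single uint64_t nanosecond count, providing construction from (sec, nsec), comparison and arithmetic, and an nstime_update() that keeps the old value whenever a non-monotonic clock steps backwards. A standalone sketch of the same idea built on POSIX clock_gettime() (the type and function names below are illustrative, not the removed API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define BILLION UINT64_C(1000000000)

typedef struct { uint64_t ns; } ns_time_t;

static ns_time_t
ns_now(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (ns_time_t){ (uint64_t)ts.tv_sec * BILLION + (uint64_t)ts.tv_nsec };
}

/* Refresh *t; if the clock stepped backwards, keep the old value. */
static bool
ns_update(ns_time_t *t)
{
    ns_time_t now = ns_now();
    if (now.ns < t->ns)
        return true;    /* non-monotonic reading rejected */
    *t = now;
    return false;
}

int
main(void)
{
    ns_time_t start = ns_now();
    ns_update(&start);
    printf("%llu ns since start\n",
        (unsigned long long)(ns_now().ns - start.ns));
    return 0;
}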
diff --git a/deps/jemalloc/src/pages.c b/deps/jemalloc/src/pages.c
index 5f0c9669d..83a167f67 100644
--- a/deps/jemalloc/src/pages.c
+++ b/deps/jemalloc/src/pages.c
@@ -1,49 +1,29 @@
#define JEMALLOC_PAGES_C_
#include "jemalloc/internal/jemalloc_internal.h"
-#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
-#include <sys/sysctl.h>
-#endif
-
-/******************************************************************************/
-/* Data. */
-
-#ifndef _WIN32
-# define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE)
-# define PAGES_PROT_DECOMMIT (PROT_NONE)
-static int mmap_flags;
-#endif
-static bool os_overcommits;
-
/******************************************************************************/
void *
-pages_map(void *addr, size_t size, bool *commit)
+pages_map(void *addr, size_t size)
{
void *ret;
assert(size != 0);
- if (os_overcommits)
- *commit = true;
-
#ifdef _WIN32
/*
* If VirtualAlloc can't allocate at the given address when one is
* given, it fails and returns NULL.
*/
- ret = VirtualAlloc(addr, size, MEM_RESERVE | (*commit ? MEM_COMMIT : 0),
+ ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
PAGE_READWRITE);
#else
/*
* We don't use MAP_FIXED here, because it can cause the *replacement*
* of existing mappings, and we only want to create new mappings.
*/
- {
- int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
-
- ret = mmap(addr, size, prot, mmap_flags, -1, 0);
- }
+ ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
+ -1, 0);
assert(ret != NULL);
if (ret == MAP_FAILED)
@@ -87,8 +67,7 @@ pages_unmap(void *addr, size_t size)
}
void *
-pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
- bool *commit)
+pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
{
void *ret = (void *)((uintptr_t)addr + leadsize);
@@ -98,7 +77,7 @@ pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
void *new_addr;
pages_unmap(addr, alloc_size);
- new_addr = pages_map(ret, size, commit);
+ new_addr = pages_map(ret, size);
if (new_addr == ret)
return (ret);
if (new_addr)
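pages_trim() is used when a larger-than-needed mapping was obtained to guarantee alignment: the surplus is released either by unmapping just the leading and trailing excess or, as in the branch shown, by dropping the whole mapping and re-mapping at the aligned address. A standalone sketch of the over-allocate-and-trim technique on POSIX (assumes size and alignment are page-size multiples and alignment is a power of two; the function name is illustrative):

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

void *
map_aligned(size_t size, size_t alignment)
{
    size_t alloc_size = size + alignment;   /* worst-case slack */
    char *base = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == MAP_FAILED)
        return NULL;

    uintptr_t aligned = ((uintptr_t)base + alignment - 1) & ~(alignment - 1);
    size_t leadsize = aligned - (uintptr_t)base;
    size_t trailsize = alloc_size - leadsize - size;

    if (leadsize != 0)
        munmap(base, leadsize);             /* drop the unaligned head */
    if (trailsize != 0)
        munmap((char *)aligned + size, trailsize);  /* and the tail */
    return (void *)aligned;
}

int
main(void)
{
    void *p = map_aligned(3 * 4096, 65536);
    return p == NULL;
}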
@@ -122,17 +101,17 @@ static bool
pages_commit_impl(void *addr, size_t size, bool commit)
{
- if (os_overcommits)
- return (true);
-
-#ifdef _WIN32
- return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT,
- PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT)));
-#else
- {
- int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
- void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED,
- -1, 0);
+#ifndef _WIN32
+ /*
+ * The following decommit/commit implementation is functional, but
+ * always disabled because it doesn't add value beyong improved
+ * debugging (at the cost of extra system calls) on systems that
+ * overcommit.
+ */
+ if (false) {
+ int prot = commit ? (PROT_READ | PROT_WRITE) : PROT_NONE;
+ void *result = mmap(addr, size, prot, MAP_PRIVATE | MAP_ANON |
+ MAP_FIXED, -1, 0);
if (result == MAP_FAILED)
return (true);
if (result != addr) {
@@ -146,6 +125,7 @@ pages_commit_impl(void *addr, size_t size, bool commit)
return (false);
}
#endif
+ return (true);
}
bool
@@ -170,16 +150,15 @@ pages_purge(void *addr, size_t size)
#ifdef _WIN32
VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
unzeroed = true;
-#elif (defined(JEMALLOC_PURGE_MADVISE_FREE) || \
- defined(JEMALLOC_PURGE_MADVISE_DONTNEED))
-# if defined(JEMALLOC_PURGE_MADVISE_FREE)
-# define JEMALLOC_MADV_PURGE MADV_FREE
-# define JEMALLOC_MADV_ZEROS false
-# elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED)
+#elif defined(JEMALLOC_HAVE_MADVISE)
+# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
# define JEMALLOC_MADV_PURGE MADV_DONTNEED
# define JEMALLOC_MADV_ZEROS true
+# elif defined(JEMALLOC_PURGE_MADVISE_FREE)
+# define JEMALLOC_MADV_PURGE MADV_FREE
+# define JEMALLOC_MADV_ZEROS false
# else
-# error No madvise(2) flag defined for purging unused dirty pages
+# error "No madvise(2) flag defined for purging unused dirty pages."
# endif
int err = madvise(addr, size, JEMALLOC_MADV_PURGE);
unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0);
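The purge path above returns dirty pages to the kernel with madvise(2): MADV_DONTNEED drops them immediately, so they read back as zeros, while MADV_FREE only marks them reclaimable, so their contents may survive and the caller must treat them as unzeroed. A small standalone sketch (Linux, anonymous private mapping):

#include <assert.h>
#include <string.h>
#include <sys/mman.h>

int
main(void)
{
    size_t sz = 16 * 4096;
    char *p = mmap(NULL, sz, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return 1;
    memset(p, 0xab, sz);        /* dirty the pages */

    /* Purge: after MADV_DONTNEED the next touch faults in zero pages. */
    if (madvise(p, sz, MADV_DONTNEED) == 0)
        assert(p[0] == 0);

    munmap(p, sz);
    return 0;
}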
@@ -192,111 +171,3 @@ pages_purge(void *addr, size_t size)
return (unzeroed);
}
-bool
-pages_huge(void *addr, size_t size)
-{
-
- assert(PAGE_ADDR2BASE(addr) == addr);
- assert(PAGE_CEILING(size) == size);
-
-#ifdef JEMALLOC_THP
- return (madvise(addr, size, MADV_HUGEPAGE) != 0);
-#else
- return (false);
-#endif
-}
-
-bool
-pages_nohuge(void *addr, size_t size)
-{
-
- assert(PAGE_ADDR2BASE(addr) == addr);
- assert(PAGE_CEILING(size) == size);
-
-#ifdef JEMALLOC_THP
- return (madvise(addr, size, MADV_NOHUGEPAGE) != 0);
-#else
- return (false);
-#endif
-}
-
-#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
-static bool
-os_overcommits_sysctl(void)
-{
- int vm_overcommit;
- size_t sz;
-
- sz = sizeof(vm_overcommit);
- if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0)
- return (false); /* Error. */
-
- return ((vm_overcommit & 0x3) == 0);
-}
-#endif
-
-#ifdef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
-/*
- * Use syscall(2) rather than {open,read,close}(2) when possible to avoid
- * reentry during bootstrapping if another library has interposed system call
- * wrappers.
- */
-static bool
-os_overcommits_proc(void)
-{
- int fd;
- char buf[1];
- ssize_t nread;
-
-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
- fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY);
-#else
- fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
-#endif
- if (fd == -1)
- return (false); /* Error. */
-
-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
- nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf));
-#else
- nread = read(fd, &buf, sizeof(buf));
-#endif
-
-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
- syscall(SYS_close, fd);
-#else
- close(fd);
-#endif
-
- if (nread < 1)
- return (false); /* Error. */
- /*
- * /proc/sys/vm/overcommit_memory meanings:
- * 0: Heuristic overcommit.
- * 1: Always overcommit.
- * 2: Never overcommit.
- */
- return (buf[0] == '0' || buf[0] == '1');
-}
-#endif
-
-void
-pages_boot(void)
-{
-
-#ifndef _WIN32
- mmap_flags = MAP_PRIVATE | MAP_ANON;
-#endif
-
-#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
- os_overcommits = os_overcommits_sysctl();
-#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY)
- os_overcommits = os_overcommits_proc();
-# ifdef MAP_NORESERVE
- if (os_overcommits)
- mmap_flags |= MAP_NORESERVE;
-# endif
-#else
- os_overcommits = false;
-#endif
-}
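The removed boot-time detection decides whether the OS overcommits memory; on Linux it reads /proc/sys/vm/overcommit_memory, where 0 means heuristic overcommit, 1 always, and 2 never, and only modes 0 and 1 count as overcommitting. The raw syscall(2) variant exists to avoid re-entering interposed libc wrappers during bootstrap; a simplified sketch with plain open/read is enough to show the logic:

#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool
os_overcommits(void)
{
    char c;
    int fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
    if (fd == -1)
        return false;       /* unknown: assume no overcommit */
    ssize_t n = read(fd, &c, 1);
    close(fd);
    /* 0 = heuristic overcommit, 1 = always, 2 = never. */
    return n == 1 && (c == '0' || c == '1');
}

int
main(void)
{
    printf("overcommit: %s\n", os_overcommits() ? "yes" : "no");
    return 0;
}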
diff --git a/deps/jemalloc/src/prng.c b/deps/jemalloc/src/prng.c
deleted file mode 100644
index 76646a2a4..000000000
--- a/deps/jemalloc/src/prng.c
+++ /dev/null
@@ -1,2 +0,0 @@
-#define JEMALLOC_PRNG_C_
-#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/deps/jemalloc/src/prof.c b/deps/jemalloc/src/prof.c
index c89dade1f..5d2b9598f 100644
--- a/deps/jemalloc/src/prof.c
+++ b/deps/jemalloc/src/prof.c
@@ -109,7 +109,7 @@ static char prof_dump_buf[
1
#endif
];
-static size_t prof_dump_buf_end;
+static unsigned prof_dump_buf_end;
static int prof_dump_fd;
/* Do not dump any profiles until bootstrapping is complete. */
@@ -121,13 +121,13 @@ static bool prof_booted = false;
* definition.
*/
-static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx);
+static bool prof_tctx_should_destroy(prof_tctx_t *tctx);
static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
-static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
+static bool prof_tdata_should_destroy(prof_tdata_t *tdata,
bool even_if_attached);
static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata,
bool even_if_attached);
-static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name);
+static char *prof_thread_name_alloc(tsd_t *tsd, const char *thread_name);
/******************************************************************************/
/* Red-black trees. */
@@ -213,23 +213,22 @@ prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated)
}
if ((uintptr_t)tctx > (uintptr_t)1U) {
- malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
+ malloc_mutex_lock(tctx->tdata->lock);
tctx->prepared = false;
- if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx))
+ if (prof_tctx_should_destroy(tctx))
prof_tctx_destroy(tsd, tctx);
else
- malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
+ malloc_mutex_unlock(tctx->tdata->lock);
}
}
void
-prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
- prof_tctx_t *tctx)
+prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx)
{
- prof_tctx_set(tsdn, ptr, usize, tctx);
+ prof_tctx_set(ptr, usize, tctx);
- malloc_mutex_lock(tsdn, tctx->tdata->lock);
+ malloc_mutex_lock(tctx->tdata->lock);
tctx->cnts.curobjs++;
tctx->cnts.curbytes += usize;
if (opt_prof_accum) {
@@ -237,23 +236,23 @@ prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
tctx->cnts.accumbytes += usize;
}
tctx->prepared = false;
- malloc_mutex_unlock(tsdn, tctx->tdata->lock);
+ malloc_mutex_unlock(tctx->tdata->lock);
}
void
prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
{
- malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
+ malloc_mutex_lock(tctx->tdata->lock);
assert(tctx->cnts.curobjs > 0);
assert(tctx->cnts.curbytes >= usize);
tctx->cnts.curobjs--;
tctx->cnts.curbytes -= usize;
- if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx))
+ if (prof_tctx_should_destroy(tctx))
prof_tctx_destroy(tsd, tctx);
else
- malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
+ malloc_mutex_unlock(tctx->tdata->lock);
}
void
@@ -278,7 +277,7 @@ prof_enter(tsd_t *tsd, prof_tdata_t *tdata)
tdata->enq = true;
}
- malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
+ malloc_mutex_lock(&bt2gctx_mtx);
}
JEMALLOC_INLINE_C void
@@ -288,7 +287,7 @@ prof_leave(tsd_t *tsd, prof_tdata_t *tdata)
cassert(config_prof);
assert(tdata == prof_tdata_get(tsd, false));
- malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
+ malloc_mutex_unlock(&bt2gctx_mtx);
if (tdata != NULL) {
bool idump, gdump;
@@ -301,9 +300,9 @@ prof_leave(tsd_t *tsd, prof_tdata_t *tdata)
tdata->enq_gdump = false;
if (idump)
- prof_idump(tsd_tsdn(tsd));
+ prof_idump();
if (gdump)
- prof_gdump(tsd_tsdn(tsd));
+ prof_gdump();
}
}
@@ -547,15 +546,14 @@ prof_tdata_mutex_choose(uint64_t thr_uid)
}
static prof_gctx_t *
-prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt)
+prof_gctx_create(tsd_t *tsd, prof_bt_t *bt)
{
/*
* Create a single allocation that has space for vec of length bt->len.
*/
- size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
- prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size,
- size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
- true);
+ prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsd, offsetof(prof_gctx_t,
+ vec) + (bt->len * sizeof(void *)), false, tcache_get(tsd, true),
+ true, NULL);
if (gctx == NULL)
return (NULL);
gctx->lock = prof_gctx_mutex_choose();
@@ -587,7 +585,7 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
* into this function.
*/
prof_enter(tsd, tdata_self);
- malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ malloc_mutex_lock(gctx->lock);
assert(gctx->nlimbo != 0);
if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
/* Remove gctx from bt2gctx. */
@@ -595,25 +593,24 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
not_reached();
prof_leave(tsd, tdata_self);
/* Destroy gctx. */
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
- idalloctm(tsd_tsdn(tsd), gctx, NULL, true, true);
+ malloc_mutex_unlock(gctx->lock);
+ idalloctm(tsd, gctx, tcache_get(tsd, false), true);
} else {
/*
* Compensate for increment in prof_tctx_destroy() or
* prof_lookup().
*/
gctx->nlimbo--;
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ malloc_mutex_unlock(gctx->lock);
prof_leave(tsd, tdata_self);
}
}
+/* tctx->tdata->lock must be held. */
static bool
-prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx)
+prof_tctx_should_destroy(prof_tctx_t *tctx)
{
- malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
-
if (opt_prof_accum)
return (false);
if (tctx->cnts.curobjs != 0)
@@ -636,6 +633,7 @@ prof_gctx_should_destroy(prof_gctx_t *gctx)
return (true);
}
+/* tctx->tdata->lock is held upon entry, and released before return. */
static void
prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
{
@@ -643,8 +641,6 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
prof_gctx_t *gctx = tctx->gctx;
bool destroy_tdata, destroy_tctx, destroy_gctx;
- malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
-
assert(tctx->cnts.curobjs == 0);
assert(tctx->cnts.curbytes == 0);
assert(!opt_prof_accum);
@@ -652,10 +648,10 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
assert(tctx->cnts.accumbytes == 0);
ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
- destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false);
- malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+ destroy_tdata = prof_tdata_should_destroy(tdata, false);
+ malloc_mutex_unlock(tdata->lock);
- malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ malloc_mutex_lock(gctx->lock);
switch (tctx->state) {
case prof_tctx_state_nominal:
tctx_tree_remove(&gctx->tctxs, tctx);
@@ -695,19 +691,17 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
destroy_tctx = false;
destroy_gctx = false;
}
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ malloc_mutex_unlock(gctx->lock);
if (destroy_gctx) {
prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
tdata);
}
- malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock);
-
if (destroy_tdata)
prof_tdata_destroy(tsd, tdata, false);
if (destroy_tctx)
- idalloctm(tsd_tsdn(tsd), tctx, NULL, true, true);
+ idalloctm(tsd, tctx, tcache_get(tsd, false), true);
}
static bool
@@ -727,7 +721,7 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
prof_enter(tsd, tdata);
if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
/* bt has never been seen before. Insert it. */
- gctx.p = prof_gctx_create(tsd_tsdn(tsd), bt);
+ gctx.p = prof_gctx_create(tsd, bt);
if (gctx.v == NULL) {
prof_leave(tsd, tdata);
return (true);
@@ -736,7 +730,7 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
/* OOM. */
prof_leave(tsd, tdata);
- idalloctm(tsd_tsdn(tsd), gctx.v, NULL, true, true);
+ idalloctm(tsd, gctx.v, tcache_get(tsd, false), true);
return (true);
}
new_gctx = true;
@@ -745,9 +739,9 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
* Increment nlimbo, in order to avoid a race condition with
* prof_tctx_destroy()/prof_gctx_try_destroy().
*/
- malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock);
+ malloc_mutex_lock(gctx.p->lock);
gctx.p->nlimbo++;
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock);
+ malloc_mutex_unlock(gctx.p->lock);
new_gctx = false;
}
prof_leave(tsd, tdata);
@@ -774,12 +768,13 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
if (tdata == NULL)
return (NULL);
- malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
+ malloc_mutex_lock(tdata->lock);
not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
if (!not_found) /* Note double negative! */
ret.p->prepared = true;
- malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+ malloc_mutex_unlock(tdata->lock);
if (not_found) {
+ tcache_t *tcache;
void *btkey;
prof_gctx_t *gctx;
bool new_gctx, error;
@@ -793,9 +788,9 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
return (NULL);
/* Link a prof_tctx_t into gctx for this thread. */
- ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t),
- size2index(sizeof(prof_tctx_t)), false, NULL, true,
- arena_ichoose(tsd, NULL), true);
+ tcache = tcache_get(tsd, true);
+ ret.v = iallocztm(tsd, sizeof(prof_tctx_t), false, tcache, true,
+ NULL);
if (ret.p == NULL) {
if (new_gctx)
prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
@@ -809,41 +804,41 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
ret.p->tctx_uid = tdata->tctx_uid_next++;
ret.p->prepared = true;
ret.p->state = prof_tctx_state_initializing;
- malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
+ malloc_mutex_lock(tdata->lock);
error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
- malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+ malloc_mutex_unlock(tdata->lock);
if (error) {
if (new_gctx)
prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
- idalloctm(tsd_tsdn(tsd), ret.v, NULL, true, true);
+ idalloctm(tsd, ret.v, tcache, true);
return (NULL);
}
- malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ malloc_mutex_lock(gctx->lock);
ret.p->state = prof_tctx_state_nominal;
tctx_tree_insert(&gctx->tctxs, ret.p);
gctx->nlimbo--;
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ malloc_mutex_unlock(gctx->lock);
}
return (ret.p);
}
-/*
- * The bodies of this function and prof_leakcheck() are compiled out unless heap
- * profiling is enabled, so that it is possible to compile jemalloc with
- * floating point support completely disabled. Avoiding floating point code is
- * important on memory-constrained systems, but it also enables a workaround for
- * versions of glibc that don't properly save/restore floating point registers
- * during dynamic lazy symbol loading (which internally calls into whatever
- * malloc implementation happens to be integrated into the application). Note
- * that some compilers (e.g. gcc 4.8) may use floating point registers for fast
- * memory moves, so jemalloc must be compiled with such optimizations disabled
- * (e.g.
- * -mno-sse) in order for the workaround to be complete.
- */
void
prof_sample_threshold_update(prof_tdata_t *tdata)
{
+ /*
+ * The body of this function is compiled out unless heap profiling is
+ * enabled, so that it is possible to compile jemalloc with floating
+ * point support completely disabled. Avoiding floating point code is
+ * important on memory-constrained systems, but it also enables a
+ * workaround for versions of glibc that don't properly save/restore
+ * floating point registers during dynamic lazy symbol loading (which
+ * internally calls into whatever malloc implementation happens to be
+ * integrated into the application). Note that some compilers (e.g.
+ * gcc 4.8) may use floating point registers for fast memory moves, so
+ * jemalloc must be compiled with such optimizations disabled (e.g.
+ * -mno-sse) in order for the workaround to be complete.
+ */
#ifdef JEMALLOC_PROF
uint64_t r;
double u;
@@ -874,7 +869,8 @@ prof_sample_threshold_update(prof_tdata_t *tdata)
* pp 500
* (http://luc.devroye.org/rnbookindex.html)
*/
- r = prng_lg_range_u64(&tdata->prng_state, 53);
+ prng64(r, 53, tdata->prng_state, UINT64_C(6364136223846793005),
+ UINT64_C(1442695040888963407));
u = (double)r * (1.0/9007199254740992.0L);
tdata->bytes_until_sample = (uint64_t)(log(u) /
log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
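The threshold computed above draws the gap until the next sampled byte from a geometric distribution whose mean is the configured sampling interval of 2^lg_prof_sample bytes, using the inverse-transform method cited from Devroye: draw a uniform u in (0, 1) and take log(u) / log(1 - 1/mean). A standalone sketch of the same computation (the interval constant and the use of rand() are illustrative; the real code uses its own 64-bit PRNG):

#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Mean bytes between samples, e.g. lg_prof_sample = 19 -> 512 KiB. */
#define SAMPLE_INTERVAL ((double)(1U << 19))

static uint64_t
next_sample_gap(void)
{
    /* Uniform u in (0, 1); avoid u == 0 so log(u) stays finite. */
    double u = (rand() + 1.0) / ((double)RAND_MAX + 2.0);
    return (uint64_t)(log(u) / log(1.0 - 1.0 / SAMPLE_INTERVAL)) + 1;
}

int
main(void)
{
    srand(12345);
    for (int i = 0; i < 3; i++)
        printf("next sample after %llu bytes\n",
            (unsigned long long)next_sample_gap());
    return 0;
}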
@@ -897,13 +893,11 @@ size_t
prof_tdata_count(void)
{
size_t tdata_count = 0;
- tsdn_t *tsdn;
- tsdn = tsdn_fetch();
- malloc_mutex_lock(tsdn, &tdatas_mtx);
+ malloc_mutex_lock(&tdatas_mtx);
tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
(void *)&tdata_count);
- malloc_mutex_unlock(tsdn, &tdatas_mtx);
+ malloc_mutex_unlock(&tdatas_mtx);
return (tdata_count);
}
@@ -922,9 +916,9 @@ prof_bt_count(void)
if (tdata == NULL)
return (0);
- malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
+ malloc_mutex_lock(&bt2gctx_mtx);
bt_count = ckh_count(&bt2gctx);
- malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
+ malloc_mutex_unlock(&bt2gctx_mtx);
return (bt_count);
}
@@ -994,7 +988,7 @@ prof_dump_close(bool propagate_err)
static bool
prof_dump_write(bool propagate_err, const char *s)
{
- size_t i, slen, n;
+ unsigned i, slen, n;
cassert(config_prof);
@@ -1037,21 +1031,20 @@ prof_dump_printf(bool propagate_err, const char *format, ...)
return (ret);
}
+/* tctx->tdata->lock is held. */
static void
-prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata)
+prof_tctx_merge_tdata(prof_tctx_t *tctx, prof_tdata_t *tdata)
{
- malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
-
- malloc_mutex_lock(tsdn, tctx->gctx->lock);
+ malloc_mutex_lock(tctx->gctx->lock);
switch (tctx->state) {
case prof_tctx_state_initializing:
- malloc_mutex_unlock(tsdn, tctx->gctx->lock);
+ malloc_mutex_unlock(tctx->gctx->lock);
return;
case prof_tctx_state_nominal:
tctx->state = prof_tctx_state_dumping;
- malloc_mutex_unlock(tsdn, tctx->gctx->lock);
+ malloc_mutex_unlock(tctx->gctx->lock);
memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
@@ -1070,12 +1063,11 @@ prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata)
}
}
+/* gctx->lock is held. */
static void
-prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx)
+prof_tctx_merge_gctx(prof_tctx_t *tctx, prof_gctx_t *gctx)
{
- malloc_mutex_assert_owner(tsdn, gctx->lock);
-
gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
if (opt_prof_accum) {
@@ -1084,12 +1076,10 @@ prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx)
}
}
+/* tctx->gctx is held. */
static prof_tctx_t *
prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
{
- tsdn_t *tsdn = (tsdn_t *)arg;
-
- malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
switch (tctx->state) {
case prof_tctx_state_nominal:
@@ -1097,7 +1087,7 @@ prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
break;
case prof_tctx_state_dumping:
case prof_tctx_state_purgatory:
- prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx);
+ prof_tctx_merge_gctx(tctx, tctx->gctx);
break;
default:
not_reached();
@@ -1106,18 +1096,11 @@ prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
return (NULL);
}
-struct prof_tctx_dump_iter_arg_s {
- tsdn_t *tsdn;
- bool propagate_err;
-};
-
+/* gctx->lock is held. */
static prof_tctx_t *
-prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque)
+prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
{
- struct prof_tctx_dump_iter_arg_s *arg =
- (struct prof_tctx_dump_iter_arg_s *)opaque;
-
- malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock);
+ bool propagate_err = *(bool *)arg;
switch (tctx->state) {
case prof_tctx_state_initializing:
@@ -1126,7 +1109,7 @@ prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque)
break;
case prof_tctx_state_dumping:
case prof_tctx_state_purgatory:
- if (prof_dump_printf(arg->propagate_err,
+ if (prof_dump_printf(propagate_err,
" t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": "
"%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
@@ -1139,14 +1122,12 @@ prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque)
return (NULL);
}
+/* tctx->gctx is held. */
static prof_tctx_t *
prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
{
- tsdn_t *tsdn = (tsdn_t *)arg;
prof_tctx_t *ret;
- malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
-
switch (tctx->state) {
case prof_tctx_state_nominal:
/* New since dumping started; ignore. */
@@ -1167,12 +1148,12 @@ label_return:
}
static void
-prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
+prof_dump_gctx_prep(prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
{
cassert(config_prof);
- malloc_mutex_lock(tsdn, gctx->lock);
+ malloc_mutex_lock(gctx->lock);
/*
* Increment nlimbo so that gctx won't go away before dump.
@@ -1184,26 +1165,19 @@ prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));
- malloc_mutex_unlock(tsdn, gctx->lock);
+ malloc_mutex_unlock(gctx->lock);
}
-struct prof_gctx_merge_iter_arg_s {
- tsdn_t *tsdn;
- size_t leak_ngctx;
-};
-
static prof_gctx_t *
-prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque)
+prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
{
- struct prof_gctx_merge_iter_arg_s *arg =
- (struct prof_gctx_merge_iter_arg_s *)opaque;
+ size_t *leak_ngctx = (size_t *)arg;
- malloc_mutex_lock(arg->tsdn, gctx->lock);
- tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter,
- (void *)arg->tsdn);
+ malloc_mutex_lock(gctx->lock);
+ tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, NULL);
if (gctx->cnt_summed.curobjs != 0)
- arg->leak_ngctx++;
- malloc_mutex_unlock(arg->tsdn, gctx->lock);
+ (*leak_ngctx)++;
+ malloc_mutex_unlock(gctx->lock);
return (NULL);
}
@@ -1222,7 +1196,7 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
*/
while ((gctx = gctx_tree_first(gctxs)) != NULL) {
gctx_tree_remove(gctxs, gctx);
- malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ malloc_mutex_lock(gctx->lock);
{
prof_tctx_t *next;
@@ -1230,15 +1204,14 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
do {
prof_tctx_t *to_destroy =
tctx_tree_iter(&gctx->tctxs, next,
- prof_tctx_finish_iter,
- (void *)tsd_tsdn(tsd));
+ prof_tctx_finish_iter, NULL);
if (to_destroy != NULL) {
next = tctx_tree_next(&gctx->tctxs,
to_destroy);
tctx_tree_remove(&gctx->tctxs,
to_destroy);
- idalloctm(tsd_tsdn(tsd), to_destroy,
- NULL, true, true);
+ idalloctm(tsd, to_destroy,
+ tcache_get(tsd, false), true);
} else
next = NULL;
} while (next != NULL);
@@ -1246,26 +1219,19 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
gctx->nlimbo--;
if (prof_gctx_should_destroy(gctx)) {
gctx->nlimbo++;
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ malloc_mutex_unlock(gctx->lock);
prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
} else
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ malloc_mutex_unlock(gctx->lock);
}
}
-struct prof_tdata_merge_iter_arg_s {
- tsdn_t *tsdn;
- prof_cnt_t cnt_all;
-};
-
static prof_tdata_t *
-prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
- void *opaque)
+prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
{
- struct prof_tdata_merge_iter_arg_s *arg =
- (struct prof_tdata_merge_iter_arg_s *)opaque;
+ prof_cnt_t *cnt_all = (prof_cnt_t *)arg;
- malloc_mutex_lock(arg->tsdn, tdata->lock);
+ malloc_mutex_lock(tdata->lock);
if (!tdata->expired) {
size_t tabind;
union {
@@ -1277,17 +1243,17 @@ prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
&tctx.v);)
- prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata);
+ prof_tctx_merge_tdata(tctx.p, tdata);
- arg->cnt_all.curobjs += tdata->cnt_summed.curobjs;
- arg->cnt_all.curbytes += tdata->cnt_summed.curbytes;
+ cnt_all->curobjs += tdata->cnt_summed.curobjs;
+ cnt_all->curbytes += tdata->cnt_summed.curbytes;
if (opt_prof_accum) {
- arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs;
- arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes;
+ cnt_all->accumobjs += tdata->cnt_summed.accumobjs;
+ cnt_all->accumbytes += tdata->cnt_summed.accumbytes;
}
} else
tdata->dumping = false;
- malloc_mutex_unlock(arg->tsdn, tdata->lock);
+ malloc_mutex_unlock(tdata->lock);
return (NULL);
}
@@ -1316,7 +1282,7 @@ prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
#define prof_dump_header JEMALLOC_N(prof_dump_header_impl)
#endif
static bool
-prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all)
+prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all)
{
bool ret;
@@ -1327,10 +1293,10 @@ prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all)
cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes))
return (true);
- malloc_mutex_lock(tsdn, &tdatas_mtx);
+ malloc_mutex_lock(&tdatas_mtx);
ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
(void *)&propagate_err) != NULL);
- malloc_mutex_unlock(tsdn, &tdatas_mtx);
+ malloc_mutex_unlock(&tdatas_mtx);
return (ret);
}
#ifdef JEMALLOC_JET
@@ -1339,16 +1305,15 @@ prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all)
prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl);
#endif
+/* gctx->lock is held. */
static bool
-prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx,
- const prof_bt_t *bt, prof_gctx_tree_t *gctxs)
+prof_dump_gctx(bool propagate_err, prof_gctx_t *gctx, const prof_bt_t *bt,
+ prof_gctx_tree_t *gctxs)
{
bool ret;
unsigned i;
- struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg;
cassert(config_prof);
- malloc_mutex_assert_owner(tsdn, gctx->lock);
/* Avoid dumping such gctx's that have no useful data. */
if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
@@ -1382,10 +1347,8 @@ prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx,
goto label_return;
}
- prof_tctx_dump_iter_arg.tsdn = tsdn;
- prof_tctx_dump_iter_arg.propagate_err = propagate_err;
if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter,
- (void *)&prof_tctx_dump_iter_arg) != NULL) {
+ (void *)&propagate_err) != NULL) {
ret = true;
goto label_return;
}
@@ -1395,7 +1358,6 @@ label_return:
return (ret);
}
-#ifndef _WIN32
JEMALLOC_FORMAT_PRINTF(1, 2)
static int
prof_open_maps(const char *format, ...)
@@ -1411,18 +1373,6 @@ prof_open_maps(const char *format, ...)
return (mfd);
}
-#endif
-
-static int
-prof_getpid(void)
-{
-
-#ifdef _WIN32
- return (GetCurrentProcessId());
-#else
- return (getpid());
-#endif
-}
static bool
prof_dump_maps(bool propagate_err)
@@ -1433,11 +1383,9 @@ prof_dump_maps(bool propagate_err)
cassert(config_prof);
#ifdef __FreeBSD__
mfd = prof_open_maps("/proc/curproc/map");
-#elif defined(_WIN32)
- mfd = -1; // Not implemented
#else
{
- int pid = prof_getpid();
+ int pid = getpid();
mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
if (mfd == -1)
@@ -1478,66 +1426,39 @@ label_return:
return (ret);
}
-/*
- * See prof_sample_threshold_update() comment for why the body of this function
- * is conditionally compiled.
- */
static void
prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
const char *filename)
{
-#ifdef JEMALLOC_PROF
- /*
- * Scaling is equivalent AdjustSamples() in jeprof, but the result may
- * differ slightly from what jeprof reports, because here we scale the
- * summary values, whereas jeprof scales each context individually and
- * reports the sums of the scaled values.
- */
if (cnt_all->curbytes != 0) {
- double sample_period = (double)((uint64_t)1 << lg_prof_sample);
- double ratio = (((double)cnt_all->curbytes) /
- (double)cnt_all->curobjs) / sample_period;
- double scale_factor = 1.0 / (1.0 - exp(-ratio));
- uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes)
- * scale_factor);
- uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) *
- scale_factor);
-
- malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64
- " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n",
- curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs !=
- 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : "");
+ malloc_printf("<jemalloc>: Leak summary: %"FMTu64" byte%s, %"
+ FMTu64" object%s, %zu context%s\n",
+ cnt_all->curbytes, (cnt_all->curbytes != 1) ? "s" : "",
+ cnt_all->curobjs, (cnt_all->curobjs != 1) ? "s" : "",
+ leak_ngctx, (leak_ngctx != 1) ? "s" : "");
malloc_printf(
"<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
filename);
}
-#endif
}
-struct prof_gctx_dump_iter_arg_s {
- tsdn_t *tsdn;
- bool propagate_err;
-};
-
static prof_gctx_t *
-prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque)
+prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
{
prof_gctx_t *ret;
- struct prof_gctx_dump_iter_arg_s *arg =
- (struct prof_gctx_dump_iter_arg_s *)opaque;
+ bool propagate_err = *(bool *)arg;
- malloc_mutex_lock(arg->tsdn, gctx->lock);
+ malloc_mutex_lock(gctx->lock);
- if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt,
- gctxs)) {
+ if (prof_dump_gctx(propagate_err, gctx, &gctx->bt, gctxs)) {
ret = gctx;
goto label_return;
}
ret = NULL;
label_return:
- malloc_mutex_unlock(arg->tsdn, gctx->lock);
+ malloc_mutex_unlock(gctx->lock);
return (ret);
}
@@ -1545,14 +1466,13 @@ static bool
prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
{
prof_tdata_t *tdata;
- struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
+ prof_cnt_t cnt_all;
size_t tabind;
union {
prof_gctx_t *p;
void *v;
} gctx;
- struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
- struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg;
+ size_t leak_ngctx;
prof_gctx_tree_t gctxs;
cassert(config_prof);
@@ -1561,7 +1481,7 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
if (tdata == NULL)
return (true);
- malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
+ malloc_mutex_lock(&prof_dump_mtx);
prof_enter(tsd, tdata);
/*
@@ -1570,24 +1490,20 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
*/
gctx_tree_new(&gctxs);
for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);)
- prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, &gctxs);
+ prof_dump_gctx_prep(gctx.p, &gctxs);
/*
* Iterate over tdatas, and for the non-expired ones snapshot their tctx
* stats and merge them into the associated gctx's.
*/
- prof_tdata_merge_iter_arg.tsdn = tsd_tsdn(tsd);
- memset(&prof_tdata_merge_iter_arg.cnt_all, 0, sizeof(prof_cnt_t));
- malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
- tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter,
- (void *)&prof_tdata_merge_iter_arg);
- malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
+ memset(&cnt_all, 0, sizeof(prof_cnt_t));
+ malloc_mutex_lock(&tdatas_mtx);
+ tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, (void *)&cnt_all);
+ malloc_mutex_unlock(&tdatas_mtx);
/* Merge tctx stats into gctx's. */
- prof_gctx_merge_iter_arg.tsdn = tsd_tsdn(tsd);
- prof_gctx_merge_iter_arg.leak_ngctx = 0;
- gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter,
- (void *)&prof_gctx_merge_iter_arg);
+ leak_ngctx = 0;
+ gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter, (void *)&leak_ngctx);
prof_leave(tsd, tdata);
@@ -1596,15 +1512,12 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
goto label_open_close_error;
/* Dump profile header. */
- if (prof_dump_header(tsd_tsdn(tsd), propagate_err,
- &prof_tdata_merge_iter_arg.cnt_all))
+ if (prof_dump_header(propagate_err, &cnt_all))
goto label_write_error;
/* Dump per gctx profile stats. */
- prof_gctx_dump_iter_arg.tsdn = tsd_tsdn(tsd);
- prof_gctx_dump_iter_arg.propagate_err = propagate_err;
if (gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter,
- (void *)&prof_gctx_dump_iter_arg) != NULL)
+ (void *)&propagate_err) != NULL)
goto label_write_error;
/* Dump /proc/<pid>/maps if possible. */
@@ -1615,18 +1528,17 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
goto label_open_close_error;
prof_gctx_finish(tsd, &gctxs);
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
+ malloc_mutex_unlock(&prof_dump_mtx);
+
+ if (leakcheck)
+ prof_leakcheck(&cnt_all, leak_ngctx, filename);
- if (leakcheck) {
- prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all,
- prof_gctx_merge_iter_arg.leak_ngctx, filename);
- }
return (false);
label_write_error:
prof_dump_close(propagate_err);
label_open_close_error:
prof_gctx_finish(tsd, &gctxs);
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
+ malloc_mutex_unlock(&prof_dump_mtx);
return (true);
}
@@ -1642,12 +1554,12 @@ prof_dump_filename(char *filename, char v, uint64_t vseq)
/* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
"%s.%d.%"FMTu64".%c%"FMTu64".heap",
- opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq);
+ opt_prof_prefix, (int)getpid(), prof_dump_seq, v, vseq);
} else {
/* "<prefix>.<pid>.<seq>.<v>.heap" */
malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
"%s.%d.%"FMTu64".%c.heap",
- opt_prof_prefix, prof_getpid(), prof_dump_seq, v);
+ opt_prof_prefix, (int)getpid(), prof_dump_seq, v);
}
prof_dump_seq++;
}
@@ -1666,23 +1578,23 @@ prof_fdump(void)
return;
tsd = tsd_fetch();
- malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename, 'f', VSEQ_INVALID);
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ malloc_mutex_unlock(&prof_dump_seq_mtx);
prof_dump(tsd, false, filename, opt_prof_leak);
}
void
-prof_idump(tsdn_t *tsdn)
+prof_idump(void)
{
tsd_t *tsd;
prof_tdata_t *tdata;
cassert(config_prof);
- if (!prof_booted || tsdn_null(tsdn))
+ if (!prof_booted)
return;
- tsd = tsdn_tsd(tsdn);
+ tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, false);
if (tdata == NULL)
return;
@@ -1693,48 +1605,50 @@ prof_idump(tsdn_t *tsdn)
if (opt_prof_prefix[0] != '\0') {
char filename[PATH_MAX + 1];
- malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename, 'i', prof_dump_iseq);
prof_dump_iseq++;
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ malloc_mutex_unlock(&prof_dump_seq_mtx);
prof_dump(tsd, false, filename, false);
}
}
bool
-prof_mdump(tsd_t *tsd, const char *filename)
+prof_mdump(const char *filename)
{
+ tsd_t *tsd;
char filename_buf[DUMP_FILENAME_BUFSIZE];
cassert(config_prof);
if (!opt_prof || !prof_booted)
return (true);
+ tsd = tsd_fetch();
if (filename == NULL) {
/* No filename specified, so automatically generate one. */
if (opt_prof_prefix[0] == '\0')
return (true);
- malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
prof_dump_mseq++;
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ malloc_mutex_unlock(&prof_dump_seq_mtx);
filename = filename_buf;
}
return (prof_dump(tsd, true, filename, false));
}
void
-prof_gdump(tsdn_t *tsdn)
+prof_gdump(void)
{
tsd_t *tsd;
prof_tdata_t *tdata;
cassert(config_prof);
- if (!prof_booted || tsdn_null(tsdn))
+ if (!prof_booted)
return;
- tsd = tsdn_tsd(tsdn);
+ tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, false);
if (tdata == NULL)
return;
@@ -1745,10 +1659,10 @@ prof_gdump(tsdn_t *tsdn)
if (opt_prof_prefix[0] != '\0') {
char filename[DUMP_FILENAME_BUFSIZE];
- malloc_mutex_lock(tsdn, &prof_dump_seq_mtx);
+ malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename, 'u', prof_dump_useq);
prof_dump_useq++;
- malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx);
+ malloc_mutex_unlock(&prof_dump_seq_mtx);
prof_dump(tsd, false, filename, false);
}
}
@@ -1777,14 +1691,14 @@ prof_bt_keycomp(const void *k1, const void *k2)
}
JEMALLOC_INLINE_C uint64_t
-prof_thr_uid_alloc(tsdn_t *tsdn)
+prof_thr_uid_alloc(void)
{
uint64_t thr_uid;
- malloc_mutex_lock(tsdn, &next_thr_uid_mtx);
+ malloc_mutex_lock(&next_thr_uid_mtx);
thr_uid = next_thr_uid;
next_thr_uid++;
- malloc_mutex_unlock(tsdn, &next_thr_uid_mtx);
+ malloc_mutex_unlock(&next_thr_uid_mtx);
return (thr_uid);
}
@@ -1794,13 +1708,14 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
char *thread_name, bool active)
{
prof_tdata_t *tdata;
+ tcache_t *tcache;
cassert(config_prof);
/* Initialize an empty cache for this thread. */
- tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t),
- size2index(sizeof(prof_tdata_t)), false, NULL, true,
- arena_get(TSDN_NULL, 0, true), true);
+ tcache = tcache_get(tsd, true);
+ tdata = (prof_tdata_t *)iallocztm(tsd, sizeof(prof_tdata_t), false,
+ tcache, true, NULL);
if (tdata == NULL)
return (NULL);
@@ -1812,9 +1727,9 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
tdata->expired = false;
tdata->tctx_uid_next = 0;
- if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash,
- prof_bt_keycomp)) {
- idalloctm(tsd_tsdn(tsd), tdata, NULL, true, true);
+ if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS,
+ prof_bt_hash, prof_bt_keycomp)) {
+ idalloctm(tsd, tdata, tcache, true);
return (NULL);
}
@@ -1828,9 +1743,9 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
tdata->dumping = false;
tdata->active = active;
- malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
+ malloc_mutex_lock(&tdatas_mtx);
tdata_tree_insert(&tdatas, tdata);
- malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
+ malloc_mutex_unlock(&tdatas_mtx);
return (tdata);
}
@@ -1839,12 +1754,13 @@ prof_tdata_t *
prof_tdata_init(tsd_t *tsd)
{
- return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0,
- NULL, prof_thread_active_init_get(tsd_tsdn(tsd))));
+ return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(), 0, NULL,
+ prof_thread_active_init_get()));
}
+/* tdata->lock must be held. */
static bool
-prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached)
+prof_tdata_should_destroy(prof_tdata_t *tdata, bool even_if_attached)
{
if (tdata->attached && !even_if_attached)
@@ -1854,40 +1770,32 @@ prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached)
return (true);
}
-static bool
-prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
- bool even_if_attached)
-{
-
- malloc_mutex_assert_owner(tsdn, tdata->lock);
-
- return (prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
-}
-
+/* tdatas_mtx must be held. */
static void
prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
bool even_if_attached)
{
+ tcache_t *tcache;
- malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx);
+ assert(prof_tdata_should_destroy(tdata, even_if_attached));
+ assert(tsd_prof_tdata_get(tsd) != tdata);
tdata_tree_remove(&tdatas, tdata);
- assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
-
+ tcache = tcache_get(tsd, false);
if (tdata->thread_name != NULL)
- idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, true, true);
+ idalloctm(tsd, tdata->thread_name, tcache, true);
ckh_delete(tsd, &tdata->bt2tctx);
- idalloctm(tsd_tsdn(tsd), tdata, NULL, true, true);
+ idalloctm(tsd, tdata, tcache, true);
}
static void
prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached)
{
- malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
+ malloc_mutex_lock(&tdatas_mtx);
prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
- malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
+ malloc_mutex_unlock(&tdatas_mtx);
}
static void
@@ -1895,10 +1803,9 @@ prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
{
bool destroy_tdata;
- malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
+ malloc_mutex_lock(tdata->lock);
if (tdata->attached) {
- destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata,
- true);
+ destroy_tdata = prof_tdata_should_destroy(tdata, true);
/*
* Only detach if !destroy_tdata, because detaching would allow
* another thread to win the race to destroy tdata.
@@ -1908,7 +1815,7 @@ prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
tsd_prof_tdata_set(tsd, NULL);
} else
destroy_tdata = false;
- malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+ malloc_mutex_unlock(tdata->lock);
if (destroy_tdata)
prof_tdata_destroy(tsd, tdata, true);
}
@@ -1919,7 +1826,7 @@ prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
uint64_t thr_uid = tdata->thr_uid;
uint64_t thr_discrim = tdata->thr_discrim + 1;
char *thread_name = (tdata->thread_name != NULL) ?
- prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL;
+ prof_thread_name_alloc(tsd, tdata->thread_name) : NULL;
bool active = tdata->active;
prof_tdata_detach(tsd, tdata);
@@ -1928,18 +1835,18 @@ prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
}
static bool
-prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata)
+prof_tdata_expire(prof_tdata_t *tdata)
{
bool destroy_tdata;
- malloc_mutex_lock(tsdn, tdata->lock);
+ malloc_mutex_lock(tdata->lock);
if (!tdata->expired) {
tdata->expired = true;
destroy_tdata = tdata->attached ? false :
- prof_tdata_should_destroy(tsdn, tdata, false);
+ prof_tdata_should_destroy(tdata, false);
} else
destroy_tdata = false;
- malloc_mutex_unlock(tsdn, tdata->lock);
+ malloc_mutex_unlock(tdata->lock);
return (destroy_tdata);
}
@@ -1947,9 +1854,8 @@ prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata)
static prof_tdata_t *
prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
{
- tsdn_t *tsdn = (tsdn_t *)arg;
- return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL);
+ return (prof_tdata_expire(tdata) ? tdata : NULL);
}
void
@@ -1959,15 +1865,15 @@ prof_reset(tsd_t *tsd, size_t lg_sample)
assert(lg_sample < (sizeof(uint64_t) << 3));
- malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
- malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
+ malloc_mutex_lock(&prof_dump_mtx);
+ malloc_mutex_lock(&tdatas_mtx);
lg_prof_sample = lg_sample;
next = NULL;
do {
prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
- prof_tdata_reset_iter, (void *)tsd);
+ prof_tdata_reset_iter, NULL);
if (to_destroy != NULL) {
next = tdata_tree_next(&tdatas, to_destroy);
prof_tdata_destroy_locked(tsd, to_destroy, false);
@@ -1975,8 +1881,8 @@ prof_reset(tsd_t *tsd, size_t lg_sample)
next = NULL;
} while (next != NULL);
- malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
+ malloc_mutex_unlock(&tdatas_mtx);
+ malloc_mutex_unlock(&prof_dump_mtx);
}
void
@@ -1993,33 +1899,35 @@ prof_tdata_cleanup(tsd_t *tsd)
}
bool
-prof_active_get(tsdn_t *tsdn)
+prof_active_get(void)
{
bool prof_active_current;
- malloc_mutex_lock(tsdn, &prof_active_mtx);
+ malloc_mutex_lock(&prof_active_mtx);
prof_active_current = prof_active;
- malloc_mutex_unlock(tsdn, &prof_active_mtx);
+ malloc_mutex_unlock(&prof_active_mtx);
return (prof_active_current);
}
bool
-prof_active_set(tsdn_t *tsdn, bool active)
+prof_active_set(bool active)
{
bool prof_active_old;
- malloc_mutex_lock(tsdn, &prof_active_mtx);
+ malloc_mutex_lock(&prof_active_mtx);
prof_active_old = prof_active;
prof_active = active;
- malloc_mutex_unlock(tsdn, &prof_active_mtx);
+ malloc_mutex_unlock(&prof_active_mtx);
return (prof_active_old);
}
const char *
-prof_thread_name_get(tsd_t *tsd)
+prof_thread_name_get(void)
{
+ tsd_t *tsd;
prof_tdata_t *tdata;
+ tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, true);
if (tdata == NULL)
return ("");
@@ -2027,7 +1935,7 @@ prof_thread_name_get(tsd_t *tsd)
}
static char *
-prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name)
+prof_thread_name_alloc(tsd_t *tsd, const char *thread_name)
{
char *ret;
size_t size;
@@ -2039,8 +1947,7 @@ prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name)
if (size == 1)
return ("");
- ret = iallocztm(tsdn, size, size2index(size), false, NULL, true,
- arena_get(TSDN_NULL, 0, true), true);
+ ret = iallocztm(tsd, size, false, tcache_get(tsd, true), true, NULL);
if (ret == NULL)
return (NULL);
memcpy(ret, thread_name, size);
@@ -2067,12 +1974,13 @@ prof_thread_name_set(tsd_t *tsd, const char *thread_name)
return (EFAULT);
}
- s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name);
+ s = prof_thread_name_alloc(tsd, thread_name);
if (s == NULL)
return (EAGAIN);
if (tdata->thread_name != NULL) {
- idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, true, true);
+ idalloctm(tsd, tdata->thread_name, tcache_get(tsd, false),
+ true);
tdata->thread_name = NULL;
}
if (strlen(s) > 0)
@@ -2081,10 +1989,12 @@ prof_thread_name_set(tsd_t *tsd, const char *thread_name)
}
bool
-prof_thread_active_get(tsd_t *tsd)
+prof_thread_active_get(void)
{
+ tsd_t *tsd;
prof_tdata_t *tdata;
+ tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, true);
if (tdata == NULL)
return (false);
@@ -2092,10 +2002,12 @@ prof_thread_active_get(tsd_t *tsd)
}
bool
-prof_thread_active_set(tsd_t *tsd, bool active)
+prof_thread_active_set(bool active)
{
+ tsd_t *tsd;
prof_tdata_t *tdata;
+ tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, true);
if (tdata == NULL)
return (true);
@@ -2104,48 +2016,48 @@ prof_thread_active_set(tsd_t *tsd, bool active)
}
bool
-prof_thread_active_init_get(tsdn_t *tsdn)
+prof_thread_active_init_get(void)
{
bool active_init;
- malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
+ malloc_mutex_lock(&prof_thread_active_init_mtx);
active_init = prof_thread_active_init;
- malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
+ malloc_mutex_unlock(&prof_thread_active_init_mtx);
return (active_init);
}
bool
-prof_thread_active_init_set(tsdn_t *tsdn, bool active_init)
+prof_thread_active_init_set(bool active_init)
{
bool active_init_old;
- malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
+ malloc_mutex_lock(&prof_thread_active_init_mtx);
active_init_old = prof_thread_active_init;
prof_thread_active_init = active_init;
- malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
+ malloc_mutex_unlock(&prof_thread_active_init_mtx);
return (active_init_old);
}
bool
-prof_gdump_get(tsdn_t *tsdn)
+prof_gdump_get(void)
{
bool prof_gdump_current;
- malloc_mutex_lock(tsdn, &prof_gdump_mtx);
+ malloc_mutex_lock(&prof_gdump_mtx);
prof_gdump_current = prof_gdump_val;
- malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
+ malloc_mutex_unlock(&prof_gdump_mtx);
return (prof_gdump_current);
}
bool
-prof_gdump_set(tsdn_t *tsdn, bool gdump)
+prof_gdump_set(bool gdump)
{
bool prof_gdump_old;
- malloc_mutex_lock(tsdn, &prof_gdump_mtx);
+ malloc_mutex_lock(&prof_gdump_mtx);
prof_gdump_old = prof_gdump_val;
prof_gdump_val = gdump;
- malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
+ malloc_mutex_unlock(&prof_gdump_mtx);
return (prof_gdump_old);
}
@@ -2186,54 +2098,47 @@ prof_boot1(void)
}
bool
-prof_boot2(tsd_t *tsd)
+prof_boot2(void)
{
cassert(config_prof);
if (opt_prof) {
+ tsd_t *tsd;
unsigned i;
lg_prof_sample = opt_lg_prof_sample;
prof_active = opt_prof_active;
- if (malloc_mutex_init(&prof_active_mtx, "prof_active",
- WITNESS_RANK_PROF_ACTIVE))
+ if (malloc_mutex_init(&prof_active_mtx))
return (true);
prof_gdump_val = opt_prof_gdump;
- if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
- WITNESS_RANK_PROF_GDUMP))
+ if (malloc_mutex_init(&prof_gdump_mtx))
return (true);
prof_thread_active_init = opt_prof_thread_active_init;
- if (malloc_mutex_init(&prof_thread_active_init_mtx,
- "prof_thread_active_init",
- WITNESS_RANK_PROF_THREAD_ACTIVE_INIT))
+ if (malloc_mutex_init(&prof_thread_active_init_mtx))
return (true);
+ tsd = tsd_fetch();
if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
prof_bt_keycomp))
return (true);
- if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
- WITNESS_RANK_PROF_BT2GCTX))
+ if (malloc_mutex_init(&bt2gctx_mtx))
return (true);
tdata_tree_new(&tdatas);
- if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
- WITNESS_RANK_PROF_TDATAS))
+ if (malloc_mutex_init(&tdatas_mtx))
return (true);
next_thr_uid = 0;
- if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
- WITNESS_RANK_PROF_NEXT_THR_UID))
+ if (malloc_mutex_init(&next_thr_uid_mtx))
return (true);
- if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq",
- WITNESS_RANK_PROF_DUMP_SEQ))
+ if (malloc_mutex_init(&prof_dump_seq_mtx))
return (true);
- if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
- WITNESS_RANK_PROF_DUMP))
+ if (malloc_mutex_init(&prof_dump_mtx))
return (true);
if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
@@ -2243,23 +2148,21 @@ prof_boot2(tsd_t *tsd)
abort();
}
- gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
- PROF_NCTX_LOCKS * sizeof(malloc_mutex_t));
+ gctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS *
+ sizeof(malloc_mutex_t));
if (gctx_locks == NULL)
return (true);
for (i = 0; i < PROF_NCTX_LOCKS; i++) {
- if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
- WITNESS_RANK_PROF_GCTX))
+ if (malloc_mutex_init(&gctx_locks[i]))
return (true);
}
- tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
- PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t));
+ tdata_locks = (malloc_mutex_t *)base_alloc(PROF_NTDATA_LOCKS *
+ sizeof(malloc_mutex_t));
if (tdata_locks == NULL)
return (true);
for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
- if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
- WITNESS_RANK_PROF_TDATA))
+ if (malloc_mutex_init(&tdata_locks[i]))
return (true);
}
}
@@ -2278,77 +2181,56 @@ prof_boot2(tsd_t *tsd)
}
void
-prof_prefork0(tsdn_t *tsdn)
+prof_prefork(void)
{
if (opt_prof) {
unsigned i;
- malloc_mutex_prefork(tsdn, &prof_dump_mtx);
- malloc_mutex_prefork(tsdn, &bt2gctx_mtx);
- malloc_mutex_prefork(tsdn, &tdatas_mtx);
- for (i = 0; i < PROF_NTDATA_LOCKS; i++)
- malloc_mutex_prefork(tsdn, &tdata_locks[i]);
+ malloc_mutex_prefork(&tdatas_mtx);
+ malloc_mutex_prefork(&bt2gctx_mtx);
+ malloc_mutex_prefork(&next_thr_uid_mtx);
+ malloc_mutex_prefork(&prof_dump_seq_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++)
- malloc_mutex_prefork(tsdn, &gctx_locks[i]);
- }
-}
-
-void
-prof_prefork1(tsdn_t *tsdn)
-{
-
- if (opt_prof) {
- malloc_mutex_prefork(tsdn, &prof_active_mtx);
- malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx);
- malloc_mutex_prefork(tsdn, &prof_gdump_mtx);
- malloc_mutex_prefork(tsdn, &next_thr_uid_mtx);
- malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx);
+ malloc_mutex_prefork(&gctx_locks[i]);
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++)
+ malloc_mutex_prefork(&tdata_locks[i]);
}
}
void
-prof_postfork_parent(tsdn_t *tsdn)
+prof_postfork_parent(void)
{
if (opt_prof) {
unsigned i;
- malloc_mutex_postfork_parent(tsdn,
- &prof_thread_active_init_mtx);
- malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx);
- malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx);
- malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx);
- malloc_mutex_postfork_parent(tsdn, &prof_active_mtx);
- for (i = 0; i < PROF_NCTX_LOCKS; i++)
- malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]);
for (i = 0; i < PROF_NTDATA_LOCKS; i++)
- malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]);
- malloc_mutex_postfork_parent(tsdn, &tdatas_mtx);
- malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx);
- malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx);
+ malloc_mutex_postfork_parent(&tdata_locks[i]);
+ for (i = 0; i < PROF_NCTX_LOCKS; i++)
+ malloc_mutex_postfork_parent(&gctx_locks[i]);
+ malloc_mutex_postfork_parent(&prof_dump_seq_mtx);
+ malloc_mutex_postfork_parent(&next_thr_uid_mtx);
+ malloc_mutex_postfork_parent(&bt2gctx_mtx);
+ malloc_mutex_postfork_parent(&tdatas_mtx);
}
}
void
-prof_postfork_child(tsdn_t *tsdn)
+prof_postfork_child(void)
{
if (opt_prof) {
unsigned i;
- malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx);
- malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx);
- malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx);
- malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx);
- malloc_mutex_postfork_child(tsdn, &prof_active_mtx);
- for (i = 0; i < PROF_NCTX_LOCKS; i++)
- malloc_mutex_postfork_child(tsdn, &gctx_locks[i]);
for (i = 0; i < PROF_NTDATA_LOCKS; i++)
- malloc_mutex_postfork_child(tsdn, &tdata_locks[i]);
- malloc_mutex_postfork_child(tsdn, &tdatas_mtx);
- malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx);
- malloc_mutex_postfork_child(tsdn, &prof_dump_mtx);
+ malloc_mutex_postfork_child(&tdata_locks[i]);
+ for (i = 0; i < PROF_NCTX_LOCKS; i++)
+ malloc_mutex_postfork_child(&gctx_locks[i]);
+ malloc_mutex_postfork_child(&prof_dump_seq_mtx);
+ malloc_mutex_postfork_child(&next_thr_uid_mtx);
+ malloc_mutex_postfork_child(&bt2gctx_mtx);
+ malloc_mutex_postfork_child(&tdatas_mtx);
}
}
diff --git a/deps/jemalloc/src/quarantine.c b/deps/jemalloc/src/quarantine.c
index 18903fb5c..6c43dfcaa 100644
--- a/deps/jemalloc/src/quarantine.c
+++ b/deps/jemalloc/src/quarantine.c
@@ -13,22 +13,22 @@
/* Function prototypes for non-inline static functions. */
static quarantine_t *quarantine_grow(tsd_t *tsd, quarantine_t *quarantine);
-static void quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine);
-static void quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine,
+static void quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine);
+static void quarantine_drain(tsd_t *tsd, quarantine_t *quarantine,
size_t upper_bound);
/******************************************************************************/
static quarantine_t *
-quarantine_init(tsdn_t *tsdn, size_t lg_maxobjs)
+quarantine_init(tsd_t *tsd, size_t lg_maxobjs)
{
quarantine_t *quarantine;
- size_t size;
- size = offsetof(quarantine_t, objs) + ((ZU(1) << lg_maxobjs) *
- sizeof(quarantine_obj_t));
- quarantine = (quarantine_t *)iallocztm(tsdn, size, size2index(size),
- false, NULL, true, arena_get(TSDN_NULL, 0, true), true);
+ assert(tsd_nominal(tsd));
+
+ quarantine = (quarantine_t *)iallocztm(tsd, offsetof(quarantine_t, objs)
+ + ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)), false,
+ tcache_get(tsd, true), true, NULL);
if (quarantine == NULL)
return (NULL);
quarantine->curbytes = 0;
@@ -47,7 +47,7 @@ quarantine_alloc_hook_work(tsd_t *tsd)
if (!tsd_nominal(tsd))
return;
- quarantine = quarantine_init(tsd_tsdn(tsd), LG_MAXOBJS_INIT);
+ quarantine = quarantine_init(tsd, LG_MAXOBJS_INIT);
/*
* Check again whether quarantine has been initialized, because
* quarantine_init() may have triggered recursive initialization.
@@ -55,7 +55,7 @@ quarantine_alloc_hook_work(tsd_t *tsd)
if (tsd_quarantine_get(tsd) == NULL)
tsd_quarantine_set(tsd, quarantine);
else
- idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
+ idalloctm(tsd, quarantine, tcache_get(tsd, false), true);
}
static quarantine_t *
@@ -63,9 +63,9 @@ quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
{
quarantine_t *ret;
- ret = quarantine_init(tsd_tsdn(tsd), quarantine->lg_maxobjs + 1);
+ ret = quarantine_init(tsd, quarantine->lg_maxobjs + 1);
if (ret == NULL) {
- quarantine_drain_one(tsd_tsdn(tsd), quarantine);
+ quarantine_drain_one(tsd, quarantine);
return (quarantine);
}
@@ -87,18 +87,18 @@ quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
sizeof(quarantine_obj_t));
}
- idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
+ idalloctm(tsd, quarantine, tcache_get(tsd, false), true);
tsd_quarantine_set(tsd, ret);
return (ret);
}
static void
-quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine)
+quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine)
{
quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
- assert(obj->usize == isalloc(tsdn, obj->ptr, config_prof));
- idalloctm(tsdn, obj->ptr, NULL, false, true);
+ assert(obj->usize == isalloc(obj->ptr, config_prof));
+ idalloctm(tsd, obj->ptr, NULL, false);
quarantine->curbytes -= obj->usize;
quarantine->curobjs--;
quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
@@ -106,24 +106,24 @@ quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine)
}
static void
-quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine, size_t upper_bound)
+quarantine_drain(tsd_t *tsd, quarantine_t *quarantine, size_t upper_bound)
{
while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0)
- quarantine_drain_one(tsdn, quarantine);
+ quarantine_drain_one(tsd, quarantine);
}
void
quarantine(tsd_t *tsd, void *ptr)
{
quarantine_t *quarantine;
- size_t usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
+ size_t usize = isalloc(ptr, config_prof);
cassert(config_fill);
assert(opt_quarantine);
if ((quarantine = tsd_quarantine_get(tsd)) == NULL) {
- idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true);
+ idalloctm(tsd, ptr, NULL, false);
return;
}
/*
@@ -133,7 +133,7 @@ quarantine(tsd_t *tsd, void *ptr)
if (quarantine->curbytes + usize > opt_quarantine) {
size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
- usize : 0;
- quarantine_drain(tsd_tsdn(tsd), quarantine, upper_bound);
+ quarantine_drain(tsd, quarantine, upper_bound);
}
/* Grow the quarantine ring buffer if it's full. */
if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
@@ -158,11 +158,11 @@ quarantine(tsd_t *tsd, void *ptr)
&& usize <= SMALL_MAXCLASS)
arena_quarantine_junk_small(ptr, usize);
else
- memset(ptr, JEMALLOC_FREE_JUNK, usize);
+ memset(ptr, 0x5a, usize);
}
} else {
assert(quarantine->curbytes == 0);
- idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true);
+ idalloctm(tsd, ptr, NULL, false);
}
}
@@ -176,8 +176,8 @@ quarantine_cleanup(tsd_t *tsd)
quarantine = tsd_quarantine_get(tsd);
if (quarantine != NULL) {
- quarantine_drain(tsd_tsdn(tsd), quarantine, 0);
- idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
+ quarantine_drain(tsd, quarantine, 0);
+ idalloctm(tsd, quarantine, tcache_get(tsd, false), true);
tsd_quarantine_set(tsd, NULL);
}
}
diff --git a/deps/jemalloc/src/rtree.c b/deps/jemalloc/src/rtree.c
index f2e2997d5..af0d97e75 100644
--- a/deps/jemalloc/src/rtree.c
+++ b/deps/jemalloc/src/rtree.c
@@ -15,8 +15,6 @@ rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
{
unsigned bits_in_leaf, height, i;
- assert(RTREE_HEIGHT_MAX == ((ZU(1) << (LG_SIZEOF_PTR+3)) /
- RTREE_BITS_PER_LEVEL));
assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3));
bits_in_leaf = (bits % RTREE_BITS_PER_LEVEL) == 0 ? RTREE_BITS_PER_LEVEL
@@ -96,15 +94,12 @@ rtree_node_init(rtree_t *rtree, unsigned level, rtree_node_elm_t **elmp)
rtree_node_elm_t *node;
if (atomic_cas_p((void **)elmp, NULL, RTREE_NODE_INITIALIZING)) {
- spin_t spinner;
-
/*
* Another thread is already in the process of initializing.
* Spin-wait until initialization is complete.
*/
- spin_init(&spinner);
do {
- spin_adaptive(&spinner);
+ CPU_SPINWAIT;
node = atomic_read_p((void **)elmp);
} while (node == RTREE_NODE_INITIALIZING);
} else {
@@ -128,5 +123,5 @@ rtree_node_elm_t *
rtree_child_read_hard(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level)
{
- return (rtree_node_init(rtree, level+1, &elm->child));
+ return (rtree_node_init(rtree, level, &elm->child));
}
diff --git a/deps/jemalloc/src/spin.c b/deps/jemalloc/src/spin.c
deleted file mode 100644
index 5242d95aa..000000000
--- a/deps/jemalloc/src/spin.c
+++ /dev/null
@@ -1,2 +0,0 @@
-#define JEMALLOC_SPIN_C_
-#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/deps/jemalloc/src/stats.c b/deps/jemalloc/src/stats.c
index 1360f3bd0..154c3e74c 100755..100644
--- a/deps/jemalloc/src/stats.c
+++ b/deps/jemalloc/src/stats.c
@@ -3,7 +3,7 @@
#define CTL_GET(n, v, t) do { \
size_t sz = sizeof(t); \
- xmallctl(n, (void *)v, &sz, NULL, 0); \
+ xmallctl(n, v, &sz, NULL, 0); \
} while (0)
#define CTL_M2_GET(n, i, v, t) do { \
@@ -12,7 +12,7 @@
size_t sz = sizeof(t); \
xmallctlnametomib(n, mib, &miblen); \
mib[2] = (i); \
- xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \
+ xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)
#define CTL_M2_M4_GET(n, i, j, v, t) do { \
@@ -22,7 +22,7 @@
xmallctlnametomib(n, mib, &miblen); \
mib[2] = (i); \
mib[4] = (j); \
- xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \
+ xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
} while (0)
/******************************************************************************/
@@ -33,106 +33,85 @@ bool opt_stats_print = false;
size_t stats_cactive = 0;
/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static void stats_arena_bins_print(void (*write_cb)(void *, const char *),
+ void *cbopaque, unsigned i);
+static void stats_arena_lruns_print(void (*write_cb)(void *, const char *),
+ void *cbopaque, unsigned i);
+static void stats_arena_hchunks_print(
+ void (*write_cb)(void *, const char *), void *cbopaque, unsigned i);
+static void stats_arena_print(void (*write_cb)(void *, const char *),
+ void *cbopaque, unsigned i, bool bins, bool large, bool huge);
+
+/******************************************************************************/
static void
stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
- bool json, bool large, bool huge, unsigned i)
+ unsigned i)
{
size_t page;
- bool config_tcache, in_gap, in_gap_prev;
+ bool config_tcache, in_gap;
unsigned nbins, j;
CTL_GET("arenas.page", &page, size_t);
- CTL_GET("arenas.nbins", &nbins, unsigned);
- if (json) {
+ CTL_GET("config.tcache", &config_tcache, bool);
+ if (config_tcache) {
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"bins\": [\n");
+ "bins: size ind allocated nmalloc"
+ " ndalloc nrequests curregs curruns regs"
+ " pgs util nfills nflushes newruns"
+ " reruns\n");
} else {
- CTL_GET("config.tcache", &config_tcache, bool);
- if (config_tcache) {
- malloc_cprintf(write_cb, cbopaque,
- "bins: size ind allocated nmalloc"
- " ndalloc nrequests curregs"
- " curruns regs pgs util nfills"
- " nflushes newruns reruns\n");
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "bins: size ind allocated nmalloc"
- " ndalloc nrequests curregs"
- " curruns regs pgs util newruns"
- " reruns\n");
- }
+ malloc_cprintf(write_cb, cbopaque,
+ "bins: size ind allocated nmalloc"
+ " ndalloc nrequests curregs curruns regs"
+ " pgs util newruns reruns\n");
}
+ CTL_GET("arenas.nbins", &nbins, unsigned);
for (j = 0, in_gap = false; j < nbins; j++) {
uint64_t nruns;
- size_t reg_size, run_size, curregs;
- size_t curruns;
- uint32_t nregs;
- uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
- uint64_t nreruns;
CTL_M2_M4_GET("stats.arenas.0.bins.0.nruns", i, j, &nruns,
uint64_t);
- in_gap_prev = in_gap;
- in_gap = (nruns == 0);
-
- if (!json && in_gap_prev && !in_gap) {
- malloc_cprintf(write_cb, cbopaque,
- " ---\n");
- }
-
- CTL_M2_GET("arenas.bin.0.size", j, &reg_size, size_t);
- CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t);
- CTL_M2_GET("arenas.bin.0.run_size", j, &run_size, size_t);
-
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, &nmalloc,
- uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, &ndalloc,
- uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, &curregs,
- size_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j,
- &nrequests, uint64_t);
- if (config_tcache) {
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j,
- &nfills, uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j,
- &nflushes, uint64_t);
- }
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nreruns", i, j, &nreruns,
- uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.curruns", i, j, &curruns,
- size_t);
+ if (nruns == 0)
+ in_gap = true;
+ else {
+ size_t reg_size, run_size, curregs, availregs, milli;
+ size_t curruns;
+ uint32_t nregs;
+ uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
+ uint64_t reruns;
+ char util[6]; /* "x.yyy". */
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t{\n"
- "\t\t\t\t\t\t\"nmalloc\": %"FMTu64",\n"
- "\t\t\t\t\t\t\"ndalloc\": %"FMTu64",\n"
- "\t\t\t\t\t\t\"curregs\": %zu,\n"
- "\t\t\t\t\t\t\"nrequests\": %"FMTu64",\n",
- nmalloc,
- ndalloc,
- curregs,
- nrequests);
- if (config_tcache) {
+ if (in_gap) {
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\t\"nfills\": %"FMTu64",\n"
- "\t\t\t\t\t\t\"nflushes\": %"FMTu64",\n",
- nfills,
- nflushes);
+ " ---\n");
+ in_gap = false;
}
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\t\"nreruns\": %"FMTu64",\n"
- "\t\t\t\t\t\t\"curruns\": %zu\n"
- "\t\t\t\t\t}%s\n",
- nreruns,
- curruns,
- (j + 1 < nbins) ? "," : "");
- } else if (!in_gap) {
- size_t availregs, milli;
- char util[6]; /* "x.yyy". */
+ CTL_M2_GET("arenas.bin.0.size", j, &reg_size, size_t);
+ CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t);
+ CTL_M2_GET("arenas.bin.0.run_size", j, &run_size,
+ size_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j,
+ &nmalloc, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j,
+ &ndalloc, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j,
+ &curregs, size_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j,
+ &nrequests, uint64_t);
+ if (config_tcache) {
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i,
+ j, &nfills, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes",
+ i, j, &nflushes, uint64_t);
+ }
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nreruns", i, j,
+ &reruns, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.curruns", i, j,
+ &curruns, size_t);
availregs = nregs * curruns;
milli = (availregs != 0) ? (1000 * curregs) / availregs
@@ -159,7 +138,7 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
reg_size, j, curregs * reg_size, nmalloc,
ndalloc, nrequests, curregs, curruns, nregs,
run_size / page, util, nfills, nflushes,
- nruns, nreruns);
+ nruns, reruns);
} else {
malloc_cprintf(write_cb, cbopaque,
"%20zu %3u %12zu %12"FMTu64
@@ -168,38 +147,28 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
" %12"FMTu64"\n",
reg_size, j, curregs * reg_size, nmalloc,
ndalloc, nrequests, curregs, curruns, nregs,
- run_size / page, util, nruns, nreruns);
+ run_size / page, util, nruns, reruns);
}
}
}
- if (json) {
+ if (in_gap) {
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t]%s\n", (large || huge) ? "," : "");
- } else {
- if (in_gap) {
- malloc_cprintf(write_cb, cbopaque,
- " ---\n");
- }
+ " ---\n");
}
}
static void
stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
- bool json, bool huge, unsigned i)
+ unsigned i)
{
unsigned nbins, nlruns, j;
- bool in_gap, in_gap_prev;
+ bool in_gap;
+ malloc_cprintf(write_cb, cbopaque,
+ "large: size ind allocated nmalloc ndalloc"
+ " nrequests curruns\n");
CTL_GET("arenas.nbins", &nbins, unsigned);
CTL_GET("arenas.nlruns", &nlruns, unsigned);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"lruns\": [\n");
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "large: size ind allocated nmalloc"
- " ndalloc nrequests curruns\n");
- }
for (j = 0, in_gap = false; j < nlruns; j++) {
uint64_t nmalloc, ndalloc, nrequests;
size_t run_size, curruns;
@@ -210,25 +179,17 @@ stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
uint64_t);
CTL_M2_M4_GET("stats.arenas.0.lruns.0.nrequests", i, j,
&nrequests, uint64_t);
- in_gap_prev = in_gap;
- in_gap = (nrequests == 0);
-
- if (!json && in_gap_prev && !in_gap) {
- malloc_cprintf(write_cb, cbopaque,
- " ---\n");
- }
-
- CTL_M2_GET("arenas.lrun.0.size", j, &run_size, size_t);
- CTL_M2_M4_GET("stats.arenas.0.lruns.0.curruns", i, j, &curruns,
- size_t);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t{\n"
- "\t\t\t\t\t\t\"curruns\": %zu\n"
- "\t\t\t\t\t}%s\n",
- curruns,
- (j + 1 < nlruns) ? "," : "");
- } else if (!in_gap) {
+ if (nrequests == 0)
+ in_gap = true;
+ else {
+ CTL_M2_GET("arenas.lrun.0.size", j, &run_size, size_t);
+ CTL_M2_M4_GET("stats.arenas.0.lruns.0.curruns", i, j,
+ &curruns, size_t);
+ if (in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ " ---\n");
+ in_gap = false;
+ }
malloc_cprintf(write_cb, cbopaque,
"%20zu %3u %12zu %12"FMTu64" %12"FMTu64
" %12"FMTu64" %12zu\n",
@@ -236,35 +197,25 @@ stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
ndalloc, nrequests, curruns);
}
}
- if (json) {
+ if (in_gap) {
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t]%s\n", huge ? "," : "");
- } else {
- if (in_gap) {
- malloc_cprintf(write_cb, cbopaque,
- " ---\n");
- }
+ " ---\n");
}
}
static void
stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
- void *cbopaque, bool json, unsigned i)
+ void *cbopaque, unsigned i)
{
unsigned nbins, nlruns, nhchunks, j;
- bool in_gap, in_gap_prev;
+ bool in_gap;
+ malloc_cprintf(write_cb, cbopaque,
+ "huge: size ind allocated nmalloc ndalloc"
+ " nrequests curhchunks\n");
CTL_GET("arenas.nbins", &nbins, unsigned);
CTL_GET("arenas.nlruns", &nlruns, unsigned);
CTL_GET("arenas.nhchunks", &nhchunks, unsigned);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"hchunks\": [\n");
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "huge: size ind allocated nmalloc"
- " ndalloc nrequests curhchunks\n");
- }
for (j = 0, in_gap = false; j < nhchunks; j++) {
uint64_t nmalloc, ndalloc, nrequests;
size_t hchunk_size, curhchunks;
@@ -275,25 +226,18 @@ stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
&ndalloc, uint64_t);
CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nrequests", i, j,
&nrequests, uint64_t);
- in_gap_prev = in_gap;
- in_gap = (nrequests == 0);
-
- if (!json && in_gap_prev && !in_gap) {
- malloc_cprintf(write_cb, cbopaque,
- " ---\n");
- }
-
- CTL_M2_GET("arenas.hchunk.0.size", j, &hchunk_size, size_t);
- CTL_M2_M4_GET("stats.arenas.0.hchunks.0.curhchunks", i, j,
- &curhchunks, size_t);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t{\n"
- "\t\t\t\t\t\t\"curhchunks\": %zu\n"
- "\t\t\t\t\t}%s\n",
- curhchunks,
- (j + 1 < nhchunks) ? "," : "");
- } else if (!in_gap) {
+ if (nrequests == 0)
+ in_gap = true;
+ else {
+ CTL_M2_GET("arenas.hchunk.0.size", j, &hchunk_size,
+ size_t);
+ CTL_M2_M4_GET("stats.arenas.0.hchunks.0.curhchunks", i,
+ j, &curhchunks, size_t);
+ if (in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ " ---\n");
+ in_gap = false;
+ }
malloc_cprintf(write_cb, cbopaque,
"%20zu %3u %12zu %12"FMTu64" %12"FMTu64
" %12"FMTu64" %12zu\n",
@@ -302,25 +246,20 @@ stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
nrequests, curhchunks);
}
}
- if (json) {
+ if (in_gap) {
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t]\n");
- } else {
- if (in_gap) {
- malloc_cprintf(write_cb, cbopaque,
- " ---\n");
- }
+ " ---\n");
}
}
static void
stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
- bool json, unsigned i, bool bins, bool large, bool huge)
+ unsigned i, bool bins, bool large, bool huge)
{
unsigned nthreads;
const char *dss;
- ssize_t lg_dirty_mult, decay_time;
- size_t page, pactive, pdirty, mapped, retained;
+ ssize_t lg_dirty_mult;
+ size_t page, pactive, pdirty, mapped;
size_t metadata_mapped, metadata_allocated;
uint64_t npurge, nmadvise, purged;
size_t small_allocated;
@@ -333,435 +272,240 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
CTL_GET("arenas.page", &page, size_t);
CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"nthreads\": %u,\n", nthreads);
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "assigned threads: %u\n", nthreads);
- }
-
+ malloc_cprintf(write_cb, cbopaque,
+ "assigned threads: %u\n", nthreads);
CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"dss\": \"%s\",\n", dss);
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "dss allocation precedence: %s\n", dss);
- }
-
+ malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n",
+ dss);
CTL_M2_GET("stats.arenas.0.lg_dirty_mult", i, &lg_dirty_mult, ssize_t);
- if (json) {
+ if (lg_dirty_mult >= 0) {
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"lg_dirty_mult\": %zd,\n", lg_dirty_mult);
+ "min active:dirty page ratio: %u:1\n",
+ (1U << lg_dirty_mult));
} else {
- if (opt_purge == purge_mode_ratio) {
- if (lg_dirty_mult >= 0) {
- malloc_cprintf(write_cb, cbopaque,
- "min active:dirty page ratio: %u:1\n",
- (1U << lg_dirty_mult));
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "min active:dirty page ratio: N/A\n");
- }
- }
- }
-
- CTL_M2_GET("stats.arenas.0.decay_time", i, &decay_time, ssize_t);
- if (json) {
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"decay_time\": %zd,\n", decay_time);
- } else {
- if (opt_purge == purge_mode_decay) {
- if (decay_time >= 0) {
- malloc_cprintf(write_cb, cbopaque,
- "decay time: %zd\n", decay_time);
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "decay time: N/A\n");
- }
- }
+ "min active:dirty page ratio: N/A\n");
}
-
CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t);
CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t);
CTL_M2_GET("stats.arenas.0.npurge", i, &npurge, uint64_t);
CTL_M2_GET("stats.arenas.0.nmadvise", i, &nmadvise, uint64_t);
CTL_M2_GET("stats.arenas.0.purged", i, &purged, uint64_t);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"pactive\": %zu,\n", pactive);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"pdirty\": %zu,\n", pdirty);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"npurge\": %"FMTu64",\n", npurge);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"nmadvise\": %"FMTu64",\n", nmadvise);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"purged\": %"FMTu64",\n", purged);
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "purging: dirty: %zu, sweeps: %"FMTu64", madvises: %"FMTu64
- ", purged: %"FMTu64"\n", pdirty, npurge, nmadvise, purged);
- }
-
+ malloc_cprintf(write_cb, cbopaque,
+ "dirty pages: %zu:%zu active:dirty, %"FMTu64" sweep%s, %"FMTu64
+ " madvise%s, %"FMTu64" purged\n", pactive, pdirty, npurge, npurge ==
+ 1 ? "" : "s", nmadvise, nmadvise == 1 ? "" : "s", purged);
+
+ malloc_cprintf(write_cb, cbopaque,
+ " allocated nmalloc ndalloc"
+ " nrequests\n");
CTL_M2_GET("stats.arenas.0.small.allocated", i, &small_allocated,
size_t);
CTL_M2_GET("stats.arenas.0.small.nmalloc", i, &small_nmalloc, uint64_t);
CTL_M2_GET("stats.arenas.0.small.ndalloc", i, &small_ndalloc, uint64_t);
CTL_M2_GET("stats.arenas.0.small.nrequests", i, &small_nrequests,
uint64_t);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"small\": {\n");
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"allocated\": %zu,\n", small_allocated);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", small_nmalloc);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", small_ndalloc);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", small_nrequests);
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t},\n");
- } else {
- malloc_cprintf(write_cb, cbopaque,
- " allocated nmalloc"
- " ndalloc nrequests\n");
- malloc_cprintf(write_cb, cbopaque,
- "small: %12zu %12"FMTu64" %12"FMTu64
- " %12"FMTu64"\n",
- small_allocated, small_nmalloc, small_ndalloc,
- small_nrequests);
- }
-
+ malloc_cprintf(write_cb, cbopaque,
+ "small: %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64"\n",
+ small_allocated, small_nmalloc, small_ndalloc, small_nrequests);
CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated,
size_t);
CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t);
CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t);
CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests,
uint64_t);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"large\": {\n");
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"allocated\": %zu,\n", large_allocated);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", large_nmalloc);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", large_ndalloc);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", large_nrequests);
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t},\n");
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "large: %12zu %12"FMTu64" %12"FMTu64
- " %12"FMTu64"\n",
- large_allocated, large_nmalloc, large_ndalloc,
- large_nrequests);
- }
-
+ malloc_cprintf(write_cb, cbopaque,
+ "large: %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64"\n",
+ large_allocated, large_nmalloc, large_ndalloc, large_nrequests);
CTL_M2_GET("stats.arenas.0.huge.allocated", i, &huge_allocated, size_t);
CTL_M2_GET("stats.arenas.0.huge.nmalloc", i, &huge_nmalloc, uint64_t);
CTL_M2_GET("stats.arenas.0.huge.ndalloc", i, &huge_ndalloc, uint64_t);
CTL_M2_GET("stats.arenas.0.huge.nrequests", i, &huge_nrequests,
uint64_t);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"huge\": {\n");
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"allocated\": %zu,\n", huge_allocated);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", huge_nmalloc);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", huge_ndalloc);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", huge_nrequests);
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t},\n");
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "huge: %12zu %12"FMTu64" %12"FMTu64
- " %12"FMTu64"\n",
- huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests);
- malloc_cprintf(write_cb, cbopaque,
- "total: %12zu %12"FMTu64" %12"FMTu64
- " %12"FMTu64"\n",
- small_allocated + large_allocated + huge_allocated,
- small_nmalloc + large_nmalloc + huge_nmalloc,
- small_ndalloc + large_ndalloc + huge_ndalloc,
- small_nrequests + large_nrequests + huge_nrequests);
- }
- if (!json) {
- malloc_cprintf(write_cb, cbopaque,
- "active: %12zu\n", pactive * page);
- }
-
+ malloc_cprintf(write_cb, cbopaque,
+ "huge: %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64"\n",
+ huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests);
+ malloc_cprintf(write_cb, cbopaque,
+ "total: %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64"\n",
+ small_allocated + large_allocated + huge_allocated,
+ small_nmalloc + large_nmalloc + huge_nmalloc,
+ small_ndalloc + large_ndalloc + huge_ndalloc,
+ small_nrequests + large_nrequests + huge_nrequests);
+ malloc_cprintf(write_cb, cbopaque,
+ "active: %12zu\n", pactive * page);
CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"mapped\": %zu,\n", mapped);
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "mapped: %12zu\n", mapped);
- }
-
- CTL_M2_GET("stats.arenas.0.retained", i, &retained, size_t);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"retained\": %zu,\n", retained);
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "retained: %12zu\n", retained);
- }
-
+ malloc_cprintf(write_cb, cbopaque,
+ "mapped: %12zu\n", mapped);
CTL_M2_GET("stats.arenas.0.metadata.mapped", i, &metadata_mapped,
size_t);
CTL_M2_GET("stats.arenas.0.metadata.allocated", i, &metadata_allocated,
size_t);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\"metadata\": {\n");
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"mapped\": %zu,\n", metadata_mapped);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"allocated\": %zu\n", metadata_allocated);
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t},\n");
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "metadata: mapped: %zu, allocated: %zu\n",
- metadata_mapped, metadata_allocated);
- }
+ malloc_cprintf(write_cb, cbopaque,
+ "metadata: mapped: %zu, allocated: %zu\n",
+ metadata_mapped, metadata_allocated);
- if (bins) {
- stats_arena_bins_print(write_cb, cbopaque, json, large, huge,
- i);
- }
+ if (bins)
+ stats_arena_bins_print(write_cb, cbopaque, i);
if (large)
- stats_arena_lruns_print(write_cb, cbopaque, json, huge, i);
+ stats_arena_lruns_print(write_cb, cbopaque, i);
if (huge)
- stats_arena_hchunks_print(write_cb, cbopaque, json, i);
+ stats_arena_hchunks_print(write_cb, cbopaque, i);
}
-static void
-stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
- bool json, bool merged, bool unmerged)
+void
+stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *opts)
{
- const char *cpv;
- bool bv;
- unsigned uv;
- uint32_t u32v;
- uint64_t u64v;
- ssize_t ssv;
- size_t sv, bsz, usz, ssz, sssz, cpsz;
+ int err;
+ uint64_t epoch;
+ size_t u64sz;
+ bool general = true;
+ bool merged = true;
+ bool unmerged = true;
+ bool bins = true;
+ bool large = true;
+ bool huge = true;
- bsz = sizeof(bool);
- usz = sizeof(unsigned);
- ssz = sizeof(size_t);
- sssz = sizeof(ssize_t);
- cpsz = sizeof(const char *);
+ /*
+ * Refresh stats, in case mallctl() was called by the application.
+ *
+ * Check for OOM here, since refreshing the ctl cache can trigger
+ * allocation. In practice, none of the subsequent mallctl()-related
+ * calls in this function will cause OOM if this one succeeds.
+ * */
+ epoch = 1;
+ u64sz = sizeof(uint64_t);
+ err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t));
+ if (err != 0) {
+ if (err == EAGAIN) {
+ malloc_write("<jemalloc>: Memory allocation failure in "
+ "mallctl(\"epoch\", ...)\n");
+ return;
+ }
+ malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
+ "...)\n");
+ abort();
+ }
- CTL_GET("version", &cpv, const char *);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\"version\": \"%s\",\n", cpv);
- } else
- malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv);
+ if (opts != NULL) {
+ unsigned i;
- /* config. */
-#define CONFIG_WRITE_BOOL_JSON(n, c) \
- if (json) { \
- CTL_GET("config."#n, &bv, bool); \
- malloc_cprintf(write_cb, cbopaque, \
- "\t\t\t\""#n"\": %s%s\n", bv ? "true" : "false", \
- (c)); \
+ for (i = 0; opts[i] != '\0'; i++) {
+ switch (opts[i]) {
+ case 'g':
+ general = false;
+ break;
+ case 'm':
+ merged = false;
+ break;
+ case 'a':
+ unmerged = false;
+ break;
+ case 'b':
+ bins = false;
+ break;
+ case 'l':
+ large = false;
+ break;
+ case 'h':
+ huge = false;
+ break;
+ default:;
+ }
+ }
}
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\"config\": {\n");
- }
+ malloc_cprintf(write_cb, cbopaque,
+ "___ Begin jemalloc statistics ___\n");
+ if (general) {
+ const char *cpv;
+ bool bv;
+ unsigned uv;
+ ssize_t ssv;
+ size_t sv, bsz, ssz, sssz, cpsz;
- CONFIG_WRITE_BOOL_JSON(cache_oblivious, ",")
+ bsz = sizeof(bool);
+ ssz = sizeof(size_t);
+ sssz = sizeof(ssize_t);
+ cpsz = sizeof(const char *);
- CTL_GET("config.debug", &bv, bool);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"debug\": %s,\n", bv ? "true" : "false");
- } else {
+ CTL_GET("version", &cpv, const char *);
+ malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv);
+ CTL_GET("config.debug", &bv, bool);
malloc_cprintf(write_cb, cbopaque, "Assertions %s\n",
bv ? "enabled" : "disabled");
- }
-
- CONFIG_WRITE_BOOL_JSON(fill, ",")
- CONFIG_WRITE_BOOL_JSON(lazy_lock, ",")
-
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"malloc_conf\": \"%s\",\n",
- config_malloc_conf);
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "config.malloc_conf: \"%s\"\n", config_malloc_conf);
- }
- CONFIG_WRITE_BOOL_JSON(munmap, ",")
- CONFIG_WRITE_BOOL_JSON(prof, ",")
- CONFIG_WRITE_BOOL_JSON(prof_libgcc, ",")
- CONFIG_WRITE_BOOL_JSON(prof_libunwind, ",")
- CONFIG_WRITE_BOOL_JSON(stats, ",")
- CONFIG_WRITE_BOOL_JSON(tcache, ",")
- CONFIG_WRITE_BOOL_JSON(tls, ",")
- CONFIG_WRITE_BOOL_JSON(utrace, ",")
- CONFIG_WRITE_BOOL_JSON(valgrind, ",")
- CONFIG_WRITE_BOOL_JSON(xmalloc, "")
-
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t},\n");
- }
-#undef CONFIG_WRITE_BOOL_JSON
-
- /* opt. */
-#define OPT_WRITE_BOOL(n, c) \
- if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0) { \
- if (json) { \
- malloc_cprintf(write_cb, cbopaque, \
- "\t\t\t\""#n"\": %s%s\n", bv ? "true" : \
- "false", (c)); \
- } else { \
+#define OPT_WRITE_BOOL(n) \
+ if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %s\n", bv ? "true" : "false"); \
- } \
- }
-#define OPT_WRITE_BOOL_MUTABLE(n, m, c) { \
- bool bv2; \
- if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0 && \
- je_mallctl(#m, &bv2, (void *)&bsz, NULL, 0) == 0) { \
- if (json) { \
- malloc_cprintf(write_cb, cbopaque, \
- "\t\t\t\""#n"\": %s%s\n", bv ? "true" : \
- "false", (c)); \
- } else { \
+ }
+#define OPT_WRITE_BOOL_MUTABLE(n, m) { \
+ bool bv2; \
+ if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0 && \
+ je_mallctl(#m, &bv2, &bsz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %s ("#m": %s)\n", bv ? "true" \
: "false", bv2 ? "true" : "false"); \
} \
- } \
}
-#define OPT_WRITE_UNSIGNED(n, c) \
- if (je_mallctl("opt."#n, (void *)&uv, &usz, NULL, 0) == 0) { \
- if (json) { \
- malloc_cprintf(write_cb, cbopaque, \
- "\t\t\t\""#n"\": %u%s\n", uv, (c)); \
- } else { \
- malloc_cprintf(write_cb, cbopaque, \
- " opt."#n": %u\n", uv); \
- } \
- }
-#define OPT_WRITE_SIZE_T(n, c) \
- if (je_mallctl("opt."#n, (void *)&sv, &ssz, NULL, 0) == 0) { \
- if (json) { \
- malloc_cprintf(write_cb, cbopaque, \
- "\t\t\t\""#n"\": %zu%s\n", sv, (c)); \
- } else { \
+#define OPT_WRITE_SIZE_T(n) \
+ if (je_mallctl("opt."#n, &sv, &ssz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zu\n", sv); \
- } \
- }
-#define OPT_WRITE_SSIZE_T(n, c) \
- if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0) { \
- if (json) { \
- malloc_cprintf(write_cb, cbopaque, \
- "\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \
- } else { \
+ }
+#define OPT_WRITE_SSIZE_T(n) \
+ if (je_mallctl("opt."#n, &ssv, &sssz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zd\n", ssv); \
- } \
- }
-#define OPT_WRITE_SSIZE_T_MUTABLE(n, m, c) { \
- ssize_t ssv2; \
- if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0 && \
- je_mallctl(#m, (void *)&ssv2, &sssz, NULL, 0) == 0) { \
- if (json) { \
- malloc_cprintf(write_cb, cbopaque, \
- "\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \
- } else { \
+ }
+#define OPT_WRITE_SSIZE_T_MUTABLE(n, m) { \
+ ssize_t ssv2; \
+ if (je_mallctl("opt."#n, &ssv, &sssz, NULL, 0) == 0 && \
+ je_mallctl(#m, &ssv2, &sssz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zd ("#m": %zd)\n", \
ssv, ssv2); \
} \
- } \
}
-#define OPT_WRITE_CHAR_P(n, c) \
- if (je_mallctl("opt."#n, (void *)&cpv, &cpsz, NULL, 0) == 0) { \
- if (json) { \
- malloc_cprintf(write_cb, cbopaque, \
- "\t\t\t\""#n"\": \"%s\"%s\n", cpv, (c)); \
- } else { \
+#define OPT_WRITE_CHAR_P(n) \
+ if (je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": \"%s\"\n", cpv); \
- } \
- }
+ }
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\"opt\": {\n");
- } else {
malloc_cprintf(write_cb, cbopaque,
"Run-time option settings:\n");
- }
- OPT_WRITE_BOOL(abort, ",")
- OPT_WRITE_SIZE_T(lg_chunk, ",")
- OPT_WRITE_CHAR_P(dss, ",")
- OPT_WRITE_UNSIGNED(narenas, ",")
- OPT_WRITE_CHAR_P(purge, ",")
- if (json || opt_purge == purge_mode_ratio) {
- OPT_WRITE_SSIZE_T_MUTABLE(lg_dirty_mult,
- arenas.lg_dirty_mult, ",")
- }
- if (json || opt_purge == purge_mode_decay) {
- OPT_WRITE_SSIZE_T_MUTABLE(decay_time, arenas.decay_time, ",")
- }
- OPT_WRITE_CHAR_P(junk, ",")
- OPT_WRITE_SIZE_T(quarantine, ",")
- OPT_WRITE_BOOL(redzone, ",")
- OPT_WRITE_BOOL(zero, ",")
- OPT_WRITE_BOOL(utrace, ",")
- OPT_WRITE_BOOL(xmalloc, ",")
- OPT_WRITE_BOOL(tcache, ",")
- OPT_WRITE_SSIZE_T(lg_tcache_max, ",")
- OPT_WRITE_BOOL(prof, ",")
- OPT_WRITE_CHAR_P(prof_prefix, ",")
- OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active, ",")
- OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init, prof.thread_active_init,
- ",")
- OPT_WRITE_SSIZE_T_MUTABLE(lg_prof_sample, prof.lg_sample, ",")
- OPT_WRITE_BOOL(prof_accum, ",")
- OPT_WRITE_SSIZE_T(lg_prof_interval, ",")
- OPT_WRITE_BOOL(prof_gdump, ",")
- OPT_WRITE_BOOL(prof_final, ",")
- OPT_WRITE_BOOL(prof_leak, ",")
- /*
- * stats_print is always emitted, so as long as stats_print comes last
- * it's safe to unconditionally omit the comma here (rather than having
- * to conditionally omit it elsewhere depending on configuration).
- */
- OPT_WRITE_BOOL(stats_print, "")
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t},\n");
- }
+ OPT_WRITE_BOOL(abort)
+ OPT_WRITE_SIZE_T(lg_chunk)
+ OPT_WRITE_CHAR_P(dss)
+ OPT_WRITE_SIZE_T(narenas)
+ OPT_WRITE_SSIZE_T_MUTABLE(lg_dirty_mult, arenas.lg_dirty_mult)
+ OPT_WRITE_BOOL(stats_print)
+ OPT_WRITE_CHAR_P(junk)
+ OPT_WRITE_SIZE_T(quarantine)
+ OPT_WRITE_BOOL(redzone)
+ OPT_WRITE_BOOL(zero)
+ OPT_WRITE_BOOL(utrace)
+ OPT_WRITE_BOOL(valgrind)
+ OPT_WRITE_BOOL(xmalloc)
+ OPT_WRITE_BOOL(tcache)
+ OPT_WRITE_SSIZE_T(lg_tcache_max)
+ OPT_WRITE_BOOL(prof)
+ OPT_WRITE_CHAR_P(prof_prefix)
+ OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active)
+ OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init,
+ prof.thread_active_init)
+ OPT_WRITE_SSIZE_T(lg_prof_sample)
+ OPT_WRITE_BOOL(prof_accum)
+ OPT_WRITE_SSIZE_T(lg_prof_interval)
+ OPT_WRITE_BOOL(prof_gdump)
+ OPT_WRITE_BOOL(prof_final)
+ OPT_WRITE_BOOL(prof_leak)
#undef OPT_WRITE_BOOL
#undef OPT_WRITE_BOOL_MUTABLE
@@ -769,386 +513,128 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
#undef OPT_WRITE_SSIZE_T
#undef OPT_WRITE_CHAR_P
- /* arenas. */
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\"arenas\": {\n");
- }
+ malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus);
- CTL_GET("arenas.narenas", &uv, unsigned);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"narenas\": %u,\n", uv);
- } else
+ CTL_GET("arenas.narenas", &uv, unsigned);
malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv);
- CTL_GET("arenas.lg_dirty_mult", &ssv, ssize_t);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"lg_dirty_mult\": %zd,\n", ssv);
- } else if (opt_purge == purge_mode_ratio) {
- if (ssv >= 0) {
- malloc_cprintf(write_cb, cbopaque,
- "Min active:dirty page ratio per arena: "
- "%u:1\n", (1U << ssv));
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "Min active:dirty page ratio per arena: "
- "N/A\n");
- }
- }
- CTL_GET("arenas.decay_time", &ssv, ssize_t);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"decay_time\": %zd,\n", ssv);
- } else if (opt_purge == purge_mode_decay) {
- malloc_cprintf(write_cb, cbopaque,
- "Unused dirty page decay time: %zd%s\n",
- ssv, (ssv < 0) ? " (no decay)" : "");
- }
+ malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n",
+ sizeof(void *));
- CTL_GET("arenas.quantum", &sv, size_t);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"quantum\": %zu,\n", sv);
- } else
- malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv);
+ CTL_GET("arenas.quantum", &sv, size_t);
+ malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n",
+ sv);
- CTL_GET("arenas.page", &sv, size_t);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"page\": %zu,\n", sv);
- } else
+ CTL_GET("arenas.page", &sv, size_t);
malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);
- if (je_mallctl("arenas.tcache_max", (void *)&sv, &ssz, NULL, 0) == 0) {
- if (json) {
+ CTL_GET("arenas.lg_dirty_mult", &ssv, ssize_t);
+ if (ssv >= 0) {
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"tcache_max\": %zu,\n", sv);
+ "Min active:dirty page ratio per arena: %u:1\n",
+ (1U << ssv));
} else {
malloc_cprintf(write_cb, cbopaque,
- "Maximum thread-cached size class: %zu\n", sv);
+ "Min active:dirty page ratio per arena: N/A\n");
}
- }
-
- if (json) {
- unsigned nbins, nlruns, nhchunks, i;
-
- CTL_GET("arenas.nbins", &nbins, unsigned);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"nbins\": %u,\n", nbins);
-
- CTL_GET("arenas.nhbins", &uv, unsigned);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"nhbins\": %u,\n", uv);
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"bin\": [\n");
- for (i = 0; i < nbins; i++) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t{\n");
-
- CTL_M2_GET("arenas.bin.0.size", i, &sv, size_t);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"size\": %zu,\n", sv);
-
- CTL_M2_GET("arenas.bin.0.nregs", i, &u32v, uint32_t);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"nregs\": %"FMTu32",\n", u32v);
-
- CTL_M2_GET("arenas.bin.0.run_size", i, &sv, size_t);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"run_size\": %zu\n", sv);
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t}%s\n", (i + 1 < nbins) ? "," : "");
- }
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t],\n");
-
- CTL_GET("arenas.nlruns", &nlruns, unsigned);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"nlruns\": %u,\n", nlruns);
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"lrun\": [\n");
- for (i = 0; i < nlruns; i++) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t{\n");
-
- CTL_M2_GET("arenas.lrun.0.size", i, &sv, size_t);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"size\": %zu\n", sv);
-
+ if (je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0) == 0) {
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t}%s\n", (i + 1 < nlruns) ? "," : "");
+ "Maximum thread-cached size class: %zu\n", sv);
}
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t],\n");
-
- CTL_GET("arenas.nhchunks", &nhchunks, unsigned);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"nhchunks\": %u,\n", nhchunks);
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"hchunk\": [\n");
- for (i = 0; i < nhchunks; i++) {
+ if (je_mallctl("opt.prof", &bv, &bsz, NULL, 0) == 0 && bv) {
+ CTL_GET("prof.lg_sample", &sv, size_t);
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t{\n");
+ "Average profile sample interval: %"FMTu64
+ " (2^%zu)\n", (((uint64_t)1U) << sv), sv);
- CTL_M2_GET("arenas.hchunk.0.size", i, &sv, size_t);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t\t\"size\": %zu\n", sv);
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\t}%s\n", (i + 1 < nhchunks) ? "," : "");
+ CTL_GET("opt.lg_prof_interval", &ssv, ssize_t);
+ if (ssv >= 0) {
+ malloc_cprintf(write_cb, cbopaque,
+ "Average profile dump interval: %"FMTu64
+ " (2^%zd)\n",
+ (((uint64_t)1U) << ssv), ssv);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "Average profile dump interval: N/A\n");
+ }
}
+ CTL_GET("opt.lg_chunk", &sv, size_t);
malloc_cprintf(write_cb, cbopaque,
- "\t\t\t]\n");
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t},\n");
+ "Chunk size: %zu (2^%zu)\n", (ZU(1) << sv), sv);
}
- /* prof. */
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\"prof\": {\n");
-
- CTL_GET("prof.thread_active_init", &bv, bool);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"thread_active_init\": %s,\n", bv ? "true" :
- "false");
-
- CTL_GET("prof.active", &bv, bool);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"active\": %s,\n", bv ? "true" : "false");
-
- CTL_GET("prof.gdump", &bv, bool);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"gdump\": %s,\n", bv ? "true" : "false");
-
- CTL_GET("prof.interval", &u64v, uint64_t);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"interval\": %"FMTu64",\n", u64v);
-
- CTL_GET("prof.lg_sample", &ssv, ssize_t);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"lg_sample\": %zd\n", ssv);
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t}%s\n", (config_stats || merged || unmerged) ? "," :
- "");
- }
-}
-
-static void
-stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque,
- bool json, bool merged, bool unmerged, bool bins, bool large, bool huge)
-{
- size_t *cactive;
- size_t allocated, active, metadata, resident, mapped, retained;
-
- CTL_GET("stats.cactive", &cactive, size_t *);
- CTL_GET("stats.allocated", &allocated, size_t);
- CTL_GET("stats.active", &active, size_t);
- CTL_GET("stats.metadata", &metadata, size_t);
- CTL_GET("stats.resident", &resident, size_t);
- CTL_GET("stats.mapped", &mapped, size_t);
- CTL_GET("stats.retained", &retained, size_t);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\"stats\": {\n");
-
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"cactive\": %zu,\n", atomic_read_z(cactive));
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"allocated\": %zu,\n", allocated);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"active\": %zu,\n", active);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"metadata\": %zu,\n", metadata);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"resident\": %zu,\n", resident);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"mapped\": %zu,\n", mapped);
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"retained\": %zu\n", retained);
+ if (config_stats) {
+ size_t *cactive;
+ size_t allocated, active, metadata, resident, mapped;
- malloc_cprintf(write_cb, cbopaque,
- "\t\t}%s\n", (merged || unmerged) ? "," : "");
- } else {
+ CTL_GET("stats.cactive", &cactive, size_t *);
+ CTL_GET("stats.allocated", &allocated, size_t);
+ CTL_GET("stats.active", &active, size_t);
+ CTL_GET("stats.metadata", &metadata, size_t);
+ CTL_GET("stats.resident", &resident, size_t);
+ CTL_GET("stats.mapped", &mapped, size_t);
malloc_cprintf(write_cb, cbopaque,
"Allocated: %zu, active: %zu, metadata: %zu,"
- " resident: %zu, mapped: %zu, retained: %zu\n",
- allocated, active, metadata, resident, mapped, retained);
+ " resident: %zu, mapped: %zu\n",
+ allocated, active, metadata, resident, mapped);
malloc_cprintf(write_cb, cbopaque,
"Current active ceiling: %zu\n",
atomic_read_z(cactive));
- }
- if (merged || unmerged) {
- unsigned narenas;
-
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\"stats.arenas\": {\n");
- }
-
- CTL_GET("arenas.narenas", &narenas, unsigned);
- {
- VARIABLE_ARRAY(bool, initialized, narenas);
- size_t isz;
- unsigned i, j, ninitialized;
-
- isz = sizeof(bool) * narenas;
- xmallctl("arenas.initialized", (void *)initialized,
- &isz, NULL, 0);
- for (i = ninitialized = 0; i < narenas; i++) {
- if (initialized[i])
- ninitialized++;
- }
+ if (merged) {
+ unsigned narenas;
+
+ CTL_GET("arenas.narenas", &narenas, unsigned);
+ {
+ VARIABLE_ARRAY(bool, initialized, narenas);
+ size_t isz;
+ unsigned i, ninitialized;
+
+ isz = sizeof(bool) * narenas;
+ xmallctl("arenas.initialized", initialized,
+ &isz, NULL, 0);
+ for (i = ninitialized = 0; i < narenas; i++) {
+ if (initialized[i])
+ ninitialized++;
+ }
- /* Merged stats. */
- if (merged && (ninitialized > 1 || !unmerged)) {
- /* Print merged arena stats. */
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t\"merged\": {\n");
- } else {
+ if (ninitialized > 1 || !unmerged) {
+ /* Print merged arena stats. */
malloc_cprintf(write_cb, cbopaque,
"\nMerged arenas stats:\n");
- }
- stats_arena_print(write_cb, cbopaque, json,
- narenas, bins, large, huge);
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t\t}%s\n", (ninitialized > 1) ?
- "," : "");
- }
- }
-
- /* Unmerged stats. */
- for (i = j = 0; i < narenas; i++) {
- if (initialized[i]) {
- if (json) {
- j++;
- malloc_cprintf(write_cb,
- cbopaque,
- "\t\t\t\"%u\": {\n", i);
- } else {
- malloc_cprintf(write_cb,
- cbopaque, "\narenas[%u]:\n",
- i);
- }
stats_arena_print(write_cb, cbopaque,
- json, i, bins, large, huge);
- if (json) {
- malloc_cprintf(write_cb,
- cbopaque,
- "\t\t\t}%s\n", (j <
- ninitialized) ? "," : "");
- }
+ narenas, bins, large, huge);
}
}
}
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t\t}\n");
- }
- }
-}
+ if (unmerged) {
+ unsigned narenas;
-void
-stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *opts)
-{
- int err;
- uint64_t epoch;
- size_t u64sz;
- bool json = false;
- bool general = true;
- bool merged = true;
- bool unmerged = true;
- bool bins = true;
- bool large = true;
- bool huge = true;
+ /* Print stats for each arena. */
- /*
- * Refresh stats, in case mallctl() was called by the application.
- *
- * Check for OOM here, since refreshing the ctl cache can trigger
- * allocation. In practice, none of the subsequent mallctl()-related
- * calls in this function will cause OOM if this one succeeds.
- * */
- epoch = 1;
- u64sz = sizeof(uint64_t);
- err = je_mallctl("epoch", (void *)&epoch, &u64sz, (void *)&epoch,
- sizeof(uint64_t));
- if (err != 0) {
- if (err == EAGAIN) {
- malloc_write("<jemalloc>: Memory allocation failure in "
- "mallctl(\"epoch\", ...)\n");
- return;
- }
- malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
- "...)\n");
- abort();
- }
+ CTL_GET("arenas.narenas", &narenas, unsigned);
+ {
+ VARIABLE_ARRAY(bool, initialized, narenas);
+ size_t isz;
+ unsigned i;
- if (opts != NULL) {
- unsigned i;
+ isz = sizeof(bool) * narenas;
+ xmallctl("arenas.initialized", initialized,
+ &isz, NULL, 0);
- for (i = 0; opts[i] != '\0'; i++) {
- switch (opts[i]) {
- case 'J':
- json = true;
- break;
- case 'g':
- general = false;
- break;
- case 'm':
- merged = false;
- break;
- case 'a':
- unmerged = false;
- break;
- case 'b':
- bins = false;
- break;
- case 'l':
- large = false;
- break;
- case 'h':
- huge = false;
- break;
- default:;
+ for (i = 0; i < narenas; i++) {
+ if (initialized[i]) {
+ malloc_cprintf(write_cb,
+ cbopaque,
+ "\narenas[%u]:\n", i);
+ stats_arena_print(write_cb,
+ cbopaque, i, bins, large,
+ huge);
+ }
+ }
}
}
}
-
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "{\n"
- "\t\"jemalloc\": {\n");
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "___ Begin jemalloc statistics ___\n");
- }
-
- if (general)
- stats_general_print(write_cb, cbopaque, json, merged, unmerged);
- if (config_stats) {
- stats_print_helper(write_cb, cbopaque, json, merged, unmerged,
- bins, large, huge);
- }
- if (json) {
- malloc_cprintf(write_cb, cbopaque,
- "\t}\n"
- "}\n");
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "--- End jemalloc statistics ---\n");
- }
+ malloc_cprintf(write_cb, cbopaque, "--- End jemalloc statistics ---\n");
}
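
For reference, the opts string parsed by the restored stats_print() above disables sections of the report per character: 'g' omits general information, 'm' merged-arena stats, 'a' per-arena stats, 'b' per-bin stats, 'l' large-object stats and 'h' huge-object stats. Below is a minimal, purely illustrative caller through jemalloc's public wrapper; it assumes an unprefixed build of the library (Redis's bundled copy exposes the same entry point as je_malloc_stats_print()).

#include <stdio.h>
#include <stdlib.h>
#include <jemalloc/jemalloc.h>

/* stats_print() hands each chunk of report text to this callback. */
static void
write_to_stderr(void *cbopaque, const char *s)
{
	(void)cbopaque;
	fputs(s, stderr);
}

int
main(void)
{
	void *p = malloc(4096);	/* touch the allocator so the report is non-trivial */

	/* "gbh": skip general info, per-bin stats and huge-object stats. */
	malloc_stats_print(write_to_stderr, NULL, "gbh");

	free(p);
	return (0);
}
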
diff --git a/deps/jemalloc/src/tcache.c b/deps/jemalloc/src/tcache.c
index 21540ff46..fdafd0c62 100755..100644
--- a/deps/jemalloc/src/tcache.c
+++ b/deps/jemalloc/src/tcache.c
@@ -10,7 +10,7 @@ ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
tcache_bin_info_t *tcache_bin_info;
static unsigned stack_nelms; /* Total stack elms per tcache. */
-unsigned nhbins;
+size_t nhbins;
size_t tcache_maxclass;
tcaches_t *tcaches;
@@ -23,11 +23,10 @@ static tcaches_t *tcaches_avail;
/******************************************************************************/
-size_t
-tcache_salloc(tsdn_t *tsdn, const void *ptr)
+size_t tcache_salloc(const void *ptr)
{
- return (arena_salloc(tsdn, ptr, false));
+ return (arena_salloc(ptr, false));
}
void
@@ -68,19 +67,20 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
tcache->next_gc_bin++;
if (tcache->next_gc_bin == nhbins)
tcache->next_gc_bin = 0;
+ tcache->ev_cnt = 0;
}
void *
-tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
- tcache_bin_t *tbin, szind_t binind, bool *tcache_success)
+tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+ tcache_bin_t *tbin, szind_t binind)
{
void *ret;
- arena_tcache_fill_small(tsdn, arena, tbin, binind, config_prof ?
+ arena_tcache_fill_small(arena, tbin, binind, config_prof ?
tcache->prof_accumbytes : 0);
if (config_prof)
tcache->prof_accumbytes = 0;
- ret = tcache_alloc_easy(tbin, tcache_success);
+ ret = tcache_alloc_easy(tbin);
return (ret);
}
@@ -102,18 +102,17 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
/* Lock the arena bin associated with the first object. */
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
- *(tbin->avail - 1));
+ tbin->avail[0]);
arena_t *bin_arena = extent_node_arena_get(&chunk->node);
arena_bin_t *bin = &bin_arena->bins[binind];
if (config_prof && bin_arena == arena) {
- if (arena_prof_accum(tsd_tsdn(tsd), arena,
- tcache->prof_accumbytes))
- prof_idump(tsd_tsdn(tsd));
+ if (arena_prof_accum(arena, tcache->prof_accumbytes))
+ prof_idump();
tcache->prof_accumbytes = 0;
}
- malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ malloc_mutex_lock(&bin->lock);
if (config_stats && bin_arena == arena) {
assert(!merged_stats);
merged_stats = true;
@@ -123,16 +122,16 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
}
ndeferred = 0;
for (i = 0; i < nflush; i++) {
- ptr = *(tbin->avail - 1 - i);
+ ptr = tbin->avail[i];
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (extent_node_arena_get(&chunk->node) == bin_arena) {
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_bits_t *bitselm =
- arena_bitselm_get_mutable(chunk, pageind);
- arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
- bin_arena, chunk, ptr, bitselm);
+ arena_bitselm_get(chunk, pageind);
+ arena_dalloc_bin_junked_locked(bin_arena, chunk,
+ ptr, bitselm);
} else {
/*
* This object was allocated via a different
@@ -140,12 +139,11 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
* locked. Stash the object, so that it can be
* handled in a future pass.
*/
- *(tbin->avail - 1 - ndeferred) = ptr;
+ tbin->avail[ndeferred] = ptr;
ndeferred++;
}
}
- malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
- arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
+ malloc_mutex_unlock(&bin->lock);
}
if (config_stats && !merged_stats) {
/*
@@ -153,15 +151,15 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
* arena, so the stats didn't get merged. Manually do so now.
*/
arena_bin_t *bin = &arena->bins[binind];
- malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ malloc_mutex_lock(&bin->lock);
bin->stats.nflushes++;
bin->stats.nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
- malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ malloc_mutex_unlock(&bin->lock);
}
- memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
- sizeof(void *));
+ memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
+ rem * sizeof(void *));
tbin->ncached = rem;
if ((int)tbin->ncached < tbin->low_water)
tbin->low_water = tbin->ncached;
@@ -184,13 +182,13 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
/* Lock the arena associated with the first object. */
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
- *(tbin->avail - 1));
+ tbin->avail[0]);
arena_t *locked_arena = extent_node_arena_get(&chunk->node);
UNUSED bool idump;
if (config_prof)
idump = false;
- malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->lock);
+ malloc_mutex_lock(&locked_arena->lock);
if ((config_prof || config_stats) && locked_arena == arena) {
if (config_prof) {
idump = arena_prof_accum_locked(arena,
@@ -208,13 +206,13 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
}
ndeferred = 0;
for (i = 0; i < nflush; i++) {
- ptr = *(tbin->avail - 1 - i);
+ ptr = tbin->avail[i];
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (extent_node_arena_get(&chunk->node) ==
locked_arena) {
- arena_dalloc_large_junked_locked(tsd_tsdn(tsd),
- locked_arena, chunk, ptr);
+ arena_dalloc_large_junked_locked(locked_arena,
+ chunk, ptr);
} else {
/*
* This object was allocated via a different
@@ -222,56 +220,62 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
* Stash the object, so that it can be handled
* in a future pass.
*/
- *(tbin->avail - 1 - ndeferred) = ptr;
+ tbin->avail[ndeferred] = ptr;
ndeferred++;
}
}
- malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->lock);
+ malloc_mutex_unlock(&locked_arena->lock);
if (config_prof && idump)
- prof_idump(tsd_tsdn(tsd));
- arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
- ndeferred);
+ prof_idump();
}
if (config_stats && !merged_stats) {
/*
* The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now.
*/
- malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
+ malloc_mutex_lock(&arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[binind - NBINS].nrequests +=
tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
- malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
}
- memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
- sizeof(void *));
+ memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
+ rem * sizeof(void *));
tbin->ncached = rem;
if ((int)tbin->ncached < tbin->low_water)
tbin->low_water = tbin->ncached;
}
-static void
-tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
+void
+tcache_arena_associate(tcache_t *tcache, arena_t *arena)
{
if (config_stats) {
/* Link into list of extant tcaches. */
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
ql_elm_new(tcache, link);
ql_tail_insert(&arena->tcache_ql, tcache, link);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
}
}
-static void
-tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
+void
+tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena, arena_t *newarena)
+{
+
+ tcache_arena_dissociate(tcache, oldarena);
+ tcache_arena_associate(tcache, newarena);
+}
+
+void
+tcache_arena_dissociate(tcache_t *tcache, arena_t *arena)
{
if (config_stats) {
/* Unlink from list of extant tcaches. */
- malloc_mutex_lock(tsdn, &arena->lock);
+ malloc_mutex_lock(&arena->lock);
if (config_debug) {
bool in_ql = false;
tcache_t *iter;
@@ -284,20 +288,11 @@ tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
assert(in_ql);
}
ql_remove(&arena->tcache_ql, tcache, link);
- tcache_stats_merge(tsdn, tcache, arena);
- malloc_mutex_unlock(tsdn, &arena->lock);
+ tcache_stats_merge(tcache, arena);
+ malloc_mutex_unlock(&arena->lock);
}
}
-void
-tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *oldarena,
- arena_t *newarena)
-{
-
- tcache_arena_dissociate(tsdn, tcache, oldarena);
- tcache_arena_associate(tsdn, tcache, newarena);
-}
-
tcache_t *
tcache_get_hard(tsd_t *tsd)
{
@@ -311,11 +306,11 @@ tcache_get_hard(tsd_t *tsd)
arena = arena_choose(tsd, NULL);
if (unlikely(arena == NULL))
return (NULL);
- return (tcache_create(tsd_tsdn(tsd), arena));
+ return (tcache_create(tsd, arena));
}
tcache_t *
-tcache_create(tsdn_t *tsdn, arena_t *arena)
+tcache_create(tsd_t *tsd, arena_t *arena)
{
tcache_t *tcache;
size_t size, stack_offset;
@@ -329,26 +324,18 @@ tcache_create(tsdn_t *tsdn, arena_t *arena)
/* Avoid false cacheline sharing. */
size = sa2u(size, CACHELINE);
- tcache = ipallocztm(tsdn, size, CACHELINE, true, NULL, true,
- arena_get(TSDN_NULL, 0, true));
+ tcache = ipallocztm(tsd, size, CACHELINE, true, false, true, a0get());
if (tcache == NULL)
return (NULL);
- tcache_arena_associate(tsdn, tcache, arena);
-
- ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);
+ tcache_arena_associate(tcache, arena);
assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
for (i = 0; i < nhbins; i++) {
tcache->tbins[i].lg_fill_div = 1;
- stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
- /*
- * avail points past the available space. Allocations will
- * access the slots toward higher addresses (for the benefit of
- * prefetch).
- */
tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
(uintptr_t)stack_offset);
+ stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
}
return (tcache);
@@ -361,7 +348,7 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
unsigned i;
arena = arena_choose(tsd, NULL);
- tcache_arena_dissociate(tsd_tsdn(tsd), tcache, arena);
+ tcache_arena_dissociate(tcache, arena);
for (i = 0; i < NBINS; i++) {
tcache_bin_t *tbin = &tcache->tbins[i];
@@ -369,9 +356,9 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
if (config_stats && tbin->tstats.nrequests != 0) {
arena_bin_t *bin = &arena->bins[i];
- malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ malloc_mutex_lock(&bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
- malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ malloc_mutex_unlock(&bin->lock);
}
}
@@ -380,19 +367,19 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
if (config_stats && tbin->tstats.nrequests != 0) {
- malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
+ malloc_mutex_lock(&arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[i - NBINS].nrequests +=
tbin->tstats.nrequests;
- malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
+ malloc_mutex_unlock(&arena->lock);
}
}
if (config_prof && tcache->prof_accumbytes > 0 &&
- arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes))
- prof_idump(tsd_tsdn(tsd));
+ arena_prof_accum(arena, tcache->prof_accumbytes))
+ prof_idump();
- idalloctm(tsd_tsdn(tsd), tcache, NULL, true, true);
+ idalloctm(tsd, tcache, false, true);
}
void
@@ -416,22 +403,21 @@ tcache_enabled_cleanup(tsd_t *tsd)
/* Do nothing. */
}
+/* Caller must own arena->lock. */
void
-tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
+tcache_stats_merge(tcache_t *tcache, arena_t *arena)
{
unsigned i;
cassert(config_stats);
- malloc_mutex_assert_owner(tsdn, &arena->lock);
-
/* Merge and reset tcache stats. */
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
tcache_bin_t *tbin = &tcache->tbins[i];
- malloc_mutex_lock(tsdn, &bin->lock);
+ malloc_mutex_lock(&bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
- malloc_mutex_unlock(tsdn, &bin->lock);
+ malloc_mutex_unlock(&bin->lock);
tbin->tstats.nrequests = 0;
}
@@ -447,12 +433,11 @@ tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
bool
tcaches_create(tsd_t *tsd, unsigned *r_ind)
{
- arena_t *arena;
tcache_t *tcache;
tcaches_t *elm;
if (tcaches == NULL) {
- tcaches = base_alloc(tsd_tsdn(tsd), sizeof(tcache_t *) *
+ tcaches = base_alloc(sizeof(tcache_t *) *
(MALLOCX_TCACHE_MAX+1));
if (tcaches == NULL)
return (true);
@@ -460,10 +445,7 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind)
if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX)
return (true);
- arena = arena_ichoose(tsd, NULL);
- if (unlikely(arena == NULL))
- return (true);
- tcache = tcache_create(tsd_tsdn(tsd), arena);
+ tcache = tcache_create(tsd, a0get());
if (tcache == NULL)
return (true);
@@ -471,7 +453,7 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind)
elm = tcaches_avail;
tcaches_avail = tcaches_avail->next;
elm->tcache = tcache;
- *r_ind = (unsigned)(elm - tcaches);
+ *r_ind = elm - tcaches;
} else {
elm = &tcaches[tcaches_past];
elm->tcache = tcache;
@@ -509,7 +491,7 @@ tcaches_destroy(tsd_t *tsd, unsigned ind)
}
bool
-tcache_boot(tsdn_t *tsdn)
+tcache_boot(void)
{
unsigned i;
@@ -517,17 +499,17 @@ tcache_boot(tsdn_t *tsdn)
* If necessary, clamp opt_lg_tcache_max, now that large_maxclass is
* known.
*/
- if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) < SMALL_MAXCLASS)
+ if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
tcache_maxclass = SMALL_MAXCLASS;
- else if ((ZU(1) << opt_lg_tcache_max) > large_maxclass)
+ else if ((1U << opt_lg_tcache_max) > large_maxclass)
tcache_maxclass = large_maxclass;
else
- tcache_maxclass = (ZU(1) << opt_lg_tcache_max);
+ tcache_maxclass = (1U << opt_lg_tcache_max);
nhbins = size2index(tcache_maxclass) + 1;
/* Initialize tcache_bin_info. */
- tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, nhbins *
+ tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
sizeof(tcache_bin_info_t));
if (tcache_bin_info == NULL)
return (true);
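
The flush loops in this file differ between the two versions only in how they address the per-bin cache: the removed 4.4.0 code treats tbin->avail as pointing one past the stack and walks objects as *(avail - 1 - i), while the restored code indexes a flat array as avail[i]. The pair of hypothetical helpers below (not part of either version) just makes that mapping explicit.

/* i-th object visited by the flush loop, under the removed end-pointer layout. */
static inline void *
tbin_obj_440(void **avail, unsigned i)
{
	return (*(avail - 1 - i));
}

/* Same object under the restored flat-array layout. */
static inline void *
tbin_obj_40x(void **avail, unsigned i)
{
	return (avail[i]);
}
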
diff --git a/deps/jemalloc/src/ticker.c b/deps/jemalloc/src/ticker.c
deleted file mode 100644
index db0902404..000000000
--- a/deps/jemalloc/src/ticker.c
+++ /dev/null
@@ -1,2 +0,0 @@
-#define JEMALLOC_TICKER_C_
-#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/deps/jemalloc/src/tsd.c b/deps/jemalloc/src/tsd.c
index ec69a51c3..9ffe9afef 100644
--- a/deps/jemalloc/src/tsd.c
+++ b/deps/jemalloc/src/tsd.c
@@ -77,7 +77,7 @@ tsd_cleanup(void *arg)
/* Do nothing. */
break;
case tsd_state_nominal:
-#define O(n, t) \
+#define O(n, t) \
n##_cleanup(tsd);
MALLOC_TSD
#undef O
@@ -106,17 +106,15 @@ MALLOC_TSD
}
}
-tsd_t *
+bool
malloc_tsd_boot0(void)
{
- tsd_t *tsd;
ncleanups = 0;
if (tsd_boot0())
- return (NULL);
- tsd = tsd_fetch();
- *tsd_arenas_tdata_bypassp_get(tsd) = true;
- return (tsd);
+ return (true);
+ *tsd_arenas_cache_bypassp_get(tsd_fetch()) = true;
+ return (false);
}
void
@@ -124,7 +122,7 @@ malloc_tsd_boot1(void)
{
tsd_boot1();
- *tsd_arenas_tdata_bypassp_get(tsd_fetch()) = false;
+ *tsd_arenas_cache_bypassp_get(tsd_fetch()) = false;
}
#ifdef _WIN32
@@ -150,15 +148,13 @@ _tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
#ifdef _MSC_VER
# ifdef _M_IX86
# pragma comment(linker, "/INCLUDE:__tls_used")
-# pragma comment(linker, "/INCLUDE:_tls_callback")
# else
# pragma comment(linker, "/INCLUDE:_tls_used")
-# pragma comment(linker, "/INCLUDE:tls_callback")
# endif
# pragma section(".CRT$XLY",long,read)
#endif
JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used)
-BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL,
+static BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL,
DWORD fdwReason, LPVOID lpvReserved) = _tls_callback;
#endif
@@ -171,10 +167,10 @@ tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
tsd_init_block_t *iter;
/* Check whether this thread has already inserted into the list. */
- malloc_mutex_lock(TSDN_NULL, &head->lock);
+ malloc_mutex_lock(&head->lock);
ql_foreach(iter, &head->blocks, link) {
if (iter->thread == self) {
- malloc_mutex_unlock(TSDN_NULL, &head->lock);
+ malloc_mutex_unlock(&head->lock);
return (iter->data);
}
}
@@ -182,7 +178,7 @@ tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
ql_elm_new(block, link);
block->thread = self;
ql_tail_insert(&head->blocks, block, link);
- malloc_mutex_unlock(TSDN_NULL, &head->lock);
+ malloc_mutex_unlock(&head->lock);
return (NULL);
}
@@ -190,8 +186,8 @@ void
tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block)
{
- malloc_mutex_lock(TSDN_NULL, &head->lock);
+ malloc_mutex_lock(&head->lock);
ql_remove(&head->blocks, block, link);
- malloc_mutex_unlock(TSDN_NULL, &head->lock);
+ malloc_mutex_unlock(&head->lock);
}
#endif
diff --git a/deps/jemalloc/src/util.c b/deps/jemalloc/src/util.c
index dd8c23630..4cb0d6c1e 100755..100644
--- a/deps/jemalloc/src/util.c
+++ b/deps/jemalloc/src/util.c
@@ -1,7 +1,3 @@
-/*
- * Define simple versions of assertion macros that won't recurse in case
- * of assertion failures in malloc_*printf().
- */
#define assert(e) do { \
if (config_debug && !(e)) { \
malloc_write("<jemalloc>: Failed assertion\n"); \
@@ -14,7 +10,6 @@
malloc_write("<jemalloc>: Unreachable code reached\n"); \
abort(); \
} \
- unreachable(); \
} while (0)
#define not_implemented() do { \
@@ -49,19 +44,15 @@ static void
wrtmessage(void *cbopaque, const char *s)
{
-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write)
+#ifdef SYS_write
/*
* Use syscall(2) rather than write(2) when possible in order to avoid
* the possibility of memory allocation within libc. This is necessary
* on FreeBSD; most operating systems do not have this problem though.
- *
- * syscall() returns long or int, depending on platform, so capture the
- * unused result in the widest plausible type to avoid compiler
- * warnings.
*/
- UNUSED long result = syscall(SYS_write, STDERR_FILENO, s, strlen(s));
+ UNUSED int result = syscall(SYS_write, STDERR_FILENO, s, strlen(s));
#else
- UNUSED ssize_t result = write(STDERR_FILENO, s, strlen(s));
+ UNUSED int result = write(STDERR_FILENO, s, strlen(s));
#endif
}
@@ -91,7 +82,7 @@ buferror(int err, char *buf, size_t buflen)
#ifdef _WIN32
FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0,
- (LPSTR)buf, (DWORD)buflen, NULL);
+ (LPSTR)buf, buflen, NULL);
return (0);
#elif defined(__GLIBC__) && defined(_GNU_SOURCE)
char *b = strerror_r(err, buf, buflen);
@@ -200,7 +191,7 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base)
p++;
}
if (neg)
- ret = (uintmax_t)(-((intmax_t)ret));
+ ret = -ret;
if (p == ns) {
/* No conversion performed. */
@@ -315,9 +306,10 @@ x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p)
return (s);
}
-size_t
+int
malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
{
+ int ret;
size_t i;
const char *f;
@@ -408,8 +400,6 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
int prec = -1;
int width = -1;
unsigned char len = '?';
- char *s;
- size_t slen;
f++;
/* Flags. */
@@ -500,6 +490,8 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
}
/* Conversion specifier. */
switch (*f) {
+ char *s;
+ size_t slen;
case '%':
/* %% */
APPEND_C(*f);
@@ -585,19 +577,20 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
str[i] = '\0';
else
str[size - 1] = '\0';
+ ret = i;
#undef APPEND_C
#undef APPEND_S
#undef APPEND_PADDED_S
#undef GET_ARG_NUMERIC
- return (i);
+ return (ret);
}
JEMALLOC_FORMAT_PRINTF(3, 4)
-size_t
+int
malloc_snprintf(char *str, size_t size, const char *format, ...)
{
- size_t ret;
+ int ret;
va_list ap;
va_start(ap, format);
@@ -655,12 +648,3 @@ malloc_printf(const char *format, ...)
malloc_vcprintf(NULL, NULL, format, ap);
va_end(ap);
}
-
-/*
- * Restore normal assertion macros, in order to make it possible to compile all
- * C files as a single concatenation.
- */
-#undef assert
-#undef not_reached
-#undef not_implemented
-#include "jemalloc/internal/assert.h"
diff --git a/deps/jemalloc/src/witness.c b/deps/jemalloc/src/witness.c
deleted file mode 100644
index 23753f246..000000000
--- a/deps/jemalloc/src/witness.c
+++ /dev/null
@@ -1,136 +0,0 @@
-#define JEMALLOC_WITNESS_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-void
-witness_init(witness_t *witness, const char *name, witness_rank_t rank,
- witness_comp_t *comp)
-{
-
- witness->name = name;
- witness->rank = rank;
- witness->comp = comp;
-}
-
-#ifdef JEMALLOC_JET
-#undef witness_lock_error
-#define witness_lock_error JEMALLOC_N(n_witness_lock_error)
-#endif
-void
-witness_lock_error(const witness_list_t *witnesses, const witness_t *witness)
-{
- witness_t *w;
-
- malloc_printf("<jemalloc>: Lock rank order reversal:");
- ql_foreach(w, witnesses, link) {
- malloc_printf(" %s(%u)", w->name, w->rank);
- }
- malloc_printf(" %s(%u)\n", witness->name, witness->rank);
- abort();
-}
-#ifdef JEMALLOC_JET
-#undef witness_lock_error
-#define witness_lock_error JEMALLOC_N(witness_lock_error)
-witness_lock_error_t *witness_lock_error = JEMALLOC_N(n_witness_lock_error);
-#endif
-
-#ifdef JEMALLOC_JET
-#undef witness_owner_error
-#define witness_owner_error JEMALLOC_N(n_witness_owner_error)
-#endif
-void
-witness_owner_error(const witness_t *witness)
-{
-
- malloc_printf("<jemalloc>: Should own %s(%u)\n", witness->name,
- witness->rank);
- abort();
-}
-#ifdef JEMALLOC_JET
-#undef witness_owner_error
-#define witness_owner_error JEMALLOC_N(witness_owner_error)
-witness_owner_error_t *witness_owner_error = JEMALLOC_N(n_witness_owner_error);
-#endif
-
-#ifdef JEMALLOC_JET
-#undef witness_not_owner_error
-#define witness_not_owner_error JEMALLOC_N(n_witness_not_owner_error)
-#endif
-void
-witness_not_owner_error(const witness_t *witness)
-{
-
- malloc_printf("<jemalloc>: Should not own %s(%u)\n", witness->name,
- witness->rank);
- abort();
-}
-#ifdef JEMALLOC_JET
-#undef witness_not_owner_error
-#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error)
-witness_not_owner_error_t *witness_not_owner_error =
- JEMALLOC_N(n_witness_not_owner_error);
-#endif
-
-#ifdef JEMALLOC_JET
-#undef witness_lockless_error
-#define witness_lockless_error JEMALLOC_N(n_witness_lockless_error)
-#endif
-void
-witness_lockless_error(const witness_list_t *witnesses)
-{
- witness_t *w;
-
- malloc_printf("<jemalloc>: Should not own any locks:");
- ql_foreach(w, witnesses, link) {
- malloc_printf(" %s(%u)", w->name, w->rank);
- }
- malloc_printf("\n");
- abort();
-}
-#ifdef JEMALLOC_JET
-#undef witness_lockless_error
-#define witness_lockless_error JEMALLOC_N(witness_lockless_error)
-witness_lockless_error_t *witness_lockless_error =
- JEMALLOC_N(n_witness_lockless_error);
-#endif
-
-void
-witnesses_cleanup(tsd_t *tsd)
-{
-
- witness_assert_lockless(tsd_tsdn(tsd));
-
- /* Do nothing. */
-}
-
-void
-witness_fork_cleanup(tsd_t *tsd)
-{
-
- /* Do nothing. */
-}
-
-void
-witness_prefork(tsd_t *tsd)
-{
-
- tsd_witness_fork_set(tsd, true);
-}
-
-void
-witness_postfork_parent(tsd_t *tsd)
-{
-
- tsd_witness_fork_set(tsd, false);
-}
-
-void
-witness_postfork_child(tsd_t *tsd)
-{
-#ifndef JEMALLOC_MUTEX_INIT_CB
- witness_list_t *witnesses;
-
- witnesses = tsd_witnessesp_get(tsd);
- ql_new(witnesses);
-#endif
- tsd_witness_fork_set(tsd, false);
-}
diff --git a/deps/jemalloc/src/zone.c b/deps/jemalloc/src/zone.c
index 0571920e4..12e1734a9 100644
--- a/deps/jemalloc/src/zone.c
+++ b/deps/jemalloc/src/zone.c
@@ -4,7 +4,7 @@
#endif
/*
- * The malloc_default_purgeable_zone() function is only available on >= 10.6.
+ * The malloc_default_purgeable_zone function is only available on >= 10.6.
* We need to check whether it is present at runtime, thus the weak_import.
*/
extern malloc_zone_t *malloc_default_purgeable_zone(void)
@@ -13,9 +13,8 @@ JEMALLOC_ATTR(weak_import);
/******************************************************************************/
/* Data. */
-static malloc_zone_t *default_zone, *purgeable_zone;
-static malloc_zone_t jemalloc_zone;
-static struct malloc_introspection_t jemalloc_zone_introspect;
+static malloc_zone_t zone;
+static struct malloc_introspection_t zone_introspect;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
@@ -57,7 +56,7 @@ zone_size(malloc_zone_t *zone, void *ptr)
* not work in practice, we must check all pointers to assure that they
* reside within a mapped chunk before determining size.
*/
- return (ivsalloc(tsdn_fetch(), ptr, config_prof));
+ return (ivsalloc(ptr, config_prof));
}
static void *
@@ -88,7 +87,7 @@ static void
zone_free(malloc_zone_t *zone, void *ptr)
{
- if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0) {
+ if (ivsalloc(ptr, config_prof) != 0) {
je_free(ptr);
return;
}
@@ -100,7 +99,7 @@ static void *
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
{
- if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0)
+ if (ivsalloc(ptr, config_prof) != 0)
return (je_realloc(ptr, size));
return (realloc(ptr, size));
@@ -122,11 +121,9 @@ zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
static void
zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
{
- size_t alloc_size;
- alloc_size = ivsalloc(tsdn_fetch(), ptr, config_prof);
- if (alloc_size != 0) {
- assert(alloc_size == size);
+ if (ivsalloc(ptr, config_prof) != 0) {
+ assert(ivsalloc(ptr, config_prof) == size);
je_free(ptr);
return;
}
@@ -165,103 +162,89 @@ static void
zone_force_unlock(malloc_zone_t *zone)
{
- /*
- * Call jemalloc_postfork_child() rather than
- * jemalloc_postfork_parent(), because this function is executed by both
- * parent and child. The parent can tolerate having state
- * reinitialized, but the child cannot unlock mutexes that were locked
- * by the parent.
- */
if (isthreaded)
- jemalloc_postfork_child();
+ jemalloc_postfork_parent();
}
-static void
-zone_init(void)
+JEMALLOC_ATTR(constructor)
+void
+register_zone(void)
{
- jemalloc_zone.size = (void *)zone_size;
- jemalloc_zone.malloc = (void *)zone_malloc;
- jemalloc_zone.calloc = (void *)zone_calloc;
- jemalloc_zone.valloc = (void *)zone_valloc;
- jemalloc_zone.free = (void *)zone_free;
- jemalloc_zone.realloc = (void *)zone_realloc;
- jemalloc_zone.destroy = (void *)zone_destroy;
- jemalloc_zone.zone_name = "jemalloc_zone";
- jemalloc_zone.batch_malloc = NULL;
- jemalloc_zone.batch_free = NULL;
- jemalloc_zone.introspect = &jemalloc_zone_introspect;
- jemalloc_zone.version = JEMALLOC_ZONE_VERSION;
+ /*
+ * If something else replaced the system default zone allocator, don't
+ * register jemalloc's.
+ */
+ malloc_zone_t *default_zone = malloc_default_zone();
+ malloc_zone_t *purgeable_zone = NULL;
+ if (!default_zone->zone_name ||
+ strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) {
+ return;
+ }
+
+ zone.size = (void *)zone_size;
+ zone.malloc = (void *)zone_malloc;
+ zone.calloc = (void *)zone_calloc;
+ zone.valloc = (void *)zone_valloc;
+ zone.free = (void *)zone_free;
+ zone.realloc = (void *)zone_realloc;
+ zone.destroy = (void *)zone_destroy;
+ zone.zone_name = "jemalloc_zone";
+ zone.batch_malloc = NULL;
+ zone.batch_free = NULL;
+ zone.introspect = &zone_introspect;
+ zone.version = JEMALLOC_ZONE_VERSION;
#if (JEMALLOC_ZONE_VERSION >= 5)
- jemalloc_zone.memalign = zone_memalign;
+ zone.memalign = zone_memalign;
#endif
#if (JEMALLOC_ZONE_VERSION >= 6)
- jemalloc_zone.free_definite_size = zone_free_definite_size;
+ zone.free_definite_size = zone_free_definite_size;
#endif
#if (JEMALLOC_ZONE_VERSION >= 8)
- jemalloc_zone.pressure_relief = NULL;
+ zone.pressure_relief = NULL;
#endif
- jemalloc_zone_introspect.enumerator = NULL;
- jemalloc_zone_introspect.good_size = (void *)zone_good_size;
- jemalloc_zone_introspect.check = NULL;
- jemalloc_zone_introspect.print = NULL;
- jemalloc_zone_introspect.log = NULL;
- jemalloc_zone_introspect.force_lock = (void *)zone_force_lock;
- jemalloc_zone_introspect.force_unlock = (void *)zone_force_unlock;
- jemalloc_zone_introspect.statistics = NULL;
+ zone_introspect.enumerator = NULL;
+ zone_introspect.good_size = (void *)zone_good_size;
+ zone_introspect.check = NULL;
+ zone_introspect.print = NULL;
+ zone_introspect.log = NULL;
+ zone_introspect.force_lock = (void *)zone_force_lock;
+ zone_introspect.force_unlock = (void *)zone_force_unlock;
+ zone_introspect.statistics = NULL;
#if (JEMALLOC_ZONE_VERSION >= 6)
- jemalloc_zone_introspect.zone_locked = NULL;
+ zone_introspect.zone_locked = NULL;
#endif
#if (JEMALLOC_ZONE_VERSION >= 7)
- jemalloc_zone_introspect.enable_discharge_checking = NULL;
- jemalloc_zone_introspect.disable_discharge_checking = NULL;
- jemalloc_zone_introspect.discharge = NULL;
-# ifdef __BLOCKS__
- jemalloc_zone_introspect.enumerate_discharged_pointers = NULL;
-# else
- jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL;
-# endif
+ zone_introspect.enable_discharge_checking = NULL;
+ zone_introspect.disable_discharge_checking = NULL;
+ zone_introspect.discharge = NULL;
+#ifdef __BLOCKS__
+ zone_introspect.enumerate_discharged_pointers = NULL;
+#else
+ zone_introspect.enumerate_unavailable_without_blocks = NULL;
+#endif
#endif
-}
-
-static malloc_zone_t *
-zone_default_get(void)
-{
- malloc_zone_t **zones = NULL;
- unsigned int num_zones = 0;
/*
- * On OSX 10.12, malloc_default_zone returns a special zone that is not
- * present in the list of registered zones. That zone uses a "lite zone"
- * if one is present (apparently enabled when malloc stack logging is
- * enabled), or the first registered zone otherwise. In practice this
- * means unless malloc stack logging is enabled, the first registered
- * zone is the default. So get the list of zones to get the first one,
- * instead of relying on malloc_default_zone.
+ * The default purgeable zone is created lazily by OSX's libc. It uses
+ * the default zone when it is created for "small" allocations
+ * (< 15 KiB), but assumes the default zone is a scalable_zone. This
+ * obviously fails when the default zone is the jemalloc zone, so
+ * malloc_default_purgeable_zone is called beforehand so that the
+ * default purgeable zone is created when the default zone is still
+ * a scalable_zone. As purgeable zones only exist on >= 10.6, we need
+ * to check for the existence of malloc_default_purgeable_zone() at
+ * run time.
*/
- if (KERN_SUCCESS != malloc_get_all_zones(0, NULL,
- (vm_address_t**)&zones, &num_zones)) {
- /*
- * Reset the value in case the failure happened after it was
- * set.
- */
- num_zones = 0;
- }
-
- if (num_zones)
- return (zones[0]);
-
- return (malloc_default_zone());
-}
+ if (malloc_default_purgeable_zone != NULL)
+ purgeable_zone = malloc_default_purgeable_zone();
-/* As written, this function can only promote jemalloc_zone. */
-static void
-zone_promote(void)
-{
- malloc_zone_t *zone;
+ /* Register the custom zone. At this point it won't be the default. */
+ malloc_zone_register(&zone);
do {
+ default_zone = malloc_default_zone();
/*
* Unregister and reregister the default zone. On OSX >= 10.6,
* unregistering takes the last registered zone and places it
@@ -272,7 +255,6 @@ zone_promote(void)
*/
malloc_zone_unregister(default_zone);
malloc_zone_register(default_zone);
-
/*
* On OSX 10.6, having the default purgeable zone appear before
* the default zone makes some things crash because it thinks it
@@ -284,47 +266,9 @@ zone_promote(void)
* above, i.e. the default zone. Registering it again then puts
* it at the end, obviously after the default zone.
*/
- if (purgeable_zone != NULL) {
+ if (purgeable_zone) {
malloc_zone_unregister(purgeable_zone);
malloc_zone_register(purgeable_zone);
}
-
- zone = zone_default_get();
- } while (zone != &jemalloc_zone);
-}
-
-JEMALLOC_ATTR(constructor)
-void
-zone_register(void)
-{
-
- /*
- * If something else replaced the system default zone allocator, don't
- * register jemalloc's.
- */
- default_zone = zone_default_get();
- if (!default_zone->zone_name || strcmp(default_zone->zone_name,
- "DefaultMallocZone") != 0)
- return;
-
- /*
- * The default purgeable zone is created lazily by OSX's libc. It uses
- * the default zone when it is created for "small" allocations
- * (< 15 KiB), but assumes the default zone is a scalable_zone. This
- * obviously fails when the default zone is the jemalloc zone, so
- * malloc_default_purgeable_zone() is called beforehand so that the
- * default purgeable zone is created when the default zone is still
- * a scalable_zone. As purgeable zones only exist on >= 10.6, we need
- * to check for the existence of malloc_default_purgeable_zone() at
- * run time.
- */
- purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL :
- malloc_default_purgeable_zone();
-
- /* Register the custom zone. At this point it won't be the default. */
- zone_init();
- malloc_zone_register(&jemalloc_zone);
-
- /* Promote the custom zone to be default. */
- zone_promote();
+ } while (malloc_default_zone() != &zone);
}
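
The register_zone() constructor restored above relies on one property of the OS X (>= 10.6) zone API noted in its comments: malloc_zone_unregister() fills the vacated slot with the most recently registered zone, so repeatedly unregistering and re-registering the current default eventually leaves jemalloc's zone in slot 0, i.e. as the default. A simplified, macOS-only sketch of just that promotion loop follows; the real code above additionally re-orders the lazily created purgeable zone on every pass.

#include <malloc/malloc.h>

static void
promote_zone(malloc_zone_t *mine)
{
	malloc_zone_register(mine);		/* appended; not yet the default */
	do {
		malloc_zone_t *dz = malloc_default_zone();
		malloc_zone_unregister(dz);	/* last-registered zone takes dz's slot */
		malloc_zone_register(dz);	/* the old default goes back on the end */
	} while (malloc_default_zone() != mine);
}
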
diff --git a/deps/jemalloc/test/include/test/jemalloc_test.h.in b/deps/jemalloc/test/include/test/jemalloc_test.h.in
index 1f36e4695..455569da4 100644
--- a/deps/jemalloc/test/include/test/jemalloc_test.h.in
+++ b/deps/jemalloc/test/include/test/jemalloc_test.h.in
@@ -11,6 +11,7 @@
#ifdef _WIN32
# include "msvc_compat/strings.h"
#endif
+#include <sys/time.h>
#ifdef _WIN32
# include <windows.h>
@@ -19,6 +20,39 @@
# include <pthread.h>
#endif
+/******************************************************************************/
+/*
+ * Define always-enabled assertion macros, so that test assertions execute even
+ * if assertions are disabled in the library code. These definitions must
+ * exist prior to including "jemalloc/internal/util.h".
+ */
+#define assert(e) do { \
+ if (!(e)) { \
+ malloc_printf( \
+ "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
+ __FILE__, __LINE__, #e); \
+ abort(); \
+ } \
+} while (0)
+
+#define not_reached() do { \
+ malloc_printf( \
+ "<jemalloc>: %s:%d: Unreachable code reached\n", \
+ __FILE__, __LINE__); \
+ abort(); \
+} while (0)
+
+#define not_implemented() do { \
+ malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
+ __FILE__, __LINE__); \
+ abort(); \
+} while (0)
+
+#define assert_not_implemented(e) do { \
+ if (!(e)) \
+ not_implemented(); \
+} while (0)
+
#include "test/jemalloc_test_defs.h"
#ifdef JEMALLOC_OSSPIN
@@ -53,14 +87,6 @@
# include "jemalloc/internal/jemalloc_internal_defs.h"
# include "jemalloc/internal/jemalloc_internal_macros.h"
-static const bool config_debug =
-#ifdef JEMALLOC_DEBUG
- true
-#else
- false
-#endif
- ;
-
# define JEMALLOC_N(n) @private_namespace@##n
# include "jemalloc/internal/private_namespace.h"
@@ -68,7 +94,6 @@ static const bool config_debug =
# define JEMALLOC_H_STRUCTS
# define JEMALLOC_H_EXTERNS
# define JEMALLOC_H_INLINES
-# include "jemalloc/internal/nstime.h"
# include "jemalloc/internal/util.h"
# include "jemalloc/internal/qr.h"
# include "jemalloc/internal/ql.h"
@@ -124,40 +149,3 @@ static const bool config_debug =
#include "test/thd.h"
#define MEXP 19937
#include "test/SFMT.h"
-
-/******************************************************************************/
-/*
- * Define always-enabled assertion macros, so that test assertions execute even
- * if assertions are disabled in the library code.
- */
-#undef assert
-#undef not_reached
-#undef not_implemented
-#undef assert_not_implemented
-
-#define assert(e) do { \
- if (!(e)) { \
- malloc_printf( \
- "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
- __FILE__, __LINE__, #e); \
- abort(); \
- } \
-} while (0)
-
-#define not_reached() do { \
- malloc_printf( \
- "<jemalloc>: %s:%d: Unreachable code reached\n", \
- __FILE__, __LINE__); \
- abort(); \
-} while (0)
-
-#define not_implemented() do { \
- malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
- __FILE__, __LINE__); \
- abort(); \
-} while (0)
-
-#define assert_not_implemented(e) do { \
- if (!(e)) \
- not_implemented(); \
-} while (0)
diff --git a/deps/jemalloc/test/include/test/mtx.h b/deps/jemalloc/test/include/test/mtx.h
index 58afbc3d1..bbe822f54 100644
--- a/deps/jemalloc/test/include/test/mtx.h
+++ b/deps/jemalloc/test/include/test/mtx.h
@@ -8,8 +8,6 @@
typedef struct {
#ifdef _WIN32
CRITICAL_SECTION lock;
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
- os_unfair_lock lock;
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLock lock;
#else
diff --git a/deps/jemalloc/test/include/test/test.h b/deps/jemalloc/test/include/test/test.h
index c8112eb8b..3cf901fc4 100644
--- a/deps/jemalloc/test/include/test/test.h
+++ b/deps/jemalloc/test/include/test/test.h
@@ -311,9 +311,6 @@ label_test_end: \
#define test(...) \
p_test(__VA_ARGS__, NULL)
-#define test_no_malloc_init(...) \
- p_test_no_malloc_init(__VA_ARGS__, NULL)
-
#define test_skip_if(e) do { \
if (e) { \
test_skip("%s:%s:%d: Test skipped: (%s)", \
@@ -327,7 +324,6 @@ void test_fail(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
/* For private use by macros. */
test_status_t p_test(test_t *t, ...);
-test_status_t p_test_no_malloc_init(test_t *t, ...);
void p_test_init(const char *name);
void p_test_fini(void);
void p_test_fail(const char *prefix, const char *message);
diff --git a/deps/jemalloc/test/include/test/timer.h b/deps/jemalloc/test/include/test/timer.h
index ace6191b8..a7fefdfd1 100644
--- a/deps/jemalloc/test/include/test/timer.h
+++ b/deps/jemalloc/test/include/test/timer.h
@@ -1,8 +1,23 @@
/* Simple timer, for use in benchmark reporting. */
+#include <unistd.h>
+#include <sys/time.h>
+
+#define JEMALLOC_CLOCK_GETTIME defined(_POSIX_MONOTONIC_CLOCK) \
+ && _POSIX_MONOTONIC_CLOCK >= 0
+
typedef struct {
- nstime_t t0;
- nstime_t t1;
+#ifdef _WIN32
+ FILETIME ft0;
+ FILETIME ft1;
+#elif JEMALLOC_CLOCK_GETTIME
+ struct timespec ts0;
+ struct timespec ts1;
+ int clock_id;
+#else
+ struct timeval tv0;
+ struct timeval tv1;
+#endif
} timedelta_t;
void timer_start(timedelta_t *timer);
diff --git a/deps/jemalloc/test/integration/MALLOCX_ARENA.c b/deps/jemalloc/test/integration/MALLOCX_ARENA.c
index 910a096fd..30c203ae6 100755..100644
--- a/deps/jemalloc/test/integration/MALLOCX_ARENA.c
+++ b/deps/jemalloc/test/integration/MALLOCX_ARENA.c
@@ -19,8 +19,8 @@ thd_start(void *arg)
size_t sz;
sz = sizeof(arena_ind);
- assert_d_eq(mallctl("arenas.extend", (void *)&arena_ind, &sz, NULL, 0),
- 0, "Error in arenas.extend");
+ assert_d_eq(mallctl("arenas.extend", &arena_ind, &sz, NULL, 0), 0,
+ "Error in arenas.extend");
if (thread_ind % 4 != 3) {
size_t mib[3];
diff --git a/deps/jemalloc/test/integration/aligned_alloc.c b/deps/jemalloc/test/integration/aligned_alloc.c
index 58438421d..609001487 100644
--- a/deps/jemalloc/test/integration/aligned_alloc.c
+++ b/deps/jemalloc/test/integration/aligned_alloc.c
@@ -1,20 +1,9 @@
#include "test/jemalloc_test.h"
#define CHUNK 0x400000
-#define MAXALIGN (((size_t)1) << 23)
-
-/*
- * On systems which can't merge extents, tests that call this function generate
- * a lot of dirty memory very quickly. Purging between cycles mitigates
- * potential OOM on e.g. 32-bit Windows.
- */
-static void
-purge(void)
-{
-
- assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
- "Unexpected mallctl error");
-}
+/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */
+#define MAXALIGN ((size_t)0x2000000LU)
+#define NITER 4
TEST_BEGIN(test_alignment_errors)
{
@@ -85,7 +74,6 @@ TEST_END
TEST_BEGIN(test_alignment_and_size)
{
-#define NITER 4
size_t alignment, size, total;
unsigned i;
void *ps[NITER];
@@ -122,9 +110,7 @@ TEST_BEGIN(test_alignment_and_size)
}
}
}
- purge();
}
-#undef NITER
}
TEST_END
diff --git a/deps/jemalloc/test/integration/allocated.c b/deps/jemalloc/test/integration/allocated.c
index 6ce145b3e..3630e80ce 100755..100644
--- a/deps/jemalloc/test/integration/allocated.c
+++ b/deps/jemalloc/test/integration/allocated.c
@@ -18,14 +18,14 @@ thd_start(void *arg)
size_t sz, usize;
sz = sizeof(a0);
- if ((err = mallctl("thread.allocated", (void *)&a0, &sz, NULL, 0))) {
+ if ((err = mallctl("thread.allocated", &a0, &sz, NULL, 0))) {
if (err == ENOENT)
goto label_ENOENT;
test_fail("%s(): Error in mallctl(): %s", __func__,
strerror(err));
}
sz = sizeof(ap0);
- if ((err = mallctl("thread.allocatedp", (void *)&ap0, &sz, NULL, 0))) {
+ if ((err = mallctl("thread.allocatedp", &ap0, &sz, NULL, 0))) {
if (err == ENOENT)
goto label_ENOENT;
test_fail("%s(): Error in mallctl(): %s", __func__,
@@ -36,15 +36,14 @@ thd_start(void *arg)
"storage");
sz = sizeof(d0);
- if ((err = mallctl("thread.deallocated", (void *)&d0, &sz, NULL, 0))) {
+ if ((err = mallctl("thread.deallocated", &d0, &sz, NULL, 0))) {
if (err == ENOENT)
goto label_ENOENT;
test_fail("%s(): Error in mallctl(): %s", __func__,
strerror(err));
}
sz = sizeof(dp0);
- if ((err = mallctl("thread.deallocatedp", (void *)&dp0, &sz, NULL,
- 0))) {
+ if ((err = mallctl("thread.deallocatedp", &dp0, &sz, NULL, 0))) {
if (err == ENOENT)
goto label_ENOENT;
test_fail("%s(): Error in mallctl(): %s", __func__,
@@ -58,9 +57,9 @@ thd_start(void *arg)
assert_ptr_not_null(p, "Unexpected malloc() error");
sz = sizeof(a1);
- mallctl("thread.allocated", (void *)&a1, &sz, NULL, 0);
+ mallctl("thread.allocated", &a1, &sz, NULL, 0);
sz = sizeof(ap1);
- mallctl("thread.allocatedp", (void *)&ap1, &sz, NULL, 0);
+ mallctl("thread.allocatedp", &ap1, &sz, NULL, 0);
assert_u64_eq(*ap1, a1,
"Dereferenced \"thread.allocatedp\" value should equal "
"\"thread.allocated\" value");
@@ -75,9 +74,9 @@ thd_start(void *arg)
free(p);
sz = sizeof(d1);
- mallctl("thread.deallocated", (void *)&d1, &sz, NULL, 0);
+ mallctl("thread.deallocated", &d1, &sz, NULL, 0);
sz = sizeof(dp1);
- mallctl("thread.deallocatedp", (void *)&dp1, &sz, NULL, 0);
+ mallctl("thread.deallocatedp", &dp1, &sz, NULL, 0);
assert_u64_eq(*dp1, d1,
"Dereferenced \"thread.deallocatedp\" value should equal "
"\"thread.deallocated\" value");
diff --git a/deps/jemalloc/test/integration/chunk.c b/deps/jemalloc/test/integration/chunk.c
index 94cf0025a..af1c9a53e 100644
--- a/deps/jemalloc/test/integration/chunk.c
+++ b/deps/jemalloc/test/integration/chunk.c
@@ -121,10 +121,6 @@ TEST_BEGIN(test_chunk)
{
void *p;
size_t old_size, new_size, large0, large1, huge0, huge1, huge2, sz;
- unsigned arena_ind;
- int flags;
- size_t hooks_mib[3], purge_mib[3];
- size_t hooks_miblen, purge_miblen;
chunk_hooks_t new_hooks = {
chunk_alloc,
chunk_dalloc,
@@ -136,21 +132,11 @@ TEST_BEGIN(test_chunk)
};
bool xallocx_success_a, xallocx_success_b, xallocx_success_c;
- sz = sizeof(unsigned);
- assert_d_eq(mallctl("arenas.extend", (void *)&arena_ind, &sz, NULL, 0),
- 0, "Unexpected mallctl() failure");
- flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
-
/* Install custom chunk hooks. */
- hooks_miblen = sizeof(hooks_mib)/sizeof(size_t);
- assert_d_eq(mallctlnametomib("arena.0.chunk_hooks", hooks_mib,
- &hooks_miblen), 0, "Unexpected mallctlnametomib() failure");
- hooks_mib[1] = (size_t)arena_ind;
old_size = sizeof(chunk_hooks_t);
new_size = sizeof(chunk_hooks_t);
- assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks,
- &old_size, (void *)&new_hooks, new_size), 0,
- "Unexpected chunk_hooks error");
+ assert_d_eq(mallctl("arena.0.chunk_hooks", &old_hooks, &old_size,
+ &new_hooks, new_size), 0, "Unexpected chunk_hooks error");
orig_hooks = old_hooks;
assert_ptr_ne(old_hooks.alloc, chunk_alloc, "Unexpected alloc error");
assert_ptr_ne(old_hooks.dalloc, chunk_dalloc,
@@ -165,63 +151,59 @@ TEST_BEGIN(test_chunk)
/* Get large size classes. */
sz = sizeof(size_t);
- assert_d_eq(mallctl("arenas.lrun.0.size", (void *)&large0, &sz, NULL,
- 0), 0, "Unexpected arenas.lrun.0.size failure");
- assert_d_eq(mallctl("arenas.lrun.1.size", (void *)&large1, &sz, NULL,
- 0), 0, "Unexpected arenas.lrun.1.size failure");
+ assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0,
+ "Unexpected arenas.lrun.0.size failure");
+ assert_d_eq(mallctl("arenas.lrun.1.size", &large1, &sz, NULL, 0), 0,
+ "Unexpected arenas.lrun.1.size failure");
/* Get huge size classes. */
- assert_d_eq(mallctl("arenas.hchunk.0.size", (void *)&huge0, &sz, NULL,
- 0), 0, "Unexpected arenas.hchunk.0.size failure");
- assert_d_eq(mallctl("arenas.hchunk.1.size", (void *)&huge1, &sz, NULL,
- 0), 0, "Unexpected arenas.hchunk.1.size failure");
- assert_d_eq(mallctl("arenas.hchunk.2.size", (void *)&huge2, &sz, NULL,
- 0), 0, "Unexpected arenas.hchunk.2.size failure");
+ assert_d_eq(mallctl("arenas.hchunk.0.size", &huge0, &sz, NULL, 0), 0,
+ "Unexpected arenas.hchunk.0.size failure");
+ assert_d_eq(mallctl("arenas.hchunk.1.size", &huge1, &sz, NULL, 0), 0,
+ "Unexpected arenas.hchunk.1.size failure");
+ assert_d_eq(mallctl("arenas.hchunk.2.size", &huge2, &sz, NULL, 0), 0,
+ "Unexpected arenas.hchunk.2.size failure");
/* Test dalloc/decommit/purge cascade. */
- purge_miblen = sizeof(purge_mib)/sizeof(size_t);
- assert_d_eq(mallctlnametomib("arena.0.purge", purge_mib, &purge_miblen),
- 0, "Unexpected mallctlnametomib() failure");
- purge_mib[1] = (size_t)arena_ind;
do_dalloc = false;
do_decommit = false;
- p = mallocx(huge0 * 2, flags);
+ p = mallocx(huge0 * 2, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error");
did_dalloc = false;
did_decommit = false;
did_purge = false;
did_split = false;
- xallocx_success_a = (xallocx(p, huge0, 0, flags) == huge0);
- assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
- 0, "Unexpected arena.%u.purge error", arena_ind);
+ xallocx_success_a = (xallocx(p, huge0, 0, 0) == huge0);
+ assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected arena.0.purge error");
if (xallocx_success_a) {
assert_true(did_dalloc, "Expected dalloc");
assert_false(did_decommit, "Unexpected decommit");
assert_true(did_purge, "Expected purge");
}
assert_true(did_split, "Expected split");
- dallocx(p, flags);
+ dallocx(p, 0);
do_dalloc = true;
/* Test decommit/commit and observe split/merge. */
do_dalloc = false;
do_decommit = true;
- p = mallocx(huge0 * 2, flags);
+ p = mallocx(huge0 * 2, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error");
did_decommit = false;
did_commit = false;
did_split = false;
did_merge = false;
- xallocx_success_b = (xallocx(p, huge0, 0, flags) == huge0);
- assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
- 0, "Unexpected arena.%u.purge error", arena_ind);
+ xallocx_success_b = (xallocx(p, huge0, 0, 0) == huge0);
+ assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected arena.0.purge error");
if (xallocx_success_b)
assert_true(did_split, "Expected split");
- xallocx_success_c = (xallocx(p, huge0 * 2, 0, flags) == huge0 * 2);
+ xallocx_success_c = (xallocx(p, huge0 * 2, 0, 0) == huge0 * 2);
assert_b_eq(did_decommit, did_commit, "Expected decommit/commit match");
if (xallocx_success_b && xallocx_success_c)
assert_true(did_merge, "Expected merge");
- dallocx(p, flags);
+ dallocx(p, 0);
do_dalloc = true;
do_decommit = false;
@@ -232,43 +214,43 @@ TEST_BEGIN(test_chunk)
* successful xallocx() from size=huge2 to size=huge1 is
* guaranteed to leave trailing purgeable memory.
*/
- p = mallocx(huge2, flags);
+ p = mallocx(huge2, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error");
did_purge = false;
- assert_zu_eq(xallocx(p, huge1, 0, flags), huge1,
+ assert_zu_eq(xallocx(p, huge1, 0, 0), huge1,
"Unexpected xallocx() failure");
assert_true(did_purge, "Expected purge");
- dallocx(p, flags);
+ dallocx(p, 0);
}
/* Test decommit for large allocations. */
do_decommit = true;
- p = mallocx(large1, flags);
+ p = mallocx(large1, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error");
- assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
- 0, "Unexpected arena.%u.purge error", arena_ind);
+ assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected arena.0.purge error");
did_decommit = false;
- assert_zu_eq(xallocx(p, large0, 0, flags), large0,
+ assert_zu_eq(xallocx(p, large0, 0, 0), large0,
"Unexpected xallocx() failure");
- assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
- 0, "Unexpected arena.%u.purge error", arena_ind);
+ assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected arena.0.purge error");
did_commit = false;
- assert_zu_eq(xallocx(p, large1, 0, flags), large1,
+ assert_zu_eq(xallocx(p, large1, 0, 0), large1,
"Unexpected xallocx() failure");
assert_b_eq(did_decommit, did_commit, "Expected decommit/commit match");
- dallocx(p, flags);
+ dallocx(p, 0);
do_decommit = false;
/* Make sure non-huge allocation succeeds. */
- p = mallocx(42, flags);
+ p = mallocx(42, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error");
- dallocx(p, flags);
+ dallocx(p, 0);
/* Restore chunk hooks. */
- assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, NULL, NULL,
- (void *)&old_hooks, new_size), 0, "Unexpected chunk_hooks error");
- assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks,
- &old_size, NULL, 0), 0, "Unexpected chunk_hooks error");
+ assert_d_eq(mallctl("arena.0.chunk_hooks", NULL, NULL, &old_hooks,
+ new_size), 0, "Unexpected chunk_hooks error");
+ assert_d_eq(mallctl("arena.0.chunk_hooks", &old_hooks, &old_size,
+ NULL, 0), 0, "Unexpected chunk_hooks error");
assert_ptr_eq(old_hooks.alloc, orig_hooks.alloc,
"Unexpected alloc error");
assert_ptr_eq(old_hooks.dalloc, orig_hooks.dalloc,
diff --git a/deps/jemalloc/test/integration/mallocx.c b/deps/jemalloc/test/integration/mallocx.c
index d709eb301..6253175d6 100755..100644
--- a/deps/jemalloc/test/integration/mallocx.c
+++ b/deps/jemalloc/test/integration/mallocx.c
@@ -1,9 +1,5 @@
#include "test/jemalloc_test.h"
-#ifdef JEMALLOC_FILL
-const char *malloc_conf = "junk:false";
-#endif
-
static unsigned
get_nsizes_impl(const char *cmd)
{
@@ -11,7 +7,7 @@ get_nsizes_impl(const char *cmd)
size_t z;
z = sizeof(unsigned);
- assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
+ assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0,
"Unexpected mallctl(\"%s\", ...) failure", cmd);
return (ret);
@@ -37,7 +33,7 @@ get_size_impl(const char *cmd, size_t ind)
0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
mib[2] = ind;
z = sizeof(size_t);
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
+ assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0),
0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
return (ret);
@@ -50,84 +46,43 @@ get_huge_size(size_t ind)
return (get_size_impl("arenas.hchunk.0.size", ind));
}
-/*
- * On systems which can't merge extents, tests that call this function generate
- * a lot of dirty memory very quickly. Purging between cycles mitigates
- * potential OOM on e.g. 32-bit Windows.
- */
-static void
-purge(void)
-{
-
- assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
- "Unexpected mallctl error");
-}
-
-TEST_BEGIN(test_overflow)
+TEST_BEGIN(test_oom)
{
- size_t hugemax;
+ size_t hugemax, size, alignment;
hugemax = get_huge_size(get_nhuge()-1);
- assert_ptr_null(mallocx(hugemax+1, 0),
- "Expected OOM for mallocx(size=%#zx, 0)", hugemax+1);
-
- assert_ptr_null(mallocx(ZU(PTRDIFF_MAX)+1, 0),
- "Expected OOM for mallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);
-
- assert_ptr_null(mallocx(SIZE_T_MAX, 0),
- "Expected OOM for mallocx(size=%#zx, 0)", SIZE_T_MAX);
-
- assert_ptr_null(mallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)),
- "Expected OOM for mallocx(size=1, MALLOCX_ALIGN(%#zx))",
- ZU(PTRDIFF_MAX)+1);
-}
-TEST_END
-
-TEST_BEGIN(test_oom)
-{
- size_t hugemax;
- bool oom;
- void *ptrs[3];
- unsigned i;
-
/*
- * It should be impossible to allocate three objects that each consume
- * nearly half the virtual address space.
+ * It should be impossible to allocate two objects that each consume
+ * more than half the virtual address space.
*/
- hugemax = get_huge_size(get_nhuge()-1);
- oom = false;
- for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
- ptrs[i] = mallocx(hugemax, 0);
- if (ptrs[i] == NULL)
- oom = true;
- }
- assert_true(oom,
- "Expected OOM during series of calls to mallocx(size=%zu, 0)",
- hugemax);
- for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
- if (ptrs[i] != NULL)
- dallocx(ptrs[i], 0);
+ {
+ void *p;
+
+ p = mallocx(hugemax, 0);
+ if (p != NULL) {
+ assert_ptr_null(mallocx(hugemax, 0),
+ "Expected OOM for mallocx(size=%#zx, 0)", hugemax);
+ dallocx(p, 0);
+ }
}
- purge();
#if LG_SIZEOF_PTR == 3
- assert_ptr_null(mallocx(0x8000000000000000ULL,
- MALLOCX_ALIGN(0x8000000000000000ULL)),
- "Expected OOM for mallocx()");
- assert_ptr_null(mallocx(0x8000000000000000ULL,
- MALLOCX_ALIGN(0x80000000)),
- "Expected OOM for mallocx()");
+ size = ZU(0x8000000000000000);
+ alignment = ZU(0x8000000000000000);
#else
- assert_ptr_null(mallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)),
- "Expected OOM for mallocx()");
+ size = ZU(0x80000000);
+ alignment = ZU(0x80000000);
#endif
+ assert_ptr_null(mallocx(size, MALLOCX_ALIGN(alignment)),
+ "Expected OOM for mallocx(size=%#zx, MALLOCX_ALIGN(%#zx)", size,
+ alignment);
}
TEST_END
TEST_BEGIN(test_basic)
{
-#define MAXSZ (((size_t)1) << 23)
+#define MAXSZ (((size_t)1) << 26)
size_t sz;
for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) {
@@ -136,28 +91,23 @@ TEST_BEGIN(test_basic)
nsz = nallocx(sz, 0);
assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
p = mallocx(sz, 0);
- assert_ptr_not_null(p,
- "Unexpected mallocx(size=%zx, flags=0) error", sz);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
rsz = sallocx(p, 0);
assert_zu_ge(rsz, sz, "Real size smaller than expected");
assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
dallocx(p, 0);
p = mallocx(sz, 0);
- assert_ptr_not_null(p,
- "Unexpected mallocx(size=%zx, flags=0) error", sz);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
dallocx(p, 0);
nsz = nallocx(sz, MALLOCX_ZERO);
assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
p = mallocx(sz, MALLOCX_ZERO);
- assert_ptr_not_null(p,
- "Unexpected mallocx(size=%zx, flags=MALLOCX_ZERO) error",
- nsz);
+ assert_ptr_not_null(p, "Unexpected mallocx() error");
rsz = sallocx(p, 0);
assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
dallocx(p, 0);
- purge();
}
#undef MAXSZ
}
@@ -165,7 +115,7 @@ TEST_END
TEST_BEGIN(test_alignment_and_size)
{
-#define MAXALIGN (((size_t)1) << 23)
+#define MAXALIGN (((size_t)1) << 25)
#define NITER 4
size_t nsz, rsz, sz, alignment, total;
unsigned i;
@@ -215,7 +165,6 @@ TEST_BEGIN(test_alignment_and_size)
}
}
}
- purge();
}
#undef MAXALIGN
#undef NITER
@@ -227,7 +176,6 @@ main(void)
{
return (test(
- test_overflow,
test_oom,
test_basic,
test_alignment_and_size));
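
test_basic above exercises the contract between nallocx(), mallocx(), and sallocx(): nallocx() predicts the usable size that mallocx() with the same size and flags will actually deliver, and sallocx() reports it after the fact. A short standalone sketch of that invariant (the request size is arbitrary):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	size_t req = 4097;
	size_t nsz = nallocx(req, 0);	/* predicted usable size (0 on error) */
	void *p = mallocx(req, 0);
	size_t rsz;

	if (p == NULL)
		return (1);
	rsz = sallocx(p, 0);		/* real usable size of the allocation */
	/* Expected: rsz >= req and rsz == nsz for identical size/flags. */
	printf("requested=%zu predicted=%zu real=%zu\n", req, nsz, rsz);
	dallocx(p, 0);
	return (0);
}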
diff --git a/deps/jemalloc/test/integration/overflow.c b/deps/jemalloc/test/integration/overflow.c
index 84a35652c..303d9b2d3 100755..100644
--- a/deps/jemalloc/test/integration/overflow.c
+++ b/deps/jemalloc/test/integration/overflow.c
@@ -8,8 +8,8 @@ TEST_BEGIN(test_overflow)
void *p;
sz = sizeof(unsigned);
- assert_d_eq(mallctl("arenas.nhchunks", (void *)&nhchunks, &sz, NULL, 0),
- 0, "Unexpected mallctl() error");
+ assert_d_eq(mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0), 0,
+ "Unexpected mallctl() error");
miblen = sizeof(mib) / sizeof(size_t);
assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0,
@@ -17,8 +17,8 @@ TEST_BEGIN(test_overflow)
mib[2] = nhchunks - 1;
sz = sizeof(size_t);
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz,
- NULL, 0), 0, "Unexpected mallctlbymib() error");
+ assert_d_eq(mallctlbymib(mib, miblen, &max_size_class, &sz, NULL, 0), 0,
+ "Unexpected mallctlbymib() error");
assert_ptr_null(malloc(max_size_class + 1),
"Expected OOM due to over-sized allocation request");
diff --git a/deps/jemalloc/test/integration/posix_memalign.c b/deps/jemalloc/test/integration/posix_memalign.c
index e22e10200..19741c6cb 100644
--- a/deps/jemalloc/test/integration/posix_memalign.c
+++ b/deps/jemalloc/test/integration/posix_memalign.c
@@ -1,20 +1,9 @@
#include "test/jemalloc_test.h"
#define CHUNK 0x400000
-#define MAXALIGN (((size_t)1) << 23)
-
-/*
- * On systems which can't merge extents, tests that call this function generate
- * a lot of dirty memory very quickly. Purging between cycles mitigates
- * potential OOM on e.g. 32-bit Windows.
- */
-static void
-purge(void)
-{
-
- assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
- "Unexpected mallctl error");
-}
+/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */
+#define MAXALIGN ((size_t)0x2000000LU)
+#define NITER 4
TEST_BEGIN(test_alignment_errors)
{
@@ -77,7 +66,6 @@ TEST_END
TEST_BEGIN(test_alignment_and_size)
{
-#define NITER 4
size_t alignment, size, total;
unsigned i;
int err;
@@ -116,9 +104,7 @@ TEST_BEGIN(test_alignment_and_size)
}
}
}
- purge();
}
-#undef NITER
}
TEST_END
diff --git a/deps/jemalloc/test/integration/rallocx.c b/deps/jemalloc/test/integration/rallocx.c
index 506bf1c90..be1b27b73 100755..100644
--- a/deps/jemalloc/test/integration/rallocx.c
+++ b/deps/jemalloc/test/integration/rallocx.c
@@ -1,51 +1,5 @@
#include "test/jemalloc_test.h"
-static unsigned
-get_nsizes_impl(const char *cmd)
-{
- unsigned ret;
- size_t z;
-
- z = sizeof(unsigned);
- assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
- "Unexpected mallctl(\"%s\", ...) failure", cmd);
-
- return (ret);
-}
-
-static unsigned
-get_nhuge(void)
-{
-
- return (get_nsizes_impl("arenas.nhchunks"));
-}
-
-static size_t
-get_size_impl(const char *cmd, size_t ind)
-{
- size_t ret;
- size_t z;
- size_t mib[4];
- size_t miblen = 4;
-
- z = sizeof(size_t);
- assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
- 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
- mib[2] = ind;
- z = sizeof(size_t);
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
- 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
-
- return (ret);
-}
-
-static size_t
-get_huge_size(size_t ind)
-{
-
- return (get_size_impl("arenas.hchunk.0.size", ind));
-}
-
TEST_BEGIN(test_grow_and_shrink)
{
void *p, *q;
@@ -184,22 +138,22 @@ TEST_END
TEST_BEGIN(test_lg_align_and_zero)
{
void *p, *q;
- unsigned lg_align;
- size_t sz;
+ size_t lg_align, sz;
#define MAX_LG_ALIGN 25
#define MAX_VALIDATE (ZU(1) << 22)
- lg_align = 0;
+ lg_align = ZU(0);
p = mallocx(1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
assert_ptr_not_null(p, "Unexpected mallocx() error");
for (lg_align++; lg_align <= MAX_LG_ALIGN; lg_align++) {
q = rallocx(p, 1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
assert_ptr_not_null(q,
- "Unexpected rallocx() error for lg_align=%u", lg_align);
+ "Unexpected rallocx() error for lg_align=%zu", lg_align);
assert_ptr_null(
(void *)((uintptr_t)q & ((ZU(1) << lg_align)-1)),
- "%p inadequately aligned for lg_align=%u", q, lg_align);
+ "%p inadequately aligned for lg_align=%zu",
+ q, lg_align);
sz = sallocx(q, 0);
if ((sz << 1) <= MAX_VALIDATE) {
assert_false(validate_fill(q, 0, 0, sz),
@@ -219,33 +173,6 @@ TEST_BEGIN(test_lg_align_and_zero)
}
TEST_END
-TEST_BEGIN(test_overflow)
-{
- size_t hugemax;
- void *p;
-
- hugemax = get_huge_size(get_nhuge()-1);
-
- p = mallocx(1, 0);
- assert_ptr_not_null(p, "Unexpected mallocx() failure");
-
- assert_ptr_null(rallocx(p, hugemax+1, 0),
- "Expected OOM for rallocx(p, size=%#zx, 0)", hugemax+1);
-
- assert_ptr_null(rallocx(p, ZU(PTRDIFF_MAX)+1, 0),
- "Expected OOM for rallocx(p, size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);
-
- assert_ptr_null(rallocx(p, SIZE_T_MAX, 0),
- "Expected OOM for rallocx(p, size=%#zx, 0)", SIZE_T_MAX);
-
- assert_ptr_null(rallocx(p, 1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)),
- "Expected OOM for rallocx(p, size=1, MALLOCX_ALIGN(%#zx))",
- ZU(PTRDIFF_MAX)+1);
-
- dallocx(p, 0);
-}
-TEST_END
-
int
main(void)
{
@@ -254,6 +181,5 @@ main(void)
test_grow_and_shrink,
test_zero,
test_align,
- test_lg_align_and_zero,
- test_overflow));
+ test_lg_align_and_zero));
}
diff --git a/deps/jemalloc/test/integration/sdallocx.c b/deps/jemalloc/test/integration/sdallocx.c
index f92e0589c..b84817d76 100644
--- a/deps/jemalloc/test/integration/sdallocx.c
+++ b/deps/jemalloc/test/integration/sdallocx.c
@@ -1,7 +1,7 @@
#include "test/jemalloc_test.h"
-#define MAXALIGN (((size_t)1) << 22)
-#define NITER 3
+#define MAXALIGN (((size_t)1) << 25)
+#define NITER 4
TEST_BEGIN(test_basic)
{
diff --git a/deps/jemalloc/test/integration/thread_arena.c b/deps/jemalloc/test/integration/thread_arena.c
index 7a35a6351..67be53513 100755..100644
--- a/deps/jemalloc/test/integration/thread_arena.c
+++ b/deps/jemalloc/test/integration/thread_arena.c
@@ -16,8 +16,8 @@ thd_start(void *arg)
free(p);
size = sizeof(arena_ind);
- if ((err = mallctl("thread.arena", (void *)&arena_ind, &size,
- (void *)&main_arena_ind, sizeof(main_arena_ind)))) {
+ if ((err = mallctl("thread.arena", &arena_ind, &size, &main_arena_ind,
+ sizeof(main_arena_ind)))) {
char buf[BUFERROR_BUF];
buferror(err, buf, sizeof(buf));
@@ -25,8 +25,7 @@ thd_start(void *arg)
}
size = sizeof(arena_ind);
- if ((err = mallctl("thread.arena", (void *)&arena_ind, &size, NULL,
- 0))) {
+ if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) {
char buf[BUFERROR_BUF];
buferror(err, buf, sizeof(buf));
@@ -51,8 +50,7 @@ TEST_BEGIN(test_thread_arena)
assert_ptr_not_null(p, "Error in malloc()");
size = sizeof(arena_ind);
- if ((err = mallctl("thread.arena", (void *)&arena_ind, &size, NULL,
- 0))) {
+ if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) {
char buf[BUFERROR_BUF];
buferror(err, buf, sizeof(buf));
diff --git a/deps/jemalloc/test/integration/thread_tcache_enabled.c b/deps/jemalloc/test/integration/thread_tcache_enabled.c
index 2c2825e19..f4e89c682 100755..100644
--- a/deps/jemalloc/test/integration/thread_tcache_enabled.c
+++ b/deps/jemalloc/test/integration/thread_tcache_enabled.c
@@ -16,8 +16,7 @@ thd_start(void *arg)
bool e0, e1;
sz = sizeof(bool);
- if ((err = mallctl("thread.tcache.enabled", (void *)&e0, &sz, NULL,
- 0))) {
+ if ((err = mallctl("thread.tcache.enabled", &e0, &sz, NULL, 0))) {
if (err == ENOENT) {
assert_false(config_tcache,
"ENOENT should only be returned if tcache is "
@@ -28,53 +27,53 @@ thd_start(void *arg)
if (e0) {
e1 = false;
- assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
- (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz),
+ 0, "Unexpected mallctl() error");
assert_true(e0, "tcache should be enabled");
}
e1 = true;
- assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
- (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
assert_false(e0, "tcache should be disabled");
e1 = true;
- assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
- (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
assert_true(e0, "tcache should be enabled");
e1 = false;
- assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
- (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
assert_true(e0, "tcache should be enabled");
e1 = false;
- assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
- (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
assert_false(e0, "tcache should be disabled");
free(malloc(1));
e1 = true;
- assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
- (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
assert_false(e0, "tcache should be disabled");
free(malloc(1));
e1 = true;
- assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
- (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
assert_true(e0, "tcache should be enabled");
free(malloc(1));
e1 = false;
- assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
- (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
assert_true(e0, "tcache should be enabled");
free(malloc(1));
e1 = false;
- assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
- (void *)&e1, sz), 0, "Unexpected mallctl() error");
+ assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0,
+ "Unexpected mallctl() error");
assert_false(e0, "tcache should be disabled");
free(malloc(1));
diff --git a/deps/jemalloc/test/integration/xallocx.c b/deps/jemalloc/test/integration/xallocx.c
index 67e0a0e71..373625219 100755..100644
--- a/deps/jemalloc/test/integration/xallocx.c
+++ b/deps/jemalloc/test/integration/xallocx.c
@@ -1,28 +1,5 @@
#include "test/jemalloc_test.h"
-#ifdef JEMALLOC_FILL
-const char *malloc_conf = "junk:false";
-#endif
-
-/*
- * Use a separate arena for xallocx() extension/contraction tests so that
- * internal allocation e.g. by heap profiling can't interpose allocations where
- * xallocx() would ordinarily be able to extend.
- */
-static unsigned
-arena_ind(void)
-{
- static unsigned ind = 0;
-
- if (ind == 0) {
- size_t sz = sizeof(ind);
- assert_d_eq(mallctl("arenas.extend", (void *)&ind, &sz, NULL,
- 0), 0, "Unexpected mallctl failure creating arena");
- }
-
- return (ind);
-}
-
TEST_BEGIN(test_same_size)
{
void *p;
@@ -78,7 +55,7 @@ get_nsizes_impl(const char *cmd)
size_t z;
z = sizeof(unsigned);
- assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
+ assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0,
"Unexpected mallctl(\"%s\", ...) failure", cmd);
return (ret);
@@ -118,7 +95,7 @@ get_size_impl(const char *cmd, size_t ind)
0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
mib[2] = ind;
z = sizeof(size_t);
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
+ assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0),
0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
return (ret);
@@ -241,7 +218,6 @@ TEST_END
TEST_BEGIN(test_extra_large)
{
- int flags = MALLOCX_ARENA(arena_ind());
size_t smallmax, large0, large1, large2, huge0, hugemax;
void *p;
@@ -253,122 +229,121 @@ TEST_BEGIN(test_extra_large)
huge0 = get_huge_size(0);
hugemax = get_huge_size(get_nhuge()-1);
- p = mallocx(large2, flags);
+ p = mallocx(large2, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error");
- assert_zu_eq(xallocx(p, large2, 0, flags), large2,
+ assert_zu_eq(xallocx(p, large2, 0, 0), large2,
"Unexpected xallocx() behavior");
/* Test size decrease with zero extra. */
- assert_zu_eq(xallocx(p, large0, 0, flags), large0,
+ assert_zu_eq(xallocx(p, large0, 0, 0), large0,
"Unexpected xallocx() behavior");
- assert_zu_eq(xallocx(p, smallmax, 0, flags), large0,
+ assert_zu_eq(xallocx(p, smallmax, 0, 0), large0,
"Unexpected xallocx() behavior");
- assert_zu_eq(xallocx(p, large2, 0, flags), large2,
+ assert_zu_eq(xallocx(p, large2, 0, 0), large2,
"Unexpected xallocx() behavior");
/* Test size decrease with non-zero extra. */
- assert_zu_eq(xallocx(p, large0, large2 - large0, flags), large2,
+ assert_zu_eq(xallocx(p, large0, large2 - large0, 0), large2,
"Unexpected xallocx() behavior");
- assert_zu_eq(xallocx(p, large1, large2 - large1, flags), large2,
+ assert_zu_eq(xallocx(p, large1, large2 - large1, 0), large2,
"Unexpected xallocx() behavior");
- assert_zu_eq(xallocx(p, large0, large1 - large0, flags), large1,
+ assert_zu_eq(xallocx(p, large0, large1 - large0, 0), large1,
"Unexpected xallocx() behavior");
- assert_zu_eq(xallocx(p, smallmax, large0 - smallmax, flags), large0,
+ assert_zu_eq(xallocx(p, smallmax, large0 - smallmax, 0), large0,
"Unexpected xallocx() behavior");
- assert_zu_eq(xallocx(p, large0, 0, flags), large0,
+ assert_zu_eq(xallocx(p, large0, 0, 0), large0,
"Unexpected xallocx() behavior");
/* Test size increase with zero extra. */
- assert_zu_eq(xallocx(p, large2, 0, flags), large2,
+ assert_zu_eq(xallocx(p, large2, 0, 0), large2,
"Unexpected xallocx() behavior");
- assert_zu_eq(xallocx(p, huge0, 0, flags), large2,
+ assert_zu_eq(xallocx(p, huge0, 0, 0), large2,
"Unexpected xallocx() behavior");
- assert_zu_eq(xallocx(p, large0, 0, flags), large0,
+ assert_zu_eq(xallocx(p, large0, 0, 0), large0,
"Unexpected xallocx() behavior");
/* Test size increase with non-zero extra. */
- assert_zu_lt(xallocx(p, large0, huge0 - large0, flags), huge0,
+ assert_zu_lt(xallocx(p, large0, huge0 - large0, 0), huge0,
"Unexpected xallocx() behavior");
- assert_zu_eq(xallocx(p, large0, 0, flags), large0,
+ assert_zu_eq(xallocx(p, large0, 0, 0), large0,
"Unexpected xallocx() behavior");
/* Test size increase with non-zero extra. */
- assert_zu_eq(xallocx(p, large0, large2 - large0, flags), large2,
+ assert_zu_eq(xallocx(p, large0, large2 - large0, 0), large2,
"Unexpected xallocx() behavior");
- assert_zu_eq(xallocx(p, large2, 0, flags), large2,
+ assert_zu_eq(xallocx(p, large2, 0, 0), large2,
"Unexpected xallocx() behavior");
/* Test size+extra overflow. */
- assert_zu_lt(xallocx(p, large2, hugemax - large2 + 1, flags), huge0,
+ assert_zu_lt(xallocx(p, large2, hugemax - large2 + 1, 0), huge0,
"Unexpected xallocx() behavior");
- dallocx(p, flags);
+ dallocx(p, 0);
}
TEST_END
TEST_BEGIN(test_extra_huge)
{
- int flags = MALLOCX_ARENA(arena_ind());
- size_t largemax, huge1, huge2, huge3, hugemax;
+ size_t largemax, huge0, huge1, huge2, hugemax;
void *p;
/* Get size classes. */
largemax = get_large_size(get_nlarge()-1);
+ huge0 = get_huge_size(0);
huge1 = get_huge_size(1);
huge2 = get_huge_size(2);
- huge3 = get_huge_size(3);
hugemax = get_huge_size(get_nhuge()-1);
- p = mallocx(huge3, flags);
+ p = mallocx(huge2, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error");
- assert_zu_eq(xallocx(p, huge3, 0, flags), huge3,
+ assert_zu_eq(xallocx(p, huge2, 0, 0), huge2,
"Unexpected xallocx() behavior");
/* Test size decrease with zero extra. */
- assert_zu_ge(xallocx(p, huge1, 0, flags), huge1,
+ assert_zu_ge(xallocx(p, huge0, 0, 0), huge0,
"Unexpected xallocx() behavior");
- assert_zu_ge(xallocx(p, largemax, 0, flags), huge1,
+ assert_zu_ge(xallocx(p, largemax, 0, 0), huge0,
"Unexpected xallocx() behavior");
- assert_zu_eq(xallocx(p, huge3, 0, flags), huge3,
+ assert_zu_eq(xallocx(p, huge2, 0, 0), huge2,
"Unexpected xallocx() behavior");
/* Test size decrease with non-zero extra. */
- assert_zu_eq(xallocx(p, huge1, huge3 - huge1, flags), huge3,
+ assert_zu_eq(xallocx(p, huge0, huge2 - huge0, 0), huge2,
"Unexpected xallocx() behavior");
- assert_zu_eq(xallocx(p, huge2, huge3 - huge2, flags), huge3,
+ assert_zu_eq(xallocx(p, huge1, huge2 - huge1, 0), huge2,
"Unexpected xallocx() behavior");
- assert_zu_eq(xallocx(p, huge1, huge2 - huge1, flags), huge2,
+ assert_zu_eq(xallocx(p, huge0, huge1 - huge0, 0), huge1,
"Unexpected xallocx() behavior");
- assert_zu_ge(xallocx(p, largemax, huge1 - largemax, flags), huge1,
+ assert_zu_ge(xallocx(p, largemax, huge0 - largemax, 0), huge0,
"Unexpected xallocx() behavior");
- assert_zu_ge(xallocx(p, huge1, 0, flags), huge1,
+ assert_zu_ge(xallocx(p, huge0, 0, 0), huge0,
"Unexpected xallocx() behavior");
/* Test size increase with zero extra. */
- assert_zu_le(xallocx(p, huge3, 0, flags), huge3,
+ assert_zu_le(xallocx(p, huge2, 0, 0), huge2,
"Unexpected xallocx() behavior");
- assert_zu_le(xallocx(p, hugemax+1, 0, flags), huge3,
+ assert_zu_le(xallocx(p, hugemax+1, 0, 0), huge2,
"Unexpected xallocx() behavior");
- assert_zu_ge(xallocx(p, huge1, 0, flags), huge1,
+ assert_zu_ge(xallocx(p, huge0, 0, 0), huge0,
"Unexpected xallocx() behavior");
/* Test size increase with non-zero extra. */
- assert_zu_le(xallocx(p, huge1, SIZE_T_MAX - huge1, flags), hugemax,
+ assert_zu_le(xallocx(p, huge0, SIZE_T_MAX - huge0, 0), hugemax,
"Unexpected xallocx() behavior");
- assert_zu_ge(xallocx(p, huge1, 0, flags), huge1,
+ assert_zu_ge(xallocx(p, huge0, 0, 0), huge0,
"Unexpected xallocx() behavior");
/* Test size increase with non-zero extra. */
- assert_zu_le(xallocx(p, huge1, huge3 - huge1, flags), huge3,
+ assert_zu_le(xallocx(p, huge0, huge2 - huge0, 0), huge2,
"Unexpected xallocx() behavior");
- assert_zu_eq(xallocx(p, huge3, 0, flags), huge3,
+ assert_zu_eq(xallocx(p, huge2, 0, 0), huge2,
"Unexpected xallocx() behavior");
/* Test size+extra overflow. */
- assert_zu_le(xallocx(p, huge3, hugemax - huge3 + 1, flags), hugemax,
+ assert_zu_le(xallocx(p, huge2, hugemax - huge2 + 1, 0), hugemax,
"Unexpected xallocx() behavior");
- dallocx(p, flags);
+ dallocx(p, 0);
}
TEST_END
@@ -413,13 +388,12 @@ validate_fill(const void *p, uint8_t c, size_t offset, size_t len)
static void
test_zero(size_t szmin, size_t szmax)
{
- int flags = MALLOCX_ARENA(arena_ind()) | MALLOCX_ZERO;
size_t sz, nsz;
void *p;
#define FILL_BYTE 0x7aU
sz = szmax;
- p = mallocx(sz, flags);
+ p = mallocx(sz, MALLOCX_ZERO);
assert_ptr_not_null(p, "Unexpected mallocx() error");
assert_false(validate_fill(p, 0x00, 0, sz), "Memory not filled: sz=%zu",
sz);
@@ -434,14 +408,14 @@ test_zero(size_t szmin, size_t szmax)
/* Shrink in place so that we can expect growing in place to succeed. */
sz = szmin;
- assert_zu_eq(xallocx(p, sz, 0, flags), sz,
+ assert_zu_eq(xallocx(p, sz, 0, MALLOCX_ZERO), sz,
"Unexpected xallocx() error");
assert_false(validate_fill(p, FILL_BYTE, 0, sz),
"Memory not filled: sz=%zu", sz);
for (sz = szmin; sz < szmax; sz = nsz) {
- nsz = nallocx(sz+1, flags);
- assert_zu_eq(xallocx(p, sz+1, 0, flags), nsz,
+ nsz = nallocx(sz+1, MALLOCX_ZERO);
+ assert_zu_eq(xallocx(p, sz+1, 0, MALLOCX_ZERO), nsz,
"Unexpected xallocx() failure");
assert_false(validate_fill(p, FILL_BYTE, 0, sz),
"Memory not filled: sz=%zu", sz);
@@ -452,7 +426,7 @@ test_zero(size_t szmin, size_t szmax)
"Memory not filled: nsz=%zu", nsz);
}
- dallocx(p, flags);
+ dallocx(p, 0);
}
TEST_BEGIN(test_zero_large)
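
The xallocx() assertions above rely on its in-place-only semantics: the allocation is never moved, and the return value is the resulting usable size, which only reaches the request when the resize actually succeeded. A brief sketch of that usage pattern (sizes are illustrative):

#include <jemalloc/jemalloc.h>

static void
grow_in_place_example(void)
{
	void *p = mallocx(4096, 0);
	size_t got;

	if (p == NULL)
		return;
	got = xallocx(p, 8192, 0, 0);	/* try to grow without moving p */
	if (got >= 8192) {
		/* Grew in place; p still refers to the same allocation. */
	} else {
		/* Could not grow in place; p stays valid at its previous size. */
	}
	dallocx(p, 0);
}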
diff --git a/deps/jemalloc/test/src/mtx.c b/deps/jemalloc/test/src/mtx.c
index 8a5dfdd99..73bd02f6d 100644
--- a/deps/jemalloc/test/src/mtx.c
+++ b/deps/jemalloc/test/src/mtx.c
@@ -11,8 +11,6 @@ mtx_init(mtx_t *mtx)
#ifdef _WIN32
if (!InitializeCriticalSectionAndSpinCount(&mtx->lock, _CRT_SPINCOUNT))
return (true);
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
- mtx->lock = OS_UNFAIR_LOCK_INIT;
#elif (defined(JEMALLOC_OSSPIN))
mtx->lock = 0;
#else
@@ -35,7 +33,6 @@ mtx_fini(mtx_t *mtx)
{
#ifdef _WIN32
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
#elif (defined(JEMALLOC_OSSPIN))
#else
pthread_mutex_destroy(&mtx->lock);
@@ -48,8 +45,6 @@ mtx_lock(mtx_t *mtx)
#ifdef _WIN32
EnterCriticalSection(&mtx->lock);
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
- os_unfair_lock_lock(&mtx->lock);
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLockLock(&mtx->lock);
#else
@@ -63,8 +58,6 @@ mtx_unlock(mtx_t *mtx)
#ifdef _WIN32
LeaveCriticalSection(&mtx->lock);
-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
- os_unfair_lock_unlock(&mtx->lock);
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLockUnlock(&mtx->lock);
#else
diff --git a/deps/jemalloc/test/src/test.c b/deps/jemalloc/test/src/test.c
index d70cc7501..8173614cf 100644
--- a/deps/jemalloc/test/src/test.c
+++ b/deps/jemalloc/test/src/test.c
@@ -60,30 +60,32 @@ p_test_fini(void)
malloc_printf("%s: %s\n", test_name, test_status_string(test_status));
}
-static test_status_t
-p_test_impl(bool do_malloc_init, test_t *t, va_list ap)
+test_status_t
+p_test(test_t *t, ...)
{
test_status_t ret;
+ va_list ap;
- if (do_malloc_init) {
- /*
- * Make sure initialization occurs prior to running tests.
- * Tests are special because they may use internal facilities
- * prior to triggering initialization as a side effect of
- * calling into the public API.
- */
- if (nallocx(1, 0) == 0) {
- malloc_printf("Initialization error");
- return (test_status_fail);
- }
+ /*
+ * Make sure initialization occurs prior to running tests. Tests are
+ * special because they may use internal facilities prior to triggering
+ * initialization as a side effect of calling into the public API. This
+ * is a final safety that works even if jemalloc_constructor() doesn't
+ * run, as for MSVC builds.
+ */
+ if (nallocx(1, 0) == 0) {
+ malloc_printf("Initialization error");
+ return (test_status_fail);
}
ret = test_status_pass;
+ va_start(ap, t);
for (; t != NULL; t = va_arg(ap, test_t *)) {
t();
if (test_status > ret)
ret = test_status;
}
+ va_end(ap);
malloc_printf("--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n",
test_status_string(test_status_pass),
@@ -96,34 +98,6 @@ p_test_impl(bool do_malloc_init, test_t *t, va_list ap)
return (ret);
}
-test_status_t
-p_test(test_t *t, ...)
-{
- test_status_t ret;
- va_list ap;
-
- ret = test_status_pass;
- va_start(ap, t);
- ret = p_test_impl(true, t, ap);
- va_end(ap);
-
- return (ret);
-}
-
-test_status_t
-p_test_no_malloc_init(test_t *t, ...)
-{
- test_status_t ret;
- va_list ap;
-
- ret = test_status_pass;
- va_start(ap, t);
- ret = p_test_impl(false, t, ap);
- va_end(ap);
-
- return (ret);
-}
-
void
p_test_fail(const char *prefix, const char *message)
{
diff --git a/deps/jemalloc/test/src/timer.c b/deps/jemalloc/test/src/timer.c
index 3c7e63a26..0c93abaf9 100644
--- a/deps/jemalloc/test/src/timer.c
+++ b/deps/jemalloc/test/src/timer.c
@@ -4,26 +4,50 @@ void
timer_start(timedelta_t *timer)
{
- nstime_init(&timer->t0, 0);
- nstime_update(&timer->t0);
+#ifdef _WIN32
+ GetSystemTimeAsFileTime(&timer->ft0);
+#elif JEMALLOC_CLOCK_GETTIME
+ if (sysconf(_SC_MONOTONIC_CLOCK) <= 0)
+ timer->clock_id = CLOCK_REALTIME;
+ else
+ timer->clock_id = CLOCK_MONOTONIC;
+ clock_gettime(timer->clock_id, &timer->ts0);
+#else
+ gettimeofday(&timer->tv0, NULL);
+#endif
}
void
timer_stop(timedelta_t *timer)
{
- nstime_copy(&timer->t1, &timer->t0);
- nstime_update(&timer->t1);
+#ifdef _WIN32
+ GetSystemTimeAsFileTime(&timer->ft0);
+#elif JEMALLOC_CLOCK_GETTIME
+ clock_gettime(timer->clock_id, &timer->ts1);
+#else
+ gettimeofday(&timer->tv1, NULL);
+#endif
}
uint64_t
timer_usec(const timedelta_t *timer)
{
- nstime_t delta;
- nstime_copy(&delta, &timer->t1);
- nstime_subtract(&delta, &timer->t0);
- return (nstime_ns(&delta) / 1000);
+#ifdef _WIN32
+ uint64_t t0, t1;
+ t0 = (((uint64_t)timer->ft0.dwHighDateTime) << 32) |
+ timer->ft0.dwLowDateTime;
+ t1 = (((uint64_t)timer->ft1.dwHighDateTime) << 32) |
+ timer->ft1.dwLowDateTime;
+ return ((t1 - t0) / 10);
+#elif JEMALLOC_CLOCK_GETTIME
+ return (((timer->ts1.tv_sec - timer->ts0.tv_sec) * 1000000) +
+ (timer->ts1.tv_nsec - timer->ts0.tv_nsec) / 1000);
+#else
+ return (((timer->tv1.tv_sec - timer->tv0.tv_sec) * 1000000) +
+ timer->tv1.tv_usec - timer->tv0.tv_usec);
+#endif
}
void
@@ -32,8 +56,9 @@ timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen)
uint64_t t0 = timer_usec(a);
uint64_t t1 = timer_usec(b);
uint64_t mult;
- size_t i = 0;
- size_t j, n;
+ unsigned i = 0;
+ unsigned j;
+ int n;
/* Whole. */
n = malloc_snprintf(&buf[i], buflen-i, "%"FMTu64, t0 / t1);
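
When neither Windows FILETIME nor a POSIX monotonic clock is available, the restored timer falls back to gettimeofday(), and timer_usec() reduces the two samples to elapsed microseconds. A hypothetical standalone helper showing the same arithmetic:

#include <stdint.h>
#include <sys/time.h>

/* Elapsed microseconds between two gettimeofday() samples (t1 taken after t0),
 * mirroring the fallback branch of timer_usec() above. */
static uint64_t
tv_usec_between(const struct timeval *t0, const struct timeval *t1)
{
	int64_t us = (int64_t)(t1->tv_sec - t0->tv_sec) * 1000000 +
	    (t1->tv_usec - t0->tv_usec);

	return ((uint64_t)us);
}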
diff --git a/deps/jemalloc/test/stress/microbench.c b/deps/jemalloc/test/stress/microbench.c
index 7dc45f89c..ee39fea7f 100644
--- a/deps/jemalloc/test/stress/microbench.c
+++ b/deps/jemalloc/test/stress/microbench.c
@@ -1,8 +1,7 @@
#include "test/jemalloc_test.h"
JEMALLOC_INLINE_C void
-time_func(timedelta_t *timer, uint64_t nwarmup, uint64_t niter,
- void (*func)(void))
+time_func(timedelta_t *timer, uint64_t nwarmup, uint64_t niter, void (*func)(void))
{
uint64_t i;
diff --git a/deps/jemalloc/test/unit/a0.c b/deps/jemalloc/test/unit/a0.c
deleted file mode 100644
index b9ba45a3d..000000000
--- a/deps/jemalloc/test/unit/a0.c
+++ /dev/null
@@ -1,19 +0,0 @@
-#include "test/jemalloc_test.h"
-
-TEST_BEGIN(test_a0)
-{
- void *p;
-
- p = a0malloc(1);
- assert_ptr_not_null(p, "Unexpected a0malloc() error");
- a0dalloc(p);
-}
-TEST_END
-
-int
-main(void)
-{
-
- return (test_no_malloc_init(
- test_a0));
-}
diff --git a/deps/jemalloc/test/unit/arena_reset.c b/deps/jemalloc/test/unit/arena_reset.c
deleted file mode 100755
index adf9baa5d..000000000
--- a/deps/jemalloc/test/unit/arena_reset.c
+++ /dev/null
@@ -1,159 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#ifdef JEMALLOC_PROF
-const char *malloc_conf = "prof:true,lg_prof_sample:0";
-#endif
-
-static unsigned
-get_nsizes_impl(const char *cmd)
-{
- unsigned ret;
- size_t z;
-
- z = sizeof(unsigned);
- assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
- "Unexpected mallctl(\"%s\", ...) failure", cmd);
-
- return (ret);
-}
-
-static unsigned
-get_nsmall(void)
-{
-
- return (get_nsizes_impl("arenas.nbins"));
-}
-
-static unsigned
-get_nlarge(void)
-{
-
- return (get_nsizes_impl("arenas.nlruns"));
-}
-
-static unsigned
-get_nhuge(void)
-{
-
- return (get_nsizes_impl("arenas.nhchunks"));
-}
-
-static size_t
-get_size_impl(const char *cmd, size_t ind)
-{
- size_t ret;
- size_t z;
- size_t mib[4];
- size_t miblen = 4;
-
- z = sizeof(size_t);
- assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
- 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
- mib[2] = ind;
- z = sizeof(size_t);
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
- 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
-
- return (ret);
-}
-
-static size_t
-get_small_size(size_t ind)
-{
-
- return (get_size_impl("arenas.bin.0.size", ind));
-}
-
-static size_t
-get_large_size(size_t ind)
-{
-
- return (get_size_impl("arenas.lrun.0.size", ind));
-}
-
-static size_t
-get_huge_size(size_t ind)
-{
-
- return (get_size_impl("arenas.hchunk.0.size", ind));
-}
-
-TEST_BEGIN(test_arena_reset)
-{
-#define NHUGE 4
- unsigned arena_ind, nsmall, nlarge, nhuge, nptrs, i;
- size_t sz, miblen;
- void **ptrs;
- int flags;
- size_t mib[3];
- tsdn_t *tsdn;
-
- test_skip_if((config_valgrind && unlikely(in_valgrind)) || (config_fill
- && unlikely(opt_quarantine)));
-
- sz = sizeof(unsigned);
- assert_d_eq(mallctl("arenas.extend", (void *)&arena_ind, &sz, NULL, 0),
- 0, "Unexpected mallctl() failure");
-
- flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
-
- nsmall = get_nsmall();
- nlarge = get_nlarge();
- nhuge = get_nhuge() > NHUGE ? NHUGE : get_nhuge();
- nptrs = nsmall + nlarge + nhuge;
- ptrs = (void **)malloc(nptrs * sizeof(void *));
- assert_ptr_not_null(ptrs, "Unexpected malloc() failure");
-
- /* Allocate objects with a wide range of sizes. */
- for (i = 0; i < nsmall; i++) {
- sz = get_small_size(i);
- ptrs[i] = mallocx(sz, flags);
- assert_ptr_not_null(ptrs[i],
- "Unexpected mallocx(%zu, %#x) failure", sz, flags);
- }
- for (i = 0; i < nlarge; i++) {
- sz = get_large_size(i);
- ptrs[nsmall + i] = mallocx(sz, flags);
- assert_ptr_not_null(ptrs[i],
- "Unexpected mallocx(%zu, %#x) failure", sz, flags);
- }
- for (i = 0; i < nhuge; i++) {
- sz = get_huge_size(i);
- ptrs[nsmall + nlarge + i] = mallocx(sz, flags);
- assert_ptr_not_null(ptrs[i],
- "Unexpected mallocx(%zu, %#x) failure", sz, flags);
- }
-
- tsdn = tsdn_fetch();
-
- /* Verify allocations. */
- for (i = 0; i < nptrs; i++) {
- assert_zu_gt(ivsalloc(tsdn, ptrs[i], false), 0,
- "Allocation should have queryable size");
- }
-
- /* Reset. */
- miblen = sizeof(mib)/sizeof(size_t);
- assert_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 0,
- "Unexpected mallctlnametomib() failure");
- mib[1] = (size_t)arena_ind;
- assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
- "Unexpected mallctlbymib() failure");
-
- /* Verify allocations no longer exist. */
- for (i = 0; i < nptrs; i++) {
- assert_zu_eq(ivsalloc(tsdn, ptrs[i], false), 0,
- "Allocation should no longer exist");
- }
-
- free(ptrs);
-}
-TEST_END
-
-int
-main(void)
-{
-
- return (test(
- test_arena_reset));
-}
diff --git a/deps/jemalloc/test/unit/bitmap.c b/deps/jemalloc/test/unit/bitmap.c
index a2dd54630..7da583d85 100644
--- a/deps/jemalloc/test/unit/bitmap.c
+++ b/deps/jemalloc/test/unit/bitmap.c
@@ -6,11 +6,7 @@ TEST_BEGIN(test_bitmap_size)
prev_size = 0;
for (i = 1; i <= BITMAP_MAXBITS; i++) {
- bitmap_info_t binfo;
- size_t size;
-
- bitmap_info_init(&binfo, i);
- size = bitmap_size(&binfo);
+ size_t size = bitmap_size(i);
assert_true(size >= prev_size,
"Bitmap size is smaller than expected");
prev_size = size;
@@ -27,8 +23,8 @@ TEST_BEGIN(test_bitmap_init)
bitmap_info_init(&binfo, i);
{
size_t j;
- bitmap_t *bitmap = (bitmap_t *)malloc(
- bitmap_size(&binfo));
+ bitmap_t *bitmap = (bitmap_t *)malloc(sizeof(bitmap_t) *
+ bitmap_info_ngroups(&binfo));
bitmap_init(bitmap, &binfo);
for (j = 0; j < i; j++) {
@@ -50,8 +46,8 @@ TEST_BEGIN(test_bitmap_set)
bitmap_info_init(&binfo, i);
{
size_t j;
- bitmap_t *bitmap = (bitmap_t *)malloc(
- bitmap_size(&binfo));
+ bitmap_t *bitmap = (bitmap_t *)malloc(sizeof(bitmap_t) *
+ bitmap_info_ngroups(&binfo));
bitmap_init(bitmap, &binfo);
for (j = 0; j < i; j++)
@@ -73,8 +69,8 @@ TEST_BEGIN(test_bitmap_unset)
bitmap_info_init(&binfo, i);
{
size_t j;
- bitmap_t *bitmap = (bitmap_t *)malloc(
- bitmap_size(&binfo));
+ bitmap_t *bitmap = (bitmap_t *)malloc(sizeof(bitmap_t) *
+ bitmap_info_ngroups(&binfo));
bitmap_init(bitmap, &binfo);
for (j = 0; j < i; j++)
@@ -101,9 +97,9 @@ TEST_BEGIN(test_bitmap_sfu)
bitmap_info_t binfo;
bitmap_info_init(&binfo, i);
{
- size_t j;
- bitmap_t *bitmap = (bitmap_t *)malloc(
- bitmap_size(&binfo));
+ ssize_t j;
+ bitmap_t *bitmap = (bitmap_t *)malloc(sizeof(bitmap_t) *
+ bitmap_info_ngroups(&binfo));
bitmap_init(bitmap, &binfo);
/* Iteratively set bits starting at the beginning. */
@@ -119,7 +115,7 @@ TEST_BEGIN(test_bitmap_sfu)
* Iteratively unset bits starting at the end, and
* verify that bitmap_sfu() reaches the unset bits.
*/
- for (j = i - 1; j < i; j--) { /* (i..0] */
+ for (j = i - 1; j >= 0; j--) {
bitmap_unset(bitmap, &binfo, j);
assert_zd_eq(bitmap_sfu(bitmap, &binfo), j,
"First unset bit should the bit previously "
diff --git a/deps/jemalloc/test/unit/ckh.c b/deps/jemalloc/test/unit/ckh.c
index 2cbc22688..b11759599 100644
--- a/deps/jemalloc/test/unit/ckh.c
+++ b/deps/jemalloc/test/unit/ckh.c
@@ -7,8 +7,8 @@ TEST_BEGIN(test_new_delete)
tsd = tsd_fetch();
- assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash,
- ckh_string_keycomp), "Unexpected ckh_new() error");
+ assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, ckh_string_keycomp),
+ "Unexpected ckh_new() error");
ckh_delete(tsd, &ckh);
assert_false(ckh_new(tsd, &ckh, 3, ckh_pointer_hash,
@@ -32,8 +32,8 @@ TEST_BEGIN(test_count_insert_search_remove)
tsd = tsd_fetch();
- assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash,
- ckh_string_keycomp), "Unexpected ckh_new() error");
+ assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, ckh_string_keycomp),
+ "Unexpected ckh_new() error");
assert_zu_eq(ckh_count(&ckh), 0,
"ckh_count() should return %zu, but it returned %zu", ZU(0),
ckh_count(&ckh));
diff --git a/deps/jemalloc/test/unit/decay.c b/deps/jemalloc/test/unit/decay.c
deleted file mode 100755
index 5af8f8074..000000000
--- a/deps/jemalloc/test/unit/decay.c
+++ /dev/null
@@ -1,374 +0,0 @@
-#include "test/jemalloc_test.h"
-
-const char *malloc_conf = "purge:decay,decay_time:1";
-
-static nstime_monotonic_t *nstime_monotonic_orig;
-static nstime_update_t *nstime_update_orig;
-
-static unsigned nupdates_mock;
-static nstime_t time_mock;
-static bool monotonic_mock;
-
-static bool
-nstime_monotonic_mock(void)
-{
-
- return (monotonic_mock);
-}
-
-static bool
-nstime_update_mock(nstime_t *time)
-{
-
- nupdates_mock++;
- if (monotonic_mock)
- nstime_copy(time, &time_mock);
- return (!monotonic_mock);
-}
-
-TEST_BEGIN(test_decay_ticks)
-{
- ticker_t *decay_ticker;
- unsigned tick0, tick1;
- size_t sz, huge0, large0;
- void *p;
-
- test_skip_if(opt_purge != purge_mode_decay);
-
- decay_ticker = decay_ticker_get(tsd_fetch(), 0);
- assert_ptr_not_null(decay_ticker,
- "Unexpected failure getting decay ticker");
-
- sz = sizeof(size_t);
- assert_d_eq(mallctl("arenas.hchunk.0.size", (void *)&huge0, &sz, NULL,
- 0), 0, "Unexpected mallctl failure");
- assert_d_eq(mallctl("arenas.lrun.0.size", (void *)&large0, &sz, NULL,
- 0), 0, "Unexpected mallctl failure");
-
- /*
- * Test the standard APIs using a huge size class, since we can't
- * control tcache interactions (except by completely disabling tcache
- * for the entire test program).
- */
-
- /* malloc(). */
- tick0 = ticker_read(decay_ticker);
- p = malloc(huge0);
- assert_ptr_not_null(p, "Unexpected malloc() failure");
- tick1 = ticker_read(decay_ticker);
- assert_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()");
- /* free(). */
- tick0 = ticker_read(decay_ticker);
- free(p);
- tick1 = ticker_read(decay_ticker);
- assert_u32_ne(tick1, tick0, "Expected ticker to tick during free()");
-
- /* calloc(). */
- tick0 = ticker_read(decay_ticker);
- p = calloc(1, huge0);
- assert_ptr_not_null(p, "Unexpected calloc() failure");
- tick1 = ticker_read(decay_ticker);
- assert_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()");
- free(p);
-
- /* posix_memalign(). */
- tick0 = ticker_read(decay_ticker);
- assert_d_eq(posix_memalign(&p, sizeof(size_t), huge0), 0,
- "Unexpected posix_memalign() failure");
- tick1 = ticker_read(decay_ticker);
- assert_u32_ne(tick1, tick0,
- "Expected ticker to tick during posix_memalign()");
- free(p);
-
- /* aligned_alloc(). */
- tick0 = ticker_read(decay_ticker);
- p = aligned_alloc(sizeof(size_t), huge0);
- assert_ptr_not_null(p, "Unexpected aligned_alloc() failure");
- tick1 = ticker_read(decay_ticker);
- assert_u32_ne(tick1, tick0,
- "Expected ticker to tick during aligned_alloc()");
- free(p);
-
- /* realloc(). */
- /* Allocate. */
- tick0 = ticker_read(decay_ticker);
- p = realloc(NULL, huge0);
- assert_ptr_not_null(p, "Unexpected realloc() failure");
- tick1 = ticker_read(decay_ticker);
- assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
- /* Reallocate. */
- tick0 = ticker_read(decay_ticker);
- p = realloc(p, huge0);
- assert_ptr_not_null(p, "Unexpected realloc() failure");
- tick1 = ticker_read(decay_ticker);
- assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
- /* Deallocate. */
- tick0 = ticker_read(decay_ticker);
- realloc(p, 0);
- tick1 = ticker_read(decay_ticker);
- assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
-
- /*
- * Test the *allocx() APIs using huge, large, and small size classes,
- * with tcache explicitly disabled.
- */
- {
- unsigned i;
- size_t allocx_sizes[3];
- allocx_sizes[0] = huge0;
- allocx_sizes[1] = large0;
- allocx_sizes[2] = 1;
-
- for (i = 0; i < sizeof(allocx_sizes) / sizeof(size_t); i++) {
- sz = allocx_sizes[i];
-
- /* mallocx(). */
- tick0 = ticker_read(decay_ticker);
- p = mallocx(sz, MALLOCX_TCACHE_NONE);
- assert_ptr_not_null(p, "Unexpected mallocx() failure");
- tick1 = ticker_read(decay_ticker);
- assert_u32_ne(tick1, tick0,
- "Expected ticker to tick during mallocx() (sz=%zu)",
- sz);
- /* rallocx(). */
- tick0 = ticker_read(decay_ticker);
- p = rallocx(p, sz, MALLOCX_TCACHE_NONE);
- assert_ptr_not_null(p, "Unexpected rallocx() failure");
- tick1 = ticker_read(decay_ticker);
- assert_u32_ne(tick1, tick0,
- "Expected ticker to tick during rallocx() (sz=%zu)",
- sz);
- /* xallocx(). */
- tick0 = ticker_read(decay_ticker);
- xallocx(p, sz, 0, MALLOCX_TCACHE_NONE);
- tick1 = ticker_read(decay_ticker);
- assert_u32_ne(tick1, tick0,
- "Expected ticker to tick during xallocx() (sz=%zu)",
- sz);
- /* dallocx(). */
- tick0 = ticker_read(decay_ticker);
- dallocx(p, MALLOCX_TCACHE_NONE);
- tick1 = ticker_read(decay_ticker);
- assert_u32_ne(tick1, tick0,
- "Expected ticker to tick during dallocx() (sz=%zu)",
- sz);
- /* sdallocx(). */
- p = mallocx(sz, MALLOCX_TCACHE_NONE);
- assert_ptr_not_null(p, "Unexpected mallocx() failure");
- tick0 = ticker_read(decay_ticker);
- sdallocx(p, sz, MALLOCX_TCACHE_NONE);
- tick1 = ticker_read(decay_ticker);
- assert_u32_ne(tick1, tick0,
- "Expected ticker to tick during sdallocx() "
- "(sz=%zu)", sz);
- }
- }
-
- /*
- * Test tcache fill/flush interactions for large and small size classes,
- * using an explicit tcache.
- */
- if (config_tcache) {
- unsigned tcache_ind, i;
- size_t tcache_sizes[2];
- tcache_sizes[0] = large0;
- tcache_sizes[1] = 1;
-
- sz = sizeof(unsigned);
- assert_d_eq(mallctl("tcache.create", (void *)&tcache_ind, &sz,
- NULL, 0), 0, "Unexpected mallctl failure");
-
- for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) {
- sz = tcache_sizes[i];
-
- /* tcache fill. */
- tick0 = ticker_read(decay_ticker);
- p = mallocx(sz, MALLOCX_TCACHE(tcache_ind));
- assert_ptr_not_null(p, "Unexpected mallocx() failure");
- tick1 = ticker_read(decay_ticker);
- assert_u32_ne(tick1, tick0,
- "Expected ticker to tick during tcache fill "
- "(sz=%zu)", sz);
- /* tcache flush. */
- dallocx(p, MALLOCX_TCACHE(tcache_ind));
- tick0 = ticker_read(decay_ticker);
- assert_d_eq(mallctl("tcache.flush", NULL, NULL,
- (void *)&tcache_ind, sizeof(unsigned)), 0,
- "Unexpected mallctl failure");
- tick1 = ticker_read(decay_ticker);
- assert_u32_ne(tick1, tick0,
- "Expected ticker to tick during tcache flush "
- "(sz=%zu)", sz);
- }
- }
-}
-TEST_END
-
-TEST_BEGIN(test_decay_ticker)
-{
-#define NPS 1024
- int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
- void *ps[NPS];
- uint64_t epoch;
- uint64_t npurge0 = 0;
- uint64_t npurge1 = 0;
- size_t sz, large;
- unsigned i, nupdates0;
- nstime_t time, decay_time, deadline;
-
- test_skip_if(opt_purge != purge_mode_decay);
-
- /*
- * Allocate a bunch of large objects, pause the clock, deallocate the
- * objects, restore the clock, then [md]allocx() in a tight loop to
- * verify the ticker triggers purging.
- */
-
- if (config_tcache) {
- size_t tcache_max;
-
- sz = sizeof(size_t);
- assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max,
- &sz, NULL, 0), 0, "Unexpected mallctl failure");
- large = nallocx(tcache_max + 1, flags);
- } else {
- sz = sizeof(size_t);
- assert_d_eq(mallctl("arenas.lrun.0.size", (void *)&large, &sz,
- NULL, 0), 0, "Unexpected mallctl failure");
- }
-
- assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
- "Unexpected mallctl failure");
- assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
- sizeof(uint64_t)), 0, "Unexpected mallctl failure");
- sz = sizeof(uint64_t);
- assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge0, &sz,
- NULL, 0), config_stats ? 0 : ENOENT, "Unexpected mallctl result");
-
- for (i = 0; i < NPS; i++) {
- ps[i] = mallocx(large, flags);
- assert_ptr_not_null(ps[i], "Unexpected mallocx() failure");
- }
-
- nupdates_mock = 0;
- nstime_init(&time_mock, 0);
- nstime_update(&time_mock);
- monotonic_mock = true;
-
- nstime_monotonic_orig = nstime_monotonic;
- nstime_update_orig = nstime_update;
- nstime_monotonic = nstime_monotonic_mock;
- nstime_update = nstime_update_mock;
-
- for (i = 0; i < NPS; i++) {
- dallocx(ps[i], flags);
- nupdates0 = nupdates_mock;
- assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
- "Unexpected arena.0.decay failure");
- assert_u_gt(nupdates_mock, nupdates0,
- "Expected nstime_update() to be called");
- }
-
- nstime_monotonic = nstime_monotonic_orig;
- nstime_update = nstime_update_orig;
-
- nstime_init(&time, 0);
- nstime_update(&time);
- nstime_init2(&decay_time, opt_decay_time, 0);
- nstime_copy(&deadline, &time);
- nstime_add(&deadline, &decay_time);
- do {
- for (i = 0; i < DECAY_NTICKS_PER_UPDATE / 2; i++) {
- void *p = mallocx(1, flags);
- assert_ptr_not_null(p, "Unexpected mallocx() failure");
- dallocx(p, flags);
- }
- assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
- sizeof(uint64_t)), 0, "Unexpected mallctl failure");
- sz = sizeof(uint64_t);
- assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge1,
- &sz, NULL, 0), config_stats ? 0 : ENOENT,
- "Unexpected mallctl result");
-
- nstime_update(&time);
- } while (nstime_compare(&time, &deadline) <= 0 && npurge1 == npurge0);
-
- if (config_stats)
- assert_u64_gt(npurge1, npurge0, "Expected purging to occur");
-#undef NPS
-}
-TEST_END
-
-TEST_BEGIN(test_decay_nonmonotonic)
-{
-#define NPS (SMOOTHSTEP_NSTEPS + 1)
- int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
- void *ps[NPS];
- uint64_t epoch;
- uint64_t npurge0 = 0;
- uint64_t npurge1 = 0;
- size_t sz, large0;
- unsigned i, nupdates0;
-
- test_skip_if(opt_purge != purge_mode_decay);
-
- sz = sizeof(size_t);
- assert_d_eq(mallctl("arenas.lrun.0.size", (void *)&large0, &sz, NULL,
- 0), 0, "Unexpected mallctl failure");
-
- assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
- "Unexpected mallctl failure");
- assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
- sizeof(uint64_t)), 0, "Unexpected mallctl failure");
- sz = sizeof(uint64_t);
- assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge0, &sz,
- NULL, 0), config_stats ? 0 : ENOENT, "Unexpected mallctl result");
-
- nupdates_mock = 0;
- nstime_init(&time_mock, 0);
- nstime_update(&time_mock);
- monotonic_mock = false;
-
- nstime_monotonic_orig = nstime_monotonic;
- nstime_update_orig = nstime_update;
- nstime_monotonic = nstime_monotonic_mock;
- nstime_update = nstime_update_mock;
-
- for (i = 0; i < NPS; i++) {
- ps[i] = mallocx(large0, flags);
- assert_ptr_not_null(ps[i], "Unexpected mallocx() failure");
- }
-
- for (i = 0; i < NPS; i++) {
- dallocx(ps[i], flags);
- nupdates0 = nupdates_mock;
- assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
- "Unexpected arena.0.decay failure");
- assert_u_gt(nupdates_mock, nupdates0,
- "Expected nstime_update() to be called");
- }
-
- assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
- sizeof(uint64_t)), 0, "Unexpected mallctl failure");
- sz = sizeof(uint64_t);
- assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge1, &sz,
- NULL, 0), config_stats ? 0 : ENOENT, "Unexpected mallctl result");
-
- if (config_stats)
- assert_u64_eq(npurge0, npurge1, "Unexpected purging occurred");
-
- nstime_monotonic = nstime_monotonic_orig;
- nstime_update = nstime_update_orig;
-#undef NPS
-}
-TEST_END
-
-int
-main(void)
-{
-
- return (test(
- test_decay_ticks,
- test_decay_ticker,
- test_decay_nonmonotonic));
-}
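
The decay.c tests removed above exercise the time-based ("decay") purging mode and its arena.<i>.decay / opt.decay_time controls, none of which exist in the 4.0.3 tree this revert restores. A minimal sketch of probing for that support at runtime through the public mallctl() API (the control name is the one the deleted test reads; on the reverted tree the call is expected to fail with ENOENT):

    #include <stddef.h>
    #include <stdio.h>
    #include <sys/types.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
        ssize_t decay_time;
        size_t sz = sizeof(decay_time);
        int err = mallctl("opt.decay_time", &decay_time, &sz, NULL, 0);

        if (err == 0)
            printf("decay purging available, opt.decay_time=%zd\n", decay_time);
        else
            printf("no decay purging (mallctl error %d)\n", err);
        return (0);
    }
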
diff --git a/deps/jemalloc/test/unit/fork.c b/deps/jemalloc/test/unit/fork.c
deleted file mode 100644
index c530797c4..000000000
--- a/deps/jemalloc/test/unit/fork.c
+++ /dev/null
@@ -1,64 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#ifndef _WIN32
-#include <sys/wait.h>
-#endif
-
-TEST_BEGIN(test_fork)
-{
-#ifndef _WIN32
- void *p;
- pid_t pid;
-
- p = malloc(1);
- assert_ptr_not_null(p, "Unexpected malloc() failure");
-
- pid = fork();
-
- free(p);
-
- p = malloc(64);
- assert_ptr_not_null(p, "Unexpected malloc() failure");
- free(p);
-
- if (pid == -1) {
- /* Error. */
- test_fail("Unexpected fork() failure");
- } else if (pid == 0) {
- /* Child. */
- _exit(0);
- } else {
- int status;
-
- /* Parent. */
- while (true) {
- if (waitpid(pid, &status, 0) == -1)
- test_fail("Unexpected waitpid() failure");
- if (WIFSIGNALED(status)) {
- test_fail("Unexpected child termination due to "
- "signal %d", WTERMSIG(status));
- break;
- }
- if (WIFEXITED(status)) {
- if (WEXITSTATUS(status) != 0) {
- test_fail(
- "Unexpected child exit value %d",
- WEXITSTATUS(status));
- }
- break;
- }
- }
- }
-#else
- test_skip("fork(2) is irrelevant to Windows");
-#endif
-}
-TEST_END
-
-int
-main(void)
-{
-
- return (test(
- test_fork));
-}
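
The fork.c test deleted above checks that the allocator stays usable in both the parent and the child across fork(2). The same pattern, reduced to plain POSIX calls, looks roughly like this:

    #include <stdlib.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int
    main(void)
    {
        void *p = malloc(1);
        pid_t pid = fork();
        int status;

        free(p);
        if (pid == -1)
            return (1);
        /* Both sides must still be able to allocate after fork(). */
        p = malloc(64);
        free(p);
        if (pid == 0)
            _exit(0);                       /* Child. */
        if (waitpid(pid, &status, 0) == -1)
            return (1);
        return (WIFEXITED(status) && WEXITSTATUS(status) == 0 ? 0 : 1);
    }
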
diff --git a/deps/jemalloc/test/unit/hash.c b/deps/jemalloc/test/unit/hash.c
index 010c9d76f..77a8cede9 100644
--- a/deps/jemalloc/test/unit/hash.c
+++ b/deps/jemalloc/test/unit/hash.c
@@ -35,7 +35,7 @@ typedef enum {
hash_variant_x64_128
} hash_variant_t;
-static int
+static size_t
hash_variant_bits(hash_variant_t variant)
{
@@ -59,20 +59,19 @@ hash_variant_string(hash_variant_t variant)
}
}
-#define KEY_SIZE 256
static void
-hash_variant_verify_key(hash_variant_t variant, uint8_t *key)
+hash_variant_verify(hash_variant_t variant)
{
- const int hashbytes = hash_variant_bits(variant) / 8;
- const int hashes_size = hashbytes * 256;
- VARIABLE_ARRAY(uint8_t, hashes, hashes_size);
+ const size_t hashbytes = hash_variant_bits(variant) / 8;
+ uint8_t key[256];
+ VARIABLE_ARRAY(uint8_t, hashes, hashbytes * 256);
VARIABLE_ARRAY(uint8_t, final, hashbytes);
unsigned i;
uint32_t computed, expected;
- memset(key, 0, KEY_SIZE);
- memset(hashes, 0, hashes_size);
- memset(final, 0, hashbytes);
+ memset(key, 0, sizeof(key));
+ memset(hashes, 0, sizeof(hashes));
+ memset(final, 0, sizeof(final));
/*
* Hash keys of the form {0}, {0,1}, {0,1,2}, ..., {0,1,...,255} as the
@@ -103,17 +102,17 @@ hash_variant_verify_key(hash_variant_t variant, uint8_t *key)
/* Hash the result array. */
switch (variant) {
case hash_variant_x86_32: {
- uint32_t out = hash_x86_32(hashes, hashes_size, 0);
+ uint32_t out = hash_x86_32(hashes, hashbytes*256, 0);
memcpy(final, &out, sizeof(out));
break;
} case hash_variant_x86_128: {
uint64_t out[2];
- hash_x86_128(hashes, hashes_size, 0, out);
+ hash_x86_128(hashes, hashbytes*256, 0, out);
memcpy(final, out, sizeof(out));
break;
} case hash_variant_x64_128: {
uint64_t out[2];
- hash_x64_128(hashes, hashes_size, 0, out);
+ hash_x64_128(hashes, hashbytes*256, 0, out);
memcpy(final, out, sizeof(out));
break;
} default: not_reached();
@@ -140,19 +139,6 @@ hash_variant_verify_key(hash_variant_t variant, uint8_t *key)
hash_variant_string(variant), expected, computed);
}
-static void
-hash_variant_verify(hash_variant_t variant)
-{
-#define MAX_ALIGN 16
- uint8_t key[KEY_SIZE + (MAX_ALIGN - 1)];
- unsigned i;
-
- for (i = 0; i < MAX_ALIGN; i++)
- hash_variant_verify_key(variant, &key[i]);
-#undef MAX_ALIGN
-}
-#undef KEY_SIZE
-
TEST_BEGIN(test_hash_x86_32)
{
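
The hash.c change above drops the key-alignment sweep (hash_variant_verify_key() called at sixteen different buffer offsets) and goes back to a single fixed key buffer; the MurmurHash3 verification scheme itself is untouched. That scheme hashes a sequence of growing keys with length-dependent seeds, concatenates the digests, hashes the concatenation with seed 0, and compares the low 32 bits against a published constant. A simplified sketch of the x86_32 case, assuming the internal hash_x86_32(data, len, seed) helper that appears in the hunks above; the seed schedule shown follows the standard verification routine and is illustrative here:

    #include "test/jemalloc_test.h"     /* for hash_x86_32() */

    static uint32_t
    verify_x86_32(void)
    {
        uint8_t key[256], hashes[256 * sizeof(uint32_t)];
        uint32_t out;
        unsigned i;

        for (i = 0; i < 256; i++) {
            key[i] = (uint8_t)i;
            /* Key of length i, seed derived from the length. */
            out = hash_x86_32(key, i, 256 - i);
            memcpy(&hashes[i * sizeof(uint32_t)], &out, sizeof(out));
        }
        /* The caller compares this against the expected constant. */
        return (hash_x86_32(hashes, sizeof(hashes), 0));
    }
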
diff --git a/deps/jemalloc/test/unit/junk.c b/deps/jemalloc/test/unit/junk.c
index 460bd524d..b23dd1e95 100644
--- a/deps/jemalloc/test/unit/junk.c
+++ b/deps/jemalloc/test/unit/junk.c
@@ -29,7 +29,7 @@ arena_dalloc_junk_small_intercept(void *ptr, arena_bin_info_t *bin_info)
arena_dalloc_junk_small_orig(ptr, bin_info);
for (i = 0; i < bin_info->reg_size; i++) {
- assert_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK,
+ assert_c_eq(((char *)ptr)[i], 0x5a,
"Missing junk fill for byte %zu/%zu of deallocated region",
i, bin_info->reg_size);
}
@@ -44,7 +44,7 @@ arena_dalloc_junk_large_intercept(void *ptr, size_t usize)
arena_dalloc_junk_large_orig(ptr, usize);
for (i = 0; i < usize; i++) {
- assert_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK,
+ assert_c_eq(((char *)ptr)[i], 0x5a,
"Missing junk fill for byte %zu/%zu of deallocated region",
i, usize);
}
@@ -69,7 +69,7 @@ huge_dalloc_junk_intercept(void *ptr, size_t usize)
static void
test_junk(size_t sz_min, size_t sz_max)
{
- uint8_t *s;
+ char *s;
size_t sz_prev, sz, i;
if (opt_junk_free) {
@@ -82,23 +82,23 @@ test_junk(size_t sz_min, size_t sz_max)
}
sz_prev = 0;
- s = (uint8_t *)mallocx(sz_min, 0);
+ s = (char *)mallocx(sz_min, 0);
assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
for (sz = sallocx(s, 0); sz <= sz_max;
sz_prev = sz, sz = sallocx(s, 0)) {
if (sz_prev > 0) {
- assert_u_eq(s[0], 'a',
+ assert_c_eq(s[0], 'a',
"Previously allocated byte %zu/%zu is corrupted",
ZU(0), sz_prev);
- assert_u_eq(s[sz_prev-1], 'a',
+ assert_c_eq(s[sz_prev-1], 'a',
"Previously allocated byte %zu/%zu is corrupted",
sz_prev-1, sz_prev);
}
for (i = sz_prev; i < sz; i++) {
if (opt_junk_alloc) {
- assert_u_eq(s[i], JEMALLOC_ALLOC_JUNK,
+ assert_c_eq(s[i], 0xa5,
"Newly allocated byte %zu/%zu isn't "
"junk-filled", i, sz);
}
@@ -107,7 +107,7 @@ test_junk(size_t sz_min, size_t sz_max)
if (xallocx(s, sz+1, 0, 0) == sz) {
watch_junking(s);
- s = (uint8_t *)rallocx(s, sz+1, 0);
+ s = (char *)rallocx(s, sz+1, 0);
assert_ptr_not_null((void *)s,
"Unexpected rallocx() failure");
assert_true(!opt_junk_free || saw_junking,
@@ -244,6 +244,7 @@ int
main(void)
{
+ assert(!config_fill || opt_junk_alloc || opt_junk_free);
return (test(
test_junk_small,
test_junk_large,
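
The junk.c hunks above swap the JEMALLOC_ALLOC_JUNK / JEMALLOC_FREE_JUNK symbols for the literal byte values they stand for in this tree: 0xa5 for newly allocated memory and 0x5a for freed memory. A small standalone check of the allocation side, assuming a --enable-fill build run with MALLOC_CONF=junk:true:

    #include <assert.h>
    #include <stdint.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
        size_t i, sz = 64;
        uint8_t *p = (uint8_t *)mallocx(sz, 0);

        assert(p != NULL);
        /* With junk:true, every newly allocated byte is 0xa5. */
        for (i = 0; i < sz; i++)
            assert(p[i] == 0xa5);
        dallocx(p, 0);
        return (0);
    }
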
diff --git a/deps/jemalloc/test/unit/junk_alloc.c b/deps/jemalloc/test/unit/junk_alloc.c
index a5895b5c0..8db3331d2 100644
--- a/deps/jemalloc/test/unit/junk_alloc.c
+++ b/deps/jemalloc/test/unit/junk_alloc.c
@@ -1,3 +1,3 @@
-#define JEMALLOC_TEST_JUNK_OPT "junk:alloc"
+#define JEMALLOC_TEST_JUNK_OPT "junk:alloc"
#include "junk.c"
#undef JEMALLOC_TEST_JUNK_OPT
diff --git a/deps/jemalloc/test/unit/junk_free.c b/deps/jemalloc/test/unit/junk_free.c
index bb5183c90..482a61d07 100644
--- a/deps/jemalloc/test/unit/junk_free.c
+++ b/deps/jemalloc/test/unit/junk_free.c
@@ -1,3 +1,3 @@
-#define JEMALLOC_TEST_JUNK_OPT "junk:free"
+#define JEMALLOC_TEST_JUNK_OPT "junk:free"
#include "junk.c"
#undef JEMALLOC_TEST_JUNK_OPT
diff --git a/deps/jemalloc/test/unit/mallctl.c b/deps/jemalloc/test/unit/mallctl.c
index 2353c92c1..31e354ca7 100755..100644
--- a/deps/jemalloc/test/unit/mallctl.c
+++ b/deps/jemalloc/test/unit/mallctl.c
@@ -12,18 +12,16 @@ TEST_BEGIN(test_mallctl_errors)
EPERM, "mallctl() should return EPERM on attempt to write "
"read-only value");
- assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
- sizeof(epoch)-1), EINVAL,
- "mallctl() should return EINVAL for input size mismatch");
- assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
- sizeof(epoch)+1), EINVAL,
- "mallctl() should return EINVAL for input size mismatch");
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)-1),
+ EINVAL, "mallctl() should return EINVAL for input size mismatch");
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)+1),
+ EINVAL, "mallctl() should return EINVAL for input size mismatch");
sz = sizeof(epoch)-1;
- assert_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL,
+ assert_d_eq(mallctl("epoch", &epoch, &sz, NULL, 0), EINVAL,
"mallctl() should return EINVAL for output size mismatch");
sz = sizeof(epoch)+1;
- assert_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL,
+ assert_d_eq(mallctl("epoch", &epoch, &sz, NULL, 0), EINVAL,
"mallctl() should return EINVAL for output size mismatch");
}
TEST_END
@@ -58,20 +56,18 @@ TEST_BEGIN(test_mallctlbymib_errors)
assert_d_eq(mallctlnametomib("epoch", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
- assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch,
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &epoch,
sizeof(epoch)-1), EINVAL,
"mallctlbymib() should return EINVAL for input size mismatch");
- assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch,
+ assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &epoch,
sizeof(epoch)+1), EINVAL,
"mallctlbymib() should return EINVAL for input size mismatch");
sz = sizeof(epoch)-1;
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0),
- EINVAL,
+ assert_d_eq(mallctlbymib(mib, miblen, &epoch, &sz, NULL, 0), EINVAL,
"mallctlbymib() should return EINVAL for output size mismatch");
sz = sizeof(epoch)+1;
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0),
- EINVAL,
+ assert_d_eq(mallctlbymib(mib, miblen, &epoch, &sz, NULL, 0), EINVAL,
"mallctlbymib() should return EINVAL for output size mismatch");
}
TEST_END
@@ -87,19 +83,18 @@ TEST_BEGIN(test_mallctl_read_write)
assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
/* Read. */
- assert_d_eq(mallctl("epoch", (void *)&old_epoch, &sz, NULL, 0), 0,
+ assert_d_eq(mallctl("epoch", &old_epoch, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
/* Write. */
- assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&new_epoch,
- sizeof(new_epoch)), 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("epoch", NULL, NULL, &new_epoch, sizeof(new_epoch)),
+ 0, "Unexpected mallctl() failure");
assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
/* Read+write. */
- assert_d_eq(mallctl("epoch", (void *)&old_epoch, &sz,
- (void *)&new_epoch, sizeof(new_epoch)), 0,
- "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("epoch", &old_epoch, &sz, &new_epoch,
+ sizeof(new_epoch)), 0, "Unexpected mallctl() failure");
assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
}
TEST_END
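
The hunk above only strips redundant (void *) casts; the "epoch" protocol it exercises is unchanged. "epoch" is the control that refreshes jemalloc's statistics snapshot, and the three call shapes shown in the test cover the whole mallctl() convention (read, write, read+write):

    #include <stdint.h>
    #include <jemalloc/jemalloc.h>

    static void
    refresh_stats_epoch(void)
    {
        uint64_t old_epoch, new_epoch = 1;
        size_t sz = sizeof(old_epoch);

        /* Read-only: fetch the current epoch. */
        mallctl("epoch", &old_epoch, &sz, NULL, 0);
        /* Write-only: advance the epoch so stats.* reflects current state. */
        mallctl("epoch", NULL, NULL, &new_epoch, sizeof(new_epoch));
        /* Read and write in a single call. */
        mallctl("epoch", &old_epoch, &sz, &new_epoch, sizeof(new_epoch));
    }
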
@@ -122,30 +117,29 @@ TEST_END
TEST_BEGIN(test_mallctl_config)
{
-#define TEST_MALLCTL_CONFIG(config, t) do { \
- t oldval; \
+#define TEST_MALLCTL_CONFIG(config) do { \
+ bool oldval; \
size_t sz = sizeof(oldval); \
- assert_d_eq(mallctl("config."#config, (void *)&oldval, &sz, \
- NULL, 0), 0, "Unexpected mallctl() failure"); \
+ assert_d_eq(mallctl("config."#config, &oldval, &sz, NULL, 0), \
+ 0, "Unexpected mallctl() failure"); \
assert_b_eq(oldval, config_##config, "Incorrect config value"); \
assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \
} while (0)
- TEST_MALLCTL_CONFIG(cache_oblivious, bool);
- TEST_MALLCTL_CONFIG(debug, bool);
- TEST_MALLCTL_CONFIG(fill, bool);
- TEST_MALLCTL_CONFIG(lazy_lock, bool);
- TEST_MALLCTL_CONFIG(malloc_conf, const char *);
- TEST_MALLCTL_CONFIG(munmap, bool);
- TEST_MALLCTL_CONFIG(prof, bool);
- TEST_MALLCTL_CONFIG(prof_libgcc, bool);
- TEST_MALLCTL_CONFIG(prof_libunwind, bool);
- TEST_MALLCTL_CONFIG(stats, bool);
- TEST_MALLCTL_CONFIG(tcache, bool);
- TEST_MALLCTL_CONFIG(tls, bool);
- TEST_MALLCTL_CONFIG(utrace, bool);
- TEST_MALLCTL_CONFIG(valgrind, bool);
- TEST_MALLCTL_CONFIG(xmalloc, bool);
+ TEST_MALLCTL_CONFIG(cache_oblivious);
+ TEST_MALLCTL_CONFIG(debug);
+ TEST_MALLCTL_CONFIG(fill);
+ TEST_MALLCTL_CONFIG(lazy_lock);
+ TEST_MALLCTL_CONFIG(munmap);
+ TEST_MALLCTL_CONFIG(prof);
+ TEST_MALLCTL_CONFIG(prof_libgcc);
+ TEST_MALLCTL_CONFIG(prof_libunwind);
+ TEST_MALLCTL_CONFIG(stats);
+ TEST_MALLCTL_CONFIG(tcache);
+ TEST_MALLCTL_CONFIG(tls);
+ TEST_MALLCTL_CONFIG(utrace);
+ TEST_MALLCTL_CONFIG(valgrind);
+ TEST_MALLCTL_CONFIG(xmalloc);
#undef TEST_MALLCTL_CONFIG
}
@@ -159,8 +153,7 @@ TEST_BEGIN(test_mallctl_opt)
t oldval; \
size_t sz = sizeof(oldval); \
int expected = config_##config ? 0 : ENOENT; \
- int result = mallctl("opt."#opt, (void *)&oldval, &sz, NULL, \
- 0); \
+ int result = mallctl("opt."#opt, &oldval, &sz, NULL, 0); \
assert_d_eq(result, expected, \
"Unexpected mallctl() result for opt."#opt); \
assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \
@@ -169,10 +162,8 @@ TEST_BEGIN(test_mallctl_opt)
TEST_MALLCTL_OPT(bool, abort, always);
TEST_MALLCTL_OPT(size_t, lg_chunk, always);
TEST_MALLCTL_OPT(const char *, dss, always);
- TEST_MALLCTL_OPT(unsigned, narenas, always);
- TEST_MALLCTL_OPT(const char *, purge, always);
+ TEST_MALLCTL_OPT(size_t, narenas, always);
TEST_MALLCTL_OPT(ssize_t, lg_dirty_mult, always);
- TEST_MALLCTL_OPT(ssize_t, decay_time, always);
TEST_MALLCTL_OPT(bool, stats_print, always);
TEST_MALLCTL_OPT(const char *, junk, fill);
TEST_MALLCTL_OPT(size_t, quarantine, fill);
@@ -203,7 +194,7 @@ TEST_BEGIN(test_manpage_example)
size_t len, miblen;
len = sizeof(nbins);
- assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0,
+ assert_d_eq(mallctl("arenas.nbins", &nbins, &len, NULL, 0), 0,
"Unexpected mallctl() failure");
miblen = 4;
@@ -214,8 +205,8 @@ TEST_BEGIN(test_manpage_example)
mib[2] = i;
len = sizeof(bin_size);
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&bin_size, &len,
- NULL, 0), 0, "Unexpected mallctlbymib() failure");
+ assert_d_eq(mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0),
+ 0, "Unexpected mallctlbymib() failure");
/* Do something with bin_size... */
}
}
@@ -264,25 +255,25 @@ TEST_BEGIN(test_tcache)
/* Create tcaches. */
for (i = 0; i < NTCACHES; i++) {
sz = sizeof(unsigned);
- assert_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL,
- 0), 0, "Unexpected mallctl() failure, i=%u", i);
+ assert_d_eq(mallctl("tcache.create", &tis[i], &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure, i=%u", i);
}
/* Exercise tcache ID recycling. */
for (i = 0; i < NTCACHES; i++) {
- assert_d_eq(mallctl("tcache.destroy", NULL, NULL,
- (void *)&tis[i], sizeof(unsigned)), 0,
- "Unexpected mallctl() failure, i=%u", i);
+ assert_d_eq(mallctl("tcache.destroy", NULL, NULL, &tis[i],
+ sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
+ i);
}
for (i = 0; i < NTCACHES; i++) {
sz = sizeof(unsigned);
- assert_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL,
- 0), 0, "Unexpected mallctl() failure, i=%u", i);
+ assert_d_eq(mallctl("tcache.create", &tis[i], &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure, i=%u", i);
}
/* Flush empty tcaches. */
for (i = 0; i < NTCACHES; i++) {
- assert_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i],
+ assert_d_eq(mallctl("tcache.flush", NULL, NULL, &tis[i],
sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
i);
}
@@ -327,16 +318,16 @@ TEST_BEGIN(test_tcache)
/* Flush some non-empty tcaches. */
for (i = 0; i < NTCACHES/2; i++) {
- assert_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i],
+ assert_d_eq(mallctl("tcache.flush", NULL, NULL, &tis[i],
sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
i);
}
/* Destroy tcaches. */
for (i = 0; i < NTCACHES; i++) {
- assert_d_eq(mallctl("tcache.destroy", NULL, NULL,
- (void *)&tis[i], sizeof(unsigned)), 0,
- "Unexpected mallctl() failure, i=%u", i);
+ assert_d_eq(mallctl("tcache.destroy", NULL, NULL, &tis[i],
+ sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
+ i);
}
}
TEST_END
@@ -346,17 +337,15 @@ TEST_BEGIN(test_thread_arena)
unsigned arena_old, arena_new, narenas;
size_t sz = sizeof(unsigned);
- assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
- 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
assert_u_eq(narenas, opt_narenas, "Number of arenas incorrect");
arena_new = narenas - 1;
- assert_d_eq(mallctl("thread.arena", (void *)&arena_old, &sz,
- (void *)&arena_new, sizeof(unsigned)), 0,
- "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("thread.arena", &arena_old, &sz, &arena_new,
+ sizeof(unsigned)), 0, "Unexpected mallctl() failure");
arena_new = 0;
- assert_d_eq(mallctl("thread.arena", (void *)&arena_old, &sz,
- (void *)&arena_new, sizeof(unsigned)), 0,
- "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("thread.arena", &arena_old, &sz, &arena_new,
+ sizeof(unsigned)), 0, "Unexpected mallctl() failure");
}
TEST_END
@@ -365,20 +354,17 @@ TEST_BEGIN(test_arena_i_lg_dirty_mult)
ssize_t lg_dirty_mult, orig_lg_dirty_mult, prev_lg_dirty_mult;
size_t sz = sizeof(ssize_t);
- test_skip_if(opt_purge != purge_mode_ratio);
-
- assert_d_eq(mallctl("arena.0.lg_dirty_mult",
- (void *)&orig_lg_dirty_mult, &sz, NULL, 0), 0,
- "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("arena.0.lg_dirty_mult", &orig_lg_dirty_mult, &sz,
+ NULL, 0), 0, "Unexpected mallctl() failure");
lg_dirty_mult = -2;
assert_d_eq(mallctl("arena.0.lg_dirty_mult", NULL, NULL,
- (void *)&lg_dirty_mult, sizeof(ssize_t)), EFAULT,
+ &lg_dirty_mult, sizeof(ssize_t)), EFAULT,
"Unexpected mallctl() success");
lg_dirty_mult = (sizeof(size_t) << 3);
assert_d_eq(mallctl("arena.0.lg_dirty_mult", NULL, NULL,
- (void *)&lg_dirty_mult, sizeof(ssize_t)), EFAULT,
+ &lg_dirty_mult, sizeof(ssize_t)), EFAULT,
"Unexpected mallctl() success");
for (prev_lg_dirty_mult = orig_lg_dirty_mult, lg_dirty_mult = -1;
@@ -386,48 +372,15 @@ TEST_BEGIN(test_arena_i_lg_dirty_mult)
= lg_dirty_mult, lg_dirty_mult++) {
ssize_t old_lg_dirty_mult;
- assert_d_eq(mallctl("arena.0.lg_dirty_mult",
- (void *)&old_lg_dirty_mult, &sz, (void *)&lg_dirty_mult,
- sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("arena.0.lg_dirty_mult", &old_lg_dirty_mult,
+ &sz, &lg_dirty_mult, sizeof(ssize_t)), 0,
+ "Unexpected mallctl() failure");
assert_zd_eq(old_lg_dirty_mult, prev_lg_dirty_mult,
"Unexpected old arena.0.lg_dirty_mult");
}
}
TEST_END
-TEST_BEGIN(test_arena_i_decay_time)
-{
- ssize_t decay_time, orig_decay_time, prev_decay_time;
- size_t sz = sizeof(ssize_t);
-
- test_skip_if(opt_purge != purge_mode_decay);
-
- assert_d_eq(mallctl("arena.0.decay_time", (void *)&orig_decay_time, &sz,
- NULL, 0), 0, "Unexpected mallctl() failure");
-
- decay_time = -2;
- assert_d_eq(mallctl("arena.0.decay_time", NULL, NULL,
- (void *)&decay_time, sizeof(ssize_t)), EFAULT,
- "Unexpected mallctl() success");
-
- decay_time = 0x7fffffff;
- assert_d_eq(mallctl("arena.0.decay_time", NULL, NULL,
- (void *)&decay_time, sizeof(ssize_t)), 0,
- "Unexpected mallctl() failure");
-
- for (prev_decay_time = decay_time, decay_time = -1;
- decay_time < 20; prev_decay_time = decay_time, decay_time++) {
- ssize_t old_decay_time;
-
- assert_d_eq(mallctl("arena.0.decay_time", (void *)&old_decay_time,
- &sz, (void *)&decay_time, sizeof(ssize_t)), 0,
- "Unexpected mallctl() failure");
- assert_zd_eq(old_decay_time, prev_decay_time,
- "Unexpected old arena.0.decay_time");
- }
-}
-TEST_END
-
TEST_BEGIN(test_arena_i_purge)
{
unsigned narenas;
@@ -438,29 +391,9 @@ TEST_BEGIN(test_arena_i_purge)
assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl() failure");
- assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
- 0, "Unexpected mallctl() failure");
- assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0,
- "Unexpected mallctlnametomib() failure");
- mib[1] = narenas;
- assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
- "Unexpected mallctlbymib() failure");
-}
-TEST_END
-
-TEST_BEGIN(test_arena_i_decay)
-{
- unsigned narenas;
- size_t sz = sizeof(unsigned);
- size_t mib[3];
- size_t miblen = 3;
-
- assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
+ assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
-
- assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
- 0, "Unexpected mallctl() failure");
- assert_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0,
+ assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
mib[1] = narenas;
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
@@ -480,35 +413,31 @@ TEST_BEGIN(test_arena_i_dss)
"Unexpected mallctlnametomib() error");
dss_prec_new = "disabled";
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz,
- (void *)&dss_prec_new, sizeof(dss_prec_new)), 0,
- "Unexpected mallctl() failure");
+ assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, &dss_prec_new,
+ sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure");
assert_str_ne(dss_prec_old, "primary",
"Unexpected default for dss precedence");
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz,
- (void *)&dss_prec_old, sizeof(dss_prec_old)), 0,
- "Unexpected mallctl() failure");
+ assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_new, &sz, &dss_prec_old,
+ sizeof(dss_prec_old)), 0, "Unexpected mallctl() failure");
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL,
- 0), 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
assert_str_ne(dss_prec_old, "primary",
"Unexpected value for dss precedence");
mib[1] = narenas_total_get();
dss_prec_new = "disabled";
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz,
- (void *)&dss_prec_new, sizeof(dss_prec_new)), 0,
- "Unexpected mallctl() failure");
+ assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, &dss_prec_new,
+ sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure");
assert_str_ne(dss_prec_old, "primary",
"Unexpected default for dss precedence");
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz,
- (void *)&dss_prec_old, sizeof(dss_prec_new)), 0,
- "Unexpected mallctl() failure");
+ assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_new, &sz, &dss_prec_old,
+ sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure");
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL,
- 0), 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
assert_str_ne(dss_prec_old, "primary",
"Unexpected value for dss precedence");
}
@@ -519,14 +448,14 @@ TEST_BEGIN(test_arenas_initialized)
unsigned narenas;
size_t sz = sizeof(narenas);
- assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
- 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
{
VARIABLE_ARRAY(bool, initialized, narenas);
sz = narenas * sizeof(bool);
- assert_d_eq(mallctl("arenas.initialized", (void *)initialized,
- &sz, NULL, 0), 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("arenas.initialized", initialized, &sz,
+ NULL, 0), 0, "Unexpected mallctl() failure");
}
}
TEST_END
@@ -536,19 +465,17 @@ TEST_BEGIN(test_arenas_lg_dirty_mult)
ssize_t lg_dirty_mult, orig_lg_dirty_mult, prev_lg_dirty_mult;
size_t sz = sizeof(ssize_t);
- test_skip_if(opt_purge != purge_mode_ratio);
-
- assert_d_eq(mallctl("arenas.lg_dirty_mult", (void *)&orig_lg_dirty_mult,
- &sz, NULL, 0), 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("arenas.lg_dirty_mult", &orig_lg_dirty_mult, &sz,
+ NULL, 0), 0, "Unexpected mallctl() failure");
lg_dirty_mult = -2;
assert_d_eq(mallctl("arenas.lg_dirty_mult", NULL, NULL,
- (void *)&lg_dirty_mult, sizeof(ssize_t)), EFAULT,
+ &lg_dirty_mult, sizeof(ssize_t)), EFAULT,
"Unexpected mallctl() success");
lg_dirty_mult = (sizeof(size_t) << 3);
assert_d_eq(mallctl("arenas.lg_dirty_mult", NULL, NULL,
- (void *)&lg_dirty_mult, sizeof(ssize_t)), EFAULT,
+ &lg_dirty_mult, sizeof(ssize_t)), EFAULT,
"Unexpected mallctl() success");
for (prev_lg_dirty_mult = orig_lg_dirty_mult, lg_dirty_mult = -1;
@@ -556,56 +483,23 @@ TEST_BEGIN(test_arenas_lg_dirty_mult)
lg_dirty_mult, lg_dirty_mult++) {
ssize_t old_lg_dirty_mult;
- assert_d_eq(mallctl("arenas.lg_dirty_mult",
- (void *)&old_lg_dirty_mult, &sz, (void *)&lg_dirty_mult,
- sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("arenas.lg_dirty_mult", &old_lg_dirty_mult,
+ &sz, &lg_dirty_mult, sizeof(ssize_t)), 0,
+ "Unexpected mallctl() failure");
assert_zd_eq(old_lg_dirty_mult, prev_lg_dirty_mult,
"Unexpected old arenas.lg_dirty_mult");
}
}
TEST_END
-TEST_BEGIN(test_arenas_decay_time)
-{
- ssize_t decay_time, orig_decay_time, prev_decay_time;
- size_t sz = sizeof(ssize_t);
-
- test_skip_if(opt_purge != purge_mode_decay);
-
- assert_d_eq(mallctl("arenas.decay_time", (void *)&orig_decay_time, &sz,
- NULL, 0), 0, "Unexpected mallctl() failure");
-
- decay_time = -2;
- assert_d_eq(mallctl("arenas.decay_time", NULL, NULL,
- (void *)&decay_time, sizeof(ssize_t)), EFAULT,
- "Unexpected mallctl() success");
-
- decay_time = 0x7fffffff;
- assert_d_eq(mallctl("arenas.decay_time", NULL, NULL,
- (void *)&decay_time, sizeof(ssize_t)), 0,
- "Expected mallctl() failure");
-
- for (prev_decay_time = decay_time, decay_time = -1;
- decay_time < 20; prev_decay_time = decay_time, decay_time++) {
- ssize_t old_decay_time;
-
- assert_d_eq(mallctl("arenas.decay_time",
- (void *)&old_decay_time, &sz, (void *)&decay_time,
- sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
- assert_zd_eq(old_decay_time, prev_decay_time,
- "Unexpected old arenas.decay_time");
- }
-}
-TEST_END
-
TEST_BEGIN(test_arenas_constants)
{
#define TEST_ARENAS_CONSTANT(t, name, expected) do { \
t name; \
size_t sz = sizeof(t); \
- assert_d_eq(mallctl("arenas."#name, (void *)&name, &sz, NULL, \
- 0), 0, "Unexpected mallctl() failure"); \
+ assert_d_eq(mallctl("arenas."#name, &name, &sz, NULL, 0), 0, \
+ "Unexpected mallctl() failure"); \
assert_zu_eq(name, expected, "Incorrect "#name" size"); \
} while (0)
@@ -625,8 +519,8 @@ TEST_BEGIN(test_arenas_bin_constants)
#define TEST_ARENAS_BIN_CONSTANT(t, name, expected) do { \
t name; \
size_t sz = sizeof(t); \
- assert_d_eq(mallctl("arenas.bin.0."#name, (void *)&name, &sz, \
- NULL, 0), 0, "Unexpected mallctl() failure"); \
+ assert_d_eq(mallctl("arenas.bin.0."#name, &name, &sz, NULL, 0), \
+ 0, "Unexpected mallctl() failure"); \
assert_zu_eq(name, expected, "Incorrect "#name" size"); \
} while (0)
@@ -644,8 +538,8 @@ TEST_BEGIN(test_arenas_lrun_constants)
#define TEST_ARENAS_LRUN_CONSTANT(t, name, expected) do { \
t name; \
size_t sz = sizeof(t); \
- assert_d_eq(mallctl("arenas.lrun.0."#name, (void *)&name, &sz, \
- NULL, 0), 0, "Unexpected mallctl() failure"); \
+ assert_d_eq(mallctl("arenas.lrun.0."#name, &name, &sz, NULL, \
+ 0), 0, "Unexpected mallctl() failure"); \
assert_zu_eq(name, expected, "Incorrect "#name" size"); \
} while (0)
@@ -661,8 +555,8 @@ TEST_BEGIN(test_arenas_hchunk_constants)
#define TEST_ARENAS_HCHUNK_CONSTANT(t, name, expected) do { \
t name; \
size_t sz = sizeof(t); \
- assert_d_eq(mallctl("arenas.hchunk.0."#name, (void *)&name, \
- &sz, NULL, 0), 0, "Unexpected mallctl() failure"); \
+ assert_d_eq(mallctl("arenas.hchunk.0."#name, &name, &sz, NULL, \
+ 0), 0, "Unexpected mallctl() failure"); \
assert_zu_eq(name, expected, "Incorrect "#name" size"); \
} while (0)
@@ -677,12 +571,12 @@ TEST_BEGIN(test_arenas_extend)
unsigned narenas_before, arena, narenas_after;
size_t sz = sizeof(unsigned);
- assert_d_eq(mallctl("arenas.narenas", (void *)&narenas_before, &sz,
- NULL, 0), 0, "Unexpected mallctl() failure");
- assert_d_eq(mallctl("arenas.extend", (void *)&arena, &sz, NULL, 0), 0,
+ assert_d_eq(mallctl("arenas.narenas", &narenas_before, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("arenas.extend", &arena, &sz, NULL, 0), 0,
+ "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("arenas.narenas", &narenas_after, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
- assert_d_eq(mallctl("arenas.narenas", (void *)&narenas_after, &sz, NULL,
- 0), 0, "Unexpected mallctl() failure");
assert_u_eq(narenas_before+1, narenas_after,
"Unexpected number of arenas before versus after extension");
@@ -696,14 +590,12 @@ TEST_BEGIN(test_stats_arenas)
#define TEST_STATS_ARENAS(t, name) do { \
t name; \
size_t sz = sizeof(t); \
- assert_d_eq(mallctl("stats.arenas.0."#name, (void *)&name, &sz, \
- NULL, 0), 0, "Unexpected mallctl() failure"); \
+ assert_d_eq(mallctl("stats.arenas.0."#name, &name, &sz, NULL, \
+ 0), 0, "Unexpected mallctl() failure"); \
} while (0)
- TEST_STATS_ARENAS(unsigned, nthreads);
TEST_STATS_ARENAS(const char *, dss);
- TEST_STATS_ARENAS(ssize_t, lg_dirty_mult);
- TEST_STATS_ARENAS(ssize_t, decay_time);
+ TEST_STATS_ARENAS(unsigned, nthreads);
TEST_STATS_ARENAS(size_t, pactive);
TEST_STATS_ARENAS(size_t, pdirty);
@@ -728,13 +620,10 @@ main(void)
test_tcache,
test_thread_arena,
test_arena_i_lg_dirty_mult,
- test_arena_i_decay_time,
test_arena_i_purge,
- test_arena_i_decay,
test_arena_i_dss,
test_arenas_initialized,
test_arenas_lg_dirty_mult,
- test_arenas_decay_time,
test_arenas_constants,
test_arenas_bin_constants,
test_arenas_lrun_constants,
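
Aside from cast removal, the mallctl.c changes above delete the decay-related tests and keep only the ratio-based purge path. The MIB interface used by test_manpage_example is untouched; its pattern is to translate a name to a MIB once with mallctlnametomib(), patch the variable index component, and then issue cheap repeated lookups with mallctlbymib():

    #include <stddef.h>
    #include <jemalloc/jemalloc.h>

    static void
    walk_bin_sizes(void)
    {
        unsigned nbins, i;
        size_t mib[4], miblen = 4, len, bin_size;

        len = sizeof(nbins);
        mallctl("arenas.nbins", &nbins, &len, NULL, 0);
        mallctlnametomib("arenas.bin.0.size", mib, &miblen);
        for (i = 0; i < nbins; i++) {
            mib[2] = i;             /* Replace the "0" component. */
            len = sizeof(bin_size);
            mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
            /* bin_size is the size class of bin i; use it here. */
        }
    }
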
diff --git a/deps/jemalloc/test/unit/math.c b/deps/jemalloc/test/unit/math.c
index adb72bed9..ebec77a62 100644
--- a/deps/jemalloc/test/unit/math.c
+++ b/deps/jemalloc/test/unit/math.c
@@ -5,10 +5,6 @@
#include <float.h>
-#ifdef __PGI
-#undef INFINITY
-#endif
-
#ifndef INFINITY
#define INFINITY (DBL_MAX + DBL_MAX)
#endif
diff --git a/deps/jemalloc/test/unit/nstime.c b/deps/jemalloc/test/unit/nstime.c
deleted file mode 100644
index 0368bc26e..000000000
--- a/deps/jemalloc/test/unit/nstime.c
+++ /dev/null
@@ -1,227 +0,0 @@
-#include "test/jemalloc_test.h"
-
-#define BILLION UINT64_C(1000000000)
-
-TEST_BEGIN(test_nstime_init)
-{
- nstime_t nst;
-
- nstime_init(&nst, 42000000043);
- assert_u64_eq(nstime_ns(&nst), 42000000043, "ns incorrectly read");
- assert_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read");
- assert_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read");
-}
-TEST_END
-
-TEST_BEGIN(test_nstime_init2)
-{
- nstime_t nst;
-
- nstime_init2(&nst, 42, 43);
- assert_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read");
- assert_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read");
-}
-TEST_END
-
-TEST_BEGIN(test_nstime_copy)
-{
- nstime_t nsta, nstb;
-
- nstime_init2(&nsta, 42, 43);
- nstime_init(&nstb, 0);
- nstime_copy(&nstb, &nsta);
- assert_u64_eq(nstime_sec(&nstb), 42, "sec incorrectly copied");
- assert_u64_eq(nstime_nsec(&nstb), 43, "nsec incorrectly copied");
-}
-TEST_END
-
-TEST_BEGIN(test_nstime_compare)
-{
- nstime_t nsta, nstb;
-
- nstime_init2(&nsta, 42, 43);
- nstime_copy(&nstb, &nsta);
- assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Times should be equal");
- assert_d_eq(nstime_compare(&nstb, &nsta), 0, "Times should be equal");
-
- nstime_init2(&nstb, 42, 42);
- assert_d_eq(nstime_compare(&nsta, &nstb), 1,
- "nsta should be greater than nstb");
- assert_d_eq(nstime_compare(&nstb, &nsta), -1,
- "nstb should be less than nsta");
-
- nstime_init2(&nstb, 42, 44);
- assert_d_eq(nstime_compare(&nsta, &nstb), -1,
- "nsta should be less than nstb");
- assert_d_eq(nstime_compare(&nstb, &nsta), 1,
- "nstb should be greater than nsta");
-
- nstime_init2(&nstb, 41, BILLION - 1);
- assert_d_eq(nstime_compare(&nsta, &nstb), 1,
- "nsta should be greater than nstb");
- assert_d_eq(nstime_compare(&nstb, &nsta), -1,
- "nstb should be less than nsta");
-
- nstime_init2(&nstb, 43, 0);
- assert_d_eq(nstime_compare(&nsta, &nstb), -1,
- "nsta should be less than nstb");
- assert_d_eq(nstime_compare(&nstb, &nsta), 1,
- "nstb should be greater than nsta");
-}
-TEST_END
-
-TEST_BEGIN(test_nstime_add)
-{
- nstime_t nsta, nstb;
-
- nstime_init2(&nsta, 42, 43);
- nstime_copy(&nstb, &nsta);
- nstime_add(&nsta, &nstb);
- nstime_init2(&nstb, 84, 86);
- assert_d_eq(nstime_compare(&nsta, &nstb), 0,
- "Incorrect addition result");
-
- nstime_init2(&nsta, 42, BILLION - 1);
- nstime_copy(&nstb, &nsta);
- nstime_add(&nsta, &nstb);
- nstime_init2(&nstb, 85, BILLION - 2);
- assert_d_eq(nstime_compare(&nsta, &nstb), 0,
- "Incorrect addition result");
-}
-TEST_END
-
-TEST_BEGIN(test_nstime_subtract)
-{
- nstime_t nsta, nstb;
-
- nstime_init2(&nsta, 42, 43);
- nstime_copy(&nstb, &nsta);
- nstime_subtract(&nsta, &nstb);
- nstime_init(&nstb, 0);
- assert_d_eq(nstime_compare(&nsta, &nstb), 0,
- "Incorrect subtraction result");
-
- nstime_init2(&nsta, 42, 43);
- nstime_init2(&nstb, 41, 44);
- nstime_subtract(&nsta, &nstb);
- nstime_init2(&nstb, 0, BILLION - 1);
- assert_d_eq(nstime_compare(&nsta, &nstb), 0,
- "Incorrect subtraction result");
-}
-TEST_END
-
-TEST_BEGIN(test_nstime_imultiply)
-{
- nstime_t nsta, nstb;
-
- nstime_init2(&nsta, 42, 43);
- nstime_imultiply(&nsta, 10);
- nstime_init2(&nstb, 420, 430);
- assert_d_eq(nstime_compare(&nsta, &nstb), 0,
- "Incorrect multiplication result");
-
- nstime_init2(&nsta, 42, 666666666);
- nstime_imultiply(&nsta, 3);
- nstime_init2(&nstb, 127, 999999998);
- assert_d_eq(nstime_compare(&nsta, &nstb), 0,
- "Incorrect multiplication result");
-}
-TEST_END
-
-TEST_BEGIN(test_nstime_idivide)
-{
- nstime_t nsta, nstb;
-
- nstime_init2(&nsta, 42, 43);
- nstime_copy(&nstb, &nsta);
- nstime_imultiply(&nsta, 10);
- nstime_idivide(&nsta, 10);
- assert_d_eq(nstime_compare(&nsta, &nstb), 0,
- "Incorrect division result");
-
- nstime_init2(&nsta, 42, 666666666);
- nstime_copy(&nstb, &nsta);
- nstime_imultiply(&nsta, 3);
- nstime_idivide(&nsta, 3);
- assert_d_eq(nstime_compare(&nsta, &nstb), 0,
- "Incorrect division result");
-}
-TEST_END
-
-TEST_BEGIN(test_nstime_divide)
-{
- nstime_t nsta, nstb, nstc;
-
- nstime_init2(&nsta, 42, 43);
- nstime_copy(&nstb, &nsta);
- nstime_imultiply(&nsta, 10);
- assert_u64_eq(nstime_divide(&nsta, &nstb), 10,
- "Incorrect division result");
-
- nstime_init2(&nsta, 42, 43);
- nstime_copy(&nstb, &nsta);
- nstime_imultiply(&nsta, 10);
- nstime_init(&nstc, 1);
- nstime_add(&nsta, &nstc);
- assert_u64_eq(nstime_divide(&nsta, &nstb), 10,
- "Incorrect division result");
-
- nstime_init2(&nsta, 42, 43);
- nstime_copy(&nstb, &nsta);
- nstime_imultiply(&nsta, 10);
- nstime_init(&nstc, 1);
- nstime_subtract(&nsta, &nstc);
- assert_u64_eq(nstime_divide(&nsta, &nstb), 9,
- "Incorrect division result");
-}
-TEST_END
-
-TEST_BEGIN(test_nstime_monotonic)
-{
-
- nstime_monotonic();
-}
-TEST_END
-
-TEST_BEGIN(test_nstime_update)
-{
- nstime_t nst;
-
- nstime_init(&nst, 0);
-
- assert_false(nstime_update(&nst), "Basic time update failed.");
-
- /* Only Rip Van Winkle sleeps this long. */
- {
- nstime_t addend;
- nstime_init2(&addend, 631152000, 0);
- nstime_add(&nst, &addend);
- }
- {
- nstime_t nst0;
- nstime_copy(&nst0, &nst);
- assert_true(nstime_update(&nst),
- "Update should detect time roll-back.");
- assert_d_eq(nstime_compare(&nst, &nst0), 0,
- "Time should not have been modified");
- }
-}
-TEST_END
-
-int
-main(void)
-{
-
- return (test(
- test_nstime_init,
- test_nstime_init2,
- test_nstime_copy,
- test_nstime_compare,
- test_nstime_add,
- test_nstime_subtract,
- test_nstime_imultiply,
- test_nstime_idivide,
- test_nstime_divide,
- test_nstime_monotonic,
- test_nstime_update));
-}
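
The deleted nstime.c covered the seconds/nanoseconds helpers (nstime_init2, nstime_add, nstime_compare, ...) that the decay machinery depends on. What those tests pin down is ordinary base-10^9 carry arithmetic; a self-contained illustration of the addition case asserted above (42.999999999s + 42.999999999s = 85.999999998s), with illustrative names rather than jemalloc's:

    #include <assert.h>
    #include <stdint.h>

    #define BILLION UINT64_C(1000000000)

    /* Total nanoseconds, analogous to nstime_t. */
    typedef struct { uint64_t ns; } ts_t;

    int
    main(void)
    {
        ts_t a = { 42 * BILLION + (BILLION - 1) };
        ts_t b = a;

        a.ns += b.ns;   /* nstime_add() semantics. */
        assert(a.ns == 85 * BILLION + (BILLION - 2));
        return (0);
    }
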
diff --git a/deps/jemalloc/test/unit/pack.c b/deps/jemalloc/test/unit/pack.c
deleted file mode 100644
index 0b6ffcd21..000000000
--- a/deps/jemalloc/test/unit/pack.c
+++ /dev/null
@@ -1,206 +0,0 @@
-#include "test/jemalloc_test.h"
-
-const char *malloc_conf =
- /* Use smallest possible chunk size. */
- "lg_chunk:0"
- /* Immediately purge to minimize fragmentation. */
- ",lg_dirty_mult:-1"
- ",decay_time:-1"
- ;
-
-/*
- * Size class that is a divisor of the page size, ideally 4+ regions per run.
- */
-#if LG_PAGE <= 14
-#define SZ (ZU(1) << (LG_PAGE - 2))
-#else
-#define SZ 4096
-#endif
-
-/*
- * Number of chunks to consume at high water mark. Should be at least 2 so that
- * if mmap()ed memory grows downward, downward growth of mmap()ed memory is
- * tested.
- */
-#define NCHUNKS 8
-
-static unsigned
-binind_compute(void)
-{
- size_t sz;
- unsigned nbins, i;
-
- sz = sizeof(nbins);
- assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
- "Unexpected mallctl failure");
-
- for (i = 0; i < nbins; i++) {
- size_t mib[4];
- size_t miblen = sizeof(mib)/sizeof(size_t);
- size_t size;
-
- assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib,
- &miblen), 0, "Unexpected mallctlnametomb failure");
- mib[2] = (size_t)i;
-
- sz = sizeof(size);
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &sz, NULL,
- 0), 0, "Unexpected mallctlbymib failure");
- if (size == SZ)
- return (i);
- }
-
- test_fail("Unable to compute nregs_per_run");
- return (0);
-}
-
-static size_t
-nregs_per_run_compute(void)
-{
- uint32_t nregs;
- size_t sz;
- unsigned binind = binind_compute();
- size_t mib[4];
- size_t miblen = sizeof(mib)/sizeof(size_t);
-
- assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0,
- "Unexpected mallctlnametomb failure");
- mib[2] = (size_t)binind;
- sz = sizeof(nregs);
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&nregs, &sz, NULL,
- 0), 0, "Unexpected mallctlbymib failure");
- return (nregs);
-}
-
-static size_t
-npages_per_run_compute(void)
-{
- size_t sz;
- unsigned binind = binind_compute();
- size_t mib[4];
- size_t miblen = sizeof(mib)/sizeof(size_t);
- size_t run_size;
-
- assert_d_eq(mallctlnametomib("arenas.bin.0.run_size", mib, &miblen), 0,
- "Unexpected mallctlnametomb failure");
- mib[2] = (size_t)binind;
- sz = sizeof(run_size);
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&run_size, &sz, NULL,
- 0), 0, "Unexpected mallctlbymib failure");
- return (run_size >> LG_PAGE);
-}
-
-static size_t
-npages_per_chunk_compute(void)
-{
-
- return ((chunksize >> LG_PAGE) - map_bias);
-}
-
-static size_t
-nruns_per_chunk_compute(void)
-{
-
- return (npages_per_chunk_compute() / npages_per_run_compute());
-}
-
-static unsigned
-arenas_extend_mallctl(void)
-{
- unsigned arena_ind;
- size_t sz;
-
- sz = sizeof(arena_ind);
- assert_d_eq(mallctl("arenas.extend", (void *)&arena_ind, &sz, NULL, 0),
- 0, "Error in arenas.extend");
-
- return (arena_ind);
-}
-
-static void
-arena_reset_mallctl(unsigned arena_ind)
-{
- size_t mib[3];
- size_t miblen = sizeof(mib)/sizeof(size_t);
-
- assert_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 0,
- "Unexpected mallctlnametomib() failure");
- mib[1] = (size_t)arena_ind;
- assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
- "Unexpected mallctlbymib() failure");
-}
-
-TEST_BEGIN(test_pack)
-{
- unsigned arena_ind = arenas_extend_mallctl();
- size_t nregs_per_run = nregs_per_run_compute();
- size_t nruns_per_chunk = nruns_per_chunk_compute();
- size_t nruns = nruns_per_chunk * NCHUNKS;
- size_t nregs = nregs_per_run * nruns;
- VARIABLE_ARRAY(void *, ptrs, nregs);
- size_t i, j, offset;
-
- /* Fill matrix. */
- for (i = offset = 0; i < nruns; i++) {
- for (j = 0; j < nregs_per_run; j++) {
- void *p = mallocx(SZ, MALLOCX_ARENA(arena_ind) |
- MALLOCX_TCACHE_NONE);
- assert_ptr_not_null(p,
- "Unexpected mallocx(%zu, MALLOCX_ARENA(%u) |"
- " MALLOCX_TCACHE_NONE) failure, run=%zu, reg=%zu",
- SZ, arena_ind, i, j);
- ptrs[(i * nregs_per_run) + j] = p;
- }
- }
-
- /*
- * Free all but one region of each run, but rotate which region is
- * preserved, so that subsequent allocations exercise the within-run
- * layout policy.
- */
- offset = 0;
- for (i = offset = 0;
- i < nruns;
- i++, offset = (offset + 1) % nregs_per_run) {
- for (j = 0; j < nregs_per_run; j++) {
- void *p = ptrs[(i * nregs_per_run) + j];
- if (offset == j)
- continue;
- dallocx(p, MALLOCX_ARENA(arena_ind) |
- MALLOCX_TCACHE_NONE);
- }
- }
-
- /*
- * Logically refill matrix, skipping preserved regions and verifying
- * that the matrix is unmodified.
- */
- offset = 0;
- for (i = offset = 0;
- i < nruns;
- i++, offset = (offset + 1) % nregs_per_run) {
- for (j = 0; j < nregs_per_run; j++) {
- void *p;
-
- if (offset == j)
- continue;
- p = mallocx(SZ, MALLOCX_ARENA(arena_ind) |
- MALLOCX_TCACHE_NONE);
- assert_ptr_eq(p, ptrs[(i * nregs_per_run) + j],
- "Unexpected refill discrepancy, run=%zu, reg=%zu\n",
- i, j);
- }
- }
-
- /* Clean up. */
- arena_reset_mallctl(arena_ind);
-}
-TEST_END
-
-int
-main(void)
-{
-
- return (test(
- test_pack));
-}
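
The deleted pack.c test builds a private arena with the "arenas.extend" control and then allocates into it with the thread cache disabled, so run packing can be observed directly. The flag combination it relies on is public mallocx() API and still works in the reverted tree; in isolation:

    #include <stddef.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
        unsigned arena_ind;
        size_t sz = sizeof(arena_ind);
        void *p;

        /* Create a fresh arena and get its index. */
        if (mallctl("arenas.extend", &arena_ind, &sz, NULL, 0) != 0)
            return (1);
        /* Allocate from that arena only, bypassing the tcache. */
        p = mallocx(4096, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
        if (p == NULL)
            return (1);
        dallocx(p, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
        return (0);
    }
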
diff --git a/deps/jemalloc/test/unit/pages.c b/deps/jemalloc/test/unit/pages.c
deleted file mode 100644
index d31a35e68..000000000
--- a/deps/jemalloc/test/unit/pages.c
+++ /dev/null
@@ -1,27 +0,0 @@
-#include "test/jemalloc_test.h"
-
-TEST_BEGIN(test_pages_huge)
-{
- bool commit;
- void *pages;
-
- commit = true;
- pages = pages_map(NULL, PAGE, &commit);
- assert_ptr_not_null(pages, "Unexpected pages_map() error");
-
- assert_false(pages_huge(pages, PAGE),
- "Unexpected pages_huge() result");
- assert_false(pages_nohuge(pages, PAGE),
- "Unexpected pages_nohuge() result");
-
- pages_unmap(pages, PAGE);
-}
-TEST_END
-
-int
-main(void)
-{
-
- return (test(
- test_pages_huge));
-}
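
The deleted pages.c test drives the internal pages_map() / pages_huge() / pages_nohuge() wrappers, which on Linux reduce to mmap() plus madvise() with MADV_HUGEPAGE and MADV_NOHUGEPAGE. A rough sketch of that underlying mechanism in terms of the raw system calls (not jemalloc's actual wrapper code):

    #include <sys/mman.h>
    #include <unistd.h>

    int
    main(void)
    {
        size_t page = (size_t)sysconf(_SC_PAGESIZE);
        void *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
            return (1);
    #ifdef MADV_HUGEPAGE
        madvise(p, page, MADV_HUGEPAGE);        /* roughly pages_huge() */
        madvise(p, page, MADV_NOHUGEPAGE);      /* roughly pages_nohuge() */
    #endif
        munmap(p, page);
        return (0);
    }
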
diff --git a/deps/jemalloc/test/unit/ph.c b/deps/jemalloc/test/unit/ph.c
deleted file mode 100644
index da442f07e..000000000
--- a/deps/jemalloc/test/unit/ph.c
+++ /dev/null
@@ -1,290 +0,0 @@
-#include "test/jemalloc_test.h"
-
-typedef struct node_s node_t;
-
-struct node_s {
-#define NODE_MAGIC 0x9823af7e
- uint32_t magic;
- phn(node_t) link;
- uint64_t key;
-};
-
-static int
-node_cmp(const node_t *a, const node_t *b)
-{
- int ret;
-
- ret = (a->key > b->key) - (a->key < b->key);
- if (ret == 0) {
- /*
- * Duplicates are not allowed in the heap, so force an
- * arbitrary ordering for non-identical items with equal keys.
- */
- ret = (((uintptr_t)a) > ((uintptr_t)b))
- - (((uintptr_t)a) < ((uintptr_t)b));
- }
- return (ret);
-}
-
-static int
-node_cmp_magic(const node_t *a, const node_t *b) {
-
- assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic");
- assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic");
-
- return (node_cmp(a, b));
-}
-
-typedef ph(node_t) heap_t;
-ph_gen(static, heap_, heap_t, node_t, link, node_cmp_magic);
-
-static void
-node_print(const node_t *node, unsigned depth)
-{
- unsigned i;
- node_t *leftmost_child, *sibling;
-
- for (i = 0; i < depth; i++)
- malloc_printf("\t");
- malloc_printf("%2"FMTu64"\n", node->key);
-
- leftmost_child = phn_lchild_get(node_t, link, node);
- if (leftmost_child == NULL)
- return;
- node_print(leftmost_child, depth + 1);
-
- for (sibling = phn_next_get(node_t, link, leftmost_child); sibling !=
- NULL; sibling = phn_next_get(node_t, link, sibling)) {
- node_print(sibling, depth + 1);
- }
-}
-
-static void
-heap_print(const heap_t *heap)
-{
- node_t *auxelm;
-
- malloc_printf("vvv heap %p vvv\n", heap);
- if (heap->ph_root == NULL)
- goto label_return;
-
- node_print(heap->ph_root, 0);
-
- for (auxelm = phn_next_get(node_t, link, heap->ph_root); auxelm != NULL;
- auxelm = phn_next_get(node_t, link, auxelm)) {
- assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t,
- link, auxelm)), auxelm,
- "auxelm's prev doesn't link to auxelm");
- node_print(auxelm, 0);
- }
-
-label_return:
- malloc_printf("^^^ heap %p ^^^\n", heap);
-}
-
-static unsigned
-node_validate(const node_t *node, const node_t *parent)
-{
- unsigned nnodes = 1;
- node_t *leftmost_child, *sibling;
-
- if (parent != NULL) {
- assert_d_ge(node_cmp_magic(node, parent), 0,
- "Child is less than parent");
- }
-
- leftmost_child = phn_lchild_get(node_t, link, node);
- if (leftmost_child == NULL)
- return (nnodes);
- assert_ptr_eq((void *)phn_prev_get(node_t, link, leftmost_child),
- (void *)node, "Leftmost child does not link to node");
- nnodes += node_validate(leftmost_child, node);
-
- for (sibling = phn_next_get(node_t, link, leftmost_child); sibling !=
- NULL; sibling = phn_next_get(node_t, link, sibling)) {
- assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t,
- link, sibling)), sibling,
- "sibling's prev doesn't link to sibling");
- nnodes += node_validate(sibling, node);
- }
- return (nnodes);
-}
-
-static unsigned
-heap_validate(const heap_t *heap)
-{
- unsigned nnodes = 0;
- node_t *auxelm;
-
- if (heap->ph_root == NULL)
- goto label_return;
-
- nnodes += node_validate(heap->ph_root, NULL);
-
- for (auxelm = phn_next_get(node_t, link, heap->ph_root); auxelm != NULL;
- auxelm = phn_next_get(node_t, link, auxelm)) {
- assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t,
- link, auxelm)), auxelm,
- "auxelm's prev doesn't link to auxelm");
- nnodes += node_validate(auxelm, NULL);
- }
-
-label_return:
- if (false)
- heap_print(heap);
- return (nnodes);
-}
-
-TEST_BEGIN(test_ph_empty)
-{
- heap_t heap;
-
- heap_new(&heap);
- assert_true(heap_empty(&heap), "Heap should be empty");
- assert_ptr_null(heap_first(&heap), "Unexpected node");
-}
-TEST_END
-
-static void
-node_remove(heap_t *heap, node_t *node)
-{
-
- heap_remove(heap, node);
-
- node->magic = 0;
-}
-
-static node_t *
-node_remove_first(heap_t *heap)
-{
- node_t *node = heap_remove_first(heap);
- node->magic = 0;
- return (node);
-}
-
-TEST_BEGIN(test_ph_random)
-{
-#define NNODES 25
-#define NBAGS 250
-#define SEED 42
- sfmt_t *sfmt;
- uint64_t bag[NNODES];
- heap_t heap;
- node_t nodes[NNODES];
- unsigned i, j, k;
-
- sfmt = init_gen_rand(SEED);
- for (i = 0; i < NBAGS; i++) {
- switch (i) {
- case 0:
- /* Insert in order. */
- for (j = 0; j < NNODES; j++)
- bag[j] = j;
- break;
- case 1:
- /* Insert in reverse order. */
- for (j = 0; j < NNODES; j++)
- bag[j] = NNODES - j - 1;
- break;
- default:
- for (j = 0; j < NNODES; j++)
- bag[j] = gen_rand64_range(sfmt, NNODES);
- }
-
- for (j = 1; j <= NNODES; j++) {
- /* Initialize heap and nodes. */
- heap_new(&heap);
- assert_u_eq(heap_validate(&heap), 0,
- "Incorrect node count");
- for (k = 0; k < j; k++) {
- nodes[k].magic = NODE_MAGIC;
- nodes[k].key = bag[k];
- }
-
- /* Insert nodes. */
- for (k = 0; k < j; k++) {
- heap_insert(&heap, &nodes[k]);
- if (i % 13 == 12) {
- /* Trigger merging. */
- assert_ptr_not_null(heap_first(&heap),
- "Heap should not be empty");
- }
- assert_u_eq(heap_validate(&heap), k + 1,
- "Incorrect node count");
- }
-
- assert_false(heap_empty(&heap),
- "Heap should not be empty");
-
- /* Remove nodes. */
- switch (i % 4) {
- case 0:
- for (k = 0; k < j; k++) {
- assert_u_eq(heap_validate(&heap), j - k,
- "Incorrect node count");
- node_remove(&heap, &nodes[k]);
- assert_u_eq(heap_validate(&heap), j - k
- - 1, "Incorrect node count");
- }
- break;
- case 1:
- for (k = j; k > 0; k--) {
- node_remove(&heap, &nodes[k-1]);
- assert_u_eq(heap_validate(&heap), k - 1,
- "Incorrect node count");
- }
- break;
- case 2: {
- node_t *prev = NULL;
- for (k = 0; k < j; k++) {
- node_t *node = node_remove_first(&heap);
- assert_u_eq(heap_validate(&heap), j - k
- - 1, "Incorrect node count");
- if (prev != NULL) {
- assert_d_ge(node_cmp(node,
- prev), 0,
- "Bad removal order");
- }
- prev = node;
- }
- break;
- } case 3: {
- node_t *prev = NULL;
- for (k = 0; k < j; k++) {
- node_t *node = heap_first(&heap);
- assert_u_eq(heap_validate(&heap), j - k,
- "Incorrect node count");
- if (prev != NULL) {
- assert_d_ge(node_cmp(node,
- prev), 0,
- "Bad removal order");
- }
- node_remove(&heap, node);
- assert_u_eq(heap_validate(&heap), j - k
- - 1, "Incorrect node count");
- prev = node;
- }
- break;
- } default:
- not_reached();
- }
-
- assert_ptr_null(heap_first(&heap),
- "Heap should be empty");
- assert_true(heap_empty(&heap), "Heap should be empty");
- }
- }
- fini_gen_rand(sfmt);
-#undef NNODES
-#undef SEED
-}
-TEST_END
-
-int
-main(void)
-{
-
- return (test(
- test_ph_empty,
- test_ph_random));
-}
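
The deleted ph.c test doubles as the main usage reference for the pairing-heap macros in include/jemalloc/internal/ph.h, which this revert removes as well. The generated interface amounts to embedding a phn(type) link in the node type and instantiating the operations with ph_gen(); trimmed down from the test above (internal macros, in-tree headers required):

    #include "test/jemalloc_test.h"     /* pulls in ph.h */

    typedef struct node_s node_t;
    struct node_s {
        phn(node_t) link;       /* intrusive heap linkage */
        uint64_t    key;
    };

    static int
    node_cmp(const node_t *a, const node_t *b)
    {
        /* Ties should be broken deterministically (see the test above). */
        return ((a->key > b->key) - (a->key < b->key));
    }

    typedef ph(node_t) heap_t;
    ph_gen(static, heap_, heap_t, node_t, link, node_cmp);

    /*
     * Generated operations: heap_new(), heap_insert(), heap_first(),
     * heap_remove_first(), heap_remove(), heap_empty().
     */
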
diff --git a/deps/jemalloc/test/unit/prng.c b/deps/jemalloc/test/unit/prng.c
deleted file mode 100644
index 80c9d733f..000000000
--- a/deps/jemalloc/test/unit/prng.c
+++ /dev/null
@@ -1,263 +0,0 @@
-#include "test/jemalloc_test.h"
-
-static void
-test_prng_lg_range_u32(bool atomic)
-{
- uint32_t sa, sb, ra, rb;
- unsigned lg_range;
-
- sa = 42;
- ra = prng_lg_range_u32(&sa, 32, atomic);
- sa = 42;
- rb = prng_lg_range_u32(&sa, 32, atomic);
- assert_u32_eq(ra, rb,
- "Repeated generation should produce repeated results");
-
- sb = 42;
- rb = prng_lg_range_u32(&sb, 32, atomic);
- assert_u32_eq(ra, rb,
- "Equivalent generation should produce equivalent results");
-
- sa = 42;
- ra = prng_lg_range_u32(&sa, 32, atomic);
- rb = prng_lg_range_u32(&sa, 32, atomic);
- assert_u32_ne(ra, rb,
- "Full-width results must not immediately repeat");
-
- sa = 42;
- ra = prng_lg_range_u32(&sa, 32, atomic);
- for (lg_range = 31; lg_range > 0; lg_range--) {
- sb = 42;
- rb = prng_lg_range_u32(&sb, lg_range, atomic);
- assert_u32_eq((rb & (UINT32_C(0xffffffff) << lg_range)),
- 0, "High order bits should be 0, lg_range=%u", lg_range);
- assert_u32_eq(rb, (ra >> (32 - lg_range)),
- "Expected high order bits of full-width result, "
- "lg_range=%u", lg_range);
- }
-}
-
-static void
-test_prng_lg_range_u64(void)
-{
- uint64_t sa, sb, ra, rb;
- unsigned lg_range;
-
- sa = 42;
- ra = prng_lg_range_u64(&sa, 64);
- sa = 42;
- rb = prng_lg_range_u64(&sa, 64);
- assert_u64_eq(ra, rb,
- "Repeated generation should produce repeated results");
-
- sb = 42;
- rb = prng_lg_range_u64(&sb, 64);
- assert_u64_eq(ra, rb,
- "Equivalent generation should produce equivalent results");
-
- sa = 42;
- ra = prng_lg_range_u64(&sa, 64);
- rb = prng_lg_range_u64(&sa, 64);
- assert_u64_ne(ra, rb,
- "Full-width results must not immediately repeat");
-
- sa = 42;
- ra = prng_lg_range_u64(&sa, 64);
- for (lg_range = 63; lg_range > 0; lg_range--) {
- sb = 42;
- rb = prng_lg_range_u64(&sb, lg_range);
- assert_u64_eq((rb & (UINT64_C(0xffffffffffffffff) << lg_range)),
- 0, "High order bits should be 0, lg_range=%u", lg_range);
- assert_u64_eq(rb, (ra >> (64 - lg_range)),
- "Expected high order bits of full-width result, "
- "lg_range=%u", lg_range);
- }
-}
-
-static void
-test_prng_lg_range_zu(bool atomic)
-{
- size_t sa, sb, ra, rb;
- unsigned lg_range;
-
- sa = 42;
- ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
- sa = 42;
- rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
- assert_zu_eq(ra, rb,
- "Repeated generation should produce repeated results");
-
- sb = 42;
- rb = prng_lg_range_zu(&sb, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
- assert_zu_eq(ra, rb,
- "Equivalent generation should produce equivalent results");
-
- sa = 42;
- ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
- rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
- assert_zu_ne(ra, rb,
- "Full-width results must not immediately repeat");
-
- sa = 42;
- ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
- for (lg_range = (ZU(1) << (3 + LG_SIZEOF_PTR)) - 1; lg_range > 0;
- lg_range--) {
- sb = 42;
- rb = prng_lg_range_zu(&sb, lg_range, atomic);
- assert_zu_eq((rb & (SIZE_T_MAX << lg_range)),
- 0, "High order bits should be 0, lg_range=%u", lg_range);
- assert_zu_eq(rb, (ra >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) -
- lg_range)), "Expected high order bits of full-width "
- "result, lg_range=%u", lg_range);
- }
-}
-
-TEST_BEGIN(test_prng_lg_range_u32_nonatomic)
-{
-
- test_prng_lg_range_u32(false);
-}
-TEST_END
-
-TEST_BEGIN(test_prng_lg_range_u32_atomic)
-{
-
- test_prng_lg_range_u32(true);
-}
-TEST_END
-
-TEST_BEGIN(test_prng_lg_range_u64_nonatomic)
-{
-
- test_prng_lg_range_u64();
-}
-TEST_END
-
-TEST_BEGIN(test_prng_lg_range_zu_nonatomic)
-{
-
- test_prng_lg_range_zu(false);
-}
-TEST_END
-
-TEST_BEGIN(test_prng_lg_range_zu_atomic)
-{
-
- test_prng_lg_range_zu(true);
-}
-TEST_END
-
-static void
-test_prng_range_u32(bool atomic)
-{
- uint32_t range;
-#define MAX_RANGE 10000000
-#define RANGE_STEP 97
-#define NREPS 10
-
- for (range = 2; range < MAX_RANGE; range += RANGE_STEP) {
- uint32_t s;
- unsigned rep;
-
- s = range;
- for (rep = 0; rep < NREPS; rep++) {
- uint32_t r = prng_range_u32(&s, range, atomic);
-
- assert_u32_lt(r, range, "Out of range");
- }
- }
-}
-
-static void
-test_prng_range_u64(void)
-{
- uint64_t range;
-#define MAX_RANGE 10000000
-#define RANGE_STEP 97
-#define NREPS 10
-
- for (range = 2; range < MAX_RANGE; range += RANGE_STEP) {
- uint64_t s;
- unsigned rep;
-
- s = range;
- for (rep = 0; rep < NREPS; rep++) {
- uint64_t r = prng_range_u64(&s, range);
-
- assert_u64_lt(r, range, "Out of range");
- }
- }
-}
-
-static void
-test_prng_range_zu(bool atomic)
-{
- size_t range;
-#define MAX_RANGE 10000000
-#define RANGE_STEP 97
-#define NREPS 10
-
- for (range = 2; range < MAX_RANGE; range += RANGE_STEP) {
- size_t s;
- unsigned rep;
-
- s = range;
- for (rep = 0; rep < NREPS; rep++) {
- size_t r = prng_range_zu(&s, range, atomic);
-
- assert_zu_lt(r, range, "Out of range");
- }
- }
-}
-
-TEST_BEGIN(test_prng_range_u32_nonatomic)
-{
-
- test_prng_range_u32(false);
-}
-TEST_END
-
-TEST_BEGIN(test_prng_range_u32_atomic)
-{
-
- test_prng_range_u32(true);
-}
-TEST_END
-
-TEST_BEGIN(test_prng_range_u64_nonatomic)
-{
-
- test_prng_range_u64();
-}
-TEST_END
-
-TEST_BEGIN(test_prng_range_zu_nonatomic)
-{
-
- test_prng_range_zu(false);
-}
-TEST_END
-
-TEST_BEGIN(test_prng_range_zu_atomic)
-{
-
- test_prng_range_zu(true);
-}
-TEST_END
-
-int
-main(void)
-{
-
- return (test(
- test_prng_lg_range_u32_nonatomic,
- test_prng_lg_range_u32_atomic,
- test_prng_lg_range_u64_nonatomic,
- test_prng_lg_range_zu_nonatomic,
- test_prng_lg_range_zu_atomic,
- test_prng_range_u32_nonatomic,
- test_prng_range_u32_atomic,
- test_prng_range_u64_nonatomic,
- test_prng_range_zu_nonatomic,
- test_prng_range_zu_atomic));
-}
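
One property the deleted prng.c test nails down is that prng_lg_range_u32(&s, lg_range, ...) returns exactly the high-order lg_range bits of the corresponding full-width result (the rb == ra >> (32 - lg_range) assertions above). A toy illustration of that property with a plain linear-congruential step; the constants and names are illustrative, not jemalloc's:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t
    toy_prng_lg_range_u32(uint32_t *state, unsigned lg_range)
    {
        *state = *state * 1103515245u + 12345u;     /* toy LCG step */
        return (*state >> (32 - lg_range));         /* keep the high bits */
    }

    int
    main(void)
    {
        uint32_t sa = 42, sb = 42;
        uint32_t full = toy_prng_lg_range_u32(&sa, 32);
        uint32_t narrow = toy_prng_lg_range_u32(&sb, 9);

        /* The 9-bit result is the top 9 bits of the full-width result. */
        assert(narrow == full >> (32 - 9));
        return (0);
    }
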
diff --git a/deps/jemalloc/test/unit/prof_accum.c b/deps/jemalloc/test/unit/prof_accum.c
index d941b5bc6..fd229e0fd 100755..100644
--- a/deps/jemalloc/test/unit/prof_accum.c
+++ b/deps/jemalloc/test/unit/prof_accum.c
@@ -68,9 +68,8 @@ TEST_BEGIN(test_idump)
test_skip_if(!config_prof);
active = true;
- assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
- sizeof(active)), 0,
- "Unexpected mallctl failure while activating profiling");
+ assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
+ 0, "Unexpected mallctl failure while activating profiling");
prof_dump_open = prof_dump_open_intercept;
diff --git a/deps/jemalloc/test/unit/prof_active.c b/deps/jemalloc/test/unit/prof_active.c
index d00943a4c..814909572 100755..100644
--- a/deps/jemalloc/test/unit/prof_active.c
+++ b/deps/jemalloc/test/unit/prof_active.c
@@ -12,7 +12,7 @@ mallctl_bool_get(const char *name, bool expected, const char *func, int line)
size_t sz;
sz = sizeof(old);
- assert_d_eq(mallctl(name, (void *)&old, &sz, NULL, 0), 0,
+ assert_d_eq(mallctl(name, &old, &sz, NULL, 0), 0,
"%s():%d: Unexpected mallctl failure reading %s", func, line, name);
assert_b_eq(old, expected, "%s():%d: Unexpected %s value", func, line,
name);
@@ -26,8 +26,7 @@ mallctl_bool_set(const char *name, bool old_expected, bool val_new,
size_t sz;
sz = sizeof(old);
- assert_d_eq(mallctl(name, (void *)&old, &sz, (void *)&val_new,
- sizeof(val_new)), 0,
+ assert_d_eq(mallctl(name, &old, &sz, &val_new, sizeof(val_new)), 0,
"%s():%d: Unexpected mallctl failure reading/writing %s", func,
line, name);
assert_b_eq(old, old_expected, "%s():%d: Unexpected %s value", func,
diff --git a/deps/jemalloc/test/unit/prof_gdump.c b/deps/jemalloc/test/unit/prof_gdump.c
index 996cb6704..a0e6ee921 100755..100644
--- a/deps/jemalloc/test/unit/prof_gdump.c
+++ b/deps/jemalloc/test/unit/prof_gdump.c
@@ -28,9 +28,8 @@ TEST_BEGIN(test_gdump)
test_skip_if(!config_prof);
active = true;
- assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
- sizeof(active)), 0,
- "Unexpected mallctl failure while activating profiling");
+ assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
+ 0, "Unexpected mallctl failure while activating profiling");
prof_dump_open = prof_dump_open_intercept;
@@ -46,8 +45,8 @@ TEST_BEGIN(test_gdump)
gdump = false;
sz = sizeof(gdump_old);
- assert_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz,
- (void *)&gdump, sizeof(gdump)), 0,
+ assert_d_eq(mallctl("prof.gdump", &gdump_old, &sz, &gdump,
+ sizeof(gdump)), 0,
"Unexpected mallctl failure while disabling prof.gdump");
assert(gdump_old);
did_prof_dump_open = false;
@@ -57,8 +56,8 @@ TEST_BEGIN(test_gdump)
gdump = true;
sz = sizeof(gdump_old);
- assert_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz,
- (void *)&gdump, sizeof(gdump)), 0,
+ assert_d_eq(mallctl("prof.gdump", &gdump_old, &sz, &gdump,
+ sizeof(gdump)), 0,
"Unexpected mallctl failure while enabling prof.gdump");
assert(!gdump_old);
did_prof_dump_open = false;
diff --git a/deps/jemalloc/test/unit/prof_idump.c b/deps/jemalloc/test/unit/prof_idump.c
index 16c6462de..bdea53ecd 100755..100644
--- a/deps/jemalloc/test/unit/prof_idump.c
+++ b/deps/jemalloc/test/unit/prof_idump.c
@@ -29,9 +29,8 @@ TEST_BEGIN(test_idump)
test_skip_if(!config_prof);
active = true;
- assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
- sizeof(active)), 0,
- "Unexpected mallctl failure while activating profiling");
+ assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
+ 0, "Unexpected mallctl failure while activating profiling");
prof_dump_open = prof_dump_open_intercept;
diff --git a/deps/jemalloc/test/unit/prof_reset.c b/deps/jemalloc/test/unit/prof_reset.c
index 59d70796a..69983e5e5 100755..100644
--- a/deps/jemalloc/test/unit/prof_reset.c
+++ b/deps/jemalloc/test/unit/prof_reset.c
@@ -20,8 +20,8 @@ static void
set_prof_active(bool active)
{
- assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
- sizeof(active)), 0, "Unexpected mallctl failure");
+ assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
+ 0, "Unexpected mallctl failure");
}
static size_t
@@ -30,8 +30,7 @@ get_lg_prof_sample(void)
size_t lg_prof_sample;
size_t sz = sizeof(size_t);
- assert_d_eq(mallctl("prof.lg_sample", (void *)&lg_prof_sample, &sz,
- NULL, 0), 0,
+ assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz, NULL, 0), 0,
"Unexpected mallctl failure while reading profiling sample rate");
return (lg_prof_sample);
}
@@ -40,7 +39,7 @@ static void
do_prof_reset(size_t lg_prof_sample)
{
assert_d_eq(mallctl("prof.reset", NULL, NULL,
- (void *)&lg_prof_sample, sizeof(size_t)), 0,
+ &lg_prof_sample, sizeof(size_t)), 0,
"Unexpected mallctl failure while resetting profile data");
assert_zu_eq(lg_prof_sample, get_lg_prof_sample(),
"Expected profile sample rate change");
@@ -55,8 +54,8 @@ TEST_BEGIN(test_prof_reset_basic)
test_skip_if(!config_prof);
sz = sizeof(size_t);
- assert_d_eq(mallctl("opt.lg_prof_sample", (void *)&lg_prof_sample_orig,
- &sz, NULL, 0), 0,
+ assert_d_eq(mallctl("opt.lg_prof_sample", &lg_prof_sample_orig, &sz,
+ NULL, 0), 0,
"Unexpected mallctl failure while reading profiling sample rate");
assert_zu_eq(lg_prof_sample_orig, 0,
"Unexpected profiling sample rate");
@@ -95,8 +94,7 @@ TEST_END
bool prof_dump_header_intercepted = false;
prof_cnt_t cnt_all_copy = {0, 0, 0, 0};
static bool
-prof_dump_header_intercept(tsdn_t *tsdn, bool propagate_err,
- const prof_cnt_t *cnt_all)
+prof_dump_header_intercept(bool propagate_err, const prof_cnt_t *cnt_all)
{
prof_dump_header_intercepted = true;
diff --git a/deps/jemalloc/test/unit/prof_thread_name.c b/deps/jemalloc/test/unit/prof_thread_name.c
index 9ec549776..f501158d7 100755..100644
--- a/deps/jemalloc/test/unit/prof_thread_name.c
+++ b/deps/jemalloc/test/unit/prof_thread_name.c
@@ -12,9 +12,8 @@ mallctl_thread_name_get_impl(const char *thread_name_expected, const char *func,
size_t sz;
sz = sizeof(thread_name_old);
- assert_d_eq(mallctl("thread.prof.name", (void *)&thread_name_old, &sz,
- NULL, 0), 0,
- "%s():%d: Unexpected mallctl failure reading thread.prof.name",
+ assert_d_eq(mallctl("thread.prof.name", &thread_name_old, &sz, NULL, 0),
+ 0, "%s():%d: Unexpected mallctl failure reading thread.prof.name",
func, line);
assert_str_eq(thread_name_old, thread_name_expected,
"%s():%d: Unexpected thread.prof.name value", func, line);
@@ -27,8 +26,8 @@ mallctl_thread_name_set_impl(const char *thread_name, const char *func,
int line)
{
- assert_d_eq(mallctl("thread.prof.name", NULL, NULL,
- (void *)&thread_name, sizeof(thread_name)), 0,
+ assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name,
+ sizeof(thread_name)), 0,
"%s():%d: Unexpected mallctl failure reading thread.prof.name",
func, line);
mallctl_thread_name_get_impl(thread_name, func, line);
@@ -47,15 +46,15 @@ TEST_BEGIN(test_prof_thread_name_validation)
/* NULL input shouldn't be allowed. */
thread_name = NULL;
- assert_d_eq(mallctl("thread.prof.name", NULL, NULL,
- (void *)&thread_name, sizeof(thread_name)), EFAULT,
+ assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name,
+ sizeof(thread_name)), EFAULT,
"Unexpected mallctl result writing \"%s\" to thread.prof.name",
thread_name);
/* '\n' shouldn't be allowed. */
thread_name = "hi\nthere";
- assert_d_eq(mallctl("thread.prof.name", NULL, NULL,
- (void *)&thread_name, sizeof(thread_name)), EFAULT,
+ assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name,
+ sizeof(thread_name)), EFAULT,
"Unexpected mallctl result writing \"%s\" to thread.prof.name",
thread_name);
@@ -65,9 +64,8 @@ TEST_BEGIN(test_prof_thread_name_validation)
size_t sz;
sz = sizeof(thread_name_old);
- assert_d_eq(mallctl("thread.prof.name",
- (void *)&thread_name_old, &sz, (void *)&thread_name,
- sizeof(thread_name)), EPERM,
+ assert_d_eq(mallctl("thread.prof.name", &thread_name_old, &sz,
+ &thread_name, sizeof(thread_name)), EPERM,
"Unexpected mallctl result writing \"%s\" to "
"thread.prof.name", thread_name);
}
diff --git a/deps/jemalloc/test/unit/rb.c b/deps/jemalloc/test/unit/rb.c
index cf3d3a783..b38eb0e33 100644
--- a/deps/jemalloc/test/unit/rb.c
+++ b/deps/jemalloc/test/unit/rb.c
@@ -3,7 +3,7 @@
#define rbtn_black_height(a_type, a_field, a_rbt, r_height) do { \
a_type *rbp_bh_t; \
for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0; \
- rbp_bh_t != NULL; \
+ rbp_bh_t != &(a_rbt)->rbt_nil; \
rbp_bh_t = rbtn_left_get(a_type, a_field, rbp_bh_t)) { \
if (!rbtn_red_get(a_type, a_field, rbp_bh_t)) { \
(r_height)++; \
@@ -21,7 +21,7 @@ struct node_s {
};
static int
-node_cmp(const node_t *a, const node_t *b) {
+node_cmp(node_t *a, node_t *b) {
int ret;
assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic");
@@ -68,43 +68,38 @@ TEST_BEGIN(test_rb_empty)
TEST_END
static unsigned
-tree_recurse(node_t *node, unsigned black_height, unsigned black_depth)
+tree_recurse(node_t *node, unsigned black_height, unsigned black_depth,
+ node_t *nil)
{
unsigned ret = 0;
- node_t *left_node;
- node_t *right_node;
-
- if (node == NULL)
- return (ret);
-
- left_node = rbtn_left_get(node_t, link, node);
- right_node = rbtn_right_get(node_t, link, node);
+ node_t *left_node = rbtn_left_get(node_t, link, node);
+ node_t *right_node = rbtn_right_get(node_t, link, node);
if (!rbtn_red_get(node_t, link, node))
black_depth++;
/* Red nodes must be interleaved with black nodes. */
if (rbtn_red_get(node_t, link, node)) {
- if (left_node != NULL)
- assert_false(rbtn_red_get(node_t, link, left_node),
- "Node should be black");
- if (right_node != NULL)
- assert_false(rbtn_red_get(node_t, link, right_node),
- "Node should be black");
+ assert_false(rbtn_red_get(node_t, link, left_node),
+ "Node should be black");
+ assert_false(rbtn_red_get(node_t, link, right_node),
+ "Node should be black");
}
+ if (node == nil)
+ return (ret);
/* Self. */
assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic");
/* Left subtree. */
- if (left_node != NULL)
- ret += tree_recurse(left_node, black_height, black_depth);
+ if (left_node != nil)
+ ret += tree_recurse(left_node, black_height, black_depth, nil);
else
ret += (black_depth != black_height);
/* Right subtree. */
- if (right_node != NULL)
- ret += tree_recurse(right_node, black_height, black_depth);
+ if (right_node != nil)
+ ret += tree_recurse(right_node, black_height, black_depth, nil);
else
ret += (black_depth != black_height);
@@ -186,7 +181,8 @@ node_remove(tree_t *tree, node_t *node, unsigned nnodes)
node->magic = 0;
rbtn_black_height(node_t, link, tree, black_height);
- imbalances = tree_recurse(tree->rbt_root, black_height, 0);
+ imbalances = tree_recurse(tree->rbt_root, black_height, 0,
+ &(tree->rbt_nil));
assert_u_eq(imbalances, 0, "Tree is unbalanced");
assert_u_eq(tree_iterate(tree), nnodes-1,
"Unexpected node iteration count");
@@ -216,15 +212,6 @@ remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data)
return (ret);
}
-static void
-destroy_cb(node_t *node, void *data)
-{
- unsigned *nnodes = (unsigned *)data;
-
- assert_u_gt(*nnodes, 0, "Destruction removed too many nodes");
- (*nnodes)--;
-}
-
TEST_BEGIN(test_rb_random)
{
#define NNODES 25
@@ -257,6 +244,7 @@ TEST_BEGIN(test_rb_random)
for (j = 1; j <= NNODES; j++) {
/* Initialize tree and nodes. */
tree_new(&tree);
+ tree.rbt_nil.magic = 0;
for (k = 0; k < j; k++) {
nodes[k].magic = NODE_MAGIC;
nodes[k].key = bag[k];
@@ -269,7 +257,7 @@ TEST_BEGIN(test_rb_random)
rbtn_black_height(node_t, link, &tree,
black_height);
imbalances = tree_recurse(tree.rbt_root,
- black_height, 0);
+ black_height, 0, &(tree.rbt_nil));
assert_u_eq(imbalances, 0,
"Tree is unbalanced");
@@ -290,7 +278,7 @@ TEST_BEGIN(test_rb_random)
}
/* Remove nodes. */
- switch (i % 5) {
+ switch (i % 4) {
case 0:
for (k = 0; k < j; k++)
node_remove(&tree, &nodes[k], j - k);
@@ -326,12 +314,6 @@ TEST_BEGIN(test_rb_random)
assert_u_eq(nnodes, 0,
"Removal terminated early");
break;
- } case 4: {
- unsigned nnodes = j;
- tree_destroy(&tree, destroy_cb, &nnodes);
- assert_u_eq(nnodes, 0,
- "Destruction terminated early");
- break;
} default:
not_reached();
}
diff --git a/deps/jemalloc/test/unit/run_quantize.c b/deps/jemalloc/test/unit/run_quantize.c
deleted file mode 100644
index 089176f39..000000000
--- a/deps/jemalloc/test/unit/run_quantize.c
+++ /dev/null
@@ -1,149 +0,0 @@
-#include "test/jemalloc_test.h"
-
-TEST_BEGIN(test_small_run_size)
-{
- unsigned nbins, i;
- size_t sz, run_size;
- size_t mib[4];
- size_t miblen = sizeof(mib) / sizeof(size_t);
-
- /*
- * Iterate over all small size classes, get their run sizes, and verify
- * that the quantized size is the same as the run size.
- */
-
- sz = sizeof(unsigned);
- assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
- "Unexpected mallctl failure");
-
- assert_d_eq(mallctlnametomib("arenas.bin.0.run_size", mib, &miblen), 0,
- "Unexpected mallctlnametomib failure");
- for (i = 0; i < nbins; i++) {
- mib[2] = i;
- sz = sizeof(size_t);
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&run_size, &sz,
- NULL, 0), 0, "Unexpected mallctlbymib failure");
- assert_zu_eq(run_size, run_quantize_floor(run_size),
- "Small run quantization should be a no-op (run_size=%zu)",
- run_size);
- assert_zu_eq(run_size, run_quantize_ceil(run_size),
- "Small run quantization should be a no-op (run_size=%zu)",
- run_size);
- }
-}
-TEST_END
-
-TEST_BEGIN(test_large_run_size)
-{
- bool cache_oblivious;
- unsigned nlruns, i;
- size_t sz, run_size_prev, ceil_prev;
- size_t mib[4];
- size_t miblen = sizeof(mib) / sizeof(size_t);
-
- /*
- * Iterate over all large size classes, get their run sizes, and verify
- * that the quantized size is the same as the run size.
- */
-
- sz = sizeof(bool);
- assert_d_eq(mallctl("config.cache_oblivious", (void *)&cache_oblivious,
- &sz, NULL, 0), 0, "Unexpected mallctl failure");
-
- sz = sizeof(unsigned);
- assert_d_eq(mallctl("arenas.nlruns", (void *)&nlruns, &sz, NULL, 0), 0,
- "Unexpected mallctl failure");
-
- assert_d_eq(mallctlnametomib("arenas.lrun.0.size", mib, &miblen), 0,
- "Unexpected mallctlnametomib failure");
- for (i = 0; i < nlruns; i++) {
- size_t lrun_size, run_size, floor, ceil;
-
- mib[2] = i;
- sz = sizeof(size_t);
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&lrun_size, &sz,
- NULL, 0), 0, "Unexpected mallctlbymib failure");
- run_size = cache_oblivious ? lrun_size + PAGE : lrun_size;
- floor = run_quantize_floor(run_size);
- ceil = run_quantize_ceil(run_size);
-
- assert_zu_eq(run_size, floor,
- "Large run quantization should be a no-op for precise "
- "size (lrun_size=%zu, run_size=%zu)", lrun_size, run_size);
- assert_zu_eq(run_size, ceil,
- "Large run quantization should be a no-op for precise "
- "size (lrun_size=%zu, run_size=%zu)", lrun_size, run_size);
-
- if (i > 0) {
- assert_zu_eq(run_size_prev, run_quantize_floor(run_size
- - PAGE), "Floor should be a precise size");
- if (run_size_prev < ceil_prev) {
- assert_zu_eq(ceil_prev, run_size,
- "Ceiling should be a precise size "
- "(run_size_prev=%zu, ceil_prev=%zu, "
- "run_size=%zu)", run_size_prev, ceil_prev,
- run_size);
- }
- }
- run_size_prev = floor;
- ceil_prev = run_quantize_ceil(run_size + PAGE);
- }
-}
-TEST_END
-
-TEST_BEGIN(test_monotonic)
-{
- unsigned nbins, nlruns, i;
- size_t sz, floor_prev, ceil_prev;
-
- /*
- * Iterate over all run sizes and verify that
- * run_quantize_{floor,ceil}() are monotonic.
- */
-
- sz = sizeof(unsigned);
- assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
- "Unexpected mallctl failure");
-
- sz = sizeof(unsigned);
- assert_d_eq(mallctl("arenas.nlruns", (void *)&nlruns, &sz, NULL, 0), 0,
- "Unexpected mallctl failure");
-
- floor_prev = 0;
- ceil_prev = 0;
- for (i = 1; i <= chunksize >> LG_PAGE; i++) {
- size_t run_size, floor, ceil;
-
- run_size = i << LG_PAGE;
- floor = run_quantize_floor(run_size);
- ceil = run_quantize_ceil(run_size);
-
- assert_zu_le(floor, run_size,
- "Floor should be <= (floor=%zu, run_size=%zu, ceil=%zu)",
- floor, run_size, ceil);
- assert_zu_ge(ceil, run_size,
- "Ceiling should be >= (floor=%zu, run_size=%zu, ceil=%zu)",
- floor, run_size, ceil);
-
- assert_zu_le(floor_prev, floor, "Floor should be monotonic "
- "(floor_prev=%zu, floor=%zu, run_size=%zu, ceil=%zu)",
- floor_prev, floor, run_size, ceil);
- assert_zu_le(ceil_prev, ceil, "Ceiling should be monotonic "
- "(floor=%zu, run_size=%zu, ceil_prev=%zu, ceil=%zu)",
- floor, run_size, ceil_prev, ceil);
-
- floor_prev = floor;
- ceil_prev = ceil;
- }
-}
-TEST_END
-
-int
-main(void)
-{
-
- return (test(
- test_small_run_size,
- test_large_run_size,
- test_monotonic));
-}
diff --git a/deps/jemalloc/test/unit/size_classes.c b/deps/jemalloc/test/unit/size_classes.c
index 81cc60617..d3aaebd77 100755..100644
--- a/deps/jemalloc/test/unit/size_classes.c
+++ b/deps/jemalloc/test/unit/size_classes.c
@@ -8,8 +8,8 @@ get_max_size_class(void)
size_t sz, miblen, max_size_class;
sz = sizeof(unsigned);
- assert_d_eq(mallctl("arenas.nhchunks", (void *)&nhchunks, &sz, NULL, 0),
- 0, "Unexpected mallctl() error");
+ assert_d_eq(mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0), 0,
+ "Unexpected mallctl() error");
miblen = sizeof(mib) / sizeof(size_t);
assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0,
@@ -17,8 +17,8 @@ get_max_size_class(void)
mib[2] = nhchunks - 1;
sz = sizeof(size_t);
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz,
- NULL, 0), 0, "Unexpected mallctlbymib() error");
+ assert_d_eq(mallctlbymib(mib, miblen, &max_size_class, &sz, NULL, 0), 0,
+ "Unexpected mallctlbymib() error");
return (max_size_class);
}
@@ -80,105 +80,10 @@ TEST_BEGIN(test_size_classes)
}
TEST_END
-TEST_BEGIN(test_psize_classes)
-{
- size_t size_class, max_size_class;
- pszind_t pind, max_pind;
-
- max_size_class = get_max_size_class();
- max_pind = psz2ind(max_size_class);
-
- for (pind = 0, size_class = pind2sz(pind); pind < max_pind ||
- size_class < max_size_class; pind++, size_class =
- pind2sz(pind)) {
- assert_true(pind < max_pind,
- "Loop conditionals should be equivalent; pind=%u, "
- "size_class=%zu (%#zx)", pind, size_class, size_class);
- assert_true(size_class < max_size_class,
- "Loop conditionals should be equivalent; pind=%u, "
- "size_class=%zu (%#zx)", pind, size_class, size_class);
-
- assert_u_eq(pind, psz2ind(size_class),
- "psz2ind() does not reverse pind2sz(): pind=%u -->"
- " size_class=%zu --> pind=%u --> size_class=%zu", pind,
- size_class, psz2ind(size_class),
- pind2sz(psz2ind(size_class)));
- assert_zu_eq(size_class, pind2sz(psz2ind(size_class)),
- "pind2sz() does not reverse psz2ind(): pind=%u -->"
- " size_class=%zu --> pind=%u --> size_class=%zu", pind,
- size_class, psz2ind(size_class),
- pind2sz(psz2ind(size_class)));
-
- assert_u_eq(pind+1, psz2ind(size_class+1),
- "Next size_class does not round up properly");
-
- assert_zu_eq(size_class, (pind > 0) ?
- psz2u(pind2sz(pind-1)+1) : psz2u(1),
- "psz2u() does not round up to size class");
- assert_zu_eq(size_class, psz2u(size_class-1),
- "psz2u() does not round up to size class");
- assert_zu_eq(size_class, psz2u(size_class),
- "psz2u() does not compute same size class");
- assert_zu_eq(psz2u(size_class+1), pind2sz(pind+1),
- "psz2u() does not round up to next size class");
- }
-
- assert_u_eq(pind, psz2ind(pind2sz(pind)),
- "psz2ind() does not reverse pind2sz()");
- assert_zu_eq(max_size_class, pind2sz(psz2ind(max_size_class)),
- "pind2sz() does not reverse psz2ind()");
-
- assert_zu_eq(size_class, psz2u(pind2sz(pind-1)+1),
- "psz2u() does not round up to size class");
- assert_zu_eq(size_class, psz2u(size_class-1),
- "psz2u() does not round up to size class");
- assert_zu_eq(size_class, psz2u(size_class),
- "psz2u() does not compute same size class");
-}
-TEST_END
-
-TEST_BEGIN(test_overflow)
-{
- size_t max_size_class;
-
- max_size_class = get_max_size_class();
-
- assert_u_eq(size2index(max_size_class+1), NSIZES,
- "size2index() should return NSIZES on overflow");
- assert_u_eq(size2index(ZU(PTRDIFF_MAX)+1), NSIZES,
- "size2index() should return NSIZES on overflow");
- assert_u_eq(size2index(SIZE_T_MAX), NSIZES,
- "size2index() should return NSIZES on overflow");
-
- assert_zu_eq(s2u(max_size_class+1), 0,
- "s2u() should return 0 for unsupported size");
- assert_zu_eq(s2u(ZU(PTRDIFF_MAX)+1), 0,
- "s2u() should return 0 for unsupported size");
- assert_zu_eq(s2u(SIZE_T_MAX), 0,
- "s2u() should return 0 on overflow");
-
- assert_u_eq(psz2ind(max_size_class+1), NPSIZES,
- "psz2ind() should return NPSIZES on overflow");
- assert_u_eq(psz2ind(ZU(PTRDIFF_MAX)+1), NPSIZES,
- "psz2ind() should return NPSIZES on overflow");
- assert_u_eq(psz2ind(SIZE_T_MAX), NPSIZES,
- "psz2ind() should return NPSIZES on overflow");
-
- assert_zu_eq(psz2u(max_size_class+1), 0,
- "psz2u() should return 0 for unsupported size");
- assert_zu_eq(psz2u(ZU(PTRDIFF_MAX)+1), 0,
- "psz2u() should return 0 for unsupported size");
- assert_zu_eq(psz2u(SIZE_T_MAX), 0,
- "psz2u() should return 0 on overflow");
-}
-TEST_END
-
int
main(void)
{
return (test(
- test_size_classes,
- test_psize_classes,
- test_overflow));
+ test_size_classes));
}
diff --git a/deps/jemalloc/test/unit/smoothstep.c b/deps/jemalloc/test/unit/smoothstep.c
deleted file mode 100644
index 4cfb21343..000000000
--- a/deps/jemalloc/test/unit/smoothstep.c
+++ /dev/null
@@ -1,106 +0,0 @@
-#include "test/jemalloc_test.h"
-
-static const uint64_t smoothstep_tab[] = {
-#define STEP(step, h, x, y) \
- h,
- SMOOTHSTEP
-#undef STEP
-};
-
-TEST_BEGIN(test_smoothstep_integral)
-{
- uint64_t sum, min, max;
- unsigned i;
-
- /*
- * The integral of smoothstep in the [0..1] range equals 1/2. Verify
- * that the fixed point representation's integral is no more than
- * rounding error distant from 1/2. Regarding rounding, each table
- * element is rounded down to the nearest fixed point value, so the
- * integral may be off by as much as SMOOTHSTEP_NSTEPS ulps.
- */
- sum = 0;
- for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
- sum += smoothstep_tab[i];
-
- max = (KQU(1) << (SMOOTHSTEP_BFP-1)) * (SMOOTHSTEP_NSTEPS+1);
- min = max - SMOOTHSTEP_NSTEPS;
-
- assert_u64_ge(sum, min,
- "Integral too small, even accounting for truncation");
- assert_u64_le(sum, max, "Integral exceeds 1/2");
- if (false) {
- malloc_printf("%"FMTu64" ulps under 1/2 (limit %d)\n",
- max - sum, SMOOTHSTEP_NSTEPS);
- }
-}
-TEST_END
-
-TEST_BEGIN(test_smoothstep_monotonic)
-{
- uint64_t prev_h;
- unsigned i;
-
- /*
- * The smoothstep function is monotonic in [0..1], i.e. its slope is
- * non-negative. In practice we want to parametrize table generation
- * such that piecewise slope is greater than zero, but do not require
- * that here.
- */
- prev_h = 0;
- for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
- uint64_t h = smoothstep_tab[i];
- assert_u64_ge(h, prev_h, "Piecewise non-monotonic, i=%u", i);
- prev_h = h;
- }
- assert_u64_eq(smoothstep_tab[SMOOTHSTEP_NSTEPS-1],
- (KQU(1) << SMOOTHSTEP_BFP), "Last step must equal 1");
-}
-TEST_END
-
-TEST_BEGIN(test_smoothstep_slope)
-{
- uint64_t prev_h, prev_delta;
- unsigned i;
-
- /*
- * The smoothstep slope strictly increases until x=0.5, and then
- * strictly decreases until x=1.0. Verify the slightly weaker
- * requirement of monotonicity, so that inadequate table precision does
- * not cause false test failures.
- */
- prev_h = 0;
- prev_delta = 0;
- for (i = 0; i < SMOOTHSTEP_NSTEPS / 2 + SMOOTHSTEP_NSTEPS % 2; i++) {
- uint64_t h = smoothstep_tab[i];
- uint64_t delta = h - prev_h;
- assert_u64_ge(delta, prev_delta,
- "Slope must monotonically increase in 0.0 <= x <= 0.5, "
- "i=%u", i);
- prev_h = h;
- prev_delta = delta;
- }
-
- prev_h = KQU(1) << SMOOTHSTEP_BFP;
- prev_delta = 0;
- for (i = SMOOTHSTEP_NSTEPS-1; i >= SMOOTHSTEP_NSTEPS / 2; i--) {
- uint64_t h = smoothstep_tab[i];
- uint64_t delta = prev_h - h;
- assert_u64_ge(delta, prev_delta,
- "Slope must monotonically decrease in 0.5 <= x <= 1.0, "
- "i=%u", i);
- prev_h = h;
- prev_delta = delta;
- }
-}
-TEST_END
-
-int
-main(void)
-{
-
- return (test(
- test_smoothstep_integral,
- test_smoothstep_monotonic,
- test_smoothstep_slope));
-}
diff --git a/deps/jemalloc/test/unit/stats.c b/deps/jemalloc/test/unit/stats.c
index 315717dfb..8e4bc631e 100755..100644
--- a/deps/jemalloc/test/unit/stats.c
+++ b/deps/jemalloc/test/unit/stats.c
@@ -7,18 +7,18 @@ TEST_BEGIN(test_stats_summary)
int expected = config_stats ? 0 : ENOENT;
sz = sizeof(cactive);
- assert_d_eq(mallctl("stats.cactive", (void *)&cactive, &sz, NULL, 0),
- expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.cactive", &cactive, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
sz = sizeof(size_t);
- assert_d_eq(mallctl("stats.allocated", (void *)&allocated, &sz, NULL,
- 0), expected, "Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.active", (void *)&active, &sz, NULL, 0),
- expected, "Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.resident", (void *)&resident, &sz, NULL, 0),
+ assert_d_eq(mallctl("stats.allocated", &allocated, &sz, NULL, 0),
expected, "Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.mapped", (void *)&mapped, &sz, NULL, 0),
+ assert_d_eq(mallctl("stats.active", &active, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.resident", &resident, &sz, NULL, 0),
expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.mapped", &mapped, &sz, NULL, 0), expected,
+ "Unexpected mallctl() result");
if (config_stats) {
assert_zu_le(active, *cactive,
@@ -45,19 +45,19 @@ TEST_BEGIN(test_stats_huge)
p = mallocx(large_maxclass+1, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
- assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
- 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+ "Unexpected mallctl() failure");
sz = sizeof(size_t);
- assert_d_eq(mallctl("stats.arenas.0.huge.allocated", (void *)&allocated,
- &sz, NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.huge.allocated", &allocated, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(uint64_t);
- assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", (void *)&nmalloc,
- &sz, NULL, 0), expected, "Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", (void *)&ndalloc,
- &sz, NULL, 0), expected, "Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.huge.nrequests", (void *)&nrequests,
- &sz, NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz, NULL,
+ 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz, NULL,
+ 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.huge.nrequests", &nrequests, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
if (config_stats) {
assert_zu_gt(allocated, 0,
@@ -83,8 +83,8 @@ TEST_BEGIN(test_stats_arenas_summary)
uint64_t npurge, nmadvise, purged;
arena = 0;
- assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
- sizeof(arena)), 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+ 0, "Unexpected mallctl() failure");
little = mallocx(SMALL_MAXCLASS, 0);
assert_ptr_not_null(little, "Unexpected mallocx() failure");
@@ -93,26 +93,22 @@ TEST_BEGIN(test_stats_arenas_summary)
huge = mallocx(chunksize, 0);
assert_ptr_not_null(huge, "Unexpected mallocx() failure");
- dallocx(little, 0);
- dallocx(large, 0);
- dallocx(huge, 0);
-
assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl() failure");
- assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
- 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+ "Unexpected mallctl() failure");
sz = sizeof(size_t);
- assert_d_eq(mallctl("stats.arenas.0.mapped", (void *)&mapped, &sz, NULL,
- 0), expected, "Unexepected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.mapped", &mapped, &sz, NULL, 0),
+ expected, "Unexepected mallctl() result");
sz = sizeof(uint64_t);
- assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge, &sz, NULL,
- 0), expected, "Unexepected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.nmadvise", (void *)&nmadvise, &sz,
- NULL, 0), expected, "Unexepected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.purged", (void *)&purged, &sz, NULL,
- 0), expected, "Unexepected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge, &sz, NULL, 0),
+ expected, "Unexepected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.nmadvise", &nmadvise, &sz, NULL, 0),
+ expected, "Unexepected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.purged", &purged, &sz, NULL, 0),
+ expected, "Unexepected mallctl() result");
if (config_stats) {
assert_u64_gt(npurge, 0,
@@ -120,6 +116,10 @@ TEST_BEGIN(test_stats_arenas_summary)
assert_u64_le(nmadvise, purged,
"nmadvise should be no greater than purged");
}
+
+ dallocx(little, 0);
+ dallocx(large, 0);
+ dallocx(huge, 0);
}
TEST_END
@@ -150,8 +150,8 @@ TEST_BEGIN(test_stats_arenas_small)
no_lazy_lock(); /* Lazy locking would dodge tcache testing. */
arena = 0;
- assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
- sizeof(arena)), 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+ 0, "Unexpected mallctl() failure");
p = mallocx(SMALL_MAXCLASS, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
@@ -159,21 +159,19 @@ TEST_BEGIN(test_stats_arenas_small)
assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
config_tcache ? 0 : ENOENT, "Unexpected mallctl() result");
- assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
- 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+ "Unexpected mallctl() failure");
sz = sizeof(size_t);
- assert_d_eq(mallctl("stats.arenas.0.small.allocated",
- (void *)&allocated, &sz, NULL, 0), expected,
- "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.small.allocated", &allocated, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(uint64_t);
- assert_d_eq(mallctl("stats.arenas.0.small.nmalloc", (void *)&nmalloc,
- &sz, NULL, 0), expected, "Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.small.ndalloc", (void *)&ndalloc,
- &sz, NULL, 0), expected, "Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.small.nrequests",
- (void *)&nrequests, &sz, NULL, 0), expected,
- "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.small.nmalloc", &nmalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.small.ndalloc", &ndalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.small.nrequests", &nrequests, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
if (config_stats) {
assert_zu_gt(allocated, 0,
@@ -199,36 +197,34 @@ TEST_BEGIN(test_stats_arenas_large)
int expected = config_stats ? 0 : ENOENT;
arena = 0;
- assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
- sizeof(arena)), 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+ 0, "Unexpected mallctl() failure");
p = mallocx(large_maxclass, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
- assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
- 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+ "Unexpected mallctl() failure");
sz = sizeof(size_t);
- assert_d_eq(mallctl("stats.arenas.0.large.allocated",
- (void *)&allocated, &sz, NULL, 0), expected,
- "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.large.allocated", &allocated, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(uint64_t);
- assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc,
- &sz, NULL, 0), expected, "Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc,
- &sz, NULL, 0), expected, "Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.large.nrequests",
- (void *)&nrequests, &sz, NULL, 0), expected,
- "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", &nmalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", &ndalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.large.nrequests", &nrequests, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
if (config_stats) {
assert_zu_gt(allocated, 0,
"allocated should be greater than zero");
- assert_u64_gt(nmalloc, 0,
+ assert_zu_gt(nmalloc, 0,
"nmalloc should be greater than zero");
- assert_u64_ge(nmalloc, ndalloc,
+ assert_zu_ge(nmalloc, ndalloc,
"nmalloc should be at least as large as ndalloc");
- assert_u64_gt(nrequests, 0,
+ assert_zu_gt(nrequests, 0,
"nrequests should be greater than zero");
}
@@ -245,30 +241,30 @@ TEST_BEGIN(test_stats_arenas_huge)
int expected = config_stats ? 0 : ENOENT;
arena = 0;
- assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
- sizeof(arena)), 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+ 0, "Unexpected mallctl() failure");
p = mallocx(chunksize, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
- assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
- 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+ "Unexpected mallctl() failure");
sz = sizeof(size_t);
- assert_d_eq(mallctl("stats.arenas.0.huge.allocated", (void *)&allocated,
- &sz, NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.huge.allocated", &allocated, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(uint64_t);
- assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", (void *)&nmalloc,
- &sz, NULL, 0), expected, "Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", (void *)&ndalloc,
- &sz, NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
if (config_stats) {
assert_zu_gt(allocated, 0,
"allocated should be greater than zero");
- assert_u64_gt(nmalloc, 0,
+ assert_zu_gt(nmalloc, 0,
"nmalloc should be greater than zero");
- assert_u64_ge(nmalloc, ndalloc,
+ assert_zu_ge(nmalloc, ndalloc,
"nmalloc should be at least as large as ndalloc");
}
@@ -286,8 +282,8 @@ TEST_BEGIN(test_stats_arenas_bins)
int expected = config_stats ? 0 : ENOENT;
arena = 0;
- assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
- sizeof(arena)), 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+ 0, "Unexpected mallctl() failure");
p = mallocx(arena_bin_info[0].reg_size, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
@@ -295,36 +291,35 @@ TEST_BEGIN(test_stats_arenas_bins)
assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
config_tcache ? 0 : ENOENT, "Unexpected mallctl() result");
- assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
- 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+ "Unexpected mallctl() failure");
sz = sizeof(uint64_t);
- assert_d_eq(mallctl("stats.arenas.0.bins.0.nmalloc", (void *)&nmalloc,
- &sz, NULL, 0), expected, "Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.bins.0.ndalloc", (void *)&ndalloc,
- &sz, NULL, 0), expected, "Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.bins.0.nrequests",
- (void *)&nrequests, &sz, NULL, 0), expected,
- "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.nmalloc", &nmalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.ndalloc", &ndalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.nrequests", &nrequests, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(size_t);
- assert_d_eq(mallctl("stats.arenas.0.bins.0.curregs", (void *)&curregs,
- &sz, NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.curregs", &curregs, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(uint64_t);
- assert_d_eq(mallctl("stats.arenas.0.bins.0.nfills", (void *)&nfills,
- &sz, NULL, 0), config_tcache ? expected : ENOENT,
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.nfills", &nfills, &sz,
+ NULL, 0), config_tcache ? expected : ENOENT,
"Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.bins.0.nflushes", (void *)&nflushes,
- &sz, NULL, 0), config_tcache ? expected : ENOENT,
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.nflushes", &nflushes, &sz,
+ NULL, 0), config_tcache ? expected : ENOENT,
"Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.bins.0.nruns", (void *)&nruns, &sz,
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.nruns", &nruns, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.nreruns", &nreruns, &sz,
NULL, 0), expected, "Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.bins.0.nreruns", (void *)&nreruns,
- &sz, NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(size_t);
- assert_d_eq(mallctl("stats.arenas.0.bins.0.curruns", (void *)&curruns,
- &sz, NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.bins.0.curruns", &curruns, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
if (config_stats) {
assert_u64_gt(nmalloc, 0,
@@ -360,26 +355,25 @@ TEST_BEGIN(test_stats_arenas_lruns)
int expected = config_stats ? 0 : ENOENT;
arena = 0;
- assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
- sizeof(arena)), 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+ 0, "Unexpected mallctl() failure");
p = mallocx(LARGE_MINCLASS, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
- assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
- 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+ "Unexpected mallctl() failure");
sz = sizeof(uint64_t);
- assert_d_eq(mallctl("stats.arenas.0.lruns.0.nmalloc", (void *)&nmalloc,
- &sz, NULL, 0), expected, "Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.lruns.0.ndalloc", (void *)&ndalloc,
- &sz, NULL, 0), expected, "Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.lruns.0.nrequests",
- (void *)&nrequests, &sz, NULL, 0), expected,
- "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.lruns.0.nmalloc", &nmalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.lruns.0.ndalloc", &ndalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.lruns.0.nrequests", &nrequests, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(size_t);
- assert_d_eq(mallctl("stats.arenas.0.lruns.0.curruns", (void *)&curruns,
- &sz, NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.lruns.0.curruns", &curruns, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
if (config_stats) {
assert_u64_gt(nmalloc, 0,
@@ -405,26 +399,23 @@ TEST_BEGIN(test_stats_arenas_hchunks)
int expected = config_stats ? 0 : ENOENT;
arena = 0;
- assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
- sizeof(arena)), 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
+ 0, "Unexpected mallctl() failure");
p = mallocx(chunksize, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
- assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
- 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
+ "Unexpected mallctl() failure");
sz = sizeof(uint64_t);
- assert_d_eq(mallctl("stats.arenas.0.hchunks.0.nmalloc",
- (void *)&nmalloc, &sz, NULL, 0), expected,
- "Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.hchunks.0.ndalloc",
- (void *)&ndalloc, &sz, NULL, 0), expected,
- "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.hchunks.0.nmalloc", &nmalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.hchunks.0.ndalloc", &ndalloc, &sz,
+ NULL, 0), expected, "Unexpected mallctl() result");
sz = sizeof(size_t);
- assert_d_eq(mallctl("stats.arenas.0.hchunks.0.curhchunks",
- (void *)&curhchunks, &sz, NULL, 0), expected,
- "Unexpected mallctl() result");
+ assert_d_eq(mallctl("stats.arenas.0.hchunks.0.curhchunks", &curhchunks,
+ &sz, NULL, 0), expected, "Unexpected mallctl() result");
if (config_stats) {
assert_u64_gt(nmalloc, 0,
diff --git a/deps/jemalloc/test/unit/ticker.c b/deps/jemalloc/test/unit/ticker.c
deleted file mode 100644
index e737020ab..000000000
--- a/deps/jemalloc/test/unit/ticker.c
+++ /dev/null
@@ -1,76 +0,0 @@
-#include "test/jemalloc_test.h"
-
-TEST_BEGIN(test_ticker_tick)
-{
-#define NREPS 2
-#define NTICKS 3
- ticker_t ticker;
- int32_t i, j;
-
- ticker_init(&ticker, NTICKS);
- for (i = 0; i < NREPS; i++) {
- for (j = 0; j < NTICKS; j++) {
- assert_u_eq(ticker_read(&ticker), NTICKS - j,
- "Unexpected ticker value (i=%d, j=%d)", i, j);
- assert_false(ticker_tick(&ticker),
- "Unexpected ticker fire (i=%d, j=%d)", i, j);
- }
- assert_u32_eq(ticker_read(&ticker), 0,
- "Expected ticker depletion");
- assert_true(ticker_tick(&ticker),
- "Expected ticker fire (i=%d)", i);
- assert_u32_eq(ticker_read(&ticker), NTICKS,
- "Expected ticker reset");
- }
-#undef NTICKS
-}
-TEST_END
-
-TEST_BEGIN(test_ticker_ticks)
-{
-#define NTICKS 3
- ticker_t ticker;
-
- ticker_init(&ticker, NTICKS);
-
- assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
- assert_false(ticker_ticks(&ticker, NTICKS), "Unexpected ticker fire");
- assert_u_eq(ticker_read(&ticker), 0, "Unexpected ticker value");
- assert_true(ticker_ticks(&ticker, NTICKS), "Expected ticker fire");
- assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
-
- assert_true(ticker_ticks(&ticker, NTICKS + 1), "Expected ticker fire");
- assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
-#undef NTICKS
-}
-TEST_END
-
-TEST_BEGIN(test_ticker_copy)
-{
-#define NTICKS 3
- ticker_t ta, tb;
-
- ticker_init(&ta, NTICKS);
- ticker_copy(&tb, &ta);
- assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
- assert_true(ticker_ticks(&tb, NTICKS + 1), "Expected ticker fire");
- assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
-
- ticker_tick(&ta);
- ticker_copy(&tb, &ta);
- assert_u_eq(ticker_read(&tb), NTICKS - 1, "Unexpected ticker value");
- assert_true(ticker_ticks(&tb, NTICKS), "Expected ticker fire");
- assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
-#undef NTICKS
-}
-TEST_END
-
-int
-main(void)
-{
-
- return (test(
- test_ticker_tick,
- test_ticker_ticks,
- test_ticker_copy));
-}
diff --git a/deps/jemalloc/test/unit/tsd.c b/deps/jemalloc/test/unit/tsd.c
index d5f96ac36..8be787fda 100644
--- a/deps/jemalloc/test/unit/tsd.c
+++ b/deps/jemalloc/test/unit/tsd.c
@@ -58,18 +58,18 @@ thd_start(void *arg)
data_t d = (data_t)(uintptr_t)arg;
void *p;
- assert_x_eq(*data_tsd_get(true), DATA_INIT,
+ assert_x_eq(*data_tsd_get(), DATA_INIT,
"Initial tsd get should return initialization value");
p = malloc(1);
assert_ptr_not_null(p, "Unexpected malloc() failure");
data_tsd_set(&d);
- assert_x_eq(*data_tsd_get(true), d,
+ assert_x_eq(*data_tsd_get(), d,
"After tsd set, tsd get should return value that was set");
d = 0;
- assert_x_eq(*data_tsd_get(true), (data_t)(uintptr_t)arg,
+ assert_x_eq(*data_tsd_get(), (data_t)(uintptr_t)arg,
"Resetting local data should have no effect on tsd");
free(p);
@@ -79,7 +79,7 @@ thd_start(void *arg)
TEST_BEGIN(test_tsd_main_thread)
{
- thd_start((void *)(uintptr_t)0xa5f3e329);
+ thd_start((void *) 0xa5f3e329);
}
TEST_END
@@ -99,11 +99,6 @@ int
main(void)
{
- /* Core tsd bootstrapping must happen prior to data_tsd_boot(). */
- if (nallocx(1, 0) == 0) {
- malloc_printf("Initialization error");
- return (test_status_fail);
- }
data_tsd_boot();
return (test(
diff --git a/deps/jemalloc/test/unit/util.c b/deps/jemalloc/test/unit/util.c
index b1f9abd9b..8ab39a458 100644
--- a/deps/jemalloc/test/unit/util.c
+++ b/deps/jemalloc/test/unit/util.c
@@ -1,54 +1,33 @@
#include "test/jemalloc_test.h"
-#define TEST_POW2_CEIL(t, suf, pri) do { \
- unsigned i, pow2; \
- t x; \
- \
- assert_##suf##_eq(pow2_ceil_##suf(0), 0, "Unexpected result"); \
- \
- for (i = 0; i < sizeof(t) * 8; i++) { \
- assert_##suf##_eq(pow2_ceil_##suf(((t)1) << i), ((t)1) \
- << i, "Unexpected result"); \
- } \
- \
- for (i = 2; i < sizeof(t) * 8; i++) { \
- assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) - 1), \
- ((t)1) << i, "Unexpected result"); \
- } \
- \
- for (i = 0; i < sizeof(t) * 8 - 1; i++) { \
- assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) + 1), \
- ((t)1) << (i+1), "Unexpected result"); \
- } \
- \
- for (pow2 = 1; pow2 < 25; pow2++) { \
- for (x = (((t)1) << (pow2-1)) + 1; x <= ((t)1) << pow2; \
- x++) { \
- assert_##suf##_eq(pow2_ceil_##suf(x), \
- ((t)1) << pow2, \
- "Unexpected result, x=%"pri, x); \
- } \
- } \
-} while (0)
-
-TEST_BEGIN(test_pow2_ceil_u64)
+TEST_BEGIN(test_pow2_ceil)
{
+ unsigned i, pow2;
+ size_t x;
- TEST_POW2_CEIL(uint64_t, u64, FMTu64);
-}
-TEST_END
+ assert_zu_eq(pow2_ceil(0), 0, "Unexpected result");
-TEST_BEGIN(test_pow2_ceil_u32)
-{
+ for (i = 0; i < sizeof(size_t) * 8; i++) {
+ assert_zu_eq(pow2_ceil(ZU(1) << i), ZU(1) << i,
+ "Unexpected result");
+ }
- TEST_POW2_CEIL(uint32_t, u32, FMTu32);
-}
-TEST_END
+ for (i = 2; i < sizeof(size_t) * 8; i++) {
+ assert_zu_eq(pow2_ceil((ZU(1) << i) - 1), ZU(1) << i,
+ "Unexpected result");
+ }
-TEST_BEGIN(test_pow2_ceil_zu)
-{
+ for (i = 0; i < sizeof(size_t) * 8 - 1; i++) {
+ assert_zu_eq(pow2_ceil((ZU(1) << i) + 1), ZU(1) << (i+1),
+ "Unexpected result");
+ }
- TEST_POW2_CEIL(size_t, zu, "zu");
+ for (pow2 = 1; pow2 < 25; pow2++) {
+ for (x = (ZU(1) << (pow2-1)) + 1; x <= ZU(1) << pow2; x++) {
+ assert_zu_eq(pow2_ceil(x), ZU(1) << pow2,
+ "Unexpected result, x=%zu", x);
+ }
+ }
}
TEST_END
@@ -75,7 +54,6 @@ TEST_BEGIN(test_malloc_strtoumax)
};
#define ERR(e) e, #e
#define KUMAX(x) ((uintmax_t)x##ULL)
-#define KSMAX(x) ((uintmax_t)(intmax_t)x##LL)
struct test_s tests[] = {
{"0", "0", -1, ERR(EINVAL), UINTMAX_MAX},
{"0", "0", 1, ERR(EINVAL), UINTMAX_MAX},
@@ -88,13 +66,13 @@ TEST_BEGIN(test_malloc_strtoumax)
{"42", "", 0, ERR(0), KUMAX(42)},
{"+42", "", 0, ERR(0), KUMAX(42)},
- {"-42", "", 0, ERR(0), KSMAX(-42)},
+ {"-42", "", 0, ERR(0), KUMAX(-42)},
{"042", "", 0, ERR(0), KUMAX(042)},
{"+042", "", 0, ERR(0), KUMAX(042)},
- {"-042", "", 0, ERR(0), KSMAX(-042)},
+ {"-042", "", 0, ERR(0), KUMAX(-042)},
{"0x42", "", 0, ERR(0), KUMAX(0x42)},
{"+0x42", "", 0, ERR(0), KUMAX(0x42)},
- {"-0x42", "", 0, ERR(0), KSMAX(-0x42)},
+ {"-0x42", "", 0, ERR(0), KUMAX(-0x42)},
{"0", "", 0, ERR(0), KUMAX(0)},
{"1", "", 0, ERR(0), KUMAX(1)},
@@ -131,7 +109,6 @@ TEST_BEGIN(test_malloc_strtoumax)
};
#undef ERR
#undef KUMAX
-#undef KSMAX
unsigned i;
for (i = 0; i < sizeof(tests)/sizeof(struct test_s); i++) {
@@ -162,14 +139,14 @@ TEST_BEGIN(test_malloc_snprintf_truncated)
{
#define BUFLEN 15
char buf[BUFLEN];
- size_t result;
+ int result;
size_t len;
-#define TEST(expected_str_untruncated, ...) do { \
+#define TEST(expected_str_untruncated, ...) do { \
result = malloc_snprintf(buf, len, __VA_ARGS__); \
assert_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0, \
"Unexpected string inequality (\"%s\" vs \"%s\")", \
- buf, expected_str_untruncated); \
- assert_zu_eq(result, strlen(expected_str_untruncated), \
+ buf, expected_str_untruncated); \
+ assert_d_eq(result, strlen(expected_str_untruncated), \
"Unexpected result"); \
} while (0)
@@ -195,11 +172,11 @@ TEST_BEGIN(test_malloc_snprintf)
{
#define BUFLEN 128
char buf[BUFLEN];
- size_t result;
+ int result;
#define TEST(expected_str, ...) do { \
result = malloc_snprintf(buf, sizeof(buf), __VA_ARGS__); \
assert_str_eq(buf, expected_str, "Unexpected output"); \
- assert_zu_eq(result, strlen(expected_str), "Unexpected result");\
+ assert_d_eq(result, strlen(expected_str), "Unexpected result"); \
} while (0)
TEST("hello", "hello");
@@ -309,9 +286,7 @@ main(void)
{
return (test(
- test_pow2_ceil_u64,
- test_pow2_ceil_u32,
- test_pow2_ceil_zu,
+ test_pow2_ceil,
test_malloc_strtoumax_no_endptr,
test_malloc_strtoumax,
test_malloc_snprintf_truncated,
diff --git a/deps/jemalloc/test/unit/witness.c b/deps/jemalloc/test/unit/witness.c
deleted file mode 100644
index ed172753c..000000000
--- a/deps/jemalloc/test/unit/witness.c
+++ /dev/null
@@ -1,278 +0,0 @@
-#include "test/jemalloc_test.h"
-
-static witness_lock_error_t *witness_lock_error_orig;
-static witness_owner_error_t *witness_owner_error_orig;
-static witness_not_owner_error_t *witness_not_owner_error_orig;
-static witness_lockless_error_t *witness_lockless_error_orig;
-
-static bool saw_lock_error;
-static bool saw_owner_error;
-static bool saw_not_owner_error;
-static bool saw_lockless_error;
-
-static void
-witness_lock_error_intercept(const witness_list_t *witnesses,
- const witness_t *witness)
-{
-
- saw_lock_error = true;
-}
-
-static void
-witness_owner_error_intercept(const witness_t *witness)
-{
-
- saw_owner_error = true;
-}
-
-static void
-witness_not_owner_error_intercept(const witness_t *witness)
-{
-
- saw_not_owner_error = true;
-}
-
-static void
-witness_lockless_error_intercept(const witness_list_t *witnesses)
-{
-
- saw_lockless_error = true;
-}
-
-static int
-witness_comp(const witness_t *a, const witness_t *b)
-{
-
- assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank");
-
- return (strcmp(a->name, b->name));
-}
-
-static int
-witness_comp_reverse(const witness_t *a, const witness_t *b)
-{
-
- assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank");
-
- return (-strcmp(a->name, b->name));
-}
-
-TEST_BEGIN(test_witness)
-{
- witness_t a, b;
- tsdn_t *tsdn;
-
- test_skip_if(!config_debug);
-
- tsdn = tsdn_fetch();
-
- witness_assert_lockless(tsdn);
-
- witness_init(&a, "a", 1, NULL);
- witness_assert_not_owner(tsdn, &a);
- witness_lock(tsdn, &a);
- witness_assert_owner(tsdn, &a);
-
- witness_init(&b, "b", 2, NULL);
- witness_assert_not_owner(tsdn, &b);
- witness_lock(tsdn, &b);
- witness_assert_owner(tsdn, &b);
-
- witness_unlock(tsdn, &a);
- witness_unlock(tsdn, &b);
-
- witness_assert_lockless(tsdn);
-}
-TEST_END
-
-TEST_BEGIN(test_witness_comp)
-{
- witness_t a, b, c, d;
- tsdn_t *tsdn;
-
- test_skip_if(!config_debug);
-
- tsdn = tsdn_fetch();
-
- witness_assert_lockless(tsdn);
-
- witness_init(&a, "a", 1, witness_comp);
- witness_assert_not_owner(tsdn, &a);
- witness_lock(tsdn, &a);
- witness_assert_owner(tsdn, &a);
-
- witness_init(&b, "b", 1, witness_comp);
- witness_assert_not_owner(tsdn, &b);
- witness_lock(tsdn, &b);
- witness_assert_owner(tsdn, &b);
- witness_unlock(tsdn, &b);
-
- witness_lock_error_orig = witness_lock_error;
- witness_lock_error = witness_lock_error_intercept;
- saw_lock_error = false;
-
- witness_init(&c, "c", 1, witness_comp_reverse);
- witness_assert_not_owner(tsdn, &c);
- assert_false(saw_lock_error, "Unexpected witness lock error");
- witness_lock(tsdn, &c);
- assert_true(saw_lock_error, "Expected witness lock error");
- witness_unlock(tsdn, &c);
-
- saw_lock_error = false;
-
- witness_init(&d, "d", 1, NULL);
- witness_assert_not_owner(tsdn, &d);
- assert_false(saw_lock_error, "Unexpected witness lock error");
- witness_lock(tsdn, &d);
- assert_true(saw_lock_error, "Expected witness lock error");
- witness_unlock(tsdn, &d);
-
- witness_unlock(tsdn, &a);
-
- witness_assert_lockless(tsdn);
-
- witness_lock_error = witness_lock_error_orig;
-}
-TEST_END
-
-TEST_BEGIN(test_witness_reversal)
-{
- witness_t a, b;
- tsdn_t *tsdn;
-
- test_skip_if(!config_debug);
-
- witness_lock_error_orig = witness_lock_error;
- witness_lock_error = witness_lock_error_intercept;
- saw_lock_error = false;
-
- tsdn = tsdn_fetch();
-
- witness_assert_lockless(tsdn);
-
- witness_init(&a, "a", 1, NULL);
- witness_init(&b, "b", 2, NULL);
-
- witness_lock(tsdn, &b);
- assert_false(saw_lock_error, "Unexpected witness lock error");
- witness_lock(tsdn, &a);
- assert_true(saw_lock_error, "Expected witness lock error");
-
- witness_unlock(tsdn, &a);
- witness_unlock(tsdn, &b);
-
- witness_assert_lockless(tsdn);
-
- witness_lock_error = witness_lock_error_orig;
-}
-TEST_END
-
-TEST_BEGIN(test_witness_recursive)
-{
- witness_t a;
- tsdn_t *tsdn;
-
- test_skip_if(!config_debug);
-
- witness_not_owner_error_orig = witness_not_owner_error;
- witness_not_owner_error = witness_not_owner_error_intercept;
- saw_not_owner_error = false;
-
- witness_lock_error_orig = witness_lock_error;
- witness_lock_error = witness_lock_error_intercept;
- saw_lock_error = false;
-
- tsdn = tsdn_fetch();
-
- witness_assert_lockless(tsdn);
-
- witness_init(&a, "a", 1, NULL);
-
- witness_lock(tsdn, &a);
- assert_false(saw_lock_error, "Unexpected witness lock error");
- assert_false(saw_not_owner_error, "Unexpected witness not owner error");
- witness_lock(tsdn, &a);
- assert_true(saw_lock_error, "Expected witness lock error");
- assert_true(saw_not_owner_error, "Expected witness not owner error");
-
- witness_unlock(tsdn, &a);
-
- witness_assert_lockless(tsdn);
-
- witness_owner_error = witness_owner_error_orig;
- witness_lock_error = witness_lock_error_orig;
-
-}
-TEST_END
-
-TEST_BEGIN(test_witness_unlock_not_owned)
-{
- witness_t a;
- tsdn_t *tsdn;
-
- test_skip_if(!config_debug);
-
- witness_owner_error_orig = witness_owner_error;
- witness_owner_error = witness_owner_error_intercept;
- saw_owner_error = false;
-
- tsdn = tsdn_fetch();
-
- witness_assert_lockless(tsdn);
-
- witness_init(&a, "a", 1, NULL);
-
- assert_false(saw_owner_error, "Unexpected owner error");
- witness_unlock(tsdn, &a);
- assert_true(saw_owner_error, "Expected owner error");
-
- witness_assert_lockless(tsdn);
-
- witness_owner_error = witness_owner_error_orig;
-}
-TEST_END
-
-TEST_BEGIN(test_witness_lockful)
-{
- witness_t a;
- tsdn_t *tsdn;
-
- test_skip_if(!config_debug);
-
- witness_lockless_error_orig = witness_lockless_error;
- witness_lockless_error = witness_lockless_error_intercept;
- saw_lockless_error = false;
-
- tsdn = tsdn_fetch();
-
- witness_assert_lockless(tsdn);
-
- witness_init(&a, "a", 1, NULL);
-
- assert_false(saw_lockless_error, "Unexpected lockless error");
- witness_assert_lockless(tsdn);
-
- witness_lock(tsdn, &a);
- witness_assert_lockless(tsdn);
- assert_true(saw_lockless_error, "Expected lockless error");
-
- witness_unlock(tsdn, &a);
-
- witness_assert_lockless(tsdn);
-
- witness_lockless_error = witness_lockless_error_orig;
-}
-TEST_END
-
-int
-main(void)
-{
-
- return (test(
- test_witness,
- test_witness_comp,
- test_witness_reversal,
- test_witness_recursive,
- test_witness_unlock_not_owned,
- test_witness_lockful));
-}
diff --git a/deps/jemalloc/test/unit/zero.c b/deps/jemalloc/test/unit/zero.c
index 30ebe37a4..93afc2b87 100644
--- a/deps/jemalloc/test/unit/zero.c
+++ b/deps/jemalloc/test/unit/zero.c
@@ -8,41 +8,39 @@ const char *malloc_conf =
static void
test_zero(size_t sz_min, size_t sz_max)
{
- uint8_t *s;
+ char *s;
size_t sz_prev, sz, i;
-#define MAGIC ((uint8_t)0x61)
sz_prev = 0;
- s = (uint8_t *)mallocx(sz_min, 0);
+ s = (char *)mallocx(sz_min, 0);
assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
for (sz = sallocx(s, 0); sz <= sz_max;
sz_prev = sz, sz = sallocx(s, 0)) {
if (sz_prev > 0) {
- assert_u_eq(s[0], MAGIC,
+ assert_c_eq(s[0], 'a',
"Previously allocated byte %zu/%zu is corrupted",
ZU(0), sz_prev);
- assert_u_eq(s[sz_prev-1], MAGIC,
+ assert_c_eq(s[sz_prev-1], 'a',
"Previously allocated byte %zu/%zu is corrupted",
sz_prev-1, sz_prev);
}
for (i = sz_prev; i < sz; i++) {
- assert_u_eq(s[i], 0x0,
+ assert_c_eq(s[i], 0x0,
"Newly allocated byte %zu/%zu isn't zero-filled",
i, sz);
- s[i] = MAGIC;
+ s[i] = 'a';
}
if (xallocx(s, sz+1, 0, 0) == sz) {
- s = (uint8_t *)rallocx(s, sz+1, 0);
+ s = (char *)rallocx(s, sz+1, 0);
assert_ptr_not_null((void *)s,
"Unexpected rallocx() failure");
}
}
dallocx(s, 0);
-#undef MAGIC
}
TEST_BEGIN(test_zero_small)