Diffstat (limited to 'deps/jemalloc/src/pages.c')
-rw-r--r--  deps/jemalloc/src/pages.c  209
1 file changed, 192 insertions(+), 17 deletions(-)
diff --git a/deps/jemalloc/src/pages.c b/deps/jemalloc/src/pages.c
index 13de27a00..8c83a7de7 100644
--- a/deps/jemalloc/src/pages.c
+++ b/deps/jemalloc/src/pages.c
@@ -1,4 +1,3 @@
-#define JEMALLOC_PAGES_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/pages.h"
@@ -14,6 +13,14 @@
#include <vm/vm_param.h>
#endif
#endif
+#ifdef __NetBSD__
+#include <sys/bitops.h> /* ilog2 */
+#endif
+#ifdef JEMALLOC_HAVE_VM_MAKE_TAG
+#define PAGES_FD_TAG VM_MAKE_TAG(101U)
+#else
+#define PAGES_FD_TAG -1
+#endif
/******************************************************************************/
/* Data. */
@@ -40,6 +47,57 @@ thp_mode_t init_system_thp_mode;
/* Runtime support for lazy purge. Irrelevant when !pages_can_purge_lazy. */
static bool pages_can_purge_lazy_runtime = true;
+#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
+static int madvise_dont_need_zeros_is_faulty = -1;
+/**
+ * Check that MADV_DONTNEED will actually zero pages on subsequent access.
+ *
+ * QEMU does not support this yet [1], and running a program that uses
+ * jemalloc under QEMU can trigger a very tricky assertion failure:
+ *
+ * <jemalloc>: ../contrib/jemalloc/src/extent.c:1195: Failed assertion: "p[i] == 0"
+ *
+ * [1]: https://patchwork.kernel.org/patch/10576637/
+ */
+static int madvise_MADV_DONTNEED_zeroes_pages()
+{
+ int works = -1;
+ size_t size = PAGE;
+
+ void * addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+
+ if (addr == MAP_FAILED) {
+ malloc_write("<jemalloc>: Cannot allocate memory for "
+ "MADV_DONTNEED check\n");
+ if (opt_abort) {
+ abort();
+ }
+ }
+
+ memset(addr, 'A', size);
+ if (madvise(addr, size, MADV_DONTNEED) == 0) {
+ works = memchr(addr, 'A', size) == NULL;
+ } else {
+ /*
+ * If madvise() does not support MADV_DONTNEED, we can still
+ * call it and rely on its return code.
+ */
+ works = 1;
+ }
+
+ if (munmap(addr, size) != 0) {
+ malloc_write("<jemalloc>: Cannot deallocate memory for "
+ "MADV_DONTNEED check\n");
+ if (opt_abort) {
+ abort();
+ }
+ }
+
+ return works;
+}
+#endif
+
/******************************************************************************/
/*
* Function prototypes for static functions that are referenced prior to
@@ -74,9 +132,21 @@ os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
* of existing mappings, and we only want to create new mappings.
*/
{
+#ifdef __NetBSD__
+ /*
+ * On NetBSD, PAGE for a platform is defined as the
+ * maximum page size of all machine architectures
+ * for that platform, so that the same binaries can
+ * be used across all machine architectures.
+ */
+ if (alignment > os_page || PAGE > os_page) {
+ unsigned int a = ilog2(MAX(alignment, PAGE));
+ mmap_flags |= MAP_ALIGNED(a);
+ }
+#endif
int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
- ret = mmap(addr, size, prot, mmap_flags, -1, 0);
+ ret = mmap(addr, size, prot, mmap_flags, PAGES_FD_TAG, 0);
}
assert(ret != NULL);
@@ -197,8 +267,8 @@ pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
flags |= MAP_FIXED | MAP_EXCL;
} else {
unsigned alignment_bits = ffs_zu(alignment);
- assert(alignment_bits > 1);
- flags |= MAP_ALIGNED(alignment_bits - 1);
+ assert(alignment_bits > 0);
+ flags |= MAP_ALIGNED(alignment_bits);
}
void *ret = mmap(addr, size, prot, flags, -1, 0);
@@ -246,14 +316,10 @@ pages_unmap(void *addr, size_t size) {
}
static bool
-pages_commit_impl(void *addr, size_t size, bool commit) {
+os_pages_commit(void *addr, size_t size, bool commit) {
assert(PAGE_ADDR2BASE(addr) == addr);
assert(PAGE_CEILING(size) == size);
- if (os_overcommits) {
- return true;
- }
-
#ifdef _WIN32
return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT,
PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT)));
@@ -261,7 +327,7 @@ pages_commit_impl(void *addr, size_t size, bool commit) {
{
int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED,
- -1, 0);
+ PAGES_FD_TAG, 0);
if (result == MAP_FAILED) {
return true;
}
@@ -278,6 +344,15 @@ pages_commit_impl(void *addr, size_t size, bool commit) {
#endif
}
+static bool
+pages_commit_impl(void *addr, size_t size, bool commit) {
+ if (os_overcommits) {
+ return true;
+ }
+
+ return os_pages_commit(addr, size, commit);
+}
+
bool
pages_commit(void *addr, size_t size) {
return pages_commit_impl(addr, size, true);
@@ -288,6 +363,66 @@ pages_decommit(void *addr, size_t size) {
return pages_commit_impl(addr, size, false);
}
+void
+pages_mark_guards(void *head, void *tail) {
+ assert(head != NULL || tail != NULL);
+ assert(head == NULL || tail == NULL ||
+ (uintptr_t)head < (uintptr_t)tail);
+#ifdef JEMALLOC_HAVE_MPROTECT
+ if (head != NULL) {
+ mprotect(head, PAGE, PROT_NONE);
+ }
+ if (tail != NULL) {
+ mprotect(tail, PAGE, PROT_NONE);
+ }
+#else
+ /* Decommitting sets the pages to PROT_NONE / MEM_DECOMMIT. */
+ if (head != NULL) {
+ os_pages_commit(head, PAGE, false);
+ }
+ if (tail != NULL) {
+ os_pages_commit(tail, PAGE, false);
+ }
+#endif
+}
+
+void
+pages_unmark_guards(void *head, void *tail) {
+ assert(head != NULL || tail != NULL);
+ assert(head == NULL || tail == NULL ||
+ (uintptr_t)head < (uintptr_t)tail);
+#ifdef JEMALLOC_HAVE_MPROTECT
+ bool head_and_tail = (head != NULL) && (tail != NULL);
+ size_t range = head_and_tail ?
+ (uintptr_t)tail - (uintptr_t)head + PAGE :
+ SIZE_T_MAX;
+ /*
+ * The amount of work that the kernel does in mprotect depends on the
+ * range argument. SC_LARGE_MINCLASS is an arbitrary threshold chosen
+ * to keep the kernel from doing so much work that it outweighs the
+ * savings of performing one fewer system call.
+ */
+ bool ranged_mprotect = head_and_tail && range <= SC_LARGE_MINCLASS;
+ if (ranged_mprotect) {
+ mprotect(head, range, PROT_READ | PROT_WRITE);
+ } else {
+ if (head != NULL) {
+ mprotect(head, PAGE, PROT_READ | PROT_WRITE);
+ }
+ if (tail != NULL) {
+ mprotect(tail, PAGE, PROT_READ | PROT_WRITE);
+ }
+ }
+#else
+ if (head != NULL) {
+ os_pages_commit(head, PAGE, true);
+ }
+ if (tail != NULL) {
+ os_pages_commit(tail, PAGE, true);
+ }
+#endif
+}
+
bool
pages_purge_lazy(void *addr, size_t size) {
assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
@@ -318,6 +453,9 @@ pages_purge_lazy(void *addr, size_t size) {
#elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
!defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
return (madvise(addr, size, MADV_DONTNEED) != 0);
+#elif defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED) && \
+ !defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS)
+ return (posix_madvise(addr, size, POSIX_MADV_DONTNEED) != 0);
#else
not_reached();
#endif
@@ -334,7 +472,12 @@ pages_purge_forced(void *addr, size_t size) {
#if defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
- return (madvise(addr, size, MADV_DONTNEED) != 0);
+ return (unlikely(madvise_dont_need_zeros_is_faulty) ||
+ madvise(addr, size, MADV_DONTNEED) != 0);
+#elif defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED) && \
+ defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS)
+ return (unlikely(madvise_dont_need_zeros_is_faulty) ||
+ posix_madvise(addr, size, POSIX_MADV_DONTNEED) != 0);
#elif defined(JEMALLOC_MAPS_COALESCE)
/* Try to overlay a new demand-zeroed mapping. */
return pages_commit(addr, size);
@@ -349,8 +492,13 @@ pages_huge_impl(void *addr, size_t size, bool aligned) {
assert(HUGEPAGE_ADDR2BASE(addr) == addr);
assert(HUGEPAGE_CEILING(size) == size);
}
-#ifdef JEMALLOC_HAVE_MADVISE_HUGE
+#if defined(JEMALLOC_HAVE_MADVISE_HUGE)
return (madvise(addr, size, MADV_HUGEPAGE) != 0);
+#elif defined(JEMALLOC_HAVE_MEMCNTL)
+ struct memcntl_mha m = {0};
+ m.mha_cmd = MHA_MAPSIZE_VA;
+ m.mha_pagesize = HUGEPAGE;
+ return (memcntl(addr, size, MC_HAT_ADVISE, (caddr_t)&m, 0, 0) == 0);
#else
return true;
#endif
@@ -394,8 +542,10 @@ bool
pages_dontdump(void *addr, size_t size) {
assert(PAGE_ADDR2BASE(addr) == addr);
assert(PAGE_CEILING(size) == size);
-#ifdef JEMALLOC_MADVISE_DONTDUMP
+#if defined(JEMALLOC_MADVISE_DONTDUMP)
return madvise(addr, size, MADV_DONTDUMP) != 0;
+#elif defined(JEMALLOC_MADVISE_NOCORE)
+ return madvise(addr, size, MADV_NOCORE) != 0;
#else
return false;
#endif
@@ -405,8 +555,10 @@ bool
pages_dodump(void *addr, size_t size) {
assert(PAGE_ADDR2BASE(addr) == addr);
assert(PAGE_CEILING(size) == size);
-#ifdef JEMALLOC_MADVISE_DONTDUMP
+#if defined(JEMALLOC_MADVISE_DONTDUMP)
return madvise(addr, size, MADV_DODUMP) != 0;
+#elif defined(JEMALLOC_MADVISE_NOCORE)
+ return madvise(addr, size, MADV_CORE) != 0;
#else
return false;
#endif
@@ -547,14 +699,14 @@ pages_set_thp_state (void *ptr, size_t size) {
static void
init_thp_state(void) {
- if (!have_madvise_huge) {
+ if (!have_madvise_huge && !have_memcntl) {
if (metadata_thp_enabled() && opt_abort) {
malloc_write("<jemalloc>: no MADV_HUGEPAGE support\n");
abort();
}
goto label_error;
}
-
+#if defined(JEMALLOC_HAVE_MADVISE_HUGE)
static const char sys_state_madvise[] = "always [madvise] never\n";
static const char sys_state_always[] = "[always] madvise never\n";
static const char sys_state_never[] = "always madvise [never]\n";
@@ -563,6 +715,9 @@ init_thp_state(void) {
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
int fd = (int)syscall(SYS_open,
"/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
+#elif defined(JEMALLOC_USE_SYSCALL) && defined(SYS_openat)
+ int fd = (int)syscall(SYS_openat,
+ AT_FDCWD, "/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
#else
int fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
#endif
@@ -578,7 +733,7 @@ init_thp_state(void) {
#endif
if (nread < 0) {
- goto label_error;
+ goto label_error;
}
if (strncmp(buf, sys_state_madvise, (size_t)nread) == 0) {
@@ -591,6 +746,10 @@ init_thp_state(void) {
goto label_error;
}
return;
+#elif defined(JEMALLOC_HAVE_MEMCNTL)
+ init_system_thp_mode = thp_mode_default;
+ return;
+#endif
label_error:
opt_thp = init_system_thp_mode = thp_mode_not_supported;
}
@@ -606,6 +765,20 @@ pages_boot(void) {
return true;
}
+#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
+ if (!opt_trust_madvise) {
+ madvise_dont_need_zeros_is_faulty = !madvise_MADV_DONTNEED_zeroes_pages();
+ if (madvise_dont_need_zeros_is_faulty) {
+ malloc_write("<jemalloc>: MADV_DONTNEED does not work (memset will be used instead)\n");
+ malloc_write("<jemalloc>: (This is the expected behaviour if you are running under QEMU)\n");
+ }
+ } else {
+ /* opt_trust_madvise is enabled: skip the runtime check and
+ * assume MADV_DONTNEED zeroes pages as expected. */
+ madvise_dont_need_zeros_is_faulty = 0;
+ }
+#endif
+
#ifndef _WIN32
mmap_flags = MAP_PRIVATE | MAP_ANON;
#endif
@@ -619,6 +792,8 @@ pages_boot(void) {
mmap_flags |= MAP_NORESERVE;
}
# endif
+#elif defined(__NetBSD__)
+ os_overcommits = true;
#else
os_overcommits = false;
#endif