Diffstat (limited to 'deps/jemalloc/src/ctl.c')
-rw-r--r--  deps/jemalloc/src/ctl.c  1687
1 file changed, 1333 insertions(+), 354 deletions(-)
diff --git a/deps/jemalloc/src/ctl.c b/deps/jemalloc/src/ctl.c
index 48afaa61f..135271baf 100644
--- a/deps/jemalloc/src/ctl.c
+++ b/deps/jemalloc/src/ctl.c
@@ -1,4 +1,3 @@
-#define JEMALLOC_CTL_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
@@ -6,8 +5,16 @@
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/inspect.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
+#include "jemalloc/internal/peak_event.h"
+#include "jemalloc/internal/prof_data.h"
+#include "jemalloc/internal/prof_log.h"
+#include "jemalloc/internal/prof_recent.h"
+#include "jemalloc/internal/prof_stats.h"
+#include "jemalloc/internal/prof_sys.h"
+#include "jemalloc/internal/safety_check.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/util.h"
@@ -60,6 +67,8 @@ CTL_PROTO(background_thread)
CTL_PROTO(max_background_threads)
CTL_PROTO(thread_tcache_enabled)
CTL_PROTO(thread_tcache_flush)
+CTL_PROTO(thread_peak_read)
+CTL_PROTO(thread_peak_reset)
CTL_PROTO(thread_prof_name)
CTL_PROTO(thread_prof_active)
CTL_PROTO(thread_arena)
@@ -67,6 +76,7 @@ CTL_PROTO(thread_allocated)
CTL_PROTO(thread_allocatedp)
CTL_PROTO(thread_deallocated)
CTL_PROTO(thread_deallocatedp)
+CTL_PROTO(thread_idle)
CTL_PROTO(config_cache_oblivious)
CTL_PROTO(config_debug)
CTL_PROTO(config_fill)
@@ -81,7 +91,20 @@ CTL_PROTO(config_utrace)
CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort)
CTL_PROTO(opt_abort_conf)
+CTL_PROTO(opt_cache_oblivious)
+CTL_PROTO(opt_trust_madvise)
CTL_PROTO(opt_confirm_conf)
+CTL_PROTO(opt_hpa)
+CTL_PROTO(opt_hpa_slab_max_alloc)
+CTL_PROTO(opt_hpa_hugification_threshold)
+CTL_PROTO(opt_hpa_hugify_delay_ms)
+CTL_PROTO(opt_hpa_min_purge_interval_ms)
+CTL_PROTO(opt_hpa_dirty_mult)
+CTL_PROTO(opt_hpa_sec_nshards)
+CTL_PROTO(opt_hpa_sec_max_alloc)
+CTL_PROTO(opt_hpa_sec_max_bytes)
+CTL_PROTO(opt_hpa_sec_bytes_after_flush)
+CTL_PROTO(opt_hpa_sec_batch_fill_extra)
CTL_PROTO(opt_metadata_thp)
CTL_PROTO(opt_retain)
CTL_PROTO(opt_dss)
@@ -89,19 +112,31 @@ CTL_PROTO(opt_narenas)
CTL_PROTO(opt_percpu_arena)
CTL_PROTO(opt_oversize_threshold)
CTL_PROTO(opt_background_thread)
+CTL_PROTO(opt_mutex_max_spin)
CTL_PROTO(opt_max_background_threads)
CTL_PROTO(opt_dirty_decay_ms)
CTL_PROTO(opt_muzzy_decay_ms)
CTL_PROTO(opt_stats_print)
CTL_PROTO(opt_stats_print_opts)
+CTL_PROTO(opt_stats_interval)
+CTL_PROTO(opt_stats_interval_opts)
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
CTL_PROTO(opt_utrace)
CTL_PROTO(opt_xmalloc)
+CTL_PROTO(opt_experimental_infallible_new)
CTL_PROTO(opt_tcache)
+CTL_PROTO(opt_tcache_max)
+CTL_PROTO(opt_tcache_nslots_small_min)
+CTL_PROTO(opt_tcache_nslots_small_max)
+CTL_PROTO(opt_tcache_nslots_large)
+CTL_PROTO(opt_lg_tcache_nslots_mul)
+CTL_PROTO(opt_tcache_gc_incr_bytes)
+CTL_PROTO(opt_tcache_gc_delay_bytes)
+CTL_PROTO(opt_lg_tcache_flush_small_div)
+CTL_PROTO(opt_lg_tcache_flush_large_div)
CTL_PROTO(opt_thp)
CTL_PROTO(opt_lg_extent_max_active_fit)
-CTL_PROTO(opt_lg_tcache_max)
CTL_PROTO(opt_prof)
CTL_PROTO(opt_prof_prefix)
CTL_PROTO(opt_prof_active)
@@ -111,7 +146,14 @@ CTL_PROTO(opt_lg_prof_interval)
CTL_PROTO(opt_prof_gdump)
CTL_PROTO(opt_prof_final)
CTL_PROTO(opt_prof_leak)
+CTL_PROTO(opt_prof_leak_error)
CTL_PROTO(opt_prof_accum)
+CTL_PROTO(opt_prof_recent_alloc_max)
+CTL_PROTO(opt_prof_stats)
+CTL_PROTO(opt_prof_sys_thread_name)
+CTL_PROTO(opt_prof_time_res)
+CTL_PROTO(opt_lg_san_uaf_align)
+CTL_PROTO(opt_zero_realloc)
CTL_PROTO(tcache_create)
CTL_PROTO(tcache_flush)
CTL_PROTO(tcache_destroy)
@@ -121,6 +163,7 @@ CTL_PROTO(arena_i_purge)
CTL_PROTO(arena_i_reset)
CTL_PROTO(arena_i_destroy)
CTL_PROTO(arena_i_dss)
+CTL_PROTO(arena_i_oversize_threshold)
CTL_PROTO(arena_i_dirty_decay_ms)
CTL_PROTO(arena_i_muzzy_decay_ms)
CTL_PROTO(arena_i_extent_hooks)
@@ -148,11 +191,18 @@ CTL_PROTO(prof_thread_active_init)
CTL_PROTO(prof_active)
CTL_PROTO(prof_dump)
CTL_PROTO(prof_gdump)
+CTL_PROTO(prof_prefix)
CTL_PROTO(prof_reset)
CTL_PROTO(prof_interval)
CTL_PROTO(lg_prof_sample)
CTL_PROTO(prof_log_start)
CTL_PROTO(prof_log_stop)
+CTL_PROTO(prof_stats_bins_i_live)
+CTL_PROTO(prof_stats_bins_i_accum)
+INDEX_PROTO(prof_stats_bins_i)
+CTL_PROTO(prof_stats_lextents_i_live)
+CTL_PROTO(prof_stats_lextents_i_accum)
+INDEX_PROTO(prof_stats_lextents_i)
CTL_PROTO(stats_arenas_i_small_allocated)
CTL_PROTO(stats_arenas_i_small_nmalloc)
CTL_PROTO(stats_arenas_i_small_ndalloc)
@@ -188,6 +238,39 @@ CTL_PROTO(stats_arenas_i_extents_j_dirty_bytes)
CTL_PROTO(stats_arenas_i_extents_j_muzzy_bytes)
CTL_PROTO(stats_arenas_i_extents_j_retained_bytes)
INDEX_PROTO(stats_arenas_i_extents_j)
+CTL_PROTO(stats_arenas_i_hpa_shard_npurge_passes)
+CTL_PROTO(stats_arenas_i_hpa_shard_npurges)
+CTL_PROTO(stats_arenas_i_hpa_shard_nhugifies)
+CTL_PROTO(stats_arenas_i_hpa_shard_ndehugifies)
+
+/* We have a set of stats for full slabs. */
+CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge)
+CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_nactive_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_nactive_huge)
+CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_ndirty_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_ndirty_huge)
+
+/* A parallel set for the empty slabs. */
+CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_huge)
+CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_nactive_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_nactive_huge)
+CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_ndirty_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_ndirty_huge)
+
+/*
+ * And one for the slabs that are neither empty nor full, but indexed by how
+ * full they are.
+ */
+CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_huge)
+CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_huge)
+CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge)
+
+INDEX_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j)
CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_uptime)
CTL_PROTO(stats_arenas_i_dss)
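/*
 * Illustration (not part of the patch): once merged, the HPA shard stats
 * declared above become reachable through the public mallctl() API. A
 * hedged sketch, assuming stats are enabled, the HPA is active, and the
 * counter reads as a size_t; the "epoch" refresh is the documented way to
 * update the stats snapshot before reading.
 */
#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
print_full_huge_pageslabs(void) {
	uint64_t epoch = 1;
	size_t esz = sizeof(epoch);
	mallctl("epoch", &epoch, &esz, &epoch, esz);

	size_t npageslabs;
	size_t sz = sizeof(npageslabs);
	if (mallctl("stats.arenas.0.hpa_shard.full_slabs.npageslabs_huge",
	    &npageslabs, &sz, NULL, 0) == 0) {
		printf("arena 0 huge full pageslabs: %zu\n", npageslabs);
	}
}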
@@ -209,8 +292,10 @@ CTL_PROTO(stats_arenas_i_base)
CTL_PROTO(stats_arenas_i_internal)
CTL_PROTO(stats_arenas_i_metadata_thp)
CTL_PROTO(stats_arenas_i_tcache_bytes)
+CTL_PROTO(stats_arenas_i_tcache_stashed_bytes)
CTL_PROTO(stats_arenas_i_resident)
CTL_PROTO(stats_arenas_i_abandoned_vm)
+CTL_PROTO(stats_arenas_i_hpa_sec_bytes)
INDEX_PROTO(stats_arenas_i)
CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active)
@@ -222,12 +307,21 @@ CTL_PROTO(stats_metadata_thp)
CTL_PROTO(stats_resident)
CTL_PROTO(stats_mapped)
CTL_PROTO(stats_retained)
+CTL_PROTO(stats_zero_reallocs)
CTL_PROTO(experimental_hooks_install)
CTL_PROTO(experimental_hooks_remove)
+CTL_PROTO(experimental_hooks_prof_backtrace)
+CTL_PROTO(experimental_hooks_prof_dump)
+CTL_PROTO(experimental_hooks_safety_check_abort)
+CTL_PROTO(experimental_thread_activity_callback)
CTL_PROTO(experimental_utilization_query)
CTL_PROTO(experimental_utilization_batch_query)
CTL_PROTO(experimental_arenas_i_pactivep)
INDEX_PROTO(experimental_arenas_i)
+CTL_PROTO(experimental_prof_recent_alloc_max)
+CTL_PROTO(experimental_prof_recent_alloc_dump)
+CTL_PROTO(experimental_batch_alloc)
+CTL_PROTO(experimental_arenas_create_ext)
#define MUTEX_STATS_CTL_PROTO_GEN(n) \
CTL_PROTO(stats_##n##_num_ops) \
@@ -275,6 +369,11 @@ static const ctl_named_node_t thread_tcache_node[] = {
{NAME("flush"), CTL(thread_tcache_flush)}
};
+static const ctl_named_node_t thread_peak_node[] = {
+ {NAME("read"), CTL(thread_peak_read)},
+ {NAME("reset"), CTL(thread_peak_reset)},
+};
+
static const ctl_named_node_t thread_prof_node[] = {
{NAME("name"), CTL(thread_prof_name)},
{NAME("active"), CTL(thread_prof_active)}
@@ -287,7 +386,9 @@ static const ctl_named_node_t thread_node[] = {
{NAME("deallocated"), CTL(thread_deallocated)},
{NAME("deallocatedp"), CTL(thread_deallocatedp)},
{NAME("tcache"), CHILD(named, thread_tcache)},
- {NAME("prof"), CHILD(named, thread_prof)}
+ {NAME("peak"), CHILD(named, thread_peak)},
+ {NAME("prof"), CHILD(named, thread_prof)},
+ {NAME("idle"), CTL(thread_idle)}
};
static const ctl_named_node_t config_node[] = {
@@ -308,27 +409,60 @@ static const ctl_named_node_t config_node[] = {
static const ctl_named_node_t opt_node[] = {
{NAME("abort"), CTL(opt_abort)},
{NAME("abort_conf"), CTL(opt_abort_conf)},
+ {NAME("cache_oblivious"), CTL(opt_cache_oblivious)},
+ {NAME("trust_madvise"), CTL(opt_trust_madvise)},
{NAME("confirm_conf"), CTL(opt_confirm_conf)},
+ {NAME("hpa"), CTL(opt_hpa)},
+ {NAME("hpa_slab_max_alloc"), CTL(opt_hpa_slab_max_alloc)},
+ {NAME("hpa_hugification_threshold"),
+ CTL(opt_hpa_hugification_threshold)},
+ {NAME("hpa_hugify_delay_ms"), CTL(opt_hpa_hugify_delay_ms)},
+ {NAME("hpa_min_purge_interval_ms"), CTL(opt_hpa_min_purge_interval_ms)},
+ {NAME("hpa_dirty_mult"), CTL(opt_hpa_dirty_mult)},
+ {NAME("hpa_sec_nshards"), CTL(opt_hpa_sec_nshards)},
+ {NAME("hpa_sec_max_alloc"), CTL(opt_hpa_sec_max_alloc)},
+ {NAME("hpa_sec_max_bytes"), CTL(opt_hpa_sec_max_bytes)},
+ {NAME("hpa_sec_bytes_after_flush"),
+ CTL(opt_hpa_sec_bytes_after_flush)},
+ {NAME("hpa_sec_batch_fill_extra"),
+ CTL(opt_hpa_sec_batch_fill_extra)},
{NAME("metadata_thp"), CTL(opt_metadata_thp)},
{NAME("retain"), CTL(opt_retain)},
{NAME("dss"), CTL(opt_dss)},
{NAME("narenas"), CTL(opt_narenas)},
{NAME("percpu_arena"), CTL(opt_percpu_arena)},
{NAME("oversize_threshold"), CTL(opt_oversize_threshold)},
+ {NAME("mutex_max_spin"), CTL(opt_mutex_max_spin)},
{NAME("background_thread"), CTL(opt_background_thread)},
{NAME("max_background_threads"), CTL(opt_max_background_threads)},
{NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)},
{NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)},
{NAME("stats_print"), CTL(opt_stats_print)},
{NAME("stats_print_opts"), CTL(opt_stats_print_opts)},
+ {NAME("stats_interval"), CTL(opt_stats_interval)},
+ {NAME("stats_interval_opts"), CTL(opt_stats_interval_opts)},
{NAME("junk"), CTL(opt_junk)},
{NAME("zero"), CTL(opt_zero)},
{NAME("utrace"), CTL(opt_utrace)},
{NAME("xmalloc"), CTL(opt_xmalloc)},
+ {NAME("experimental_infallible_new"),
+ CTL(opt_experimental_infallible_new)},
{NAME("tcache"), CTL(opt_tcache)},
+ {NAME("tcache_max"), CTL(opt_tcache_max)},
+ {NAME("tcache_nslots_small_min"),
+ CTL(opt_tcache_nslots_small_min)},
+ {NAME("tcache_nslots_small_max"),
+ CTL(opt_tcache_nslots_small_max)},
+ {NAME("tcache_nslots_large"), CTL(opt_tcache_nslots_large)},
+ {NAME("lg_tcache_nslots_mul"), CTL(opt_lg_tcache_nslots_mul)},
+ {NAME("tcache_gc_incr_bytes"), CTL(opt_tcache_gc_incr_bytes)},
+ {NAME("tcache_gc_delay_bytes"), CTL(opt_tcache_gc_delay_bytes)},
+ {NAME("lg_tcache_flush_small_div"),
+ CTL(opt_lg_tcache_flush_small_div)},
+ {NAME("lg_tcache_flush_large_div"),
+ CTL(opt_lg_tcache_flush_large_div)},
{NAME("thp"), CTL(opt_thp)},
{NAME("lg_extent_max_active_fit"), CTL(opt_lg_extent_max_active_fit)},
- {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)},
{NAME("prof"), CTL(opt_prof)},
{NAME("prof_prefix"), CTL(opt_prof_prefix)},
{NAME("prof_active"), CTL(opt_prof_active)},
@@ -338,7 +472,14 @@ static const ctl_named_node_t opt_node[] = {
{NAME("prof_gdump"), CTL(opt_prof_gdump)},
{NAME("prof_final"), CTL(opt_prof_final)},
{NAME("prof_leak"), CTL(opt_prof_leak)},
- {NAME("prof_accum"), CTL(opt_prof_accum)}
+ {NAME("prof_leak_error"), CTL(opt_prof_leak_error)},
+ {NAME("prof_accum"), CTL(opt_prof_accum)},
+ {NAME("prof_recent_alloc_max"), CTL(opt_prof_recent_alloc_max)},
+ {NAME("prof_stats"), CTL(opt_prof_stats)},
+ {NAME("prof_sys_thread_name"), CTL(opt_prof_sys_thread_name)},
+ {NAME("prof_time_resolution"), CTL(opt_prof_time_res)},
+ {NAME("lg_san_uaf_align"), CTL(opt_lg_san_uaf_align)},
+ {NAME("zero_realloc"), CTL(opt_zero_realloc)}
};
static const ctl_named_node_t tcache_node[] = {
@@ -354,6 +495,11 @@ static const ctl_named_node_t arena_i_node[] = {
{NAME("reset"), CTL(arena_i_reset)},
{NAME("destroy"), CTL(arena_i_destroy)},
{NAME("dss"), CTL(arena_i_dss)},
+ /*
+ * Undocumented for now, since we anticipate an arena API in flux after
+ * we cut the last 5-series release.
+ */
+ {NAME("oversize_threshold"), CTL(arena_i_oversize_threshold)},
{NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)},
{NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)},
{NAME("extent_hooks"), CTL(arena_i_extent_hooks)},
@@ -408,17 +554,51 @@ static const ctl_named_node_t arenas_node[] = {
{NAME("lookup"), CTL(arenas_lookup)}
};
+static const ctl_named_node_t prof_stats_bins_i_node[] = {
+ {NAME("live"), CTL(prof_stats_bins_i_live)},
+ {NAME("accum"), CTL(prof_stats_bins_i_accum)}
+};
+
+static const ctl_named_node_t super_prof_stats_bins_i_node[] = {
+ {NAME(""), CHILD(named, prof_stats_bins_i)}
+};
+
+static const ctl_indexed_node_t prof_stats_bins_node[] = {
+ {INDEX(prof_stats_bins_i)}
+};
+
+static const ctl_named_node_t prof_stats_lextents_i_node[] = {
+ {NAME("live"), CTL(prof_stats_lextents_i_live)},
+ {NAME("accum"), CTL(prof_stats_lextents_i_accum)}
+};
+
+static const ctl_named_node_t super_prof_stats_lextents_i_node[] = {
+ {NAME(""), CHILD(named, prof_stats_lextents_i)}
+};
+
+static const ctl_indexed_node_t prof_stats_lextents_node[] = {
+ {INDEX(prof_stats_lextents_i)}
+};
+
+static const ctl_named_node_t prof_stats_node[] = {
+ {NAME("bins"), CHILD(indexed, prof_stats_bins)},
+ {NAME("lextents"), CHILD(indexed, prof_stats_lextents)},
+};
+
static const ctl_named_node_t prof_node[] = {
{NAME("thread_active_init"), CTL(prof_thread_active_init)},
{NAME("active"), CTL(prof_active)},
{NAME("dump"), CTL(prof_dump)},
{NAME("gdump"), CTL(prof_gdump)},
+ {NAME("prefix"), CTL(prof_prefix)},
{NAME("reset"), CTL(prof_reset)},
{NAME("interval"), CTL(prof_interval)},
{NAME("lg_sample"), CTL(lg_prof_sample)},
{NAME("log_start"), CTL(prof_log_start)},
- {NAME("log_stop"), CTL(prof_log_stop)}
+ {NAME("log_stop"), CTL(prof_log_stop)},
+ {NAME("stats"), CHILD(named, prof_stats)}
};
+
static const ctl_named_node_t stats_arenas_i_small_node[] = {
{NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
{NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
@@ -521,6 +701,75 @@ MUTEX_PROF_ARENA_MUTEXES
#undef OP
};
+static const ctl_named_node_t stats_arenas_i_hpa_shard_full_slabs_node[] = {
+ {NAME("npageslabs_nonhuge"),
+ CTL(stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge)},
+ {NAME("npageslabs_huge"),
+ CTL(stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge)},
+ {NAME("nactive_nonhuge"),
+ CTL(stats_arenas_i_hpa_shard_full_slabs_nactive_nonhuge)},
+ {NAME("nactive_huge"),
+ CTL(stats_arenas_i_hpa_shard_full_slabs_nactive_huge)},
+ {NAME("ndirty_nonhuge"),
+ CTL(stats_arenas_i_hpa_shard_full_slabs_ndirty_nonhuge)},
+ {NAME("ndirty_huge"),
+ CTL(stats_arenas_i_hpa_shard_full_slabs_ndirty_huge)}
+};
+
+static const ctl_named_node_t stats_arenas_i_hpa_shard_empty_slabs_node[] = {
+ {NAME("npageslabs_nonhuge"),
+ CTL(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_nonhuge)},
+ {NAME("npageslabs_huge"),
+ CTL(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_huge)},
+ {NAME("nactive_nonhuge"),
+ CTL(stats_arenas_i_hpa_shard_empty_slabs_nactive_nonhuge)},
+ {NAME("nactive_huge"),
+ CTL(stats_arenas_i_hpa_shard_empty_slabs_nactive_huge)},
+ {NAME("ndirty_nonhuge"),
+ CTL(stats_arenas_i_hpa_shard_empty_slabs_ndirty_nonhuge)},
+ {NAME("ndirty_huge"),
+ CTL(stats_arenas_i_hpa_shard_empty_slabs_ndirty_huge)}
+};
+
+static const ctl_named_node_t stats_arenas_i_hpa_shard_nonfull_slabs_j_node[] = {
+ {NAME("npageslabs_nonhuge"),
+ CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_nonhuge)},
+ {NAME("npageslabs_huge"),
+ CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_huge)},
+ {NAME("nactive_nonhuge"),
+ CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_nonhuge)},
+ {NAME("nactive_huge"),
+ CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_huge)},
+ {NAME("ndirty_nonhuge"),
+ CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge)},
+ {NAME("ndirty_huge"),
+ CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge)}
+};
+
+static const ctl_named_node_t super_stats_arenas_i_hpa_shard_nonfull_slabs_j_node[] = {
+ {NAME(""),
+ CHILD(named, stats_arenas_i_hpa_shard_nonfull_slabs_j)}
+};
+
+static const ctl_indexed_node_t stats_arenas_i_hpa_shard_nonfull_slabs_node[] =
+{
+ {INDEX(stats_arenas_i_hpa_shard_nonfull_slabs_j)}
+};
+
+static const ctl_named_node_t stats_arenas_i_hpa_shard_node[] = {
+ {NAME("full_slabs"), CHILD(named,
+ stats_arenas_i_hpa_shard_full_slabs)},
+ {NAME("empty_slabs"), CHILD(named,
+ stats_arenas_i_hpa_shard_empty_slabs)},
+ {NAME("nonfull_slabs"), CHILD(indexed,
+ stats_arenas_i_hpa_shard_nonfull_slabs)},
+
+ {NAME("npurge_passes"), CTL(stats_arenas_i_hpa_shard_npurge_passes)},
+ {NAME("npurges"), CTL(stats_arenas_i_hpa_shard_npurges)},
+ {NAME("nhugifies"), CTL(stats_arenas_i_hpa_shard_nhugifies)},
+ {NAME("ndehugifies"), CTL(stats_arenas_i_hpa_shard_ndehugifies)}
+};
+
static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
{NAME("uptime"), CTL(stats_arenas_i_uptime)},
@@ -543,14 +792,18 @@ static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("internal"), CTL(stats_arenas_i_internal)},
{NAME("metadata_thp"), CTL(stats_arenas_i_metadata_thp)},
{NAME("tcache_bytes"), CTL(stats_arenas_i_tcache_bytes)},
+ {NAME("tcache_stashed_bytes"),
+ CTL(stats_arenas_i_tcache_stashed_bytes)},
{NAME("resident"), CTL(stats_arenas_i_resident)},
{NAME("abandoned_vm"), CTL(stats_arenas_i_abandoned_vm)},
+ {NAME("hpa_sec_bytes"), CTL(stats_arenas_i_hpa_sec_bytes)},
{NAME("small"), CHILD(named, stats_arenas_i_small)},
{NAME("large"), CHILD(named, stats_arenas_i_large)},
{NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
{NAME("lextents"), CHILD(indexed, stats_arenas_i_lextents)},
{NAME("extents"), CHILD(indexed, stats_arenas_i_extents)},
- {NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)}
+ {NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)},
+ {NAME("hpa_shard"), CHILD(named, stats_arenas_i_hpa_shard)}
};
static const ctl_named_node_t super_stats_arenas_i_node[] = {
{NAME(""), CHILD(named, stats_arenas_i)}
@@ -589,12 +842,21 @@ static const ctl_named_node_t stats_node[] = {
{NAME("background_thread"),
CHILD(named, stats_background_thread)},
{NAME("mutexes"), CHILD(named, stats_mutexes)},
- {NAME("arenas"), CHILD(indexed, stats_arenas)}
+ {NAME("arenas"), CHILD(indexed, stats_arenas)},
+ {NAME("zero_reallocs"), CTL(stats_zero_reallocs)},
};
static const ctl_named_node_t experimental_hooks_node[] = {
{NAME("install"), CTL(experimental_hooks_install)},
- {NAME("remove"), CTL(experimental_hooks_remove)}
+ {NAME("remove"), CTL(experimental_hooks_remove)},
+ {NAME("prof_backtrace"), CTL(experimental_hooks_prof_backtrace)},
+ {NAME("prof_dump"), CTL(experimental_hooks_prof_dump)},
+ {NAME("safety_check_abort"), CTL(experimental_hooks_safety_check_abort)},
+};
+
+static const ctl_named_node_t experimental_thread_node[] = {
+ {NAME("activity_callback"),
+ CTL(experimental_thread_activity_callback)}
};
static const ctl_named_node_t experimental_utilization_node[] = {
@@ -613,10 +875,19 @@ static const ctl_indexed_node_t experimental_arenas_node[] = {
{INDEX(experimental_arenas_i)}
};
+static const ctl_named_node_t experimental_prof_recent_node[] = {
+ {NAME("alloc_max"), CTL(experimental_prof_recent_alloc_max)},
+ {NAME("alloc_dump"), CTL(experimental_prof_recent_alloc_dump)},
+};
+
static const ctl_named_node_t experimental_node[] = {
{NAME("hooks"), CHILD(named, experimental_hooks)},
{NAME("utilization"), CHILD(named, experimental_utilization)},
- {NAME("arenas"), CHILD(indexed, experimental_arenas)}
+ {NAME("arenas"), CHILD(indexed, experimental_arenas)},
+ {NAME("arenas_create_ext"), CTL(experimental_arenas_create_ext)},
+ {NAME("prof_recent"), CHILD(named, experimental_prof_recent)},
+ {NAME("batch_alloc"), CTL(experimental_batch_alloc)},
+ {NAME("thread"), CHILD(named, experimental_thread)}
};
static const ctl_named_node_t root_node[] = {
@@ -650,28 +921,13 @@ static const ctl_named_node_t super_root_node[] = {
* synchronized by the ctl mutex.
*/
static void
-ctl_accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) {
-#ifdef JEMALLOC_ATOMIC_U64
- uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
- uint64_t cur_src = atomic_load_u64(src, ATOMIC_RELAXED);
- atomic_store_u64(dst, cur_dst + cur_src, ATOMIC_RELAXED);
-#else
- *dst += *src;
-#endif
-}
-
-/* Likewise: with ctl mutex synchronization, reading is simple. */
-static uint64_t
-ctl_arena_stats_read_u64(arena_stats_u64_t *p) {
-#ifdef JEMALLOC_ATOMIC_U64
- return atomic_load_u64(p, ATOMIC_RELAXED);
-#else
- return *p;
-#endif
+ctl_accum_locked_u64(locked_u64_t *dst, locked_u64_t *src) {
+ locked_inc_u64_unsynchronized(dst,
+ locked_read_u64_unsynchronized(src));
}
static void
-accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) {
+ctl_accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) {
size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
size_t cur_src = atomic_load_zu(src, ATOMIC_RELAXED);
atomic_store_zu(dst, cur_dst + cur_src, ATOMIC_RELAXED);
@@ -783,11 +1039,15 @@ ctl_arena_clear(ctl_arena_t *ctl_arena) {
ctl_arena->astats->nfills_small = 0;
ctl_arena->astats->nflushes_small = 0;
memset(ctl_arena->astats->bstats, 0, SC_NBINS *
- sizeof(bin_stats_t));
+ sizeof(bin_stats_data_t));
memset(ctl_arena->astats->lstats, 0, (SC_NSIZES - SC_NBINS) *
sizeof(arena_stats_large_t));
memset(ctl_arena->astats->estats, 0, SC_NPSIZES *
- sizeof(arena_stats_extents_t));
+ sizeof(pac_estats_t));
+ memset(&ctl_arena->astats->hpastats, 0,
+ sizeof(hpa_shard_stats_t));
+ memset(&ctl_arena->astats->secstats, 0,
+ sizeof(sec_stats_t));
}
}
@@ -801,22 +1061,19 @@ ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) {
&ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
&ctl_arena->pdirty, &ctl_arena->pmuzzy,
&ctl_arena->astats->astats, ctl_arena->astats->bstats,
- ctl_arena->astats->lstats, ctl_arena->astats->estats);
+ ctl_arena->astats->lstats, ctl_arena->astats->estats,
+ &ctl_arena->astats->hpastats, &ctl_arena->astats->secstats);
for (i = 0; i < SC_NBINS; i++) {
- ctl_arena->astats->allocated_small +=
- ctl_arena->astats->bstats[i].curregs *
+ bin_stats_t *bstats =
+ &ctl_arena->astats->bstats[i].stats_data;
+ ctl_arena->astats->allocated_small += bstats->curregs *
sz_index2size(i);
- ctl_arena->astats->nmalloc_small +=
- ctl_arena->astats->bstats[i].nmalloc;
- ctl_arena->astats->ndalloc_small +=
- ctl_arena->astats->bstats[i].ndalloc;
- ctl_arena->astats->nrequests_small +=
- ctl_arena->astats->bstats[i].nrequests;
- ctl_arena->astats->nfills_small +=
- ctl_arena->astats->bstats[i].nfills;
- ctl_arena->astats->nflushes_small +=
- ctl_arena->astats->bstats[i].nflushes;
+ ctl_arena->astats->nmalloc_small += bstats->nmalloc;
+ ctl_arena->astats->ndalloc_small += bstats->ndalloc;
+ ctl_arena->astats->nrequests_small += bstats->nrequests;
+ ctl_arena->astats->nfills_small += bstats->nfills;
+ ctl_arena->astats->nflushes_small += bstats->nflushes;
}
} else {
arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads,
@@ -848,27 +1105,32 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
ctl_arena_stats_t *astats = ctl_arena->astats;
if (!destroyed) {
- accum_atomic_zu(&sdstats->astats.mapped,
- &astats->astats.mapped);
- accum_atomic_zu(&sdstats->astats.retained,
- &astats->astats.retained);
- accum_atomic_zu(&sdstats->astats.extent_avail,
- &astats->astats.extent_avail);
- }
-
- ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge,
- &astats->astats.decay_dirty.npurge);
- ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.nmadvise,
- &astats->astats.decay_dirty.nmadvise);
- ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.purged,
- &astats->astats.decay_dirty.purged);
-
- ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.npurge,
- &astats->astats.decay_muzzy.npurge);
- ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.nmadvise,
- &astats->astats.decay_muzzy.nmadvise);
- ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.purged,
- &astats->astats.decay_muzzy.purged);
+ sdstats->astats.mapped += astats->astats.mapped;
+ sdstats->astats.pa_shard_stats.pac_stats.retained
+ += astats->astats.pa_shard_stats.pac_stats.retained;
+ sdstats->astats.pa_shard_stats.edata_avail
+ += astats->astats.pa_shard_stats.edata_avail;
+ }
+
+ ctl_accum_locked_u64(
+ &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge,
+ &astats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge);
+ ctl_accum_locked_u64(
+ &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise,
+ &astats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise);
+ ctl_accum_locked_u64(
+ &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.purged,
+ &astats->astats.pa_shard_stats.pac_stats.decay_dirty.purged);
+
+ ctl_accum_locked_u64(
+ &sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge,
+ &astats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge);
+ ctl_accum_locked_u64(
+ &sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise,
+ &astats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise);
+ ctl_accum_locked_u64(
+ &sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged,
+ &astats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged);
#define OP(mtx) malloc_mutex_prof_merge( \
&(sdstats->astats.mutex_prof_data[ \
@@ -878,14 +1140,11 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
MUTEX_PROF_ARENA_MUTEXES
#undef OP
if (!destroyed) {
- accum_atomic_zu(&sdstats->astats.base,
- &astats->astats.base);
- accum_atomic_zu(&sdstats->astats.internal,
+ sdstats->astats.base += astats->astats.base;
+ sdstats->astats.resident += astats->astats.resident;
+ sdstats->astats.metadata_thp += astats->astats.metadata_thp;
+ ctl_accum_atomic_zu(&sdstats->astats.internal,
&astats->astats.internal);
- accum_atomic_zu(&sdstats->astats.resident,
- &astats->astats.resident);
- accum_atomic_zu(&sdstats->astats.metadata_thp,
- &astats->astats.metadata_thp);
} else {
assert(atomic_load_zu(
&astats->astats.internal, ATOMIC_RELAXED) == 0);
@@ -903,23 +1162,23 @@ MUTEX_PROF_ARENA_MUTEXES
sdstats->nflushes_small += astats->nflushes_small;
if (!destroyed) {
- accum_atomic_zu(&sdstats->astats.allocated_large,
- &astats->astats.allocated_large);
+ sdstats->astats.allocated_large +=
+ astats->astats.allocated_large;
} else {
- assert(atomic_load_zu(&astats->astats.allocated_large,
- ATOMIC_RELAXED) == 0);
+ assert(astats->astats.allocated_large == 0);
}
- ctl_accum_arena_stats_u64(&sdstats->astats.nmalloc_large,
- &astats->astats.nmalloc_large);
- ctl_accum_arena_stats_u64(&sdstats->astats.ndalloc_large,
- &astats->astats.ndalloc_large);
- ctl_accum_arena_stats_u64(&sdstats->astats.nrequests_large,
- &astats->astats.nrequests_large);
- accum_atomic_zu(&sdstats->astats.abandoned_vm,
- &astats->astats.abandoned_vm);
-
- accum_atomic_zu(&sdstats->astats.tcache_bytes,
- &astats->astats.tcache_bytes);
+ sdstats->astats.nmalloc_large += astats->astats.nmalloc_large;
+ sdstats->astats.ndalloc_large += astats->astats.ndalloc_large;
+ sdstats->astats.nrequests_large
+ += astats->astats.nrequests_large;
+ sdstats->astats.nflushes_large += astats->astats.nflushes_large;
+ ctl_accum_atomic_zu(
+ &sdstats->astats.pa_shard_stats.pac_stats.abandoned_vm,
+ &astats->astats.pa_shard_stats.pac_stats.abandoned_vm);
+
+ sdstats->astats.tcache_bytes += astats->astats.tcache_bytes;
+ sdstats->astats.tcache_stashed_bytes +=
+ astats->astats.tcache_stashed_bytes;
if (ctl_arena->arena_ind == 0) {
sdstats->astats.uptime = astats->astats.uptime;
@@ -927,29 +1186,26 @@ MUTEX_PROF_ARENA_MUTEXES
/* Merge bin stats. */
for (i = 0; i < SC_NBINS; i++) {
- sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
- sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
- sdstats->bstats[i].nrequests +=
- astats->bstats[i].nrequests;
+ bin_stats_t *bstats = &astats->bstats[i].stats_data;
+ bin_stats_t *merged = &sdstats->bstats[i].stats_data;
+ merged->nmalloc += bstats->nmalloc;
+ merged->ndalloc += bstats->ndalloc;
+ merged->nrequests += bstats->nrequests;
if (!destroyed) {
- sdstats->bstats[i].curregs +=
- astats->bstats[i].curregs;
+ merged->curregs += bstats->curregs;
} else {
- assert(astats->bstats[i].curregs == 0);
+ assert(bstats->curregs == 0);
}
- sdstats->bstats[i].nfills += astats->bstats[i].nfills;
- sdstats->bstats[i].nflushes +=
- astats->bstats[i].nflushes;
- sdstats->bstats[i].nslabs += astats->bstats[i].nslabs;
- sdstats->bstats[i].reslabs += astats->bstats[i].reslabs;
+ merged->nfills += bstats->nfills;
+ merged->nflushes += bstats->nflushes;
+ merged->nslabs += bstats->nslabs;
+ merged->reslabs += bstats->reslabs;
if (!destroyed) {
- sdstats->bstats[i].curslabs +=
- astats->bstats[i].curslabs;
- sdstats->bstats[i].nonfull_slabs +=
- astats->bstats[i].nonfull_slabs;
+ merged->curslabs += bstats->curslabs;
+ merged->nonfull_slabs += bstats->nonfull_slabs;
} else {
- assert(astats->bstats[i].curslabs == 0);
- assert(astats->bstats[i].nonfull_slabs == 0);
+ assert(bstats->curslabs == 0);
+ assert(bstats->nonfull_slabs == 0);
}
malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data,
&astats->bstats[i].mutex_data);
@@ -957,11 +1213,11 @@ MUTEX_PROF_ARENA_MUTEXES
/* Merge stats for large allocations. */
for (i = 0; i < SC_NSIZES - SC_NBINS; i++) {
- ctl_accum_arena_stats_u64(&sdstats->lstats[i].nmalloc,
+ ctl_accum_locked_u64(&sdstats->lstats[i].nmalloc,
&astats->lstats[i].nmalloc);
- ctl_accum_arena_stats_u64(&sdstats->lstats[i].ndalloc,
+ ctl_accum_locked_u64(&sdstats->lstats[i].ndalloc,
&astats->lstats[i].ndalloc);
- ctl_accum_arena_stats_u64(&sdstats->lstats[i].nrequests,
+ ctl_accum_locked_u64(&sdstats->lstats[i].nrequests,
&astats->lstats[i].nrequests);
if (!destroyed) {
sdstats->lstats[i].curlextents +=
@@ -973,19 +1229,21 @@ MUTEX_PROF_ARENA_MUTEXES
/* Merge extents stats. */
for (i = 0; i < SC_NPSIZES; i++) {
- accum_atomic_zu(&sdstats->estats[i].ndirty,
- &astats->estats[i].ndirty);
- accum_atomic_zu(&sdstats->estats[i].nmuzzy,
- &astats->estats[i].nmuzzy);
- accum_atomic_zu(&sdstats->estats[i].nretained,
- &astats->estats[i].nretained);
- accum_atomic_zu(&sdstats->estats[i].dirty_bytes,
- &astats->estats[i].dirty_bytes);
- accum_atomic_zu(&sdstats->estats[i].muzzy_bytes,
- &astats->estats[i].muzzy_bytes);
- accum_atomic_zu(&sdstats->estats[i].retained_bytes,
- &astats->estats[i].retained_bytes);
+ sdstats->estats[i].ndirty += astats->estats[i].ndirty;
+ sdstats->estats[i].nmuzzy += astats->estats[i].nmuzzy;
+ sdstats->estats[i].nretained
+ += astats->estats[i].nretained;
+ sdstats->estats[i].dirty_bytes
+ += astats->estats[i].dirty_bytes;
+ sdstats->estats[i].muzzy_bytes
+ += astats->estats[i].muzzy_bytes;
+ sdstats->estats[i].retained_bytes
+ += astats->estats[i].retained_bytes;
}
+
+ /* Merge HPA stats. */
+ hpa_shard_stats_accum(&sdstats->hpastats, &astats->hpastats);
+ sec_stats_accum(&sdstats->secstats, &astats->secstats);
}
}
@@ -1001,7 +1259,7 @@ ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena,
}
static unsigned
-ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) {
+ctl_arena_init(tsd_t *tsd, const arena_config_t *config) {
unsigned arena_ind;
ctl_arena_t *ctl_arena;
@@ -1019,7 +1277,7 @@ ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) {
}
/* Initialize new arena. */
- if (arena_init(tsd_tsdn(tsd), arena_ind, extent_hooks) == NULL) {
+ if (arena_init(tsd_tsdn(tsd), arena_ind, config) == NULL) {
return UINT_MAX;
}
@@ -1036,8 +1294,11 @@ ctl_background_thread_stats_read(tsdn_t *tsdn) {
if (!have_background_thread ||
background_thread_stats_read(tsdn, stats)) {
memset(stats, 0, sizeof(background_thread_stats_t));
- nstime_init(&stats->run_interval, 0);
+ nstime_init_zero(&stats->run_interval);
}
+ malloc_mutex_prof_copy(
+ &ctl_stats->mutex_prof_data[global_prof_mutex_max_per_bg_thd],
+ &stats->max_counter_per_bg_thd);
}
static void
@@ -1069,21 +1330,17 @@ ctl_refresh(tsdn_t *tsdn) {
if (config_stats) {
ctl_stats->allocated = ctl_sarena->astats->allocated_small +
- atomic_load_zu(&ctl_sarena->astats->astats.allocated_large,
- ATOMIC_RELAXED);
+ ctl_sarena->astats->astats.allocated_large;
ctl_stats->active = (ctl_sarena->pactive << LG_PAGE);
- ctl_stats->metadata = atomic_load_zu(
- &ctl_sarena->astats->astats.base, ATOMIC_RELAXED) +
+ ctl_stats->metadata = ctl_sarena->astats->astats.base +
atomic_load_zu(&ctl_sarena->astats->astats.internal,
ATOMIC_RELAXED);
- ctl_stats->metadata_thp = atomic_load_zu(
- &ctl_sarena->astats->astats.metadata_thp, ATOMIC_RELAXED);
- ctl_stats->resident = atomic_load_zu(
- &ctl_sarena->astats->astats.resident, ATOMIC_RELAXED);
- ctl_stats->mapped = atomic_load_zu(
- &ctl_sarena->astats->astats.mapped, ATOMIC_RELAXED);
- ctl_stats->retained = atomic_load_zu(
- &ctl_sarena->astats->astats.retained, ATOMIC_RELAXED);
+ ctl_stats->resident = ctl_sarena->astats->astats.resident;
+ ctl_stats->metadata_thp =
+ ctl_sarena->astats->astats.metadata_thp;
+ ctl_stats->mapped = ctl_sarena->astats->astats.mapped;
+ ctl_stats->retained = ctl_sarena->astats->astats
+ .pa_shard_stats.pac_stats.retained;
ctl_background_thread_stats_read(tsdn);
@@ -1093,8 +1350,20 @@ ctl_refresh(tsdn_t *tsdn) {
malloc_mutex_unlock(tsdn, &mtx);
if (config_prof && opt_prof) {
- READ_GLOBAL_MUTEX_PROF_DATA(global_prof_mutex_prof,
- bt2gctx_mtx);
+ READ_GLOBAL_MUTEX_PROF_DATA(
+ global_prof_mutex_prof, bt2gctx_mtx);
+ READ_GLOBAL_MUTEX_PROF_DATA(
+ global_prof_mutex_prof_thds_data, tdatas_mtx);
+ READ_GLOBAL_MUTEX_PROF_DATA(
+ global_prof_mutex_prof_dump, prof_dump_mtx);
+ READ_GLOBAL_MUTEX_PROF_DATA(
+ global_prof_mutex_prof_recent_alloc,
+ prof_recent_alloc_mtx);
+ READ_GLOBAL_MUTEX_PROF_DATA(
+ global_prof_mutex_prof_recent_dump,
+ prof_recent_dump_mtx);
+ READ_GLOBAL_MUTEX_PROF_DATA(
+ global_prof_mutex_prof_stats, prof_stats_mtx);
}
if (have_background_thread) {
READ_GLOBAL_MUTEX_PROF_DATA(
@@ -1191,8 +1460,9 @@ label_return:
}
static int
-ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
- size_t *mibp, size_t *depthp) {
+ctl_lookup(tsdn_t *tsdn, const ctl_named_node_t *starting_node,
+ const char *name, const ctl_named_node_t **ending_nodep, size_t *mibp,
+ size_t *depthp) {
int ret;
const char *elm, *tdot, *dot;
size_t elen, i, j;
@@ -1206,7 +1476,7 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
ret = ENOENT;
goto label_return;
}
- node = super_root_node;
+ node = starting_node;
for (i = 0; i < *depthp; i++) {
assert(node);
assert(node->nchildren > 0);
@@ -1220,10 +1490,6 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
if (strlen(child->name) == elen &&
strncmp(elm, child->name, elen) == 0) {
node = child;
- if (nodesp != NULL) {
- nodesp[i] =
- (const ctl_node_t *)node;
- }
mibp[i] = j;
break;
}
@@ -1250,13 +1516,11 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
goto label_return;
}
- if (nodesp != NULL) {
- nodesp[i] = (const ctl_node_t *)node;
- }
mibp[i] = (size_t)index;
}
- if (node->ctl != NULL) {
+ /* Reached the end? */
+ if (node->ctl != NULL || *dot == '\0') {
/* Terminal node. */
if (*dot != '\0') {
/*
@@ -1272,16 +1536,14 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
}
/* Update elm. */
- if (*dot == '\0') {
- /* No more elements. */
- ret = ENOENT;
- goto label_return;
- }
elm = &dot[1];
dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
strchr(elm, '\0');
elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
}
+ if (ending_nodep != NULL) {
+ *ending_nodep = node;
+ }
ret = 0;
label_return:
@@ -1293,7 +1555,6 @@ ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
void *newp, size_t newlen) {
int ret;
size_t depth;
- ctl_node_t const *nodes[CTL_MAX_DEPTH];
size_t mib[CTL_MAX_DEPTH];
const ctl_named_node_t *node;
@@ -1303,12 +1564,12 @@ ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
}
depth = CTL_MAX_DEPTH;
- ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth);
+ ret = ctl_lookup(tsd_tsdn(tsd), super_root_node, name, &node, mib,
+ &depth);
if (ret != 0) {
goto label_return;
}
- node = ctl_named_node(nodes[depth-1]);
if (node != NULL && node->ctl) {
ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
} else {
@@ -1329,26 +1590,19 @@ ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) {
goto label_return;
}
- ret = ctl_lookup(tsd_tsdn(tsd), name, NULL, mibp, miblenp);
+ ret = ctl_lookup(tsd_tsdn(tsd), super_root_node, name, NULL, mibp,
+ miblenp);
label_return:
return(ret);
}
-int
-ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen) {
+static int
+ctl_lookupbymib(tsdn_t *tsdn, const ctl_named_node_t **ending_nodep,
+ const size_t *mib, size_t miblen) {
int ret;
- const ctl_named_node_t *node;
- size_t i;
- if (!ctl_initialized && ctl_init(tsd)) {
- ret = EAGAIN;
- goto label_return;
- }
-
- /* Iterate down the tree. */
- node = super_root_node;
- for (i = 0; i < miblen; i++) {
+ const ctl_named_node_t *node = super_root_node;
+ for (size_t i = 0; i < miblen; i++) {
assert(node);
assert(node->nchildren > 0);
if (ctl_named_node(node->children) != NULL) {
@@ -1363,13 +1617,36 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
/* Indexed element. */
inode = ctl_indexed_node(node->children);
- node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]);
+ node = inode->index(tsdn, mib, miblen, mib[i]);
if (node == NULL) {
ret = ENOENT;
goto label_return;
}
}
}
+ assert(ending_nodep != NULL);
+ *ending_nodep = node;
+ ret = 0;
+
+label_return:
+ return(ret);
+}
+
+int
+ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ const ctl_named_node_t *node;
+
+ if (!ctl_initialized && ctl_init(tsd)) {
+ ret = EAGAIN;
+ goto label_return;
+ }
+
+ ret = ctl_lookupbymib(tsd_tsdn(tsd), &node, mib, miblen);
+ if (ret != 0) {
+ goto label_return;
+ }
/* Call the ctl function. */
if (node && node->ctl) {
@@ -1383,6 +1660,81 @@ label_return:
return(ret);
}
+int
+ctl_mibnametomib(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
+ size_t *miblenp) {
+ int ret;
+ const ctl_named_node_t *node;
+
+ if (!ctl_initialized && ctl_init(tsd)) {
+ ret = EAGAIN;
+ goto label_return;
+ }
+
+ ret = ctl_lookupbymib(tsd_tsdn(tsd), &node, mib, miblen);
+ if (ret != 0) {
+ goto label_return;
+ }
+ if (node == NULL || node->ctl != NULL) {
+ ret = ENOENT;
+ goto label_return;
+ }
+
+ assert(miblenp != NULL);
+ assert(*miblenp >= miblen);
+ *miblenp -= miblen;
+ ret = ctl_lookup(tsd_tsdn(tsd), node, name, NULL, mib + miblen,
+ miblenp);
+ *miblenp += miblen;
+label_return:
+ return(ret);
+}
+
+int
+ctl_bymibname(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
+ size_t *miblenp, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ const ctl_named_node_t *node;
+
+ if (!ctl_initialized && ctl_init(tsd)) {
+ ret = EAGAIN;
+ goto label_return;
+ }
+
+ ret = ctl_lookupbymib(tsd_tsdn(tsd), &node, mib, miblen);
+ if (ret != 0) {
+ goto label_return;
+ }
+ if (node == NULL || node->ctl != NULL) {
+ ret = ENOENT;
+ goto label_return;
+ }
+
+ assert(miblenp != NULL);
+ assert(*miblenp >= miblen);
+ *miblenp -= miblen;
+ /*
+ * The same node supplies the starting node and stores the ending node.
+ */
+ ret = ctl_lookup(tsd_tsdn(tsd), node, name, &node, mib + miblen,
+ miblenp);
+ *miblenp += miblen;
+ if (ret != 0) {
+ goto label_return;
+ }
+
+ if (node != NULL && node->ctl) {
+ ret = node->ctl(tsd, mib, *miblenp, oldp, oldlenp, newp,
+ newlen);
+ } else {
+ /* The name refers to a partial path through the ctl tree. */
+ ret = ENOENT;
+ }
+
+label_return:
+ return(ret);
+}
+
bool
ctl_boot(void) {
if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL,
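/*
 * Context sketch (not part of the patch): ctl_mibnametomib() and
 * ctl_bymibname() extend the classic MIB workflow, in which a name is
 * translated once and the resulting MIB is reused with its index
 * components rewritten. The long-standing public form of that workflow:
 */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
print_arena_nthreads(unsigned arena_ind) {
	size_t mib[4];	/* "stats.arenas.<i>.nthreads" has four components. */
	size_t miblen = sizeof(mib) / sizeof(mib[0]);
	unsigned nthreads;
	size_t sz = sizeof(nthreads);

	if (mallctlnametomib("stats.arenas.0.nthreads", mib, &miblen) != 0) {
		return;
	}
	mib[2] = arena_ind;	/* Rewrite the index component. */
	if (mallctlbymib(mib, miblen, &nthreads, &sz, NULL, 0) == 0) {
		printf("arena %u: %u threads\n", arena_ind, nthreads);
	}
}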
@@ -1410,6 +1762,11 @@ ctl_postfork_child(tsdn_t *tsdn) {
malloc_mutex_postfork_child(tsdn, &ctl_mtx);
}
+void
+ctl_mtx_assert_held(tsdn_t *tsdn) {
+ malloc_mutex_assert_owner(tsdn, &ctl_mtx);
+}
+
/******************************************************************************/
/* *_ctl() functions. */
@@ -1427,6 +1784,7 @@ ctl_postfork_child(tsdn_t *tsdn) {
} \
} while (0)
+/* Can read or write, but not both. */
#define READ_XOR_WRITE() do { \
if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \
newlen != 0)) { \
@@ -1435,12 +1793,31 @@ ctl_postfork_child(tsdn_t *tsdn) {
} \
} while (0)
+/* Can neither read nor write. */
+#define NEITHER_READ_NOR_WRITE() do { \
+ if (oldp != NULL || oldlenp != NULL || newp != NULL || \
+ newlen != 0) { \
+ ret = EPERM; \
+ goto label_return; \
+ } \
+} while (0)
+
+/* Verify that the space provided is enough. */
+#define VERIFY_READ(t) do { \
+ if (oldp == NULL || oldlenp == NULL || *oldlenp != sizeof(t)) { \
+ *oldlenp = 0; \
+ ret = EINVAL; \
+ goto label_return; \
+ } \
+} while (0)
+
#define READ(v, t) do { \
if (oldp != NULL && oldlenp != NULL) { \
if (*oldlenp != sizeof(t)) { \
size_t copylen = (sizeof(t) <= *oldlenp) \
? sizeof(t) : *oldlenp; \
memcpy(oldp, (void *)&(v), copylen); \
+ *oldlenp = copylen; \
ret = EINVAL; \
goto label_return; \
} \
@@ -1458,6 +1835,14 @@ ctl_postfork_child(tsdn_t *tsdn) {
} \
} while (0)
+#define ASSURED_WRITE(v, t) do { \
+ if (newp == NULL || newlen != sizeof(t)) { \
+ ret = EINVAL; \
+ goto label_return; \
+ } \
+ (v) = *(t *)newp; \
+} while (0)
+
#define MIB_UNSIGNED(v, i) do { \
if (mib[i] > UINT_MAX) { \
ret = EFAULT; \
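/*
 * Caller-side sketch (not part of the patch) of the contract these macros
 * enforce: VERIFY_READ and ASSURED_WRITE demand exactly sizeof(t), and the
 * revised READ now reports the truncated copy length back through
 * *oldlenp on a size mismatch.
 */
#include <errno.h>
#include <jemalloc/jemalloc.h>

static void
sized_read_example(void) {
	unsigned narenas;
	size_t sz = sizeof(narenas);	/* Must match the handler's type. */
	int err = mallctl("arenas.narenas", &narenas, &sz, NULL, 0);
	if (err == EINVAL) {
		/* sz now holds the number of bytes actually copied. */
	}
}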
@@ -1497,8 +1882,8 @@ label_return: \
#define CTL_RO_CGEN(c, n, v, t) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
@@ -1540,8 +1925,8 @@ label_return: \
*/
#define CTL_RO_NL_CGEN(c, n, v, t) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
@@ -1559,8 +1944,8 @@ label_return: \
#define CTL_RO_NL_GEN(n, v, t) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
@@ -1573,29 +1958,10 @@ label_return: \
return ret; \
}
-#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \
-static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) { \
- int ret; \
- t oldval; \
- \
- if (!(c)) { \
- return ENOENT; \
- } \
- READONLY(); \
- oldval = (m(tsd)); \
- READ(oldval, t); \
- \
- ret = 0; \
-label_return: \
- return ret; \
-}
-
#define CTL_RO_CONFIG_GEN(n, t) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
@@ -1761,7 +2127,34 @@ CTL_RO_CONFIG_GEN(config_xmalloc, bool)
CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool)
+CTL_RO_NL_GEN(opt_cache_oblivious, opt_cache_oblivious, bool)
+CTL_RO_NL_GEN(opt_trust_madvise, opt_trust_madvise, bool)
CTL_RO_NL_GEN(opt_confirm_conf, opt_confirm_conf, bool)
+
+/* HPA options. */
+CTL_RO_NL_GEN(opt_hpa, opt_hpa, bool)
+CTL_RO_NL_GEN(opt_hpa_hugification_threshold,
+ opt_hpa_opts.hugification_threshold, size_t)
+CTL_RO_NL_GEN(opt_hpa_hugify_delay_ms, opt_hpa_opts.hugify_delay_ms, uint64_t)
+CTL_RO_NL_GEN(opt_hpa_min_purge_interval_ms, opt_hpa_opts.min_purge_interval_ms,
+ uint64_t)
+
+/*
+ * This will have to change before we publicly document this option; fxp_t and
+ * its representation are internal implementation details.
+ */
+CTL_RO_NL_GEN(opt_hpa_dirty_mult, opt_hpa_opts.dirty_mult, fxp_t)
+CTL_RO_NL_GEN(opt_hpa_slab_max_alloc, opt_hpa_opts.slab_max_alloc, size_t)
+
+/* HPA SEC options */
+CTL_RO_NL_GEN(opt_hpa_sec_nshards, opt_hpa_sec_opts.nshards, size_t)
+CTL_RO_NL_GEN(opt_hpa_sec_max_alloc, opt_hpa_sec_opts.max_alloc, size_t)
+CTL_RO_NL_GEN(opt_hpa_sec_max_bytes, opt_hpa_sec_opts.max_bytes, size_t)
+CTL_RO_NL_GEN(opt_hpa_sec_bytes_after_flush, opt_hpa_sec_opts.bytes_after_flush,
+ size_t)
+CTL_RO_NL_GEN(opt_hpa_sec_batch_fill_extra, opt_hpa_sec_opts.batch_fill_extra,
+ size_t)
+
CTL_RO_NL_GEN(opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp],
const char *)
CTL_RO_NL_GEN(opt_retain, opt_retain, bool)
@@ -1769,6 +2162,7 @@ CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena],
const char *)
+CTL_RO_NL_GEN(opt_mutex_max_spin, opt_mutex_max_spin, int64_t)
CTL_RO_NL_GEN(opt_oversize_threshold, opt_oversize_threshold, size_t)
CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool)
CTL_RO_NL_GEN(opt_max_background_threads, opt_max_background_threads, size_t)
@@ -1776,15 +2170,31 @@ CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t)
CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_GEN(opt_stats_print_opts, opt_stats_print_opts, const char *)
+CTL_RO_NL_GEN(opt_stats_interval, opt_stats_interval, int64_t)
+CTL_RO_NL_GEN(opt_stats_interval_opts, opt_stats_interval_opts, const char *)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
+CTL_RO_NL_CGEN(config_enable_cxx, opt_experimental_infallible_new,
+ opt_experimental_infallible_new, bool)
CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool)
+CTL_RO_NL_GEN(opt_tcache_max, opt_tcache_max, size_t)
+CTL_RO_NL_GEN(opt_tcache_nslots_small_min, opt_tcache_nslots_small_min,
+ unsigned)
+CTL_RO_NL_GEN(opt_tcache_nslots_small_max, opt_tcache_nslots_small_max,
+ unsigned)
+CTL_RO_NL_GEN(opt_tcache_nslots_large, opt_tcache_nslots_large, unsigned)
+CTL_RO_NL_GEN(opt_lg_tcache_nslots_mul, opt_lg_tcache_nslots_mul, ssize_t)
+CTL_RO_NL_GEN(opt_tcache_gc_incr_bytes, opt_tcache_gc_incr_bytes, size_t)
+CTL_RO_NL_GEN(opt_tcache_gc_delay_bytes, opt_tcache_gc_delay_bytes, size_t)
+CTL_RO_NL_GEN(opt_lg_tcache_flush_small_div, opt_lg_tcache_flush_small_div,
+ unsigned)
+CTL_RO_NL_GEN(opt_lg_tcache_flush_large_div, opt_lg_tcache_flush_large_div,
+ unsigned)
CTL_RO_NL_GEN(opt_thp, thp_mode_names[opt_thp], const char *)
CTL_RO_NL_GEN(opt_lg_extent_max_active_fit, opt_lg_extent_max_active_fit,
size_t)
-CTL_RO_NL_GEN(opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
@@ -1796,6 +2206,18 @@ CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_leak_error, opt_prof_leak_error, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_recent_alloc_max,
+ opt_prof_recent_alloc_max, ssize_t)
+CTL_RO_NL_CGEN(config_prof, opt_prof_stats, opt_prof_stats, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_sys_thread_name, opt_prof_sys_thread_name,
+ bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_time_res,
+ prof_time_res_mode_names[opt_prof_time_res], const char *)
+CTL_RO_NL_CGEN(config_uaf_detection, opt_lg_san_uaf_align,
+ opt_lg_san_uaf_align, ssize_t)
+CTL_RO_NL_GEN(opt_zero_realloc,
+ zero_realloc_mode_names[opt_zero_realloc_action], const char *)
/******************************************************************************/
@@ -1843,10 +2265,11 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
goto label_return;
}
/* Set new arena/tcache associations. */
- arena_migrate(tsd, oldind, newind);
+ arena_migrate(tsd, oldarena, newarena);
if (tcache_available(tsd)) {
tcache_arena_reassociate(tsd_tsdn(tsd),
- tsd_tcachep_get(tsd), newarena);
+ tsd_tcache_slowp_get(tsd), tsd_tcachep_get(tsd),
+ newarena);
}
}
@@ -1855,14 +2278,10 @@ label_return:
return ret;
}
-CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get,
- uint64_t)
-CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get,
- uint64_t *)
-CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get,
- uint64_t)
-CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
- tsd_thread_deallocatedp_get, uint64_t *)
+CTL_RO_NL_GEN(thread_allocated, tsd_thread_allocated_get(tsd), uint64_t)
+CTL_RO_NL_GEN(thread_allocatedp, tsd_thread_allocatedp_get(tsd), uint64_t *)
+CTL_RO_NL_GEN(thread_deallocated, tsd_thread_deallocated_get(tsd), uint64_t)
+CTL_RO_NL_GEN(thread_deallocatedp, tsd_thread_deallocatedp_get(tsd), uint64_t *)
static int
thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib,
@@ -1897,8 +2316,7 @@ thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib,
goto label_return;
}
- READONLY();
- WRITEONLY();
+ NEITHER_READ_NOR_WRITE();
tcache_flush(tsd);
@@ -1908,12 +2326,44 @@ label_return:
}
static int
+thread_peak_read_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp,
+ size_t newlen) {
+ int ret;
+ if (!config_stats) {
+ return ENOENT;
+ }
+ READONLY();
+ peak_event_update(tsd);
+ uint64_t result = peak_event_max(tsd);
+ READ(result, uint64_t);
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static int
+thread_peak_reset_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp,
+ size_t newlen) {
+ int ret;
+ if (!config_stats) {
+ return ENOENT;
+ }
+ NEITHER_READ_NOR_WRITE();
+ peak_event_zero(tsd);
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static int
thread_prof_name_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret;
- if (!config_prof) {
+ if (!config_prof || !opt_prof) {
return ENOENT;
}
@@ -1950,8 +2400,12 @@ thread_prof_active_ctl(tsd_t *tsd, const size_t *mib,
return ENOENT;
}
- oldval = prof_thread_active_get(tsd);
+ oldval = opt_prof ? prof_thread_active_get(tsd) : false;
if (newp != NULL) {
+ if (!opt_prof) {
+ ret = ENOENT;
+ goto label_return;
+ }
if (newlen != sizeof(bool)) {
ret = EINVAL;
goto label_return;
@@ -1968,6 +2422,39 @@ label_return:
return ret;
}
+static int
+thread_idle_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp,
+ size_t newlen) {
+ int ret;
+
+ NEITHER_READ_NOR_WRITE();
+
+ if (tcache_available(tsd)) {
+ tcache_flush(tsd);
+ }
+ /*
+ * This heuristic is perhaps not the most well-considered. But it
+ * matches the only idling policy we have experience with in the status
+ * quo. Over time we should investigate more principled approaches.
+ */
+ if (opt_narenas > ncpus * 2) {
+ arena_t *arena = arena_choose(tsd, NULL);
+ if (arena != NULL) {
+ arena_decay(tsd_tsdn(tsd), arena, false, true);
+ }
+ /*
+ * The missing arena case is not actually an error; a thread
+ * might be idle before it associates itself to one. This is
+ * unusual, but not wrong.
+ */
+ }
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
/******************************************************************************/
static int
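/*
 * Illustration (not part of the patch): "thread.idle" is a pure action
 * node, so a caller passes no old or new data. A plausible use is a
 * thread-pool worker about to block on its queue.
 */
#include <jemalloc/jemalloc.h>

static void
worker_about_to_block(void) {
	mallctl("thread.idle", NULL, NULL, NULL, 0);
}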
@@ -1977,7 +2464,8 @@ tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
unsigned tcache_ind;
READONLY();
- if (tcaches_create(tsd, &tcache_ind)) {
+ VERIFY_READ(unsigned);
+ if (tcaches_create(tsd, b0get(), &tcache_ind)) {
ret = EFAULT;
goto label_return;
}
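/*
 * Illustration (not part of the patch): with VERIFY_READ in place,
 * "tcache.create" returns EINVAL unless the caller reads into exactly
 * sizeof(unsigned). A hedged end-to-end sketch:
 */
#include <jemalloc/jemalloc.h>

static void
explicit_tcache_roundtrip(void) {
	unsigned tci;
	size_t sz = sizeof(tci);	/* Exact size is now mandatory. */

	if (mallctl("tcache.create", &tci, &sz, NULL, 0) != 0) {
		return;
	}
	void *p = mallocx(64, MALLOCX_TCACHE(tci));
	if (p != NULL) {
		dallocx(p, MALLOCX_TCACHE(tci));
	}
	mallctl("tcache.destroy", NULL, NULL, &tci, sizeof(tci));
}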
@@ -1995,12 +2483,7 @@ tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
unsigned tcache_ind;
WRITEONLY();
- tcache_ind = UINT_MAX;
- WRITE(tcache_ind, unsigned);
- if (tcache_ind == UINT_MAX) {
- ret = EFAULT;
- goto label_return;
- }
+ ASSURED_WRITE(tcache_ind, unsigned);
tcaches_flush(tsd, tcache_ind);
ret = 0;
@@ -2015,12 +2498,7 @@ tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
unsigned tcache_ind;
WRITEONLY();
- tcache_ind = UINT_MAX;
- WRITE(tcache_ind, unsigned);
- if (tcache_ind == UINT_MAX) {
- ret = EFAULT;
- goto label_return;
- }
+ ASSURED_WRITE(tcache_ind, unsigned);
tcaches_destroy(tsd, tcache_ind);
ret = 0;
@@ -2105,8 +2583,7 @@ arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
int ret;
unsigned arena_ind;
- READONLY();
- WRITEONLY();
+ NEITHER_READ_NOR_WRITE();
MIB_UNSIGNED(arena_ind, 1);
arena_i_decay(tsd_tsdn(tsd), arena_ind, false);
@@ -2121,8 +2598,7 @@ arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
int ret;
unsigned arena_ind;
- READONLY();
- WRITEONLY();
+ NEITHER_READ_NOR_WRITE();
MIB_UNSIGNED(arena_ind, 1);
arena_i_decay(tsd_tsdn(tsd), arena_ind, true);
@@ -2137,8 +2613,7 @@ arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen,
arena_t **arena) {
int ret;
- READONLY();
- WRITEONLY();
+ NEITHER_READ_NOR_WRITE();
MIB_UNSIGNED(*arena_ind, 1);
*arena = arena_get(tsd_tsdn(tsd), *arena_ind, false);
@@ -2211,6 +2686,8 @@ arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
arena_t *arena;
ctl_arena_t *ctl_darena, *ctl_arena;
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+
ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
newp, newlen, &arena_ind, &arena);
if (ret != 0) {
@@ -2241,6 +2718,8 @@ arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
assert(ret == 0);
label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+
return ret;
}
@@ -2306,6 +2785,38 @@ label_return:
}
static int
+arena_i_oversize_threshold_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+
+ unsigned arena_ind;
+ MIB_UNSIGNED(arena_ind, 1);
+
+ arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
+ if (arena == NULL) {
+ ret = EFAULT;
+ goto label_return;
+ }
+
+ if (oldp != NULL && oldlenp != NULL) {
+ size_t oldval = atomic_load_zu(
+ &arena->pa_shard.pac.oversize_threshold, ATOMIC_RELAXED);
+ READ(oldval, size_t);
+ }
+ if (newp != NULL) {
+ if (newlen != sizeof(size_t)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ atomic_store_zu(&arena->pa_shard.pac.oversize_threshold,
+ *(size_t *)newp, ATOMIC_RELAXED);
+ }
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static int
arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) {
int ret;
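/*
 * Illustration (not part of the patch): the oversize_threshold handler
 * above reads and writes a size_t, so a caller can inspect or retune
 * arena 0 as below. Per the comment in arena_i_node, the name is
 * undocumented for now, so treat it as unstable.
 */
#include <jemalloc/jemalloc.h>

static void
retune_oversize_threshold(void) {
	size_t thresh;
	size_t sz = sizeof(thresh);
	mallctl("arena.0.oversize_threshold", &thresh, &sz, NULL, 0);

	size_t new_thresh = (size_t)4 << 20;	/* 4 MiB; illustrative only. */
	mallctl("arena.0.oversize_threshold", NULL, NULL, &new_thresh,
	    sizeof(new_thresh));
}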
@@ -2318,10 +2829,10 @@ arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
ret = EFAULT;
goto label_return;
}
+ extent_state_t state = dirty ? extent_state_dirty : extent_state_muzzy;
if (oldp != NULL && oldlenp != NULL) {
- size_t oldval = dirty ? arena_dirty_decay_ms_get(arena) :
- arena_muzzy_decay_ms_get(arena);
+ size_t oldval = arena_decay_ms_get(arena, state);
READ(oldval, ssize_t);
}
if (newp != NULL) {
@@ -2340,9 +2851,9 @@ arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
goto label_return;
}
}
- if (dirty ? arena_dirty_decay_ms_set(tsd_tsdn(tsd), arena,
- *(ssize_t *)newp) : arena_muzzy_decay_ms_set(tsd_tsdn(tsd),
- arena, *(ssize_t *)newp)) {
+
+ if (arena_decay_ms_set(tsd_tsdn(tsd), arena, state,
+ *(ssize_t *)newp)) {
ret = EFAULT;
goto label_return;
}
@@ -2385,15 +2896,18 @@ arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
goto label_return;
}
old_extent_hooks =
- (extent_hooks_t *)&extent_hooks_default;
+ (extent_hooks_t *)&ehooks_default_extent_hooks;
READ(old_extent_hooks, extent_hooks_t *);
if (newp != NULL) {
/* Initialize a new arena as a side effect. */
extent_hooks_t *new_extent_hooks
JEMALLOC_CC_SILENCE_INIT(NULL);
WRITE(new_extent_hooks, extent_hooks_t *);
+ arena_config_t config = arena_config_default;
+ config.extent_hooks = new_extent_hooks;
+
arena = arena_init(tsd_tsdn(tsd), arena_ind,
- new_extent_hooks);
+ &config);
if (arena == NULL) {
ret = EFAULT;
goto label_return;
@@ -2404,11 +2918,13 @@ arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
extent_hooks_t *new_extent_hooks
JEMALLOC_CC_SILENCE_INIT(NULL);
WRITE(new_extent_hooks, extent_hooks_t *);
- old_extent_hooks = extent_hooks_set(tsd, arena,
- new_extent_hooks);
+ old_extent_hooks = arena_set_extent_hooks(tsd,
+ arena, new_extent_hooks);
READ(old_extent_hooks, extent_hooks_t *);
} else {
- old_extent_hooks = extent_hooks_get(arena);
+ old_extent_hooks =
+ ehooks_get_extent_hooks_ptr(
+ arena_get_ehooks(arena));
READ(old_extent_hooks, extent_hooks_t *);
}
}
@@ -2493,10 +3009,6 @@ arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
READONLY();
- if (*oldlenp != sizeof(unsigned)) {
- ret = EINVAL;
- goto label_return;
- }
narenas = ctl_arenas->narenas;
READ(narenas, unsigned);
@@ -2582,14 +3094,14 @@ static int
arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
- extent_hooks_t *extent_hooks;
unsigned arena_ind;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
- extent_hooks = (extent_hooks_t *)&extent_hooks_default;
- WRITE(extent_hooks, extent_hooks_t *);
- if ((arena_ind = ctl_arena_init(tsd, extent_hooks)) == UINT_MAX) {
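+	/*
+	 * VERIFY_READ fails with EINVAL up front unless the caller supplied
+	 * an unsigned-sized buffer for the resulting arena index.
+	 */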
+ VERIFY_READ(unsigned);
+ arena_config_t config = arena_config_default;
+ WRITE(config.extent_hooks, extent_hooks_t *);
+ if ((arena_ind = ctl_arena_init(tsd, &config)) == UINT_MAX) {
ret = EAGAIN;
goto label_return;
}
@@ -2602,26 +3114,52 @@ label_return:
}
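+
+/*
+ * Usage sketch (illustrative): "arenas.create" still optionally accepts an
+ * extent_hooks_t * via newp and returns the new arena's index:
+ *
+ *	unsigned ind;
+ *	size_t sz = sizeof(ind);
+ *	mallctl("arenas.create", &ind, &sz, NULL, 0);
+ */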
static int
+experimental_arenas_create_ext_ctl(tsd_t *tsd,
+ const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ unsigned arena_ind;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+
+ arena_config_t config = arena_config_default;
+ VERIFY_READ(unsigned);
+ WRITE(config, arena_config_t);
+
+ if ((arena_ind = ctl_arena_init(tsd, &config)) == UINT_MAX) {
+ ret = EAGAIN;
+ goto label_return;
+ }
+ READ(arena_ind, unsigned);
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return ret;
+}
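+
+/*
+ * Usage sketch (illustrative; arena_config_t lives in jemalloc's internal
+ * headers, so this is only reachable from code built against them):
+ *
+ *	arena_config_t cfg = arena_config_default;
+ *	unsigned ind;
+ *	size_t sz = sizeof(ind);
+ *	mallctl("experimental.arenas_create_ext", &ind, &sz, &cfg,
+ *	    sizeof(cfg));
+ */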
+
+static int
arenas_lookup_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret;
unsigned arena_ind;
void *ptr;
- extent_t *extent;
+ edata_t *edata;
arena_t *arena;
ptr = NULL;
ret = EINVAL;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
WRITE(ptr, void *);
- extent = iealloc(tsd_tsdn(tsd), ptr);
- if (extent == NULL)
+ edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr);
+ if (edata == NULL) {
goto label_return;
+ }
- arena = extent_arena_get(extent);
- if (arena == NULL)
+ arena = arena_get_from_edata(edata);
+ if (arena == NULL) {
goto label_return;
+ }
arena_ind = arena_ind_get(arena);
READ(arena_ind, unsigned);
@@ -2646,6 +3184,10 @@ prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib,
}
if (newp != NULL) {
+ if (!opt_prof) {
+ ret = ENOENT;
+ goto label_return;
+ }
if (newlen != sizeof(bool)) {
ret = EINVAL;
goto label_return;
@@ -2653,7 +3195,8 @@ prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib,
oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
*(bool *)newp);
} else {
- oldval = prof_thread_active_init_get(tsd_tsdn(tsd));
+ oldval = opt_prof ? prof_thread_active_init_get(tsd_tsdn(tsd)) :
+ false;
}
READ(oldval, bool);
@@ -2669,7 +3212,8 @@ prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
bool oldval;
if (!config_prof) {
- return ENOENT;
+ ret = ENOENT;
+ goto label_return;
}
if (newp != NULL) {
@@ -2677,9 +3221,20 @@ prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
ret = EINVAL;
goto label_return;
}
- oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp);
+ bool val = *(bool *)newp;
+ if (!opt_prof) {
+ if (val) {
+ ret = ENOENT;
+ goto label_return;
+ } else {
+ /* No change needed (already off). */
+ oldval = false;
+ }
+ } else {
+ oldval = prof_active_set(tsd_tsdn(tsd), val);
+ }
} else {
- oldval = prof_active_get(tsd_tsdn(tsd));
+ oldval = opt_prof ? prof_active_get(tsd_tsdn(tsd)) : false;
}
READ(oldval, bool);
@@ -2694,7 +3249,7 @@ prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
int ret;
const char *filename = NULL;
- if (!config_prof) {
+ if (!config_prof || !opt_prof) {
return ENOENT;
}
@@ -2722,13 +3277,17 @@ prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
}
if (newp != NULL) {
+ if (!opt_prof) {
+ ret = ENOENT;
+ goto label_return;
+ }
if (newlen != sizeof(bool)) {
ret = EINVAL;
goto label_return;
}
oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
} else {
- oldval = prof_gdump_get(tsd_tsdn(tsd));
+ oldval = opt_prof ? prof_gdump_get(tsd_tsdn(tsd)) : false;
}
READ(oldval, bool);
@@ -2738,12 +3297,32 @@ label_return:
}
static int
+prof_prefix_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ const char *prefix = NULL;
+
+ if (!config_prof || !opt_prof) {
+ return ENOENT;
+ }
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ WRITEONLY();
+ WRITE(prefix, const char *);
+
+ ret = prof_prefix_set(tsd_tsdn(tsd), prefix) ? EFAULT : 0;
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return ret;
+}
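+
+/*
+ * Usage sketch (illustrative): "prof.prefix" is write-only and takes the
+ * new dump path prefix as a const char *:
+ *
+ *	const char *prefix = "/tmp/jeprof";
+ *	mallctl("prof.prefix", NULL, NULL, (void *)&prefix, sizeof(prefix));
+ */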
+
+static int
prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
size_t lg_sample = lg_prof_sample;
- if (!config_prof) {
+ if (!config_prof || !opt_prof) {
return ENOENT;
}
@@ -2770,7 +3349,7 @@ prof_log_start_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
const char *filename = NULL;
- if (!config_prof) {
+ if (!config_prof || !opt_prof) {
return ENOENT;
}
@@ -2790,7 +3369,7 @@ label_return:
static int
prof_log_stop_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) {
- if (!config_prof) {
+ if (!config_prof || !opt_prof) {
return ENOENT;
}
@@ -2801,6 +3380,87 @@ prof_log_stop_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
return 0;
}
+static int
+experimental_hooks_prof_backtrace_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+
+ if (oldp == NULL && newp == NULL) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ if (oldp != NULL) {
+ prof_backtrace_hook_t old_hook =
+ prof_backtrace_hook_get();
+ READ(old_hook, prof_backtrace_hook_t);
+ }
+ if (newp != NULL) {
+ if (!opt_prof) {
+ ret = ENOENT;
+ goto label_return;
+ }
+ prof_backtrace_hook_t new_hook JEMALLOC_CC_SILENCE_INIT(NULL);
+ WRITE(new_hook, prof_backtrace_hook_t);
+ if (new_hook == NULL) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ prof_backtrace_hook_set(new_hook);
+ }
+ ret = 0;
+label_return:
+ return ret;
+}
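+
+/*
+ * Usage sketch (illustrative): reading back the installed backtrace hook;
+ * installing one requires a function matching prof_backtrace_hook_t (see
+ * prof_hook.h):
+ *
+ *	prof_backtrace_hook_t hook;
+ *	size_t sz = sizeof(hook);
+ *	mallctl("experimental.hooks.prof_backtrace", &hook, &sz, NULL, 0);
+ */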
+
+static int
+experimental_hooks_prof_dump_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+
+ if (oldp == NULL && newp == NULL) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ if (oldp != NULL) {
+ prof_dump_hook_t old_hook =
+ prof_dump_hook_get();
+ READ(old_hook, prof_dump_hook_t);
+ }
+ if (newp != NULL) {
+ if (!opt_prof) {
+ ret = ENOENT;
+ goto label_return;
+ }
+ prof_dump_hook_t new_hook JEMALLOC_CC_SILENCE_INIT(NULL);
+ WRITE(new_hook, prof_dump_hook_t);
+ prof_dump_hook_set(new_hook);
+ }
+ ret = 0;
+label_return:
+ return ret;
+}
+
+/* For integration test purposes only; no plan to move out of experimental. */
+static int
+experimental_hooks_safety_check_abort_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+
+ WRITEONLY();
+ if (newp != NULL) {
+ if (newlen != sizeof(safety_check_abort_hook_t)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ safety_check_abort_hook_t hook JEMALLOC_CC_SILENCE_INIT(NULL);
+ WRITE(hook, safety_check_abort_hook_t);
+ safety_check_set_abort(hook);
+ }
+ ret = 0;
+label_return:
+ return ret;
+}
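+
+/*
+ * Usage sketch (illustrative; assumes the hook receives the failure message,
+ * per safety_check.h):
+ *
+ *	static void
+ *	abort_hook(const char *msg) {
+ *		... log msg, then abort ...
+ *	}
+ *
+ *	safety_check_abort_hook_t hook = &abort_hook;
+ *	mallctl("experimental.hooks.safety_check_abort", NULL, NULL, &hook,
+ *	    sizeof(hook));
+ */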
+
/******************************************************************************/
CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t)
@@ -2818,6 +3478,9 @@ CTL_RO_CGEN(config_stats, stats_background_thread_num_runs,
CTL_RO_CGEN(config_stats, stats_background_thread_run_interval,
nstime_ns(&ctl_stats->background_thread.run_interval), uint64_t)
+CTL_RO_CGEN(config_stats, stats_zero_reallocs,
+ atomic_load_zu(&zero_realloc_count, ATOMIC_RELAXED), size_t)
+
CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *)
CTL_RO_GEN(stats_arenas_i_dirty_decay_ms, arenas_i(mib[2])->dirty_decay_ms,
ssize_t)
@@ -2830,55 +3493,61 @@ CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t)
CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.mapped, ATOMIC_RELAXED),
- size_t)
+ arenas_i(mib[2])->astats->astats.mapped, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.retained, ATOMIC_RELAXED),
- size_t)
+ arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.retained, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_extent_avail,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.extent_avail,
- ATOMIC_RELAXED),
- size_t)
+ arenas_i(mib[2])->astats->astats.pa_shard_stats.edata_avail, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.decay_dirty.npurge), uint64_t)
+ locked_read_u64_unsynchronized(
+ &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge),
+ uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.decay_dirty.nmadvise), uint64_t)
+ locked_read_u64_unsynchronized(
+ &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise),
+ uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.decay_dirty.purged), uint64_t)
+ locked_read_u64_unsynchronized(
+ &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.purged),
+ uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.decay_muzzy.npurge), uint64_t)
+ locked_read_u64_unsynchronized(
+ &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge),
+ uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.decay_muzzy.nmadvise), uint64_t)
+ locked_read_u64_unsynchronized(
+ &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise),
+ uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.decay_muzzy.purged), uint64_t)
+ locked_read_u64_unsynchronized(
+ &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged),
+ uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_base,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.base, ATOMIC_RELAXED),
+ arenas_i(mib[2])->astats->astats.base,
size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_internal,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED),
size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_thp,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.metadata_thp,
- ATOMIC_RELAXED), size_t)
+ arenas_i(mib[2])->astats->astats.metadata_thp, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.tcache_bytes,
- ATOMIC_RELAXED), size_t)
+ arenas_i(mib[2])->astats->astats.tcache_bytes, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_stashed_bytes,
+ arenas_i(mib[2])->astats->astats.tcache_stashed_bytes, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_resident,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.resident, ATOMIC_RELAXED),
+ arenas_i(mib[2])->astats->astats.resident,
size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_abandoned_vm,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.abandoned_vm,
+ atomic_load_zu(
+ &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.abandoned_vm,
ATOMIC_RELAXED), size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_sec_bytes,
+ arenas_i(mib[2])->astats->secstats.bytes, size_t)
+
CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
arenas_i(mib[2])->astats->allocated_small, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
@@ -2892,27 +3561,21 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_small_nfills,
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nflushes,
arenas_i(mib[2])->astats->nflushes_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.allocated_large,
- ATOMIC_RELAXED), size_t)
+ arenas_i(mib[2])->astats->astats.allocated_large, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t)
+ arenas_i(mib[2])->astats->astats.nmalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.ndalloc_large), uint64_t)
+ arenas_i(mib[2])->astats->astats.ndalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.nrequests_large), uint64_t)
+ arenas_i(mib[2])->astats->astats.nrequests_large, uint64_t)
/*
* Note: "nmalloc_large" here instead of "nfills" in the read. This is
* intentional (large has no batch fill).
*/
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nfills,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t)
+ arenas_i(mib[2])->astats->astats.nmalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nflushes,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.nflushes_large), uint64_t)
+ arenas_i(mib[2])->astats->astats.nflushes_large, uint64_t)
 /* Lock-profiling related APIs below. */
#define RO_MUTEX_CTL_GEN(n, l) \
@@ -2972,9 +3635,13 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
}
if (config_prof && opt_prof) {
MUTEX_PROF_RESET(bt2gctx_mtx);
+ MUTEX_PROF_RESET(tdatas_mtx);
+ MUTEX_PROF_RESET(prof_dump_mtx);
+ MUTEX_PROF_RESET(prof_recent_alloc_mtx);
+ MUTEX_PROF_RESET(prof_recent_dump_mtx);
+ MUTEX_PROF_RESET(prof_stats_mtx);
}
-
 	/* Per-arena mutexes. */
unsigned n = narenas_total_get();
@@ -2984,18 +3651,18 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
continue;
}
MUTEX_PROF_RESET(arena->large_mtx);
- MUTEX_PROF_RESET(arena->extent_avail_mtx);
- MUTEX_PROF_RESET(arena->extents_dirty.mtx);
- MUTEX_PROF_RESET(arena->extents_muzzy.mtx);
- MUTEX_PROF_RESET(arena->extents_retained.mtx);
- MUTEX_PROF_RESET(arena->decay_dirty.mtx);
- MUTEX_PROF_RESET(arena->decay_muzzy.mtx);
+ MUTEX_PROF_RESET(arena->pa_shard.edata_cache.mtx);
+ MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_dirty.mtx);
+ MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_muzzy.mtx);
+ MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_retained.mtx);
+ MUTEX_PROF_RESET(arena->pa_shard.pac.decay_dirty.mtx);
+ MUTEX_PROF_RESET(arena->pa_shard.pac.decay_muzzy.mtx);
MUTEX_PROF_RESET(arena->tcache_ql_mtx);
MUTEX_PROF_RESET(arena->base->mtx);
- for (szind_t i = 0; i < SC_NBINS; i++) {
- for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
- bin_t *bin = &arena->bins[i].bin_shards[j];
+ for (szind_t j = 0; j < SC_NBINS; j++) {
+ for (unsigned k = 0; k < bin_infos[j].n_shards; k++) {
+ bin_t *bin = arena_get_bin(arena, j, k);
MUTEX_PROF_RESET(bin->lock);
}
}
@@ -3005,25 +3672,25 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
}
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
- arenas_i(mib[2])->astats->bstats[mib[4]].nmalloc, uint64_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
- arenas_i(mib[2])->astats->bstats[mib[4]].ndalloc, uint64_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
- arenas_i(mib[2])->astats->bstats[mib[4]].nrequests, uint64_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nrequests, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
- arenas_i(mib[2])->astats->bstats[mib[4]].curregs, size_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.curregs, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nfills,
- arenas_i(mib[2])->astats->bstats[mib[4]].nfills, uint64_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nfills, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nflushes,
- arenas_i(mib[2])->astats->bstats[mib[4]].nflushes, uint64_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nflushes, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs,
- arenas_i(mib[2])->astats->bstats[mib[4]].nslabs, uint64_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nslabs, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs,
- arenas_i(mib[2])->astats->bstats[mib[4]].reslabs, uint64_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.reslabs, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs,
- arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.curslabs, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nonfull_slabs,
- arenas_i(mib[2])->astats->bstats[mib[4]].nonfull_slabs, size_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nonfull_slabs, size_t)
static const ctl_named_node_t *
stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib,
@@ -3035,13 +3702,13 @@ stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib,
}
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc,
- ctl_arena_stats_read_u64(
+ locked_read_u64_unsynchronized(
&arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc,
- ctl_arena_stats_read_u64(
+ locked_read_u64_unsynchronized(
&arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests,
- ctl_arena_stats_read_u64(
+ locked_read_u64_unsynchronized(
&arenas_i(mib[2])->astats->lstats[mib[4]].nrequests), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t)
@@ -3056,29 +3723,17 @@ stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib,
}
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_ndirty,
- atomic_load_zu(
- &arenas_i(mib[2])->astats->estats[mib[4]].ndirty,
- ATOMIC_RELAXED), size_t);
+ arenas_i(mib[2])->astats->estats[mib[4]].ndirty, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nmuzzy,
- atomic_load_zu(
- &arenas_i(mib[2])->astats->estats[mib[4]].nmuzzy,
- ATOMIC_RELAXED), size_t);
+ arenas_i(mib[2])->astats->estats[mib[4]].nmuzzy, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nretained,
- atomic_load_zu(
- &arenas_i(mib[2])->astats->estats[mib[4]].nretained,
- ATOMIC_RELAXED), size_t);
+ arenas_i(mib[2])->astats->estats[mib[4]].nretained, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_dirty_bytes,
- atomic_load_zu(
- &arenas_i(mib[2])->astats->estats[mib[4]].dirty_bytes,
- ATOMIC_RELAXED), size_t);
+ arenas_i(mib[2])->astats->estats[mib[4]].dirty_bytes, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_muzzy_bytes,
- atomic_load_zu(
- &arenas_i(mib[2])->astats->estats[mib[4]].muzzy_bytes,
- ATOMIC_RELAXED), size_t);
+ arenas_i(mib[2])->astats->estats[mib[4]].muzzy_bytes, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_retained_bytes,
- atomic_load_zu(
- &arenas_i(mib[2])->astats->estats[mib[4]].retained_bytes,
- ATOMIC_RELAXED), size_t);
+ arenas_i(mib[2])->astats->estats[mib[4]].retained_bytes, size_t);
static const ctl_named_node_t *
stats_arenas_i_extents_j_index(tsdn_t *tsdn, const size_t *mib,
@@ -3089,6 +3744,82 @@ stats_arenas_i_extents_j_index(tsdn_t *tsdn, const size_t *mib,
return super_stats_arenas_i_extents_j_node;
}
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_npurge_passes,
+ arenas_i(mib[2])->astats->hpastats.nonderived_stats.npurge_passes, uint64_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_npurges,
+ arenas_i(mib[2])->astats->hpastats.nonderived_stats.npurges, uint64_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nhugifies,
+ arenas_i(mib[2])->astats->hpastats.nonderived_stats.nhugifies, uint64_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_ndehugifies,
+ arenas_i(mib[2])->astats->hpastats.nonderived_stats.ndehugifies, uint64_t);
+
+/* Full, nonhuge */
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].npageslabs,
+ size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_nactive_nonhuge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].nactive, size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_ndirty_nonhuge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].ndirty, size_t);
+
+/* Full, huge */
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].npageslabs,
+ size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_nactive_huge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].nactive, size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_ndirty_huge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].ndirty, size_t);
+
+/* Empty, nonhuge */
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_npageslabs_nonhuge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].npageslabs,
+ size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_nactive_nonhuge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].nactive, size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_ndirty_nonhuge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].ndirty, size_t);
+
+/* Empty, huge */
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_npageslabs_huge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].npageslabs,
+ size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_nactive_huge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].nactive, size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_ndirty_huge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].ndirty, size_t);
+
+/* Nonfull, nonhuge */
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_nonhuge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0].npageslabs,
+ size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_nonhuge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0].nactive,
+ size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0].ndirty,
+ size_t);
+
+/* Nonfull, huge */
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_huge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1].npageslabs,
+ size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_huge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1].nactive,
+ size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1].ndirty,
+ size_t);
+
+static const ctl_named_node_t *
+stats_arenas_i_hpa_shard_nonfull_slabs_j_index(tsdn_t *tsdn, const size_t *mib,
+ size_t miblen, size_t j) {
+ if (j >= PSSET_NPSIZES) {
+ return NULL;
+ }
+ return super_stats_arenas_i_hpa_shard_nonfull_slabs_j_node;
+}
+
static bool
ctl_arenas_i_verify(size_t i) {
size_t a = arenas_i2a_impl(i, true, true);
@@ -3161,6 +3892,32 @@ label_return:
return ret;
}
+static int
+experimental_thread_activity_callback_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+
+ if (!config_stats) {
+ return ENOENT;
+ }
+
+ activity_callback_thunk_t t_old = tsd_activity_callback_thunk_get(tsd);
+ READ(t_old, activity_callback_thunk_t);
+
+ if (newp != NULL) {
+ /*
+ * This initialization is unnecessary. If it's omitted, though,
+ * clang gets confused and warns on the subsequent use of t_new.
+ */
+ activity_callback_thunk_t t_new = {NULL, NULL};
+ WRITE(t_new, activity_callback_thunk_t);
+ tsd_activity_callback_thunk_set(tsd, t_new);
+ }
+ ret = 0;
+label_return:
+ return ret;
+}
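+
+/*
+ * Usage sketch (illustrative): reading the current thunk, which pairs a
+ * callback with an opaque context pointer (see activity_callback.h):
+ *
+ *	activity_callback_thunk_t t;
+ *	size_t sz = sizeof(t);
+ *	mallctl("experimental.thread.activity_callback", &t, &sz, NULL, 0);
+ */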
+
/*
* Output six memory utilization entries for an input pointer, the first one of
* type (void *) and the remaining five of type size_t, describing the following
@@ -3178,7 +3935,8 @@ label_return:
* otherwise their values are undefined.
*
* This API is mainly intended for small class allocations, where extents are
- * used as slab.
+ * used as slabs. Note that if the bin the extent belongs to is completely
+ * full, "(a)" will be NULL.
*
* In case of large class allocations, "(a)" will be NULL, and "(e)" and "(f)"
* will be zero (if stats are enabled; otherwise undefined). The other three
@@ -3232,11 +3990,11 @@ experimental_utilization_query_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
- assert(sizeof(extent_util_stats_verbose_t)
+ assert(sizeof(inspect_extent_util_stats_verbose_t)
== sizeof(void *) + sizeof(size_t) * 5);
if (oldp == NULL || oldlenp == NULL
- || *oldlenp != sizeof(extent_util_stats_verbose_t)
+ || *oldlenp != sizeof(inspect_extent_util_stats_verbose_t)
|| newp == NULL) {
ret = EINVAL;
goto label_return;
@@ -3244,9 +4002,9 @@ experimental_utilization_query_ctl(tsd_t *tsd, const size_t *mib,
void *ptr = NULL;
WRITE(ptr, void *);
- extent_util_stats_verbose_t *util_stats
- = (extent_util_stats_verbose_t *)oldp;
- extent_util_stats_verbose_get(tsd_tsdn(tsd), ptr,
+ inspect_extent_util_stats_verbose_t *util_stats
+ = (inspect_extent_util_stats_verbose_t *)oldp;
+ inspect_extent_util_stats_verbose_get(tsd_tsdn(tsd), ptr,
&util_stats->nfree, &util_stats->nregs, &util_stats->size,
&util_stats->bin_nfree, &util_stats->bin_nregs,
&util_stats->slabcur_addr);
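+	/*
+	 * Usage sketch (illustrative): oldp must point at an
+	 * inspect_extent_util_stats_verbose_t, newp at the pointer to
+	 * inspect:
+	 *
+	 *	void *p = malloc(32);
+	 *	inspect_extent_util_stats_verbose_t st;
+	 *	size_t sz = sizeof(st);
+	 *	mallctl("experimental.utilization.query", &st, &sz, &p,
+	 *	    sizeof(p));
+	 */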
@@ -3357,21 +4115,22 @@ experimental_utilization_batch_query_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
- assert(sizeof(extent_util_stats_t) == sizeof(size_t) * 3);
+ assert(sizeof(inspect_extent_util_stats_t) == sizeof(size_t) * 3);
const size_t len = newlen / sizeof(const void *);
if (oldp == NULL || oldlenp == NULL || newp == NULL || newlen == 0
|| newlen != len * sizeof(const void *)
- || *oldlenp != len * sizeof(extent_util_stats_t)) {
+ || *oldlenp != len * sizeof(inspect_extent_util_stats_t)) {
ret = EINVAL;
goto label_return;
}
void **ptrs = (void **)newp;
- extent_util_stats_t *util_stats = (extent_util_stats_t *)oldp;
+ inspect_extent_util_stats_t *util_stats =
+ (inspect_extent_util_stats_t *)oldp;
size_t i;
for (i = 0; i < len; ++i) {
- extent_util_stats_get(tsd_tsdn(tsd), ptrs[i],
+ inspect_extent_util_stats_get(tsd_tsdn(tsd), ptrs[i],
&util_stats[i].nfree, &util_stats[i].nregs,
&util_stats[i].size);
}
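+	/*
+	 * Usage sketch (illustrative): newp carries an array of pointers,
+	 * oldp a parallel array of inspect_extent_util_stats_t:
+	 *
+	 *	void *ptrs[2] = { p0, p1 };
+	 *	inspect_extent_util_stats_t st[2];
+	 *	size_t sz = sizeof(st);
+	 *	mallctl("experimental.utilization.batch_query", st, &sz,
+	 *	    ptrs, sizeof(ptrs));
+	 */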
@@ -3420,7 +4179,7 @@ experimental_arenas_i_pactivep_ctl(tsd_t *tsd, const size_t *mib,
#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS) || \
defined(JEMALLOC_GCC_SYNC_ATOMICS) || defined(_MSC_VER)
/* Expose the underlying counter for fast read. */
- pactivep = (size_t *)&(arena->nactive.repr);
+ pactivep = (size_t *)&(arena->pa_shard.nactive.repr);
READ(pactivep, size_t *);
ret = 0;
#else
@@ -3433,3 +4192,223 @@ label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return ret;
}
+
+static int
+experimental_prof_recent_alloc_max_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+
+ if (!(config_prof && opt_prof)) {
+ ret = ENOENT;
+ goto label_return;
+ }
+
+ ssize_t old_max;
+ if (newp != NULL) {
+ if (newlen != sizeof(ssize_t)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ ssize_t max = *(ssize_t *)newp;
+ if (max < -1) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ old_max = prof_recent_alloc_max_ctl_write(tsd, max);
+ } else {
+ old_max = prof_recent_alloc_max_ctl_read();
+ }
+ READ(old_max, ssize_t);
+
+ ret = 0;
+
+label_return:
+ return ret;
+}
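+
+/*
+ * Usage sketch (illustrative): read the current cap, or write an ssize_t
+ * (-1 meaning unlimited) to resize the recent-allocation record list:
+ *
+ *	ssize_t max = 1000;
+ *	mallctl("experimental.prof_recent.alloc_max", NULL, NULL, &max,
+ *	    sizeof(max));
+ */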
+
+typedef struct write_cb_packet_s write_cb_packet_t;
+struct write_cb_packet_s {
+ write_cb_t *write_cb;
+ void *cbopaque;
+};
+
+static int
+experimental_prof_recent_alloc_dump_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+
+ if (!(config_prof && opt_prof)) {
+ ret = ENOENT;
+ goto label_return;
+ }
+
+ assert(sizeof(write_cb_packet_t) == sizeof(void *) * 2);
+
+ WRITEONLY();
+ write_cb_packet_t write_cb_packet;
+ ASSURED_WRITE(write_cb_packet, write_cb_packet_t);
+
+ prof_recent_alloc_dump(tsd, write_cb_packet.write_cb,
+ write_cb_packet.cbopaque);
+
+ ret = 0;
+
+label_return:
+ return ret;
+}
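+
+/*
+ * Usage sketch (illustrative): the write payload pairs a write callback
+ * with its opaque argument, the same callback shape malloc_stats_print()
+ * uses:
+ *
+ *	static void
+ *	write_cb(void *opaque, const char *s) {
+ *		fputs(s, (FILE *)opaque);
+ *	}
+ *
+ *	write_cb_packet_t pkt = { write_cb, stderr };
+ *	mallctl("experimental.prof_recent.alloc_dump", NULL, NULL, &pkt,
+ *	    sizeof(pkt));
+ */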
+
+typedef struct batch_alloc_packet_s batch_alloc_packet_t;
+struct batch_alloc_packet_s {
+ void **ptrs;
+ size_t num;
+ size_t size;
+ int flags;
+};
+
+static int
+experimental_batch_alloc_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+
+ VERIFY_READ(size_t);
+
+ batch_alloc_packet_t batch_alloc_packet;
+ ASSURED_WRITE(batch_alloc_packet, batch_alloc_packet_t);
+ size_t filled = batch_alloc(batch_alloc_packet.ptrs,
+ batch_alloc_packet.num, batch_alloc_packet.size,
+ batch_alloc_packet.flags);
+ READ(filled, size_t);
+
+ ret = 0;
+
+label_return:
+ return ret;
+}
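+
+/*
+ * Usage sketch (illustrative): the packet describes the request; oldp
+ * receives the number of slots actually filled:
+ *
+ *	void *ptrs[8];
+ *	batch_alloc_packet_t pkt = { ptrs, 8, 64, 0 };
+ *	size_t filled, sz = sizeof(filled);
+ *	mallctl("experimental.batch_alloc", &filled, &sz, &pkt, sizeof(pkt));
+ */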
+
+static int
+prof_stats_bins_i_live_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ unsigned binind;
+ prof_stats_t stats;
+
+ if (!(config_prof && opt_prof && opt_prof_stats)) {
+ ret = ENOENT;
+ goto label_return;
+ }
+
+ READONLY();
+ MIB_UNSIGNED(binind, 3);
+ if (binind >= SC_NBINS) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ prof_stats_get_live(tsd, (szind_t)binind, &stats);
+ READ(stats, prof_stats_t);
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static int
+prof_stats_bins_i_accum_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ unsigned binind;
+ prof_stats_t stats;
+
+ if (!(config_prof && opt_prof && opt_prof_stats)) {
+ ret = ENOENT;
+ goto label_return;
+ }
+
+ READONLY();
+ MIB_UNSIGNED(binind, 3);
+ if (binind >= SC_NBINS) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ prof_stats_get_accum(tsd, (szind_t)binind, &stats);
+ READ(stats, prof_stats_t);
+
+ ret = 0;
+label_return:
+ return ret;
+}
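+
+/*
+ * Usage sketch (illustrative): the bins and (below) lextents variants both
+ * return a prof_stats_t snapshot for the requested size class:
+ *
+ *	prof_stats_t st;
+ *	size_t sz = sizeof(st);
+ *	mallctl("prof.stats.bins.0.live", &st, &sz, NULL, 0);
+ */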
+
+static const ctl_named_node_t *
+prof_stats_bins_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
+ size_t i) {
+ if (!(config_prof && opt_prof && opt_prof_stats)) {
+ return NULL;
+ }
+ if (i >= SC_NBINS) {
+ return NULL;
+ }
+ return super_prof_stats_bins_i_node;
+}
+
+static int
+prof_stats_lextents_i_live_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ unsigned lextent_ind;
+ prof_stats_t stats;
+
+ if (!(config_prof && opt_prof && opt_prof_stats)) {
+ ret = ENOENT;
+ goto label_return;
+ }
+
+ READONLY();
+ MIB_UNSIGNED(lextent_ind, 3);
+ if (lextent_ind >= SC_NSIZES - SC_NBINS) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ prof_stats_get_live(tsd, (szind_t)(lextent_ind + SC_NBINS), &stats);
+ READ(stats, prof_stats_t);
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static int
+prof_stats_lextents_i_accum_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ unsigned lextent_ind;
+ prof_stats_t stats;
+
+ if (!(config_prof && opt_prof && opt_prof_stats)) {
+ ret = ENOENT;
+ goto label_return;
+ }
+
+ READONLY();
+ MIB_UNSIGNED(lextent_ind, 3);
+ if (lextent_ind >= SC_NSIZES - SC_NBINS) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ prof_stats_get_accum(tsd, (szind_t)(lextent_ind + SC_NBINS), &stats);
+ READ(stats, prof_stats_t);
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static const ctl_named_node_t *
+prof_stats_lextents_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
+ size_t i) {
+ if (!(config_prof && opt_prof && opt_prof_stats)) {
+ return NULL;
+ }
+ if (i >= SC_NSIZES - SC_NBINS) {
+ return NULL;
+ }
+ return super_prof_stats_lextents_i_node;
+}