summaryrefslogtreecommitdiff
path: root/deps/jemalloc/src/stats.c
diff options
context:
space:
mode:
Diffstat (limited to 'deps/jemalloc/src/stats.c')
-rw-r--r--deps/jemalloc/src/stats.c794
1 files changed, 655 insertions, 139 deletions
diff --git a/deps/jemalloc/src/stats.c b/deps/jemalloc/src/stats.c
index 118e05d29..efc70fd3c 100644
--- a/deps/jemalloc/src/stats.c
+++ b/deps/jemalloc/src/stats.c
@@ -1,12 +1,13 @@
-#define JEMALLOC_STATS_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/emitter.h"
+#include "jemalloc/internal/fxp.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_prof.h"
+#include "jemalloc/internal/prof_stats.h"
const char *global_mutex_names[mutex_prof_num_global_mutexes] = {
#define OP(mtx) #mtx,
@@ -25,22 +26,28 @@ const char *arena_mutex_names[mutex_prof_num_arena_mutexes] = {
xmallctl(n, (void *)v, &sz, NULL, 0); \
} while (0)
-#define CTL_M2_GET(n, i, v, t) do { \
- size_t mib[CTL_MAX_DEPTH]; \
- size_t miblen = sizeof(mib) / sizeof(size_t); \
+#define CTL_LEAF_PREPARE(mib, miblen, name) do { \
+ assert(miblen < CTL_MAX_DEPTH); \
+ size_t miblen_new = CTL_MAX_DEPTH; \
+ xmallctlmibnametomib(mib, miblen, name, &miblen_new); \
+ assert(miblen_new > miblen); \
+} while (0)
+
+#define CTL_LEAF(mib, miblen, leaf, v, t) do { \
+ assert(miblen < CTL_MAX_DEPTH); \
+ size_t miblen_new = CTL_MAX_DEPTH; \
size_t sz = sizeof(t); \
- xmallctlnametomib(n, mib, &miblen); \
- mib[2] = (i); \
- xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \
+ xmallctlbymibname(mib, miblen, leaf, &miblen_new, (void *)v, \
+ &sz, NULL, 0); \
+ assert(miblen_new == miblen + 1); \
} while (0)
-#define CTL_M2_M4_GET(n, i, j, v, t) do { \
+#define CTL_M2_GET(n, i, v, t) do { \
size_t mib[CTL_MAX_DEPTH]; \
size_t miblen = sizeof(mib) / sizeof(size_t); \
size_t sz = sizeof(t); \
xmallctlnametomib(n, mib, &miblen); \
mib[2] = (i); \
- mib[4] = (j); \
xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \
} while (0)
@@ -50,6 +57,13 @@ const char *arena_mutex_names[mutex_prof_num_arena_mutexes] = {
bool opt_stats_print = false;
char opt_stats_print_opts[stats_print_tot_num_options+1] = "";
+int64_t opt_stats_interval = STATS_INTERVAL_DEFAULT;
+char opt_stats_interval_opts[stats_print_tot_num_options+1] = "";
+
+static counter_accum_t stats_interval_accumulated;
+/* Per thread batch accum size for stats_interval. */
+static uint64_t stats_interval_accum_batch;
+
/******************************************************************************/
static uint64_t
@@ -91,13 +105,6 @@ get_rate_str(uint64_t dividend, uint64_t divisor, char str[6]) {
return false;
}
-#define MUTEX_CTL_STR_MAX_LENGTH 128
-static void
-gen_mutex_ctl_str(char *str, size_t buf_len, const char *prefix,
- const char *mutex, const char *counter) {
- malloc_snprintf(str, buf_len, "stats.%s.%s.%s", prefix, mutex, counter);
-}
-
static void
mutex_stats_init_cols(emitter_row_t *row, const char *table_name,
emitter_col_t *name,
@@ -118,7 +125,7 @@ mutex_stats_init_cols(emitter_row_t *row, const char *table_name,
#define WIDTH_uint32_t 12
#define WIDTH_uint64_t 16
-#define OP(counter, counter_type, human, derived, base_counter) \
+#define OP(counter, counter_type, human, derived, base_counter) \
col = &col_##counter_type[k_##counter_type]; \
++k_##counter_type; \
emitter_col_init(col, row); \
@@ -134,27 +141,31 @@ mutex_stats_init_cols(emitter_row_t *row, const char *table_name,
}
static void
-mutex_stats_read_global(const char *name, emitter_col_t *col_name,
+mutex_stats_read_global(size_t mib[], size_t miblen, const char *name,
+ emitter_col_t *col_name,
emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters],
emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters],
uint64_t uptime) {
- char cmd[MUTEX_CTL_STR_MAX_LENGTH];
+ CTL_LEAF_PREPARE(mib, miblen, name);
+ size_t miblen_name = miblen + 1;
col_name->str_val = name;
emitter_col_t *dst;
#define EMITTER_TYPE_uint32_t emitter_type_uint32
#define EMITTER_TYPE_uint64_t emitter_type_uint64
-#define OP(counter, counter_type, human, derived, base_counter) \
+#define OP(counter, counter_type, human, derived, base_counter) \
dst = &col_##counter_type[mutex_counter_##counter]; \
dst->type = EMITTER_TYPE_##counter_type; \
if (!derived) { \
- gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \
- "mutexes", name, #counter); \
- CTL_GET(cmd, (counter_type *)&dst->bool_val, counter_type); \
- } else { \
- emitter_col_t *base = &col_##counter_type[mutex_counter_##base_counter]; \
- dst->counter_type##_val = rate_per_second(base->counter_type##_val, uptime); \
+ CTL_LEAF(mib, miblen_name, #counter, \
+ (counter_type *)&dst->bool_val, counter_type); \
+ } else { \
+ emitter_col_t *base = \
+ &col_##counter_type[mutex_counter_##base_counter]; \
+ dst->counter_type##_val = \
+ (counter_type)rate_per_second( \
+ base->counter_type##_val, uptime); \
}
MUTEX_PROF_COUNTERS
#undef OP
@@ -163,28 +174,31 @@ mutex_stats_read_global(const char *name, emitter_col_t *col_name,
}
static void
-mutex_stats_read_arena(unsigned arena_ind, mutex_prof_arena_ind_t mutex_ind,
- const char *name, emitter_col_t *col_name,
+mutex_stats_read_arena(size_t mib[], size_t miblen, const char *name,
+ emitter_col_t *col_name,
emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters],
emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters],
uint64_t uptime) {
- char cmd[MUTEX_CTL_STR_MAX_LENGTH];
+ CTL_LEAF_PREPARE(mib, miblen, name);
+ size_t miblen_name = miblen + 1;
col_name->str_val = name;
emitter_col_t *dst;
#define EMITTER_TYPE_uint32_t emitter_type_uint32
#define EMITTER_TYPE_uint64_t emitter_type_uint64
-#define OP(counter, counter_type, human, derived, base_counter) \
+#define OP(counter, counter_type, human, derived, base_counter) \
dst = &col_##counter_type[mutex_counter_##counter]; \
dst->type = EMITTER_TYPE_##counter_type; \
- if (!derived) { \
- gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \
- "arenas.0.mutexes", arena_mutex_names[mutex_ind], #counter);\
- CTL_M2_GET(cmd, arena_ind, (counter_type *)&dst->bool_val, counter_type); \
- } else { \
- emitter_col_t *base = &col_##counter_type[mutex_counter_##base_counter]; \
- dst->counter_type##_val = rate_per_second(base->counter_type##_val, uptime); \
+ if (!derived) { \
+ CTL_LEAF(mib, miblen_name, #counter, \
+ (counter_type *)&dst->bool_val, counter_type); \
+ } else { \
+ emitter_col_t *base = \
+ &col_##counter_type[mutex_counter_##base_counter]; \
+ dst->counter_type##_val = \
+ (counter_type)rate_per_second( \
+ base->counter_type##_val, uptime); \
}
MUTEX_PROF_COUNTERS
#undef OP
@@ -193,26 +207,29 @@ mutex_stats_read_arena(unsigned arena_ind, mutex_prof_arena_ind_t mutex_ind,
}
static void
-mutex_stats_read_arena_bin(unsigned arena_ind, unsigned bin_ind,
+mutex_stats_read_arena_bin(size_t mib[], size_t miblen,
emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters],
emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters],
uint64_t uptime) {
- char cmd[MUTEX_CTL_STR_MAX_LENGTH];
+ CTL_LEAF_PREPARE(mib, miblen, "mutex");
+ size_t miblen_mutex = miblen + 1;
+
emitter_col_t *dst;
#define EMITTER_TYPE_uint32_t emitter_type_uint32
#define EMITTER_TYPE_uint64_t emitter_type_uint64
-#define OP(counter, counter_type, human, derived, base_counter) \
+#define OP(counter, counter_type, human, derived, base_counter) \
dst = &col_##counter_type[mutex_counter_##counter]; \
dst->type = EMITTER_TYPE_##counter_type; \
- if (!derived) { \
- gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \
- "arenas.0.bins.0","mutex", #counter); \
- CTL_M2_M4_GET(cmd, arena_ind, bin_ind, \
- (counter_type *)&dst->bool_val, counter_type); \
- } else { \
- emitter_col_t *base = &col_##counter_type[mutex_counter_##base_counter]; \
- dst->counter_type##_val = rate_per_second(base->counter_type##_val, uptime); \
+ if (!derived) { \
+ CTL_LEAF(mib, miblen_mutex, #counter, \
+ (counter_type *)&dst->bool_val, counter_type); \
+ } else { \
+ emitter_col_t *base = \
+ &col_##counter_type[mutex_counter_##base_counter]; \
+ dst->counter_type##_val = \
+ (counter_type)rate_per_second( \
+ base->counter_type##_val, uptime); \
}
MUTEX_PROF_COUNTERS
#undef OP
@@ -249,25 +266,42 @@ mutex_stats_emit(emitter_t *emitter, emitter_row_t *row,
#undef EMITTER_TYPE_uint64_t
}
-#define COL(row_name, column_name, left_or_right, col_width, etype) \
- emitter_col_t col_##column_name; \
- emitter_col_init(&col_##column_name, &row_name); \
- col_##column_name.justify = emitter_justify_##left_or_right; \
- col_##column_name.width = col_width; \
+#define COL_DECLARE(column_name) \
+ emitter_col_t col_##column_name;
+
+#define COL_INIT(row_name, column_name, left_or_right, col_width, etype)\
+ emitter_col_init(&col_##column_name, &row_name); \
+ col_##column_name.justify = emitter_justify_##left_or_right; \
+ col_##column_name.width = col_width; \
col_##column_name.type = emitter_type_##etype;
-#define COL_HDR(row_name, column_name, human, left_or_right, col_width, etype) \
- COL(row_name, column_name, left_or_right, col_width, etype) \
- emitter_col_t header_##column_name; \
- emitter_col_init(&header_##column_name, &header_##row_name); \
- header_##column_name.justify = emitter_justify_##left_or_right; \
- header_##column_name.width = col_width; \
- header_##column_name.type = emitter_type_title; \
+#define COL(row_name, column_name, left_or_right, col_width, etype) \
+ COL_DECLARE(column_name); \
+ COL_INIT(row_name, column_name, left_or_right, col_width, etype)
+
+#define COL_HDR_DECLARE(column_name) \
+ COL_DECLARE(column_name); \
+ emitter_col_t header_##column_name;
+
+#define COL_HDR_INIT(row_name, column_name, human, left_or_right, \
+ col_width, etype) \
+ COL_INIT(row_name, column_name, left_or_right, col_width, etype)\
+ emitter_col_init(&header_##column_name, &header_##row_name); \
+ header_##column_name.justify = emitter_justify_##left_or_right; \
+ header_##column_name.width = col_width; \
+ header_##column_name.type = emitter_type_title; \
header_##column_name.str_val = human ? human : #column_name;
+#define COL_HDR(row_name, column_name, human, left_or_right, col_width, \
+ etype) \
+ COL_HDR_DECLARE(column_name) \
+ COL_HDR_INIT(row_name, column_name, human, left_or_right, \
+ col_width, etype)
+JEMALLOC_COLD
static void
-stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t uptime) {
+stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i,
+ uint64_t uptime) {
size_t page;
bool in_gap, in_gap_prev;
unsigned nbins, j;
@@ -282,6 +316,9 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti
emitter_row_t row;
emitter_row_init(&row);
+ bool prof_stats_on = config_prof && opt_prof && opt_prof_stats
+ && i == MALLCTL_ARENAS_ALL;
+
COL_HDR(row, size, NULL, right, 20, size)
COL_HDR(row, ind, NULL, right, 4, unsigned)
COL_HDR(row, allocated, NULL, right, 13, uint64)
@@ -291,6 +328,16 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti
COL_HDR(row, ndalloc_ps, "(#/sec)", right, 8, uint64)
COL_HDR(row, nrequests, NULL, right, 13, uint64)
COL_HDR(row, nrequests_ps, "(#/sec)", right, 10, uint64)
+ COL_HDR_DECLARE(prof_live_requested);
+ COL_HDR_DECLARE(prof_live_count);
+ COL_HDR_DECLARE(prof_accum_requested);
+ COL_HDR_DECLARE(prof_accum_count);
+ if (prof_stats_on) {
+ COL_HDR_INIT(row, prof_live_requested, NULL, right, 21, uint64)
+ COL_HDR_INIT(row, prof_live_count, NULL, right, 17, uint64)
+ COL_HDR_INIT(row, prof_accum_requested, NULL, right, 21, uint64)
+ COL_HDR_INIT(row, prof_accum_count, NULL, right, 17, uint64)
+ }
COL_HDR(row, nshards, NULL, right, 9, unsigned)
COL_HDR(row, curregs, NULL, right, 13, size)
COL_HDR(row, curslabs, NULL, right, 13, size)
@@ -334,6 +381,19 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti
emitter_table_row(emitter, &header_row);
emitter_json_array_kv_begin(emitter, "bins");
+ size_t stats_arenas_mib[CTL_MAX_DEPTH];
+ CTL_LEAF_PREPARE(stats_arenas_mib, 0, "stats.arenas");
+ stats_arenas_mib[2] = i;
+ CTL_LEAF_PREPARE(stats_arenas_mib, 3, "bins");
+
+ size_t arenas_bin_mib[CTL_MAX_DEPTH];
+ CTL_LEAF_PREPARE(arenas_bin_mib, 0, "arenas.bin");
+
+ size_t prof_stats_mib[CTL_MAX_DEPTH];
+ if (prof_stats_on) {
+ CTL_LEAF_PREPARE(prof_stats_mib, 0, "prof.stats.bins");
+ }
+
for (j = 0, in_gap = false; j < nbins; j++) {
uint64_t nslabs;
size_t reg_size, slab_size, curregs;
@@ -342,44 +402,57 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti
uint32_t nregs, nshards;
uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
uint64_t nreslabs;
+ prof_stats_t prof_live;
+ prof_stats_t prof_accum;
+
+ stats_arenas_mib[4] = j;
+ arenas_bin_mib[2] = j;
+
+ CTL_LEAF(stats_arenas_mib, 5, "nslabs", &nslabs, uint64_t);
+
+ if (prof_stats_on) {
+ prof_stats_mib[3] = j;
+ CTL_LEAF(prof_stats_mib, 4, "live", &prof_live,
+ prof_stats_t);
+ CTL_LEAF(prof_stats_mib, 4, "accum", &prof_accum,
+ prof_stats_t);
+ }
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nslabs", i, j, &nslabs,
- uint64_t);
in_gap_prev = in_gap;
- in_gap = (nslabs == 0);
+ if (prof_stats_on) {
+ in_gap = (nslabs == 0 && prof_accum.count == 0);
+ } else {
+ in_gap = (nslabs == 0);
+ }
if (in_gap_prev && !in_gap) {
emitter_table_printf(emitter,
" ---\n");
}
- CTL_M2_GET("arenas.bin.0.size", j, &reg_size, size_t);
- CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t);
- CTL_M2_GET("arenas.bin.0.slab_size", j, &slab_size, size_t);
- CTL_M2_GET("arenas.bin.0.nshards", j, &nshards, uint32_t);
+ if (in_gap && !emitter_outputs_json(emitter)) {
+ continue;
+ }
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, &nmalloc,
- uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, &ndalloc,
- uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, &curregs,
- size_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j,
- &nrequests, uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j, &nfills,
+ CTL_LEAF(arenas_bin_mib, 3, "size", &reg_size, size_t);
+ CTL_LEAF(arenas_bin_mib, 3, "nregs", &nregs, uint32_t);
+ CTL_LEAF(arenas_bin_mib, 3, "slab_size", &slab_size, size_t);
+ CTL_LEAF(arenas_bin_mib, 3, "nshards", &nshards, uint32_t);
+ CTL_LEAF(stats_arenas_mib, 5, "nmalloc", &nmalloc, uint64_t);
+ CTL_LEAF(stats_arenas_mib, 5, "ndalloc", &ndalloc, uint64_t);
+ CTL_LEAF(stats_arenas_mib, 5, "curregs", &curregs, size_t);
+ CTL_LEAF(stats_arenas_mib, 5, "nrequests", &nrequests,
uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j, &nflushes,
- uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nreslabs", i, j, &nreslabs,
- uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.curslabs", i, j, &curslabs,
- size_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nonfull_slabs", i, j, &nonfull_slabs,
+ CTL_LEAF(stats_arenas_mib, 5, "nfills", &nfills, uint64_t);
+ CTL_LEAF(stats_arenas_mib, 5, "nflushes", &nflushes, uint64_t);
+ CTL_LEAF(stats_arenas_mib, 5, "nreslabs", &nreslabs, uint64_t);
+ CTL_LEAF(stats_arenas_mib, 5, "curslabs", &curslabs, size_t);
+ CTL_LEAF(stats_arenas_mib, 5, "nonfull_slabs", &nonfull_slabs,
size_t);
if (mutex) {
- mutex_stats_read_arena_bin(i, j, col_mutex64,
- col_mutex32, uptime);
+ mutex_stats_read_arena_bin(stats_arenas_mib, 5,
+ col_mutex64, col_mutex32, uptime);
}
emitter_json_object_begin(emitter);
@@ -391,6 +464,16 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti
&curregs);
emitter_json_kv(emitter, "nrequests", emitter_type_uint64,
&nrequests);
+ if (prof_stats_on) {
+ emitter_json_kv(emitter, "prof_live_requested",
+ emitter_type_uint64, &prof_live.req_sum);
+ emitter_json_kv(emitter, "prof_live_count",
+ emitter_type_uint64, &prof_live.count);
+ emitter_json_kv(emitter, "prof_accum_requested",
+ emitter_type_uint64, &prof_accum.req_sum);
+ emitter_json_kv(emitter, "prof_accum_count",
+ emitter_type_uint64, &prof_accum.count);
+ }
emitter_json_kv(emitter, "nfills", emitter_type_uint64,
&nfills);
emitter_json_kv(emitter, "nflushes", emitter_type_uint64,
@@ -437,6 +520,13 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti
col_ndalloc_ps.uint64_val = rate_per_second(ndalloc, uptime);
col_nrequests.uint64_val = nrequests;
col_nrequests_ps.uint64_val = rate_per_second(nrequests, uptime);
+ if (prof_stats_on) {
+ col_prof_live_requested.uint64_val = prof_live.req_sum;
+ col_prof_live_count.uint64_val = prof_live.count;
+ col_prof_accum_requested.uint64_val =
+ prof_accum.req_sum;
+ col_prof_accum_count.uint64_val = prof_accum.count;
+ }
col_nshards.unsigned_val = nshards;
col_curregs.size_val = curregs;
col_curslabs.size_val = curslabs;
@@ -466,6 +556,7 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti
}
}
+JEMALLOC_COLD
static void
stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
unsigned nbins, nlextents, j;
@@ -479,6 +570,9 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
emitter_row_t row;
emitter_row_init(&row);
+ bool prof_stats_on = config_prof && opt_prof && opt_prof_stats
+ && i == MALLCTL_ARENAS_ALL;
+
COL_HDR(row, size, NULL, right, 20, size)
COL_HDR(row, ind, NULL, right, 4, unsigned)
COL_HDR(row, allocated, NULL, right, 13, size)
@@ -488,6 +582,16 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
COL_HDR(row, ndalloc_ps, "(#/sec)", right, 8, uint64)
COL_HDR(row, nrequests, NULL, right, 13, uint64)
COL_HDR(row, nrequests_ps, "(#/sec)", right, 8, uint64)
+ COL_HDR_DECLARE(prof_live_requested)
+ COL_HDR_DECLARE(prof_live_count)
+ COL_HDR_DECLARE(prof_accum_requested)
+ COL_HDR_DECLARE(prof_accum_count)
+ if (prof_stats_on) {
+ COL_HDR_INIT(row, prof_live_requested, NULL, right, 21, uint64)
+ COL_HDR_INIT(row, prof_live_count, NULL, right, 17, uint64)
+ COL_HDR_INIT(row, prof_accum_requested, NULL, right, 21, uint64)
+ COL_HDR_INIT(row, prof_accum_count, NULL, right, 17, uint64)
+ }
COL_HDR(row, curlextents, NULL, right, 13, size)
/* As with bins, we label the large extents table. */
@@ -496,16 +600,33 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
emitter_table_row(emitter, &header_row);
emitter_json_array_kv_begin(emitter, "lextents");
+ size_t stats_arenas_mib[CTL_MAX_DEPTH];
+ CTL_LEAF_PREPARE(stats_arenas_mib, 0, "stats.arenas");
+ stats_arenas_mib[2] = i;
+ CTL_LEAF_PREPARE(stats_arenas_mib, 3, "lextents");
+
+ size_t arenas_lextent_mib[CTL_MAX_DEPTH];
+ CTL_LEAF_PREPARE(arenas_lextent_mib, 0, "arenas.lextent");
+
+ size_t prof_stats_mib[CTL_MAX_DEPTH];
+ if (prof_stats_on) {
+ CTL_LEAF_PREPARE(prof_stats_mib, 0, "prof.stats.lextents");
+ }
+
for (j = 0, in_gap = false; j < nlextents; j++) {
uint64_t nmalloc, ndalloc, nrequests;
size_t lextent_size, curlextents;
+ prof_stats_t prof_live;
+ prof_stats_t prof_accum;
+
+ stats_arenas_mib[4] = j;
+ arenas_lextent_mib[2] = j;
+
+ CTL_LEAF(stats_arenas_mib, 5, "nmalloc", &nmalloc, uint64_t);
+ CTL_LEAF(stats_arenas_mib, 5, "ndalloc", &ndalloc, uint64_t);
+ CTL_LEAF(stats_arenas_mib, 5, "nrequests", &nrequests,
+ uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.lextents.0.nmalloc", i, j,
- &nmalloc, uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.lextents.0.ndalloc", i, j,
- &ndalloc, uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.lextents.0.nrequests", i, j,
- &nrequests, uint64_t);
in_gap_prev = in_gap;
in_gap = (nrequests == 0);
@@ -514,11 +635,29 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
" ---\n");
}
- CTL_M2_GET("arenas.lextent.0.size", j, &lextent_size, size_t);
- CTL_M2_M4_GET("stats.arenas.0.lextents.0.curlextents", i, j,
- &curlextents, size_t);
+ CTL_LEAF(arenas_lextent_mib, 3, "size", &lextent_size, size_t);
+ CTL_LEAF(stats_arenas_mib, 5, "curlextents", &curlextents,
+ size_t);
+
+ if (prof_stats_on) {
+ prof_stats_mib[3] = j;
+ CTL_LEAF(prof_stats_mib, 4, "live", &prof_live,
+ prof_stats_t);
+ CTL_LEAF(prof_stats_mib, 4, "accum", &prof_accum,
+ prof_stats_t);
+ }
emitter_json_object_begin(emitter);
+ if (prof_stats_on) {
+ emitter_json_kv(emitter, "prof_live_requested",
+ emitter_type_uint64, &prof_live.req_sum);
+ emitter_json_kv(emitter, "prof_live_count",
+ emitter_type_uint64, &prof_live.count);
+ emitter_json_kv(emitter, "prof_accum_requested",
+ emitter_type_uint64, &prof_accum.req_sum);
+ emitter_json_kv(emitter, "prof_accum_count",
+ emitter_type_uint64, &prof_accum.count);
+ }
emitter_json_kv(emitter, "curlextents", emitter_type_size,
&curlextents);
emitter_json_object_end(emitter);
@@ -532,6 +671,13 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
col_ndalloc_ps.uint64_val = rate_per_second(ndalloc, uptime);
col_nrequests.uint64_val = nrequests;
col_nrequests_ps.uint64_val = rate_per_second(nrequests, uptime);
+ if (prof_stats_on) {
+ col_prof_live_requested.uint64_val = prof_live.req_sum;
+ col_prof_live_count.uint64_val = prof_live.count;
+ col_prof_accum_requested.uint64_val =
+ prof_accum.req_sum;
+ col_prof_accum_count.uint64_val = prof_accum.count;
+ }
col_curlextents.size_val = curlextents;
if (!in_gap) {
@@ -544,6 +690,7 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
}
}
+JEMALLOC_COLD
static void
stats_arena_extents_print(emitter_t *emitter, unsigned i) {
unsigned j;
@@ -570,22 +717,27 @@ stats_arena_extents_print(emitter_t *emitter, unsigned i) {
emitter_table_row(emitter, &header_row);
emitter_json_array_kv_begin(emitter, "extents");
+ size_t stats_arenas_mib[CTL_MAX_DEPTH];
+ CTL_LEAF_PREPARE(stats_arenas_mib, 0, "stats.arenas");
+ stats_arenas_mib[2] = i;
+ CTL_LEAF_PREPARE(stats_arenas_mib, 3, "extents");
+
in_gap = false;
for (j = 0; j < SC_NPSIZES; j++) {
size_t ndirty, nmuzzy, nretained, total, dirty_bytes,
muzzy_bytes, retained_bytes, total_bytes;
- CTL_M2_M4_GET("stats.arenas.0.extents.0.ndirty", i, j,
- &ndirty, size_t);
- CTL_M2_M4_GET("stats.arenas.0.extents.0.nmuzzy", i, j,
- &nmuzzy, size_t);
- CTL_M2_M4_GET("stats.arenas.0.extents.0.nretained", i, j,
- &nretained, size_t);
- CTL_M2_M4_GET("stats.arenas.0.extents.0.dirty_bytes", i, j,
- &dirty_bytes, size_t);
- CTL_M2_M4_GET("stats.arenas.0.extents.0.muzzy_bytes", i, j,
- &muzzy_bytes, size_t);
- CTL_M2_M4_GET("stats.arenas.0.extents.0.retained_bytes", i, j,
+ stats_arenas_mib[4] = j;
+
+ CTL_LEAF(stats_arenas_mib, 5, "ndirty", &ndirty, size_t);
+ CTL_LEAF(stats_arenas_mib, 5, "nmuzzy", &nmuzzy, size_t);
+ CTL_LEAF(stats_arenas_mib, 5, "nretained", &nretained, size_t);
+ CTL_LEAF(stats_arenas_mib, 5, "dirty_bytes", &dirty_bytes,
+ size_t);
+ CTL_LEAF(stats_arenas_mib, 5, "muzzy_bytes", &muzzy_bytes,
+ size_t);
+ CTL_LEAF(stats_arenas_mib, 5, "retained_bytes",
&retained_bytes, size_t);
+
total = ndirty + nmuzzy + nretained;
total_bytes = dirty_bytes + muzzy_bytes + retained_bytes;
@@ -633,6 +785,230 @@ stats_arena_extents_print(emitter_t *emitter, unsigned i) {
}
static void
+stats_arena_hpa_shard_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
+ emitter_row_t header_row;
+ emitter_row_init(&header_row);
+ emitter_row_t row;
+ emitter_row_init(&row);
+
+ uint64_t npurge_passes;
+ uint64_t npurges;
+ uint64_t nhugifies;
+ uint64_t ndehugifies;
+
+ CTL_M2_GET("stats.arenas.0.hpa_shard.npurge_passes",
+ i, &npurge_passes, uint64_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.npurges",
+ i, &npurges, uint64_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.nhugifies",
+ i, &nhugifies, uint64_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.ndehugifies",
+ i, &ndehugifies, uint64_t);
+
+ size_t npageslabs_huge;
+ size_t nactive_huge;
+ size_t ndirty_huge;
+
+ size_t npageslabs_nonhuge;
+ size_t nactive_nonhuge;
+ size_t ndirty_nonhuge;
+ size_t nretained_nonhuge;
+
+ size_t sec_bytes;
+ CTL_M2_GET("stats.arenas.0.hpa_sec_bytes", i, &sec_bytes, size_t);
+ emitter_kv(emitter, "sec_bytes", "Bytes in small extent cache",
+ emitter_type_size, &sec_bytes);
+
+ /* First, global stats. */
+ emitter_table_printf(emitter,
+ "HPA shard stats:\n"
+ " Purge passes: %" FMTu64 " (%" FMTu64 " / sec)\n"
+ " Purges: %" FMTu64 " (%" FMTu64 " / sec)\n"
+ " Hugeifies: %" FMTu64 " (%" FMTu64 " / sec)\n"
+ " Dehugifies: %" FMTu64 " (%" FMTu64 " / sec)\n"
+ "\n",
+ npurge_passes, rate_per_second(npurge_passes, uptime),
+ npurges, rate_per_second(npurges, uptime),
+ nhugifies, rate_per_second(nhugifies, uptime),
+ ndehugifies, rate_per_second(ndehugifies, uptime));
+
+ emitter_json_object_kv_begin(emitter, "hpa_shard");
+ emitter_json_kv(emitter, "npurge_passes", emitter_type_uint64,
+ &npurge_passes);
+ emitter_json_kv(emitter, "npurges", emitter_type_uint64,
+ &npurges);
+ emitter_json_kv(emitter, "nhugifies", emitter_type_uint64,
+ &nhugifies);
+ emitter_json_kv(emitter, "ndehugifies", emitter_type_uint64,
+ &ndehugifies);
+
+ /* Next, full slab stats. */
+ CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.npageslabs_huge",
+ i, &npageslabs_huge, size_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.nactive_huge",
+ i, &nactive_huge, size_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.ndirty_huge",
+ i, &ndirty_huge, size_t);
+
+ CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.npageslabs_nonhuge",
+ i, &npageslabs_nonhuge, size_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.nactive_nonhuge",
+ i, &nactive_nonhuge, size_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.ndirty_nonhuge",
+ i, &ndirty_nonhuge, size_t);
+ nretained_nonhuge = npageslabs_nonhuge * HUGEPAGE_PAGES
+ - nactive_nonhuge - ndirty_nonhuge;
+
+ emitter_table_printf(emitter,
+ " In full slabs:\n"
+ " npageslabs: %zu huge, %zu nonhuge\n"
+ " nactive: %zu huge, %zu nonhuge \n"
+ " ndirty: %zu huge, %zu nonhuge \n"
+ " nretained: 0 huge, %zu nonhuge \n",
+ npageslabs_huge, npageslabs_nonhuge,
+ nactive_huge, nactive_nonhuge,
+ ndirty_huge, ndirty_nonhuge,
+ nretained_nonhuge);
+
+ emitter_json_object_kv_begin(emitter, "full_slabs");
+ emitter_json_kv(emitter, "npageslabs_huge", emitter_type_size,
+ &npageslabs_huge);
+ emitter_json_kv(emitter, "nactive_huge", emitter_type_size,
+ &nactive_huge);
+	emitter_json_kv(emitter, "ndirty_huge", emitter_type_size,
+	    &ndirty_huge);
+ emitter_json_kv(emitter, "npageslabs_nonhuge", emitter_type_size,
+ &npageslabs_nonhuge);
+ emitter_json_kv(emitter, "nactive_nonhuge", emitter_type_size,
+ &nactive_nonhuge);
+ emitter_json_kv(emitter, "ndirty_nonhuge", emitter_type_size,
+ &ndirty_nonhuge);
+ emitter_json_object_end(emitter); /* End "full_slabs" */
+
+ /* Next, empty slab stats. */
+ CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.npageslabs_huge",
+ i, &npageslabs_huge, size_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.nactive_huge",
+ i, &nactive_huge, size_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.ndirty_huge",
+ i, &ndirty_huge, size_t);
+
+ CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.npageslabs_nonhuge",
+ i, &npageslabs_nonhuge, size_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.nactive_nonhuge",
+ i, &nactive_nonhuge, size_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.ndirty_nonhuge",
+ i, &ndirty_nonhuge, size_t);
+ nretained_nonhuge = npageslabs_nonhuge * HUGEPAGE_PAGES
+ - nactive_nonhuge - ndirty_nonhuge;
+
+ emitter_table_printf(emitter,
+ " In empty slabs:\n"
+ " npageslabs: %zu huge, %zu nonhuge\n"
+ " nactive: %zu huge, %zu nonhuge \n"
+ " ndirty: %zu huge, %zu nonhuge \n"
+ " nretained: 0 huge, %zu nonhuge \n"
+ "\n",
+ npageslabs_huge, npageslabs_nonhuge,
+ nactive_huge, nactive_nonhuge,
+ ndirty_huge, ndirty_nonhuge,
+ nretained_nonhuge);
+
+ emitter_json_object_kv_begin(emitter, "empty_slabs");
+ emitter_json_kv(emitter, "npageslabs_huge", emitter_type_size,
+ &npageslabs_huge);
+ emitter_json_kv(emitter, "nactive_huge", emitter_type_size,
+ &nactive_huge);
+	emitter_json_kv(emitter, "ndirty_huge", emitter_type_size,
+	    &ndirty_huge);
+ emitter_json_kv(emitter, "npageslabs_nonhuge", emitter_type_size,
+ &npageslabs_nonhuge);
+ emitter_json_kv(emitter, "nactive_nonhuge", emitter_type_size,
+ &nactive_nonhuge);
+ emitter_json_kv(emitter, "ndirty_nonhuge", emitter_type_size,
+ &ndirty_nonhuge);
+ emitter_json_object_end(emitter); /* End "empty_slabs" */
+
+ COL_HDR(row, size, NULL, right, 20, size)
+ COL_HDR(row, ind, NULL, right, 4, unsigned)
+ COL_HDR(row, npageslabs_huge, NULL, right, 16, size)
+ COL_HDR(row, nactive_huge, NULL, right, 16, size)
+ COL_HDR(row, ndirty_huge, NULL, right, 16, size)
+ COL_HDR(row, npageslabs_nonhuge, NULL, right, 20, size)
+ COL_HDR(row, nactive_nonhuge, NULL, right, 20, size)
+ COL_HDR(row, ndirty_nonhuge, NULL, right, 20, size)
+ COL_HDR(row, nretained_nonhuge, NULL, right, 20, size)
+
+ size_t stats_arenas_mib[CTL_MAX_DEPTH];
+ CTL_LEAF_PREPARE(stats_arenas_mib, 0, "stats.arenas");
+ stats_arenas_mib[2] = i;
+ CTL_LEAF_PREPARE(stats_arenas_mib, 3, "hpa_shard.nonfull_slabs");
+
+ emitter_table_row(emitter, &header_row);
+ emitter_json_array_kv_begin(emitter, "nonfull_slabs");
+ bool in_gap = false;
+ for (pszind_t j = 0; j < PSSET_NPSIZES && j < SC_NPSIZES; j++) {
+ stats_arenas_mib[5] = j;
+
+ CTL_LEAF(stats_arenas_mib, 6, "npageslabs_huge",
+ &npageslabs_huge, size_t);
+ CTL_LEAF(stats_arenas_mib, 6, "nactive_huge",
+ &nactive_huge, size_t);
+ CTL_LEAF(stats_arenas_mib, 6, "ndirty_huge",
+ &ndirty_huge, size_t);
+
+ CTL_LEAF(stats_arenas_mib, 6, "npageslabs_nonhuge",
+ &npageslabs_nonhuge, size_t);
+ CTL_LEAF(stats_arenas_mib, 6, "nactive_nonhuge",
+ &nactive_nonhuge, size_t);
+ CTL_LEAF(stats_arenas_mib, 6, "ndirty_nonhuge",
+ &ndirty_nonhuge, size_t);
+ nretained_nonhuge = npageslabs_nonhuge * HUGEPAGE_PAGES
+ - nactive_nonhuge - ndirty_nonhuge;
+
+ bool in_gap_prev = in_gap;
+ in_gap = (npageslabs_huge == 0 && npageslabs_nonhuge == 0);
+ if (in_gap_prev && !in_gap) {
+ emitter_table_printf(emitter,
+ " ---\n");
+ }
+
+ col_size.size_val = sz_pind2sz(j);
+ col_ind.size_val = j;
+ col_npageslabs_huge.size_val = npageslabs_huge;
+ col_nactive_huge.size_val = nactive_huge;
+ col_ndirty_huge.size_val = ndirty_huge;
+ col_npageslabs_nonhuge.size_val = npageslabs_nonhuge;
+ col_nactive_nonhuge.size_val = nactive_nonhuge;
+ col_ndirty_nonhuge.size_val = ndirty_nonhuge;
+ col_nretained_nonhuge.size_val = nretained_nonhuge;
+ if (!in_gap) {
+ emitter_table_row(emitter, &row);
+ }
+
+ emitter_json_object_begin(emitter);
+ emitter_json_kv(emitter, "npageslabs_huge", emitter_type_size,
+ &npageslabs_huge);
+ emitter_json_kv(emitter, "nactive_huge", emitter_type_size,
+ &nactive_huge);
+ emitter_json_kv(emitter, "ndirty_huge", emitter_type_size,
+ &ndirty_huge);
+ emitter_json_kv(emitter, "npageslabs_nonhuge", emitter_type_size,
+ &npageslabs_nonhuge);
+ emitter_json_kv(emitter, "nactive_nonhuge", emitter_type_size,
+ &nactive_nonhuge);
+ emitter_json_kv(emitter, "ndirty_nonhuge", emitter_type_size,
+ &ndirty_nonhuge);
+ emitter_json_object_end(emitter);
+ }
+ emitter_json_array_end(emitter); /* End "nonfull_slabs" */
+ emitter_json_object_end(emitter); /* End "hpa_shard" */
+ if (in_gap) {
+ emitter_table_printf(emitter, " ---\n");
+ }
+}
+
+static void
stats_arena_mutexes_print(emitter_t *emitter, unsigned arena_ind, uint64_t uptime) {
emitter_row_t row;
emitter_col_t col_name;
@@ -645,21 +1021,27 @@ stats_arena_mutexes_print(emitter_t *emitter, unsigned arena_ind, uint64_t uptim
emitter_json_object_kv_begin(emitter, "mutexes");
emitter_table_row(emitter, &row);
+ size_t stats_arenas_mib[CTL_MAX_DEPTH];
+ CTL_LEAF_PREPARE(stats_arenas_mib, 0, "stats.arenas");
+ stats_arenas_mib[2] = arena_ind;
+ CTL_LEAF_PREPARE(stats_arenas_mib, 3, "mutexes");
+
for (mutex_prof_arena_ind_t i = 0; i < mutex_prof_num_arena_mutexes;
i++) {
const char *name = arena_mutex_names[i];
emitter_json_object_kv_begin(emitter, name);
- mutex_stats_read_arena(arena_ind, i, name, &col_name, col64,
- col32, uptime);
+ mutex_stats_read_arena(stats_arenas_mib, 4, name, &col_name,
+ col64, col32, uptime);
mutex_stats_emit(emitter, &row, col64, col32);
emitter_json_object_end(emitter); /* Close the mutex dict. */
}
emitter_json_object_end(emitter); /* End "mutexes". */
}
+JEMALLOC_COLD
static void
stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large,
- bool mutex, bool extents) {
+ bool mutex, bool extents, bool hpa) {
unsigned nthreads;
const char *dss;
ssize_t dirty_decay_ms, muzzy_decay_ms;
@@ -673,7 +1055,7 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large,
size_t large_allocated;
uint64_t large_nmalloc, large_ndalloc, large_nrequests, large_nfills,
large_nflushes;
- size_t tcache_bytes, abandoned_vm;
+ size_t tcache_bytes, tcache_stashed_bytes, abandoned_vm;
uint64_t uptime;
CTL_GET("arenas.page", &page, size_t);
@@ -817,12 +1199,12 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large,
COL(alloc_count_row, count_nmalloc, right, 16, title);
col_count_nmalloc.str_val = "nmalloc";
- COL(alloc_count_row, count_nmalloc_ps, right, 8, title);
+ COL(alloc_count_row, count_nmalloc_ps, right, 10, title);
col_count_nmalloc_ps.str_val = "(#/sec)";
COL(alloc_count_row, count_ndalloc, right, 16, title);
col_count_ndalloc.str_val = "ndalloc";
- COL(alloc_count_row, count_ndalloc_ps, right, 8, title);
+ COL(alloc_count_row, count_ndalloc_ps, right, 10, title);
col_count_ndalloc_ps.str_val = "(#/sec)";
COL(alloc_count_row, count_nrequests, right, 16, title);
@@ -962,6 +1344,7 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large,
GET_AND_EMIT_MEM_STAT(internal)
GET_AND_EMIT_MEM_STAT(metadata_thp)
GET_AND_EMIT_MEM_STAT(tcache_bytes)
+ GET_AND_EMIT_MEM_STAT(tcache_stashed_bytes)
GET_AND_EMIT_MEM_STAT(resident)
GET_AND_EMIT_MEM_STAT(abandoned_vm)
GET_AND_EMIT_MEM_STAT(extent_avail)
@@ -979,8 +1362,12 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large,
if (extents) {
stats_arena_extents_print(emitter, i);
}
+ if (hpa) {
+ stats_arena_hpa_shard_print(emitter, i, uptime);
+ }
}
+JEMALLOC_COLD
static void
stats_general_print(emitter_t *emitter) {
const char *cpv;
@@ -988,14 +1375,18 @@ stats_general_print(emitter_t *emitter) {
unsigned uv;
uint32_t u32v;
uint64_t u64v;
+ int64_t i64v;
ssize_t ssv, ssv2;
- size_t sv, bsz, usz, ssz, sssz, cpsz;
+ size_t sv, bsz, usz, u32sz, u64sz, i64sz, ssz, sssz, cpsz;
bsz = sizeof(bool);
usz = sizeof(unsigned);
ssz = sizeof(size_t);
sssz = sizeof(ssize_t);
cpsz = sizeof(const char *);
+ u32sz = sizeof(uint32_t);
+ i64sz = sizeof(int64_t);
+ u64sz = sizeof(uint64_t);
CTL_GET("version", &cpv, const char *);
emitter_kv(emitter, "version", "Version", emitter_type_string, &cpv);
@@ -1051,6 +1442,11 @@ stats_general_print(emitter_t *emitter) {
#define OPT_WRITE_UNSIGNED(name) \
OPT_WRITE(name, uv, usz, emitter_type_unsigned)
+#define OPT_WRITE_INT64(name) \
+ OPT_WRITE(name, i64v, i64sz, emitter_type_int64)
+#define OPT_WRITE_UINT64(name) \
+ OPT_WRITE(name, u64v, u64sz, emitter_type_uint64)
+
#define OPT_WRITE_SIZE_T(name) \
OPT_WRITE(name, sv, ssz, emitter_type_size)
#define OPT_WRITE_SSIZE_T(name) \
@@ -1066,13 +1462,43 @@ stats_general_print(emitter_t *emitter) {
OPT_WRITE_BOOL("abort")
OPT_WRITE_BOOL("abort_conf")
+ OPT_WRITE_BOOL("cache_oblivious")
OPT_WRITE_BOOL("confirm_conf")
OPT_WRITE_BOOL("retain")
OPT_WRITE_CHAR_P("dss")
OPT_WRITE_UNSIGNED("narenas")
OPT_WRITE_CHAR_P("percpu_arena")
OPT_WRITE_SIZE_T("oversize_threshold")
+ OPT_WRITE_BOOL("hpa")
+ OPT_WRITE_SIZE_T("hpa_slab_max_alloc")
+ OPT_WRITE_SIZE_T("hpa_hugification_threshold")
+ OPT_WRITE_UINT64("hpa_hugify_delay_ms")
+ OPT_WRITE_UINT64("hpa_min_purge_interval_ms")
+ if (je_mallctl("opt.hpa_dirty_mult", (void *)&u32v, &u32sz, NULL, 0)
+ == 0) {
+ /*
+ * We cheat a little and "know" the secret meaning of this
+ * representation.
+ */
+ if (u32v == (uint32_t)-1) {
+ const char *neg1 = "-1";
+ emitter_kv(emitter, "hpa_dirty_mult",
+ "opt.hpa_dirty_mult", emitter_type_string, &neg1);
+ } else {
+ char buf[FXP_BUF_SIZE];
+ fxp_print(u32v, buf);
+ const char *bufp = buf;
+ emitter_kv(emitter, "hpa_dirty_mult",
+ "opt.hpa_dirty_mult", emitter_type_string, &bufp);
+ }
+ }
+ OPT_WRITE_SIZE_T("hpa_sec_nshards")
+ OPT_WRITE_SIZE_T("hpa_sec_max_alloc")
+ OPT_WRITE_SIZE_T("hpa_sec_max_bytes")
+ OPT_WRITE_SIZE_T("hpa_sec_bytes_after_flush")
+ OPT_WRITE_SIZE_T("hpa_sec_batch_fill_extra")
OPT_WRITE_CHAR_P("metadata_thp")
+ OPT_WRITE_INT64("mutex_max_spin")
OPT_WRITE_BOOL_MUTABLE("background_thread", "background_thread")
OPT_WRITE_SSIZE_T_MUTABLE("dirty_decay_ms", "arenas.dirty_decay_ms")
OPT_WRITE_SSIZE_T_MUTABLE("muzzy_decay_ms", "arenas.muzzy_decay_ms")
@@ -1081,8 +1507,17 @@ stats_general_print(emitter_t *emitter) {
OPT_WRITE_BOOL("zero")
OPT_WRITE_BOOL("utrace")
OPT_WRITE_BOOL("xmalloc")
+ OPT_WRITE_BOOL("experimental_infallible_new")
OPT_WRITE_BOOL("tcache")
- OPT_WRITE_SSIZE_T("lg_tcache_max")
+ OPT_WRITE_SIZE_T("tcache_max")
+ OPT_WRITE_UNSIGNED("tcache_nslots_small_min")
+ OPT_WRITE_UNSIGNED("tcache_nslots_small_max")
+ OPT_WRITE_UNSIGNED("tcache_nslots_large")
+ OPT_WRITE_SSIZE_T("lg_tcache_nslots_mul")
+ OPT_WRITE_SIZE_T("tcache_gc_incr_bytes")
+ OPT_WRITE_SIZE_T("tcache_gc_delay_bytes")
+ OPT_WRITE_UNSIGNED("lg_tcache_flush_small_div")
+ OPT_WRITE_UNSIGNED("lg_tcache_flush_large_div")
OPT_WRITE_CHAR_P("thp")
OPT_WRITE_BOOL("prof")
OPT_WRITE_CHAR_P("prof_prefix")
@@ -1095,8 +1530,14 @@ stats_general_print(emitter_t *emitter) {
OPT_WRITE_BOOL("prof_gdump")
OPT_WRITE_BOOL("prof_final")
OPT_WRITE_BOOL("prof_leak")
+ OPT_WRITE_BOOL("prof_leak_error")
OPT_WRITE_BOOL("stats_print")
OPT_WRITE_CHAR_P("stats_print_opts")
+ OPT_WRITE_BOOL("stats_print")
+ OPT_WRITE_CHAR_P("stats_print_opts")
+ OPT_WRITE_INT64("stats_interval")
+ OPT_WRITE_CHAR_P("stats_interval_opts")
+ OPT_WRITE_CHAR_P("zero_realloc")
emitter_dict_end(emitter);
@@ -1167,38 +1608,41 @@ stats_general_print(emitter_t *emitter) {
"Maximum thread-cached size class", emitter_type_size, &sv);
}
- unsigned nbins;
- CTL_GET("arenas.nbins", &nbins, unsigned);
+ unsigned arenas_nbins;
+ CTL_GET("arenas.nbins", &arenas_nbins, unsigned);
emitter_kv(emitter, "nbins", "Number of bin size classes",
- emitter_type_unsigned, &nbins);
+ emitter_type_unsigned, &arenas_nbins);
- unsigned nhbins;
- CTL_GET("arenas.nhbins", &nhbins, unsigned);
+ unsigned arenas_nhbins;
+ CTL_GET("arenas.nhbins", &arenas_nhbins, unsigned);
emitter_kv(emitter, "nhbins", "Number of thread-cache bin size classes",
- emitter_type_unsigned, &nhbins);
+ emitter_type_unsigned, &arenas_nhbins);
/*
* We do enough mallctls in a loop that we actually want to omit them
* (not just omit the printing).
*/
- if (emitter->output == emitter_output_json) {
+ if (emitter_outputs_json(emitter)) {
emitter_json_array_kv_begin(emitter, "bin");
- for (unsigned i = 0; i < nbins; i++) {
+ size_t arenas_bin_mib[CTL_MAX_DEPTH];
+ CTL_LEAF_PREPARE(arenas_bin_mib, 0, "arenas.bin");
+ for (unsigned i = 0; i < arenas_nbins; i++) {
+ arenas_bin_mib[2] = i;
emitter_json_object_begin(emitter);
- CTL_M2_GET("arenas.bin.0.size", i, &sv, size_t);
+ CTL_LEAF(arenas_bin_mib, 3, "size", &sv, size_t);
emitter_json_kv(emitter, "size", emitter_type_size,
&sv);
- CTL_M2_GET("arenas.bin.0.nregs", i, &u32v, uint32_t);
+ CTL_LEAF(arenas_bin_mib, 3, "nregs", &u32v, uint32_t);
emitter_json_kv(emitter, "nregs", emitter_type_uint32,
&u32v);
- CTL_M2_GET("arenas.bin.0.slab_size", i, &sv, size_t);
+ CTL_LEAF(arenas_bin_mib, 3, "slab_size", &sv, size_t);
emitter_json_kv(emitter, "slab_size", emitter_type_size,
&sv);
- CTL_M2_GET("arenas.bin.0.nshards", i, &u32v, uint32_t);
+ CTL_LEAF(arenas_bin_mib, 3, "nshards", &u32v, uint32_t);
emitter_json_kv(emitter, "nshards", emitter_type_uint32,
&u32v);
@@ -1212,12 +1656,15 @@ stats_general_print(emitter_t *emitter) {
emitter_kv(emitter, "nlextents", "Number of large size classes",
emitter_type_unsigned, &nlextents);
- if (emitter->output == emitter_output_json) {
+ if (emitter_outputs_json(emitter)) {
emitter_json_array_kv_begin(emitter, "lextent");
+ size_t arenas_lextent_mib[CTL_MAX_DEPTH];
+ CTL_LEAF_PREPARE(arenas_lextent_mib, 0, "arenas.lextent");
for (unsigned i = 0; i < nlextents; i++) {
+ arenas_lextent_mib[2] = i;
emitter_json_object_begin(emitter);
- CTL_M2_GET("arenas.lextent.0.size", i, &sv, size_t);
+ CTL_LEAF(arenas_lextent_mib, 3, "size", &sv, size_t);
emitter_json_kv(emitter, "size", emitter_type_size,
&sv);
@@ -1229,9 +1676,10 @@ stats_general_print(emitter_t *emitter) {
emitter_json_object_end(emitter); /* Close "arenas" */
}
+JEMALLOC_COLD
static void
stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
- bool unmerged, bool bins, bool large, bool mutex, bool extents) {
+ bool unmerged, bool bins, bool large, bool mutex, bool extents, bool hpa) {
/*
* These should be deleted. We keep them around for a while, to aid in
* the transition to the emitter code.
@@ -1239,6 +1687,7 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
size_t allocated, active, metadata, metadata_thp, resident, mapped,
retained;
size_t num_background_threads;
+ size_t zero_reallocs;
uint64_t background_thread_num_runs, background_thread_run_interval;
CTL_GET("stats.allocated", &allocated, size_t);
@@ -1249,6 +1698,8 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
CTL_GET("stats.mapped", &mapped, size_t);
CTL_GET("stats.retained", &retained, size_t);
+ CTL_GET("stats.zero_reallocs", &zero_reallocs, size_t);
+
if (have_background_thread) {
CTL_GET("stats.background_thread.num_threads",
&num_background_threads, size_t);
@@ -1272,12 +1723,18 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
emitter_json_kv(emitter, "resident", emitter_type_size, &resident);
emitter_json_kv(emitter, "mapped", emitter_type_size, &mapped);
emitter_json_kv(emitter, "retained", emitter_type_size, &retained);
+ emitter_json_kv(emitter, "zero_reallocs", emitter_type_size,
+ &zero_reallocs);
emitter_table_printf(emitter, "Allocated: %zu, active: %zu, "
"metadata: %zu (n_thp %zu), resident: %zu, mapped: %zu, "
"retained: %zu\n", allocated, active, metadata, metadata_thp,
resident, mapped, retained);
+ /* Strange behaviors */
+ emitter_table_printf(emitter,
+ "Count of realloc(non-null-ptr, 0) calls: %zu\n", zero_reallocs);
+
/* Background thread stats. */
emitter_json_object_kv_begin(emitter, "background_thread");
emitter_json_kv(emitter, "num_threads", emitter_type_size,
@@ -1308,9 +1765,11 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
CTL_M2_GET("stats.arenas.0.uptime", 0, &uptime, uint64_t);
+ size_t stats_mutexes_mib[CTL_MAX_DEPTH];
+ CTL_LEAF_PREPARE(stats_mutexes_mib, 0, "stats.mutexes");
for (int i = 0; i < mutex_prof_num_global_mutexes; i++) {
- mutex_stats_read_global(global_mutex_names[i], &name,
- col64, col32, uptime);
+ mutex_stats_read_global(stats_mutexes_mib, 2,
+ global_mutex_names[i], &name, col64, col32, uptime);
emitter_json_object_kv_begin(emitter, global_mutex_names[i]);
mutex_stats_emit(emitter, &row, col64, col32);
emitter_json_object_end(emitter);
@@ -1355,7 +1814,7 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
emitter_table_printf(emitter, "Merged arenas stats:\n");
emitter_json_object_kv_begin(emitter, "merged");
stats_arena_print(emitter, MALLCTL_ARENAS_ALL, bins,
- large, mutex, extents);
+ large, mutex, extents, hpa);
emitter_json_object_end(emitter); /* Close "merged". */
}
@@ -1366,7 +1825,7 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
"Destroyed arenas stats:\n");
emitter_json_object_kv_begin(emitter, "destroyed");
stats_arena_print(emitter, MALLCTL_ARENAS_DESTROYED,
- bins, large, mutex, extents);
+ bins, large, mutex, extents, hpa);
emitter_json_object_end(emitter); /* Close "destroyed". */
}
@@ -1382,7 +1841,7 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
emitter_table_printf(emitter,
"arenas[%s]:\n", arena_ind_str);
stats_arena_print(emitter, i, bins,
- large, mutex, extents);
+ large, mutex, extents, hpa);
/* Close "<arena-ind>". */
emitter_json_object_end(emitter);
}
@@ -1393,8 +1852,7 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
}
void
-stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *opts) {
+stats_print(write_cb_t *write_cb, void *cbopaque, const char *opts) {
int err;
uint64_t epoch;
size_t u64sz;
@@ -1437,8 +1895,8 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
emitter_t emitter;
emitter_init(&emitter,
- json ? emitter_output_json : emitter_output_table, write_cb,
- cbopaque);
+ json ? emitter_output_json_compact : emitter_output_table,
+ write_cb, cbopaque);
emitter_begin(&emitter);
emitter_table_printf(&emitter, "___ Begin jemalloc statistics ___\n");
emitter_json_object_kv_begin(&emitter, "jemalloc");
@@ -1448,10 +1906,68 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
}
if (config_stats) {
stats_print_helper(&emitter, merged, destroyed, unmerged,
- bins, large, mutex, extents);
+ bins, large, mutex, extents, hpa);
}
emitter_json_object_end(&emitter); /* Closes the "jemalloc" dict. */
emitter_table_printf(&emitter, "--- End jemalloc statistics ---\n");
emitter_end(&emitter);
}
+
+/*
+ * Event hook: bytes to allocate before the next stats-interval event
+ * fires.  The batch size is derived from opt_stats_interval in
+ * stats_boot() below.
+ */
+uint64_t
+stats_interval_new_event_wait(tsd_t *tsd) {
+	return stats_interval_accum_batch;
+}
+
+/*
+ * Event hook: wait to use when a stats-interval event was postponed.
+ * Presumably TE_MIN_START_WAIT reschedules it as soon as possible --
+ * confirm against the thread-event (te) module.
+ */
+uint64_t
+stats_interval_postponed_event_wait(tsd_t *tsd) {
+	return TE_MIN_START_WAIT;
+}
+
+/*
+ * Event hook: fold the elapsed allocated-byte count into the interval
+ * accumulator; when counter_accum() reports the configured interval has
+ * been crossed, dump stats using the opt.stats_interval_opts flags.
+ */
+void
+stats_interval_event_handler(tsd_t *tsd, uint64_t elapsed) {
+	assert(elapsed > 0 && elapsed != TE_INVALID_ELAPSED);
+	if (counter_accum(tsd_tsdn(tsd), &stats_interval_accumulated,
+	    elapsed)) {
+		je_malloc_stats_print(NULL, NULL, opt_stats_interval_opts);
+	}
+}
+
+/*
+ * Initialize interval-triggered stats printing from opt_stats_interval.
+ *
+ * opt_stats_interval == -1 disables the feature (interval and batch both
+ * 0); otherwise the event batch size is interval >>
+ * STATS_INTERVAL_ACCUM_LG_BATCH_SIZE, clamped to
+ * [1, STATS_INTERVAL_ACCUM_BATCH_MAX].
+ *
+ * Returns true on failure (propagated from counter_accum_init()).
+ */
+bool
+stats_boot(void) {
+	uint64_t stats_interval;
+	if (opt_stats_interval < 0) {
+		assert(opt_stats_interval == -1);
+		stats_interval = 0;
+		stats_interval_accum_batch = 0;
+	} else{
+		/* See comments in stats.h */
+		stats_interval = (opt_stats_interval > 0) ?
+		    opt_stats_interval : 1;
+		/* Shift down to the batch granularity, then clamp. */
+		uint64_t batch = stats_interval >>
+		    STATS_INTERVAL_ACCUM_LG_BATCH_SIZE;
+		if (batch > STATS_INTERVAL_ACCUM_BATCH_MAX) {
+			batch = STATS_INTERVAL_ACCUM_BATCH_MAX;
+		} else if (batch == 0) {
+			batch = 1;
+		}
+		stats_interval_accum_batch = batch;
+	}
+
+	return counter_accum_init(&stats_interval_accumulated, stats_interval);
+}
+
+/* Fork protection: acquire the interval accumulator's lock pre-fork. */
+void
+stats_prefork(tsdn_t *tsdn) {
+	counter_prefork(tsdn, &stats_interval_accumulated);
+}
+
+/* Fork protection: release the accumulator's lock in the parent. */
+void
+stats_postfork_parent(tsdn_t *tsdn) {
+	counter_postfork_parent(tsdn, &stats_interval_accumulated);
+}
+
+/* Fork protection: reinitialize the accumulator's lock in the child. */
+void
+stats_postfork_child(tsdn_t *tsdn) {
+	counter_postfork_child(tsdn, &stats_interval_accumulated);
+}