path: root/deps/jemalloc/src/prof.c
Diffstat (limited to 'deps/jemalloc/src/prof.c')
 deps/jemalloc/src/prof.c | 723 ++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 705 insertions(+), 18 deletions(-)
diff --git a/deps/jemalloc/src/prof.c b/deps/jemalloc/src/prof.c
index 13df641a0..13334cb4c 100644
--- a/deps/jemalloc/src/prof.c
+++ b/deps/jemalloc/src/prof.c
@@ -7,6 +7,7 @@
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/emitter.h"
/******************************************************************************/
@@ -23,7 +24,7 @@
*/
#undef _Unwind_Backtrace
#include <unwind.h>
-#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, hooks_libc_hook)
+#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, test_hooks_libc_hook)
#endif
/******************************************************************************/
@@ -38,6 +39,7 @@ bool opt_prof_gdump = false;
bool opt_prof_final = false;
bool opt_prof_leak = false;
bool opt_prof_accum = false;
+bool opt_prof_log = false;
char opt_prof_prefix[
/* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
@@ -70,6 +72,100 @@ uint64_t prof_interval = 0;
size_t lg_prof_sample;
+typedef enum prof_logging_state_e prof_logging_state_t;
+enum prof_logging_state_e {
+ prof_logging_state_stopped,
+ prof_logging_state_started,
+ prof_logging_state_dumping
+};
+
+/*
+ * - stopped: log_start never called, or previous log_stop has completed.
+ * - started: log_start called, log_stop not called yet. Allocations are logged.
+ * - dumping: log_stop called but not finished; samples are not logged anymore.
+ */
+prof_logging_state_t prof_logging_state = prof_logging_state_stopped;
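For readers following the state machine: prof_log_start() moves the logger from stopped to started, and prof_log_stop() moves it through dumping back to stopped. A minimal usage sketch from application code, assuming the companion "prof.log_start"/"prof.log_stop" mallctl endpoints introduced alongside this change:

    #include <jemalloc/jemalloc.h>

    /* Illustrative only; assumes the prof.log_start/prof.log_stop mallctls. */
    static void
    log_a_burst_of_allocations(void) {
    	const char *filename = "prof.log.0.json";
    	/* stopped -> started; a NULL newp picks a default filename. */
    	mallctl("prof.log_start", NULL, NULL, (void *)&filename,
    	    sizeof(const char *));
    	/* ... sampled objects freed in here get recorded ... */
    	/* started -> dumping -> stopped; the JSON file is written now. */
    	mallctl("prof.log_stop", NULL, NULL, NULL, 0);
    }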
+
+#ifdef JEMALLOC_JET
+static bool prof_log_dummy = false;
+#endif
+
+/* Incremented for every log file that is output. */
+static uint64_t log_seq = 0;
+static char log_filename[
+ /* Minimize memory bloat for non-prof builds. */
+#ifdef JEMALLOC_PROF
+ PATH_MAX +
+#endif
+ 1];
+
+/* Timestamp for most recent call to log_start(). */
+static nstime_t log_start_timestamp = NSTIME_ZERO_INITIALIZER;
+
+/* Increment these when adding to the log_bt and log_thr linked lists. */
+static size_t log_bt_index = 0;
+static size_t log_thr_index = 0;
+
+/* Linked list node definitions. These are only used in prof.c. */
+typedef struct prof_bt_node_s prof_bt_node_t;
+
+struct prof_bt_node_s {
+ prof_bt_node_t *next;
+ size_t index;
+ prof_bt_t bt;
+ /* Variable size backtrace vector pointed to by bt. */
+ void *vec[1];
+};
+
+typedef struct prof_thr_node_s prof_thr_node_t;
+
+struct prof_thr_node_s {
+ prof_thr_node_t *next;
+ size_t index;
+ uint64_t thr_uid;
+ /* Variable size based on thr_name_sz. */
+ char name[1];
+};
+
+typedef struct prof_alloc_node_s prof_alloc_node_t;
+
+/* This is output when logging sampled allocations. */
+struct prof_alloc_node_s {
+ prof_alloc_node_t *next;
+ /* Indices into an array of thread data. */
+ size_t alloc_thr_ind;
+ size_t free_thr_ind;
+
+ /* Indices into an array of backtraces. */
+ size_t alloc_bt_ind;
+ size_t free_bt_ind;
+
+ uint64_t alloc_time_ns;
+ uint64_t free_time_ns;
+
+ size_t usize;
+};
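The trailing vec[1] and name[1] members are pre-C99 flexible arrays: each node is over-allocated so its payload sits inline after the header. A hypothetical helper showing the sizing idiom the call sites below use:

    static size_t
    prof_bt_node_size(const prof_bt_t *bt) {
    	/* Header bytes up to vec, plus one pointer per backtrace frame. */
    	return offsetof(prof_bt_node_t, vec) + bt->len * sizeof(void *);
    }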
+
+/*
+ * Created on the first call to prof_log_start and deleted on prof_log_stop.
+ * These are the backtraces and threads that have already been logged by an
+ * allocation.
+ */
+static bool log_tables_initialized = false;
+static ckh_t log_bt_node_set;
+static ckh_t log_thr_node_set;
+
+/* Store linked lists for logged data. */
+static prof_bt_node_t *log_bt_first = NULL;
+static prof_bt_node_t *log_bt_last = NULL;
+static prof_thr_node_t *log_thr_first = NULL;
+static prof_thr_node_t *log_thr_last = NULL;
+static prof_alloc_node_t *log_alloc_first = NULL;
+static prof_alloc_node_t *log_alloc_last = NULL;
+
+/* Protects the prof_logging_state and any log_{...} variable. */
+static malloc_mutex_t log_mtx;
+
/*
* Table of mutexes that are shared among gctx's. These are leaf locks, so
* there is no problem with using them for more than one gctx at the same time.
@@ -145,6 +241,12 @@ static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata,
bool even_if_attached);
static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name);
+/* Hashtable functions for log_bt_node_set and log_thr_node_set. */
+static void prof_thr_node_hash(const void *key, size_t r_hash[2]);
+static bool prof_thr_node_keycomp(const void *k1, const void *k2);
+static void prof_bt_node_hash(const void *key, size_t r_hash[2]);
+static bool prof_bt_node_keycomp(const void *k1, const void *k2);
+
/******************************************************************************/
/* Red-black trees. */
@@ -242,6 +344,12 @@ prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
prof_tctx_t *tctx) {
prof_tctx_set(tsdn, ptr, usize, NULL, tctx);
+ /*
+ * Get the current time and set this in the extent_t. We'll read this
+ * when free() is called.
+ */
+ nstime_t t = NSTIME_ZERO_INITIALIZER;
+ nstime_update(&t);
+ prof_alloc_time_set(tsdn, ptr, NULL, t);
+
malloc_mutex_lock(tsdn, tctx->tdata->lock);
tctx->cnts.curobjs++;
tctx->cnts.curbytes += usize;
@@ -253,14 +361,174 @@ prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
malloc_mutex_unlock(tsdn, tctx->tdata->lock);
}
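The timestamp recorded above follows the usual nstime pattern, in sketch form:

    nstime_t t = NSTIME_ZERO_INITIALIZER; /* zeroed timestamp      */
    nstime_update(&t);                    /* load the current time */
    uint64_t ns = nstime_ns(&t);          /* read it back as ns    */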
+static size_t
+prof_log_bt_index(tsd_t *tsd, prof_bt_t *bt) {
+ assert(prof_logging_state == prof_logging_state_started);
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx);
+
+ prof_bt_node_t dummy_node;
+ dummy_node.bt = *bt;
+ prof_bt_node_t *node;
+
+ /* See if this backtrace is already cached in the table. */
+ if (ckh_search(&log_bt_node_set, (void *)(&dummy_node),
+ (void **)(&node), NULL)) {
+ size_t sz = offsetof(prof_bt_node_t, vec) +
+ (bt->len * sizeof(void *));
+ prof_bt_node_t *new_node = (prof_bt_node_t *)
+ iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL,
+ true, arena_get(TSDN_NULL, 0, true), true);
+ if (log_bt_first == NULL) {
+ log_bt_first = new_node;
+ log_bt_last = new_node;
+ } else {
+ log_bt_last->next = new_node;
+ log_bt_last = new_node;
+ }
+
+ new_node->next = NULL;
+ new_node->index = log_bt_index;
+ /*
+ * Copy the backtrace: bt is inside a tdata or gctx, which
+ * might die before prof_log_stop is called.
+ */
+ new_node->bt.len = bt->len;
+ memcpy(new_node->vec, bt->vec, bt->len * sizeof(void *));
+ new_node->bt.vec = new_node->vec;
+
+ log_bt_index++;
+ ckh_insert(tsd, &log_bt_node_set, (void *)new_node, NULL);
+ return new_node->index;
+ } else {
+ return node->index;
+ }
+}
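One subtlety in the lookup above: ckh_search() returns true when the key is absent, so the first branch is the miss path that builds, appends, and inserts a new node. The pattern in skeleton form (names abbreviated):

    prof_bt_node_t *node;
    if (ckh_search(&set, (void *)&dummy_node, (void **)&node, NULL)) {
    	/* Miss: allocate new_node, append it to the linked list... */
    	ckh_insert(tsd, &set, (void *)new_node, NULL);
    	return new_node->index;
    }
    /* Hit: node now points at the cached entry. */
    return node->index;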
+static size_t
+prof_log_thr_index(tsd_t *tsd, uint64_t thr_uid, const char *name) {
+ assert(prof_logging_state == prof_logging_state_started);
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx);
+
+ prof_thr_node_t dummy_node;
+ dummy_node.thr_uid = thr_uid;
+ prof_thr_node_t *node;
+
+ /* See if this thread is already cached in the table. */
+ if (ckh_search(&log_thr_node_set, (void *)(&dummy_node),
+ (void **)(&node), NULL)) {
+ size_t sz = offsetof(prof_thr_node_t, name) + strlen(name) + 1;
+ prof_thr_node_t *new_node = (prof_thr_node_t *)
+ iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL,
+ true, arena_get(TSDN_NULL, 0, true), true);
+ if (log_thr_first == NULL) {
+ log_thr_first = new_node;
+ log_thr_last = new_node;
+ } else {
+ log_thr_last->next = new_node;
+ log_thr_last = new_node;
+ }
+
+ new_node->next = NULL;
+ new_node->index = log_thr_index;
+ new_node->thr_uid = thr_uid;
+ strcpy(new_node->name, name);
+
+ log_thr_index++;
+ ckh_insert(tsd, &log_thr_node_set, (void *)new_node, NULL);
+ return new_node->index;
+ } else {
+ return node->index;
+ }
+}
+
+static void
+prof_try_log(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
+
+ prof_tdata_t *cons_tdata = prof_tdata_get(tsd, false);
+ if (cons_tdata == NULL) {
+ /*
+ * We decide not to log these allocations. cons_tdata will be
+ * NULL only when the current thread is in a weird state (e.g.
+ * it's being destroyed).
+ */
+ return;
+ }
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &log_mtx);
+
+ if (prof_logging_state != prof_logging_state_started) {
+ goto label_done;
+ }
+
+ if (!log_tables_initialized) {
+ bool err1 = ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS,
+ prof_bt_node_hash, prof_bt_node_keycomp);
+ bool err2 = ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS,
+ prof_thr_node_hash, prof_thr_node_keycomp);
+ if (err1 || err2) {
+ goto label_done;
+ }
+ log_tables_initialized = true;
+ }
+
+ nstime_t alloc_time = prof_alloc_time_get(tsd_tsdn(tsd), ptr,
+ (alloc_ctx_t *)NULL);
+ nstime_t free_time = NSTIME_ZERO_INITIALIZER;
+ nstime_update(&free_time);
+
+ size_t sz = sizeof(prof_alloc_node_t);
+ prof_alloc_node_t *new_node = (prof_alloc_node_t *)
+ iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, true,
+ arena_get(TSDN_NULL, 0, true), true);
+
+ const char *prod_thr_name = (tctx->tdata->thread_name == NULL)?
+ "" : tctx->tdata->thread_name;
+ const char *cons_thr_name = prof_thread_name_get(tsd);
+
+ prof_bt_t bt;
+ /* Initialize the backtrace, using the buffer in tdata to store it. */
+ bt_init(&bt, cons_tdata->vec);
+ prof_backtrace(&bt);
+ prof_bt_t *cons_bt = &bt;
+
+ /* We haven't destroyed tctx yet, so gctx should be good to read. */
+ prof_bt_t *prod_bt = &tctx->gctx->bt;
+
+ new_node->next = NULL;
+ new_node->alloc_thr_ind = prof_log_thr_index(tsd, tctx->tdata->thr_uid,
+ prod_thr_name);
+ new_node->free_thr_ind = prof_log_thr_index(tsd, cons_tdata->thr_uid,
+ cons_thr_name);
+ new_node->alloc_bt_ind = prof_log_bt_index(tsd, prod_bt);
+ new_node->free_bt_ind = prof_log_bt_index(tsd, cons_bt);
+ new_node->alloc_time_ns = nstime_ns(&alloc_time);
+ new_node->free_time_ns = nstime_ns(&free_time);
+ new_node->usize = usize;
+
+ if (log_alloc_first == NULL) {
+ log_alloc_first = new_node;
+ log_alloc_last = new_node;
+ } else {
+ log_alloc_last->next = new_node;
+ log_alloc_last = new_node;
+ }
+
+label_done:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &log_mtx);
+}
+
void
-prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) {
+prof_free_sampled_object(tsd_t *tsd, const void *ptr, size_t usize,
+ prof_tctx_t *tctx) {
malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
+
assert(tctx->cnts.curobjs > 0);
assert(tctx->cnts.curbytes >= usize);
tctx->cnts.curobjs--;
tctx->cnts.curbytes -= usize;
+ prof_try_log(tsd, ptr, usize, tctx);
+
if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) {
prof_tctx_destroy(tsd, tctx);
} else {
@@ -871,15 +1139,12 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
void
prof_sample_threshold_update(prof_tdata_t *tdata) {
#ifdef JEMALLOC_PROF
- uint64_t r;
- double u;
-
if (!config_prof) {
return;
}
if (lg_prof_sample == 0) {
- tdata->bytes_until_sample = 0;
+ tsd_bytes_until_sample_set(tsd_fetch(), 0);
return;
}
@@ -901,11 +1166,16 @@ prof_sample_threshold_update(prof_tdata_t *tdata) {
* pp 500
* (http://luc.devroye.org/rnbookindex.html)
*/
- r = prng_lg_range_u64(&tdata->prng_state, 53);
- u = (double)r * (1.0/9007199254740992.0L);
- tdata->bytes_until_sample = (uint64_t)(log(u) /
+ uint64_t r = prng_lg_range_u64(&tdata->prng_state, 53);
+ double u = (double)r * (1.0/9007199254740992.0L);
+ uint64_t bytes_until_sample = (uint64_t)(log(u) /
log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
+ (uint64_t)1U;
+ if (bytes_until_sample > SSIZE_MAX) {
+ bytes_until_sample = SSIZE_MAX;
+ }
+ tsd_bytes_until_sample_set(tsd_fetch(), bytes_until_sample);
+
#endif
}
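The rewritten block keeps the same inversion-method derivation: with U uniform on (0,1) and p = 1/2^lg_prof_sample, floor(log(U)/log(1-p)) + 1 is geometrically distributed with mean about 2^lg_prof_sample bytes. A standalone sketch of the computation (hypothetical helper, not part of this diff):

    #include <math.h>
    #include <stdint.h>

    static uint64_t
    geometric_threshold(double u, unsigned lg_sample) {
    	/* u in (0, 1); p is the per-byte sampling probability. */
    	double p = 1.0 / (double)((uint64_t)1 << lg_sample);
    	return (uint64_t)(log(u) / log(1.0 - p)) + 1;
    }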
@@ -1022,7 +1292,7 @@ prof_dump_write(bool propagate_err, const char *s) {
}
}
- if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
+ if (prof_dump_buf_end + slen - i <= PROF_DUMP_BUFSIZE) {
/* Finish writing. */
n = slen - i;
} else {
@@ -1033,6 +1303,7 @@ prof_dump_write(bool propagate_err, const char *s) {
prof_dump_buf_end += n;
i += n;
}
+ assert(i == slen);
return false;
}
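The corrected bound matters once i > 0: the old test ignored the bytes already copied, so after a partial copy the else branch could pick n = PROF_DUMP_BUFSIZE - prof_dump_buf_end, which may exceed the slen - i bytes actually remaining and read past the end of s. Comparing the remainder, plus the new assert, rules that out. The loop reduced to a model (not the actual jemalloc code):

    size_t i = 0;
    while (i < slen) {
    	if (buf_end == PROF_DUMP_BUFSIZE) {
    		flush();                        /* drain the buffer */
    	}
    	size_t room = PROF_DUMP_BUFSIZE - buf_end;
    	size_t n = (slen - i <= room) ? slen - i : room;
    	memcpy(&buf[buf_end], &s[i], n);        /* never reads past s + slen */
    	buf_end += n;
    	i += n;
    }
    assert(i == slen);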
@@ -1887,6 +2158,33 @@ prof_bt_keycomp(const void *k1, const void *k2) {
return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
}
+static void
+prof_bt_node_hash(const void *key, size_t r_hash[2]) {
+ const prof_bt_node_t *bt_node = (prof_bt_node_t *)key;
+ prof_bt_hash((void *)(&bt_node->bt), r_hash);
+}
+
+static bool
+prof_bt_node_keycomp(const void *k1, const void *k2) {
+ const prof_bt_node_t *bt_node1 = (prof_bt_node_t *)k1;
+ const prof_bt_node_t *bt_node2 = (prof_bt_node_t *)k2;
+ return prof_bt_keycomp((void *)(&bt_node1->bt),
+ (void *)(&bt_node2->bt));
+}
+
+static void
+prof_thr_node_hash(const void *key, size_t r_hash[2]) {
+ const prof_thr_node_t *thr_node = (prof_thr_node_t *)key;
+ hash(&thr_node->thr_uid, sizeof(uint64_t), 0x94122f35U, r_hash);
+}
+
+static bool
+prof_thr_node_keycomp(const void *k1, const void *k2) {
+ const prof_thr_node_t *thr_node1 = (prof_thr_node_t *)k1;
+ const prof_thr_node_t *thr_node2 = (prof_thr_node_t *)k2;
+ return thr_node1->thr_uid == thr_node2->thr_uid;
+}
+
static uint64_t
prof_thr_uid_alloc(tsdn_t *tsdn) {
uint64_t thr_uid;
@@ -2119,6 +2417,368 @@ prof_active_set(tsdn_t *tsdn, bool active) {
return prof_active_old;
}
+#ifdef JEMALLOC_JET
+size_t
+prof_log_bt_count(void) {
+ size_t cnt = 0;
+ prof_bt_node_t *node = log_bt_first;
+ while (node != NULL) {
+ cnt++;
+ node = node->next;
+ }
+ return cnt;
+}
+
+size_t
+prof_log_alloc_count(void) {
+ size_t cnt = 0;
+ prof_alloc_node_t *node = log_alloc_first;
+ while (node != NULL) {
+ cnt++;
+ node = node->next;
+ }
+ return cnt;
+}
+
+size_t
+prof_log_thr_count(void) {
+ size_t cnt = 0;
+ prof_thr_node_t *node = log_thr_first;
+ while (node != NULL) {
+ cnt++;
+ node = node->next;
+ }
+ return cnt;
+}
+
+bool
+prof_log_is_logging(void) {
+ return prof_logging_state == prof_logging_state_started;
+}
+
+bool
+prof_log_rep_check(void) {
+ if (prof_logging_state == prof_logging_state_stopped
+ && log_tables_initialized) {
+ return true;
+ }
+
+ if (log_bt_last != NULL && log_bt_last->next != NULL) {
+ return true;
+ }
+ if (log_thr_last != NULL && log_thr_last->next != NULL) {
+ return true;
+ }
+ if (log_alloc_last != NULL && log_alloc_last->next != NULL) {
+ return true;
+ }
+
+ size_t bt_count = prof_log_bt_count();
+ size_t thr_count = prof_log_thr_count();
+ size_t alloc_count = prof_log_alloc_count();
+
+ if (prof_logging_state == prof_logging_state_stopped) {
+ if (bt_count != 0 || thr_count != 0 || alloc_count != 0) {
+ return true;
+ }
+ }
+
+ prof_alloc_node_t *node = log_alloc_first;
+ while (node != NULL) {
+ if (node->alloc_bt_ind >= bt_count) {
+ return true;
+ }
+ if (node->free_bt_ind >= bt_count) {
+ return true;
+ }
+ if (node->alloc_thr_ind >= thr_count) {
+ return true;
+ }
+ if (node->free_thr_ind >= thr_count) {
+ return true;
+ }
+ if (node->alloc_time_ns > node->free_time_ns) {
+ return true;
+ }
+ node = node->next;
+ }
+
+ return false;
+}
+
+void
+prof_log_dummy_set(bool new_value) {
+ prof_log_dummy = new_value;
+}
+#endif
+
+bool
+prof_log_start(tsdn_t *tsdn, const char *filename) {
+ if (!opt_prof || !prof_booted) {
+ return true;
+ }
+
+ bool ret = false;
+ size_t buf_size = PATH_MAX + 1;
+
+ malloc_mutex_lock(tsdn, &log_mtx);
+
+ if (prof_logging_state != prof_logging_state_stopped) {
+ ret = true;
+ } else if (filename == NULL) {
+ /* Make default name. */
+ malloc_snprintf(log_filename, buf_size, "%s.%d.%"FMTu64".json",
+ opt_prof_prefix, prof_getpid(), log_seq);
+ log_seq++;
+ prof_logging_state = prof_logging_state_started;
+ } else if (strlen(filename) >= buf_size) {
+ ret = true;
+ } else {
+ strcpy(log_filename, filename);
+ prof_logging_state = prof_logging_state_started;
+ }
+
+ if (!ret) {
+ nstime_update(&log_start_timestamp);
+ }
+
+ malloc_mutex_unlock(tsdn, &log_mtx);
+
+ return ret;
+}
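When filename is NULL, the name comes from opt_prof_prefix, the pid, and the sequence counter; with the default "jeprof" prefix, pid 12345, and the first log of the run, for example:

    jeprof.12345.0.json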
+
+/* Used as an atexit function to stop logging on exit. */
+static void
+prof_log_stop_final(void) {
+ tsd_t *tsd = tsd_fetch();
+ prof_log_stop(tsd_tsdn(tsd));
+}
+
+struct prof_emitter_cb_arg_s {
+ int fd;
+ ssize_t ret;
+};
+
+static void
+prof_emitter_write_cb(void *opaque, const char *to_write) {
+ struct prof_emitter_cb_arg_s *arg =
+ (struct prof_emitter_cb_arg_s *)opaque;
+ size_t bytes = strlen(to_write);
+#ifdef JEMALLOC_JET
+ if (prof_log_dummy) {
+ return;
+ }
+#endif
+ arg->ret = write(arg->fd, (void *)to_write, bytes);
+}
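Note that the callback issues a single write(2) and records only that call's return value, so a short write is not retried. A more defensive variant (a sketch, not what this diff does) would loop until the string drains:

    static void
    prof_emitter_write_cb_retrying(void *opaque, const char *to_write) {
    	struct prof_emitter_cb_arg_s *arg =
    	    (struct prof_emitter_cb_arg_s *)opaque;
    	size_t off = 0, len = strlen(to_write);
    	while (off < len) {
    		ssize_t n = write(arg->fd, to_write + off, len - off);
    		if (n < 0) {
    			arg->ret = n;	/* record the error and stop */
    			return;
    		}
    		off += (size_t)n;
    	}
    	arg->ret = (ssize_t)len;
    }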
+
+/*
+ * prof_log_emit_{...} goes through the appropriate linked list, emitting each
+ * node to the json and deallocating it.
+ */
+static void
+prof_log_emit_threads(tsd_t *tsd, emitter_t *emitter) {
+ emitter_json_array_kv_begin(emitter, "threads");
+ prof_thr_node_t *thr_node = log_thr_first;
+ prof_thr_node_t *thr_old_node;
+ while (thr_node != NULL) {
+ emitter_json_object_begin(emitter);
+
+ emitter_json_kv(emitter, "thr_uid", emitter_type_uint64,
+ &thr_node->thr_uid);
+
+ char *thr_name = thr_node->name;
+
+ emitter_json_kv(emitter, "thr_name", emitter_type_string,
+ &thr_name);
+
+ emitter_json_object_end(emitter);
+ thr_old_node = thr_node;
+ thr_node = thr_node->next;
+ idalloc(tsd, thr_old_node);
+ }
+ emitter_json_array_end(emitter);
+}
+
+static void
+prof_log_emit_traces(tsd_t *tsd, emitter_t *emitter) {
+ emitter_json_array_kv_begin(emitter, "stack_traces");
+ prof_bt_node_t *bt_node = log_bt_first;
+ prof_bt_node_t *bt_old_node;
+ /*
+ * Calculate the buffer size we need: two characters per pointer byte,
+ * two for the "0x" prefix, and one for the terminating '\0'.
+ */
+ char buf[2 * sizeof(intptr_t) + 3];
+ size_t buf_sz = sizeof(buf);
+ while (bt_node != NULL) {
+ emitter_json_array_begin(emitter);
+ size_t i;
+ for (i = 0; i < bt_node->bt.len; i++) {
+ malloc_snprintf(buf, buf_sz, "%p", bt_node->bt.vec[i]);
+ char *trace_str = buf;
+ emitter_json_value(emitter, emitter_type_string,
+ &trace_str);
+ }
+ emitter_json_array_end(emitter);
+
+ bt_old_node = bt_node;
+ bt_node = bt_node->next;
+ idalloc(tsd, bt_old_node);
+ }
+ emitter_json_array_end(emitter);
+}
+
+static void
+prof_log_emit_allocs(tsd_t *tsd, emitter_t *emitter) {
+ emitter_json_array_kv_begin(emitter, "allocations");
+ prof_alloc_node_t *alloc_node = log_alloc_first;
+ prof_alloc_node_t *alloc_old_node;
+ while (alloc_node != NULL) {
+ emitter_json_object_begin(emitter);
+
+ emitter_json_kv(emitter, "alloc_thread", emitter_type_size,
+ &alloc_node->alloc_thr_ind);
+
+ emitter_json_kv(emitter, "free_thread", emitter_type_size,
+ &alloc_node->free_thr_ind);
+
+ emitter_json_kv(emitter, "alloc_trace", emitter_type_size,
+ &alloc_node->alloc_bt_ind);
+
+ emitter_json_kv(emitter, "free_trace", emitter_type_size,
+ &alloc_node->free_bt_ind);
+
+ emitter_json_kv(emitter, "alloc_timestamp",
+ emitter_type_uint64, &alloc_node->alloc_time_ns);
+
+ emitter_json_kv(emitter, "free_timestamp", emitter_type_uint64,
+ &alloc_node->free_time_ns);
+
+ emitter_json_kv(emitter, "usize", emitter_type_uint64,
+ &alloc_node->usize);
+
+ emitter_json_object_end(emitter);
+
+ alloc_old_node = alloc_node;
+ alloc_node = alloc_node->next;
+ idalloc(tsd, alloc_old_node);
+ }
+ emitter_json_array_end(emitter);
+}
+
+static void
+prof_log_emit_metadata(emitter_t *emitter) {
+ emitter_json_object_kv_begin(emitter, "info");
+
+ nstime_t now = NSTIME_ZERO_INITIALIZER;
+
+ nstime_update(&now);
+ uint64_t ns = nstime_ns(&now) - nstime_ns(&log_start_timestamp);
+ emitter_json_kv(emitter, "duration", emitter_type_uint64, &ns);
+
+ char *vers = JEMALLOC_VERSION;
+ emitter_json_kv(emitter, "version",
+ emitter_type_string, &vers);
+
+ emitter_json_kv(emitter, "lg_sample_rate",
+ emitter_type_int, &lg_prof_sample);
+
+ int pid = prof_getpid();
+ emitter_json_kv(emitter, "pid", emitter_type_int, &pid);
+
+ emitter_json_object_end(emitter);
+}
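Taken together, the four emitters produce a file of roughly this shape (illustrative values; allocation records reference the thread and stack-trace arrays by index):

    {
      "info": {"duration": 1234567890, "version": "...",
               "lg_sample_rate": 19, "pid": 12345},
      "threads": [{"thr_uid": 0, "thr_name": "main"}],
      "stack_traces": [["0x55e5f2bc1a10", "0x55e5f2b9d2f4"]],
      "allocations": [{"alloc_thread": 0, "free_thread": 0,
                       "alloc_trace": 0, "free_trace": 0,
                       "alloc_timestamp": 100, "free_timestamp": 200,
                       "usize": 4096}]
    }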
+
+bool
+prof_log_stop(tsdn_t *tsdn) {
+ if (!opt_prof || !prof_booted) {
+ return true;
+ }
+
+ tsd_t *tsd = tsdn_tsd(tsdn);
+ malloc_mutex_lock(tsdn, &log_mtx);
+
+ if (prof_logging_state != prof_logging_state_started) {
+ malloc_mutex_unlock(tsdn, &log_mtx);
+ return true;
+ }
+
+ /*
+ * Set the state to dumping. We'll set it to stopped when we're done.
+ * Since other threads won't be able to start/stop/log when the state is
+ * dumping, we don't have to hold the lock during the whole method.
+ */
+ prof_logging_state = prof_logging_state_dumping;
+ malloc_mutex_unlock(tsdn, &log_mtx);
+
+ emitter_t emitter;
+
+ /* Create a file. */
+
+ int fd;
+#ifdef JEMALLOC_JET
+ if (prof_log_dummy) {
+ fd = 0;
+ } else {
+ fd = creat(log_filename, 0644);
+ }
+#else
+ fd = creat(log_filename, 0644);
+#endif
+
+ if (fd == -1) {
+ malloc_printf("<jemalloc>: creat() for log file \"%s\" "
+ "failed with %d\n", log_filename, errno);
+ if (opt_abort) {
+ abort();
+ }
+ return true;
+ }
+
+ /* Emit to json. */
+ struct prof_emitter_cb_arg_s arg;
+ arg.fd = fd;
+ emitter_init(&emitter, emitter_output_json, &prof_emitter_write_cb,
+ (void *)(&arg));
+
+ emitter_begin(&emitter);
+ prof_log_emit_metadata(&emitter);
+ prof_log_emit_threads(tsd, &emitter);
+ prof_log_emit_traces(tsd, &emitter);
+ prof_log_emit_allocs(tsd, &emitter);
+ emitter_end(&emitter);
+
+ /* Reset global state. */
+ if (log_tables_initialized) {
+ ckh_delete(tsd, &log_bt_node_set);
+ ckh_delete(tsd, &log_thr_node_set);
+ }
+ log_tables_initialized = false;
+ log_bt_index = 0;
+ log_thr_index = 0;
+ log_bt_first = NULL;
+ log_bt_last = NULL;
+ log_thr_first = NULL;
+ log_thr_last = NULL;
+ log_alloc_first = NULL;
+ log_alloc_last = NULL;
+
+ malloc_mutex_lock(tsdn, &log_mtx);
+ prof_logging_state = prof_logging_state_stopped;
+ malloc_mutex_unlock(tsdn, &log_mtx);
+
+#ifdef JEMALLOC_JET
+ if (prof_log_dummy) {
+ return false;
+ }
+#endif
+ return close(fd);
+}
+
const char *
prof_thread_name_get(tsd_t *tsd) {
prof_tdata_t *tdata;
@@ -2355,6 +3015,35 @@ prof_boot2(tsd_t *tsd) {
}
}
+ if (opt_prof_log) {
+ prof_log_start(tsd_tsdn(tsd), NULL);
+ }
+
+ if (atexit(prof_log_stop_final) != 0) {
+ malloc_write("<jemalloc>: Error in atexit() "
+ "for logging\n");
+ if (opt_abort) {
+ abort();
+ }
+ }
+
+ if (malloc_mutex_init(&log_mtx, "prof_log",
+ WITNESS_RANK_PROF_LOG, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+
+ if (ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS,
+ prof_bt_node_hash, prof_bt_node_keycomp)) {
+ return true;
+ }
+
+ if (ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS,
+ prof_thr_node_hash, prof_thr_node_keycomp)) {
+ return true;
+ }
+
+ log_tables_initialized = true;
+
gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
b0get(), PROF_NCTX_LOCKS * sizeof(malloc_mutex_t),
CACHELINE);
@@ -2382,16 +3071,14 @@ prof_boot2(tsd_t *tsd) {
return true;
}
}
- }
-
#ifdef JEMALLOC_PROF_LIBGCC
- /*
- * Cause the backtracing machinery to allocate its internal state
- * before enabling profiling.
- */
- _Unwind_Backtrace(prof_unwind_init_callback, NULL);
+ /*
+ * Cause the backtracing machinery to allocate its internal
+ * state before enabling profiling.
+ */
+ _Unwind_Backtrace(prof_unwind_init_callback, NULL);
#endif
-
+ }
prof_booted = true;
return false;