summaryrefslogtreecommitdiff
path: root/test/analyze
diff options
context:
space:
mode:
Diffstat (limited to 'test/analyze')
-rw-r--r--test/analyze/prof_bias.c60
-rw-r--r--test/analyze/rand.c276
-rw-r--r--test/analyze/sizes.c53
3 files changed, 389 insertions, 0 deletions
diff --git a/test/analyze/prof_bias.c b/test/analyze/prof_bias.c
new file mode 100644
index 000000000..a96ca942a
--- /dev/null
+++ b/test/analyze/prof_bias.c
@@ -0,0 +1,60 @@
+#include "test/jemalloc_test.h"
+
+/*
+ * This is a helper utility, only meant to be run manually (and, for example,
+ * doesn't check for failures, try to skip execution in non-prof modes, etc.).
+ * It runs, allocates objects of two different sizes from the same stack trace,
+ * and exits.
+ *
+ * The idea is that some human operator will run it like:
+ * MALLOC_CONF="prof:true,prof_final:true" test/analyze/prof_bias
+ * and manually inspect the results.
+ *
+ * The results should be:
+ * jeprof --text test/analyze/prof_bias --inuse_space jeprof.<pid>.0.f.heap:
+ * around 1024 MB
+ * jeprof --text test/analyze/prof_bias --inuse_objects jeprof.<pid>.0.f.heap:
+ * around 33554448 = 16 + 32 * 1024 * 1024
+ *
+ * And, if prof_accum is on:
+ * jeprof --text test/analyze/prof_bias --alloc_space jeprof.<pid>.0.f.heap:
+ * around 2048 MB
+ * jeprof --text test/analyze/prof_bias --alloc_objects jeprof.<pid>.0.f.heap:
+ * around 67108896 = 2 * (16 + 32 * 1024 * 1024)
+ */
+
/*
 * Deterministic replacement for the profiling backtrace hook: always reports
 * the same four fake stack frames, so that every sampled allocation in this
 * program collapses into a single stack trace in the heap profile.
 *
 * Fix: the original wrote 4 entries into vec unconditionally, ignoring
 * max_len; honor the caller-provided capacity to avoid overflowing vec.
 */
static void
mock_backtrace(void **vec, unsigned *len, unsigned max_len) {
	static void *const frames[] =
	    {(void *)0x111, (void *)0x222, (void *)0x333, (void *)0x444};
	unsigned n = sizeof(frames) / sizeof(frames[0]);
	if (n > max_len) {
		n = max_len;
	}
	*len = n;
	for (unsigned i = 0; i < n; i++) {
		vec[i] = frames[i];
	}
}
+
/*
 * Perform cnt allocations of sz bytes each; when do_frees is true, release
 * each object immediately after checking the allocation succeeded (so only
 * the allocation events, not the live memory, are recorded by the profiler).
 */
static void
do_allocs(size_t sz, size_t cnt, bool do_frees) {
	for (size_t done = 0; done != cnt; ++done) {
		void *ptr = mallocx(sz, 0);
		assert_ptr_not_null(ptr, "Unexpected mallocx failure");
		if (!do_frees) {
			continue;
		}
		dallocx(ptr, 0);
	}
}
+
+int
+main(void) {
+ size_t lg_prof_sample_local = 19;
+ int err = mallctl("prof.reset", NULL, NULL,
+ (void *)&lg_prof_sample_local, sizeof(lg_prof_sample_local));
+ assert(err == 0);
+
+ prof_backtrace_hook_set(mock_backtrace);
+ do_allocs(16, 32 * 1024 * 1024, /* do_frees */ true);
+ do_allocs(32 * 1024* 1024, 16, /* do_frees */ true);
+ do_allocs(16, 32 * 1024 * 1024, /* do_frees */ false);
+ do_allocs(32 * 1024* 1024, 16, /* do_frees */ false);
+
+ return 0;
+}
diff --git a/test/analyze/rand.c b/test/analyze/rand.c
new file mode 100644
index 000000000..bb20b06ec
--- /dev/null
+++ b/test/analyze/rand.c
@@ -0,0 +1,276 @@
+#include "test/jemalloc_test.h"
+
+/******************************************************************************/
+
+/*
+ * General purpose tool for examining random number distributions.
+ *
+ * Input -
+ * (a) a random number generator, and
+ * (b) the buckets:
+ * (1) number of buckets,
+ * (2) width of each bucket, in log scale,
+ * (3) expected mean and stddev of the count of random numbers in each
+ * bucket, and
+ * (c) number of iterations to invoke the generator.
+ *
+ * The program generates the specified amount of random numbers, and assess how
+ * well they conform to the expectations: for each bucket, output -
+ * (a) the (given) expected mean and stddev,
+ * (b) the actual count and any interesting level of deviation:
+ * (1) ~68% buckets should show no interesting deviation, meaning a
+ * deviation less than stddev from the expectation;
+ * (2) ~27% buckets should show '+' / '-', meaning a deviation in the range
+ * of [stddev, 2 * stddev) from the expectation;
+ * (3) ~4% buckets should show '++' / '--', meaning a deviation in the
+ * range of [2 * stddev, 3 * stddev) from the expectation; and
+ * (4) less than 0.3% buckets should show more than two '+'s / '-'s.
+ *
+ * Technical remarks:
+ * (a) The generator is expected to output uint64_t numbers, so you might need
+ * to define a wrapper.
+ * (b) The buckets must be of equal width and the lowest bucket starts at
+ * [0, 2^lg_bucket_width - 1).
+ * (c) Any generated number >= n_bucket * 2^lg_bucket_width will be counted
+ * towards the last bucket; the expected mean and stddev provided should
+ * also reflect that.
+ * (d) The number of iterations is advised to be determined so that the bucket
+ * with the minimal expected proportion gets a sufficient count.
+ */
+
/* Set every element of the n-element array a to the value k. */
static void
fill(size_t a[], const size_t n, const size_t k) {
	size_t remaining = n;
	while (remaining > 0) {
		a[--remaining] = k;
	}
}
+
/*
 * Draw n_iter numbers from gen(opaque) and histogram them into buckets of
 * width 2^lg_bucket_width; any draw at or beyond the last bucket's start is
 * counted in the last bucket (see file-header remark (c)).
 */
static void
collect_buckets(uint64_t (*gen)(void *), void *opaque, size_t buckets[],
    const size_t n_bucket, const size_t lg_bucket_width, const size_t n_iter) {
	const uint64_t last = (uint64_t)n_bucket - 1;
	for (size_t iter = n_iter; iter != 0; --iter) {
		uint64_t id = gen(opaque) >> lg_bucket_width;
		buckets[id < last ? (size_t)id : (size_t)last]++;
	}
}
+
/*
 * For each bucket, print the expected mean/stddev and the observed count,
 * followed by a run of '-' or '+' marks: one mark per whole stddev of
 * deviation below or above the mean (see the file header for how many marked
 * buckets to expect).
 *
 * NOTE(review): assumes stddevs[i] > 0 for all i; a zero stddev would make
 * the deviation loops below never terminate.
 */
static void
print_buckets(const size_t buckets[], const size_t means[],
    const size_t stddevs[], const size_t n_bucket) {
	for (size_t i = 0; i < n_bucket; ++i) {
		malloc_printf("%zu:\tmean = %zu,\tstddev = %zu,\tbucket = %zu",
		    i, means[i], stddevs[i], buckets[i]);

		/* Make sure there's no overflow. */
		assert(buckets[i] + stddevs[i] >= stddevs[i]);
		assert(means[i] + stddevs[i] >= stddevs[i]);

		/* Count is at least one stddev below the mean: emit '-'s. */
		if (buckets[i] + stddevs[i] <= means[i]) {
			malloc_write(" ");
			for (size_t t = means[i] - buckets[i]; t >= stddevs[i];
			    t -= stddevs[i]) {
				malloc_write("-");
			}
		/* Count is at least one stddev above the mean: emit '+'s. */
		} else if (buckets[i] >= means[i] + stddevs[i]) {
			malloc_write(" ");
			for (size_t t = buckets[i] - means[i]; t >= stddevs[i];
			    t -= stddevs[i]) {
				malloc_write("+");
			}
		}
		malloc_write("\n");
	}
}
+
/*
 * Run three independent rounds of the experiment: reset the histogram,
 * collect n_iter draws from gen, and print each bucket's observed count
 * against its expected mean/stddev.
 */
static void
bucket_analysis(uint64_t (*gen)(void *), void *opaque, size_t buckets[],
    const size_t means[], const size_t stddevs[], const size_t n_bucket,
    const size_t lg_bucket_width, const size_t n_iter) {
	size_t round = 0;
	do {
		++round;
		malloc_printf("round %zu\n", round);
		fill(buckets, n_bucket, 0);
		collect_buckets(gen, opaque, buckets, n_bucket,
		    lg_bucket_width, n_iter);
		print_buckets(buckets, means, stddevs, n_bucket);
	} while (round < 3);
}
+
+/* (Recommended) minimal bucket mean. */
+#define MIN_BUCKET_MEAN 10000
+
+/******************************************************************************/
+
+/* Uniform random number generator. */
+
/* Opaque argument for uniform_gen(): prng state plus the output range. */
typedef struct uniform_gen_arg_s uniform_gen_arg_t;
struct uniform_gen_arg_s {
	uint64_t state;           /* prng state; advanced on every draw. */
	const unsigned lg_range;  /* draws lie in [0, 2^lg_range). */
};
+
+static uint64_t
+uniform_gen(void *opaque) {
+ uniform_gen_arg_t *arg = (uniform_gen_arg_t *)opaque;
+ return prng_lg_range_u64(&arg->state, arg->lg_range);
+}
+
TEST_BEGIN(test_uniform) {
#define LG_N_BUCKET 5
#define N_BUCKET (1 << LG_N_BUCKET)

/* Ceiling integer division. */
#define QUOTIENT_CEIL(n, d) (((n) - 1) / (d) + 1)

	/* Examine the low 25 bits of the prng output. */
	const unsigned lg_range_test = 25;

	/*
	 * Mathematical tricks to guarantee that both mean and stddev are
	 * integers, and that the minimal bucket mean is at least
	 * MIN_BUCKET_MEAN.
	 *
	 * With n_iter uniform draws over N_BUCKET buckets, each bucket count
	 * is ~Binomial(n_iter, 1/N_BUCKET), i.e. mean = n_iter / N_BUCKET and
	 * stddev = sqrt(n_iter * (N_BUCKET - 1)) / N_BUCKET.  Picking a power
	 * of two q with q^2 >= ceil(MIN_BUCKET_MEAN / (N_BUCKET *
	 * (N_BUCKET - 1))) and setting stddev = (N_BUCKET - 1) * q,
	 * mean = N_BUCKET * stddev * q, n_iter = N_BUCKET * mean makes both
	 * quantities integral and mean >= MIN_BUCKET_MEAN.
	 */
	const size_t q = 1 << QUOTIENT_CEIL(LG_CEIL(QUOTIENT_CEIL(
	    MIN_BUCKET_MEAN, N_BUCKET * (N_BUCKET - 1))), 2);
	const size_t stddev = (N_BUCKET - 1) * q;
	const size_t mean = N_BUCKET * stddev * q;
	const size_t n_iter = N_BUCKET * mean;

	/* Uniform distribution: every bucket shares the same mean / stddev. */
	size_t means[N_BUCKET];
	fill(means, N_BUCKET, mean);
	size_t stddevs[N_BUCKET];
	fill(stddevs, N_BUCKET, stddev);

	/* Seed the prng state with the (arbitrary) address of a local. */
	uniform_gen_arg_t arg = {(uint64_t)(uintptr_t)&lg_range_test,
	    lg_range_test};
	size_t buckets[N_BUCKET];
	/* The draw range must be at least as wide as the bucket index. */
	assert_zu_ge(lg_range_test, LG_N_BUCKET, "");
	const size_t lg_bucket_width = lg_range_test - LG_N_BUCKET;

	bucket_analysis(uniform_gen, &arg, buckets, means, stddevs,
	    N_BUCKET, lg_bucket_width, n_iter);

#undef LG_N_BUCKET
#undef N_BUCKET
#undef QUOTIENT_CEIL
}
TEST_END
+
+/******************************************************************************/
+
+/* Geometric random number generator; compiled only when prof is on. */
+
+#ifdef JEMALLOC_PROF
+
+/*
+ * Fills geometric proportions and returns the minimal proportion. See
+ * comments in test_prof_sample for explanations for n_divide.
+ */
+static double
+fill_geometric_proportions(double proportions[], const size_t n_bucket,
+ const size_t n_divide) {
+ assert(n_bucket > 0);
+ assert(n_divide > 0);
+ double x = 1.;
+ for (size_t i = 0; i < n_bucket; ++i) {
+ if (i == n_bucket - 1) {
+ proportions[i] = x;
+ } else {
+ double y = x * exp(-1. / n_divide);
+ proportions[i] = x - y;
+ x = y;
+ }
+ }
+ /*
+ * The minimal proportion is the smaller one of the last two
+ * proportions for geometric distribution.
+ */
+ double min_proportion = proportions[n_bucket - 1];
+ if (n_bucket >= 2 && proportions[n_bucket - 2] < min_proportion) {
+ min_proportion = proportions[n_bucket - 2];
+ }
+ return min_proportion;
+}
+
/* Round a nonnegative double to the nearest size_t, halves rounding up. */
static size_t
round_to_nearest(const double x) {
	double shifted = x + .5;
	return (size_t)shifted;
}
+
+static void
+fill_references(size_t means[], size_t stddevs[], const double proportions[],
+ const size_t n_bucket, const size_t n_iter) {
+ for (size_t i = 0; i < n_bucket; ++i) {
+ double x = n_iter * proportions[i];
+ means[i] = round_to_nearest(x);
+ stddevs[i] = round_to_nearest(sqrt(x * (1. - proportions[i])));
+ }
+}
+
+static uint64_t
+prof_sample_gen(void *opaque) {
+ return prof_sample_new_event_wait((tsd_t *)opaque) - 1;
+}
+
+#endif /* JEMALLOC_PROF */
+
TEST_BEGIN(test_prof_sample) {
	test_skip_if(!config_prof);
#ifdef JEMALLOC_PROF

/* Number of divisions within [0, mean). */
#define LG_N_DIVIDE 3
#define N_DIVIDE (1 << LG_N_DIVIDE)

/* Coverage of buckets in terms of multiples of mean. */
#define LG_N_MULTIPLY 2
#define N_GEO_BUCKET (N_DIVIDE << LG_N_MULTIPLY)

	test_skip_if(!opt_prof);

	/* Temporarily shrink the average sample interval to 2^25 bytes. */
	size_t lg_prof_sample_test = 25;

	/* Save the current setting so it can be restored at the end. */
	size_t lg_prof_sample_orig = lg_prof_sample;
	assert_d_eq(mallctl("prof.reset", NULL, NULL, &lg_prof_sample_test,
	    sizeof(size_t)), 0, "");
	malloc_printf("lg_prof_sample = %zu\n", lg_prof_sample_test);

	/*
	 * N_GEO_BUCKET buckets of width mean / N_DIVIDE cover
	 * [0, 2^LG_N_MULTIPLY * mean); one extra bucket absorbs the
	 * geometric tail beyond that.
	 */
	double proportions[N_GEO_BUCKET + 1];
	const double min_proportion = fill_geometric_proportions(proportions,
	    N_GEO_BUCKET + 1, N_DIVIDE);
	/* Size n_iter so even the scarcest bucket expects MIN_BUCKET_MEAN. */
	const size_t n_iter = round_to_nearest(MIN_BUCKET_MEAN /
	    min_proportion);
	size_t means[N_GEO_BUCKET + 1];
	size_t stddevs[N_GEO_BUCKET + 1];
	fill_references(means, stddevs, proportions, N_GEO_BUCKET + 1, n_iter);

	tsd_t *tsd = tsd_fetch();
	assert_ptr_not_null(tsd, "");
	size_t buckets[N_GEO_BUCKET + 1];
	/*
	 * NOTE(review): reads the global lg_prof_sample here — assumes
	 * prof.reset above already updated it to lg_prof_sample_test; confirm.
	 */
	assert_zu_ge(lg_prof_sample, LG_N_DIVIDE, "");
	const size_t lg_bucket_width = lg_prof_sample - LG_N_DIVIDE;

	bucket_analysis(prof_sample_gen, tsd, buckets, means, stddevs,
	    N_GEO_BUCKET + 1, lg_bucket_width, n_iter);

	/* Restore the original sample interval. */
	assert_d_eq(mallctl("prof.reset", NULL, NULL, &lg_prof_sample_orig,
	    sizeof(size_t)), 0, "");

#undef LG_N_DIVIDE
#undef N_DIVIDE
#undef LG_N_MULTIPLY
#undef N_GEO_BUCKET

#endif /* JEMALLOC_PROF */
}
TEST_END
+
+/******************************************************************************/
+
/* Run both distribution checks through the standard test harness. */
int
main(void) {
	return test_no_reentrancy(
	    test_uniform,
	    test_prof_sample);
}
diff --git a/test/analyze/sizes.c b/test/analyze/sizes.c
new file mode 100644
index 000000000..44c9de5ed
--- /dev/null
+++ b/test/analyze/sizes.c
@@ -0,0 +1,53 @@
+#include "test/jemalloc_test.h"
+
+#include <stdio.h>
+
+/*
+ * Print the sizes of various important core data structures. OK, I guess this
+ * isn't really a "stress" test, but it does give useful information about
+ * low-level performance characteristics, as the other things in this directory
+ * do.
+ */
+
/*
 * Print "name: <size>" with the size scaled into the largest unit (KB, MB,
 * ...) in which it is at least 1; exact byte counts below 1024 are printed
 * as integers.
 */
static void
do_print(const char *name, size_t sz_bytes) {
	static const char *const units[] = {"bytes", "KB", "MB", "GB", "TB",
	    "PB", "EB", "ZB"};
	const size_t n_units = sizeof(units) / sizeof(units[0]);

	double scaled = sz_bytes;
	size_t unit_ind = 0;
	/* Scale down by 1024 until under the threshold or out of units. */
	while (scaled >= 1024 && unit_ind < n_units - 1) {
		scaled /= 1024;
		unit_ind++;
	}
	if (unit_ind == 0) {
		printf("%-20s: %zu bytes\n", name, sz_bytes);
	} else {
		printf("%-20s: %f %s\n", name, scaled, units[unit_ind]);
	}
}
+
/*
 * Print sizeof() for each core jemalloc data structure; main() implicitly
 * returns 0 (C99+).
 */
int
main() {
/* Print one line per type, labeled with the type's own name. */
#define P(type) \
	do_print(#type, sizeof(type))
	P(arena_t);
	P(arena_stats_t);
	P(base_t);
	P(decay_t);
	P(edata_t);
	P(ecache_t);
	P(eset_t);
	P(malloc_mutex_t);
	P(prof_tctx_t);
	P(prof_gctx_t);
	P(prof_tdata_t);
	P(rtree_t);
	P(rtree_leaf_elm_t);
	P(slab_data_t);
	P(tcache_t);
	P(tcache_slow_t);
	P(tsd_t);
#undef P
}