Diffstat (limited to 'deps/jemalloc/test/unit')
-rw-r--r--  deps/jemalloc/test/unit/SFMT.c  32
-rw-r--r--  deps/jemalloc/test/unit/a0.c  2
-rw-r--r--  deps/jemalloc/test/unit/arena_decay.c  436
-rw-r--r--  deps/jemalloc/test/unit/arena_decay.sh  3
-rw-r--r--  deps/jemalloc/test/unit/arena_reset.c  90
-rw-r--r--  deps/jemalloc/test/unit/atomic.c  46
-rw-r--r--  deps/jemalloc/test/unit/background_thread.c  37
-rw-r--r--  deps/jemalloc/test/unit/background_thread_enable.c  55
-rw-r--r--  deps/jemalloc/test/unit/base.c  109
-rw-r--r--  deps/jemalloc/test/unit/batch_alloc.c  189
-rw-r--r--  deps/jemalloc/test/unit/batch_alloc.sh  3
-rw-r--r--  deps/jemalloc/test/unit/batch_alloc_prof.c  1
-rw-r--r--  deps/jemalloc/test/unit/batch_alloc_prof.sh  3
-rw-r--r--  deps/jemalloc/test/unit/binshard.c  38
-rw-r--r--  deps/jemalloc/test/unit/bit_util.c  256
-rw-r--r--  deps/jemalloc/test/unit/bitmap.c  194
-rw-r--r--  deps/jemalloc/test/unit/buf_writer.c  196
-rw-r--r--  deps/jemalloc/test/unit/cache_bin.c  384
-rw-r--r--  deps/jemalloc/test/unit/ckh.c  74
-rw-r--r--  deps/jemalloc/test/unit/counter.c  80
-rw-r--r--  deps/jemalloc/test/unit/decay.c  784
-rw-r--r--  deps/jemalloc/test/unit/decay.sh  3
-rw-r--r--  deps/jemalloc/test/unit/div.c  2
-rw-r--r--  deps/jemalloc/test/unit/double_free.c  77
-rw-r--r--  deps/jemalloc/test/unit/double_free.h  1
-rw-r--r--  deps/jemalloc/test/unit/edata_cache.c  226
-rw-r--r--  deps/jemalloc/test/unit/emitter.c  180
-rw-r--r--  deps/jemalloc/test/unit/extent_quantize.c  50
-rw-r--r--  deps/jemalloc/test/unit/fb.c  954
-rw-r--r--  deps/jemalloc/test/unit/fork.c  8
-rw-r--r--  deps/jemalloc/test/unit/fxp.c  394
-rw-r--r--  deps/jemalloc/test/unit/hash.c  2
-rw-r--r--  deps/jemalloc/test/unit/hook.c  348
-rw-r--r--  deps/jemalloc/test/unit/hpa.c  459
-rw-r--r--  deps/jemalloc/test/unit/hpa_background_thread.c  188
-rw-r--r--  deps/jemalloc/test/unit/hpa_background_thread.sh  4
-rw-r--r--  deps/jemalloc/test/unit/hpdata.c  244
-rw-r--r--  deps/jemalloc/test/unit/huge.c  58
-rw-r--r--  deps/jemalloc/test/unit/inspect.c (renamed from deps/jemalloc/test/unit/extent_util.c)  87
-rw-r--r--  deps/jemalloc/test/unit/inspect.sh  5
-rw-r--r--  deps/jemalloc/test/unit/junk.c  282
-rw-r--r--  deps/jemalloc/test/unit/log.c  31
-rw-r--r--  deps/jemalloc/test/unit/mallctl.c  684
-rw-r--r--  deps/jemalloc/test/unit/malloc_conf_2.c  29
-rw-r--r--  deps/jemalloc/test/unit/malloc_conf_2.sh  1
-rw-r--r--  deps/jemalloc/test/unit/malloc_io.c  28
-rw-r--r--  deps/jemalloc/test/unit/math.c  12
-rw-r--r--  deps/jemalloc/test/unit/mpsc_queue.c  304
-rw-r--r--  deps/jemalloc/test/unit/mq.c  18
-rw-r--r--  deps/jemalloc/test/unit/mtx.c  6
-rw-r--r--  deps/jemalloc/test/unit/nstime.c  119
-rw-r--r--  deps/jemalloc/test/unit/oversize_threshold.c  133
-rw-r--r--  deps/jemalloc/test/unit/pa.c  126
-rw-r--r--  deps/jemalloc/test/unit/pack.c  20
-rw-r--r--  deps/jemalloc/test/unit/pages.c  6
-rw-r--r--  deps/jemalloc/test/unit/peak.c  47
-rw-r--r--  deps/jemalloc/test/unit/ph.c  110
-rw-r--r--  deps/jemalloc/test/unit/prng.c  226
-rw-r--r--  deps/jemalloc/test/unit/prof_accum.c  13
-rw-r--r--  deps/jemalloc/test/unit/prof_active.c  16
-rw-r--r--  deps/jemalloc/test/unit/prof_active.sh  2
-rw-r--r--  deps/jemalloc/test/unit/prof_gdump.c  29
-rw-r--r--  deps/jemalloc/test/unit/prof_hook.c  169
-rw-r--r--  deps/jemalloc/test/unit/prof_hook.sh  6
-rw-r--r--  deps/jemalloc/test/unit/prof_idump.c  25
-rw-r--r--  deps/jemalloc/test/unit/prof_log.c  65
-rw-r--r--  deps/jemalloc/test/unit/prof_log.sh  2
-rw-r--r--  deps/jemalloc/test/unit/prof_mdump.c  216
-rw-r--r--  deps/jemalloc/test/unit/prof_mdump.sh  6
-rw-r--r--  deps/jemalloc/test/unit/prof_recent.c  678
-rw-r--r--  deps/jemalloc/test/unit/prof_recent.sh  5
-rw-r--r--  deps/jemalloc/test/unit/prof_reset.c  112
-rw-r--r--  deps/jemalloc/test/unit/prof_reset.sh  2
-rw-r--r--  deps/jemalloc/test/unit/prof_stats.c  151
-rw-r--r--  deps/jemalloc/test/unit/prof_stats.sh  5
-rw-r--r--  deps/jemalloc/test/unit/prof_sys_thread_name.c  77
-rw-r--r--  deps/jemalloc/test/unit/prof_sys_thread_name.sh  5
-rw-r--r--  deps/jemalloc/test/unit/prof_tctx.c  38
-rw-r--r--  deps/jemalloc/test/unit/prof_tctx.sh  2
-rw-r--r--  deps/jemalloc/test/unit/prof_thread_name.c  22
-rw-r--r--  deps/jemalloc/test/unit/psset.c  748
-rw-r--r--  deps/jemalloc/test/unit/ql.c  139
-rw-r--r--  deps/jemalloc/test/unit/qr.c  44
-rw-r--r--  deps/jemalloc/test/unit/rb.c  794
-rw-r--r--  deps/jemalloc/test/unit/retained.c  60
-rw-r--r--  deps/jemalloc/test/unit/rtree.c  321
-rw-r--r--  deps/jemalloc/test/unit/safety_check.c  33
-rw-r--r--  deps/jemalloc/test/unit/safety_check.sh  2
-rw-r--r--  deps/jemalloc/test/unit/san.c  207
-rw-r--r--  deps/jemalloc/test/unit/san.sh  3
-rw-r--r--  deps/jemalloc/test/unit/san_bump.c  111
-rw-r--r--  deps/jemalloc/test/unit/sc.c  6
-rw-r--r--  deps/jemalloc/test/unit/sec.c  634
-rw-r--r--  deps/jemalloc/test/unit/seq.c  12
-rw-r--r--  deps/jemalloc/test/unit/size_check.c  79
-rw-r--r--  deps/jemalloc/test/unit/size_check.sh  5
-rw-r--r--  deps/jemalloc/test/unit/size_classes.c  88
-rw-r--r--  deps/jemalloc/test/unit/slab.c  22
-rw-r--r--  deps/jemalloc/test/unit/smoothstep.c  12
-rw-r--r--  deps/jemalloc/test/unit/stats.c  229
-rw-r--r--  deps/jemalloc/test/unit/stats_print.c  26
-rw-r--r--  deps/jemalloc/test/unit/sz.c  66
-rw-r--r--  deps/jemalloc/test/unit/tcache_max.c  175
-rw-r--r--  deps/jemalloc/test/unit/tcache_max.sh  3
-rw-r--r--  deps/jemalloc/test/unit/test_hooks.c  10
-rw-r--r--  deps/jemalloc/test/unit/thread_event.c  34
-rw-r--r--  deps/jemalloc/test/unit/thread_event.sh  5
-rw-r--r--  deps/jemalloc/test/unit/ticker.c  65
-rw-r--r--  deps/jemalloc/test/unit/tsd.c  55
-rw-r--r--  deps/jemalloc/test/unit/uaf.c  262
-rw-r--r--  deps/jemalloc/test/unit/witness.c  32
-rw-r--r--  deps/jemalloc/test/unit/zero.c  10
-rw-r--r--  deps/jemalloc/test/unit/zero_realloc_abort.c  26
-rw-r--r--  deps/jemalloc/test/unit/zero_realloc_abort.sh  3
-rw-r--r--  deps/jemalloc/test/unit/zero_realloc_alloc.c  48
-rw-r--r--  deps/jemalloc/test/unit/zero_realloc_alloc.sh  3
-rw-r--r--  deps/jemalloc/test/unit/zero_realloc_free.c  33
-rw-r--r--  deps/jemalloc/test/unit/zero_realloc_free.sh  3
-rw-r--r--  deps/jemalloc/test/unit/zero_reallocs.c  40
-rw-r--r--  deps/jemalloc/test/unit/zero_reallocs.sh  3
120 files changed, 12085 insertions, 2496 deletions
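
The bulk of this diff is a mechanical rename of the test harness's assert_* macros to expect_*. A minimal sketch of how the two families differ, assuming the usual jemalloc test-framework semantics in which assert_* aborts the current test on failure while expect_* records the failure and lets the test continue; the test name below is hypothetical:

#include "test/jemalloc_test.h"

TEST_BEGIN(test_expect_vs_assert) {
	void *p = malloc(1);
	/* expect_*: on failure, mark the test failed but keep running. */
	expect_ptr_not_null(p, "malloc(1) should not fail");
	/* assert_*: on failure, abort the test immediately. */
	assert_ptr_not_null(p, "cannot continue without an allocation");
	free(p);
}
TEST_END

int
main(void) {
	return test(test_expect_vs_assert);
}

Under that assumption, the rename lets a converted test report several failed checks in one run instead of stopping at the first.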
diff --git a/deps/jemalloc/test/unit/SFMT.c b/deps/jemalloc/test/unit/SFMT.c
index 1fc8cf1bc..b9f85dd92 100644
--- a/deps/jemalloc/test/unit/SFMT.c
+++ b/deps/jemalloc/test/unit/SFMT.c
@@ -1456,7 +1456,7 @@ TEST_BEGIN(test_gen_rand_32) {
uint32_t r32;
sfmt_t *ctx;
- assert_d_le(get_min_array_size32(), BLOCK_SIZE,
+ expect_d_le(get_min_array_size32(), BLOCK_SIZE,
"Array size too small");
ctx = init_gen_rand(1234);
fill_array32(ctx, array32, BLOCK_SIZE);
@@ -1466,16 +1466,16 @@ TEST_BEGIN(test_gen_rand_32) {
ctx = init_gen_rand(1234);
for (i = 0; i < BLOCK_SIZE; i++) {
if (i < COUNT_1) {
- assert_u32_eq(array32[i], init_gen_rand_32_expected[i],
+ expect_u32_eq(array32[i], init_gen_rand_32_expected[i],
"Output mismatch for i=%d", i);
}
r32 = gen_rand32(ctx);
- assert_u32_eq(r32, array32[i],
+ expect_u32_eq(r32, array32[i],
"Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32);
}
for (i = 0; i < COUNT_2; i++) {
r32 = gen_rand32(ctx);
- assert_u32_eq(r32, array32_2[i],
+ expect_u32_eq(r32, array32_2[i],
"Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i],
r32);
}
@@ -1491,7 +1491,7 @@ TEST_BEGIN(test_by_array_32) {
uint32_t r32;
sfmt_t *ctx;
- assert_d_le(get_min_array_size32(), BLOCK_SIZE,
+ expect_d_le(get_min_array_size32(), BLOCK_SIZE,
"Array size too small");
ctx = init_by_array(ini, 4);
fill_array32(ctx, array32, BLOCK_SIZE);
@@ -1501,16 +1501,16 @@ TEST_BEGIN(test_by_array_32) {
ctx = init_by_array(ini, 4);
for (i = 0; i < BLOCK_SIZE; i++) {
if (i < COUNT_1) {
- assert_u32_eq(array32[i], init_by_array_32_expected[i],
+ expect_u32_eq(array32[i], init_by_array_32_expected[i],
"Output mismatch for i=%d", i);
}
r32 = gen_rand32(ctx);
- assert_u32_eq(r32, array32[i],
+ expect_u32_eq(r32, array32[i],
"Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32);
}
for (i = 0; i < COUNT_2; i++) {
r32 = gen_rand32(ctx);
- assert_u32_eq(r32, array32_2[i],
+ expect_u32_eq(r32, array32_2[i],
"Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i],
r32);
}
@@ -1525,7 +1525,7 @@ TEST_BEGIN(test_gen_rand_64) {
uint64_t r;
sfmt_t *ctx;
- assert_d_le(get_min_array_size64(), BLOCK_SIZE64,
+ expect_d_le(get_min_array_size64(), BLOCK_SIZE64,
"Array size too small");
ctx = init_gen_rand(4321);
fill_array64(ctx, array64, BLOCK_SIZE64);
@@ -1535,17 +1535,17 @@ TEST_BEGIN(test_gen_rand_64) {
ctx = init_gen_rand(4321);
for (i = 0; i < BLOCK_SIZE64; i++) {
if (i < COUNT_1) {
- assert_u64_eq(array64[i], init_gen_rand_64_expected[i],
+ expect_u64_eq(array64[i], init_gen_rand_64_expected[i],
"Output mismatch for i=%d", i);
}
r = gen_rand64(ctx);
- assert_u64_eq(r, array64[i],
+ expect_u64_eq(r, array64[i],
"Mismatch at array64[%d]=%"FMTx64", gen=%"FMTx64, i,
array64[i], r);
}
for (i = 0; i < COUNT_2; i++) {
r = gen_rand64(ctx);
- assert_u64_eq(r, array64_2[i],
+ expect_u64_eq(r, array64_2[i],
"Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64"", i,
array64_2[i], r);
}
@@ -1561,7 +1561,7 @@ TEST_BEGIN(test_by_array_64) {
uint32_t ini[] = {5, 4, 3, 2, 1};
sfmt_t *ctx;
- assert_d_le(get_min_array_size64(), BLOCK_SIZE64,
+ expect_d_le(get_min_array_size64(), BLOCK_SIZE64,
"Array size too small");
ctx = init_by_array(ini, 5);
fill_array64(ctx, array64, BLOCK_SIZE64);
@@ -1571,17 +1571,17 @@ TEST_BEGIN(test_by_array_64) {
ctx = init_by_array(ini, 5);
for (i = 0; i < BLOCK_SIZE64; i++) {
if (i < COUNT_1) {
- assert_u64_eq(array64[i], init_by_array_64_expected[i],
+ expect_u64_eq(array64[i], init_by_array_64_expected[i],
"Output mismatch for i=%d", i);
}
r = gen_rand64(ctx);
- assert_u64_eq(r, array64[i],
+ expect_u64_eq(r, array64[i],
"Mismatch at array64[%d]=%"FMTx64" gen=%"FMTx64, i,
array64[i], r);
}
for (i = 0; i < COUNT_2; i++) {
r = gen_rand64(ctx);
- assert_u64_eq(r, array64_2[i],
+ expect_u64_eq(r, array64_2[i],
"Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64, i,
array64_2[i], r);
}
diff --git a/deps/jemalloc/test/unit/a0.c b/deps/jemalloc/test/unit/a0.c
index a27ab3f42..c1be79a66 100644
--- a/deps/jemalloc/test/unit/a0.c
+++ b/deps/jemalloc/test/unit/a0.c
@@ -4,7 +4,7 @@ TEST_BEGIN(test_a0) {
void *p;
p = a0malloc(1);
- assert_ptr_not_null(p, "Unexpected a0malloc() error");
+ expect_ptr_not_null(p, "Unexpected a0malloc() error");
a0dalloc(p);
}
TEST_END
diff --git a/deps/jemalloc/test/unit/arena_decay.c b/deps/jemalloc/test/unit/arena_decay.c
new file mode 100644
index 000000000..e991f4dd1
--- /dev/null
+++ b/deps/jemalloc/test/unit/arena_decay.c
@@ -0,0 +1,436 @@
+#include "test/jemalloc_test.h"
+#include "test/arena_util.h"
+
+#include "jemalloc/internal/ticker.h"
+
+static nstime_monotonic_t *nstime_monotonic_orig;
+static nstime_update_t *nstime_update_orig;
+
+static unsigned nupdates_mock;
+static nstime_t time_mock;
+static bool monotonic_mock;
+
+static bool
+nstime_monotonic_mock(void) {
+ return monotonic_mock;
+}
+
+static void
+nstime_update_mock(nstime_t *time) {
+ nupdates_mock++;
+ if (monotonic_mock) {
+ nstime_copy(time, &time_mock);
+ }
+}
+
+TEST_BEGIN(test_decay_ticks) {
+ test_skip_if(is_background_thread_enabled());
+ test_skip_if(opt_hpa);
+
+ ticker_geom_t *decay_ticker;
+ unsigned tick0, tick1, arena_ind;
+ size_t sz, large0;
+ void *p;
+
+ sz = sizeof(size_t);
+ expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
+ 0), 0, "Unexpected mallctl failure");
+
+ /* Set up a manually managed arena for test. */
+ arena_ind = do_arena_create(0, 0);
+
+ /* Migrate to the new arena, and get the ticker. */
+ unsigned old_arena_ind;
+ size_t sz_arena_ind = sizeof(old_arena_ind);
+ expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind,
+ &sz_arena_ind, (void *)&arena_ind, sizeof(arena_ind)), 0,
+ "Unexpected mallctl() failure");
+ decay_ticker = tsd_arena_decay_tickerp_get(tsd_fetch());
+ expect_ptr_not_null(decay_ticker,
+ "Unexpected failure getting decay ticker");
+
+ /*
+ * Test the standard APIs using a large size class, since we can't
+ * control tcache interactions for small size classes (except by
+ * completely disabling tcache for the entire test program).
+ */
+
+ /* malloc(). */
+ tick0 = ticker_geom_read(decay_ticker);
+ p = malloc(large0);
+ expect_ptr_not_null(p, "Unexpected malloc() failure");
+ tick1 = ticker_geom_read(decay_ticker);
+ expect_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()");
+ /* free(). */
+ tick0 = ticker_geom_read(decay_ticker);
+ free(p);
+ tick1 = ticker_geom_read(decay_ticker);
+ expect_u32_ne(tick1, tick0, "Expected ticker to tick during free()");
+
+ /* calloc(). */
+ tick0 = ticker_geom_read(decay_ticker);
+ p = calloc(1, large0);
+ expect_ptr_not_null(p, "Unexpected calloc() failure");
+ tick1 = ticker_geom_read(decay_ticker);
+ expect_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()");
+ free(p);
+
+ /* posix_memalign(). */
+ tick0 = ticker_geom_read(decay_ticker);
+ expect_d_eq(posix_memalign(&p, sizeof(size_t), large0), 0,
+ "Unexpected posix_memalign() failure");
+ tick1 = ticker_geom_read(decay_ticker);
+ expect_u32_ne(tick1, tick0,
+ "Expected ticker to tick during posix_memalign()");
+ free(p);
+
+ /* aligned_alloc(). */
+ tick0 = ticker_geom_read(decay_ticker);
+ p = aligned_alloc(sizeof(size_t), large0);
+ expect_ptr_not_null(p, "Unexpected aligned_alloc() failure");
+ tick1 = ticker_geom_read(decay_ticker);
+ expect_u32_ne(tick1, tick0,
+ "Expected ticker to tick during aligned_alloc()");
+ free(p);
+
+ /* realloc(). */
+ /* Allocate. */
+ tick0 = ticker_geom_read(decay_ticker);
+ p = realloc(NULL, large0);
+ expect_ptr_not_null(p, "Unexpected realloc() failure");
+ tick1 = ticker_geom_read(decay_ticker);
+ expect_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
+ /* Reallocate. */
+ tick0 = ticker_geom_read(decay_ticker);
+ p = realloc(p, large0);
+ expect_ptr_not_null(p, "Unexpected realloc() failure");
+ tick1 = ticker_geom_read(decay_ticker);
+ expect_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
+ /* Deallocate. */
+ tick0 = ticker_geom_read(decay_ticker);
+ realloc(p, 0);
+ tick1 = ticker_geom_read(decay_ticker);
+ expect_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
+
+ /*
+ * Test the *allocx() APIs using large and small size classes, with
+ * tcache explicitly disabled.
+ */
+ {
+ unsigned i;
+ size_t allocx_sizes[2];
+ allocx_sizes[0] = large0;
+ allocx_sizes[1] = 1;
+
+ for (i = 0; i < sizeof(allocx_sizes) / sizeof(size_t); i++) {
+ sz = allocx_sizes[i];
+
+ /* mallocx(). */
+ tick0 = ticker_geom_read(decay_ticker);
+ p = mallocx(sz, MALLOCX_TCACHE_NONE);
+ expect_ptr_not_null(p, "Unexpected mallocx() failure");
+ tick1 = ticker_geom_read(decay_ticker);
+ expect_u32_ne(tick1, tick0,
+ "Expected ticker to tick during mallocx() (sz=%zu)",
+ sz);
+ /* rallocx(). */
+ tick0 = ticker_geom_read(decay_ticker);
+ p = rallocx(p, sz, MALLOCX_TCACHE_NONE);
+ expect_ptr_not_null(p, "Unexpected rallocx() failure");
+ tick1 = ticker_geom_read(decay_ticker);
+ expect_u32_ne(tick1, tick0,
+ "Expected ticker to tick during rallocx() (sz=%zu)",
+ sz);
+ /* xallocx(). */
+ tick0 = ticker_geom_read(decay_ticker);
+ xallocx(p, sz, 0, MALLOCX_TCACHE_NONE);
+ tick1 = ticker_geom_read(decay_ticker);
+ expect_u32_ne(tick1, tick0,
+ "Expected ticker to tick during xallocx() (sz=%zu)",
+ sz);
+ /* dallocx(). */
+ tick0 = ticker_geom_read(decay_ticker);
+ dallocx(p, MALLOCX_TCACHE_NONE);
+ tick1 = ticker_geom_read(decay_ticker);
+ expect_u32_ne(tick1, tick0,
+ "Expected ticker to tick during dallocx() (sz=%zu)",
+ sz);
+ /* sdallocx(). */
+ p = mallocx(sz, MALLOCX_TCACHE_NONE);
+ expect_ptr_not_null(p, "Unexpected mallocx() failure");
+ tick0 = ticker_geom_read(decay_ticker);
+ sdallocx(p, sz, MALLOCX_TCACHE_NONE);
+ tick1 = ticker_geom_read(decay_ticker);
+ expect_u32_ne(tick1, tick0,
+ "Expected ticker to tick during sdallocx() "
+ "(sz=%zu)", sz);
+ }
+ }
+
+ /*
+ * Test tcache fill/flush interactions for large and small size classes,
+ * using an explicit tcache.
+ */
+ unsigned tcache_ind, i;
+ size_t tcache_sizes[2];
+ tcache_sizes[0] = large0;
+ tcache_sizes[1] = 1;
+
+ size_t tcache_max, sz_tcache_max;
+ sz_tcache_max = sizeof(tcache_max);
+ expect_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max,
+ &sz_tcache_max, NULL, 0), 0, "Unexpected mallctl() failure");
+
+ sz = sizeof(unsigned);
+ expect_d_eq(mallctl("tcache.create", (void *)&tcache_ind, &sz,
+ NULL, 0), 0, "Unexpected mallctl failure");
+
+ for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) {
+ sz = tcache_sizes[i];
+
+ /* tcache fill. */
+ tick0 = ticker_geom_read(decay_ticker);
+ p = mallocx(sz, MALLOCX_TCACHE(tcache_ind));
+ expect_ptr_not_null(p, "Unexpected mallocx() failure");
+ tick1 = ticker_geom_read(decay_ticker);
+ expect_u32_ne(tick1, tick0,
+ "Expected ticker to tick during tcache fill "
+ "(sz=%zu)", sz);
+ /* tcache flush. */
+ dallocx(p, MALLOCX_TCACHE(tcache_ind));
+ tick0 = ticker_geom_read(decay_ticker);
+ expect_d_eq(mallctl("tcache.flush", NULL, NULL,
+ (void *)&tcache_ind, sizeof(unsigned)), 0,
+ "Unexpected mallctl failure");
+ tick1 = ticker_geom_read(decay_ticker);
+
+ /* Will only tick if it's in tcache. */
+ expect_u32_ne(tick1, tick0,
+ "Expected ticker to tick during tcache flush (sz=%zu)", sz);
+ }
+}
+TEST_END
+
+static void
+decay_ticker_helper(unsigned arena_ind, int flags, bool dirty, ssize_t dt,
+ uint64_t dirty_npurge0, uint64_t muzzy_npurge0, bool terminate_asap) {
+#define NINTERVALS 101
+ nstime_t time, update_interval, decay_ms, deadline;
+
+ nstime_init_update(&time);
+
+ nstime_init2(&decay_ms, dt, 0);
+ nstime_copy(&deadline, &time);
+ nstime_add(&deadline, &decay_ms);
+
+ nstime_init2(&update_interval, dt, 0);
+ nstime_idivide(&update_interval, NINTERVALS);
+
+ /*
+ * Keep q's slab from being deallocated during the looping below. If a
+ * cached slab were to repeatedly come and go during looping, it could
+ * prevent the decay backlog ever becoming empty.
+ */
+ void *p = do_mallocx(1, flags);
+ uint64_t dirty_npurge1, muzzy_npurge1;
+ do {
+ for (unsigned i = 0; i < ARENA_DECAY_NTICKS_PER_UPDATE / 2;
+ i++) {
+ void *q = do_mallocx(1, flags);
+ dallocx(q, flags);
+ }
+ dirty_npurge1 = get_arena_dirty_npurge(arena_ind);
+ muzzy_npurge1 = get_arena_muzzy_npurge(arena_ind);
+
+ nstime_add(&time_mock, &update_interval);
+ nstime_update(&time);
+ } while (nstime_compare(&time, &deadline) <= 0 && ((dirty_npurge1 ==
+ dirty_npurge0 && muzzy_npurge1 == muzzy_npurge0) ||
+ !terminate_asap));
+ dallocx(p, flags);
+
+ if (config_stats) {
+ expect_u64_gt(dirty_npurge1 + muzzy_npurge1, dirty_npurge0 +
+ muzzy_npurge0, "Expected purging to occur");
+ }
+#undef NINTERVALS
+}
+
+TEST_BEGIN(test_decay_ticker) {
+ test_skip_if(is_background_thread_enabled());
+ test_skip_if(opt_hpa);
+#define NPS 2048
+ ssize_t ddt = opt_dirty_decay_ms;
+ ssize_t mdt = opt_muzzy_decay_ms;
+ unsigned arena_ind = do_arena_create(ddt, mdt);
+ int flags = (MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
+ void *ps[NPS];
+
+ /*
+ * Allocate a bunch of large objects, pause the clock, deallocate every
+ * other object (to fragment virtual memory), restore the clock, then
+ * [md]allocx() in a tight loop while advancing time rapidly to verify
+ * the ticker triggers purging.
+ */
+ size_t large;
+ size_t sz = sizeof(size_t);
+ expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large, &sz, NULL,
+ 0), 0, "Unexpected mallctl failure");
+
+ do_purge(arena_ind);
+ uint64_t dirty_npurge0 = get_arena_dirty_npurge(arena_ind);
+ uint64_t muzzy_npurge0 = get_arena_muzzy_npurge(arena_ind);
+
+ for (unsigned i = 0; i < NPS; i++) {
+ ps[i] = do_mallocx(large, flags);
+ }
+
+ nupdates_mock = 0;
+ nstime_init_update(&time_mock);
+ monotonic_mock = true;
+
+ nstime_monotonic_orig = nstime_monotonic;
+ nstime_update_orig = nstime_update;
+ nstime_monotonic = nstime_monotonic_mock;
+ nstime_update = nstime_update_mock;
+
+ for (unsigned i = 0; i < NPS; i += 2) {
+ dallocx(ps[i], flags);
+ unsigned nupdates0 = nupdates_mock;
+ do_decay(arena_ind);
+ expect_u_gt(nupdates_mock, nupdates0,
+ "Expected nstime_update() to be called");
+ }
+
+ decay_ticker_helper(arena_ind, flags, true, ddt, dirty_npurge0,
+ muzzy_npurge0, true);
+ decay_ticker_helper(arena_ind, flags, false, ddt+mdt, dirty_npurge0,
+ muzzy_npurge0, false);
+
+ do_arena_destroy(arena_ind);
+
+ nstime_monotonic = nstime_monotonic_orig;
+ nstime_update = nstime_update_orig;
+#undef NPS
+}
+TEST_END
+
+TEST_BEGIN(test_decay_nonmonotonic) {
+ test_skip_if(is_background_thread_enabled());
+ test_skip_if(opt_hpa);
+#define NPS (SMOOTHSTEP_NSTEPS + 1)
+ int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
+ void *ps[NPS];
+ uint64_t npurge0 = 0;
+ uint64_t npurge1 = 0;
+ size_t sz, large0;
+ unsigned i, nupdates0;
+
+ sz = sizeof(size_t);
+ expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
+ 0), 0, "Unexpected mallctl failure");
+
+ expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ "Unexpected mallctl failure");
+ do_epoch();
+ sz = sizeof(uint64_t);
+ npurge0 = get_arena_npurge(0);
+
+ nupdates_mock = 0;
+ nstime_init_update(&time_mock);
+ monotonic_mock = false;
+
+ nstime_monotonic_orig = nstime_monotonic;
+ nstime_update_orig = nstime_update;
+ nstime_monotonic = nstime_monotonic_mock;
+ nstime_update = nstime_update_mock;
+
+ for (i = 0; i < NPS; i++) {
+ ps[i] = mallocx(large0, flags);
+ expect_ptr_not_null(ps[i], "Unexpected mallocx() failure");
+ }
+
+ for (i = 0; i < NPS; i++) {
+ dallocx(ps[i], flags);
+ nupdates0 = nupdates_mock;
+ expect_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
+ "Unexpected arena.0.decay failure");
+ expect_u_gt(nupdates_mock, nupdates0,
+ "Expected nstime_update() to be called");
+ }
+
+ do_epoch();
+ sz = sizeof(uint64_t);
+ npurge1 = get_arena_npurge(0);
+
+ if (config_stats) {
+ expect_u64_eq(npurge0, npurge1, "Unexpected purging occurred");
+ }
+
+ nstime_monotonic = nstime_monotonic_orig;
+ nstime_update = nstime_update_orig;
+#undef NPS
+}
+TEST_END
+
+TEST_BEGIN(test_decay_now) {
+ test_skip_if(is_background_thread_enabled());
+ test_skip_if(opt_hpa);
+
+ unsigned arena_ind = do_arena_create(0, 0);
+ expect_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
+ expect_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages");
+ size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2};
+ /* Verify that dirty/muzzy pages never linger after deallocation. */
+ for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
+ size_t size = sizes[i];
+ generate_dirty(arena_ind, size);
+ expect_zu_eq(get_arena_pdirty(arena_ind), 0,
+ "Unexpected dirty pages");
+ expect_zu_eq(get_arena_pmuzzy(arena_ind), 0,
+ "Unexpected muzzy pages");
+ }
+ do_arena_destroy(arena_ind);
+}
+TEST_END
+
+TEST_BEGIN(test_decay_never) {
+ test_skip_if(is_background_thread_enabled() || !config_stats);
+ test_skip_if(opt_hpa);
+
+ unsigned arena_ind = do_arena_create(-1, -1);
+ int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
+ expect_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
+ expect_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages");
+ size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2};
+ void *ptrs[sizeof(sizes)/sizeof(size_t)];
+ for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
+ ptrs[i] = do_mallocx(sizes[i], flags);
+ }
+ /* Verify that each deallocation generates additional dirty pages. */
+ size_t pdirty_prev = get_arena_pdirty(arena_ind);
+ size_t pmuzzy_prev = get_arena_pmuzzy(arena_ind);
+ expect_zu_eq(pdirty_prev, 0, "Unexpected dirty pages");
+ expect_zu_eq(pmuzzy_prev, 0, "Unexpected muzzy pages");
+ for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
+ dallocx(ptrs[i], flags);
+ size_t pdirty = get_arena_pdirty(arena_ind);
+ size_t pmuzzy = get_arena_pmuzzy(arena_ind);
+ expect_zu_gt(pdirty + (size_t)get_arena_dirty_purged(arena_ind),
+ pdirty_prev, "Expected dirty pages to increase.");
+ expect_zu_eq(pmuzzy, 0, "Unexpected muzzy pages");
+ pdirty_prev = pdirty;
+ }
+ do_arena_destroy(arena_ind);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_decay_ticks,
+ test_decay_ticker,
+ test_decay_nonmonotonic,
+ test_decay_now,
+ test_decay_never);
+}
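
The migrated decay tests above drive purging entirely through mallctl nodes (arenas.create, thread.arena, arena.<i>.decay, arena.<i>.purge). A standalone sketch of that pattern using only the public jemalloc API, with most error handling omitted:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);
	/* Create a fresh arena; decay times come from MALLOC_CONF/defaults. */
	if (mallctl("arenas.create", &arena_ind, &sz, NULL, 0) != 0) {
		return 1;
	}
	/* Allocate and free from that arena, bypassing the tcache. */
	int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
	void *p = mallocx(4096, flags);
	dallocx(p, flags);

	/* Advance decay-based purging for that arena. */
	char cmd[64];
	snprintf(cmd, sizeof(cmd), "arena.%u.decay", arena_ind);
	mallctl(cmd, NULL, NULL, NULL, 0);
	return 0;
}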
diff --git a/deps/jemalloc/test/unit/arena_decay.sh b/deps/jemalloc/test/unit/arena_decay.sh
new file mode 100644
index 000000000..52f1b2079
--- /dev/null
+++ b/deps/jemalloc/test/unit/arena_decay.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+export MALLOC_CONF="dirty_decay_ms:1000,muzzy_decay_ms:1000,tcache_max:1024"
diff --git a/deps/jemalloc/test/unit/arena_reset.c b/deps/jemalloc/test/unit/arena_reset.c
index b182f31a6..8ef0786cc 100644
--- a/deps/jemalloc/test/unit/arena_reset.c
+++ b/deps/jemalloc/test/unit/arena_reset.c
@@ -13,7 +13,7 @@ get_nsizes_impl(const char *cmd) {
size_t z;
z = sizeof(unsigned);
- assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
+ expect_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
"Unexpected mallctl(\"%s\", ...) failure", cmd);
return ret;
@@ -37,11 +37,11 @@ get_size_impl(const char *cmd, size_t ind) {
size_t miblen = 4;
z = sizeof(size_t);
- assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
+ expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
mib[2] = ind;
z = sizeof(size_t);
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
return ret;
@@ -60,35 +60,32 @@ get_large_size(size_t ind) {
/* Like ivsalloc(), but safe to call on discarded allocations. */
static size_t
vsalloc(tsdn_t *tsdn, const void *ptr) {
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
- extent_t *extent;
- szind_t szind;
- if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, false, &extent, &szind)) {
+ emap_full_alloc_ctx_t full_alloc_ctx;
+ bool missing = emap_full_alloc_ctx_try_lookup(tsdn, &arena_emap_global,
+ ptr, &full_alloc_ctx);
+ if (missing) {
return 0;
}
- if (extent == NULL) {
+ if (full_alloc_ctx.edata == NULL) {
return 0;
}
- if (extent_state_get(extent) != extent_state_active) {
+ if (edata_state_get(full_alloc_ctx.edata) != extent_state_active) {
return 0;
}
- if (szind == SC_NSIZES) {
+ if (full_alloc_ctx.szind == SC_NSIZES) {
return 0;
}
- return sz_index2size(szind);
+ return sz_index2size(full_alloc_ctx.szind);
}
static unsigned
do_arena_create(extent_hooks_t *h) {
unsigned arena_ind;
size_t sz = sizeof(unsigned);
- assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
+ expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
(void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0,
"Unexpected mallctl() failure");
return arena_ind;
@@ -108,19 +105,19 @@ do_arena_reset_pre(unsigned arena_ind, void ***ptrs, unsigned *nptrs) {
nlarge = get_nlarge() > NLARGE ? NLARGE : get_nlarge();
*nptrs = nsmall + nlarge;
*ptrs = (void **)malloc(*nptrs * sizeof(void *));
- assert_ptr_not_null(*ptrs, "Unexpected malloc() failure");
+ expect_ptr_not_null(*ptrs, "Unexpected malloc() failure");
/* Allocate objects with a wide range of sizes. */
for (i = 0; i < nsmall; i++) {
sz = get_small_size(i);
(*ptrs)[i] = mallocx(sz, flags);
- assert_ptr_not_null((*ptrs)[i],
+ expect_ptr_not_null((*ptrs)[i],
"Unexpected mallocx(%zu, %#x) failure", sz, flags);
}
for (i = 0; i < nlarge; i++) {
sz = get_large_size(i);
(*ptrs)[nsmall + i] = mallocx(sz, flags);
- assert_ptr_not_null((*ptrs)[i],
+ expect_ptr_not_null((*ptrs)[i],
"Unexpected mallocx(%zu, %#x) failure", sz, flags);
}
@@ -128,7 +125,7 @@ do_arena_reset_pre(unsigned arena_ind, void ***ptrs, unsigned *nptrs) {
/* Verify allocations. */
for (i = 0; i < *nptrs; i++) {
- assert_zu_gt(ivsalloc(tsdn, (*ptrs)[i]), 0,
+ expect_zu_gt(ivsalloc(tsdn, (*ptrs)[i]), 0,
"Allocation should have queryable size");
}
}
@@ -146,7 +143,7 @@ do_arena_reset_post(void **ptrs, unsigned nptrs, unsigned arena_ind) {
}
/* Verify allocations no longer exist. */
for (i = 0; i < nptrs; i++) {
- assert_zu_eq(vsalloc(tsdn, ptrs[i]), 0,
+ expect_zu_eq(vsalloc(tsdn, ptrs[i]), 0,
"Allocation should no longer exist");
}
if (have_background_thread) {
@@ -163,10 +160,10 @@ do_arena_reset_destroy(const char *name, unsigned arena_ind) {
size_t miblen;
miblen = sizeof(mib)/sizeof(size_t);
- assert_d_eq(mallctlnametomib(name, mib, &miblen), 0,
+ expect_d_eq(mallctlnametomib(name, mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
mib[1] = (size_t)arena_ind;
- assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
"Unexpected mallctlbymib() failure");
}
@@ -200,23 +197,23 @@ arena_i_initialized(unsigned arena_ind, bool refresh) {
if (refresh) {
uint64_t epoch = 1;
- assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
+ expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
sizeof(epoch)), 0, "Unexpected mallctl() failure");
}
miblen = sizeof(mib)/sizeof(size_t);
- assert_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0,
+ expect_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
mib[1] = (size_t)arena_ind;
sz = sizeof(initialized);
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&initialized, &sz, NULL,
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&initialized, &sz, NULL,
0), 0, "Unexpected mallctlbymib() failure");
return initialized;
}
TEST_BEGIN(test_arena_destroy_initial) {
- assert_false(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
+ expect_false(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
"Destroyed arena stats should not be initialized");
}
TEST_END
@@ -229,9 +226,9 @@ TEST_BEGIN(test_arena_destroy_hooks_default) {
arena_ind = do_arena_create(NULL);
do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
- assert_false(arena_i_initialized(arena_ind, false),
+ expect_false(arena_i_initialized(arena_ind, false),
"Arena stats should not be initialized");
- assert_true(arena_i_initialized(arena_ind, true),
+ expect_true(arena_i_initialized(arena_ind, true),
"Arena stats should be initialized");
/*
@@ -242,9 +239,9 @@ TEST_BEGIN(test_arena_destroy_hooks_default) {
do_arena_destroy(arena_ind);
- assert_false(arena_i_initialized(arena_ind, true),
+ expect_false(arena_i_initialized(arena_ind, true),
"Arena stats should not be initialized");
- assert_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
+ expect_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
"Destroyed arena stats should be initialized");
do_arena_reset_post(ptrs, nptrs, arena_ind);
@@ -252,12 +249,27 @@ TEST_BEGIN(test_arena_destroy_hooks_default) {
arena_ind_prev = arena_ind;
arena_ind = do_arena_create(NULL);
do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
- assert_u_eq(arena_ind, arena_ind_prev,
+ expect_u_eq(arena_ind, arena_ind_prev,
"Arena index should have been recycled");
do_arena_destroy(arena_ind);
do_arena_reset_post(ptrs, nptrs, arena_ind);
do_arena_destroy(arena_ind_another);
+
+ /* Try arena.create with custom hooks. */
+ size_t sz = sizeof(extent_hooks_t *);
+ extent_hooks_t *a0_default_hooks;
+ expect_d_eq(mallctl("arena.0.extent_hooks", (void *)&a0_default_hooks,
+ &sz, NULL, 0), 0, "Unexpected mallctlnametomib() failure");
+
+ /* Default impl; but wrapped as "customized". */
+ extent_hooks_t new_hooks = *a0_default_hooks;
+ extent_hooks_t *hook = &new_hooks;
+ sz = sizeof(unsigned);
+ expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
+ (void *)&hook, sizeof(void *)), 0,
+ "Unexpected mallctl() failure");
+ do_arena_destroy(arena_ind);
}
TEST_END
@@ -271,9 +283,9 @@ extent_dalloc_unmap(extent_hooks_t *extent_hooks, void *addr, size_t size,
TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, "
"arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ?
"true" : "false", arena_ind);
- assert_ptr_eq(extent_hooks, &hooks,
+ expect_ptr_eq(extent_hooks, &hooks,
"extent_hooks should be same as pointer used to set hooks");
- assert_ptr_eq(extent_hooks->dalloc, extent_dalloc_unmap,
+ expect_ptr_eq(extent_hooks->dalloc, extent_dalloc_unmap,
"Wrong hook function");
called_dalloc = true;
if (!try_dalloc) {
@@ -317,20 +329,20 @@ TEST_BEGIN(test_arena_destroy_hooks_unmap) {
arena_ind = do_arena_create(&hooks);
do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
- assert_true(did_alloc, "Expected alloc");
+ expect_true(did_alloc, "Expected alloc");
- assert_false(arena_i_initialized(arena_ind, false),
+ expect_false(arena_i_initialized(arena_ind, false),
"Arena stats should not be initialized");
- assert_true(arena_i_initialized(arena_ind, true),
+ expect_true(arena_i_initialized(arena_ind, true),
"Arena stats should be initialized");
did_dalloc = false;
do_arena_destroy(arena_ind);
- assert_true(did_dalloc, "Expected dalloc");
+ expect_true(did_dalloc, "Expected dalloc");
- assert_false(arena_i_initialized(arena_ind, true),
+ expect_false(arena_i_initialized(arena_ind, true),
"Arena stats should not be initialized");
- assert_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
+ expect_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
"Destroyed arena stats should be initialized");
do_arena_reset_post(ptrs, nptrs, arena_ind);
diff --git a/deps/jemalloc/test/unit/atomic.c b/deps/jemalloc/test/unit/atomic.c
index 572d8d23f..c2ec8c7e1 100644
--- a/deps/jemalloc/test/unit/atomic.c
+++ b/deps/jemalloc/test/unit/atomic.c
@@ -6,7 +6,7 @@
* some places and "ptr" in others. In the long run it would be nice to unify
* these, but in the short run we'll use this shim.
*/
-#define assert_p_eq assert_ptr_eq
+#define expect_p_eq expect_ptr_eq
/*
* t: the non-atomic type, like "uint32_t".
@@ -24,20 +24,20 @@
\
/* ATOMIC_INIT and load. */ \
val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
- assert_##ta##_eq(val1, val, "Load or init failed"); \
+ expect_##ta##_eq(val1, val, "Load or init failed"); \
\
/* Store. */ \
atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
atomic_store_##ta(&atom, val2, ATOMIC_RELAXED); \
val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
- assert_##ta##_eq(val2, val, "Store failed"); \
+ expect_##ta##_eq(val2, val, "Store failed"); \
\
/* Exchange. */ \
atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
val = atomic_exchange_##ta(&atom, val2, ATOMIC_RELAXED); \
- assert_##ta##_eq(val1, val, "Exchange returned invalid value"); \
+ expect_##ta##_eq(val1, val, "Exchange returned invalid value"); \
val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
- assert_##ta##_eq(val2, val, "Exchange store invalid value"); \
+ expect_##ta##_eq(val2, val, "Exchange store invalid value"); \
\
/* \
* Weak CAS. Spurious failures are allowed, so we loop a few \
@@ -45,21 +45,21 @@
*/ \
atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
success = false; \
- for (int i = 0; i < 10 && !success; i++) { \
+ for (int retry = 0; retry < 10 && !success; retry++) { \
expected = val2; \
success = atomic_compare_exchange_weak_##ta(&atom, \
&expected, val3, ATOMIC_RELAXED, ATOMIC_RELAXED); \
- assert_##ta##_eq(val1, expected, \
+ expect_##ta##_eq(val1, expected, \
"CAS should update expected"); \
} \
- assert_b_eq(val1 == val2, success, \
+ expect_b_eq(val1 == val2, success, \
"Weak CAS did the wrong state update"); \
val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
if (success) { \
- assert_##ta##_eq(val3, val, \
+ expect_##ta##_eq(val3, val, \
"Successful CAS should update atomic"); \
} else { \
- assert_##ta##_eq(val1, val, \
+ expect_##ta##_eq(val1, val, \
"Unsuccessful CAS should not update atomic"); \
} \
\
@@ -68,14 +68,14 @@
expected = val2; \
success = atomic_compare_exchange_strong_##ta(&atom, &expected, \
val3, ATOMIC_RELAXED, ATOMIC_RELAXED); \
- assert_b_eq(val1 == val2, success, \
+ expect_b_eq(val1 == val2, success, \
"Strong CAS did the wrong state update"); \
val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
if (success) { \
- assert_##ta##_eq(val3, val, \
+ expect_##ta##_eq(val3, val, \
"Successful CAS should update atomic"); \
} else { \
- assert_##ta##_eq(val1, val, \
+ expect_##ta##_eq(val1, val, \
"Unsuccessful CAS should not update atomic"); \
} \
\
@@ -89,46 +89,46 @@
/* Fetch-add. */ \
atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
val = atomic_fetch_add_##ta(&atom, val2, ATOMIC_RELAXED); \
- assert_##ta##_eq(val1, val, \
+ expect_##ta##_eq(val1, val, \
"Fetch-add should return previous value"); \
val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
- assert_##ta##_eq(val1 + val2, val, \
+ expect_##ta##_eq(val1 + val2, val, \
"Fetch-add should update atomic"); \
\
/* Fetch-sub. */ \
atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
val = atomic_fetch_sub_##ta(&atom, val2, ATOMIC_RELAXED); \
- assert_##ta##_eq(val1, val, \
+ expect_##ta##_eq(val1, val, \
"Fetch-sub should return previous value"); \
val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
- assert_##ta##_eq(val1 - val2, val, \
+ expect_##ta##_eq(val1 - val2, val, \
"Fetch-sub should update atomic"); \
\
/* Fetch-and. */ \
atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
val = atomic_fetch_and_##ta(&atom, val2, ATOMIC_RELAXED); \
- assert_##ta##_eq(val1, val, \
+ expect_##ta##_eq(val1, val, \
"Fetch-and should return previous value"); \
val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
- assert_##ta##_eq(val1 & val2, val, \
+ expect_##ta##_eq(val1 & val2, val, \
"Fetch-and should update atomic"); \
\
/* Fetch-or. */ \
atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
val = atomic_fetch_or_##ta(&atom, val2, ATOMIC_RELAXED); \
- assert_##ta##_eq(val1, val, \
+ expect_##ta##_eq(val1, val, \
"Fetch-or should return previous value"); \
val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
- assert_##ta##_eq(val1 | val2, val, \
+ expect_##ta##_eq(val1 | val2, val, \
"Fetch-or should update atomic"); \
\
/* Fetch-xor. */ \
atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \
val = atomic_fetch_xor_##ta(&atom, val2, ATOMIC_RELAXED); \
- assert_##ta##_eq(val1, val, \
+ expect_##ta##_eq(val1, val, \
"Fetch-xor should return previous value"); \
val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \
- assert_##ta##_eq(val1 ^ val2, val, \
+ expect_##ta##_eq(val1 ^ val2, val, \
"Fetch-xor should update atomic"); \
} while (0)
diff --git a/deps/jemalloc/test/unit/background_thread.c b/deps/jemalloc/test/unit/background_thread.c
index f7bd37c42..c60010a81 100644
--- a/deps/jemalloc/test/unit/background_thread.c
+++ b/deps/jemalloc/test/unit/background_thread.c
@@ -8,15 +8,15 @@ test_switch_background_thread_ctl(bool new_val) {
size_t sz = sizeof(bool);
e1 = new_val;
- assert_d_eq(mallctl("background_thread", (void *)&e0, &sz,
+ expect_d_eq(mallctl("background_thread", (void *)&e0, &sz,
&e1, sz), 0, "Unexpected mallctl() failure");
- assert_b_eq(e0, !e1,
+ expect_b_eq(e0, !e1,
"background_thread should be %d before.\n", !e1);
if (e1) {
- assert_zu_gt(n_background_threads, 0,
+ expect_zu_gt(n_background_threads, 0,
"Number of background threads should be non zero.\n");
} else {
- assert_zu_eq(n_background_threads, 0,
+ expect_zu_eq(n_background_threads, 0,
"Number of background threads should be zero.\n");
}
}
@@ -27,15 +27,15 @@ test_repeat_background_thread_ctl(bool before) {
size_t sz = sizeof(bool);
e1 = before;
- assert_d_eq(mallctl("background_thread", (void *)&e0, &sz,
+ expect_d_eq(mallctl("background_thread", (void *)&e0, &sz,
&e1, sz), 0, "Unexpected mallctl() failure");
- assert_b_eq(e0, before,
+ expect_b_eq(e0, before,
"background_thread should be %d.\n", before);
if (e1) {
- assert_zu_gt(n_background_threads, 0,
+ expect_zu_gt(n_background_threads, 0,
"Number of background threads should be non zero.\n");
} else {
- assert_zu_eq(n_background_threads, 0,
+ expect_zu_eq(n_background_threads, 0,
"Number of background threads should be zero.\n");
}
}
@@ -46,16 +46,16 @@ TEST_BEGIN(test_background_thread_ctl) {
bool e0, e1;
size_t sz = sizeof(bool);
- assert_d_eq(mallctl("opt.background_thread", (void *)&e0, &sz,
+ expect_d_eq(mallctl("opt.background_thread", (void *)&e0, &sz,
NULL, 0), 0, "Unexpected mallctl() failure");
- assert_d_eq(mallctl("background_thread", (void *)&e1, &sz,
+ expect_d_eq(mallctl("background_thread", (void *)&e1, &sz,
NULL, 0), 0, "Unexpected mallctl() failure");
- assert_b_eq(e0, e1,
+ expect_b_eq(e0, e1,
"Default and opt.background_thread does not match.\n");
if (e0) {
test_switch_background_thread_ctl(false);
}
- assert_zu_eq(n_background_threads, 0,
+ expect_zu_eq(n_background_threads, 0,
"Number of background threads should be 0.\n");
for (unsigned i = 0; i < 4; i++) {
@@ -80,12 +80,11 @@ TEST_BEGIN(test_background_thread_running) {
test_repeat_background_thread_ctl(false);
test_switch_background_thread_ctl(true);
- assert_b_eq(info->state, background_thread_started,
+ expect_b_eq(info->state, background_thread_started,
"Background_thread did not start.\n");
- nstime_t start, now;
- nstime_init(&start, 0);
- nstime_update(&start);
+ nstime_t start;
+ nstime_init_update(&start);
bool ran = false;
while (true) {
@@ -98,10 +97,10 @@ TEST_BEGIN(test_background_thread_running) {
break;
}
- nstime_init(&now, 0);
- nstime_update(&now);
+ nstime_t now;
+ nstime_init_update(&now);
nstime_subtract(&now, &start);
- assert_u64_lt(nstime_sec(&now), 1000,
+ expect_u64_lt(nstime_sec(&now), 1000,
"Background threads did not run for 1000 seconds.");
sleep(1);
}
diff --git a/deps/jemalloc/test/unit/background_thread_enable.c b/deps/jemalloc/test/unit/background_thread_enable.c
index d894e9371..44034ac67 100644
--- a/deps/jemalloc/test/unit/background_thread_enable.c
+++ b/deps/jemalloc/test/unit/background_thread_enable.c
@@ -2,12 +2,8 @@
const char *malloc_conf = "background_thread:false,narenas:1,max_background_threads:20";
-TEST_BEGIN(test_deferred) {
- test_skip_if(!have_background_thread);
-
- unsigned id;
- size_t sz_u = sizeof(unsigned);
-
+static unsigned
+max_test_narenas(void) {
/*
* 10 here is somewhat arbitrary, except insofar as we want to ensure
* that the number of background threads is smaller than the number of
@@ -15,17 +11,32 @@ TEST_BEGIN(test_deferred) {
* cpu to handle background purging, so this is a conservative
* approximation.
*/
- for (unsigned i = 0; i < 10 * ncpus; i++) {
- assert_d_eq(mallctl("arenas.create", &id, &sz_u, NULL, 0), 0,
+ unsigned ret = 10 * ncpus;
+ /* Limit the max to avoid VM exhaustion on 32-bit . */
+ if (ret > 512) {
+ ret = 512;
+ }
+
+ return ret;
+}
+
+TEST_BEGIN(test_deferred) {
+ test_skip_if(!have_background_thread);
+
+ unsigned id;
+ size_t sz_u = sizeof(unsigned);
+
+ for (unsigned i = 0; i < max_test_narenas(); i++) {
+ expect_d_eq(mallctl("arenas.create", &id, &sz_u, NULL, 0), 0,
"Failed to create arena");
}
bool enable = true;
size_t sz_b = sizeof(bool);
- assert_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0,
+ expect_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0,
"Failed to enable background threads");
enable = false;
- assert_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0,
+ expect_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0,
"Failed to disable background threads");
}
TEST_END
@@ -36,43 +47,43 @@ TEST_BEGIN(test_max_background_threads) {
size_t max_n_thds;
size_t opt_max_n_thds;
size_t sz_m = sizeof(max_n_thds);
- assert_d_eq(mallctl("opt.max_background_threads",
+ expect_d_eq(mallctl("opt.max_background_threads",
&opt_max_n_thds, &sz_m, NULL, 0), 0,
"Failed to get opt.max_background_threads");
- assert_d_eq(mallctl("max_background_threads", &max_n_thds, &sz_m, NULL,
+ expect_d_eq(mallctl("max_background_threads", &max_n_thds, &sz_m, NULL,
0), 0, "Failed to get max background threads");
- assert_zu_eq(opt_max_n_thds, max_n_thds,
+ expect_zu_eq(opt_max_n_thds, max_n_thds,
"max_background_threads and "
"opt.max_background_threads should match");
- assert_d_eq(mallctl("max_background_threads", NULL, NULL, &max_n_thds,
+ expect_d_eq(mallctl("max_background_threads", NULL, NULL, &max_n_thds,
sz_m), 0, "Failed to set max background threads");
unsigned id;
size_t sz_u = sizeof(unsigned);
- for (unsigned i = 0; i < 10 * ncpus; i++) {
- assert_d_eq(mallctl("arenas.create", &id, &sz_u, NULL, 0), 0,
+ for (unsigned i = 0; i < max_test_narenas(); i++) {
+ expect_d_eq(mallctl("arenas.create", &id, &sz_u, NULL, 0), 0,
"Failed to create arena");
}
bool enable = true;
size_t sz_b = sizeof(bool);
- assert_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0,
+ expect_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0,
"Failed to enable background threads");
- assert_zu_eq(n_background_threads, max_n_thds,
+ expect_zu_eq(n_background_threads, max_n_thds,
"Number of background threads should not change.\n");
size_t new_max_thds = max_n_thds - 1;
if (new_max_thds > 0) {
- assert_d_eq(mallctl("max_background_threads", NULL, NULL,
+ expect_d_eq(mallctl("max_background_threads", NULL, NULL,
&new_max_thds, sz_m), 0,
"Failed to set max background threads");
- assert_zu_eq(n_background_threads, new_max_thds,
+ expect_zu_eq(n_background_threads, new_max_thds,
"Number of background threads should decrease by 1.\n");
}
new_max_thds = 1;
- assert_d_eq(mallctl("max_background_threads", NULL, NULL, &new_max_thds,
+ expect_d_eq(mallctl("max_background_threads", NULL, NULL, &new_max_thds,
sz_m), 0, "Failed to set max background threads");
- assert_zu_eq(n_background_threads, new_max_thds,
+ expect_zu_eq(n_background_threads, new_max_thds,
"Number of background threads should be 1.\n");
}
TEST_END
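
These tests flip background purging threads on and off through the background_thread and max_background_threads mallctl nodes. A minimal sketch of doing the same from application code, assuming the jemalloc build was configured with background-thread support:

#include <stdbool.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	/* Turn background purging threads on, then cap their count at one. */
	bool enable = true;
	if (mallctl("background_thread", NULL, NULL, &enable,
	    sizeof(enable)) != 0) {
		return 1;
	}
	size_t max_thds = 1;
	mallctl("max_background_threads", NULL, NULL, &max_thds,
	    sizeof(max_thds));
	return 0;
}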
diff --git a/deps/jemalloc/test/unit/base.c b/deps/jemalloc/test/unit/base.c
index 6b792cf21..15e04a8ce 100644
--- a/deps/jemalloc/test/unit/base.c
+++ b/deps/jemalloc/test/unit/base.c
@@ -31,26 +31,28 @@ TEST_BEGIN(test_base_hooks_default) {
size_t allocated0, allocated1, resident, mapped, n_thp;
tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
- base = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default);
+ base = base_new(tsdn, 0,
+ (extent_hooks_t *)&ehooks_default_extent_hooks,
+ /* metadata_use_hooks */ true);
if (config_stats) {
base_stats_get(tsdn, base, &allocated0, &resident, &mapped,
&n_thp);
- assert_zu_ge(allocated0, sizeof(base_t),
+ expect_zu_ge(allocated0, sizeof(base_t),
"Base header should count as allocated");
if (opt_metadata_thp == metadata_thp_always) {
- assert_zu_gt(n_thp, 0,
+ expect_zu_gt(n_thp, 0,
"Base should have 1 THP at least.");
}
}
- assert_ptr_not_null(base_alloc(tsdn, base, 42, 1),
+ expect_ptr_not_null(base_alloc(tsdn, base, 42, 1),
"Unexpected base_alloc() failure");
if (config_stats) {
base_stats_get(tsdn, base, &allocated1, &resident, &mapped,
&n_thp);
- assert_zu_ge(allocated1 - allocated0, 42,
+ expect_zu_ge(allocated1 - allocated0, 42,
"At least 42 bytes were allocated by base_alloc()");
}
@@ -73,27 +75,27 @@ TEST_BEGIN(test_base_hooks_null) {
memcpy(&hooks, &hooks_null, sizeof(extent_hooks_t));
tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
- base = base_new(tsdn, 0, &hooks);
- assert_ptr_not_null(base, "Unexpected base_new() failure");
+ base = base_new(tsdn, 0, &hooks, /* metadata_use_hooks */ true);
+ expect_ptr_not_null(base, "Unexpected base_new() failure");
if (config_stats) {
base_stats_get(tsdn, base, &allocated0, &resident, &mapped,
&n_thp);
- assert_zu_ge(allocated0, sizeof(base_t),
+ expect_zu_ge(allocated0, sizeof(base_t),
"Base header should count as allocated");
if (opt_metadata_thp == metadata_thp_always) {
- assert_zu_gt(n_thp, 0,
+ expect_zu_gt(n_thp, 0,
"Base should have 1 THP at least.");
}
}
- assert_ptr_not_null(base_alloc(tsdn, base, 42, 1),
+ expect_ptr_not_null(base_alloc(tsdn, base, 42, 1),
"Unexpected base_alloc() failure");
if (config_stats) {
base_stats_get(tsdn, base, &allocated1, &resident, &mapped,
&n_thp);
- assert_zu_ge(allocated1 - allocated0, 42,
+ expect_zu_ge(allocated1 - allocated0, 42,
"At least 42 bytes were allocated by base_alloc()");
}
@@ -119,9 +121,9 @@ TEST_BEGIN(test_base_hooks_not_null) {
tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
did_alloc = false;
- base = base_new(tsdn, 0, &hooks);
- assert_ptr_not_null(base, "Unexpected base_new() failure");
- assert_true(did_alloc, "Expected alloc");
+ base = base_new(tsdn, 0, &hooks, /* metadata_use_hooks */ true);
+ expect_ptr_not_null(base, "Unexpected base_new() failure");
+ expect_true(did_alloc, "Expected alloc");
/*
* Check for tight packing at specified alignment under simple
@@ -142,21 +144,21 @@ TEST_BEGIN(test_base_hooks_not_null) {
size_t align_ceil = ALIGNMENT_CEILING(alignment,
QUANTUM);
p = base_alloc(tsdn, base, 1, alignment);
- assert_ptr_not_null(p,
+ expect_ptr_not_null(p,
"Unexpected base_alloc() failure");
- assert_ptr_eq(p,
+ expect_ptr_eq(p,
(void *)(ALIGNMENT_CEILING((uintptr_t)p,
alignment)), "Expected quantum alignment");
q = base_alloc(tsdn, base, alignment, alignment);
- assert_ptr_not_null(q,
+ expect_ptr_not_null(q,
"Unexpected base_alloc() failure");
- assert_ptr_eq((void *)((uintptr_t)p + align_ceil), q,
+ expect_ptr_eq((void *)((uintptr_t)p + align_ceil), q,
"Minimal allocation should take up %zu bytes",
align_ceil);
r = base_alloc(tsdn, base, 1, alignment);
- assert_ptr_not_null(r,
+ expect_ptr_not_null(r,
"Unexpected base_alloc() failure");
- assert_ptr_eq((void *)((uintptr_t)q + align_ceil), r,
+ expect_ptr_eq((void *)((uintptr_t)q + align_ceil), r,
"Minimal allocation should take up %zu bytes",
align_ceil);
}
@@ -167,23 +169,23 @@ TEST_BEGIN(test_base_hooks_not_null) {
* that the first block's remaining space is considered for subsequent
* allocation.
*/
- assert_zu_ge(extent_bsize_get(&base->blocks->extent), QUANTUM,
+ expect_zu_ge(edata_bsize_get(&base->blocks->edata), QUANTUM,
"Remainder insufficient for test");
/* Use up all but one quantum of block. */
- while (extent_bsize_get(&base->blocks->extent) > QUANTUM) {
+ while (edata_bsize_get(&base->blocks->edata) > QUANTUM) {
p = base_alloc(tsdn, base, QUANTUM, QUANTUM);
- assert_ptr_not_null(p, "Unexpected base_alloc() failure");
+ expect_ptr_not_null(p, "Unexpected base_alloc() failure");
}
- r_exp = extent_addr_get(&base->blocks->extent);
- assert_zu_eq(base->extent_sn_next, 1, "One extant block expected");
+ r_exp = edata_addr_get(&base->blocks->edata);
+ expect_zu_eq(base->extent_sn_next, 1, "One extant block expected");
q = base_alloc(tsdn, base, QUANTUM + 1, QUANTUM);
- assert_ptr_not_null(q, "Unexpected base_alloc() failure");
- assert_ptr_ne(q, r_exp, "Expected allocation from new block");
- assert_zu_eq(base->extent_sn_next, 2, "Two extant blocks expected");
+ expect_ptr_not_null(q, "Unexpected base_alloc() failure");
+ expect_ptr_ne(q, r_exp, "Expected allocation from new block");
+ expect_zu_eq(base->extent_sn_next, 2, "Two extant blocks expected");
r = base_alloc(tsdn, base, QUANTUM, QUANTUM);
- assert_ptr_not_null(r, "Unexpected base_alloc() failure");
- assert_ptr_eq(r, r_exp, "Expected allocation from first block");
- assert_zu_eq(base->extent_sn_next, 2, "Two extant blocks expected");
+ expect_ptr_not_null(r, "Unexpected base_alloc() failure");
+ expect_ptr_eq(r, r_exp, "Expected allocation from first block");
+ expect_zu_eq(base->extent_sn_next, 2, "Two extant blocks expected");
/*
* Check for proper alignment support when normal blocks are too small.
@@ -198,9 +200,9 @@ TEST_BEGIN(test_base_hooks_not_null) {
for (i = 0; i < sizeof(alignments) / sizeof(size_t); i++) {
size_t alignment = alignments[i];
p = base_alloc(tsdn, base, QUANTUM, alignment);
- assert_ptr_not_null(p,
+ expect_ptr_not_null(p,
"Unexpected base_alloc() failure");
- assert_ptr_eq(p,
+ expect_ptr_eq(p,
(void *)(ALIGNMENT_CEILING((uintptr_t)p,
alignment)), "Expected %zu-byte alignment",
alignment);
@@ -210,11 +212,11 @@ TEST_BEGIN(test_base_hooks_not_null) {
called_dalloc = called_destroy = called_decommit = called_purge_lazy =
called_purge_forced = false;
base_delete(tsdn, base);
- assert_true(called_dalloc, "Expected dalloc call");
- assert_true(!called_destroy, "Unexpected destroy call");
- assert_true(called_decommit, "Expected decommit call");
- assert_true(called_purge_lazy, "Expected purge_lazy call");
- assert_true(called_purge_forced, "Expected purge_forced call");
+ expect_true(called_dalloc, "Expected dalloc call");
+ expect_true(!called_destroy, "Unexpected destroy call");
+ expect_true(called_decommit, "Expected decommit call");
+ expect_true(called_purge_lazy, "Expected purge_lazy call");
+ expect_true(called_purge_forced, "Expected purge_forced call");
try_dalloc = true;
try_destroy = true;
@@ -225,10 +227,39 @@ TEST_BEGIN(test_base_hooks_not_null) {
}
TEST_END
+TEST_BEGIN(test_base_ehooks_get_for_metadata_default_hook) {
+ extent_hooks_prep();
+ memcpy(&hooks, &hooks_not_null, sizeof(extent_hooks_t));
+ base_t *base;
+ tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
+ base = base_new(tsdn, 0, &hooks, /* metadata_use_hooks */ false);
+ ehooks_t *ehooks = base_ehooks_get_for_metadata(base);
+ expect_true(ehooks_are_default(ehooks),
+ "Expected default extent hook functions pointer");
+ base_delete(tsdn, base);
+}
+TEST_END
+
+
+TEST_BEGIN(test_base_ehooks_get_for_metadata_custom_hook) {
+ extent_hooks_prep();
+ memcpy(&hooks, &hooks_not_null, sizeof(extent_hooks_t));
+ base_t *base;
+ tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
+ base = base_new(tsdn, 0, &hooks, /* metadata_use_hooks */ true);
+ ehooks_t *ehooks = base_ehooks_get_for_metadata(base);
+ expect_ptr_eq(&hooks, ehooks_get_extent_hooks_ptr(ehooks),
+ "Expected user-specified extend hook functions pointer");
+ base_delete(tsdn, base);
+}
+TEST_END
+
int
main(void) {
return test(
test_base_hooks_default,
test_base_hooks_null,
- test_base_hooks_not_null);
+ test_base_hooks_not_null,
+ test_base_ehooks_get_for_metadata_default_hook,
+ test_base_ehooks_get_for_metadata_custom_hook);
}
diff --git a/deps/jemalloc/test/unit/batch_alloc.c b/deps/jemalloc/test/unit/batch_alloc.c
new file mode 100644
index 000000000..901c52b1a
--- /dev/null
+++ b/deps/jemalloc/test/unit/batch_alloc.c
@@ -0,0 +1,189 @@
+#include "test/jemalloc_test.h"
+
+#define BATCH_MAX ((1U << 16) + 1024)
+static void *global_ptrs[BATCH_MAX];
+
+#define PAGE_ALIGNED(ptr) (((uintptr_t)ptr & PAGE_MASK) == 0)
+
+static void
+verify_batch_basic(tsd_t *tsd, void **ptrs, size_t batch, size_t usize,
+ bool zero) {
+ for (size_t i = 0; i < batch; ++i) {
+ void *p = ptrs[i];
+ expect_zu_eq(isalloc(tsd_tsdn(tsd), p), usize, "");
+ if (zero) {
+ for (size_t k = 0; k < usize; ++k) {
+ expect_true(*((unsigned char *)p + k) == 0, "");
+ }
+ }
+ }
+}
+
+static void
+verify_batch_locality(tsd_t *tsd, void **ptrs, size_t batch, size_t usize,
+ arena_t *arena, unsigned nregs) {
+ if (config_prof && opt_prof) {
+ /*
+ * Checking batch locality when prof is on is feasible but
+ * complicated, while checking the non-prof case suffices for
+ * unit-test purpose.
+ */
+ return;
+ }
+ for (size_t i = 0, j = 0; i < batch; ++i, ++j) {
+ if (j == nregs) {
+ j = 0;
+ }
+ if (j == 0 && batch - i < nregs) {
+ break;
+ }
+ void *p = ptrs[i];
+ expect_ptr_eq(iaalloc(tsd_tsdn(tsd), p), arena, "");
+ if (j == 0) {
+ expect_true(PAGE_ALIGNED(p), "");
+ continue;
+ }
+ assert(i > 0);
+ void *q = ptrs[i - 1];
+ expect_true((uintptr_t)p > (uintptr_t)q
+ && (size_t)((uintptr_t)p - (uintptr_t)q) == usize, "");
+ }
+}
+
+static void
+release_batch(void **ptrs, size_t batch, size_t size) {
+ for (size_t i = 0; i < batch; ++i) {
+ sdallocx(ptrs[i], size, 0);
+ }
+}
+
+typedef struct batch_alloc_packet_s batch_alloc_packet_t;
+struct batch_alloc_packet_s {
+ void **ptrs;
+ size_t num;
+ size_t size;
+ int flags;
+};
+
+static size_t
+batch_alloc_wrapper(void **ptrs, size_t num, size_t size, int flags) {
+ batch_alloc_packet_t batch_alloc_packet = {ptrs, num, size, flags};
+ size_t filled;
+ size_t len = sizeof(size_t);
+ assert_d_eq(mallctl("experimental.batch_alloc", &filled, &len,
+ &batch_alloc_packet, sizeof(batch_alloc_packet)), 0, "");
+ return filled;
+}
+
+static void
+test_wrapper(size_t size, size_t alignment, bool zero, unsigned arena_flag) {
+ tsd_t *tsd = tsd_fetch();
+ assert(tsd != NULL);
+ const size_t usize =
+ (alignment != 0 ? sz_sa2u(size, alignment) : sz_s2u(size));
+ const szind_t ind = sz_size2index(usize);
+ const bin_info_t *bin_info = &bin_infos[ind];
+ const unsigned nregs = bin_info->nregs;
+ assert(nregs > 0);
+ arena_t *arena;
+ if (arena_flag != 0) {
+ arena = arena_get(tsd_tsdn(tsd), MALLOCX_ARENA_GET(arena_flag),
+ false);
+ } else {
+ arena = arena_choose(tsd, NULL);
+ }
+ assert(arena != NULL);
+ int flags = arena_flag;
+ if (alignment != 0) {
+ flags |= MALLOCX_ALIGN(alignment);
+ }
+ if (zero) {
+ flags |= MALLOCX_ZERO;
+ }
+
+ /*
+ * Allocate for the purpose of bootstrapping arena_tdata, so that the
+ * change in bin stats won't contaminate the stats to be verified below.
+ */
+ void *p = mallocx(size, flags | MALLOCX_TCACHE_NONE);
+
+ for (size_t i = 0; i < 4; ++i) {
+ size_t base = 0;
+ if (i == 1) {
+ base = nregs;
+ } else if (i == 2) {
+ base = nregs * 2;
+ } else if (i == 3) {
+ base = (1 << 16);
+ }
+ for (int j = -1; j <= 1; ++j) {
+ if (base == 0 && j == -1) {
+ continue;
+ }
+ size_t batch = base + (size_t)j;
+ assert(batch < BATCH_MAX);
+ size_t filled = batch_alloc_wrapper(global_ptrs, batch,
+ size, flags);
+ assert_zu_eq(filled, batch, "");
+ verify_batch_basic(tsd, global_ptrs, batch, usize,
+ zero);
+ verify_batch_locality(tsd, global_ptrs, batch, usize,
+ arena, nregs);
+ release_batch(global_ptrs, batch, usize);
+ }
+ }
+
+ free(p);
+}
+
+TEST_BEGIN(test_batch_alloc) {
+ test_wrapper(11, 0, false, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_batch_alloc_zero) {
+ test_wrapper(11, 0, true, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_batch_alloc_aligned) {
+ test_wrapper(7, 16, false, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_batch_alloc_manual_arena) {
+ unsigned arena_ind;
+ size_t len_unsigned = sizeof(unsigned);
+ assert_d_eq(mallctl("arenas.create", &arena_ind, &len_unsigned, NULL,
+ 0), 0, "");
+ test_wrapper(11, 0, false, MALLOCX_ARENA(arena_ind));
+}
+TEST_END
+
+TEST_BEGIN(test_batch_alloc_large) {
+ size_t size = SC_LARGE_MINCLASS;
+ for (size_t batch = 0; batch < 4; ++batch) {
+ assert(batch < BATCH_MAX);
+ size_t filled = batch_alloc(global_ptrs, batch, size, 0);
+ assert_zu_eq(filled, batch, "");
+ release_batch(global_ptrs, batch, size);
+ }
+ size = tcache_maxclass + 1;
+ for (size_t batch = 0; batch < 4; ++batch) {
+ assert(batch < BATCH_MAX);
+ size_t filled = batch_alloc(global_ptrs, batch, size, 0);
+ assert_zu_eq(filled, batch, "");
+ release_batch(global_ptrs, batch, size);
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_batch_alloc,
+ test_batch_alloc_zero,
+ test_batch_alloc_aligned,
+ test_batch_alloc_manual_arena,
+ test_batch_alloc_large);
+}
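For context, the experimental.batch_alloc mallctl exercised above is driven entirely through a packet struct, exactly as batch_alloc_wrapper() does. A minimal caller-side sketch outside the test harness could look like the following; the struct mirrors the batch_alloc_packet_t layout declared in the test, it assumes a build of this jemalloc version exposing the experimental mallctl, and the request of 8 pointers of size 11 is an arbitrary illustration rather than part of the patch:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

/* Caller-side mirror of the packet layout used by the test above. */
typedef struct {
	void **ptrs;	/* out: array that receives the allocations */
	size_t num;	/* in: number of allocations requested */
	size_t size;	/* in: size of each allocation */
	int flags;	/* in: MALLOCX_* flags */
} batch_alloc_packet_t;

int
main(void) {
	void *ptrs[8];
	batch_alloc_packet_t packet = {ptrs, 8, 11, 0};
	size_t filled;
	size_t len = sizeof(filled);
	/* On success, "filled" reports how many slots were populated. */
	if (mallctl("experimental.batch_alloc", &filled, &len, &packet,
	    sizeof(packet)) != 0) {
		return 1;
	}
	printf("batch_alloc filled %zu of %zu slots\n", filled, packet.num);
	for (size_t i = 0; i < filled; i++) {
		sdallocx(ptrs[i], packet.size, 0);
	}
	return 0;
}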
diff --git a/deps/jemalloc/test/unit/batch_alloc.sh b/deps/jemalloc/test/unit/batch_alloc.sh
new file mode 100644
index 000000000..9d81010ac
--- /dev/null
+++ b/deps/jemalloc/test/unit/batch_alloc.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+export MALLOC_CONF="tcache_gc_incr_bytes:2147483648"
diff --git a/deps/jemalloc/test/unit/batch_alloc_prof.c b/deps/jemalloc/test/unit/batch_alloc_prof.c
new file mode 100644
index 000000000..ef6445861
--- /dev/null
+++ b/deps/jemalloc/test/unit/batch_alloc_prof.c
@@ -0,0 +1 @@
+#include "batch_alloc.c"
diff --git a/deps/jemalloc/test/unit/batch_alloc_prof.sh b/deps/jemalloc/test/unit/batch_alloc_prof.sh
new file mode 100644
index 000000000..a2697a610
--- /dev/null
+++ b/deps/jemalloc/test/unit/batch_alloc_prof.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+export MALLOC_CONF="prof:true,lg_prof_sample:14"
diff --git a/deps/jemalloc/test/unit/binshard.c b/deps/jemalloc/test/unit/binshard.c
index d7a8df8fc..040ea54d2 100644
--- a/deps/jemalloc/test/unit/binshard.c
+++ b/deps/jemalloc/test/unit/binshard.c
@@ -13,7 +13,7 @@ thd_producer(void *varg) {
sz = sizeof(arena);
/* Remote arena. */
- assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
+ expect_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
for (i = 0; i < REMOTE_NALLOC / 2; i++) {
mem[i] = mallocx(1, MALLOCX_TCACHE_NONE | MALLOCX_ARENA(arena));
@@ -42,7 +42,7 @@ TEST_BEGIN(test_producer_consumer) {
/* Remote deallocation by the current thread. */
for (i = 0; i < NTHREADS; i++) {
for (unsigned j = 0; j < REMOTE_NALLOC; j++) {
- assert_ptr_not_null(mem[i][j],
+ expect_ptr_not_null(mem[i][j],
"Unexpected remote allocation failure");
dallocx(mem[i][j], 0);
}
@@ -53,7 +53,7 @@ TEST_END
static void *
thd_start(void *varg) {
void *ptr, *ptr2;
- extent_t *extent;
+ edata_t *edata;
unsigned shard1, shard2;
tsdn_t *tsdn = tsdn_fetch();
@@ -62,15 +62,15 @@ thd_start(void *varg) {
ptr = mallocx(1, MALLOCX_TCACHE_NONE);
ptr2 = mallocx(129, MALLOCX_TCACHE_NONE);
- extent = iealloc(tsdn, ptr);
- shard1 = extent_binshard_get(extent);
+ edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
+ shard1 = edata_binshard_get(edata);
dallocx(ptr, 0);
- assert_u_lt(shard1, 16, "Unexpected bin shard used");
+ expect_u_lt(shard1, 16, "Unexpected bin shard used");
- extent = iealloc(tsdn, ptr2);
- shard2 = extent_binshard_get(extent);
+ edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr2);
+ shard2 = edata_binshard_get(edata);
dallocx(ptr2, 0);
- assert_u_lt(shard2, 4, "Unexpected bin shard used");
+ expect_u_lt(shard2, 4, "Unexpected bin shard used");
if (shard1 > 0 || shard2 > 0) {
/* Triggered sharded bin usage. */
@@ -98,7 +98,7 @@ TEST_BEGIN(test_bin_shard_mt) {
sharded = true;
}
}
- assert_b_eq(sharded, true, "Did not find sharded bins");
+ expect_b_eq(sharded, true, "Did not find sharded bins");
}
TEST_END
@@ -108,14 +108,14 @@ TEST_BEGIN(test_bin_shard) {
size_t miblen, miblen2, len;
len = sizeof(nbins);
- assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0,
+ expect_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0,
"Unexpected mallctl() failure");
miblen = 4;
- assert_d_eq(mallctlnametomib("arenas.bin.0.nshards", mib, &miblen), 0,
+ expect_d_eq(mallctlnametomib("arenas.bin.0.nshards", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
miblen2 = 4;
- assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib2, &miblen2), 0,
+ expect_d_eq(mallctlnametomib("arenas.bin.0.size", mib2, &miblen2), 0,
"Unexpected mallctlnametomib() failure");
for (i = 0; i < nbins; i++) {
@@ -124,22 +124,22 @@ TEST_BEGIN(test_bin_shard) {
mib[2] = i;
sz1 = sizeof(nshards);
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&nshards, &sz1,
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&nshards, &sz1,
NULL, 0), 0, "Unexpected mallctlbymib() failure");
mib2[2] = i;
sz2 = sizeof(size);
- assert_d_eq(mallctlbymib(mib2, miblen2, (void *)&size, &sz2,
+ expect_d_eq(mallctlbymib(mib2, miblen2, (void *)&size, &sz2,
NULL, 0), 0, "Unexpected mallctlbymib() failure");
if (size >= 1 && size <= 128) {
- assert_u_eq(nshards, 16, "Unexpected nshards");
+ expect_u_eq(nshards, 16, "Unexpected nshards");
} else if (size == 256) {
- assert_u_eq(nshards, 8, "Unexpected nshards");
+ expect_u_eq(nshards, 8, "Unexpected nshards");
} else if (size > 128 && size <= 512) {
- assert_u_eq(nshards, 4, "Unexpected nshards");
+ expect_u_eq(nshards, 4, "Unexpected nshards");
} else {
- assert_u_eq(nshards, 1, "Unexpected nshards");
+ expect_u_eq(nshards, 1, "Unexpected nshards");
}
}
}
diff --git a/deps/jemalloc/test/unit/bit_util.c b/deps/jemalloc/test/unit/bit_util.c
index b747deb43..7d31b2109 100644
--- a/deps/jemalloc/test/unit/bit_util.c
+++ b/deps/jemalloc/test/unit/bit_util.c
@@ -6,27 +6,27 @@
unsigned i, pow2; \
t x; \
\
- assert_##suf##_eq(pow2_ceil_##suf(0), 0, "Unexpected result"); \
+ expect_##suf##_eq(pow2_ceil_##suf(0), 0, "Unexpected result"); \
\
for (i = 0; i < sizeof(t) * 8; i++) { \
- assert_##suf##_eq(pow2_ceil_##suf(((t)1) << i), ((t)1) \
+ expect_##suf##_eq(pow2_ceil_##suf(((t)1) << i), ((t)1) \
<< i, "Unexpected result"); \
} \
\
for (i = 2; i < sizeof(t) * 8; i++) { \
- assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) - 1), \
+ expect_##suf##_eq(pow2_ceil_##suf((((t)1) << i) - 1), \
((t)1) << i, "Unexpected result"); \
} \
\
for (i = 0; i < sizeof(t) * 8 - 1; i++) { \
- assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) + 1), \
+ expect_##suf##_eq(pow2_ceil_##suf((((t)1) << i) + 1), \
((t)1) << (i+1), "Unexpected result"); \
} \
\
for (pow2 = 1; pow2 < 25; pow2++) { \
for (x = (((t)1) << (pow2-1)) + 1; x <= ((t)1) << pow2; \
x++) { \
- assert_##suf##_eq(pow2_ceil_##suf(x), \
+ expect_##suf##_eq(pow2_ceil_##suf(x), \
((t)1) << pow2, \
"Unexpected result, x=%"pri, x); \
} \
@@ -49,35 +49,35 @@ TEST_BEGIN(test_pow2_ceil_zu) {
TEST_END
void
-assert_lg_ceil_range(size_t input, unsigned answer) {
+expect_lg_ceil_range(size_t input, unsigned answer) {
if (input == 1) {
- assert_u_eq(0, answer, "Got %u as lg_ceil of 1", answer);
+ expect_u_eq(0, answer, "Got %u as lg_ceil of 1", answer);
return;
}
- assert_zu_le(input, (ZU(1) << answer),
+ expect_zu_le(input, (ZU(1) << answer),
"Got %u as lg_ceil of %zu", answer, input);
- assert_zu_gt(input, (ZU(1) << (answer - 1)),
+ expect_zu_gt(input, (ZU(1) << (answer - 1)),
"Got %u as lg_ceil of %zu", answer, input);
}
void
-assert_lg_floor_range(size_t input, unsigned answer) {
+expect_lg_floor_range(size_t input, unsigned answer) {
if (input == 1) {
- assert_u_eq(0, answer, "Got %u as lg_floor of 1", answer);
+ expect_u_eq(0, answer, "Got %u as lg_floor of 1", answer);
return;
}
- assert_zu_ge(input, (ZU(1) << answer),
+ expect_zu_ge(input, (ZU(1) << answer),
"Got %u as lg_floor of %zu", answer, input);
- assert_zu_lt(input, (ZU(1) << (answer + 1)),
+ expect_zu_lt(input, (ZU(1) << (answer + 1)),
"Got %u as lg_floor of %zu", answer, input);
}
TEST_BEGIN(test_lg_ceil_floor) {
for (size_t i = 1; i < 10 * 1000 * 1000; i++) {
- assert_lg_ceil_range(i, lg_ceil(i));
- assert_lg_ceil_range(i, LG_CEIL(i));
- assert_lg_floor_range(i, lg_floor(i));
- assert_lg_floor_range(i, LG_FLOOR(i));
+ expect_lg_ceil_range(i, lg_ceil(i));
+ expect_lg_ceil_range(i, LG_CEIL(i));
+ expect_lg_floor_range(i, lg_floor(i));
+ expect_lg_floor_range(i, LG_FLOOR(i));
}
for (int i = 10; i < 8 * (1 << LG_SIZEOF_PTR) - 5; i++) {
for (size_t j = 0; j < (1 << 4); j++) {
@@ -85,27 +85,223 @@ TEST_BEGIN(test_lg_ceil_floor) {
- j * ((size_t)1 << (i - 4));
size_t num2 = ((size_t)1 << i)
+ j * ((size_t)1 << (i - 4));
- assert_zu_ne(num1, 0, "Invalid lg argument");
- assert_zu_ne(num2, 0, "Invalid lg argument");
- assert_lg_ceil_range(num1, lg_ceil(num1));
- assert_lg_ceil_range(num1, LG_CEIL(num1));
- assert_lg_ceil_range(num2, lg_ceil(num2));
- assert_lg_ceil_range(num2, LG_CEIL(num2));
-
- assert_lg_floor_range(num1, lg_floor(num1));
- assert_lg_floor_range(num1, LG_FLOOR(num1));
- assert_lg_floor_range(num2, lg_floor(num2));
- assert_lg_floor_range(num2, LG_FLOOR(num2));
+ expect_zu_ne(num1, 0, "Invalid lg argument");
+ expect_zu_ne(num2, 0, "Invalid lg argument");
+ expect_lg_ceil_range(num1, lg_ceil(num1));
+ expect_lg_ceil_range(num1, LG_CEIL(num1));
+ expect_lg_ceil_range(num2, lg_ceil(num2));
+ expect_lg_ceil_range(num2, LG_CEIL(num2));
+
+ expect_lg_floor_range(num1, lg_floor(num1));
+ expect_lg_floor_range(num1, LG_FLOOR(num1));
+ expect_lg_floor_range(num2, lg_floor(num2));
+ expect_lg_floor_range(num2, LG_FLOOR(num2));
+ }
+ }
+}
+TEST_END
+
+#define TEST_FFS(t, suf, test_suf, pri) do { \
+ for (unsigned i = 0; i < sizeof(t) * 8; i++) { \
+ for (unsigned j = 0; j <= i; j++) { \
+ for (unsigned k = 0; k <= j; k++) { \
+ t x = (t)1 << i; \
+ x |= (t)1 << j; \
+ x |= (t)1 << k; \
+ expect_##test_suf##_eq(ffs_##suf(x), k, \
+ "Unexpected result, x=%"pri, x); \
+ } \
+ } \
+ } \
+} while(0)
+
+TEST_BEGIN(test_ffs_u) {
+ TEST_FFS(unsigned, u, u, "u");
+}
+TEST_END
+
+TEST_BEGIN(test_ffs_lu) {
+ TEST_FFS(unsigned long, lu, lu, "lu");
+}
+TEST_END
+
+TEST_BEGIN(test_ffs_llu) {
+ TEST_FFS(unsigned long long, llu, qd, "llu");
+}
+TEST_END
+
+TEST_BEGIN(test_ffs_u32) {
+ TEST_FFS(uint32_t, u32, u32, FMTu32);
+}
+TEST_END
+
+TEST_BEGIN(test_ffs_u64) {
+ TEST_FFS(uint64_t, u64, u64, FMTu64);
+}
+TEST_END
+
+TEST_BEGIN(test_ffs_zu) {
+ TEST_FFS(size_t, zu, zu, "zu");
+}
+TEST_END
+
+#define TEST_FLS(t, suf, test_suf, pri) do { \
+ for (unsigned i = 0; i < sizeof(t) * 8; i++) { \
+ for (unsigned j = 0; j <= i; j++) { \
+ for (unsigned k = 0; k <= j; k++) { \
+ t x = (t)1 << i; \
+ x |= (t)1 << j; \
+ x |= (t)1 << k; \
+ expect_##test_suf##_eq(fls_##suf(x), i, \
+ "Unexpected result, x=%"pri, x); \
+ } \
+ } \
+ } \
+} while(0)
+
+TEST_BEGIN(test_fls_u) {
+ TEST_FLS(unsigned, u, u, "u");
+}
+TEST_END
+
+TEST_BEGIN(test_fls_lu) {
+ TEST_FLS(unsigned long, lu, lu, "lu");
+}
+TEST_END
+
+TEST_BEGIN(test_fls_llu) {
+ TEST_FLS(unsigned long long, llu, qd, "llu");
+}
+TEST_END
+
+TEST_BEGIN(test_fls_u32) {
+ TEST_FLS(uint32_t, u32, u32, FMTu32);
+}
+TEST_END
+
+TEST_BEGIN(test_fls_u64) {
+ TEST_FLS(uint64_t, u64, u64, FMTu64);
+}
+TEST_END
+
+TEST_BEGIN(test_fls_zu) {
+ TEST_FLS(size_t, zu, zu, "zu");
+}
+TEST_END
+
+TEST_BEGIN(test_fls_u_slow) {
+ TEST_FLS(unsigned, u_slow, u, "u");
+}
+TEST_END
+
+TEST_BEGIN(test_fls_lu_slow) {
+ TEST_FLS(unsigned long, lu_slow, lu, "lu");
+}
+TEST_END
+
+TEST_BEGIN(test_fls_llu_slow) {
+ TEST_FLS(unsigned long long, llu_slow, qd, "llu");
+}
+TEST_END
+
+static unsigned
+popcount_byte(unsigned byte) {
+ int count = 0;
+ for (int i = 0; i < 8; i++) {
+ if ((byte & (1 << i)) != 0) {
+ count++;
}
}
+ return count;
+}
+
+static uint64_t
+expand_byte_to_mask(unsigned byte) {
+ uint64_t result = 0;
+ for (int i = 0; i < 8; i++) {
+ if ((byte & (1 << i)) != 0) {
+ result |= ((uint64_t)0xFF << (i * 8));
+ }
+ }
+ return result;
+}
+
+#define TEST_POPCOUNT(t, suf, pri_hex) do { \
+ t bmul = (t)0x0101010101010101ULL; \
+ for (unsigned i = 0; i < (1 << sizeof(t)); i++) { \
+ for (unsigned j = 0; j < 256; j++) { \
+ /* \
+ * Replicate the byte j into various \
+ * bytes of the integer (as indicated by the \
+ * mask in i), and ensure that the popcount of \
+ * the result is popcount(i) * popcount(j) \
+ */ \
+ t mask = (t)expand_byte_to_mask(i); \
+ t x = (bmul * j) & mask; \
+ expect_u_eq( \
+ popcount_byte(i) * popcount_byte(j), \
+ popcount_##suf(x), \
+ "Unexpected result, x=0x%"pri_hex, x); \
+ } \
+ } \
+} while (0)
+
+TEST_BEGIN(test_popcount_u) {
+ TEST_POPCOUNT(unsigned, u, "x");
+}
+TEST_END
+
+TEST_BEGIN(test_popcount_u_slow) {
+ TEST_POPCOUNT(unsigned, u_slow, "x");
+}
+TEST_END
+
+TEST_BEGIN(test_popcount_lu) {
+ TEST_POPCOUNT(unsigned long, lu, "lx");
+}
+TEST_END
+
+TEST_BEGIN(test_popcount_lu_slow) {
+ TEST_POPCOUNT(unsigned long, lu_slow, "lx");
+}
+TEST_END
+
+TEST_BEGIN(test_popcount_llu) {
+ TEST_POPCOUNT(unsigned long long, llu, "llx");
+}
+TEST_END
+
+TEST_BEGIN(test_popcount_llu_slow) {
+ TEST_POPCOUNT(unsigned long long, llu_slow, "llx");
}
TEST_END
int
main(void) {
- return test(
+ return test_no_reentrancy(
test_pow2_ceil_u64,
test_pow2_ceil_u32,
test_pow2_ceil_zu,
- test_lg_ceil_floor);
+ test_lg_ceil_floor,
+ test_ffs_u,
+ test_ffs_lu,
+ test_ffs_llu,
+ test_ffs_u32,
+ test_ffs_u64,
+ test_ffs_zu,
+ test_fls_u,
+ test_fls_lu,
+ test_fls_llu,
+ test_fls_u32,
+ test_fls_u64,
+ test_fls_zu,
+ test_fls_u_slow,
+ test_fls_lu_slow,
+ test_fls_llu_slow,
+ test_popcount_u,
+ test_popcount_u_slow,
+ test_popcount_lu,
+ test_popcount_lu_slow,
+ test_popcount_llu,
+ test_popcount_llu_slow);
}
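A concrete instance of the TEST_POPCOUNT invariant added above: the mask i selects which bytes of the word receive a copy of j, so the population count of the result factors as popcount(i) * popcount(j). With i = 0x05 (bits 0 and 2 set, selecting bytes 0 and 2) and j = 0x0F, a 32-bit x becomes (0x01010101 * 0x0F) & 0x00FF00FF = 0x000F000F, whose popcount is 8 = 2 * 4. A standalone check of that arithmetic, using the GCC/Clang __builtin_popcount intrinsic in place of jemalloc's internal popcount_u:

#include <assert.h>
#include <stdint.h>

int
main(void) {
	uint32_t bmul = 0x01010101U;
	uint32_t mask = 0x00FF00FFU;	/* expand_byte_to_mask(0x05), truncated to 32 bits */
	uint32_t x = (bmul * 0x0F) & mask;	/* 0x000F000F */
	assert(__builtin_popcount(x) == 2 * 4);	/* popcount(0x05) * popcount(0x0F) */
	return 0;
}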
diff --git a/deps/jemalloc/test/unit/bitmap.c b/deps/jemalloc/test/unit/bitmap.c
index cafb2039e..78e542b67 100644
--- a/deps/jemalloc/test/unit/bitmap.c
+++ b/deps/jemalloc/test/unit/bitmap.c
@@ -1,124 +1,34 @@
#include "test/jemalloc_test.h"
-#define NBITS_TAB \
- NB( 1) \
- NB( 2) \
- NB( 3) \
- NB( 4) \
- NB( 5) \
- NB( 6) \
- NB( 7) \
- NB( 8) \
- NB( 9) \
- NB(10) \
- NB(11) \
- NB(12) \
- NB(13) \
- NB(14) \
- NB(15) \
- NB(16) \
- NB(17) \
- NB(18) \
- NB(19) \
- NB(20) \
- NB(21) \
- NB(22) \
- NB(23) \
- NB(24) \
- NB(25) \
- NB(26) \
- NB(27) \
- NB(28) \
- NB(29) \
- NB(30) \
- NB(31) \
- NB(32) \
- \
- NB(33) \
- NB(34) \
- NB(35) \
- NB(36) \
- NB(37) \
- NB(38) \
- NB(39) \
- NB(40) \
- NB(41) \
- NB(42) \
- NB(43) \
- NB(44) \
- NB(45) \
- NB(46) \
- NB(47) \
- NB(48) \
- NB(49) \
- NB(50) \
- NB(51) \
- NB(52) \
- NB(53) \
- NB(54) \
- NB(55) \
- NB(56) \
- NB(57) \
- NB(58) \
- NB(59) \
- NB(60) \
- NB(61) \
- NB(62) \
- NB(63) \
- NB(64) \
- NB(65) \
- \
- NB(126) \
- NB(127) \
- NB(128) \
- NB(129) \
- NB(130) \
- \
- NB(254) \
- NB(255) \
- NB(256) \
- NB(257) \
- NB(258) \
- \
- NB(510) \
- NB(511) \
- NB(512) \
- NB(513) \
- NB(514) \
- \
- NB(1024) \
- NB(2048) \
- NB(4096) \
- NB(8192) \
- NB(16384) \
+#include "test/nbits.h"
static void
test_bitmap_initializer_body(const bitmap_info_t *binfo, size_t nbits) {
bitmap_info_t binfo_dyn;
bitmap_info_init(&binfo_dyn, nbits);
- assert_zu_eq(bitmap_size(binfo), bitmap_size(&binfo_dyn),
+ expect_zu_eq(bitmap_size(binfo), bitmap_size(&binfo_dyn),
"Unexpected difference between static and dynamic initialization, "
"nbits=%zu", nbits);
- assert_zu_eq(binfo->nbits, binfo_dyn.nbits,
+ expect_zu_eq(binfo->nbits, binfo_dyn.nbits,
"Unexpected difference between static and dynamic initialization, "
"nbits=%zu", nbits);
#ifdef BITMAP_USE_TREE
- assert_u_eq(binfo->nlevels, binfo_dyn.nlevels,
+ expect_u_eq(binfo->nlevels, binfo_dyn.nlevels,
"Unexpected difference between static and dynamic initialization, "
"nbits=%zu", nbits);
{
unsigned i;
for (i = 0; i < binfo->nlevels; i++) {
- assert_zu_eq(binfo->levels[i].group_offset,
+ expect_zu_eq(binfo->levels[i].group_offset,
binfo_dyn.levels[i].group_offset,
"Unexpected difference between static and dynamic "
"initialization, nbits=%zu, level=%u", nbits, i);
}
}
#else
- assert_zu_eq(binfo->ngroups, binfo_dyn.ngroups,
+ expect_zu_eq(binfo->ngroups, binfo_dyn.ngroups,
"Unexpected difference between static and dynamic initialization");
#endif
}
@@ -140,9 +50,9 @@ static size_t
test_bitmap_size_body(const bitmap_info_t *binfo, size_t nbits,
size_t prev_size) {
size_t size = bitmap_size(binfo);
- assert_zu_ge(size, (nbits >> 3),
+ expect_zu_ge(size, (nbits >> 3),
"Bitmap size is smaller than expected");
- assert_zu_ge(size, prev_size, "Bitmap size is smaller than expected");
+ expect_zu_ge(size, prev_size, "Bitmap size is smaller than expected");
return size;
}
@@ -170,17 +80,17 @@ static void
test_bitmap_init_body(const bitmap_info_t *binfo, size_t nbits) {
size_t i;
bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
- assert_ptr_not_null(bitmap, "Unexpected malloc() failure");
+ expect_ptr_not_null(bitmap, "Unexpected malloc() failure");
bitmap_init(bitmap, binfo, false);
for (i = 0; i < nbits; i++) {
- assert_false(bitmap_get(bitmap, binfo, i),
+ expect_false(bitmap_get(bitmap, binfo, i),
"Bit should be unset");
}
bitmap_init(bitmap, binfo, true);
for (i = 0; i < nbits; i++) {
- assert_true(bitmap_get(bitmap, binfo, i), "Bit should be set");
+ expect_true(bitmap_get(bitmap, binfo, i), "Bit should be set");
}
free(bitmap);
@@ -207,13 +117,13 @@ static void
test_bitmap_set_body(const bitmap_info_t *binfo, size_t nbits) {
size_t i;
bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
- assert_ptr_not_null(bitmap, "Unexpected malloc() failure");
+ expect_ptr_not_null(bitmap, "Unexpected malloc() failure");
bitmap_init(bitmap, binfo, false);
for (i = 0; i < nbits; i++) {
bitmap_set(bitmap, binfo, i);
}
- assert_true(bitmap_full(bitmap, binfo), "All bits should be set");
+ expect_true(bitmap_full(bitmap, binfo), "All bits should be set");
free(bitmap);
}
@@ -238,20 +148,20 @@ static void
test_bitmap_unset_body(const bitmap_info_t *binfo, size_t nbits) {
size_t i;
bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
- assert_ptr_not_null(bitmap, "Unexpected malloc() failure");
+ expect_ptr_not_null(bitmap, "Unexpected malloc() failure");
bitmap_init(bitmap, binfo, false);
for (i = 0; i < nbits; i++) {
bitmap_set(bitmap, binfo, i);
}
- assert_true(bitmap_full(bitmap, binfo), "All bits should be set");
+ expect_true(bitmap_full(bitmap, binfo), "All bits should be set");
for (i = 0; i < nbits; i++) {
bitmap_unset(bitmap, binfo, i);
}
for (i = 0; i < nbits; i++) {
bitmap_set(bitmap, binfo, i);
}
- assert_true(bitmap_full(bitmap, binfo), "All bits should be set");
+ expect_true(bitmap_full(bitmap, binfo), "All bits should be set");
free(bitmap);
}
@@ -275,25 +185,25 @@ TEST_END
static void
test_bitmap_xfu_body(const bitmap_info_t *binfo, size_t nbits) {
bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
- assert_ptr_not_null(bitmap, "Unexpected malloc() failure");
+ expect_ptr_not_null(bitmap, "Unexpected malloc() failure");
bitmap_init(bitmap, binfo, false);
/* Iteratively set bits starting at the beginning. */
for (size_t i = 0; i < nbits; i++) {
- assert_zu_eq(bitmap_ffu(bitmap, binfo, 0), i,
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, 0), i,
"First unset bit should be just after previous first unset "
"bit");
- assert_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i,
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i,
"First unset bit should be just after previous first unset "
"bit");
- assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
"First unset bit should be just after previous first unset "
"bit");
- assert_zu_eq(bitmap_sfu(bitmap, binfo), i,
+ expect_zu_eq(bitmap_sfu(bitmap, binfo), i,
"First unset bit should be just after previous first unset "
"bit");
}
- assert_true(bitmap_full(bitmap, binfo), "All bits should be set");
+ expect_true(bitmap_full(bitmap, binfo), "All bits should be set");
/*
* Iteratively unset bits starting at the end, and verify that
@@ -301,17 +211,17 @@ test_bitmap_xfu_body(const bitmap_info_t *binfo, size_t nbits) {
*/
for (size_t i = nbits - 1; i < nbits; i--) { /* (nbits..0] */
bitmap_unset(bitmap, binfo, i);
- assert_zu_eq(bitmap_ffu(bitmap, binfo, 0), i,
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, 0), i,
"First unset bit should the bit previously unset");
- assert_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i,
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i,
"First unset bit should the bit previously unset");
- assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
"First unset bit should the bit previously unset");
- assert_zu_eq(bitmap_sfu(bitmap, binfo), i,
+ expect_zu_eq(bitmap_sfu(bitmap, binfo), i,
"First unset bit should the bit previously unset");
bitmap_unset(bitmap, binfo, i);
}
- assert_false(bitmap_get(bitmap, binfo, 0), "Bit should be unset");
+ expect_false(bitmap_get(bitmap, binfo, 0), "Bit should be unset");
/*
* Iteratively set bits starting at the beginning, and verify that
@@ -319,29 +229,29 @@ test_bitmap_xfu_body(const bitmap_info_t *binfo, size_t nbits) {
*/
for (size_t i = 1; i < nbits; i++) {
bitmap_set(bitmap, binfo, i - 1);
- assert_zu_eq(bitmap_ffu(bitmap, binfo, 0), i,
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, 0), i,
"First unset bit should be just after the bit previously "
"set");
- assert_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i,
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i,
"First unset bit should be just after the bit previously "
"set");
- assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
"First unset bit should be just after the bit previously "
"set");
- assert_zu_eq(bitmap_sfu(bitmap, binfo), i,
+ expect_zu_eq(bitmap_sfu(bitmap, binfo), i,
"First unset bit should be just after the bit previously "
"set");
bitmap_unset(bitmap, binfo, i);
}
- assert_zu_eq(bitmap_ffu(bitmap, binfo, 0), nbits - 1,
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, 0), nbits - 1,
"First unset bit should be the last bit");
- assert_zu_eq(bitmap_ffu(bitmap, binfo, (nbits > 1) ? nbits-2 : nbits-1),
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, (nbits > 1) ? nbits-2 : nbits-1),
nbits - 1, "First unset bit should be the last bit");
- assert_zu_eq(bitmap_ffu(bitmap, binfo, nbits - 1), nbits - 1,
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, nbits - 1), nbits - 1,
"First unset bit should be the last bit");
- assert_zu_eq(bitmap_sfu(bitmap, binfo), nbits - 1,
+ expect_zu_eq(bitmap_sfu(bitmap, binfo), nbits - 1,
"First unset bit should be the last bit");
- assert_true(bitmap_full(bitmap, binfo), "All bits should be set");
+ expect_true(bitmap_full(bitmap, binfo), "All bits should be set");
/*
* Bubble a "usu" pattern through the bitmap and verify that
@@ -352,22 +262,22 @@ test_bitmap_xfu_body(const bitmap_info_t *binfo, size_t nbits) {
bitmap_unset(bitmap, binfo, i);
bitmap_unset(bitmap, binfo, i+2);
if (i > 0) {
- assert_zu_eq(bitmap_ffu(bitmap, binfo, i-1), i,
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, i-1), i,
"Unexpected first unset bit");
}
- assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
"Unexpected first unset bit");
- assert_zu_eq(bitmap_ffu(bitmap, binfo, i+1), i+2,
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, i+1), i+2,
"Unexpected first unset bit");
- assert_zu_eq(bitmap_ffu(bitmap, binfo, i+2), i+2,
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, i+2), i+2,
"Unexpected first unset bit");
if (i + 3 < nbits) {
- assert_zu_eq(bitmap_ffu(bitmap, binfo, i+3),
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, i+3),
nbits, "Unexpected first unset bit");
}
- assert_zu_eq(bitmap_sfu(bitmap, binfo), i,
+ expect_zu_eq(bitmap_sfu(bitmap, binfo), i,
"Unexpected first unset bit");
- assert_zu_eq(bitmap_sfu(bitmap, binfo), i+2,
+ expect_zu_eq(bitmap_sfu(bitmap, binfo), i+2,
"Unexpected first unset bit");
}
}
@@ -382,20 +292,20 @@ test_bitmap_xfu_body(const bitmap_info_t *binfo, size_t nbits) {
for (size_t i = 0; i < nbits-1; i++) {
bitmap_unset(bitmap, binfo, i);
if (i > 0) {
- assert_zu_eq(bitmap_ffu(bitmap, binfo, i-1), i,
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, i-1), i,
"Unexpected first unset bit");
}
- assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
"Unexpected first unset bit");
- assert_zu_eq(bitmap_ffu(bitmap, binfo, i+1), nbits-1,
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, i+1), nbits-1,
"Unexpected first unset bit");
- assert_zu_eq(bitmap_ffu(bitmap, binfo, nbits-1),
+ expect_zu_eq(bitmap_ffu(bitmap, binfo, nbits-1),
nbits-1, "Unexpected first unset bit");
- assert_zu_eq(bitmap_sfu(bitmap, binfo), i,
+ expect_zu_eq(bitmap_sfu(bitmap, binfo), i,
"Unexpected first unset bit");
}
- assert_zu_eq(bitmap_sfu(bitmap, binfo), nbits-1,
+ expect_zu_eq(bitmap_sfu(bitmap, binfo), nbits-1,
"Unexpected first unset bit");
}
@@ -403,9 +313,11 @@ test_bitmap_xfu_body(const bitmap_info_t *binfo, size_t nbits) {
}
TEST_BEGIN(test_bitmap_xfu) {
- size_t nbits;
+ size_t nbits, nbits_max;
- for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) {
+ /* The test is O(n^2); large page sizes may slow down too much. */
+ nbits_max = BITMAP_MAXBITS > 512 ? 512 : BITMAP_MAXBITS;
+ for (nbits = 1; nbits <= nbits_max; nbits++) {
bitmap_info_t binfo;
bitmap_info_init(&binfo, nbits);
test_bitmap_xfu_body(&binfo, nbits);
diff --git a/deps/jemalloc/test/unit/buf_writer.c b/deps/jemalloc/test/unit/buf_writer.c
new file mode 100644
index 000000000..d5e63a0e3
--- /dev/null
+++ b/deps/jemalloc/test/unit/buf_writer.c
@@ -0,0 +1,196 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/buf_writer.h"
+
+#define TEST_BUF_SIZE 16
+#define UNIT_MAX (TEST_BUF_SIZE * 3)
+
+static size_t test_write_len;
+static char test_buf[TEST_BUF_SIZE];
+static uint64_t arg;
+static uint64_t arg_store;
+
+static void
+test_write_cb(void *cbopaque, const char *s) {
+ size_t prev_test_write_len = test_write_len;
+ test_write_len += strlen(s); /* only increase the length */
+ arg_store = *(uint64_t *)cbopaque; /* only pass along the argument */
+ assert_zu_le(prev_test_write_len, test_write_len,
+ "Test write overflowed");
+}
+
+static void
+test_buf_writer_body(tsdn_t *tsdn, buf_writer_t *buf_writer) {
+ char s[UNIT_MAX + 1];
+ size_t n_unit, remain, i;
+ ssize_t unit;
+
+ assert(buf_writer->buf != NULL);
+ memset(s, 'a', UNIT_MAX);
+ arg = 4; /* Starting value of random argument. */
+ arg_store = arg;
+ for (unit = UNIT_MAX; unit >= 0; --unit) {
+ /* unit keeps decreasing, so strlen(s) is always unit. */
+ s[unit] = '\0';
+ for (n_unit = 1; n_unit <= 3; ++n_unit) {
+ test_write_len = 0;
+ remain = 0;
+ for (i = 1; i <= n_unit; ++i) {
+ arg = prng_lg_range_u64(&arg, 64);
+ buf_writer_cb(buf_writer, s);
+ remain += unit;
+ if (remain > buf_writer->buf_size) {
+ /* Flushes should have happened. */
+ assert_u64_eq(arg_store, arg, "Call "
+ "back argument didn't get through");
+ remain %= buf_writer->buf_size;
+ if (remain == 0) {
+ /* Last flush should be lazy. */
+ remain += buf_writer->buf_size;
+ }
+ }
+ assert_zu_eq(test_write_len + remain, i * unit,
+ "Incorrect length after writing %zu strings"
+ " of length %zu", i, unit);
+ }
+ buf_writer_flush(buf_writer);
+ expect_zu_eq(test_write_len, n_unit * unit,
+ "Incorrect length after flushing at the end of"
+ " writing %zu strings of length %zu", n_unit, unit);
+ }
+ }
+ buf_writer_terminate(tsdn, buf_writer);
+}
+
+TEST_BEGIN(test_buf_write_static) {
+ buf_writer_t buf_writer;
+ tsdn_t *tsdn = tsdn_fetch();
+ assert_false(buf_writer_init(tsdn, &buf_writer, test_write_cb, &arg,
+ test_buf, TEST_BUF_SIZE),
+ "buf_writer_init() should not encounter error on static buffer");
+ test_buf_writer_body(tsdn, &buf_writer);
+}
+TEST_END
+
+TEST_BEGIN(test_buf_write_dynamic) {
+ buf_writer_t buf_writer;
+ tsdn_t *tsdn = tsdn_fetch();
+ assert_false(buf_writer_init(tsdn, &buf_writer, test_write_cb, &arg,
+ NULL, TEST_BUF_SIZE), "buf_writer_init() should not OOM");
+ test_buf_writer_body(tsdn, &buf_writer);
+}
+TEST_END
+
+TEST_BEGIN(test_buf_write_oom) {
+ buf_writer_t buf_writer;
+ tsdn_t *tsdn = tsdn_fetch();
+ assert_true(buf_writer_init(tsdn, &buf_writer, test_write_cb, &arg,
+ NULL, SC_LARGE_MAXCLASS + 1), "buf_writer_init() should OOM");
+ assert(buf_writer.buf == NULL);
+
+ char s[UNIT_MAX + 1];
+ size_t n_unit, i;
+ ssize_t unit;
+
+ memset(s, 'a', UNIT_MAX);
+ arg = 4; /* Starting value of random argument. */
+ arg_store = arg;
+ for (unit = UNIT_MAX; unit >= 0; unit -= UNIT_MAX / 4) {
+ /* unit keeps decreasing, so strlen(s) is always unit. */
+ s[unit] = '\0';
+ for (n_unit = 1; n_unit <= 3; ++n_unit) {
+ test_write_len = 0;
+ for (i = 1; i <= n_unit; ++i) {
+ arg = prng_lg_range_u64(&arg, 64);
+ buf_writer_cb(&buf_writer, s);
+ assert_u64_eq(arg_store, arg,
+ "Call back argument didn't get through");
+ assert_zu_eq(test_write_len, i * unit,
+ "Incorrect length after writing %zu strings"
+ " of length %zu", i, unit);
+ }
+ buf_writer_flush(&buf_writer);
+ expect_zu_eq(test_write_len, n_unit * unit,
+ "Incorrect length after flushing at the end of"
+ " writing %zu strings of length %zu", n_unit, unit);
+ }
+ }
+ buf_writer_terminate(tsdn, &buf_writer);
+}
+TEST_END
+
+static int test_read_count;
+static size_t test_read_len;
+static uint64_t arg_sum;
+
+ssize_t
+test_read_cb(void *cbopaque, void *buf, size_t limit) {
+ static uint64_t rand = 4;
+
+ arg_sum += *(uint64_t *)cbopaque;
+ assert_zu_gt(limit, 0, "Limit for read_cb must be positive");
+ --test_read_count;
+ if (test_read_count == 0) {
+ return -1;
+ } else {
+ size_t read_len = limit;
+ if (limit > 1) {
+ rand = prng_range_u64(&rand, (uint64_t)limit);
+ read_len -= (size_t)rand;
+ }
+ assert(read_len > 0);
+ memset(buf, 'a', read_len);
+ size_t prev_test_read_len = test_read_len;
+ test_read_len += read_len;
+ assert_zu_le(prev_test_read_len, test_read_len,
+ "Test read overflowed");
+ return read_len;
+ }
+}
+
+static void
+test_buf_writer_pipe_body(tsdn_t *tsdn, buf_writer_t *buf_writer) {
+ arg = 4; /* Starting value of random argument. */
+ for (int count = 5; count > 0; --count) {
+ arg = prng_lg_range_u64(&arg, 64);
+ arg_sum = 0;
+ test_read_count = count;
+ test_read_len = 0;
+ test_write_len = 0;
+ buf_writer_pipe(buf_writer, test_read_cb, &arg);
+ assert(test_read_count == 0);
+ expect_u64_eq(arg_sum, arg * count, "");
+ expect_zu_eq(test_write_len, test_read_len,
+ "Write length should be equal to read length");
+ }
+ buf_writer_terminate(tsdn, buf_writer);
+}
+
+TEST_BEGIN(test_buf_write_pipe) {
+ buf_writer_t buf_writer;
+ tsdn_t *tsdn = tsdn_fetch();
+ assert_false(buf_writer_init(tsdn, &buf_writer, test_write_cb, &arg,
+ test_buf, TEST_BUF_SIZE),
+ "buf_writer_init() should not encounter error on static buffer");
+ test_buf_writer_pipe_body(tsdn, &buf_writer);
+}
+TEST_END
+
+TEST_BEGIN(test_buf_write_pipe_oom) {
+ buf_writer_t buf_writer;
+ tsdn_t *tsdn = tsdn_fetch();
+ assert_true(buf_writer_init(tsdn, &buf_writer, test_write_cb, &arg,
+ NULL, SC_LARGE_MAXCLASS + 1), "buf_writer_init() should OOM");
+ test_buf_writer_pipe_body(tsdn, &buf_writer);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_buf_write_static,
+ test_buf_write_dynamic,
+ test_buf_write_oom,
+ test_buf_write_pipe,
+ test_buf_write_pipe_oom);
+}
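The remain bookkeeping in test_buf_writer_body() above encodes the expectation that data leaves the writer only in whole-buffer chunks, apart from the deliberately lazy final flush. As a worked example with TEST_BUF_SIZE = 16 and unit = 10: after the first write remain = 10 and nothing has been flushed; after the second, remain = 20 > 16, so one full 16-byte buffer is assumed to have reached test_write_cb and remain becomes 20 % 16 = 4, making the assertion test_write_len + remain == i * unit check 16 + 4 == 20. The remain == 0 special case covers exact multiples of the buffer size (e.g. unit = 16), where the last full buffer is expected to stay buffered until buf_writer_flush() is called explicitly.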
diff --git a/deps/jemalloc/test/unit/cache_bin.c b/deps/jemalloc/test/unit/cache_bin.c
new file mode 100644
index 000000000..3b6dbab39
--- /dev/null
+++ b/deps/jemalloc/test/unit/cache_bin.c
@@ -0,0 +1,384 @@
+#include "test/jemalloc_test.h"
+
+static void
+do_fill_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
+ cache_bin_sz_t ncached_max, cache_bin_sz_t nfill_attempt,
+ cache_bin_sz_t nfill_succeed) {
+ bool success;
+ void *ptr;
+ assert_true(cache_bin_ncached_get_local(bin, info) == 0, "");
+ CACHE_BIN_PTR_ARRAY_DECLARE(arr, nfill_attempt);
+ cache_bin_init_ptr_array_for_fill(bin, info, &arr, nfill_attempt);
+ for (cache_bin_sz_t i = 0; i < nfill_succeed; i++) {
+ arr.ptr[i] = &ptrs[i];
+ }
+ cache_bin_finish_fill(bin, info, &arr, nfill_succeed);
+ expect_true(cache_bin_ncached_get_local(bin, info) == nfill_succeed,
+ "");
+ cache_bin_low_water_set(bin);
+
+ for (cache_bin_sz_t i = 0; i < nfill_succeed; i++) {
+ ptr = cache_bin_alloc(bin, &success);
+ expect_true(success, "");
+ expect_ptr_eq(ptr, (void *)&ptrs[i],
+ "Should pop in order filled");
+ expect_true(cache_bin_low_water_get(bin, info)
+ == nfill_succeed - i - 1, "");
+ }
+ expect_true(cache_bin_ncached_get_local(bin, info) == 0, "");
+ expect_true(cache_bin_low_water_get(bin, info) == 0, "");
+}
+
+static void
+do_flush_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
+ cache_bin_sz_t nfill, cache_bin_sz_t nflush) {
+ bool success;
+ assert_true(cache_bin_ncached_get_local(bin, info) == 0, "");
+
+ for (cache_bin_sz_t i = 0; i < nfill; i++) {
+ success = cache_bin_dalloc_easy(bin, &ptrs[i]);
+ expect_true(success, "");
+ }
+
+ CACHE_BIN_PTR_ARRAY_DECLARE(arr, nflush);
+ cache_bin_init_ptr_array_for_flush(bin, info, &arr, nflush);
+ for (cache_bin_sz_t i = 0; i < nflush; i++) {
+ expect_ptr_eq(arr.ptr[i], &ptrs[nflush - i - 1], "");
+ }
+ cache_bin_finish_flush(bin, info, &arr, nflush);
+
+ expect_true(cache_bin_ncached_get_local(bin, info) == nfill - nflush,
+ "");
+ while (cache_bin_ncached_get_local(bin, info) > 0) {
+ cache_bin_alloc(bin, &success);
+ }
+}
+
+static void
+do_batch_alloc_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
+ cache_bin_sz_t nfill, size_t batch) {
+ assert_true(cache_bin_ncached_get_local(bin, info) == 0, "");
+ CACHE_BIN_PTR_ARRAY_DECLARE(arr, nfill);
+ cache_bin_init_ptr_array_for_fill(bin, info, &arr, nfill);
+ for (cache_bin_sz_t i = 0; i < nfill; i++) {
+ arr.ptr[i] = &ptrs[i];
+ }
+ cache_bin_finish_fill(bin, info, &arr, nfill);
+ assert_true(cache_bin_ncached_get_local(bin, info) == nfill, "");
+ cache_bin_low_water_set(bin);
+
+ void **out = malloc((batch + 1) * sizeof(void *));
+ size_t n = cache_bin_alloc_batch(bin, batch, out);
+ assert_true(n == ((size_t)nfill < batch ? (size_t)nfill : batch), "");
+ for (cache_bin_sz_t i = 0; i < (cache_bin_sz_t)n; i++) {
+ expect_ptr_eq(out[i], &ptrs[i], "");
+ }
+ expect_true(cache_bin_low_water_get(bin, info) == nfill -
+ (cache_bin_sz_t)n, "");
+ while (cache_bin_ncached_get_local(bin, info) > 0) {
+ bool success;
+ cache_bin_alloc(bin, &success);
+ }
+ free(out);
+}
+
+static void
+test_bin_init(cache_bin_t *bin, cache_bin_info_t *info) {
+ size_t size;
+ size_t alignment;
+ cache_bin_info_compute_alloc(info, 1, &size, &alignment);
+ void *mem = mallocx(size, MALLOCX_ALIGN(alignment));
+ assert_ptr_not_null(mem, "Unexpected mallocx failure");
+
+ size_t cur_offset = 0;
+ cache_bin_preincrement(info, 1, mem, &cur_offset);
+ cache_bin_init(bin, info, mem, &cur_offset);
+ cache_bin_postincrement(info, 1, mem, &cur_offset);
+ assert_zu_eq(cur_offset, size, "Should use all requested memory");
+}
+
+TEST_BEGIN(test_cache_bin) {
+ const int ncached_max = 100;
+ bool success;
+ void *ptr;
+
+ cache_bin_info_t info;
+ cache_bin_info_init(&info, ncached_max);
+ cache_bin_t bin;
+ test_bin_init(&bin, &info);
+
+ /* Initialize to empty; should then have 0 elements. */
+ expect_d_eq(ncached_max, cache_bin_info_ncached_max(&info), "");
+ expect_true(cache_bin_ncached_get_local(&bin, &info) == 0, "");
+ expect_true(cache_bin_low_water_get(&bin, &info) == 0, "");
+
+ ptr = cache_bin_alloc_easy(&bin, &success);
+ expect_false(success, "Shouldn't successfully allocate when empty");
+ expect_ptr_null(ptr, "Shouldn't get a non-null pointer on failure");
+
+ ptr = cache_bin_alloc(&bin, &success);
+ expect_false(success, "Shouldn't successfully allocate when empty");
+ expect_ptr_null(ptr, "Shouldn't get a non-null pointer on failure");
+
+ /*
+ * We allocate one more item than ncached_max, so we can test cache bin
+ * exhaustion.
+ */
+ void **ptrs = mallocx(sizeof(void *) * (ncached_max + 1), 0);
+ assert_ptr_not_null(ptrs, "Unexpected mallocx failure");
+ for (cache_bin_sz_t i = 0; i < ncached_max; i++) {
+ expect_true(cache_bin_ncached_get_local(&bin, &info) == i, "");
+ success = cache_bin_dalloc_easy(&bin, &ptrs[i]);
+ expect_true(success,
+ "Should be able to dalloc into a non-full cache bin.");
+ expect_true(cache_bin_low_water_get(&bin, &info) == 0,
+ "Pushes and pops shouldn't change low water of zero.");
+ }
+ expect_true(cache_bin_ncached_get_local(&bin, &info) == ncached_max,
+ "");
+ success = cache_bin_dalloc_easy(&bin, &ptrs[ncached_max]);
+ expect_false(success, "Shouldn't be able to dalloc into a full bin.");
+
+ cache_bin_low_water_set(&bin);
+
+ for (cache_bin_sz_t i = 0; i < ncached_max; i++) {
+ expect_true(cache_bin_low_water_get(&bin, &info)
+ == ncached_max - i, "");
+ expect_true(cache_bin_ncached_get_local(&bin, &info)
+ == ncached_max - i, "");
+ /*
+ * This should fail -- the easy variant can't change the low
+ * water mark.
+ */
+ ptr = cache_bin_alloc_easy(&bin, &success);
+ expect_ptr_null(ptr, "");
+ expect_false(success, "");
+ expect_true(cache_bin_low_water_get(&bin, &info)
+ == ncached_max - i, "");
+ expect_true(cache_bin_ncached_get_local(&bin, &info)
+ == ncached_max - i, "");
+
+ /* This should succeed, though. */
+ ptr = cache_bin_alloc(&bin, &success);
+ expect_true(success, "");
+ expect_ptr_eq(ptr, &ptrs[ncached_max - i - 1],
+ "Alloc should pop in stack order");
+ expect_true(cache_bin_low_water_get(&bin, &info)
+ == ncached_max - i - 1, "");
+ expect_true(cache_bin_ncached_get_local(&bin, &info)
+ == ncached_max - i - 1, "");
+ }
+ /* Now we're empty -- all alloc attempts should fail. */
+ expect_true(cache_bin_ncached_get_local(&bin, &info) == 0, "");
+ ptr = cache_bin_alloc_easy(&bin, &success);
+ expect_ptr_null(ptr, "");
+ expect_false(success, "");
+ ptr = cache_bin_alloc(&bin, &success);
+ expect_ptr_null(ptr, "");
+ expect_false(success, "");
+
+ for (cache_bin_sz_t i = 0; i < ncached_max / 2; i++) {
+ cache_bin_dalloc_easy(&bin, &ptrs[i]);
+ }
+ cache_bin_low_water_set(&bin);
+
+ for (cache_bin_sz_t i = ncached_max / 2; i < ncached_max; i++) {
+ cache_bin_dalloc_easy(&bin, &ptrs[i]);
+ }
+ expect_true(cache_bin_ncached_get_local(&bin, &info) == ncached_max,
+ "");
+ for (cache_bin_sz_t i = ncached_max - 1; i >= ncached_max / 2; i--) {
+ /*
+ * Size is bigger than low water -- the reduced version should
+ * succeed.
+ */
+ ptr = cache_bin_alloc_easy(&bin, &success);
+ expect_true(success, "");
+ expect_ptr_eq(ptr, &ptrs[i], "");
+ }
+ /* But now, we've hit low-water. */
+ ptr = cache_bin_alloc_easy(&bin, &success);
+ expect_false(success, "");
+ expect_ptr_null(ptr, "");
+
+ /* We're going to test filling -- we must be empty to start. */
+ while (cache_bin_ncached_get_local(&bin, &info)) {
+ cache_bin_alloc(&bin, &success);
+ expect_true(success, "");
+ }
+
+ /* Test fill. */
+ /* Try to fill all, succeed fully. */
+ do_fill_test(&bin, &info, ptrs, ncached_max, ncached_max, ncached_max);
+ /* Try to fill all, succeed partially. */
+ do_fill_test(&bin, &info, ptrs, ncached_max, ncached_max,
+ ncached_max / 2);
+ /* Try to fill all, fail completely. */
+ do_fill_test(&bin, &info, ptrs, ncached_max, ncached_max, 0);
+
+ /* Try to fill some, succeed fully. */
+ do_fill_test(&bin, &info, ptrs, ncached_max, ncached_max / 2,
+ ncached_max / 2);
+ /* Try to fill some, succeed partially. */
+ do_fill_test(&bin, &info, ptrs, ncached_max, ncached_max / 2,
+ ncached_max / 4);
+ /* Try to fill some, fail completely. */
+ do_fill_test(&bin, &info, ptrs, ncached_max, ncached_max / 2, 0);
+
+ do_flush_test(&bin, &info, ptrs, ncached_max, ncached_max);
+ do_flush_test(&bin, &info, ptrs, ncached_max, ncached_max / 2);
+ do_flush_test(&bin, &info, ptrs, ncached_max, 0);
+ do_flush_test(&bin, &info, ptrs, ncached_max / 2, ncached_max / 2);
+ do_flush_test(&bin, &info, ptrs, ncached_max / 2, ncached_max / 4);
+ do_flush_test(&bin, &info, ptrs, ncached_max / 2, 0);
+
+ do_batch_alloc_test(&bin, &info, ptrs, ncached_max, ncached_max);
+ do_batch_alloc_test(&bin, &info, ptrs, ncached_max, ncached_max * 2);
+ do_batch_alloc_test(&bin, &info, ptrs, ncached_max, ncached_max / 2);
+ do_batch_alloc_test(&bin, &info, ptrs, ncached_max, 2);
+ do_batch_alloc_test(&bin, &info, ptrs, ncached_max, 1);
+ do_batch_alloc_test(&bin, &info, ptrs, ncached_max, 0);
+ do_batch_alloc_test(&bin, &info, ptrs, ncached_max / 2,
+ ncached_max / 2);
+ do_batch_alloc_test(&bin, &info, ptrs, ncached_max / 2, ncached_max);
+ do_batch_alloc_test(&bin, &info, ptrs, ncached_max / 2,
+ ncached_max / 4);
+ do_batch_alloc_test(&bin, &info, ptrs, ncached_max / 2, 2);
+ do_batch_alloc_test(&bin, &info, ptrs, ncached_max / 2, 1);
+ do_batch_alloc_test(&bin, &info, ptrs, ncached_max / 2, 0);
+ do_batch_alloc_test(&bin, &info, ptrs, 2, ncached_max);
+ do_batch_alloc_test(&bin, &info, ptrs, 2, 2);
+ do_batch_alloc_test(&bin, &info, ptrs, 2, 1);
+ do_batch_alloc_test(&bin, &info, ptrs, 2, 0);
+ do_batch_alloc_test(&bin, &info, ptrs, 1, 2);
+ do_batch_alloc_test(&bin, &info, ptrs, 1, 1);
+ do_batch_alloc_test(&bin, &info, ptrs, 1, 0);
+ do_batch_alloc_test(&bin, &info, ptrs, 0, 2);
+ do_batch_alloc_test(&bin, &info, ptrs, 0, 1);
+ do_batch_alloc_test(&bin, &info, ptrs, 0, 0);
+
+ free(ptrs);
+}
+TEST_END
+
+static void
+do_flush_stashed_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
+ cache_bin_sz_t nfill, cache_bin_sz_t nstash) {
+ expect_true(cache_bin_ncached_get_local(bin, info) == 0,
+ "Bin not empty");
+ expect_true(cache_bin_nstashed_get_local(bin, info) == 0,
+ "Bin not empty");
+ expect_true(nfill + nstash <= info->ncached_max, "Exceeded max");
+
+ bool ret;
+ /* Fill */
+ for (cache_bin_sz_t i = 0; i < nfill; i++) {
+ ret = cache_bin_dalloc_easy(bin, &ptrs[i]);
+ expect_true(ret, "Unexpected fill failure");
+ }
+ expect_true(cache_bin_ncached_get_local(bin, info) == nfill,
+ "Wrong cached count");
+
+ /* Stash */
+ for (cache_bin_sz_t i = 0; i < nstash; i++) {
+ ret = cache_bin_stash(bin, &ptrs[i + nfill]);
+ expect_true(ret, "Unexpected stash failure");
+ }
+ expect_true(cache_bin_nstashed_get_local(bin, info) == nstash,
+ "Wrong stashed count");
+
+ if (nfill + nstash == info->ncached_max) {
+ ret = cache_bin_dalloc_easy(bin, &ptrs[0]);
+ expect_false(ret, "Should not dalloc into a full bin");
+ ret = cache_bin_stash(bin, &ptrs[0]);
+ expect_false(ret, "Should not stash into a full bin");
+ }
+
+ /* Alloc filled ones */
+ for (cache_bin_sz_t i = 0; i < nfill; i++) {
+ void *ptr = cache_bin_alloc(bin, &ret);
+ expect_true(ret, "Unexpected alloc failure");
+ /* Verify it's not from the stashed range. */
+ expect_true((uintptr_t)ptr < (uintptr_t)&ptrs[nfill],
+ "Should not alloc stashed ptrs");
+ }
+ expect_true(cache_bin_ncached_get_local(bin, info) == 0,
+ "Wrong cached count");
+ expect_true(cache_bin_nstashed_get_local(bin, info) == nstash,
+ "Wrong stashed count");
+
+ cache_bin_alloc(bin, &ret);
+ expect_false(ret, "Should not alloc stashed");
+
+ /* Clear stashed ones */
+ cache_bin_finish_flush_stashed(bin, info);
+ expect_true(cache_bin_ncached_get_local(bin, info) == 0,
+ "Wrong cached count");
+ expect_true(cache_bin_nstashed_get_local(bin, info) == 0,
+ "Wrong stashed count");
+
+ cache_bin_alloc(bin, &ret);
+ expect_false(ret, "Should not alloc from empty bin");
+}
+
+TEST_BEGIN(test_cache_bin_stash) {
+ const int ncached_max = 100;
+
+ cache_bin_t bin;
+ cache_bin_info_t info;
+ cache_bin_info_init(&info, ncached_max);
+ test_bin_init(&bin, &info);
+
+ /*
+ * The content of this array is not accessed; instead the interior
+ * addresses are used to insert / stash into the bins as test pointers.
+ */
+ void **ptrs = mallocx(sizeof(void *) * (ncached_max + 1), 0);
+ assert_ptr_not_null(ptrs, "Unexpected mallocx failure");
+ bool ret;
+ for (cache_bin_sz_t i = 0; i < ncached_max; i++) {
+ expect_true(cache_bin_ncached_get_local(&bin, &info) ==
+ (i / 2 + i % 2), "Wrong ncached value");
+ expect_true(cache_bin_nstashed_get_local(&bin, &info) == i / 2,
+ "Wrong nstashed value");
+ if (i % 2 == 0) {
+ cache_bin_dalloc_easy(&bin, &ptrs[i]);
+ } else {
+ ret = cache_bin_stash(&bin, &ptrs[i]);
+ expect_true(ret, "Should be able to stash into a "
+ "non-full cache bin");
+ }
+ }
+ ret = cache_bin_dalloc_easy(&bin, &ptrs[0]);
+ expect_false(ret, "Should not dalloc into a full cache bin");
+ ret = cache_bin_stash(&bin, &ptrs[0]);
+ expect_false(ret, "Should not stash into a full cache bin");
+ for (cache_bin_sz_t i = 0; i < ncached_max; i++) {
+ void *ptr = cache_bin_alloc(&bin, &ret);
+ if (i < ncached_max / 2) {
+ expect_true(ret, "Should be able to alloc");
+ uintptr_t diff = ((uintptr_t)ptr - (uintptr_t)&ptrs[0])
+ / sizeof(void *);
+ expect_true(diff % 2 == 0, "Should be able to alloc");
+ } else {
+ expect_false(ret, "Should not alloc stashed");
+ expect_true(cache_bin_nstashed_get_local(&bin, &info) ==
+ ncached_max / 2, "Wrong nstashed value");
+ }
+ }
+
+ test_bin_init(&bin, &info);
+ do_flush_stashed_test(&bin, &info, ptrs, ncached_max, 0);
+ do_flush_stashed_test(&bin, &info, ptrs, 0, ncached_max);
+ do_flush_stashed_test(&bin, &info, ptrs, ncached_max / 2, ncached_max / 2);
+ do_flush_stashed_test(&bin, &info, ptrs, ncached_max / 4, ncached_max / 2);
+ do_flush_stashed_test(&bin, &info, ptrs, ncached_max / 2, ncached_max / 4);
+ do_flush_stashed_test(&bin, &info, ptrs, ncached_max / 4, ncached_max / 4);
+}
+TEST_END
+
+int
+main(void) {
+ return test(test_cache_bin,
+ test_cache_bin_stash);
+}
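The interleaving arithmetic in test_cache_bin_stash follows from alternating the two paths: even-indexed pointers are returned to the bin with cache_bin_dalloc_easy() and odd-indexed ones are stashed, so at the top of iteration i the bin should report i / 2 + i % 2 cached and i / 2 stashed pointers. For example, at i = 5 the pointers 0, 2 and 4 are cached (three of them) while 1 and 3 are stashed (two), matching the two expect_true() checks; once all ncached_max slots are occupied, both a further dalloc and a further stash are expected to fail.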
diff --git a/deps/jemalloc/test/unit/ckh.c b/deps/jemalloc/test/unit/ckh.c
index 707ea5f8c..36142acdd 100644
--- a/deps/jemalloc/test/unit/ckh.c
+++ b/deps/jemalloc/test/unit/ckh.c
@@ -6,11 +6,11 @@ TEST_BEGIN(test_new_delete) {
tsd = tsd_fetch();
- assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash,
+ expect_false(ckh_new(tsd, &ckh, 2, ckh_string_hash,
ckh_string_keycomp), "Unexpected ckh_new() error");
ckh_delete(tsd, &ckh);
- assert_false(ckh_new(tsd, &ckh, 3, ckh_pointer_hash,
+ expect_false(ckh_new(tsd, &ckh, 3, ckh_pointer_hash,
ckh_pointer_keycomp), "Unexpected ckh_new() error");
ckh_delete(tsd, &ckh);
}
@@ -30,16 +30,16 @@ TEST_BEGIN(test_count_insert_search_remove) {
tsd = tsd_fetch();
- assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash,
+ expect_false(ckh_new(tsd, &ckh, 2, ckh_string_hash,
ckh_string_keycomp), "Unexpected ckh_new() error");
- assert_zu_eq(ckh_count(&ckh), 0,
+ expect_zu_eq(ckh_count(&ckh), 0,
"ckh_count() should return %zu, but it returned %zu", ZU(0),
ckh_count(&ckh));
/* Insert. */
for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
ckh_insert(tsd, &ckh, strs[i], strs[i]);
- assert_zu_eq(ckh_count(&ckh), i+1,
+ expect_zu_eq(ckh_count(&ckh), i+1,
"ckh_count() should return %zu, but it returned %zu", i+1,
ckh_count(&ckh));
}
@@ -57,17 +57,17 @@ TEST_BEGIN(test_count_insert_search_remove) {
vp = (i & 2) ? &v.p : NULL;
k.p = NULL;
v.p = NULL;
- assert_false(ckh_search(&ckh, strs[i], kp, vp),
+ expect_false(ckh_search(&ckh, strs[i], kp, vp),
"Unexpected ckh_search() error");
ks = (i & 1) ? strs[i] : (const char *)NULL;
vs = (i & 2) ? strs[i] : (const char *)NULL;
- assert_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu",
+ expect_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu",
i);
- assert_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu",
+ expect_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu",
i);
}
- assert_true(ckh_search(&ckh, missing, NULL, NULL),
+ expect_true(ckh_search(&ckh, missing, NULL, NULL),
"Unexpected ckh_search() success");
/* Remove. */
@@ -83,16 +83,16 @@ TEST_BEGIN(test_count_insert_search_remove) {
vp = (i & 2) ? &v.p : NULL;
k.p = NULL;
v.p = NULL;
- assert_false(ckh_remove(tsd, &ckh, strs[i], kp, vp),
+ expect_false(ckh_remove(tsd, &ckh, strs[i], kp, vp),
"Unexpected ckh_remove() error");
ks = (i & 1) ? strs[i] : (const char *)NULL;
vs = (i & 2) ? strs[i] : (const char *)NULL;
- assert_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu",
+ expect_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu",
i);
- assert_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu",
+ expect_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu",
i);
- assert_zu_eq(ckh_count(&ckh),
+ expect_zu_eq(ckh_count(&ckh),
sizeof(strs)/sizeof(const char *) - i - 1,
"ckh_count() should return %zu, but it returned %zu",
sizeof(strs)/sizeof(const char *) - i - 1,
@@ -113,40 +113,40 @@ TEST_BEGIN(test_insert_iter_remove) {
tsd = tsd_fetch();
- assert_false(ckh_new(tsd, &ckh, 2, ckh_pointer_hash,
+ expect_false(ckh_new(tsd, &ckh, 2, ckh_pointer_hash,
ckh_pointer_keycomp), "Unexpected ckh_new() error");
for (i = 0; i < NITEMS; i++) {
p[i] = mallocx(i+1, 0);
- assert_ptr_not_null(p[i], "Unexpected mallocx() failure");
+ expect_ptr_not_null(p[i], "Unexpected mallocx() failure");
}
for (i = 0; i < NITEMS; i++) {
size_t j;
for (j = i; j < NITEMS; j++) {
- assert_false(ckh_insert(tsd, &ckh, p[j], p[j]),
+ expect_false(ckh_insert(tsd, &ckh, p[j], p[j]),
"Unexpected ckh_insert() failure");
- assert_false(ckh_search(&ckh, p[j], &q, &r),
+ expect_false(ckh_search(&ckh, p[j], &q, &r),
"Unexpected ckh_search() failure");
- assert_ptr_eq(p[j], q, "Key pointer mismatch");
- assert_ptr_eq(p[j], r, "Value pointer mismatch");
+ expect_ptr_eq(p[j], q, "Key pointer mismatch");
+ expect_ptr_eq(p[j], r, "Value pointer mismatch");
}
- assert_zu_eq(ckh_count(&ckh), NITEMS,
+ expect_zu_eq(ckh_count(&ckh), NITEMS,
"ckh_count() should return %zu, but it returned %zu",
NITEMS, ckh_count(&ckh));
for (j = i + 1; j < NITEMS; j++) {
- assert_false(ckh_search(&ckh, p[j], NULL, NULL),
+ expect_false(ckh_search(&ckh, p[j], NULL, NULL),
"Unexpected ckh_search() failure");
- assert_false(ckh_remove(tsd, &ckh, p[j], &q, &r),
+ expect_false(ckh_remove(tsd, &ckh, p[j], &q, &r),
"Unexpected ckh_remove() failure");
- assert_ptr_eq(p[j], q, "Key pointer mismatch");
- assert_ptr_eq(p[j], r, "Value pointer mismatch");
- assert_true(ckh_search(&ckh, p[j], NULL, NULL),
+ expect_ptr_eq(p[j], q, "Key pointer mismatch");
+ expect_ptr_eq(p[j], r, "Value pointer mismatch");
+ expect_true(ckh_search(&ckh, p[j], NULL, NULL),
"Unexpected ckh_search() success");
- assert_true(ckh_remove(tsd, &ckh, p[j], &q, &r),
+ expect_true(ckh_remove(tsd, &ckh, p[j], &q, &r),
"Unexpected ckh_remove() success");
}
@@ -159,11 +159,11 @@ TEST_BEGIN(test_insert_iter_remove) {
for (tabind = 0; !ckh_iter(&ckh, &tabind, &q, &r);) {
size_t k;
- assert_ptr_eq(q, r, "Key and val not equal");
+ expect_ptr_eq(q, r, "Key and val not equal");
for (k = 0; k < NITEMS; k++) {
if (p[k] == q) {
- assert_false(seen[k],
+ expect_false(seen[k],
"Item %zu already seen", k);
seen[k] = true;
break;
@@ -172,29 +172,29 @@ TEST_BEGIN(test_insert_iter_remove) {
}
for (j = 0; j < i + 1; j++) {
- assert_true(seen[j], "Item %zu not seen", j);
+ expect_true(seen[j], "Item %zu not seen", j);
}
for (; j < NITEMS; j++) {
- assert_false(seen[j], "Item %zu seen", j);
+ expect_false(seen[j], "Item %zu seen", j);
}
}
}
for (i = 0; i < NITEMS; i++) {
- assert_false(ckh_search(&ckh, p[i], NULL, NULL),
+ expect_false(ckh_search(&ckh, p[i], NULL, NULL),
"Unexpected ckh_search() failure");
- assert_false(ckh_remove(tsd, &ckh, p[i], &q, &r),
+ expect_false(ckh_remove(tsd, &ckh, p[i], &q, &r),
"Unexpected ckh_remove() failure");
- assert_ptr_eq(p[i], q, "Key pointer mismatch");
- assert_ptr_eq(p[i], r, "Value pointer mismatch");
- assert_true(ckh_search(&ckh, p[i], NULL, NULL),
+ expect_ptr_eq(p[i], q, "Key pointer mismatch");
+ expect_ptr_eq(p[i], r, "Value pointer mismatch");
+ expect_true(ckh_search(&ckh, p[i], NULL, NULL),
"Unexpected ckh_search() success");
- assert_true(ckh_remove(tsd, &ckh, p[i], &q, &r),
+ expect_true(ckh_remove(tsd, &ckh, p[i], &q, &r),
"Unexpected ckh_remove() success");
dallocx(p[i], 0);
}
- assert_zu_eq(ckh_count(&ckh), 0,
+ expect_zu_eq(ckh_count(&ckh), 0,
"ckh_count() should return %zu, but it returned %zu",
ZU(0), ckh_count(&ckh));
ckh_delete(tsd, &ckh);
diff --git a/deps/jemalloc/test/unit/counter.c b/deps/jemalloc/test/unit/counter.c
new file mode 100644
index 000000000..277baac16
--- /dev/null
+++ b/deps/jemalloc/test/unit/counter.c
@@ -0,0 +1,80 @@
+#include "test/jemalloc_test.h"
+
+static const uint64_t interval = 1 << 20;
+
+TEST_BEGIN(test_counter_accum) {
+ uint64_t increment = interval >> 4;
+ unsigned n = interval / increment;
+ uint64_t accum = 0;
+
+ counter_accum_t c;
+ counter_accum_init(&c, interval);
+
+ tsd_t *tsd = tsd_fetch();
+ bool trigger;
+ for (unsigned i = 0; i < n; i++) {
+ trigger = counter_accum(tsd_tsdn(tsd), &c, increment);
+ accum += increment;
+ if (accum < interval) {
+ expect_b_eq(trigger, false, "Should not trigger");
+ } else {
+ expect_b_eq(trigger, true, "Should have triggered");
+ }
+ }
+ expect_b_eq(trigger, true, "Should have triggered");
+}
+TEST_END
+
+void
+expect_counter_value(counter_accum_t *c, uint64_t v) {
+ uint64_t accum = locked_read_u64_unsynchronized(&c->accumbytes);
+ expect_u64_eq(accum, v, "Counter value mismatch");
+}
+
+#define N_THDS (16)
+#define N_ITER_THD (1 << 12)
+#define ITER_INCREMENT (interval >> 4)
+
+static void *
+thd_start(void *varg) {
+ counter_accum_t *c = (counter_accum_t *)varg;
+
+ tsd_t *tsd = tsd_fetch();
+ bool trigger;
+ uintptr_t n_triggered = 0;
+ for (unsigned i = 0; i < N_ITER_THD; i++) {
+ trigger = counter_accum(tsd_tsdn(tsd), c, ITER_INCREMENT);
+ n_triggered += trigger ? 1 : 0;
+ }
+
+ return (void *)n_triggered;
+}
+
+
+TEST_BEGIN(test_counter_mt) {
+ counter_accum_t shared_c;
+ counter_accum_init(&shared_c, interval);
+
+ thd_t thds[N_THDS];
+ unsigned i;
+ for (i = 0; i < N_THDS; i++) {
+ thd_create(&thds[i], thd_start, (void *)&shared_c);
+ }
+
+ uint64_t sum = 0;
+ for (i = 0; i < N_THDS; i++) {
+ void *ret;
+ thd_join(thds[i], &ret);
+ sum += (uintptr_t)ret;
+ }
+ expect_u64_eq(sum, N_THDS * N_ITER_THD / (interval / ITER_INCREMENT),
+ "Incorrect number of triggers");
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_counter_accum,
+ test_counter_mt);
+}
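The expected trigger count in test_counter_mt is plain arithmetic: with interval = 1 << 20 and ITER_INCREMENT = interval >> 4, the accumulator trips once every interval / ITER_INCREMENT = 16 increments. The threads perform N_THDS * N_ITER_THD = 16 * 4096 = 65536 increments in total, so the summed per-thread trigger counts should come to 65536 / 16 = 4096, which is exactly what the final expect_u64_eq() verifies.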
diff --git a/deps/jemalloc/test/unit/decay.c b/deps/jemalloc/test/unit/decay.c
index cf3c07960..bdb6d0a39 100644
--- a/deps/jemalloc/test/unit/decay.c
+++ b/deps/jemalloc/test/unit/decay.c
@@ -1,605 +1,283 @@
#include "test/jemalloc_test.h"
-#include "jemalloc/internal/ticker.h"
-
-static nstime_monotonic_t *nstime_monotonic_orig;
-static nstime_update_t *nstime_update_orig;
-
-static unsigned nupdates_mock;
-static nstime_t time_mock;
-static bool monotonic_mock;
-
-static bool
-check_background_thread_enabled(void) {
- bool enabled;
- size_t sz = sizeof(bool);
- int ret = mallctl("background_thread", (void *)&enabled, &sz, NULL,0);
- if (ret == ENOENT) {
- return false;
- }
- assert_d_eq(ret, 0, "Unexpected mallctl error");
- return enabled;
-}
+#include "jemalloc/internal/decay.h"
-static bool
-nstime_monotonic_mock(void) {
- return monotonic_mock;
-}
+TEST_BEGIN(test_decay_init) {
+ decay_t decay;
+ memset(&decay, 0, sizeof(decay));
-static bool
-nstime_update_mock(nstime_t *time) {
- nupdates_mock++;
- if (monotonic_mock) {
- nstime_copy(time, &time_mock);
- }
- return !monotonic_mock;
-}
+ nstime_t curtime;
+ nstime_init(&curtime, 0);
-static unsigned
-do_arena_create(ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) {
- unsigned arena_ind;
- size_t sz = sizeof(unsigned);
- assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
- 0, "Unexpected mallctl() failure");
- size_t mib[3];
- size_t miblen = sizeof(mib)/sizeof(size_t);
-
- assert_d_eq(mallctlnametomib("arena.0.dirty_decay_ms", mib, &miblen),
- 0, "Unexpected mallctlnametomib() failure");
- mib[1] = (size_t)arena_ind;
- assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL,
- (void *)&dirty_decay_ms, sizeof(dirty_decay_ms)), 0,
- "Unexpected mallctlbymib() failure");
-
- assert_d_eq(mallctlnametomib("arena.0.muzzy_decay_ms", mib, &miblen),
- 0, "Unexpected mallctlnametomib() failure");
- mib[1] = (size_t)arena_ind;
- assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL,
- (void *)&muzzy_decay_ms, sizeof(muzzy_decay_ms)), 0,
- "Unexpected mallctlbymib() failure");
-
- return arena_ind;
-}
+ ssize_t decay_ms = 1000;
+ assert_true(decay_ms_valid(decay_ms), "");
-static void
-do_arena_destroy(unsigned arena_ind) {
- size_t mib[3];
- size_t miblen = sizeof(mib)/sizeof(size_t);
- assert_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0,
- "Unexpected mallctlnametomib() failure");
- mib[1] = (size_t)arena_ind;
- assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
- "Unexpected mallctlbymib() failure");
+ expect_false(decay_init(&decay, &curtime, decay_ms),
+ "Failed to initialize decay");
+ expect_zd_eq(decay_ms_read(&decay), decay_ms,
+ "Decay_ms was initialized incorrectly");
+ expect_u64_ne(decay_epoch_duration_ns(&decay), 0,
+ "Epoch duration was initialized incorrectly");
}
+TEST_END
-void
-do_epoch(void) {
- uint64_t epoch = 1;
- assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
- 0, "Unexpected mallctl() failure");
+TEST_BEGIN(test_decay_ms_valid) {
+ expect_false(decay_ms_valid(-7),
+ "Misclassified negative decay as valid");
+ expect_true(decay_ms_valid(-1),
+ "Misclassified -1 (never decay) as invalid decay");
+ expect_true(decay_ms_valid(8943),
+ "Misclassified valid decay");
+ if (SSIZE_MAX > NSTIME_SEC_MAX) {
+ expect_false(
+ decay_ms_valid((ssize_t)(NSTIME_SEC_MAX * KQU(1000) + 39)),
+ "Misclassified too large decay");
+ }
}
+TEST_END
-void
-do_purge(unsigned arena_ind) {
- size_t mib[3];
- size_t miblen = sizeof(mib)/sizeof(size_t);
- assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0,
- "Unexpected mallctlnametomib() failure");
- mib[1] = (size_t)arena_ind;
- assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
- "Unexpected mallctlbymib() failure");
-}
+TEST_BEGIN(test_decay_npages_purge_in) {
+ decay_t decay;
+ memset(&decay, 0, sizeof(decay));
-void
-do_decay(unsigned arena_ind) {
- size_t mib[3];
- size_t miblen = sizeof(mib)/sizeof(size_t);
- assert_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0,
- "Unexpected mallctlnametomib() failure");
- mib[1] = (size_t)arena_ind;
- assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
- "Unexpected mallctlbymib() failure");
-}
+ nstime_t curtime;
+ nstime_init(&curtime, 0);
-static uint64_t
-get_arena_npurge_impl(const char *mibname, unsigned arena_ind) {
- size_t mib[4];
- size_t miblen = sizeof(mib)/sizeof(size_t);
- assert_d_eq(mallctlnametomib(mibname, mib, &miblen), 0,
- "Unexpected mallctlnametomib() failure");
- mib[2] = (size_t)arena_ind;
- uint64_t npurge = 0;
- size_t sz = sizeof(npurge);
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&npurge, &sz, NULL, 0),
- config_stats ? 0 : ENOENT, "Unexpected mallctlbymib() failure");
- return npurge;
-}
+ uint64_t decay_ms = 1000;
+ nstime_t decay_nstime;
+ nstime_init(&decay_nstime, decay_ms * 1000 * 1000);
+ expect_false(decay_init(&decay, &curtime, (ssize_t)decay_ms),
+ "Failed to initialize decay");
-static uint64_t
-get_arena_dirty_npurge(unsigned arena_ind) {
- do_epoch();
- return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind);
-}
+ size_t new_pages = 100;
-static uint64_t
-get_arena_dirty_purged(unsigned arena_ind) {
- do_epoch();
- return get_arena_npurge_impl("stats.arenas.0.dirty_purged", arena_ind);
-}
+ nstime_t time;
+ nstime_copy(&time, &decay_nstime);
+ expect_u64_eq(decay_npages_purge_in(&decay, &time, new_pages),
+ new_pages, "Not all pages are expected to decay in decay_ms");
-static uint64_t
-get_arena_muzzy_npurge(unsigned arena_ind) {
- do_epoch();
- return get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind);
-}
+ nstime_init(&time, 0);
+ expect_u64_eq(decay_npages_purge_in(&decay, &time, new_pages), 0,
+ "More than zero pages are expected to instantly decay");
-static uint64_t
-get_arena_npurge(unsigned arena_ind) {
- do_epoch();
- return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind) +
- get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind);
+ nstime_copy(&time, &decay_nstime);
+ nstime_idivide(&time, 2);
+ expect_u64_eq(decay_npages_purge_in(&decay, &time, new_pages),
+ new_pages / 2, "Not half of pages decay in half the decay period");
}
+TEST_END
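
The three expectations in test_decay_npages_purge_in reduce to boundary arithmetic: with decay_ms = 1000 and 100 new pages, nothing is purged at t = 0, everything is purged by t = decay_ms, and half is purged at t = decay_ms / 2. The sketch below encodes only those checkpoints, not the internal decay curve; intermediate values are left unspecified on purpose.

	/* Boundary points only; other values depend on the actual curve. */
	#include <stdint.h>
	#include <stdio.h>

	static uint64_t
	expected_purge(uint64_t elapsed_ms, uint64_t decay_ms, uint64_t n) {
		if (elapsed_ms == 0) {
			return 0;        /* nothing decays instantly */
		}
		if (elapsed_ms >= decay_ms) {
			return n;        /* everything decays by decay_ms */
		}
		if (elapsed_ms == decay_ms / 2) {
			return n / 2;    /* midpoint checked by the test */
		}
		return 0;                /* unspecified in this sketch */
	}

	int
	main(void) {
		printf("%u %u %u\n",
		    (unsigned)expected_purge(0, 1000, 100),
		    (unsigned)expected_purge(500, 1000, 100),
		    (unsigned)expected_purge(1000, 1000, 100));
		return 0;
	}
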
-static size_t
-get_arena_pdirty(unsigned arena_ind) {
- do_epoch();
- size_t mib[4];
- size_t miblen = sizeof(mib)/sizeof(size_t);
- assert_d_eq(mallctlnametomib("stats.arenas.0.pdirty", mib, &miblen), 0,
- "Unexpected mallctlnametomib() failure");
- mib[2] = (size_t)arena_ind;
- size_t pdirty;
- size_t sz = sizeof(pdirty);
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&pdirty, &sz, NULL, 0), 0,
- "Unexpected mallctlbymib() failure");
- return pdirty;
-}
+TEST_BEGIN(test_decay_maybe_advance_epoch) {
+ decay_t decay;
+ memset(&decay, 0, sizeof(decay));
-static size_t
-get_arena_pmuzzy(unsigned arena_ind) {
- do_epoch();
- size_t mib[4];
- size_t miblen = sizeof(mib)/sizeof(size_t);
- assert_d_eq(mallctlnametomib("stats.arenas.0.pmuzzy", mib, &miblen), 0,
- "Unexpected mallctlnametomib() failure");
- mib[2] = (size_t)arena_ind;
- size_t pmuzzy;
- size_t sz = sizeof(pmuzzy);
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&pmuzzy, &sz, NULL, 0), 0,
- "Unexpected mallctlbymib() failure");
- return pmuzzy;
-}
+ nstime_t curtime;
+ nstime_init(&curtime, 0);
-static void *
-do_mallocx(size_t size, int flags) {
- void *p = mallocx(size, flags);
- assert_ptr_not_null(p, "Unexpected mallocx() failure");
- return p;
-}
+ uint64_t decay_ms = 1000;
-static void
-generate_dirty(unsigned arena_ind, size_t size) {
- int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
- void *p = do_mallocx(size, flags);
- dallocx(p, flags);
-}
+ bool err = decay_init(&decay, &curtime, (ssize_t)decay_ms);
+ expect_false(err, "");
-TEST_BEGIN(test_decay_ticks) {
- test_skip_if(check_background_thread_enabled());
-
- ticker_t *decay_ticker;
- unsigned tick0, tick1, arena_ind;
- size_t sz, large0;
- void *p;
-
- sz = sizeof(size_t);
- assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
- 0), 0, "Unexpected mallctl failure");
-
- /* Set up a manually managed arena for test. */
- arena_ind = do_arena_create(0, 0);
-
- /* Migrate to the new arena, and get the ticker. */
- unsigned old_arena_ind;
- size_t sz_arena_ind = sizeof(old_arena_ind);
- assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind,
- &sz_arena_ind, (void *)&arena_ind, sizeof(arena_ind)), 0,
- "Unexpected mallctl() failure");
- decay_ticker = decay_ticker_get(tsd_fetch(), arena_ind);
- assert_ptr_not_null(decay_ticker,
- "Unexpected failure getting decay ticker");
-
- /*
- * Test the standard APIs using a large size class, since we can't
- * control tcache interactions for small size classes (except by
- * completely disabling tcache for the entire test program).
- */
-
- /* malloc(). */
- tick0 = ticker_read(decay_ticker);
- p = malloc(large0);
- assert_ptr_not_null(p, "Unexpected malloc() failure");
- tick1 = ticker_read(decay_ticker);
- assert_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()");
- /* free(). */
- tick0 = ticker_read(decay_ticker);
- free(p);
- tick1 = ticker_read(decay_ticker);
- assert_u32_ne(tick1, tick0, "Expected ticker to tick during free()");
-
- /* calloc(). */
- tick0 = ticker_read(decay_ticker);
- p = calloc(1, large0);
- assert_ptr_not_null(p, "Unexpected calloc() failure");
- tick1 = ticker_read(decay_ticker);
- assert_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()");
- free(p);
-
- /* posix_memalign(). */
- tick0 = ticker_read(decay_ticker);
- assert_d_eq(posix_memalign(&p, sizeof(size_t), large0), 0,
- "Unexpected posix_memalign() failure");
- tick1 = ticker_read(decay_ticker);
- assert_u32_ne(tick1, tick0,
- "Expected ticker to tick during posix_memalign()");
- free(p);
-
- /* aligned_alloc(). */
- tick0 = ticker_read(decay_ticker);
- p = aligned_alloc(sizeof(size_t), large0);
- assert_ptr_not_null(p, "Unexpected aligned_alloc() failure");
- tick1 = ticker_read(decay_ticker);
- assert_u32_ne(tick1, tick0,
- "Expected ticker to tick during aligned_alloc()");
- free(p);
-
- /* realloc(). */
- /* Allocate. */
- tick0 = ticker_read(decay_ticker);
- p = realloc(NULL, large0);
- assert_ptr_not_null(p, "Unexpected realloc() failure");
- tick1 = ticker_read(decay_ticker);
- assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
- /* Reallocate. */
- tick0 = ticker_read(decay_ticker);
- p = realloc(p, large0);
- assert_ptr_not_null(p, "Unexpected realloc() failure");
- tick1 = ticker_read(decay_ticker);
- assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
- /* Deallocate. */
- tick0 = ticker_read(decay_ticker);
- realloc(p, 0);
- tick1 = ticker_read(decay_ticker);
- assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
-
- /*
- * Test the *allocx() APIs using large and small size classes, with
- * tcache explicitly disabled.
- */
- {
- unsigned i;
- size_t allocx_sizes[2];
- allocx_sizes[0] = large0;
- allocx_sizes[1] = 1;
-
- for (i = 0; i < sizeof(allocx_sizes) / sizeof(size_t); i++) {
- sz = allocx_sizes[i];
-
- /* mallocx(). */
- tick0 = ticker_read(decay_ticker);
- p = mallocx(sz, MALLOCX_TCACHE_NONE);
- assert_ptr_not_null(p, "Unexpected mallocx() failure");
- tick1 = ticker_read(decay_ticker);
- assert_u32_ne(tick1, tick0,
- "Expected ticker to tick during mallocx() (sz=%zu)",
- sz);
- /* rallocx(). */
- tick0 = ticker_read(decay_ticker);
- p = rallocx(p, sz, MALLOCX_TCACHE_NONE);
- assert_ptr_not_null(p, "Unexpected rallocx() failure");
- tick1 = ticker_read(decay_ticker);
- assert_u32_ne(tick1, tick0,
- "Expected ticker to tick during rallocx() (sz=%zu)",
- sz);
- /* xallocx(). */
- tick0 = ticker_read(decay_ticker);
- xallocx(p, sz, 0, MALLOCX_TCACHE_NONE);
- tick1 = ticker_read(decay_ticker);
- assert_u32_ne(tick1, tick0,
- "Expected ticker to tick during xallocx() (sz=%zu)",
- sz);
- /* dallocx(). */
- tick0 = ticker_read(decay_ticker);
- dallocx(p, MALLOCX_TCACHE_NONE);
- tick1 = ticker_read(decay_ticker);
- assert_u32_ne(tick1, tick0,
- "Expected ticker to tick during dallocx() (sz=%zu)",
- sz);
- /* sdallocx(). */
- p = mallocx(sz, MALLOCX_TCACHE_NONE);
- assert_ptr_not_null(p, "Unexpected mallocx() failure");
- tick0 = ticker_read(decay_ticker);
- sdallocx(p, sz, MALLOCX_TCACHE_NONE);
- tick1 = ticker_read(decay_ticker);
- assert_u32_ne(tick1, tick0,
- "Expected ticker to tick during sdallocx() "
- "(sz=%zu)", sz);
- }
- }
+ bool advanced;
+ advanced = decay_maybe_advance_epoch(&decay, &curtime, 0);
+ expect_false(advanced, "Epoch advanced while time didn't");
- /*
- * Test tcache fill/flush interactions for large and small size classes,
- * using an explicit tcache.
- */
- unsigned tcache_ind, i;
- size_t tcache_sizes[2];
- tcache_sizes[0] = large0;
- tcache_sizes[1] = 1;
-
- size_t tcache_max, sz_tcache_max;
- sz_tcache_max = sizeof(tcache_max);
- assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max,
- &sz_tcache_max, NULL, 0), 0, "Unexpected mallctl() failure");
-
- sz = sizeof(unsigned);
- assert_d_eq(mallctl("tcache.create", (void *)&tcache_ind, &sz,
- NULL, 0), 0, "Unexpected mallctl failure");
-
- for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) {
- sz = tcache_sizes[i];
-
- /* tcache fill. */
- tick0 = ticker_read(decay_ticker);
- p = mallocx(sz, MALLOCX_TCACHE(tcache_ind));
- assert_ptr_not_null(p, "Unexpected mallocx() failure");
- tick1 = ticker_read(decay_ticker);
- assert_u32_ne(tick1, tick0,
- "Expected ticker to tick during tcache fill "
- "(sz=%zu)", sz);
- /* tcache flush. */
- dallocx(p, MALLOCX_TCACHE(tcache_ind));
- tick0 = ticker_read(decay_ticker);
- assert_d_eq(mallctl("tcache.flush", NULL, NULL,
- (void *)&tcache_ind, sizeof(unsigned)), 0,
- "Unexpected mallctl failure");
- tick1 = ticker_read(decay_ticker);
-
- /* Will only tick if it's in tcache. */
- if (sz <= tcache_max) {
- assert_u32_ne(tick1, tick0,
- "Expected ticker to tick during tcache "
- "flush (sz=%zu)", sz);
- } else {
- assert_u32_eq(tick1, tick0,
- "Unexpected ticker tick during tcache "
- "flush (sz=%zu)", sz);
- }
- }
+ nstime_t interval;
+ nstime_init(&interval, decay_epoch_duration_ns(&decay));
+
+ nstime_add(&curtime, &interval);
+ advanced = decay_maybe_advance_epoch(&decay, &curtime, 0);
+ expect_false(advanced, "Epoch advanced after first interval");
+
+ nstime_add(&curtime, &interval);
+ advanced = decay_maybe_advance_epoch(&decay, &curtime, 0);
+ expect_true(advanced, "Epoch didn't advance after two intervals");
}
TEST_END
-static void
-decay_ticker_helper(unsigned arena_ind, int flags, bool dirty, ssize_t dt,
- uint64_t dirty_npurge0, uint64_t muzzy_npurge0, bool terminate_asap) {
-#define NINTERVALS 101
- nstime_t time, update_interval, decay_ms, deadline;
-
- nstime_init(&time, 0);
- nstime_update(&time);
-
- nstime_init2(&decay_ms, dt, 0);
- nstime_copy(&deadline, &time);
- nstime_add(&deadline, &decay_ms);
-
- nstime_init2(&update_interval, dt, 0);
- nstime_idivide(&update_interval, NINTERVALS);
-
- /*
- * Keep q's slab from being deallocated during the looping below. If a
- * cached slab were to repeatedly come and go during looping, it could
- * prevent the decay backlog ever becoming empty.
- */
- void *p = do_mallocx(1, flags);
- uint64_t dirty_npurge1, muzzy_npurge1;
- do {
- for (unsigned i = 0; i < DECAY_NTICKS_PER_UPDATE / 2;
- i++) {
- void *q = do_mallocx(1, flags);
- dallocx(q, flags);
+TEST_BEGIN(test_decay_empty) {
+ /* If we never have any decaying pages, npages_limit should be 0. */
+ decay_t decay;
+ memset(&decay, 0, sizeof(decay));
+
+ nstime_t curtime;
+ nstime_init(&curtime, 0);
+
+ uint64_t decay_ms = 1000;
+ uint64_t decay_ns = decay_ms * 1000 * 1000;
+
+ bool err = decay_init(&decay, &curtime, (ssize_t)decay_ms);
+ assert_false(err, "");
+
+ uint64_t time_between_calls = decay_epoch_duration_ns(&decay) / 5;
+ int nepochs = 0;
+ for (uint64_t i = 0; i < decay_ns / time_between_calls * 10; i++) {
+ size_t dirty_pages = 0;
+ nstime_init(&curtime, i * time_between_calls);
+ bool epoch_advanced = decay_maybe_advance_epoch(&decay,
+ &curtime, dirty_pages);
+ if (epoch_advanced) {
+ nepochs++;
+ expect_zu_eq(decay_npages_limit_get(&decay), 0,
+ "Unexpectedly increased npages_limit");
}
- dirty_npurge1 = get_arena_dirty_npurge(arena_ind);
- muzzy_npurge1 = get_arena_muzzy_npurge(arena_ind);
-
- nstime_add(&time_mock, &update_interval);
- nstime_update(&time);
- } while (nstime_compare(&time, &deadline) <= 0 && ((dirty_npurge1 ==
- dirty_npurge0 && muzzy_npurge1 == muzzy_npurge0) ||
- !terminate_asap));
- dallocx(p, flags);
-
- if (config_stats) {
- assert_u64_gt(dirty_npurge1 + muzzy_npurge1, dirty_npurge0 +
- muzzy_npurge0, "Expected purging to occur");
}
-#undef NINTERVALS
+ expect_d_gt(nepochs, 0, "Epochs never advanced");
}
+TEST_END
-TEST_BEGIN(test_decay_ticker) {
- test_skip_if(check_background_thread_enabled());
-#define NPS 2048
- ssize_t ddt = opt_dirty_decay_ms;
- ssize_t mdt = opt_muzzy_decay_ms;
- unsigned arena_ind = do_arena_create(ddt, mdt);
- int flags = (MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
- void *ps[NPS];
- size_t large;
-
- /*
- * Allocate a bunch of large objects, pause the clock, deallocate every
- * other object (to fragment virtual memory), restore the clock, then
- * [md]allocx() in a tight loop while advancing time rapidly to verify
- * the ticker triggers purging.
- */
-
- size_t tcache_max;
- size_t sz = sizeof(size_t);
- assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max, &sz, NULL,
- 0), 0, "Unexpected mallctl failure");
- large = nallocx(tcache_max + 1, flags);
-
- do_purge(arena_ind);
- uint64_t dirty_npurge0 = get_arena_dirty_npurge(arena_ind);
- uint64_t muzzy_npurge0 = get_arena_muzzy_npurge(arena_ind);
-
- for (unsigned i = 0; i < NPS; i++) {
- ps[i] = do_mallocx(large, flags);
+/*
+ * Verify that npages_limit correctly decays as the time goes.
+ *
+ * During first 'nepoch_init' epochs, add new dirty pages.
+ * After that, let them decay and verify npages_limit decreases.
+ * Then proceed with another 'nepoch_init' epochs and check that
+ * all dirty pages are flushed out of backlog, bringing npages_limit
+ * down to zero.
+ */
+TEST_BEGIN(test_decay) {
+ const uint64_t nepoch_init = 10;
+
+ decay_t decay;
+ memset(&decay, 0, sizeof(decay));
+
+ nstime_t curtime;
+ nstime_init(&curtime, 0);
+
+ uint64_t decay_ms = 1000;
+ uint64_t decay_ns = decay_ms * 1000 * 1000;
+
+ bool err = decay_init(&decay, &curtime, (ssize_t)decay_ms);
+ assert_false(err, "");
+
+ expect_zu_eq(decay_npages_limit_get(&decay), 0,
+ "Empty decay returned nonzero npages_limit");
+
+ nstime_t epochtime;
+ nstime_init(&epochtime, decay_epoch_duration_ns(&decay));
+
+ const size_t dirty_pages_per_epoch = 1000;
+ size_t dirty_pages = 0;
+ uint64_t epoch_ns = decay_epoch_duration_ns(&decay);
+ bool epoch_advanced = false;
+
+ /* Populate backlog with some dirty pages */
+ for (uint64_t i = 0; i < nepoch_init; i++) {
+ nstime_add(&curtime, &epochtime);
+ dirty_pages += dirty_pages_per_epoch;
+ epoch_advanced |= decay_maybe_advance_epoch(&decay, &curtime,
+ dirty_pages);
}
-
- nupdates_mock = 0;
- nstime_init(&time_mock, 0);
- nstime_update(&time_mock);
- monotonic_mock = true;
-
- nstime_monotonic_orig = nstime_monotonic;
- nstime_update_orig = nstime_update;
- nstime_monotonic = nstime_monotonic_mock;
- nstime_update = nstime_update_mock;
-
- for (unsigned i = 0; i < NPS; i += 2) {
- dallocx(ps[i], flags);
- unsigned nupdates0 = nupdates_mock;
- do_decay(arena_ind);
- assert_u_gt(nupdates_mock, nupdates0,
- "Expected nstime_update() to be called");
+ expect_true(epoch_advanced, "Epoch never advanced");
+
+ size_t npages_limit = decay_npages_limit_get(&decay);
+ expect_zu_gt(npages_limit, 0, "npages_limit is incorrectly equal "
+ "to zero after dirty pages have been added");
+
+ /* Keep dirty pages unchanged and verify that npages_limit decreases */
+ for (uint64_t i = nepoch_init; i * epoch_ns < decay_ns; ++i) {
+ nstime_add(&curtime, &epochtime);
+ epoch_advanced = decay_maybe_advance_epoch(&decay, &curtime,
+ dirty_pages);
+ if (epoch_advanced) {
+ size_t npages_limit_new = decay_npages_limit_get(&decay);
+ expect_zu_lt(npages_limit_new, npages_limit,
+			    "npages_limit failed to decay");
+
+ npages_limit = npages_limit_new;
+ }
}
- decay_ticker_helper(arena_ind, flags, true, ddt, dirty_npurge0,
- muzzy_npurge0, true);
- decay_ticker_helper(arena_ind, flags, false, ddt+mdt, dirty_npurge0,
- muzzy_npurge0, false);
+ expect_zu_gt(npages_limit, 0, "npages_limit decayed to zero earlier "
+ "than decay_ms since last dirty page was added");
- do_arena_destroy(arena_ind);
+ /* Completely push all dirty pages out of the backlog */
+ epoch_advanced = false;
+ for (uint64_t i = 0; i < nepoch_init; i++) {
+ nstime_add(&curtime, &epochtime);
+ epoch_advanced |= decay_maybe_advance_epoch(&decay, &curtime,
+ dirty_pages);
+ }
+ expect_true(epoch_advanced, "Epoch never advanced");
- nstime_monotonic = nstime_monotonic_orig;
- nstime_update = nstime_update_orig;
-#undef NPS
+ npages_limit = decay_npages_limit_get(&decay);
+ expect_zu_eq(npages_limit, 0, "npages_limit didn't decay to 0 after "
+ "decay_ms since last bump in dirty pages");
}
TEST_END
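
The comment block above test_decay spells out the caller-side protocol: initialize the decay state, report the current dirty-page count whenever time advances, and consult decay_npages_limit_get() once an epoch ticks over. A condensed sketch of that driving loop follows, using only calls already exercised in this file; the fixed dirty_pages value and iteration count are assumptions for illustration.

	#include "test/jemalloc_test.h"
	#include "jemalloc/internal/decay.h"

	static void
	drive_decay_sketch(void) {
		decay_t decay;
		nstime_t now;
		nstime_init(&now, 0);
		if (decay_init(&decay, &now, /* decay_ms */ 1000)) {
			return;            /* initialization failed */
		}

		nstime_t epoch;
		nstime_init(&epoch, decay_epoch_duration_ns(&decay));

		size_t dirty_pages = 1000;         /* assumed workload */
		for (int i = 0; i < 100; i++) {
			nstime_add(&now, &epoch);
			if (decay_maybe_advance_epoch(&decay, &now,
			    dirty_pages)) {
				size_t limit = decay_npages_limit_get(&decay);
				if (dirty_pages > limit) {
					/* A real caller would purge here. */
					dirty_pages = limit;
				}
			}
		}
	}
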
-TEST_BEGIN(test_decay_nonmonotonic) {
- test_skip_if(check_background_thread_enabled());
-#define NPS (SMOOTHSTEP_NSTEPS + 1)
- int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
- void *ps[NPS];
- uint64_t npurge0 = 0;
- uint64_t npurge1 = 0;
- size_t sz, large0;
- unsigned i, nupdates0;
-
- sz = sizeof(size_t);
- assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
- 0), 0, "Unexpected mallctl failure");
-
- assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
- "Unexpected mallctl failure");
- do_epoch();
- sz = sizeof(uint64_t);
- npurge0 = get_arena_npurge(0);
-
- nupdates_mock = 0;
- nstime_init(&time_mock, 0);
- nstime_update(&time_mock);
- monotonic_mock = false;
-
- nstime_monotonic_orig = nstime_monotonic;
- nstime_update_orig = nstime_update;
- nstime_monotonic = nstime_monotonic_mock;
- nstime_update = nstime_update_mock;
-
- for (i = 0; i < NPS; i++) {
- ps[i] = mallocx(large0, flags);
- assert_ptr_not_null(ps[i], "Unexpected mallocx() failure");
- }
+TEST_BEGIN(test_decay_ns_until_purge) {
+ const uint64_t nepoch_init = 10;
- for (i = 0; i < NPS; i++) {
- dallocx(ps[i], flags);
- nupdates0 = nupdates_mock;
- assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
- "Unexpected arena.0.decay failure");
- assert_u_gt(nupdates_mock, nupdates0,
- "Expected nstime_update() to be called");
- }
+ decay_t decay;
+ memset(&decay, 0, sizeof(decay));
- do_epoch();
- sz = sizeof(uint64_t);
- npurge1 = get_arena_npurge(0);
+ nstime_t curtime;
+ nstime_init(&curtime, 0);
- if (config_stats) {
- assert_u64_eq(npurge0, npurge1, "Unexpected purging occurred");
- }
+ uint64_t decay_ms = 1000;
+ uint64_t decay_ns = decay_ms * 1000 * 1000;
- nstime_monotonic = nstime_monotonic_orig;
- nstime_update = nstime_update_orig;
-#undef NPS
-}
-TEST_END
+ bool err = decay_init(&decay, &curtime, (ssize_t)decay_ms);
+ assert_false(err, "");
-TEST_BEGIN(test_decay_now) {
- test_skip_if(check_background_thread_enabled());
-
- unsigned arena_ind = do_arena_create(0, 0);
- assert_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
- assert_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages");
- size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2};
- /* Verify that dirty/muzzy pages never linger after deallocation. */
- for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
- size_t size = sizes[i];
- generate_dirty(arena_ind, size);
- assert_zu_eq(get_arena_pdirty(arena_ind), 0,
- "Unexpected dirty pages");
- assert_zu_eq(get_arena_pmuzzy(arena_ind), 0,
- "Unexpected muzzy pages");
- }
- do_arena_destroy(arena_ind);
-}
-TEST_END
+ nstime_t epochtime;
+ nstime_init(&epochtime, decay_epoch_duration_ns(&decay));
-TEST_BEGIN(test_decay_never) {
- test_skip_if(check_background_thread_enabled() || !config_stats);
-
- unsigned arena_ind = do_arena_create(-1, -1);
- int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
- assert_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
- assert_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages");
- size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2};
- void *ptrs[sizeof(sizes)/sizeof(size_t)];
- for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
- ptrs[i] = do_mallocx(sizes[i], flags);
- }
- /* Verify that each deallocation generates additional dirty pages. */
- size_t pdirty_prev = get_arena_pdirty(arena_ind);
- size_t pmuzzy_prev = get_arena_pmuzzy(arena_ind);
- assert_zu_eq(pdirty_prev, 0, "Unexpected dirty pages");
- assert_zu_eq(pmuzzy_prev, 0, "Unexpected muzzy pages");
- for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
- dallocx(ptrs[i], flags);
- size_t pdirty = get_arena_pdirty(arena_ind);
- size_t pmuzzy = get_arena_pmuzzy(arena_ind);
- assert_zu_gt(pdirty + (size_t)get_arena_dirty_purged(arena_ind),
- pdirty_prev, "Expected dirty pages to increase.");
- assert_zu_eq(pmuzzy, 0, "Unexpected muzzy pages");
- pdirty_prev = pdirty;
+ uint64_t ns_until_purge_empty = decay_ns_until_purge(&decay, 0, 0);
+ expect_u64_eq(ns_until_purge_empty, DECAY_UNBOUNDED_TIME_TO_PURGE,
+ "Failed to return unbounded wait time for zero threshold");
+
+ const size_t dirty_pages_per_epoch = 1000;
+ size_t dirty_pages = 0;
+ bool epoch_advanced = false;
+ for (uint64_t i = 0; i < nepoch_init; i++) {
+ nstime_add(&curtime, &epochtime);
+ dirty_pages += dirty_pages_per_epoch;
+ epoch_advanced |= decay_maybe_advance_epoch(&decay, &curtime,
+ dirty_pages);
}
- do_arena_destroy(arena_ind);
+ expect_true(epoch_advanced, "Epoch never advanced");
+
+ uint64_t ns_until_purge_all = decay_ns_until_purge(&decay,
+ dirty_pages, dirty_pages);
+ expect_u64_ge(ns_until_purge_all, decay_ns,
+ "Incorrectly calculated time to purge all pages");
+
+ uint64_t ns_until_purge_none = decay_ns_until_purge(&decay,
+ dirty_pages, 0);
+ expect_u64_eq(ns_until_purge_none, decay_epoch_duration_ns(&decay) * 2,
+ "Incorrectly calculated time to purge 0 pages");
+
+ uint64_t npages_threshold = dirty_pages / 2;
+ uint64_t ns_until_purge_half = decay_ns_until_purge(&decay,
+ dirty_pages, npages_threshold);
+
+ nstime_t waittime;
+ nstime_init(&waittime, ns_until_purge_half);
+ nstime_add(&curtime, &waittime);
+
+ decay_maybe_advance_epoch(&decay, &curtime, dirty_pages);
+ size_t npages_limit = decay_npages_limit_get(&decay);
+ expect_zu_lt(npages_limit, dirty_pages,
+ "npages_limit failed to decrease after waiting");
+ size_t expected = dirty_pages - npages_limit;
+ int deviation = abs((int)expected - (int)(npages_threshold));
+ expect_d_lt(deviation, (int)(npages_threshold / 2),
+ "After waiting, number of pages is out of the expected interval "
+ "[0.5 * npages_threshold .. 1.5 * npages_threshold]");
}
TEST_END
int
main(void) {
return test(
- test_decay_ticks,
- test_decay_ticker,
- test_decay_nonmonotonic,
- test_decay_now,
- test_decay_never);
+ test_decay_init,
+ test_decay_ms_valid,
+ test_decay_npages_purge_in,
+ test_decay_maybe_advance_epoch,
+ test_decay_empty,
+ test_decay,
+ test_decay_ns_until_purge);
}
diff --git a/deps/jemalloc/test/unit/decay.sh b/deps/jemalloc/test/unit/decay.sh
deleted file mode 100644
index 45aeccf42..000000000
--- a/deps/jemalloc/test/unit/decay.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/sh
-
-export MALLOC_CONF="dirty_decay_ms:1000,muzzy_decay_ms:1000,lg_tcache_max:0"
diff --git a/deps/jemalloc/test/unit/div.c b/deps/jemalloc/test/unit/div.c
index b47f10b2b..29aea6659 100644
--- a/deps/jemalloc/test/unit/div.c
+++ b/deps/jemalloc/test/unit/div.c
@@ -14,7 +14,7 @@ TEST_BEGIN(test_div_exhaustive) {
dividend += divisor) {
size_t quotient = div_compute(
&div_info, dividend);
- assert_zu_eq(dividend, quotient * divisor,
+ expect_zu_eq(dividend, quotient * divisor,
"With divisor = %zu, dividend = %zu, "
"got quotient %zu", divisor, dividend, quotient);
}
diff --git a/deps/jemalloc/test/unit/double_free.c b/deps/jemalloc/test/unit/double_free.c
new file mode 100644
index 000000000..12122c1b7
--- /dev/null
+++ b/deps/jemalloc/test/unit/double_free.c
@@ -0,0 +1,77 @@
+#include "test/jemalloc_test.h"
+#include "test/san.h"
+
+#include "jemalloc/internal/safety_check.h"
+
+bool fake_abort_called;
+void fake_abort(const char *message) {
+ (void)message;
+ fake_abort_called = true;
+}
+
+void
+test_large_double_free_pre(void) {
+ safety_check_set_abort(&fake_abort);
+ fake_abort_called = false;
+}
+
+void
+test_large_double_free_post(void) {
+ expect_b_eq(fake_abort_called, true, "Double-free check didn't fire.");
+ safety_check_set_abort(NULL);
+}
+
+TEST_BEGIN(test_large_double_free_tcache) {
+ test_skip_if(!config_opt_safety_checks);
+ /*
+ * Skip debug builds, since too many assertions will be triggered with
+ * double-free before hitting the one we are interested in.
+ */
+ test_skip_if(config_debug);
+
+ test_large_double_free_pre();
+ char *ptr = malloc(SC_LARGE_MINCLASS);
+ bool guarded = extent_is_guarded(tsdn_fetch(), ptr);
+ free(ptr);
+ if (!guarded) {
+ free(ptr);
+ } else {
+ /*
+ * Skip because guarded extents may unguard immediately on
+ * deallocation, in which case the second free will crash before
+ * reaching the intended safety check.
+ */
+ fake_abort_called = true;
+ }
+ mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
+ test_large_double_free_post();
+}
+TEST_END
+
+TEST_BEGIN(test_large_double_free_no_tcache) {
+ test_skip_if(!config_opt_safety_checks);
+ test_skip_if(config_debug);
+
+ test_large_double_free_pre();
+ char *ptr = mallocx(SC_LARGE_MINCLASS, MALLOCX_TCACHE_NONE);
+ bool guarded = extent_is_guarded(tsdn_fetch(), ptr);
+ dallocx(ptr, MALLOCX_TCACHE_NONE);
+ if (!guarded) {
+ dallocx(ptr, MALLOCX_TCACHE_NONE);
+ } else {
+ /*
+ * Skip because guarded extents may unguard immediately on
+ * deallocation, in which case the second free will crash before
+ * reaching the intended safety check.
+ */
+ fake_abort_called = true;
+ }
+ test_large_double_free_post();
+}
+TEST_END
+
+int
+main(void) {
+ return test(test_large_double_free_no_tcache,
+ test_large_double_free_tcache);
+}
diff --git a/deps/jemalloc/test/unit/double_free.h b/deps/jemalloc/test/unit/double_free.h
new file mode 100644
index 000000000..8b1378917
--- /dev/null
+++ b/deps/jemalloc/test/unit/double_free.h
@@ -0,0 +1 @@
+
diff --git a/deps/jemalloc/test/unit/edata_cache.c b/deps/jemalloc/test/unit/edata_cache.c
new file mode 100644
index 000000000..af1110a95
--- /dev/null
+++ b/deps/jemalloc/test/unit/edata_cache.c
@@ -0,0 +1,226 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/edata_cache.h"
+
+static void
+test_edata_cache_init(edata_cache_t *edata_cache) {
+ base_t *base = base_new(TSDN_NULL, /* ind */ 1,
+ &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);
+ assert_ptr_not_null(base, "");
+ bool err = edata_cache_init(edata_cache, base);
+ assert_false(err, "");
+}
+
+static void
+test_edata_cache_destroy(edata_cache_t *edata_cache) {
+ base_delete(TSDN_NULL, edata_cache->base);
+}
+
+TEST_BEGIN(test_edata_cache) {
+ edata_cache_t ec;
+ test_edata_cache_init(&ec);
+
+ /* Get one */
+ edata_t *ed1 = edata_cache_get(TSDN_NULL, &ec);
+ assert_ptr_not_null(ed1, "");
+
+ /* Cache should be empty */
+ assert_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
+
+ /* Get another */
+ edata_t *ed2 = edata_cache_get(TSDN_NULL, &ec);
+ assert_ptr_not_null(ed2, "");
+
+ /* Still empty */
+ assert_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
+
+ /* Put one back, and the cache should now have one item */
+ edata_cache_put(TSDN_NULL, &ec, ed1);
+ assert_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 1, "");
+
+ /* Reallocating should reuse the item, and leave an empty cache. */
+ edata_t *ed1_again = edata_cache_get(TSDN_NULL, &ec);
+ assert_ptr_eq(ed1, ed1_again, "");
+ assert_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
+
+ test_edata_cache_destroy(&ec);
+}
+TEST_END
+
+static size_t
+ecf_count(edata_cache_fast_t *ecf) {
+ size_t count = 0;
+ edata_t *cur;
+ ql_foreach(cur, &ecf->list.head, ql_link_inactive) {
+ count++;
+ }
+ return count;
+}
+
+TEST_BEGIN(test_edata_cache_fast_simple) {
+ edata_cache_t ec;
+ edata_cache_fast_t ecf;
+
+ test_edata_cache_init(&ec);
+ edata_cache_fast_init(&ecf, &ec);
+
+ edata_t *ed1 = edata_cache_fast_get(TSDN_NULL, &ecf);
+ expect_ptr_not_null(ed1, "");
+ expect_zu_eq(ecf_count(&ecf), 0, "");
+ expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
+
+ edata_t *ed2 = edata_cache_fast_get(TSDN_NULL, &ecf);
+ expect_ptr_not_null(ed2, "");
+ expect_zu_eq(ecf_count(&ecf), 0, "");
+ expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
+
+ edata_cache_fast_put(TSDN_NULL, &ecf, ed1);
+ expect_zu_eq(ecf_count(&ecf), 1, "");
+ expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
+
+ edata_cache_fast_put(TSDN_NULL, &ecf, ed2);
+ expect_zu_eq(ecf_count(&ecf), 2, "");
+ expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
+
+ /* LIFO ordering. */
+ expect_ptr_eq(ed2, edata_cache_fast_get(TSDN_NULL, &ecf), "");
+ expect_zu_eq(ecf_count(&ecf), 1, "");
+ expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
+
+ expect_ptr_eq(ed1, edata_cache_fast_get(TSDN_NULL, &ecf), "");
+ expect_zu_eq(ecf_count(&ecf), 0, "");
+ expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
+
+ test_edata_cache_destroy(&ec);
+}
+TEST_END
+
+TEST_BEGIN(test_edata_cache_fill) {
+ edata_cache_t ec;
+ edata_cache_fast_t ecf;
+
+ test_edata_cache_init(&ec);
+ edata_cache_fast_init(&ecf, &ec);
+
+ edata_t *allocs[EDATA_CACHE_FAST_FILL * 2];
+
+ /*
+ * If the fallback cache can't satisfy the request, we shouldn't do
+ * extra allocations until compelled to. Put half the fill goal in the
+ * fallback.
+ */
+ for (int i = 0; i < EDATA_CACHE_FAST_FILL / 2; i++) {
+ allocs[i] = edata_cache_get(TSDN_NULL, &ec);
+ }
+ for (int i = 0; i < EDATA_CACHE_FAST_FILL / 2; i++) {
+ edata_cache_put(TSDN_NULL, &ec, allocs[i]);
+ }
+ expect_zu_eq(EDATA_CACHE_FAST_FILL / 2,
+ atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
+
+ allocs[0] = edata_cache_fast_get(TSDN_NULL, &ecf);
+ expect_zu_eq(EDATA_CACHE_FAST_FILL / 2 - 1, ecf_count(&ecf),
+ "Should have grabbed all edatas available but no more.");
+
+ for (int i = 1; i < EDATA_CACHE_FAST_FILL / 2; i++) {
+ allocs[i] = edata_cache_fast_get(TSDN_NULL, &ecf);
+ expect_ptr_not_null(allocs[i], "");
+ }
+ expect_zu_eq(0, ecf_count(&ecf), "");
+
+ /* When forced, we should alloc from the base. */
+ edata_t *edata = edata_cache_fast_get(TSDN_NULL, &ecf);
+ expect_ptr_not_null(edata, "");
+ expect_zu_eq(0, ecf_count(&ecf), "Allocated more than necessary");
+ expect_zu_eq(0, atomic_load_zu(&ec.count, ATOMIC_RELAXED),
+ "Allocated more than necessary");
+
+ /*
+ * We should correctly fill in the common case where the fallback isn't
+ * exhausted, too.
+ */
+ for (int i = 0; i < EDATA_CACHE_FAST_FILL * 2; i++) {
+ allocs[i] = edata_cache_get(TSDN_NULL, &ec);
+ expect_ptr_not_null(allocs[i], "");
+ }
+ for (int i = 0; i < EDATA_CACHE_FAST_FILL * 2; i++) {
+ edata_cache_put(TSDN_NULL, &ec, allocs[i]);
+ }
+
+ allocs[0] = edata_cache_fast_get(TSDN_NULL, &ecf);
+ expect_zu_eq(EDATA_CACHE_FAST_FILL - 1, ecf_count(&ecf), "");
+ expect_zu_eq(EDATA_CACHE_FAST_FILL,
+ atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
+ for (int i = 1; i < EDATA_CACHE_FAST_FILL; i++) {
+ expect_zu_eq(EDATA_CACHE_FAST_FILL - i, ecf_count(&ecf), "");
+ expect_zu_eq(EDATA_CACHE_FAST_FILL,
+ atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
+ allocs[i] = edata_cache_fast_get(TSDN_NULL, &ecf);
+ expect_ptr_not_null(allocs[i], "");
+ }
+ expect_zu_eq(0, ecf_count(&ecf), "");
+ expect_zu_eq(EDATA_CACHE_FAST_FILL,
+ atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
+
+ allocs[0] = edata_cache_fast_get(TSDN_NULL, &ecf);
+ expect_zu_eq(EDATA_CACHE_FAST_FILL - 1, ecf_count(&ecf), "");
+ expect_zu_eq(0, atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
+ for (int i = 1; i < EDATA_CACHE_FAST_FILL; i++) {
+ expect_zu_eq(EDATA_CACHE_FAST_FILL - i, ecf_count(&ecf), "");
+ expect_zu_eq(0, atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
+ allocs[i] = edata_cache_fast_get(TSDN_NULL, &ecf);
+ expect_ptr_not_null(allocs[i], "");
+ }
+ expect_zu_eq(0, ecf_count(&ecf), "");
+ expect_zu_eq(0, atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
+
+ test_edata_cache_destroy(&ec);
+}
+TEST_END
+
+TEST_BEGIN(test_edata_cache_disable) {
+ edata_cache_t ec;
+ edata_cache_fast_t ecf;
+
+ test_edata_cache_init(&ec);
+ edata_cache_fast_init(&ecf, &ec);
+
+ for (int i = 0; i < EDATA_CACHE_FAST_FILL; i++) {
+ edata_t *edata = edata_cache_get(TSDN_NULL, &ec);
+ expect_ptr_not_null(edata, "");
+ edata_cache_fast_put(TSDN_NULL, &ecf, edata);
+ }
+
+ expect_zu_eq(EDATA_CACHE_FAST_FILL, ecf_count(&ecf), "");
+ expect_zu_eq(0, atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
+
+ edata_cache_fast_disable(TSDN_NULL, &ecf);
+
+ expect_zu_eq(0, ecf_count(&ecf), "");
+ expect_zu_eq(EDATA_CACHE_FAST_FILL,
+ atomic_load_zu(&ec.count, ATOMIC_RELAXED), "Disabling should flush");
+
+ edata_t *edata = edata_cache_fast_get(TSDN_NULL, &ecf);
+ expect_zu_eq(0, ecf_count(&ecf), "");
+ expect_zu_eq(EDATA_CACHE_FAST_FILL - 1,
+ atomic_load_zu(&ec.count, ATOMIC_RELAXED),
+ "Disabled ecf should forward on get");
+
+ edata_cache_fast_put(TSDN_NULL, &ecf, edata);
+ expect_zu_eq(0, ecf_count(&ecf), "");
+ expect_zu_eq(EDATA_CACHE_FAST_FILL,
+ atomic_load_zu(&ec.count, ATOMIC_RELAXED),
+ "Disabled ecf should forward on put");
+
+ test_edata_cache_destroy(&ec);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_edata_cache,
+ test_edata_cache_fast_simple,
+ test_edata_cache_fill,
+ test_edata_cache_disable);
+}
diff --git a/deps/jemalloc/test/unit/emitter.c b/deps/jemalloc/test/unit/emitter.c
index b4a693f4b..ef8f9ff58 100644
--- a/deps/jemalloc/test/unit/emitter.c
+++ b/deps/jemalloc/test/unit/emitter.c
@@ -58,15 +58,17 @@ forwarding_cb(void *buf_descriptor_v, const char *str) {
size_t written = malloc_snprintf(buf_descriptor->buf,
buf_descriptor->len, "%s", str);
- assert_zu_eq(written, strlen(str), "Buffer overflow!");
+ expect_zu_eq(written, strlen(str), "Buffer overflow!");
buf_descriptor->buf += written;
buf_descriptor->len -= written;
- assert_zu_gt(buf_descriptor->len, 0, "Buffer out of space!");
+ expect_zu_gt(buf_descriptor->len, 0, "Buffer out of space!");
}
static void
-assert_emit_output(void (*emit_fn)(emitter_t *),
- const char *expected_json_output, const char *expected_table_output) {
+expect_emit_output(void (*emit_fn)(emitter_t *),
+ const char *expected_json_output,
+ const char *expected_json_compact_output,
+ const char *expected_table_output) {
emitter_t emitter;
char buf[MALLOC_PRINTF_BUFSIZE];
buf_descriptor_t buf_descriptor;
@@ -78,7 +80,17 @@ assert_emit_output(void (*emit_fn)(emitter_t *),
emitter_init(&emitter, emitter_output_json, &forwarding_cb,
&buf_descriptor);
(*emit_fn)(&emitter);
- assert_str_eq(expected_json_output, buf, "json output failure");
+ expect_str_eq(expected_json_output, buf, "json output failure");
+
+ buf_descriptor.buf = buf;
+ buf_descriptor.len = MALLOC_PRINTF_BUFSIZE;
+ buf_descriptor.mid_quote = false;
+
+ emitter_init(&emitter, emitter_output_json_compact, &forwarding_cb,
+ &buf_descriptor);
+ (*emit_fn)(&emitter);
+ expect_str_eq(expected_json_compact_output, buf,
+ "compact json output failure");
buf_descriptor.buf = buf;
buf_descriptor.len = MALLOC_PRINTF_BUFSIZE;
@@ -87,7 +99,7 @@ assert_emit_output(void (*emit_fn)(emitter_t *),
emitter_init(&emitter, emitter_output_table, &forwarding_cb,
&buf_descriptor);
(*emit_fn)(&emitter);
- assert_str_eq(expected_table_output, buf, "table output failure");
+ expect_str_eq(expected_table_output, buf, "table output failure");
}
static void
@@ -108,6 +120,7 @@ emit_dict(emitter_t *emitter) {
emitter_dict_end(emitter);
emitter_end(emitter);
}
+
static const char *dict_json =
"{\n"
"\t\"foo\": {\n"
@@ -117,6 +130,15 @@ static const char *dict_json =
"\t\t\"jkl\": \"a string\"\n"
"\t}\n"
"}\n";
+static const char *dict_json_compact =
+"{"
+ "\"foo\":{"
+ "\"abc\":false,"
+ "\"def\":true,"
+ "\"ghi\":123,"
+ "\"jkl\":\"a string\""
+ "}"
+"}";
static const char *dict_table =
"This is the foo table:\n"
" ABC: false\n"
@@ -124,11 +146,6 @@ static const char *dict_table =
" GHI: 123 (note_key1: \"a string\")\n"
" JKL: \"a string\" (note_key2: false)\n";
-TEST_BEGIN(test_dict) {
- assert_emit_output(&emit_dict, dict_json, dict_table);
-}
-TEST_END
-
static void
emit_table_printf(emitter_t *emitter) {
emitter_begin(emitter);
@@ -141,17 +158,11 @@ emit_table_printf(emitter_t *emitter) {
static const char *table_printf_json =
"{\n"
"}\n";
-
+static const char *table_printf_json_compact = "{}";
static const char *table_printf_table =
"Table note 1\n"
"Table note 2 with format string\n";
-TEST_BEGIN(test_table_printf) {
- assert_emit_output(&emit_table_printf, table_printf_json,
- table_printf_table);
-}
-TEST_END
-
static void emit_nested_dict(emitter_t *emitter) {
int val = 123;
emitter_begin(emitter);
@@ -169,7 +180,7 @@ static void emit_nested_dict(emitter_t *emitter) {
emitter_end(emitter);
}
-static const char *nested_object_json =
+static const char *nested_dict_json =
"{\n"
"\t\"json1\": {\n"
"\t\t\"json2\": {\n"
@@ -182,8 +193,20 @@ static const char *nested_object_json =
"\t\t\"primitive\": 123\n"
"\t}\n"
"}\n";
-
-static const char *nested_object_table =
+static const char *nested_dict_json_compact =
+"{"
+ "\"json1\":{"
+ "\"json2\":{"
+ "\"primitive\":123"
+ "},"
+ "\"json3\":{"
+ "}"
+ "},"
+ "\"json4\":{"
+ "\"primitive\":123"
+ "}"
+"}";
+static const char *nested_dict_table =
"Dict 1\n"
" Dict 2\n"
" A primitive: 123\n"
@@ -191,12 +214,6 @@ static const char *nested_object_table =
"Dict 4\n"
" Another primitive: 123\n";
-TEST_BEGIN(test_nested_dict) {
- assert_emit_output(&emit_nested_dict, nested_object_json,
- nested_object_table);
-}
-TEST_END
-
static void
emit_types(emitter_t *emitter) {
bool b = false;
@@ -235,7 +252,17 @@ static const char *types_json =
"\t\"k7\": 789,\n"
"\t\"k8\": 10000000000\n"
"}\n";
-
+static const char *types_json_compact =
+"{"
+ "\"k1\":false,"
+ "\"k2\":-123,"
+ "\"k3\":123,"
+ "\"k4\":-456,"
+ "\"k5\":456,"
+ "\"k6\":\"string\","
+ "\"k7\":789,"
+ "\"k8\":10000000000"
+"}";
static const char *types_table =
"K1: false\n"
"K2: -123\n"
@@ -246,11 +273,6 @@ static const char *types_table =
"K7: 789\n"
"K8: 10000000000\n";
-TEST_BEGIN(test_types) {
- assert_emit_output(&emit_types, types_json, types_table);
-}
-TEST_END
-
static void
emit_modal(emitter_t *emitter) {
int val = 123;
@@ -283,7 +305,18 @@ const char *modal_json =
"\t\t\"i6\": 123\n"
"\t}\n"
"}\n";
-
+const char *modal_json_compact =
+"{"
+ "\"j0\":{"
+ "\"j1\":{"
+ "\"i1\":123,"
+ "\"i2\":123,"
+ "\"i4\":123"
+ "},"
+ "\"i5\":123,"
+ "\"i6\":123"
+ "}"
+"}";
const char *modal_table =
"T0\n"
" I1: 123\n"
@@ -293,13 +326,8 @@ const char *modal_table =
" I5: 123\n"
" I6: 123\n";
-TEST_BEGIN(test_modal) {
- assert_emit_output(&emit_modal, modal_json, modal_table);
-}
-TEST_END
-
static void
-emit_json_arr(emitter_t *emitter) {
+emit_json_array(emitter_t *emitter) {
int ival = 123;
emitter_begin(emitter);
@@ -338,14 +366,24 @@ static const char *json_array_json =
"\t\t]\n"
"\t}\n"
"}\n";
-
+static const char *json_array_json_compact =
+"{"
+ "\"dict\":{"
+ "\"arr\":["
+ "{"
+ "\"foo\":123"
+ "},"
+ "123,"
+ "123,"
+ "{"
+ "\"bar\":123,"
+ "\"baz\":123"
+ "}"
+ "]"
+ "}"
+"}";
static const char *json_array_table = "";
-TEST_BEGIN(test_json_arr) {
- assert_emit_output(&emit_json_arr, json_array_json, json_array_table);
-}
-TEST_END
-
static void
emit_json_nested_array(emitter_t *emitter) {
int ival = 123;
@@ -391,12 +429,27 @@ static const char *json_nested_array_json =
"\t\t]\n"
"\t]\n"
"}\n";
-
-TEST_BEGIN(test_json_nested_arr) {
- assert_emit_output(&emit_json_nested_array, json_nested_array_json,
- json_array_table);
-}
-TEST_END
+static const char *json_nested_array_json_compact =
+"{"
+ "["
+ "["
+ "123,"
+ "\"foo\","
+ "123,"
+ "\"foo\""
+ "],"
+ "["
+ "123"
+ "],"
+ "["
+ "\"foo\","
+ "123"
+ "],"
+ "["
+ "]"
+ "]"
+"}";
+static const char *json_nested_array_table = "";
static void
emit_table_row(emitter_t *emitter) {
@@ -443,18 +496,29 @@ emit_table_row(emitter_t *emitter) {
static const char *table_row_json =
"{\n"
"}\n";
-
+static const char *table_row_json_compact = "{}";
static const char *table_row_table =
"ABC title DEF title GHI\n"
"123 true 456\n"
"789 false 1011\n"
"\"a string\" false ghi\n";
-TEST_BEGIN(test_table_row) {
- assert_emit_output(&emit_table_row, table_row_json, table_row_table);
-}
+#define GENERATE_TEST(feature) \
+TEST_BEGIN(test_##feature) { \
+ expect_emit_output(emit_##feature, feature##_json, \
+ feature##_json_compact, feature##_table); \
+} \
TEST_END
+GENERATE_TEST(dict)
+GENERATE_TEST(table_printf)
+GENERATE_TEST(nested_dict)
+GENERATE_TEST(types)
+GENERATE_TEST(modal)
+GENERATE_TEST(json_array)
+GENERATE_TEST(json_nested_array)
+GENERATE_TEST(table_row)
+
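
For reference, each GENERATE_TEST(feature) invocation expands to the same shape the hand-written test bodies had before; for example, GENERATE_TEST(dict) expands to the following (shown purely for illustration):

	TEST_BEGIN(test_dict) {
		expect_emit_output(emit_dict, dict_json, dict_json_compact,
		    dict_table);
	}
	TEST_END
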
int
main(void) {
return test_no_reentrancy(
@@ -463,7 +527,7 @@ main(void) {
test_nested_dict,
test_types,
test_modal,
- test_json_arr,
- test_json_nested_arr,
+ test_json_array,
+ test_json_nested_array,
test_table_row);
}
diff --git a/deps/jemalloc/test/unit/extent_quantize.c b/deps/jemalloc/test/unit/extent_quantize.c
index 0ca7a75d9..e6bbd539c 100644
--- a/deps/jemalloc/test/unit/extent_quantize.c
+++ b/deps/jemalloc/test/unit/extent_quantize.c
@@ -12,22 +12,22 @@ TEST_BEGIN(test_small_extent_size) {
*/
sz = sizeof(unsigned);
- assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
+ expect_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
"Unexpected mallctl failure");
- assert_d_eq(mallctlnametomib("arenas.bin.0.slab_size", mib, &miblen), 0,
+ expect_d_eq(mallctlnametomib("arenas.bin.0.slab_size", mib, &miblen), 0,
"Unexpected mallctlnametomib failure");
for (i = 0; i < nbins; i++) {
mib[2] = i;
sz = sizeof(size_t);
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&extent_size, &sz,
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&extent_size, &sz,
NULL, 0), 0, "Unexpected mallctlbymib failure");
- assert_zu_eq(extent_size,
- extent_size_quantize_floor(extent_size),
+ expect_zu_eq(extent_size,
+ sz_psz_quantize_floor(extent_size),
"Small extent quantization should be a no-op "
"(extent_size=%zu)", extent_size);
- assert_zu_eq(extent_size,
- extent_size_quantize_ceil(extent_size),
+ expect_zu_eq(extent_size,
+ sz_psz_quantize_ceil(extent_size),
"Small extent quantization should be a no-op "
"(extent_size=%zu)", extent_size);
}
@@ -47,42 +47,42 @@ TEST_BEGIN(test_large_extent_size) {
*/
sz = sizeof(bool);
- assert_d_eq(mallctl("config.cache_oblivious", (void *)&cache_oblivious,
+ expect_d_eq(mallctl("opt.cache_oblivious", (void *)&cache_oblivious,
&sz, NULL, 0), 0, "Unexpected mallctl failure");
sz = sizeof(unsigned);
- assert_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL,
+ expect_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL,
0), 0, "Unexpected mallctl failure");
- assert_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0,
+ expect_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0,
"Unexpected mallctlnametomib failure");
for (i = 0; i < nlextents; i++) {
size_t lextent_size, extent_size, floor, ceil;
mib[2] = i;
sz = sizeof(size_t);
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&lextent_size,
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&lextent_size,
&sz, NULL, 0), 0, "Unexpected mallctlbymib failure");
extent_size = cache_oblivious ? lextent_size + PAGE :
lextent_size;
- floor = extent_size_quantize_floor(extent_size);
- ceil = extent_size_quantize_ceil(extent_size);
+ floor = sz_psz_quantize_floor(extent_size);
+ ceil = sz_psz_quantize_ceil(extent_size);
- assert_zu_eq(extent_size, floor,
+ expect_zu_eq(extent_size, floor,
"Extent quantization should be a no-op for precise size "
"(lextent_size=%zu, extent_size=%zu)", lextent_size,
extent_size);
- assert_zu_eq(extent_size, ceil,
+ expect_zu_eq(extent_size, ceil,
"Extent quantization should be a no-op for precise size "
"(lextent_size=%zu, extent_size=%zu)", lextent_size,
extent_size);
if (i > 0) {
- assert_zu_eq(extent_size_prev,
- extent_size_quantize_floor(extent_size - PAGE),
+ expect_zu_eq(extent_size_prev,
+ sz_psz_quantize_floor(extent_size - PAGE),
"Floor should be a precise size");
if (extent_size_prev < ceil_prev) {
- assert_zu_eq(ceil_prev, extent_size,
+ expect_zu_eq(ceil_prev, extent_size,
"Ceiling should be a precise size "
"(extent_size_prev=%zu, ceil_prev=%zu, "
"extent_size=%zu)", extent_size_prev,
@@ -91,7 +91,7 @@ TEST_BEGIN(test_large_extent_size) {
}
if (i + 1 < nlextents) {
extent_size_prev = floor;
- ceil_prev = extent_size_quantize_ceil(extent_size +
+ ceil_prev = sz_psz_quantize_ceil(extent_size +
PAGE);
}
}
@@ -109,20 +109,20 @@ TEST_BEGIN(test_monotonic) {
size_t extent_size, floor, ceil;
extent_size = i << LG_PAGE;
- floor = extent_size_quantize_floor(extent_size);
- ceil = extent_size_quantize_ceil(extent_size);
+ floor = sz_psz_quantize_floor(extent_size);
+ ceil = sz_psz_quantize_ceil(extent_size);
- assert_zu_le(floor, extent_size,
+ expect_zu_le(floor, extent_size,
"Floor should be <= (floor=%zu, extent_size=%zu, ceil=%zu)",
floor, extent_size, ceil);
- assert_zu_ge(ceil, extent_size,
+ expect_zu_ge(ceil, extent_size,
"Ceiling should be >= (floor=%zu, extent_size=%zu, "
"ceil=%zu)", floor, extent_size, ceil);
- assert_zu_le(floor_prev, floor, "Floor should be monotonic "
+ expect_zu_le(floor_prev, floor, "Floor should be monotonic "
"(floor_prev=%zu, floor=%zu, extent_size=%zu, ceil=%zu)",
floor_prev, floor, extent_size, ceil);
- assert_zu_le(ceil_prev, ceil, "Ceiling should be monotonic "
+ expect_zu_le(ceil_prev, ceil, "Ceiling should be monotonic "
"(floor=%zu, extent_size=%zu, ceil_prev=%zu, ceil=%zu)",
floor, extent_size, ceil_prev, ceil);
diff --git a/deps/jemalloc/test/unit/fb.c b/deps/jemalloc/test/unit/fb.c
new file mode 100644
index 000000000..ad72c75ad
--- /dev/null
+++ b/deps/jemalloc/test/unit/fb.c
@@ -0,0 +1,954 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/fb.h"
+#include "test/nbits.h"
+
+static void
+do_test_init(size_t nbits) {
+ size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
+ fb_group_t *fb = malloc(sz);
+ /* Junk fb's contents. */
+ memset(fb, 99, sz);
+ fb_init(fb, nbits);
+ for (size_t i = 0; i < nbits; i++) {
+ expect_false(fb_get(fb, nbits, i),
+ "bitmap should start empty");
+ }
+ free(fb);
+}
+
+TEST_BEGIN(test_fb_init) {
+#define NB(nbits) \
+ do_test_init(nbits);
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
+static void
+do_test_get_set_unset(size_t nbits) {
+ size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
+ fb_group_t *fb = malloc(sz);
+ fb_init(fb, nbits);
+ /* Set the bits divisible by 3. */
+ for (size_t i = 0; i < nbits; i++) {
+ if (i % 3 == 0) {
+ fb_set(fb, nbits, i);
+ }
+ }
+ /* Check them. */
+ for (size_t i = 0; i < nbits; i++) {
+ expect_b_eq(i % 3 == 0, fb_get(fb, nbits, i),
+ "Unexpected bit at position %zu", i);
+ }
+ /* Unset those divisible by 5. */
+ for (size_t i = 0; i < nbits; i++) {
+ if (i % 5 == 0) {
+ fb_unset(fb, nbits, i);
+ }
+ }
+ /* Check them. */
+ for (size_t i = 0; i < nbits; i++) {
+ expect_b_eq(i % 3 == 0 && i % 5 != 0, fb_get(fb, nbits, i),
+ "Unexpected bit at position %zu", i);
+ }
+ free(fb);
+}
+
+TEST_BEGIN(test_get_set_unset) {
+#define NB(nbits) \
+ do_test_get_set_unset(nbits);
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
+static ssize_t
+find_3_5_compute(ssize_t i, size_t nbits, bool bit, bool forward) {
+	for (; i < (ssize_t)nbits && i >= 0; i += (forward ? 1 : -1)) {
+ bool expected_bit = i % 3 == 0 || i % 5 == 0;
+ if (expected_bit == bit) {
+ return i;
+ }
+ }
+ return forward ? (ssize_t)nbits : (ssize_t)-1;
+}
+
+static void
+do_test_search_simple(size_t nbits) {
+ size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
+ fb_group_t *fb = malloc(sz);
+ fb_init(fb, nbits);
+
+ /* We pick multiples of 3 or 5. */
+ for (size_t i = 0; i < nbits; i++) {
+ if (i % 3 == 0) {
+ fb_set(fb, nbits, i);
+ }
+ /* This tests double-setting a little, too. */
+ if (i % 5 == 0) {
+ fb_set(fb, nbits, i);
+ }
+ }
+ for (size_t i = 0; i < nbits; i++) {
+ size_t ffs_compute = find_3_5_compute(i, nbits, true, true);
+ size_t ffs_search = fb_ffs(fb, nbits, i);
+ expect_zu_eq(ffs_compute, ffs_search, "ffs mismatch at %zu", i);
+
+ ssize_t fls_compute = find_3_5_compute(i, nbits, true, false);
+ size_t fls_search = fb_fls(fb, nbits, i);
+ expect_zu_eq(fls_compute, fls_search, "fls mismatch at %zu", i);
+
+ size_t ffu_compute = find_3_5_compute(i, nbits, false, true);
+ size_t ffu_search = fb_ffu(fb, nbits, i);
+ expect_zu_eq(ffu_compute, ffu_search, "ffu mismatch at %zu", i);
+
+ size_t flu_compute = find_3_5_compute(i, nbits, false, false);
+ size_t flu_search = fb_flu(fb, nbits, i);
+ expect_zu_eq(flu_compute, flu_search, "flu mismatch at %zu", i);
+ }
+
+ free(fb);
+}
+
+TEST_BEGIN(test_search_simple) {
+#define NB(nbits) \
+ do_test_search_simple(nbits);
+ NBITS_TAB
+#undef NB
+}
+TEST_END
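
The brute-force helper above pins down the search semantics these tests rely on: fb_ffs/fb_ffu scan forward from the start position and return nbits when nothing qualifies, while fb_fls/fb_flu scan backward and return -1 when nothing is found. A small sketch using only calls exercised in this file; the bit positions are arbitrary.

	#include "test/jemalloc_test.h"
	#include "jemalloc/internal/fb.h"

	static void
	fb_search_sketch(void) {
		enum { NBITS = 16 };
		fb_group_t fb[FB_NGROUPS(NBITS)];
		fb_init(fb, NBITS);
		fb_set(fb, NBITS, 3);
		fb_set(fb, NBITS, 10);

		size_t first_set = fb_ffs(fb, NBITS, 0);   /* 3 */
		size_t next_set = fb_ffs(fb, NBITS, 4);    /* 10 */
		size_t none_left = fb_ffs(fb, NBITS, 11);  /* NBITS (16) */
		ssize_t last_set = fb_fls(fb, NBITS, 9);   /* 3, scanning back */
		size_t first_unset = fb_ffu(fb, NBITS, 3); /* 4 */

		(void)first_set; (void)next_set; (void)none_left;
		(void)last_set; (void)first_unset;
	}
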
+
+static void
+expect_exhaustive_results(fb_group_t *mostly_full, fb_group_t *mostly_empty,
+ size_t nbits, size_t special_bit, size_t position) {
+ if (position < special_bit) {
+ expect_zu_eq(special_bit, fb_ffs(mostly_empty, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zd_eq(-1, fb_fls(mostly_empty, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zu_eq(position, fb_ffu(mostly_empty, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zd_eq(position, fb_flu(mostly_empty, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+
+ expect_zu_eq(position, fb_ffs(mostly_full, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zd_eq(position, fb_fls(mostly_full, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zu_eq(special_bit, fb_ffu(mostly_full, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zd_eq(-1, fb_flu(mostly_full, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ } else if (position == special_bit) {
+ expect_zu_eq(special_bit, fb_ffs(mostly_empty, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zd_eq(special_bit, fb_fls(mostly_empty, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zu_eq(position + 1, fb_ffu(mostly_empty, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zd_eq(position - 1, fb_flu(mostly_empty, nbits,
+ position), "mismatch at %zu, %zu", position, special_bit);
+
+ expect_zu_eq(position + 1, fb_ffs(mostly_full, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zd_eq(position - 1, fb_fls(mostly_full, nbits,
+ position), "mismatch at %zu, %zu", position, special_bit);
+ expect_zu_eq(position, fb_ffu(mostly_full, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zd_eq(position, fb_flu(mostly_full, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ } else {
+ /* position > special_bit. */
+ expect_zu_eq(nbits, fb_ffs(mostly_empty, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zd_eq(special_bit, fb_fls(mostly_empty, nbits,
+ position), "mismatch at %zu, %zu", position, special_bit);
+ expect_zu_eq(position, fb_ffu(mostly_empty, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zd_eq(position, fb_flu(mostly_empty, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+
+ expect_zu_eq(position, fb_ffs(mostly_full, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zd_eq(position, fb_fls(mostly_full, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zu_eq(nbits, fb_ffu(mostly_full, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ expect_zd_eq(special_bit, fb_flu(mostly_full, nbits, position),
+ "mismatch at %zu, %zu", position, special_bit);
+ }
+}
+
+static void
+do_test_search_exhaustive(size_t nbits) {
+ /* This test is quadratic; let's not get too big. */
+ if (nbits > 1000) {
+ return;
+ }
+ size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
+ fb_group_t *empty = malloc(sz);
+ fb_init(empty, nbits);
+ fb_group_t *full = malloc(sz);
+ fb_init(full, nbits);
+ fb_set_range(full, nbits, 0, nbits);
+
+ for (size_t i = 0; i < nbits; i++) {
+ fb_set(empty, nbits, i);
+ fb_unset(full, nbits, i);
+
+ for (size_t j = 0; j < nbits; j++) {
+ expect_exhaustive_results(full, empty, nbits, i, j);
+ }
+ fb_unset(empty, nbits, i);
+ fb_set(full, nbits, i);
+ }
+
+ free(empty);
+ free(full);
+}
+
+TEST_BEGIN(test_search_exhaustive) {
+#define NB(nbits) \
+ do_test_search_exhaustive(nbits);
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
+TEST_BEGIN(test_range_simple) {
+ /*
+ * Just pick a constant big enough to have nontrivial middle sizes, and
+ * big enough that usages of things like weirdnum (below) near the
+ * beginning fit comfortably into the beginning of the bitmap.
+ */
+ size_t nbits = 64 * 10;
+ size_t ngroups = FB_NGROUPS(nbits);
+ fb_group_t *fb = malloc(sizeof(fb_group_t) * ngroups);
+ fb_init(fb, nbits);
+ for (size_t i = 0; i < nbits; i++) {
+ if (i % 2 == 0) {
+ fb_set_range(fb, nbits, i, 1);
+ }
+ }
+ for (size_t i = 0; i < nbits; i++) {
+ expect_b_eq(i % 2 == 0, fb_get(fb, nbits, i),
+ "mismatch at position %zu", i);
+ }
+ fb_set_range(fb, nbits, 0, nbits / 2);
+ fb_unset_range(fb, nbits, nbits / 2, nbits / 2);
+ for (size_t i = 0; i < nbits; i++) {
+ expect_b_eq(i < nbits / 2, fb_get(fb, nbits, i),
+ "mismatch at position %zu", i);
+ }
+
+ static const size_t weirdnum = 7;
+ fb_set_range(fb, nbits, 0, nbits);
+ fb_unset_range(fb, nbits, weirdnum, FB_GROUP_BITS + weirdnum);
+ for (size_t i = 0; i < nbits; i++) {
+		expect_b_eq(weirdnum <= i && i <= 2 * weirdnum + FB_GROUP_BITS - 1,
+ !fb_get(fb, nbits, i), "mismatch at position %zu", i);
+ }
+ free(fb);
+}
+TEST_END
+
+static void
+do_test_empty_full_exhaustive(size_t nbits) {
+ size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
+ fb_group_t *empty = malloc(sz);
+ fb_init(empty, nbits);
+ fb_group_t *full = malloc(sz);
+ fb_init(full, nbits);
+ fb_set_range(full, nbits, 0, nbits);
+
+ expect_true(fb_full(full, nbits), "");
+ expect_false(fb_empty(full, nbits), "");
+ expect_false(fb_full(empty, nbits), "");
+ expect_true(fb_empty(empty, nbits), "");
+
+ for (size_t i = 0; i < nbits; i++) {
+ fb_set(empty, nbits, i);
+ fb_unset(full, nbits, i);
+
+ expect_false(fb_empty(empty, nbits), "error at bit %zu", i);
+ if (nbits != 1) {
+ expect_false(fb_full(empty, nbits),
+ "error at bit %zu", i);
+ expect_false(fb_empty(full, nbits),
+ "error at bit %zu", i);
+ } else {
+ expect_true(fb_full(empty, nbits),
+ "error at bit %zu", i);
+ expect_true(fb_empty(full, nbits),
+ "error at bit %zu", i);
+ }
+ expect_false(fb_full(full, nbits), "error at bit %zu", i);
+
+ fb_unset(empty, nbits, i);
+ fb_set(full, nbits, i);
+ }
+
+ free(empty);
+ free(full);
+}
+
+TEST_BEGIN(test_empty_full) {
+#define NB(nbits) \
+ do_test_empty_full_exhaustive(nbits);
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
+/*
+ * This tests both iter_range and the longest range functionality, which is
+ * built closely on top of it.
+ */
+TEST_BEGIN(test_iter_range_simple) {
+ size_t set_limit = 30;
+ size_t nbits = 100;
+ fb_group_t fb[FB_NGROUPS(100)];
+
+ fb_init(fb, nbits);
+
+ /*
+ * Failing to initialize these can lead to build failures with -Wall;
+ * the compiler can't prove that they're set.
+ */
+ size_t begin = (size_t)-1;
+ size_t len = (size_t)-1;
+ bool result;
+
+ /* A set of checks with only the first set_limit bits *set*. */
+ fb_set_range(fb, nbits, 0, set_limit);
+ expect_zu_eq(set_limit, fb_srange_longest(fb, nbits),
+ "Incorrect longest set range");
+ expect_zu_eq(nbits - set_limit, fb_urange_longest(fb, nbits),
+ "Incorrect longest unset range");
+ for (size_t i = 0; i < set_limit; i++) {
+ result = fb_srange_iter(fb, nbits, i, &begin, &len);
+ expect_true(result, "Should have found a range at %zu", i);
+ expect_zu_eq(i, begin, "Incorrect begin at %zu", i);
+ expect_zu_eq(set_limit - i, len, "Incorrect len at %zu", i);
+
+ result = fb_urange_iter(fb, nbits, i, &begin, &len);
+ expect_true(result, "Should have found a range at %zu", i);
+ expect_zu_eq(set_limit, begin, "Incorrect begin at %zu", i);
+ expect_zu_eq(nbits - set_limit, len, "Incorrect len at %zu", i);
+
+ result = fb_srange_riter(fb, nbits, i, &begin, &len);
+ expect_true(result, "Should have found a range at %zu", i);
+ expect_zu_eq(0, begin, "Incorrect begin at %zu", i);
+ expect_zu_eq(i + 1, len, "Incorrect len at %zu", i);
+
+ result = fb_urange_riter(fb, nbits, i, &begin, &len);
+ expect_false(result, "Should not have found a range at %zu", i);
+ }
+ for (size_t i = set_limit; i < nbits; i++) {
+ result = fb_srange_iter(fb, nbits, i, &begin, &len);
+ expect_false(result, "Should not have found a range at %zu", i);
+
+ result = fb_urange_iter(fb, nbits, i, &begin, &len);
+ expect_true(result, "Should have found a range at %zu", i);
+ expect_zu_eq(i, begin, "Incorrect begin at %zu", i);
+ expect_zu_eq(nbits - i, len, "Incorrect len at %zu", i);
+
+ result = fb_srange_riter(fb, nbits, i, &begin, &len);
+ expect_true(result, "Should have found a range at %zu", i);
+ expect_zu_eq(0, begin, "Incorrect begin at %zu", i);
+ expect_zu_eq(set_limit, len, "Incorrect len at %zu", i);
+
+ result = fb_urange_riter(fb, nbits, i, &begin, &len);
+ expect_true(result, "Should have found a range at %zu", i);
+ expect_zu_eq(set_limit, begin, "Incorrect begin at %zu", i);
+ expect_zu_eq(i - set_limit + 1, len, "Incorrect len at %zu", i);
+ }
+
+ /* A set of checks with only the first set_limit bits *unset*. */
+ fb_unset_range(fb, nbits, 0, set_limit);
+ fb_set_range(fb, nbits, set_limit, nbits - set_limit);
+ expect_zu_eq(nbits - set_limit, fb_srange_longest(fb, nbits),
+ "Incorrect longest set range");
+ expect_zu_eq(set_limit, fb_urange_longest(fb, nbits),
+ "Incorrect longest unset range");
+ for (size_t i = 0; i < set_limit; i++) {
+ result = fb_srange_iter(fb, nbits, i, &begin, &len);
+ expect_true(result, "Should have found a range at %zu", i);
+ expect_zu_eq(set_limit, begin, "Incorrect begin at %zu", i);
+ expect_zu_eq(nbits - set_limit, len, "Incorrect len at %zu", i);
+
+ result = fb_urange_iter(fb, nbits, i, &begin, &len);
+ expect_true(result, "Should have found a range at %zu", i);
+ expect_zu_eq(i, begin, "Incorrect begin at %zu", i);
+ expect_zu_eq(set_limit - i, len, "Incorrect len at %zu", i);
+
+ result = fb_srange_riter(fb, nbits, i, &begin, &len);
+ expect_false(result, "Should not have found a range at %zu", i);
+
+ result = fb_urange_riter(fb, nbits, i, &begin, &len);
+ expect_true(result, "Should not have found a range at %zu", i);
+ expect_zu_eq(0, begin, "Incorrect begin at %zu", i);
+ expect_zu_eq(i + 1, len, "Incorrect len at %zu", i);
+ }
+ for (size_t i = set_limit; i < nbits; i++) {
+ result = fb_srange_iter(fb, nbits, i, &begin, &len);
+ expect_true(result, "Should have found a range at %zu", i);
+ expect_zu_eq(i, begin, "Incorrect begin at %zu", i);
+ expect_zu_eq(nbits - i, len, "Incorrect len at %zu", i);
+
+ result = fb_urange_iter(fb, nbits, i, &begin, &len);
+ expect_false(result, "Should not have found a range at %zu", i);
+
+ result = fb_srange_riter(fb, nbits, i, &begin, &len);
+ expect_true(result, "Should have found a range at %zu", i);
+ expect_zu_eq(set_limit, begin, "Incorrect begin at %zu", i);
+ expect_zu_eq(i - set_limit + 1, len, "Incorrect len at %zu", i);
+
+ result = fb_urange_riter(fb, nbits, i, &begin, &len);
+ expect_true(result, "Should have found a range at %zu", i);
+ expect_zu_eq(0, begin, "Incorrect begin at %zu", i);
+ expect_zu_eq(set_limit, len, "Incorrect len at %zu", i);
+ }
+}
+TEST_END
+
+/*
+ * Doing this bit-by-bit is too slow for a real implementation, but for testing
+ * code, it's easy to get right. In the exhaustive tests, we'll compare the
+ * (fast but tricky) real implementation against the (slow but simple) testing
+ * one.
+ */
+static bool
+fb_iter_simple(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
+ size_t *r_len, bool val, bool forward) {
+ ssize_t stride = (forward ? (ssize_t)1 : (ssize_t)-1);
+ ssize_t range_begin = (ssize_t)start;
+ for (; range_begin != (ssize_t)nbits && range_begin != -1;
+ range_begin += stride) {
+ if (fb_get(fb, nbits, range_begin) == val) {
+ ssize_t range_end = range_begin;
+ for (; range_end != (ssize_t)nbits && range_end != -1;
+ range_end += stride) {
+ if (fb_get(fb, nbits, range_end) != val) {
+ break;
+ }
+ }
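+ /*
+ * Going backward, range_end stops just before the run (or at
+ * -1), so the reported run begins at range_end + 1.
+ */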
+ if (forward) {
+ *r_begin = range_begin;
+ *r_len = range_end - range_begin;
+ } else {
+ *r_begin = range_end + 1;
+ *r_len = range_begin - range_end;
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+/* Similar, but for finding longest ranges. */
+static size_t
+fb_range_longest_simple(fb_group_t *fb, size_t nbits, bool val) {
+ size_t longest_so_far = 0;
+ for (size_t begin = 0; begin < nbits; begin++) {
+ if (fb_get(fb, nbits, begin) != val) {
+ continue;
+ }
+ size_t end = begin + 1;
+ for (; end < nbits; end++) {
+ if (fb_get(fb, nbits, end) != val) {
+ break;
+ }
+ }
+ if (end - begin > longest_so_far) {
+ longest_so_far = end - begin;
+ }
+ }
+ return longest_so_far;
+}
+
+static void
+expect_iter_results_at(fb_group_t *fb, size_t nbits, size_t pos,
+ bool val, bool forward) {
+ bool iter_res;
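+ /* Initialized only to silence spurious uninitialized-use warnings. */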
+ size_t iter_begin JEMALLOC_CC_SILENCE_INIT(0);
+ size_t iter_len JEMALLOC_CC_SILENCE_INIT(0);
+ if (val) {
+ if (forward) {
+ iter_res = fb_srange_iter(fb, nbits, pos,
+ &iter_begin, &iter_len);
+ } else {
+ iter_res = fb_srange_riter(fb, nbits, pos,
+ &iter_begin, &iter_len);
+ }
+ } else {
+ if (forward) {
+ iter_res = fb_urange_iter(fb, nbits, pos,
+ &iter_begin, &iter_len);
+ } else {
+ iter_res = fb_urange_riter(fb, nbits, pos,
+ &iter_begin, &iter_len);
+ }
+ }
+
+ bool simple_iter_res;
+ /*
+ * These are dead stores, but the compiler can't always figure that out
+ * statically, and warns on the uninitialized variable.
+ */
+ size_t simple_iter_begin = 0;
+ size_t simple_iter_len = 0;
+ simple_iter_res = fb_iter_simple(fb, nbits, pos, &simple_iter_begin,
+ &simple_iter_len, val, forward);
+
+ expect_b_eq(iter_res, simple_iter_res, "Result mismatch at %zu", pos);
+ if (iter_res && simple_iter_res) {
+ assert_zu_eq(iter_begin, simple_iter_begin,
+ "Begin mismatch at %zu", pos);
+ expect_zu_eq(iter_len, simple_iter_len,
+ "Length mismatch at %zu", pos);
+ }
+}
+
+static void
+expect_iter_results(fb_group_t *fb, size_t nbits) {
+ for (size_t i = 0; i < nbits; i++) {
+ expect_iter_results_at(fb, nbits, i, false, false);
+ expect_iter_results_at(fb, nbits, i, false, true);
+ expect_iter_results_at(fb, nbits, i, true, false);
+ expect_iter_results_at(fb, nbits, i, true, true);
+ }
+ expect_zu_eq(fb_range_longest_simple(fb, nbits, true),
+ fb_srange_longest(fb, nbits), "Longest range mismatch");
+ expect_zu_eq(fb_range_longest_simple(fb, nbits, false),
+ fb_urange_longest(fb, nbits), "Longest range mismatch");
+}
+
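+/*
+ * Sets a repeating pattern of three bits on, three bits off; zero_val gives
+ * the value of bit 0.
+ */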
+static void
+set_pattern_3(fb_group_t *fb, size_t nbits, bool zero_val) {
+ for (size_t i = 0; i < nbits; i++) {
+ if ((i % 6 < 3 && zero_val) || (i % 6 >= 3 && !zero_val)) {
+ fb_set(fb, nbits, i);
+ } else {
+ fb_unset(fb, nbits, i);
+ }
+ }
+}
+
+static void
+do_test_iter_range_exhaustive(size_t nbits) {
+ /* This test is also pretty slow. */
+ if (nbits > 1000) {
+ return;
+ }
+ size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
+ fb_group_t *fb = malloc(sz);
+ fb_init(fb, nbits);
+
+ set_pattern_3(fb, nbits, /* zero_val */ true);
+ expect_iter_results(fb, nbits);
+
+ set_pattern_3(fb, nbits, /* zero_val */ false);
+ expect_iter_results(fb, nbits);
+
+ fb_set_range(fb, nbits, 0, nbits);
+ fb_unset_range(fb, nbits, 0, nbits / 2 == 0 ? 1 : nbits / 2);
+ expect_iter_results(fb, nbits);
+
+ fb_unset_range(fb, nbits, 0, nbits);
+ fb_set_range(fb, nbits, 0, nbits / 2 == 0 ? 1 : nbits / 2);
+ expect_iter_results(fb, nbits);
+
+ free(fb);
+}
+
+/*
+ * Like test_iter_range_simple, this tests both iteration and longest-range
+ * computation.
+ */
+TEST_BEGIN(test_iter_range_exhaustive) {
+#define NB(nbits) \
+ do_test_iter_range_exhaustive(nbits);
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
+/*
+ * If all set bits in the bitmap are contiguous, in [set_start, set_end),
+ * returns the number of set bits in [scount_start, scount_end).
+ */
+static size_t
+scount_contiguous(size_t set_start, size_t set_end, size_t scount_start,
+ size_t scount_end) {
+ /* No overlap. */
+ if (set_end <= scount_start || scount_end <= set_start) {
+ return 0;
+ }
+ /* set range contains scount range */
+ if (set_start <= scount_start && set_end >= scount_end) {
+ return scount_end - scount_start;
+ }
+ /* scount range contains set range. */
+ if (scount_start <= set_start && scount_end >= set_end) {
+ return set_end - set_start;
+ }
+ /* Partial overlap, with set range starting first. */
+ if (set_start < scount_start && set_end < scount_end) {
+ return set_end - scount_start;
+ }
+ /* Partial overlap, with scount range starting first. */
+ if (scount_start < set_start && scount_end < set_end) {
+ return scount_end - set_start;
+ }
+ /*
+ * Trigger an assert failure; the above list should have been
+ * exhaustive.
+ */
+ unreachable();
+}
+
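+/*
+ * Analogue of scount_contiguous: with all set bits contiguous in
+ * [set_start, set_end), returns the number of unset bits in
+ * [ucount_start, ucount_end).
+ */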
+static size_t
+ucount_contiguous(size_t set_start, size_t set_end, size_t ucount_start,
+ size_t ucount_end) {
+ /* No overlap. */
+ if (set_end <= ucount_start || ucount_end <= set_start) {
+ return ucount_end - ucount_start;
+ }
+ /* set range contains ucount range */
+ if (set_start <= ucount_start && set_end >= ucount_end) {
+ return 0;
+ }
+ /* ucount range contains set range. */
+ if (ucount_start <= set_start && ucount_end >= set_end) {
+ return (ucount_end - ucount_start) - (set_end - set_start);
+ }
+ /* Partial overlap, with set range starting first. */
+ if (set_start < ucount_start && set_end < ucount_end) {
+ return ucount_end - set_end;
+ }
+ /* Partial overlap, with ucount range starting first. */
+ if (ucount_start < set_start && ucount_end < set_end) {
+ return set_start - ucount_start;
+ }
+ /*
+ * Trigger an assert failure; the above list should have been
+ * exhaustive.
+ */
+ unreachable();
+}
+
+static void
+expect_count_match_contiguous(fb_group_t *fb, size_t nbits, size_t set_start,
+ size_t set_end) {
+ for (size_t i = 0; i < nbits; i++) {
+ for (size_t j = i + 1; j <= nbits; j++) {
+ size_t cnt = j - i;
+ size_t scount_expected = scount_contiguous(set_start,
+ set_end, i, j);
+ size_t scount_computed = fb_scount(fb, nbits, i, cnt);
+ expect_zu_eq(scount_expected, scount_computed,
+ "fb_scount error with nbits=%zu, start=%zu, "
+ "cnt=%zu, with bits set in [%zu, %zu)",
+ nbits, i, cnt, set_start, set_end);
+
+ size_t ucount_expected = ucount_contiguous(set_start,
+ set_end, i, j);
+ size_t ucount_computed = fb_ucount(fb, nbits, i, cnt);
+ assert_zu_eq(ucount_expected, ucount_computed,
+ "fb_ucount error with nbits=%zu, start=%zu, "
+ "cnt=%zu, with bits set in [%zu, %zu)",
+ nbits, i, cnt, set_start, set_end);
+
+ }
+ }
+}
+
+static void
+do_test_count_contiguous(size_t nbits) {
+ size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
+ fb_group_t *fb = malloc(sz);
+
+ fb_init(fb, nbits);
+
+ expect_count_match_contiguous(fb, nbits, 0, 0);
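+ /* Grow a contiguous set prefix one bit at a time. */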
+ for (size_t i = 0; i < nbits; i++) {
+ fb_set(fb, nbits, i);
+ expect_count_match_contiguous(fb, nbits, 0, i + 1);
+ }
+
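+ /* Then unset from the front, leaving a contiguous set suffix. */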
+ for (size_t i = 0; i < nbits; i++) {
+ fb_unset(fb, nbits, i);
+ expect_count_match_contiguous(fb, nbits, i + 1, nbits);
+ }
+
+ free(fb);
+}
+
+TEST_BEGIN(test_count_contiguous_simple) {
+ enum {nbits = 300};
+ fb_group_t fb[FB_NGROUPS(nbits)];
+ fb_init(fb, nbits);
+ /* Just an arbitrary number. */
+ size_t start = 23;
+
+ fb_set_range(fb, nbits, start, 30 - start);
+ expect_count_match_contiguous(fb, nbits, start, 30);
+
+ fb_set_range(fb, nbits, start, 40 - start);
+ expect_count_match_contiguous(fb, nbits, start, 40);
+
+ fb_set_range(fb, nbits, start, 70 - start);
+ expect_count_match_contiguous(fb, nbits, start, 70);
+
+ fb_set_range(fb, nbits, start, 120 - start);
+ expect_count_match_contiguous(fb, nbits, start, 120);
+
+ fb_set_range(fb, nbits, start, 150 - start);
+ expect_count_match_contiguous(fb, nbits, start, 150);
+
+ fb_set_range(fb, nbits, start, 200 - start);
+ expect_count_match_contiguous(fb, nbits, start, 200);
+
+ fb_set_range(fb, nbits, start, 290 - start);
+ expect_count_match_contiguous(fb, nbits, start, 290);
+}
+TEST_END
+
+TEST_BEGIN(test_count_contiguous) {
+#define NB(nbits) \
+ /* This test is *particularly* slow in debug builds. */ \
+ if ((!config_debug && nbits < 300) || nbits < 150) { \
+ do_test_count_contiguous(nbits); \
+ }
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
+static void
+expect_count_match_alternating(fb_group_t *fb_even, fb_group_t *fb_odd,
+ size_t nbits) {
+ for (size_t i = 0; i < nbits; i++) {
+ for (size_t j = i + 1; j <= nbits; j++) {
+ size_t cnt = j - i;
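+ /*
+ * With alternating bits, a window of cnt bits holds cnt / 2
+ * matching bits, plus one more when the window has odd length
+ * and starts on a matching index.
+ */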
+ size_t odd_scount = cnt / 2
+ + (size_t)(cnt % 2 == 1 && i % 2 == 1);
+ size_t odd_scount_computed = fb_scount(fb_odd, nbits,
+ i, j - i);
+ assert_zu_eq(odd_scount, odd_scount_computed,
+ "fb_scount error with nbits=%zu, start=%zu, "
+ "cnt=%zu, with alternating bits set.",
+ nbits, i, j - i);
+
+ size_t odd_ucount = cnt / 2
+ + (size_t)(cnt % 2 == 1 && i % 2 == 0);
+ size_t odd_ucount_computed = fb_ucount(fb_odd, nbits,
+ i, j - i);
+ assert_zu_eq(odd_ucount, odd_ucount_computed,
+ "fb_ucount error with nbits=%zu, start=%zu, "
+ "cnt=%zu, with alternating bits set.",
+ nbits, i, j - i);
+
+ size_t even_scount = cnt / 2
+ + (size_t)(cnt % 2 == 1 && i % 2 == 0);
+ size_t even_scount_computed = fb_scount(fb_even, nbits,
+ i, j - i);
+ assert_zu_eq(even_scount, even_scount_computed,
+ "fb_scount error with nbits=%zu, start=%zu, "
+ "cnt=%zu, with alternating bits set.",
+ nbits, i, j - i);
+
+ size_t even_ucount = cnt / 2
+ + (size_t)(cnt % 2 == 1 && i % 2 == 1);
+ size_t even_ucount_computed = fb_ucount(fb_even, nbits,
+ i, j - i);
+ assert_zu_eq(even_ucount, even_ucount_computed,
+ "fb_ucount error with nbits=%zu, start=%zu, "
+ "cnt=%zu, with alternating bits set.",
+ nbits, i, j - i);
+ }
+ }
+}
+
+static void
+do_test_count_alternating(size_t nbits) {
+ if (nbits > 1000) {
+ return;
+ }
+ size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
+ fb_group_t *fb_even = malloc(sz);
+ fb_group_t *fb_odd = malloc(sz);
+
+ fb_init(fb_even, nbits);
+ fb_init(fb_odd, nbits);
+
+ for (size_t i = 0; i < nbits; i++) {
+ if (i % 2 == 0) {
+ fb_set(fb_even, nbits, i);
+ } else {
+ fb_set(fb_odd, nbits, i);
+ }
+ }
+
+ expect_count_match_alternating(fb_even, fb_odd, nbits);
+
+ free(fb_even);
+ free(fb_odd);
+}
+
+TEST_BEGIN(test_count_alternating) {
+#define NB(nbits) \
+ do_test_count_alternating(nbits);
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
+static void
+do_test_bit_op(size_t nbits, bool (*op)(bool a, bool b),
+ void (*fb_op)(fb_group_t *dst, fb_group_t *src1, fb_group_t *src2, size_t nbits)) {
+ size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
+ fb_group_t *fb1 = malloc(sz);
+ fb_group_t *fb2 = malloc(sz);
+ fb_group_t *fb_result = malloc(sz);
+ fb_init(fb1, nbits);
+ fb_init(fb2, nbits);
+ fb_init(fb_result, nbits);
+
+ /* Just two random numbers. */
+ const uint64_t prng_init1 = (uint64_t)0X4E9A9DE6A35691CDULL;
+ const uint64_t prng_init2 = (uint64_t)0X7856E396B063C36EULL;
+
+ uint64_t prng1 = prng_init1;
+ uint64_t prng2 = prng_init2;
+
+ for (size_t i = 0; i < nbits; i++) {
+ bool bit1 = ((prng1 & (1ULL << (i % 64))) != 0);
+ bool bit2 = ((prng2 & (1ULL << (i % 64))) != 0);
+
+ if (bit1) {
+ fb_set(fb1, nbits, i);
+ }
+ if (bit2) {
+ fb_set(fb2, nbits, i);
+ }
+
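+ /* Advance to fresh PRNG words once per 64 bits. */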
+ if (i % 64 == 0) {
+ prng1 = prng_state_next_u64(prng1);
+ prng2 = prng_state_next_u64(prng2);
+ }
+ }
+
+ fb_op(fb_result, fb1, fb2, nbits);
+
+ /* Reset the prngs to replay them. */
+ prng1 = prng_init1;
+ prng2 = prng_init2;
+
+ for (size_t i = 0; i < nbits; i++) {
+ bool bit1 = ((prng1 & (1ULL << (i % 64))) != 0);
+ bool bit2 = ((prng2 & (1ULL << (i % 64))) != 0);
+
+ /* Original bitmaps shouldn't change. */
+ expect_b_eq(bit1, fb_get(fb1, nbits, i), "difference at bit %zu", i);
+ expect_b_eq(bit2, fb_get(fb2, nbits, i), "difference at bit %zu", i);
+
+ /* The result bitmap should be the op applied bitwise. */
+ expect_b_eq(op(bit1, bit2), fb_get(fb_result, nbits, i),
+ "difference at bit %zu", i);
+
+ /* Update the same way we did last time. */
+ if (i % 64 == 0) {
+ prng1 = prng_state_next_u64(prng1);
+ prng2 = prng_state_next_u64(prng2);
+ }
+ }
+
+ free(fb1);
+ free(fb2);
+ free(fb_result);
+}
+
+static bool
+binary_and(bool a, bool b) {
+ return a & b;
+}
+
+static void
+do_test_bit_and(size_t nbits) {
+ do_test_bit_op(nbits, &binary_and, &fb_bit_and);
+}
+
+TEST_BEGIN(test_bit_and) {
+#define NB(nbits) \
+ do_test_bit_and(nbits);
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
+static bool
+binary_or(bool a, bool b) {
+ return a | b;
+}
+
+static void
+do_test_bit_or(size_t nbits) {
+ do_test_bit_op(nbits, &binary_or, &fb_bit_or);
+}
+
+TEST_BEGIN(test_bit_or) {
+#define NB(nbits) \
+ do_test_bit_or(nbits);
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
+static bool
+binary_not(bool a, bool b) {
+ (void)b;
+ return !a;
+}
+
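+/*
+ * Adapts the unary fb_bit_not to the binary signature that do_test_bit_op
+ * expects; src2 is ignored.
+ */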
+static void
+fb_bit_not_shim(fb_group_t *dst, fb_group_t *src1, fb_group_t *src2,
+ size_t nbits) {
+ (void)src2;
+ fb_bit_not(dst, src1, nbits);
+}
+
+static void
+do_test_bit_not(size_t nbits) {
+ do_test_bit_op(nbits, &binary_not, &fb_bit_not_shim);
+}
+
+TEST_BEGIN(test_bit_not) {
+#define NB(nbits) \
+ do_test_bit_not(nbits);
+ NBITS_TAB
+#undef NB
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_fb_init,
+ test_get_set_unset,
+ test_search_simple,
+ test_search_exhaustive,
+ test_range_simple,
+ test_empty_full,
+ test_iter_range_simple,
+ test_iter_range_exhaustive,
+ test_count_contiguous_simple,
+ test_count_contiguous,
+ test_count_alternating,
+ test_bit_and,
+ test_bit_or,
+ test_bit_not);
+}
diff --git a/deps/jemalloc/test/unit/fork.c b/deps/jemalloc/test/unit/fork.c
index b1690750a..4137423f0 100644
--- a/deps/jemalloc/test/unit/fork.c
+++ b/deps/jemalloc/test/unit/fork.c
@@ -36,25 +36,25 @@ TEST_BEGIN(test_fork) {
/* Set up a manually managed arena for test. */
unsigned arena_ind;
size_t sz = sizeof(unsigned);
- assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
+ expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
0, "Unexpected mallctl() failure");
/* Migrate to the new arena. */
unsigned old_arena_ind;
sz = sizeof(old_arena_ind);
- assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
+ expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
(void *)&arena_ind, sizeof(arena_ind)), 0,
"Unexpected mallctl() failure");
p = malloc(1);
- assert_ptr_not_null(p, "Unexpected malloc() failure");
+ expect_ptr_not_null(p, "Unexpected malloc() failure");
pid = fork();
free(p);
p = malloc(64);
- assert_ptr_not_null(p, "Unexpected malloc() failure");
+ expect_ptr_not_null(p, "Unexpected malloc() failure");
free(p);
if (pid == -1) {
diff --git a/deps/jemalloc/test/unit/fxp.c b/deps/jemalloc/test/unit/fxp.c
new file mode 100644
index 000000000..27f109768
--- /dev/null
+++ b/deps/jemalloc/test/unit/fxp.c
@@ -0,0 +1,394 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/fxp.h"
+
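+/*
+ * Interpret the value as 16.16 fixed point: the high 16 bits are the integer
+ * part, the low 16 bits the fraction.
+ */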
+static double
+fxp2double(fxp_t a) {
+ double intpart = (double)(a >> 16);
+ double fracpart = (double)(a & ((1U << 16) - 1)) / (1U << 16);
+ return intpart + fracpart;
+}
+
+/* Is a close to b? */
+static bool
+double_close(double a, double b) {
+ /*
+ * Our implementation doesn't try for precision. Correspondingly, don't
+ * enforce it too strenuously here; accept values that are close in
+ * either relative or absolute terms.
+ */
+ return fabs(a - b) < 0.01 || fabs(a - b) / a < 0.01;
+}
+
+static bool
+fxp_close(fxp_t a, fxp_t b) {
+ return double_close(fxp2double(a), fxp2double(b));
+}
+
+static fxp_t
+xparse_fxp(const char *str) {
+ fxp_t result;
+ bool err = fxp_parse(&result, str, NULL);
+ assert_false(err, "Invalid fxp string: %s", str);
+ return result;
+}
+
+static void
+expect_parse_accurate(const char *str, const char *parse_str) {
+ double true_val = strtod(str, NULL);
+ fxp_t fxp_val;
+ char *end;
+ bool err = fxp_parse(&fxp_val, parse_str, &end);
+ expect_false(err, "Unexpected parse failure");
+ expect_ptr_eq(parse_str + strlen(str), end,
+ "Didn't parse whole string");
+ expect_true(double_close(fxp2double(fxp_val), true_val),
+ "Misparsed %s", str);
+}
+
+static void
+parse_valid_trial(const char *str) {
+ /* The value it parses should be correct. */
+ expect_parse_accurate(str, str);
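+ /* It should also parse correctly, and stop, with trailing text. */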
+ char buf[100];
+ snprintf(buf, sizeof(buf), "%swith_some_trailing_text", str);
+ expect_parse_accurate(str, buf);
+ snprintf(buf, sizeof(buf), "%s with a space", str);
+ expect_parse_accurate(str, buf);
+ snprintf(buf, sizeof(buf), "%s,in_a_malloc_conf_string:1", str);
+ expect_parse_accurate(str, buf);
+}
+
+TEST_BEGIN(test_parse_valid) {
+ parse_valid_trial("0");
+ parse_valid_trial("1");
+ parse_valid_trial("2");
+ parse_valid_trial("100");
+ parse_valid_trial("345");
+ parse_valid_trial("00000000123");
+ parse_valid_trial("00000000987");
+
+ parse_valid_trial("0.0");
+ parse_valid_trial("0.00000000000456456456");
+ parse_valid_trial("100.00000000000456456456");
+
+ parse_valid_trial("123.1");
+ parse_valid_trial("123.01");
+ parse_valid_trial("123.001");
+ parse_valid_trial("123.0001");
+ parse_valid_trial("123.00001");
+ parse_valid_trial("123.000001");
+ parse_valid_trial("123.0000001");
+
+ parse_valid_trial(".0");
+ parse_valid_trial(".1");
+ parse_valid_trial(".01");
+ parse_valid_trial(".001");
+ parse_valid_trial(".0001");
+ parse_valid_trial(".00001");
+ parse_valid_trial(".000001");
+
+ parse_valid_trial(".1");
+ parse_valid_trial(".10");
+ parse_valid_trial(".100");
+ parse_valid_trial(".1000");
+ parse_valid_trial(".100000");
+}
+TEST_END
+
+static void
+expect_parse_failure(const char *str) {
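+ /* Arbitrary sentinels; a failed parse must leave both untouched. */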
+ fxp_t result = FXP_INIT_INT(333);
+ char *end = (void *)0x123;
+ bool err = fxp_parse(&result, str, &end);
+ expect_true(err, "Expected a parse error on: %s", str);
+ expect_ptr_eq((void *)0x123, end,
+ "Parse error shouldn't change results");
+ expect_u32_eq(result, FXP_INIT_INT(333),
+ "Parse error shouldn't change results");
+}
+
+TEST_BEGIN(test_parse_invalid) {
+ expect_parse_failure("123.");
+ expect_parse_failure("3.a");
+ expect_parse_failure(".a");
+ expect_parse_failure("a.1");
+ expect_parse_failure("a");
+ /* Valid strings, but ones that overflow. */
+ expect_parse_failure("123456789");
+ expect_parse_failure("0000000123456789");
+ expect_parse_failure("1000000");
+}
+TEST_END
+
+static void
+expect_init_percent(unsigned percent, const char *str) {
+ fxp_t result_init = FXP_INIT_PERCENT(percent);
+ fxp_t result_parse = xparse_fxp(str);
+ expect_u32_eq(result_init, result_parse,
+ "Expect representations of FXP_INIT_PERCENT(%u) and "
+ "fxp_parse(\"%s\") to be equal; got %x and %x",
+ percent, str, result_init, result_parse);
+}
+
+/*
+ * Every other test uses either parsing or FXP_INIT_INT; it gets tested in those
+ * ways. We need a one-off for the percent-based initialization, though.
+ */
+TEST_BEGIN(test_init_percent) {
+ expect_init_percent(100, "1");
+ expect_init_percent(75, ".75");
+ expect_init_percent(1, ".01");
+ expect_init_percent(50, ".5");
+}
+TEST_END
+
+static void
+expect_add(const char *astr, const char *bstr, const char* resultstr) {
+ fxp_t a = xparse_fxp(astr);
+ fxp_t b = xparse_fxp(bstr);
+ fxp_t result = xparse_fxp(resultstr);
+ expect_true(fxp_close(fxp_add(a, b), result),
+ "Expected %s + %s == %s", astr, bstr, resultstr);
+}
+
+TEST_BEGIN(test_add_simple) {
+ expect_add("0", "0", "0");
+ expect_add("0", "1", "1");
+ expect_add("1", "1", "2");
+ expect_add("1.5", "1.5", "3");
+ expect_add("0.1", "0.1", "0.2");
+ expect_add("123", "456", "579");
+}
+TEST_END
+
+static void
+expect_sub(const char *astr, const char *bstr, const char* resultstr) {
+ fxp_t a = xparse_fxp(astr);
+ fxp_t b = xparse_fxp(bstr);
+ fxp_t result = xparse_fxp(resultstr);
+ expect_true(fxp_close(fxp_sub(a, b), result),
+ "Expected %s - %s == %s", astr, bstr, resultstr);
+}
+
+TEST_BEGIN(test_sub_simple) {
+ expect_sub("0", "0", "0");
+ expect_sub("1", "0", "1");
+ expect_sub("1", "1", "0");
+ expect_sub("3.5", "1.5", "2");
+ expect_sub("0.3", "0.1", "0.2");
+ expect_sub("456", "123", "333");
+}
+TEST_END
+
+static void
+expect_mul(const char *astr, const char *bstr, const char* resultstr) {
+ fxp_t a = xparse_fxp(astr);
+ fxp_t b = xparse_fxp(bstr);
+ fxp_t result = xparse_fxp(resultstr);
+ expect_true(fxp_close(fxp_mul(a, b), result),
+ "Expected %s * %s == %s", astr, bstr, resultstr);
+}
+
+TEST_BEGIN(test_mul_simple) {
+ expect_mul("0", "0", "0");
+ expect_mul("1", "0", "0");
+ expect_mul("1", "1", "1");
+ expect_mul("1.5", "1.5", "2.25");
+ expect_mul("100.0", "10", "1000");
+ expect_mul(".1", "10", "1");
+}
+TEST_END
+
+static void
+expect_div(const char *astr, const char *bstr, const char* resultstr) {
+ fxp_t a = xparse_fxp(astr);
+ fxp_t b = xparse_fxp(bstr);
+ fxp_t result = xparse_fxp(resultstr);
+ expect_true(fxp_close(fxp_div(a, b), result),
+ "Expected %s / %s == %s", astr, bstr, resultstr);
+}
+
+TEST_BEGIN(test_div_simple) {
+ expect_div("1", "1", "1");
+ expect_div("0", "1", "0");
+ expect_div("2", "1", "2");
+ expect_div("3", "2", "1.5");
+ expect_div("3", "1.5", "2");
+ expect_div("10", ".1", "100");
+ expect_div("123", "456", ".2697368421");
+}
+TEST_END
+
+static void
+expect_round(const char *str, uint32_t rounded_down, uint32_t rounded_nearest) {
+ fxp_t fxp = xparse_fxp(str);
+ uint32_t fxp_rounded_down = fxp_round_down(fxp);
+ uint32_t fxp_rounded_nearest = fxp_round_nearest(fxp);
+ expect_u32_eq(rounded_down, fxp_rounded_down,
+ "Mistake rounding %s down", str);
+ expect_u32_eq(rounded_nearest, fxp_rounded_nearest,
+ "Mistake rounding %s to nearest", str);
+}
+
+TEST_BEGIN(test_round_simple) {
+ expect_round("1.5", 1, 2);
+ expect_round("0", 0, 0);
+ expect_round("0.1", 0, 0);
+ expect_round("0.4", 0, 0);
+ expect_round("0.40000", 0, 0);
+ expect_round("0.5", 0, 1);
+ expect_round("0.6", 0, 1);
+ expect_round("123", 123, 123);
+ expect_round("123.4", 123, 123);
+ expect_round("123.5", 123, 124);
+}
+TEST_END
+
+static void
+expect_mul_frac(size_t a, const char *fracstr, size_t expected) {
+ fxp_t frac = xparse_fxp(fracstr);
+ size_t result = fxp_mul_frac(a, frac);
+ expect_true(double_close(expected, result),
+ "Expected %zu * %s == %zu (fracmul); got %zu", a, fracstr,
+ expected, result);
+}
+
+TEST_BEGIN(test_mul_frac_simple) {
+ expect_mul_frac(SIZE_MAX, "1.0", SIZE_MAX);
+ expect_mul_frac(SIZE_MAX, ".75", SIZE_MAX / 4 * 3);
+ expect_mul_frac(SIZE_MAX, ".5", SIZE_MAX / 2);
+ expect_mul_frac(SIZE_MAX, ".25", SIZE_MAX / 4);
+ expect_mul_frac(1U << 16, "1.0", 1U << 16);
+ expect_mul_frac(1U << 30, "0.5", 1U << 29);
+ expect_mul_frac(1U << 30, "0.25", 1U << 28);
+ expect_mul_frac(1U << 30, "0.125", 1U << 27);
+ expect_mul_frac((1U << 30) + 1, "0.125", 1U << 27);
+ expect_mul_frac(100, "0.25", 25);
+ expect_mul_frac(1000 * 1000, "0.001", 1000);
+}
+TEST_END
+
+static void
+expect_print(const char *str) {
+ fxp_t fxp = xparse_fxp(str);
+ char buf[FXP_BUF_SIZE];
+ fxp_print(fxp, buf);
+ expect_d_eq(0, strcmp(str, buf), "Couldn't round-trip print %s", str);
+}
+
+TEST_BEGIN(test_print_simple) {
+ expect_print("0.0");
+ expect_print("1.0");
+ expect_print("2.0");
+ expect_print("123.0");
+ /*
+ * We hit the possibility of roundoff errors whenever the fractional
+ * component isn't a round binary number; only check these here (we
+ * round-trip properly in the stress test).
+ */
+ expect_print("1.5");
+ expect_print("3.375");
+ expect_print("0.25");
+ expect_print("0.125");
+ /* 1 / 2**14 */
+ expect_print("0.00006103515625");
+}
+TEST_END
+
+TEST_BEGIN(test_stress) {
+ const char *numbers[] = {
+ "0.0", "0.1", "0.2", "0.3", "0.4",
+ "0.5", "0.6", "0.7", "0.8", "0.9",
+
+ "1.0", "1.1", "1.2", "1.3", "1.4",
+ "1.5", "1.6", "1.7", "1.8", "1.9",
+
+ "2.0", "2.1", "2.2", "2.3", "2.4",
+ "2.5", "2.6", "2.7", "2.8", "2.9",
+
+ "17.0", "17.1", "17.2", "17.3", "17.4",
+ "17.5", "17.6", "17.7", "17.8", "17.9",
+
+ "18.0", "18.1", "18.2", "18.3", "18.4",
+ "18.5", "18.6", "18.7", "18.8", "18.9",
+
+ "123.0", "123.1", "123.2", "123.3", "123.4",
+ "123.5", "123.6", "123.7", "123.8", "123.9",
+
+ "124.0", "124.1", "124.2", "124.3", "124.4",
+ "124.5", "124.6", "124.7", "124.8", "124.9",
+
+ "125.0", "125.1", "125.2", "125.3", "125.4",
+ "125.5", "125.6", "125.7", "125.8", "125.9"};
+ size_t numbers_len = sizeof(numbers)/sizeof(numbers[0]);
+ for (size_t i = 0; i < numbers_len; i++) {
+ fxp_t fxp_a = xparse_fxp(numbers[i]);
+ double double_a = strtod(numbers[i], NULL);
+
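+ /* Rounding should agree with the equivalent double computation. */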
+ uint32_t fxp_rounded_down = fxp_round_down(fxp_a);
+ uint32_t fxp_rounded_nearest = fxp_round_nearest(fxp_a);
+ uint32_t double_rounded_down = (uint32_t)double_a;
+ uint32_t double_rounded_nearest = (uint32_t)round(double_a);
+
+ expect_u32_eq(double_rounded_down, fxp_rounded_down,
+ "Incorrectly rounded down %s", numbers[i]);
+ expect_u32_eq(double_rounded_nearest, fxp_rounded_nearest,
+ "Incorrectly rounded-to-nearest %s", numbers[i]);
+
+ for (size_t j = 0; j < numbers_len; j++) {
+ fxp_t fxp_b = xparse_fxp(numbers[j]);
+ double double_b = strtod(numbers[j], NULL);
+
+ fxp_t fxp_sum = fxp_add(fxp_a, fxp_b);
+ double double_sum = double_a + double_b;
+ expect_true(
+ double_close(fxp2double(fxp_sum), double_sum),
+ "Miscomputed %s + %s", numbers[i], numbers[j]);
+
+ if (double_a > double_b) {
+ fxp_t fxp_diff = fxp_sub(fxp_a, fxp_b);
+ double double_diff = double_a - double_b;
+ expect_true(
+ double_close(fxp2double(fxp_diff),
+ double_diff),
+ "Miscomputed %s - %s", numbers[i],
+ numbers[j]);
+ }
+
+ fxp_t fxp_prod = fxp_mul(fxp_a, fxp_b);
+ double double_prod = double_a * double_b;
+ expect_true(
+ double_close(fxp2double(fxp_prod), double_prod),
+ "Miscomputed %s * %s", numbers[i], numbers[j]);
+
+ if (double_b != 0.0) {
+ fxp_t fxp_quot = fxp_div(fxp_a, fxp_b);
+ double double_quot = double_a / double_b;
+ expect_true(
+ double_close(fxp2double(fxp_quot),
+ double_quot),
+ "Miscomputed %s / %s", numbers[i],
+ numbers[j]);
+ }
+ }
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_parse_valid,
+ test_parse_invalid,
+ test_init_percent,
+ test_add_simple,
+ test_sub_simple,
+ test_mul_simple,
+ test_div_simple,
+ test_round_simple,
+ test_mul_frac_simple,
+ test_print_simple,
+ test_stress);
+}
diff --git a/deps/jemalloc/test/unit/hash.c b/deps/jemalloc/test/unit/hash.c
index 7cc034f8d..49f08238d 100644
--- a/deps/jemalloc/test/unit/hash.c
+++ b/deps/jemalloc/test/unit/hash.c
@@ -131,7 +131,7 @@ hash_variant_verify_key(hash_variant_t variant, uint8_t *key) {
default: not_reached();
}
- assert_u32_eq(computed, expected,
+ expect_u32_eq(computed, expected,
"Hash mismatch for %s(): expected %#x but got %#x",
hash_variant_string(variant), expected, computed);
}
diff --git a/deps/jemalloc/test/unit/hook.c b/deps/jemalloc/test/unit/hook.c
index 72fcc433c..16a6f1b03 100644
--- a/deps/jemalloc/test/unit/hook.c
+++ b/deps/jemalloc/test/unit/hook.c
@@ -70,10 +70,10 @@ set_args_raw(uintptr_t *args_raw, int nargs) {
}
static void
-assert_args_raw(uintptr_t *args_raw_expected, int nargs) {
+expect_args_raw(uintptr_t *args_raw_expected, int nargs) {
int cmp = memcmp(args_raw_expected, arg_args_raw,
sizeof(uintptr_t) * nargs);
- assert_d_eq(cmp, 0, "Raw args mismatch");
+ expect_d_eq(cmp, 0, "Raw args mismatch");
}
static void
@@ -132,34 +132,34 @@ TEST_BEGIN(test_hooks_basic) {
reset_args();
hook_invoke_alloc(hook_alloc_posix_memalign, (void *)222, 333,
args_raw);
- assert_ptr_eq(arg_extra, (void *)111, "Passed wrong user pointer");
- assert_d_eq((int)hook_alloc_posix_memalign, arg_type,
+ expect_ptr_eq(arg_extra, (void *)111, "Passed wrong user pointer");
+ expect_d_eq((int)hook_alloc_posix_memalign, arg_type,
"Passed wrong alloc type");
- assert_ptr_eq((void *)222, arg_result, "Passed wrong result address");
- assert_u64_eq(333, arg_result_raw, "Passed wrong result");
- assert_args_raw(args_raw, 3);
+ expect_ptr_eq((void *)222, arg_result, "Passed wrong result address");
+ expect_u64_eq(333, arg_result_raw, "Passed wrong result");
+ expect_args_raw(args_raw, 3);
/* Dalloc */
reset_args();
hook_invoke_dalloc(hook_dalloc_sdallocx, (void *)222, args_raw);
- assert_d_eq((int)hook_dalloc_sdallocx, arg_type,
+ expect_d_eq((int)hook_dalloc_sdallocx, arg_type,
"Passed wrong dalloc type");
- assert_ptr_eq((void *)111, arg_extra, "Passed wrong user pointer");
- assert_ptr_eq((void *)222, arg_address, "Passed wrong address");
- assert_args_raw(args_raw, 3);
+ expect_ptr_eq((void *)111, arg_extra, "Passed wrong user pointer");
+ expect_ptr_eq((void *)222, arg_address, "Passed wrong address");
+ expect_args_raw(args_raw, 3);
/* Expand */
reset_args();
hook_invoke_expand(hook_expand_xallocx, (void *)222, 333, 444, 555,
args_raw);
- assert_d_eq((int)hook_expand_xallocx, arg_type,
+ expect_d_eq((int)hook_expand_xallocx, arg_type,
"Passed wrong expand type");
- assert_ptr_eq((void *)111, arg_extra, "Passed wrong user pointer");
- assert_ptr_eq((void *)222, arg_address, "Passed wrong address");
- assert_zu_eq(333, arg_old_usize, "Passed wrong old usize");
- assert_zu_eq(444, arg_new_usize, "Passed wrong new usize");
- assert_zu_eq(555, arg_result_raw, "Passed wrong result");
- assert_args_raw(args_raw, 4);
+ expect_ptr_eq((void *)111, arg_extra, "Passed wrong user pointer");
+ expect_ptr_eq((void *)222, arg_address, "Passed wrong address");
+ expect_zu_eq(333, arg_old_usize, "Passed wrong old usize");
+ expect_zu_eq(444, arg_new_usize, "Passed wrong new usize");
+ expect_zu_eq(555, arg_result_raw, "Passed wrong result");
+ expect_args_raw(args_raw, 4);
hook_remove(TSDN_NULL, handle);
}
@@ -177,24 +177,24 @@ TEST_BEGIN(test_hooks_null) {
void *handle3 = hook_install(TSDN_NULL, &hooks3);
void *handle4 = hook_install(TSDN_NULL, &hooks4);
- assert_ptr_ne(handle1, NULL, "Hook installation failed");
- assert_ptr_ne(handle2, NULL, "Hook installation failed");
- assert_ptr_ne(handle3, NULL, "Hook installation failed");
- assert_ptr_ne(handle4, NULL, "Hook installation failed");
+ expect_ptr_ne(handle1, NULL, "Hook installation failed");
+ expect_ptr_ne(handle2, NULL, "Hook installation failed");
+ expect_ptr_ne(handle3, NULL, "Hook installation failed");
+ expect_ptr_ne(handle4, NULL, "Hook installation failed");
uintptr_t args_raw[4] = {10, 20, 30, 40};
call_count = 0;
hook_invoke_alloc(hook_alloc_malloc, NULL, 0, args_raw);
- assert_d_eq(call_count, 1, "Called wrong number of times");
+ expect_d_eq(call_count, 1, "Called wrong number of times");
call_count = 0;
hook_invoke_dalloc(hook_dalloc_free, NULL, args_raw);
- assert_d_eq(call_count, 1, "Called wrong number of times");
+ expect_d_eq(call_count, 1, "Called wrong number of times");
call_count = 0;
hook_invoke_expand(hook_expand_realloc, NULL, 0, 0, 0, args_raw);
- assert_d_eq(call_count, 1, "Called wrong number of times");
+ expect_d_eq(call_count, 1, "Called wrong number of times");
hook_remove(TSDN_NULL, handle1);
hook_remove(TSDN_NULL, handle2);
@@ -206,16 +206,16 @@ TEST_END
TEST_BEGIN(test_hooks_remove) {
hooks_t hooks = {&test_alloc_hook, NULL, NULL, NULL};
void *handle = hook_install(TSDN_NULL, &hooks);
- assert_ptr_ne(handle, NULL, "Hook installation failed");
+ expect_ptr_ne(handle, NULL, "Hook installation failed");
call_count = 0;
uintptr_t args_raw[4] = {10, 20, 30, 40};
hook_invoke_alloc(hook_alloc_malloc, NULL, 0, args_raw);
- assert_d_eq(call_count, 1, "Hook not invoked");
+ expect_d_eq(call_count, 1, "Hook not invoked");
call_count = 0;
hook_remove(TSDN_NULL, handle);
hook_invoke_alloc(hook_alloc_malloc, NULL, 0, NULL);
- assert_d_eq(call_count, 0, "Hook invoked after removal");
+ expect_d_eq(call_count, 0, "Hook invoked after removal");
}
TEST_END
@@ -224,7 +224,7 @@ TEST_BEGIN(test_hooks_alloc_simple) {
/* "Simple" in the sense that we're not in a realloc variant. */
hooks_t hooks = {&test_alloc_hook, NULL, NULL, (void *)123};
void *handle = hook_install(TSDN_NULL, &hooks);
- assert_ptr_ne(handle, NULL, "Hook installation failed");
+ expect_ptr_ne(handle, NULL, "Hook installation failed");
/* Stop malloc from being optimized away. */
volatile int err;
@@ -233,69 +233,69 @@ TEST_BEGIN(test_hooks_alloc_simple) {
/* malloc */
reset();
ptr = malloc(1);
- assert_d_eq(call_count, 1, "Hook not called");
- assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
- assert_d_eq(arg_type, (int)hook_alloc_malloc, "Wrong hook type");
- assert_ptr_eq(ptr, arg_result, "Wrong result");
- assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, (int)hook_alloc_malloc, "Wrong hook type");
+ expect_ptr_eq(ptr, arg_result, "Wrong result");
+ expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
"Wrong raw result");
- assert_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument");
+ expect_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument");
free(ptr);
/* posix_memalign */
reset();
err = posix_memalign((void **)&ptr, 1024, 1);
- assert_d_eq(call_count, 1, "Hook not called");
- assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
- assert_d_eq(arg_type, (int)hook_alloc_posix_memalign,
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, (int)hook_alloc_posix_memalign,
"Wrong hook type");
- assert_ptr_eq(ptr, arg_result, "Wrong result");
- assert_u64_eq((uintptr_t)err, (uintptr_t)arg_result_raw,
+ expect_ptr_eq(ptr, arg_result, "Wrong result");
+ expect_u64_eq((uintptr_t)err, (uintptr_t)arg_result_raw,
"Wrong raw result");
- assert_u64_eq((uintptr_t)&ptr, arg_args_raw[0], "Wrong argument");
- assert_u64_eq((uintptr_t)1024, arg_args_raw[1], "Wrong argument");
- assert_u64_eq((uintptr_t)1, arg_args_raw[2], "Wrong argument");
+ expect_u64_eq((uintptr_t)&ptr, arg_args_raw[0], "Wrong argument");
+ expect_u64_eq((uintptr_t)1024, arg_args_raw[1], "Wrong argument");
+ expect_u64_eq((uintptr_t)1, arg_args_raw[2], "Wrong argument");
free(ptr);
/* aligned_alloc */
reset();
ptr = aligned_alloc(1024, 1);
- assert_d_eq(call_count, 1, "Hook not called");
- assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
- assert_d_eq(arg_type, (int)hook_alloc_aligned_alloc,
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, (int)hook_alloc_aligned_alloc,
"Wrong hook type");
- assert_ptr_eq(ptr, arg_result, "Wrong result");
- assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
+ expect_ptr_eq(ptr, arg_result, "Wrong result");
+ expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
"Wrong raw result");
- assert_u64_eq((uintptr_t)1024, arg_args_raw[0], "Wrong argument");
- assert_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument");
+ expect_u64_eq((uintptr_t)1024, arg_args_raw[0], "Wrong argument");
+ expect_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument");
free(ptr);
/* calloc */
reset();
ptr = calloc(11, 13);
- assert_d_eq(call_count, 1, "Hook not called");
- assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
- assert_d_eq(arg_type, (int)hook_alloc_calloc, "Wrong hook type");
- assert_ptr_eq(ptr, arg_result, "Wrong result");
- assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, (int)hook_alloc_calloc, "Wrong hook type");
+ expect_ptr_eq(ptr, arg_result, "Wrong result");
+ expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
"Wrong raw result");
- assert_u64_eq((uintptr_t)11, arg_args_raw[0], "Wrong argument");
- assert_u64_eq((uintptr_t)13, arg_args_raw[1], "Wrong argument");
+ expect_u64_eq((uintptr_t)11, arg_args_raw[0], "Wrong argument");
+ expect_u64_eq((uintptr_t)13, arg_args_raw[1], "Wrong argument");
free(ptr);
/* memalign */
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
reset();
ptr = memalign(1024, 1);
- assert_d_eq(call_count, 1, "Hook not called");
- assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
- assert_d_eq(arg_type, (int)hook_alloc_memalign, "Wrong hook type");
- assert_ptr_eq(ptr, arg_result, "Wrong result");
- assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, (int)hook_alloc_memalign, "Wrong hook type");
+ expect_ptr_eq(ptr, arg_result, "Wrong result");
+ expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
"Wrong raw result");
- assert_u64_eq((uintptr_t)1024, arg_args_raw[0], "Wrong argument");
- assert_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument");
+ expect_u64_eq((uintptr_t)1024, arg_args_raw[0], "Wrong argument");
+ expect_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument");
free(ptr);
#endif /* JEMALLOC_OVERRIDE_MEMALIGN */
@@ -303,27 +303,27 @@ TEST_BEGIN(test_hooks_alloc_simple) {
#ifdef JEMALLOC_OVERRIDE_VALLOC
reset();
ptr = valloc(1);
- assert_d_eq(call_count, 1, "Hook not called");
- assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
- assert_d_eq(arg_type, (int)hook_alloc_valloc, "Wrong hook type");
- assert_ptr_eq(ptr, arg_result, "Wrong result");
- assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, (int)hook_alloc_valloc, "Wrong hook type");
+ expect_ptr_eq(ptr, arg_result, "Wrong result");
+ expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
"Wrong raw result");
- assert_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument");
+ expect_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument");
free(ptr);
#endif /* JEMALLOC_OVERRIDE_VALLOC */
/* mallocx */
reset();
ptr = mallocx(1, MALLOCX_LG_ALIGN(10));
- assert_d_eq(call_count, 1, "Hook not called");
- assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
- assert_d_eq(arg_type, (int)hook_alloc_mallocx, "Wrong hook type");
- assert_ptr_eq(ptr, arg_result, "Wrong result");
- assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, (int)hook_alloc_mallocx, "Wrong hook type");
+ expect_ptr_eq(ptr, arg_result, "Wrong result");
+ expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
"Wrong raw result");
- assert_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument");
- assert_u64_eq((uintptr_t)MALLOCX_LG_ALIGN(10), arg_args_raw[1],
+ expect_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument");
+ expect_u64_eq((uintptr_t)MALLOCX_LG_ALIGN(10), arg_args_raw[1],
"Wrong flags");
free(ptr);
@@ -335,7 +335,7 @@ TEST_BEGIN(test_hooks_dalloc_simple) {
/* "Simple" in the sense that we're not in a realloc variant. */
hooks_t hooks = {NULL, &test_dalloc_hook, NULL, (void *)123};
void *handle = hook_install(TSDN_NULL, &hooks);
- assert_ptr_ne(handle, NULL, "Hook installation failed");
+ expect_ptr_ne(handle, NULL, "Hook installation failed");
void *volatile ptr;
@@ -343,35 +343,35 @@ TEST_BEGIN(test_hooks_dalloc_simple) {
reset();
ptr = malloc(1);
free(ptr);
- assert_d_eq(call_count, 1, "Hook not called");
- assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
- assert_d_eq(arg_type, (int)hook_dalloc_free, "Wrong hook type");
- assert_ptr_eq(ptr, arg_address, "Wrong pointer freed");
- assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, (int)hook_dalloc_free, "Wrong hook type");
+ expect_ptr_eq(ptr, arg_address, "Wrong pointer freed");
+ expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
/* dallocx() */
reset();
ptr = malloc(1);
dallocx(ptr, MALLOCX_TCACHE_NONE);
- assert_d_eq(call_count, 1, "Hook not called");
- assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
- assert_d_eq(arg_type, (int)hook_dalloc_dallocx, "Wrong hook type");
- assert_ptr_eq(ptr, arg_address, "Wrong pointer freed");
- assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
- assert_u64_eq((uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[1],
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, (int)hook_dalloc_dallocx, "Wrong hook type");
+ expect_ptr_eq(ptr, arg_address, "Wrong pointer freed");
+ expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
+ expect_u64_eq((uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[1],
"Wrong raw arg");
/* sdallocx() */
reset();
ptr = malloc(1);
sdallocx(ptr, 1, MALLOCX_TCACHE_NONE);
- assert_d_eq(call_count, 1, "Hook not called");
- assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
- assert_d_eq(arg_type, (int)hook_dalloc_sdallocx, "Wrong hook type");
- assert_ptr_eq(ptr, arg_address, "Wrong pointer freed");
- assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
- assert_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong raw arg");
- assert_u64_eq((uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[2],
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, (int)hook_dalloc_sdallocx, "Wrong hook type");
+ expect_ptr_eq(ptr, arg_address, "Wrong pointer freed");
+ expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
+ expect_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong raw arg");
+ expect_u64_eq((uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[2],
"Wrong raw arg");
hook_remove(TSDN_NULL, handle);
@@ -382,7 +382,7 @@ TEST_BEGIN(test_hooks_expand_simple) {
/* "Simple" in the sense that we're not in a realloc variant. */
hooks_t hooks = {NULL, NULL, &test_expand_hook, (void *)123};
void *handle = hook_install(TSDN_NULL, &hooks);
- assert_ptr_ne(handle, NULL, "Hook installation failed");
+ expect_ptr_ne(handle, NULL, "Hook installation failed");
void *volatile ptr;
@@ -390,17 +390,17 @@ TEST_BEGIN(test_hooks_expand_simple) {
reset();
ptr = malloc(1);
size_t new_usize = xallocx(ptr, 100, 200, MALLOCX_TCACHE_NONE);
- assert_d_eq(call_count, 1, "Hook not called");
- assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
- assert_d_eq(arg_type, (int)hook_expand_xallocx, "Wrong hook type");
- assert_ptr_eq(ptr, arg_address, "Wrong pointer expanded");
- assert_u64_eq(arg_old_usize, nallocx(1, 0), "Wrong old usize");
- assert_u64_eq(arg_new_usize, sallocx(ptr, 0), "Wrong new usize");
- assert_u64_eq(new_usize, arg_result_raw, "Wrong result");
- assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong arg");
- assert_u64_eq(100, arg_args_raw[1], "Wrong arg");
- assert_u64_eq(200, arg_args_raw[2], "Wrong arg");
- assert_u64_eq(MALLOCX_TCACHE_NONE, arg_args_raw[3], "Wrong arg");
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, (int)hook_expand_xallocx, "Wrong hook type");
+ expect_ptr_eq(ptr, arg_address, "Wrong pointer expanded");
+ expect_u64_eq(arg_old_usize, nallocx(1, 0), "Wrong old usize");
+ expect_u64_eq(arg_new_usize, sallocx(ptr, 0), "Wrong new usize");
+ expect_u64_eq(new_usize, arg_result_raw, "Wrong result");
+ expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong arg");
+ expect_u64_eq(100, arg_args_raw[1], "Wrong arg");
+ expect_u64_eq(200, arg_args_raw[2], "Wrong arg");
+ expect_u64_eq(MALLOCX_TCACHE_NONE, arg_args_raw[3], "Wrong arg");
hook_remove(TSDN_NULL, handle);
}
@@ -410,45 +410,51 @@ TEST_BEGIN(test_hooks_realloc_as_malloc_or_free) {
hooks_t hooks = {&test_alloc_hook, &test_dalloc_hook,
&test_expand_hook, (void *)123};
void *handle = hook_install(TSDN_NULL, &hooks);
- assert_ptr_ne(handle, NULL, "Hook installation failed");
+ expect_ptr_ne(handle, NULL, "Hook installation failed");
void *volatile ptr;
/* realloc(NULL, size) as malloc */
reset();
ptr = realloc(NULL, 1);
- assert_d_eq(call_count, 1, "Hook not called");
- assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
- assert_d_eq(arg_type, (int)hook_alloc_realloc, "Wrong hook type");
- assert_ptr_eq(ptr, arg_result, "Wrong result");
- assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, (int)hook_alloc_realloc, "Wrong hook type");
+ expect_ptr_eq(ptr, arg_result, "Wrong result");
+ expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
"Wrong raw result");
- assert_u64_eq((uintptr_t)NULL, arg_args_raw[0], "Wrong argument");
- assert_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument");
+ expect_u64_eq((uintptr_t)NULL, arg_args_raw[0], "Wrong argument");
+ expect_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument");
free(ptr);
/* realloc(ptr, 0) as free */
- ptr = malloc(1);
- reset();
- realloc(ptr, 0);
- assert_d_eq(call_count, 1, "Hook not called");
- assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
- assert_d_eq(arg_type, (int)hook_dalloc_realloc, "Wrong hook type");
- assert_ptr_eq(ptr, arg_address, "Wrong pointer freed");
- assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
- assert_u64_eq((uintptr_t)0, arg_args_raw[1], "Wrong raw arg");
+ if (opt_zero_realloc_action == zero_realloc_action_free) {
+ ptr = malloc(1);
+ reset();
+ realloc(ptr, 0);
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, (int)hook_dalloc_realloc,
+ "Wrong hook type");
+ expect_ptr_eq(ptr, arg_address,
+ "Wrong pointer freed");
+ expect_u64_eq((uintptr_t)ptr, arg_args_raw[0],
+ "Wrong raw arg");
+ expect_u64_eq((uintptr_t)0, arg_args_raw[1],
+ "Wrong raw arg");
+ }
/* realloc(NULL, 0) as malloc(0) */
reset();
ptr = realloc(NULL, 0);
- assert_d_eq(call_count, 1, "Hook not called");
- assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
- assert_d_eq(arg_type, (int)hook_alloc_realloc, "Wrong hook type");
- assert_ptr_eq(ptr, arg_result, "Wrong result");
- assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, (int)hook_alloc_realloc, "Wrong hook type");
+ expect_ptr_eq(ptr, arg_result, "Wrong result");
+ expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
"Wrong raw result");
- assert_u64_eq((uintptr_t)NULL, arg_args_raw[0], "Wrong argument");
- assert_u64_eq((uintptr_t)0, arg_args_raw[1], "Wrong argument");
+ expect_u64_eq((uintptr_t)NULL, arg_args_raw[0], "Wrong argument");
+ expect_u64_eq((uintptr_t)0, arg_args_raw[1], "Wrong argument");
free(ptr);
hook_remove(TSDN_NULL, handle);
@@ -461,7 +467,7 @@ do_realloc_test(void *(*ralloc)(void *, size_t, int), int flags,
hooks_t hooks = {&test_alloc_hook, &test_dalloc_hook,
&test_expand_hook, (void *)123};
void *handle = hook_install(TSDN_NULL, &hooks);
- assert_ptr_ne(handle, NULL, "Hook installation failed");
+ expect_ptr_ne(handle, NULL, "Hook installation failed");
void *volatile ptr;
void *volatile ptr2;
@@ -470,16 +476,16 @@ do_realloc_test(void *(*ralloc)(void *, size_t, int), int flags,
ptr = malloc(129);
reset();
ptr2 = ralloc(ptr, 130, flags);
- assert_ptr_eq(ptr, ptr2, "Small realloc moved");
+ expect_ptr_eq(ptr, ptr2, "Small realloc moved");
- assert_d_eq(call_count, 1, "Hook not called");
- assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
- assert_d_eq(arg_type, expand_type, "Wrong hook type");
- assert_ptr_eq(ptr, arg_address, "Wrong address");
- assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, expand_type, "Wrong hook type");
+ expect_ptr_eq(ptr, arg_address, "Wrong address");
+ expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
"Wrong raw result");
- assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument");
- assert_u64_eq((uintptr_t)130, arg_args_raw[1], "Wrong argument");
+ expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument");
+ expect_u64_eq((uintptr_t)130, arg_args_raw[1], "Wrong argument");
free(ptr);
/*
@@ -493,19 +499,19 @@ do_realloc_test(void *(*ralloc)(void *, size_t, int), int flags,
ptr = ralloc(ptr2, 2 * 1024 * 1024, flags);
/* ptr is the new address, ptr2 is the old address. */
if (ptr == ptr2) {
- assert_d_eq(call_count, 1, "Hook not called");
- assert_d_eq(arg_type, expand_type, "Wrong hook type");
+ expect_d_eq(call_count, 1, "Hook not called");
+ expect_d_eq(arg_type, expand_type, "Wrong hook type");
} else {
- assert_d_eq(call_count, 2, "Wrong hooks called");
- assert_ptr_eq(ptr, arg_result, "Wrong address");
- assert_d_eq(arg_type, dalloc_type, "Wrong hook type");
+ expect_d_eq(call_count, 2, "Wrong hooks called");
+ expect_ptr_eq(ptr, arg_result, "Wrong address");
+ expect_d_eq(arg_type, dalloc_type, "Wrong hook type");
}
- assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
- assert_ptr_eq(ptr2, arg_address, "Wrong address");
- assert_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_ptr_eq(ptr2, arg_address, "Wrong address");
+ expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
"Wrong raw result");
- assert_u64_eq((uintptr_t)ptr2, arg_args_raw[0], "Wrong argument");
- assert_u64_eq((uintptr_t)2 * 1024 * 1024, arg_args_raw[1],
+ expect_u64_eq((uintptr_t)ptr2, arg_args_raw[0], "Wrong argument");
+ expect_u64_eq((uintptr_t)2 * 1024 * 1024, arg_args_raw[1],
"Wrong argument");
free(ptr);
@@ -513,34 +519,34 @@ do_realloc_test(void *(*ralloc)(void *, size_t, int), int flags,
ptr = malloc(8);
reset();
ptr2 = ralloc(ptr, 128, flags);
- assert_ptr_ne(ptr, ptr2, "Small realloc didn't move");
-
- assert_d_eq(call_count, 2, "Hook not called");
- assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
- assert_d_eq(arg_type, dalloc_type, "Wrong hook type");
- assert_ptr_eq(ptr, arg_address, "Wrong address");
- assert_ptr_eq(ptr2, arg_result, "Wrong address");
- assert_u64_eq((uintptr_t)ptr2, (uintptr_t)arg_result_raw,
+ expect_ptr_ne(ptr, ptr2, "Small realloc didn't move");
+
+ expect_d_eq(call_count, 2, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, dalloc_type, "Wrong hook type");
+ expect_ptr_eq(ptr, arg_address, "Wrong address");
+ expect_ptr_eq(ptr2, arg_result, "Wrong address");
+ expect_u64_eq((uintptr_t)ptr2, (uintptr_t)arg_result_raw,
"Wrong raw result");
- assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument");
- assert_u64_eq((uintptr_t)128, arg_args_raw[1], "Wrong argument");
+ expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument");
+ expect_u64_eq((uintptr_t)128, arg_args_raw[1], "Wrong argument");
free(ptr2);
/* Realloc with move, large. */
ptr = malloc(1);
reset();
ptr2 = ralloc(ptr, 2 * 1024 * 1024, flags);
- assert_ptr_ne(ptr, ptr2, "Large realloc didn't move");
-
- assert_d_eq(call_count, 2, "Hook not called");
- assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
- assert_d_eq(arg_type, dalloc_type, "Wrong hook type");
- assert_ptr_eq(ptr, arg_address, "Wrong address");
- assert_ptr_eq(ptr2, arg_result, "Wrong address");
- assert_u64_eq((uintptr_t)ptr2, (uintptr_t)arg_result_raw,
+ expect_ptr_ne(ptr, ptr2, "Large realloc didn't move");
+
+ expect_d_eq(call_count, 2, "Hook not called");
+ expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
+ expect_d_eq(arg_type, dalloc_type, "Wrong hook type");
+ expect_ptr_eq(ptr, arg_address, "Wrong address");
+ expect_ptr_eq(ptr2, arg_result, "Wrong address");
+ expect_u64_eq((uintptr_t)ptr2, (uintptr_t)arg_result_raw,
"Wrong raw result");
- assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument");
- assert_u64_eq((uintptr_t)2 * 1024 * 1024, arg_args_raw[1],
+ expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument");
+ expect_u64_eq((uintptr_t)2 * 1024 * 1024, arg_args_raw[1],
"Wrong argument");
free(ptr2);
diff --git a/deps/jemalloc/test/unit/hpa.c b/deps/jemalloc/test/unit/hpa.c
new file mode 100644
index 000000000..dfd57f39f
--- /dev/null
+++ b/deps/jemalloc/test/unit/hpa.c
@@ -0,0 +1,459 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/hpa.h"
+#include "jemalloc/internal/nstime.h"
+
+#define SHARD_IND 111
+
+#define ALLOC_MAX (HUGEPAGE / 4)
+
+typedef struct test_data_s test_data_t;
+struct test_data_s {
+ /*
+ * Must be the first member -- we convert back and forth between the
+ * test_data_t and the hpa_shard_t;
+ */
+ hpa_shard_t shard;
+ hpa_central_t central;
+ base_t *base;
+ edata_cache_t shard_edata_cache;
+
+ emap_t emap;
+};
+
+static hpa_shard_opts_t test_hpa_shard_opts_default = {
+ /* slab_max_alloc */
+ ALLOC_MAX,
+ /* hugification threshold */
+ HUGEPAGE,
+ /* dirty_mult */
+ FXP_INIT_PERCENT(25),
+ /* deferral_allowed */
+ false,
+ /* hugify_delay_ms */
+ 10 * 1000,
+};
+
+static hpa_shard_t *
+create_test_data(hpa_hooks_t *hooks, hpa_shard_opts_t *opts) {
+ bool err;
+ base_t *base = base_new(TSDN_NULL, /* ind */ SHARD_IND,
+ &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);
+ assert_ptr_not_null(base, "");
+
+ test_data_t *test_data = malloc(sizeof(test_data_t));
+ assert_ptr_not_null(test_data, "");
+
+ test_data->base = base;
+
+ err = edata_cache_init(&test_data->shard_edata_cache, base);
+ assert_false(err, "");
+
+ err = emap_init(&test_data->emap, test_data->base, /* zeroed */ false);
+ assert_false(err, "");
+
+ err = hpa_central_init(&test_data->central, test_data->base, hooks);
+ assert_false(err, "");
+
+ err = hpa_shard_init(&test_data->shard, &test_data->central,
+ &test_data->emap, test_data->base, &test_data->shard_edata_cache,
+ SHARD_IND, opts);
+ assert_false(err, "");
+
+ return (hpa_shard_t *)test_data;
+}
+
+static void
+destroy_test_data(hpa_shard_t *shard) {
+ test_data_t *test_data = (test_data_t *)shard;
+ base_delete(TSDN_NULL, test_data->base);
+ free(test_data);
+}
+
+TEST_BEGIN(test_alloc_max) {
+ test_skip_if(!hpa_supported());
+
+ hpa_shard_t *shard = create_test_data(&hpa_hooks_default,
+ &test_hpa_shard_opts_default);
+ tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
+
+ edata_t *edata;
+
+ /* Small max */
+ bool deferred_work_generated = false;
+ edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX, PAGE, false, false,
+ false, &deferred_work_generated);
+ expect_ptr_not_null(edata, "Allocation of small max failed");
+ edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX + PAGE, PAGE, false,
+ false, false, &deferred_work_generated);
+ expect_ptr_null(edata, "Allocation of larger than small max succeeded");
+
+ destroy_test_data(shard);
+}
+TEST_END
+
+typedef struct mem_contents_s mem_contents_t;
+struct mem_contents_s {
+ uintptr_t my_addr;
+ size_t size;
+ edata_t *my_edata;
+ rb_node(mem_contents_t) link;
+};
+
+static int
+mem_contents_cmp(const mem_contents_t *a, const mem_contents_t *b) {
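+ /* Branchless three-way compare by address: returns -1, 0, or 1. */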
+ return (a->my_addr > b->my_addr) - (a->my_addr < b->my_addr);
+}
+
+typedef rb_tree(mem_contents_t) mem_tree_t;
+rb_gen(static, mem_tree_, mem_tree_t, mem_contents_t, link,
+ mem_contents_cmp);
+
+static void
+node_assert_ordered(mem_contents_t *a, mem_contents_t *b) {
+ assert_zu_lt(a->my_addr, a->my_addr + a->size, "Overflow");
+ assert_zu_le(a->my_addr + a->size, b->my_addr, "");
+}
+
+static void
+node_check(mem_tree_t *tree, mem_contents_t *contents) {
+ edata_t *edata = contents->my_edata;
+ assert_ptr_eq(contents, (void *)contents->my_addr, "");
+ assert_ptr_eq(contents, edata_base_get(edata), "");
+ assert_zu_eq(contents->size, edata_size_get(edata), "");
+ assert_ptr_eq(contents->my_edata, edata, "");
+
+ mem_contents_t *next = mem_tree_next(tree, contents);
+ if (next != NULL) {
+ node_assert_ordered(contents, next);
+ }
+ mem_contents_t *prev = mem_tree_prev(tree, contents);
+ if (prev != NULL) {
+ node_assert_ordered(prev, contents);
+ }
+}
+
+static void
+node_insert(mem_tree_t *tree, edata_t *edata, size_t npages) {
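+ /* The tracking node lives at the base of the extent it describes. */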
+ mem_contents_t *contents = (mem_contents_t *)edata_base_get(edata);
+ contents->my_addr = (uintptr_t)edata_base_get(edata);
+ contents->size = edata_size_get(edata);
+ contents->my_edata = edata;
+ mem_tree_insert(tree, contents);
+ node_check(tree, contents);
+}
+
+static void
+node_remove(mem_tree_t *tree, edata_t *edata) {
+ mem_contents_t *contents = (mem_contents_t *)edata_base_get(edata);
+ node_check(tree, contents);
+ mem_tree_remove(tree, contents);
+}
+
+TEST_BEGIN(test_stress) {
+ test_skip_if(!hpa_supported());
+
+ hpa_shard_t *shard = create_test_data(&hpa_hooks_default,
+ &test_hpa_shard_opts_default);
+
+ tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
+
+ const size_t nlive_edatas_max = 500;
+ size_t nlive_edatas = 0;
+ edata_t **live_edatas = calloc(nlive_edatas_max, sizeof(edata_t *));
+ /*
+ * Nothing special about this constant; we're only fixing it for
+ * consistency across runs.
+ */
+ size_t prng_state = (size_t)0x76999ffb014df07c;
+
+ mem_tree_t tree;
+ mem_tree_new(&tree);
+
+ bool deferred_work_generated = false;
+
+ for (size_t i = 0; i < 100 * 1000; i++) {
+ size_t operation = prng_range_zu(&prng_state, 2);
+ if (operation == 0) {
+ /* Alloc */
+ if (nlive_edatas == nlive_edatas_max) {
+ continue;
+ }
+
+ /*
+ * We make sure to get an even balance of small and
+ * large allocations.
+ */
+ size_t npages_min = 1;
+ size_t npages_max = ALLOC_MAX / PAGE;
+ size_t npages = npages_min + prng_range_zu(&prng_state,
+ npages_max - npages_min);
+ edata_t *edata = pai_alloc(tsdn, &shard->pai,
+ npages * PAGE, PAGE, false, false, false,
+ &deferred_work_generated);
+ assert_ptr_not_null(edata,
+ "Unexpected allocation failure");
+ live_edatas[nlive_edatas] = edata;
+ nlive_edatas++;
+ node_insert(&tree, edata, npages);
+ } else {
+ /* Free. */
+ if (nlive_edatas == 0) {
+ continue;
+ }
+ size_t victim = prng_range_zu(&prng_state, nlive_edatas);
+ edata_t *to_free = live_edatas[victim];
+ live_edatas[victim] = live_edatas[nlive_edatas - 1];
+ nlive_edatas--;
+ node_remove(&tree, to_free);
+ pai_dalloc(tsdn, &shard->pai, to_free,
+ &deferred_work_generated);
+ }
+ }
+
+ size_t ntreenodes = 0;
+ for (mem_contents_t *contents = mem_tree_first(&tree); contents != NULL;
+ contents = mem_tree_next(&tree, contents)) {
+ ntreenodes++;
+ node_check(&tree, contents);
+ }
+ expect_zu_eq(ntreenodes, nlive_edatas, "");
+
+ /*
+ * Test hpa_shard_destroy, which requires as a precondition that all its
+ * extents have been deallocated.
+ */
+ for (size_t i = 0; i < nlive_edatas; i++) {
+ edata_t *to_free = live_edatas[i];
+ node_remove(&tree, to_free);
+ pai_dalloc(tsdn, &shard->pai, to_free,
+ &deferred_work_generated);
+ }
+ hpa_shard_destroy(tsdn, shard);
+
+ free(live_edatas);
+ destroy_test_data(shard);
+}
+TEST_END
+
+static void
+expect_contiguous(edata_t **edatas, size_t nedatas) {
+ for (size_t i = 0; i < nedatas; i++) {
+ size_t expected = (size_t)edata_base_get(edatas[0])
+ + i * PAGE;
+ expect_zu_eq(expected, (size_t)edata_base_get(edatas[i]),
+ "Mismatch at index %zu", i);
+ }
+}
+
+TEST_BEGIN(test_alloc_dalloc_batch) {
+ test_skip_if(!hpa_supported());
+
+ hpa_shard_t *shard = create_test_data(&hpa_hooks_default,
+ &test_hpa_shard_opts_default);
+ tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
+
+ bool deferred_work_generated = false;
+
+ enum {NALLOCS = 8};
+
+ edata_t *allocs[NALLOCS];
+ /*
+ * Allocate in a mix of ways: the first half via regular alloc, the
+ * second half via alloc_batch.
+ */
+ for (size_t i = 0; i < NALLOCS / 2; i++) {
+ allocs[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE,
+ /* zero */ false, /* guarded */ false,
+ /* frequent_reuse */ false, &deferred_work_generated);
+ expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
+ }
+ edata_list_active_t allocs_list;
+ edata_list_active_init(&allocs_list);
+ size_t nsuccess = pai_alloc_batch(tsdn, &shard->pai, PAGE, NALLOCS / 2,
+ &allocs_list, &deferred_work_generated);
+ expect_zu_eq(NALLOCS / 2, nsuccess, "Unexpected oom");
+ for (size_t i = NALLOCS / 2; i < NALLOCS; i++) {
+ allocs[i] = edata_list_active_first(&allocs_list);
+ edata_list_active_remove(&allocs_list, allocs[i]);
+ }
+
+ /*
+ * Should have allocated them contiguously, despite the differing
+ * methods used.
+ */
+ void *orig_base = edata_base_get(allocs[0]);
+ expect_contiguous(allocs, NALLOCS);
+
+ /*
+ * Batch dalloc the first half, individually deallocate the second half.
+ */
+ for (size_t i = 0; i < NALLOCS / 2; i++) {
+ edata_list_active_append(&allocs_list, allocs[i]);
+ }
+ pai_dalloc_batch(tsdn, &shard->pai, &allocs_list,
+ &deferred_work_generated);
+ for (size_t i = NALLOCS / 2; i < NALLOCS; i++) {
+ pai_dalloc(tsdn, &shard->pai, allocs[i],
+ &deferred_work_generated);
+ }
+
+ /* Reallocate (individually), and ensure reuse and contiguity. */
+ for (size_t i = 0; i < NALLOCS; i++) {
+ allocs[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE,
+ /* zero */ false, /* guarded */ false, /* frequent_reuse */
+ false, &deferred_work_generated);
+ expect_ptr_not_null(allocs[i], "Unexpected alloc failure.");
+ }
+ void *new_base = edata_base_get(allocs[0]);
+ expect_ptr_eq(orig_base, new_base,
+ "Failed to reuse the allocated memory.");
+ expect_contiguous(allocs, NALLOCS);
+
+ destroy_test_data(shard);
+}
+TEST_END
+
+static uintptr_t defer_bump_ptr = HUGEPAGE * 123;
+static void *
+defer_test_map(size_t size) {
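+ /* Fake map hook: hand out addresses from a bump pointer, no real mapping. */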
+ void *result = (void *)defer_bump_ptr;
+ defer_bump_ptr += size;
+ return result;
+}
+
+static void
+defer_test_unmap(void *ptr, size_t size) {
+ (void)ptr;
+ (void)size;
+}
+
+static bool defer_purge_called = false;
+static void
+defer_test_purge(void *ptr, size_t size) {
+ (void)ptr;
+ (void)size;
+ defer_purge_called = true;
+}
+
+static bool defer_hugify_called = false;
+static void
+defer_test_hugify(void *ptr, size_t size) {
+ defer_hugify_called = true;
+}
+
+static bool defer_dehugify_called = false;
+static void
+defer_test_dehugify(void *ptr, size_t size) {
+ defer_dehugify_called = true;
+}
+
+static nstime_t defer_curtime;
+static void
+defer_test_curtime(nstime_t *r_time, bool first_reading) {
+ *r_time = defer_curtime;
+}
+
+static uint64_t
+defer_test_ms_since(nstime_t *past_time) {
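+ /* Milliseconds elapsed between past_time and the fake current time. */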
+ return (nstime_ns(&defer_curtime) - nstime_ns(past_time)) / 1000 / 1000;
+}
+
+TEST_BEGIN(test_defer_time) {
+ test_skip_if(!hpa_supported());
+
+ hpa_hooks_t hooks;
+ hooks.map = &defer_test_map;
+ hooks.unmap = &defer_test_unmap;
+ hooks.purge = &defer_test_purge;
+ hooks.hugify = &defer_test_hugify;
+ hooks.dehugify = &defer_test_dehugify;
+ hooks.curtime = &defer_test_curtime;
+ hooks.ms_since = &defer_test_ms_since;
+
+ hpa_shard_opts_t opts = test_hpa_shard_opts_default;
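+ /* Let the shard defer hugify/purge work to hpa_shard_do_deferred_work(). */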
+ opts.deferral_allowed = true;
+
+ hpa_shard_t *shard = create_test_data(&hooks, &opts);
+
+ bool deferred_work_generated = false;
+
+ nstime_init(&defer_curtime, 0);
+ tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
+ edata_t *edatas[HUGEPAGE_PAGES];
+ for (int i = 0; i < (int)HUGEPAGE_PAGES; i++) {
+ edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
+ false, false, &deferred_work_generated);
+ expect_ptr_not_null(edatas[i], "Unexpected null edata");
+ }
+ hpa_shard_do_deferred_work(tsdn, shard);
+ expect_false(defer_hugify_called, "Hugified too early");
+
+ /* Hugification delay is set to 10 seconds in options. */
+ nstime_init2(&defer_curtime, 11, 0);
+ hpa_shard_do_deferred_work(tsdn, shard);
+ expect_true(defer_hugify_called, "Failed to hugify");
+
+ defer_hugify_called = false;
+
+ /* Purge. Recall that dirty_mult is .25. */
+ for (int i = 0; i < (int)HUGEPAGE_PAGES / 2; i++) {
+ pai_dalloc(tsdn, &shard->pai, edatas[i],
+ &deferred_work_generated);
+ }
+
+ hpa_shard_do_deferred_work(tsdn, shard);
+
+ expect_false(defer_hugify_called, "Hugified too early");
+ expect_true(defer_dehugify_called, "Should have dehugified");
+ expect_true(defer_purge_called, "Should have purged");
+ defer_hugify_called = false;
+ defer_dehugify_called = false;
+ defer_purge_called = false;
+
+ /*
+ * Refill the page. We now meet the hugification threshold; we should
+ * be marked for pending hugify.
+ */
+ for (int i = 0; i < (int)HUGEPAGE_PAGES / 2; i++) {
+ edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
+ false, false, &deferred_work_generated);
+ expect_ptr_not_null(edatas[i], "Unexpected null edata");
+ }
+ /*
+ * We would be ineligible for hugification, had we not already met the
+ * threshold before dipping below it.
+ */
+ pai_dalloc(tsdn, &shard->pai, edatas[0],
+ &deferred_work_generated);
+ /* Wait for the threshold again. */
+ nstime_init2(&defer_curtime, 22, 0);
+ hpa_shard_do_deferred_work(tsdn, shard);
+ expect_true(defer_hugify_called, "Hugified too early");
+ expect_false(defer_dehugify_called, "Unexpected dehugify");
+ expect_false(defer_purge_called, "Unexpected purge");
+
+ destroy_test_data(shard);
+}
+TEST_END
+
+int
+main(void) {
+ /*
+ * These trigger unused-function warnings on CI runs, even if declared
+ * with static inline.
+ */
+ (void)mem_tree_empty;
+ (void)mem_tree_last;
+ (void)mem_tree_search;
+ (void)mem_tree_nsearch;
+ (void)mem_tree_psearch;
+ (void)mem_tree_iter;
+ (void)mem_tree_reverse_iter;
+ (void)mem_tree_destroy;
+ return test_no_reentrancy(
+ test_alloc_max,
+ test_stress,
+ test_alloc_dalloc_batch,
+ test_defer_time);
+}
diff --git a/deps/jemalloc/test/unit/hpa_background_thread.c b/deps/jemalloc/test/unit/hpa_background_thread.c
new file mode 100644
index 000000000..81c256127
--- /dev/null
+++ b/deps/jemalloc/test/unit/hpa_background_thread.c
@@ -0,0 +1,188 @@
+#include "test/jemalloc_test.h"
+#include "test/sleep.h"
+
+static void
+sleep_for_background_thread_interval() {
+ /*
+ * The sleep interval set in our .sh file is 50ms, so the background
+ * thread will likely have run if we sleep for four times that.
+ */
+ sleep_ns(200 * 1000 * 1000);
+}
+
+static unsigned
+create_arena() {
+ unsigned arena_ind;
+ size_t sz;
+
+ sz = sizeof(unsigned);
+ expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 2),
+ 0, "Unexpected mallctl() failure");
+ return arena_ind;
+}
+
+static size_t
+get_empty_ndirty(unsigned arena_ind) {
+ int err;
+ size_t ndirty_huge;
+ size_t ndirty_nonhuge;
+ uint64_t epoch = 1;
+ size_t sz = sizeof(epoch);
+ err = je_mallctl("epoch", (void *)&epoch, &sz, (void *)&epoch,
+ sizeof(epoch));
+ expect_d_eq(0, err, "Unexpected mallctl() failure");
+
+ size_t mib[6];
+ size_t miblen = sizeof(mib)/sizeof(mib[0]);
+ err = mallctlnametomib(
+ "stats.arenas.0.hpa_shard.empty_slabs.ndirty_nonhuge", mib,
+ &miblen);
+ expect_d_eq(0, err, "Unexpected mallctlnametomib() failure");
+
+ sz = sizeof(ndirty_nonhuge);
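+ /* Point the "0" component of the mib at the arena under test. */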
+ mib[2] = arena_ind;
+ err = mallctlbymib(mib, miblen, &ndirty_nonhuge, &sz, NULL, 0);
+ expect_d_eq(0, err, "Unexpected mallctlbymib() failure");
+
+ err = mallctlnametomib(
+ "stats.arenas.0.hpa_shard.empty_slabs.ndirty_huge", mib,
+ &miblen);
+ expect_d_eq(0, err, "Unexpected mallctlnametomib() failure");
+
+ sz = sizeof(ndirty_huge);
+ mib[2] = arena_ind;
+ err = mallctlbymib(mib, miblen, &ndirty_huge, &sz, NULL, 0);
+ expect_d_eq(0, err, "Unexpected mallctlbymib() failure");
+
+ return ndirty_huge + ndirty_nonhuge;
+}
+
+static void
+set_background_thread_enabled(bool enabled) {
+ int err;
+ err = je_mallctl("background_thread", NULL, NULL, &enabled,
+ sizeof(enabled));
+ expect_d_eq(0, err, "Unexpected mallctl failure");
+}
+
+static void
+wait_until_thread_is_enabled(unsigned arena_id) {
+ tsd_t* tsd = tsd_fetch();
+
+ bool sleeping = false;
+ int iterations = 0;
+ do {
+ background_thread_info_t *info =
+ background_thread_info_get(arena_id);
+ malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+ sleeping = background_thread_indefinite_sleep(info);
+ iterations++;
+ assert_d_lt(iterations, UINT64_C(1000000),
+ "Waiting for a thread to start for too long");
+ } while (!sleeping);
+}
+
+static void
+expect_purging(unsigned arena_ind, bool expect_deferred) {
+ size_t empty_ndirty;
+
+ empty_ndirty = get_empty_ndirty(arena_ind);
+ expect_zu_eq(0, empty_ndirty, "Expected arena to start unused.");
+
+ /*
+ * It's possible that we get unlucky with our stats collection timing,
+ * and the background thread runs in between the deallocation and the
+ * stats collection. So we retry 10 times, and see if we *ever* see
+ * deferred reclamation.
+ */
+ bool observed_dirty_page = false;
+ for (int i = 0; i < 10; i++) {
+ void *ptr = mallocx(PAGE,
+ MALLOCX_TCACHE_NONE | MALLOCX_ARENA(arena_ind));
+ empty_ndirty = get_empty_ndirty(arena_ind);
+ expect_zu_eq(0, empty_ndirty, "All pages should be active");
+ dallocx(ptr, MALLOCX_TCACHE_NONE);
+ empty_ndirty = get_empty_ndirty(arena_ind);
+ if (expect_deferred) {
+ expect_true(empty_ndirty == 0 || empty_ndirty == 1 ||
+ opt_prof, "Unexpected extra dirty page count: %zu",
+ empty_ndirty);
+ } else {
+ assert_zu_eq(0, empty_ndirty,
+ "Saw dirty pages without deferred purging");
+ }
+ if (empty_ndirty > 0) {
+ observed_dirty_page = true;
+ break;
+ }
+ }
+ expect_b_eq(expect_deferred, observed_dirty_page, "");
+
+ /*
+ * Under high concurrency / heavy test load (e.g. using run_test.sh),
+ * the background thread may not get scheduled for a longer period of
+ * time. Retry 100 times max before bailing out.
+ */
+ unsigned retry = 0;
+ while ((empty_ndirty = get_empty_ndirty(arena_ind)) > 0 &&
+ expect_deferred && (retry++ < 100)) {
+ sleep_for_background_thread_interval();
+ }
+
+ expect_zu_eq(0, empty_ndirty, "Should have seen a background purge");
+}
+
+TEST_BEGIN(test_hpa_background_thread_purges) {
+ test_skip_if(!config_stats);
+ test_skip_if(!hpa_supported());
+ test_skip_if(!have_background_thread);
+ /* Skip since guarded pages cannot be allocated from hpa. */
+ test_skip_if(san_guard_enabled());
+
+ unsigned arena_ind = create_arena();
+ /*
+ * Our .sh sets dirty mult to 0, so all dirty pages should get purged
+ * any time any thread frees.
+ */
+ expect_purging(arena_ind, /* expect_deferred */ true);
+}
+TEST_END
+
+TEST_BEGIN(test_hpa_background_thread_enable_disable) {
+ test_skip_if(!config_stats);
+ test_skip_if(!hpa_supported());
+ test_skip_if(!have_background_thread);
+ /* Skip since guarded pages cannot be allocated from hpa. */
+ test_skip_if(san_guard_enabled());
+
+ unsigned arena_ind = create_arena();
+
+ set_background_thread_enabled(false);
+ expect_purging(arena_ind, false);
+
+ set_background_thread_enabled(true);
+ wait_until_thread_is_enabled(arena_ind);
+ expect_purging(arena_ind, true);
+}
+TEST_END
+
+int
+main(void) {
+ /*
+ * OK, this is a sort of nasty hack. We don't want to add *another*
+ * config option for HPA (the intent is that it becomes available on
+ * more platforms over time, and we're trying to prune back config
+ * options generally). But we'll get initialization errors on other
+ * platforms if we set hpa:true in the MALLOC_CONF (even if we set
+ * abort_conf:false as well). So we reach into the internals and set
+ * them directly, but only if we know that we're actually going to do
+ * something nontrivial in the tests.
+ */
+ if (config_stats && hpa_supported() && have_background_thread) {
+ opt_hpa = true;
+ opt_background_thread = true;
+ }
+ return test_no_reentrancy(
+ test_hpa_background_thread_purges,
+ test_hpa_background_thread_enable_disable);
+}
diff --git a/deps/jemalloc/test/unit/hpa_background_thread.sh b/deps/jemalloc/test/unit/hpa_background_thread.sh
new file mode 100644
index 000000000..65a56a089
--- /dev/null
+++ b/deps/jemalloc/test/unit/hpa_background_thread.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+export MALLOC_CONF="hpa_dirty_mult:0,hpa_min_purge_interval_ms:50,hpa_sec_nshards:0"
+
diff --git a/deps/jemalloc/test/unit/hpdata.c b/deps/jemalloc/test/unit/hpdata.c
new file mode 100644
index 000000000..288e71d45
--- /dev/null
+++ b/deps/jemalloc/test/unit/hpdata.c
@@ -0,0 +1,244 @@
+#include "test/jemalloc_test.h"
+
+#define HPDATA_ADDR ((void *)(10 * HUGEPAGE))
+#define HPDATA_AGE 123
+
+TEST_BEGIN(test_reserve_alloc) {
+ hpdata_t hpdata;
+ hpdata_init(&hpdata, HPDATA_ADDR, HPDATA_AGE);
+
+ /* Allocating a page at a time, we should do first fit. */
+ for (size_t i = 0; i < HUGEPAGE_PAGES; i++) {
+ expect_true(hpdata_consistent(&hpdata), "");
+ expect_zu_eq(HUGEPAGE_PAGES - i,
+ hpdata_longest_free_range_get(&hpdata), "");
+ void *alloc = hpdata_reserve_alloc(&hpdata, PAGE);
+ expect_ptr_eq((char *)HPDATA_ADDR + i * PAGE, alloc, "");
+ expect_true(hpdata_consistent(&hpdata), "");
+ }
+ expect_true(hpdata_consistent(&hpdata), "");
+ expect_zu_eq(0, hpdata_longest_free_range_get(&hpdata), "");
+
+ /*
+ * Build up a bigger free-range, 2 pages at a time, until we've got 6
+ * adjacent free pages total. Pages 8-13 should be unreserved after
+ * this.
+ */
+ hpdata_unreserve(&hpdata, (char *)HPDATA_ADDR + 10 * PAGE, 2 * PAGE);
+ expect_true(hpdata_consistent(&hpdata), "");
+ expect_zu_eq(2, hpdata_longest_free_range_get(&hpdata), "");
+
+ hpdata_unreserve(&hpdata, (char *)HPDATA_ADDR + 12 * PAGE, 2 * PAGE);
+ expect_true(hpdata_consistent(&hpdata), "");
+ expect_zu_eq(4, hpdata_longest_free_range_get(&hpdata), "");
+
+ hpdata_unreserve(&hpdata, (char *)HPDATA_ADDR + 8 * PAGE, 2 * PAGE);
+ expect_true(hpdata_consistent(&hpdata), "");
+ expect_zu_eq(6, hpdata_longest_free_range_get(&hpdata), "");
+
+ /*
+ * Leave page 14 reserved, but free page 15 (this tests the case where
+ * unreserving combines two ranges).
+ */
+ hpdata_unreserve(&hpdata, (char *)HPDATA_ADDR + 15 * PAGE, PAGE);
+ /*
+ * Longest free range shouldn't change; we've got a free range of size
+ * 6, then a reserved page, then another free range.
+ */
+ expect_true(hpdata_consistent(&hpdata), "");
+ expect_zu_eq(6, hpdata_longest_free_range_get(&hpdata), "");
+
+ /* After freeing page 14, the two ranges get combined. */
+ hpdata_unreserve(&hpdata, (char *)HPDATA_ADDR + 14 * PAGE, PAGE);
+ expect_true(hpdata_consistent(&hpdata), "");
+ expect_zu_eq(8, hpdata_longest_free_range_get(&hpdata), "");
+}
+TEST_END
+
+TEST_BEGIN(test_purge_simple) {
+ hpdata_t hpdata;
+ hpdata_init(&hpdata, HPDATA_ADDR, HPDATA_AGE);
+
+ void *alloc = hpdata_reserve_alloc(&hpdata, HUGEPAGE_PAGES / 2 * PAGE);
+ expect_ptr_eq(alloc, HPDATA_ADDR, "");
+
+ /* Create HUGEPAGE_PAGES / 4 dirty inactive pages at the beginning. */
+ hpdata_unreserve(&hpdata, alloc, HUGEPAGE_PAGES / 4 * PAGE);
+
+ expect_zu_eq(hpdata_ntouched_get(&hpdata), HUGEPAGE_PAGES / 2, "");
+
+ hpdata_alloc_allowed_set(&hpdata, false);
+ hpdata_purge_state_t purge_state;
+ size_t to_purge = hpdata_purge_begin(&hpdata, &purge_state);
+ expect_zu_eq(HUGEPAGE_PAGES / 4, to_purge, "");
+
+ void *purge_addr;
+ size_t purge_size;
+ bool got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
+ &purge_size);
+ expect_true(got_result, "");
+ expect_ptr_eq(HPDATA_ADDR, purge_addr, "");
+ expect_zu_eq(HUGEPAGE_PAGES / 4 * PAGE, purge_size, "");
+
+ got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
+ &purge_size);
+ expect_false(got_result, "Unexpected additional purge range: "
+ "extent at %p of size %zu", purge_addr, purge_size);
+
+ hpdata_purge_end(&hpdata, &purge_state);
+ expect_zu_eq(hpdata_ntouched_get(&hpdata), HUGEPAGE_PAGES / 4, "");
+}
+TEST_END
+
+/*
+ * We only test intervening dallocs, not intervening allocs; the latter
+ * are disallowed as a purging precondition (because they would interfere
+ * with purging across a retained extent, which saves a purge call).
+ */
+TEST_BEGIN(test_purge_intervening_dalloc) {
+ hpdata_t hpdata;
+ hpdata_init(&hpdata, HPDATA_ADDR, HPDATA_AGE);
+
+ /* Allocate the first 3/4 of the pages. */
+ void *alloc = hpdata_reserve_alloc(&hpdata, 3 * HUGEPAGE_PAGES / 4 * PAGE);
+ expect_ptr_eq(alloc, HPDATA_ADDR, "");
+
+ /* Free the first 1/4 and the third 1/4 of the pages. */
+ hpdata_unreserve(&hpdata, alloc, HUGEPAGE_PAGES / 4 * PAGE);
+ hpdata_unreserve(&hpdata,
+ (void *)((uintptr_t)alloc + 2 * HUGEPAGE_PAGES / 4 * PAGE),
+ HUGEPAGE_PAGES / 4 * PAGE);
+
+ expect_zu_eq(hpdata_ntouched_get(&hpdata), 3 * HUGEPAGE_PAGES / 4, "");
+
+ hpdata_alloc_allowed_set(&hpdata, false);
+ hpdata_purge_state_t purge_state;
+ size_t to_purge = hpdata_purge_begin(&hpdata, &purge_state);
+ expect_zu_eq(HUGEPAGE_PAGES / 2, to_purge, "");
+
+ void *purge_addr;
+ size_t purge_size;
+ /* First purge. */
+ bool got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
+ &purge_size);
+ expect_true(got_result, "");
+ expect_ptr_eq(HPDATA_ADDR, purge_addr, "");
+ expect_zu_eq(HUGEPAGE_PAGES / 4 * PAGE, purge_size, "");
+
+ /* Deallocate the second 1/4 before the second purge occurs. */
+ hpdata_unreserve(&hpdata,
+ (void *)((uintptr_t)alloc + 1 * HUGEPAGE_PAGES / 4 * PAGE),
+ HUGEPAGE_PAGES / 4 * PAGE);
+
+ /* Now continue purging. */
+ got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
+ &purge_size);
+ expect_true(got_result, "");
+ expect_ptr_eq(
+ (void *)((uintptr_t)alloc + 2 * HUGEPAGE_PAGES / 4 * PAGE),
+ purge_addr, "");
+ expect_zu_ge(HUGEPAGE_PAGES / 4 * PAGE, purge_size, "");
+
+ got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
+ &purge_size);
+ expect_false(got_result, "Unexpected additional purge range: "
+ "extent at %p of size %zu", purge_addr, purge_size);
+
+ hpdata_purge_end(&hpdata, &purge_state);
+
+ expect_zu_eq(hpdata_ntouched_get(&hpdata), HUGEPAGE_PAGES / 4, "");
+}
+TEST_END
+
+TEST_BEGIN(test_purge_over_retained) {
+ void *purge_addr;
+ size_t purge_size;
+
+ hpdata_t hpdata;
+ hpdata_init(&hpdata, HPDATA_ADDR, HPDATA_AGE);
+
+ /* Allocate the first 3/4 of the pages. */
+ void *alloc = hpdata_reserve_alloc(&hpdata, 3 * HUGEPAGE_PAGES / 4 * PAGE);
+ expect_ptr_eq(alloc, HPDATA_ADDR, "");
+
+ /* Free the second quarter. */
+ void *second_quarter =
+ (void *)((uintptr_t)alloc + HUGEPAGE_PAGES / 4 * PAGE);
+ hpdata_unreserve(&hpdata, second_quarter, HUGEPAGE_PAGES / 4 * PAGE);
+
+ expect_zu_eq(hpdata_ntouched_get(&hpdata), 3 * HUGEPAGE_PAGES / 4, "");
+
+ /* Purge the second quarter. */
+ hpdata_alloc_allowed_set(&hpdata, false);
+ hpdata_purge_state_t purge_state;
+ size_t to_purge_dirty = hpdata_purge_begin(&hpdata, &purge_state);
+ expect_zu_eq(HUGEPAGE_PAGES / 4, to_purge_dirty, "");
+
+ bool got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
+ &purge_size);
+ expect_true(got_result, "");
+ expect_ptr_eq(second_quarter, purge_addr, "");
+ expect_zu_eq(HUGEPAGE_PAGES / 4 * PAGE, purge_size, "");
+
+ got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
+ &purge_size);
+ expect_false(got_result, "Unexpected additional purge range: "
+ "extent at %p of size %zu", purge_addr, purge_size);
+ hpdata_purge_end(&hpdata, &purge_state);
+
+ expect_zu_eq(hpdata_ntouched_get(&hpdata), HUGEPAGE_PAGES / 2, "");
+
+ /* Free the first and third quarter. */
+ hpdata_unreserve(&hpdata, HPDATA_ADDR, HUGEPAGE_PAGES / 4 * PAGE);
+ hpdata_unreserve(&hpdata,
+ (void *)((uintptr_t)alloc + 2 * HUGEPAGE_PAGES / 4 * PAGE),
+ HUGEPAGE_PAGES / 4 * PAGE);
+
+ /*
+ * Purge again. The second quarter is retained, so we can safely
+ * re-purge it. We expect a single purge of 3/4 of the hugepage,
+ * purging half its pages.
+ */
+ to_purge_dirty = hpdata_purge_begin(&hpdata, &purge_state);
+ expect_zu_eq(HUGEPAGE_PAGES / 2, to_purge_dirty, "");
+
+ got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
+ &purge_size);
+ expect_true(got_result, "");
+ expect_ptr_eq(HPDATA_ADDR, purge_addr, "");
+ expect_zu_eq(3 * HUGEPAGE_PAGES / 4 * PAGE, purge_size, "");
+
+ got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
+ &purge_size);
+ expect_false(got_result, "Unexpected additional purge range: "
+ "extent at %p of size %zu", purge_addr, purge_size);
+ hpdata_purge_end(&hpdata, &purge_state);
+
+ expect_zu_eq(hpdata_ntouched_get(&hpdata), 0, "");
+}
+TEST_END
+
+TEST_BEGIN(test_hugify) {
+ hpdata_t hpdata;
+ hpdata_init(&hpdata, HPDATA_ADDR, HPDATA_AGE);
+
+ void *alloc = hpdata_reserve_alloc(&hpdata, HUGEPAGE / 2);
+ expect_ptr_eq(alloc, HPDATA_ADDR, "");
+
+ expect_zu_eq(HUGEPAGE_PAGES / 2, hpdata_ntouched_get(&hpdata), "");
+
+ hpdata_hugify(&hpdata);
+
+ /* Hugifying should have increased the dirty page count. */
+ expect_zu_eq(HUGEPAGE_PAGES, hpdata_ntouched_get(&hpdata), "");
+}
+TEST_END
+
+int main(void) {
+ return test_no_reentrancy(
+ test_reserve_alloc,
+ test_purge_simple,
+ test_purge_intervening_dalloc,
+ test_purge_over_retained,
+ test_hugify);
+}
diff --git a/deps/jemalloc/test/unit/huge.c b/deps/jemalloc/test/unit/huge.c
index ab72cf007..ec64e5002 100644
--- a/deps/jemalloc/test/unit/huge.c
+++ b/deps/jemalloc/test/unit/huge.c
@@ -11,37 +11,37 @@ TEST_BEGIN(huge_bind_thread) {
size_t sz = sizeof(unsigned);
/* Bind to a manual arena. */
- assert_d_eq(mallctl("arenas.create", &arena1, &sz, NULL, 0), 0,
+ expect_d_eq(mallctl("arenas.create", &arena1, &sz, NULL, 0), 0,
"Failed to create arena");
- assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena1,
+ expect_d_eq(mallctl("thread.arena", NULL, NULL, &arena1,
sizeof(arena1)), 0, "Fail to bind thread");
void *ptr = mallocx(HUGE_SZ, 0);
- assert_ptr_not_null(ptr, "Fail to allocate huge size");
- assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
+ expect_ptr_not_null(ptr, "Fail to allocate huge size");
+ expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
sizeof(ptr)), 0, "Unexpected mallctl() failure");
- assert_u_eq(arena1, arena2, "Wrong arena used after binding");
+ expect_u_eq(arena1, arena2, "Wrong arena used after binding");
dallocx(ptr, 0);
/* Switch back to arena 0. */
test_skip_if(have_percpu_arena &&
PERCPU_ARENA_ENABLED(opt_percpu_arena));
arena2 = 0;
- assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena2,
+ expect_d_eq(mallctl("thread.arena", NULL, NULL, &arena2,
sizeof(arena2)), 0, "Fail to bind thread");
ptr = mallocx(SMALL_SZ, MALLOCX_TCACHE_NONE);
- assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
+ expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
sizeof(ptr)), 0, "Unexpected mallctl() failure");
- assert_u_eq(arena2, 0, "Wrong arena used after binding");
+ expect_u_eq(arena2, 0, "Wrong arena used after binding");
dallocx(ptr, MALLOCX_TCACHE_NONE);
/* Then huge allocation should use the huge arena. */
ptr = mallocx(HUGE_SZ, 0);
- assert_ptr_not_null(ptr, "Fail to allocate huge size");
- assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
+ expect_ptr_not_null(ptr, "Fail to allocate huge size");
+ expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
sizeof(ptr)), 0, "Unexpected mallctl() failure");
- assert_u_ne(arena2, 0, "Wrong arena used after binding");
- assert_u_ne(arena1, arena2, "Wrong arena used after binding");
+ expect_u_ne(arena2, 0, "Wrong arena used after binding");
+ expect_u_ne(arena1, arena2, "Wrong arena used after binding");
dallocx(ptr, 0);
}
TEST_END
@@ -50,22 +50,22 @@ TEST_BEGIN(huge_mallocx) {
unsigned arena1, arena2;
size_t sz = sizeof(unsigned);
- assert_d_eq(mallctl("arenas.create", &arena1, &sz, NULL, 0), 0,
+ expect_d_eq(mallctl("arenas.create", &arena1, &sz, NULL, 0), 0,
"Failed to create arena");
void *huge = mallocx(HUGE_SZ, MALLOCX_ARENA(arena1));
- assert_ptr_not_null(huge, "Fail to allocate huge size");
- assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &huge,
+ expect_ptr_not_null(huge, "Fail to allocate huge size");
+ expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &huge,
sizeof(huge)), 0, "Unexpected mallctl() failure");
- assert_u_eq(arena1, arena2, "Wrong arena used for mallocx");
+ expect_u_eq(arena1, arena2, "Wrong arena used for mallocx");
dallocx(huge, MALLOCX_ARENA(arena1));
void *huge2 = mallocx(HUGE_SZ, 0);
- assert_ptr_not_null(huge, "Fail to allocate huge size");
- assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &huge2,
+ expect_ptr_not_null(huge, "Fail to allocate huge size");
+ expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &huge2,
sizeof(huge2)), 0, "Unexpected mallctl() failure");
- assert_u_ne(arena1, arena2,
+ expect_u_ne(arena1, arena2,
"Huge allocation should not come from the manual arena.");
- assert_u_ne(arena2, 0,
+ expect_u_ne(arena2, 0,
"Huge allocation should not come from the arena 0.");
dallocx(huge2, 0);
}
@@ -75,25 +75,25 @@ TEST_BEGIN(huge_allocation) {
unsigned arena1, arena2;
void *ptr = mallocx(HUGE_SZ, 0);
- assert_ptr_not_null(ptr, "Fail to allocate huge size");
+ expect_ptr_not_null(ptr, "Fail to allocate huge size");
size_t sz = sizeof(unsigned);
- assert_d_eq(mallctl("arenas.lookup", &arena1, &sz, &ptr, sizeof(ptr)),
+ expect_d_eq(mallctl("arenas.lookup", &arena1, &sz, &ptr, sizeof(ptr)),
0, "Unexpected mallctl() failure");
- assert_u_gt(arena1, 0, "Huge allocation should not come from arena 0");
+ expect_u_gt(arena1, 0, "Huge allocation should not come from arena 0");
dallocx(ptr, 0);
ptr = mallocx(HUGE_SZ >> 1, 0);
- assert_ptr_not_null(ptr, "Fail to allocate half huge size");
- assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
+ expect_ptr_not_null(ptr, "Fail to allocate half huge size");
+ expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
sizeof(ptr)), 0, "Unexpected mallctl() failure");
- assert_u_ne(arena1, arena2, "Wrong arena used for half huge");
+ expect_u_ne(arena1, arena2, "Wrong arena used for half huge");
dallocx(ptr, 0);
ptr = mallocx(SMALL_SZ, MALLOCX_TCACHE_NONE);
- assert_ptr_not_null(ptr, "Fail to allocate small size");
- assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
+ expect_ptr_not_null(ptr, "Fail to allocate small size");
+ expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
sizeof(ptr)), 0, "Unexpected mallctl() failure");
- assert_u_ne(arena1, arena2,
+ expect_u_ne(arena1, arena2,
"Huge and small should be from different arenas");
dallocx(ptr, 0);
}
diff --git a/deps/jemalloc/test/unit/extent_util.c b/deps/jemalloc/test/unit/inspect.c
index 97e55f0f6..fe59e5971 100644
--- a/deps/jemalloc/test/unit/extent_util.c
+++ b/deps/jemalloc/test/unit/inspect.c
@@ -18,8 +18,8 @@
assert_d_eq(mallctl("experimental.utilization." node, \
out, &out_sz, in, in_sz), 0, \
"Should return 0 on correct arguments"); \
- assert_zu_eq(out_sz, out_sz_ref, "incorrect output size"); \
- assert_d_ne(memcmp(out, out_ref, out_sz_ref), 0, \
+ expect_zu_eq(out_sz, out_sz_ref, "incorrect output size"); \
+ expect_d_ne(memcmp(out, out_ref, out_sz_ref), 0, \
"Output content should be changed"); \
} while (0)
@@ -83,62 +83,67 @@ TEST_BEGIN(test_query) {
/* Examine output for valid call */
TEST_UTIL_VALID("query");
- assert_zu_le(sz, SIZE_READ(out),
+ expect_zu_le(sz, SIZE_READ(out),
"Extent size should be at least allocation size");
- assert_zu_eq(SIZE_READ(out) & (PAGE - 1), 0,
+ expect_zu_eq(SIZE_READ(out) & (PAGE - 1), 0,
"Extent size should be a multiple of page size");
- if (sz <= SC_SMALL_MAXCLASS) {
- assert_zu_le(NFREE_READ(out), NREGS_READ(out),
+
+ /*
+ * We don't do much bin checking if prof is on, since profiling
+ * can produce extents that are for small size classes but not
+ * slabs, which interferes with things like region counts.
+ */
+ if (!opt_prof && sz <= SC_SMALL_MAXCLASS) {
+ expect_zu_le(NFREE_READ(out), NREGS_READ(out),
"Extent free count exceeded region count");
- assert_zu_le(NREGS_READ(out), SIZE_READ(out),
+ expect_zu_le(NREGS_READ(out), SIZE_READ(out),
"Extent region count exceeded size");
- assert_zu_ne(NREGS_READ(out), 0,
+ expect_zu_ne(NREGS_READ(out), 0,
"Extent region count must be positive");
- assert_ptr_not_null(SLABCUR_READ(out),
- "Current slab is null");
- assert_true(NFREE_READ(out) == 0
- || SLABCUR_READ(out) <= p,
+ expect_true(NFREE_READ(out) == 0 || (SLABCUR_READ(out)
+ != NULL && SLABCUR_READ(out) <= p),
"Allocation should follow first fit principle");
+
if (config_stats) {
- assert_zu_le(BIN_NFREE_READ(out),
+ expect_zu_le(BIN_NFREE_READ(out),
BIN_NREGS_READ(out),
"Bin free count exceeded region count");
- assert_zu_ne(BIN_NREGS_READ(out), 0,
+ expect_zu_ne(BIN_NREGS_READ(out), 0,
"Bin region count must be positive");
- assert_zu_le(NFREE_READ(out),
+ expect_zu_le(NFREE_READ(out),
BIN_NFREE_READ(out),
"Extent free count exceeded bin free count");
- assert_zu_le(NREGS_READ(out),
+ expect_zu_le(NREGS_READ(out),
BIN_NREGS_READ(out),
"Extent region count exceeded "
"bin region count");
- assert_zu_eq(BIN_NREGS_READ(out)
+ expect_zu_eq(BIN_NREGS_READ(out)
% NREGS_READ(out), 0,
"Bin region count isn't a multiple of "
"extent region count");
- assert_zu_le(
+ expect_zu_le(
BIN_NFREE_READ(out) - NFREE_READ(out),
BIN_NREGS_READ(out) - NREGS_READ(out),
"Free count in other extents in the bin "
"exceeded region count in other extents "
"in the bin");
- assert_zu_le(NREGS_READ(out) - NFREE_READ(out),
+ expect_zu_le(NREGS_READ(out) - NFREE_READ(out),
BIN_NREGS_READ(out) - BIN_NFREE_READ(out),
"Extent utilized count exceeded "
"bin utilized count");
}
- } else {
- assert_zu_eq(NFREE_READ(out), 0,
+ } else if (sz > SC_SMALL_MAXCLASS) {
+ expect_zu_eq(NFREE_READ(out), 0,
"Extent free count should be zero");
- assert_zu_eq(NREGS_READ(out), 1,
+ expect_zu_eq(NREGS_READ(out), 1,
"Extent region count should be one");
- assert_ptr_null(SLABCUR_READ(out),
+ expect_ptr_null(SLABCUR_READ(out),
"Current slab must be null for large size classes");
if (config_stats) {
- assert_zu_eq(BIN_NFREE_READ(out), 0,
+ expect_zu_eq(BIN_NFREE_READ(out), 0,
"Bin free count must be zero for "
"large sizes");
- assert_zu_eq(BIN_NREGS_READ(out), 0,
+ expect_zu_eq(BIN_NREGS_READ(out), 0,
"Bin region count must be zero for "
"large sizes");
}
@@ -212,21 +217,25 @@ TEST_BEGIN(test_batch) {
out_sz_ref = out_sz /= 2;
in_sz /= 2;
TEST_UTIL_BATCH_VALID;
- assert_zu_le(sz, SIZE_READ(out, 0),
+ expect_zu_le(sz, SIZE_READ(out, 0),
"Extent size should be at least allocation size");
- assert_zu_eq(SIZE_READ(out, 0) & (PAGE - 1), 0,
+ expect_zu_eq(SIZE_READ(out, 0) & (PAGE - 1), 0,
"Extent size should be a multiple of page size");
- if (sz <= SC_SMALL_MAXCLASS) {
- assert_zu_le(NFREE_READ(out, 0), NREGS_READ(out, 0),
+ /*
+ * See the corresponding comment in test_query; profiling breaks
+ * our slab count expectations.
+ */
+ if (sz <= SC_SMALL_MAXCLASS && !opt_prof) {
+ expect_zu_le(NFREE_READ(out, 0), NREGS_READ(out, 0),
"Extent free count exceeded region count");
- assert_zu_le(NREGS_READ(out, 0), SIZE_READ(out, 0),
+ expect_zu_le(NREGS_READ(out, 0), SIZE_READ(out, 0),
"Extent region count exceeded size");
- assert_zu_ne(NREGS_READ(out, 0), 0,
+ expect_zu_ne(NREGS_READ(out, 0), 0,
"Extent region count must be positive");
- } else {
- assert_zu_eq(NFREE_READ(out, 0), 0,
+ } else if (sz > SC_SMALL_MAXCLASS) {
+ expect_zu_eq(NFREE_READ(out, 0), 0,
"Extent free count should be zero");
- assert_zu_eq(NREGS_READ(out, 0), 1,
+ expect_zu_eq(NREGS_READ(out, 0), 1,
"Extent region count should be one");
}
TEST_EQUAL_REF(1,
@@ -238,15 +247,15 @@ TEST_BEGIN(test_batch) {
TEST_UTIL_BATCH_VALID;
TEST_EQUAL_REF(0, "Statistics should be stable across calls");
if (sz <= SC_SMALL_MAXCLASS) {
- assert_zu_le(NFREE_READ(out, 1), NREGS_READ(out, 1),
+ expect_zu_le(NFREE_READ(out, 1), NREGS_READ(out, 1),
"Extent free count exceeded region count");
} else {
- assert_zu_eq(NFREE_READ(out, 0), 0,
+ expect_zu_eq(NFREE_READ(out, 0), 0,
"Extent free count should be zero");
}
- assert_zu_eq(NREGS_READ(out, 0), NREGS_READ(out, 1),
+ expect_zu_eq(NREGS_READ(out, 0), NREGS_READ(out, 1),
"Extent region count should be same for same region size");
- assert_zu_eq(SIZE_READ(out, 0), SIZE_READ(out, 1),
+ expect_zu_eq(SIZE_READ(out, 0), SIZE_READ(out, 1),
"Extent size should be same for same region size");
#undef SIZE_READ
@@ -263,7 +272,7 @@ TEST_END
int
main(void) {
- assert_zu_lt(SC_SMALL_MAXCLASS, TEST_MAX_SIZE,
+ assert_zu_lt(SC_SMALL_MAXCLASS + 100000, TEST_MAX_SIZE,
"Test case cannot cover large classes");
return test(test_query, test_batch);
}
diff --git a/deps/jemalloc/test/unit/inspect.sh b/deps/jemalloc/test/unit/inspect.sh
new file mode 100644
index 000000000..352d11076
--- /dev/null
+++ b/deps/jemalloc/test/unit/inspect.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:false"
+fi
diff --git a/deps/jemalloc/test/unit/junk.c b/deps/jemalloc/test/unit/junk.c
index 57e3ad431..543092f1d 100644
--- a/deps/jemalloc/test/unit/junk.c
+++ b/deps/jemalloc/test/unit/junk.c
@@ -1,141 +1,195 @@
#include "test/jemalloc_test.h"
-#include "jemalloc/internal/util.h"
-
-static arena_dalloc_junk_small_t *arena_dalloc_junk_small_orig;
-static large_dalloc_junk_t *large_dalloc_junk_orig;
-static large_dalloc_maybe_junk_t *large_dalloc_maybe_junk_orig;
-static void *watch_for_junking;
-static bool saw_junking;
+#define arraylen(arr) (sizeof(arr)/sizeof(arr[0]))
+static size_t ptr_ind;
+static void *volatile ptrs[100];
+static void *last_junked_ptr;
+static size_t last_junked_usize;
static void
-watch_junking(void *p) {
- watch_for_junking = p;
- saw_junking = false;
+reset() {
+ ptr_ind = 0;
+ last_junked_ptr = NULL;
+ last_junked_usize = 0;
}
static void
-arena_dalloc_junk_small_intercept(void *ptr, const bin_info_t *bin_info) {
- size_t i;
-
- arena_dalloc_junk_small_orig(ptr, bin_info);
- for (i = 0; i < bin_info->reg_size; i++) {
- assert_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK,
- "Missing junk fill for byte %zu/%zu of deallocated region",
- i, bin_info->reg_size);
- }
- if (ptr == watch_for_junking) {
- saw_junking = true;
- }
+test_junk(void *ptr, size_t usize) {
+ last_junked_ptr = ptr;
+ last_junked_usize = usize;
}
static void
-large_dalloc_junk_intercept(void *ptr, size_t usize) {
- size_t i;
-
- large_dalloc_junk_orig(ptr, usize);
- for (i = 0; i < usize; i++) {
- assert_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK,
- "Missing junk fill for byte %zu/%zu of deallocated region",
- i, usize);
+do_allocs(size_t size, bool zero, size_t lg_align) {
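+ /*
+ * Record each allocation and, when junk-on-alloc applies, check that
+ * the junk callback saw exactly this pointer and its usable size.
+ */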
+#define JUNK_ALLOC(...) \
+ do { \
+ assert(ptr_ind + 1 < arraylen(ptrs)); \
+ void *ptr = __VA_ARGS__; \
+ assert_ptr_not_null(ptr, ""); \
+ ptrs[ptr_ind++] = ptr; \
+ if (opt_junk_alloc && !zero) { \
+ expect_ptr_eq(ptr, last_junked_ptr, ""); \
+ expect_zu_eq(last_junked_usize, \
+ TEST_MALLOC_SIZE(ptr), ""); \
+ } \
+ } while (0)
+ if (!zero && lg_align == 0) {
+ JUNK_ALLOC(malloc(size));
}
- if (ptr == watch_for_junking) {
- saw_junking = true;
+ if (!zero) {
+ JUNK_ALLOC(aligned_alloc(1 << lg_align, size));
}
-}
-
-static void
-large_dalloc_maybe_junk_intercept(void *ptr, size_t usize) {
- large_dalloc_maybe_junk_orig(ptr, usize);
- if (ptr == watch_for_junking) {
- saw_junking = true;
+#ifdef JEMALLOC_OVERRIDE_MEMALIGN
+ if (!zero) {
+ JUNK_ALLOC(je_memalign(1 << lg_align, size));
}
-}
-
-static void
-test_junk(size_t sz_min, size_t sz_max) {
- uint8_t *s;
- size_t sz_prev, sz, i;
-
- if (opt_junk_free) {
- arena_dalloc_junk_small_orig = arena_dalloc_junk_small;
- arena_dalloc_junk_small = arena_dalloc_junk_small_intercept;
- large_dalloc_junk_orig = large_dalloc_junk;
- large_dalloc_junk = large_dalloc_junk_intercept;
- large_dalloc_maybe_junk_orig = large_dalloc_maybe_junk;
- large_dalloc_maybe_junk = large_dalloc_maybe_junk_intercept;
+#endif
+#ifdef JEMALLOC_OVERRIDE_VALLOC
+ if (!zero && lg_align == LG_PAGE) {
+ JUNK_ALLOC(je_valloc(size));
}
+#endif
+ int zero_flag = zero ? MALLOCX_ZERO : 0;
+ JUNK_ALLOC(mallocx(size, zero_flag | MALLOCX_LG_ALIGN(lg_align)));
+ JUNK_ALLOC(mallocx(size, zero_flag | MALLOCX_LG_ALIGN(lg_align)
+ | MALLOCX_TCACHE_NONE));
+ if (lg_align >= LG_SIZEOF_PTR) {
+ void *memalign_result;
+ int err = posix_memalign(&memalign_result, (1 << lg_align),
+ size);
+ assert_d_eq(err, 0, "");
+ JUNK_ALLOC(memalign_result);
+ }
+}
- sz_prev = 0;
- s = (uint8_t *)mallocx(sz_min, 0);
- assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
-
- for (sz = sallocx(s, 0); sz <= sz_max;
- sz_prev = sz, sz = sallocx(s, 0)) {
- if (sz_prev > 0) {
- assert_u_eq(s[0], 'a',
- "Previously allocated byte %zu/%zu is corrupted",
- ZU(0), sz_prev);
- assert_u_eq(s[sz_prev-1], 'a',
- "Previously allocated byte %zu/%zu is corrupted",
- sz_prev-1, sz_prev);
- }
-
- for (i = sz_prev; i < sz; i++) {
- if (opt_junk_alloc) {
- assert_u_eq(s[i], JEMALLOC_ALLOC_JUNK,
- "Newly allocated byte %zu/%zu isn't "
- "junk-filled", i, sz);
- }
- s[i] = 'a';
- }
-
- if (xallocx(s, sz+1, 0, 0) == sz) {
- uint8_t *t;
- watch_junking(s);
- t = (uint8_t *)rallocx(s, sz+1, 0);
- assert_ptr_not_null((void *)t,
- "Unexpected rallocx() failure");
- assert_zu_ge(sallocx(t, 0), sz+1,
- "Unexpectedly small rallocx() result");
- if (!background_thread_enabled()) {
- assert_ptr_ne(s, t,
- "Unexpected in-place rallocx()");
- assert_true(!opt_junk_free || saw_junking,
- "Expected region of size %zu to be "
- "junk-filled", sz);
+TEST_BEGIN(test_junk_alloc_free) {
+ bool zerovals[] = {false, true};
+ size_t sizevals[] = {
+ 1, 8, 100, 1000, 100*1000
+ /*
+ * Memory allocation failure is a real possibility in 32-bit mode.
+ * Rather than try to check in the face of resource exhaustion, we just
+ * rely more on the 64-bit tests. This is a little bit white-box-y in
+ * the sense that this is only a good test strategy if we know that the
+ * junk pathways don't interact with the allocation selection
+ * mechanisms; but this is in fact the case.
+ */
+#if LG_SIZEOF_PTR == 3
+ , 10 * 1000 * 1000
+#endif
+ };
+ size_t lg_alignvals[] = {
+ 0, 4, 10, 15, 16, LG_PAGE
+#if LG_SIZEOF_PTR == 3
+ , 20, 24
+#endif
+ };
+
+#define JUNK_FREE(...) \
+ do { \
+ do_allocs(size, zero, lg_align); \
+ for (size_t n = 0; n < ptr_ind; n++) { \
+ void *ptr = ptrs[n]; \
+ __VA_ARGS__; \
+ if (opt_junk_free) { \
+ assert_ptr_eq(ptr, last_junked_ptr, \
+ ""); \
+ assert_zu_eq(usize, last_junked_usize, \
+ ""); \
+ } \
+ reset(); \
+ } \
+ } while (0)
+ for (size_t i = 0; i < arraylen(zerovals); i++) {
+ for (size_t j = 0; j < arraylen(sizevals); j++) {
+ for (size_t k = 0; k < arraylen(lg_alignvals); k++) {
+ bool zero = zerovals[i];
+ size_t size = sizevals[j];
+ size_t lg_align = lg_alignvals[k];
+ size_t usize = nallocx(size,
+ MALLOCX_LG_ALIGN(lg_align));
+
+ JUNK_FREE(free(ptr));
+ JUNK_FREE(dallocx(ptr, 0));
+ JUNK_FREE(dallocx(ptr, MALLOCX_TCACHE_NONE));
+ JUNK_FREE(dallocx(ptr, MALLOCX_LG_ALIGN(
+ lg_align)));
+ JUNK_FREE(sdallocx(ptr, usize, MALLOCX_LG_ALIGN(
+ lg_align)));
+ JUNK_FREE(sdallocx(ptr, usize,
+ MALLOCX_TCACHE_NONE | MALLOCX_LG_ALIGN(lg_align)));
+ if (opt_zero_realloc_action
+ == zero_realloc_action_free) {
+ JUNK_FREE(realloc(ptr, 0));
+ }
}
- s = t;
}
}
-
- watch_junking(s);
- dallocx(s, 0);
- assert_true(!opt_junk_free || saw_junking,
- "Expected region of size %zu to be junk-filled", sz);
-
- if (opt_junk_free) {
- arena_dalloc_junk_small = arena_dalloc_junk_small_orig;
- large_dalloc_junk = large_dalloc_junk_orig;
- large_dalloc_maybe_junk = large_dalloc_maybe_junk_orig;
- }
-}
-
-TEST_BEGIN(test_junk_small) {
- test_skip_if(!config_fill);
- test_junk(1, SC_SMALL_MAXCLASS - 1);
}
TEST_END
-TEST_BEGIN(test_junk_large) {
- test_skip_if(!config_fill);
- test_junk(SC_SMALL_MAXCLASS + 1, (1U << (SC_LG_LARGE_MINCLASS + 1)));
+TEST_BEGIN(test_realloc_expand) {
+ char *volatile ptr;
+ char *volatile expanded;
+
+ test_skip_if(!opt_junk_alloc);
+
+ /* Realloc */
+ ptr = malloc(SC_SMALL_MAXCLASS);
+ expanded = realloc(ptr, SC_LARGE_MINCLASS);
+ expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], "");
+ expect_zu_eq(last_junked_usize,
+ SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, "");
+ free(expanded);
+
+ /* rallocx(..., 0) */
+ ptr = malloc(SC_SMALL_MAXCLASS);
+ expanded = rallocx(ptr, SC_LARGE_MINCLASS, 0);
+ expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], "");
+ expect_zu_eq(last_junked_usize,
+ SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, "");
+ free(expanded);
+
+ /* rallocx(..., nonzero) */
+ ptr = malloc(SC_SMALL_MAXCLASS);
+ expanded = rallocx(ptr, SC_LARGE_MINCLASS, MALLOCX_TCACHE_NONE);
+ expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], "");
+ expect_zu_eq(last_junked_usize,
+ SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, "");
+ free(expanded);
+
+ /* rallocx(..., MALLOCX_ZERO) */
+ ptr = malloc(SC_SMALL_MAXCLASS);
+ last_junked_ptr = (void *)-1;
+ last_junked_usize = (size_t)-1;
+ expanded = rallocx(ptr, SC_LARGE_MINCLASS, MALLOCX_ZERO);
+ expect_ptr_eq(last_junked_ptr, (void *)-1, "");
+ expect_zu_eq(last_junked_usize, (size_t)-1, "");
+ free(expanded);
+
+ /*
+ * Unfortunately, testing xallocx reliably is difficult to do portably
+ * (since allocations can be expanded / not expanded differently on
+ * different platforms). We rely on manual inspection there -- the
+ * xallocx pathway is easy to inspect, though.
+ *
+ * Likewise, we don't test the shrinking pathways. It's difficult to do
+ * so consistently (because of the risk of split failure or memory
+ * exhaustion, in which case no junking should happen). This is fine
+ * -- junking is a best-effort debug mechanism in the first place.
+ */
}
TEST_END
int
main(void) {
- return test(
- test_junk_small,
- test_junk_large);
+ junk_alloc_callback = &test_junk;
+ junk_free_callback = &test_junk;
+ /*
+ * We check the last pointer junked. If a reentrant call happens, that
+ * might be an internal allocation.
+ */
+ return test_no_reentrancy(
+ test_junk_alloc_free,
+ test_realloc_expand);
}
diff --git a/deps/jemalloc/test/unit/log.c b/deps/jemalloc/test/unit/log.c
index a52bd737d..c09b58969 100644
--- a/deps/jemalloc/test/unit/log.c
+++ b/deps/jemalloc/test/unit/log.c
@@ -3,12 +3,17 @@
#include "jemalloc/internal/log.h"
static void
+update_log_var_names(const char *names) {
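+ /* Bounded copy of the test's filter string into the global name buffer. */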
+ strncpy(log_var_names, names, sizeof(log_var_names));
+}
+
+static void
expect_no_logging(const char *names) {
log_var_t log_l1 = LOG_VAR_INIT("l1");
log_var_t log_l2 = LOG_VAR_INIT("l2");
log_var_t log_l2_a = LOG_VAR_INIT("l2.a");
- strcpy(log_var_names, names);
+ update_log_var_names(names);
int count = 0;
@@ -25,7 +30,7 @@ expect_no_logging(const char *names) {
count++;
log_do_end(log_l2_a)
}
- assert_d_eq(count, 0, "Disabled logging not ignored!");
+ expect_d_eq(count, 0, "Disabled logging not ignored!");
}
TEST_BEGIN(test_log_disabled) {
@@ -50,25 +55,25 @@ TEST_BEGIN(test_log_enabled_direct) {
int count;
count = 0;
- strcpy(log_var_names, "l1");
+ update_log_var_names("l1");
for (int i = 0; i < 10; i++) {
log_do_begin(log_l1)
count++;
log_do_end(log_l1)
}
- assert_d_eq(count, 10, "Mis-logged!");
+ expect_d_eq(count, 10, "Mis-logged!");
count = 0;
- strcpy(log_var_names, "l1.a");
+ update_log_var_names("l1.a");
for (int i = 0; i < 10; i++) {
log_do_begin(log_l1_a)
count++;
log_do_end(log_l1_a)
}
- assert_d_eq(count, 10, "Mis-logged!");
+ expect_d_eq(count, 10, "Mis-logged!");
count = 0;
- strcpy(log_var_names, "l1.a|abc|l2|def");
+ update_log_var_names("l1.a|abc|l2|def");
for (int i = 0; i < 10; i++) {
log_do_begin(log_l1_a)
count++;
@@ -78,14 +83,14 @@ TEST_BEGIN(test_log_enabled_direct) {
count++;
log_do_end(log_l2)
}
- assert_d_eq(count, 20, "Mis-logged!");
+ expect_d_eq(count, 20, "Mis-logged!");
}
TEST_END
TEST_BEGIN(test_log_enabled_indirect) {
test_skip_if(!config_log);
atomic_store_b(&log_init_done, true, ATOMIC_RELAXED);
- strcpy(log_var_names, "l0|l1|abc|l2.b|def");
+ update_log_var_names("l0|l1|abc|l2.b|def");
/* On. */
log_var_t log_l1 = LOG_VAR_INIT("l1");
@@ -128,14 +133,14 @@ TEST_BEGIN(test_log_enabled_indirect) {
log_do_end(log_l2_b_b)
}
- assert_d_eq(count, 40, "Mis-logged!");
+ expect_d_eq(count, 40, "Mis-logged!");
}
TEST_END
TEST_BEGIN(test_log_enabled_global) {
test_skip_if(!config_log);
atomic_store_b(&log_init_done, true, ATOMIC_RELAXED);
- strcpy(log_var_names, "abc|.|def");
+ update_log_var_names("abc|.|def");
log_var_t log_l1 = LOG_VAR_INIT("l1");
log_var_t log_l2_a_a = LOG_VAR_INIT("l2.a.a");
@@ -150,7 +155,7 @@ TEST_BEGIN(test_log_enabled_global) {
count++;
log_do_end(log_l2_a_a)
}
- assert_d_eq(count, 20, "Mis-logged!");
+ expect_d_eq(count, 20, "Mis-logged!");
}
TEST_END
@@ -166,7 +171,7 @@ TEST_BEGIN(test_logs_if_no_init) {
count++;
log_do_end(l)
}
- assert_d_eq(count, 0, "Logging shouldn't happen if not initialized.");
+ expect_d_eq(count, 0, "Logging shouldn't happen if not initialized.");
}
TEST_END
diff --git a/deps/jemalloc/test/unit/mallctl.c b/deps/jemalloc/test/unit/mallctl.c
index 3a75ac040..6efc8f1b7 100644
--- a/deps/jemalloc/test/unit/mallctl.c
+++ b/deps/jemalloc/test/unit/mallctl.c
@@ -1,5 +1,6 @@
#include "test/jemalloc_test.h"
+#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/util.h"
@@ -7,25 +8,25 @@ TEST_BEGIN(test_mallctl_errors) {
uint64_t epoch;
size_t sz;
- assert_d_eq(mallctl("no_such_name", NULL, NULL, NULL, 0), ENOENT,
+ expect_d_eq(mallctl("no_such_name", NULL, NULL, NULL, 0), ENOENT,
"mallctl() should return ENOENT for non-existent names");
- assert_d_eq(mallctl("version", NULL, NULL, "0.0.0", strlen("0.0.0")),
+ expect_d_eq(mallctl("version", NULL, NULL, "0.0.0", strlen("0.0.0")),
EPERM, "mallctl() should return EPERM on attempt to write "
"read-only value");
- assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
+ expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
sizeof(epoch)-1), EINVAL,
"mallctl() should return EINVAL for input size mismatch");
- assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
+ expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
sizeof(epoch)+1), EINVAL,
"mallctl() should return EINVAL for input size mismatch");
sz = sizeof(epoch)-1;
- assert_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL,
+ expect_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL,
"mallctl() should return EINVAL for output size mismatch");
sz = sizeof(epoch)+1;
- assert_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL,
+ expect_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL,
"mallctl() should return EINVAL for output size mismatch");
}
TEST_END
@@ -35,7 +36,7 @@ TEST_BEGIN(test_mallctlnametomib_errors) {
size_t miblen;
miblen = sizeof(mib)/sizeof(size_t);
- assert_d_eq(mallctlnametomib("no_such_name", mib, &miblen), ENOENT,
+ expect_d_eq(mallctlnametomib("no_such_name", mib, &miblen), ENOENT,
"mallctlnametomib() should return ENOENT for non-existent names");
}
TEST_END
@@ -47,30 +48,30 @@ TEST_BEGIN(test_mallctlbymib_errors) {
size_t miblen;
miblen = sizeof(mib)/sizeof(size_t);
- assert_d_eq(mallctlnametomib("version", mib, &miblen), 0,
+ expect_d_eq(mallctlnametomib("version", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
- assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, "0.0.0",
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, "0.0.0",
strlen("0.0.0")), EPERM, "mallctl() should return EPERM on "
"attempt to write read-only value");
miblen = sizeof(mib)/sizeof(size_t);
- assert_d_eq(mallctlnametomib("epoch", mib, &miblen), 0,
+ expect_d_eq(mallctlnametomib("epoch", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
- assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch,
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch,
sizeof(epoch)-1), EINVAL,
"mallctlbymib() should return EINVAL for input size mismatch");
- assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch,
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch,
sizeof(epoch)+1), EINVAL,
"mallctlbymib() should return EINVAL for input size mismatch");
sz = sizeof(epoch)-1;
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0),
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0),
EINVAL,
"mallctlbymib() should return EINVAL for output size mismatch");
sz = sizeof(epoch)+1;
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0),
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0),
EINVAL,
"mallctlbymib() should return EINVAL for output size mismatch");
}
@@ -81,25 +82,25 @@ TEST_BEGIN(test_mallctl_read_write) {
size_t sz = sizeof(old_epoch);
/* Blind. */
- assert_d_eq(mallctl("epoch", NULL, NULL, NULL, 0), 0,
+ expect_d_eq(mallctl("epoch", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl() failure");
- assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
+ expect_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
/* Read. */
- assert_d_eq(mallctl("epoch", (void *)&old_epoch, &sz, NULL, 0), 0,
+ expect_d_eq(mallctl("epoch", (void *)&old_epoch, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
- assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
+ expect_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
/* Write. */
- assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&new_epoch,
+ expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&new_epoch,
sizeof(new_epoch)), 0, "Unexpected mallctl() failure");
- assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
+ expect_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
/* Read+write. */
- assert_d_eq(mallctl("epoch", (void *)&old_epoch, &sz,
+ expect_d_eq(mallctl("epoch", (void *)&old_epoch, &sz,
(void *)&new_epoch, sizeof(new_epoch)), 0,
"Unexpected mallctl() failure");
- assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
+ expect_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
}
TEST_END
@@ -109,22 +110,141 @@ TEST_BEGIN(test_mallctlnametomib_short_mib) {
miblen = 3;
mib[3] = 42;
- assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0,
+ expect_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
- assert_zu_eq(miblen, 3, "Unexpected mib output length");
- assert_zu_eq(mib[3], 42,
+ expect_zu_eq(miblen, 3, "Unexpected mib output length");
+ expect_zu_eq(mib[3], 42,
"mallctlnametomib() wrote past the end of the input mib");
}
TEST_END
+TEST_BEGIN(test_mallctlnametomib_short_name) {
+ size_t mib[4];
+ size_t miblen;
+
+ miblen = 4;
+ mib[3] = 42;
+ expect_d_eq(mallctlnametomib("arenas.bin.0", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ expect_zu_eq(miblen, 3, "Unexpected mib output length");
+ expect_zu_eq(mib[3], 42,
+ "mallctlnametomib() wrote past the end of the input mib");
+}
+TEST_END
+
+TEST_BEGIN(test_mallctlmibnametomib) {
+ size_t mib[4];
+ size_t miblen = 4;
+ uint32_t result, result_ref;
+ size_t len_result = sizeof(uint32_t);
+
+ tsd_t *tsd = tsd_fetch();
+
+ /* Error cases */
+ assert_d_eq(ctl_mibnametomib(tsd, mib, 0, "bob", &miblen), ENOENT, "");
+ assert_zu_eq(miblen, 4, "");
+ assert_d_eq(ctl_mibnametomib(tsd, mib, 0, "9999", &miblen), ENOENT, "");
+ assert_zu_eq(miblen, 4, "");
+
+ /* Valid case. */
+ assert_d_eq(ctl_mibnametomib(tsd, mib, 0, "arenas", &miblen), 0, "");
+ assert_zu_eq(miblen, 1, "");
+ miblen = 4;
+ assert_d_eq(ctl_mibnametomib(tsd, mib, 1, "bin", &miblen), 0, "");
+ assert_zu_eq(miblen, 2, "");
+ expect_d_eq(mallctlbymib(mib, miblen, &result, &len_result, NULL, 0),
+ ENOENT, "mallctlbymib() should fail on partial path");
+
+ /* Error cases. */
+ miblen = 4;
+ assert_d_eq(ctl_mibnametomib(tsd, mib, 2, "bob", &miblen), ENOENT, "");
+ assert_zu_eq(miblen, 4, "");
+ assert_d_eq(ctl_mibnametomib(tsd, mib, 2, "9999", &miblen), ENOENT, "");
+ assert_zu_eq(miblen, 4, "");
+
+ /* Valid case. */
+ assert_d_eq(ctl_mibnametomib(tsd, mib, 2, "0", &miblen), 0, "");
+ assert_zu_eq(miblen, 3, "");
+ expect_d_eq(mallctlbymib(mib, miblen, &result, &len_result, NULL, 0),
+ ENOENT, "mallctlbymib() should fail on partial path");
+
+ /* Error cases. */
+ miblen = 4;
+ assert_d_eq(ctl_mibnametomib(tsd, mib, 3, "bob", &miblen), ENOENT, "");
+ assert_zu_eq(miblen, 4, "");
+ assert_d_eq(ctl_mibnametomib(tsd, mib, 3, "9999", &miblen), ENOENT, "");
+ assert_zu_eq(miblen, 4, "");
+
+ /* Valid case. */
+ assert_d_eq(ctl_mibnametomib(tsd, mib, 3, "nregs", &miblen), 0, "");
+ assert_zu_eq(miblen, 4, "");
+ assert_d_eq(mallctlbymib(mib, miblen, &result, &len_result, NULL, 0),
+ 0, "Unexpected mallctlbymib() failure");
+ assert_d_eq(mallctl("arenas.bin.0.nregs", &result_ref, &len_result,
+ NULL, 0), 0, "Unexpected mallctl() failure");
+ expect_zu_eq(result, result_ref,
+	    "mallctlbymib() and mallctl() returned different results");
+}
+TEST_END
+
+TEST_BEGIN(test_mallctlbymibname) {
+ size_t mib[4];
+ size_t miblen = 4;
+ uint32_t result, result_ref;
+ size_t len_result = sizeof(uint32_t);
+
+ tsd_t *tsd = tsd_fetch();
+
+ /* Error cases. */
+
+ assert_d_eq(mallctlnametomib("arenas", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() failure");
+ assert_zu_eq(miblen, 1, "");
+
+ miblen = 4;
+ assert_d_eq(ctl_bymibname(tsd, mib, 1, "bin.0", &miblen,
+ &result, &len_result, NULL, 0), ENOENT, "");
+ miblen = 4;
+ assert_d_eq(ctl_bymibname(tsd, mib, 1, "bin.0.bob", &miblen,
+ &result, &len_result, NULL, 0), ENOENT, "");
+ assert_zu_eq(miblen, 4, "");
+
+ /* Valid cases. */
+
+ assert_d_eq(mallctl("arenas.bin.0.nregs", &result_ref, &len_result,
+ NULL, 0), 0, "Unexpected mallctl() failure");
+ miblen = 4;
+
+ assert_d_eq(ctl_bymibname(tsd, mib, 0, "arenas.bin.0.nregs", &miblen,
+ &result, &len_result, NULL, 0), 0, "");
+ assert_zu_eq(miblen, 4, "");
+ expect_zu_eq(result, result_ref, "Unexpected result");
+
+ assert_d_eq(ctl_bymibname(tsd, mib, 1, "bin.0.nregs", &miblen, &result,
+ &len_result, NULL, 0), 0, "");
+ assert_zu_eq(miblen, 4, "");
+ expect_zu_eq(result, result_ref, "Unexpected result");
+
+ assert_d_eq(ctl_bymibname(tsd, mib, 2, "0.nregs", &miblen, &result,
+ &len_result, NULL, 0), 0, "");
+ assert_zu_eq(miblen, 4, "");
+ expect_zu_eq(result, result_ref, "Unexpected result");
+
+ assert_d_eq(ctl_bymibname(tsd, mib, 3, "nregs", &miblen, &result,
+ &len_result, NULL, 0), 0, "");
+ assert_zu_eq(miblen, 4, "");
+ expect_zu_eq(result, result_ref, "Unexpected result");
+}
+TEST_END
+
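The two new tests above exercise jemalloc-internal lookup helpers rather than the public mallctl*() entry points: judging by the assertions, ctl_mibnametomib() resolves further name components on top of an already-resolved mib prefix, and ctl_bymibname() combines that resolution with the read/write itself. A rough sketch of the pattern, mirroring the calls in the tests (these are internal functions, the local variable names are mine, and error handling is omitted):

	size_t mib[4];
	size_t miblen = 4;
	uint32_t nregs;
	size_t len = sizeof(nregs);
	tsd_t *tsd = tsd_fetch();

	/* Resolve the leading "arenas" component; miblen comes back as 1. */
	ctl_mibnametomib(tsd, mib, 0, "arenas", &miblen);

	/* Resolve "bin.0.nregs" relative to that prefix and read the value in one call. */
	miblen = 4;
	ctl_bymibname(tsd, mib, 1, "bin.0.nregs", &miblen, &nregs, &len, NULL, 0);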
TEST_BEGIN(test_mallctl_config) {
#define TEST_MALLCTL_CONFIG(config, t) do { \
t oldval; \
size_t sz = sizeof(oldval); \
- assert_d_eq(mallctl("config."#config, (void *)&oldval, &sz, \
+ expect_d_eq(mallctl("config."#config, (void *)&oldval, &sz, \
NULL, 0), 0, "Unexpected mallctl() failure"); \
- assert_b_eq(oldval, config_##config, "Incorrect config value"); \
- assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \
+ expect_b_eq(oldval, config_##config, "Incorrect config value"); \
+ expect_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \
} while (0)
TEST_MALLCTL_CONFIG(cache_oblivious, bool);
@@ -152,17 +272,26 @@ TEST_BEGIN(test_mallctl_opt) {
int expected = config_##config ? 0 : ENOENT; \
int result = mallctl("opt."#opt, (void *)&oldval, &sz, NULL, \
0); \
- assert_d_eq(result, expected, \
+ expect_d_eq(result, expected, \
"Unexpected mallctl() result for opt."#opt); \
- assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \
+ expect_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \
} while (0)
TEST_MALLCTL_OPT(bool, abort, always);
TEST_MALLCTL_OPT(bool, abort_conf, always);
+ TEST_MALLCTL_OPT(bool, cache_oblivious, always);
+ TEST_MALLCTL_OPT(bool, trust_madvise, always);
TEST_MALLCTL_OPT(bool, confirm_conf, always);
TEST_MALLCTL_OPT(const char *, metadata_thp, always);
TEST_MALLCTL_OPT(bool, retain, always);
TEST_MALLCTL_OPT(const char *, dss, always);
+ TEST_MALLCTL_OPT(bool, hpa, always);
+ TEST_MALLCTL_OPT(size_t, hpa_slab_max_alloc, always);
+ TEST_MALLCTL_OPT(size_t, hpa_sec_nshards, always);
+ TEST_MALLCTL_OPT(size_t, hpa_sec_max_alloc, always);
+ TEST_MALLCTL_OPT(size_t, hpa_sec_max_bytes, always);
+ TEST_MALLCTL_OPT(size_t, hpa_sec_bytes_after_flush, always);
+ TEST_MALLCTL_OPT(size_t, hpa_sec_batch_fill_extra, always);
TEST_MALLCTL_OPT(unsigned, narenas, always);
TEST_MALLCTL_OPT(const char *, percpu_arena, always);
TEST_MALLCTL_OPT(size_t, oversize_threshold, always);
@@ -170,14 +299,18 @@ TEST_BEGIN(test_mallctl_opt) {
TEST_MALLCTL_OPT(ssize_t, dirty_decay_ms, always);
TEST_MALLCTL_OPT(ssize_t, muzzy_decay_ms, always);
TEST_MALLCTL_OPT(bool, stats_print, always);
+ TEST_MALLCTL_OPT(const char *, stats_print_opts, always);
+ TEST_MALLCTL_OPT(int64_t, stats_interval, always);
+ TEST_MALLCTL_OPT(const char *, stats_interval_opts, always);
TEST_MALLCTL_OPT(const char *, junk, fill);
TEST_MALLCTL_OPT(bool, zero, fill);
TEST_MALLCTL_OPT(bool, utrace, utrace);
TEST_MALLCTL_OPT(bool, xmalloc, xmalloc);
TEST_MALLCTL_OPT(bool, tcache, always);
TEST_MALLCTL_OPT(size_t, lg_extent_max_active_fit, always);
- TEST_MALLCTL_OPT(size_t, lg_tcache_max, always);
+ TEST_MALLCTL_OPT(size_t, tcache_max, always);
TEST_MALLCTL_OPT(const char *, thp, always);
+ TEST_MALLCTL_OPT(const char *, zero_realloc, always);
TEST_MALLCTL_OPT(bool, prof, prof);
TEST_MALLCTL_OPT(const char *, prof_prefix, prof);
TEST_MALLCTL_OPT(bool, prof_active, prof);
@@ -187,6 +320,11 @@ TEST_BEGIN(test_mallctl_opt) {
TEST_MALLCTL_OPT(bool, prof_gdump, prof);
TEST_MALLCTL_OPT(bool, prof_final, prof);
TEST_MALLCTL_OPT(bool, prof_leak, prof);
+ TEST_MALLCTL_OPT(bool, prof_leak_error, prof);
+ TEST_MALLCTL_OPT(ssize_t, prof_recent_alloc_max, prof);
+ TEST_MALLCTL_OPT(bool, prof_stats, prof);
+ TEST_MALLCTL_OPT(bool, prof_sys_thread_name, prof);
+ TEST_MALLCTL_OPT(ssize_t, lg_san_uaf_align, uaf_detection);
#undef TEST_MALLCTL_OPT
}
@@ -198,18 +336,18 @@ TEST_BEGIN(test_manpage_example) {
size_t len, miblen;
len = sizeof(nbins);
- assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0,
+ expect_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0,
"Unexpected mallctl() failure");
miblen = 4;
- assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib, &miblen), 0,
+ expect_d_eq(mallctlnametomib("arenas.bin.0.size", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
for (i = 0; i < nbins; i++) {
size_t bin_size;
mib[2] = i;
len = sizeof(bin_size);
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&bin_size, &len,
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&bin_size, &len,
NULL, 0), 0, "Unexpected mallctlbymib() failure");
/* Do something with bin_size... */
}
@@ -221,9 +359,9 @@ TEST_BEGIN(test_tcache_none) {
/* Allocate p and q. */
void *p0 = mallocx(42, 0);
- assert_ptr_not_null(p0, "Unexpected mallocx() failure");
+ expect_ptr_not_null(p0, "Unexpected mallocx() failure");
void *q = mallocx(42, 0);
- assert_ptr_not_null(q, "Unexpected mallocx() failure");
+ expect_ptr_not_null(q, "Unexpected mallocx() failure");
/* Deallocate p and q, but bypass the tcache for q. */
dallocx(p0, 0);
@@ -231,8 +369,11 @@ TEST_BEGIN(test_tcache_none) {
/* Make sure that tcache-based allocation returns p, not q. */
void *p1 = mallocx(42, 0);
- assert_ptr_not_null(p1, "Unexpected mallocx() failure");
- assert_ptr_eq(p0, p1, "Expected tcache to allocate cached region");
+ expect_ptr_not_null(p1, "Unexpected mallocx() failure");
+ if (!opt_prof && !san_uaf_detection_enabled()) {
+ expect_ptr_eq(p0, p1,
+ "Expected tcache to allocate cached region");
+ }
/* Clean up. */
dallocx(p1, MALLOCX_TCACHE_NONE);
@@ -253,25 +394,25 @@ TEST_BEGIN(test_tcache) {
/* Create tcaches. */
for (i = 0; i < NTCACHES; i++) {
sz = sizeof(unsigned);
- assert_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL,
+ expect_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL,
0), 0, "Unexpected mallctl() failure, i=%u", i);
}
/* Exercise tcache ID recycling. */
for (i = 0; i < NTCACHES; i++) {
- assert_d_eq(mallctl("tcache.destroy", NULL, NULL,
+ expect_d_eq(mallctl("tcache.destroy", NULL, NULL,
(void *)&tis[i], sizeof(unsigned)), 0,
"Unexpected mallctl() failure, i=%u", i);
}
for (i = 0; i < NTCACHES; i++) {
sz = sizeof(unsigned);
- assert_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL,
+ expect_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL,
0), 0, "Unexpected mallctl() failure, i=%u", i);
}
/* Flush empty tcaches. */
for (i = 0; i < NTCACHES; i++) {
- assert_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i],
+ expect_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i],
sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
i);
}
@@ -279,12 +420,12 @@ TEST_BEGIN(test_tcache) {
/* Cache some allocations. */
for (i = 0; i < NTCACHES; i++) {
ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i]));
- assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u",
+ expect_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u",
i);
dallocx(ps[i], MALLOCX_TCACHE(tis[i]));
qs[i] = mallocx(qsz, MALLOCX_TCACHE(tis[i]));
- assert_ptr_not_null(qs[i], "Unexpected mallocx() failure, i=%u",
+ expect_ptr_not_null(qs[i], "Unexpected mallocx() failure, i=%u",
i);
dallocx(qs[i], MALLOCX_TCACHE(tis[i]));
}
@@ -293,20 +434,24 @@ TEST_BEGIN(test_tcache) {
for (i = 0; i < NTCACHES; i++) {
void *p0 = ps[i];
ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i]));
- assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u",
+ expect_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u",
i);
- assert_ptr_eq(ps[i], p0,
- "Expected mallocx() to allocate cached region, i=%u", i);
+ if (!san_uaf_detection_enabled()) {
+ expect_ptr_eq(ps[i], p0, "Expected mallocx() to "
+ "allocate cached region, i=%u", i);
+ }
}
/* Verify that reallocation uses cached regions. */
for (i = 0; i < NTCACHES; i++) {
void *q0 = qs[i];
qs[i] = rallocx(ps[i], qsz, MALLOCX_TCACHE(tis[i]));
- assert_ptr_not_null(qs[i], "Unexpected rallocx() failure, i=%u",
+ expect_ptr_not_null(qs[i], "Unexpected rallocx() failure, i=%u",
i);
- assert_ptr_eq(qs[i], q0,
- "Expected rallocx() to allocate cached region, i=%u", i);
+ if (!san_uaf_detection_enabled()) {
+ expect_ptr_eq(qs[i], q0, "Expected rallocx() to "
+ "allocate cached region, i=%u", i);
+ }
/* Avoid undefined behavior in case of test failure. */
if (qs[i] == NULL) {
qs[i] = ps[i];
@@ -318,14 +463,14 @@ TEST_BEGIN(test_tcache) {
/* Flush some non-empty tcaches. */
for (i = 0; i < NTCACHES/2; i++) {
- assert_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i],
+ expect_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i],
sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
i);
}
/* Destroy tcaches. */
for (i = 0; i < NTCACHES; i++) {
- assert_d_eq(mallctl("tcache.destroy", NULL, NULL,
+ expect_d_eq(mallctl("tcache.destroy", NULL, NULL,
(void *)&tis[i], sizeof(unsigned)), 0,
"Unexpected mallctl() failure, i=%u", i);
}
@@ -337,32 +482,32 @@ TEST_BEGIN(test_thread_arena) {
const char *opa;
size_t sz = sizeof(opa);
- assert_d_eq(mallctl("opt.percpu_arena", (void *)&opa, &sz, NULL, 0), 0,
+ expect_d_eq(mallctl("opt.percpu_arena", (void *)&opa, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
sz = sizeof(unsigned);
- assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
+ expect_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
0, "Unexpected mallctl() failure");
if (opt_oversize_threshold != 0) {
narenas--;
}
- assert_u_eq(narenas, opt_narenas, "Number of arenas incorrect");
+ expect_u_eq(narenas, opt_narenas, "Number of arenas incorrect");
if (strcmp(opa, "disabled") == 0) {
new_arena_ind = narenas - 1;
- assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
+ expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
(void *)&new_arena_ind, sizeof(unsigned)), 0,
"Unexpected mallctl() failure");
new_arena_ind = 0;
- assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
+ expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
(void *)&new_arena_ind, sizeof(unsigned)), 0,
"Unexpected mallctl() failure");
} else {
- assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
+ expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
NULL, 0), 0, "Unexpected mallctl() failure");
new_arena_ind = percpu_arena_ind_limit(opt_percpu_arena) - 1;
if (old_arena_ind != new_arena_ind) {
- assert_d_eq(mallctl("thread.arena",
+ expect_d_eq(mallctl("thread.arena",
(void *)&old_arena_ind, &sz, (void *)&new_arena_ind,
sizeof(unsigned)), EPERM, "thread.arena ctl "
"should not be allowed with percpu arena");
@@ -379,32 +524,32 @@ TEST_BEGIN(test_arena_i_initialized) {
bool initialized;
sz = sizeof(narenas);
- assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
+ expect_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
0, "Unexpected mallctl() failure");
- assert_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0,
+ expect_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
for (i = 0; i < narenas; i++) {
mib[1] = i;
sz = sizeof(initialized);
- assert_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL,
+ expect_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL,
0), 0, "Unexpected mallctl() failure");
}
mib[1] = MALLCTL_ARENAS_ALL;
sz = sizeof(initialized);
- assert_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL, 0), 0,
+ expect_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
- assert_true(initialized,
+ expect_true(initialized,
"Merged arena statistics should always be initialized");
/* Equivalent to the above but using mallctl() directly. */
sz = sizeof(initialized);
- assert_d_eq(mallctl(
+ expect_d_eq(mallctl(
"arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".initialized",
(void *)&initialized, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
- assert_true(initialized,
+ expect_true(initialized,
"Merged arena statistics should always be initialized");
}
TEST_END
@@ -413,17 +558,17 @@ TEST_BEGIN(test_arena_i_dirty_decay_ms) {
ssize_t dirty_decay_ms, orig_dirty_decay_ms, prev_dirty_decay_ms;
size_t sz = sizeof(ssize_t);
- assert_d_eq(mallctl("arena.0.dirty_decay_ms",
+ expect_d_eq(mallctl("arena.0.dirty_decay_ms",
(void *)&orig_dirty_decay_ms, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
dirty_decay_ms = -2;
- assert_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL,
+ expect_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL,
(void *)&dirty_decay_ms, sizeof(ssize_t)), EFAULT,
"Unexpected mallctl() success");
dirty_decay_ms = 0x7fffffff;
- assert_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL,
+ expect_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL,
(void *)&dirty_decay_ms, sizeof(ssize_t)), 0,
"Unexpected mallctl() failure");
@@ -432,10 +577,10 @@ TEST_BEGIN(test_arena_i_dirty_decay_ms) {
dirty_decay_ms++) {
ssize_t old_dirty_decay_ms;
- assert_d_eq(mallctl("arena.0.dirty_decay_ms",
+ expect_d_eq(mallctl("arena.0.dirty_decay_ms",
(void *)&old_dirty_decay_ms, &sz, (void *)&dirty_decay_ms,
sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
- assert_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms,
+ expect_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms,
"Unexpected old arena.0.dirty_decay_ms");
}
}
@@ -445,17 +590,17 @@ TEST_BEGIN(test_arena_i_muzzy_decay_ms) {
ssize_t muzzy_decay_ms, orig_muzzy_decay_ms, prev_muzzy_decay_ms;
size_t sz = sizeof(ssize_t);
- assert_d_eq(mallctl("arena.0.muzzy_decay_ms",
+ expect_d_eq(mallctl("arena.0.muzzy_decay_ms",
(void *)&orig_muzzy_decay_ms, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
muzzy_decay_ms = -2;
- assert_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL,
+ expect_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL,
(void *)&muzzy_decay_ms, sizeof(ssize_t)), EFAULT,
"Unexpected mallctl() success");
muzzy_decay_ms = 0x7fffffff;
- assert_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL,
+ expect_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL,
(void *)&muzzy_decay_ms, sizeof(ssize_t)), 0,
"Unexpected mallctl() failure");
@@ -464,10 +609,10 @@ TEST_BEGIN(test_arena_i_muzzy_decay_ms) {
muzzy_decay_ms++) {
ssize_t old_muzzy_decay_ms;
- assert_d_eq(mallctl("arena.0.muzzy_decay_ms",
+ expect_d_eq(mallctl("arena.0.muzzy_decay_ms",
(void *)&old_muzzy_decay_ms, &sz, (void *)&muzzy_decay_ms,
sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
- assert_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms,
+ expect_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms,
"Unexpected old arena.0.muzzy_decay_ms");
}
}
@@ -479,19 +624,19 @@ TEST_BEGIN(test_arena_i_purge) {
size_t mib[3];
size_t miblen = 3;
- assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl() failure");
- assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
+ expect_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
0, "Unexpected mallctl() failure");
- assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0,
+ expect_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
mib[1] = narenas;
- assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
"Unexpected mallctlbymib() failure");
mib[1] = MALLCTL_ARENAS_ALL;
- assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
"Unexpected mallctlbymib() failure");
}
TEST_END
@@ -502,19 +647,19 @@ TEST_BEGIN(test_arena_i_decay) {
size_t mib[3];
size_t miblen = 3;
- assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
+ expect_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl() failure");
- assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
+ expect_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
0, "Unexpected mallctl() failure");
- assert_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0,
+ expect_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
mib[1] = narenas;
- assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
"Unexpected mallctlbymib() failure");
mib[1] = MALLCTL_ARENAS_ALL;
- assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
"Unexpected mallctlbymib() failure");
}
TEST_END
@@ -526,40 +671,40 @@ TEST_BEGIN(test_arena_i_dss) {
size_t miblen;
miblen = sizeof(mib)/sizeof(size_t);
- assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0,
+ expect_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0,
"Unexpected mallctlnametomib() error");
dss_prec_new = "disabled";
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz,
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz,
(void *)&dss_prec_new, sizeof(dss_prec_new)), 0,
"Unexpected mallctl() failure");
- assert_str_ne(dss_prec_old, "primary",
+ expect_str_ne(dss_prec_old, "primary",
"Unexpected default for dss precedence");
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz,
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz,
(void *)&dss_prec_old, sizeof(dss_prec_old)), 0,
"Unexpected mallctl() failure");
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL,
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL,
0), 0, "Unexpected mallctl() failure");
- assert_str_ne(dss_prec_old, "primary",
+ expect_str_ne(dss_prec_old, "primary",
"Unexpected value for dss precedence");
mib[1] = narenas_total_get();
dss_prec_new = "disabled";
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz,
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz,
(void *)&dss_prec_new, sizeof(dss_prec_new)), 0,
"Unexpected mallctl() failure");
- assert_str_ne(dss_prec_old, "primary",
+ expect_str_ne(dss_prec_old, "primary",
"Unexpected default for dss precedence");
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz,
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz,
(void *)&dss_prec_old, sizeof(dss_prec_new)), 0,
"Unexpected mallctl() failure");
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL,
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL,
0), 0, "Unexpected mallctl() failure");
- assert_str_ne(dss_prec_old, "primary",
+ expect_str_ne(dss_prec_old, "primary",
"Unexpected value for dss precedence");
}
TEST_END
@@ -571,43 +716,43 @@ TEST_BEGIN(test_arena_i_retain_grow_limit) {
bool retain_enabled;
size_t sz = sizeof(retain_enabled);
- assert_d_eq(mallctl("opt.retain", &retain_enabled, &sz, NULL, 0),
+ expect_d_eq(mallctl("opt.retain", &retain_enabled, &sz, NULL, 0),
0, "Unexpected mallctl() failure");
test_skip_if(!retain_enabled);
sz = sizeof(default_limit);
miblen = sizeof(mib)/sizeof(size_t);
- assert_d_eq(mallctlnametomib("arena.0.retain_grow_limit", mib, &miblen),
+ expect_d_eq(mallctlnametomib("arena.0.retain_grow_limit", mib, &miblen),
0, "Unexpected mallctlnametomib() error");
- assert_d_eq(mallctlbymib(mib, miblen, &default_limit, &sz, NULL, 0), 0,
+ expect_d_eq(mallctlbymib(mib, miblen, &default_limit, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
- assert_zu_eq(default_limit, SC_LARGE_MAXCLASS,
+ expect_zu_eq(default_limit, SC_LARGE_MAXCLASS,
"Unexpected default for retain_grow_limit");
new_limit = PAGE - 1;
- assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit,
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit,
sizeof(new_limit)), EFAULT, "Unexpected mallctl() success");
new_limit = PAGE + 1;
- assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit,
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit,
sizeof(new_limit)), 0, "Unexpected mallctl() failure");
- assert_d_eq(mallctlbymib(mib, miblen, &old_limit, &sz, NULL, 0), 0,
+ expect_d_eq(mallctlbymib(mib, miblen, &old_limit, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
- assert_zu_eq(old_limit, PAGE,
+ expect_zu_eq(old_limit, PAGE,
"Unexpected value for retain_grow_limit");
/* Expect grow less than psize class 10. */
new_limit = sz_pind2sz(10) - 1;
- assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit,
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit,
sizeof(new_limit)), 0, "Unexpected mallctl() failure");
- assert_d_eq(mallctlbymib(mib, miblen, &old_limit, &sz, NULL, 0), 0,
+ expect_d_eq(mallctlbymib(mib, miblen, &old_limit, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
- assert_zu_eq(old_limit, sz_pind2sz(9),
+ expect_zu_eq(old_limit, sz_pind2sz(9),
"Unexpected value for retain_grow_limit");
/* Restore to default. */
- assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &default_limit,
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &default_limit,
sizeof(default_limit)), 0, "Unexpected mallctl() failure");
}
TEST_END
@@ -616,17 +761,17 @@ TEST_BEGIN(test_arenas_dirty_decay_ms) {
ssize_t dirty_decay_ms, orig_dirty_decay_ms, prev_dirty_decay_ms;
size_t sz = sizeof(ssize_t);
- assert_d_eq(mallctl("arenas.dirty_decay_ms",
+ expect_d_eq(mallctl("arenas.dirty_decay_ms",
(void *)&orig_dirty_decay_ms, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
dirty_decay_ms = -2;
- assert_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL,
+ expect_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL,
(void *)&dirty_decay_ms, sizeof(ssize_t)), EFAULT,
"Unexpected mallctl() success");
dirty_decay_ms = 0x7fffffff;
- assert_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL,
+ expect_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL,
(void *)&dirty_decay_ms, sizeof(ssize_t)), 0,
"Expected mallctl() failure");
@@ -635,10 +780,10 @@ TEST_BEGIN(test_arenas_dirty_decay_ms) {
dirty_decay_ms++) {
ssize_t old_dirty_decay_ms;
- assert_d_eq(mallctl("arenas.dirty_decay_ms",
+ expect_d_eq(mallctl("arenas.dirty_decay_ms",
(void *)&old_dirty_decay_ms, &sz, (void *)&dirty_decay_ms,
sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
- assert_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms,
+ expect_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms,
"Unexpected old arenas.dirty_decay_ms");
}
}
@@ -648,17 +793,17 @@ TEST_BEGIN(test_arenas_muzzy_decay_ms) {
ssize_t muzzy_decay_ms, orig_muzzy_decay_ms, prev_muzzy_decay_ms;
size_t sz = sizeof(ssize_t);
- assert_d_eq(mallctl("arenas.muzzy_decay_ms",
+ expect_d_eq(mallctl("arenas.muzzy_decay_ms",
(void *)&orig_muzzy_decay_ms, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
muzzy_decay_ms = -2;
- assert_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL,
+ expect_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL,
(void *)&muzzy_decay_ms, sizeof(ssize_t)), EFAULT,
"Unexpected mallctl() success");
muzzy_decay_ms = 0x7fffffff;
- assert_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL,
+ expect_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL,
(void *)&muzzy_decay_ms, sizeof(ssize_t)), 0,
"Expected mallctl() failure");
@@ -667,10 +812,10 @@ TEST_BEGIN(test_arenas_muzzy_decay_ms) {
muzzy_decay_ms++) {
ssize_t old_muzzy_decay_ms;
- assert_d_eq(mallctl("arenas.muzzy_decay_ms",
+ expect_d_eq(mallctl("arenas.muzzy_decay_ms",
(void *)&old_muzzy_decay_ms, &sz, (void *)&muzzy_decay_ms,
sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
- assert_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms,
+ expect_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms,
"Unexpected old arenas.muzzy_decay_ms");
}
}
@@ -680,9 +825,9 @@ TEST_BEGIN(test_arenas_constants) {
#define TEST_ARENAS_CONSTANT(t, name, expected) do { \
t name; \
size_t sz = sizeof(t); \
- assert_d_eq(mallctl("arenas."#name, (void *)&name, &sz, NULL, \
+ expect_d_eq(mallctl("arenas."#name, (void *)&name, &sz, NULL, \
0), 0, "Unexpected mallctl() failure"); \
- assert_zu_eq(name, expected, "Incorrect "#name" size"); \
+ expect_zu_eq(name, expected, "Incorrect "#name" size"); \
} while (0)
TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM);
@@ -698,9 +843,9 @@ TEST_BEGIN(test_arenas_bin_constants) {
#define TEST_ARENAS_BIN_CONSTANT(t, name, expected) do { \
t name; \
size_t sz = sizeof(t); \
- assert_d_eq(mallctl("arenas.bin.0."#name, (void *)&name, &sz, \
+ expect_d_eq(mallctl("arenas.bin.0."#name, (void *)&name, &sz, \
NULL, 0), 0, "Unexpected mallctl() failure"); \
- assert_zu_eq(name, expected, "Incorrect "#name" size"); \
+ expect_zu_eq(name, expected, "Incorrect "#name" size"); \
} while (0)
TEST_ARENAS_BIN_CONSTANT(size_t, size, bin_infos[0].reg_size);
@@ -717,9 +862,9 @@ TEST_BEGIN(test_arenas_lextent_constants) {
#define TEST_ARENAS_LEXTENT_CONSTANT(t, name, expected) do { \
t name; \
size_t sz = sizeof(t); \
- assert_d_eq(mallctl("arenas.lextent.0."#name, (void *)&name, \
+ expect_d_eq(mallctl("arenas.lextent.0."#name, (void *)&name, \
&sz, NULL, 0), 0, "Unexpected mallctl() failure"); \
- assert_zu_eq(name, expected, "Incorrect "#name" size"); \
+ expect_zu_eq(name, expected, "Incorrect "#name" size"); \
} while (0)
TEST_ARENAS_LEXTENT_CONSTANT(size_t, size,
@@ -733,16 +878,16 @@ TEST_BEGIN(test_arenas_create) {
unsigned narenas_before, arena, narenas_after;
size_t sz = sizeof(unsigned);
- assert_d_eq(mallctl("arenas.narenas", (void *)&narenas_before, &sz,
+ expect_d_eq(mallctl("arenas.narenas", (void *)&narenas_before, &sz,
NULL, 0), 0, "Unexpected mallctl() failure");
- assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
+ expect_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
- assert_d_eq(mallctl("arenas.narenas", (void *)&narenas_after, &sz, NULL,
+ expect_d_eq(mallctl("arenas.narenas", (void *)&narenas_after, &sz, NULL,
0), 0, "Unexpected mallctl() failure");
- assert_u_eq(narenas_before+1, narenas_after,
+ expect_u_eq(narenas_before+1, narenas_after,
"Unexpected number of arenas before versus after extension");
- assert_u_eq(arena, narenas_after-1, "Unexpected arena index");
+ expect_u_eq(arena, narenas_after-1, "Unexpected arena index");
}
TEST_END
@@ -751,22 +896,49 @@ TEST_BEGIN(test_arenas_lookup) {
void *ptr;
size_t sz = sizeof(unsigned);
- assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
+ expect_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
ptr = mallocx(42, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE);
- assert_ptr_not_null(ptr, "Unexpected mallocx() failure");
- assert_d_eq(mallctl("arenas.lookup", &arena1, &sz, &ptr, sizeof(ptr)),
+ expect_ptr_not_null(ptr, "Unexpected mallocx() failure");
+ expect_d_eq(mallctl("arenas.lookup", &arena1, &sz, &ptr, sizeof(ptr)),
0, "Unexpected mallctl() failure");
- assert_u_eq(arena, arena1, "Unexpected arena index");
+ expect_u_eq(arena, arena1, "Unexpected arena index");
dallocx(ptr, 0);
}
TEST_END
+TEST_BEGIN(test_prof_active) {
+ /*
+ * If config_prof is off, then the test for prof_active in
+ * test_mallctl_opt was already enough.
+ */
+ test_skip_if(!config_prof);
+ test_skip_if(opt_prof);
+
+ bool active, old;
+ size_t len = sizeof(bool);
+
+ active = true;
+ expect_d_eq(mallctl("prof.active", NULL, NULL, &active, len), ENOENT,
+ "Setting prof_active to true should fail when opt_prof is off");
+ old = true;
+ expect_d_eq(mallctl("prof.active", &old, &len, &active, len), ENOENT,
+ "Setting prof_active to true should fail when opt_prof is off");
+ expect_true(old, "old value should not be touched when mallctl fails");
+ active = false;
+ expect_d_eq(mallctl("prof.active", NULL, NULL, &active, len), 0,
+ "Setting prof_active to false should succeed when opt_prof is off");
+ expect_d_eq(mallctl("prof.active", &old, &len, &active, len), 0,
+ "Setting prof_active to false should succeed when opt_prof is off");
+ expect_false(old, "prof_active should be false when opt_prof is off");
+}
+TEST_END
+
TEST_BEGIN(test_stats_arenas) {
#define TEST_STATS_ARENAS(t, name) do { \
t name; \
size_t sz = sizeof(t); \
- assert_d_eq(mallctl("stats.arenas.0."#name, (void *)&name, &sz, \
+ expect_d_eq(mallctl("stats.arenas.0."#name, (void *)&name, &sz, \
NULL, 0), 0, "Unexpected mallctl() failure"); \
} while (0)
@@ -800,21 +972,21 @@ TEST_BEGIN(test_hooks) {
size_t sz = sizeof(handle);
int err = mallctl("experimental.hooks.install", &handle, &sz, &hooks,
sizeof(hooks));
- assert_d_eq(err, 0, "Hook installation failed");
- assert_ptr_ne(handle, NULL, "Hook installation gave null handle");
+ expect_d_eq(err, 0, "Hook installation failed");
+ expect_ptr_ne(handle, NULL, "Hook installation gave null handle");
void *ptr = mallocx(1, 0);
- assert_true(hook_called, "Alloc hook not called");
+ expect_true(hook_called, "Alloc hook not called");
hook_called = false;
free(ptr);
- assert_true(hook_called, "Free hook not called");
+ expect_true(hook_called, "Free hook not called");
err = mallctl("experimental.hooks.remove", NULL, NULL, &handle,
sizeof(handle));
- assert_d_eq(err, 0, "Hook removal failed");
+ expect_d_eq(err, 0, "Hook removal failed");
hook_called = false;
ptr = mallocx(1, 0);
free(ptr);
- assert_false(hook_called, "Hook called after removal");
+ expect_false(hook_called, "Hook called after removal");
}
TEST_END
@@ -830,27 +1002,234 @@ TEST_BEGIN(test_hooks_exhaustion) {
handle = NULL;
err = mallctl("experimental.hooks.install", &handle, &sz,
&hooks, sizeof(hooks));
- assert_d_eq(err, 0, "Error installation hooks");
- assert_ptr_ne(handle, NULL, "Got NULL handle");
+		expect_d_eq(err, 0, "Error installing hooks");
+ expect_ptr_ne(handle, NULL, "Got NULL handle");
handles[i] = handle;
}
err = mallctl("experimental.hooks.install", &handle, &sz, &hooks,
sizeof(hooks));
- assert_d_eq(err, EAGAIN, "Should have failed hook installation");
+ expect_d_eq(err, EAGAIN, "Should have failed hook installation");
for (int i = 0; i < HOOK_MAX; i++) {
err = mallctl("experimental.hooks.remove", NULL, NULL,
&handles[i], sizeof(handles[i]));
- assert_d_eq(err, 0, "Hook removal failed");
+ expect_d_eq(err, 0, "Hook removal failed");
}
/* Insertion failed, but then we removed some; it should work now. */
handle = NULL;
err = mallctl("experimental.hooks.install", &handle, &sz, &hooks,
sizeof(hooks));
- assert_d_eq(err, 0, "Hook insertion failed");
- assert_ptr_ne(handle, NULL, "Got NULL handle");
+ expect_d_eq(err, 0, "Hook insertion failed");
+ expect_ptr_ne(handle, NULL, "Got NULL handle");
err = mallctl("experimental.hooks.remove", NULL, NULL, &handle,
sizeof(handle));
- assert_d_eq(err, 0, "Hook removal failed");
+ expect_d_eq(err, 0, "Hook removal failed");
+}
+TEST_END
+
+TEST_BEGIN(test_thread_idle) {
+ /*
+ * We're cheating a little bit in this test, and inferring things about
+ * implementation internals (like tcache details). We have to;
+ * thread.idle has no guaranteed effects. We need stats to make these
+ * inferences.
+ */
+ test_skip_if(!config_stats);
+
+ int err;
+ size_t sz;
+ size_t miblen;
+
+ bool tcache_enabled = false;
+ sz = sizeof(tcache_enabled);
+ err = mallctl("thread.tcache.enabled", &tcache_enabled, &sz, NULL, 0);
+ expect_d_eq(err, 0, "");
+ test_skip_if(!tcache_enabled);
+
+ size_t tcache_max;
+ sz = sizeof(tcache_max);
+ err = mallctl("arenas.tcache_max", &tcache_max, &sz, NULL, 0);
+ expect_d_eq(err, 0, "");
+ test_skip_if(tcache_max == 0);
+
+ unsigned arena_ind;
+ sz = sizeof(arena_ind);
+ err = mallctl("thread.arena", &arena_ind, &sz, NULL, 0);
+ expect_d_eq(err, 0, "");
+
+ /* We're going to do an allocation of size 1, which we know is small. */
+ size_t mib[5];
+ miblen = sizeof(mib)/sizeof(mib[0]);
+ err = mallctlnametomib("stats.arenas.0.small.ndalloc", mib, &miblen);
+ expect_d_eq(err, 0, "");
+ mib[2] = arena_ind;
+
+ /*
+ * This alloc and dalloc should leave something in the tcache, in a
+ * small size's cache bin.
+ */
+ void *ptr = mallocx(1, 0);
+ dallocx(ptr, 0);
+
+ uint64_t epoch;
+ err = mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
+ expect_d_eq(err, 0, "");
+
+ uint64_t small_dalloc_pre_idle;
+ sz = sizeof(small_dalloc_pre_idle);
+ err = mallctlbymib(mib, miblen, &small_dalloc_pre_idle, &sz, NULL, 0);
+ expect_d_eq(err, 0, "");
+
+ err = mallctl("thread.idle", NULL, NULL, NULL, 0);
+ expect_d_eq(err, 0, "");
+
+ err = mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
+ expect_d_eq(err, 0, "");
+
+ uint64_t small_dalloc_post_idle;
+ sz = sizeof(small_dalloc_post_idle);
+ err = mallctlbymib(mib, miblen, &small_dalloc_post_idle, &sz, NULL, 0);
+ expect_d_eq(err, 0, "");
+
+ expect_u64_lt(small_dalloc_pre_idle, small_dalloc_post_idle,
+ "Purge didn't flush the tcache");
+}
+TEST_END
+
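For application code the interesting part of test_thread_idle is just the single mallctl call: thread.idle is only a hint, with no guaranteed effect, so a caller would typically do nothing beyond checking the return code. A minimal sketch (assuming the "thread.idle" name shown above):

	/* Hint that this thread will be idle for a while; this may flush its tcache. */
	int err = mallctl("thread.idle", NULL, NULL, NULL, 0);
	if (err != 0) {
		/* Best-effort only; a failure here is not fatal. */
	}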
+TEST_BEGIN(test_thread_peak) {
+ test_skip_if(!config_stats);
+
+ /*
+ * We don't commit to any stable amount of accuracy for peak tracking
+ * (in practice, when this test was written, we made sure to be within
+ * 100k). But 10MB is big for more or less any definition of big.
+ */
+ size_t big_size = 10 * 1024 * 1024;
+ size_t small_size = 256;
+
+ void *ptr;
+ int err;
+ size_t sz;
+ uint64_t peak;
+ sz = sizeof(uint64_t);
+
+ err = mallctl("thread.peak.reset", NULL, NULL, NULL, 0);
+ expect_d_eq(err, 0, "");
+ ptr = mallocx(SC_SMALL_MAXCLASS, 0);
+ err = mallctl("thread.peak.read", &peak, &sz, NULL, 0);
+ expect_d_eq(err, 0, "");
+ expect_u64_eq(peak, SC_SMALL_MAXCLASS, "Missed an update");
+ free(ptr);
+ err = mallctl("thread.peak.read", &peak, &sz, NULL, 0);
+ expect_d_eq(err, 0, "");
+ expect_u64_eq(peak, SC_SMALL_MAXCLASS, "Freeing changed peak");
+ ptr = mallocx(big_size, 0);
+ free(ptr);
+ /*
+ * The peak should have hit big_size in the last two lines, even though
+ * the net allocated bytes has since dropped back down to zero. We
+	 * should have noticed the peak change without having done any mallctl
+ * calls while net allocated bytes was high.
+ */
+ err = mallctl("thread.peak.read", &peak, &sz, NULL, 0);
+ expect_d_eq(err, 0, "");
+ expect_u64_ge(peak, big_size, "Missed a peak change.");
+
+ /* Allocate big_size, but using small allocations. */
+ size_t nallocs = big_size / small_size;
+ void **ptrs = calloc(nallocs, sizeof(void *));
+ err = mallctl("thread.peak.reset", NULL, NULL, NULL, 0);
+ expect_d_eq(err, 0, "");
+ err = mallctl("thread.peak.read", &peak, &sz, NULL, 0);
+ expect_d_eq(err, 0, "");
+ expect_u64_eq(0, peak, "Missed a reset.");
+ for (size_t i = 0; i < nallocs; i++) {
+ ptrs[i] = mallocx(small_size, 0);
+ }
+ for (size_t i = 0; i < nallocs; i++) {
+ free(ptrs[i]);
+ }
+ err = mallctl("thread.peak.read", &peak, &sz, NULL, 0);
+ expect_d_eq(err, 0, "");
+ /*
+ * We don't guarantee exactness; make sure we're within 10% of the peak,
+ * though.
+ */
+ expect_u64_ge(peak, nallocx(small_size, 0) * nallocs * 9 / 10,
+ "Missed some peak changes.");
+ expect_u64_le(peak, nallocx(small_size, 0) * nallocs * 11 / 10,
+ "Overcounted peak changes.");
+ free(ptrs);
+}
+TEST_END
+
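For concreteness, the bounds in the small-allocation phase of test_thread_peak work out as follows (assuming the size class backing small_size is exactly 256 bytes; nallocx() may round up, which is why the test queries it rather than using small_size directly):

	nallocs        = big_size / small_size = 10 * 1024 * 1024 / 256 = 40960
	expected peak  ~ nallocs * nallocx(small_size, 0) ~ 40960 * 256 = 10485760 bytes (10 MiB)
	accepted range = [9/10, 11/10] of that, i.e. roughly 9437184 to 11534336 bytes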
+typedef struct activity_test_data_s activity_test_data_t;
+struct activity_test_data_s {
+ uint64_t obtained_alloc;
+ uint64_t obtained_dalloc;
+};
+
+static void
+activity_test_callback(void *uctx, uint64_t alloc, uint64_t dalloc) {
+ activity_test_data_t *test_data = (activity_test_data_t *)uctx;
+ test_data->obtained_alloc = alloc;
+ test_data->obtained_dalloc = dalloc;
+}
+
+TEST_BEGIN(test_thread_activity_callback) {
+ test_skip_if(!config_stats);
+
+ const size_t big_size = 10 * 1024 * 1024;
+ void *ptr;
+ int err;
+ size_t sz;
+
+ uint64_t *allocatedp;
+ uint64_t *deallocatedp;
+ sz = sizeof(allocatedp);
+ err = mallctl("thread.allocatedp", &allocatedp, &sz, NULL, 0);
+ assert_d_eq(0, err, "");
+ err = mallctl("thread.deallocatedp", &deallocatedp, &sz, NULL, 0);
+ assert_d_eq(0, err, "");
+
+ activity_callback_thunk_t old_thunk = {(activity_callback_t)111,
+ (void *)222};
+
+ activity_test_data_t test_data = {333, 444};
+ activity_callback_thunk_t new_thunk =
+ {&activity_test_callback, &test_data};
+
+ sz = sizeof(old_thunk);
+ err = mallctl("experimental.thread.activity_callback", &old_thunk, &sz,
+ &new_thunk, sizeof(new_thunk));
+ assert_d_eq(0, err, "");
+
+ expect_true(old_thunk.callback == NULL, "Callback already installed");
+ expect_true(old_thunk.uctx == NULL, "Callback data already installed");
+
+ ptr = mallocx(big_size, 0);
+ expect_u64_eq(test_data.obtained_alloc, *allocatedp, "");
+ expect_u64_eq(test_data.obtained_dalloc, *deallocatedp, "");
+
+ free(ptr);
+ expect_u64_eq(test_data.obtained_alloc, *allocatedp, "");
+ expect_u64_eq(test_data.obtained_dalloc, *deallocatedp, "");
+
+ sz = sizeof(old_thunk);
+ new_thunk = (activity_callback_thunk_t){ NULL, NULL };
+ err = mallctl("experimental.thread.activity_callback", &old_thunk, &sz,
+ &new_thunk, sizeof(new_thunk));
+ assert_d_eq(0, err, "");
+
+ expect_true(old_thunk.callback == &activity_test_callback, "");
+ expect_true(old_thunk.uctx == &test_data, "");
+
+ /* Inserting NULL should have turned off tracking. */
+ test_data.obtained_alloc = 333;
+ test_data.obtained_dalloc = 444;
+ ptr = mallocx(big_size, 0);
+ free(ptr);
+ expect_u64_eq(333, test_data.obtained_alloc, "");
+ expect_u64_eq(444, test_data.obtained_dalloc, "");
}
TEST_END
@@ -862,6 +1241,9 @@ main(void) {
test_mallctlbymib_errors,
test_mallctl_read_write,
test_mallctlnametomib_short_mib,
+ test_mallctlnametomib_short_name,
+ test_mallctlmibnametomib,
+ test_mallctlbymibname,
test_mallctl_config,
test_mallctl_opt,
test_manpage_example,
@@ -882,7 +1264,11 @@ main(void) {
test_arenas_lextent_constants,
test_arenas_create,
test_arenas_lookup,
+ test_prof_active,
test_stats_arenas,
test_hooks,
- test_hooks_exhaustion);
+ test_hooks_exhaustion,
+ test_thread_idle,
+ test_thread_peak,
+ test_thread_activity_callback);
}
diff --git a/deps/jemalloc/test/unit/malloc_conf_2.c b/deps/jemalloc/test/unit/malloc_conf_2.c
new file mode 100644
index 000000000..ecfa4991c
--- /dev/null
+++ b/deps/jemalloc/test/unit/malloc_conf_2.c
@@ -0,0 +1,29 @@
+#include "test/jemalloc_test.h"
+
+const char *malloc_conf = "dirty_decay_ms:1000";
+const char *malloc_conf_2_conf_harder = "dirty_decay_ms:1234";
+
+TEST_BEGIN(test_malloc_conf_2) {
+#ifdef _WIN32
+ bool windows = true;
+#else
+ bool windows = false;
+#endif
+ /* Windows doesn't support weak symbol linker trickery. */
+ test_skip_if(windows);
+
+ ssize_t dirty_decay_ms;
+ size_t sz = sizeof(dirty_decay_ms);
+
+ int err = mallctl("opt.dirty_decay_ms", &dirty_decay_ms, &sz, NULL, 0);
+ assert_d_eq(err, 0, "Unexpected mallctl failure");
+ expect_zd_eq(dirty_decay_ms, 1234,
+ "malloc_conf_2 setting didn't take effect");
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_malloc_conf_2);
+}
diff --git a/deps/jemalloc/test/unit/malloc_conf_2.sh b/deps/jemalloc/test/unit/malloc_conf_2.sh
new file mode 100644
index 000000000..2c780f1a2
--- /dev/null
+++ b/deps/jemalloc/test/unit/malloc_conf_2.sh
@@ -0,0 +1 @@
+export MALLOC_CONF="dirty_decay_ms:500"
diff --git a/deps/jemalloc/test/unit/malloc_io.c b/deps/jemalloc/test/unit/malloc_io.c
index 79ba7fc53..385f7450e 100644
--- a/deps/jemalloc/test/unit/malloc_io.c
+++ b/deps/jemalloc/test/unit/malloc_io.c
@@ -4,9 +4,9 @@ TEST_BEGIN(test_malloc_strtoumax_no_endptr) {
int err;
set_errno(0);
- assert_ju_eq(malloc_strtoumax("0", NULL, 0), 0, "Unexpected result");
+ expect_ju_eq(malloc_strtoumax("0", NULL, 0), 0, "Unexpected result");
err = get_errno();
- assert_d_eq(err, 0, "Unexpected failure");
+ expect_d_eq(err, 0, "Unexpected failure");
}
TEST_END
@@ -89,14 +89,14 @@ TEST_BEGIN(test_malloc_strtoumax) {
set_errno(0);
result = malloc_strtoumax(test->input, &remainder, test->base);
err = get_errno();
- assert_d_eq(err, test->expected_errno,
+ expect_d_eq(err, test->expected_errno,
"Expected errno %s for \"%s\", base %d",
test->expected_errno_name, test->input, test->base);
- assert_str_eq(remainder, test->expected_remainder,
+ expect_str_eq(remainder, test->expected_remainder,
"Unexpected remainder for \"%s\", base %d",
test->input, test->base);
if (err == 0) {
- assert_ju_eq(result, test->expected_x,
+ expect_ju_eq(result, test->expected_x,
"Unexpected result for \"%s\", base %d",
test->input, test->base);
}
@@ -111,10 +111,10 @@ TEST_BEGIN(test_malloc_snprintf_truncated) {
size_t len;
#define TEST(expected_str_untruncated, ...) do { \
result = malloc_snprintf(buf, len, __VA_ARGS__); \
- assert_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0, \
+ expect_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0, \
"Unexpected string inequality (\"%s\" vs \"%s\")", \
buf, expected_str_untruncated); \
- assert_zu_eq(result, strlen(expected_str_untruncated), \
+ expect_zu_eq(result, strlen(expected_str_untruncated), \
"Unexpected result"); \
} while (0)
@@ -142,8 +142,8 @@ TEST_BEGIN(test_malloc_snprintf) {
size_t result;
#define TEST(expected_str, ...) do { \
result = malloc_snprintf(buf, sizeof(buf), __VA_ARGS__); \
- assert_str_eq(buf, expected_str, "Unexpected output"); \
- assert_zu_eq(result, strlen(expected_str), "Unexpected result");\
+ expect_str_eq(buf, expected_str, "Unexpected output"); \
+ expect_zu_eq(result, strlen(expected_str), "Unexpected result");\
} while (0)
TEST("hello", "hello");
@@ -175,6 +175,7 @@ TEST_BEGIN(test_malloc_snprintf) {
TEST("_1234_", "_%o_", 01234);
TEST("_01234_", "_%#o_", 01234);
TEST("_1234_", "_%u_", 1234);
+ TEST("01234", "%05u", 1234);
TEST("_1234_", "_%d_", 1234);
TEST("_ 1234_", "_% d_", 1234);
@@ -183,6 +184,15 @@ TEST_BEGIN(test_malloc_snprintf) {
TEST("_-1234_", "_% d_", -1234);
TEST("_-1234_", "_%+d_", -1234);
+ /*
+ * Morally, we should test these too, but 0-padded signed types are not
+ * yet supported.
+ *
+	 * TEST("01234", "%05d", 1234);
+ * TEST("-1234", "%05d", -1234);
+ * TEST("-01234", "%06d", -1234);
+ */
+
TEST("_-1234_", "_%d_", -1234);
TEST("_1234_", "_%d_", 1234);
TEST("_-1234_", "_%i_", -1234);
diff --git a/deps/jemalloc/test/unit/math.c b/deps/jemalloc/test/unit/math.c
index 09ef20c7b..a32767c53 100644
--- a/deps/jemalloc/test/unit/math.c
+++ b/deps/jemalloc/test/unit/math.c
@@ -41,7 +41,7 @@ TEST_BEGIN(test_ln_gamma_factorial) {
/* exp(ln_gamma(x)) == (x-1)! for integer x. */
for (x = 1; x <= 21; x++) {
- assert_true(double_eq_rel(exp(ln_gamma(x)),
+ expect_true(double_eq_rel(exp(ln_gamma(x)),
(double)factorial(x-1), MAX_REL_ERR, MAX_ABS_ERR),
"Incorrect factorial result for x=%u", x);
}
@@ -192,7 +192,7 @@ TEST_BEGIN(test_ln_gamma_misc) {
for (i = 1; i < sizeof(ln_gamma_misc_expected)/sizeof(double); i++) {
double x = (double)i * 0.25;
- assert_true(double_eq_rel(ln_gamma(x),
+ expect_true(double_eq_rel(ln_gamma(x),
ln_gamma_misc_expected[i], MAX_REL_ERR, MAX_ABS_ERR),
"Incorrect ln_gamma result for i=%u", i);
}
@@ -242,7 +242,7 @@ TEST_BEGIN(test_pt_norm) {
for (i = 1; i < sizeof(pt_norm_expected)/sizeof(double); i++) {
double p = (double)i * 0.01;
- assert_true(double_eq_rel(pt_norm(p), pt_norm_expected[i],
+ expect_true(double_eq_rel(pt_norm(p), pt_norm_expected[i],
MAX_REL_ERR, MAX_ABS_ERR),
"Incorrect pt_norm result for i=%u", i);
}
@@ -295,7 +295,7 @@ TEST_BEGIN(test_pt_chi2) {
double ln_gamma_df = ln_gamma(df * 0.5);
for (j = 1; j < 100; j += 7) {
double p = (double)j * 0.01;
- assert_true(double_eq_rel(pt_chi2(p, df, ln_gamma_df),
+ expect_true(double_eq_rel(pt_chi2(p, df, ln_gamma_df),
pt_chi2_expected[e], MAX_REL_ERR, MAX_ABS_ERR),
"Incorrect pt_chi2 result for i=%u, j=%u", i, j);
e++;
@@ -356,7 +356,7 @@ TEST_BEGIN(test_pt_gamma_shape) {
double ln_gamma_shape = ln_gamma(shape);
for (j = 1; j < 100; j += 7) {
double p = (double)j * 0.01;
- assert_true(double_eq_rel(pt_gamma(p, shape, 1.0,
+ expect_true(double_eq_rel(pt_gamma(p, shape, 1.0,
ln_gamma_shape), pt_gamma_expected[e], MAX_REL_ERR,
MAX_ABS_ERR),
"Incorrect pt_gamma result for i=%u, j=%u", i, j);
@@ -370,7 +370,7 @@ TEST_BEGIN(test_pt_gamma_scale) {
double shape = 1.0;
double ln_gamma_shape = ln_gamma(shape);
- assert_true(double_eq_rel(
+ expect_true(double_eq_rel(
pt_gamma(0.5, shape, 1.0, ln_gamma_shape) * 10.0,
pt_gamma(0.5, shape, 10.0, ln_gamma_shape), MAX_REL_ERR,
MAX_ABS_ERR),
diff --git a/deps/jemalloc/test/unit/mpsc_queue.c b/deps/jemalloc/test/unit/mpsc_queue.c
new file mode 100644
index 000000000..895edf840
--- /dev/null
+++ b/deps/jemalloc/test/unit/mpsc_queue.c
@@ -0,0 +1,304 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/mpsc_queue.h"
+
+typedef struct elem_s elem_t;
+typedef ql_head(elem_t) elem_list_t;
+typedef mpsc_queue(elem_t) elem_mpsc_queue_t;
+struct elem_s {
+ int thread;
+ int idx;
+ ql_elm(elem_t) link;
+};
+
+/* Include both proto and gen to make sure they match up. */
+mpsc_queue_proto(static, elem_mpsc_queue_, elem_mpsc_queue_t, elem_t,
+ elem_list_t);
+mpsc_queue_gen(static, elem_mpsc_queue_, elem_mpsc_queue_t, elem_t,
+ elem_list_t, link);
+
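A note on the two macro invocations above (inferred from the names and from the calls later in this file, not from separate documentation): mpsc_queue_proto() appears to emit the declarations and mpsc_queue_gen() the matching definitions of a queue specialized to elem_t, yielding the elem_mpsc_queue_new/_push/_push_batch/_pop_batch functions that the tests use. A minimal sketch of the intended usage:

	elem_mpsc_queue_t queue;
	elem_list_t drained;
	elem_t item = { .thread = 0, .idx = 0 };

	elem_mpsc_queue_new(&queue);
	ql_new(&drained);
	ql_elm_new(&item, link);
	/* Any number of threads may push concurrently... */
	elem_mpsc_queue_push(&queue, &item);
	/* ...but a single consumer drains everything, preserving per-producer order. */
	elem_mpsc_queue_pop_batch(&queue, &drained);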
+static void
+init_elems_simple(elem_t *elems, int nelems, int thread) {
+ for (int i = 0; i < nelems; i++) {
+ elems[i].thread = thread;
+ elems[i].idx = i;
+ ql_elm_new(&elems[i], link);
+ }
+}
+
+static void
+check_elems_simple(elem_list_t *list, int nelems, int thread) {
+ elem_t *elem;
+ int next_idx = 0;
+ ql_foreach(elem, list, link) {
+ expect_d_lt(next_idx, nelems, "Too many list items");
+ expect_d_eq(thread, elem->thread, "");
+ expect_d_eq(next_idx, elem->idx, "List out of order");
+ next_idx++;
+ }
+}
+
+TEST_BEGIN(test_simple) {
+ enum {NELEMS = 10};
+ elem_t elems[NELEMS];
+ elem_list_t list;
+ elem_mpsc_queue_t queue;
+
+ /* Pop empty queue onto empty list -> empty list */
+ ql_new(&list);
+ elem_mpsc_queue_new(&queue);
+ elem_mpsc_queue_pop_batch(&queue, &list);
+ expect_true(ql_empty(&list), "");
+
+ /* Pop empty queue onto nonempty list -> list unchanged */
+ ql_new(&list);
+ elem_mpsc_queue_new(&queue);
+ init_elems_simple(elems, NELEMS, 0);
+ for (int i = 0; i < NELEMS; i++) {
+ ql_tail_insert(&list, &elems[i], link);
+ }
+ elem_mpsc_queue_pop_batch(&queue, &list);
+ check_elems_simple(&list, NELEMS, 0);
+
+ /* Pop nonempty queue onto empty list -> list takes queue contents */
+ ql_new(&list);
+ elem_mpsc_queue_new(&queue);
+ init_elems_simple(elems, NELEMS, 0);
+ for (int i = 0; i < NELEMS; i++) {
+ elem_mpsc_queue_push(&queue, &elems[i]);
+ }
+ elem_mpsc_queue_pop_batch(&queue, &list);
+ check_elems_simple(&list, NELEMS, 0);
+
+ /* Pop nonempty queue onto nonempty list -> list gains queue contents */
+ ql_new(&list);
+ elem_mpsc_queue_new(&queue);
+ init_elems_simple(elems, NELEMS, 0);
+ for (int i = 0; i < NELEMS / 2; i++) {
+ ql_tail_insert(&list, &elems[i], link);
+ }
+ for (int i = NELEMS / 2; i < NELEMS; i++) {
+ elem_mpsc_queue_push(&queue, &elems[i]);
+ }
+ elem_mpsc_queue_pop_batch(&queue, &list);
+ check_elems_simple(&list, NELEMS, 0);
+
+}
+TEST_END
+
+TEST_BEGIN(test_push_single_or_batch) {
+ enum {
+ BATCH_MAX = 10,
+ /*
+ * We'll push i items one-at-a-time, then i items as a batch,
+		 * then i items as a batch again, as i ranges from 1 to
+		 * BATCH_MAX - 1. So we need 3 times the sum of the numbers from
+		 * 1 to BATCH_MAX - 1 elements total, i.e. 3 * 45 = 135 when
+		 * BATCH_MAX is 10.
+ */
+ NELEMS = 3 * BATCH_MAX * (BATCH_MAX - 1) / 2
+ };
+ elem_t elems[NELEMS];
+ init_elems_simple(elems, NELEMS, 0);
+ elem_list_t list;
+ ql_new(&list);
+ elem_mpsc_queue_t queue;
+ elem_mpsc_queue_new(&queue);
+ int next_idx = 0;
+ for (int i = 1; i < 10; i++) {
+ /* Push i items 1 at a time. */
+ for (int j = 0; j < i; j++) {
+ elem_mpsc_queue_push(&queue, &elems[next_idx]);
+ next_idx++;
+ }
+ /* Push i items in batch. */
+ for (int j = 0; j < i; j++) {
+ ql_tail_insert(&list, &elems[next_idx], link);
+ next_idx++;
+ }
+ elem_mpsc_queue_push_batch(&queue, &list);
+ expect_true(ql_empty(&list), "Batch push should empty source");
+ /*
+ * Push i items in batch, again. This tests two batches
+ * proceeding one after the other.
+ */
+ for (int j = 0; j < i; j++) {
+ ql_tail_insert(&list, &elems[next_idx], link);
+ next_idx++;
+ }
+ elem_mpsc_queue_push_batch(&queue, &list);
+ expect_true(ql_empty(&list), "Batch push should empty source");
+ }
+ expect_d_eq(NELEMS, next_idx, "Miscomputed number of elems to push.");
+
+ expect_true(ql_empty(&list), "");
+ elem_mpsc_queue_pop_batch(&queue, &list);
+ check_elems_simple(&list, NELEMS, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_multi_op) {
+ enum {NELEMS = 20};
+ elem_t elems[NELEMS];
+ init_elems_simple(elems, NELEMS, 0);
+ elem_list_t push_list;
+ ql_new(&push_list);
+ elem_list_t result_list;
+ ql_new(&result_list);
+ elem_mpsc_queue_t queue;
+ elem_mpsc_queue_new(&queue);
+
+ int next_idx = 0;
+ /* Push first quarter 1-at-a-time. */
+ for (int i = 0; i < NELEMS / 4; i++) {
+ elem_mpsc_queue_push(&queue, &elems[next_idx]);
+ next_idx++;
+ }
+ /* Push second quarter in batch. */
+ for (int i = NELEMS / 4; i < NELEMS / 2; i++) {
+ ql_tail_insert(&push_list, &elems[next_idx], link);
+ next_idx++;
+ }
+ elem_mpsc_queue_push_batch(&queue, &push_list);
+ /* Batch pop all pushed elements. */
+ elem_mpsc_queue_pop_batch(&queue, &result_list);
+ /* Push third quarter in batch. */
+ for (int i = NELEMS / 2; i < 3 * NELEMS / 4; i++) {
+ ql_tail_insert(&push_list, &elems[next_idx], link);
+ next_idx++;
+ }
+ elem_mpsc_queue_push_batch(&queue, &push_list);
+ /* Push last quarter one-at-a-time. */
+ for (int i = 3 * NELEMS / 4; i < NELEMS; i++) {
+ elem_mpsc_queue_push(&queue, &elems[next_idx]);
+ next_idx++;
+ }
+ /* Pop them again. Order of existing list should be preserved. */
+ elem_mpsc_queue_pop_batch(&queue, &result_list);
+
+ check_elems_simple(&result_list, NELEMS, 0);
+
+}
+TEST_END
+
+typedef struct pusher_arg_s pusher_arg_t;
+struct pusher_arg_s {
+ elem_mpsc_queue_t *queue;
+ int thread;
+ elem_t *elems;
+ int nelems;
+};
+
+typedef struct popper_arg_s popper_arg_t;
+struct popper_arg_s {
+ elem_mpsc_queue_t *queue;
+ int npushers;
+ int nelems_per_pusher;
+ int *pusher_counts;
+};
+
+static void *
+thd_pusher(void *void_arg) {
+ pusher_arg_t *arg = (pusher_arg_t *)void_arg;
+ int next_idx = 0;
+ while (next_idx < arg->nelems) {
+ /* Push 10 items in batch. */
+ elem_list_t list;
+ ql_new(&list);
+ int limit = next_idx + 10;
+ while (next_idx < arg->nelems && next_idx < limit) {
+ ql_tail_insert(&list, &arg->elems[next_idx], link);
+ next_idx++;
+ }
+ elem_mpsc_queue_push_batch(arg->queue, &list);
+ /* Push 10 items one-at-a-time. */
+ limit = next_idx + 10;
+ while (next_idx < arg->nelems && next_idx < limit) {
+ elem_mpsc_queue_push(arg->queue, &arg->elems[next_idx]);
+ next_idx++;
+ }
+
+ }
+ return NULL;
+}
+
+static void *
+thd_popper(void *void_arg) {
+ popper_arg_t *arg = (popper_arg_t *)void_arg;
+ int done_pushers = 0;
+ while (done_pushers < arg->npushers) {
+ elem_list_t list;
+ ql_new(&list);
+ elem_mpsc_queue_pop_batch(arg->queue, &list);
+ elem_t *elem;
+ ql_foreach(elem, &list, link) {
+ int thread = elem->thread;
+ int idx = elem->idx;
+ expect_d_eq(arg->pusher_counts[thread], idx,
+ "Thread's pushes reordered");
+ arg->pusher_counts[thread]++;
+ if (arg->pusher_counts[thread]
+ == arg->nelems_per_pusher) {
+ done_pushers++;
+ }
+ }
+ }
+ return NULL;
+}
+
+TEST_BEGIN(test_multiple_threads) {
+ enum {
+ NPUSHERS = 4,
+ NELEMS_PER_PUSHER = 1000*1000,
+ };
+ thd_t pushers[NPUSHERS];
+ pusher_arg_t pusher_arg[NPUSHERS];
+
+ thd_t popper;
+ popper_arg_t popper_arg;
+
+ elem_mpsc_queue_t queue;
+ elem_mpsc_queue_new(&queue);
+
+ elem_t *elems = calloc(NPUSHERS * NELEMS_PER_PUSHER, sizeof(elem_t));
+ elem_t *elem_iter = elems;
+ for (int i = 0; i < NPUSHERS; i++) {
+ pusher_arg[i].queue = &queue;
+ pusher_arg[i].thread = i;
+ pusher_arg[i].elems = elem_iter;
+ pusher_arg[i].nelems = NELEMS_PER_PUSHER;
+
+ init_elems_simple(elem_iter, NELEMS_PER_PUSHER, i);
+ elem_iter += NELEMS_PER_PUSHER;
+ }
+ popper_arg.queue = &queue;
+ popper_arg.npushers = NPUSHERS;
+ popper_arg.nelems_per_pusher = NELEMS_PER_PUSHER;
+ int pusher_counts[NPUSHERS] = {0};
+ popper_arg.pusher_counts = pusher_counts;
+
+ thd_create(&popper, thd_popper, (void *)&popper_arg);
+ for (int i = 0; i < NPUSHERS; i++) {
+ thd_create(&pushers[i], thd_pusher, &pusher_arg[i]);
+ }
+
+ thd_join(popper, NULL);
+ for (int i = 0; i < NPUSHERS; i++) {
+ thd_join(pushers[i], NULL);
+ }
+
+ for (int i = 0; i < NPUSHERS; i++) {
+ expect_d_eq(NELEMS_PER_PUSHER, pusher_counts[i], "");
+ }
+
+ free(elems);
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_simple,
+ test_push_single_or_batch,
+ test_multi_op,
+ test_multiple_threads);
+}
diff --git a/deps/jemalloc/test/unit/mq.c b/deps/jemalloc/test/unit/mq.c
index 57a4d54e4..f833f77ce 100644
--- a/deps/jemalloc/test/unit/mq.c
+++ b/deps/jemalloc/test/unit/mq.c
@@ -13,17 +13,17 @@ TEST_BEGIN(test_mq_basic) {
mq_t mq;
mq_msg_t msg;
- assert_false(mq_init(&mq), "Unexpected mq_init() failure");
- assert_u_eq(mq_count(&mq), 0, "mq should be empty");
- assert_ptr_null(mq_tryget(&mq),
+ expect_false(mq_init(&mq), "Unexpected mq_init() failure");
+ expect_u_eq(mq_count(&mq), 0, "mq should be empty");
+ expect_ptr_null(mq_tryget(&mq),
"mq_tryget() should fail when the queue is empty");
mq_put(&mq, &msg);
- assert_u_eq(mq_count(&mq), 1, "mq should contain one message");
- assert_ptr_eq(mq_tryget(&mq), &msg, "mq_tryget() should return msg");
+ expect_u_eq(mq_count(&mq), 1, "mq should contain one message");
+ expect_ptr_eq(mq_tryget(&mq), &msg, "mq_tryget() should return msg");
mq_put(&mq, &msg);
- assert_ptr_eq(mq_get(&mq), &msg, "mq_get() should return msg");
+ expect_ptr_eq(mq_get(&mq), &msg, "mq_get() should return msg");
mq_fini(&mq);
}
@@ -36,7 +36,7 @@ thd_receiver_start(void *arg) {
for (i = 0; i < (NSENDERS * NMSGS); i++) {
mq_msg_t *msg = mq_get(mq);
- assert_ptr_not_null(msg, "mq_get() should never return NULL");
+ expect_ptr_not_null(msg, "mq_get() should never return NULL");
dallocx(msg, 0);
}
return NULL;
@@ -51,7 +51,7 @@ thd_sender_start(void *arg) {
mq_msg_t *msg;
void *p;
p = mallocx(sizeof(mq_msg_t), 0);
- assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ expect_ptr_not_null(p, "Unexpected mallocx() failure");
msg = (mq_msg_t *)p;
mq_put(mq, msg);
}
@@ -64,7 +64,7 @@ TEST_BEGIN(test_mq_threaded) {
thd_t senders[NSENDERS];
unsigned i;
- assert_false(mq_init(&mq), "Unexpected mq_init() failure");
+ expect_false(mq_init(&mq), "Unexpected mq_init() failure");
thd_create(&receiver, thd_receiver_start, (void *)&mq);
for (i = 0; i < NSENDERS; i++) {
diff --git a/deps/jemalloc/test/unit/mtx.c b/deps/jemalloc/test/unit/mtx.c
index 424587b03..4aeebc13f 100644
--- a/deps/jemalloc/test/unit/mtx.c
+++ b/deps/jemalloc/test/unit/mtx.c
@@ -6,7 +6,7 @@
TEST_BEGIN(test_mtx_basic) {
mtx_t mtx;
- assert_false(mtx_init(&mtx), "Unexpected mtx_init() failure");
+ expect_false(mtx_init(&mtx), "Unexpected mtx_init() failure");
mtx_lock(&mtx);
mtx_unlock(&mtx);
mtx_fini(&mtx);
@@ -36,7 +36,7 @@ TEST_BEGIN(test_mtx_race) {
thd_t thds[NTHREADS];
unsigned i;
- assert_false(mtx_init(&arg.mtx), "Unexpected mtx_init() failure");
+ expect_false(mtx_init(&arg.mtx), "Unexpected mtx_init() failure");
arg.x = 0;
for (i = 0; i < NTHREADS; i++) {
thd_create(&thds[i], thd_start, (void *)&arg);
@@ -44,7 +44,7 @@ TEST_BEGIN(test_mtx_race) {
for (i = 0; i < NTHREADS; i++) {
thd_join(thds[i], NULL);
}
- assert_u_eq(arg.x, NTHREADS * NINCRS,
+ expect_u_eq(arg.x, NTHREADS * NINCRS,
"Race-related counter corruption");
}
TEST_END
diff --git a/deps/jemalloc/test/unit/nstime.c b/deps/jemalloc/test/unit/nstime.c
index f31378058..56238ab3b 100644
--- a/deps/jemalloc/test/unit/nstime.c
+++ b/deps/jemalloc/test/unit/nstime.c
@@ -6,9 +6,9 @@ TEST_BEGIN(test_nstime_init) {
nstime_t nst;
nstime_init(&nst, 42000000043);
- assert_u64_eq(nstime_ns(&nst), 42000000043, "ns incorrectly read");
- assert_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read");
- assert_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read");
+ expect_u64_eq(nstime_ns(&nst), 42000000043, "ns incorrectly read");
+ expect_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read");
+ expect_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read");
}
TEST_END
@@ -16,8 +16,8 @@ TEST_BEGIN(test_nstime_init2) {
nstime_t nst;
nstime_init2(&nst, 42, 43);
- assert_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read");
- assert_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read");
+ expect_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read");
+ expect_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read");
}
TEST_END
@@ -25,10 +25,10 @@ TEST_BEGIN(test_nstime_copy) {
nstime_t nsta, nstb;
nstime_init2(&nsta, 42, 43);
- nstime_init(&nstb, 0);
+ nstime_init_zero(&nstb);
nstime_copy(&nstb, &nsta);
- assert_u64_eq(nstime_sec(&nstb), 42, "sec incorrectly copied");
- assert_u64_eq(nstime_nsec(&nstb), 43, "nsec incorrectly copied");
+ expect_u64_eq(nstime_sec(&nstb), 42, "sec incorrectly copied");
+ expect_u64_eq(nstime_nsec(&nstb), 43, "nsec incorrectly copied");
}
TEST_END
@@ -37,31 +37,31 @@ TEST_BEGIN(test_nstime_compare) {
nstime_init2(&nsta, 42, 43);
nstime_copy(&nstb, &nsta);
- assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Times should be equal");
- assert_d_eq(nstime_compare(&nstb, &nsta), 0, "Times should be equal");
+ expect_d_eq(nstime_compare(&nsta, &nstb), 0, "Times should be equal");
+ expect_d_eq(nstime_compare(&nstb, &nsta), 0, "Times should be equal");
nstime_init2(&nstb, 42, 42);
- assert_d_eq(nstime_compare(&nsta, &nstb), 1,
+ expect_d_eq(nstime_compare(&nsta, &nstb), 1,
"nsta should be greater than nstb");
- assert_d_eq(nstime_compare(&nstb, &nsta), -1,
+ expect_d_eq(nstime_compare(&nstb, &nsta), -1,
"nstb should be less than nsta");
nstime_init2(&nstb, 42, 44);
- assert_d_eq(nstime_compare(&nsta, &nstb), -1,
+ expect_d_eq(nstime_compare(&nsta, &nstb), -1,
"nsta should be less than nstb");
- assert_d_eq(nstime_compare(&nstb, &nsta), 1,
+ expect_d_eq(nstime_compare(&nstb, &nsta), 1,
"nstb should be greater than nsta");
nstime_init2(&nstb, 41, BILLION - 1);
- assert_d_eq(nstime_compare(&nsta, &nstb), 1,
+ expect_d_eq(nstime_compare(&nsta, &nstb), 1,
"nsta should be greater than nstb");
- assert_d_eq(nstime_compare(&nstb, &nsta), -1,
+ expect_d_eq(nstime_compare(&nstb, &nsta), -1,
"nstb should be less than nsta");
nstime_init2(&nstb, 43, 0);
- assert_d_eq(nstime_compare(&nsta, &nstb), -1,
+ expect_d_eq(nstime_compare(&nsta, &nstb), -1,
"nsta should be less than nstb");
- assert_d_eq(nstime_compare(&nstb, &nsta), 1,
+ expect_d_eq(nstime_compare(&nstb, &nsta), 1,
"nstb should be greater than nsta");
}
TEST_END
@@ -73,14 +73,14 @@ TEST_BEGIN(test_nstime_add) {
nstime_copy(&nstb, &nsta);
nstime_add(&nsta, &nstb);
nstime_init2(&nstb, 84, 86);
- assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ expect_d_eq(nstime_compare(&nsta, &nstb), 0,
"Incorrect addition result");
nstime_init2(&nsta, 42, BILLION - 1);
nstime_copy(&nstb, &nsta);
nstime_add(&nsta, &nstb);
nstime_init2(&nstb, 85, BILLION - 2);
- assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ expect_d_eq(nstime_compare(&nsta, &nstb), 0,
"Incorrect addition result");
}
TEST_END
@@ -91,13 +91,13 @@ TEST_BEGIN(test_nstime_iadd) {
nstime_init2(&nsta, 42, BILLION - 1);
nstime_iadd(&nsta, 1);
nstime_init2(&nstb, 43, 0);
- assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ expect_d_eq(nstime_compare(&nsta, &nstb), 0,
"Incorrect addition result");
nstime_init2(&nsta, 42, 1);
nstime_iadd(&nsta, BILLION + 1);
nstime_init2(&nstb, 43, 2);
- assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ expect_d_eq(nstime_compare(&nsta, &nstb), 0,
"Incorrect addition result");
}
TEST_END
@@ -108,15 +108,15 @@ TEST_BEGIN(test_nstime_subtract) {
nstime_init2(&nsta, 42, 43);
nstime_copy(&nstb, &nsta);
nstime_subtract(&nsta, &nstb);
- nstime_init(&nstb, 0);
- assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ nstime_init_zero(&nstb);
+ expect_d_eq(nstime_compare(&nsta, &nstb), 0,
"Incorrect subtraction result");
nstime_init2(&nsta, 42, 43);
nstime_init2(&nstb, 41, 44);
nstime_subtract(&nsta, &nstb);
nstime_init2(&nstb, 0, BILLION - 1);
- assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ expect_d_eq(nstime_compare(&nsta, &nstb), 0,
"Incorrect subtraction result");
}
TEST_END
@@ -126,14 +126,14 @@ TEST_BEGIN(test_nstime_isubtract) {
nstime_init2(&nsta, 42, 43);
nstime_isubtract(&nsta, 42*BILLION + 43);
- nstime_init(&nstb, 0);
- assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ nstime_init_zero(&nstb);
+ expect_d_eq(nstime_compare(&nsta, &nstb), 0,
"Incorrect subtraction result");
nstime_init2(&nsta, 42, 43);
nstime_isubtract(&nsta, 41*BILLION + 44);
nstime_init2(&nstb, 0, BILLION - 1);
- assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ expect_d_eq(nstime_compare(&nsta, &nstb), 0,
"Incorrect subtraction result");
}
TEST_END
@@ -144,13 +144,13 @@ TEST_BEGIN(test_nstime_imultiply) {
nstime_init2(&nsta, 42, 43);
nstime_imultiply(&nsta, 10);
nstime_init2(&nstb, 420, 430);
- assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ expect_d_eq(nstime_compare(&nsta, &nstb), 0,
"Incorrect multiplication result");
nstime_init2(&nsta, 42, 666666666);
nstime_imultiply(&nsta, 3);
nstime_init2(&nstb, 127, 999999998);
- assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ expect_d_eq(nstime_compare(&nsta, &nstb), 0,
"Incorrect multiplication result");
}
TEST_END
@@ -162,14 +162,14 @@ TEST_BEGIN(test_nstime_idivide) {
nstime_copy(&nstb, &nsta);
nstime_imultiply(&nsta, 10);
nstime_idivide(&nsta, 10);
- assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ expect_d_eq(nstime_compare(&nsta, &nstb), 0,
"Incorrect division result");
nstime_init2(&nsta, 42, 666666666);
nstime_copy(&nstb, &nsta);
nstime_imultiply(&nsta, 3);
nstime_idivide(&nsta, 3);
- assert_d_eq(nstime_compare(&nsta, &nstb), 0,
+ expect_d_eq(nstime_compare(&nsta, &nstb), 0,
"Incorrect division result");
}
TEST_END
@@ -180,7 +180,7 @@ TEST_BEGIN(test_nstime_divide) {
nstime_init2(&nsta, 42, 43);
nstime_copy(&nstb, &nsta);
nstime_imultiply(&nsta, 10);
- assert_u64_eq(nstime_divide(&nsta, &nstb), 10,
+ expect_u64_eq(nstime_divide(&nsta, &nstb), 10,
"Incorrect division result");
nstime_init2(&nsta, 42, 43);
@@ -188,7 +188,7 @@ TEST_BEGIN(test_nstime_divide) {
nstime_imultiply(&nsta, 10);
nstime_init(&nstc, 1);
nstime_add(&nsta, &nstc);
- assert_u64_eq(nstime_divide(&nsta, &nstb), 10,
+ expect_u64_eq(nstime_divide(&nsta, &nstb), 10,
"Incorrect division result");
nstime_init2(&nsta, 42, 43);
@@ -196,40 +196,43 @@ TEST_BEGIN(test_nstime_divide) {
nstime_imultiply(&nsta, 10);
nstime_init(&nstc, 1);
nstime_subtract(&nsta, &nstc);
- assert_u64_eq(nstime_divide(&nsta, &nstb), 9,
+ expect_u64_eq(nstime_divide(&nsta, &nstb), 9,
"Incorrect division result");
}
TEST_END
-TEST_BEGIN(test_nstime_monotonic) {
- nstime_monotonic();
-}
-TEST_END
+void
+test_nstime_since_once(nstime_t *t) {
+ nstime_t old_t;
+ nstime_copy(&old_t, t);
-TEST_BEGIN(test_nstime_update) {
- nstime_t nst;
+ uint64_t ns_since = nstime_ns_since(t);
+ nstime_update(t);
- nstime_init(&nst, 0);
+ nstime_t new_t;
+ nstime_copy(&new_t, t);
+ nstime_subtract(&new_t, &old_t);
- assert_false(nstime_update(&nst), "Basic time update failed.");
+ expect_u64_ge(nstime_ns(&new_t), ns_since,
+ "Incorrect time since result");
+}
- /* Only Rip Van Winkle sleeps this long. */
- {
- nstime_t addend;
- nstime_init2(&addend, 631152000, 0);
- nstime_add(&nst, &addend);
- }
- {
- nstime_t nst0;
- nstime_copy(&nst0, &nst);
- assert_true(nstime_update(&nst),
- "Update should detect time roll-back.");
- assert_d_eq(nstime_compare(&nst, &nst0), 0,
- "Time should not have been modified");
+TEST_BEGIN(test_nstime_ns_since) {
+ nstime_t t;
+
+ nstime_init_update(&t);
+ for (uint64_t i = 0; i < 10000; i++) {
+ /* Keeps updating t and verifies ns_since is valid. */
+ test_nstime_since_once(&t);
}
}
TEST_END
+TEST_BEGIN(test_nstime_monotonic) {
+ nstime_monotonic();
+}
+TEST_END
+
int
main(void) {
return test(
@@ -244,6 +247,6 @@ main(void) {
test_nstime_imultiply,
test_nstime_idivide,
test_nstime_divide,
- test_nstime_monotonic,
- test_nstime_update);
+ test_nstime_ns_since,
+ test_nstime_monotonic);
}
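
The new ns_since test replaces the old roll-back ("Rip Van Winkle") check; a hedged sketch of the measurement pattern it validates (internal nstime API, test harness only):

static void
nstime_elapsed_sketch(void) {
	nstime_t start;
	nstime_init_update(&start);	/* Snapshot "now". */

	/* ... work being timed ... */

	/* Nanoseconds elapsed since the snapshot. */
	uint64_t elapsed_ns = nstime_ns_since(&start);
	malloc_printf("elapsed: %"FMTu64" ns\n", elapsed_ns);

	/* Re-arm the snapshot for the next measurement. */
	nstime_update(&start);
}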
diff --git a/deps/jemalloc/test/unit/oversize_threshold.c b/deps/jemalloc/test/unit/oversize_threshold.c
new file mode 100644
index 000000000..44a8f76a4
--- /dev/null
+++ b/deps/jemalloc/test/unit/oversize_threshold.c
@@ -0,0 +1,133 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/ctl.h"
+
+static void
+arena_mallctl(const char *mallctl_str, unsigned arena, void *oldp,
+ size_t *oldlen, void *newp, size_t newlen) {
+ int err;
+ char buf[100];
+ malloc_snprintf(buf, sizeof(buf), mallctl_str, arena);
+
+ err = mallctl(buf, oldp, oldlen, newp, newlen);
+ expect_d_eq(0, err, "Mallctl failed; %s", buf);
+}
+
+TEST_BEGIN(test_oversize_threshold_get_set) {
+ int err;
+ size_t old_threshold;
+ size_t new_threshold;
+ size_t threshold_sz = sizeof(old_threshold);
+
+ unsigned arena;
+ size_t arena_sz = sizeof(arena);
+ err = mallctl("arenas.create", (void *)&arena, &arena_sz, NULL, 0);
+ expect_d_eq(0, err, "Arena creation failed");
+
+ /* Just a write. */
+ new_threshold = 1024 * 1024;
+ arena_mallctl("arena.%u.oversize_threshold", arena, NULL, NULL,
+ &new_threshold, threshold_sz);
+
+ /* Read and write */
+ new_threshold = 2 * 1024 * 1024;
+ arena_mallctl("arena.%u.oversize_threshold", arena, &old_threshold,
+ &threshold_sz, &new_threshold, threshold_sz);
+ expect_zu_eq(1024 * 1024, old_threshold, "Should have read old value");
+
+ /* Just a read */
+ arena_mallctl("arena.%u.oversize_threshold", arena, &old_threshold,
+ &threshold_sz, NULL, 0);
+ expect_zu_eq(2 * 1024 * 1024, old_threshold, "Should have read old value");
+}
+TEST_END
+
+static size_t max_purged = 0;
+static bool
+purge_forced_record_max(extent_hooks_t* hooks, void *addr, size_t sz,
+ size_t offset, size_t length, unsigned arena_ind) {
+ if (length > max_purged) {
+ max_purged = length;
+ }
+ return false;
+}
+
+static bool
+dalloc_record_max(extent_hooks_t *extent_hooks, void *addr, size_t sz,
+ bool committed, unsigned arena_ind) {
+ if (sz > max_purged) {
+ max_purged = sz;
+ }
+ return false;
+}
+
+extent_hooks_t max_recording_extent_hooks;
+
+TEST_BEGIN(test_oversize_threshold) {
+ max_recording_extent_hooks = ehooks_default_extent_hooks;
+ max_recording_extent_hooks.purge_forced = &purge_forced_record_max;
+ max_recording_extent_hooks.dalloc = &dalloc_record_max;
+
+ extent_hooks_t *extent_hooks = &max_recording_extent_hooks;
+
+ int err;
+
+ unsigned arena;
+ size_t arena_sz = sizeof(arena);
+ err = mallctl("arenas.create", (void *)&arena, &arena_sz, NULL, 0);
+ expect_d_eq(0, err, "Arena creation failed");
+ arena_mallctl("arena.%u.extent_hooks", arena, NULL, NULL, &extent_hooks,
+ sizeof(extent_hooks));
+
+ /*
+ * This test will fundamentally race with purging, since we're going to
+ * check the dirty stats to see if our oversized allocation got purged.
+ * We don't want other purging to happen accidentally. We can't just
+ * disable purging entirely, though, since that will also disable
+ * oversize purging. Just set purging intervals to be very large.
+ */
+ ssize_t decay_ms = 100 * 1000;
+ ssize_t decay_ms_sz = sizeof(decay_ms);
+ arena_mallctl("arena.%u.dirty_decay_ms", arena, NULL, NULL, &decay_ms,
+ decay_ms_sz);
+ arena_mallctl("arena.%u.muzzy_decay_ms", arena, NULL, NULL, &decay_ms,
+ decay_ms_sz);
+
+ /* Clean everything out. */
+ arena_mallctl("arena.%u.purge", arena, NULL, NULL, NULL, 0);
+ max_purged = 0;
+
+ /* Set threshold to 1MB. */
+ size_t threshold = 1024 * 1024;
+ size_t threshold_sz = sizeof(threshold);
+ arena_mallctl("arena.%u.oversize_threshold", arena, NULL, NULL,
+ &threshold, threshold_sz);
+
+ /* Allocating and freeing half a megabyte should leave them dirty. */
+ void *ptr = mallocx(512 * 1024, MALLOCX_ARENA(arena));
+ dallocx(ptr, MALLOCX_TCACHE_NONE);
+ if (!is_background_thread_enabled()) {
+ expect_zu_lt(max_purged, 512 * 1024, "Expected no 512k purge");
+ }
+
+ /* Purge again to reset everything out. */
+ arena_mallctl("arena.%u.purge", arena, NULL, NULL, NULL, 0);
+ max_purged = 0;
+
+ /*
+ * Allocating and freeing 2 megabytes should have them purged because of
+ * the oversize threshold.
+ */
+ ptr = mallocx(2 * 1024 * 1024, MALLOCX_ARENA(arena));
+ dallocx(ptr, MALLOCX_TCACHE_NONE);
+ expect_zu_ge(max_purged, 2 * 1024 * 1024, "Expected a 2MB purge");
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_oversize_threshold_get_set,
+ test_oversize_threshold);
+}
+
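A condensed, hedged sketch of the mallctl plumbing driven above, assuming a freshly created arena; "arena.<i>.oversize_threshold" is the per-arena knob the new test reads and writes:

static void
oversize_threshold_sketch(void) {
	unsigned arena;
	size_t arena_sz = sizeof(arena);
	expect_d_eq(mallctl("arenas.create", (void *)&arena, &arena_sz, NULL,
	    0), 0, "Arena creation failed");

	char name[100];
	malloc_snprintf(name, sizeof(name), "arena.%u.oversize_threshold",
	    arena);

	/* Per the assertions above, frees larger than this (1MB) purge eagerly. */
	size_t threshold = 1024 * 1024;
	expect_d_eq(mallctl(name, NULL, NULL, (void *)&threshold,
	    sizeof(threshold)), 0, "Failed to set oversize threshold");
}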
diff --git a/deps/jemalloc/test/unit/pa.c b/deps/jemalloc/test/unit/pa.c
new file mode 100644
index 000000000..b1e2f6e9e
--- /dev/null
+++ b/deps/jemalloc/test/unit/pa.c
@@ -0,0 +1,126 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/pa.h"
+
+static void *
+alloc_hook(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
+ void *ret = pages_map(new_addr, size, alignment, commit);
+ return ret;
+}
+
+static bool
+merge_hook(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
+ void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
+ return !maps_coalesce;
+}
+
+static bool
+split_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
+ return !maps_coalesce;
+}
+
+static void
+init_test_extent_hooks(extent_hooks_t *hooks) {
+ /*
+ * The default hooks are mostly fine for testing. A few of them,
+ * though, access globals (alloc for dss setting in an arena, split and
+ * merge touch the global emap to find head state). The first of these
+ * can be fixed by keeping that state with the hooks, where it logically
+ * belongs. The second, though, we can only fix when we use the extent
+ * hook API.
+ */
+ memcpy(hooks, &ehooks_default_extent_hooks, sizeof(extent_hooks_t));
+ hooks->alloc = &alloc_hook;
+ hooks->merge = &merge_hook;
+ hooks->split = &split_hook;
+}
+
+typedef struct test_data_s test_data_t;
+struct test_data_s {
+ pa_shard_t shard;
+ pa_central_t central;
+ base_t *base;
+ emap_t emap;
+ pa_shard_stats_t stats;
+ malloc_mutex_t stats_mtx;
+ extent_hooks_t hooks;
+};
+
+test_data_t *init_test_data(ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) {
+ test_data_t *test_data = calloc(1, sizeof(test_data_t));
+ assert_ptr_not_null(test_data, "");
+ init_test_extent_hooks(&test_data->hooks);
+
+ base_t *base = base_new(TSDN_NULL, /* ind */ 1, &test_data->hooks,
+ /* metadata_use_hooks */ true);
+ assert_ptr_not_null(base, "");
+
+ test_data->base = base;
+ bool err = emap_init(&test_data->emap, test_data->base,
+ /* zeroed */ true);
+ assert_false(err, "");
+
+ nstime_t time;
+ nstime_init(&time, 0);
+
+ err = pa_central_init(&test_data->central, base, opt_hpa,
+ &hpa_hooks_default);
+ assert_false(err, "");
+
+ const size_t pa_oversize_threshold = 8 * 1024 * 1024;
+ err = pa_shard_init(TSDN_NULL, &test_data->shard, &test_data->central,
+ &test_data->emap, test_data->base, /* ind */ 1, &test_data->stats,
+ &test_data->stats_mtx, &time, pa_oversize_threshold, dirty_decay_ms,
+ muzzy_decay_ms);
+ assert_false(err, "");
+
+ return test_data;
+}
+
+void destroy_test_data(test_data_t *data) {
+ base_delete(TSDN_NULL, data->base);
+ free(data);
+}
+
+static void *
+do_alloc_free_purge(void *arg) {
+ test_data_t *test_data = (test_data_t *)arg;
+ for (int i = 0; i < 10 * 1000; i++) {
+ bool deferred_work_generated = false;
+ edata_t *edata = pa_alloc(TSDN_NULL, &test_data->shard, PAGE,
+ PAGE, /* slab */ false, /* szind */ 0, /* zero */ false,
+ /* guarded */ false, &deferred_work_generated);
+ assert_ptr_not_null(edata, "");
+ pa_dalloc(TSDN_NULL, &test_data->shard, edata,
+ &deferred_work_generated);
+ malloc_mutex_lock(TSDN_NULL,
+ &test_data->shard.pac.decay_dirty.mtx);
+ pac_decay_all(TSDN_NULL, &test_data->shard.pac,
+ &test_data->shard.pac.decay_dirty,
+ &test_data->shard.pac.stats->decay_dirty,
+ &test_data->shard.pac.ecache_dirty, true);
+ malloc_mutex_unlock(TSDN_NULL,
+ &test_data->shard.pac.decay_dirty.mtx);
+ }
+ return NULL;
+}
+
+TEST_BEGIN(test_alloc_free_purge_thds) {
+ test_data_t *test_data = init_test_data(0, 0);
+ thd_t thds[4];
+ for (int i = 0; i < 4; i++) {
+ thd_create(&thds[i], do_alloc_free_purge, test_data);
+ }
+ for (int i = 0; i < 4; i++) {
+ thd_join(thds[i], NULL);
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_alloc_free_purge_thds);
+}
diff --git a/deps/jemalloc/test/unit/pack.c b/deps/jemalloc/test/unit/pack.c
index fc188b003..e6392825b 100644
--- a/deps/jemalloc/test/unit/pack.c
+++ b/deps/jemalloc/test/unit/pack.c
@@ -22,7 +22,7 @@ binind_compute(void) {
unsigned nbins, i;
sz = sizeof(nbins);
- assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
+ expect_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
"Unexpected mallctl failure");
for (i = 0; i < nbins; i++) {
@@ -30,12 +30,12 @@ binind_compute(void) {
size_t miblen = sizeof(mib)/sizeof(size_t);
size_t size;
- assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib,
+ expect_d_eq(mallctlnametomib("arenas.bin.0.size", mib,
&miblen), 0, "Unexpected mallctlnametomb failure");
mib[2] = (size_t)i;
sz = sizeof(size);
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &sz, NULL,
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&size, &sz, NULL,
0), 0, "Unexpected mallctlbymib failure");
if (size == SZ) {
return i;
@@ -54,11 +54,11 @@ nregs_per_run_compute(void) {
size_t mib[4];
size_t miblen = sizeof(mib)/sizeof(size_t);
- assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0,
+ expect_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0,
"Unexpected mallctlnametomb failure");
mib[2] = (size_t)binind;
sz = sizeof(nregs);
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&nregs, &sz, NULL,
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&nregs, &sz, NULL,
0), 0, "Unexpected mallctlbymib failure");
return nregs;
}
@@ -69,7 +69,7 @@ arenas_create_mallctl(void) {
size_t sz;
sz = sizeof(arena_ind);
- assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
+ expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
0, "Error in arenas.create");
return arena_ind;
@@ -80,10 +80,10 @@ arena_reset_mallctl(unsigned arena_ind) {
size_t mib[3];
size_t miblen = sizeof(mib)/sizeof(size_t);
- assert_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 0,
+ expect_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
mib[1] = (size_t)arena_ind;
- assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
"Unexpected mallctlbymib() failure");
}
@@ -105,7 +105,7 @@ TEST_BEGIN(test_pack) {
for (j = 0; j < nregs_per_run; j++) {
void *p = mallocx(SZ, MALLOCX_ARENA(arena_ind) |
MALLOCX_TCACHE_NONE);
- assert_ptr_not_null(p,
+ expect_ptr_not_null(p,
"Unexpected mallocx(%zu, MALLOCX_ARENA(%u) |"
" MALLOCX_TCACHE_NONE) failure, run=%zu, reg=%zu",
SZ, arena_ind, i, j);
@@ -148,7 +148,7 @@ TEST_BEGIN(test_pack) {
}
p = mallocx(SZ, MALLOCX_ARENA(arena_ind) |
MALLOCX_TCACHE_NONE);
- assert_ptr_eq(p, ptrs[(i * nregs_per_run) + j],
+ expect_ptr_eq(p, ptrs[(i * nregs_per_run) + j],
"Unexpected refill discrepancy, run=%zu, reg=%zu\n",
i, j);
}
diff --git a/deps/jemalloc/test/unit/pages.c b/deps/jemalloc/test/unit/pages.c
index ee729eece..8dfd1a72c 100644
--- a/deps/jemalloc/test/unit/pages.c
+++ b/deps/jemalloc/test/unit/pages.c
@@ -8,13 +8,13 @@ TEST_BEGIN(test_pages_huge) {
alloc_size = HUGEPAGE * 2 - PAGE;
commit = true;
pages = pages_map(NULL, alloc_size, PAGE, &commit);
- assert_ptr_not_null(pages, "Unexpected pages_map() error");
+ expect_ptr_not_null(pages, "Unexpected pages_map() error");
if (init_system_thp_mode == thp_mode_default) {
hugepage = (void *)(ALIGNMENT_CEILING((uintptr_t)pages, HUGEPAGE));
- assert_b_ne(pages_huge(hugepage, HUGEPAGE), have_madvise_huge,
+ expect_b_ne(pages_huge(hugepage, HUGEPAGE), have_madvise_huge,
"Unexpected pages_huge() result");
- assert_false(pages_nohuge(hugepage, HUGEPAGE),
+ expect_false(pages_nohuge(hugepage, HUGEPAGE),
"Unexpected pages_nohuge() result");
}
diff --git a/deps/jemalloc/test/unit/peak.c b/deps/jemalloc/test/unit/peak.c
new file mode 100644
index 000000000..11129785f
--- /dev/null
+++ b/deps/jemalloc/test/unit/peak.c
@@ -0,0 +1,47 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/peak.h"
+
+TEST_BEGIN(test_peak) {
+ peak_t peak = PEAK_INITIALIZER;
+ expect_u64_eq(0, peak_max(&peak),
+ "Peak should be zero at initialization");
+ peak_update(&peak, 100, 50);
+ expect_u64_eq(50, peak_max(&peak),
+ "Missed update");
+ peak_update(&peak, 100, 100);
+ expect_u64_eq(50, peak_max(&peak), "Dallocs shouldn't change peak");
+ peak_update(&peak, 100, 200);
+ expect_u64_eq(50, peak_max(&peak), "Dallocs shouldn't change peak");
+ peak_update(&peak, 200, 200);
+ expect_u64_eq(50, peak_max(&peak), "Haven't reached peak again");
+ peak_update(&peak, 300, 200);
+ expect_u64_eq(100, peak_max(&peak), "Missed an update.");
+ peak_set_zero(&peak, 300, 200);
+ expect_u64_eq(0, peak_max(&peak), "No effect from zeroing");
+ peak_update(&peak, 300, 300);
+ expect_u64_eq(0, peak_max(&peak), "Dalloc shouldn't change peak");
+ peak_update(&peak, 400, 300);
+ expect_u64_eq(0, peak_max(&peak), "Should still be net negative");
+ peak_update(&peak, 500, 300);
+ expect_u64_eq(100, peak_max(&peak), "Missed an update.");
+ /*
+ * Above, we set to zero while a net allocator; let's try as a
+ * net-deallocator.
+ */
+ peak_set_zero(&peak, 600, 700);
+ expect_u64_eq(0, peak_max(&peak), "No effect from zeroing.");
+ peak_update(&peak, 600, 800);
+ expect_u64_eq(0, peak_max(&peak), "Dalloc shouldn't change peak.");
+ peak_update(&peak, 700, 800);
+ expect_u64_eq(0, peak_max(&peak), "Should still be net negative.");
+ peak_update(&peak, 800, 800);
+ expect_u64_eq(100, peak_max(&peak), "Missed an update.");
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_peak);
+}
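
As the assertions above suggest, peak_update() takes cumulative allocation and deallocation byte counts, peak_max() reports the high-water mark of their difference, and peak_set_zero() re-bases that mark at the current usage. A small sketch restating those semantics:

static void
peak_sketch(void) {
	peak_t peak = PEAK_INITIALIZER;

	peak_update(&peak, /* alloc */ 300, /* dalloc */ 100);
	expect_u64_eq(200, peak_max(&peak), "High-water mark should be 200");

	/* Net usage drops to 50, but the recorded peak is sticky. */
	peak_update(&peak, 350, 300);
	expect_u64_eq(200, peak_max(&peak), "Peak should not decrease");

	/* Re-base at current usage; the mark starts over from zero. */
	peak_set_zero(&peak, 350, 300);
	expect_u64_eq(0, peak_max(&peak), "Peak should read zero after reset");
}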
diff --git a/deps/jemalloc/test/unit/ph.c b/deps/jemalloc/test/unit/ph.c
index 88bf56f88..28f5e488e 100644
--- a/deps/jemalloc/test/unit/ph.c
+++ b/deps/jemalloc/test/unit/ph.c
@@ -3,11 +3,12 @@
#include "jemalloc/internal/ph.h"
typedef struct node_s node_t;
+ph_structs(heap, node_t);
struct node_s {
#define NODE_MAGIC 0x9823af7e
uint32_t magic;
- phn(node_t) link;
+ heap_link_t link;
uint64_t key;
};
@@ -30,14 +31,28 @@ node_cmp(const node_t *a, const node_t *b) {
static int
node_cmp_magic(const node_t *a, const node_t *b) {
- assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic");
- assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic");
+ expect_u32_eq(a->magic, NODE_MAGIC, "Bad magic");
+ expect_u32_eq(b->magic, NODE_MAGIC, "Bad magic");
return node_cmp(a, b);
}
-typedef ph(node_t) heap_t;
-ph_gen(static, heap_, heap_t, node_t, link, node_cmp_magic);
+ph_gen(static, heap, node_t, link, node_cmp_magic);
+
+static node_t *
+node_next_get(const node_t *node) {
+ return phn_next_get((node_t *)node, offsetof(node_t, link));
+}
+
+static node_t *
+node_prev_get(const node_t *node) {
+ return phn_prev_get((node_t *)node, offsetof(node_t, link));
+}
+
+static node_t *
+node_lchild_get(const node_t *node) {
+ return phn_lchild_get((node_t *)node, offsetof(node_t, link));
+}
static void
node_print(const node_t *node, unsigned depth) {
@@ -49,14 +64,14 @@ node_print(const node_t *node, unsigned depth) {
}
malloc_printf("%2"FMTu64"\n", node->key);
- leftmost_child = phn_lchild_get(node_t, link, node);
+ leftmost_child = node_lchild_get(node);
if (leftmost_child == NULL) {
return;
}
node_print(leftmost_child, depth + 1);
- for (sibling = phn_next_get(node_t, link, leftmost_child); sibling !=
- NULL; sibling = phn_next_get(node_t, link, sibling)) {
+ for (sibling = node_next_get(leftmost_child); sibling !=
+ NULL; sibling = node_next_get(sibling)) {
node_print(sibling, depth + 1);
}
}
@@ -66,16 +81,15 @@ heap_print(const heap_t *heap) {
node_t *auxelm;
malloc_printf("vvv heap %p vvv\n", heap);
- if (heap->ph_root == NULL) {
+ if (heap->ph.root == NULL) {
goto label_return;
}
- node_print(heap->ph_root, 0);
+ node_print(heap->ph.root, 0);
- for (auxelm = phn_next_get(node_t, link, heap->ph_root); auxelm != NULL;
- auxelm = phn_next_get(node_t, link, auxelm)) {
- assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t,
- link, auxelm)), auxelm,
+ for (auxelm = node_next_get(heap->ph.root); auxelm != NULL;
+ auxelm = node_next_get(auxelm)) {
+ expect_ptr_eq(node_next_get(node_prev_get(auxelm)), auxelm,
"auxelm's prev doesn't link to auxelm");
node_print(auxelm, 0);
}
@@ -90,22 +104,21 @@ node_validate(const node_t *node, const node_t *parent) {
node_t *leftmost_child, *sibling;
if (parent != NULL) {
- assert_d_ge(node_cmp_magic(node, parent), 0,
+ expect_d_ge(node_cmp_magic(node, parent), 0,
"Child is less than parent");
}
- leftmost_child = phn_lchild_get(node_t, link, node);
+ leftmost_child = node_lchild_get(node);
if (leftmost_child == NULL) {
return nnodes;
}
- assert_ptr_eq((void *)phn_prev_get(node_t, link, leftmost_child),
+ expect_ptr_eq(node_prev_get(leftmost_child),
(void *)node, "Leftmost child does not link to node");
nnodes += node_validate(leftmost_child, node);
- for (sibling = phn_next_get(node_t, link, leftmost_child); sibling !=
- NULL; sibling = phn_next_get(node_t, link, sibling)) {
- assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t,
- link, sibling)), sibling,
+ for (sibling = node_next_get(leftmost_child); sibling !=
+ NULL; sibling = node_next_get(sibling)) {
+ expect_ptr_eq(node_next_get(node_prev_get(sibling)), sibling,
"sibling's prev doesn't link to sibling");
nnodes += node_validate(sibling, node);
}
@@ -117,16 +130,15 @@ heap_validate(const heap_t *heap) {
unsigned nnodes = 0;
node_t *auxelm;
- if (heap->ph_root == NULL) {
+ if (heap->ph.root == NULL) {
goto label_return;
}
- nnodes += node_validate(heap->ph_root, NULL);
+ nnodes += node_validate(heap->ph.root, NULL);
- for (auxelm = phn_next_get(node_t, link, heap->ph_root); auxelm != NULL;
- auxelm = phn_next_get(node_t, link, auxelm)) {
- assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t,
- link, auxelm)), auxelm,
+ for (auxelm = node_next_get(heap->ph.root); auxelm != NULL;
+ auxelm = node_next_get(auxelm)) {
+ expect_ptr_eq(node_next_get(node_prev_get(auxelm)), auxelm,
"auxelm's prev doesn't link to auxelm");
nnodes += node_validate(auxelm, NULL);
}
@@ -142,9 +154,9 @@ TEST_BEGIN(test_ph_empty) {
heap_t heap;
heap_new(&heap);
- assert_true(heap_empty(&heap), "Heap should be empty");
- assert_ptr_null(heap_first(&heap), "Unexpected node");
- assert_ptr_null(heap_any(&heap), "Unexpected node");
+ expect_true(heap_empty(&heap), "Heap should be empty");
+ expect_ptr_null(heap_first(&heap), "Unexpected node");
+ expect_ptr_null(heap_any(&heap), "Unexpected node");
}
TEST_END
@@ -203,7 +215,7 @@ TEST_BEGIN(test_ph_random) {
for (j = 1; j <= NNODES; j++) {
/* Initialize heap and nodes. */
heap_new(&heap);
- assert_u_eq(heap_validate(&heap), 0,
+ expect_u_eq(heap_validate(&heap), 0,
"Incorrect node count");
for (k = 0; k < j; k++) {
nodes[k].magic = NODE_MAGIC;
@@ -214,34 +226,34 @@ TEST_BEGIN(test_ph_random) {
for (k = 0; k < j; k++) {
heap_insert(&heap, &nodes[k]);
if (i % 13 == 12) {
- assert_ptr_not_null(heap_any(&heap),
+ expect_ptr_not_null(heap_any(&heap),
"Heap should not be empty");
/* Trigger merging. */
- assert_ptr_not_null(heap_first(&heap),
+ expect_ptr_not_null(heap_first(&heap),
"Heap should not be empty");
}
- assert_u_eq(heap_validate(&heap), k + 1,
+ expect_u_eq(heap_validate(&heap), k + 1,
"Incorrect node count");
}
- assert_false(heap_empty(&heap),
+ expect_false(heap_empty(&heap),
"Heap should not be empty");
/* Remove nodes. */
switch (i % 6) {
case 0:
for (k = 0; k < j; k++) {
- assert_u_eq(heap_validate(&heap), j - k,
+ expect_u_eq(heap_validate(&heap), j - k,
"Incorrect node count");
node_remove(&heap, &nodes[k]);
- assert_u_eq(heap_validate(&heap), j - k
+ expect_u_eq(heap_validate(&heap), j - k
- 1, "Incorrect node count");
}
break;
case 1:
for (k = j; k > 0; k--) {
node_remove(&heap, &nodes[k-1]);
- assert_u_eq(heap_validate(&heap), k - 1,
+ expect_u_eq(heap_validate(&heap), k - 1,
"Incorrect node count");
}
break;
@@ -249,10 +261,10 @@ TEST_BEGIN(test_ph_random) {
node_t *prev = NULL;
for (k = 0; k < j; k++) {
node_t *node = node_remove_first(&heap);
- assert_u_eq(heap_validate(&heap), j - k
+ expect_u_eq(heap_validate(&heap), j - k
- 1, "Incorrect node count");
if (prev != NULL) {
- assert_d_ge(node_cmp(node,
+ expect_d_ge(node_cmp(node,
prev), 0,
"Bad removal order");
}
@@ -263,15 +275,15 @@ TEST_BEGIN(test_ph_random) {
node_t *prev = NULL;
for (k = 0; k < j; k++) {
node_t *node = heap_first(&heap);
- assert_u_eq(heap_validate(&heap), j - k,
+ expect_u_eq(heap_validate(&heap), j - k,
"Incorrect node count");
if (prev != NULL) {
- assert_d_ge(node_cmp(node,
+ expect_d_ge(node_cmp(node,
prev), 0,
"Bad removal order");
}
node_remove(&heap, node);
- assert_u_eq(heap_validate(&heap), j - k
+ expect_u_eq(heap_validate(&heap), j - k
- 1, "Incorrect node count");
prev = node;
}
@@ -279,17 +291,17 @@ TEST_BEGIN(test_ph_random) {
} case 4: {
for (k = 0; k < j; k++) {
node_remove_any(&heap);
- assert_u_eq(heap_validate(&heap), j - k
+ expect_u_eq(heap_validate(&heap), j - k
- 1, "Incorrect node count");
}
break;
} case 5: {
for (k = 0; k < j; k++) {
node_t *node = heap_any(&heap);
- assert_u_eq(heap_validate(&heap), j - k,
+ expect_u_eq(heap_validate(&heap), j - k,
"Incorrect node count");
node_remove(&heap, node);
- assert_u_eq(heap_validate(&heap), j - k
+ expect_u_eq(heap_validate(&heap), j - k
- 1, "Incorrect node count");
}
break;
@@ -297,11 +309,11 @@ TEST_BEGIN(test_ph_random) {
not_reached();
}
- assert_ptr_null(heap_first(&heap),
+ expect_ptr_null(heap_first(&heap),
"Heap should be empty");
- assert_ptr_null(heap_any(&heap),
+ expect_ptr_null(heap_any(&heap),
"Heap should be empty");
- assert_true(heap_empty(&heap), "Heap should be empty");
+ expect_true(heap_empty(&heap), "Heap should be empty");
}
}
fini_gen_rand(sfmt);
diff --git a/deps/jemalloc/test/unit/prng.c b/deps/jemalloc/test/unit/prng.c
index b5795c2f4..a6d9b014a 100644
--- a/deps/jemalloc/test/unit/prng.c
+++ b/deps/jemalloc/test/unit/prng.c
@@ -1,44 +1,44 @@
#include "test/jemalloc_test.h"
-static void
-test_prng_lg_range_u32(bool atomic) {
- atomic_u32_t sa, sb;
+TEST_BEGIN(test_prng_lg_range_u32) {
+ uint32_t sa, sb;
uint32_t ra, rb;
unsigned lg_range;
- atomic_store_u32(&sa, 42, ATOMIC_RELAXED);
- ra = prng_lg_range_u32(&sa, 32, atomic);
- atomic_store_u32(&sa, 42, ATOMIC_RELAXED);
- rb = prng_lg_range_u32(&sa, 32, atomic);
- assert_u32_eq(ra, rb,
+ sa = 42;
+ ra = prng_lg_range_u32(&sa, 32);
+ sa = 42;
+ rb = prng_lg_range_u32(&sa, 32);
+ expect_u32_eq(ra, rb,
"Repeated generation should produce repeated results");
- atomic_store_u32(&sb, 42, ATOMIC_RELAXED);
- rb = prng_lg_range_u32(&sb, 32, atomic);
- assert_u32_eq(ra, rb,
+ sb = 42;
+ rb = prng_lg_range_u32(&sb, 32);
+ expect_u32_eq(ra, rb,
"Equivalent generation should produce equivalent results");
- atomic_store_u32(&sa, 42, ATOMIC_RELAXED);
- ra = prng_lg_range_u32(&sa, 32, atomic);
- rb = prng_lg_range_u32(&sa, 32, atomic);
- assert_u32_ne(ra, rb,
+ sa = 42;
+ ra = prng_lg_range_u32(&sa, 32);
+ rb = prng_lg_range_u32(&sa, 32);
+ expect_u32_ne(ra, rb,
"Full-width results must not immediately repeat");
- atomic_store_u32(&sa, 42, ATOMIC_RELAXED);
- ra = prng_lg_range_u32(&sa, 32, atomic);
+ sa = 42;
+ ra = prng_lg_range_u32(&sa, 32);
for (lg_range = 31; lg_range > 0; lg_range--) {
- atomic_store_u32(&sb, 42, ATOMIC_RELAXED);
- rb = prng_lg_range_u32(&sb, lg_range, atomic);
- assert_u32_eq((rb & (UINT32_C(0xffffffff) << lg_range)),
+ sb = 42;
+ rb = prng_lg_range_u32(&sb, lg_range);
+ expect_u32_eq((rb & (UINT32_C(0xffffffff) << lg_range)),
0, "High order bits should be 0, lg_range=%u", lg_range);
- assert_u32_eq(rb, (ra >> (32 - lg_range)),
+ expect_u32_eq(rb, (ra >> (32 - lg_range)),
"Expected high order bits of full-width result, "
"lg_range=%u", lg_range);
}
+
}
+TEST_END
-static void
-test_prng_lg_range_u64(void) {
+TEST_BEGIN(test_prng_lg_range_u64) {
uint64_t sa, sb, ra, rb;
unsigned lg_range;
@@ -46,18 +46,18 @@ test_prng_lg_range_u64(void) {
ra = prng_lg_range_u64(&sa, 64);
sa = 42;
rb = prng_lg_range_u64(&sa, 64);
- assert_u64_eq(ra, rb,
+ expect_u64_eq(ra, rb,
"Repeated generation should produce repeated results");
sb = 42;
rb = prng_lg_range_u64(&sb, 64);
- assert_u64_eq(ra, rb,
+ expect_u64_eq(ra, rb,
"Equivalent generation should produce equivalent results");
sa = 42;
ra = prng_lg_range_u64(&sa, 64);
rb = prng_lg_range_u64(&sa, 64);
- assert_u64_ne(ra, rb,
+ expect_u64_ne(ra, rb,
"Full-width results must not immediately repeat");
sa = 42;
@@ -65,173 +65,125 @@ test_prng_lg_range_u64(void) {
for (lg_range = 63; lg_range > 0; lg_range--) {
sb = 42;
rb = prng_lg_range_u64(&sb, lg_range);
- assert_u64_eq((rb & (UINT64_C(0xffffffffffffffff) << lg_range)),
+ expect_u64_eq((rb & (UINT64_C(0xffffffffffffffff) << lg_range)),
0, "High order bits should be 0, lg_range=%u", lg_range);
- assert_u64_eq(rb, (ra >> (64 - lg_range)),
+ expect_u64_eq(rb, (ra >> (64 - lg_range)),
"Expected high order bits of full-width result, "
"lg_range=%u", lg_range);
}
}
+TEST_END
-static void
-test_prng_lg_range_zu(bool atomic) {
- atomic_zu_t sa, sb;
+TEST_BEGIN(test_prng_lg_range_zu) {
+ size_t sa, sb;
size_t ra, rb;
unsigned lg_range;
- atomic_store_zu(&sa, 42, ATOMIC_RELAXED);
- ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
- atomic_store_zu(&sa, 42, ATOMIC_RELAXED);
- rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
- assert_zu_eq(ra, rb,
+ sa = 42;
+ ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR));
+ sa = 42;
+ rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR));
+ expect_zu_eq(ra, rb,
"Repeated generation should produce repeated results");
- atomic_store_zu(&sb, 42, ATOMIC_RELAXED);
- rb = prng_lg_range_zu(&sb, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
- assert_zu_eq(ra, rb,
+ sb = 42;
+ rb = prng_lg_range_zu(&sb, ZU(1) << (3 + LG_SIZEOF_PTR));
+ expect_zu_eq(ra, rb,
"Equivalent generation should produce equivalent results");
- atomic_store_zu(&sa, 42, ATOMIC_RELAXED);
- ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
- rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
- assert_zu_ne(ra, rb,
+ sa = 42;
+ ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR));
+ rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR));
+ expect_zu_ne(ra, rb,
"Full-width results must not immediately repeat");
- atomic_store_zu(&sa, 42, ATOMIC_RELAXED);
- ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
+ sa = 42;
+ ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR));
for (lg_range = (ZU(1) << (3 + LG_SIZEOF_PTR)) - 1; lg_range > 0;
lg_range--) {
- atomic_store_zu(&sb, 42, ATOMIC_RELAXED);
- rb = prng_lg_range_zu(&sb, lg_range, atomic);
- assert_zu_eq((rb & (SIZE_T_MAX << lg_range)),
+ sb = 42;
+ rb = prng_lg_range_zu(&sb, lg_range);
+ expect_zu_eq((rb & (SIZE_T_MAX << lg_range)),
0, "High order bits should be 0, lg_range=%u", lg_range);
- assert_zu_eq(rb, (ra >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) -
+ expect_zu_eq(rb, (ra >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) -
lg_range)), "Expected high order bits of full-width "
"result, lg_range=%u", lg_range);
}
-}
-
-TEST_BEGIN(test_prng_lg_range_u32_nonatomic) {
- test_prng_lg_range_u32(false);
-}
-TEST_END
-
-TEST_BEGIN(test_prng_lg_range_u32_atomic) {
- test_prng_lg_range_u32(true);
-}
-TEST_END
-
-TEST_BEGIN(test_prng_lg_range_u64_nonatomic) {
- test_prng_lg_range_u64();
-}
-TEST_END
-TEST_BEGIN(test_prng_lg_range_zu_nonatomic) {
- test_prng_lg_range_zu(false);
}
TEST_END
-TEST_BEGIN(test_prng_lg_range_zu_atomic) {
- test_prng_lg_range_zu(true);
-}
-TEST_END
-
-static void
-test_prng_range_u32(bool atomic) {
+TEST_BEGIN(test_prng_range_u32) {
uint32_t range;
-#define MAX_RANGE 10000000
-#define RANGE_STEP 97
-#define NREPS 10
- for (range = 2; range < MAX_RANGE; range += RANGE_STEP) {
- atomic_u32_t s;
+ const uint32_t max_range = 10000000;
+ const uint32_t range_step = 97;
+ const unsigned nreps = 10;
+
+ for (range = 2; range < max_range; range += range_step) {
+ uint32_t s;
unsigned rep;
- atomic_store_u32(&s, range, ATOMIC_RELAXED);
- for (rep = 0; rep < NREPS; rep++) {
- uint32_t r = prng_range_u32(&s, range, atomic);
+ s = range;
+ for (rep = 0; rep < nreps; rep++) {
+ uint32_t r = prng_range_u32(&s, range);
- assert_u32_lt(r, range, "Out of range");
+ expect_u32_lt(r, range, "Out of range");
}
}
}
+TEST_END
-static void
-test_prng_range_u64(void) {
+TEST_BEGIN(test_prng_range_u64) {
uint64_t range;
-#define MAX_RANGE 10000000
-#define RANGE_STEP 97
-#define NREPS 10
- for (range = 2; range < MAX_RANGE; range += RANGE_STEP) {
+ const uint64_t max_range = 10000000;
+ const uint64_t range_step = 97;
+ const unsigned nreps = 10;
+
+ for (range = 2; range < max_range; range += range_step) {
uint64_t s;
unsigned rep;
s = range;
- for (rep = 0; rep < NREPS; rep++) {
+ for (rep = 0; rep < nreps; rep++) {
uint64_t r = prng_range_u64(&s, range);
- assert_u64_lt(r, range, "Out of range");
+ expect_u64_lt(r, range, "Out of range");
}
}
}
+TEST_END
-static void
-test_prng_range_zu(bool atomic) {
+TEST_BEGIN(test_prng_range_zu) {
size_t range;
-#define MAX_RANGE 10000000
-#define RANGE_STEP 97
-#define NREPS 10
- for (range = 2; range < MAX_RANGE; range += RANGE_STEP) {
- atomic_zu_t s;
+ const size_t max_range = 10000000;
+ const size_t range_step = 97;
+ const unsigned nreps = 10;
+
+
+ for (range = 2; range < max_range; range += range_step) {
+ size_t s;
unsigned rep;
- atomic_store_zu(&s, range, ATOMIC_RELAXED);
- for (rep = 0; rep < NREPS; rep++) {
- size_t r = prng_range_zu(&s, range, atomic);
+ s = range;
+ for (rep = 0; rep < nreps; rep++) {
+ size_t r = prng_range_zu(&s, range);
- assert_zu_lt(r, range, "Out of range");
+ expect_zu_lt(r, range, "Out of range");
}
}
}
-
-TEST_BEGIN(test_prng_range_u32_nonatomic) {
- test_prng_range_u32(false);
-}
-TEST_END
-
-TEST_BEGIN(test_prng_range_u32_atomic) {
- test_prng_range_u32(true);
-}
-TEST_END
-
-TEST_BEGIN(test_prng_range_u64_nonatomic) {
- test_prng_range_u64();
-}
-TEST_END
-
-TEST_BEGIN(test_prng_range_zu_nonatomic) {
- test_prng_range_zu(false);
-}
-TEST_END
-
-TEST_BEGIN(test_prng_range_zu_atomic) {
- test_prng_range_zu(true);
-}
TEST_END
int
main(void) {
- return test(
- test_prng_lg_range_u32_nonatomic,
- test_prng_lg_range_u32_atomic,
- test_prng_lg_range_u64_nonatomic,
- test_prng_lg_range_zu_nonatomic,
- test_prng_lg_range_zu_atomic,
- test_prng_range_u32_nonatomic,
- test_prng_range_u32_atomic,
- test_prng_range_u64_nonatomic,
- test_prng_range_zu_nonatomic,
- test_prng_range_zu_atomic);
+ return test_no_reentrancy(
+ test_prng_lg_range_u32,
+ test_prng_lg_range_u64,
+ test_prng_lg_range_zu,
+ test_prng_range_u32,
+ test_prng_range_u64,
+ test_prng_range_zu);
}
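
With the atomic variants dropped, the PRNG state is just a caller-owned integer; a brief usage sketch of the bounded helper exercised above (nitems assumed >= 2, matching the ranges the test walks):

static uint64_t
pick_index(uint64_t *state, uint64_t nitems) {
	/* Returns a pseudo-random value in [0, nitems), advancing *state. */
	uint64_t r = prng_range_u64(state, nitems);
	expect_u64_lt(r, nitems, "Out of range");
	return r;
}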
diff --git a/deps/jemalloc/test/unit/prof_accum.c b/deps/jemalloc/test/unit/prof_accum.c
index 252200635..ef392acda 100644
--- a/deps/jemalloc/test/unit/prof_accum.c
+++ b/deps/jemalloc/test/unit/prof_accum.c
@@ -1,12 +1,15 @@
#include "test/jemalloc_test.h"
+#include "jemalloc/internal/prof_data.h"
+#include "jemalloc/internal/prof_sys.h"
+
#define NTHREADS 4
#define NALLOCS_PER_THREAD 50
#define DUMP_INTERVAL 1
#define BT_COUNT_CHECK_INTERVAL 5
static int
-prof_dump_open_intercept(bool propagate_err, const char *filename) {
+prof_dump_open_file_intercept(const char *filename, int mode) {
int fd;
fd = open("/dev/null", O_WRONLY);
@@ -32,14 +35,14 @@ thd_start(void *varg) {
void *p = alloc_from_permuted_backtrace(thd_ind, i);
dallocx(p, 0);
if (i % DUMP_INTERVAL == 0) {
- assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
+ expect_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
0, "Unexpected error while dumping heap profile");
}
if (i % BT_COUNT_CHECK_INTERVAL == 0 ||
i+1 == NALLOCS_PER_THREAD) {
bt_count = prof_bt_count();
- assert_zu_le(bt_count_prev+(i-i_prev), bt_count,
+ expect_zu_le(bt_count_prev+(i-i_prev), bt_count,
"Expected larger backtrace count increase");
i_prev = i;
bt_count_prev = bt_count;
@@ -58,11 +61,11 @@ TEST_BEGIN(test_idump) {
test_skip_if(!config_prof);
active = true;
- assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
+ expect_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
sizeof(active)), 0,
"Unexpected mallctl failure while activating profiling");
- prof_dump_open = prof_dump_open_intercept;
+ prof_dump_open_file = prof_dump_open_file_intercept;
for (i = 0; i < NTHREADS; i++) {
thd_args[i] = i;
diff --git a/deps/jemalloc/test/unit/prof_active.c b/deps/jemalloc/test/unit/prof_active.c
index 850a24a77..af29e7ad2 100644
--- a/deps/jemalloc/test/unit/prof_active.c
+++ b/deps/jemalloc/test/unit/prof_active.c
@@ -1,14 +1,16 @@
#include "test/jemalloc_test.h"
+#include "jemalloc/internal/prof_data.h"
+
static void
mallctl_bool_get(const char *name, bool expected, const char *func, int line) {
bool old;
size_t sz;
sz = sizeof(old);
- assert_d_eq(mallctl(name, (void *)&old, &sz, NULL, 0), 0,
+ expect_d_eq(mallctl(name, (void *)&old, &sz, NULL, 0), 0,
"%s():%d: Unexpected mallctl failure reading %s", func, line, name);
- assert_b_eq(old, expected, "%s():%d: Unexpected %s value", func, line,
+ expect_b_eq(old, expected, "%s():%d: Unexpected %s value", func, line,
name);
}
@@ -19,11 +21,11 @@ mallctl_bool_set(const char *name, bool old_expected, bool val_new,
size_t sz;
sz = sizeof(old);
- assert_d_eq(mallctl(name, (void *)&old, &sz, (void *)&val_new,
+ expect_d_eq(mallctl(name, (void *)&old, &sz, (void *)&val_new,
sizeof(val_new)), 0,
"%s():%d: Unexpected mallctl failure reading/writing %s", func,
line, name);
- assert_b_eq(old, old_expected, "%s():%d: Unexpected %s value", func,
+ expect_b_eq(old, old_expected, "%s():%d: Unexpected %s value", func,
line, name);
}
@@ -67,11 +69,11 @@ prof_sampling_probe_impl(bool expect_sample, const char *func, int line) {
void *p;
size_t expected_backtraces = expect_sample ? 1 : 0;
- assert_zu_eq(prof_bt_count(), 0, "%s():%d: Expected 0 backtraces", func,
+ expect_zu_eq(prof_bt_count(), 0, "%s():%d: Expected 0 backtraces", func,
line);
p = mallocx(1, 0);
- assert_ptr_not_null(p, "Unexpected mallocx() failure");
- assert_zu_eq(prof_bt_count(), expected_backtraces,
+ expect_ptr_not_null(p, "Unexpected mallocx() failure");
+ expect_zu_eq(prof_bt_count(), expected_backtraces,
"%s():%d: Unexpected backtrace count", func, line);
dallocx(p, 0);
}
diff --git a/deps/jemalloc/test/unit/prof_active.sh b/deps/jemalloc/test/unit/prof_active.sh
index 0167cb10b..9749674af 100644
--- a/deps/jemalloc/test/unit/prof_active.sh
+++ b/deps/jemalloc/test/unit/prof_active.sh
@@ -1,5 +1,5 @@
#!/bin/sh
if [ "x${enable_prof}" = "x1" ] ; then
- export MALLOC_CONF="prof:true,prof_thread_active_init:false,lg_prof_sample:0"
+ export MALLOC_CONF="prof:true,prof_active:true,prof_thread_active_init:false,lg_prof_sample:0"
fi
diff --git a/deps/jemalloc/test/unit/prof_gdump.c b/deps/jemalloc/test/unit/prof_gdump.c
index f7e0aac76..46e45036a 100644
--- a/deps/jemalloc/test/unit/prof_gdump.c
+++ b/deps/jemalloc/test/unit/prof_gdump.c
@@ -1,9 +1,11 @@
#include "test/jemalloc_test.h"
+#include "jemalloc/internal/prof_sys.h"
+
static bool did_prof_dump_open;
static int
-prof_dump_open_intercept(bool propagate_err, const char *filename) {
+prof_dump_open_file_intercept(const char *filename, int mode) {
int fd;
did_prof_dump_open = true;
@@ -15,6 +17,7 @@ prof_dump_open_intercept(bool propagate_err, const char *filename) {
}
TEST_BEGIN(test_gdump) {
+ test_skip_if(opt_hpa);
bool active, gdump, gdump_old;
void *p, *q, *r, *s;
size_t sz;
@@ -22,43 +25,43 @@ TEST_BEGIN(test_gdump) {
test_skip_if(!config_prof);
active = true;
- assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
+ expect_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
sizeof(active)), 0,
"Unexpected mallctl failure while activating profiling");
- prof_dump_open = prof_dump_open_intercept;
+ prof_dump_open_file = prof_dump_open_file_intercept;
did_prof_dump_open = false;
p = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
- assert_ptr_not_null(p, "Unexpected mallocx() failure");
- assert_true(did_prof_dump_open, "Expected a profile dump");
+ expect_ptr_not_null(p, "Unexpected mallocx() failure");
+ expect_true(did_prof_dump_open, "Expected a profile dump");
did_prof_dump_open = false;
q = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
- assert_ptr_not_null(q, "Unexpected mallocx() failure");
- assert_true(did_prof_dump_open, "Expected a profile dump");
+ expect_ptr_not_null(q, "Unexpected mallocx() failure");
+ expect_true(did_prof_dump_open, "Expected a profile dump");
gdump = false;
sz = sizeof(gdump_old);
- assert_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz,
+ expect_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz,
(void *)&gdump, sizeof(gdump)), 0,
"Unexpected mallctl failure while disabling prof.gdump");
assert(gdump_old);
did_prof_dump_open = false;
r = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
- assert_ptr_not_null(q, "Unexpected mallocx() failure");
- assert_false(did_prof_dump_open, "Unexpected profile dump");
+ expect_ptr_not_null(q, "Unexpected mallocx() failure");
+ expect_false(did_prof_dump_open, "Unexpected profile dump");
gdump = true;
sz = sizeof(gdump_old);
- assert_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz,
+ expect_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz,
(void *)&gdump, sizeof(gdump)), 0,
"Unexpected mallctl failure while enabling prof.gdump");
assert(!gdump_old);
did_prof_dump_open = false;
s = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
- assert_ptr_not_null(q, "Unexpected mallocx() failure");
- assert_true(did_prof_dump_open, "Expected a profile dump");
+ expect_ptr_not_null(q, "Unexpected mallocx() failure");
+ expect_true(did_prof_dump_open, "Expected a profile dump");
dallocx(p, 0);
dallocx(q, 0);
diff --git a/deps/jemalloc/test/unit/prof_hook.c b/deps/jemalloc/test/unit/prof_hook.c
new file mode 100644
index 000000000..6480d9303
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_hook.c
@@ -0,0 +1,169 @@
+#include "test/jemalloc_test.h"
+
+const char *dump_filename = "/dev/null";
+
+prof_backtrace_hook_t default_hook;
+
+bool mock_bt_hook_called = false;
+bool mock_dump_hook_called = false;
+
+void
+mock_bt_hook(void **vec, unsigned *len, unsigned max_len) {
+ *len = max_len;
+ for (unsigned i = 0; i < max_len; ++i) {
+ vec[i] = (void *)((uintptr_t)i);
+ }
+ mock_bt_hook_called = true;
+}
+
+void
+mock_bt_augmenting_hook(void **vec, unsigned *len, unsigned max_len) {
+ default_hook(vec, len, max_len);
+ expect_u_gt(*len, 0, "Default backtrace hook returned empty backtrace");
+ expect_u_lt(*len, max_len,
+ "Default backtrace hook returned too large backtrace");
+
+ /* Add a separator between default frames and augmented */
+ vec[*len] = (void *)0x030303030;
+ (*len)++;
+
+ /* Add more stack frames */
+ for (unsigned i = 0; i < 3; ++i) {
+ if (*len == max_len) {
+ break;
+ }
+ vec[*len] = (void *)((uintptr_t)i);
+ (*len)++;
+ }
+
+
+ mock_bt_hook_called = true;
+}
+
+void
+mock_dump_hook(const char *filename) {
+ mock_dump_hook_called = true;
+ expect_str_eq(filename, dump_filename,
+ "Incorrect file name passed to the dump hook");
+}
+
+TEST_BEGIN(test_prof_backtrace_hook_replace) {
+
+ test_skip_if(!config_prof);
+
+ mock_bt_hook_called = false;
+
+ void *p0 = mallocx(1, 0);
+ assert_ptr_not_null(p0, "Failed to allocate");
+
+ expect_false(mock_bt_hook_called, "Called mock hook before it's set");
+
+ prof_backtrace_hook_t null_hook = NULL;
+ expect_d_eq(mallctl("experimental.hooks.prof_backtrace",
+ NULL, 0, (void *)&null_hook, sizeof(null_hook)),
+ EINVAL, "Incorrectly allowed NULL backtrace hook");
+
+ size_t default_hook_sz = sizeof(prof_backtrace_hook_t);
+ prof_backtrace_hook_t hook = &mock_bt_hook;
+ expect_d_eq(mallctl("experimental.hooks.prof_backtrace",
+ (void *)&default_hook, &default_hook_sz, (void *)&hook,
+ sizeof(hook)), 0, "Unexpected mallctl failure setting hook");
+
+ void *p1 = mallocx(1, 0);
+ assert_ptr_not_null(p1, "Failed to allocate");
+
+ expect_true(mock_bt_hook_called, "Didn't call mock hook");
+
+ prof_backtrace_hook_t current_hook;
+ size_t current_hook_sz = sizeof(prof_backtrace_hook_t);
+ expect_d_eq(mallctl("experimental.hooks.prof_backtrace",
+ (void *)&current_hook, &current_hook_sz, (void *)&default_hook,
+ sizeof(default_hook)), 0,
+ "Unexpected mallctl failure resetting hook to default");
+
+ expect_ptr_eq(current_hook, hook,
+ "Hook returned by mallctl is not equal to mock hook");
+
+ dallocx(p1, 0);
+ dallocx(p0, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_prof_backtrace_hook_augment) {
+
+ test_skip_if(!config_prof);
+
+ mock_bt_hook_called = false;
+
+ void *p0 = mallocx(1, 0);
+ assert_ptr_not_null(p0, "Failed to allocate");
+
+ expect_false(mock_bt_hook_called, "Called mock hook before it's set");
+
+ size_t default_hook_sz = sizeof(prof_backtrace_hook_t);
+ prof_backtrace_hook_t hook = &mock_bt_augmenting_hook;
+ expect_d_eq(mallctl("experimental.hooks.prof_backtrace",
+ (void *)&default_hook, &default_hook_sz, (void *)&hook,
+ sizeof(hook)), 0, "Unexpected mallctl failure setting hook");
+
+ void *p1 = mallocx(1, 0);
+ assert_ptr_not_null(p1, "Failed to allocate");
+
+ expect_true(mock_bt_hook_called, "Didn't call mock hook");
+
+ prof_backtrace_hook_t current_hook;
+ size_t current_hook_sz = sizeof(prof_backtrace_hook_t);
+ expect_d_eq(mallctl("experimental.hooks.prof_backtrace",
+ (void *)&current_hook, &current_hook_sz, (void *)&default_hook,
+ sizeof(default_hook)), 0,
+ "Unexpected mallctl failure resetting hook to default");
+
+ expect_ptr_eq(current_hook, hook,
+ "Hook returned by mallctl is not equal to mock hook");
+
+ dallocx(p1, 0);
+ dallocx(p0, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_prof_dump_hook) {
+
+ test_skip_if(!config_prof);
+
+ mock_dump_hook_called = false;
+
+ expect_d_eq(mallctl("prof.dump", NULL, NULL, (void *)&dump_filename,
+ sizeof(dump_filename)), 0, "Failed to dump heap profile");
+
+ expect_false(mock_dump_hook_called, "Called dump hook before it's set");
+
+ size_t default_hook_sz = sizeof(prof_dump_hook_t);
+ prof_dump_hook_t hook = &mock_dump_hook;
+ expect_d_eq(mallctl("experimental.hooks.prof_dump",
+ (void *)&default_hook, &default_hook_sz, (void *)&hook,
+ sizeof(hook)), 0, "Unexpected mallctl failure setting hook");
+
+ expect_d_eq(mallctl("prof.dump", NULL, NULL, (void *)&dump_filename,
+ sizeof(dump_filename)), 0, "Failed to dump heap profile");
+
+ expect_true(mock_dump_hook_called, "Didn't call mock hook");
+
+ prof_dump_hook_t current_hook;
+ size_t current_hook_sz = sizeof(prof_dump_hook_t);
+ expect_d_eq(mallctl("experimental.hooks.prof_dump",
+ (void *)&current_hook, &current_hook_sz, (void *)&default_hook,
+ sizeof(default_hook)), 0,
+ "Unexpected mallctl failure resetting hook to default");
+
+ expect_ptr_eq(current_hook, hook,
+ "Hook returned by mallctl is not equal to mock hook");
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_prof_backtrace_hook_replace,
+ test_prof_backtrace_hook_augment,
+ test_prof_dump_hook);
+}
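
A condensed sketch of the hook-replacement protocol the new test verifies: writing a prof_backtrace_hook_t through the experimental mallctl installs it and optionally reads back the previous hook (a NULL hook is rejected with EINVAL):

static void
install_bt_hook(prof_backtrace_hook_t new_hook,
    prof_backtrace_hook_t *old_hook) {
	size_t old_sz = sizeof(*old_hook);
	expect_d_eq(mallctl("experimental.hooks.prof_backtrace",
	    (void *)old_hook, &old_sz, (void *)&new_hook, sizeof(new_hook)),
	    0, "Unexpected mallctl failure setting backtrace hook");
}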
diff --git a/deps/jemalloc/test/unit/prof_hook.sh b/deps/jemalloc/test/unit/prof_hook.sh
new file mode 100644
index 000000000..c7ebd8f98
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_hook.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:true,prof_active:true,lg_prof_sample:0"
+fi
+
diff --git a/deps/jemalloc/test/unit/prof_idump.c b/deps/jemalloc/test/unit/prof_idump.c
index 1cc6c98cd..455ac5297 100644
--- a/deps/jemalloc/test/unit/prof_idump.c
+++ b/deps/jemalloc/test/unit/prof_idump.c
@@ -1,13 +1,21 @@
#include "test/jemalloc_test.h"
+#include "jemalloc/internal/prof_sys.h"
+
+#define TEST_PREFIX "test_prefix"
+
static bool did_prof_dump_open;
static int
-prof_dump_open_intercept(bool propagate_err, const char *filename) {
+prof_dump_open_file_intercept(const char *filename, int mode) {
int fd;
did_prof_dump_open = true;
+ const char filename_prefix[] = TEST_PREFIX ".";
+ expect_d_eq(strncmp(filename_prefix, filename, sizeof(filename_prefix)
+ - 1), 0, "Dump file name should start with \"" TEST_PREFIX ".\"");
+
fd = open("/dev/null", O_WRONLY);
assert_d_ne(fd, -1, "Unexpected open() failure");
@@ -18,20 +26,27 @@ TEST_BEGIN(test_idump) {
bool active;
void *p;
+ const char *test_prefix = TEST_PREFIX;
+
test_skip_if(!config_prof);
active = true;
- assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
+
+ expect_d_eq(mallctl("prof.prefix", NULL, NULL, (void *)&test_prefix,
+ sizeof(test_prefix)), 0,
+ "Unexpected mallctl failure while overwriting dump prefix");
+
+ expect_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
sizeof(active)), 0,
"Unexpected mallctl failure while activating profiling");
- prof_dump_open = prof_dump_open_intercept;
+ prof_dump_open_file = prof_dump_open_file_intercept;
did_prof_dump_open = false;
p = mallocx(1, 0);
- assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ expect_ptr_not_null(p, "Unexpected mallocx() failure");
dallocx(p, 0);
- assert_true(did_prof_dump_open, "Expected a profile dump");
+ expect_true(did_prof_dump_open, "Expected a profile dump");
}
TEST_END
diff --git a/deps/jemalloc/test/unit/prof_log.c b/deps/jemalloc/test/unit/prof_log.c
index 92fbd7cea..5ff208e2d 100644
--- a/deps/jemalloc/test/unit/prof_log.c
+++ b/deps/jemalloc/test/unit/prof_log.c
@@ -1,18 +1,19 @@
#include "test/jemalloc_test.h"
+#include "jemalloc/internal/prof_log.h"
#define N_PARAM 100
#define N_THREADS 10
-static void assert_rep() {
- assert_b_eq(prof_log_rep_check(), false, "Rep check failed");
+static void expect_rep() {
+ expect_b_eq(prof_log_rep_check(), false, "Rep check failed");
}
-static void assert_log_empty() {
- assert_zu_eq(prof_log_bt_count(), 0,
+static void expect_log_empty() {
+ expect_zu_eq(prof_log_bt_count(), 0,
"The log has backtraces; it isn't empty");
- assert_zu_eq(prof_log_thr_count(), 0,
+ expect_zu_eq(prof_log_thr_count(), 0,
"The log has threads; it isn't empty");
- assert_zu_eq(prof_log_alloc_count(), 0,
+ expect_zu_eq(prof_log_alloc_count(), 0,
"The log has allocations; it isn't empty");
}
@@ -34,22 +35,22 @@ TEST_BEGIN(test_prof_log_many_logs) {
test_skip_if(!config_prof);
for (i = 0; i < N_PARAM; i++) {
- assert_b_eq(prof_log_is_logging(), false,
+ expect_b_eq(prof_log_is_logging(), false,
"Logging shouldn't have started yet");
- assert_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
+ expect_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl failure when starting logging");
- assert_b_eq(prof_log_is_logging(), true,
+ expect_b_eq(prof_log_is_logging(), true,
"Logging should be started by now");
- assert_log_empty();
- assert_rep();
+ expect_log_empty();
+ expect_rep();
f();
- assert_zu_eq(prof_log_thr_count(), 1, "Wrong thread count");
- assert_rep();
- assert_b_eq(prof_log_is_logging(), true,
+ expect_zu_eq(prof_log_thr_count(), 1, "Wrong thread count");
+ expect_rep();
+ expect_b_eq(prof_log_is_logging(), true,
"Logging should still be on");
- assert_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
+ expect_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl failure when stopping logging");
- assert_b_eq(prof_log_is_logging(), false,
+ expect_b_eq(prof_log_is_logging(), false,
"Logging should have turned off");
}
}
@@ -61,7 +62,7 @@ static void *f_thread(void *unused) {
int i;
for (i = 0; i < N_PARAM; i++) {
void *p = malloc(100);
- memset(p, 100, sizeof(char));
+ memset(p, 100, 1);
free(p);
}
@@ -73,7 +74,7 @@ TEST_BEGIN(test_prof_log_many_threads) {
test_skip_if(!config_prof);
int i;
- assert_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
+ expect_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl failure when starting logging");
for (i = 0; i < N_THREADS; i++) {
thd_create(&thr_buf[i], &f_thread, NULL);
@@ -82,10 +83,10 @@ TEST_BEGIN(test_prof_log_many_threads) {
for (i = 0; i < N_THREADS; i++) {
thd_join(thr_buf[i], NULL);
}
- assert_zu_eq(prof_log_thr_count(), N_THREADS,
+ expect_zu_eq(prof_log_thr_count(), N_THREADS,
"Wrong number of thread entries");
- assert_rep();
- assert_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
+ expect_rep();
+ expect_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl failure when stopping logging");
}
TEST_END
@@ -110,19 +111,19 @@ TEST_BEGIN(test_prof_log_many_traces) {
test_skip_if(!config_prof);
- assert_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
+ expect_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl failure when starting logging");
int i;
- assert_rep();
- assert_log_empty();
+ expect_rep();
+ expect_log_empty();
for (i = 0; i < N_PARAM; i++) {
- assert_rep();
+ expect_rep();
f1();
- assert_rep();
+ expect_rep();
f2();
- assert_rep();
+ expect_rep();
f3();
- assert_rep();
+ expect_rep();
}
/*
* There should be 8 total backtraces: two for malloc/free in f1(), two
@@ -131,16 +132,18 @@ TEST_BEGIN(test_prof_log_many_traces) {
* optimizations such as loop unrolling might generate more call sites.
* So >= 8 traces are expected.
*/
- assert_zu_ge(prof_log_bt_count(), 8,
+ expect_zu_ge(prof_log_bt_count(), 8,
"Expect at least 8 backtraces given sample workload");
- assert_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
+ expect_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl failure when stopping logging");
}
TEST_END
int
main(void) {
- prof_log_dummy_set(true);
+ if (config_prof) {
+ prof_log_dummy_set(true);
+ }
return test_no_reentrancy(
test_prof_log_many_logs,
test_prof_log_many_traces,
diff --git a/deps/jemalloc/test/unit/prof_log.sh b/deps/jemalloc/test/unit/prof_log.sh
index 8fcc7d8a7..485f9bf0a 100644
--- a/deps/jemalloc/test/unit/prof_log.sh
+++ b/deps/jemalloc/test/unit/prof_log.sh
@@ -1,5 +1,5 @@
#!/bin/sh
if [ "x${enable_prof}" = "x1" ] ; then
- export MALLOC_CONF="prof:true,lg_prof_sample:0"
+ export MALLOC_CONF="prof:true,prof_active:true,lg_prof_sample:0"
fi
diff --git a/deps/jemalloc/test/unit/prof_mdump.c b/deps/jemalloc/test/unit/prof_mdump.c
new file mode 100644
index 000000000..75b3a5159
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_mdump.c
@@ -0,0 +1,216 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/prof_sys.h"
+
+static const char *test_filename = "test_filename";
+static bool did_prof_dump_open;
+
+static int
+prof_dump_open_file_intercept(const char *filename, int mode) {
+ int fd;
+
+ did_prof_dump_open = true;
+
+ /*
+ * Stronger than a strcmp() - verifying that we internally directly use
+ * the caller supplied char pointer.
+ */
+ expect_ptr_eq(filename, test_filename,
+ "Dump file name should be \"%s\"", test_filename);
+
+ fd = open("/dev/null", O_WRONLY);
+ assert_d_ne(fd, -1, "Unexpected open() failure");
+
+ return fd;
+}
+
+TEST_BEGIN(test_mdump_normal) {
+ test_skip_if(!config_prof);
+
+ prof_dump_open_file_t *open_file_orig = prof_dump_open_file;
+
+ void *p = mallocx(1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ prof_dump_open_file = prof_dump_open_file_intercept;
+ did_prof_dump_open = false;
+ expect_d_eq(mallctl("prof.dump", NULL, NULL, (void *)&test_filename,
+ sizeof(test_filename)), 0,
+ "Unexpected mallctl failure while dumping");
+ expect_true(did_prof_dump_open, "Expected a profile dump");
+
+ dallocx(p, 0);
+
+ prof_dump_open_file = open_file_orig;
+}
+TEST_END
+
+static int
+prof_dump_open_file_error(const char *filename, int mode) {
+ return -1;
+}
+
+/*
+ * In the context of test_mdump_output_error, prof_dump_write_file_count is the
+ * total number of times prof_dump_write_file_error() is expected to be called.
+ * In the context of test_mdump_maps_error, prof_dump_write_file_count is the
+ * total number of times prof_dump_write_file_error() is expected to be called
+ * starting from the one that contains an 'M' (beginning the "MAPPED_LIBRARIES"
+ * header).
+ */
+static int prof_dump_write_file_count;
+
+static ssize_t
+prof_dump_write_file_error(int fd, const void *s, size_t len) {
+ --prof_dump_write_file_count;
+
+ expect_d_ge(prof_dump_write_file_count, 0,
+ "Write is called after error occurs");
+
+ if (prof_dump_write_file_count == 0) {
+ return -1;
+ } else {
+ /*
+ * Any non-negative number indicates success, and for
+ * simplicity we just use 0. When prof_dump_write_file_count
+ * is positive, it means that we haven't reached the write that
+ * we want to fail; when prof_dump_write_file_count is
+ * negative, it means that we've already violated the
+ * expect_d_ge(prof_dump_write_file_count, 0) statement above,
+ * but instead of aborting, we continue the rest of the test,
+ * and we indicate that all the writes after the failed write
+ * are successful.
+ */
+ return 0;
+ }
+}
+
+static void
+expect_write_failure(int count) {
+ prof_dump_write_file_count = count;
+ expect_d_eq(mallctl("prof.dump", NULL, NULL, (void *)&test_filename,
+ sizeof(test_filename)), EFAULT, "Dump should err");
+ expect_d_eq(prof_dump_write_file_count, 0,
+ "Dumping stopped after a wrong number of writes");
+}
+
+TEST_BEGIN(test_mdump_output_error) {
+ test_skip_if(!config_prof);
+ test_skip_if(!config_debug);
+
+ prof_dump_open_file_t *open_file_orig = prof_dump_open_file;
+ prof_dump_write_file_t *write_file_orig = prof_dump_write_file;
+
+ prof_dump_write_file = prof_dump_write_file_error;
+
+ void *p = mallocx(1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ /*
+ * When opening the dump file fails, there shouldn't be any write, and
+ * mallctl() should return failure.
+ */
+ prof_dump_open_file = prof_dump_open_file_error;
+ expect_write_failure(0);
+
+ /*
+ * When the n-th write fails, there shouldn't be any more write, and
+ * mallctl() should return failure.
+ */
+ prof_dump_open_file = prof_dump_open_file_intercept;
+ expect_write_failure(1); /* First write fails. */
+ expect_write_failure(2); /* Second write fails. */
+
+ dallocx(p, 0);
+
+ prof_dump_open_file = open_file_orig;
+ prof_dump_write_file = write_file_orig;
+}
+TEST_END
+
+static int
+prof_dump_open_maps_error() {
+ return -1;
+}
+
+static bool started_piping_maps_file;
+
+static ssize_t
+prof_dump_write_maps_file_error(int fd, const void *s, size_t len) {
+ /* The main dump doesn't contain any capital 'M'. */
+ if (!started_piping_maps_file && strchr(s, 'M') != NULL) {
+ started_piping_maps_file = true;
+ }
+
+ if (started_piping_maps_file) {
+ return prof_dump_write_file_error(fd, s, len);
+ } else {
+ /* Return success when we haven't started piping maps. */
+ return 0;
+ }
+}
+
+static void
+expect_maps_write_failure(int count) {
+ int mfd = prof_dump_open_maps();
+ if (mfd == -1) {
+ /* No need to continue if we just can't find the maps file. */
+ return;
+ }
+ close(mfd);
+ started_piping_maps_file = false;
+ expect_write_failure(count);
+ expect_true(started_piping_maps_file, "Should start piping maps");
+}
+
+TEST_BEGIN(test_mdump_maps_error) {
+ test_skip_if(!config_prof);
+ test_skip_if(!config_debug);
+
+ prof_dump_open_file_t *open_file_orig = prof_dump_open_file;
+ prof_dump_write_file_t *write_file_orig = prof_dump_write_file;
+ prof_dump_open_maps_t *open_maps_orig = prof_dump_open_maps;
+
+ prof_dump_open_file = prof_dump_open_file_intercept;
+ prof_dump_write_file = prof_dump_write_maps_file_error;
+
+ void *p = mallocx(1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ /*
+ * When opening the maps file fails, there shouldn't be any maps write,
+ * and mallctl() should return success.
+ */
+ prof_dump_open_maps = prof_dump_open_maps_error;
+ started_piping_maps_file = false;
+ prof_dump_write_file_count = 0;
+ expect_d_eq(mallctl("prof.dump", NULL, NULL, (void *)&test_filename,
+ sizeof(test_filename)), 0,
+ "mallctl should not fail in case of maps file opening failure");
+ expect_false(started_piping_maps_file, "Shouldn't start piping maps");
+ expect_d_eq(prof_dump_write_file_count, 0,
+ "Dumping stopped after a wrong number of writes");
+
+ /*
+ * When the n-th maps write fails (given that we are able to find the
+ * maps file), there shouldn't be any more maps write, and mallctl()
+ * should return failure.
+ */
+ prof_dump_open_maps = open_maps_orig;
+ expect_maps_write_failure(1); /* First write fails. */
+ expect_maps_write_failure(2); /* Second write fails. */
+
+ dallocx(p, 0);
+
+ prof_dump_open_file = open_file_orig;
+ prof_dump_write_file = write_file_orig;
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_mdump_normal,
+ test_mdump_output_error,
+ test_mdump_maps_error);
+}
diff --git a/deps/jemalloc/test/unit/prof_mdump.sh b/deps/jemalloc/test/unit/prof_mdump.sh
new file mode 100644
index 000000000..d14cb8c5e
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_mdump.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:true,lg_prof_sample:0"
+fi
+
diff --git a/deps/jemalloc/test/unit/prof_recent.c b/deps/jemalloc/test/unit/prof_recent.c
new file mode 100644
index 000000000..4fb37236f
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_recent.c
@@ -0,0 +1,678 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/prof_recent.h"
+
+/* As specified in the shell script */
+#define OPT_ALLOC_MAX 3
+
+/* Invariant before and after every test (when config_prof is on) */
+static void
+confirm_prof_setup() {
+ /* Options */
+ assert_true(opt_prof, "opt_prof not on");
+ assert_true(opt_prof_active, "opt_prof_active not on");
+ assert_zd_eq(opt_prof_recent_alloc_max, OPT_ALLOC_MAX,
+ "opt_prof_recent_alloc_max not set correctly");
+
+ /* Dynamics */
+ assert_true(prof_active_state, "prof_active not on");
+ assert_zd_eq(prof_recent_alloc_max_ctl_read(), OPT_ALLOC_MAX,
+ "prof_recent_alloc_max not set correctly");
+}
+
+TEST_BEGIN(test_confirm_setup) {
+ test_skip_if(!config_prof);
+ confirm_prof_setup();
+}
+TEST_END
+
+TEST_BEGIN(test_prof_recent_off) {
+ test_skip_if(config_prof);
+
+ const ssize_t past_ref = 0, future_ref = 0;
+ const size_t len_ref = sizeof(ssize_t);
+
+ ssize_t past = past_ref, future = future_ref;
+ size_t len = len_ref;
+
+#define ASSERT_SHOULD_FAIL(opt, a, b, c, d) do { \
+ assert_d_eq(mallctl("experimental.prof_recent." opt, a, b, c, \
+ d), ENOENT, "Should return ENOENT when config_prof is off");\
+ assert_zd_eq(past, past_ref, "output was touched"); \
+ assert_zu_eq(len, len_ref, "output length was touched"); \
+ assert_zd_eq(future, future_ref, "input was touched"); \
+} while (0)
+
+ ASSERT_SHOULD_FAIL("alloc_max", NULL, NULL, NULL, 0);
+ ASSERT_SHOULD_FAIL("alloc_max", &past, &len, NULL, 0);
+ ASSERT_SHOULD_FAIL("alloc_max", NULL, NULL, &future, len);
+ ASSERT_SHOULD_FAIL("alloc_max", &past, &len, &future, len);
+
+#undef ASSERT_SHOULD_FAIL
+}
+TEST_END
+
+TEST_BEGIN(test_prof_recent_on) {
+ test_skip_if(!config_prof);
+
+ ssize_t past, future;
+ size_t len = sizeof(ssize_t);
+
+ confirm_prof_setup();
+
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ NULL, NULL, NULL, 0), 0, "no-op mallctl should be allowed");
+ confirm_prof_setup();
+
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ &past, &len, NULL, 0), 0, "Read error");
+ expect_zd_eq(past, OPT_ALLOC_MAX, "Wrong read result");
+ future = OPT_ALLOC_MAX + 1;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ NULL, NULL, &future, len), 0, "Write error");
+ future = -1;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ &past, &len, &future, len), 0, "Read/write error");
+ expect_zd_eq(past, OPT_ALLOC_MAX + 1, "Wrong read result");
+ future = -2;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ &past, &len, &future, len), EINVAL,
+ "Invalid write should return EINVAL");
+ expect_zd_eq(past, OPT_ALLOC_MAX + 1,
+ "Output should not be touched given invalid write");
+ future = OPT_ALLOC_MAX;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ &past, &len, &future, len), 0, "Read/write error");
+ expect_zd_eq(past, -1, "Wrong read result");
+ future = OPT_ALLOC_MAX + 2;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ &past, &len, &future, len * 2), EINVAL,
+ "Invalid write should return EINVAL");
+ expect_zd_eq(past, -1,
+ "Output should not be touched given invalid write");
+
+ confirm_prof_setup();
+}
+TEST_END
+
+/* Reproducible sequence of request sizes */
+#define NTH_REQ_SIZE(n) ((n) * 97 + 101)
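+/* For example, request 0 is 101 bytes, request 1 is 198, and request 2 is 295. */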
+
+static void
+confirm_malloc(void *p) {
+ assert_ptr_not_null(p, "malloc failed unexpectedly");
+ edata_t *e = emap_edata_lookup(TSDN_NULL, &arena_emap_global, p);
+ assert_ptr_not_null(e, "NULL edata for living pointer");
+ prof_recent_t *n = edata_prof_recent_alloc_get_no_lock_test(e);
+ assert_ptr_not_null(n, "Record in edata should not be NULL");
+ expect_ptr_not_null(n->alloc_tctx,
+ "alloc_tctx in record should not be NULL");
+ expect_ptr_eq(e, prof_recent_alloc_edata_get_no_lock_test(n),
+ "edata pointer in record is not correct");
+ expect_ptr_null(n->dalloc_tctx, "dalloc_tctx in record should be NULL");
+}
+
+static void
+confirm_record_size(prof_recent_t *n, unsigned kth) {
+ expect_zu_eq(n->size, NTH_REQ_SIZE(kth),
+ "Recorded allocation size is wrong");
+}
+
+static void
+confirm_record_living(prof_recent_t *n) {
+ expect_ptr_not_null(n->alloc_tctx,
+ "alloc_tctx in record should not be NULL");
+ edata_t *edata = prof_recent_alloc_edata_get_no_lock_test(n);
+ assert_ptr_not_null(edata,
+ "Recorded edata should not be NULL for living pointer");
+ expect_ptr_eq(n, edata_prof_recent_alloc_get_no_lock_test(edata),
+ "Record in edata is not correct");
+ expect_ptr_null(n->dalloc_tctx, "dalloc_tctx in record should be NULL");
+}
+
+static void
+confirm_record_released(prof_recent_t *n) {
+ expect_ptr_not_null(n->alloc_tctx,
+ "alloc_tctx in record should not be NULL");
+ expect_ptr_null(prof_recent_alloc_edata_get_no_lock_test(n),
+ "Recorded edata should be NULL for released pointer");
+ expect_ptr_not_null(n->dalloc_tctx,
+ "dalloc_tctx in record should not be NULL for released pointer");
+}
+
+TEST_BEGIN(test_prof_recent_alloc) {
+ test_skip_if(!config_prof);
+
+ bool b;
+ unsigned i, c;
+ size_t req_size;
+ void *p;
+ prof_recent_t *n;
+ ssize_t future;
+
+ confirm_prof_setup();
+
+ /*
+ * First batch of 2 * OPT_ALLOC_MAX allocations. After the
+ * (OPT_ALLOC_MAX - 1)'th allocation the recorded allocations should
+ * always be the last OPT_ALLOC_MAX allocations coming from here.
+ */
+ for (i = 0; i < 2 * OPT_ALLOC_MAX; ++i) {
+ req_size = NTH_REQ_SIZE(i);
+ p = malloc(req_size);
+ confirm_malloc(p);
+ if (i < OPT_ALLOC_MAX - 1) {
+ assert_false(ql_empty(&prof_recent_alloc_list),
+ "Empty recent allocation");
+ free(p);
+ /*
+ * The recorded allocations may still include some
+ * other allocations before the test run started,
+ * so keep allocating without checking anything.
+ */
+ continue;
+ }
+ c = 0;
+ ql_foreach(n, &prof_recent_alloc_list, link) {
+ ++c;
+ confirm_record_size(n, i + c - OPT_ALLOC_MAX);
+ if (c == OPT_ALLOC_MAX) {
+ confirm_record_living(n);
+ } else {
+ confirm_record_released(n);
+ }
+ }
+ assert_u_eq(c, OPT_ALLOC_MAX,
+ "Incorrect total number of allocations");
+ free(p);
+ }
+
+ confirm_prof_setup();
+
+ b = false;
+ assert_d_eq(mallctl("prof.active", NULL, NULL, &b, sizeof(bool)), 0,
+ "mallctl for turning off prof_active failed");
+
+ /*
+ * Second batch of OPT_ALLOC_MAX allocations. Since prof_active is
+ * turned off, this batch shouldn't be recorded.
+ */
+ for (; i < 3 * OPT_ALLOC_MAX; ++i) {
+ req_size = NTH_REQ_SIZE(i);
+ p = malloc(req_size);
+ assert_ptr_not_null(p, "malloc failed unexpectedly");
+ c = 0;
+ ql_foreach(n, &prof_recent_alloc_list, link) {
+ confirm_record_size(n, c + OPT_ALLOC_MAX);
+ confirm_record_released(n);
+ ++c;
+ }
+ assert_u_eq(c, OPT_ALLOC_MAX,
+ "Incorrect total number of allocations");
+ free(p);
+ }
+
+ b = true;
+ assert_d_eq(mallctl("prof.active", NULL, NULL, &b, sizeof(bool)), 0,
+ "mallctl for turning on prof_active failed");
+
+ confirm_prof_setup();
+
+ /*
+ * Third batch of OPT_ALLOC_MAX allocations. Since prof_active is
+ * turned back on, they should be recorded, and in the list of recorded
+ * allocations they should follow the first batch rather than the
+ * second batch.
+ */
+ for (; i < 4 * OPT_ALLOC_MAX; ++i) {
+ req_size = NTH_REQ_SIZE(i);
+ p = malloc(req_size);
+ confirm_malloc(p);
+ c = 0;
+ ql_foreach(n, &prof_recent_alloc_list, link) {
+ ++c;
+ confirm_record_size(n,
+ /* Is the allocation from the third batch? */
+ i + c - OPT_ALLOC_MAX >= 3 * OPT_ALLOC_MAX ?
+ /* If yes, then it's just recorded. */
+ i + c - OPT_ALLOC_MAX :
+ /*
+ * Otherwise, it should come from the first batch
+ * instead of the second batch.
+ */
+ i + c - 2 * OPT_ALLOC_MAX);
+ if (c == OPT_ALLOC_MAX) {
+ confirm_record_living(n);
+ } else {
+ confirm_record_released(n);
+ }
+ }
+ assert_u_eq(c, OPT_ALLOC_MAX,
+ "Incorrect total number of allocations");
+ free(p);
+ }
+
+ /* Increasing the limit shouldn't alter the list of records. */
+ future = OPT_ALLOC_MAX + 1;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
+ c = 0;
+ ql_foreach(n, &prof_recent_alloc_list, link) {
+ confirm_record_size(n, c + 3 * OPT_ALLOC_MAX);
+ confirm_record_released(n);
+ ++c;
+ }
+ assert_u_eq(c, OPT_ALLOC_MAX,
+ "Incorrect total number of allocations");
+
+ /*
+ * Decreasing the limit shouldn't alter the list of records as long as
+ * the new limit is still no less than the length of the list.
+ */
+ future = OPT_ALLOC_MAX;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
+ c = 0;
+ ql_foreach(n, &prof_recent_alloc_list, link) {
+ confirm_record_size(n, c + 3 * OPT_ALLOC_MAX);
+ confirm_record_released(n);
+ ++c;
+ }
+ assert_u_eq(c, OPT_ALLOC_MAX,
+ "Incorrect total number of allocations");
+
+ /*
+ * Decreasing the limit should shorten the list of records if the new
+ * limit is less than the length of the list.
+ */
+ future = OPT_ALLOC_MAX - 1;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
+ c = 0;
+ ql_foreach(n, &prof_recent_alloc_list, link) {
+ ++c;
+ confirm_record_size(n, c + 3 * OPT_ALLOC_MAX);
+ confirm_record_released(n);
+ }
+ assert_u_eq(c, OPT_ALLOC_MAX - 1,
+ "Incorrect total number of allocations");
+
+ /* Setting to unlimited shouldn't alter the list of records. */
+ future = -1;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
+ c = 0;
+ ql_foreach(n, &prof_recent_alloc_list, link) {
+ ++c;
+ confirm_record_size(n, c + 3 * OPT_ALLOC_MAX);
+ confirm_record_released(n);
+ }
+ assert_u_eq(c, OPT_ALLOC_MAX - 1,
+ "Incorrect total number of allocations");
+
+ /* Downshift to only one record. */
+ future = 1;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
+ assert_false(ql_empty(&prof_recent_alloc_list), "Recent list is empty");
+ n = ql_first(&prof_recent_alloc_list);
+ confirm_record_size(n, 4 * OPT_ALLOC_MAX - 1);
+ confirm_record_released(n);
+ n = ql_next(&prof_recent_alloc_list, n, link);
+ assert_ptr_null(n, "Recent list should only contain one record");
+
+ /* Completely turn off. */
+ future = 0;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
+ assert_true(ql_empty(&prof_recent_alloc_list),
+ "Recent list should be empty");
+
+ /* Restore the settings. */
+ future = OPT_ALLOC_MAX;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
+ assert_true(ql_empty(&prof_recent_alloc_list),
+ "Recent list should be empty");
+
+ confirm_prof_setup();
+}
+TEST_END
+
+#undef NTH_REQ_SIZE
+
+#define DUMP_OUT_SIZE 4096
+static char dump_out[DUMP_OUT_SIZE];
+static size_t dump_out_len = 0;
+
+static void
+test_dump_write_cb(void *not_used, const char *str) {
+ size_t len = strlen(str);
+ assert(dump_out_len + len < DUMP_OUT_SIZE);
+ memcpy(dump_out + dump_out_len, str, len + 1);
+ dump_out_len += len;
+}
+
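+/*
+ * The alloc_dump mallctl takes a {write callback, callback argument} pair as
+ * its input; the argument is unused here, so NULL is passed.
+ */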
+static void
+call_dump() {
+ static void *in[2] = {test_dump_write_cb, NULL};
+ dump_out_len = 0;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_dump",
+ NULL, NULL, in, sizeof(in)), 0, "Dump mallctl raised error");
+}
+
+typedef struct {
+ size_t size;
+ size_t usize;
+ bool released;
+} confirm_record_t;
+
+#define DUMP_ERROR "Dump output is wrong"
+
+static void
+confirm_record(const char *template, const confirm_record_t *records,
+ const size_t n_records) {
+ static const char *types[2] = {"alloc", "dalloc"};
+ static char buf[64];
+
+ /*
+ * The template string would be in the form of:
+ * "{...,\"recent_alloc\":[]}",
+ * and dump_out would be in the form of:
+ * "{...,\"recent_alloc\":[...]}".
+ * Using "- 2" serves to cut right before the ending "]}".
+ */
+ assert_d_eq(memcmp(dump_out, template, strlen(template) - 2), 0,
+ DUMP_ERROR);
+ assert_d_eq(memcmp(dump_out + strlen(dump_out) - 2,
+ template + strlen(template) - 2, 2), 0, DUMP_ERROR);
+
+ const char *start = dump_out + strlen(template) - 2;
+ const char *end = dump_out + strlen(dump_out) - 2;
+ const confirm_record_t *record;
+ for (record = records; record < records + n_records; ++record) {
+
+#define ASSERT_CHAR(c) do { \
+ assert_true(start < end, DUMP_ERROR); \
+ assert_c_eq(*start++, c, DUMP_ERROR); \
+} while (0)
+
+#define ASSERT_STR(s) do { \
+ const size_t len = strlen(s); \
+ assert_true(start + len <= end, DUMP_ERROR); \
+ assert_d_eq(memcmp(start, s, len), 0, DUMP_ERROR); \
+ start += len; \
+} while (0)
+
+#define ASSERT_FORMATTED_STR(s, ...) do { \
+ malloc_snprintf(buf, sizeof(buf), s, __VA_ARGS__); \
+ ASSERT_STR(buf); \
+} while (0)
+
+ if (record != records) {
+ ASSERT_CHAR(',');
+ }
+
+ ASSERT_CHAR('{');
+
+ ASSERT_STR("\"size\"");
+ ASSERT_CHAR(':');
+ ASSERT_FORMATTED_STR("%zu", record->size);
+ ASSERT_CHAR(',');
+
+ ASSERT_STR("\"usize\"");
+ ASSERT_CHAR(':');
+ ASSERT_FORMATTED_STR("%zu", record->usize);
+ ASSERT_CHAR(',');
+
+ ASSERT_STR("\"released\"");
+ ASSERT_CHAR(':');
+ ASSERT_STR(record->released ? "true" : "false");
+ ASSERT_CHAR(',');
+
+ const char **type = types;
+ while (true) {
+ ASSERT_FORMATTED_STR("\"%s_thread_uid\"", *type);
+ ASSERT_CHAR(':');
+ while (isdigit(*start)) {
+ ++start;
+ }
+ ASSERT_CHAR(',');
+
+ if (opt_prof_sys_thread_name) {
+ ASSERT_FORMATTED_STR("\"%s_thread_name\"",
+ *type);
+ ASSERT_CHAR(':');
+ ASSERT_CHAR('"');
+ while (*start != '"') {
+ ++start;
+ }
+ ASSERT_CHAR('"');
+ ASSERT_CHAR(',');
+ }
+
+ ASSERT_FORMATTED_STR("\"%s_time\"", *type);
+ ASSERT_CHAR(':');
+ while (isdigit(*start)) {
+ ++start;
+ }
+ ASSERT_CHAR(',');
+
+ ASSERT_FORMATTED_STR("\"%s_trace\"", *type);
+ ASSERT_CHAR(':');
+ ASSERT_CHAR('[');
+ while (isdigit(*start) || *start == 'x' ||
+ (*start >= 'a' && *start <= 'f') ||
+ *start == '\"' || *start == ',') {
+ ++start;
+ }
+ ASSERT_CHAR(']');
+
+ if (strcmp(*type, "dalloc") == 0) {
+ break;
+ }
+
+ assert(strcmp(*type, "alloc") == 0);
+ if (!record->released) {
+ break;
+ }
+
+ ASSERT_CHAR(',');
+ ++type;
+ }
+
+ ASSERT_CHAR('}');
+
+#undef ASSERT_FORMATTED_STR
+#undef ASSERT_STR
+#undef ASSERT_CHAR
+
+ }
+ assert_ptr_eq(record, records + n_records, DUMP_ERROR);
+ assert_ptr_eq(start, end, DUMP_ERROR);
+}
+
+TEST_BEGIN(test_prof_recent_alloc_dump) {
+ test_skip_if(!config_prof);
+
+ confirm_prof_setup();
+
+ ssize_t future;
+ void *p, *q;
+ confirm_record_t records[2];
+
+ assert_zu_eq(lg_prof_sample, (size_t)0,
+ "lg_prof_sample not set correctly");
+
+ future = 0;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
+ call_dump();
+ expect_str_eq(dump_out, "{\"sample_interval\":1,"
+ "\"recent_alloc_max\":0,\"recent_alloc\":[]}", DUMP_ERROR);
+
+ future = 2;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
+ call_dump();
+ const char *template = "{\"sample_interval\":1,"
+ "\"recent_alloc_max\":2,\"recent_alloc\":[]}";
+ expect_str_eq(dump_out, template, DUMP_ERROR);
+
+ p = malloc(7);
+ call_dump();
+ records[0].size = 7;
+ records[0].usize = sz_s2u(7);
+ records[0].released = false;
+ confirm_record(template, records, 1);
+
+ q = mallocx(17, MALLOCX_ALIGN(128));
+ call_dump();
+ records[1].size = 17;
+ records[1].usize = sz_sa2u(17, 128);
+ records[1].released = false;
+ confirm_record(template, records, 2);
+
+ free(q);
+ call_dump();
+ records[1].released = true;
+ confirm_record(template, records, 2);
+
+ free(p);
+ call_dump();
+ records[0].released = true;
+ confirm_record(template, records, 2);
+
+ future = OPT_ALLOC_MAX;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
+ confirm_prof_setup();
+}
+TEST_END
+
+#undef DUMP_ERROR
+#undef DUMP_OUT_SIZE
+
+#define N_THREADS 8
+#define N_PTRS 512
+#define N_CTLS 8
+#define N_ITERS 2048
+#define STRESS_ALLOC_MAX 4096
+
+typedef struct {
+ thd_t thd;
+ size_t id;
+ void *ptrs[N_PTRS];
+ size_t count;
+} thd_data_t;
+
+static thd_data_t thd_data[N_THREADS];
+static ssize_t test_max;
+
+static void
+test_write_cb(void *cbopaque, const char *str) {
+ sleep_ns(1000 * 1000);
+}
+
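+/*
+ * Each iteration randomly frees one of this thread's live pointers, allocates
+ * a new one, or exercises the recent-allocation dump and max read/write
+ * controls, stressing the record list concurrently across threads.
+ */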
+static void *
+f_thread(void *arg) {
+ const size_t thd_id = *(size_t *)arg;
+ thd_data_t *data_p = thd_data + thd_id;
+ assert(data_p->id == thd_id);
+ data_p->count = 0;
+ uint64_t rand = (uint64_t)thd_id;
+ tsd_t *tsd = tsd_fetch();
+ assert(test_max > 1);
+ ssize_t last_max = -1;
+ for (int i = 0; i < N_ITERS; i++) {
+ rand = prng_range_u64(&rand, N_PTRS + N_CTLS * 5);
+ assert(data_p->count <= N_PTRS);
+ if (rand < data_p->count) {
+ assert(data_p->count > 0);
+ if (rand != data_p->count - 1) {
+ assert(data_p->count > 1);
+ void *temp = data_p->ptrs[rand];
+ data_p->ptrs[rand] =
+ data_p->ptrs[data_p->count - 1];
+ data_p->ptrs[data_p->count - 1] = temp;
+ }
+ free(data_p->ptrs[--data_p->count]);
+ } else if (rand < N_PTRS) {
+ assert(data_p->count < N_PTRS);
+ data_p->ptrs[data_p->count++] = malloc(1);
+ } else if (rand % 5 == 0) {
+ prof_recent_alloc_dump(tsd, test_write_cb, NULL);
+ } else if (rand % 5 == 1) {
+ last_max = prof_recent_alloc_max_ctl_read();
+ } else if (rand % 5 == 2) {
+ last_max =
+ prof_recent_alloc_max_ctl_write(tsd, test_max * 2);
+ } else if (rand % 5 == 3) {
+ last_max =
+ prof_recent_alloc_max_ctl_write(tsd, test_max);
+ } else {
+ assert(rand % 5 == 4);
+ last_max =
+ prof_recent_alloc_max_ctl_write(tsd, test_max / 2);
+ }
+ assert_zd_ge(last_max, -1, "Illegal last-N max");
+ }
+
+ while (data_p->count > 0) {
+ free(data_p->ptrs[--data_p->count]);
+ }
+
+ return NULL;
+}
+
+TEST_BEGIN(test_prof_recent_stress) {
+ test_skip_if(!config_prof);
+
+ confirm_prof_setup();
+
+ test_max = OPT_ALLOC_MAX;
+ for (size_t i = 0; i < N_THREADS; i++) {
+ thd_data_t *data_p = thd_data + i;
+ data_p->id = i;
+ thd_create(&data_p->thd, &f_thread, &data_p->id);
+ }
+ for (size_t i = 0; i < N_THREADS; i++) {
+ thd_data_t *data_p = thd_data + i;
+ thd_join(data_p->thd, NULL);
+ }
+
+ test_max = STRESS_ALLOC_MAX;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ NULL, NULL, &test_max, sizeof(ssize_t)), 0, "Write error");
+ for (size_t i = 0; i < N_THREADS; i++) {
+ thd_data_t *data_p = thd_data + i;
+ data_p->id = i;
+ thd_create(&data_p->thd, &f_thread, &data_p->id);
+ }
+ for (size_t i = 0; i < N_THREADS; i++) {
+ thd_data_t *data_p = thd_data + i;
+ thd_join(data_p->thd, NULL);
+ }
+
+ test_max = OPT_ALLOC_MAX;
+ assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
+ NULL, NULL, &test_max, sizeof(ssize_t)), 0, "Write error");
+ confirm_prof_setup();
+}
+TEST_END
+
+#undef STRESS_ALLOC_MAX
+#undef N_ITERS
+#undef N_PTRS
+#undef N_THREADS
+
+int
+main(void) {
+ return test(
+ test_confirm_setup,
+ test_prof_recent_off,
+ test_prof_recent_on,
+ test_prof_recent_alloc,
+ test_prof_recent_alloc_dump,
+ test_prof_recent_stress);
+}
diff --git a/deps/jemalloc/test/unit/prof_recent.sh b/deps/jemalloc/test/unit/prof_recent.sh
new file mode 100644
index 000000000..58a54a47b
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_recent.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:true,prof_active:true,lg_prof_sample:0,prof_recent_alloc_max:3"
+fi
diff --git a/deps/jemalloc/test/unit/prof_reset.c b/deps/jemalloc/test/unit/prof_reset.c
index 7cce42d27..9b33b2051 100644
--- a/deps/jemalloc/test/unit/prof_reset.c
+++ b/deps/jemalloc/test/unit/prof_reset.c
@@ -1,7 +1,10 @@
#include "test/jemalloc_test.h"
+#include "jemalloc/internal/prof_data.h"
+#include "jemalloc/internal/prof_sys.h"
+
static int
-prof_dump_open_intercept(bool propagate_err, const char *filename) {
+prof_dump_open_file_intercept(const char *filename, int mode) {
int fd;
fd = open("/dev/null", O_WRONLY);
@@ -12,54 +15,53 @@ prof_dump_open_intercept(bool propagate_err, const char *filename) {
static void
set_prof_active(bool active) {
- assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
+ expect_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
sizeof(active)), 0, "Unexpected mallctl failure");
}
static size_t
get_lg_prof_sample(void) {
- size_t lg_prof_sample;
+ size_t ret;
size_t sz = sizeof(size_t);
- assert_d_eq(mallctl("prof.lg_sample", (void *)&lg_prof_sample, &sz,
- NULL, 0), 0,
+ expect_d_eq(mallctl("prof.lg_sample", (void *)&ret, &sz, NULL, 0), 0,
"Unexpected mallctl failure while reading profiling sample rate");
- return lg_prof_sample;
+ return ret;
}
static void
-do_prof_reset(size_t lg_prof_sample) {
- assert_d_eq(mallctl("prof.reset", NULL, NULL,
- (void *)&lg_prof_sample, sizeof(size_t)), 0,
+do_prof_reset(size_t lg_prof_sample_input) {
+ expect_d_eq(mallctl("prof.reset", NULL, NULL,
+ (void *)&lg_prof_sample_input, sizeof(size_t)), 0,
"Unexpected mallctl failure while resetting profile data");
- assert_zu_eq(lg_prof_sample, get_lg_prof_sample(),
+ expect_zu_eq(lg_prof_sample_input, get_lg_prof_sample(),
"Expected profile sample rate change");
}
TEST_BEGIN(test_prof_reset_basic) {
- size_t lg_prof_sample_orig, lg_prof_sample, lg_prof_sample_next;
+ size_t lg_prof_sample_orig, lg_prof_sample_cur, lg_prof_sample_next;
size_t sz;
unsigned i;
test_skip_if(!config_prof);
sz = sizeof(size_t);
- assert_d_eq(mallctl("opt.lg_prof_sample", (void *)&lg_prof_sample_orig,
+ expect_d_eq(mallctl("opt.lg_prof_sample", (void *)&lg_prof_sample_orig,
&sz, NULL, 0), 0,
"Unexpected mallctl failure while reading profiling sample rate");
- assert_zu_eq(lg_prof_sample_orig, 0,
+ expect_zu_eq(lg_prof_sample_orig, 0,
"Unexpected profiling sample rate");
- lg_prof_sample = get_lg_prof_sample();
- assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
+ lg_prof_sample_cur = get_lg_prof_sample();
+ expect_zu_eq(lg_prof_sample_orig, lg_prof_sample_cur,
"Unexpected disagreement between \"opt.lg_prof_sample\" and "
"\"prof.lg_sample\"");
/* Test simple resets. */
for (i = 0; i < 2; i++) {
- assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
+ expect_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl failure while resetting profile data");
- lg_prof_sample = get_lg_prof_sample();
- assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
+ lg_prof_sample_cur = get_lg_prof_sample();
+ expect_zu_eq(lg_prof_sample_orig, lg_prof_sample_cur,
"Unexpected profile sample rate change");
}
@@ -67,64 +69,42 @@ TEST_BEGIN(test_prof_reset_basic) {
lg_prof_sample_next = 1;
for (i = 0; i < 2; i++) {
do_prof_reset(lg_prof_sample_next);
- lg_prof_sample = get_lg_prof_sample();
- assert_zu_eq(lg_prof_sample, lg_prof_sample_next,
+ lg_prof_sample_cur = get_lg_prof_sample();
+ expect_zu_eq(lg_prof_sample_cur, lg_prof_sample_next,
"Expected profile sample rate change");
lg_prof_sample_next = lg_prof_sample_orig;
}
/* Make sure the test code restored prof.lg_sample. */
- lg_prof_sample = get_lg_prof_sample();
- assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
+ lg_prof_sample_cur = get_lg_prof_sample();
+ expect_zu_eq(lg_prof_sample_orig, lg_prof_sample_cur,
"Unexpected disagreement between \"opt.lg_prof_sample\" and "
"\"prof.lg_sample\"");
}
TEST_END
-bool prof_dump_header_intercepted = false;
-prof_cnt_t cnt_all_copy = {0, 0, 0, 0};
-static bool
-prof_dump_header_intercept(tsdn_t *tsdn, bool propagate_err,
- const prof_cnt_t *cnt_all) {
- prof_dump_header_intercepted = true;
- memcpy(&cnt_all_copy, cnt_all, sizeof(prof_cnt_t));
-
- return false;
-}
-
TEST_BEGIN(test_prof_reset_cleanup) {
- void *p;
- prof_dump_header_t *prof_dump_header_orig;
-
test_skip_if(!config_prof);
set_prof_active(true);
- assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
- p = mallocx(1, 0);
- assert_ptr_not_null(p, "Unexpected mallocx() failure");
- assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");
-
- prof_dump_header_orig = prof_dump_header;
- prof_dump_header = prof_dump_header_intercept;
- assert_false(prof_dump_header_intercepted, "Unexpected intercept");
+ expect_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
+ void *p = mallocx(1, 0);
+ expect_ptr_not_null(p, "Unexpected mallocx() failure");
+ expect_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");
- assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
- 0, "Unexpected error while dumping heap profile");
- assert_true(prof_dump_header_intercepted, "Expected intercept");
- assert_u64_eq(cnt_all_copy.curobjs, 1, "Expected 1 allocation");
+ prof_cnt_t cnt_all;
+ prof_cnt_all(&cnt_all);
+ expect_u64_eq(cnt_all.curobjs, 1, "Expected 1 allocation");
- assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
+ expect_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
"Unexpected error while resetting heap profile data");
- assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
- 0, "Unexpected error while dumping heap profile");
- assert_u64_eq(cnt_all_copy.curobjs, 0, "Expected 0 allocations");
- assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");
-
- prof_dump_header = prof_dump_header_orig;
+ prof_cnt_all(&cnt_all);
+ expect_u64_eq(cnt_all.curobjs, 0, "Expected 0 allocations");
+ expect_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");
dallocx(p, 0);
- assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
+ expect_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
set_prof_active(false);
}
@@ -145,13 +125,13 @@ thd_start(void *varg) {
for (i = 0; i < NALLOCS_PER_THREAD; i++) {
if (i % RESET_INTERVAL == 0) {
- assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0),
+ expect_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0),
0, "Unexpected error while resetting heap profile "
"data");
}
if (i % DUMP_INTERVAL == 0) {
- assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
+ expect_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
0, "Unexpected error while dumping heap profile");
}
@@ -162,7 +142,7 @@ thd_start(void *varg) {
*pp = NULL;
}
*pp = btalloc(1, thd_ind*NALLOCS_PER_THREAD + i);
- assert_ptr_not_null(*pp,
+ expect_ptr_not_null(*pp,
"Unexpected btalloc() failure");
}
}
@@ -189,7 +169,7 @@ TEST_BEGIN(test_prof_reset) {
test_skip_if(!config_prof);
bt_count = prof_bt_count();
- assert_zu_eq(bt_count, 0,
+ expect_zu_eq(bt_count, 0,
"Unexpected pre-existing tdata structures");
tdata_count = prof_tdata_count();
@@ -206,9 +186,9 @@ TEST_BEGIN(test_prof_reset) {
thd_join(thds[i], NULL);
}
- assert_zu_eq(prof_bt_count(), bt_count,
+ expect_zu_eq(prof_bt_count(), bt_count,
"Unexpected bactrace count change");
- assert_zu_eq(prof_tdata_count(), tdata_count,
+ expect_zu_eq(prof_tdata_count(), tdata_count,
"Unexpected remaining tdata structures");
set_prof_active(false);
@@ -246,19 +226,19 @@ TEST_BEGIN(test_xallocx) {
/* Allocate small object (which will be promoted). */
p = ptrs[i] = mallocx(1, 0);
- assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ expect_ptr_not_null(p, "Unexpected mallocx() failure");
/* Reset profiling. */
do_prof_reset(0);
/* Perform successful xallocx(). */
sz = sallocx(p, 0);
- assert_zu_eq(xallocx(p, sz, 0, 0), sz,
+ expect_zu_eq(xallocx(p, sz, 0, 0), sz,
"Unexpected xallocx() failure");
/* Perform unsuccessful xallocx(). */
nsz = nallocx(sz+1, 0);
- assert_zu_eq(xallocx(p, nsz, 0, 0), sz,
+ expect_zu_eq(xallocx(p, nsz, 0, 0), sz,
"Unexpected xallocx() success");
}
@@ -276,7 +256,7 @@ TEST_END
int
main(void) {
/* Intercept dumping prior to running any tests. */
- prof_dump_open = prof_dump_open_intercept;
+ prof_dump_open_file = prof_dump_open_file_intercept;
return test_no_reentrancy(
test_prof_reset_basic,
diff --git a/deps/jemalloc/test/unit/prof_reset.sh b/deps/jemalloc/test/unit/prof_reset.sh
index 43c516a08..daefeb70c 100644
--- a/deps/jemalloc/test/unit/prof_reset.sh
+++ b/deps/jemalloc/test/unit/prof_reset.sh
@@ -1,5 +1,5 @@
#!/bin/sh
if [ "x${enable_prof}" = "x1" ] ; then
- export MALLOC_CONF="prof:true,prof_active:false,lg_prof_sample:0"
+ export MALLOC_CONF="prof:true,prof_active:false,lg_prof_sample:0,prof_recent_alloc_max:0"
fi
diff --git a/deps/jemalloc/test/unit/prof_stats.c b/deps/jemalloc/test/unit/prof_stats.c
new file mode 100644
index 000000000..c88c4ae0f
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_stats.c
@@ -0,0 +1,151 @@
+#include "test/jemalloc_test.h"
+
+#define N_PTRS 3
+
+static void
+test_combinations(szind_t ind, size_t sizes_array[N_PTRS],
+ int flags_array[N_PTRS]) {
+#define MALLCTL_STR_LEN 64
+ assert(opt_prof && opt_prof_stats);
+
+ char mallctl_live_str[MALLCTL_STR_LEN];
+ char mallctl_accum_str[MALLCTL_STR_LEN];
+ if (ind < SC_NBINS) {
+ malloc_snprintf(mallctl_live_str, MALLCTL_STR_LEN,
+ "prof.stats.bins.%u.live", (unsigned)ind);
+ malloc_snprintf(mallctl_accum_str, MALLCTL_STR_LEN,
+ "prof.stats.bins.%u.accum", (unsigned)ind);
+ } else {
+ malloc_snprintf(mallctl_live_str, MALLCTL_STR_LEN,
+ "prof.stats.lextents.%u.live", (unsigned)(ind - SC_NBINS));
+ malloc_snprintf(mallctl_accum_str, MALLCTL_STR_LEN,
+ "prof.stats.lextents.%u.accum", (unsigned)(ind - SC_NBINS));
+ }
+
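+ /* Each stats read below yields two uint64_t values: requested bytes and count. */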
+ size_t stats_len = 2 * sizeof(uint64_t);
+
+ uint64_t live_stats_orig[2];
+ assert_d_eq(mallctl(mallctl_live_str, &live_stats_orig, &stats_len,
+ NULL, 0), 0, "");
+ uint64_t accum_stats_orig[2];
+ assert_d_eq(mallctl(mallctl_accum_str, &accum_stats_orig, &stats_len,
+ NULL, 0), 0, "");
+
+ void *ptrs[N_PTRS];
+
+ uint64_t live_req_sum = 0;
+ uint64_t live_count = 0;
+ uint64_t accum_req_sum = 0;
+ uint64_t accum_count = 0;
+
+ for (size_t i = 0; i < N_PTRS; ++i) {
+ size_t sz = sizes_array[i];
+ int flags = flags_array[i];
+ void *p = mallocx(sz, flags);
+ assert_ptr_not_null(p, "malloc() failed");
+ assert(TEST_MALLOC_SIZE(p) == sz_index2size(ind));
+ ptrs[i] = p;
+ live_req_sum += sz;
+ live_count++;
+ accum_req_sum += sz;
+ accum_count++;
+ uint64_t live_stats[2];
+ assert_d_eq(mallctl(mallctl_live_str, &live_stats, &stats_len,
+ NULL, 0), 0, "");
+ expect_u64_eq(live_stats[0] - live_stats_orig[0],
+ live_req_sum, "");
+ expect_u64_eq(live_stats[1] - live_stats_orig[1],
+ live_count, "");
+ uint64_t accum_stats[2];
+ assert_d_eq(mallctl(mallctl_accum_str, &accum_stats, &stats_len,
+ NULL, 0), 0, "");
+ expect_u64_eq(accum_stats[0] - accum_stats_orig[0],
+ accum_req_sum, "");
+ expect_u64_eq(accum_stats[1] - accum_stats_orig[1],
+ accum_count, "");
+ }
+
+ for (size_t i = 0; i < N_PTRS; ++i) {
+ size_t sz = sizes_array[i];
+ int flags = flags_array[i];
+ sdallocx(ptrs[i], sz, flags);
+ live_req_sum -= sz;
+ live_count--;
+ uint64_t live_stats[2];
+ assert_d_eq(mallctl(mallctl_live_str, &live_stats, &stats_len,
+ NULL, 0), 0, "");
+ expect_u64_eq(live_stats[0] - live_stats_orig[0],
+ live_req_sum, "");
+ expect_u64_eq(live_stats[1] - live_stats_orig[1],
+ live_count, "");
+ uint64_t accum_stats[2];
+ assert_d_eq(mallctl(mallctl_accum_str, &accum_stats, &stats_len,
+ NULL, 0), 0, "");
+ expect_u64_eq(accum_stats[0] - accum_stats_orig[0],
+ accum_req_sum, "");
+ expect_u64_eq(accum_stats[1] - accum_stats_orig[1],
+ accum_count, "");
+ }
+#undef MALLCTL_STR_LEN
+}
+
+static void
+test_szind_wrapper(szind_t ind) {
+ size_t sizes_array[N_PTRS];
+ int flags_array[N_PTRS];
+ for (size_t i = 0, sz = sz_index2size(ind) - N_PTRS; i < N_PTRS;
+ ++i, ++sz) {
+ sizes_array[i] = sz;
+ flags_array[i] = 0;
+ }
+ test_combinations(ind, sizes_array, flags_array);
+}
+
+TEST_BEGIN(test_prof_stats) {
+ test_skip_if(!config_prof);
+ test_szind_wrapper(0);
+ test_szind_wrapper(1);
+ test_szind_wrapper(2);
+ test_szind_wrapper(SC_NBINS);
+ test_szind_wrapper(SC_NBINS + 1);
+ test_szind_wrapper(SC_NBINS + 2);
+}
+TEST_END
+
+static void
+test_szind_aligned_wrapper(szind_t ind, unsigned lg_align) {
+ size_t sizes_array[N_PTRS];
+ int flags_array[N_PTRS];
+ int flags = MALLOCX_LG_ALIGN(lg_align);
+ for (size_t i = 0, sz = sz_index2size(ind) - N_PTRS; i < N_PTRS;
+ ++i, ++sz) {
+ sizes_array[i] = sz;
+ flags_array[i] = flags;
+ }
+ test_combinations(
+ sz_size2index(sz_sa2u(sz_index2size(ind), 1 << lg_align)),
+ sizes_array, flags_array);
+}
+
+TEST_BEGIN(test_prof_stats_aligned) {
+ test_skip_if(!config_prof);
+ for (szind_t ind = 0; ind < 10; ++ind) {
+ for (unsigned lg_align = 0; lg_align < 10; ++lg_align) {
+ test_szind_aligned_wrapper(ind, lg_align);
+ }
+ }
+ for (szind_t ind = SC_NBINS - 5; ind < SC_NBINS + 5; ++ind) {
+ for (unsigned lg_align = SC_LG_LARGE_MINCLASS - 5;
+ lg_align < SC_LG_LARGE_MINCLASS + 5; ++lg_align) {
+ test_szind_aligned_wrapper(ind, lg_align);
+ }
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_prof_stats,
+ test_prof_stats_aligned);
+}
diff --git a/deps/jemalloc/test/unit/prof_stats.sh b/deps/jemalloc/test/unit/prof_stats.sh
new file mode 100644
index 000000000..f3c819b57
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_stats.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:true,prof_active:true,lg_prof_sample:0,prof_stats:true"
+fi
diff --git a/deps/jemalloc/test/unit/prof_sys_thread_name.c b/deps/jemalloc/test/unit/prof_sys_thread_name.c
new file mode 100644
index 000000000..affc788aa
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_sys_thread_name.c
@@ -0,0 +1,77 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/prof_sys.h"
+
+static const char *test_thread_name = "test_name";
+
+static int
+test_prof_sys_thread_name_read_error(char *buf, size_t limit) {
+ return ENOSYS;
+}
+
+static int
+test_prof_sys_thread_name_read(char *buf, size_t limit) {
+ assert(strlen(test_thread_name) < limit);
+ strncpy(buf, test_thread_name, limit);
+ return 0;
+}
+
+static int
+test_prof_sys_thread_name_read_clear(char *buf, size_t limit) {
+ assert(limit > 0);
+ buf[0] = '\0';
+ return 0;
+}
+
+TEST_BEGIN(test_prof_sys_thread_name) {
+ test_skip_if(!config_prof);
+
+ bool oldval;
+ size_t sz = sizeof(oldval);
+ assert_d_eq(mallctl("opt.prof_sys_thread_name", &oldval, &sz, NULL, 0),
+ 0, "mallctl failed");
+ assert_true(oldval, "option was not set correctly");
+
+ const char *thread_name;
+ sz = sizeof(thread_name);
+ assert_d_eq(mallctl("thread.prof.name", &thread_name, &sz, NULL, 0), 0,
+ "mallctl read for thread name should not fail");
+ expect_str_eq(thread_name, "", "Initial thread name should be empty");
+
+ thread_name = test_thread_name;
+ assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name, sz),
+ ENOENT, "mallctl write for thread name should fail");
+ assert_ptr_eq(thread_name, test_thread_name,
+ "Thread name should not be touched");
+
+ prof_sys_thread_name_read = test_prof_sys_thread_name_read_error;
+ void *p = malloc(1);
+ free(p);
+ assert_d_eq(mallctl("thread.prof.name", &thread_name, &sz, NULL, 0), 0,
+ "mallctl read for thread name should not fail");
+ assert_str_eq(thread_name, "",
+ "Thread name should stay the same if the system call fails");
+
+ prof_sys_thread_name_read = test_prof_sys_thread_name_read;
+ p = malloc(1);
+ free(p);
+ assert_d_eq(mallctl("thread.prof.name", &thread_name, &sz, NULL, 0), 0,
+ "mallctl read for thread name should not fail");
+ assert_str_eq(thread_name, test_thread_name,
+ "Thread name should be changed if the system call succeeds");
+
+ prof_sys_thread_name_read = test_prof_sys_thread_name_read_clear;
+ p = malloc(1);
+ free(p);
+ assert_d_eq(mallctl("thread.prof.name", &thread_name, &sz, NULL, 0), 0,
+ "mallctl read for thread name should not fail");
+ expect_str_eq(thread_name, "", "Thread name should be updated if the "
+ "system call returns a different name");
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_prof_sys_thread_name);
+}
diff --git a/deps/jemalloc/test/unit/prof_sys_thread_name.sh b/deps/jemalloc/test/unit/prof_sys_thread_name.sh
new file mode 100644
index 000000000..1f02a8a80
--- /dev/null
+++ b/deps/jemalloc/test/unit/prof_sys_thread_name.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:true,prof_active:true,lg_prof_sample:0,prof_sys_thread_name:true"
+fi
diff --git a/deps/jemalloc/test/unit/prof_tctx.c b/deps/jemalloc/test/unit/prof_tctx.c
index ff3b2b0ca..e0efdc36a 100644
--- a/deps/jemalloc/test/unit/prof_tctx.c
+++ b/deps/jemalloc/test/unit/prof_tctx.c
@@ -1,40 +1,42 @@
#include "test/jemalloc_test.h"
+#include "jemalloc/internal/prof_data.h"
+
TEST_BEGIN(test_prof_realloc) {
- tsdn_t *tsdn;
+ tsd_t *tsd;
int flags;
void *p, *q;
- prof_tctx_t *tctx_p, *tctx_q;
- uint64_t curobjs_0, curobjs_1, curobjs_2, curobjs_3;
+ prof_info_t prof_info_p, prof_info_q;
+ prof_cnt_t cnt_0, cnt_1, cnt_2, cnt_3;
test_skip_if(!config_prof);
- tsdn = tsdn_fetch();
+ tsd = tsd_fetch();
flags = MALLOCX_TCACHE_NONE;
- prof_cnt_all(&curobjs_0, NULL, NULL, NULL);
+ prof_cnt_all(&cnt_0);
p = mallocx(1024, flags);
- assert_ptr_not_null(p, "Unexpected mallocx() failure");
- tctx_p = prof_tctx_get(tsdn, p, NULL);
- assert_ptr_ne(tctx_p, (prof_tctx_t *)(uintptr_t)1U,
+ expect_ptr_not_null(p, "Unexpected mallocx() failure");
+ prof_info_get(tsd, p, NULL, &prof_info_p);
+ expect_ptr_ne(prof_info_p.alloc_tctx, (prof_tctx_t *)(uintptr_t)1U,
"Expected valid tctx");
- prof_cnt_all(&curobjs_1, NULL, NULL, NULL);
- assert_u64_eq(curobjs_0 + 1, curobjs_1,
+ prof_cnt_all(&cnt_1);
+ expect_u64_eq(cnt_0.curobjs + 1, cnt_1.curobjs,
"Allocation should have increased sample size");
q = rallocx(p, 2048, flags);
- assert_ptr_ne(p, q, "Expected move");
- assert_ptr_not_null(p, "Unexpected rmallocx() failure");
- tctx_q = prof_tctx_get(tsdn, q, NULL);
- assert_ptr_ne(tctx_q, (prof_tctx_t *)(uintptr_t)1U,
+ expect_ptr_ne(p, q, "Expected move");
+ expect_ptr_not_null(p, "Unexpected rmallocx() failure");
+ prof_info_get(tsd, q, NULL, &prof_info_q);
+ expect_ptr_ne(prof_info_q.alloc_tctx, (prof_tctx_t *)(uintptr_t)1U,
"Expected valid tctx");
- prof_cnt_all(&curobjs_2, NULL, NULL, NULL);
- assert_u64_eq(curobjs_1, curobjs_2,
+ prof_cnt_all(&cnt_2);
+ expect_u64_eq(cnt_1.curobjs, cnt_2.curobjs,
"Reallocation should not have changed sample size");
dallocx(q, flags);
- prof_cnt_all(&curobjs_3, NULL, NULL, NULL);
- assert_u64_eq(curobjs_0, curobjs_3,
+ prof_cnt_all(&cnt_3);
+ expect_u64_eq(cnt_0.curobjs, cnt_3.curobjs,
"Sample size should have returned to base level");
}
TEST_END
diff --git a/deps/jemalloc/test/unit/prof_tctx.sh b/deps/jemalloc/test/unit/prof_tctx.sh
index 8fcc7d8a7..485f9bf0a 100644
--- a/deps/jemalloc/test/unit/prof_tctx.sh
+++ b/deps/jemalloc/test/unit/prof_tctx.sh
@@ -1,5 +1,5 @@
#!/bin/sh
if [ "x${enable_prof}" = "x1" ] ; then
- export MALLOC_CONF="prof:true,lg_prof_sample:0"
+ export MALLOC_CONF="prof:true,prof_active:true,lg_prof_sample:0"
fi
diff --git a/deps/jemalloc/test/unit/prof_thread_name.c b/deps/jemalloc/test/unit/prof_thread_name.c
index c9c2a2b76..3c4614fca 100644
--- a/deps/jemalloc/test/unit/prof_thread_name.c
+++ b/deps/jemalloc/test/unit/prof_thread_name.c
@@ -7,11 +7,11 @@ mallctl_thread_name_get_impl(const char *thread_name_expected, const char *func,
size_t sz;
sz = sizeof(thread_name_old);
- assert_d_eq(mallctl("thread.prof.name", (void *)&thread_name_old, &sz,
+ expect_d_eq(mallctl("thread.prof.name", (void *)&thread_name_old, &sz,
NULL, 0), 0,
"%s():%d: Unexpected mallctl failure reading thread.prof.name",
func, line);
- assert_str_eq(thread_name_old, thread_name_expected,
+ expect_str_eq(thread_name_old, thread_name_expected,
"%s():%d: Unexpected thread.prof.name value", func, line);
}
#define mallctl_thread_name_get(a) \
@@ -20,9 +20,9 @@ mallctl_thread_name_get_impl(const char *thread_name_expected, const char *func,
static void
mallctl_thread_name_set_impl(const char *thread_name, const char *func,
int line) {
- assert_d_eq(mallctl("thread.prof.name", NULL, NULL,
+ expect_d_eq(mallctl("thread.prof.name", NULL, NULL,
(void *)&thread_name, sizeof(thread_name)), 0,
- "%s():%d: Unexpected mallctl failure reading thread.prof.name",
+ "%s():%d: Unexpected mallctl failure writing thread.prof.name",
func, line);
mallctl_thread_name_get_impl(thread_name, func, line);
}
@@ -33,20 +33,21 @@ TEST_BEGIN(test_prof_thread_name_validation) {
const char *thread_name;
test_skip_if(!config_prof);
+ test_skip_if(opt_prof_sys_thread_name);
mallctl_thread_name_get("");
mallctl_thread_name_set("hi there");
/* NULL input shouldn't be allowed. */
thread_name = NULL;
- assert_d_eq(mallctl("thread.prof.name", NULL, NULL,
+ expect_d_eq(mallctl("thread.prof.name", NULL, NULL,
(void *)&thread_name, sizeof(thread_name)), EFAULT,
"Unexpected mallctl result writing \"%s\" to thread.prof.name",
thread_name);
/* '\n' shouldn't be allowed. */
thread_name = "hi\nthere";
- assert_d_eq(mallctl("thread.prof.name", NULL, NULL,
+ expect_d_eq(mallctl("thread.prof.name", NULL, NULL,
(void *)&thread_name, sizeof(thread_name)), EFAULT,
"Unexpected mallctl result writing \"%s\" to thread.prof.name",
thread_name);
@@ -57,7 +58,7 @@ TEST_BEGIN(test_prof_thread_name_validation) {
size_t sz;
sz = sizeof(thread_name_old);
- assert_d_eq(mallctl("thread.prof.name",
+ expect_d_eq(mallctl("thread.prof.name",
(void *)&thread_name_old, &sz, (void *)&thread_name,
sizeof(thread_name)), EPERM,
"Unexpected mallctl result writing \"%s\" to "
@@ -82,7 +83,7 @@ thd_start(void *varg) {
mallctl_thread_name_set(thread_name);
for (i = 0; i < NRESET; i++) {
- assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
+ expect_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
"Unexpected error while resetting heap profile data");
mallctl_thread_name_get(thread_name);
}
@@ -94,12 +95,13 @@ thd_start(void *varg) {
}
TEST_BEGIN(test_prof_thread_name_threaded) {
+ test_skip_if(!config_prof);
+ test_skip_if(opt_prof_sys_thread_name);
+
thd_t thds[NTHREADS];
unsigned thd_args[NTHREADS];
unsigned i;
- test_skip_if(!config_prof);
-
for (i = 0; i < NTHREADS; i++) {
thd_args[i] = i;
thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
diff --git a/deps/jemalloc/test/unit/psset.c b/deps/jemalloc/test/unit/psset.c
new file mode 100644
index 000000000..6ff720129
--- /dev/null
+++ b/deps/jemalloc/test/unit/psset.c
@@ -0,0 +1,748 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/psset.h"
+
+#define PAGESLAB_ADDR ((void *)(1234 * HUGEPAGE))
+#define PAGESLAB_AGE 5678
+
+#define ALLOC_ARENA_IND 111
+#define ALLOC_ESN 222
+
+static void
+edata_init_test(edata_t *edata) {
+ memset(edata, 0, sizeof(*edata));
+ edata_arena_ind_set(edata, ALLOC_ARENA_IND);
+ edata_esn_set(edata, ALLOC_ESN);
+}
+
+static void
+test_psset_fake_purge(hpdata_t *ps) {
+ hpdata_purge_state_t purge_state;
+ hpdata_alloc_allowed_set(ps, false);
+ hpdata_purge_begin(ps, &purge_state);
+ void *addr;
+ size_t size;
+ while (hpdata_purge_next(ps, &purge_state, &addr, &size)) {
+ }
+ hpdata_purge_end(ps, &purge_state);
+ hpdata_alloc_allowed_set(ps, true);
+}
+
+static void
+test_psset_alloc_new(psset_t *psset, hpdata_t *ps, edata_t *r_edata,
+ size_t size) {
+ hpdata_assert_empty(ps);
+
+ test_psset_fake_purge(ps);
+
+ psset_insert(psset, ps);
+ psset_update_begin(psset, ps);
+
+ void *addr = hpdata_reserve_alloc(ps, size);
+ edata_init(r_edata, edata_arena_ind_get(r_edata), addr, size,
+ /* slab */ false, SC_NSIZES, /* sn */ 0, extent_state_active,
+ /* zeroed */ false, /* committed */ true, EXTENT_PAI_HPA,
+ EXTENT_NOT_HEAD);
+ edata_ps_set(r_edata, ps);
+ psset_update_end(psset, ps);
+}
+
+static bool
+test_psset_alloc_reuse(psset_t *psset, edata_t *r_edata, size_t size) {
+ hpdata_t *ps = psset_pick_alloc(psset, size);
+ if (ps == NULL) {
+ return true;
+ }
+ psset_update_begin(psset, ps);
+ void *addr = hpdata_reserve_alloc(ps, size);
+ edata_init(r_edata, edata_arena_ind_get(r_edata), addr, size,
+ /* slab */ false, SC_NSIZES, /* sn */ 0, extent_state_active,
+ /* zeroed */ false, /* committed */ true, EXTENT_PAI_HPA,
+ EXTENT_NOT_HEAD);
+ edata_ps_set(r_edata, ps);
+ psset_update_end(psset, ps);
+ return false;
+}
+
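+/*
+ * Returns the pageslab if freeing this allocation emptied it (in which case
+ * it is also removed from the psset), and NULL otherwise.
+ */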
+static hpdata_t *
+test_psset_dalloc(psset_t *psset, edata_t *edata) {
+ hpdata_t *ps = edata_ps_get(edata);
+ psset_update_begin(psset, ps);
+ hpdata_unreserve(ps, edata_addr_get(edata), edata_size_get(edata));
+ psset_update_end(psset, ps);
+ if (hpdata_empty(ps)) {
+ psset_remove(psset, ps);
+ return ps;
+ } else {
+ return NULL;
+ }
+}
+
+static void
+edata_expect(edata_t *edata, size_t page_offset, size_t page_cnt) {
+ /*
+ * Note that allocations should get the arena ind of their home
+ * arena, *not* the arena ind of the pageslab allocator.
+ */
+ expect_u_eq(ALLOC_ARENA_IND, edata_arena_ind_get(edata),
+ "Arena ind changed");
+ expect_ptr_eq(
+ (void *)((uintptr_t)PAGESLAB_ADDR + (page_offset << LG_PAGE)),
+ edata_addr_get(edata), "Didn't allocate in order");
+ expect_zu_eq(page_cnt << LG_PAGE, edata_size_get(edata), "");
+ expect_false(edata_slab_get(edata), "");
+ expect_u_eq(SC_NSIZES, edata_szind_get_maybe_invalid(edata),
+ "");
+ expect_u64_eq(0, edata_sn_get(edata), "");
+ expect_d_eq(edata_state_get(edata), extent_state_active, "");
+ expect_false(edata_zeroed_get(edata), "");
+ expect_true(edata_committed_get(edata), "");
+ expect_d_eq(EXTENT_PAI_HPA, edata_pai_get(edata), "");
+ expect_false(edata_is_head_get(edata), "");
+}
+
+TEST_BEGIN(test_empty) {
+ bool err;
+ hpdata_t pageslab;
+ hpdata_init(&pageslab, PAGESLAB_ADDR, PAGESLAB_AGE);
+
+ edata_t alloc;
+ edata_init_test(&alloc);
+
+ psset_t psset;
+ psset_init(&psset);
+
+ /* An empty psset should fail all allocations. */
+ err = test_psset_alloc_reuse(&psset, &alloc, PAGE);
+ expect_true(err, "Empty psset succeeded in an allocation.");
+}
+TEST_END
+
+TEST_BEGIN(test_fill) {
+ bool err;
+
+ hpdata_t pageslab;
+ hpdata_init(&pageslab, PAGESLAB_ADDR, PAGESLAB_AGE);
+
+ edata_t alloc[HUGEPAGE_PAGES];
+
+ psset_t psset;
+ psset_init(&psset);
+
+ edata_init_test(&alloc[0]);
+ test_psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE);
+ for (size_t i = 1; i < HUGEPAGE_PAGES; i++) {
+ edata_init_test(&alloc[i]);
+ err = test_psset_alloc_reuse(&psset, &alloc[i], PAGE);
+ expect_false(err, "Nonempty psset failed page allocation.");
+ }
+
+ for (size_t i = 0; i < HUGEPAGE_PAGES; i++) {
+ edata_t *edata = &alloc[i];
+ edata_expect(edata, i, 1);
+ }
+
+ /* The pageslab is now full, so the psset has no space left to allocate from. */
+ edata_t extra_alloc;
+ edata_init_test(&extra_alloc);
+ err = test_psset_alloc_reuse(&psset, &extra_alloc, PAGE);
+ expect_true(err, "Alloc succeeded even though psset should be empty");
+}
+TEST_END
+
+TEST_BEGIN(test_reuse) {
+ bool err;
+ hpdata_t *ps;
+
+ hpdata_t pageslab;
+ hpdata_init(&pageslab, PAGESLAB_ADDR, PAGESLAB_AGE);
+
+ edata_t alloc[HUGEPAGE_PAGES];
+
+ psset_t psset;
+ psset_init(&psset);
+
+ edata_init_test(&alloc[0]);
+ test_psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE);
+ for (size_t i = 1; i < HUGEPAGE_PAGES; i++) {
+ edata_init_test(&alloc[i]);
+ err = test_psset_alloc_reuse(&psset, &alloc[i], PAGE);
+ expect_false(err, "Nonempty psset failed page allocation.");
+ }
+
+ /* Free odd indices. */
+ for (size_t i = 0; i < HUGEPAGE_PAGES; i ++) {
+ if (i % 2 == 0) {
+ continue;
+ }
+ ps = test_psset_dalloc(&psset, &alloc[i]);
+ expect_ptr_null(ps, "Nonempty pageslab evicted");
+ }
+ /* Realloc into them. */
+ for (size_t i = 0; i < HUGEPAGE_PAGES; i++) {
+ if (i % 2 == 0) {
+ continue;
+ }
+ err = test_psset_alloc_reuse(&psset, &alloc[i], PAGE);
+ expect_false(err, "Nonempty psset failed page allocation.");
+ edata_expect(&alloc[i], i, 1);
+ }
+	/* Now, free the pages at indices 0 or 1 mod 4. */
+ for (size_t i = 0; i < HUGEPAGE_PAGES; i++) {
+ if (i % 4 > 1) {
+ continue;
+ }
+ ps = test_psset_dalloc(&psset, &alloc[i]);
+ expect_ptr_null(ps, "Nonempty pageslab evicted");
+ }
+ /* And realloc 2-page allocations into them. */
+ for (size_t i = 0; i < HUGEPAGE_PAGES; i++) {
+ if (i % 4 != 0) {
+ continue;
+ }
+ err = test_psset_alloc_reuse(&psset, &alloc[i], 2 * PAGE);
+ expect_false(err, "Nonempty psset failed page allocation.");
+ edata_expect(&alloc[i], i, 2);
+ }
+ /* Free all the 2-page allocations. */
+ for (size_t i = 0; i < HUGEPAGE_PAGES; i++) {
+ if (i % 4 != 0) {
+ continue;
+ }
+ ps = test_psset_dalloc(&psset, &alloc[i]);
+ expect_ptr_null(ps, "Nonempty pageslab evicted");
+ }
+ /*
+ * Free up a 1-page hole next to a 2-page hole, but somewhere in the
+ * middle of the pageslab. Index 11 should be right before such a hole
+ * (since 12 % 4 == 0).
+ */
+ size_t index_of_3 = 11;
+ ps = test_psset_dalloc(&psset, &alloc[index_of_3]);
+ expect_ptr_null(ps, "Nonempty pageslab evicted");
+ err = test_psset_alloc_reuse(&psset, &alloc[index_of_3], 3 * PAGE);
+ expect_false(err, "Should have been able to find alloc.");
+ edata_expect(&alloc[index_of_3], index_of_3, 3);
+
+ /*
+ * Free up a 4-page hole at the end. Recall that the pages at offsets 0
+ * and 1 mod 4 were freed above, so we just have to free the last
+ * allocations.
+ */
+ ps = test_psset_dalloc(&psset, &alloc[HUGEPAGE_PAGES - 1]);
+ expect_ptr_null(ps, "Nonempty pageslab evicted");
+ ps = test_psset_dalloc(&psset, &alloc[HUGEPAGE_PAGES - 2]);
+ expect_ptr_null(ps, "Nonempty pageslab evicted");
+
+ /* Make sure we can satisfy an allocation at the very end of a slab. */
+ size_t index_of_4 = HUGEPAGE_PAGES - 4;
+ err = test_psset_alloc_reuse(&psset, &alloc[index_of_4], 4 * PAGE);
+ expect_false(err, "Should have been able to find alloc.");
+ edata_expect(&alloc[index_of_4], index_of_4, 4);
+}
+TEST_END
+
+TEST_BEGIN(test_evict) {
+ bool err;
+ hpdata_t *ps;
+
+ hpdata_t pageslab;
+ hpdata_init(&pageslab, PAGESLAB_ADDR, PAGESLAB_AGE);
+
+ edata_t alloc[HUGEPAGE_PAGES];
+
+ psset_t psset;
+ psset_init(&psset);
+
+ /* Alloc the whole slab. */
+ edata_init_test(&alloc[0]);
+ test_psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE);
+ for (size_t i = 1; i < HUGEPAGE_PAGES; i++) {
+ edata_init_test(&alloc[i]);
+ err = test_psset_alloc_reuse(&psset, &alloc[i], PAGE);
+		expect_false(err, "Unexpected allocation failure");
+ }
+
+ /* Dealloc the whole slab, going forwards. */
+ for (size_t i = 0; i < HUGEPAGE_PAGES - 1; i++) {
+ ps = test_psset_dalloc(&psset, &alloc[i]);
+ expect_ptr_null(ps, "Nonempty pageslab evicted");
+ }
+ ps = test_psset_dalloc(&psset, &alloc[HUGEPAGE_PAGES - 1]);
+ expect_ptr_eq(&pageslab, ps, "Empty pageslab not evicted.");
+
+ err = test_psset_alloc_reuse(&psset, &alloc[0], PAGE);
+ expect_true(err, "psset should be empty.");
+}
+TEST_END
+
+TEST_BEGIN(test_multi_pageslab) {
+ bool err;
+ hpdata_t *ps;
+
+ hpdata_t pageslab[2];
+ hpdata_init(&pageslab[0], PAGESLAB_ADDR, PAGESLAB_AGE);
+ hpdata_init(&pageslab[1],
+ (void *)((uintptr_t)PAGESLAB_ADDR + HUGEPAGE),
+ PAGESLAB_AGE + 1);
+
+ edata_t alloc[2][HUGEPAGE_PAGES];
+
+ psset_t psset;
+ psset_init(&psset);
+
+ /* Insert both slabs. */
+ edata_init_test(&alloc[0][0]);
+ test_psset_alloc_new(&psset, &pageslab[0], &alloc[0][0], PAGE);
+ edata_init_test(&alloc[1][0]);
+ test_psset_alloc_new(&psset, &pageslab[1], &alloc[1][0], PAGE);
+
+ /* Fill them both up; make sure we do so in first-fit order. */
+ for (size_t i = 0; i < 2; i++) {
+ for (size_t j = 1; j < HUGEPAGE_PAGES; j++) {
+ edata_init_test(&alloc[i][j]);
+ err = test_psset_alloc_reuse(&psset, &alloc[i][j], PAGE);
+ expect_false(err,
+ "Nonempty psset failed page allocation.");
+ assert_ptr_eq(&pageslab[i], edata_ps_get(&alloc[i][j]),
+ "Didn't pick pageslabs in first-fit");
+ }
+ }
+
+ /*
+ * Free up a 2-page hole in the earlier slab, and a 1-page one in the
+ * later one. We should still pick the later one.
+ */
+ ps = test_psset_dalloc(&psset, &alloc[0][0]);
+ expect_ptr_null(ps, "Unexpected eviction");
+ ps = test_psset_dalloc(&psset, &alloc[0][1]);
+ expect_ptr_null(ps, "Unexpected eviction");
+ ps = test_psset_dalloc(&psset, &alloc[1][0]);
+ expect_ptr_null(ps, "Unexpected eviction");
+ err = test_psset_alloc_reuse(&psset, &alloc[0][0], PAGE);
+ expect_ptr_eq(&pageslab[1], edata_ps_get(&alloc[0][0]),
+ "Should have picked the fuller pageslab");
+
+	/*
+	 * Now the later slab is full again and the earlier one has a 2-page
+	 * hole.  Free up a 1-page hole in the later slab.
+	 */
+ ps = test_psset_dalloc(&psset, &alloc[1][1]);
+ expect_ptr_null(ps, "Unexpected eviction");
+
+ /*
+ * We should be able to allocate a 2-page object, even though an earlier
+ * size class is nonempty.
+ */
+ err = test_psset_alloc_reuse(&psset, &alloc[1][0], 2 * PAGE);
+ expect_false(err, "Allocation should have succeeded");
+}
+TEST_END
+
+static void
+stats_expect_empty(psset_bin_stats_t *stats) {
+ assert_zu_eq(0, stats->npageslabs,
+ "Supposedly empty bin had positive npageslabs");
+	expect_zu_eq(0, stats->nactive,
+	    "Supposedly empty bin had positive nactive");
+}
+
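+/*
+ * Verifies the psset stats for at most one tracked pageslab with nactive
+ * pages in use: a full slab shows up in the full-slabs bin, a partially used
+ * one in exactly one nonfull size-class bin, and every other bin stays empty.
+ */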
+static void
+stats_expect(psset_t *psset, size_t nactive) {
+ if (nactive == HUGEPAGE_PAGES) {
+ expect_zu_eq(1, psset->stats.full_slabs[0].npageslabs,
+ "Expected a full slab");
+ expect_zu_eq(HUGEPAGE_PAGES,
+ psset->stats.full_slabs[0].nactive,
+ "Should have exactly filled the bin");
+ } else {
+ stats_expect_empty(&psset->stats.full_slabs[0]);
+ }
+ size_t ninactive = HUGEPAGE_PAGES - nactive;
+ pszind_t nonempty_pind = PSSET_NPSIZES;
+ if (ninactive != 0 && ninactive < HUGEPAGE_PAGES) {
+ nonempty_pind = sz_psz2ind(sz_psz_quantize_floor(
+ ninactive << LG_PAGE));
+ }
+ for (pszind_t i = 0; i < PSSET_NPSIZES; i++) {
+ if (i == nonempty_pind) {
+ assert_zu_eq(1,
+ psset->stats.nonfull_slabs[i][0].npageslabs,
+ "Should have found a slab");
+ expect_zu_eq(nactive,
+ psset->stats.nonfull_slabs[i][0].nactive,
+ "Mismatch in active pages");
+ } else {
+ stats_expect_empty(&psset->stats.nonfull_slabs[i][0]);
+ }
+ }
+ expect_zu_eq(nactive, psset_nactive(psset), "");
+}
+
+TEST_BEGIN(test_stats) {
+ bool err;
+
+ hpdata_t pageslab;
+ hpdata_init(&pageslab, PAGESLAB_ADDR, PAGESLAB_AGE);
+
+ edata_t alloc[HUGEPAGE_PAGES];
+
+ psset_t psset;
+ psset_init(&psset);
+ stats_expect(&psset, 0);
+
+ edata_init_test(&alloc[0]);
+ test_psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE);
+ for (size_t i = 1; i < HUGEPAGE_PAGES; i++) {
+ stats_expect(&psset, i);
+ edata_init_test(&alloc[i]);
+ err = test_psset_alloc_reuse(&psset, &alloc[i], PAGE);
+ expect_false(err, "Nonempty psset failed page allocation.");
+ }
+ stats_expect(&psset, HUGEPAGE_PAGES);
+ hpdata_t *ps;
+ for (ssize_t i = HUGEPAGE_PAGES - 1; i >= 0; i--) {
+ ps = test_psset_dalloc(&psset, &alloc[i]);
+ expect_true((ps == NULL) == (i != 0),
+ "test_psset_dalloc should only evict a slab on the last "
+ "free");
+ stats_expect(&psset, i);
+ }
+
+ test_psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE);
+ stats_expect(&psset, 1);
+ psset_update_begin(&psset, &pageslab);
+ stats_expect(&psset, 0);
+ psset_update_end(&psset, &pageslab);
+ stats_expect(&psset, 1);
+}
+TEST_END
+
+/*
+ * Fills in and inserts two pageslabs, the first better than the second, each
+ * fully allocated (into the allocations in alloc and worse_alloc, each of
+ * which should be HUGEPAGE_PAGES long) except for a single free page at the
+ * end.
+ *
+ * (There's nothing magic about these numbers; it's just useful to share the
+ * setup between the oldest-fit and the insert/remove tests.)
+ */
+static void
+init_test_pageslabs(psset_t *psset, hpdata_t *pageslab,
+ hpdata_t *worse_pageslab, edata_t *alloc, edata_t *worse_alloc) {
+ bool err;
+
+ hpdata_init(pageslab, (void *)(10 * HUGEPAGE), PAGESLAB_AGE);
+ /*
+ * This pageslab would be better from an address-first-fit POV, but
+ * worse from an age POV.
+ */
+ hpdata_init(worse_pageslab, (void *)(9 * HUGEPAGE), PAGESLAB_AGE + 1);
+
+ psset_init(psset);
+
+ edata_init_test(&alloc[0]);
+ test_psset_alloc_new(psset, pageslab, &alloc[0], PAGE);
+ for (size_t i = 1; i < HUGEPAGE_PAGES; i++) {
+ edata_init_test(&alloc[i]);
+ err = test_psset_alloc_reuse(psset, &alloc[i], PAGE);
+ expect_false(err, "Nonempty psset failed page allocation.");
+ expect_ptr_eq(pageslab, edata_ps_get(&alloc[i]),
+ "Allocated from the wrong pageslab");
+ }
+
+ edata_init_test(&worse_alloc[0]);
+ test_psset_alloc_new(psset, worse_pageslab, &worse_alloc[0], PAGE);
+ expect_ptr_eq(worse_pageslab, edata_ps_get(&worse_alloc[0]),
+ "Allocated from the wrong pageslab");
+ /*
+	 * Make the two pageslabs otherwise indistinguishable; all full except
+	 * for a single page.
+ */
+ for (size_t i = 1; i < HUGEPAGE_PAGES - 1; i++) {
+ edata_init_test(&worse_alloc[i]);
+ err = test_psset_alloc_reuse(psset, &alloc[i], PAGE);
+ expect_false(err, "Nonempty psset failed page allocation.");
+ expect_ptr_eq(worse_pageslab, edata_ps_get(&alloc[i]),
+ "Allocated from the wrong pageslab");
+ }
+
+ /* Deallocate the last page from the older pageslab. */
+ hpdata_t *evicted = test_psset_dalloc(psset,
+ &alloc[HUGEPAGE_PAGES - 1]);
+ expect_ptr_null(evicted, "Unexpected eviction");
+}
+
+TEST_BEGIN(test_oldest_fit) {
+ bool err;
+ edata_t alloc[HUGEPAGE_PAGES];
+ edata_t worse_alloc[HUGEPAGE_PAGES];
+
+ hpdata_t pageslab;
+ hpdata_t worse_pageslab;
+
+ psset_t psset;
+
+ init_test_pageslabs(&psset, &pageslab, &worse_pageslab, alloc,
+ worse_alloc);
+
+ /* The edata should come from the better pageslab. */
+ edata_t test_edata;
+ edata_init_test(&test_edata);
+ err = test_psset_alloc_reuse(&psset, &test_edata, PAGE);
+ expect_false(err, "Nonempty psset failed page allocation");
+ expect_ptr_eq(&pageslab, edata_ps_get(&test_edata),
+ "Allocated from the wrong pageslab");
+}
+TEST_END
+
+TEST_BEGIN(test_insert_remove) {
+ bool err;
+ hpdata_t *ps;
+ edata_t alloc[HUGEPAGE_PAGES];
+ edata_t worse_alloc[HUGEPAGE_PAGES];
+
+ hpdata_t pageslab;
+ hpdata_t worse_pageslab;
+
+ psset_t psset;
+
+ init_test_pageslabs(&psset, &pageslab, &worse_pageslab, alloc,
+ worse_alloc);
+
+ /* Remove better; should still be able to alloc from worse. */
+ psset_update_begin(&psset, &pageslab);
+ err = test_psset_alloc_reuse(&psset, &worse_alloc[HUGEPAGE_PAGES - 1],
+ PAGE);
+ expect_false(err, "Removal should still leave an empty page");
+ expect_ptr_eq(&worse_pageslab,
+ edata_ps_get(&worse_alloc[HUGEPAGE_PAGES - 1]),
+ "Allocated out of wrong ps");
+
+ /*
+ * After deallocating the previous alloc and reinserting better, it
+ * should be preferred for future allocations.
+ */
+ ps = test_psset_dalloc(&psset, &worse_alloc[HUGEPAGE_PAGES - 1]);
+ expect_ptr_null(ps, "Incorrect eviction of nonempty pageslab");
+ psset_update_end(&psset, &pageslab);
+ err = test_psset_alloc_reuse(&psset, &alloc[HUGEPAGE_PAGES - 1], PAGE);
+ expect_false(err, "psset should be nonempty");
+ expect_ptr_eq(&pageslab, edata_ps_get(&alloc[HUGEPAGE_PAGES - 1]),
+ "Removal/reinsertion shouldn't change ordering");
+ /*
+ * After deallocating and removing both, allocations should fail.
+ */
+ ps = test_psset_dalloc(&psset, &alloc[HUGEPAGE_PAGES - 1]);
+ expect_ptr_null(ps, "Incorrect eviction");
+ psset_update_begin(&psset, &pageslab);
+ psset_update_begin(&psset, &worse_pageslab);
+ err = test_psset_alloc_reuse(&psset, &alloc[HUGEPAGE_PAGES - 1], PAGE);
+ expect_true(err, "psset should be empty, but an alloc succeeded");
+}
+TEST_END
+
+TEST_BEGIN(test_purge_prefers_nonhuge) {
+ /*
+ * All else being equal, we should prefer purging non-huge pages over
+ * huge ones for non-empty extents.
+ */
+
+ /* Nothing magic about this constant. */
+ enum {
+ NHP = 23,
+ };
+ hpdata_t *hpdata;
+
+ psset_t psset;
+ psset_init(&psset);
+
+ hpdata_t hpdata_huge[NHP];
+ uintptr_t huge_begin = (uintptr_t)&hpdata_huge[0];
+ uintptr_t huge_end = (uintptr_t)&hpdata_huge[NHP];
+ hpdata_t hpdata_nonhuge[NHP];
+ uintptr_t nonhuge_begin = (uintptr_t)&hpdata_nonhuge[0];
+ uintptr_t nonhuge_end = (uintptr_t)&hpdata_nonhuge[NHP];
+
+ for (size_t i = 0; i < NHP; i++) {
+ hpdata_init(&hpdata_huge[i], (void *)((10 + i) * HUGEPAGE),
+ 123 + i);
+ psset_insert(&psset, &hpdata_huge[i]);
+
+ hpdata_init(&hpdata_nonhuge[i],
+ (void *)((10 + NHP + i) * HUGEPAGE),
+ 456 + i);
+ psset_insert(&psset, &hpdata_nonhuge[i]);
+
+ }
+ for (int i = 0; i < 2 * NHP; i++) {
+ hpdata = psset_pick_alloc(&psset, HUGEPAGE * 3 / 4);
+ psset_update_begin(&psset, hpdata);
+ void *ptr;
+ ptr = hpdata_reserve_alloc(hpdata, HUGEPAGE * 3 / 4);
+ /* Ignore the first alloc, which will stick around. */
+ (void)ptr;
+ /*
+ * The second alloc is to dirty the pages; free it immediately
+ * after allocating.
+ */
+ ptr = hpdata_reserve_alloc(hpdata, HUGEPAGE / 4);
+ hpdata_unreserve(hpdata, ptr, HUGEPAGE / 4);
+
+ if (huge_begin <= (uintptr_t)hpdata
+ && (uintptr_t)hpdata < huge_end) {
+ hpdata_hugify(hpdata);
+ }
+
+ hpdata_purge_allowed_set(hpdata, true);
+ psset_update_end(&psset, hpdata);
+ }
+
+	/*
+	 * We've got a bunch of hpdatas that are each 1/4 dirty.  It should
+	 * give us all the non-huge ones to purge, then all the huge ones, then
+	 * refuse to purge further.
+	 */
+ for (int i = 0; i < NHP; i++) {
+ hpdata = psset_pick_purge(&psset);
+ assert_true(nonhuge_begin <= (uintptr_t)hpdata
+ && (uintptr_t)hpdata < nonhuge_end, "");
+ psset_update_begin(&psset, hpdata);
+ test_psset_fake_purge(hpdata);
+ hpdata_purge_allowed_set(hpdata, false);
+ psset_update_end(&psset, hpdata);
+ }
+ for (int i = 0; i < NHP; i++) {
+ hpdata = psset_pick_purge(&psset);
+ expect_true(huge_begin <= (uintptr_t)hpdata
+ && (uintptr_t)hpdata < huge_end, "");
+ psset_update_begin(&psset, hpdata);
+ hpdata_dehugify(hpdata);
+ test_psset_fake_purge(hpdata);
+ hpdata_purge_allowed_set(hpdata, false);
+ psset_update_end(&psset, hpdata);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_purge_prefers_empty) {
+ void *ptr;
+
+ psset_t psset;
+ psset_init(&psset);
+
+ hpdata_t hpdata_empty;
+ hpdata_t hpdata_nonempty;
+ hpdata_init(&hpdata_empty, (void *)(10 * HUGEPAGE), 123);
+ psset_insert(&psset, &hpdata_empty);
+ hpdata_init(&hpdata_nonempty, (void *)(11 * HUGEPAGE), 456);
+ psset_insert(&psset, &hpdata_nonempty);
+
+ psset_update_begin(&psset, &hpdata_empty);
+ ptr = hpdata_reserve_alloc(&hpdata_empty, PAGE);
+ expect_ptr_eq(hpdata_addr_get(&hpdata_empty), ptr, "");
+ hpdata_unreserve(&hpdata_empty, ptr, PAGE);
+ hpdata_purge_allowed_set(&hpdata_empty, true);
+ psset_update_end(&psset, &hpdata_empty);
+
+ psset_update_begin(&psset, &hpdata_nonempty);
+ ptr = hpdata_reserve_alloc(&hpdata_nonempty, 10 * PAGE);
+ expect_ptr_eq(hpdata_addr_get(&hpdata_nonempty), ptr, "");
+ hpdata_unreserve(&hpdata_nonempty, ptr, 9 * PAGE);
+ hpdata_purge_allowed_set(&hpdata_nonempty, true);
+ psset_update_end(&psset, &hpdata_nonempty);
+
+ /*
+ * The nonempty slab has 9 dirty pages, while the empty one has only 1.
+ * We should still pick the empty one for purging.
+ */
+ hpdata_t *to_purge = psset_pick_purge(&psset);
+ expect_ptr_eq(&hpdata_empty, to_purge, "");
+}
+TEST_END
+
+TEST_BEGIN(test_purge_prefers_empty_huge) {
+ void *ptr;
+
+ psset_t psset;
+ psset_init(&psset);
+
+	enum { NHP = 10 };
+
+ hpdata_t hpdata_huge[NHP];
+ hpdata_t hpdata_nonhuge[NHP];
+
+ uintptr_t cur_addr = 100 * HUGEPAGE;
+ uint64_t cur_age = 123;
+ for (int i = 0; i < NHP; i++) {
+ hpdata_init(&hpdata_huge[i], (void *)cur_addr, cur_age);
+ cur_addr += HUGEPAGE;
+ cur_age++;
+ psset_insert(&psset, &hpdata_huge[i]);
+
+ hpdata_init(&hpdata_nonhuge[i], (void *)cur_addr, cur_age);
+ cur_addr += HUGEPAGE;
+ cur_age++;
+ psset_insert(&psset, &hpdata_nonhuge[i]);
+
+ /*
+		 * Make hpdata_huge[i] fully dirty, empty, purgeable, and
+ * huge.
+ */
+ psset_update_begin(&psset, &hpdata_huge[i]);
+ ptr = hpdata_reserve_alloc(&hpdata_huge[i], HUGEPAGE);
+ expect_ptr_eq(hpdata_addr_get(&hpdata_huge[i]), ptr, "");
+ hpdata_hugify(&hpdata_huge[i]);
+ hpdata_unreserve(&hpdata_huge[i], ptr, HUGEPAGE);
+ hpdata_purge_allowed_set(&hpdata_huge[i], true);
+ psset_update_end(&psset, &hpdata_huge[i]);
+
+ /*
+		 * Make hpdata_nonhuge[i] fully dirty, empty, purgeable, and
+ * non-huge.
+ */
+ psset_update_begin(&psset, &hpdata_nonhuge[i]);
+ ptr = hpdata_reserve_alloc(&hpdata_nonhuge[i], HUGEPAGE);
+ expect_ptr_eq(hpdata_addr_get(&hpdata_nonhuge[i]), ptr, "");
+ hpdata_unreserve(&hpdata_nonhuge[i], ptr, HUGEPAGE);
+ hpdata_purge_allowed_set(&hpdata_nonhuge[i], true);
+ psset_update_end(&psset, &hpdata_nonhuge[i]);
+ }
+
+ /*
+ * We have a bunch of empty slabs, half huge, half nonhuge, inserted in
+ * alternating order. We should pop all the huge ones before popping
+ * any of the non-huge ones for purging.
+ */
+ for (int i = 0; i < NHP; i++) {
+ hpdata_t *to_purge = psset_pick_purge(&psset);
+ expect_ptr_eq(&hpdata_huge[i], to_purge, "");
+ psset_update_begin(&psset, to_purge);
+ hpdata_purge_allowed_set(to_purge, false);
+ psset_update_end(&psset, to_purge);
+ }
+ for (int i = 0; i < NHP; i++) {
+ hpdata_t *to_purge = psset_pick_purge(&psset);
+ expect_ptr_eq(&hpdata_nonhuge[i], to_purge, "");
+ psset_update_begin(&psset, to_purge);
+ hpdata_purge_allowed_set(to_purge, false);
+ psset_update_end(&psset, to_purge);
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test_no_reentrancy(
+ test_empty,
+ test_fill,
+ test_reuse,
+ test_evict,
+ test_multi_pageslab,
+ test_stats,
+ test_oldest_fit,
+ test_insert_remove,
+ test_purge_prefers_nonhuge,
+ test_purge_prefers_empty,
+ test_purge_prefers_empty_huge);
+}
diff --git a/deps/jemalloc/test/unit/ql.c b/deps/jemalloc/test/unit/ql.c
index b76c24c41..f9130582f 100644
--- a/deps/jemalloc/test/unit/ql.c
+++ b/deps/jemalloc/test/unit/ql.c
@@ -18,21 +18,22 @@ test_empty_list(list_head_t *head) {
list_t *t;
unsigned i;
- assert_ptr_null(ql_first(head), "Unexpected element for empty list");
- assert_ptr_null(ql_last(head, link),
+ expect_true(ql_empty(head), "Unexpected element for empty list");
+ expect_ptr_null(ql_first(head), "Unexpected element for empty list");
+ expect_ptr_null(ql_last(head, link),
"Unexpected element for empty list");
i = 0;
ql_foreach(t, head, link) {
i++;
}
- assert_u_eq(i, 0, "Unexpected element for empty list");
+ expect_u_eq(i, 0, "Unexpected element for empty list");
i = 0;
ql_reverse_foreach(t, head, link) {
i++;
}
- assert_u_eq(i, 0, "Unexpected element for empty list");
+ expect_u_eq(i, 0, "Unexpected element for empty list");
}
TEST_BEGIN(test_ql_empty) {
@@ -58,34 +59,35 @@ test_entries_list(list_head_t *head, list_t *entries, unsigned nentries) {
list_t *t;
unsigned i;
- assert_c_eq(ql_first(head)->id, entries[0].id, "Element id mismatch");
- assert_c_eq(ql_last(head, link)->id, entries[nentries-1].id,
+ expect_false(ql_empty(head), "List should not be empty");
+ expect_c_eq(ql_first(head)->id, entries[0].id, "Element id mismatch");
+ expect_c_eq(ql_last(head, link)->id, entries[nentries-1].id,
"Element id mismatch");
i = 0;
ql_foreach(t, head, link) {
- assert_c_eq(t->id, entries[i].id, "Element id mismatch");
+ expect_c_eq(t->id, entries[i].id, "Element id mismatch");
i++;
}
i = 0;
ql_reverse_foreach(t, head, link) {
- assert_c_eq(t->id, entries[nentries-i-1].id,
+ expect_c_eq(t->id, entries[nentries-i-1].id,
"Element id mismatch");
i++;
}
for (i = 0; i < nentries-1; i++) {
t = ql_next(head, &entries[i], link);
- assert_c_eq(t->id, entries[i+1].id, "Element id mismatch");
+ expect_c_eq(t->id, entries[i+1].id, "Element id mismatch");
}
- assert_ptr_null(ql_next(head, &entries[nentries-1], link),
+ expect_ptr_null(ql_next(head, &entries[nentries-1], link),
"Unexpected element");
- assert_ptr_null(ql_prev(head, &entries[0], link), "Unexpected element");
+ expect_ptr_null(ql_prev(head, &entries[0], link), "Unexpected element");
for (i = 1; i < nentries; i++) {
t = ql_prev(head, &entries[i], link);
- assert_c_eq(t->id, entries[i-1].id, "Element id mismatch");
+ expect_c_eq(t->id, entries[i-1].id, "Element id mismatch");
}
}
@@ -192,6 +194,114 @@ TEST_BEGIN(test_ql_insert) {
}
TEST_END
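+/*
+ * Builds two lists of the given lengths out of a shared entry array, verifies
+ * them, concatenates them into head_a, and (when the second list is nonempty)
+ * splits the combined list back apart at the original boundary, re-checking
+ * at each step.
+ */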
+static void
+test_concat_split_entries(list_t *entries, unsigned nentries_a,
+ unsigned nentries_b) {
+ init_entries(entries, nentries_a + nentries_b);
+
+ list_head_t head_a;
+ ql_new(&head_a);
+ for (unsigned i = 0; i < nentries_a; i++) {
+ ql_tail_insert(&head_a, &entries[i], link);
+ }
+ if (nentries_a == 0) {
+ test_empty_list(&head_a);
+ } else {
+ test_entries_list(&head_a, entries, nentries_a);
+ }
+
+ list_head_t head_b;
+ ql_new(&head_b);
+ for (unsigned i = 0; i < nentries_b; i++) {
+ ql_tail_insert(&head_b, &entries[nentries_a + i], link);
+ }
+ if (nentries_b == 0) {
+ test_empty_list(&head_b);
+ } else {
+ test_entries_list(&head_b, entries + nentries_a, nentries_b);
+ }
+
+ ql_concat(&head_a, &head_b, link);
+ if (nentries_a + nentries_b == 0) {
+ test_empty_list(&head_a);
+ } else {
+ test_entries_list(&head_a, entries, nentries_a + nentries_b);
+ }
+ test_empty_list(&head_b);
+
+ if (nentries_b == 0) {
+ return;
+ }
+
+ list_head_t head_c;
+ ql_split(&head_a, &entries[nentries_a], &head_c, link);
+ if (nentries_a == 0) {
+ test_empty_list(&head_a);
+ } else {
+ test_entries_list(&head_a, entries, nentries_a);
+ }
+ test_entries_list(&head_c, entries + nentries_a, nentries_b);
+}
+
+TEST_BEGIN(test_ql_concat_split) {
+ list_t entries[NENTRIES];
+
+ test_concat_split_entries(entries, 0, 0);
+
+ test_concat_split_entries(entries, 0, 1);
+ test_concat_split_entries(entries, 1, 0);
+
+ test_concat_split_entries(entries, 0, NENTRIES);
+ test_concat_split_entries(entries, 1, NENTRIES - 1);
+ test_concat_split_entries(entries, NENTRIES / 2,
+ NENTRIES - NENTRIES / 2);
+ test_concat_split_entries(entries, NENTRIES - 1, 1);
+ test_concat_split_entries(entries, NENTRIES, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_ql_rotate) {
+ list_head_t head;
+ list_t entries[NENTRIES];
+ unsigned i;
+
+ ql_new(&head);
+ init_entries(entries, sizeof(entries)/sizeof(list_t));
+ for (i = 0; i < NENTRIES; i++) {
+ ql_tail_insert(&head, &entries[i], link);
+ }
+
+ char head_id = ql_first(&head)->id;
+ for (i = 0; i < NENTRIES; i++) {
+ assert_c_eq(ql_first(&head)->id, head_id, "");
+ ql_rotate(&head, link);
+ assert_c_eq(ql_last(&head, link)->id, head_id, "");
+ head_id++;
+ }
+ test_entries_list(&head, entries, NENTRIES);
+}
+TEST_END
+
+TEST_BEGIN(test_ql_move) {
+ list_head_t head_dest, head_src;
+ list_t entries[NENTRIES];
+ unsigned i;
+
+ ql_new(&head_src);
+ ql_move(&head_dest, &head_src);
+ test_empty_list(&head_src);
+ test_empty_list(&head_dest);
+
+ init_entries(entries, sizeof(entries)/sizeof(list_t));
+ for (i = 0; i < NENTRIES; i++) {
+ ql_tail_insert(&head_src, &entries[i], link);
+ }
+ ql_move(&head_dest, &head_src);
+ test_empty_list(&head_src);
+ test_entries_list(&head_dest, entries, NENTRIES);
+}
+TEST_END
+
int
main(void) {
return test(
@@ -200,5 +310,8 @@ main(void) {
test_ql_tail_remove,
test_ql_head_insert,
test_ql_head_remove,
- test_ql_insert);
+ test_ql_insert,
+ test_ql_concat_split,
+ test_ql_rotate,
+ test_ql_move);
}
diff --git a/deps/jemalloc/test/unit/qr.c b/deps/jemalloc/test/unit/qr.c
index 271a10953..16eed0e92 100644
--- a/deps/jemalloc/test/unit/qr.c
+++ b/deps/jemalloc/test/unit/qr.c
@@ -34,7 +34,7 @@ test_independent_entries(ring_t *entries) {
qr_foreach(t, &entries[i], link) {
j++;
}
- assert_u_eq(j, 1,
+ expect_u_eq(j, 1,
"Iteration over single-element ring should visit precisely "
"one element");
}
@@ -43,19 +43,19 @@ test_independent_entries(ring_t *entries) {
qr_reverse_foreach(t, &entries[i], link) {
j++;
}
- assert_u_eq(j, 1,
+ expect_u_eq(j, 1,
"Iteration over single-element ring should visit precisely "
"one element");
}
for (i = 0; i < NENTRIES; i++) {
t = qr_next(&entries[i], link);
- assert_ptr_eq(t, &entries[i],
+ expect_ptr_eq(t, &entries[i],
"Next element in single-element ring should be same as "
"current element");
}
for (i = 0; i < NENTRIES; i++) {
t = qr_prev(&entries[i], link);
- assert_ptr_eq(t, &entries[i],
+ expect_ptr_eq(t, &entries[i],
"Previous element in single-element ring should be same as "
"current element");
}
@@ -77,7 +77,7 @@ test_entries_ring(ring_t *entries) {
for (i = 0; i < NENTRIES; i++) {
j = 0;
qr_foreach(t, &entries[i], link) {
- assert_c_eq(t->id, entries[(i+j) % NENTRIES].id,
+ expect_c_eq(t->id, entries[(i+j) % NENTRIES].id,
"Element id mismatch");
j++;
}
@@ -85,19 +85,19 @@ test_entries_ring(ring_t *entries) {
for (i = 0; i < NENTRIES; i++) {
j = 0;
qr_reverse_foreach(t, &entries[i], link) {
- assert_c_eq(t->id, entries[(NENTRIES+i-j-1) %
+ expect_c_eq(t->id, entries[(NENTRIES+i-j-1) %
NENTRIES].id, "Element id mismatch");
j++;
}
}
for (i = 0; i < NENTRIES; i++) {
t = qr_next(&entries[i], link);
- assert_c_eq(t->id, entries[(i+1) % NENTRIES].id,
+ expect_c_eq(t->id, entries[(i+1) % NENTRIES].id,
"Element id mismatch");
}
for (i = 0; i < NENTRIES; i++) {
t = qr_prev(&entries[i], link);
- assert_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id,
+ expect_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id,
"Element id mismatch");
}
}
@@ -127,13 +127,13 @@ TEST_BEGIN(test_qr_remove) {
for (i = 0; i < NENTRIES; i++) {
j = 0;
qr_foreach(t, &entries[i], link) {
- assert_c_eq(t->id, entries[i+j].id,
+ expect_c_eq(t->id, entries[i+j].id,
"Element id mismatch");
j++;
}
j = 0;
qr_reverse_foreach(t, &entries[i], link) {
- assert_c_eq(t->id, entries[NENTRIES - 1 - j].id,
+ expect_c_eq(t->id, entries[NENTRIES - 1 - j].id,
"Element id mismatch");
j++;
}
@@ -155,7 +155,7 @@ TEST_BEGIN(test_qr_before_insert) {
for (i = 0; i < NENTRIES; i++) {
j = 0;
qr_foreach(t, &entries[i], link) {
- assert_c_eq(t->id, entries[(NENTRIES+i-j) %
+ expect_c_eq(t->id, entries[(NENTRIES+i-j) %
NENTRIES].id, "Element id mismatch");
j++;
}
@@ -163,19 +163,19 @@ TEST_BEGIN(test_qr_before_insert) {
for (i = 0; i < NENTRIES; i++) {
j = 0;
qr_reverse_foreach(t, &entries[i], link) {
- assert_c_eq(t->id, entries[(i+j+1) % NENTRIES].id,
+ expect_c_eq(t->id, entries[(i+j+1) % NENTRIES].id,
"Element id mismatch");
j++;
}
}
for (i = 0; i < NENTRIES; i++) {
t = qr_next(&entries[i], link);
- assert_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id,
+ expect_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id,
"Element id mismatch");
}
for (i = 0; i < NENTRIES; i++) {
t = qr_prev(&entries[i], link);
- assert_c_eq(t->id, entries[(i+1) % NENTRIES].id,
+ expect_c_eq(t->id, entries[(i+1) % NENTRIES].id,
"Element id mismatch");
}
}
@@ -190,11 +190,11 @@ test_split_entries(ring_t *entries) {
j = 0;
qr_foreach(t, &entries[i], link) {
if (i < SPLIT_INDEX) {
- assert_c_eq(t->id,
+ expect_c_eq(t->id,
entries[(i+j) % SPLIT_INDEX].id,
"Element id mismatch");
} else {
- assert_c_eq(t->id, entries[(i+j-SPLIT_INDEX) %
+ expect_c_eq(t->id, entries[(i+j-SPLIT_INDEX) %
(NENTRIES-SPLIT_INDEX) + SPLIT_INDEX].id,
"Element id mismatch");
}
@@ -212,22 +212,22 @@ TEST_BEGIN(test_qr_meld_split) {
qr_after_insert(&entries[i - 1], &entries[i], link);
}
- qr_split(&entries[0], &entries[SPLIT_INDEX], ring_t, link);
+ qr_split(&entries[0], &entries[SPLIT_INDEX], link);
test_split_entries(entries);
- qr_meld(&entries[0], &entries[SPLIT_INDEX], ring_t, link);
+ qr_meld(&entries[0], &entries[SPLIT_INDEX], link);
test_entries_ring(entries);
- qr_meld(&entries[0], &entries[SPLIT_INDEX], ring_t, link);
+ qr_meld(&entries[0], &entries[SPLIT_INDEX], link);
test_split_entries(entries);
- qr_split(&entries[0], &entries[SPLIT_INDEX], ring_t, link);
+ qr_split(&entries[0], &entries[SPLIT_INDEX], link);
test_entries_ring(entries);
- qr_split(&entries[0], &entries[0], ring_t, link);
+ qr_split(&entries[0], &entries[0], link);
test_entries_ring(entries);
- qr_meld(&entries[0], &entries[0], ring_t, link);
+ qr_meld(&entries[0], &entries[0], link);
test_entries_ring(entries);
}
TEST_END
diff --git a/deps/jemalloc/test/unit/rb.c b/deps/jemalloc/test/unit/rb.c
index 65c049207..827ec510f 100644
--- a/deps/jemalloc/test/unit/rb.c
+++ b/deps/jemalloc/test/unit/rb.c
@@ -1,5 +1,7 @@
#include "test/jemalloc_test.h"
+#include <stdlib.h>
+
#include "jemalloc/internal/rb.h"
#define rbtn_black_height(a_type, a_field, a_rbt, r_height) do { \
@@ -13,27 +15,63 @@
} \
} while (0)
-typedef struct node_s node_t;
+static bool summarize_always_returns_true = false;
+typedef struct node_s node_t;
struct node_s {
#define NODE_MAGIC 0x9823af7e
uint32_t magic;
rb_node(node_t) link;
+ /* Order used by nodes. */
uint64_t key;
+ /*
+ * Our made-up summary property is "specialness", with summarization
+ * taking the max.
+ */
+ uint64_t specialness;
+
+ /*
+ * Used by some of the test randomization to avoid double-removing
+ * nodes.
+ */
+ bool mid_remove;
+
+ /*
+ * To test searching functionality, we want to temporarily weaken the
+ * ordering to allow non-equal nodes that nevertheless compare equal.
+ */
+ bool allow_duplicates;
+
+ /*
+ * In check_consistency, it's handy to know a node's rank in the tree;
+ * this tracks it (but only there; not all tests use this).
+ */
+ int rank;
+ int filtered_rank;
+
+ /*
+ * Replicate the internal structure of the tree, to make sure the
+ * implementation doesn't miss any updates.
+ */
+ const node_t *summary_lchild;
+ const node_t *summary_rchild;
+ uint64_t summary_max_specialness;
};
static int
node_cmp(const node_t *a, const node_t *b) {
int ret;
- assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic");
- assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic");
+ expect_u32_eq(a->magic, NODE_MAGIC, "Bad magic");
+ expect_u32_eq(b->magic, NODE_MAGIC, "Bad magic");
ret = (a->key > b->key) - (a->key < b->key);
- if (ret == 0) {
+ if (ret == 0 && !a->allow_duplicates) {
/*
* Duplicates are not allowed in the tree, so force an
- * arbitrary ordering for non-identical items with equal keys.
+ * arbitrary ordering for non-identical items with equal keys,
+ * unless the user is searching and wants to allow the
+ * duplicate.
*/
ret = (((uintptr_t)a) > ((uintptr_t)b))
- (((uintptr_t)a) < ((uintptr_t)b));
@@ -41,8 +79,77 @@ node_cmp(const node_t *a, const node_t *b) {
return ret;
}
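+/*
+ * Computes the max specialness over a node and the (already-summarized)
+ * subtrees rooted at its children; this is the summary property the test
+ * maintains.
+ */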
+static uint64_t
+node_subtree_specialness(node_t *n, const node_t *lchild,
+ const node_t *rchild) {
+ uint64_t subtree_specialness = n->specialness;
+ if (lchild != NULL
+ && lchild->summary_max_specialness > subtree_specialness) {
+ subtree_specialness = lchild->summary_max_specialness;
+ }
+ if (rchild != NULL
+ && rchild->summary_max_specialness > subtree_specialness) {
+ subtree_specialness = rchild->summary_max_specialness;
+ }
+ return subtree_specialness;
+}
+
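+/*
+ * Summarize callback for the summarized tree: recomputes the subtree's max
+ * specialness, records which children were seen, and reports whether anything
+ * changed (or unconditionally reports a change when
+ * summarize_always_returns_true is set).
+ */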
+static bool
+node_summarize(node_t *a, const node_t *lchild, const node_t *rchild) {
+ uint64_t new_summary_max_specialness = node_subtree_specialness(
+ a, lchild, rchild);
+ bool changed = (a->summary_lchild != lchild)
+ || (a->summary_rchild != rchild)
+ || (new_summary_max_specialness != a->summary_max_specialness);
+ a->summary_max_specialness = new_summary_max_specialness;
+ a->summary_lchild = lchild;
+ a->summary_rchild = rchild;
+ return changed || summarize_always_returns_true;
+}
+
typedef rb_tree(node_t) tree_t;
-rb_gen(static, tree_, tree_t, node_t, link, node_cmp);
+rb_summarized_proto(static, tree_, tree_t, node_t);
+rb_summarized_gen(static, tree_, tree_t, node_t, link, node_cmp,
+ node_summarize);
+
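+/*
+ * Filter callbacks for the filtered search/iteration APIs: ctx points at a
+ * minimum specialness; a node passes if its own specialness meets the
+ * threshold, and a subtree passes if its summarized max specialness does.
+ */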
+static bool
+specialness_filter_node(void *ctx, node_t *node) {
+ uint64_t specialness = *(uint64_t *)ctx;
+ return node->specialness >= specialness;
+}
+
+static bool
+specialness_filter_subtree(void *ctx, node_t *node) {
+ uint64_t specialness = *(uint64_t *)ctx;
+ return node->summary_max_specialness >= specialness;
+}
+
+static node_t *
+tree_iterate_cb(tree_t *tree, node_t *node, void *data) {
+ unsigned *i = (unsigned *)data;
+ node_t *search_node;
+
+ expect_u32_eq(node->magic, NODE_MAGIC, "Bad magic");
+
+ /* Test rb_search(). */
+ search_node = tree_search(tree, node);
+ expect_ptr_eq(search_node, node,
+ "tree_search() returned unexpected node");
+
+ /* Test rb_nsearch(). */
+ search_node = tree_nsearch(tree, node);
+ expect_ptr_eq(search_node, node,
+ "tree_nsearch() returned unexpected node");
+
+ /* Test rb_psearch(). */
+ search_node = tree_psearch(tree, node);
+ expect_ptr_eq(search_node, node,
+ "tree_psearch() returned unexpected node");
+
+ (*i)++;
+
+ return NULL;
+}
TEST_BEGIN(test_rb_empty) {
tree_t tree;
@@ -50,21 +157,47 @@ TEST_BEGIN(test_rb_empty) {
tree_new(&tree);
- assert_true(tree_empty(&tree), "Tree should be empty");
- assert_ptr_null(tree_first(&tree), "Unexpected node");
- assert_ptr_null(tree_last(&tree), "Unexpected node");
+ expect_true(tree_empty(&tree), "Tree should be empty");
+ expect_ptr_null(tree_first(&tree), "Unexpected node");
+ expect_ptr_null(tree_last(&tree), "Unexpected node");
key.key = 0;
key.magic = NODE_MAGIC;
- assert_ptr_null(tree_search(&tree, &key), "Unexpected node");
+ expect_ptr_null(tree_search(&tree, &key), "Unexpected node");
key.key = 0;
key.magic = NODE_MAGIC;
- assert_ptr_null(tree_nsearch(&tree, &key), "Unexpected node");
+ expect_ptr_null(tree_nsearch(&tree, &key), "Unexpected node");
+
+ key.key = 0;
+ key.magic = NODE_MAGIC;
+ expect_ptr_null(tree_psearch(&tree, &key), "Unexpected node");
+
+ unsigned nodes = 0;
+ tree_iter_filtered(&tree, NULL, &tree_iterate_cb,
+ &nodes, &specialness_filter_node, &specialness_filter_subtree,
+ NULL);
+ expect_u_eq(0, nodes, "");
+
+ nodes = 0;
+ tree_reverse_iter_filtered(&tree, NULL, &tree_iterate_cb,
+ &nodes, &specialness_filter_node, &specialness_filter_subtree,
+ NULL);
+ expect_u_eq(0, nodes, "");
+
+ expect_ptr_null(tree_first_filtered(&tree, &specialness_filter_node,
+ &specialness_filter_subtree, NULL), "");
+ expect_ptr_null(tree_last_filtered(&tree, &specialness_filter_node,
+ &specialness_filter_subtree, NULL), "");
key.key = 0;
key.magic = NODE_MAGIC;
- assert_ptr_null(tree_psearch(&tree, &key), "Unexpected node");
+ expect_ptr_null(tree_search_filtered(&tree, &key,
+ &specialness_filter_node, &specialness_filter_subtree, NULL), "");
+ expect_ptr_null(tree_nsearch_filtered(&tree, &key,
+ &specialness_filter_node, &specialness_filter_subtree, NULL), "");
+ expect_ptr_null(tree_psearch_filtered(&tree, &key,
+ &specialness_filter_node, &specialness_filter_subtree, NULL), "");
}
TEST_END
@@ -81,6 +214,16 @@ tree_recurse(node_t *node, unsigned black_height, unsigned black_depth) {
left_node = rbtn_left_get(node_t, link, node);
right_node = rbtn_right_get(node_t, link, node);
+ expect_ptr_eq(left_node, node->summary_lchild,
+ "summary missed a tree update");
+ expect_ptr_eq(right_node, node->summary_rchild,
+ "summary missed a tree update");
+
+ uint64_t expected_subtree_specialness = node_subtree_specialness(node,
+ left_node, right_node);
+ expect_u64_eq(expected_subtree_specialness,
+ node->summary_max_specialness, "Incorrect summary");
+
if (!rbtn_red_get(node_t, link, node)) {
black_depth++;
}
@@ -88,17 +231,17 @@ tree_recurse(node_t *node, unsigned black_height, unsigned black_depth) {
/* Red nodes must be interleaved with black nodes. */
if (rbtn_red_get(node_t, link, node)) {
if (left_node != NULL) {
- assert_false(rbtn_red_get(node_t, link, left_node),
+ expect_false(rbtn_red_get(node_t, link, left_node),
"Node should be black");
}
if (right_node != NULL) {
- assert_false(rbtn_red_get(node_t, link, right_node),
+ expect_false(rbtn_red_get(node_t, link, right_node),
"Node should be black");
}
}
/* Self. */
- assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic");
+ expect_u32_eq(node->magic, NODE_MAGIC, "Bad magic");
/* Left subtree. */
if (left_node != NULL) {
@@ -117,33 +260,6 @@ tree_recurse(node_t *node, unsigned black_height, unsigned black_depth) {
return ret;
}
-static node_t *
-tree_iterate_cb(tree_t *tree, node_t *node, void *data) {
- unsigned *i = (unsigned *)data;
- node_t *search_node;
-
- assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic");
-
- /* Test rb_search(). */
- search_node = tree_search(tree, node);
- assert_ptr_eq(search_node, node,
- "tree_search() returned unexpected node");
-
- /* Test rb_nsearch(). */
- search_node = tree_nsearch(tree, node);
- assert_ptr_eq(search_node, node,
- "tree_nsearch() returned unexpected node");
-
- /* Test rb_psearch(). */
- search_node = tree_psearch(tree, node);
- assert_ptr_eq(search_node, node,
- "tree_psearch() returned unexpected node");
-
- (*i)++;
-
- return NULL;
-}
-
static unsigned
tree_iterate(tree_t *tree) {
unsigned i;
@@ -174,14 +290,14 @@ node_remove(tree_t *tree, node_t *node, unsigned nnodes) {
/* Test rb_nsearch(). */
search_node = tree_nsearch(tree, node);
if (search_node != NULL) {
- assert_u64_ge(search_node->key, node->key,
+ expect_u64_ge(search_node->key, node->key,
"Key ordering error");
}
/* Test rb_psearch(). */
search_node = tree_psearch(tree, node);
if (search_node != NULL) {
- assert_u64_le(search_node->key, node->key,
+ expect_u64_le(search_node->key, node->key,
"Key ordering error");
}
@@ -189,10 +305,10 @@ node_remove(tree_t *tree, node_t *node, unsigned nnodes) {
rbtn_black_height(node_t, link, tree, black_height);
imbalances = tree_recurse(tree->rbt_root, black_height, 0);
- assert_u_eq(imbalances, 0, "Tree is unbalanced");
- assert_u_eq(tree_iterate(tree), nnodes-1,
+ expect_u_eq(imbalances, 0, "Tree is unbalanced");
+ expect_u_eq(tree_iterate(tree), nnodes-1,
"Unexpected node iteration count");
- assert_u_eq(tree_iterate_reverse(tree), nnodes-1,
+ expect_u_eq(tree_iterate_reverse(tree), nnodes-1,
"Unexpected node iteration count");
}
@@ -220,14 +336,16 @@ static void
destroy_cb(node_t *node, void *data) {
unsigned *nnodes = (unsigned *)data;
- assert_u_gt(*nnodes, 0, "Destruction removed too many nodes");
+ expect_u_gt(*nnodes, 0, "Destruction removed too many nodes");
(*nnodes)--;
}
TEST_BEGIN(test_rb_random) {
-#define NNODES 25
-#define NBAGS 250
-#define SEED 42
+ enum {
+ NNODES = 25,
+ NBAGS = 500,
+ SEED = 42
+ };
sfmt_t *sfmt;
uint64_t bag[NNODES];
tree_t tree;
@@ -255,12 +373,26 @@ TEST_BEGIN(test_rb_random) {
}
}
+ /*
+ * We alternate test behavior with a period of 2 here, and a
+ * period of 5 down below, so there's no cycle in which certain
+ * combinations get omitted.
+ */
+ summarize_always_returns_true = (i % 2 == 0);
+
for (j = 1; j <= NNODES; j++) {
/* Initialize tree and nodes. */
tree_new(&tree);
for (k = 0; k < j; k++) {
nodes[k].magic = NODE_MAGIC;
nodes[k].key = bag[k];
+ nodes[k].specialness = gen_rand64_range(sfmt,
+ NNODES);
+ nodes[k].mid_remove = false;
+ nodes[k].allow_duplicates = false;
+ nodes[k].summary_lchild = NULL;
+ nodes[k].summary_rchild = NULL;
+ nodes[k].summary_max_specialness = 0;
}
/* Insert nodes. */
@@ -271,19 +403,19 @@ TEST_BEGIN(test_rb_random) {
black_height);
imbalances = tree_recurse(tree.rbt_root,
black_height, 0);
- assert_u_eq(imbalances, 0,
+ expect_u_eq(imbalances, 0,
"Tree is unbalanced");
- assert_u_eq(tree_iterate(&tree), k+1,
+ expect_u_eq(tree_iterate(&tree), k+1,
"Unexpected node iteration count");
- assert_u_eq(tree_iterate_reverse(&tree), k+1,
+ expect_u_eq(tree_iterate_reverse(&tree), k+1,
"Unexpected node iteration count");
- assert_false(tree_empty(&tree),
+ expect_false(tree_empty(&tree),
"Tree should not be empty");
- assert_ptr_not_null(tree_first(&tree),
+ expect_ptr_not_null(tree_first(&tree),
"Tree should not be empty");
- assert_ptr_not_null(tree_last(&tree),
+ expect_ptr_not_null(tree_last(&tree),
"Tree should not be empty");
tree_next(&tree, &nodes[k]);
@@ -312,7 +444,7 @@ TEST_BEGIN(test_rb_random) {
remove_iterate_cb, (void *)&nnodes);
nnodes--;
} while (start != NULL);
- assert_u_eq(nnodes, 0,
+ expect_u_eq(nnodes, 0,
"Removal terminated early");
break;
} case 3: {
@@ -326,13 +458,13 @@ TEST_BEGIN(test_rb_random) {
(void *)&nnodes);
nnodes--;
} while (start != NULL);
- assert_u_eq(nnodes, 0,
+ expect_u_eq(nnodes, 0,
"Removal terminated early");
break;
} case 4: {
unsigned nnodes = j;
tree_destroy(&tree, destroy_cb, &nnodes);
- assert_u_eq(nnodes, 0,
+ expect_u_eq(nnodes, 0,
"Destruction terminated early");
break;
} default:
@@ -341,15 +473,547 @@ TEST_BEGIN(test_rb_random) {
}
}
fini_gen_rand(sfmt);
-#undef NNODES
-#undef NBAGS
-#undef SEED
+}
+TEST_END
+
+static void
+expect_simple_consistency(tree_t *tree, uint64_t specialness,
+ bool expected_empty, node_t *expected_first, node_t *expected_last) {
+ bool empty;
+ node_t *first;
+ node_t *last;
+
+ empty = tree_empty_filtered(tree, &specialness_filter_node,
+ &specialness_filter_subtree, &specialness);
+ expect_b_eq(expected_empty, empty, "");
+
+ first = tree_first_filtered(tree,
+ &specialness_filter_node, &specialness_filter_subtree,
+ (void *)&specialness);
+ expect_ptr_eq(expected_first, first, "");
+
+ last = tree_last_filtered(tree,
+ &specialness_filter_node, &specialness_filter_subtree,
+ (void *)&specialness);
+ expect_ptr_eq(expected_last, last, "");
+}
+
+TEST_BEGIN(test_rb_filter_simple) {
+ enum {FILTER_NODES = 10};
+ node_t nodes[FILTER_NODES];
+ for (unsigned i = 0; i < FILTER_NODES; i++) {
+ nodes[i].magic = NODE_MAGIC;
+ nodes[i].key = i;
+ if (i == 0) {
+ nodes[i].specialness = 0;
+ } else {
+ nodes[i].specialness = ffs_u(i);
+ }
+ nodes[i].mid_remove = false;
+ nodes[i].allow_duplicates = false;
+ nodes[i].summary_lchild = NULL;
+ nodes[i].summary_rchild = NULL;
+ nodes[i].summary_max_specialness = 0;
+ }
+
+ summarize_always_returns_true = false;
+
+ tree_t tree;
+ tree_new(&tree);
+
+ /* Should be empty */
+ expect_simple_consistency(&tree, /* specialness */ 0, /* empty */ true,
+ /* first */ NULL, /* last */ NULL);
+
+ /* Fill in just the odd nodes. */
+ for (int i = 1; i < FILTER_NODES; i += 2) {
+ tree_insert(&tree, &nodes[i]);
+ }
+
+ /* A search for an odd node should succeed. */
+ expect_simple_consistency(&tree, /* specialness */ 0, /* empty */ false,
+ /* first */ &nodes[1], /* last */ &nodes[9]);
+
+ /* But a search for an even one should fail. */
+ expect_simple_consistency(&tree, /* specialness */ 1, /* empty */ true,
+ /* first */ NULL, /* last */ NULL);
+
+ /* Now we add an even. */
+ tree_insert(&tree, &nodes[4]);
+ expect_simple_consistency(&tree, /* specialness */ 1, /* empty */ false,
+ /* first */ &nodes[4], /* last */ &nodes[4]);
+
+ /* A smaller even, and a larger even. */
+ tree_insert(&tree, &nodes[2]);
+ tree_insert(&tree, &nodes[8]);
+
+ /*
+ * A first-search (resp. last-search) for an even should switch to the
+ * lower (higher) one, now that it's been added.
+ */
+ expect_simple_consistency(&tree, /* specialness */ 1, /* empty */ false,
+ /* first */ &nodes[2], /* last */ &nodes[8]);
+
+ /*
+	 * If we remove 2, a first-search should go back to 4, while a
+ * last-search should remain unchanged.
+ */
+ tree_remove(&tree, &nodes[2]);
+ expect_simple_consistency(&tree, /* specialness */ 1, /* empty */ false,
+ /* first */ &nodes[4], /* last */ &nodes[8]);
+
+ /* Reinsert 2, then find it again. */
+ tree_insert(&tree, &nodes[2]);
+ expect_simple_consistency(&tree, /* specialness */ 1, /* empty */ false,
+ /* first */ &nodes[2], /* last */ &nodes[8]);
+
+ /* Searching for a multiple of 4 should not have changed. */
+ expect_simple_consistency(&tree, /* specialness */ 2, /* empty */ false,
+ /* first */ &nodes[4], /* last */ &nodes[8]);
+
+ /* And a multiple of 8 */
+ expect_simple_consistency(&tree, /* specialness */ 3, /* empty */ false,
+ /* first */ &nodes[8], /* last */ &nodes[8]);
+
+ /* But not a multiple of 16 */
+ expect_simple_consistency(&tree, /* specialness */ 4, /* empty */ true,
+ /* first */ NULL, /* last */ NULL);
+}
+TEST_END
+
+typedef struct iter_ctx_s iter_ctx_t;
+struct iter_ctx_s {
+ int ncalls;
+ node_t *last_node;
+
+ int ncalls_max;
+ bool forward;
+};
+
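+/*
+ * Callback for the filtered iterators: counts invocations, checks that only
+ * nodes passing the filter are visited and that they arrive in the expected
+ * order, and terminates iteration early once ncalls_max calls have been made.
+ */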
+static node_t *
+tree_iterate_filtered_cb(tree_t *tree, node_t *node, void *arg) {
+ iter_ctx_t *ctx = (iter_ctx_t *)arg;
+ ctx->ncalls++;
+ expect_u64_ge(node->specialness, 1,
+ "Should only invoke cb on nodes that pass the filter");
+ if (ctx->last_node != NULL) {
+ if (ctx->forward) {
+ expect_d_lt(node_cmp(ctx->last_node, node), 0,
+ "Incorrect iteration order");
+ } else {
+ expect_d_gt(node_cmp(ctx->last_node, node), 0,
+ "Incorrect iteration order");
+ }
+ }
+ ctx->last_node = node;
+ if (ctx->ncalls == ctx->ncalls_max) {
+ return node;
+ }
+ return NULL;
+}
+
+static int
+qsort_node_cmp(const void *ap, const void *bp) {
+ node_t *a = *(node_t **)ap;
+ node_t *b = *(node_t **)bp;
+ return node_cmp(a, b);
+}
+
+#define UPDATE_TEST_MAX 100
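+/*
+ * Brute-forces the expected results of every filtered query (first/last,
+ * next/prev, and the search variants) over the node array, then checks that
+ * the tree's filtered operations agree.
+ */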
+static void
+check_consistency(tree_t *tree, node_t nodes[UPDATE_TEST_MAX], int nnodes) {
+ uint64_t specialness = 1;
+
+ bool empty;
+ bool real_empty = true;
+ node_t *first;
+ node_t *real_first = NULL;
+ node_t *last;
+ node_t *real_last = NULL;
+ for (int i = 0; i < nnodes; i++) {
+ if (nodes[i].specialness >= specialness) {
+ real_empty = false;
+ if (real_first == NULL
+ || node_cmp(&nodes[i], real_first) < 0) {
+ real_first = &nodes[i];
+ }
+ if (real_last == NULL
+ || node_cmp(&nodes[i], real_last) > 0) {
+ real_last = &nodes[i];
+ }
+ }
+ }
+
+ empty = tree_empty_filtered(tree, &specialness_filter_node,
+ &specialness_filter_subtree, &specialness);
+ expect_b_eq(real_empty, empty, "");
+
+ first = tree_first_filtered(tree, &specialness_filter_node,
+ &specialness_filter_subtree, &specialness);
+ expect_ptr_eq(real_first, first, "");
+
+ last = tree_last_filtered(tree, &specialness_filter_node,
+ &specialness_filter_subtree, &specialness);
+ expect_ptr_eq(real_last, last, "");
+
+ for (int i = 0; i < nnodes; i++) {
+ node_t *next_filtered;
+ node_t *real_next_filtered = NULL;
+ node_t *prev_filtered;
+ node_t *real_prev_filtered = NULL;
+ for (int j = 0; j < nnodes; j++) {
+ if (nodes[j].specialness < specialness) {
+ continue;
+ }
+ if (node_cmp(&nodes[j], &nodes[i]) < 0
+ && (real_prev_filtered == NULL
+ || node_cmp(&nodes[j], real_prev_filtered) > 0)) {
+ real_prev_filtered = &nodes[j];
+ }
+ if (node_cmp(&nodes[j], &nodes[i]) > 0
+ && (real_next_filtered == NULL
+ || node_cmp(&nodes[j], real_next_filtered) < 0)) {
+ real_next_filtered = &nodes[j];
+ }
+ }
+ next_filtered = tree_next_filtered(tree, &nodes[i],
+ &specialness_filter_node, &specialness_filter_subtree,
+ &specialness);
+ expect_ptr_eq(real_next_filtered, next_filtered, "");
+
+ prev_filtered = tree_prev_filtered(tree, &nodes[i],
+ &specialness_filter_node, &specialness_filter_subtree,
+ &specialness);
+ expect_ptr_eq(real_prev_filtered, prev_filtered, "");
+
+ node_t *search_filtered;
+ node_t *real_search_filtered;
+ node_t *nsearch_filtered;
+ node_t *real_nsearch_filtered;
+ node_t *psearch_filtered;
+ node_t *real_psearch_filtered;
+
+ /*
+ * search, nsearch, psearch from a node before nodes[i] in the
+ * ordering.
+ */
+ node_t before;
+ before.magic = NODE_MAGIC;
+ before.key = nodes[i].key - 1;
+ before.allow_duplicates = false;
+ real_search_filtered = NULL;
+ search_filtered = tree_search_filtered(tree, &before,
+ &specialness_filter_node, &specialness_filter_subtree,
+ &specialness);
+ expect_ptr_eq(real_search_filtered, search_filtered, "");
+
+ real_nsearch_filtered = (nodes[i].specialness >= specialness ?
+ &nodes[i] : real_next_filtered);
+ nsearch_filtered = tree_nsearch_filtered(tree, &before,
+ &specialness_filter_node, &specialness_filter_subtree,
+ &specialness);
+ expect_ptr_eq(real_nsearch_filtered, nsearch_filtered, "");
+
+ real_psearch_filtered = real_prev_filtered;
+ psearch_filtered = tree_psearch_filtered(tree, &before,
+ &specialness_filter_node, &specialness_filter_subtree,
+ &specialness);
+ expect_ptr_eq(real_psearch_filtered, psearch_filtered, "");
+
+ /* search, nsearch, psearch from nodes[i] */
+ real_search_filtered = (nodes[i].specialness >= specialness ?
+ &nodes[i] : NULL);
+ search_filtered = tree_search_filtered(tree, &nodes[i],
+ &specialness_filter_node, &specialness_filter_subtree,
+ &specialness);
+ expect_ptr_eq(real_search_filtered, search_filtered, "");
+
+ real_nsearch_filtered = (nodes[i].specialness >= specialness ?
+ &nodes[i] : real_next_filtered);
+ nsearch_filtered = tree_nsearch_filtered(tree, &nodes[i],
+ &specialness_filter_node, &specialness_filter_subtree,
+ &specialness);
+ expect_ptr_eq(real_nsearch_filtered, nsearch_filtered, "");
+
+ real_psearch_filtered = (nodes[i].specialness >= specialness ?
+ &nodes[i] : real_prev_filtered);
+ psearch_filtered = tree_psearch_filtered(tree, &nodes[i],
+ &specialness_filter_node, &specialness_filter_subtree,
+ &specialness);
+ expect_ptr_eq(real_psearch_filtered, psearch_filtered, "");
+
+ /*
+ * search, nsearch, psearch from a node equivalent to but
+ * distinct from nodes[i].
+ */
+ node_t equiv;
+ equiv.magic = NODE_MAGIC;
+ equiv.key = nodes[i].key;
+ equiv.allow_duplicates = true;
+ real_search_filtered = (nodes[i].specialness >= specialness ?
+ &nodes[i] : NULL);
+ search_filtered = tree_search_filtered(tree, &equiv,
+ &specialness_filter_node, &specialness_filter_subtree,
+ &specialness);
+ expect_ptr_eq(real_search_filtered, search_filtered, "");
+
+ real_nsearch_filtered = (nodes[i].specialness >= specialness ?
+ &nodes[i] : real_next_filtered);
+ nsearch_filtered = tree_nsearch_filtered(tree, &equiv,
+ &specialness_filter_node, &specialness_filter_subtree,
+ &specialness);
+ expect_ptr_eq(real_nsearch_filtered, nsearch_filtered, "");
+
+ real_psearch_filtered = (nodes[i].specialness >= specialness ?
+ &nodes[i] : real_prev_filtered);
+ psearch_filtered = tree_psearch_filtered(tree, &equiv,
+ &specialness_filter_node, &specialness_filter_subtree,
+ &specialness);
+ expect_ptr_eq(real_psearch_filtered, psearch_filtered, "");
+
+ /*
+ * search, nsearch, psearch from a node after nodes[i] in the
+ * ordering.
+ */
+ node_t after;
+ after.magic = NODE_MAGIC;
+ after.key = nodes[i].key + 1;
+ after.allow_duplicates = false;
+ real_search_filtered = NULL;
+ search_filtered = tree_search_filtered(tree, &after,
+ &specialness_filter_node, &specialness_filter_subtree,
+ &specialness);
+ expect_ptr_eq(real_search_filtered, search_filtered, "");
+
+ real_nsearch_filtered = real_next_filtered;
+ nsearch_filtered = tree_nsearch_filtered(tree, &after,
+ &specialness_filter_node, &specialness_filter_subtree,
+ &specialness);
+ expect_ptr_eq(real_nsearch_filtered, nsearch_filtered, "");
+
+ real_psearch_filtered = (nodes[i].specialness >= specialness ?
+ &nodes[i] : real_prev_filtered);
+ psearch_filtered = tree_psearch_filtered(tree, &after,
+ &specialness_filter_node, &specialness_filter_subtree,
+ &specialness);
+ expect_ptr_eq(real_psearch_filtered, psearch_filtered, "");
+ }
+
+ /* Filtered iteration test setup. */
+ int nspecial = 0;
+ node_t *sorted_nodes[UPDATE_TEST_MAX];
+ node_t *sorted_filtered_nodes[UPDATE_TEST_MAX];
+ for (int i = 0; i < nnodes; i++) {
+ sorted_nodes[i] = &nodes[i];
+ }
+ qsort(sorted_nodes, nnodes, sizeof(node_t *), &qsort_node_cmp);
+ for (int i = 0; i < nnodes; i++) {
+ sorted_nodes[i]->rank = i;
+ sorted_nodes[i]->filtered_rank = nspecial;
+ if (sorted_nodes[i]->specialness >= 1) {
+ sorted_filtered_nodes[nspecial] = sorted_nodes[i];
+ nspecial++;
+ }
+ }
+
+ node_t *iter_result;
+
+ iter_ctx_t ctx;
+ ctx.ncalls = 0;
+ ctx.last_node = NULL;
+ ctx.ncalls_max = INT_MAX;
+ ctx.forward = true;
+
+ /* Filtered forward iteration from the beginning. */
+ iter_result = tree_iter_filtered(tree, NULL, &tree_iterate_filtered_cb,
+ &ctx, &specialness_filter_node, &specialness_filter_subtree,
+ &specialness);
+ expect_ptr_null(iter_result, "");
+ expect_d_eq(nspecial, ctx.ncalls, "");
+ /* Filtered forward iteration from a starting point. */
+ for (int i = 0; i < nnodes; i++) {
+ ctx.ncalls = 0;
+ ctx.last_node = NULL;
+ iter_result = tree_iter_filtered(tree, &nodes[i],
+ &tree_iterate_filtered_cb, &ctx, &specialness_filter_node,
+ &specialness_filter_subtree, &specialness);
+ expect_ptr_null(iter_result, "");
+ expect_d_eq(nspecial - nodes[i].filtered_rank, ctx.ncalls, "");
+ }
+ /* Filtered forward iteration from the beginning, with stopping */
+ for (int i = 0; i < nspecial; i++) {
+ ctx.ncalls = 0;
+ ctx.last_node = NULL;
+ ctx.ncalls_max = i + 1;
+ iter_result = tree_iter_filtered(tree, NULL,
+ &tree_iterate_filtered_cb, &ctx, &specialness_filter_node,
+ &specialness_filter_subtree, &specialness);
+ expect_ptr_eq(sorted_filtered_nodes[i], iter_result, "");
+ expect_d_eq(ctx.ncalls, i + 1, "");
+ }
+ /* Filtered forward iteration from a starting point, with stopping. */
+ for (int i = 0; i < nnodes; i++) {
+ for (int j = 0; j < nspecial - nodes[i].filtered_rank; j++) {
+ ctx.ncalls = 0;
+ ctx.last_node = NULL;
+ ctx.ncalls_max = j + 1;
+ iter_result = tree_iter_filtered(tree, &nodes[i],
+ &tree_iterate_filtered_cb, &ctx,
+ &specialness_filter_node,
+ &specialness_filter_subtree, &specialness);
+ expect_d_eq(j + 1, ctx.ncalls, "");
+ expect_ptr_eq(sorted_filtered_nodes[
+ nodes[i].filtered_rank + j], iter_result, "");
+ }
+ }
+
+ /* Backwards iteration. */
+ ctx.ncalls = 0;
+ ctx.last_node = NULL;
+ ctx.ncalls_max = INT_MAX;
+ ctx.forward = false;
+
+ /* Filtered backward iteration from the end. */
+ iter_result = tree_reverse_iter_filtered(tree, NULL,
+ &tree_iterate_filtered_cb, &ctx, &specialness_filter_node,
+ &specialness_filter_subtree, &specialness);
+ expect_ptr_null(iter_result, "");
+ expect_d_eq(nspecial, ctx.ncalls, "");
+ /* Filtered backward iteration from a starting point. */
+ for (int i = 0; i < nnodes; i++) {
+ ctx.ncalls = 0;
+ ctx.last_node = NULL;
+ iter_result = tree_reverse_iter_filtered(tree, &nodes[i],
+ &tree_iterate_filtered_cb, &ctx, &specialness_filter_node,
+ &specialness_filter_subtree, &specialness);
+ expect_ptr_null(iter_result, "");
+ int surplus_rank = (nodes[i].specialness >= 1 ? 1 : 0);
+ expect_d_eq(nodes[i].filtered_rank + surplus_rank, ctx.ncalls,
+ "");
+ }
+ /* Filtered backward iteration from the end, with stopping */
+ for (int i = 0; i < nspecial; i++) {
+ ctx.ncalls = 0;
+ ctx.last_node = NULL;
+ ctx.ncalls_max = i + 1;
+ iter_result = tree_reverse_iter_filtered(tree, NULL,
+ &tree_iterate_filtered_cb, &ctx, &specialness_filter_node,
+ &specialness_filter_subtree, &specialness);
+ expect_ptr_eq(sorted_filtered_nodes[nspecial - i - 1],
+ iter_result, "");
+ expect_d_eq(ctx.ncalls, i + 1, "");
+ }
+ /* Filtered backward iteration from a starting point, with stopping. */
+ for (int i = 0; i < nnodes; i++) {
+ int surplus_rank = (nodes[i].specialness >= 1 ? 1 : 0);
+ for (int j = 0; j < nodes[i].filtered_rank + surplus_rank;
+ j++) {
+ ctx.ncalls = 0;
+ ctx.last_node = NULL;
+ ctx.ncalls_max = j + 1;
+ iter_result = tree_reverse_iter_filtered(tree,
+ &nodes[i], &tree_iterate_filtered_cb, &ctx,
+ &specialness_filter_node,
+ &specialness_filter_subtree, &specialness);
+ expect_d_eq(j + 1, ctx.ncalls, "");
+ expect_ptr_eq(sorted_filtered_nodes[
+ nodes[i].filtered_rank - j - 1 + surplus_rank],
+ iter_result, "");
+ }
+ }
+}
+
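+/*
+ * Randomized stress test: builds trees, randomly removes and reinserts nodes,
+ * then flips individual nodes' specialness and verifies that
+ * tree_update_summaries() keeps every filtered query consistent.
+ */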
+static void
+do_update_search_test(int nnodes, int ntrees, int nremovals,
+ int nupdates) {
+ node_t nodes[UPDATE_TEST_MAX];
+ assert(nnodes <= UPDATE_TEST_MAX);
+
+ sfmt_t *sfmt = init_gen_rand(12345);
+ for (int i = 0; i < ntrees; i++) {
+ tree_t tree;
+ tree_new(&tree);
+ for (int j = 0; j < nnodes; j++) {
+ nodes[j].magic = NODE_MAGIC;
+ /*
+ * In consistency checking, we increment or decrement a
+ * key and assume that the result is not a key in the
+ * tree. This isn't a *real* concern with 64-bit keys
+			 * and a good PRNG, but why not be correct anyway?
+ */
+ nodes[j].key = 2 * gen_rand64(sfmt);
+ nodes[j].specialness = 0;
+ nodes[j].mid_remove = false;
+ nodes[j].allow_duplicates = false;
+ nodes[j].summary_lchild = NULL;
+ nodes[j].summary_rchild = NULL;
+ nodes[j].summary_max_specialness = 0;
+ tree_insert(&tree, &nodes[j]);
+ }
+ for (int j = 0; j < nremovals; j++) {
+ int victim = (int)gen_rand64_range(sfmt, nnodes);
+ if (!nodes[victim].mid_remove) {
+ tree_remove(&tree, &nodes[victim]);
+ nodes[victim].mid_remove = true;
+ }
+ }
+ for (int j = 0; j < nnodes; j++) {
+ if (nodes[j].mid_remove) {
+ nodes[j].mid_remove = false;
+ nodes[j].key = 2 * gen_rand64(sfmt);
+ tree_insert(&tree, &nodes[j]);
+ }
+ }
+ for (int j = 0; j < nupdates; j++) {
+ uint32_t ind = gen_rand32_range(sfmt, nnodes);
+ nodes[ind].specialness = 1 - nodes[ind].specialness;
+ tree_update_summaries(&tree, &nodes[ind]);
+ check_consistency(&tree, nodes, nnodes);
+ }
+ }
+}
+
+TEST_BEGIN(test_rb_update_search) {
+ summarize_always_returns_true = false;
+ do_update_search_test(2, 100, 3, 50);
+ do_update_search_test(5, 100, 3, 50);
+ do_update_search_test(12, 100, 5, 1000);
+ do_update_search_test(100, 1, 50, 500);
+}
+TEST_END
+
+typedef rb_tree(node_t) unsummarized_tree_t;
+rb_gen(static UNUSED, unsummarized_tree_, unsummarized_tree_t, node_t, link,
+ node_cmp);
+
+static node_t *
+unsummarized_tree_iterate_cb(unsummarized_tree_t *tree, node_t *node,
+ void *data) {
+ unsigned *i = (unsigned *)data;
+ (*i)++;
+ return NULL;
+}
+/*
+ * The unsummarized and summarized functionality is implemented via the same
+ * functions; we don't need to do much more than check that excluding the
+ * filtering functionality doesn't break anything.
+ */
+TEST_BEGIN(test_rb_unsummarized) {
+ unsummarized_tree_t tree;
+ unsummarized_tree_new(&tree);
+ unsigned nnodes = 0;
+ unsummarized_tree_iter(&tree, NULL, &unsummarized_tree_iterate_cb,
+ &nnodes);
+ expect_u_eq(0, nnodes, "");
}
TEST_END
int
main(void) {
- return test(
+ return test_no_reentrancy(
test_rb_empty,
- test_rb_random);
+ test_rb_random,
+ test_rb_filter_simple,
+ test_rb_update_search,
+ test_rb_unsummarized);
}
diff --git a/deps/jemalloc/test/unit/retained.c b/deps/jemalloc/test/unit/retained.c
index 7993fd3d9..aa9f6847b 100644
--- a/deps/jemalloc/test/unit/retained.c
+++ b/deps/jemalloc/test/unit/retained.c
@@ -1,5 +1,6 @@
#include "test/jemalloc_test.h"
+#include "jemalloc/internal/san.h"
#include "jemalloc/internal/spin.h"
static unsigned arena_ind;
@@ -12,58 +13,58 @@ static atomic_u_t nfinished;
static unsigned
do_arena_create(extent_hooks_t *h) {
- unsigned arena_ind;
- size_t sz = sizeof(unsigned);
- assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
+ unsigned new_arena_ind;
+ size_t ind_sz = sizeof(unsigned);
+ expect_d_eq(mallctl("arenas.create", (void *)&new_arena_ind, &ind_sz,
(void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0,
"Unexpected mallctl() failure");
- return arena_ind;
+ return new_arena_ind;
}
static void
-do_arena_destroy(unsigned arena_ind) {
+do_arena_destroy(unsigned ind) {
size_t mib[3];
size_t miblen;
miblen = sizeof(mib)/sizeof(size_t);
- assert_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0,
+ expect_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0,
"Unexpected mallctlnametomib() failure");
- mib[1] = (size_t)arena_ind;
- assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
+ mib[1] = (size_t)ind;
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
"Unexpected mallctlbymib() failure");
}
static void
do_refresh(void) {
- uint64_t epoch = 1;
- assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
- sizeof(epoch)), 0, "Unexpected mallctl() failure");
+ uint64_t refresh_epoch = 1;
+ expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&refresh_epoch,
+ sizeof(refresh_epoch)), 0, "Unexpected mallctl() failure");
}
static size_t
-do_get_size_impl(const char *cmd, unsigned arena_ind) {
+do_get_size_impl(const char *cmd, unsigned ind) {
size_t mib[4];
size_t miblen = sizeof(mib) / sizeof(size_t);
size_t z = sizeof(size_t);
- assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
+ expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
- mib[2] = arena_ind;
+ mib[2] = ind;
size_t size;
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &z, NULL, 0),
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&size, &z, NULL, 0),
0, "Unexpected mallctlbymib([\"%s\"], ...) failure", cmd);
return size;
}
static size_t
-do_get_active(unsigned arena_ind) {
- return do_get_size_impl("stats.arenas.0.pactive", arena_ind) * PAGE;
+do_get_active(unsigned ind) {
+ return do_get_size_impl("stats.arenas.0.pactive", ind) * PAGE;
}
static size_t
-do_get_mapped(unsigned arena_ind) {
- return do_get_size_impl("stats.arenas.0.mapped", arena_ind);
+do_get_mapped(unsigned ind) {
+ return do_get_size_impl("stats.arenas.0.mapped", ind);
}
static void *
@@ -76,7 +77,7 @@ thd_start(void *arg) {
next_epoch) {
spin_adaptive(&spinner);
}
- assert_u_eq(cur_epoch, next_epoch, "Unexpected epoch");
+ expect_u_eq(cur_epoch, next_epoch, "Unexpected epoch");
/*
* Allocate. The main thread will reset the arena, so there's
@@ -86,7 +87,7 @@ thd_start(void *arg) {
void *p = mallocx(sz, MALLOCX_ARENA(arena_ind) |
MALLOCX_TCACHE_NONE
);
- assert_ptr_not_null(p,
+ expect_ptr_not_null(p,
"Unexpected mallocx() failure\n");
}
@@ -99,10 +100,12 @@ thd_start(void *arg) {
TEST_BEGIN(test_retained) {
test_skip_if(!config_stats);
+ test_skip_if(opt_hpa);
arena_ind = do_arena_create(NULL);
sz = nallocx(HUGEPAGE, 0);
- esz = sz + sz_large_pad;
+ size_t guard_sz = san_guard_enabled() ? SAN_PAGE_GUARDS_SIZE : 0;
+ esz = sz + sz_large_pad + guard_sz;
atomic_store_u(&epoch, 0, ATOMIC_RELAXED);
@@ -132,17 +135,18 @@ TEST_BEGIN(test_retained) {
*/
do_refresh();
- size_t allocated = esz * nthreads * PER_THD_NALLOCS;
+ size_t allocated = (esz - guard_sz) * nthreads *
+ PER_THD_NALLOCS;
size_t active = do_get_active(arena_ind);
- assert_zu_le(allocated, active, "Unexpected active memory");
+ expect_zu_le(allocated, active, "Unexpected active memory");
size_t mapped = do_get_mapped(arena_ind);
- assert_zu_le(active, mapped, "Unexpected mapped memory");
+ expect_zu_le(active, mapped, "Unexpected mapped memory");
arena_t *arena = arena_get(tsdn_fetch(), arena_ind, false);
size_t usable = 0;
size_t fragmented = 0;
for (pszind_t pind = sz_psz2ind(HUGEPAGE); pind <
- arena->extent_grow_next; pind++) {
+ arena->pa_shard.pac.exp_grow.next; pind++) {
size_t psz = sz_pind2sz(pind);
size_t psz_fragmented = psz % esz;
size_t psz_usable = psz - psz_fragmented;
@@ -150,7 +154,7 @@ TEST_BEGIN(test_retained) {
* Only consider size classes that wouldn't be skipped.
*/
if (psz_usable > 0) {
- assert_zu_lt(usable, allocated,
+ expect_zu_lt(usable, allocated,
"Excessive retained memory "
"(%#zx[+%#zx] > %#zx)", usable, psz_usable,
allocated);
@@ -165,7 +169,7 @@ TEST_BEGIN(test_retained) {
* (rather than retaining) during reset.
*/
do_arena_destroy(arena_ind);
- assert_u_eq(do_arena_create(NULL), arena_ind,
+ expect_u_eq(do_arena_create(NULL), arena_ind,
"Unexpected arena index");
}
diff --git a/deps/jemalloc/test/unit/rtree.c b/deps/jemalloc/test/unit/rtree.c
index 90adca134..4101b72be 100644
--- a/deps/jemalloc/test/unit/rtree.c
+++ b/deps/jemalloc/test/unit/rtree.c
@@ -2,80 +2,30 @@
#include "jemalloc/internal/rtree.h"
-rtree_node_alloc_t *rtree_node_alloc_orig;
-rtree_node_dalloc_t *rtree_node_dalloc_orig;
-rtree_leaf_alloc_t *rtree_leaf_alloc_orig;
-rtree_leaf_dalloc_t *rtree_leaf_dalloc_orig;
+#define INVALID_ARENA_IND ((1U << MALLOCX_ARENA_BITS) - 1)
/* Potentially too large to safely place on the stack. */
rtree_t test_rtree;
-static rtree_node_elm_t *
-rtree_node_alloc_intercept(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
- rtree_node_elm_t *node;
-
- if (rtree != &test_rtree) {
- return rtree_node_alloc_orig(tsdn, rtree, nelms);
- }
-
- malloc_mutex_unlock(tsdn, &rtree->init_lock);
- node = (rtree_node_elm_t *)calloc(nelms, sizeof(rtree_node_elm_t));
- assert_ptr_not_null(node, "Unexpected calloc() failure");
- malloc_mutex_lock(tsdn, &rtree->init_lock);
-
- return node;
-}
-
-static void
-rtree_node_dalloc_intercept(tsdn_t *tsdn, rtree_t *rtree,
- rtree_node_elm_t *node) {
- if (rtree != &test_rtree) {
- rtree_node_dalloc_orig(tsdn, rtree, node);
- return;
- }
-
- free(node);
-}
-
-static rtree_leaf_elm_t *
-rtree_leaf_alloc_intercept(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
- rtree_leaf_elm_t *leaf;
-
- if (rtree != &test_rtree) {
- return rtree_leaf_alloc_orig(tsdn, rtree, nelms);
- }
-
- malloc_mutex_unlock(tsdn, &rtree->init_lock);
- leaf = (rtree_leaf_elm_t *)calloc(nelms, sizeof(rtree_leaf_elm_t));
- assert_ptr_not_null(leaf, "Unexpected calloc() failure");
- malloc_mutex_lock(tsdn, &rtree->init_lock);
-
- return leaf;
-}
-
-static void
-rtree_leaf_dalloc_intercept(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *leaf) {
- if (rtree != &test_rtree) {
- rtree_leaf_dalloc_orig(tsdn, rtree, leaf);
- return;
- }
-
- free(leaf);
-}
-
TEST_BEGIN(test_rtree_read_empty) {
tsdn_t *tsdn;
tsdn = tsdn_fetch();
+ base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks,
+ /* metadata_use_hooks */ true);
+ expect_ptr_not_null(base, "Unexpected base_new failure");
+
rtree_t *rtree = &test_rtree;
rtree_ctx_t rtree_ctx;
rtree_ctx_data_init(&rtree_ctx);
- assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure");
- assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx, PAGE,
- false), "rtree_extent_read() should return NULL for empty tree");
- rtree_delete(tsdn, rtree);
+ expect_false(rtree_new(rtree, base, false),
+ "Unexpected rtree_new() failure");
+ rtree_contents_t contents;
+ expect_true(rtree_read_independent(tsdn, rtree, &rtree_ctx, PAGE,
+ &contents), "rtree_read_independent() should fail on empty rtree.");
+
+ base_delete(tsdn, base);
}
TEST_END
@@ -83,75 +33,119 @@ TEST_END
#undef NITERS
#undef SEED
+static edata_t *
+alloc_edata(void) {
+ void *ret = mallocx(sizeof(edata_t), MALLOCX_ALIGN(EDATA_ALIGNMENT));
+ assert_ptr_not_null(ret, "Unexpected mallocx() failure");
+
+ return ret;
+}
+
TEST_BEGIN(test_rtree_extrema) {
- extent_t extent_a, extent_b;
- extent_init(&extent_a, NULL, NULL, SC_LARGE_MINCLASS, false,
- sz_size2index(SC_LARGE_MINCLASS), 0,
- extent_state_active, false, false, true, EXTENT_NOT_HEAD);
- extent_init(&extent_b, NULL, NULL, 0, false, SC_NSIZES, 0,
- extent_state_active, false, false, true, EXTENT_NOT_HEAD);
+ edata_t *edata_a, *edata_b;
+ edata_a = alloc_edata();
+ edata_b = alloc_edata();
+ edata_init(edata_a, INVALID_ARENA_IND, NULL, SC_LARGE_MINCLASS,
+ false, sz_size2index(SC_LARGE_MINCLASS), 0,
+ extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
+ edata_init(edata_b, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
+ extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
tsdn_t *tsdn = tsdn_fetch();
+ base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks,
+ /* metadata_use_hooks */ true);
+ expect_ptr_not_null(base, "Unexpected base_new failure");
+
rtree_t *rtree = &test_rtree;
rtree_ctx_t rtree_ctx;
rtree_ctx_data_init(&rtree_ctx);
- assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure");
-
- assert_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, &extent_a,
- extent_szind_get(&extent_a), extent_slab_get(&extent_a)),
+ expect_false(rtree_new(rtree, base, false),
+ "Unexpected rtree_new() failure");
+
+ rtree_contents_t contents_a;
+ contents_a.edata = edata_a;
+ contents_a.metadata.szind = edata_szind_get(edata_a);
+ contents_a.metadata.slab = edata_slab_get(edata_a);
+ contents_a.metadata.is_head = edata_is_head_get(edata_a);
+ contents_a.metadata.state = edata_state_get(edata_a);
+ expect_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, contents_a),
+ "Unexpected rtree_write() failure");
+ expect_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, contents_a),
"Unexpected rtree_write() failure");
- rtree_szind_slab_update(tsdn, rtree, &rtree_ctx, PAGE,
- extent_szind_get(&extent_a), extent_slab_get(&extent_a));
- assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx, PAGE, true),
- &extent_a,
- "rtree_extent_read() should return previously set value");
-
- assert_false(rtree_write(tsdn, rtree, &rtree_ctx, ~((uintptr_t)0),
- &extent_b, extent_szind_get_maybe_invalid(&extent_b),
- extent_slab_get(&extent_b)), "Unexpected rtree_write() failure");
- assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx,
- ~((uintptr_t)0), true), &extent_b,
- "rtree_extent_read() should return previously set value");
-
- rtree_delete(tsdn, rtree);
+ rtree_contents_t read_contents_a = rtree_read(tsdn, rtree, &rtree_ctx,
+ PAGE);
+ expect_true(contents_a.edata == read_contents_a.edata
+ && contents_a.metadata.szind == read_contents_a.metadata.szind
+ && contents_a.metadata.slab == read_contents_a.metadata.slab
+ && contents_a.metadata.is_head == read_contents_a.metadata.is_head
+ && contents_a.metadata.state == read_contents_a.metadata.state,
+ "rtree_read() should return previously set value");
+
+ rtree_contents_t contents_b;
+ contents_b.edata = edata_b;
+ contents_b.metadata.szind = edata_szind_get_maybe_invalid(edata_b);
+ contents_b.metadata.slab = edata_slab_get(edata_b);
+ contents_b.metadata.is_head = edata_is_head_get(edata_b);
+ contents_b.metadata.state = edata_state_get(edata_b);
+ expect_false(rtree_write(tsdn, rtree, &rtree_ctx, ~((uintptr_t)0),
+ contents_b), "Unexpected rtree_write() failure");
+ rtree_contents_t read_contents_b = rtree_read(tsdn, rtree, &rtree_ctx,
+ ~((uintptr_t)0));
+ assert_true(contents_b.edata == read_contents_b.edata
+ && contents_b.metadata.szind == read_contents_b.metadata.szind
+ && contents_b.metadata.slab == read_contents_b.metadata.slab
+ && contents_b.metadata.is_head == read_contents_b.metadata.is_head
+ && contents_b.metadata.state == read_contents_b.metadata.state,
+ "rtree_read() should return previously set value");
+
+ base_delete(tsdn, base);
}
TEST_END
TEST_BEGIN(test_rtree_bits) {
tsdn_t *tsdn = tsdn_fetch();
+ base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks,
+ /* metadata_use_hooks */ true);
+ expect_ptr_not_null(base, "Unexpected base_new failure");
uintptr_t keys[] = {PAGE, PAGE + 1,
PAGE + (((uintptr_t)1) << LG_PAGE) - 1};
-
- extent_t extent;
- extent_init(&extent, NULL, NULL, 0, false, SC_NSIZES, 0,
- extent_state_active, false, false, true, EXTENT_NOT_HEAD);
+ edata_t *edata_c = alloc_edata();
+ edata_init(edata_c, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
+ extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
rtree_t *rtree = &test_rtree;
rtree_ctx_t rtree_ctx;
rtree_ctx_data_init(&rtree_ctx);
- assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure");
+ expect_false(rtree_new(rtree, base, false),
+ "Unexpected rtree_new() failure");
for (unsigned i = 0; i < sizeof(keys)/sizeof(uintptr_t); i++) {
- assert_false(rtree_write(tsdn, rtree, &rtree_ctx, keys[i],
- &extent, SC_NSIZES, false),
- "Unexpected rtree_write() failure");
+ rtree_contents_t contents;
+ contents.edata = edata_c;
+ contents.metadata.szind = SC_NSIZES;
+ contents.metadata.slab = false;
+ contents.metadata.is_head = false;
+ contents.metadata.state = extent_state_active;
+
+ expect_false(rtree_write(tsdn, rtree, &rtree_ctx, keys[i],
+ contents), "Unexpected rtree_write() failure");
for (unsigned j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) {
- assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx,
- keys[j], true), &extent,
- "rtree_extent_read() should return previously set "
+ expect_ptr_eq(rtree_read(tsdn, rtree, &rtree_ctx,
+ keys[j]).edata, edata_c,
+ "rtree_edata_read() should return previously set "
"value and ignore insignificant key bits; i=%u, "
"j=%u, set key=%#"FMTxPTR", get key=%#"FMTxPTR, i,
j, keys[i], keys[j]);
}
- assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx,
- (((uintptr_t)2) << LG_PAGE), false),
+ expect_ptr_null(rtree_read(tsdn, rtree, &rtree_ctx,
+ (((uintptr_t)2) << LG_PAGE)).edata,
"Only leftmost rtree leaf should be set; i=%u", i);
rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]);
}
- rtree_delete(tsdn, rtree);
+ base_delete(tsdn, base);
}
TEST_END
@@ -160,69 +154,136 @@ TEST_BEGIN(test_rtree_random) {
#define SEED 42
sfmt_t *sfmt = init_gen_rand(SEED);
tsdn_t *tsdn = tsdn_fetch();
+
+ base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks,
+ /* metadata_use_hooks */ true);
+ expect_ptr_not_null(base, "Unexpected base_new failure");
+
uintptr_t keys[NSET];
rtree_t *rtree = &test_rtree;
rtree_ctx_t rtree_ctx;
rtree_ctx_data_init(&rtree_ctx);
- extent_t extent;
- extent_init(&extent, NULL, NULL, 0, false, SC_NSIZES, 0,
- extent_state_active, false, false, true, EXTENT_NOT_HEAD);
+ edata_t *edata_d = alloc_edata();
+ edata_init(edata_d, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
+ extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
- assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure");
+ expect_false(rtree_new(rtree, base, false),
+ "Unexpected rtree_new() failure");
for (unsigned i = 0; i < NSET; i++) {
keys[i] = (uintptr_t)gen_rand64(sfmt);
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree,
&rtree_ctx, keys[i], false, true);
- assert_ptr_not_null(elm,
+ expect_ptr_not_null(elm,
"Unexpected rtree_leaf_elm_lookup() failure");
- rtree_leaf_elm_write(tsdn, rtree, elm, &extent, SC_NSIZES,
- false);
- assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx,
- keys[i], true), &extent,
- "rtree_extent_read() should return previously set value");
+ rtree_contents_t contents;
+ contents.edata = edata_d;
+ contents.metadata.szind = SC_NSIZES;
+ contents.metadata.slab = false;
+ contents.metadata.is_head = false;
+ contents.metadata.state = edata_state_get(edata_d);
+ rtree_leaf_elm_write(tsdn, rtree, elm, contents);
+ expect_ptr_eq(rtree_read(tsdn, rtree, &rtree_ctx,
+ keys[i]).edata, edata_d,
+ "rtree_edata_read() should return previously set value");
}
for (unsigned i = 0; i < NSET; i++) {
- assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx,
- keys[i], true), &extent,
- "rtree_extent_read() should return previously set value, "
+ expect_ptr_eq(rtree_read(tsdn, rtree, &rtree_ctx,
+ keys[i]).edata, edata_d,
+ "rtree_edata_read() should return previously set value, "
"i=%u", i);
}
for (unsigned i = 0; i < NSET; i++) {
rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]);
- assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx,
- keys[i], true),
- "rtree_extent_read() should return previously set value");
+ expect_ptr_null(rtree_read(tsdn, rtree, &rtree_ctx,
+ keys[i]).edata,
+ "rtree_edata_read() should return previously set value");
}
for (unsigned i = 0; i < NSET; i++) {
- assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx,
- keys[i], true),
- "rtree_extent_read() should return previously set value");
+ expect_ptr_null(rtree_read(tsdn, rtree, &rtree_ctx,
+ keys[i]).edata,
+ "rtree_edata_read() should return previously set value");
}
- rtree_delete(tsdn, rtree);
+ base_delete(tsdn, base);
fini_gen_rand(sfmt);
#undef NSET
#undef SEED
}
TEST_END
+static void
+test_rtree_range_write(tsdn_t *tsdn, rtree_t *rtree, uintptr_t start,
+ uintptr_t end) {
+ rtree_ctx_t rtree_ctx;
+ rtree_ctx_data_init(&rtree_ctx);
+
+ edata_t *edata_e = alloc_edata();
+ edata_init(edata_e, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
+ extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
+ rtree_contents_t contents;
+ contents.edata = edata_e;
+ contents.metadata.szind = SC_NSIZES;
+ contents.metadata.slab = false;
+ contents.metadata.is_head = false;
+ contents.metadata.state = extent_state_active;
+
+ expect_false(rtree_write(tsdn, rtree, &rtree_ctx, start,
+ contents), "Unexpected rtree_write() failure");
+ expect_false(rtree_write(tsdn, rtree, &rtree_ctx, end,
+ contents), "Unexpected rtree_write() failure");
+
+ rtree_write_range(tsdn, rtree, &rtree_ctx, start, end, contents);
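+ /* Each page in the written range should now map to edata_e. */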
+ for (uintptr_t i = 0; i < ((end - start) >> LG_PAGE); i++) {
+ expect_ptr_eq(rtree_read(tsdn, rtree, &rtree_ctx,
+ start + (i << LG_PAGE)).edata, edata_e,
+ "rtree_edata_read() should return previously set value");
+ }
+ rtree_clear_range(tsdn, rtree, &rtree_ctx, start, end);
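+ /* After clearing, the leaf elms still exist but no longer hold an edata. */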
+ rtree_leaf_elm_t *elm;
+ for (uintptr_t i = 0; i < ((end - start) >> LG_PAGE); i++) {
+ elm = rtree_leaf_elm_lookup(tsdn, rtree, &rtree_ctx,
+ start + (i << LG_PAGE), false, false);
+ expect_ptr_not_null(elm, "Should have been initialized.");
+ expect_ptr_null(rtree_leaf_elm_read(tsdn, rtree, elm,
+ false).edata, "Should have been cleared.");
+ }
+}
+
+TEST_BEGIN(test_rtree_range) {
+ tsdn_t *tsdn = tsdn_fetch();
+ base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks,
+ /* metadata_use_hooks */ true);
+ expect_ptr_not_null(base, "Unexpected base_new failure");
+
+ rtree_t *rtree = &test_rtree;
+ expect_false(rtree_new(rtree, base, false),
+ "Unexpected rtree_new() failure");
+
+ /* Not crossing rtree node boundary first. */
+ uintptr_t start = ZU(1) << rtree_leaf_maskbits();
+ uintptr_t end = start + (ZU(100) << LG_PAGE);
+ test_rtree_range_write(tsdn, rtree, start, end);
+
+ /* Crossing rtree node boundary. */
+ start = (ZU(1) << rtree_leaf_maskbits()) - (ZU(10) << LG_PAGE);
+ end = start + (ZU(100) << LG_PAGE);
+ assert_ptr_ne((void *)rtree_leafkey(start), (void *)rtree_leafkey(end),
+ "The range should span across two rtree nodes");
+ test_rtree_range_write(tsdn, rtree, start, end);
+
+ base_delete(tsdn, base);
+}
+TEST_END
+
int
main(void) {
- rtree_node_alloc_orig = rtree_node_alloc;
- rtree_node_alloc = rtree_node_alloc_intercept;
- rtree_node_dalloc_orig = rtree_node_dalloc;
- rtree_node_dalloc = rtree_node_dalloc_intercept;
- rtree_leaf_alloc_orig = rtree_leaf_alloc;
- rtree_leaf_alloc = rtree_leaf_alloc_intercept;
- rtree_leaf_dalloc_orig = rtree_leaf_dalloc;
- rtree_leaf_dalloc = rtree_leaf_dalloc_intercept;
-
return test(
test_rtree_read_empty,
test_rtree_extrema,
test_rtree_bits,
- test_rtree_random);
+ test_rtree_random,
+ test_rtree_range);
}
diff --git a/deps/jemalloc/test/unit/safety_check.c b/deps/jemalloc/test/unit/safety_check.c
index bf4bd86d6..84726675f 100644
--- a/deps/jemalloc/test/unit/safety_check.c
+++ b/deps/jemalloc/test/unit/safety_check.c
@@ -13,6 +13,13 @@ void fake_abort(const char *message) {
fake_abort_called = true;
}
+static void
+buffer_overflow_write(char *ptr, size_t size) {
+ /*
+ * A volatile index keeps the compiler from warning about (or eliding)
+ * the intentional out-of-bounds write.
+ */
+ volatile size_t idx = size;
+ ptr[idx] = 0;
+}
+
TEST_BEGIN(test_malloc_free_overflow) {
test_skip_if(!config_prof);
test_skip_if(!config_opt_safety_checks);
@@ -20,11 +27,11 @@ TEST_BEGIN(test_malloc_free_overflow) {
safety_check_set_abort(&fake_abort);
/* Buffer overflow! */
char* ptr = malloc(128);
- ptr[128] = 0;
+ buffer_overflow_write(ptr, 128);
free(ptr);
safety_check_set_abort(NULL);
- assert_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
+ expect_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
fake_abort_called = false;
}
TEST_END
@@ -36,11 +43,11 @@ TEST_BEGIN(test_mallocx_dallocx_overflow) {
safety_check_set_abort(&fake_abort);
/* Buffer overflow! */
char* ptr = mallocx(128, 0);
- ptr[128] = 0;
+ buffer_overflow_write(ptr, 128);
dallocx(ptr, 0);
safety_check_set_abort(NULL);
- assert_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
+ expect_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
fake_abort_called = false;
}
TEST_END
@@ -52,11 +59,11 @@ TEST_BEGIN(test_malloc_sdallocx_overflow) {
safety_check_set_abort(&fake_abort);
/* Buffer overflow! */
char* ptr = malloc(128);
- ptr[128] = 0;
+ buffer_overflow_write(ptr, 128);
sdallocx(ptr, 128, 0);
safety_check_set_abort(NULL);
- assert_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
+ expect_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
fake_abort_called = false;
}
TEST_END
@@ -68,12 +75,12 @@ TEST_BEGIN(test_realloc_overflow) {
safety_check_set_abort(&fake_abort);
/* Buffer overflow! */
char* ptr = malloc(128);
- ptr[128] = 0;
+ buffer_overflow_write(ptr, 128);
ptr = realloc(ptr, 129);
safety_check_set_abort(NULL);
free(ptr);
- assert_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
+ expect_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
fake_abort_called = false;
}
TEST_END
@@ -85,12 +92,12 @@ TEST_BEGIN(test_rallocx_overflow) {
safety_check_set_abort(&fake_abort);
/* Buffer overflow! */
char* ptr = malloc(128);
- ptr[128] = 0;
+ buffer_overflow_write(ptr, 128);
ptr = rallocx(ptr, 129, 0);
safety_check_set_abort(NULL);
free(ptr);
- assert_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
+ expect_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
fake_abort_called = false;
}
TEST_END
@@ -102,11 +109,11 @@ TEST_BEGIN(test_xallocx_overflow) {
safety_check_set_abort(&fake_abort);
/* Buffer overflow! */
char* ptr = malloc(128);
- ptr[128] = 0;
+ buffer_overflow_write(ptr, 128);
size_t result = xallocx(ptr, 129, 0, 0);
- assert_zu_eq(result, 128, "");
+ expect_zu_eq(result, 128, "");
free(ptr);
- assert_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
+ expect_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
fake_abort_called = false;
safety_check_set_abort(NULL);
}
diff --git a/deps/jemalloc/test/unit/safety_check.sh b/deps/jemalloc/test/unit/safety_check.sh
index 8fcc7d8a7..485f9bf0a 100644
--- a/deps/jemalloc/test/unit/safety_check.sh
+++ b/deps/jemalloc/test/unit/safety_check.sh
@@ -1,5 +1,5 @@
#!/bin/sh
if [ "x${enable_prof}" = "x1" ] ; then
- export MALLOC_CONF="prof:true,lg_prof_sample:0"
+ export MALLOC_CONF="prof:true,prof_active:true,lg_prof_sample:0"
fi
diff --git a/deps/jemalloc/test/unit/san.c b/deps/jemalloc/test/unit/san.c
new file mode 100644
index 000000000..5b98f52e6
--- /dev/null
+++ b/deps/jemalloc/test/unit/san.c
@@ -0,0 +1,207 @@
+#include "test/jemalloc_test.h"
+#include "test/arena_util.h"
+#include "test/san.h"
+
+#include "jemalloc/internal/san.h"
+
+static void
+verify_extent_guarded(tsdn_t *tsdn, void *ptr) {
+ expect_true(extent_is_guarded(tsdn, ptr),
+ "All extents should be guarded.");
+}
+
+#define MAX_SMALL_ALLOCATIONS 4096
+void *small_alloc[MAX_SMALL_ALLOCATIONS];
+
+/*
+ * This test allocates page-sized slabs and checks that any two slabs have
+ * at least one page in between them. That page is supposed to be the guard
+ * page.
+ */
+TEST_BEGIN(test_guarded_small) {
+ test_skip_if(opt_prof);
+
+ tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
+ unsigned npages = 16, pages_found = 0, ends_found = 0;
+ VARIABLE_ARRAY(uintptr_t, pages, npages);
+
+ /* Allocate to get sanitized pointers. */
+ size_t slab_sz = PAGE;
+ size_t sz = slab_sz / 8;
+ unsigned n_alloc = 0;
+ while (n_alloc < MAX_SMALL_ALLOCATIONS) {
+ void *ptr = malloc(sz);
+ expect_ptr_not_null(ptr, "Unexpected malloc() failure");
+ small_alloc[n_alloc] = ptr;
+ verify_extent_guarded(tsdn, ptr);
+ if ((uintptr_t)ptr % PAGE == 0) {
+ assert_u_lt(pages_found, npages,
+ "Unexpectedly large number of page aligned allocs");
+ pages[pages_found++] = (uintptr_t)ptr;
+ }
+ if (((uintptr_t)ptr + (uintptr_t)sz) % PAGE == 0) {
+ ends_found++;
+ }
+ n_alloc++;
+ if (pages_found == npages && ends_found == npages) {
+ break;
+ }
+ }
+ /* Should have found the ptrs to be checked for overflow and underflow. */
+ expect_u_eq(pages_found, npages, "Could not find the expected pages.");
+ expect_u_eq(ends_found, npages, "Could not find the expected page ends.");
+
+ /* Verify the pages are not contiguous, i.e. separated by guards. */
+ for (unsigned i = 0; i < npages - 1; i++) {
+ for (unsigned j = i + 1; j < npages; j++) {
+ uintptr_t ptr_diff = pages[i] > pages[j] ?
+ pages[i] - pages[j] : pages[j] - pages[i];
+ expect_zu_ge((size_t)ptr_diff, slab_sz + PAGE,
+ "There should be at least one pages between "
+ "guarded slabs");
+ }
+ }
+
+ for (unsigned i = 0; i < n_alloc; i++) {
+ free(small_alloc[i]);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_guarded_large) {
+ tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
+ unsigned nlarge = 32;
+ VARIABLE_ARRAY(uintptr_t, large, nlarge);
+
+ /* Allocate to get sanitized pointers. */
+ size_t large_sz = SC_LARGE_MINCLASS;
+ for (unsigned i = 0; i < nlarge; i++) {
+ void *ptr = malloc(large_sz);
+ verify_extent_guarded(tsdn, ptr);
+ expect_ptr_not_null(ptr, "Unexpected malloc() failure");
+ large[i] = (uintptr_t)ptr;
+ }
+
+ /* Verify the pages are not contiguous, i.e. separated by guards. */
+ for (unsigned i = 0; i < nlarge; i++) {
+ for (unsigned j = i + 1; j < nlarge; j++) {
+ uintptr_t ptr_diff = large[i] > large[j] ?
+ large[i] - large[j] : large[j] - large[i];
+ expect_zu_ge((size_t)ptr_diff, large_sz + 2 * PAGE,
+ "There should be at least two pages between "
+ " guarded large allocations");
+ }
+ }
+
+ for (unsigned i = 0; i < nlarge; i++) {
+ free((void *)large[i]);
+ }
+}
+TEST_END
+
+static void
+verify_pdirty(unsigned arena_ind, uint64_t expected) {
+ uint64_t pdirty = get_arena_pdirty(arena_ind);
+ expect_u64_eq(pdirty, expected / PAGE,
+ "Unexpected dirty page amount.");
+}
+
+static void
+verify_pmuzzy(unsigned arena_ind, uint64_t expected) {
+ uint64_t pmuzzy = get_arena_pmuzzy(arena_ind);
+ expect_u64_eq(pmuzzy, expected / PAGE,
+ "Unexpected muzzy page amount.");
+}
+
+TEST_BEGIN(test_guarded_decay) {
+ unsigned arena_ind = do_arena_create(-1, -1);
+ do_decay(arena_ind);
+ do_purge(arena_ind);
+
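+ /* Baseline: after decay and purge, the arena should have no dirty or muzzy pages. */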
+ verify_pdirty(arena_ind, 0);
+ verify_pmuzzy(arena_ind, 0);
+
+ /* Verify that guarded extents are counted as dirty. */
+ size_t sz1 = PAGE, sz2 = PAGE * 2;
+ /* W/o maps_coalesce, guarded extents are unguarded eagerly. */
+ size_t add_guard_size = maps_coalesce ? 0 : SAN_PAGE_GUARDS_SIZE;
+ generate_dirty(arena_ind, sz1);
+ verify_pdirty(arena_ind, sz1 + add_guard_size);
+ verify_pmuzzy(arena_ind, 0);
+
+ /* Should reuse the first extent. */
+ generate_dirty(arena_ind, sz1);
+ verify_pdirty(arena_ind, sz1 + add_guard_size);
+ verify_pmuzzy(arena_ind, 0);
+
+ /* Should not reuse; expect new dirty pages. */
+ generate_dirty(arena_ind, sz2);
+ verify_pdirty(arena_ind, sz1 + sz2 + 2 * add_guard_size);
+ verify_pmuzzy(arena_ind, 0);
+
+ tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
+ int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
+
+ /* Should reuse dirty extents for the two mallocx. */
+ void *p1 = do_mallocx(sz1, flags);
+ verify_extent_guarded(tsdn, p1);
+ verify_pdirty(arena_ind, sz2 + add_guard_size);
+
+ void *p2 = do_mallocx(sz2, flags);
+ verify_extent_guarded(tsdn, p2);
+ verify_pdirty(arena_ind, 0);
+ verify_pmuzzy(arena_ind, 0);
+
+ dallocx(p1, flags);
+ verify_pdirty(arena_ind, sz1 + add_guard_size);
+ dallocx(p2, flags);
+ verify_pdirty(arena_ind, sz1 + sz2 + 2 * add_guard_size);
+ verify_pmuzzy(arena_ind, 0);
+
+ do_purge(arena_ind);
+ verify_pdirty(arena_ind, 0);
+ verify_pmuzzy(arena_ind, 0);
+
+ if (config_stats) {
+ expect_u64_eq(get_arena_npurge(arena_ind), 1,
+ "Expected purging to occur");
+ expect_u64_eq(get_arena_dirty_npurge(arena_ind), 1,
+ "Expected purging to occur");
+ expect_u64_eq(get_arena_dirty_purged(arena_ind),
+ (sz1 + sz2 + 2 * add_guard_size) / PAGE,
+ "Expected purging to occur");
+ expect_u64_eq(get_arena_muzzy_npurge(arena_ind), 0,
+ "Expected purging to occur");
+ }
+
+ if (opt_retain) {
+ /*
+ * With retain, guarded extents are not mergable and will be
+ * cached in ecache_retained. They should be reused.
+ */
+ void *new_p1 = do_mallocx(sz1, flags);
+ verify_extent_guarded(tsdn, p1);
+ expect_ptr_eq(p1, new_p1, "Expect to reuse p1");
+
+ void *new_p2 = do_mallocx(sz2, flags);
+ verify_extent_guarded(tsdn, p2);
+ expect_ptr_eq(p2, new_p2, "Expect to reuse p2");
+
+ dallocx(new_p1, flags);
+ verify_pdirty(arena_ind, sz1 + add_guard_size);
+ dallocx(new_p2, flags);
+ verify_pdirty(arena_ind, sz1 + sz2 + 2 * add_guard_size);
+ verify_pmuzzy(arena_ind, 0);
+ }
+
+ do_arena_destroy(arena_ind);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_guarded_small,
+ test_guarded_large,
+ test_guarded_decay);
+}
diff --git a/deps/jemalloc/test/unit/san.sh b/deps/jemalloc/test/unit/san.sh
new file mode 100644
index 000000000..933b4a4d6
--- /dev/null
+++ b/deps/jemalloc/test/unit/san.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+export MALLOC_CONF="san_guard_large:1,san_guard_small:1"
diff --git a/deps/jemalloc/test/unit/san_bump.c b/deps/jemalloc/test/unit/san_bump.c
new file mode 100644
index 000000000..cafa37fee
--- /dev/null
+++ b/deps/jemalloc/test/unit/san_bump.c
@@ -0,0 +1,111 @@
+#include "test/jemalloc_test.h"
+#include "test/arena_util.h"
+
+#include "jemalloc/internal/arena_structs.h"
+#include "jemalloc/internal/san_bump.h"
+
+TEST_BEGIN(test_san_bump_alloc) {
+ test_skip_if(!maps_coalesce || !opt_retain);
+
+ tsdn_t *tsdn = tsdn_fetch();
+
+ san_bump_alloc_t sba;
+ san_bump_alloc_init(&sba);
+
+ unsigned arena_ind = do_arena_create(0, 0);
+ assert_u_ne(arena_ind, UINT_MAX, "Failed to create an arena");
+
+ arena_t *arena = arena_get(tsdn, arena_ind, false);
+ pac_t *pac = &arena->pa_shard.pac;
+
+ size_t alloc_size = PAGE * 16;
+ size_t alloc_n = alloc_size / sizeof(unsigned);
+ edata_t* edata = san_bump_alloc(tsdn, &sba, pac, pac_ehooks_get(pac),
+ alloc_size, /* zero */ false);
+
+ expect_ptr_not_null(edata, "Failed to allocate edata");
+ expect_u_eq(edata_arena_ind_get(edata), arena_ind,
+ "Edata was assigned an incorrect arena id");
+ expect_zu_eq(edata_size_get(edata), alloc_size,
+ "Allocated edata of incorrect size");
+ expect_false(edata_slab_get(edata),
+ "Bump allocator incorrectly assigned 'slab' to true");
+ expect_true(edata_committed_get(edata), "Edata is not committed");
+
+ void *ptr = edata_addr_get(edata);
+ expect_ptr_not_null(ptr, "Edata was assigned an invalid address");
+ /* Test that memory is allocated; no guard pages are misplaced */
+ for (unsigned i = 0; i < alloc_n; ++i) {
+ ((unsigned *)ptr)[i] = 1;
+ }
+
+ size_t alloc_size2 = PAGE * 28;
+ size_t alloc_n2 = alloc_size2 / sizeof(unsigned);
+ edata_t *edata2 = san_bump_alloc(tsdn, &sba, pac, pac_ehooks_get(pac),
+ alloc_size2, /* zero */ true);
+
+ expect_ptr_not_null(edata2, "Failed to allocate edata");
+ expect_u_eq(edata_arena_ind_get(edata2), arena_ind,
+ "Edata was assigned an incorrect arena id");
+ expect_zu_eq(edata_size_get(edata2), alloc_size2,
+ "Allocated edata of incorrect size");
+ expect_false(edata_slab_get(edata2),
+ "Bump allocator incorrectly assigned 'slab' to true");
+ expect_true(edata_committed_get(edata2), "Edata is not committed");
+
+ void *ptr2 = edata_addr_get(edata2);
+ expect_ptr_not_null(ptr2, "Edata was assigned an invalid address");
+
+ uintptr_t ptrdiff = ptr2 > ptr ? (uintptr_t)ptr2 - (uintptr_t)ptr
+ : (uintptr_t)ptr - (uintptr_t)ptr2;
+ size_t between_allocs = (size_t)ptrdiff - alloc_size;
+
+ expect_zu_ge(between_allocs, PAGE,
+ "Guard page between allocs is missing");
+
+ for (unsigned i = 0; i < alloc_n2; ++i) {
+ expect_u_eq(((unsigned *)ptr2)[i], 0, "Memory is not zeroed");
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_large_alloc_size) {
+ test_skip_if(!maps_coalesce || !opt_retain);
+
+ tsdn_t *tsdn = tsdn_fetch();
+
+ san_bump_alloc_t sba;
+ san_bump_alloc_init(&sba);
+
+ unsigned arena_ind = do_arena_create(0, 0);
+ assert_u_ne(arena_ind, UINT_MAX, "Failed to create an arena");
+
+ arena_t *arena = arena_get(tsdn, arena_ind, false);
+ pac_t *pac = &arena->pa_shard.pac;
+
+ size_t alloc_size = SBA_RETAINED_ALLOC_SIZE * 2;
+ edata_t* edata = san_bump_alloc(tsdn, &sba, pac, pac_ehooks_get(pac),
+ alloc_size, /* zero */ false);
+ expect_u_eq(edata_arena_ind_get(edata), arena_ind,
+ "Edata was assigned an incorrect arena id");
+ expect_zu_eq(edata_size_get(edata), alloc_size,
+ "Allocated edata of incorrect size");
+ expect_false(edata_slab_get(edata),
+ "Bump allocator incorrectly assigned 'slab' to true");
+ expect_true(edata_committed_get(edata), "Edata is not committed");
+
+ void *ptr = edata_addr_get(edata);
+ expect_ptr_not_null(ptr, "Edata was assigned an invalid address");
+ /* Test that memory is allocated; no guard pages are misplaced */
+ for (unsigned i = 0; i < alloc_size / PAGE; ++i) {
+ *((char *)ptr + PAGE * i) = 1;
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_san_bump_alloc,
+ test_large_alloc_size);
+}
diff --git a/deps/jemalloc/test/unit/sc.c b/deps/jemalloc/test/unit/sc.c
index bf51d8e59..d207481c3 100644
--- a/deps/jemalloc/test/unit/sc.c
+++ b/deps/jemalloc/test/unit/sc.c
@@ -9,7 +9,7 @@ TEST_BEGIN(test_update_slab_size) {
+ (ZU(tiny->ndelta) << tiny->lg_delta);
size_t pgs_too_big = (tiny_size * BITMAP_MAXBITS + PAGE - 1) / PAGE + 1;
sc_data_update_slab_size(&data, tiny_size, tiny_size, (int)pgs_too_big);
- assert_zu_lt((size_t)tiny->pgs, pgs_too_big, "Allowed excessive pages");
+ expect_zu_lt((size_t)tiny->pgs, pgs_too_big, "Allowed excessive pages");
sc_data_update_slab_size(&data, 1, 10 * PAGE, 1);
for (int i = 0; i < data.nbins; i++) {
@@ -17,9 +17,9 @@ TEST_BEGIN(test_update_slab_size) {
size_t reg_size = (ZU(1) << sc->lg_base)
+ (ZU(sc->ndelta) << sc->lg_delta);
if (reg_size <= PAGE) {
- assert_d_eq(sc->pgs, 1, "Ignored valid page size hint");
+ expect_d_eq(sc->pgs, 1, "Ignored valid page size hint");
} else {
- assert_d_gt(sc->pgs, 1,
+ expect_d_gt(sc->pgs, 1,
"Allowed invalid page size hint");
}
}
diff --git a/deps/jemalloc/test/unit/sec.c b/deps/jemalloc/test/unit/sec.c
new file mode 100644
index 000000000..f3ec403da
--- /dev/null
+++ b/deps/jemalloc/test/unit/sec.c
@@ -0,0 +1,634 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/sec.h"
+
+typedef struct pai_test_allocator_s pai_test_allocator_t;
+struct pai_test_allocator_s {
+ pai_t pai;
+ bool alloc_fail;
+ size_t alloc_count;
+ size_t alloc_batch_count;
+ size_t dalloc_count;
+ size_t dalloc_batch_count;
+ /*
+ * We use a simple bump allocator as the implementation. This isn't
+ * *really* correct, since we may allow expansion into a subsequent
+ * allocation, but it's not like the SEC is really examining the
+ * pointers it gets back; this is mostly just helpful for debugging.
+ */
+ uintptr_t next_ptr;
+ size_t expand_count;
+ bool expand_return_value;
+ size_t shrink_count;
+ bool shrink_return_value;
+};
+
+static void
+test_sec_init(sec_t *sec, pai_t *fallback, size_t nshards, size_t max_alloc,
+ size_t max_bytes) {
+ sec_opts_t opts;
+ opts.nshards = 1;
+ opts.max_alloc = max_alloc;
+ opts.max_bytes = max_bytes;
+ /*
+ * Just choose reasonable defaults for these; most tests don't care so
+ * long as they're something reasonable.
+ */
+ opts.bytes_after_flush = max_bytes / 2;
+ opts.batch_fill_extra = 4;
+
+ /*
+ * We end up leaking this base, but that's fine; this test is
+ * short-running, and SECs are arena-scoped in reality.
+ */
+ base_t *base = base_new(TSDN_NULL, /* ind */ 123,
+ &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);
+
+ bool err = sec_init(TSDN_NULL, sec, base, fallback, &opts);
+ assert_false(err, "Unexpected initialization failure");
+ assert_u_ge(sec->npsizes, 0, "Zero size classes allowed for caching");
+}
+
+static inline edata_t *
+pai_test_allocator_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
+ size_t alignment, bool zero, bool guarded, bool frequent_reuse,
+ bool *deferred_work_generated) {
+ assert(!guarded);
+ pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
+ if (ta->alloc_fail) {
+ return NULL;
+ }
+ edata_t *edata = malloc(sizeof(edata_t));
+ assert_ptr_not_null(edata, "");
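+ /* Bump allocation: round next_ptr up to the alignment, then hand out the next size bytes. */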
+ ta->next_ptr += alignment - 1;
+ edata_init(edata, /* arena_ind */ 0,
+ (void *)(ta->next_ptr & ~(alignment - 1)), size,
+ /* slab */ false,
+ /* szind */ 0, /* sn */ 1, extent_state_active, /* zero */ zero,
+ /* committed */ true, /* ranged */ false, EXTENT_NOT_HEAD);
+ ta->next_ptr += size;
+ ta->alloc_count++;
+ return edata;
+}
+
+static inline size_t
+pai_test_allocator_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size,
+ size_t nallocs, edata_list_active_t *results,
+ bool *deferred_work_generated) {
+ pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
+ if (ta->alloc_fail) {
+ return 0;
+ }
+ for (size_t i = 0; i < nallocs; i++) {
+ edata_t *edata = malloc(sizeof(edata_t));
+ assert_ptr_not_null(edata, "");
+ edata_init(edata, /* arena_ind */ 0,
+ (void *)ta->next_ptr, size,
+ /* slab */ false, /* szind */ 0, /* sn */ 1,
+ extent_state_active, /* zero */ false, /* committed */ true,
+ /* ranged */ false, EXTENT_NOT_HEAD);
+ ta->next_ptr += size;
+ ta->alloc_batch_count++;
+ edata_list_active_append(results, edata);
+ }
+ return nallocs;
+}
+
+static bool
+pai_test_allocator_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ size_t old_size, size_t new_size, bool zero,
+ bool *deferred_work_generated) {
+ pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
+ ta->expand_count++;
+ return ta->expand_return_value;
+}
+
+static bool
+pai_test_allocator_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ size_t old_size, size_t new_size, bool *deferred_work_generated) {
+ pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
+ ta->shrink_count++;
+ return ta->shrink_return_value;
+}
+
+static void
+pai_test_allocator_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ bool *deferred_work_generated) {
+ pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
+ ta->dalloc_count++;
+ free(edata);
+}
+
+static void
+pai_test_allocator_dalloc_batch(tsdn_t *tsdn, pai_t *self,
+ edata_list_active_t *list, bool *deferred_work_generated) {
+ pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
+
+ edata_t *edata;
+ while ((edata = edata_list_active_first(list)) != NULL) {
+ edata_list_active_remove(list, edata);
+ ta->dalloc_batch_count++;
+ free(edata);
+ }
+}
+
+static inline void
+pai_test_allocator_init(pai_test_allocator_t *ta) {
+ ta->alloc_fail = false;
+ ta->alloc_count = 0;
+ ta->alloc_batch_count = 0;
+ ta->dalloc_count = 0;
+ ta->dalloc_batch_count = 0;
+ /* Just don't start the edata at 0. */
+ ta->next_ptr = 10 * PAGE;
+ ta->expand_count = 0;
+ ta->expand_return_value = false;
+ ta->shrink_count = 0;
+ ta->shrink_return_value = false;
+ ta->pai.alloc = &pai_test_allocator_alloc;
+ ta->pai.alloc_batch = &pai_test_allocator_alloc_batch;
+ ta->pai.expand = &pai_test_allocator_expand;
+ ta->pai.shrink = &pai_test_allocator_shrink;
+ ta->pai.dalloc = &pai_test_allocator_dalloc;
+ ta->pai.dalloc_batch = &pai_test_allocator_dalloc_batch;
+}
+
+TEST_BEGIN(test_reuse) {
+ pai_test_allocator_t ta;
+ pai_test_allocator_init(&ta);
+ sec_t sec;
+ /*
+ * We can't use the "real" tsd, since we malloc within the test
+ * allocator hooks; we'd get lock inversion crashes. Eventually, we
+ * should have a way to mock tsds, but for now just don't do any
+ * lock-order checking.
+ */
+ tsdn_t *tsdn = TSDN_NULL;
+ /*
+ * 11 allocs apiece of 1-PAGE and 2-PAGE objects means that we should be
+ * able to get to 33 pages in the cache before triggering a flush. We
+ * set the flush limit to twice this amount, to avoid accidentally
+ * triggering a flush caused by batch allocation down the cache-fill
+ * pathway disrupting the ordering.
+ */
+ enum { NALLOCS = 11 };
+ edata_t *one_page[NALLOCS];
+ edata_t *two_page[NALLOCS];
+ bool deferred_work_generated = false;
+ test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ 2 * PAGE,
+ /* max_bytes */ 2 * (NALLOCS * PAGE + NALLOCS * 2 * PAGE));
+ for (int i = 0; i < NALLOCS; i++) {
+ one_page[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
+ /* zero */ false, /* guarded */ false, /* frequent_reuse */
+ false, &deferred_work_generated);
+ expect_ptr_not_null(one_page[i], "Unexpected alloc failure");
+ two_page[i] = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE,
+ /* zero */ false, /* guarded */ false, /* frequent_reuse */
+ false, &deferred_work_generated);
+ expect_ptr_not_null(two_page[i], "Unexpected alloc failure");
+ }
+ expect_zu_eq(0, ta.alloc_count, "Should be using batch allocs");
+ size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;
+ expect_zu_le(2 * NALLOCS, max_allocs,
+ "Incorrect number of allocations");
+ expect_zu_eq(0, ta.dalloc_count,
+ "Incorrect number of allocations");
+ /*
+ * Free in a different order than we allocated, to make sure free-list
+ * separation works correctly.
+ */
+ for (int i = NALLOCS - 1; i >= 0; i--) {
+ pai_dalloc(tsdn, &sec.pai, one_page[i],
+ &deferred_work_generated);
+ }
+ for (int i = NALLOCS - 1; i >= 0; i--) {
+ pai_dalloc(tsdn, &sec.pai, two_page[i],
+ &deferred_work_generated);
+ }
+ expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
+ "Incorrect number of allocations");
+ expect_zu_eq(0, ta.dalloc_count,
+ "Incorrect number of allocations");
+ /*
+ * Check that the n'th most recent deallocated extent is returned for
+ * the n'th alloc request of a given size.
+ */
+ for (int i = 0; i < NALLOCS; i++) {
+ edata_t *alloc1 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
+ /* zero */ false, /* guarded */ false, /* frequent_reuse */
+ false, &deferred_work_generated);
+ edata_t *alloc2 = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE,
+ /* zero */ false, /* guarded */ false, /* frequent_reuse */
+ false, &deferred_work_generated);
+ expect_ptr_eq(one_page[i], alloc1,
+ "Got unexpected allocation");
+ expect_ptr_eq(two_page[i], alloc2,
+ "Got unexpected allocation");
+ }
+ expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
+ "Incorrect number of allocations");
+ expect_zu_eq(0, ta.dalloc_count,
+ "Incorrect number of allocations");
+}
+TEST_END
+
+
+TEST_BEGIN(test_auto_flush) {
+ pai_test_allocator_t ta;
+ pai_test_allocator_init(&ta);
+ sec_t sec;
+ /* See the note above -- we can't use the real tsd. */
+ tsdn_t *tsdn = TSDN_NULL;
+ /*
+ * 10 allocs of 1-PAGE objects means that we should be able to get to 10
+ * pages in the cache before triggering a flush. NALLOCS here is chosen
+ * to match the batch allocation default (4 extra + 1 == 5; so 10
+ * allocations leave the cache exactly empty, even in the presence of
+ * batch allocation on fill).
+ * Eventually, once our allocation batching strategies become smarter,
+ * this should change.
+ */
+ enum { NALLOCS = 10 };
+ edata_t *extra_alloc;
+ edata_t *allocs[NALLOCS];
+ bool deferred_work_generated = false;
+ test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
+ /* max_bytes */ NALLOCS * PAGE);
+ for (int i = 0; i < NALLOCS; i++) {
+ allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
+ /* zero */ false, /* guarded */ false, /* frequent_reuse */
+ false, &deferred_work_generated);
+ expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
+ }
+ extra_alloc = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
+ /* guarded */ false, /* frequent_reuse */ false,
+ &deferred_work_generated);
+ expect_ptr_not_null(extra_alloc, "Unexpected alloc failure");
+ size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;
+ expect_zu_le(NALLOCS + 1, max_allocs,
+ "Incorrect number of allocations");
+ expect_zu_eq(0, ta.dalloc_count,
+ "Incorrect number of allocations");
+ /* Free until the SEC is full, but should not have flushed yet. */
+ for (int i = 0; i < NALLOCS; i++) {
+ pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
+ }
+ expect_zu_le(NALLOCS + 1, max_allocs,
+ "Incorrect number of allocations");
+ expect_zu_eq(0, ta.dalloc_count,
+ "Incorrect number of allocations");
+ /*
+ * Free the extra allocation; this should trigger a flush. The internal
+ * flushing logic is allowed to get complicated; for now, we rely on our
+ * whitebox knowledge of the fact that the SEC flushes bins in their
+ * entirety when it decides to do so, and it has only one bin active
+ * right now.
+ */
+ pai_dalloc(tsdn, &sec.pai, extra_alloc, &deferred_work_generated);
+ expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
+ "Incorrect number of allocations");
+ expect_zu_eq(0, ta.dalloc_count,
+ "Incorrect number of (non-batch) deallocations");
+ expect_zu_eq(NALLOCS + 1, ta.dalloc_batch_count,
+ "Incorrect number of batch deallocations");
+}
+TEST_END
+
+/*
+ * A disable and a flush are *almost* equivalent; the only difference is what
+ * happens afterwards; disabling disallows all future caching as well.
+ */
+static void
+do_disable_flush_test(bool is_disable) {
+ pai_test_allocator_t ta;
+ pai_test_allocator_init(&ta);
+ sec_t sec;
+ /* See the note above -- we can't use the real tsd. */
+ tsdn_t *tsdn = TSDN_NULL;
+
+ enum { NALLOCS = 11 };
+ edata_t *allocs[NALLOCS];
+ bool deferred_work_generated = false;
+ test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
+ /* max_bytes */ NALLOCS * PAGE);
+ for (int i = 0; i < NALLOCS; i++) {
+ allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
+ /* zero */ false, /* guarded */ false, /* frequent_reuse */
+ false, &deferred_work_generated);
+ expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
+ }
+ /* Free all but the last alloc. */
+ for (int i = 0; i < NALLOCS - 1; i++) {
+ pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
+ }
+ size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;
+
+ expect_zu_le(NALLOCS, max_allocs, "Incorrect number of allocations");
+ expect_zu_eq(0, ta.dalloc_count,
+ "Incorrect number of allocations");
+
+ if (is_disable) {
+ sec_disable(tsdn, &sec);
+ } else {
+ sec_flush(tsdn, &sec);
+ }
+
+ expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
+ "Incorrect number of allocations");
+ expect_zu_eq(0, ta.dalloc_count,
+ "Incorrect number of (non-batch) deallocations");
+ expect_zu_le(NALLOCS - 1, ta.dalloc_batch_count,
+ "Incorrect number of batch deallocations");
+ size_t old_dalloc_batch_count = ta.dalloc_batch_count;
+
+ /*
+ * If we free into a disabled SEC, it should forward to the fallback.
+ * Otherwise, the SEC should cache the deallocated extent.
+ */
+ pai_dalloc(tsdn, &sec.pai, allocs[NALLOCS - 1],
+ &deferred_work_generated);
+
+ expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
+ "Incorrect number of allocations");
+ expect_zu_eq(is_disable ? 1 : 0, ta.dalloc_count,
+ "Incorrect number of (non-batch) deallocations");
+ expect_zu_eq(old_dalloc_batch_count, ta.dalloc_batch_count,
+ "Incorrect number of batch deallocations");
+}
+
+TEST_BEGIN(test_disable) {
+ do_disable_flush_test(/* is_disable */ true);
+}
+TEST_END
+
+TEST_BEGIN(test_flush) {
+ do_disable_flush_test(/* is_disable */ false);
+}
+TEST_END
+
+TEST_BEGIN(test_max_alloc_respected) {
+ pai_test_allocator_t ta;
+ pai_test_allocator_init(&ta);
+ sec_t sec;
+ /* See the note above -- we can't use the real tsd. */
+ tsdn_t *tsdn = TSDN_NULL;
+
+ size_t max_alloc = 2 * PAGE;
+ size_t attempted_alloc = 3 * PAGE;
+
+ bool deferred_work_generated = false;
+
+ test_sec_init(&sec, &ta.pai, /* nshards */ 1, max_alloc,
+ /* max_bytes */ 1000 * PAGE);
+
+ for (size_t i = 0; i < 100; i++) {
+ expect_zu_eq(i, ta.alloc_count,
+ "Incorrect number of allocations");
+ expect_zu_eq(i, ta.dalloc_count,
+ "Incorrect number of deallocations");
+ edata_t *edata = pai_alloc(tsdn, &sec.pai, attempted_alloc,
+ PAGE, /* zero */ false, /* guarded */ false,
+ /* frequent_reuse */ false, &deferred_work_generated);
+ expect_ptr_not_null(edata, "Unexpected alloc failure");
+ expect_zu_eq(i + 1, ta.alloc_count,
+ "Incorrect number of allocations");
+ expect_zu_eq(i, ta.dalloc_count,
+ "Incorrect number of deallocations");
+ pai_dalloc(tsdn, &sec.pai, edata, &deferred_work_generated);
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_expand_shrink_delegate) {
+ /*
+ * Expand and shrink shouldn't affect sec state; they should just
+ * delegate to the fallback PAI.
+ */
+ pai_test_allocator_t ta;
+ pai_test_allocator_init(&ta);
+ sec_t sec;
+ /* See the note above -- we can't use the real tsd. */
+ tsdn_t *tsdn = TSDN_NULL;
+
+ bool deferred_work_generated = false;
+
+ test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ 10 * PAGE,
+ /* max_bytes */ 1000 * PAGE);
+ edata_t *edata = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
+ /* zero */ false, /* guarded */ false, /* frequent_reuse */ false,
+ &deferred_work_generated);
+ expect_ptr_not_null(edata, "Unexpected alloc failure");
+
+ bool err = pai_expand(tsdn, &sec.pai, edata, PAGE, 4 * PAGE,
+ /* zero */ false, &deferred_work_generated);
+ expect_false(err, "Unexpected expand failure");
+ expect_zu_eq(1, ta.expand_count, "");
+ ta.expand_return_value = true;
+ err = pai_expand(tsdn, &sec.pai, edata, 4 * PAGE, 3 * PAGE,
+ /* zero */ false, &deferred_work_generated);
+ expect_true(err, "Unexpected expand success");
+ expect_zu_eq(2, ta.expand_count, "");
+
+ err = pai_shrink(tsdn, &sec.pai, edata, 4 * PAGE, 2 * PAGE,
+ &deferred_work_generated);
+ expect_false(err, "Unexpected shrink failure");
+ expect_zu_eq(1, ta.shrink_count, "");
+ ta.shrink_return_value = true;
+ err = pai_shrink(tsdn, &sec.pai, edata, 2 * PAGE, PAGE,
+ &deferred_work_generated);
+ expect_true(err, "Unexpected shrink success");
+ expect_zu_eq(2, ta.shrink_count, "");
+}
+TEST_END
+
+TEST_BEGIN(test_nshards_0) {
+ pai_test_allocator_t ta;
+ pai_test_allocator_init(&ta);
+ sec_t sec;
+ /* See the note above -- we can't use the real tsd. */
+ tsdn_t *tsdn = TSDN_NULL;
+ base_t *base = base_new(TSDN_NULL, /* ind */ 123,
+ &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);
+
+ sec_opts_t opts = SEC_OPTS_DEFAULT;
+ opts.nshards = 0;
+ sec_init(TSDN_NULL, &sec, base, &ta.pai, &opts);
+
+ bool deferred_work_generated = false;
+ edata_t *edata = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
+ /* zero */ false, /* guarded */ false, /* frequent_reuse */ false,
+ &deferred_work_generated);
+ pai_dalloc(tsdn, &sec.pai, edata, &deferred_work_generated);
+
+ /* Both operations should have gone directly to the fallback. */
+ expect_zu_eq(1, ta.alloc_count, "");
+ expect_zu_eq(1, ta.dalloc_count, "");
+}
+TEST_END
+
+static void
+expect_stats_pages(tsdn_t *tsdn, sec_t *sec, size_t npages) {
+ sec_stats_t stats;
+ /*
+ * Check that the stats merging accumulates rather than overwrites by
+ * putting some (made up) data there to begin with.
+ */
+ stats.bytes = 123;
+ sec_stats_merge(tsdn, sec, &stats);
+ assert_zu_le(npages * PAGE + 123, stats.bytes, "");
+}
+
+TEST_BEGIN(test_stats_simple) {
+ pai_test_allocator_t ta;
+ pai_test_allocator_init(&ta);
+ sec_t sec;
+
+ /* See the note above -- we can't use the real tsd. */
+ tsdn_t *tsdn = TSDN_NULL;
+
+ enum {
+ NITERS = 100,
+ FLUSH_PAGES = 20,
+ };
+
+ bool deferred_work_generated = false;
+
+ test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
+ /* max_bytes */ FLUSH_PAGES * PAGE);
+
+ edata_t *allocs[FLUSH_PAGES];
+ for (size_t i = 0; i < FLUSH_PAGES; i++) {
+ allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
+ /* zero */ false, /* guarded */ false, /* frequent_reuse */
+ false, &deferred_work_generated);
+ expect_stats_pages(tsdn, &sec, 0);
+ }
+
+ /* Increase and decrease, without flushing. */
+ for (size_t i = 0; i < NITERS; i++) {
+ for (size_t j = 0; j < FLUSH_PAGES / 2; j++) {
+ pai_dalloc(tsdn, &sec.pai, allocs[j],
+ &deferred_work_generated);
+ expect_stats_pages(tsdn, &sec, j + 1);
+ }
+ for (size_t j = 0; j < FLUSH_PAGES / 2; j++) {
+ allocs[j] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
+ /* zero */ false, /* guarded */ false,
+ /* frequent_reuse */ false,
+ &deferred_work_generated);
+ expect_stats_pages(tsdn, &sec, FLUSH_PAGES / 2 - j - 1);
+ }
+ }
+}
+TEST_END
+
+TEST_BEGIN(test_stats_auto_flush) {
+ pai_test_allocator_t ta;
+ pai_test_allocator_init(&ta);
+ sec_t sec;
+
+ /* See the note above -- we can't use the real tsd. */
+ tsdn_t *tsdn = TSDN_NULL;
+
+ enum {
+ FLUSH_PAGES = 10,
+ };
+
+ test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
+ /* max_bytes */ FLUSH_PAGES * PAGE);
+
+ edata_t *extra_alloc0;
+ edata_t *extra_alloc1;
+ edata_t *allocs[2 * FLUSH_PAGES];
+
+ bool deferred_work_generated = false;
+
+ extra_alloc0 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
+ /* guarded */ false, /* frequent_reuse */ false,
+ &deferred_work_generated);
+ extra_alloc1 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
+ /* guarded */ false, /* frequent_reuse */ false,
+ &deferred_work_generated);
+
+ for (size_t i = 0; i < 2 * FLUSH_PAGES; i++) {
+ allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
+ /* zero */ false, /* guarded */ false, /* frequent_reuse */
+ false, &deferred_work_generated);
+ }
+
+ for (size_t i = 0; i < FLUSH_PAGES; i++) {
+ pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
+ }
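+ /* The extra dalloc pushes the cache past max_bytes and triggers a flush. */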
+ pai_dalloc(tsdn, &sec.pai, extra_alloc0, &deferred_work_generated);
+
+ /* Flush the remaining pages; stats should still work. */
+ for (size_t i = 0; i < FLUSH_PAGES; i++) {
+ pai_dalloc(tsdn, &sec.pai, allocs[FLUSH_PAGES + i],
+ &deferred_work_generated);
+ }
+
+ pai_dalloc(tsdn, &sec.pai, extra_alloc1, &deferred_work_generated);
+
+ expect_stats_pages(tsdn, &sec, ta.alloc_count + ta.alloc_batch_count
+ - ta.dalloc_count - ta.dalloc_batch_count);
+}
+TEST_END
+
+TEST_BEGIN(test_stats_manual_flush) {
+ pai_test_allocator_t ta;
+ pai_test_allocator_init(&ta);
+ sec_t sec;
+
+ /* See the note above -- we can't use the real tsd. */
+ tsdn_t *tsdn = TSDN_NULL;
+
+ enum {
+ FLUSH_PAGES = 10,
+ };
+
+ test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
+ /* max_bytes */ FLUSH_PAGES * PAGE);
+
+ bool deferred_work_generated = false;
+ edata_t *allocs[FLUSH_PAGES];
+ for (size_t i = 0; i < FLUSH_PAGES; i++) {
+ allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
+ /* zero */ false, /* guarded */ false, /* frequent_reuse */
+ false, &deferred_work_generated);
+ expect_stats_pages(tsdn, &sec, 0);
+ }
+
+ /* Dalloc the first half of the allocations. */
+ for (size_t i = 0; i < FLUSH_PAGES / 2; i++) {
+ pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
+ expect_stats_pages(tsdn, &sec, i + 1);
+ }
+
+ sec_flush(tsdn, &sec);
+ expect_stats_pages(tsdn, &sec, 0);
+
+ /* Dalloc the remaining pages; the disable below flushes them. */
+ for (size_t i = 0; i < FLUSH_PAGES / 2; i++) {
+ pai_dalloc(tsdn, &sec.pai, allocs[FLUSH_PAGES / 2 + i],
+ &deferred_work_generated);
+ expect_stats_pages(tsdn, &sec, i + 1);
+ }
+ sec_disable(tsdn, &sec);
+ expect_stats_pages(tsdn, &sec, 0);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_reuse,
+ test_auto_flush,
+ test_disable,
+ test_flush,
+ test_max_alloc_respected,
+ test_expand_shrink_delegate,
+ test_nshards_0,
+ test_stats_simple,
+ test_stats_auto_flush,
+ test_stats_manual_flush);
+}
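A note on the stats checked in the three tests above: expect_stats_pages() reads how many pages the SEC currently caches, and the final cross-check in test_stats_auto_flush is a bookkeeping identity against the test allocator's counters. A minimal restatement (a sketch; it assumes every allocation here is exactly one PAGE, as in these tests):

    /* Whatever the SEC still caches is what it took from the fallback
     * allocator and has not yet returned to it. */
    size_t cached_pages = ta.alloc_count + ta.alloc_batch_count
        - ta.dalloc_count - ta.dalloc_batch_count;
    expect_stats_pages(tsdn, &sec, cached_pages);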
diff --git a/deps/jemalloc/test/unit/seq.c b/deps/jemalloc/test/unit/seq.c
index 19613b0b2..06ed68345 100644
--- a/deps/jemalloc/test/unit/seq.c
+++ b/deps/jemalloc/test/unit/seq.c
@@ -15,10 +15,10 @@ set_data(data_t *data, int num) {
}
static void
-assert_data(data_t *data) {
+expect_data(data_t *data) {
int num = data->arr[0];
for (int i = 0; i < 10; i++) {
- assert_d_eq(num, data->arr[i], "Data consistency error");
+ expect_d_eq(num, data->arr[i], "Data consistency error");
}
}
@@ -37,8 +37,8 @@ seq_reader_thd(void *arg) {
while (iter < 1000 * 1000 - 1) {
bool success = seq_try_load_data(&local_data, &thd_data->data);
if (success) {
- assert_data(&local_data);
- assert_d_le(iter, local_data.arr[0],
+ expect_data(&local_data);
+ expect_d_le(iter, local_data.arr[0],
"Seq read went back in time.");
iter = local_data.arr[0];
}
@@ -82,8 +82,8 @@ TEST_BEGIN(test_seq_simple) {
seq_store_data(&seq, &data);
set_data(&data, 0);
bool success = seq_try_load_data(&data, &seq);
- assert_b_eq(success, true, "Failed non-racing read");
- assert_data(&data);
+ expect_b_eq(success, true, "Failed non-racing read");
+ expect_data(&data);
}
}
TEST_END
diff --git a/deps/jemalloc/test/unit/size_check.c b/deps/jemalloc/test/unit/size_check.c
new file mode 100644
index 000000000..accdc405b
--- /dev/null
+++ b/deps/jemalloc/test/unit/size_check.c
@@ -0,0 +1,79 @@
+#include "test/jemalloc_test.h"
+
+#include "jemalloc/internal/safety_check.h"
+
+bool fake_abort_called;
+void fake_abort(const char *message) {
+ (void)message;
+ fake_abort_called = true;
+}
+
+#define SMALL_SIZE1 SC_SMALL_MAXCLASS
+#define SMALL_SIZE2 (SC_SMALL_MAXCLASS / 2)
+
+#define LARGE_SIZE1 SC_LARGE_MINCLASS
+#define LARGE_SIZE2 (LARGE_SIZE1 * 2)
+
+void *
+test_invalid_size_pre(size_t sz) {
+ safety_check_set_abort(&fake_abort);
+
+ fake_abort_called = false;
+ void *ptr = malloc(sz);
+ assert_ptr_not_null(ptr, "Unexpected failure");
+
+ return ptr;
+}
+
+void
+test_invalid_size_post(void) {
+ expect_true(fake_abort_called, "Safety check didn't fire");
+ safety_check_set_abort(NULL);
+}
+
+TEST_BEGIN(test_invalid_size_sdallocx) {
+ test_skip_if(!config_opt_size_checks);
+
+ void *ptr = test_invalid_size_pre(SMALL_SIZE1);
+ sdallocx(ptr, SMALL_SIZE2, 0);
+ test_invalid_size_post();
+
+ ptr = test_invalid_size_pre(LARGE_SIZE1);
+ sdallocx(ptr, LARGE_SIZE2, 0);
+ test_invalid_size_post();
+}
+TEST_END
+
+TEST_BEGIN(test_invalid_size_sdallocx_nonzero_flag) {
+ test_skip_if(!config_opt_size_checks);
+
+ void *ptr = test_invalid_size_pre(SMALL_SIZE1);
+ sdallocx(ptr, SMALL_SIZE2, MALLOCX_TCACHE_NONE);
+ test_invalid_size_post();
+
+ ptr = test_invalid_size_pre(LARGE_SIZE1);
+ sdallocx(ptr, LARGE_SIZE2, MALLOCX_TCACHE_NONE);
+ test_invalid_size_post();
+}
+TEST_END
+
+TEST_BEGIN(test_invalid_size_sdallocx_noflags) {
+ test_skip_if(!config_opt_size_checks);
+
+ void *ptr = test_invalid_size_pre(SMALL_SIZE1);
+ je_sdallocx_noflags(ptr, SMALL_SIZE2);
+ test_invalid_size_post();
+
+ ptr = test_invalid_size_pre(LARGE_SIZE1);
+ je_sdallocx_noflags(ptr, LARGE_SIZE2);
+ test_invalid_size_post();
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_invalid_size_sdallocx,
+ test_invalid_size_sdallocx_nonzero_flag,
+ test_invalid_size_sdallocx_noflags);
+}
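The three tests above share one mechanism: safety_check_set_abort() swaps jemalloc's abort handler for fake_abort, so a size mismatch is recorded rather than terminating the test. Compressed into one sketch (same helpers and size macros as above):

    void *p = test_invalid_size_pre(SMALL_SIZE1);  /* installs fake_abort */
    sdallocx(p, SMALL_SIZE2, 0);                   /* deliberately wrong size */
    test_invalid_size_post();                      /* expects fake_abort_called */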
diff --git a/deps/jemalloc/test/unit/size_check.sh b/deps/jemalloc/test/unit/size_check.sh
new file mode 100644
index 000000000..352d11076
--- /dev/null
+++ b/deps/jemalloc/test/unit/size_check.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:false"
+fi
diff --git a/deps/jemalloc/test/unit/size_classes.c b/deps/jemalloc/test/unit/size_classes.c
index 694733635..c70eb592d 100644
--- a/deps/jemalloc/test/unit/size_classes.c
+++ b/deps/jemalloc/test/unit/size_classes.c
@@ -7,16 +7,16 @@ get_max_size_class(void) {
size_t sz, miblen, max_size_class;
sz = sizeof(unsigned);
- assert_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL,
+ expect_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL,
0), 0, "Unexpected mallctl() error");
miblen = sizeof(mib) / sizeof(size_t);
- assert_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0,
+ expect_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0,
"Unexpected mallctlnametomib() error");
mib[2] = nlextents - 1;
sz = sizeof(size_t);
- assert_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz,
+ expect_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz,
NULL, 0), 0, "Unexpected mallctlbymib() error");
return max_size_class;
@@ -32,50 +32,50 @@ TEST_BEGIN(test_size_classes) {
for (index = 0, size_class = sz_index2size(index); index < max_index ||
size_class < max_size_class; index++, size_class =
sz_index2size(index)) {
- assert_true(index < max_index,
+ expect_true(index < max_index,
"Loop conditionals should be equivalent; index=%u, "
"size_class=%zu (%#zx)", index, size_class, size_class);
- assert_true(size_class < max_size_class,
+ expect_true(size_class < max_size_class,
"Loop conditionals should be equivalent; index=%u, "
"size_class=%zu (%#zx)", index, size_class, size_class);
- assert_u_eq(index, sz_size2index(size_class),
+ expect_u_eq(index, sz_size2index(size_class),
"sz_size2index() does not reverse sz_index2size(): index=%u"
" --> size_class=%zu --> index=%u --> size_class=%zu",
index, size_class, sz_size2index(size_class),
sz_index2size(sz_size2index(size_class)));
- assert_zu_eq(size_class,
+ expect_zu_eq(size_class,
sz_index2size(sz_size2index(size_class)),
"sz_index2size() does not reverse sz_size2index(): index=%u"
" --> size_class=%zu --> index=%u --> size_class=%zu",
index, size_class, sz_size2index(size_class),
sz_index2size(sz_size2index(size_class)));
- assert_u_eq(index+1, sz_size2index(size_class+1),
+ expect_u_eq(index+1, sz_size2index(size_class+1),
"Next size_class does not round up properly");
- assert_zu_eq(size_class, (index > 0) ?
+ expect_zu_eq(size_class, (index > 0) ?
sz_s2u(sz_index2size(index-1)+1) : sz_s2u(1),
"sz_s2u() does not round up to size class");
- assert_zu_eq(size_class, sz_s2u(size_class-1),
+ expect_zu_eq(size_class, sz_s2u(size_class-1),
"sz_s2u() does not round up to size class");
- assert_zu_eq(size_class, sz_s2u(size_class),
+ expect_zu_eq(size_class, sz_s2u(size_class),
"sz_s2u() does not compute same size class");
- assert_zu_eq(sz_s2u(size_class+1), sz_index2size(index+1),
+ expect_zu_eq(sz_s2u(size_class+1), sz_index2size(index+1),
"sz_s2u() does not round up to next size class");
}
- assert_u_eq(index, sz_size2index(sz_index2size(index)),
+ expect_u_eq(index, sz_size2index(sz_index2size(index)),
"sz_size2index() does not reverse sz_index2size()");
- assert_zu_eq(max_size_class, sz_index2size(
+ expect_zu_eq(max_size_class, sz_index2size(
sz_size2index(max_size_class)),
"sz_index2size() does not reverse sz_size2index()");
- assert_zu_eq(size_class, sz_s2u(sz_index2size(index-1)+1),
+ expect_zu_eq(size_class, sz_s2u(sz_index2size(index-1)+1),
"sz_s2u() does not round up to size class");
- assert_zu_eq(size_class, sz_s2u(size_class-1),
+ expect_zu_eq(size_class, sz_s2u(size_class-1),
"sz_s2u() does not round up to size class");
- assert_zu_eq(size_class, sz_s2u(size_class),
+ expect_zu_eq(size_class, sz_s2u(size_class),
"sz_s2u() does not compute same size class");
}
TEST_END
@@ -90,53 +90,53 @@ TEST_BEGIN(test_psize_classes) {
for (pind = 0, size_class = sz_pind2sz(pind);
pind < max_pind || size_class < max_psz;
pind++, size_class = sz_pind2sz(pind)) {
- assert_true(pind < max_pind,
+ expect_true(pind < max_pind,
"Loop conditionals should be equivalent; pind=%u, "
"size_class=%zu (%#zx)", pind, size_class, size_class);
- assert_true(size_class < max_psz,
+ expect_true(size_class < max_psz,
"Loop conditionals should be equivalent; pind=%u, "
"size_class=%zu (%#zx)", pind, size_class, size_class);
- assert_u_eq(pind, sz_psz2ind(size_class),
+ expect_u_eq(pind, sz_psz2ind(size_class),
"sz_psz2ind() does not reverse sz_pind2sz(): pind=%u -->"
" size_class=%zu --> pind=%u --> size_class=%zu", pind,
size_class, sz_psz2ind(size_class),
sz_pind2sz(sz_psz2ind(size_class)));
- assert_zu_eq(size_class, sz_pind2sz(sz_psz2ind(size_class)),
+ expect_zu_eq(size_class, sz_pind2sz(sz_psz2ind(size_class)),
"sz_pind2sz() does not reverse sz_psz2ind(): pind=%u -->"
" size_class=%zu --> pind=%u --> size_class=%zu", pind,
size_class, sz_psz2ind(size_class),
sz_pind2sz(sz_psz2ind(size_class)));
if (size_class == SC_LARGE_MAXCLASS) {
- assert_u_eq(SC_NPSIZES, sz_psz2ind(size_class + 1),
+ expect_u_eq(SC_NPSIZES, sz_psz2ind(size_class + 1),
"Next size_class does not round up properly");
} else {
- assert_u_eq(pind + 1, sz_psz2ind(size_class + 1),
+ expect_u_eq(pind + 1, sz_psz2ind(size_class + 1),
"Next size_class does not round up properly");
}
- assert_zu_eq(size_class, (pind > 0) ?
+ expect_zu_eq(size_class, (pind > 0) ?
sz_psz2u(sz_pind2sz(pind-1)+1) : sz_psz2u(1),
"sz_psz2u() does not round up to size class");
- assert_zu_eq(size_class, sz_psz2u(size_class-1),
+ expect_zu_eq(size_class, sz_psz2u(size_class-1),
"sz_psz2u() does not round up to size class");
- assert_zu_eq(size_class, sz_psz2u(size_class),
+ expect_zu_eq(size_class, sz_psz2u(size_class),
"sz_psz2u() does not compute same size class");
- assert_zu_eq(sz_psz2u(size_class+1), sz_pind2sz(pind+1),
+ expect_zu_eq(sz_psz2u(size_class+1), sz_pind2sz(pind+1),
"sz_psz2u() does not round up to next size class");
}
- assert_u_eq(pind, sz_psz2ind(sz_pind2sz(pind)),
+ expect_u_eq(pind, sz_psz2ind(sz_pind2sz(pind)),
"sz_psz2ind() does not reverse sz_pind2sz()");
- assert_zu_eq(max_psz, sz_pind2sz(sz_psz2ind(max_psz)),
+ expect_zu_eq(max_psz, sz_pind2sz(sz_psz2ind(max_psz)),
"sz_pind2sz() does not reverse sz_psz2ind()");
- assert_zu_eq(size_class, sz_psz2u(sz_pind2sz(pind-1)+1),
+ expect_zu_eq(size_class, sz_psz2u(sz_pind2sz(pind-1)+1),
"sz_psz2u() does not round up to size class");
- assert_zu_eq(size_class, sz_psz2u(size_class-1),
+ expect_zu_eq(size_class, sz_psz2u(size_class-1),
"sz_psz2u() does not round up to size class");
- assert_zu_eq(size_class, sz_psz2u(size_class),
+ expect_zu_eq(size_class, sz_psz2u(size_class),
"sz_psz2u() does not compute same size class");
}
TEST_END
@@ -147,34 +147,34 @@ TEST_BEGIN(test_overflow) {
max_size_class = get_max_size_class();
max_psz = max_size_class + PAGE;
- assert_u_eq(sz_size2index(max_size_class+1), SC_NSIZES,
+ expect_u_eq(sz_size2index(max_size_class+1), SC_NSIZES,
"sz_size2index() should return NSIZES on overflow");
- assert_u_eq(sz_size2index(ZU(PTRDIFF_MAX)+1), SC_NSIZES,
+ expect_u_eq(sz_size2index(ZU(PTRDIFF_MAX)+1), SC_NSIZES,
"sz_size2index() should return NSIZES on overflow");
- assert_u_eq(sz_size2index(SIZE_T_MAX), SC_NSIZES,
+ expect_u_eq(sz_size2index(SIZE_T_MAX), SC_NSIZES,
"sz_size2index() should return NSIZES on overflow");
- assert_zu_eq(sz_s2u(max_size_class+1), 0,
+ expect_zu_eq(sz_s2u(max_size_class+1), 0,
"sz_s2u() should return 0 for unsupported size");
- assert_zu_eq(sz_s2u(ZU(PTRDIFF_MAX)+1), 0,
+ expect_zu_eq(sz_s2u(ZU(PTRDIFF_MAX)+1), 0,
"sz_s2u() should return 0 for unsupported size");
- assert_zu_eq(sz_s2u(SIZE_T_MAX), 0,
+ expect_zu_eq(sz_s2u(SIZE_T_MAX), 0,
"sz_s2u() should return 0 on overflow");
- assert_u_eq(sz_psz2ind(max_size_class+1), SC_NPSIZES,
+ expect_u_eq(sz_psz2ind(max_size_class+1), SC_NPSIZES,
"sz_psz2ind() should return NPSIZES on overflow");
- assert_u_eq(sz_psz2ind(ZU(PTRDIFF_MAX)+1), SC_NPSIZES,
+ expect_u_eq(sz_psz2ind(ZU(PTRDIFF_MAX)+1), SC_NPSIZES,
"sz_psz2ind() should return NPSIZES on overflow");
- assert_u_eq(sz_psz2ind(SIZE_T_MAX), SC_NPSIZES,
+ expect_u_eq(sz_psz2ind(SIZE_T_MAX), SC_NPSIZES,
"sz_psz2ind() should return NPSIZES on overflow");
- assert_zu_eq(sz_psz2u(max_size_class+1), max_psz,
+ expect_zu_eq(sz_psz2u(max_size_class+1), max_psz,
"sz_psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported"
" size");
- assert_zu_eq(sz_psz2u(ZU(PTRDIFF_MAX)+1), max_psz,
+ expect_zu_eq(sz_psz2u(ZU(PTRDIFF_MAX)+1), max_psz,
"sz_psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported "
"size");
- assert_zu_eq(sz_psz2u(SIZE_T_MAX), max_psz,
+ expect_zu_eq(sz_psz2u(SIZE_T_MAX), max_psz,
"sz_psz2u() should return (LARGE_MAXCLASS + PAGE) on overflow");
}
TEST_END
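The loop in test_size_classes exercises a round-trip property of the size-class maps; stated standalone (a sketch using the same internal helpers checked above):

    size_t req = 100;                 /* any request size */
    szind_t ind = sz_size2index(req); /* smallest class that fits req */
    size_t usz = sz_index2size(ind);  /* that class's usable size */
    expect_zu_ge(usz, req, "Class must fit the request");
    expect_u_eq(ind, sz_size2index(usz), "Maps must round-trip");
    expect_zu_eq(usz, sz_s2u(req), "sz_s2u() must round up to the class");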
diff --git a/deps/jemalloc/test/unit/slab.c b/deps/jemalloc/test/unit/slab.c
index c56af25fe..70fc5c7d1 100644
--- a/deps/jemalloc/test/unit/slab.c
+++ b/deps/jemalloc/test/unit/slab.c
@@ -1,27 +1,33 @@
#include "test/jemalloc_test.h"
+#define INVALID_ARENA_IND ((1U << MALLOCX_ARENA_BITS) - 1)
+
TEST_BEGIN(test_arena_slab_regind) {
szind_t binind;
for (binind = 0; binind < SC_NBINS; binind++) {
size_t regind;
- extent_t slab;
+ edata_t slab;
const bin_info_t *bin_info = &bin_infos[binind];
- extent_init(&slab, NULL, mallocx(bin_info->slab_size,
- MALLOCX_LG_ALIGN(LG_PAGE)), bin_info->slab_size, true,
- binind, 0, extent_state_active, false, true, true,
+ edata_init(&slab, INVALID_ARENA_IND,
+ mallocx(bin_info->slab_size, MALLOCX_LG_ALIGN(LG_PAGE)),
+ bin_info->slab_size, true,
+ binind, 0, extent_state_active, false, true, EXTENT_PAI_PAC,
EXTENT_NOT_HEAD);
- assert_ptr_not_null(extent_addr_get(&slab),
+ expect_ptr_not_null(edata_addr_get(&slab),
"Unexpected malloc() failure");
+ arena_dalloc_bin_locked_info_t dalloc_info;
+ arena_dalloc_bin_locked_begin(&dalloc_info, binind);
for (regind = 0; regind < bin_info->nregs; regind++) {
- void *reg = (void *)((uintptr_t)extent_addr_get(&slab) +
+ void *reg = (void *)((uintptr_t)edata_addr_get(&slab) +
(bin_info->reg_size * regind));
- assert_zu_eq(arena_slab_regind(&slab, binind, reg),
+ expect_zu_eq(arena_slab_regind(&dalloc_info, binind,
+ &slab, reg),
regind,
"Incorrect region index computed for size %zu",
bin_info->reg_size);
}
- free(extent_addr_get(&slab));
+ free(edata_addr_get(&slab));
}
}
TEST_END
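Conceptually the value verified above is the pointer's offset within the slab divided by the region size; the dalloc_info plumbing exists because jemalloc computes it without a hardware divide (precomputed per-bin magic numbers). A sketch of the quantity, not the real implementation:

    size_t regind = ((uintptr_t)reg - (uintptr_t)edata_addr_get(&slab))
        / bin_info->reg_size;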
diff --git a/deps/jemalloc/test/unit/smoothstep.c b/deps/jemalloc/test/unit/smoothstep.c
index 7c5dbb7e0..588c9f44e 100644
--- a/deps/jemalloc/test/unit/smoothstep.c
+++ b/deps/jemalloc/test/unit/smoothstep.c
@@ -26,9 +26,9 @@ TEST_BEGIN(test_smoothstep_integral) {
max = (KQU(1) << (SMOOTHSTEP_BFP-1)) * (SMOOTHSTEP_NSTEPS+1);
min = max - SMOOTHSTEP_NSTEPS;
- assert_u64_ge(sum, min,
+ expect_u64_ge(sum, min,
"Integral too small, even accounting for truncation");
- assert_u64_le(sum, max, "Integral exceeds 1/2");
+ expect_u64_le(sum, max, "Integral exceeds 1/2");
if (false) {
malloc_printf("%"FMTu64" ulps under 1/2 (limit %d)\n",
max - sum, SMOOTHSTEP_NSTEPS);
@@ -49,10 +49,10 @@ TEST_BEGIN(test_smoothstep_monotonic) {
prev_h = 0;
for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
uint64_t h = smoothstep_tab[i];
- assert_u64_ge(h, prev_h, "Piecewise non-monotonic, i=%u", i);
+ expect_u64_ge(h, prev_h, "Piecewise non-monotonic, i=%u", i);
prev_h = h;
}
- assert_u64_eq(smoothstep_tab[SMOOTHSTEP_NSTEPS-1],
+ expect_u64_eq(smoothstep_tab[SMOOTHSTEP_NSTEPS-1],
(KQU(1) << SMOOTHSTEP_BFP), "Last step must equal 1");
}
TEST_END
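The bounds in test_smoothstep_integral follow from the smoothstep polynomial integrating to 1/2 over [0, 1]; for the classic cubic variant (an example -- the table may be generated from a higher-order polynomial with the same symmetry):

    /* integral_0^1 (3x^2 - 2x^3) dx = 1 - 1/2 = 1/2, so a table of
     * SMOOTHSTEP_NSTEPS entries scaled by 2^SMOOTHSTEP_BFP should sum to
     * roughly (1 << (SMOOTHSTEP_BFP - 1)) * (SMOOTHSTEP_NSTEPS + 1) -- the
     * `max` above -- minus at most one ulp of truncation per entry (`min`). */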
@@ -72,7 +72,7 @@ TEST_BEGIN(test_smoothstep_slope) {
for (i = 0; i < SMOOTHSTEP_NSTEPS / 2 + SMOOTHSTEP_NSTEPS % 2; i++) {
uint64_t h = smoothstep_tab[i];
uint64_t delta = h - prev_h;
- assert_u64_ge(delta, prev_delta,
+ expect_u64_ge(delta, prev_delta,
"Slope must monotonically increase in 0.0 <= x <= 0.5, "
"i=%u", i);
prev_h = h;
@@ -84,7 +84,7 @@ TEST_BEGIN(test_smoothstep_slope) {
for (i = SMOOTHSTEP_NSTEPS-1; i >= SMOOTHSTEP_NSTEPS / 2; i--) {
uint64_t h = smoothstep_tab[i];
uint64_t delta = prev_h - h;
- assert_u64_ge(delta, prev_delta,
+ expect_u64_ge(delta, prev_delta,
"Slope must monotonically decrease in 0.5 <= x <= 1.0, "
"i=%u", i);
prev_h = h;
diff --git a/deps/jemalloc/test/unit/stats.c b/deps/jemalloc/test/unit/stats.c
index 646768e88..bbdbd1809 100644
--- a/deps/jemalloc/test/unit/stats.c
+++ b/deps/jemalloc/test/unit/stats.c
@@ -1,25 +1,28 @@
#include "test/jemalloc_test.h"
+#define STRINGIFY_HELPER(x) #x
+#define STRINGIFY(x) STRINGIFY_HELPER(x)
+
TEST_BEGIN(test_stats_summary) {
size_t sz, allocated, active, resident, mapped;
int expected = config_stats ? 0 : ENOENT;
sz = sizeof(size_t);
- assert_d_eq(mallctl("stats.allocated", (void *)&allocated, &sz, NULL,
+ expect_d_eq(mallctl("stats.allocated", (void *)&allocated, &sz, NULL,
0), expected, "Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.active", (void *)&active, &sz, NULL, 0),
+ expect_d_eq(mallctl("stats.active", (void *)&active, &sz, NULL, 0),
expected, "Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.resident", (void *)&resident, &sz, NULL, 0),
+ expect_d_eq(mallctl("stats.resident", (void *)&resident, &sz, NULL, 0),
expected, "Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.mapped", (void *)&mapped, &sz, NULL, 0),
+ expect_d_eq(mallctl("stats.mapped", (void *)&mapped, &sz, NULL, 0),
expected, "Unexpected mallctl() result");
if (config_stats) {
- assert_zu_le(allocated, active,
+ expect_zu_le(allocated, active,
"allocated should be no larger than active");
- assert_zu_lt(active, resident,
+ expect_zu_lt(active, resident,
"active should be less than resident");
- assert_zu_lt(active, mapped,
+ expect_zu_lt(active, mapped,
"active should be less than mapped");
}
}
@@ -34,30 +37,30 @@ TEST_BEGIN(test_stats_large) {
int expected = config_stats ? 0 : ENOENT;
p = mallocx(SC_SMALL_MAXCLASS + 1, MALLOCX_ARENA(0));
- assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ expect_ptr_not_null(p, "Unexpected mallocx() failure");
- assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+ expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
0, "Unexpected mallctl() failure");
sz = sizeof(size_t);
- assert_d_eq(mallctl("stats.arenas.0.large.allocated",
+ expect_d_eq(mallctl("stats.arenas.0.large.allocated",
(void *)&allocated, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
sz = sizeof(uint64_t);
- assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc,
+ expect_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc,
&sz, NULL, 0), expected, "Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc,
+ expect_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc,
&sz, NULL, 0), expected, "Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.large.nrequests",
+ expect_d_eq(mallctl("stats.arenas.0.large.nrequests",
(void *)&nrequests, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
if (config_stats) {
- assert_zu_gt(allocated, 0,
+ expect_zu_gt(allocated, 0,
"allocated should be greater than zero");
- assert_u64_ge(nmalloc, ndalloc,
+ expect_u64_ge(nmalloc, ndalloc,
"nmalloc should be at least as large as ndalloc");
- assert_u64_le(nmalloc, nrequests,
+ expect_u64_le(nmalloc, nrequests,
"nmalloc should no larger than nrequests");
}
@@ -75,54 +78,54 @@ TEST_BEGIN(test_stats_arenas_summary) {
uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged;
little = mallocx(SC_SMALL_MAXCLASS, MALLOCX_ARENA(0));
- assert_ptr_not_null(little, "Unexpected mallocx() failure");
+ expect_ptr_not_null(little, "Unexpected mallocx() failure");
large = mallocx((1U << SC_LG_LARGE_MINCLASS),
MALLOCX_ARENA(0));
- assert_ptr_not_null(large, "Unexpected mallocx() failure");
+ expect_ptr_not_null(large, "Unexpected mallocx() failure");
dallocx(little, 0);
dallocx(large, 0);
- assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
+ expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");
- assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
+ expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
"Unexpected mallctl() failure");
- assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+ expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
0, "Unexpected mallctl() failure");
sz = sizeof(size_t);
- assert_d_eq(mallctl("stats.arenas.0.mapped", (void *)&mapped, &sz, NULL,
+ expect_d_eq(mallctl("stats.arenas.0.mapped", (void *)&mapped, &sz, NULL,
0), expected, "Unexepected mallctl() result");
sz = sizeof(uint64_t);
- assert_d_eq(mallctl("stats.arenas.0.dirty_npurge",
+ expect_d_eq(mallctl("stats.arenas.0.dirty_npurge",
(void *)&dirty_npurge, &sz, NULL, 0), expected,
"Unexepected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.dirty_nmadvise",
+ expect_d_eq(mallctl("stats.arenas.0.dirty_nmadvise",
(void *)&dirty_nmadvise, &sz, NULL, 0), expected,
"Unexepected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.dirty_purged",
+ expect_d_eq(mallctl("stats.arenas.0.dirty_purged",
(void *)&dirty_purged, &sz, NULL, 0), expected,
"Unexepected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.muzzy_npurge",
+ expect_d_eq(mallctl("stats.arenas.0.muzzy_npurge",
(void *)&muzzy_npurge, &sz, NULL, 0), expected,
"Unexepected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.muzzy_nmadvise",
+ expect_d_eq(mallctl("stats.arenas.0.muzzy_nmadvise",
(void *)&muzzy_nmadvise, &sz, NULL, 0), expected,
"Unexepected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.muzzy_purged",
+ expect_d_eq(mallctl("stats.arenas.0.muzzy_purged",
(void *)&muzzy_purged, &sz, NULL, 0), expected,
"Unexepected mallctl() result");
if (config_stats) {
- if (!background_thread_enabled()) {
- assert_u64_gt(dirty_npurge + muzzy_npurge, 0,
+ if (!is_background_thread_enabled() && !opt_hpa) {
+ expect_u64_gt(dirty_npurge + muzzy_npurge, 0,
"At least one purge should have occurred");
}
- assert_u64_le(dirty_nmadvise, dirty_purged,
+ expect_u64_le(dirty_nmadvise, dirty_purged,
"dirty_nmadvise should be no greater than dirty_purged");
- assert_u64_le(muzzy_nmadvise, muzzy_purged,
+ expect_u64_le(muzzy_nmadvise, muzzy_purged,
"muzzy_nmadvise should be no greater than muzzy_purged");
}
}
@@ -150,35 +153,35 @@ TEST_BEGIN(test_stats_arenas_small) {
no_lazy_lock(); /* Lazy locking would dodge tcache testing. */
p = mallocx(SC_SMALL_MAXCLASS, MALLOCX_ARENA(0));
- assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ expect_ptr_not_null(p, "Unexpected mallocx() failure");
- assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
+ expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");
- assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+ expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
0, "Unexpected mallctl() failure");
sz = sizeof(size_t);
- assert_d_eq(mallctl("stats.arenas.0.small.allocated",
+ expect_d_eq(mallctl("stats.arenas.0.small.allocated",
(void *)&allocated, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
sz = sizeof(uint64_t);
- assert_d_eq(mallctl("stats.arenas.0.small.nmalloc", (void *)&nmalloc,
+ expect_d_eq(mallctl("stats.arenas.0.small.nmalloc", (void *)&nmalloc,
&sz, NULL, 0), expected, "Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.small.ndalloc", (void *)&ndalloc,
+ expect_d_eq(mallctl("stats.arenas.0.small.ndalloc", (void *)&ndalloc,
&sz, NULL, 0), expected, "Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.small.nrequests",
+ expect_d_eq(mallctl("stats.arenas.0.small.nrequests",
(void *)&nrequests, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
if (config_stats) {
- assert_zu_gt(allocated, 0,
+ expect_zu_gt(allocated, 0,
"allocated should be greater than zero");
- assert_u64_gt(nmalloc, 0,
+ expect_u64_gt(nmalloc, 0,
"nmalloc should be no greater than zero");
- assert_u64_ge(nmalloc, ndalloc,
+ expect_u64_ge(nmalloc, ndalloc,
"nmalloc should be at least as large as ndalloc");
- assert_u64_gt(nrequests, 0,
+ expect_u64_gt(nrequests, 0,
"nrequests should be greater than zero");
}
@@ -193,27 +196,27 @@ TEST_BEGIN(test_stats_arenas_large) {
int expected = config_stats ? 0 : ENOENT;
p = mallocx((1U << SC_LG_LARGE_MINCLASS), MALLOCX_ARENA(0));
- assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ expect_ptr_not_null(p, "Unexpected mallocx() failure");
- assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+ expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
0, "Unexpected mallctl() failure");
sz = sizeof(size_t);
- assert_d_eq(mallctl("stats.arenas.0.large.allocated",
+ expect_d_eq(mallctl("stats.arenas.0.large.allocated",
(void *)&allocated, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
sz = sizeof(uint64_t);
- assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc,
+ expect_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc,
&sz, NULL, 0), expected, "Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc,
+ expect_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc,
&sz, NULL, 0), expected, "Unexpected mallctl() result");
if (config_stats) {
- assert_zu_gt(allocated, 0,
+ expect_zu_gt(allocated, 0,
"allocated should be greater than zero");
- assert_u64_gt(nmalloc, 0,
+ expect_u64_gt(nmalloc, 0,
"nmalloc should be greater than zero");
- assert_u64_ge(nmalloc, ndalloc,
+ expect_u64_ge(nmalloc, ndalloc,
"nmalloc should be at least as large as ndalloc");
}
@@ -234,85 +237,85 @@ TEST_BEGIN(test_stats_arenas_bins) {
int expected = config_stats ? 0 : ENOENT;
/* Make sure allocation below isn't satisfied by tcache. */
- assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
+ expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");
unsigned arena_ind, old_arena_ind;
sz = sizeof(unsigned);
- assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
+ expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
0, "Arena creation failure");
sz = sizeof(arena_ind);
- assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
+ expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
(void *)&arena_ind, sizeof(arena_ind)), 0,
"Unexpected mallctl() failure");
p = malloc(bin_infos[0].reg_size);
- assert_ptr_not_null(p, "Unexpected malloc() failure");
+ expect_ptr_not_null(p, "Unexpected malloc() failure");
- assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
+ expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");
- assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+ expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
0, "Unexpected mallctl() failure");
char cmd[128];
sz = sizeof(uint64_t);
gen_mallctl_str(cmd, "nmalloc", arena_ind);
- assert_d_eq(mallctl(cmd, (void *)&nmalloc, &sz, NULL, 0), expected,
+ expect_d_eq(mallctl(cmd, (void *)&nmalloc, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
gen_mallctl_str(cmd, "ndalloc", arena_ind);
- assert_d_eq(mallctl(cmd, (void *)&ndalloc, &sz, NULL, 0), expected,
+ expect_d_eq(mallctl(cmd, (void *)&ndalloc, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
gen_mallctl_str(cmd, "nrequests", arena_ind);
- assert_d_eq(mallctl(cmd, (void *)&nrequests, &sz, NULL, 0), expected,
+ expect_d_eq(mallctl(cmd, (void *)&nrequests, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
sz = sizeof(size_t);
gen_mallctl_str(cmd, "curregs", arena_ind);
- assert_d_eq(mallctl(cmd, (void *)&curregs, &sz, NULL, 0), expected,
+ expect_d_eq(mallctl(cmd, (void *)&curregs, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
sz = sizeof(uint64_t);
gen_mallctl_str(cmd, "nfills", arena_ind);
- assert_d_eq(mallctl(cmd, (void *)&nfills, &sz, NULL, 0), expected,
+ expect_d_eq(mallctl(cmd, (void *)&nfills, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
gen_mallctl_str(cmd, "nflushes", arena_ind);
- assert_d_eq(mallctl(cmd, (void *)&nflushes, &sz, NULL, 0), expected,
+ expect_d_eq(mallctl(cmd, (void *)&nflushes, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
gen_mallctl_str(cmd, "nslabs", arena_ind);
- assert_d_eq(mallctl(cmd, (void *)&nslabs, &sz, NULL, 0), expected,
+ expect_d_eq(mallctl(cmd, (void *)&nslabs, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
gen_mallctl_str(cmd, "nreslabs", arena_ind);
- assert_d_eq(mallctl(cmd, (void *)&nreslabs, &sz, NULL, 0), expected,
+ expect_d_eq(mallctl(cmd, (void *)&nreslabs, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
sz = sizeof(size_t);
gen_mallctl_str(cmd, "curslabs", arena_ind);
- assert_d_eq(mallctl(cmd, (void *)&curslabs, &sz, NULL, 0), expected,
+ expect_d_eq(mallctl(cmd, (void *)&curslabs, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
gen_mallctl_str(cmd, "nonfull_slabs", arena_ind);
- assert_d_eq(mallctl(cmd, (void *)&nonfull_slabs, &sz, NULL, 0),
+ expect_d_eq(mallctl(cmd, (void *)&nonfull_slabs, &sz, NULL, 0),
expected, "Unexpected mallctl() result");
if (config_stats) {
- assert_u64_gt(nmalloc, 0,
+ expect_u64_gt(nmalloc, 0,
"nmalloc should be greater than zero");
- assert_u64_ge(nmalloc, ndalloc,
+ expect_u64_ge(nmalloc, ndalloc,
"nmalloc should be at least as large as ndalloc");
- assert_u64_gt(nrequests, 0,
+ expect_u64_gt(nrequests, 0,
"nrequests should be greater than zero");
- assert_zu_gt(curregs, 0,
+ expect_zu_gt(curregs, 0,
"allocated should be greater than zero");
if (opt_tcache) {
- assert_u64_gt(nfills, 0,
+ expect_u64_gt(nfills, 0,
"At least one fill should have occurred");
- assert_u64_gt(nflushes, 0,
+ expect_u64_gt(nflushes, 0,
"At least one flush should have occurred");
}
- assert_u64_gt(nslabs, 0,
+ expect_u64_gt(nslabs, 0,
"At least one slab should have been allocated");
- assert_zu_gt(curslabs, 0,
+ expect_zu_gt(curslabs, 0,
"At least one slab should be currently allocated");
- assert_zu_eq(nonfull_slabs, 0,
+ expect_zu_eq(nonfull_slabs, 0,
"slabs_nonfull should be empty");
}
@@ -327,33 +330,33 @@ TEST_BEGIN(test_stats_arenas_lextents) {
int expected = config_stats ? 0 : ENOENT;
sz = sizeof(size_t);
- assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&hsize, &sz, NULL,
+ expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&hsize, &sz, NULL,
0), 0, "Unexpected mallctl() failure");
p = mallocx(hsize, MALLOCX_ARENA(0));
- assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ expect_ptr_not_null(p, "Unexpected mallocx() failure");
- assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+ expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
0, "Unexpected mallctl() failure");
sz = sizeof(uint64_t);
- assert_d_eq(mallctl("stats.arenas.0.lextents.0.nmalloc",
+ expect_d_eq(mallctl("stats.arenas.0.lextents.0.nmalloc",
(void *)&nmalloc, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
- assert_d_eq(mallctl("stats.arenas.0.lextents.0.ndalloc",
+ expect_d_eq(mallctl("stats.arenas.0.lextents.0.ndalloc",
(void *)&ndalloc, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
sz = sizeof(size_t);
- assert_d_eq(mallctl("stats.arenas.0.lextents.0.curlextents",
+ expect_d_eq(mallctl("stats.arenas.0.lextents.0.curlextents",
(void *)&curlextents, &sz, NULL, 0), expected,
"Unexpected mallctl() result");
if (config_stats) {
- assert_u64_gt(nmalloc, 0,
+ expect_u64_gt(nmalloc, 0,
"nmalloc should be greater than zero");
- assert_u64_ge(nmalloc, ndalloc,
+ expect_u64_ge(nmalloc, ndalloc,
"nmalloc should be at least as large as ndalloc");
- assert_u64_gt(curlextents, 0,
+ expect_u64_gt(curlextents, 0,
"At least one extent should be currently allocated");
}
@@ -361,6 +364,58 @@ TEST_BEGIN(test_stats_arenas_lextents) {
}
TEST_END
+static void
+test_tcache_bytes_for_usize(size_t usize) {
+ uint64_t epoch;
+ size_t tcache_bytes, tcache_stashed_bytes;
+ size_t sz = sizeof(tcache_bytes);
+
+ void *ptr = mallocx(usize, 0);
+
+ expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+ 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctl(
+ "stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL) ".tcache_bytes",
+ &tcache_bytes, &sz, NULL, 0), 0, "Unexpected mallctl failure");
+ assert_d_eq(mallctl(
+ "stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL)
+ ".tcache_stashed_bytes", &tcache_stashed_bytes, &sz, NULL, 0), 0,
+ "Unexpected mallctl failure");
+ size_t tcache_bytes_before = tcache_bytes + tcache_stashed_bytes;
+ dallocx(ptr, 0);
+
+ expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+ 0, "Unexpected mallctl() failure");
+ assert_d_eq(mallctl(
+ "stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL) ".tcache_bytes",
+ &tcache_bytes, &sz, NULL, 0), 0, "Unexpected mallctl failure");
+ assert_d_eq(mallctl(
+ "stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL)
+ ".tcache_stashed_bytes", &tcache_stashed_bytes, &sz, NULL, 0), 0,
+ "Unexpected mallctl failure");
+ size_t tcache_bytes_after = tcache_bytes + tcache_stashed_bytes;
+ assert_zu_eq(tcache_bytes_after - tcache_bytes_before,
+ usize, "Incorrectly attributed a free");
+}
+
+TEST_BEGIN(test_stats_tcache_bytes_small) {
+ test_skip_if(!config_stats);
+ test_skip_if(!opt_tcache);
+ test_skip_if(opt_tcache_max < SC_SMALL_MAXCLASS);
+
+ test_tcache_bytes_for_usize(SC_SMALL_MAXCLASS);
+}
+TEST_END
+
+TEST_BEGIN(test_stats_tcache_bytes_large) {
+ test_skip_if(!config_stats);
+ test_skip_if(!opt_tcache);
+ test_skip_if(opt_tcache_max < SC_LARGE_MINCLASS);
+
+ test_tcache_bytes_for_usize(SC_LARGE_MINCLASS);
+}
+TEST_END
+
int
main(void) {
return test_no_reentrancy(
@@ -370,5 +425,7 @@ main(void) {
test_stats_arenas_small,
test_stats_arenas_large,
test_stats_arenas_bins,
- test_stats_arenas_lextents);
+ test_stats_arenas_lextents,
+ test_stats_tcache_bytes_small,
+ test_stats_tcache_bytes_large);
}
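A note on why test_tcache_bytes_for_usize sums tcache_bytes with tcache_stashed_bytes (an inference from the use-after-free test later in this diff, not stated here): when UAF detection stashes a freed pointer instead of caching it normally, its bytes land in the stashed counter, so only the sum is guaranteed to grow by exactly usize per free:

    size_t cached_total = tcache_bytes + tcache_stashed_bytes;
    /* per free of a cached size class, cached_total grows by usize */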
diff --git a/deps/jemalloc/test/unit/stats_print.c b/deps/jemalloc/test/unit/stats_print.c
index 014d002fd..3b3177534 100644
--- a/deps/jemalloc/test/unit/stats_print.c
+++ b/deps/jemalloc/test/unit/stats_print.c
@@ -136,7 +136,7 @@ parser_tokenize(parser_t *parser) {
size_t token_line JEMALLOC_CC_SILENCE_INIT(1);
size_t token_col JEMALLOC_CC_SILENCE_INIT(0);
- assert_zu_le(parser->pos, parser->len,
+ expect_zu_le(parser->pos, parser->len,
"Position is past end of buffer");
while (state != STATE_ACCEPT) {
@@ -686,7 +686,7 @@ parser_parse_value(parser_t *parser) {
static bool
parser_parse_pair(parser_t *parser) {
- assert_d_eq(parser->token.token_type, TOKEN_TYPE_STRING,
+ expect_d_eq(parser->token.token_type, TOKEN_TYPE_STRING,
"Pair should start with string");
if (parser_tokenize(parser)) {
return true;
@@ -731,7 +731,7 @@ parser_parse_values(parser_t *parser) {
static bool
parser_parse_array(parser_t *parser) {
- assert_d_eq(parser->token.token_type, TOKEN_TYPE_LBRACKET,
+ expect_d_eq(parser->token.token_type, TOKEN_TYPE_LBRACKET,
"Array should start with [");
if (parser_tokenize(parser)) {
return true;
@@ -747,7 +747,7 @@ parser_parse_array(parser_t *parser) {
static bool
parser_parse_pairs(parser_t *parser) {
- assert_d_eq(parser->token.token_type, TOKEN_TYPE_STRING,
+ expect_d_eq(parser->token.token_type, TOKEN_TYPE_STRING,
"Object should start with string");
if (parser_parse_pair(parser)) {
return true;
@@ -782,7 +782,7 @@ parser_parse_pairs(parser_t *parser) {
static bool
parser_parse_object(parser_t *parser) {
- assert_d_eq(parser->token.token_type, TOKEN_TYPE_LBRACE,
+ expect_d_eq(parser->token.token_type, TOKEN_TYPE_LBRACE,
"Object should start with {");
if (parser_tokenize(parser)) {
return true;
@@ -899,9 +899,9 @@ TEST_BEGIN(test_json_parser) {
const char *input = invalid_inputs[i];
parser_t parser;
parser_init(&parser, false);
- assert_false(parser_append(&parser, input),
+ expect_false(parser_append(&parser, input),
"Unexpected input appending failure");
- assert_true(parser_parse(&parser),
+ expect_true(parser_parse(&parser),
"Unexpected parse success for input: %s", input);
parser_fini(&parser);
}
@@ -910,9 +910,9 @@ TEST_BEGIN(test_json_parser) {
const char *input = valid_inputs[i];
parser_t parser;
parser_init(&parser, true);
- assert_false(parser_append(&parser, input),
+ expect_false(parser_append(&parser, input),
"Unexpected input appending failure");
- assert_false(parser_parse(&parser),
+ expect_false(parser_parse(&parser),
"Unexpected parse error for input: %s", input);
parser_fini(&parser);
}
@@ -961,17 +961,17 @@ TEST_BEGIN(test_stats_print_json) {
break;
case 1: {
size_t sz = sizeof(arena_ind);
- assert_d_eq(mallctl("arenas.create", (void *)&arena_ind,
+ expect_d_eq(mallctl("arenas.create", (void *)&arena_ind,
&sz, NULL, 0), 0, "Unexpected mallctl failure");
break;
} case 2: {
size_t mib[3];
size_t miblen = sizeof(mib)/sizeof(size_t);
- assert_d_eq(mallctlnametomib("arena.0.destroy",
+ expect_d_eq(mallctlnametomib("arena.0.destroy",
mib, &miblen), 0,
"Unexpected mallctlnametomib failure");
mib[1] = arena_ind;
- assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL,
+ expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL,
0), 0, "Unexpected mallctlbymib failure");
break;
} default:
@@ -983,7 +983,7 @@ TEST_BEGIN(test_stats_print_json) {
parser_init(&parser, true);
malloc_stats_print(write_cb, (void *)&parser, opts[j]);
- assert_false(parser_parse(&parser),
+ expect_false(parser_parse(&parser),
"Unexpected parse error, opts=\"%s\"", opts[j]);
parser_fini(&parser);
}
diff --git a/deps/jemalloc/test/unit/sz.c b/deps/jemalloc/test/unit/sz.c
new file mode 100644
index 000000000..8ae04b921
--- /dev/null
+++ b/deps/jemalloc/test/unit/sz.c
@@ -0,0 +1,66 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_sz_psz2ind) {
+ /*
+ * Testing page size classes which reside prior to the regular group
+ * with all size classes divisible by page size.
+ * For x86_64 Linux, it's 4096, 8192, 12288, 16384, with corresponding
+ * pszind 0, 1, 2 and 3.
+ */
+ for (size_t i = 0; i < SC_NGROUP; i++) {
+ for (size_t psz = i * PAGE + 1; psz <= (i + 1) * PAGE; psz++) {
+ pszind_t ind = sz_psz2ind(psz);
+ expect_zu_eq(ind, i, "Got %u as sz_psz2ind of %zu", ind,
+ psz);
+ }
+ }
+
+ sc_data_t data;
+ memset(&data, 0, sizeof(data));
+ sc_data_init(&data);
+ /*
+ * 'base' is the base of the first regular group with all size classes
+ * divisible by page size.
+ * For x86_64 Linux, it's 16384, and base_ind is 36.
+ */
+ size_t base_psz = 1 << (SC_LG_NGROUP + LG_PAGE);
+ size_t base_ind = 0;
+ while (base_ind < SC_NSIZES &&
+ reg_size_compute(data.sc[base_ind].lg_base,
+ data.sc[base_ind].lg_delta,
+ data.sc[base_ind].ndelta) < base_psz) {
+ base_ind++;
+ }
+ expect_zu_eq(
+ reg_size_compute(data.sc[base_ind].lg_base,
+ data.sc[base_ind].lg_delta, data.sc[base_ind].ndelta),
+ base_psz, "Size class equal to %zu not found", base_psz);
+ /*
+ * Test different sizes falling into groups after the 'base'. The
+ * increment is PAGE / 3 for the execution speed purpose.
+ */
+ base_ind -= SC_NGROUP;
+ for (size_t psz = base_psz; psz <= 64 * 1024 * 1024; psz += PAGE / 3) {
+ pszind_t ind = sz_psz2ind(psz);
+ sc_t gt_sc = data.sc[ind + base_ind];
+ expect_zu_gt(psz,
+ reg_size_compute(gt_sc.lg_base, gt_sc.lg_delta,
+ gt_sc.ndelta),
+ "Got %u as sz_psz2ind of %zu", ind, psz);
+ sc_t le_sc = data.sc[ind + base_ind + 1];
+ expect_zu_le(psz,
+ reg_size_compute(le_sc.lg_base, le_sc.lg_delta,
+ le_sc.ndelta),
+ "Got %u as sz_psz2ind of %zu", ind, psz);
+ }
+
+ pszind_t max_ind = sz_psz2ind(SC_LARGE_MAXCLASS + 1);
+ expect_lu_eq(max_ind, SC_NPSIZES,
+ "Got %u as sz_psz2ind of %llu", max_ind, SC_LARGE_MAXCLASS);
+}
+TEST_END
+
+int
+main(void) {
+ return test(test_sz_psz2ind);
+}
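Spelled out for a 4 KiB page (values taken from the comment in test_sz_psz2ind, so this is illustrative, not exhaustive), the first loop pins down the leading page-size classes:

    /* Any psz in (i*PAGE, (i+1)*PAGE] maps to pszind i:
     *   sz_psz2ind(1)     == 0      psz in (0,     4096]
     *   sz_psz2ind(4096)  == 0
     *   sz_psz2ind(4097)  == 1      psz in (4096,  8192]
     *   sz_psz2ind(16384) == 3      psz in (12288, 16384]
     */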
diff --git a/deps/jemalloc/test/unit/tcache_max.c b/deps/jemalloc/test/unit/tcache_max.c
new file mode 100644
index 000000000..1f657c859
--- /dev/null
+++ b/deps/jemalloc/test/unit/tcache_max.c
@@ -0,0 +1,175 @@
+#include "test/jemalloc_test.h"
+#include "test/san.h"
+
+const char *malloc_conf = TEST_SAN_UAF_ALIGN_DISABLE;
+
+enum {
+ alloc_option_start = 0,
+ use_malloc = 0,
+ use_mallocx,
+ alloc_option_end
+};
+
+enum {
+ dalloc_option_start = 0,
+ use_free = 0,
+ use_dallocx,
+ use_sdallocx,
+ dalloc_option_end
+};
+
+static unsigned alloc_option, dalloc_option;
+static size_t tcache_max;
+
+static void *
+alloc_func(size_t sz) {
+ void *ret;
+
+ switch (alloc_option) {
+ case use_malloc:
+ ret = malloc(sz);
+ break;
+ case use_mallocx:
+ ret = mallocx(sz, 0);
+ break;
+ default:
+ unreachable();
+ }
+ expect_ptr_not_null(ret, "Unexpected malloc / mallocx failure");
+
+ return ret;
+}
+
+static void
+dalloc_func(void *ptr, size_t sz) {
+ switch (dalloc_option) {
+ case use_free:
+ free(ptr);
+ break;
+ case use_dallocx:
+ dallocx(ptr, 0);
+ break;
+ case use_sdallocx:
+ sdallocx(ptr, sz, 0);
+ break;
+ default:
+ unreachable();
+ }
+}
+
+static size_t
+tcache_bytes_read(void) {
+ uint64_t epoch;
+ assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+ 0, "Unexpected mallctl() failure");
+
+ size_t tcache_bytes;
+ size_t sz = sizeof(tcache_bytes);
+ assert_d_eq(mallctl(
+ "stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL) ".tcache_bytes",
+ &tcache_bytes, &sz, NULL, 0), 0, "Unexpected mallctl failure");
+
+ return tcache_bytes;
+}
+
+static void
+tcache_bytes_check_update(size_t *prev, ssize_t diff) {
+ size_t tcache_bytes = tcache_bytes_read();
+ expect_zu_eq(tcache_bytes, *prev + diff, "tcache bytes not expected");
+
+ *prev += diff;
+}
+
+static void
+test_tcache_bytes_alloc(size_t alloc_size) {
+ expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), 0,
+ "Unexpected tcache flush failure");
+
+ size_t usize = sz_s2u(alloc_size);
+ /* No change is expected if usize is outside of tcache_max range. */
+ bool cached = (usize <= tcache_max);
+ ssize_t diff = cached ? usize : 0;
+
+ void *ptr1 = alloc_func(alloc_size);
+ void *ptr2 = alloc_func(alloc_size);
+
+ size_t bytes = tcache_bytes_read();
+ dalloc_func(ptr2, alloc_size);
+ /* Expect tcache_bytes increase after dalloc */
+ tcache_bytes_check_update(&bytes, diff);
+
+ dalloc_func(ptr1, alloc_size);
+ /* Expect tcache_bytes increase again */
+ tcache_bytes_check_update(&bytes, diff);
+
+ void *ptr3 = alloc_func(alloc_size);
+ if (cached) {
+ expect_ptr_eq(ptr1, ptr3, "Unexpected cached ptr");
+ }
+ /* Expect tcache_bytes decrease after alloc */
+ tcache_bytes_check_update(&bytes, -diff);
+
+ void *ptr4 = alloc_func(alloc_size);
+ if (cached) {
+ expect_ptr_eq(ptr2, ptr4, "Unexpected cached ptr");
+ }
+ /* Expect tcache_bytes decrease again */
+ tcache_bytes_check_update(&bytes, -diff);
+
+ dalloc_func(ptr3, alloc_size);
+ tcache_bytes_check_update(&bytes, diff);
+ dalloc_func(ptr4, alloc_size);
+ tcache_bytes_check_update(&bytes, diff);
+}
+
+static void
+test_tcache_max_impl(void) {
+ size_t sz;
+ sz = sizeof(tcache_max);
+ assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max,
+ &sz, NULL, 0), 0, "Unexpected mallctl() failure");
+
+ /* opt.tcache_max set to 1024 in tcache_max.sh */
+ expect_zu_eq(tcache_max, 1024, "tcache_max not expected");
+
+ test_tcache_bytes_alloc(1);
+ test_tcache_bytes_alloc(tcache_max - 1);
+ test_tcache_bytes_alloc(tcache_max);
+ test_tcache_bytes_alloc(tcache_max + 1);
+
+ test_tcache_bytes_alloc(PAGE - 1);
+ test_tcache_bytes_alloc(PAGE);
+ test_tcache_bytes_alloc(PAGE + 1);
+
+ size_t large;
+ sz = sizeof(large);
+ assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large, &sz, NULL,
+ 0), 0, "Unexpected mallctl() failure");
+
+ test_tcache_bytes_alloc(large - 1);
+ test_tcache_bytes_alloc(large);
+ test_tcache_bytes_alloc(large + 1);
+}
+
+TEST_BEGIN(test_tcache_max) {
+ test_skip_if(!config_stats);
+ test_skip_if(!opt_tcache);
+ test_skip_if(opt_prof);
+ test_skip_if(san_uaf_detection_enabled());
+
+ for (alloc_option = alloc_option_start;
+ alloc_option < alloc_option_end;
+ alloc_option++) {
+ for (dalloc_option = dalloc_option_start;
+ dalloc_option < dalloc_option_end;
+ dalloc_option++) {
+ test_tcache_max_impl();
+ }
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test(test_tcache_max);
+}
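What decides `cached` in test_tcache_bytes_alloc is the usable size, not the request size. With the 1024-byte cap from tcache_max.sh and default x86-64 size classes (an assumption; other configurations round differently), the boundary cases look like:

    /* request -> usize (sz_s2u) -> cached (usize <= 1024)? -> delta per free
     *   1023  ->  1024          -> yes                     ->  1024
     *   1024  ->  1024          -> yes                     ->  1024
     *   1025  ->  1280          -> no                      ->  0
     */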
diff --git a/deps/jemalloc/test/unit/tcache_max.sh b/deps/jemalloc/test/unit/tcache_max.sh
new file mode 100644
index 000000000..4480d733c
--- /dev/null
+++ b/deps/jemalloc/test/unit/tcache_max.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+export MALLOC_CONF="tcache_max:1024"
diff --git a/deps/jemalloc/test/unit/test_hooks.c b/deps/jemalloc/test/unit/test_hooks.c
index ded8698bc..8cd2b3bb1 100644
--- a/deps/jemalloc/test/unit/test_hooks.c
+++ b/deps/jemalloc/test/unit/test_hooks.c
@@ -12,21 +12,21 @@ func_to_hook(int arg1, int arg2) {
return arg1 + arg2;
}
-#define func_to_hook JEMALLOC_HOOK(func_to_hook, test_hooks_libc_hook)
+#define func_to_hook JEMALLOC_TEST_HOOK(func_to_hook, test_hooks_libc_hook)
TEST_BEGIN(unhooked_call) {
test_hooks_libc_hook = NULL;
hook_called = false;
- assert_d_eq(3, func_to_hook(1, 2), "Hooking changed return value.");
- assert_false(hook_called, "Nulling out hook didn't take.");
+ expect_d_eq(3, func_to_hook(1, 2), "Hooking changed return value.");
+ expect_false(hook_called, "Nulling out hook didn't take.");
}
TEST_END
TEST_BEGIN(hooked_call) {
test_hooks_libc_hook = &hook;
hook_called = false;
- assert_d_eq(3, func_to_hook(1, 2), "Hooking changed return value.");
- assert_true(hook_called, "Hook should have executed.");
+ expect_d_eq(3, func_to_hook(1, 2), "Hooking changed return value.");
+ expect_true(hook_called, "Hook should have executed.");
}
TEST_END
diff --git a/deps/jemalloc/test/unit/thread_event.c b/deps/jemalloc/test/unit/thread_event.c
new file mode 100644
index 000000000..e0b88a92d
--- /dev/null
+++ b/deps/jemalloc/test/unit/thread_event.c
@@ -0,0 +1,34 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_next_event_fast) {
+ tsd_t *tsd = tsd_fetch();
+ te_ctx_t ctx;
+ te_ctx_get(tsd, &ctx, true);
+
+ te_ctx_last_event_set(&ctx, 0);
+ te_ctx_current_bytes_set(&ctx, TE_NEXT_EVENT_FAST_MAX - 8U);
+ te_ctx_next_event_set(tsd, &ctx, TE_NEXT_EVENT_FAST_MAX);
+#define E(event, condition, is_alloc) \
+ if (is_alloc && condition) { \
+ event##_event_wait_set(tsd, TE_NEXT_EVENT_FAST_MAX); \
+ }
+ ITERATE_OVER_ALL_EVENTS
+#undef E
+
+ /* Test next_event_fast rolling back to 0. */
+ void *p = malloc(16U);
+ assert_ptr_not_null(p, "malloc() failed");
+ free(p);
+
+ /* Test next_event_fast resuming to be equal to next_event. */
+ void *q = malloc(SC_LOOKUP_MAXCLASS);
+ assert_ptr_not_null(q, "malloc() failed");
+ free(q);
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_next_event_fast);
+}
diff --git a/deps/jemalloc/test/unit/thread_event.sh b/deps/jemalloc/test/unit/thread_event.sh
new file mode 100644
index 000000000..8fcc7d8a7
--- /dev/null
+++ b/deps/jemalloc/test/unit/thread_event.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+if [ "x${enable_prof}" = "x1" ] ; then
+ export MALLOC_CONF="prof:true,lg_prof_sample:0"
+fi
diff --git a/deps/jemalloc/test/unit/ticker.c b/deps/jemalloc/test/unit/ticker.c
index e5790a316..0dd778619 100644
--- a/deps/jemalloc/test/unit/ticker.c
+++ b/deps/jemalloc/test/unit/ticker.c
@@ -11,16 +11,16 @@ TEST_BEGIN(test_ticker_tick) {
ticker_init(&ticker, NTICKS);
for (i = 0; i < NREPS; i++) {
for (j = 0; j < NTICKS; j++) {
- assert_u_eq(ticker_read(&ticker), NTICKS - j,
+ expect_u_eq(ticker_read(&ticker), NTICKS - j,
"Unexpected ticker value (i=%d, j=%d)", i, j);
- assert_false(ticker_tick(&ticker),
+ expect_false(ticker_tick(&ticker),
"Unexpected ticker fire (i=%d, j=%d)", i, j);
}
- assert_u32_eq(ticker_read(&ticker), 0,
+ expect_u32_eq(ticker_read(&ticker), 0,
"Expected ticker depletion");
- assert_true(ticker_tick(&ticker),
+ expect_true(ticker_tick(&ticker),
"Expected ticker fire (i=%d)", i);
- assert_u32_eq(ticker_read(&ticker), NTICKS,
+ expect_u32_eq(ticker_read(&ticker), NTICKS,
"Expected ticker reset");
}
#undef NTICKS
@@ -33,14 +33,14 @@ TEST_BEGIN(test_ticker_ticks) {
ticker_init(&ticker, NTICKS);
- assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
- assert_false(ticker_ticks(&ticker, NTICKS), "Unexpected ticker fire");
- assert_u_eq(ticker_read(&ticker), 0, "Unexpected ticker value");
- assert_true(ticker_ticks(&ticker, NTICKS), "Expected ticker fire");
- assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
+ expect_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
+ expect_false(ticker_ticks(&ticker, NTICKS), "Unexpected ticker fire");
+ expect_u_eq(ticker_read(&ticker), 0, "Unexpected ticker value");
+ expect_true(ticker_ticks(&ticker, NTICKS), "Expected ticker fire");
+ expect_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
- assert_true(ticker_ticks(&ticker, NTICKS + 1), "Expected ticker fire");
- assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
+ expect_true(ticker_ticks(&ticker, NTICKS + 1), "Expected ticker fire");
+ expect_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
#undef NTICKS
}
TEST_END
@@ -51,23 +51,50 @@ TEST_BEGIN(test_ticker_copy) {
ticker_init(&ta, NTICKS);
ticker_copy(&tb, &ta);
- assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
- assert_true(ticker_ticks(&tb, NTICKS + 1), "Expected ticker fire");
- assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
+ expect_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
+ expect_true(ticker_ticks(&tb, NTICKS + 1), "Expected ticker fire");
+ expect_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
ticker_tick(&ta);
ticker_copy(&tb, &ta);
- assert_u_eq(ticker_read(&tb), NTICKS - 1, "Unexpected ticker value");
- assert_true(ticker_ticks(&tb, NTICKS), "Expected ticker fire");
- assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
+ expect_u_eq(ticker_read(&tb), NTICKS - 1, "Unexpected ticker value");
+ expect_true(ticker_ticks(&tb, NTICKS), "Expected ticker fire");
+ expect_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
#undef NTICKS
}
TEST_END
+TEST_BEGIN(test_ticker_geom) {
+ const int32_t ticks = 100;
+ const uint64_t niters = 100 * 1000;
+
+ ticker_geom_t ticker;
+ ticker_geom_init(&ticker, ticks);
+ uint64_t total_ticks = 0;
+ /* Just some random constant. */
+ uint64_t prng_state = 0x343219f93496db9fULL;
+ for (uint64_t i = 0; i < niters; i++) {
+ while (!ticker_geom_tick(&ticker, &prng_state)) {
+ total_ticks++;
+ }
+ }
+ /*
+ * In fact, with this choice of random seed and the PRNG implementation
+ * used at the time this was tested, total_ticks is 95.1% of the
+ * expected ticks.
+ */
+ expect_u64_ge(total_ticks, niters * ticks * 9 / 10,
+ "Mean off by > 10%%");
+ expect_u64_le(total_ticks, niters * ticks * 11 / 10,
+ "Mean off by > 10%%");
+}
+TEST_END
+
int
main(void) {
return test(
test_ticker_tick,
test_ticker_ticks,
- test_ticker_copy);
+ test_ticker_copy,
+ test_ticker_geom);
}
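The +/-10% window in test_ticker_geom is a sanity band around the expected count; restating the constants used above:

    /* A geometric ticker with mean period `ticks` fires roughly once per
     * `ticks` calls, so over `niters` fires:
     *   E[total_ticks] ~= niters * ticks = 100000 * 100 = 10,000,000
     * and the bounds accept [9,000,000, 11,000,000]. */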
diff --git a/deps/jemalloc/test/unit/tsd.c b/deps/jemalloc/test/unit/tsd.c
index 917884dcf..205d87089 100644
--- a/deps/jemalloc/test/unit/tsd.c
+++ b/deps/jemalloc/test/unit/tsd.c
@@ -10,7 +10,7 @@ static int data_cleanup_count;
void
data_cleanup(int *data) {
if (data_cleanup_count == 0) {
- assert_x_eq(*data, MALLOC_TSD_TEST_DATA_INIT,
+ expect_x_eq(*data, MALLOC_TSD_TEST_DATA_INIT,
"Argument passed into cleanup function should match tsd "
"value");
}
@@ -38,7 +38,7 @@ data_cleanup(int *data) {
if (reincarnate) {
void *p = mallocx(1, 0);
- assert_ptr_not_null(p, "Unexpeced mallocx() failure");
+ expect_ptr_not_null(p, "Unexpeced mallocx() failure");
dallocx(p, 0);
}
}
@@ -48,19 +48,26 @@ thd_start(void *arg) {
int d = (int)(uintptr_t)arg;
void *p;
+ /*
+ * Test free before tsd init -- the free fast path (which does not
+ * explicitly check for NULL) has to tolerate this case, and fall back
+ * to free_default.
+ */
+ free(NULL);
+
tsd_t *tsd = tsd_fetch();
- assert_x_eq(tsd_test_data_get(tsd), MALLOC_TSD_TEST_DATA_INIT,
+ expect_x_eq(tsd_test_data_get(tsd), MALLOC_TSD_TEST_DATA_INIT,
"Initial tsd get should return initialization value");
p = malloc(1);
- assert_ptr_not_null(p, "Unexpected malloc() failure");
+ expect_ptr_not_null(p, "Unexpected malloc() failure");
tsd_test_data_set(tsd, d);
- assert_x_eq(tsd_test_data_get(tsd), d,
+ expect_x_eq(tsd_test_data_get(tsd), d,
"After tsd set, tsd get should return value that was set");
d = 0;
- assert_x_eq(tsd_test_data_get(tsd), (int)(uintptr_t)arg,
+ expect_x_eq(tsd_test_data_get(tsd), (int)(uintptr_t)arg,
"Resetting local data should have no effect on tsd");
tsd_test_callback_set(tsd, &data_cleanup);
@@ -84,7 +91,7 @@ TEST_BEGIN(test_tsd_sub_thread) {
* We reincarnate twice in the data cleanup, so it should execute at
* least 3 times.
*/
- assert_x_ge(data_cleanup_count, 3,
+ expect_x_ge(data_cleanup_count, 3,
"Cleanup function should have executed multiple times.");
}
TEST_END
@@ -95,28 +102,28 @@ thd_start_reincarnated(void *arg) {
assert(tsd);
void *p = malloc(1);
- assert_ptr_not_null(p, "Unexpected malloc() failure");
+ expect_ptr_not_null(p, "Unexpected malloc() failure");
/* Manually trigger reincarnation. */
- assert_ptr_not_null(tsd_arena_get(tsd),
+ expect_ptr_not_null(tsd_arena_get(tsd),
"Should have tsd arena set.");
tsd_cleanup((void *)tsd);
- assert_ptr_null(*tsd_arenap_get_unsafe(tsd),
+ expect_ptr_null(*tsd_arenap_get_unsafe(tsd),
"TSD arena should have been cleared.");
- assert_u_eq(tsd_state_get(tsd), tsd_state_purgatory,
+ expect_u_eq(tsd_state_get(tsd), tsd_state_purgatory,
"TSD state should be purgatory\n");
free(p);
- assert_u_eq(tsd_state_get(tsd), tsd_state_reincarnated,
+ expect_u_eq(tsd_state_get(tsd), tsd_state_reincarnated,
"TSD state should be reincarnated\n");
p = mallocx(1, MALLOCX_TCACHE_NONE);
- assert_ptr_not_null(p, "Unexpected malloc() failure");
- assert_ptr_null(*tsd_arenap_get_unsafe(tsd),
+ expect_ptr_not_null(p, "Unexpected malloc() failure");
+ expect_ptr_null(*tsd_arenap_get_unsafe(tsd),
"Should not have tsd arena set after reincarnation.");
free(p);
tsd_cleanup((void *)tsd);
- assert_ptr_null(*tsd_arenap_get_unsafe(tsd),
+ expect_ptr_null(*tsd_arenap_get_unsafe(tsd),
"TSD arena should have been cleared after 2nd cleanup.");
return NULL;
@@ -206,46 +213,46 @@ TEST_BEGIN(test_tsd_global_slow) {
* Spin-wait.
*/
}
- assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
+ expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
tsd_global_slow_inc(tsd_tsdn(tsd));
free(mallocx(1, 0));
- assert_false(tsd_fast(tsd), "");
+ expect_false(tsd_fast(tsd), "");
atomic_store_u32(&data.phase, 2, ATOMIC_SEQ_CST);
/* PHASE 3 */
while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 3) {
}
- assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
+ expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
/* Increase again, so that we can test multiple fast/slow changes. */
tsd_global_slow_inc(tsd_tsdn(tsd));
atomic_store_u32(&data.phase, 4, ATOMIC_SEQ_CST);
free(mallocx(1, 0));
- assert_false(tsd_fast(tsd), "");
+ expect_false(tsd_fast(tsd), "");
/* PHASE 5 */
while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 5) {
}
- assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
+ expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
tsd_global_slow_dec(tsd_tsdn(tsd));
atomic_store_u32(&data.phase, 6, ATOMIC_SEQ_CST);
/* We only decreased once; things should still be slow. */
free(mallocx(1, 0));
- assert_false(tsd_fast(tsd), "");
+ expect_false(tsd_fast(tsd), "");
/* PHASE 7 */
while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 7) {
}
- assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
+ expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
tsd_global_slow_dec(tsd_tsdn(tsd));
atomic_store_u32(&data.phase, 8, ATOMIC_SEQ_CST);
/* We incremented and then decremented twice; we should be fast now. */
free(mallocx(1, 0));
- assert_true(!originally_fast || tsd_fast(tsd), "");
+ expect_true(!originally_fast || tsd_fast(tsd), "");
/* PHASE 9 */
while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 9) {
}
- assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
+ expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
thd_join(thd, NULL);
}
diff --git a/deps/jemalloc/test/unit/uaf.c b/deps/jemalloc/test/unit/uaf.c
new file mode 100644
index 000000000..a8433c298
--- /dev/null
+++ b/deps/jemalloc/test/unit/uaf.c
@@ -0,0 +1,262 @@
+#include "test/jemalloc_test.h"
+#include "test/arena_util.h"
+#include "test/san.h"
+
+#include "jemalloc/internal/cache_bin.h"
+#include "jemalloc/internal/san.h"
+#include "jemalloc/internal/safety_check.h"
+
+const char *malloc_conf = TEST_SAN_UAF_ALIGN_ENABLE;
+
+static size_t san_uaf_align;
+
+static bool fake_abort_called;
+void fake_abort(const char *message) {
+ (void)message;
+ fake_abort_called = true;
+}
+
+static void
+test_write_after_free_pre(void) {
+ safety_check_set_abort(&fake_abort);
+ fake_abort_called = false;
+}
+
+static void
+test_write_after_free_post(void) {
+ assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
+ 0, "Unexpected tcache flush failure");
+ expect_true(fake_abort_called, "Use-after-free check didn't fire.");
+ safety_check_set_abort(NULL);
+}
+
+static bool
+uaf_detection_enabled(void) {
+ if (!config_uaf_detection || !san_uaf_detection_enabled()) {
+ return false;
+ }
+
+ ssize_t lg_san_uaf_align;
+ size_t sz = sizeof(lg_san_uaf_align);
+ assert_d_eq(mallctl("opt.lg_san_uaf_align", &lg_san_uaf_align, &sz,
+ NULL, 0), 0, "Unexpected mallctl failure");
+ if (lg_san_uaf_align < 0) {
+ return false;
+ }
+ assert_zd_ge(lg_san_uaf_align, LG_PAGE, "san_uaf_align out of range");
+ san_uaf_align = (size_t)1 << lg_san_uaf_align;
+
+ bool tcache_enabled;
+ sz = sizeof(tcache_enabled);
+ assert_d_eq(mallctl("thread.tcache.enabled", &tcache_enabled, &sz, NULL,
+ 0), 0, "Unexpected mallctl failure");
+ if (!tcache_enabled) {
+ return false;
+ }
+
+ return true;
+}
+
+static size_t
+read_tcache_stashed_bytes(unsigned arena_ind) {
+ if (!config_stats) {
+ return 0;
+ }
+
+ uint64_t epoch;
+ assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
+ 0, "Unexpected mallctl() failure");
+
+ size_t tcache_stashed_bytes;
+ size_t sz = sizeof(tcache_stashed_bytes);
+ assert_d_eq(mallctl(
+ "stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL)
+ ".tcache_stashed_bytes", &tcache_stashed_bytes, &sz, NULL, 0), 0,
+ "Unexpected mallctl failure");
+
+ return tcache_stashed_bytes;
+}
+
+static void
+test_use_after_free(size_t alloc_size, bool write_after_free) {
+ void *ptr = (void *)(uintptr_t)san_uaf_align;
+ assert_true(cache_bin_nonfast_aligned(ptr), "Wrong alignment");
+ ptr = (void *)((uintptr_t)123 * (uintptr_t)san_uaf_align);
+ assert_true(cache_bin_nonfast_aligned(ptr), "Wrong alignment");
+ ptr = (void *)((uintptr_t)san_uaf_align + 1);
+ assert_false(cache_bin_nonfast_aligned(ptr), "Wrong alignment");
+
+ /*
+ * Disable purging (-1) so that all dirty pages remain committed, to
+ * make use-after-free tolerable.
+ */
+ unsigned arena_ind = do_arena_create(-1, -1);
+ int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
+
+ size_t n_max = san_uaf_align * 2;
+ void **items = mallocx(n_max * sizeof(void *), flags);
+ assert_ptr_not_null(items, "Unexpected mallocx failure");
+
+ bool found = false;
+ size_t iter = 0;
+ char magic = 's';
+ assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
+ 0, "Unexpected tcache flush failure");
+ while (!found) {
+ ptr = mallocx(alloc_size, flags);
+ assert_ptr_not_null(ptr, "Unexpected mallocx failure");
+
+ found = cache_bin_nonfast_aligned(ptr);
+ *(char *)ptr = magic;
+ items[iter] = ptr;
+ assert_zu_lt(iter++, n_max, "No aligned ptr found");
+ }
+
+ if (write_after_free) {
+ test_write_after_free_pre();
+ }
+ bool junked = false;
+ while (iter-- != 0) {
+ char *volatile mem = items[iter];
+ assert_c_eq(*mem, magic, "Unexpected memory content");
+ size_t stashed_before = read_tcache_stashed_bytes(arena_ind);
+ free(mem);
+ if (*mem != magic) {
+ junked = true;
+ assert_c_eq(*mem, (char)uaf_detect_junk,
+ "Unexpected junk-filling bytes");
+ if (write_after_free) {
+ *(char *)mem = magic + 1;
+ }
+
+ size_t stashed_after = read_tcache_stashed_bytes(
+ arena_ind);
+ /*
+ * An edge case is the deallocation above triggering the
+ * tcache GC event, in which case the stashed pointers
+ * may get flushed immediately, before returning from
+ * free(). Treat these cases as checked already.
+ */
+ if (stashed_after <= stashed_before) {
+ fake_abort_called = true;
+ }
+ }
+ /* Flush tcache (including stashed). */
+ assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
+ 0, "Unexpected tcache flush failure");
+ }
+ expect_true(junked, "Aligned ptr not junked");
+ if (write_after_free) {
+ test_write_after_free_post();
+ }
+
+ dallocx(items, flags);
+ do_arena_destroy(arena_ind);
+}
+
+TEST_BEGIN(test_read_after_free) {
+ test_skip_if(!uaf_detection_enabled());
+
+ test_use_after_free(sizeof(void *), /* write_after_free */ false);
+ test_use_after_free(sizeof(void *) + 1, /* write_after_free */ false);
+ test_use_after_free(16, /* write_after_free */ false);
+ test_use_after_free(20, /* write_after_free */ false);
+ test_use_after_free(32, /* write_after_free */ false);
+ test_use_after_free(33, /* write_after_free */ false);
+ test_use_after_free(48, /* write_after_free */ false);
+ test_use_after_free(64, /* write_after_free */ false);
+ test_use_after_free(65, /* write_after_free */ false);
+ test_use_after_free(129, /* write_after_free */ false);
+ test_use_after_free(255, /* write_after_free */ false);
+ test_use_after_free(256, /* write_after_free */ false);
+}
+TEST_END
+
+TEST_BEGIN(test_write_after_free) {
+ test_skip_if(!uaf_detection_enabled());
+
+ test_use_after_free(sizeof(void *), /* write_after_free */ true);
+ test_use_after_free(sizeof(void *) + 1, /* write_after_free */ true);
+ test_use_after_free(16, /* write_after_free */ true);
+ test_use_after_free(20, /* write_after_free */ true);
+ test_use_after_free(32, /* write_after_free */ true);
+ test_use_after_free(33, /* write_after_free */ true);
+ test_use_after_free(48, /* write_after_free */ true);
+ test_use_after_free(64, /* write_after_free */ true);
+ test_use_after_free(65, /* write_after_free */ true);
+ test_use_after_free(129, /* write_after_free */ true);
+ test_use_after_free(255, /* write_after_free */ true);
+ test_use_after_free(256, /* write_after_free */ true);
+}
+TEST_END
+
+static bool
+check_allocated_intact(void **allocated, size_t n_alloc) {
+ for (unsigned i = 0; i < n_alloc; i++) {
+ void *ptr = *(void **)allocated[i];
+ bool found = false;
+ for (unsigned j = 0; j < n_alloc; j++) {
+ if (ptr == allocated[j]) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+TEST_BEGIN(test_use_after_free_integration) {
+ test_skip_if(!uaf_detection_enabled());
+
+ unsigned arena_ind = do_arena_create(-1, -1);
+ int flags = MALLOCX_ARENA(arena_ind);
+
+ size_t n_alloc = san_uaf_align * 2;
+ void **allocated = mallocx(n_alloc * sizeof(void *), flags);
+ assert_ptr_not_null(allocated, "Unexpected mallocx failure");
+
+ for (unsigned i = 0; i < n_alloc; i++) {
+ allocated[i] = mallocx(sizeof(void *) * 8, flags);
+ assert_ptr_not_null(allocated[i], "Unexpected mallocx failure");
+ if (i > 0) {
+ /* Emulate a circular list. */
+ *(void **)allocated[i] = allocated[i - 1];
+ }
+ }
+ *(void **)allocated[0] = allocated[n_alloc - 1];
+ expect_true(check_allocated_intact(allocated, n_alloc),
+ "Allocated data corrupted");
+
+ for (unsigned i = 0; i < n_alloc; i++) {
+ free(allocated[i]);
+ }
+ /* Read-after-free */
+ expect_false(check_allocated_intact(allocated, n_alloc),
+ "Junk-filling not detected");
+
+ test_write_after_free_pre();
+ for (unsigned i = 0; i < n_alloc; i++) {
+ allocated[i] = mallocx(sizeof(void *), flags);
+ assert_ptr_not_null(allocated[i], "Unexpected mallocx failure");
+ *(void **)allocated[i] = (void *)(uintptr_t)i;
+ }
+ /* Write-after-free */
+ for (unsigned i = 0; i < n_alloc; i++) {
+ free(allocated[i]);
+ *(void **)allocated[i] = NULL;
+ }
+ test_write_after_free_post();
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_read_after_free,
+ test_write_after_free,
+ test_use_after_free_integration);
+}
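Editorial note (not part of the patch): the helpers above reach into jemalloc internals (cache_bin_nonfast_aligned(), san_uaf_detection_enabled()), but the configuration they depend on is also visible through the same public mallctl names that uaf_detection_enabled() reads. A standalone sketch, assuming an unprefixed jemalloc build with UAF detection compiled in and enabled via MALLOC_CONF; only mallctl names that appear in the test are used:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	ssize_t lg_align;
	size_t sz = sizeof(lg_align);

	/* As in the test above, a negative value means detection is off. */
	if (mallctl("opt.lg_san_uaf_align", &lg_align, &sz, NULL, 0) != 0
	    || lg_align < 0) {
		printf("UAF detection disabled\n");
		return 0;
	}
	printf("UAF detection alignment: %zu bytes\n",
	    (size_t)1 << lg_align);

	/* Flushing the tcache also flushes any stashed (freed) pointers. */
	mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
	return 0;
}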
diff --git a/deps/jemalloc/test/unit/witness.c b/deps/jemalloc/test/unit/witness.c
index 5986da400..5a6c44827 100644
--- a/deps/jemalloc/test/unit/witness.c
+++ b/deps/jemalloc/test/unit/witness.c
@@ -34,7 +34,7 @@ witness_depth_error_intercept(const witness_list_t *witnesses,
static int
witness_comp(const witness_t *a, void *oa, const witness_t *b, void *ob) {
- assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank");
+ expect_u_eq(a->rank, b->rank, "Witnesses should have equal rank");
assert(oa == (void *)a);
assert(ob == (void *)b);
@@ -45,7 +45,7 @@ witness_comp(const witness_t *a, void *oa, const witness_t *b, void *ob) {
static int
witness_comp_reverse(const witness_t *a, void *oa, const witness_t *b,
void *ob) {
- assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank");
+ expect_u_eq(a->rank, b->rank, "Witnesses should have equal rank");
assert(oa == (void *)a);
assert(ob == (void *)b);
@@ -121,9 +121,9 @@ TEST_BEGIN(test_witness_comp) {
witness_init(&c, "c", 1, witness_comp_reverse, &c);
witness_assert_not_owner(&witness_tsdn, &c);
- assert_false(saw_lock_error, "Unexpected witness lock error");
+ expect_false(saw_lock_error, "Unexpected witness lock error");
witness_lock(&witness_tsdn, &c);
- assert_true(saw_lock_error, "Expected witness lock error");
+ expect_true(saw_lock_error, "Expected witness lock error");
witness_unlock(&witness_tsdn, &c);
witness_assert_depth(&witness_tsdn, 1);
@@ -131,9 +131,9 @@ TEST_BEGIN(test_witness_comp) {
witness_init(&d, "d", 1, NULL, NULL);
witness_assert_not_owner(&witness_tsdn, &d);
- assert_false(saw_lock_error, "Unexpected witness lock error");
+ expect_false(saw_lock_error, "Unexpected witness lock error");
witness_lock(&witness_tsdn, &d);
- assert_true(saw_lock_error, "Expected witness lock error");
+ expect_true(saw_lock_error, "Expected witness lock error");
witness_unlock(&witness_tsdn, &d);
witness_assert_depth(&witness_tsdn, 1);
@@ -162,9 +162,9 @@ TEST_BEGIN(test_witness_reversal) {
witness_lock(&witness_tsdn, &b);
witness_assert_depth(&witness_tsdn, 1);
- assert_false(saw_lock_error, "Unexpected witness lock error");
+ expect_false(saw_lock_error, "Unexpected witness lock error");
witness_lock(&witness_tsdn, &a);
- assert_true(saw_lock_error, "Expected witness lock error");
+ expect_true(saw_lock_error, "Expected witness lock error");
witness_unlock(&witness_tsdn, &a);
witness_assert_depth(&witness_tsdn, 1);
@@ -195,11 +195,11 @@ TEST_BEGIN(test_witness_recursive) {
witness_init(&a, "a", 1, NULL, NULL);
witness_lock(&witness_tsdn, &a);
- assert_false(saw_lock_error, "Unexpected witness lock error");
- assert_false(saw_not_owner_error, "Unexpected witness not owner error");
+ expect_false(saw_lock_error, "Unexpected witness lock error");
+ expect_false(saw_not_owner_error, "Unexpected witness not owner error");
witness_lock(&witness_tsdn, &a);
- assert_true(saw_lock_error, "Expected witness lock error");
- assert_true(saw_not_owner_error, "Expected witness not owner error");
+ expect_true(saw_lock_error, "Expected witness lock error");
+ expect_true(saw_not_owner_error, "Expected witness not owner error");
witness_unlock(&witness_tsdn, &a);
@@ -225,9 +225,9 @@ TEST_BEGIN(test_witness_unlock_not_owned) {
witness_init(&a, "a", 1, NULL, NULL);
- assert_false(saw_owner_error, "Unexpected owner error");
+ expect_false(saw_owner_error, "Unexpected owner error");
witness_unlock(&witness_tsdn, &a);
- assert_true(saw_owner_error, "Expected owner error");
+ expect_true(saw_owner_error, "Expected owner error");
witness_assert_lockless(&witness_tsdn);
@@ -250,14 +250,14 @@ TEST_BEGIN(test_witness_depth) {
witness_init(&a, "a", 1, NULL, NULL);
- assert_false(saw_depth_error, "Unexpected depth error");
+ expect_false(saw_depth_error, "Unexpected depth error");
witness_assert_lockless(&witness_tsdn);
witness_assert_depth(&witness_tsdn, 0);
witness_lock(&witness_tsdn, &a);
witness_assert_lockless(&witness_tsdn);
witness_assert_depth(&witness_tsdn, 0);
- assert_true(saw_depth_error, "Expected depth error");
+ expect_true(saw_depth_error, "Expected depth error");
witness_unlock(&witness_tsdn, &a);
diff --git a/deps/jemalloc/test/unit/zero.c b/deps/jemalloc/test/unit/zero.c
index 271fd5cba..d3e81f1bc 100644
--- a/deps/jemalloc/test/unit/zero.c
+++ b/deps/jemalloc/test/unit/zero.c
@@ -8,21 +8,21 @@ test_zero(size_t sz_min, size_t sz_max) {
sz_prev = 0;
s = (uint8_t *)mallocx(sz_min, 0);
- assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
+ expect_ptr_not_null((void *)s, "Unexpected mallocx() failure");
for (sz = sallocx(s, 0); sz <= sz_max;
sz_prev = sz, sz = sallocx(s, 0)) {
if (sz_prev > 0) {
- assert_u_eq(s[0], MAGIC,
+ expect_u_eq(s[0], MAGIC,
"Previously allocated byte %zu/%zu is corrupted",
ZU(0), sz_prev);
- assert_u_eq(s[sz_prev-1], MAGIC,
+ expect_u_eq(s[sz_prev-1], MAGIC,
"Previously allocated byte %zu/%zu is corrupted",
sz_prev-1, sz_prev);
}
for (i = sz_prev; i < sz; i++) {
- assert_u_eq(s[i], 0x0,
+ expect_u_eq(s[i], 0x0,
"Newly allocated byte %zu/%zu isn't zero-filled",
i, sz);
s[i] = MAGIC;
@@ -30,7 +30,7 @@ test_zero(size_t sz_min, size_t sz_max) {
if (xallocx(s, sz+1, 0, 0) == sz) {
s = (uint8_t *)rallocx(s, sz+1, 0);
- assert_ptr_not_null((void *)s,
+ expect_ptr_not_null((void *)s,
"Unexpected rallocx() failure");
}
}
diff --git a/deps/jemalloc/test/unit/zero_realloc_abort.c b/deps/jemalloc/test/unit/zero_realloc_abort.c
new file mode 100644
index 000000000..a880d104b
--- /dev/null
+++ b/deps/jemalloc/test/unit/zero_realloc_abort.c
@@ -0,0 +1,26 @@
+#include "test/jemalloc_test.h"
+
+#include <signal.h>
+
+static bool abort_called = false;
+
+void set_abort_called(void) {
+	abort_called = true;
+}
+
+TEST_BEGIN(test_realloc_abort) {
+ abort_called = false;
+ safety_check_set_abort(&set_abort_called);
+ void *ptr = mallocx(42, 0);
+ expect_ptr_not_null(ptr, "Unexpected mallocx error");
+ ptr = realloc(ptr, 0);
+ expect_true(abort_called, "Realloc with zero size didn't abort");
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_realloc_abort);
+}
+
diff --git a/deps/jemalloc/test/unit/zero_realloc_abort.sh b/deps/jemalloc/test/unit/zero_realloc_abort.sh
new file mode 100644
index 000000000..37daeeaa1
--- /dev/null
+++ b/deps/jemalloc/test/unit/zero_realloc_abort.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+export MALLOC_CONF="zero_realloc:abort"
diff --git a/deps/jemalloc/test/unit/zero_realloc_alloc.c b/deps/jemalloc/test/unit/zero_realloc_alloc.c
new file mode 100644
index 000000000..65e07bdbe
--- /dev/null
+++ b/deps/jemalloc/test/unit/zero_realloc_alloc.c
@@ -0,0 +1,48 @@
+#include "test/jemalloc_test.h"
+
+static uint64_t
+allocated() {
+ if (!config_stats) {
+ return 0;
+ }
+ uint64_t allocated;
+ size_t sz = sizeof(allocated);
+ expect_d_eq(mallctl("thread.allocated", (void *)&allocated, &sz, NULL,
+ 0), 0, "Unexpected mallctl failure");
+ return allocated;
+}
+
+static uint64_t
+deallocated() {
+ if (!config_stats) {
+ return 0;
+ }
+ uint64_t deallocated;
+ size_t sz = sizeof(deallocated);
+ expect_d_eq(mallctl("thread.deallocated", (void *)&deallocated, &sz,
+ NULL, 0), 0, "Unexpected mallctl failure");
+ return deallocated;
+}
+
+TEST_BEGIN(test_realloc_alloc) {
+ void *ptr = mallocx(1, 0);
+ expect_ptr_not_null(ptr, "Unexpected mallocx error");
+ uint64_t allocated_before = allocated();
+ uint64_t deallocated_before = deallocated();
+ ptr = realloc(ptr, 0);
+ uint64_t allocated_after = allocated();
+ uint64_t deallocated_after = deallocated();
+ if (config_stats) {
+ expect_u64_lt(allocated_before, allocated_after,
+ "Unexpected stats change");
+ expect_u64_lt(deallocated_before, deallocated_after,
+ "Unexpected stats change");
+ }
+ dallocx(ptr, 0);
+}
+TEST_END
+int
+main(void) {
+ return test(
+ test_realloc_alloc);
+}
diff --git a/deps/jemalloc/test/unit/zero_realloc_alloc.sh b/deps/jemalloc/test/unit/zero_realloc_alloc.sh
new file mode 100644
index 000000000..802687cff
--- /dev/null
+++ b/deps/jemalloc/test/unit/zero_realloc_alloc.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+export MALLOC_CONF="zero_realloc:alloc"
diff --git a/deps/jemalloc/test/unit/zero_realloc_free.c b/deps/jemalloc/test/unit/zero_realloc_free.c
new file mode 100644
index 000000000..baed86c92
--- /dev/null
+++ b/deps/jemalloc/test/unit/zero_realloc_free.c
@@ -0,0 +1,33 @@
+#include "test/jemalloc_test.h"
+
+static uint64_t
+deallocated() {
+ if (!config_stats) {
+ return 0;
+ }
+ uint64_t deallocated;
+ size_t sz = sizeof(deallocated);
+ expect_d_eq(mallctl("thread.deallocated", (void *)&deallocated, &sz,
+ NULL, 0), 0, "Unexpected mallctl failure");
+ return deallocated;
+}
+
+TEST_BEGIN(test_realloc_free) {
+ void *ptr = mallocx(42, 0);
+ expect_ptr_not_null(ptr, "Unexpected mallocx error");
+ uint64_t deallocated_before = deallocated();
+ ptr = realloc(ptr, 0);
+ uint64_t deallocated_after = deallocated();
+ expect_ptr_null(ptr, "Realloc didn't free");
+ if (config_stats) {
+ expect_u64_gt(deallocated_after, deallocated_before,
+ "Realloc didn't free");
+ }
+}
+TEST_END
+
+int
+main(void) {
+ return test(
+ test_realloc_free);
+}
diff --git a/deps/jemalloc/test/unit/zero_realloc_free.sh b/deps/jemalloc/test/unit/zero_realloc_free.sh
new file mode 100644
index 000000000..51b01c915
--- /dev/null
+++ b/deps/jemalloc/test/unit/zero_realloc_free.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+export MALLOC_CONF="zero_realloc:free"
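Editorial note (not part of the patch): taken together, the three new zero_realloc tests pin down what realloc(ptr, 0) does under each zero_realloc setting: "alloc" returns a live minimal allocation (thread.allocated grows and the pointer is later passed to dallocx), "free" releases the pointer and returns NULL (thread.deallocated grows), and "abort" fires the safety-check abort hook. A hedged sketch of how an application observes the first two modes; the behavior is taken from the tests themselves:

#include <stdio.h>
#include <stdlib.h>

int
main(void) {
	void *p = malloc(42);
	if (p == NULL) {
		return 1;
	}
	void *q = realloc(p, 0);
	if (q == NULL) {
		/* MALLOC_CONF="zero_realloc:free": p has been released. */
		printf("zero-size realloc freed the allocation\n");
	} else {
		/* MALLOC_CONF="zero_realloc:alloc": q is a real allocation. */
		printf("zero-size realloc returned a new allocation\n");
		free(q);
	}
	return 0;
}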
diff --git a/deps/jemalloc/test/unit/zero_reallocs.c b/deps/jemalloc/test/unit/zero_reallocs.c
new file mode 100644
index 000000000..66c7a404a
--- /dev/null
+++ b/deps/jemalloc/test/unit/zero_reallocs.c
@@ -0,0 +1,40 @@
+#include "test/jemalloc_test.h"
+
+static size_t
+zero_reallocs() {
+ if (!config_stats) {
+ return 0;
+ }
+ size_t count = 12345;
+ size_t sz = sizeof(count);
+
+ expect_d_eq(mallctl("stats.zero_reallocs", (void *)&count, &sz,
+ NULL, 0), 0, "Unexpected mallctl failure");
+ return count;
+}
+
+TEST_BEGIN(test_zero_reallocs) {
+ test_skip_if(!config_stats);
+
+ for (size_t i = 0; i < 100; ++i) {
+ void *ptr = mallocx(i * i + 1, 0);
+ expect_ptr_not_null(ptr, "Unexpected mallocx error");
+ size_t count = zero_reallocs();
+ expect_zu_eq(i, count, "Incorrect zero realloc count");
+ ptr = realloc(ptr, 0);
+ expect_ptr_null(ptr, "Realloc didn't free");
+ count = zero_reallocs();
+ expect_zu_eq(i + 1, count, "Realloc didn't adjust count");
+ }
+}
+TEST_END
+
+int
+main(void) {
+ /*
+ * We expect explicit counts; reentrant tests run multiple times, so
+ * counts leak across runs.
+ */
+ return test_no_reentrancy(
+ test_zero_reallocs);
+}
diff --git a/deps/jemalloc/test/unit/zero_reallocs.sh b/deps/jemalloc/test/unit/zero_reallocs.sh
new file mode 100644
index 000000000..51b01c915
--- /dev/null
+++ b/deps/jemalloc/test/unit/zero_reallocs.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+export MALLOC_CONF="zero_realloc:free"