Diffstat (limited to 'deps/jemalloc/src/huge.c')
-rw-r--r-- | deps/jemalloc/src/huge.c | 238
1 files changed, 98 insertions, 140 deletions
diff --git a/deps/jemalloc/src/huge.c b/deps/jemalloc/src/huge.c
index 8abd8c00c..1e9a66512 100644
--- a/deps/jemalloc/src/huge.c
+++ b/deps/jemalloc/src/huge.c
@@ -15,21 +15,12 @@ huge_node_get(const void *ptr)
 }
 
 static bool
-huge_node_set(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
+huge_node_set(const void *ptr, extent_node_t *node)
 {
 
 	assert(extent_node_addr_get(node) == ptr);
 	assert(!extent_node_achunk_get(node));
-	return (chunk_register(tsdn, ptr, node));
-}
-
-static void
-huge_node_reset(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
-{
-	bool err;
-
-	err = huge_node_set(tsdn, ptr, node);
-	assert(!err);
+	return (chunk_register(ptr, node));
 }
 
 static void
@@ -40,39 +31,39 @@ huge_node_unset(const void *ptr, const extent_node_t *node)
 }
 
 void *
-huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero)
+huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
+    tcache_t *tcache)
 {
+	size_t usize;
 
-	assert(usize == s2u(usize));
+	usize = s2u(size);
+	if (usize == 0) {
+		/* size_t overflow. */
+		return (NULL);
+	}
 
-	return (huge_palloc(tsdn, arena, usize, chunksize, zero));
+	return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache));
 }
 
 void *
-huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
-    bool zero)
+huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
+    bool zero, tcache_t *tcache)
 {
 	void *ret;
-	size_t ausize;
-	arena_t *iarena;
+	size_t usize;
 	extent_node_t *node;
-	size_t sn;
 	bool is_zeroed;
 
 	/* Allocate one or more contiguous chunks for this request. */
 
-	assert(!tsdn_null(tsdn) || arena != NULL);
-
-	ausize = sa2u(usize, alignment);
-	if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
+	usize = sa2u(size, alignment);
+	if (unlikely(usize == 0))
 		return (NULL);
-	assert(ausize >= chunksize);
+	assert(usize >= chunksize);
 
 	/* Allocate an extent node with which to track the chunk. */
-	iarena = (!tsdn_null(tsdn)) ? arena_ichoose(tsdn_tsd(tsdn), NULL) :
-	    a0get();
-	node = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_node_t)),
-	    CACHELINE, false, NULL, true, iarena);
+	node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
+	    CACHELINE, false, tcache, true, arena);
 	if (node == NULL)
 		return (NULL);
 
@@ -81,35 +72,33 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 	 * it is possible to make correct junk/zero fill decisions below.
 	 */
 	is_zeroed = zero;
-	if (likely(!tsdn_null(tsdn)))
-		arena = arena_choose(tsdn_tsd(tsdn), arena);
-	if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn,
-	    arena, usize, alignment, &sn, &is_zeroed)) == NULL) {
-		idalloctm(tsdn, node, NULL, true, true);
+	arena = arena_choose(tsd, arena);
+	if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
+	    size, alignment, &is_zeroed)) == NULL) {
+		idalloctm(tsd, node, tcache, true);
 		return (NULL);
 	}
 
-	extent_node_init(node, arena, ret, usize, sn, is_zeroed, true);
+	extent_node_init(node, arena, ret, size, is_zeroed, true);
 
-	if (huge_node_set(tsdn, ret, node)) {
-		arena_chunk_dalloc_huge(tsdn, arena, ret, usize, sn);
-		idalloctm(tsdn, node, NULL, true, true);
+	if (huge_node_set(ret, node)) {
+		arena_chunk_dalloc_huge(arena, ret, size);
+		idalloctm(tsd, node, tcache, true);
 		return (NULL);
 	}
 
 	/* Insert node into huge. */
-	malloc_mutex_lock(tsdn, &arena->huge_mtx);
+	malloc_mutex_lock(&arena->huge_mtx);
 	ql_elm_new(node, ql_link);
 	ql_tail_insert(&arena->huge, node, ql_link);
-	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+	malloc_mutex_unlock(&arena->huge_mtx);
 
 	if (zero || (config_fill && unlikely(opt_zero))) {
 		if (!is_zeroed)
-			memset(ret, 0, usize);
+			memset(ret, 0, size);
 	} else if (config_fill && unlikely(opt_junk_alloc))
-		memset(ret, JEMALLOC_ALLOC_JUNK, usize);
+		memset(ret, 0xa5, size);
 
-	arena_decay_tick(tsdn, arena);
 	return (ret);
 }
 
@@ -127,7 +116,7 @@ huge_dalloc_junk(void *ptr, size_t usize)
 		 * unmapped.
 		 */
 		if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
-			memset(ptr, JEMALLOC_FREE_JUNK, usize);
+			memset(ptr, 0x5a, usize);
 	}
 }
 #ifdef JEMALLOC_JET
@@ -137,8 +126,8 @@ huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
 #endif
 
 static void
-huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
-    size_t usize_min, size_t usize_max, bool zero)
+huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
+    size_t usize_max, bool zero)
 {
 	size_t usize, usize_next;
 	extent_node_t *node;
@@ -162,28 +151,24 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
 	if (oldsize > usize) {
 		size_t sdiff = oldsize - usize;
 		if (config_fill && unlikely(opt_junk_free)) {
-			memset((void *)((uintptr_t)ptr + usize),
-			    JEMALLOC_FREE_JUNK, sdiff);
+			memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
 			post_zeroed = false;
 		} else {
-			post_zeroed = !chunk_purge_wrapper(tsdn, arena,
-			    &chunk_hooks, ptr, CHUNK_CEILING(oldsize), usize,
-			    sdiff);
+			post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
+			    ptr, CHUNK_CEILING(oldsize), usize, sdiff);
 		}
 	} else
 		post_zeroed = pre_zeroed;
 
-	malloc_mutex_lock(tsdn, &arena->huge_mtx);
+	malloc_mutex_lock(&arena->huge_mtx);
 	/* Update the size of the huge allocation. */
-	huge_node_unset(ptr, node);
 	assert(extent_node_size_get(node) != usize);
 	extent_node_size_set(node, usize);
-	huge_node_reset(tsdn, ptr, node);
 	/* Update zeroed. */
 	extent_node_zeroed_set(node, post_zeroed);
-	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+	malloc_mutex_unlock(&arena->huge_mtx);
 
-	arena_chunk_ralloc_huge_similar(tsdn, arena, ptr, oldsize, usize);
+	arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);
 
 	/* Fill if necessary (growing). */
 	if (oldsize < usize) {
@@ -193,15 +178,14 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
 			    usize - oldsize);
 			}
 		} else if (config_fill && unlikely(opt_junk_alloc)) {
-			memset((void *)((uintptr_t)ptr + oldsize),
-			    JEMALLOC_ALLOC_JUNK, usize - oldsize);
+			memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
+			    oldsize);
 		}
 	}
 }
 
 static bool
-huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
-    size_t usize)
+huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
 {
 	extent_node_t *node;
 	arena_t *arena;
@@ -212,7 +196,7 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
 	node = huge_node_get(ptr);
 	arena = extent_node_arena_get(node);
 	pre_zeroed = extent_node_zeroed_get(node);
-	chunk_hooks = chunk_hooks_get(tsdn, arena);
+	chunk_hooks = chunk_hooks_get(arena);
 
 	assert(oldsize > usize);
 
@@ -229,59 +213,53 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
 			    sdiff);
 			post_zeroed = false;
 		} else {
-			post_zeroed = !chunk_purge_wrapper(tsdn, arena,
-			    &chunk_hooks, CHUNK_ADDR2BASE((uintptr_t)ptr +
-			    usize), CHUNK_CEILING(oldsize),
+			post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
+			    CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
+			    CHUNK_CEILING(oldsize),
 			    CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
 		}
 	} else
 		post_zeroed = pre_zeroed;
 
-	malloc_mutex_lock(tsdn, &arena->huge_mtx);
+	malloc_mutex_lock(&arena->huge_mtx);
 	/* Update the size of the huge allocation. */
-	huge_node_unset(ptr, node);
 	extent_node_size_set(node, usize);
-	huge_node_reset(tsdn, ptr, node);
 	/* Update zeroed. */
 	extent_node_zeroed_set(node, post_zeroed);
-	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+	malloc_mutex_unlock(&arena->huge_mtx);
 
 	/* Zap the excess chunks. */
-	arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize,
-	    extent_node_sn_get(node));
+	arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);
 
 	return (false);
 }
 
 static bool
-huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
-    size_t usize, bool zero) {
+huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero) {
 	extent_node_t *node;
 	arena_t *arena;
 	bool is_zeroed_subchunk, is_zeroed_chunk;
 
 	node = huge_node_get(ptr);
 	arena = extent_node_arena_get(node);
-	malloc_mutex_lock(tsdn, &arena->huge_mtx);
+	malloc_mutex_lock(&arena->huge_mtx);
 	is_zeroed_subchunk = extent_node_zeroed_get(node);
-	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+	malloc_mutex_unlock(&arena->huge_mtx);
 
 	/*
-	 * Use is_zeroed_chunk to detect whether the trailing memory is zeroed,
-	 * update extent's zeroed field, and zero as necessary.
+	 * Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so
+	 * that it is possible to make correct junk/zero fill decisions below.
 	 */
-	is_zeroed_chunk = false;
-	if (arena_chunk_ralloc_huge_expand(tsdn, arena, ptr, oldsize, usize,
+	is_zeroed_chunk = zero;
+
+	if (arena_chunk_ralloc_huge_expand(arena, ptr, oldsize, usize,
 	     &is_zeroed_chunk))
 		return (true);
 
-	malloc_mutex_lock(tsdn, &arena->huge_mtx);
-	huge_node_unset(ptr, node);
+	malloc_mutex_lock(&arena->huge_mtx);
+	/* Update the size of the huge allocation. */
 	extent_node_size_set(node, usize);
-	extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
-	    is_zeroed_chunk);
-	huge_node_reset(tsdn, ptr, node);
-	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+	malloc_mutex_unlock(&arena->huge_mtx);
 
 	if (zero || (config_fill && unlikely(opt_zero))) {
 		if (!is_zeroed_subchunk) {
@@ -294,21 +272,19 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
 			    CHUNK_CEILING(oldsize));
 		}
 	} else if (config_fill && unlikely(opt_junk_alloc)) {
-		memset((void *)((uintptr_t)ptr + oldsize), JEMALLOC_ALLOC_JUNK,
-		    usize - oldsize);
+		memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
+		    oldsize);
 	}
 
 	return (false);
 }
 
 bool
-huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
+huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
     size_t usize_max, bool zero)
 {
 
 	assert(s2u(oldsize) == oldsize);
-	/* The following should have been caught by callers. */
-	assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS);
 
 	/* Both allocations must be huge to avoid a move. */
 	if (oldsize < chunksize || usize_max < chunksize)
@@ -316,18 +292,13 @@ huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
 
 	if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
 		/* Attempt to expand the allocation in-place. */
-		if (!huge_ralloc_no_move_expand(tsdn, ptr, oldsize, usize_max,
-		    zero)) {
-			arena_decay_tick(tsdn, huge_aalloc(ptr));
+		if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max, zero))
 			return (false);
-		}
 		/* Try again, this time with usize_min. */
 		if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
-		    CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(tsdn,
-		    ptr, oldsize, usize_min, zero)) {
-			arena_decay_tick(tsdn, huge_aalloc(ptr));
+		    CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(ptr,
+		    oldsize, usize_min, zero))
 			return (false);
-		}
 	}
 
 	/*
@@ -336,46 +307,36 @@ huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
 	 */
 	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min) &&
 	    CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
-		huge_ralloc_no_move_similar(tsdn, ptr, oldsize, usize_min,
-		    usize_max, zero);
-		arena_decay_tick(tsdn, huge_aalloc(ptr));
+		huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max,
+		    zero);
 		return (false);
 	}
 
 	/* Attempt to shrink the allocation in-place. */
-	if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) {
-		if (!huge_ralloc_no_move_shrink(tsdn, ptr, oldsize,
-		    usize_max)) {
-			arena_decay_tick(tsdn, huge_aalloc(ptr));
-			return (false);
-		}
-	}
+	if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max))
+		return (huge_ralloc_no_move_shrink(ptr, oldsize, usize_max));
 	return (true);
 }
 
 static void *
-huge_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
-    size_t alignment, bool zero)
+huge_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
+    size_t alignment, bool zero, tcache_t *tcache)
 {
 
 	if (alignment <= chunksize)
-		return (huge_malloc(tsdn, arena, usize, zero));
-	return (huge_palloc(tsdn, arena, usize, alignment, zero));
+		return (huge_malloc(tsd, arena, usize, zero, tcache));
+	return (huge_palloc(tsd, arena, usize, alignment, zero, tcache));
}
 
 void *
-huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
-    size_t usize, size_t alignment, bool zero, tcache_t *tcache)
+huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
+    size_t alignment, bool zero, tcache_t *tcache)
 {
 	void *ret;
 	size_t copysize;
 
-	/* The following should have been caught by callers. */
-	assert(usize > 0 && usize <= HUGE_MAXCLASS);
-
 	/* Try to avoid moving the allocation. */
-	if (!huge_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, usize,
-	    zero))
+	if (!huge_ralloc_no_move(ptr, oldsize, usize, usize, zero))
 		return (ptr);
 
 	/*
@@ -383,19 +344,19 @@ huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
 	 * different size class.  In that case, fall back to allocating new
 	 * space and copying.
 	 */
-	ret = huge_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, alignment,
-	    zero);
+	ret = huge_ralloc_move_helper(tsd, arena, usize, alignment, zero,
+	    tcache);
 	if (ret == NULL)
 		return (NULL);
 
 	copysize = (usize < oldsize) ? usize : oldsize;
 	memcpy(ret, ptr, copysize);
-	isqalloc(tsd, ptr, oldsize, tcache, true);
+	isqalloc(tsd, ptr, oldsize, tcache);
 	return (ret);
 }
 
 void
-huge_dalloc(tsdn_t *tsdn, void *ptr)
+huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
 {
 	extent_node_t *node;
 	arena_t *arena;
@@ -403,18 +364,15 @@ huge_dalloc(tsdn_t *tsdn, void *ptr)
 	node = huge_node_get(ptr);
 	arena = extent_node_arena_get(node);
 	huge_node_unset(ptr, node);
-	malloc_mutex_lock(tsdn, &arena->huge_mtx);
+	malloc_mutex_lock(&arena->huge_mtx);
 	ql_remove(&arena->huge, node, ql_link);
-	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+	malloc_mutex_unlock(&arena->huge_mtx);
 
 	huge_dalloc_junk(extent_node_addr_get(node),
 	    extent_node_size_get(node));
-	arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node),
-	    extent_node_addr_get(node), extent_node_size_get(node),
-	    extent_node_sn_get(node));
-	idalloctm(tsdn, node, NULL, true, true);
-
-	arena_decay_tick(tsdn, arena);
+	arena_chunk_dalloc_huge(extent_node_arena_get(node),
+	    extent_node_addr_get(node), extent_node_size_get(node));
+	idalloctm(tsd, node, tcache, true);
 }
 
 arena_t *
@@ -425,7 +383,7 @@ huge_aalloc(const void *ptr)
 }
 
 size_t
-huge_salloc(tsdn_t *tsdn, const void *ptr)
+huge_salloc(const void *ptr)
 {
 	size_t size;
 	extent_node_t *node;
@@ -433,15 +391,15 @@ huge_salloc(tsdn_t *tsdn, const void *ptr)
 
 	node = huge_node_get(ptr);
 	arena = extent_node_arena_get(node);
-	malloc_mutex_lock(tsdn, &arena->huge_mtx);
+	malloc_mutex_lock(&arena->huge_mtx);
 	size = extent_node_size_get(node);
-	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+	malloc_mutex_unlock(&arena->huge_mtx);
 
 	return (size);
 }
 
 prof_tctx_t *
-huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
+huge_prof_tctx_get(const void *ptr)
 {
 	prof_tctx_t *tctx;
 	extent_node_t *node;
@@ -449,29 +407,29 @@ huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
 
 	node = huge_node_get(ptr);
 	arena = extent_node_arena_get(node);
-	malloc_mutex_lock(tsdn, &arena->huge_mtx);
+	malloc_mutex_lock(&arena->huge_mtx);
 	tctx = extent_node_prof_tctx_get(node);
-	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+	malloc_mutex_unlock(&arena->huge_mtx);
 
 	return (tctx);
 }
 
 void
-huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx)
+huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
 {
 	extent_node_t *node;
 	arena_t *arena;
 
 	node = huge_node_get(ptr);
 	arena = extent_node_arena_get(node);
-	malloc_mutex_lock(tsdn, &arena->huge_mtx);
+	malloc_mutex_lock(&arena->huge_mtx);
 	extent_node_prof_tctx_set(node, tctx);
-	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+	malloc_mutex_unlock(&arena->huge_mtx);
 }
 
 void
-huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr)
+huge_prof_tctx_reset(const void *ptr)
 {
 
-	huge_prof_tctx_set(tsdn, ptr, (prof_tctx_t *)(uintptr_t)1U);
+	huge_prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
 }
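A note on the overflow guard this patch restores in huge_malloc(): the removed
code asserted that its caller had already converted the request to a usable
size (usize == s2u(usize)), while the reinstated code derives usize from the
raw request itself and treats s2u() returning 0 as size_t overflow. The
standalone sketch below illustrates that "0 means overflow" convention; the
4 MiB CHUNKSIZE and the chunk_ceiling() helper are simplified stand-ins for
jemalloc's s2u()/CHUNK_CEILING machinery, not the real size-class logic.

#include <stdint.h>
#include <stdio.h>

#define CHUNKSIZE	((size_t)1 << 22)	/* assumed 4 MiB chunk, for illustration */

/* Round size up to a chunk boundary, returning 0 on size_t overflow. */
static size_t
chunk_ceiling(size_t size)
{

	/* (size + CHUNKSIZE - 1) would wrap for huge requests; detect it. */
	if (size > SIZE_MAX - (CHUNKSIZE - 1))
		return (0);
	return ((size + CHUNKSIZE - 1) & ~(CHUNKSIZE - 1));
}

int
main(void)
{
	size_t requests[] = {1, CHUNKSIZE, CHUNKSIZE + 1, SIZE_MAX};
	size_t i;

	for (i = 0; i < sizeof(requests) / sizeof(requests[0]); i++) {
		size_t usize = chunk_ceiling(requests[i]);
		if (usize == 0) {
			/* Mirrors huge_malloc(): 0 signals overflow, refuse. */
			printf("%zu -> overflow, allocation refused\n",
			    requests[i]);
		} else
			printf("%zu -> usable size %zu\n", requests[i], usize);
	}
	return (0);
}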
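Similarly, the 0xa5 and 0x5a literals written back throughout this patch are
jemalloc's junk-fill bytes; the lines being removed refer to them by the names
they later acquired, JEMALLOC_ALLOC_JUNK and JEMALLOC_FREE_JUNK. Below is a toy
wrapper sketching the debugging idea behind them; junk_malloc() and junk_free()
are hypothetical names, not jemalloc API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ALLOC_JUNK	0xa5	/* fresh memory: uninitialized reads stand out */
#define FREE_JUNK	0x5a	/* freed memory: use-after-free stands out */

static void *
junk_malloc(size_t size)
{
	void *p = malloc(size);

	if (p != NULL)
		memset(p, ALLOC_JUNK, size);
	return (p);
}

static void
junk_free(void *p, size_t size)
{

	memset(p, FREE_JUNK, size);	/* poison before releasing */
	free(p);
}

int
main(void)
{
	unsigned char *p = junk_malloc(8);

	if (p == NULL)
		return (1);
	printf("after alloc: %02x\n", (unsigned)p[0]);	/* prints a5 */
	junk_free(p, 8);
	return (0);
}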