diff options
author | Ivan Maidanski <ivmai@mail.ru> | 2023-01-20 21:16:51 +0300 |
---|---|---|
committer | Ivan Maidanski <ivmai@mail.ru> | 2023-01-24 12:28:20 +0300 |
commit | ba137368d1add5a24ff2fafe2ae2587c765cad34 (patch) | |
tree | a949b2ab5b8d0e5da33ea02b4b65d28f53d00149 /mallocx.c | |
parent | 94eda0c39eea3f6fce7990c0984542ab2c58c79d (diff) | |
download | bdwgc-ba137368d1add5a24ff2fafe2ae2587c765cad34.tar.gz |
Support GC_memalign with alignments greater than HBLKSIZE
Issue #510 (bdwgc).
* allchblk.c (GC_allochblk_nth, GC_allochblk, next_hblk_fits_better,
find_nonbl_hblk): Add align_m1 argument.
* include/private/gc_priv.h (GC_allochblk, GC_alloc_large): Likewise.
* malloc.c (GC_alloc_large): Likewise.
* allchblk.c (GC_allochblk): Check that there is no overflow in
blocks*HBLKSIZE+align_m1; pass align_m1 to GC_allochblk_nth; try
start_list again (with may_split) if non-zero align_m1.
* allchblk.c (ALIGN_PAD_SZ): New macro.
* allchblk.c (next_hblk_fits_better): Define and use next_ofs local
variable; adjust next_hbp passed to GC_is_black_listed().
* allchblk.c (find_nonbl_hblk): Adjust search_end and last_hbp based
on align_m1 value.
* allchblk.c (GC_allochblk_nth): Add assertion that align_m1+1 is
a power of two and that align_m1+1 is 1 or a multiple of HBLKSIZE;
define and use align_ofs local variable; add assertion that last_hbp
(before GC_install_header() call) is multiple of align_m1+1.
* include/gc/gc.h (GC_memalign): Update comment to mention the
restriction on align value (should be a power of two and not less than
size of a pointer).
* include/private/gc_priv.h (GC_allochblk, GC_alloc_large): Update
comment (to mention align_m1 argument).
* include/private/gc_priv.h (GC_generic_malloc_aligned): Declare
function.
* include/private/gcconfig.h [(NACL || I386 && (EMSCRIPTEN || WASI))
&& !HBLKSIZE && !GC_NO_VALLOC] (HBLKSIZE): Do not define (to 65536 or
GETPAGESIZE()); remove TODO item.
* malloc.c (GC_alloc_large): Add alignment to n_blocks value (but not
to GC_large_allocd_bytes); pass align_m1 to GC_allochblk(); add
assertion that result is aligned to align_m1+1.
* malloc.c (GC_alloc_large_and_clear): Pass zero align_m1 to
GC_alloc_large().
* mallocx.c (GC_generic_malloc_ignore_off_page): Likewise.
* malloc.c (GC_generic_malloc_aligned): Move code from
GC_generic_malloc; adjust align_m1 to be either zero or not less than
HBLKSIZE-1, and pass it to GC_alloc_large(); add comment that the
result of GC_oom_fn(lb) might be unaligned.
* malloc.c (GC_generic_malloc): Call GC_generic_malloc_aligned() with
zero align_m1.
* mallocx.c (GC_generic_malloc_ignore_off_page): Expect result of
GC_alloc_large() is unlikely to be NULL.
* mallocx.c (GC_generic_malloc_many): Pass zero align_m1 to
GC_allochblk().
* new_hblk.c (GC_new_hblk): Likewise.
* mallocx.c: Do not include limits.h.
* mallocx.c (GC_memalign): Remove new_lb local variable; define and
use align_m1 local variable; if align is smaller than sizeof(void*) or
is not a power of two then return NULL; call GC_generic_malloc_aligned
and GC_clear_stack (instead of GC_oom_fn() or GC_malloc()) if align or
lb is at least HBLKSIZE/2; replace result%align with result&align_m1;
do not result+=offset if offset is zero.
* new_hblk.c (GC_new_hblk): Expect result of GC_allochblk() is unlikely
to be NULL.
* tests/gctest.c (run_one_test): Call GC_memalign() also for bigger
alignment values (up to HBLKSIZE*4).
Diffstat (limited to 'mallocx.c')
-rw-r--r-- | mallocx.c | 31 |
1 files changed, 15 insertions, 16 deletions
@@ -214,8 +214,8 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_INVOKE_FINALIZERS(); GC_DBG_COLLECT_AT_MALLOC(lb); LOCK(); - result = (ptr_t)GC_alloc_large(ADD_SLOP(lb), k, IGNORE_OFF_PAGE); - if (NULL == result) { + result = (ptr_t)GC_alloc_large(ADD_SLOP(lb), k, IGNORE_OFF_PAGE, 0); + if (EXPECT(NULL == result, FALSE)) { GC_oom_func oom_fn = GC_oom_fn; UNLOCK(); return (*oom_fn)(lb); @@ -441,7 +441,8 @@ GC_API void GC_CALL GC_generic_malloc_many(size_t lb, int k, void **result) } /* Next try to allocate a new block worth of objects of this size. */ { - struct hblk *h = GC_allochblk(lb, k, 0); + struct hblk *h = GC_allochblk(lb, k, 0 /* flags */, 0 /* align_m1 */); + if (h /* != NULL */) { /* CPPCHECK */ if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h)); GC_bytes_allocd += HBLKSIZE - HBLKSIZE % lb; @@ -494,31 +495,29 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_many(size_t lb) return result; } -#include <limits.h> - /* Debug version is tricky and currently missing. */ GC_API GC_ATTR_MALLOC void * GC_CALL GC_memalign(size_t align, size_t lb) { - size_t new_lb; size_t offset; ptr_t result; + size_t align_m1 = align - 1; + + /* Check the alignment argument. */ + if (align < sizeof(void *) || (align & align_m1) != 0) return NULL; if (align <= GRANULE_BYTES) return GC_malloc(lb); + if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) { - if (EXPECT(align > HBLKSIZE, FALSE)) { - return (*GC_get_oom_fn())(LONG_MAX-1024); /* Fail */ - } - return GC_malloc(lb <= HBLKSIZE? HBLKSIZE : lb); - /* Will be HBLKSIZE aligned. */ + return GC_clear_stack(GC_generic_malloc_aligned(lb, NORMAL, align_m1)); } + /* We could also try to make sure that the real rounded-up object size */ /* is a multiple of align. That would be correct up to HBLKSIZE. */ /* TODO: Not space efficient for big align values. 
*/ - new_lb = SIZET_SAT_ADD(lb, align - 1); - result = (ptr_t)GC_malloc(new_lb); + result = (ptr_t)GC_malloc(SIZET_SAT_ADD(lb, align_m1)); /* It is OK not to check result for NULL as in that case */ /* GC_memalign returns NULL too since (0 + 0 % align) is 0. */ - offset = (word)result % align; + offset = (size_t)(word)result & align_m1; if (offset != 0) { offset = align - offset; if (!GC_all_interior_pointers) { @@ -526,9 +525,9 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_memalign(size_t align, size_t lb) GC_ASSERT(offset < VALID_OFFSET_SZ); GC_register_displacement(offset); } + result += offset; } - result += offset; - GC_ASSERT((word)result % align == 0); + GC_ASSERT(((word)result & align_m1) == 0); return result; } |