summary refs log tree commit diff
path: root/malloc.c
diff options
context:
space:
mode:
author Ivan Maidanski <ivmai@mail.ru> 2023-03-24 08:35:46 +0300
committer Ivan Maidanski <ivmai@mail.ru> 2023-03-24 12:53:15 +0300
commit1d8ed2c5bb50d20c8903bb96f017b43a62b32ef2 (patch)
tree88c9cab5eaa62d7572de72f2673a48b0e0725ad8 /malloc.c
parentd21d456b3abecb61ec8e5dfc6f06f48ba1eb4cc5 (diff)
downloadbdwgc-1d8ed2c5bb50d20c8903bb96f017b43a62b32ef2.tar.gz
Avoid code duplication in IGNORE_OFF_PAGE-specific malloc functions
(refactoring) * alloc.c: Update comment regarding GC_generic_malloc_inner usage. * alloc.c (GC_collect_or_expand): Replace ignore_off_page argument to flags (check IGNORE_OFF_PAGE bit only in it); update comment. * alloc.c (GC_allocobj): Pass 0 as flags to GC_collect_or_expand. * dbg_mlc.c (GC_debug_generic_malloc, GC_debug_generic_or_special_malloc): Rename knd argument to k. * mallocx.c (GC_generic_or_special_malloc): Likewise. * dbg_mlc.c (GC_debug_generic_malloc): Use GC_generic_malloc_aligned() instead of GC_generic_malloc(). * dbg_mlc.c (GC_debug_generic_malloc_inner): Add flags argument. * gcj_mlc.c [GC_GCJ_SUPPORT] (GC_core_gcj_malloc): Likewise. * include/private/gc_priv.h (GC_generic_malloc_aligned, GC_generic_malloc_inner): Likewise. * include/private/gc_priv.h [THREAD_LOCAL_ALLOC && GC_GCJ_SUPPORT] (GC_core_gcj_malloc): Likewise. * include/private/gc_priv.h [DBG_HDRS_ALL] (GC_debug_generic_malloc_inner): Likewise. * malloc.c (GC_generic_malloc_inner, GC_generic_malloc_aligned): Likewise. * dbg_mlc.c (GC_debug_generic_malloc_inner): Use GC_generic_malloc_inner() instead of GC_generic_malloc_inner_ignore_off_page(). * dbg_mlc.c [DBG_HDRS_ALL] (GC_debug_generic_malloc_inner_ignore_off_page): Remove GC_INNER function. * include/private/gc_priv.h [DBG_HDRS_ALL || GC_GCJ_SUPPORT || !GC_NO_FINALIZATION] (GC_generic_malloc_inner_ignore_off_page): Likewise. * include/private/gc_priv.h [DBG_HDRS_ALL] (GC_debug_generic_malloc_inner_ignore_off_page): Likewise. * malloc.c [DBG_HDRS_ALL || GC_GCJ_SUPPORT || !GC_NO_FINALIZATION] (GC_generic_malloc_inner_ignore_off_page): Likewise. * gcj_mlc.c [GC_GCJ_SUPPORT && !THREAD_LOCAL_ALLOC] (GC_gcj_malloc): Define as STATIC GC_core_gcj_malloc. * gcj_mlc.c [GC_GCJ_SUPPORT] (GC_core_gcj_malloc): Reformat comment; pass flags to GC_generic_malloc_inner(). * gcj_mlc.c [GC_GCJ_SUPPORT && !THREAD_LOCAL_ALLOC] (GC_gcj_malloc): Redirect to GC_core_gcj_malloc() passing 0 to flags argument. 
* thread_local_alloc.c [THREAD_LOCAL_ALLOC && GC_GCJ_SUPPORT] (GC_gcj_malloc): Likewise. * gcj_mlc.c [GC_GCJ_SUPPORT] (GC_gcj_malloc_ignore_off_page): Redirect to GC_core_gcj_malloc() passing IGNORE_OFF_PAGE. * gcj_mlc.c [GC_GCJ_SUPPORT] (GC_debug_gcj_malloc): Pass 0 as flags to GC_generic_malloc_inner(). * include/private/gc_priv.h (GC_generic_malloc_inner): Update comment. * mallocx.c (GC_generic_malloc_many): Likewise. * include/private/gc_priv.h (GC_collect_or_expand): Replace GC_bool ignore_off_page argument to unsigned flags. * include/private/gc_priv.h (GC_INTERNAL_MALLOC, GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE): Specify macro arguments. * include/private/gc_priv.h (GC_INTERNAL_MALLOC): Pass 0 as flags argument to GC_[debug_]generic_malloc_inner(). * include/private/gc_priv.h (GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE): Pass IGNORE_OFF_PAGE to GC_[debug_]generic_malloc_inner(). * malloc.c (GC_alloc_large): Pass flags (instead of flags!=0) to GC_collect_or_expand(). * malloc.c (GC_generic_malloc_inner_small): New STATIC function (move most of code from GC_generic_malloc_inner). * malloc.c (GC_generic_malloc_inner): Move comment to gc_priv.h; call GC_generic_malloc_inner_small(). * malloc.c (GC_generic_malloc_aligned): Call GC_generic_malloc_inner_small() instead of GC_generic_malloc_inner(); pass flags (instead of 0) to GC_alloc_large(); do not cast result of GC_alloc_large() to ptr_t. * malloc.c (GC_generic_malloc): Pass 0 as flags to GC_generic_malloc_aligned(). * malloc.c (GC_memalign): Likewise. * malloc.c (GC_malloc_kind_global, GC_generic_malloc_uncollectable): Call GC_generic_malloc_aligned() instead of GC_generic_malloc(). * mallocx.c (GC_generic_malloc_many): Likewise. * malloc.c (free_internal): Rename knd local variable to k. * mallocx.c (GC_generic_malloc_ignore_off_page, GC_malloc_ignore_off_page, GC_malloc_atomic_ignore_off_page): Redirect to GC_generic_malloc_aligned() passing IGNORE_OFF_PAGE. * typd_mlc.c (GC_generic_malloc_ignore_off_page): Likewise. 
* tests/gctest.c (run_one_test): Call GC_generic_malloc_ignore_off_page() (with size larger than HBLKSIZE); increment collectable_count.
Diffstat (limited to 'malloc.c')
-rw-r--r--malloc.c121
1 file changed, 54 insertions, 67 deletions
diff --git a/malloc.c b/malloc.c
index 3188544f..d1d7e7b6 100644
--- a/malloc.c
+++ b/malloc.c
@@ -63,7 +63,7 @@ GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags,
h = GC_allochblk(lb, k, flags, align_m1);
}
# endif
- while (0 == h && GC_collect_or_expand(n_blocks, flags != 0, retry)) {
+ while (0 == h && GC_collect_or_expand(n_blocks, flags, retry)) {
h = GC_allochblk(lb, k, flags, align_m1);
retry = TRUE;
}
@@ -150,69 +150,55 @@ STATIC void GC_extend_size_map(size_t i)
GC_size_map[low_limit] = granule_sz;
}
-/* Allocate lb bytes for an object of kind k. */
-/* Should not be used to directly allocate objects */
-/* that require special handling on allocation. */
-GC_INNER void * GC_generic_malloc_inner(size_t lb, int k)
+STATIC void * GC_generic_malloc_inner_small(size_t lb, int k)
{
- void *op;
-
- GC_ASSERT(I_HOLD_LOCK());
- GC_ASSERT(k < MAXOBJKINDS);
- if (SMALL_OBJ(lb)) {
- struct obj_kind * kind = GC_obj_kinds + k;
- size_t lg = GC_size_map[lb];
- void ** opp = &(kind -> ok_freelist[lg]);
+ struct obj_kind * kind = GC_obj_kinds + k;
+ size_t lg = GC_size_map[lb];
+ void ** opp = &(kind -> ok_freelist[lg]);
+ void *op = *opp;
- op = *opp;
- if (EXPECT(0 == op, FALSE)) {
- if (lg == 0) {
- if (!EXPECT(GC_is_initialized, TRUE)) {
- UNLOCK(); /* just to unset GC_lock_holder */
- GC_init();
- LOCK();
- lg = GC_size_map[lb];
- }
- if (0 == lg) {
- GC_extend_size_map(lb);
- lg = GC_size_map[lb];
- GC_ASSERT(lg != 0);
- }
- /* Retry */
- opp = &(kind -> ok_freelist[lg]);
- op = *opp;
- }
- if (0 == op) {
- if (0 == kind -> ok_reclaim_list
- && !GC_alloc_reclaim_list(kind))
- return NULL;
- op = GC_allocobj(lg, k);
- if (0 == op)
- return NULL;
- }
- }
- *opp = obj_link(op);
- obj_link(op) = 0;
- GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
- } else {
- op = (ptr_t)GC_alloc_large_and_clear(ADD_SLOP(lb), k, 0 /* flags */);
+ GC_ASSERT(I_HOLD_LOCK());
+ if (EXPECT(NULL == op, FALSE)) {
+ if (lg == 0) {
+ if (!EXPECT(GC_is_initialized, TRUE)) {
+ UNLOCK(); /* just to unset GC_lock_holder */
+ GC_init();
+ LOCK();
+ lg = GC_size_map[lb];
+ }
+ if (0 == lg) {
+ GC_extend_size_map(lb);
+ lg = GC_size_map[lb];
+ GC_ASSERT(lg != 0);
+ }
+ /* Retry */
+ opp = &(kind -> ok_freelist[lg]);
+ op = *opp;
}
- return op;
+ if (NULL == op) {
+ if (NULL == kind -> ok_reclaim_list
+ && !GC_alloc_reclaim_list(kind))
+ return NULL;
+ op = GC_allocobj(lg, k);
+ if (NULL == op) return NULL;
+ }
+ }
+ *opp = obj_link(op);
+ obj_link(op) = NULL;
+ GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
+ return op;
}
-#if defined(DBG_HDRS_ALL) || defined(GC_GCJ_SUPPORT) \
- || !defined(GC_NO_FINALIZATION)
- /* Allocate a composite object of size n bytes. The caller */
- /* guarantees that pointers past the first hblk are not relevant. */
- GC_INNER void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k)
- {
+GC_INNER void * GC_generic_malloc_inner(size_t lb, int k, unsigned flags)
+{
GC_ASSERT(I_HOLD_LOCK());
- if (lb <= HBLKSIZE)
- return GC_generic_malloc_inner(lb, k);
GC_ASSERT(k < MAXOBJKINDS);
- return GC_alloc_large_and_clear(ADD_SLOP(lb), k, IGNORE_OFF_PAGE);
- }
-#endif
+ if (SMALL_OBJ(lb)) {
+ return GC_generic_malloc_inner_small(lb, k);
+ }
+
+ return GC_alloc_large_and_clear(ADD_SLOP(lb), k, flags);
+}
#ifdef GC_COLLECT_AT_MALLOC
/* Parameter to force GC at every malloc of size greater or equal to */
@@ -224,7 +210,8 @@ GC_INNER void * GC_generic_malloc_inner(size_t lb, int k)
# endif
#endif
-GC_INNER void * GC_generic_malloc_aligned(size_t lb, int k, size_t align_m1)
+GC_INNER void * GC_generic_malloc_aligned(size_t lb, int k, unsigned flags,
+ size_t align_m1)
{
void * result;
@@ -235,7 +222,7 @@ GC_INNER void * GC_generic_malloc_aligned(size_t lb, int k, size_t align_m1)
GC_DBG_COLLECT_AT_MALLOC(lb);
if (SMALL_OBJ(lb) && EXPECT(align_m1 < GRANULE_BYTES, TRUE)) {
LOCK();
- result = GC_generic_malloc_inner(lb, k);
+ result = GC_generic_malloc_inner_small(lb, k);
UNLOCK();
} else {
size_t lg;
@@ -251,7 +238,7 @@ GC_INNER void * GC_generic_malloc_aligned(size_t lb, int k, size_t align_m1)
align_m1 = HBLKSIZE - 1;
}
LOCK();
- result = (ptr_t)GC_alloc_large(lb_rounded, k, 0 /* flags */, align_m1);
+ result = GC_alloc_large(lb_rounded, k, flags, align_m1);
if (EXPECT(result != NULL, TRUE)) {
if (GC_debugging_started) {
BZERO(result, HBLKSIZE * OBJ_SZ_TO_BLOCKS(lb_rounded));
@@ -278,7 +265,7 @@ GC_INNER void * GC_generic_malloc_aligned(size_t lb, int k, size_t align_m1)
GC_API GC_ATTR_MALLOC void * GC_CALL GC_generic_malloc(size_t lb, int k)
{
- return GC_generic_malloc_aligned(lb, k, 0 /* align_m1 */);
+ return GC_generic_malloc_aligned(lb, k, 0 /* flags */, 0 /* align_m1 */);
}
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_kind_global(size_t lb, int k)
@@ -315,7 +302,7 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_kind_global(size_t lb, int k)
/* We make the GC_clear_stack() call a tail one, hoping to get more */
/* of the stack. */
- return GC_clear_stack(GC_generic_malloc(lb, k));
+ return GC_clear_stack(GC_generic_malloc_aligned(lb, k, 0 /* flags */, 0));
}
#if defined(THREADS) && !defined(THREAD_LOCAL_ALLOC)
@@ -366,12 +353,12 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_generic_malloc_uncollectable(
UNLOCK();
} else {
UNLOCK();
- op = GC_generic_malloc(lb, k);
+ op = GC_generic_malloc_aligned(lb, k, 0, 0 /* align_m1 */);
/* For small objects, the free lists are completely marked. */
}
GC_ASSERT(0 == op || GC_is_marked(op));
} else {
- op = GC_generic_malloc(lb, k);
+ op = GC_generic_malloc_aligned(lb, k, 0 /* flags */, 0 /* align_m1 */);
if (op /* != NULL */) { /* CPPCHECK */
hdr * hhdr = HDR(op);
@@ -553,12 +540,12 @@ static void free_internal(void *p, hdr *hhdr)
{
size_t sz = (size_t)(hhdr -> hb_sz); /* in bytes */
size_t ngranules = BYTES_TO_GRANULES(sz); /* size in granules */
- int knd = hhdr -> hb_obj_kind;
+ int k = hhdr -> hb_obj_kind;
GC_bytes_freed += sz;
- if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
+ if (IS_UNCOLLECTABLE(k)) GC_non_gc_bytes -= sz;
if (EXPECT(ngranules <= MAXOBJGRANULES, TRUE)) {
- struct obj_kind *ok = &GC_obj_kinds[knd];
+ struct obj_kind *ok = &GC_obj_kinds[k];
void **flh;
/* It is unnecessary to clear the mark bit. If the object is */