author     ivmai <ivmai>                         2009-09-19 11:23:29 +0000
committer  Ivan Maidanski <ivmai@mail.ru>        2011-07-26 21:06:47 +0400
commit     fa40c00794cf621101182ffe60e56449fb580e8a (patch)
tree       e27b5f5b9e2759b99889010af3a072ccb516427c /allchblk.c
parent     d1d1fc8d9e51ffa49da97f99ad6158c090fb7e80 (diff)
download   bdwgc-fa40c00794cf621101182ffe60e56449fb580e8a.tar.gz
2009-09-19 Ivan Maidanski <ivmai@mail.ru>
        * allchblk.c: Expand all tabs to spaces; remove trailing spaces
        at EOLn; remove multiple trailing blank lines.
        * mark.c: Ditto.
        * mark_rts.c: Ditto.
        * pthread_stop_world.c: Ditto.
        * win32_threads.c: Ditto.
        * include/gc.h: Ditto.
        * include/private/gc_priv.h: Ditto.
        * pthread_stop_world.c: Add copyright header.
        * include/gc.h: Reformat some comments.
Diffstat (limited to 'allchblk.c')
-rw-r--r--    allchblk.c    759
1 file changed, 379 insertions, 380 deletions
diff --git a/allchblk.c b/allchblk.c
index f6a0f4ee..713e4ae6 100644
--- a/allchblk.c
+++ b/allchblk.c
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1998-1999 by Silicon Graphics. All rights reserved.
@@ -26,35 +26,35 @@ GC_bool GC_use_entire_heap = 0;
* Adjacent free blocks are coalesced.
*/
-
+
# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
- /* largest block we will allocate starting on a black */
- /* listed block. Must be >= HBLKSIZE. */
+ /* largest block we will allocate starting on a black */
+ /* listed block. Must be >= HBLKSIZE. */
# define UNIQUE_THRESHOLD 32
- /* Sizes up to this many HBLKs each have their own free list */
+ /* Sizes up to this many HBLKs each have their own free list */
# define HUGE_THRESHOLD 256
- /* Sizes of at least this many heap blocks are mapped to a */
- /* single free list. */
+ /* Sizes of at least this many heap blocks are mapped to a */
+ /* single free list. */
# define FL_COMPRESSION 8
- /* In between sizes map this many distinct sizes to a single */
- /* bin. */
+ /* In between sizes map this many distinct sizes to a single */
+ /* bin. */
# define N_HBLK_FLS (HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
- + UNIQUE_THRESHOLD
+ + UNIQUE_THRESHOLD
struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };
#ifndef USE_MUNMAP
STATIC word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
- /* Number of free bytes on each list. */
+ /* Number of free bytes on each list. */
- /* Return the largest n such that */
- /* Is GC_large_allocd_bytes + the number of free bytes on lists */
- /* n .. N_HBLK_FLS > GC_max_large_allocd_bytes. */
- /* If there is no such n, return 0. */
+ /* Return the largest n such that */
+ /* Is GC_large_allocd_bytes + the number of free bytes on lists */
+ /* n .. N_HBLK_FLS > GC_max_large_allocd_bytes. */
+ /* If there is no such n, return 0. */
# ifdef __GNUC__
__inline__
# endif
@@ -65,8 +65,8 @@ struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };
GC_ASSERT(GC_max_large_allocd_bytes <= GC_heapsize);
for (n = N_HBLK_FLS; n >= 0; --n) {
- bytes += GC_free_bytes[n];
- if (bytes >= GC_max_large_allocd_bytes) return n;
+ bytes += GC_free_bytes[n];
+ if (bytes >= GC_max_large_allocd_bytes) return n;
}
return 0;
}
@@ -88,8 +88,8 @@ STATIC int GC_hblk_fl_from_blocks(word blocks_needed)
if (blocks_needed <= UNIQUE_THRESHOLD) return (int)blocks_needed;
if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
return (int)(blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
- + UNIQUE_THRESHOLD;
-
+ + UNIQUE_THRESHOLD;
+
}
# define PHDR(hhdr) HDR(hhdr -> hb_prev)
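
For reference, the mapping implemented by GC_hblk_fl_from_blocks above can be
exercised in isolation. The following is a minimal standalone sketch, not part
of the patch: the constants are copied from the definitions earlier in this
file purely for illustration, and the sample block counts in main() are
arbitrary.

    #include <stdio.h>

    #define UNIQUE_THRESHOLD 32
    #define HUGE_THRESHOLD 256
    #define FL_COMPRESSION 8
    #define N_HBLK_FLS ((HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
                        + UNIQUE_THRESHOLD)

    /* Same computation as GC_hblk_fl_from_blocks above. */
    static int hblk_fl_from_blocks(unsigned long blocks_needed)
    {
        if (blocks_needed <= UNIQUE_THRESHOLD) return (int)blocks_needed;
        if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
        return (int)(blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
               + UNIQUE_THRESHOLD;
    }

    int main(void)
    {
        /* 1..32 blocks get their own bin; 33..255 share bins in groups */
        /* of FL_COMPRESSION; 256 and larger all land in bin 60.        */
        printf("%d %d %d %d\n",
               hblk_fl_from_blocks(1),    /* -> 1  */
               hblk_fl_from_blocks(32),   /* -> 32 */
               hblk_fl_from_blocks(40),   /* -> 33 */
               hblk_fl_from_blocks(300)); /* -> 60 */
        return 0;
    }
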
@@ -109,48 +109,48 @@ void GC_print_hblkfreelist(void)
hdr * hhdr;
word sz;
unsigned i;
-
+
for (i = 0; i <= N_HBLK_FLS; ++i) {
h = GC_hblkfreelist[i];
# ifdef USE_MUNMAP
if (0 != h) GC_printf("Free list %u:\n", i);
# else
if (0 != h) GC_printf("Free list %u (Total size %lu):\n",
- i, (unsigned long)GC_free_bytes[i]);
+ i, (unsigned long)GC_free_bytes[i]);
# endif
while (h != 0) {
hhdr = HDR(h);
sz = hhdr -> hb_sz;
- total_free += sz;
- GC_printf("\t%p size %lu %s black listed\n", h, (unsigned long)sz,
- GC_is_black_listed(h, HBLKSIZE) != 0 ? "start" :
- GC_is_black_listed(h, hhdr -> hb_sz) != 0 ? "partially" :
- "not");
+ total_free += sz;
+ GC_printf("\t%p size %lu %s black listed\n", h, (unsigned long)sz,
+ GC_is_black_listed(h, HBLKSIZE) != 0 ? "start" :
+ GC_is_black_listed(h, hhdr -> hb_sz) != 0 ? "partially" :
+ "not");
h = hhdr -> hb_next;
}
}
# ifndef USE_MUNMAP
if (total_free != GC_large_free_bytes) {
- GC_printf("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
- (unsigned long) GC_large_free_bytes);
+ GC_printf("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
+ (unsigned long) GC_large_free_bytes);
}
# endif
GC_printf("Total of %lu bytes on free list\n", (unsigned long)total_free);
}
/* Return the free list index on which the block described by the header */
-/* appears, or -1 if it appears nowhere. */
+/* appears, or -1 if it appears nowhere. */
static int free_list_index_of(hdr *wanted)
{
struct hblk * h;
hdr * hhdr;
int i;
-
+
for (i = 0; i <= N_HBLK_FLS; ++i) {
h = GC_hblkfreelist[i];
while (h != 0) {
hhdr = HDR(h);
- if (hhdr == wanted) return i;
+ if (hhdr == wanted) return i;
h = hhdr -> hb_next;
}
}
@@ -165,61 +165,61 @@ void GC_dump_regions(void)
size_t bytes;
hdr *hhdr;
for (i = 0; i < GC_n_heap_sects; ++i) {
- start = GC_heap_sects[i].hs_start;
- bytes = GC_heap_sects[i].hs_bytes;
- end = start + bytes;
- /* Merge in contiguous sections. */
- while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
- ++i;
- end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
- }
- GC_printf("***Section from %p to %p\n", start, end);
- for (p = start; p < end;) {
- hhdr = HDR(p);
- if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
- GC_printf("\t%p Missing header!!(%p)\n", p, hhdr);
- p += HBLKSIZE;
- continue;
- }
- if (HBLK_IS_FREE(hhdr)) {
+ start = GC_heap_sects[i].hs_start;
+ bytes = GC_heap_sects[i].hs_bytes;
+ end = start + bytes;
+ /* Merge in contiguous sections. */
+ while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
+ ++i;
+ end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
+ }
+ GC_printf("***Section from %p to %p\n", start, end);
+ for (p = start; p < end;) {
+ hhdr = HDR(p);
+ if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
+ GC_printf("\t%p Missing header!!(%p)\n", p, hhdr);
+ p += HBLKSIZE;
+ continue;
+ }
+ if (HBLK_IS_FREE(hhdr)) {
int correct_index = GC_hblk_fl_from_blocks(
- divHBLKSZ(hhdr -> hb_sz));
- int actual_index;
-
- GC_printf("\t%p\tfree block of size 0x%lx bytes%s\n", p,
- (unsigned long)(hhdr -> hb_sz),
- IS_MAPPED(hhdr) ? "" : " (unmapped)");
- actual_index = free_list_index_of(hhdr);
- if (-1 == actual_index) {
- GC_printf("\t\tBlock not on free list %d!!\n",
- correct_index);
- } else if (correct_index != actual_index) {
- GC_printf("\t\tBlock on list %d, should be on %d!!\n",
- actual_index, correct_index);
- }
- p += hhdr -> hb_sz;
- } else {
- GC_printf("\t%p\tused for blocks of size 0x%lx bytes\n", p,
- (unsigned long)(hhdr -> hb_sz));
- p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
- }
- }
+ divHBLKSZ(hhdr -> hb_sz));
+ int actual_index;
+
+ GC_printf("\t%p\tfree block of size 0x%lx bytes%s\n", p,
+ (unsigned long)(hhdr -> hb_sz),
+ IS_MAPPED(hhdr) ? "" : " (unmapped)");
+ actual_index = free_list_index_of(hhdr);
+ if (-1 == actual_index) {
+ GC_printf("\t\tBlock not on free list %d!!\n",
+ correct_index);
+ } else if (correct_index != actual_index) {
+ GC_printf("\t\tBlock on list %d, should be on %d!!\n",
+ actual_index, correct_index);
+ }
+ p += hhdr -> hb_sz;
+ } else {
+ GC_printf("\t%p\tused for blocks of size 0x%lx bytes\n", p,
+ (unsigned long)(hhdr -> hb_sz));
+ p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
+ }
+ }
}
}
# endif /* NO_DEBUGGING */
-/* Initialize hdr for a block containing the indicated size and */
-/* kind of objects. */
-/* Return FALSE on failure. */
+/* Initialize hdr for a block containing the indicated size and */
+/* kind of objects. */
+/* Return FALSE on failure. */
static GC_bool setup_header(hdr * hhdr, struct hblk *block, size_t byte_sz,
- int kind, unsigned flags)
+ int kind, unsigned flags)
{
word descr;
# ifndef MARK_BIT_PER_OBJ
size_t granules;
# endif
-
+
/* Set size, kind and mark proc fields */
hhdr -> hb_sz = byte_sz;
hhdr -> hb_obj_kind = (unsigned char)kind;
@@ -228,26 +228,26 @@ static GC_bool setup_header(hdr * hhdr, struct hblk *block, size_t byte_sz,
descr = GC_obj_kinds[kind].ok_descriptor;
if (GC_obj_kinds[kind].ok_relocate_descr) descr += byte_sz;
hhdr -> hb_descr = descr;
-
+
# ifdef MARK_BIT_PER_OBJ
- /* Set hb_inv_sz as portably as possible. */
+ /* Set hb_inv_sz as portably as possible. */
/* We set it to the smallest value such that sz * inv_sz > 2**32 */
- /* This may be more precision than necessary. */
+ /* This may be more precision than necessary. */
if (byte_sz > MAXOBJBYTES) {
- hhdr -> hb_inv_sz = LARGE_INV_SZ;
+ hhdr -> hb_inv_sz = LARGE_INV_SZ;
} else {
- word inv_sz;
+ word inv_sz;
# if CPP_WORDSZ == 64
inv_sz = ((word)1 << 32)/byte_sz;
- if (((inv_sz*byte_sz) >> 32) == 0) ++inv_sz;
-# else /* 32 bit words */
- GC_ASSERT(byte_sz >= 4);
- inv_sz = ((unsigned)1 << 31)/byte_sz;
- inv_sz *= 2;
- while (inv_sz*byte_sz > byte_sz) ++inv_sz;
-# endif
- hhdr -> hb_inv_sz = inv_sz;
+ if (((inv_sz*byte_sz) >> 32) == 0) ++inv_sz;
+# else /* 32 bit words */
+ GC_ASSERT(byte_sz >= 4);
+ inv_sz = ((unsigned)1 << 31)/byte_sz;
+ inv_sz *= 2;
+ while (inv_sz*byte_sz > byte_sz) ++inv_sz;
+# endif
+ hhdr -> hb_inv_sz = inv_sz;
}
# else /* MARK_BIT_PER_GRANULE */
hhdr -> hb_large_block = (unsigned char)(byte_sz > MAXOBJBYTES);
@@ -264,10 +264,10 @@ static GC_bool setup_header(hdr * hhdr, struct hblk *block, size_t byte_sz,
hhdr -> hb_map = GC_obj_map[index];
}
# endif /* MARK_BIT_PER_GRANULE */
-
+
/* Clear mark bits */
GC_clear_hdr_marks(hhdr);
-
+
hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
return(TRUE);
}
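
The hb_inv_sz field set up above stores a scaled reciprocal of the object
size, chosen so that sz * inv_sz exceeds 2**32; elsewhere the collector can
then turn a byte offset into an object index with a multiply and a shift
instead of a division. A minimal standalone sketch of that arithmetic follows
(not part of the patch; the size 24 and the fixed-width 64-bit types are
illustrative assumptions only).

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t sz = 24;            /* hypothetical object size in bytes */
        /* Smallest value with sz * inv_sz > 2**32 (sz not a power of 2). */
        uint64_t inv_sz = (((uint64_t)1 << 32) + sz - 1) / sz;
        uint32_t offset;

        assert(sz * inv_sz > ((uint64_t)1 << 32));
        /* For byte offsets that are multiples of sz, a multiply and a   */
        /* 32-bit shift reproduce offset / sz without a hardware divide. */
        for (offset = 0; offset < 64 * sz; offset += sz) {
            uint32_t index = (uint32_t)(((uint64_t)offset * inv_sz) >> 32);
            assert(index == offset / sz);
        }
        return 0;
    }
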
@@ -284,35 +284,35 @@ STATIC void GC_remove_from_fl(hdr *hhdr, int n)
GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
# ifndef USE_MUNMAP
- /* We always need index to mainatin free counts. */
+ /* We always need index to mainatin free counts. */
if (FL_UNKNOWN == n) {
index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
} else {
- index = n;
+ index = n;
}
# endif
if (hhdr -> hb_prev == 0) {
-# ifdef USE_MUNMAP
- if (FL_UNKNOWN == n) {
+# ifdef USE_MUNMAP
+ if (FL_UNKNOWN == n) {
index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
- } else {
- index = n;
- }
-# endif
- GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
- GC_hblkfreelist[index] = hhdr -> hb_next;
+ } else {
+ index = n;
+ }
+# endif
+ GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
+ GC_hblkfreelist[index] = hhdr -> hb_next;
} else {
- hdr *phdr;
- GET_HDR(hhdr -> hb_prev, phdr);
- phdr -> hb_next = hhdr -> hb_next;
+ hdr *phdr;
+ GET_HDR(hhdr -> hb_prev, phdr);
+ phdr -> hb_next = hhdr -> hb_next;
}
FREE_ASSERT(GC_free_bytes[index] >= hhdr -> hb_sz);
INCR_FREE_BYTES(index, - (signed_word)(hhdr -> hb_sz));
if (0 != hhdr -> hb_next) {
- hdr * nhdr;
- GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
- GET_HDR(hhdr -> hb_next, nhdr);
- nhdr -> hb_prev = hhdr -> hb_prev;
+ hdr * nhdr;
+ GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
+ GET_HDR(hhdr -> hb_next, nhdr);
+ nhdr -> hb_prev = hhdr -> hb_prev;
}
}
@@ -326,21 +326,21 @@ STATIC struct hblk * GC_free_block_ending_at(struct hblk *h)
GET_HDR(p, phdr);
while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
- p = FORWARDED_ADDR(p,phdr);
- phdr = HDR(p);
+ p = FORWARDED_ADDR(p,phdr);
+ phdr = HDR(p);
}
if (0 != phdr) {
if(HBLK_IS_FREE(phdr)) {
- return p;
- } else {
- return 0;
- }
+ return p;
+ } else {
+ return 0;
+ }
}
p = GC_prev_block(h - 1);
if (0 != p) {
phdr = HDR(p);
if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
- return p;
+ return p;
}
}
return 0;
@@ -362,9 +362,9 @@ STATIC void GC_add_to_fl(struct hblk *h, hdr *hhdr)
hdr * prevhdr = HDR(prev);
GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr)
|| (signed_word)GC_heapsize < 0);
- /* In the last case, blocks may be too large to merge. */
+ /* In the last case, blocks may be too large to merge. */
GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr)
- || (signed_word)GC_heapsize < 0);
+ || (signed_word)GC_heapsize < 0);
# endif
GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
GC_hblkfreelist[index] = h;
@@ -388,7 +388,7 @@ STATIC void GC_add_to_fl(struct hblk *h, hdr *hhdr)
int GC_unmap_threshold = MUNMAP_THRESHOLD;
/* Unmap blocks that haven't been recently touched. This is the only way */
-/* way blocks are ever unmapped. */
+/* way blocks are ever unmapped. */
void GC_unmap_old(void)
{
struct hblk * h;
@@ -397,80 +397,80 @@ void GC_unmap_old(void)
if (GC_unmap_threshold == 0)
return; /* unmapping disabled */
-
+
for (i = 0; i <= N_HBLK_FLS; ++i) {
for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
hhdr = HDR(h);
- if (!IS_MAPPED(hhdr)) continue;
+ if (!IS_MAPPED(hhdr)) continue;
- if ((unsigned short)GC_gc_no - hhdr -> hb_last_reclaimed >
- (unsigned short)GC_unmap_threshold) {
- GC_unmap((ptr_t)h, hhdr -> hb_sz);
- hhdr -> hb_flags |= WAS_UNMAPPED;
- }
+ if ((unsigned short)GC_gc_no - hhdr -> hb_last_reclaimed >
+ (unsigned short)GC_unmap_threshold) {
+ GC_unmap((ptr_t)h, hhdr -> hb_sz);
+ hhdr -> hb_flags |= WAS_UNMAPPED;
+ }
}
- }
+ }
}
-/* Merge all unmapped blocks that are adjacent to other free */
-/* blocks. This may involve remapping, since all blocks are either */
-/* fully mapped or fully unmapped. */
+/* Merge all unmapped blocks that are adjacent to other free */
+/* blocks. This may involve remapping, since all blocks are either */
+/* fully mapped or fully unmapped. */
void GC_merge_unmapped(void)
{
struct hblk * h, *next;
hdr * hhdr, *nexthdr;
word size, nextsize;
int i;
-
+
for (i = 0; i <= N_HBLK_FLS; ++i) {
h = GC_hblkfreelist[i];
while (h != 0) {
- GET_HDR(h, hhdr);
- size = hhdr->hb_sz;
- next = (struct hblk *)((word)h + size);
- GET_HDR(next, nexthdr);
- /* Coalesce with successor, if possible */
- if (0 != nexthdr && HBLK_IS_FREE(nexthdr)
- && (signed_word) (size + (nextsize = nexthdr->hb_sz)) > 0
- /* no pot. overflow */) {
- /* Note that we usually try to avoid adjacent free blocks */
- /* that are either both mapped or both unmapped. But that */
- /* isn't guaranteed to hold since we remap blocks when we */
- /* split them, and don't merge at that point. It may also */
- /* not hold if the merged block would be too big. */
- if (IS_MAPPED(hhdr) && !IS_MAPPED(nexthdr)) {
- /* make both consistent, so that we can merge */
- if (size > nextsize) {
- GC_remap((ptr_t)next, nextsize);
- } else {
- GC_unmap((ptr_t)h, size);
- GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
- hhdr -> hb_flags |= WAS_UNMAPPED;
- }
- } else if (IS_MAPPED(nexthdr) && !IS_MAPPED(hhdr)) {
- if (size > nextsize) {
- GC_unmap((ptr_t)next, nextsize);
- GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
- } else {
- GC_remap((ptr_t)h, size);
- hhdr -> hb_flags &= ~WAS_UNMAPPED;
- hhdr -> hb_last_reclaimed = nexthdr -> hb_last_reclaimed;
- }
- } else if (!IS_MAPPED(hhdr) && !IS_MAPPED(nexthdr)) {
- /* Unmap any gap in the middle */
- GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
- }
- /* If they are both unmapped, we merge, but leave unmapped. */
- GC_remove_from_fl(hhdr, i);
- GC_remove_from_fl(nexthdr, FL_UNKNOWN);
- hhdr -> hb_sz += nexthdr -> hb_sz;
- GC_remove_header(next);
- GC_add_to_fl(h, hhdr);
- /* Start over at beginning of list */
- h = GC_hblkfreelist[i];
- } else /* not mergable with successor */ {
- h = hhdr -> hb_next;
- }
+ GET_HDR(h, hhdr);
+ size = hhdr->hb_sz;
+ next = (struct hblk *)((word)h + size);
+ GET_HDR(next, nexthdr);
+ /* Coalesce with successor, if possible */
+ if (0 != nexthdr && HBLK_IS_FREE(nexthdr)
+ && (signed_word) (size + (nextsize = nexthdr->hb_sz)) > 0
+ /* no pot. overflow */) {
+ /* Note that we usually try to avoid adjacent free blocks */
+ /* that are either both mapped or both unmapped. But that */
+ /* isn't guaranteed to hold since we remap blocks when we */
+ /* split them, and don't merge at that point. It may also */
+ /* not hold if the merged block would be too big. */
+ if (IS_MAPPED(hhdr) && !IS_MAPPED(nexthdr)) {
+ /* make both consistent, so that we can merge */
+ if (size > nextsize) {
+ GC_remap((ptr_t)next, nextsize);
+ } else {
+ GC_unmap((ptr_t)h, size);
+ GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
+ hhdr -> hb_flags |= WAS_UNMAPPED;
+ }
+ } else if (IS_MAPPED(nexthdr) && !IS_MAPPED(hhdr)) {
+ if (size > nextsize) {
+ GC_unmap((ptr_t)next, nextsize);
+ GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
+ } else {
+ GC_remap((ptr_t)h, size);
+ hhdr -> hb_flags &= ~WAS_UNMAPPED;
+ hhdr -> hb_last_reclaimed = nexthdr -> hb_last_reclaimed;
+ }
+ } else if (!IS_MAPPED(hhdr) && !IS_MAPPED(nexthdr)) {
+ /* Unmap any gap in the middle */
+ GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
+ }
+ /* If they are both unmapped, we merge, but leave unmapped. */
+ GC_remove_from_fl(hhdr, i);
+ GC_remove_from_fl(nexthdr, FL_UNKNOWN);
+ hhdr -> hb_sz += nexthdr -> hb_sz;
+ GC_remove_header(next);
+ GC_add_to_fl(h, hhdr);
+ /* Start over at beginning of list */
+ h = GC_hblkfreelist[i];
+ } else /* not mergable with successor */ {
+ h = hhdr -> hb_next;
+ }
} /* while (h != 0) ... */
} /* for ... */
}
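
The unmap test in GC_unmap_old above compares only the low 16 bits of the
collection number against hb_last_reclaimed, so a block's age is effectively a
wrap-around difference modulo 2**16, on the assumption that no block waits
anywhere near 2**16 collections before being examined. A minimal standalone
sketch of that aging check follows (not part of the patch; the sample numbers,
the sample threshold, and the explicit cast of the difference are illustrative
assumptions).

    #include <stdio.h>

    int main(void)
    {
        unsigned long gc_no = 65540UL;          /* current collection no. */
        unsigned short last_reclaimed = 65530U; /* stored low 16 bits     */
        unsigned threshold = 6;                 /* sample threshold value */

        /* 65540 mod 2**16 is 4; subtracting 65530 wraps around to an    */
        /* age of 10 collections since the block was last reclaimed.     */
        unsigned short age = (unsigned short)((unsigned short)gc_no
                                              - last_reclaimed);

        printf("age = %u -> %s\n", (unsigned)age,
               age > threshold ? "unmap candidate" : "keep mapped");
        return 0;
    }
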
@@ -487,7 +487,7 @@ void GC_merge_unmapped(void)
* If the return value is not 0, then hhdr is the header for it.
*/
STATIC struct hblk * GC_get_first_part(struct hblk *h, hdr *hhdr,
- size_t bytes, int index)
+ size_t bytes, int index)
{
word total_size = hhdr -> hb_sz;
struct hblk * rest;
@@ -499,9 +499,9 @@ STATIC struct hblk * GC_get_first_part(struct hblk *h, hdr *hhdr,
rest = (struct hblk *)((word)h + bytes);
rest_hdr = GC_install_header(rest);
if (0 == rest_hdr) {
- /* FIXME: This is likely to be very bad news ... */
- WARN("Header allocation failed: Dropping block.\n", 0);
- return(0);
+ /* FIXME: This is likely to be very bad news ... */
+ WARN("Header allocation failed: Dropping block.\n", 0);
+ return(0);
}
rest_hdr -> hb_sz = total_size - bytes;
rest_hdr -> hb_flags = 0;
@@ -528,7 +528,7 @@ STATIC struct hblk * GC_get_first_part(struct hblk *h, hdr *hhdr,
* rare enough that it doesn't matter. The code is cleaner this way.)
*/
STATIC void GC_split_block(struct hblk *h, hdr *hhdr, struct hblk *n,
- hdr *nhdr, int index /* Index of free list */)
+ hdr *nhdr, int index /* Index of free list */)
{
word total_size = hhdr -> hb_sz;
word h_size = (word)n - (word)h;
@@ -541,12 +541,12 @@ STATIC void GC_split_block(struct hblk *h, hdr *hhdr, struct hblk *n,
nhdr -> hb_sz = total_size - h_size;
nhdr -> hb_flags = 0;
if (0 != prev) {
- HDR(prev) -> hb_next = n;
+ HDR(prev) -> hb_next = n;
} else {
GC_hblkfreelist[index] = n;
}
if (0 != next) {
- HDR(next) -> hb_prev = n;
+ HDR(next) -> hb_prev = n;
}
INCR_FREE_BYTES(index, -(signed_word)h_size);
FREE_ASSERT(GC_free_bytes[index] > 0);
@@ -557,10 +557,10 @@ STATIC void GC_split_block(struct hblk *h, hdr *hhdr, struct hblk *n,
GC_add_to_fl(h, hhdr);
nhdr -> hb_flags |= FREE_BLK;
}
-
+
STATIC struct hblk *
GC_allochblk_nth(size_t sz/* bytes */, int kind, unsigned flags, int n,
- GC_bool may_split);
+ GC_bool may_split);
/*
* Allocate (and return pointer to) a heap block
@@ -578,8 +578,8 @@ GC_allochblk(size_t sz, int kind, unsigned flags/* IGNORE_OFF_PAGE or 0 */)
int start_list;
int i;
struct hblk *result;
- int split_limit; /* Highest index of free list whose blocks we */
- /* split. */
+ int split_limit; /* Highest index of free list whose blocks we */
+ /* split. */
GC_ASSERT((sz & (GRANULE_BYTES - 1)) == 0);
blocks = OBJ_SZ_TO_BLOCKS(sz);
@@ -591,38 +591,38 @@ GC_allochblk(size_t sz, int kind, unsigned flags/* IGNORE_OFF_PAGE or 0 */)
result = GC_allochblk_nth(sz, kind, flags, start_list, FALSE);
if (0 != result) return result;
if (GC_use_entire_heap || GC_dont_gc
- || USED_HEAP_SIZE < GC_requested_heapsize
+ || USED_HEAP_SIZE < GC_requested_heapsize
|| GC_incremental || !GC_should_collect()) {
- /* Should use more of the heap, even if it requires splitting. */
- split_limit = N_HBLK_FLS;
+ /* Should use more of the heap, even if it requires splitting. */
+ split_limit = N_HBLK_FLS;
} else {
# ifdef USE_MUNMAP
- /* avoid splitting, since that might require remapping */
- split_limit = 0;
+ /* avoid splitting, since that might require remapping */
+ split_limit = 0;
# else
- if (GC_finalizer_bytes_freed > (GC_heapsize >> 4)) {
- /* If we are deallocating lots of memory from */
- /* finalizers, fail and collect sooner rather */
- /* than later. */
- split_limit = 0;
- } else {
- /* If we have enough large blocks left to cover any */
- /* previous request for large blocks, we go ahead */
- /* and split. Assuming a steady state, that should */
- /* be safe. It means that we can use the full */
- /* heap if we allocate only small objects. */
+ if (GC_finalizer_bytes_freed > (GC_heapsize >> 4)) {
+ /* If we are deallocating lots of memory from */
+ /* finalizers, fail and collect sooner rather */
+ /* than later. */
+ split_limit = 0;
+ } else {
+ /* If we have enough large blocks left to cover any */
+ /* previous request for large blocks, we go ahead */
+ /* and split. Assuming a steady state, that should */
+ /* be safe. It means that we can use the full */
+ /* heap if we allocate only small objects. */
split_limit = GC_enough_large_bytes_left();
- }
+ }
# endif
}
if (start_list < UNIQUE_THRESHOLD) {
- /* No reason to try start_list again, since all blocks are exact */
- /* matches. */
+ /* No reason to try start_list again, since all blocks are exact */
+ /* matches. */
++start_list;
}
for (i = start_list; i <= split_limit; ++i) {
- struct hblk * result = GC_allochblk_nth(sz, kind, flags, i, TRUE);
- if (0 != result) return result;
+ struct hblk * result = GC_allochblk_nth(sz, kind, flags, i, TRUE);
+ if (0 != result) return result;
}
return 0;
}
@@ -636,153 +636,153 @@ STATIC struct hblk *
GC_allochblk_nth(size_t sz, int kind, unsigned flags, int n, GC_bool may_split)
{
struct hblk *hbp;
- hdr * hhdr; /* Header corr. to hbp */
- /* Initialized after loop if hbp !=0 */
- /* Gcc uninitialized use warning is bogus. */
+ hdr * hhdr; /* Header corr. to hbp */
+ /* Initialized after loop if hbp !=0 */
+ /* Gcc uninitialized use warning is bogus. */
struct hblk *thishbp;
- hdr * thishdr; /* Header corr. to hbp */
+ hdr * thishdr; /* Header corr. to hbp */
signed_word size_needed; /* number of bytes in requested objects */
- signed_word size_avail; /* bytes available in this block */
+ signed_word size_avail; /* bytes available in this block */
size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);
/* search for a big enough block in free list */
- hbp = GC_hblkfreelist[n];
- for(; 0 != hbp; hbp = hhdr -> hb_next) {
- GET_HDR(hbp, hhdr);
- size_avail = hhdr->hb_sz;
- if (size_avail < size_needed) continue;
- if (size_avail != size_needed) {
- signed_word next_size;
-
- if (!may_split) continue;
- /* If the next heap block is obviously better, go on. */
- /* This prevents us from disassembling a single large block */
- /* to get tiny blocks. */
- thishbp = hhdr -> hb_next;
- if (thishbp != 0) {
- GET_HDR(thishbp, thishdr);
- next_size = (signed_word)(thishdr -> hb_sz);
- if (next_size < size_avail
- && next_size >= size_needed
- && !GC_is_black_listed(thishbp, (word)size_needed)) {
- continue;
- }
- }
- }
- if ( !IS_UNCOLLECTABLE(kind) &&
- (kind != PTRFREE || size_needed > MAX_BLACK_LIST_ALLOC)) {
- struct hblk * lasthbp = hbp;
- ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
- signed_word orig_avail = size_avail;
- signed_word eff_size_needed = ((flags & IGNORE_OFF_PAGE)?
- HBLKSIZE
- : size_needed);
-
-
- while ((ptr_t)lasthbp <= search_end
- && (thishbp = GC_is_black_listed(lasthbp,
- (word)eff_size_needed))
- != 0) {
- lasthbp = thishbp;
- }
- size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
- thishbp = lasthbp;
- if (size_avail >= size_needed) {
- if (thishbp != hbp &&
- 0 != (thishdr = GC_install_header(thishbp))) {
- /* Make sure it's mapped before we mangle it. */
-# ifdef USE_MUNMAP
- if (!IS_MAPPED(hhdr)) {
- GC_remap((ptr_t)hbp, hhdr -> hb_sz);
- hhdr -> hb_flags &= ~WAS_UNMAPPED;
- }
-# endif
- /* Split the block at thishbp */
- GC_split_block(hbp, hhdr, thishbp, thishdr, n);
- /* Advance to thishbp */
- hbp = thishbp;
- hhdr = thishdr;
- /* We must now allocate thishbp, since it may */
- /* be on the wrong free list. */
- }
- } else if (size_needed > (signed_word)BL_LIMIT
- && orig_avail - size_needed
- > (signed_word)BL_LIMIT) {
- /* Punt, since anything else risks unreasonable heap growth. */
- if (++GC_large_alloc_warn_suppressed
- >= GC_large_alloc_warn_interval) {
- WARN("Repeated allocation of very large block "
- "(appr. size %" GC_PRIdPTR "):\n"
- "\tMay lead to memory leak and poor performance.\n",
- size_needed);
- GC_large_alloc_warn_suppressed = 0;
- }
- size_avail = orig_avail;
- } else if (size_avail == 0 && size_needed == HBLKSIZE
- && IS_MAPPED(hhdr)) {
- if (!GC_find_leak) {
- static unsigned count = 0;
-
- /* The block is completely blacklisted. We need */
- /* to drop some such blocks, since otherwise we spend */
- /* all our time traversing them if pointerfree */
- /* blocks are unpopular. */
- /* A dropped block will be reconsidered at next GC. */
- if ((++count & 3) == 0) {
- /* Allocate and drop the block in small chunks, to */
- /* maximize the chance that we will recover some */
- /* later. */
- word total_size = hhdr -> hb_sz;
- struct hblk * limit = hbp + divHBLKSZ(total_size);
- struct hblk * h;
- struct hblk * prev = hhdr -> hb_prev;
-
- GC_large_free_bytes -= total_size;
- GC_bytes_dropped += total_size;
- GC_remove_from_fl(hhdr, n);
- for (h = hbp; h < limit; h++) {
- if (h == hbp || 0 != (hhdr = GC_install_header(h))) {
- (void) setup_header(
- hhdr, h,
- HBLKSIZE,
- PTRFREE, 0); /* Cant fail */
- if (GC_debugging_started) {
- BZERO(h, HBLKSIZE);
- }
- }
- }
- /* Restore hbp to point at free block */
- hbp = prev;
- if (0 == hbp) {
- return GC_allochblk_nth(sz, kind, flags, n, may_split);
- }
- hhdr = HDR(hbp);
- }
- }
- }
- }
- if( size_avail >= size_needed ) {
-# ifdef USE_MUNMAP
- if (!IS_MAPPED(hhdr)) {
- GC_remap((ptr_t)hbp, hhdr -> hb_sz);
- hhdr -> hb_flags &= ~WAS_UNMAPPED;
- /* Note: This may leave adjacent, mapped free blocks. */
- }
-# endif
- /* hbp may be on the wrong freelist; the parameter n */
- /* is important. */
- hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
- break;
- }
- }
+ hbp = GC_hblkfreelist[n];
+ for(; 0 != hbp; hbp = hhdr -> hb_next) {
+ GET_HDR(hbp, hhdr);
+ size_avail = hhdr->hb_sz;
+ if (size_avail < size_needed) continue;
+ if (size_avail != size_needed) {
+ signed_word next_size;
+
+ if (!may_split) continue;
+ /* If the next heap block is obviously better, go on. */
+ /* This prevents us from disassembling a single large block */
+ /* to get tiny blocks. */
+ thishbp = hhdr -> hb_next;
+ if (thishbp != 0) {
+ GET_HDR(thishbp, thishdr);
+ next_size = (signed_word)(thishdr -> hb_sz);
+ if (next_size < size_avail
+ && next_size >= size_needed
+ && !GC_is_black_listed(thishbp, (word)size_needed)) {
+ continue;
+ }
+ }
+ }
+ if ( !IS_UNCOLLECTABLE(kind) &&
+ (kind != PTRFREE || size_needed > MAX_BLACK_LIST_ALLOC)) {
+ struct hblk * lasthbp = hbp;
+ ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
+ signed_word orig_avail = size_avail;
+ signed_word eff_size_needed = ((flags & IGNORE_OFF_PAGE)?
+ HBLKSIZE
+ : size_needed);
+
+
+ while ((ptr_t)lasthbp <= search_end
+ && (thishbp = GC_is_black_listed(lasthbp,
+ (word)eff_size_needed))
+ != 0) {
+ lasthbp = thishbp;
+ }
+ size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
+ thishbp = lasthbp;
+ if (size_avail >= size_needed) {
+ if (thishbp != hbp &&
+ 0 != (thishdr = GC_install_header(thishbp))) {
+ /* Make sure it's mapped before we mangle it. */
+# ifdef USE_MUNMAP
+ if (!IS_MAPPED(hhdr)) {
+ GC_remap((ptr_t)hbp, hhdr -> hb_sz);
+ hhdr -> hb_flags &= ~WAS_UNMAPPED;
+ }
+# endif
+ /* Split the block at thishbp */
+ GC_split_block(hbp, hhdr, thishbp, thishdr, n);
+ /* Advance to thishbp */
+ hbp = thishbp;
+ hhdr = thishdr;
+ /* We must now allocate thishbp, since it may */
+ /* be on the wrong free list. */
+ }
+ } else if (size_needed > (signed_word)BL_LIMIT
+ && orig_avail - size_needed
+ > (signed_word)BL_LIMIT) {
+ /* Punt, since anything else risks unreasonable heap growth. */
+ if (++GC_large_alloc_warn_suppressed
+ >= GC_large_alloc_warn_interval) {
+ WARN("Repeated allocation of very large block "
+ "(appr. size %" GC_PRIdPTR "):\n"
+ "\tMay lead to memory leak and poor performance.\n",
+ size_needed);
+ GC_large_alloc_warn_suppressed = 0;
+ }
+ size_avail = orig_avail;
+ } else if (size_avail == 0 && size_needed == HBLKSIZE
+ && IS_MAPPED(hhdr)) {
+ if (!GC_find_leak) {
+ static unsigned count = 0;
+
+ /* The block is completely blacklisted. We need */
+ /* to drop some such blocks, since otherwise we spend */
+ /* all our time traversing them if pointerfree */
+ /* blocks are unpopular. */
+ /* A dropped block will be reconsidered at next GC. */
+ if ((++count & 3) == 0) {
+ /* Allocate and drop the block in small chunks, to */
+ /* maximize the chance that we will recover some */
+ /* later. */
+ word total_size = hhdr -> hb_sz;
+ struct hblk * limit = hbp + divHBLKSZ(total_size);
+ struct hblk * h;
+ struct hblk * prev = hhdr -> hb_prev;
+
+ GC_large_free_bytes -= total_size;
+ GC_bytes_dropped += total_size;
+ GC_remove_from_fl(hhdr, n);
+ for (h = hbp; h < limit; h++) {
+ if (h == hbp || 0 != (hhdr = GC_install_header(h))) {
+ (void) setup_header(
+ hhdr, h,
+ HBLKSIZE,
+ PTRFREE, 0); /* Cant fail */
+ if (GC_debugging_started) {
+ BZERO(h, HBLKSIZE);
+ }
+ }
+ }
+ /* Restore hbp to point at free block */
+ hbp = prev;
+ if (0 == hbp) {
+ return GC_allochblk_nth(sz, kind, flags, n, may_split);
+ }
+ hhdr = HDR(hbp);
+ }
+ }
+ }
+ }
+ if( size_avail >= size_needed ) {
+# ifdef USE_MUNMAP
+ if (!IS_MAPPED(hhdr)) {
+ GC_remap((ptr_t)hbp, hhdr -> hb_sz);
+ hhdr -> hb_flags &= ~WAS_UNMAPPED;
+ /* Note: This may leave adjacent, mapped free blocks. */
+ }
+# endif
+ /* hbp may be on the wrong freelist; the parameter n */
+ /* is important. */
+ hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
+ break;
+ }
+ }
if (0 == hbp) return 0;
-
+
/* Add it to map of valid blocks */
- if (!GC_install_counts(hbp, (word)size_needed)) return(0);
- /* This leaks memory under very rare conditions. */
-
+ if (!GC_install_counts(hbp, (word)size_needed)) return(0);
+ /* This leaks memory under very rare conditions. */
+
/* Set up header */
if (!setup_header(hhdr, hbp, sz, kind, flags)) {
GC_remove_counts(hbp, (word)size_needed);
@@ -792,25 +792,25 @@ GC_allochblk_nth(size_t sz, int kind, unsigned flags, int n, GC_bool may_split)
/* Notify virtual dirty bit implementation that we are about to write. */
/* Ensure that pointerfree objects are not protected if it's avoidable. */
/* This also ensures that newly allocated blocks are treated as dirty. */
- /* Necessary since we don't protect free blocks. */
- GC_ASSERT((size_needed & (HBLKSIZE-1)) == 0);
- GC_remove_protection(hbp, divHBLKSZ(size_needed),
- (hhdr -> hb_descr == 0) /* pointer-free */);
-
- /* We just successfully allocated a block. Restart count of */
- /* consecutive failures. */
+ /* Necessary since we don't protect free blocks. */
+ GC_ASSERT((size_needed & (HBLKSIZE-1)) == 0);
+ GC_remove_protection(hbp, divHBLKSZ(size_needed),
+ (hhdr -> hb_descr == 0) /* pointer-free */);
+
+ /* We just successfully allocated a block. Restart count of */
+ /* consecutive failures. */
{
- extern unsigned GC_fail_count;
-
- GC_fail_count = 0;
+ extern unsigned GC_fail_count;
+
+ GC_fail_count = 0;
}
GC_large_free_bytes -= size_needed;
-
+
GC_ASSERT(IS_MAPPED(hhdr));
return( hbp );
}
-
+
/*
* Free a heap block.
*
@@ -833,17 +833,17 @@ signed_word size;
ABORT("Deallocating excessively large block. Too large an allocation?");
/* Probably possible if we try to allocate more than half the address */
/* space at once. If we dont catch it here, strange things happen */
- /* later. */
+ /* later. */
GC_remove_counts(hbp, (word)size);
hhdr->hb_sz = size;
# ifdef USE_MUNMAP
hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
# endif
-
+
/* Check for duplicate deallocation in the easy case */
if (HBLK_IS_FREE(hhdr)) {
GC_printf("Duplicate large block deallocation of %p\n", hbp);
- ABORT("Duplicate large block deallocation");
+ ABORT("Duplicate large block deallocation");
}
GC_ASSERT(IS_MAPPED(hhdr));
@@ -854,31 +854,30 @@ signed_word size;
/* Coalesce with successor, if possible */
if(0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)
&& (signed_word)(hhdr -> hb_sz + nexthdr -> hb_sz) > 0
- /* no overflow */) {
- GC_remove_from_fl(nexthdr, FL_UNKNOWN);
- hhdr -> hb_sz += nexthdr -> hb_sz;
- GC_remove_header(next);
+ /* no overflow */) {
+ GC_remove_from_fl(nexthdr, FL_UNKNOWN);
+ hhdr -> hb_sz += nexthdr -> hb_sz;
+ GC_remove_header(next);
}
/* Coalesce with predecessor, if possible. */
if (0 != prev) {
- prevhdr = HDR(prev);
- if (IS_MAPPED(prevhdr)
- && (signed_word)(hhdr -> hb_sz + prevhdr -> hb_sz) > 0) {
- GC_remove_from_fl(prevhdr, FL_UNKNOWN);
- prevhdr -> hb_sz += hhdr -> hb_sz;
-# ifdef USE_MUNMAP
- prevhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
-# endif
- GC_remove_header(hbp);
- hbp = prev;
- hhdr = prevhdr;
- }
+ prevhdr = HDR(prev);
+ if (IS_MAPPED(prevhdr)
+ && (signed_word)(hhdr -> hb_sz + prevhdr -> hb_sz) > 0) {
+ GC_remove_from_fl(prevhdr, FL_UNKNOWN);
+ prevhdr -> hb_sz += hhdr -> hb_sz;
+# ifdef USE_MUNMAP
+ prevhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
+# endif
+ GC_remove_header(hbp);
+ hbp = prev;
+ hhdr = prevhdr;
+ }
}
- /* FIXME: It is not clear we really always want to do these merges */
- /* with -DUSE_MUNMAP, since it updates ages and hence prevents */
- /* unmapping. */
+ /* FIXME: It is not clear we really always want to do these merges */
+ /* with -DUSE_MUNMAP, since it updates ages and hence prevents */
+ /* unmapping. */
GC_large_free_bytes += size;
- GC_add_to_fl(hbp, hhdr);
+ GC_add_to_fl(hbp, hhdr);
}
-
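
Both GC_merge_unmapped and GC_freehblk above guard coalescing with a test of
the form (signed_word)(size1 + size2) > 0, skipping the merge when the
combined size would no longer fit in the signed range. A minimal standalone
sketch of why that works follows (not part of the patch; the 32-bit types and
the sample sizes are illustrative assumptions, and the result of the signed
reinterpretation assumes the usual two's-complement behavior).

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t a = 0x60000000u;      /* hypothetical block size     */
        uint32_t b = 0x30000000u;      /* hypothetical neighbour size */
        uint32_t sum = a + b;          /* 0x90000000: top bit now set */

        if ((int32_t)sum > 0) {
            printf("merge: combined size 0x%lx\n", (unsigned long)sum);
        } else {
            printf("skip merge: combined size exceeds the signed range\n");
        }
        return 0;
    }
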