author     Ivan Maidanski <ivmai@mail.ru>    2020-06-21 14:36:01 +0300
committer  Ivan Maidanski <ivmai@mail.ru>    2020-06-21 14:36:01 +0300
commit     45a7c3594f10ddb2cbf851a65d96379a511678ad (patch)
tree       d1e7d7e58e7f602a98e8511ea0a2bd12e656ffc8
parent     e7819c6dee3e15447dce87104a6301266b4c9ad1 (diff)
download   bdwgc-45a7c3594f10ddb2cbf851a65d96379a511678ad.tar.gz
Add GC_ prefix to scan_ptr and some other static variables
(code refactoring)

The following variables are prefixed: scratch_free_ptr, hdr_free_list,
scan_ptr, main_local_mark_stack, roots_were_cleared.

* doc/gcdescr.md (Mark phase): Add GC_ prefix to scan_ptr.
* headers.c (scratch_free_ptr, hdr_free_list): Add GC_ prefix to name;
  change static to STATIC.
* mark.c (scan_ptr): Likewise.
* mark.c [PARALLEL_MARK] (main_local_mark_stack): Likewise.
* mark_rts.c (roots_were_cleared): Likewise.
* headers.c (GC_scratch_alloc): Add GC_ prefix to scratch_free_ptr.
* headers.c (alloc_hdr, free_hdr): Add GC_ prefix to hdr_free_list.
* headers.c (GC_init_headers): Add assertion that GC_all_nils is null
  on entry.
* include/private/gc_pmark.h (MS_PUSH_RESCUERS, MS_PUSH_UNCOLLECTABLE,
  MS_PARTIALLY_INVALID): Add GC_ prefix to scan_ptr.
* mark.c (GC_clear_marks, GC_initiate_gc, alloc_mark_stack): Likewise.
* mark.c [WRAP_MARK_SOME] (GC_mark_some): Likewise.
* mark.c [PARALLEL_MARK] (GC_wait_for_markers_init, GC_do_parallel_mark):
  Add GC_ prefix to main_local_mark_stack.
* mark_rts.c (GC_clear_roots, GC_push_roots): Add GC_ prefix to
  roots_were_cleared.
* mark_rts.c (GC_next_exclusion): Add assertion that
  GC_excl_table_entries is positive.
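For readers new to the bdwgc tree, the rename matters because these variables are no longer plain static: the STATIC qualifier is assumed here (as in include/private/gc_priv.h) to expand to nothing in assertion-enabled builds so the symbols stay visible to a debugger, which is what makes a collision-free GC_ prefix necessary. The sketch below only illustrates that assumed convention; the exact macro definition may differ, and ptr_t is re-declared locally just to keep the snippet compilable on its own.

/* Illustrative sketch of the assumed STATIC convention; not copied      */
/* from the bdwgc sources.                                               */
#ifndef STATIC
# ifdef GC_ASSERTIONS
#   define STATIC       /* drop static so the symbol is debugger-visible */
# else
#   define STATIC static
# endif
#endif

typedef char * ptr_t;   /* local stand-in for the bdwgc typedef */

STATIC ptr_t GC_scratch_free_ptr = 0;
                        /* was: static ptr_t scratch_free_ptr = 0; */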
-rw-r--r--  doc/gcdescr.md               2
-rw-r--r--  headers.c                   29
-rw-r--r--  include/private/gc_pmark.h  16
-rw-r--r--  mark.c                      37
-rw-r--r--  mark_rts.c                  10
5 files changed, 49 insertions, 45 deletions
diff --git a/doc/gcdescr.md b/doc/gcdescr.md
index 4024fd85..e06f399d 100644
--- a/doc/gcdescr.md
+++ b/doc/gcdescr.md
@@ -198,7 +198,7 @@ progression of mark states for a stop-the-world collection is:
In this case `GC_objects_are_marked` will simultaneously be false, so the
mark state is advanced to
2. `MS_PUSH_UNCOLLECTABLE` indicating that it suffices to push uncollectible
- objects, roots, and then mark everything reachable from them. `scan_ptr`
+ objects, roots, and then mark everything reachable from them. `GC_scan_ptr`
is advanced through the heap until all uncollectible objects are pushed, and
objects reachable from them are marked. At that point, the next call
to `GC_mark_some` calls `GC_push_roots` to push the roots. It, then,
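As a reading aid, the following self-contained sketch condenses the scan step described in the paragraph above; it is pieced together from the mark.c hunks further down, the helper declarations are hypothetical stand-ins, and the real GC_mark_some handles several more states plus the mark-stack draining elided here.

#include <stddef.h>

struct hblk;                          /* opaque heap block                */
typedef int mark_state_t;
#define MS_PUSH_UNCOLLECTABLE 2
#define MS_ROOTS_PUSHED       3

static mark_state_t GC_mark_state = MS_PUSH_UNCOLLECTABLE;
static struct hblk *GC_scan_ptr;      /* where the heap scan resumes      */

/* Stand-ins: scan one block and return the next one (NULL once the      */
/* whole heap has been covered); push the root set onto the mark stack.  */
struct hblk *GC_push_next_marked_uncollectable(struct hblk *h);
void GC_push_roots(int all, void *cold_gc_frame);

/* One incremental step: advance GC_scan_ptr through the heap; once it   */
/* becomes NULL, every uncollectible object has been pushed, so push     */
/* the roots and move to the next mark state.                            */
static void mark_step_sketch(void *cold_gc_frame)
{
    GC_scan_ptr = GC_push_next_marked_uncollectable(GC_scan_ptr);
    if (NULL == GC_scan_ptr) {
        GC_push_roots(1, cold_gc_frame);
        GC_mark_state = MS_ROOTS_PUSHED;
    }
}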
diff --git a/headers.c b/headers.c
index 1b3b3ebb..6a353686 100644
--- a/headers.c
+++ b/headers.c
@@ -110,20 +110,20 @@ GC_INNER hdr *
/* Routines to dynamically allocate collector data structures that will */
/* never be freed. */
-static ptr_t scratch_free_ptr = 0;
+STATIC ptr_t GC_scratch_free_ptr = 0;
/* GC_scratch_last_end_ptr is end point of last obtained scratch area. */
/* GC_scratch_end_ptr is end point of current scratch area. */
GC_INNER ptr_t GC_scratch_alloc(size_t bytes)
{
- ptr_t result = scratch_free_ptr;
+ ptr_t result = GC_scratch_free_ptr;
size_t bytes_to_get;
bytes = ROUNDUP_GRANULE_SIZE(bytes);
for (;;) {
- scratch_free_ptr += bytes;
- if ((word)scratch_free_ptr <= (word)GC_scratch_end_ptr) {
+ GC_scratch_free_ptr += bytes;
+ if ((word)GC_scratch_free_ptr <= (word)GC_scratch_end_ptr) {
/* Unallocated space of scratch buffer has enough size. */
return result;
}
@@ -133,7 +133,7 @@ GC_INNER ptr_t GC_scratch_alloc(size_t bytes)
result = (ptr_t)GET_MEM(bytes_to_get);
GC_add_to_our_memory(result, bytes_to_get);
/* Undo scratch free area pointer update; get memory directly. */
- scratch_free_ptr -= bytes;
+ GC_scratch_free_ptr -= bytes;
if (result != NULL) {
/* Update end point of last obtained area (needed only */
/* by GC_register_dynamic_libraries for some targets). */
@@ -149,39 +149,39 @@ GC_INNER ptr_t GC_scratch_alloc(size_t bytes)
if (NULL == result) {
WARN("Out of memory - trying to allocate requested amount"
" (%" WARN_PRIdPTR " bytes)...\n", (word)bytes);
- scratch_free_ptr -= bytes; /* Undo free area pointer update */
+ GC_scratch_free_ptr -= bytes; /* Undo free area pointer update */
bytes_to_get = ROUNDUP_PAGESIZE_IF_MMAP(bytes);
result = (ptr_t)GET_MEM(bytes_to_get);
GC_add_to_our_memory(result, bytes_to_get);
return result;
}
/* Update scratch area pointers and retry. */
- scratch_free_ptr = result;
- GC_scratch_end_ptr = scratch_free_ptr + bytes_to_get;
+ GC_scratch_free_ptr = result;
+ GC_scratch_end_ptr = GC_scratch_free_ptr + bytes_to_get;
GC_scratch_last_end_ptr = GC_scratch_end_ptr;
}
}
-static hdr * hdr_free_list = 0;
+STATIC hdr * GC_hdr_free_list = 0;
/* Return an uninitialized header */
static hdr * alloc_hdr(void)
{
hdr * result;
- if (NULL == hdr_free_list) {
+ if (NULL == GC_hdr_free_list) {
result = (hdr *)GC_scratch_alloc(sizeof(hdr));
} else {
- result = hdr_free_list;
- hdr_free_list = (hdr *) (result -> hb_next);
+ result = GC_hdr_free_list;
+ GC_hdr_free_list = (hdr *) result -> hb_next;
}
return(result);
}
GC_INLINE void free_hdr(hdr * hhdr)
{
- hhdr -> hb_next = (struct hblk *) hdr_free_list;
- hdr_free_list = hhdr;
+ hhdr -> hb_next = (struct hblk *) GC_hdr_free_list;
+ GC_hdr_free_list = hhdr;
}
#ifdef COUNT_HDR_CACHE_HITS
@@ -194,6 +194,7 @@ GC_INNER void GC_init_headers(void)
{
unsigned i;
+ GC_ASSERT(NULL == GC_all_nils);
GC_all_nils = (bottom_index *)GC_scratch_alloc(sizeof(bottom_index));
if (GC_all_nils == NULL) {
GC_err_printf("Insufficient memory for GC_all_nils\n");
diff --git a/include/private/gc_pmark.h b/include/private/gc_pmark.h
index 15e79860..47827d8f 100644
--- a/include/private/gc_pmark.h
+++ b/include/private/gc_pmark.h
@@ -469,19 +469,21 @@ typedef int mark_state_t; /* Current state of marking, as follows:*/
/* being pushed. "I" holds, except */
/* that grungy roots may point to */
/* unmarked objects, as may marked */
- /* grungy objects above scan_ptr. */
+ /* grungy objects above GC_scan_ptr. */
#define MS_PUSH_UNCOLLECTABLE 2 /* "I" holds, except that marked */
- /* uncollectible objects above scan_ptr */
- /* may point to unmarked objects. */
- /* Roots may point to unmarked objects */
+ /* uncollectible objects above */
+ /* GC_scan_ptr may point to unmarked */
+ /* objects. Roots may point to */
+ /* unmarked objects. */
#define MS_ROOTS_PUSHED 3 /* "I" holds, mark stack may be nonempty. */
#define MS_PARTIALLY_INVALID 4 /* "I" may not hold, e.g. because of */
- /* the mark stack overflow. However */
- /* marked heap objects below scan_ptr */
- /* point to marked or stacked objects. */
+ /* the mark stack overflow. However, */
+ /* marked heap objects below */
+ /* GC_scan_ptr point to marked or */
+ /* stacked objects. */
#define MS_INVALID 5 /* "I" may not hold. */
diff --git a/mark.c b/mark.c
index 1b2cfd6b..f27d7c7f 100644
--- a/mark.c
+++ b/mark.c
@@ -113,7 +113,7 @@ GC_INNER mark_state_t GC_mark_state = MS_NONE;
GC_INNER GC_bool GC_mark_stack_too_small = FALSE;
-static struct hblk * scan_ptr;
+STATIC struct hblk * GC_scan_ptr;
STATIC GC_bool GC_objects_are_marked = FALSE;
/* Are there collectible marked objects in the heap? */
@@ -233,7 +233,7 @@ GC_INNER void GC_clear_marks(void)
GC_apply_to_all_blocks(clear_marks_for_block, (word)0);
GC_objects_are_marked = FALSE;
GC_mark_state = MS_INVALID;
- scan_ptr = 0;
+ GC_scan_ptr = NULL;
}
/* Initiate a garbage collection. Initiates a full collection if the */
@@ -257,7 +257,7 @@ GC_INNER void GC_initiate_gc(void)
} else if (GC_mark_state != MS_INVALID) {
ABORT("Unexpected state");
} /* Else this is really a full collection, and mark bits are invalid. */
- scan_ptr = 0;
+ GC_scan_ptr = NULL;
}
#ifdef PARALLEL_MARK
@@ -311,8 +311,8 @@ static void alloc_mark_stack(size_t);
MARK_FROM_MARK_STACK();
break;
} else {
- scan_ptr = GC_push_next_marked_dirty(scan_ptr);
- if (scan_ptr == 0) {
+ GC_scan_ptr = GC_push_next_marked_dirty(GC_scan_ptr);
+ if (NULL == GC_scan_ptr) {
# if !defined(GC_DISABLE_INCREMENTAL)
GC_COND_LOG_PRINTF("Marked from %lu dirty pages\n",
(unsigned long)GC_n_rescuing_pages);
@@ -337,8 +337,8 @@ static void alloc_mark_stack(size_t);
MARK_FROM_MARK_STACK();
break;
} else {
- scan_ptr = GC_push_next_marked_uncollectable(scan_ptr);
- if (scan_ptr == 0) {
+ GC_scan_ptr = GC_push_next_marked_uncollectable(GC_scan_ptr);
+ if (NULL == GC_scan_ptr) {
GC_push_roots(TRUE, cold_gc_frame);
GC_objects_are_marked = TRUE;
if (GC_mark_state != MS_INVALID) {
@@ -391,7 +391,7 @@ static void alloc_mark_stack(size_t);
MARK_FROM_MARK_STACK();
break;
}
- if (scan_ptr == 0 && GC_mark_state == MS_INVALID) {
+ if (NULL == GC_scan_ptr && GC_mark_state == MS_INVALID) {
/* About to start a heap scan for marked objects. */
/* Mark stack is empty. OK to reallocate. */
if (GC_mark_stack_too_small) {
@@ -399,8 +399,8 @@ static void alloc_mark_stack(size_t);
}
GC_mark_state = MS_PARTIALLY_INVALID;
}
- scan_ptr = GC_push_next_marked(scan_ptr);
- if (scan_ptr == 0 && GC_mark_state == MS_PARTIALLY_INVALID) {
+ GC_scan_ptr = GC_push_next_marked(GC_scan_ptr);
+ if (NULL == GC_scan_ptr && GC_mark_state == MS_PARTIALLY_INVALID) {
GC_push_roots(TRUE, cold_gc_frame);
GC_objects_are_marked = TRUE;
if (GC_mark_state != MS_INVALID) {
@@ -578,8 +578,7 @@ handle_ex:
STOP_WORLD();
# endif
GC_invalidate_mark_state();
- scan_ptr = 0;
-
+ GC_scan_ptr = NULL;
ret_val = FALSE;
goto rm_handler; /* Back to platform-specific code. */
}
@@ -906,7 +905,7 @@ STATIC unsigned GC_active_count = 0; /* Number of active helpers. */
GC_INNER word GC_mark_no = 0;
-static mse *main_local_mark_stack;
+STATIC mse *GC_main_local_mark_stack;
#ifdef LINT2
# define LOCAL_MARK_STACK_SIZE (HBLKSIZE / 8)
@@ -928,17 +927,17 @@ GC_INNER void GC_wait_for_markers_init(void)
/* Allocate the local mark stack for the thread that holds GC lock. */
# ifndef CAN_HANDLE_FORK
- GC_ASSERT(NULL == main_local_mark_stack);
+ GC_ASSERT(NULL == GC_main_local_mark_stack);
# else
- if (NULL == main_local_mark_stack)
+ if (NULL == GC_main_local_mark_stack)
# endif
{
size_t bytes_to_get =
ROUNDUP_PAGESIZE_IF_MMAP(LOCAL_MARK_STACK_SIZE * sizeof(mse));
- main_local_mark_stack = (mse *)GET_MEM(bytes_to_get);
- if (NULL == main_local_mark_stack)
+ GC_main_local_mark_stack = (mse *)GET_MEM(bytes_to_get);
+ if (NULL == GC_main_local_mark_stack)
ABORT("Insufficient memory for main local_mark_stack");
- GC_add_to_our_memory((ptr_t)main_local_mark_stack, bytes_to_get);
+ GC_add_to_our_memory((ptr_t)GC_main_local_mark_stack, bytes_to_get);
}
/* Reuse marker lock and builders count to synchronize */
@@ -1207,7 +1206,7 @@ STATIC void GC_do_parallel_mark(void)
GC_help_wanted = TRUE;
GC_notify_all_marker();
/* Wake up potential helpers. */
- GC_mark_local(main_local_mark_stack, 0);
+ GC_mark_local(GC_main_local_mark_stack, 0);
GC_help_wanted = FALSE;
/* Done; clean up. */
while (GC_helper_count > 0) {
diff --git a/mark_rts.c b/mark_rts.c
index edc806a0..8b190014 100644
--- a/mark_rts.c
+++ b/mark_rts.c
@@ -271,7 +271,7 @@ void GC_add_roots_inner(ptr_t b, ptr_t e, GC_bool tmp)
n_root_sets++;
}
-static GC_bool roots_were_cleared = FALSE;
+STATIC GC_bool GC_roots_were_cleared = FALSE;
GC_API void GC_CALL GC_clear_roots(void)
{
@@ -279,7 +279,7 @@ GC_API void GC_CALL GC_clear_roots(void)
if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
LOCK();
- roots_were_cleared = TRUE;
+ GC_roots_were_cleared = TRUE;
n_root_sets = 0;
GC_root_size = 0;
# if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
@@ -545,8 +545,10 @@ GC_API void GC_CALL GC_clear_exclusion_table(void)
STATIC struct exclusion * GC_next_exclusion(ptr_t start_addr)
{
size_t low = 0;
- size_t high = GC_excl_table_entries - 1;
+ size_t high;
+ GC_ASSERT(GC_excl_table_entries > 0);
+ high = GC_excl_table_entries - 1;
while (high > low) {
size_t mid = (low + high) >> 1;
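A note on the assertion added in this hunk: high is a size_t, so on an empty exclusion table the expression GC_excl_table_entries - 1 wraps around to SIZE_MAX instead of going negative, and an unguarded binary search would index far past the table. The standalone demo below (not bdwgc code) shows the wrap-around.

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    size_t entries = 0;            /* empty exclusion table       */
    size_t high = entries - 1;     /* wraps around to SIZE_MAX    */

    printf("high = %zu\n", high);  /* prints a huge value, not -1 */
    return 0;
}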
@@ -924,7 +926,7 @@ GC_INNER void GC_push_roots(GC_bool all, ptr_t cold_gc_frame GC_ATTR_UNUSED)
/* Mark from GC internal roots if those might otherwise have */
/* been excluded. */
- if (GC_no_dls || roots_were_cleared) {
+ if (GC_no_dls || GC_roots_were_cleared) {
GC_push_gc_structures();
}