summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--ChangeLog952
-rw-r--r--backgraph.c254
-rw-r--r--blacklst.c143
-rw-r--r--checksums.c120
-rw-r--r--darwin_stop_world.c653
-rw-r--r--dbg_mlc.c566
-rw-r--r--doc/README37
-rw-r--r--doc/README.DGUX386168
-rw-r--r--doc/README.autoconf6
-rw-r--r--doc/README.environment194
-rw-r--r--doc/README.macros115
-rw-r--r--doc/README.win321
-rw-r--r--dyn_load.c760
-rw-r--r--finalize.c656
-rw-r--r--gc_cpp.cc40
-rw-r--r--gc_dlopen.c31
-rw-r--r--gcj_mlc.c182
-rw-r--r--headers.c206
-rw-r--r--include/gc_allocator.h14
-rw-r--r--include/gc_backptr.h47
-rw-r--r--include/gc_config_macros.h85
-rw-r--r--include/gc_cpp.h100
-rw-r--r--include/gc_gcj.h94
-rw-r--r--include/gc_inline.h130
-rw-r--r--include/gc_mark.h257
-rw-r--r--include/gc_pthread_redirects.h35
-rw-r--r--include/gc_typed.h91
-rw-r--r--include/gc_version.h23
-rw-r--r--include/javaxfc.h17
-rw-r--r--include/new_gc_alloc.h256
-rw-r--r--include/private/darwin_semaphore.h23
-rw-r--r--include/private/dbg_mlc.h140
-rw-r--r--include/private/gc_hdrs.h146
-rw-r--r--include/private/gc_locks.h112
-rw-r--r--include/private/gc_pmark.h383
-rw-r--r--include/private/gcconfig.h1500
-rw-r--r--include/private/pthread_support.h101
-rw-r--r--include/private/thread_local_alloc.h106
-rw-r--r--mach_dep.c202
-rw-r--r--malloc.c368
-rw-r--r--mallocx.c558
-rw-r--r--new_hblk.c87
-rw-r--r--obj_map.c24
-rw-r--r--ptr_chck.c260
-rw-r--r--real_malloc.c7
-rw-r--r--reclaim.c376
-rw-r--r--stubborn.c10
-rw-r--r--tests/test.c684
-rw-r--r--tests/test_cpp.cc50
-rw-r--r--thread_local_alloc.c241
-rw-r--r--typd_mlc.c472
51 files changed, 6168 insertions, 5915 deletions
diff --git a/ChangeLog b/ChangeLog
index b74cc71f..ed9162c1 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,69 @@
+2009-09-16 Ivan Maidanski <ivmai@mail.ru>
+
+ * ChangeLog: Remove trailing spaces at EOLn; insert blank lines
+ where missed.
+ * doc/README: Expand all tabs to spaces; remove trailing spaces at
+ EOLn; remove multiple trailing blank lines.
+ * doc/README.autoconf: Ditto.
+ * doc/README.DGUX386: Ditto.
+ * doc/README.environment: Ditto.
+ * doc/README.macros: Ditto.
+ * doc/README.win32: Ditto.
+ * tests/test.c: Ditto.
+ * tests/test_cpp.cc: Ditto.
+ * backgraph.c: Ditto.
+ * blacklst.c: Ditto.
+ * checksums.c: Ditto.
+ * darwin_stop_world.c: Ditto.
+ * dbg_mlc.c: Ditto.
+ * dyn_load.c: Ditto.
+ * finalize.c: Ditto.
+ * gc_dlopen.c: Ditto.
+ * gcj_mlc.c: Ditto.
+ * headers.c: Ditto.
+ * mach_dep.c: Ditto.
+ * malloc.c: Ditto.
+ * mallocx.c: Ditto.
+ * new_hblk.c: Ditto.
+ * obj_map.c: Ditto.
+ * ptr_chck.c: Ditto.
+ * real_malloc.c: Ditto.
+ * reclaim.c: Ditto.
+ * stubborn.c: Ditto.
+ * thread_local_alloc.c: Ditto.
+ * typd_mlc.c: Ditto.
+ * gc_cpp.cc: Ditto.
+ * include/gc_allocator.h: Ditto.
+ * include/gc_backptr.h: Ditto.
+ * include/gc_config_macros.h: Ditto.
+ * include/gc_cpp.h: Ditto.
+ * include/gc_gcj.h: Ditto.
+ * include/gc_inline.h: Ditto.
+ * include/gc_mark.h: Ditto.
+ * include/gc_pthread_redirects.h: Ditto.
+ * include/gc_typed.h: Ditto.
+ * include/gc_version.h: Ditto.
+ * include/javaxfc.h: Ditto.
+ * include/new_gc_alloc.h: Ditto.
+ * include/private/darwin_semaphore.h: Ditto.
+ * include/private/dbg_mlc.h: Ditto.
+ * include/private/gc_hdrs.h: Ditto.
+ * include/private/gc_locks.h: Ditto.
+ * include/private/gc_pmark.h: Ditto.
+ * include/private/gcconfig.h: Ditto.
+ * include/private/pthread_support.h: Ditto.
+ * include/private/thread_local_alloc.h: Ditto.
+ * darwin_stop_world.c: Add copyright header.
+ * include/gc_backptr.h: Ditto.
+ * include/gc_config_macros.h: Ditto.
+ * include/gc_pthread_redirects.h: Ditto.
+ * include/gc_version.h: Ditto.
+ * include/javaxfc.h: Ditto.
+ * include/private/darwin_semaphore.h: Ditto.
+ * include/private/pthread_support.h: Ditto.
+ * gc_cpp.cc: Make copyright header uniform across the package.
+ * include/gc_cpp.h: Ditto.
+
2009-09-16 Ivan Maidanski <ivmai@mail.ru> (really Petter Urkedal)
(gc_config_h_6a.patch with a minor correction)
@@ -668,7 +734,7 @@
2009-09-10 Ivan Maidanski <ivmai@mail.ru>
(diff113)
-
+
* include/gc.h (GC_has_static_roots_func): New typedef (user filter
callback).
* include/gc.h (GC_register_has_static_roots_callback): Use
@@ -795,9 +861,9 @@
* dyn_load.c (HAVE_DL_ITERATE_PHDR): Break definition from use.
Define for FreeBSD 7.0+.
-
+
2009-09-02 Hans Boehm <Hans.Boehm@hp.com> (with help from Victor Ivrii and
- others)
+ others)
* mach_dep.c: Don't include ucontext.h with NO_GETCONTEXT.
@@ -819,11 +885,13 @@
2009-08-08 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski)
(Mistakenly omitted from last check-in)
+
* include/private/gc_priv.h (GC_unmapped_bytes): Define as 0 for
not USE_MUNMAP case.
2009-08-07 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski)
- diff111 (supersedes diff101_cvs which, in turn, resembles diff52, diff75, diff83 partly)
+ diff111 (supersedes diff101_cvs which, in turn, resembles diff52,
+ diff75, diff83 partly)
* Makefile.direct (MARK_BIT_PER_OBJ, PRINT_BLACK_LIST,
USE_PROC_FOR_LIBRARIES): Fix typo in the comments.
@@ -853,21 +921,21 @@
diff100_cvs (diff51 and diff55, partly)
* pthread_support.c (GC_allow_register_threads): New API function.
- * win32_threads.c (GC_allow_register_threads): Ditto.
- * include/gc.h (GC_allow_register_threads): New API prototype.
- * include/gc.h (GC_register_my_thread, GC_unregister_my_thread):
- Update the comments.
- * pthread_support.c (GC_register_my_thread): Check the collector
- is in the multi-threaded mode.
- * win32_threads.c (GC_register_my_thread): Ditto.
-
+ * win32_threads.c (GC_allow_register_threads): Ditto.
+ * include/gc.h (GC_allow_register_threads): New API prototype.
+ * include/gc.h (GC_register_my_thread, GC_unregister_my_thread):
+ Update the comments.
+ * pthread_support.c (GC_register_my_thread): Check the collector
+ is in the multi-threaded mode.
+ * win32_threads.c (GC_register_my_thread): Ditto.
+
2009-07-10 Hans Boehm <Hans.Boehm@hp.com>
* finalize.c (GC_finalize_all): Always call GC_invoke_finalizers
instead, following Ivan's original patch.
2009-06-20 Hans Boehm <Hans.Boehm@hp.com>
-
+
* allchblk.c (GC_allochblk_nth): Add assertion.
* checksums.c: Add GC_record_fault, GC_was_faulted,
CC_n_faulted_dirty_errors.
@@ -877,7 +945,7 @@
* os_dep.c (GC_remove_protection): Compute index correctly.
2009-06-12 Hans Boehm <Hans.Boehm@hp.com>
-
+
* include/gc_version.h, configure.ac, doc/README:
Change to version 7.2alpha3.
* configure: Regenerate.
@@ -885,7 +953,7 @@
[7.2alpha2]
2009-06-12 Hans Boehm <Hans.Boehm@hp.com>
-
+
* include/gc_version.h, configure.ac, doc/README:
Change to version 7.2alpha2.
* configure: Regenerate.
@@ -894,38 +962,38 @@
diff98_cvs(resembling diff3, diff27, diff59, diff61, diff66,
diff73 partly)
- * dbg_mlc.c (GC_print_smashed_obj): Convert a group of printf()
- calls into a single one (for output atomicity).
- * typd_mlc.c (GC_calloc_explicitly_typed): Don't declare and use
- GC_finalization_failures variable; check the result of
- GC_general_register_disappearing_link() (for lack of memory)
- instead.
- * finalize.c (GC_finalization_failures): Remove unused global
- variable.
- * finalize.c (GC_general_register_disappearing_link,
- GC_general_register_disappearing_link): Don't update the value of
- GC_finalization_failures (since unused).
- * include/private/gc_pmark.h (PUSH_ONE_CHECKED_STACK,
- GC_PUSH_ONE_STACK, GC_PUSH_ONE_HEAP): The first parameter is of
- word type now (as FIXUP_POINTER requires numeric argument).
- * finalize.c (GC_ignore_self_finalize_mark_proc): GC_PUSH_ONE_HEAP
- requires the first parameter of word type.
- * mark.c (PUSH_GRANULE): Ditto.
- * mark.c (GC_push_one, GC_push_all_eager): Ditto (for
- GC_PUSH_ONE_STACK).
- * finalize.c (GC_finalize_all): Call GC_invoke_finalizers() or
+ * dbg_mlc.c (GC_print_smashed_obj): Convert a group of printf()
+ calls into a single one (for output atomicity).
+ * typd_mlc.c (GC_calloc_explicitly_typed): Don't declare and use
+ GC_finalization_failures variable; check the result of
+ GC_general_register_disappearing_link() (for lack of memory)
+ instead.
+ * finalize.c (GC_finalization_failures): Remove unused global
+ variable.
+ * finalize.c (GC_general_register_disappearing_link,
+ GC_general_register_disappearing_link): Don't update the value of
+ GC_finalization_failures (since unused).
+ * include/private/gc_pmark.h (PUSH_ONE_CHECKED_STACK,
+ GC_PUSH_ONE_STACK, GC_PUSH_ONE_HEAP): The first parameter is of
+ word type now (as FIXUP_POINTER requires numeric argument).
+ * finalize.c (GC_ignore_self_finalize_mark_proc): GC_PUSH_ONE_HEAP
+ requires the first parameter of word type.
+ * mark.c (PUSH_GRANULE): Ditto.
+ * mark.c (GC_push_one, GC_push_all_eager): Ditto (for
+ GC_PUSH_ONE_STACK).
+ * finalize.c (GC_finalize_all): Call GC_invoke_finalizers() or
GC_finalizer_notifier directly, instead
- of GC_INVOKE_FINALIZERS() to prevent infinite looping.
+ of GC_INVOKE_FINALIZERS() to prevent infinite looping.
* include/javaxfc.h: Clarify GC_finalize_all comment.
- * gcj_mlc.c: Include gc_pmark.h before "ifdef GC_GCJ_SUPPORT" (not
- after) for configuration information.
- * gcj_mlc.c (GC_gcj_malloc_ignore_off_page): Add comment.
- * gcj_mlc.c (GC_gcj_malloc_ignore_off_page): Check "op" local
- variable for NULL before dereferencing it, return GC_oom_fn() in
- this case.
- * typd_mlc.c (GC_malloc_explicitly_typed,
- GC_malloc_explicitly_typed_ignore_off_page): Transform the code to
- suppress compiler warning (for uninitialized "lg" variable).
+ * gcj_mlc.c: Include gc_pmark.h before "ifdef GC_GCJ_SUPPORT" (not
+ after) for configuration information.
+ * gcj_mlc.c (GC_gcj_malloc_ignore_off_page): Add comment.
+ * gcj_mlc.c (GC_gcj_malloc_ignore_off_page): Check "op" local
+ variable for NULL before dereferencing it, return GC_oom_fn() in
+ this case.
+ * typd_mlc.c (GC_malloc_explicitly_typed,
+ GC_malloc_explicitly_typed_ignore_off_page): Transform the code to
+ suppress compiler warning (for uninitialized "lg" variable).
2009-06-12 Hans Boehm <Hans.Boehm@hp.com>
@@ -935,93 +1003,97 @@
2009-06-12 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski)
diff97_cvs (resembling diff43, diff51, diff67, diff76, diff83 partly)
- * pthread_support.c (GC_inner_start_routine): Don't release the
- GC lock between GC_register_my_thread_inner() and
- GC_init_thread_local() calls (post the "registered" even after
- calling GC_init_thread_local()).
- * win32_threads.c (GC_register_my_thread, GC_unregister_my_thread):
- Use GC_lookup_thread_inner() instead of GC_lookup_thread() and
- acquire the GC lock only once.
- * win32_threads.c (GC_thr_init): Call GC_register_my_thread_inner()
- directly instead of GC_register_my_thread() since I_HOLD_LOCK
- and our (main) thread is not registered yet (add assertion for it).
- * win32_threads.c (GC_init_parallel): Call GC_lookup_thread_inner()
- directly instead of GC_lookup_thread() (since I_HOLD_LOCK).
- * win32_threads.c (GC_lookup_thread): Remove unused function.
- * win32_threads.c: Remove "#error GC_DLL untested with Cygwin".
- * win32_threads.c (GC_win32_dll_threads): Define as FALSE macro
- also if THREAD_LOCAL_ALLOC or GC_PTHREADS.
- * win32_threads.c (GC_use_DllMain): Call ABORT also if GC_PTHREADS
- (for Cygwin).
- * win32_threads.c (GC_push_stack_for): Add parentheses around "&&"
- (inside GC_ASSERT) to prevent compiler warning.
- * win32_threads.c (GC_push_all_stacks): Remove FIXME for
- PARALLEL_MARK.
- * win32_threads.c (MAX_MARKERS, GC_markers): Move the definitions
- to a place before GC_get_next_stack().
- * win32_threads.c (marker_sp, marker_bsp): New static arrays (same
- as in pthread_support.c).
- * win32_threads.c (marker_last_stack_min): New static arrays (the
- same semantics as for last_stack_min of GC_Thread_Rep).
- * win32_threads.c (GC_get_next_stack): Handle marker threads.
- * win32_threads.c (GC_mark_thread): Save the current stack pointer
- to marker_[b]sp.
- * win32_threads.c (start_mark_threads): Initialize
- marker_last_stack_min elements (to "unset" value).
+ * pthread_support.c (GC_inner_start_routine): Don't release the
+ GC lock between GC_register_my_thread_inner() and
+ GC_init_thread_local() calls (post the "registered" even after
+ calling GC_init_thread_local()).
+ * win32_threads.c (GC_register_my_thread, GC_unregister_my_thread):
+ Use GC_lookup_thread_inner() instead of GC_lookup_thread() and
+ acquire the GC lock only once.
+ * win32_threads.c (GC_thr_init): Call GC_register_my_thread_inner()
+ directly instead of GC_register_my_thread() since I_HOLD_LOCK
+ and our (main) thread is not registered yet (add assertion for it).
+ * win32_threads.c (GC_init_parallel): Call GC_lookup_thread_inner()
+ directly instead of GC_lookup_thread() (since I_HOLD_LOCK).
+ * win32_threads.c (GC_lookup_thread): Remove unused function.
+ * win32_threads.c: Remove "#error GC_DLL untested with Cygwin".
+ * win32_threads.c (GC_win32_dll_threads): Define as FALSE macro
+ also if THREAD_LOCAL_ALLOC or GC_PTHREADS.
+ * win32_threads.c (GC_use_DllMain): Call ABORT also if GC_PTHREADS
+ (for Cygwin).
+ * win32_threads.c (GC_push_stack_for): Add parentheses around "&&"
+ (inside GC_ASSERT) to prevent compiler warning.
+ * win32_threads.c (GC_push_all_stacks): Remove FIXME for
+ PARALLEL_MARK.
+ * win32_threads.c (MAX_MARKERS, GC_markers): Move the definitions
+ to a place before GC_get_next_stack().
+ * win32_threads.c (marker_sp, marker_bsp): New static arrays (same
+ as in pthread_support.c).
+ * win32_threads.c (marker_last_stack_min): New static arrays (the
+ same semantics as for last_stack_min of GC_Thread_Rep).
+ * win32_threads.c (GC_get_next_stack): Handle marker threads.
+ * win32_threads.c (GC_mark_thread): Save the current stack pointer
+ to marker_[b]sp.
+ * win32_threads.c (start_mark_threads): Initialize
+ marker_last_stack_min elements (to "unset" value).
2009-06-12 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski)
(diff96_cvs, partly from diff45 and diff75)
- * misc.c (GC_set_oom_fn, GC_set_all_interior_pointers,
- GC_set_finalize_on_demand, GC_set_java_finalization,
- GC_set_finalizer_notifier, GC_set_dont_expand, GC_set_full_freq,
- GC_set_no_dls, GC_set_free_space_divisor, GC_set_max_retries,
- GC_set_dont_precollect, GC_set_time_limit, GC_set_warn_proc):
- Change return type to void (these API functions no longer return
- the old value).
- * include/gc.h: Ditto (for prototypes).
- * tests/test.c (main, WinMain, test): Remove explicit cast to void
- for GC_set_warn_proc().
- * misc.c (GC_get_oom_fn, GC_get_all_interior_pointers,
- GC_get_finalize_on_demand, GC_get_java_finalization,
- GC_get_finalizer_notifier, GC_get_dont_expand, GC_get_full_freq,
- GC_get_no_dls, GC_get_free_space_divisor, GC_get_max_retries,
- GC_get_dont_precollect, GC_get_time_limit, GC_get_warn_proc): New
- API functions (to get the current value of the corresponding R/W
- public variables).
- * include/gc.h: Ditto (for prototypes).
- * include/gc.h (GC_set_warn_proc, GC_set_free_space_divisor):
- Update the comment.
- * misc.c (GC_ignore_warn_proc): New API call-back function.
- * include/gc.h (GC_ignore_warn_proc): Ditto (for the prototype).
- * misc.c (GC_set_find_leak, GC_get_find_leak, GC_set_non_gc_bytes,
- GC_get_non_gc_bytes): New API setter and getter functions (for the
- public GC_find_leak and GC_non_gc_bytes variables, respectively).
- * include/gc.h: Ditto (for prototypes).
- * include/gc.h (GC_memalign): Add proto to GC API.
- * mallocx.c (GC_memalign): Use GC_API, GC_CALL for the definition.
- * tests/test.c (run_one_test): Test GC_memalign() on Win32 too,
- remove GC_memalign() proto.
- * misc.c (GC_write): Use multi-byte (A) variants of Win32
- GetModuleFileName() and CreateFile().
+ * misc.c (GC_set_oom_fn, GC_set_all_interior_pointers,
+ GC_set_finalize_on_demand, GC_set_java_finalization,
+ GC_set_finalizer_notifier, GC_set_dont_expand, GC_set_full_freq,
+ GC_set_no_dls, GC_set_free_space_divisor, GC_set_max_retries,
+ GC_set_dont_precollect, GC_set_time_limit, GC_set_warn_proc):
+ Change return type to void (these API functions no longer return
+ the old value).
+ * include/gc.h: Ditto (for prototypes).
+ * tests/test.c (main, WinMain, test): Remove explicit cast to void
+ for GC_set_warn_proc().
+ * misc.c (GC_get_oom_fn, GC_get_all_interior_pointers,
+ GC_get_finalize_on_demand, GC_get_java_finalization,
+ GC_get_finalizer_notifier, GC_get_dont_expand, GC_get_full_freq,
+ GC_get_no_dls, GC_get_free_space_divisor, GC_get_max_retries,
+ GC_get_dont_precollect, GC_get_time_limit, GC_get_warn_proc): New
+ API functions (to get the current value of the corresponding R/W
+ public variables).
+ * include/gc.h: Ditto (for prototypes).
+ * include/gc.h (GC_set_warn_proc, GC_set_free_space_divisor):
+ Update the comment.
+ * misc.c (GC_ignore_warn_proc): New API call-back function.
+ * include/gc.h (GC_ignore_warn_proc): Ditto (for the prototype).
+ * misc.c (GC_set_find_leak, GC_get_find_leak, GC_set_non_gc_bytes,
+ GC_get_non_gc_bytes): New API setter and getter functions (for the
+ public GC_find_leak and GC_non_gc_bytes variables, respectively).
+ * include/gc.h: Ditto (for prototypes).
+ * include/gc.h (GC_memalign): Add proto to GC API.
+ * mallocx.c (GC_memalign): Use GC_API, GC_CALL for the definition.
+ * tests/test.c (run_one_test): Test GC_memalign() on Win32 too,
+ remove GC_memalign() proto.
+ * misc.c (GC_write): Use multi-byte (A) variants of Win32
+ GetModuleFileName() and CreateFile().
* tests/test.c (main): Replace K&R-style function definition with the
ANSI C one.
-2009-06-12 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski and George Talbot)
+2009-06-12 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski and
+ George Talbot)
(diff95_cvs)
+
* include/private/gcconfig.h (PLATFORM_ANDROID): New macro
- recognized (for Linux on ARM32 without glibc).
- * include/private/gcconfig.h (STRTOULL): Define for all targets
- (define as "strtoul" for most targets except for LLP64/Win64).
- * misc.c (GC_init_inner): Use STRTOULL instead of atoi/atol()
- (cast the result to word type) to decode values of "GC_TRACE",
- "GC_INITIAL_HEAP_SIZE", "GC_MAXIMUM_HEAP_SIZE" environment vars.
+ recognized (for Linux on ARM32 without glibc).
+ * include/private/gcconfig.h (STRTOULL): Define for all targets
+ (define as "strtoul" for most targets except for LLP64/Win64).
+ * misc.c (GC_init_inner): Use STRTOULL instead of atoi/atol()
+ (cast the result to word type) to decode values of "GC_TRACE",
+ "GC_INITIAL_HEAP_SIZE", "GC_MAXIMUM_HEAP_SIZE" environment vars.
2009-06-12 Hans Boehm <Hans.Boehm@hp.com> (Really mostly George Talbot)
+
* include/gc_allocator.h: Add gc_allocator_ignore_off_page.
* tests/test_cpp.cc: Add call to gc_allocator_ignore_off_page.
2009-06-11 Hans Boehm <Hans.Boehm@hp.com>
+
* win32_threads.c (GC_release_mark_lock): Correct misspelling of
AO_load in assertion.
@@ -1029,38 +1101,38 @@
(diff93_cvs: resembling diff27, diff30, diff43, diff44, diff66,
diff76, diff79, diff83 partly)
- * win32_threads.c (MAX_THREADS): Define as 1 if GC_win32_dll_threads
- is defined as FALSE (otherwise the size of dll_thread_table is near
- 200 KiB for 32-bit).
- * win32_threads.c (GC_use_DllMain): Optimize for THREAD_LOCAL_ALLOC.
- * win32_threads.c (GC_Thread_Rep): Add backing_store_end and
- backing_store_ptr fields for IA64 support.
- * win32_threads.c (GC_register_my_thread_inner): Set
- backing_store_end field to reg_base value for IA64 (same as in
- pthread_support.c).
- * win32_threads.c (SET_PTHREAD_MAP_CACHE): Put parentheses in the
- "right" places, remove ';'.
- * win32_threads.c (GC_fault_handler_lock): Declare only
- if MPROTECT_VDB (and not WinCE).
- * win32_threads.c (GC_suspend): Acquire and release
- GC_fault_handler_lock only if MPROTECT_VDB (and not WinCE).
- * win32_threads.c (GC_suspend): Define as STATIC.
- * win32_threads.c (GC_push_stack_for): Fix WARN() format specifier
- (should be word-complient, "%p" is used w/o "0x"), don't cast sp.
- * win32_threads.c (GC_push_all_stacks): Convert a group of printf()
- calls into a single one (for output atomicity).
- * win32_threads.c (GC_get_next_stack): Unprotect thread descriptor
- before altering its last_stack_min ("thread" variable is added).
- * win32_threads.c (GC_get_next_stack): Remove unnecessary checks for
- "s" is non-NULL.
- * win32_threads.c (GC_get_next_stack): Don't call GC_may_be_in_stack
- if WinCE.
- * win32_threads.c (GC_get_next_stack): Pass current_min value to
- GC_get_stack_min as-is (without -1).
- * win32_threads.c (GC_wait_marker): Remove FIXME and use "release"
- version of AO_fetch_and_sub1().
- * win32_threads.c (GC_win32_start_inner, GC_win32_start): convert int
- to pointer (and vice verse) thru word type to suppress warnings.
+ * win32_threads.c (MAX_THREADS): Define as 1 if GC_win32_dll_threads
+ is defined as FALSE (otherwise the size of dll_thread_table is near
+ 200 KiB for 32-bit).
+ * win32_threads.c (GC_use_DllMain): Optimize for THREAD_LOCAL_ALLOC.
+ * win32_threads.c (GC_Thread_Rep): Add backing_store_end and
+ backing_store_ptr fields for IA64 support.
+ * win32_threads.c (GC_register_my_thread_inner): Set
+ backing_store_end field to reg_base value for IA64 (same as in
+ pthread_support.c).
+ * win32_threads.c (SET_PTHREAD_MAP_CACHE): Put parentheses in the
+ "right" places, remove ';'.
+ * win32_threads.c (GC_fault_handler_lock): Declare only
+ if MPROTECT_VDB (and not WinCE).
+ * win32_threads.c (GC_suspend): Acquire and release
+ GC_fault_handler_lock only if MPROTECT_VDB (and not WinCE).
+ * win32_threads.c (GC_suspend): Define as STATIC.
+ * win32_threads.c (GC_push_stack_for): Fix WARN() format specifier
+ (should be word-complient, "%p" is used w/o "0x"), don't cast sp.
+ * win32_threads.c (GC_push_all_stacks): Convert a group of printf()
+ calls into a single one (for output atomicity).
+ * win32_threads.c (GC_get_next_stack): Unprotect thread descriptor
+ before altering its last_stack_min ("thread" variable is added).
+ * win32_threads.c (GC_get_next_stack): Remove unnecessary checks for
+ "s" is non-NULL.
+ * win32_threads.c (GC_get_next_stack): Don't call GC_may_be_in_stack
+ if WinCE.
+ * win32_threads.c (GC_get_next_stack): Pass current_min value to
+ GC_get_stack_min as-is (without -1).
+ * win32_threads.c (GC_wait_marker): Remove FIXME and use "release"
+ version of AO_fetch_and_sub1().
+ * win32_threads.c (GC_win32_start_inner, GC_win32_start): convert int
+ to pointer (and vice verse) thru word type to suppress warnings.
* win32_threads.c (GC_mark_mutex_waitcnt): Fix comment, always
access atomically.
* misc.c: Change GC_THREADS tests back to THREADS.
@@ -1070,95 +1142,95 @@
(diff92_cvs: resembling diff20, diff27, diff34, diff38, diff43, diff45,
diff46, diff56, diff60, diff62, diff74, diff75, diff81 partly)
- * allchblk.c (GC_print_hblkfreelist, GC_dump_regions): Convert
- a group of printf() calls into a single one (for output atomicity).
- * include/gc.h (GC_set_all_interior_pointers, GC_set_full_freq,
- GC_set_time_limit): New prototypes.
- * misc.c (GC_set_all_interior_pointers, GC_set_full_freq,
- GC_set_time_limit): New public setter/getter functions.
- * include/gc.h: Fix (and remove outdated) comments for thread-local
- allocation.
- * include/gc.h: Fix typos in comments.
- * misc.c (GC_init_inner, GC_printf): Ditto.
- * include/gc.h (GC_unregister_disappearing_link): Refine comment.
- * include/gc.h (GC_stack_base): Recognize _M_IA64 macro.
- * misc.c (GC_stack_last_cleared, GC_min_sp, GC_high_water,
- GC_bytes_allocd_at_reset, DEGRADE_RATE): Define only if THREADS.
- * misc.c (GC_stack_last_cleared, GC_min_sp, GC_high_water,
- GC_bytes_allocd_at_reset): Define as STATIC.
- * misc.c (GC_get_heap_size, GC_get_free_bytes,
- GC_get_bytes_since_gc, GC_get_total_bytes): Acquire the GC lock to
- avoid data races.
- * misc.c (GC_write_cs): Define only if THREADS (Win32/WinCE only).
- * misc.c (GC_init_inner): Initialize GC_write_cs only if THREADS.
- * misc.c (GC_init_inner): Use GC_INITIAL_HEAP_SIZE (if available) to
- set the default initial value of initial_heap_sz.
- * misc.c (GC_deinit): Destroy GC_write_cs only if THREADS.
- * misc.c (GC_init_inner): Fix WARN() format specifier (should be
- word-complient, "%p" is used w/o "0x").
- * misc.c (GC_init_inner): Don't recognize "GC_PAUSE_TIME_TARGET"
- environment variable if SMALL_CONFIG.
- * misc.c (GC_init_inner): Recognize "GC_FULL_FREQUENCY" environment
- variable to set initial GC_full_freq value (if not SMALL_CONFIG).
- * doc/README.environment (GC_FULL_FREQUENCY): Add information.
- * doc/README.environment (GC_MARKERS): Refine information.
- * misc.c (GC_init_inner): Change GC_ASSERT to GC_STATIC_ASSERT where
- possible.
- * misc.c (IF_NEED_TO_LOCK): New macro (instead of GC_need_to_lock).
- * misc.c (GC_write): Use IF_NEED_TO_LOCK for handling GC_write_cs.
- * misc.c (GC_abort): Don't define if SMALL_CONFIG.
- * misc.c (GC_abort): Directly use WRITE() instead of GC_err_printf()
- (to prevent possible infinite recursion).
+ * allchblk.c (GC_print_hblkfreelist, GC_dump_regions): Convert
+ a group of printf() calls into a single one (for output atomicity).
+ * include/gc.h (GC_set_all_interior_pointers, GC_set_full_freq,
+ GC_set_time_limit): New prototypes.
+ * misc.c (GC_set_all_interior_pointers, GC_set_full_freq,
+ GC_set_time_limit): New public setter/getter functions.
+ * include/gc.h: Fix (and remove outdated) comments for thread-local
+ allocation.
+ * include/gc.h: Fix typos in comments.
+ * misc.c (GC_init_inner, GC_printf): Ditto.
+ * include/gc.h (GC_unregister_disappearing_link): Refine comment.
+ * include/gc.h (GC_stack_base): Recognize _M_IA64 macro.
+ * misc.c (GC_stack_last_cleared, GC_min_sp, GC_high_water,
+ GC_bytes_allocd_at_reset, DEGRADE_RATE): Define only if THREADS.
+ * misc.c (GC_stack_last_cleared, GC_min_sp, GC_high_water,
+ GC_bytes_allocd_at_reset): Define as STATIC.
+ * misc.c (GC_get_heap_size, GC_get_free_bytes,
+ GC_get_bytes_since_gc, GC_get_total_bytes): Acquire the GC lock to
+ avoid data races.
+ * misc.c (GC_write_cs): Define only if THREADS (Win32/WinCE only).
+ * misc.c (GC_init_inner): Initialize GC_write_cs only if THREADS.
+ * misc.c (GC_init_inner): Use GC_INITIAL_HEAP_SIZE (if available) to
+ set the default initial value of initial_heap_sz.
+ * misc.c (GC_deinit): Destroy GC_write_cs only if THREADS.
+ * misc.c (GC_init_inner): Fix WARN() format specifier (should be
+ word-complient, "%p" is used w/o "0x").
+ * misc.c (GC_init_inner): Don't recognize "GC_PAUSE_TIME_TARGET"
+ environment variable if SMALL_CONFIG.
+ * misc.c (GC_init_inner): Recognize "GC_FULL_FREQUENCY" environment
+ variable to set initial GC_full_freq value (if not SMALL_CONFIG).
+ * doc/README.environment (GC_FULL_FREQUENCY): Add information.
+ * doc/README.environment (GC_MARKERS): Refine information.
+ * misc.c (GC_init_inner): Change GC_ASSERT to GC_STATIC_ASSERT where
+ possible.
+ * misc.c (IF_NEED_TO_LOCK): New macro (instead of GC_need_to_lock).
+ * misc.c (GC_write): Use IF_NEED_TO_LOCK for handling GC_write_cs.
+ * misc.c (GC_abort): Don't define if SMALL_CONFIG.
+ * misc.c (GC_abort): Directly use WRITE() instead of GC_err_printf()
+ (to prevent possible infinite recursion).
2009-06-09 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski)
diff90_cvs (resembling diff28, diff30, diff32, diff34, diff47,
diff49, diff60, diff62, diff66, diff67, diff68, diff72 partly)
- * finalize.c (finalization_mark_proc): Replace K&R-style declaration
- with ANSI C one.
- * finalize.c (GC_grow_table, GC_register_finalizer_inner,
- GC_enqueue_all_finalizers): Remove outdated comments about disabling
- signals.
- * finalize.c (GC_general_register_disappearing_link): Fix assertion
- to catch NULL "obj" value.
- * finalize.c (GC_unregister_disappearing_link): Check "link"
- alignment before gaining the lock.
- * finalize.c (GC_finalize): Refine comment.
- * finalize.c (GC_finalize): Fix WARN() format specifier (should be
- word-complient, "%p" is used w/o "0x").
- * finalize.c (GC_invoke_finalizers): Initialize "bytes_freed_before"
- variable (to 0) to suppress compiler warning.
- * include/gc_gcj.h (MARK_DESCR_OFFSET): Move to private/gc_pmark.h.
- * include/gc_gcj.h: add "extern C" header and tail.
- * include/private/gc_pmark.h: Remove GC_do_parallel_mark(),
- GC_help_wanted, GC_helper_count, GC_active_count declarations (move
- the comments to the place where these symbols are defined in mark.c).
- * mark.c: Add STATIC GC_do_parallel_mark() declaration (for use by
- GC_mark_some_inner, if PARALLEL_MARK only).
- * mark.c (GC_mark_some_inner, GC_help_wanted, GC_helper_count,
- GC_active_count, GC_do_parallel_mark): Define as STATIC.
- * pthread_support.c (GC_mark_thread): Ditto.
- * typd_mlc.c (GC_explicit_typing_initialized, GC_explicit_kind,
- GC_array_kind, GC_ext_descriptors, GC_ed_size, GC_avail_descr,
- GC_typed_mark_proc_index, GC_array_mark_proc_index, GC_eobjfreelist,
- GC_arobjfreelist): Ditto.
- * include/private/gc_pmark.h (PUSH_CONTENTS_HDR): Change GC_ASSERT
- for HBLKSIZE to GC_STATIC_ASSERT.
- * mark.c (GC_noop): Define for Borland C the same as for Watcom.
- * mark.c (GC_noop, GC_mark_and_push): Add ARGSUSED tag.
- * pthread_support.c (GC_do_blocking_inner): Ditto.
- * mark.c (GC_mark_from): Initialize "limit" (to 0) in the default
- switch branch to suppress compiler warning.
- * mark.c (GC_return_mark_stack): Append new-line to printf message.
- * mark.c: Remove unused GC_true_func(), GC_PUSH_ALL().
- * pthread_support.c (GC_mark_thread): Add dummy "return 0" to
- suppress compiler warning.
- * pthread_support.c (start_mark_threads): Move the code limiting
- "GC_markers" value (and printing a warning) to GC_thr_init().
- * pthread_support.c (GC_thr_init): Silently limit "GC_markers" value
- if based on the number of CPUs.
- * pthread_support.c (GC_thr_init): Treat incorrect "GC_markers"
- values as one.
+ * finalize.c (finalization_mark_proc): Replace K&R-style declaration
+ with ANSI C one.
+ * finalize.c (GC_grow_table, GC_register_finalizer_inner,
+ GC_enqueue_all_finalizers): Remove outdated comments about disabling
+ signals.
+ * finalize.c (GC_general_register_disappearing_link): Fix assertion
+ to catch NULL "obj" value.
+ * finalize.c (GC_unregister_disappearing_link): Check "link"
+ alignment before gaining the lock.
+ * finalize.c (GC_finalize): Refine comment.
+ * finalize.c (GC_finalize): Fix WARN() format specifier (should be
+ word-complient, "%p" is used w/o "0x").
+ * finalize.c (GC_invoke_finalizers): Initialize "bytes_freed_before"
+ variable (to 0) to suppress compiler warning.
+ * include/gc_gcj.h (MARK_DESCR_OFFSET): Move to private/gc_pmark.h.
+ * include/gc_gcj.h: add "extern C" header and tail.
+ * include/private/gc_pmark.h: Remove GC_do_parallel_mark(),
+ GC_help_wanted, GC_helper_count, GC_active_count declarations (move
+ the comments to the place where these symbols are defined in mark.c).
+ * mark.c: Add STATIC GC_do_parallel_mark() declaration (for use by
+ GC_mark_some_inner, if PARALLEL_MARK only).
+ * mark.c (GC_mark_some_inner, GC_help_wanted, GC_helper_count,
+ GC_active_count, GC_do_parallel_mark): Define as STATIC.
+ * pthread_support.c (GC_mark_thread): Ditto.
+ * typd_mlc.c (GC_explicit_typing_initialized, GC_explicit_kind,
+ GC_array_kind, GC_ext_descriptors, GC_ed_size, GC_avail_descr,
+ GC_typed_mark_proc_index, GC_array_mark_proc_index, GC_eobjfreelist,
+ GC_arobjfreelist): Ditto.
+ * include/private/gc_pmark.h (PUSH_CONTENTS_HDR): Change GC_ASSERT
+ for HBLKSIZE to GC_STATIC_ASSERT.
+ * mark.c (GC_noop): Define for Borland C the same as for Watcom.
+ * mark.c (GC_noop, GC_mark_and_push): Add ARGSUSED tag.
+ * pthread_support.c (GC_do_blocking_inner): Ditto.
+ * mark.c (GC_mark_from): Initialize "limit" (to 0) in the default
+ switch branch to suppress compiler warning.
+ * mark.c (GC_return_mark_stack): Append new-line to printf message.
+ * mark.c: Remove unused GC_true_func(), GC_PUSH_ALL().
+ * pthread_support.c (GC_mark_thread): Add dummy "return 0" to
+ suppress compiler warning.
+ * pthread_support.c (start_mark_threads): Move the code limiting
+ "GC_markers" value (and printing a warning) to GC_thr_init().
+ * pthread_support.c (GC_thr_init): Silently limit "GC_markers" value
+ if based on the number of CPUs.
+ * pthread_support.c (GC_thr_init): Treat incorrect "GC_markers"
+ values as one.
* pthread_support.c (GC_register_my_thread_inner): Add a check for
"stack_end" is non-NULL (the same as in win32_threads.c).
* pthread_support.c (pthread_create): Call GC_oom_fn before giving up
@@ -1167,118 +1239,123 @@
to "while" one to suppress "possible extraneous ';'" warning.
2009-06-08 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski
- and Zoltan Varga)
- * darwin_stop_world.c (GC_push_all_stacks): Recognize ARM32.
- * include/private/gc_priv.h (GC_THREAD_STATE_T): Define for ARM32
- (Darwin only).
- * include/private/gcconfig.h: Add machine-specific part for DARWIN.
- * include/private/gcconfig.h (ARM32): Define config parameters for
- DARWIN (iPhone).
+ and Zoltan Varga)
+
+ * darwin_stop_world.c (GC_push_all_stacks): Recognize ARM32.
+ * include/private/gc_priv.h (GC_THREAD_STATE_T): Define for ARM32
+ (Darwin only).
+ * include/private/gcconfig.h: Add machine-specific part for DARWIN.
+ * include/private/gcconfig.h (ARM32): Define config parameters for
+ DARWIN (iPhone).
2009-06-08 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski)
(diff91_cvs: resembling diff3, diff27, diff33, diff45, diff47, diff49,
diff60, diff67, diff68 partly)
+
* alloc.c (GC_FULL_FREQ, GC_DONT_EXPAND, GC_FREE_SPACE_DIVISOR,
- GC_TIME_LIMIT): New macros (used to control the default initial
- values of GC_full_freq variable, GC_dont_expand,
- GC_free_space_divisor, GC_time_limit respectively).
- * include/private/gc_priv.h (TIME_LIMIT): Remove macro (replaced
- with GC_TIME_LIMIT in alloc.c).
- * alloc.c (GC_need_full_gc, GC_stopped_mark, GC_finish_collection):
- Define as STATIC.
- * mark_rts.c (GC_push_current_stack, GC_push_gc_structures): Ditto.
- * include/private/gc_priv.h (GC_stopped_mark, GC_finish_collection):
- Move the prototypes to alloc.c, make STATIC.
- * include/private/gc_priv.h (GC_push_current_stack,
- GC_push_gc_structures, GC_push_regs_and_stack): Remove prototypes
- (move the comments to the places where these functions are defined).
- * mach_dep.c (GC_push_regs_and_stack): Move to mark_rts.c and define
- as STATIC.
- * alloc.c (GC_timeout_stop_func, GC_stopped_mark,
- GC_print_heap_sects): Convert a group of printf() calls into
- a single one (for output atomicity).
- * mark_rts.c (GC_print_static_roots): Ditto.
- * alloc.c (GC_stopped_mark): Output blank line (when logging) for
- convenience to delimit collections.
- * alloc.c (GC_clear_a_few_frames): Rename NWORDS to CLEAR_NWORDS;
- make "frames" local variable volatile (to prevent optimization).
- * alloc.c (GC_try_to_collect_inner, GC_stopped_mark,
- GC_finish_collection, GC_allocobj): Remove outdated comments about
- disabling signals.
- * include/private/gc_priv.h (GC_register_displacement_inner,
- GC_gcollect_inner): Ditto.
- * alloc.c (GC_try_to_collect_inner, GC_stopped_mark,
- GC_finish_collection): Initialize "start_time" local variable (to 0)
- to suppress compiler warning.
- * mark_rts.c (GC_add_roots_inner): Ditto (for "old" variable).
- * alloc.c (GC_RATE, MAX_PRIOR_ATTEMPTS): Guard with "ifndef".
- * include/private/gc_priv.h (clock, GC_stop_world, GC_start_world,
- GC_acquire_mark_lock, GC_release_mark_lock, GC_notify_all_builder,
- GC_wait_for_reclaim, GC_notify_all_marker, GC_wait_marker): Replace
- K&R-style function prototypes with ANSI C one.
- * include/private/gc_priv.h (ABORT): Define as DebugBreak() for
- Win32/WinCE if SMALL_CONFIG (the same as in GC_abort()).
- * include/private/gc_priv.h (ROUNDED_UP_WORDS, abs): Remove unused
- macros.
- * include/private/gc_priv.h (GC_noop): Declare for Borland C the
- same as for Watcom.
- * mark_rts.c (GC_push_conditional_with_exclusions): Add ARGSUSED tag.
+ GC_TIME_LIMIT): New macros (used to control the default initial
+ values of GC_full_freq variable, GC_dont_expand,
+ GC_free_space_divisor, GC_time_limit respectively).
+ * include/private/gc_priv.h (TIME_LIMIT): Remove macro (replaced
+ with GC_TIME_LIMIT in alloc.c).
+ * alloc.c (GC_need_full_gc, GC_stopped_mark, GC_finish_collection):
+ Define as STATIC.
+ * mark_rts.c (GC_push_current_stack, GC_push_gc_structures): Ditto.
+ * include/private/gc_priv.h (GC_stopped_mark, GC_finish_collection):
+ Move the prototypes to alloc.c, make STATIC.
+ * include/private/gc_priv.h (GC_push_current_stack,
+ GC_push_gc_structures, GC_push_regs_and_stack): Remove prototypes
+ (move the comments to the places where these functions are defined).
+ * mach_dep.c (GC_push_regs_and_stack): Move to mark_rts.c and define
+ as STATIC.
+ * alloc.c (GC_timeout_stop_func, GC_stopped_mark,
+ GC_print_heap_sects): Convert a group of printf() calls into
+ a single one (for output atomicity).
+ * mark_rts.c (GC_print_static_roots): Ditto.
+ * alloc.c (GC_stopped_mark): Output blank line (when logging) for
+ convenience to delimit collections.
+ * alloc.c (GC_clear_a_few_frames): Rename NWORDS to CLEAR_NWORDS;
+ make "frames" local variable volatile (to prevent optimization).
+ * alloc.c (GC_try_to_collect_inner, GC_stopped_mark,
+ GC_finish_collection, GC_allocobj): Remove outdated comments about
+ disabling signals.
+ * include/private/gc_priv.h (GC_register_displacement_inner,
+ GC_gcollect_inner): Ditto.
+ * alloc.c (GC_try_to_collect_inner, GC_stopped_mark,
+ GC_finish_collection): Initialize "start_time" local variable (to 0)
+ to suppress compiler warning.
+ * mark_rts.c (GC_add_roots_inner): Ditto (for "old" variable).
+ * alloc.c (GC_RATE, MAX_PRIOR_ATTEMPTS): Guard with "ifndef".
+ * include/private/gc_priv.h (clock, GC_stop_world, GC_start_world,
+ GC_acquire_mark_lock, GC_release_mark_lock, GC_notify_all_builder,
+ GC_wait_for_reclaim, GC_notify_all_marker, GC_wait_marker): Replace
+ K&R-style function prototypes with ANSI C one.
+ * include/private/gc_priv.h (ABORT): Define as DebugBreak() for
+ Win32/WinCE if SMALL_CONFIG (the same as in GC_abort()).
+ * include/private/gc_priv.h (ROUNDED_UP_WORDS, abs): Remove unused
+ macros.
+ * include/private/gc_priv.h (GC_noop): Declare for Borland C the
+ same as for Watcom.
+ * mark_rts.c (GC_push_conditional_with_exclusions): Add ARGSUSED tag.
2009-06-04 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski)
(diff89_cvs, resembling diff3, diff27, diff34, diff38, diff47, diff49,
diff59, diff60, diff66, diff67, diff68, diff69a, diff70, diff81
partly)
- * dbg_mlc.c (GC_store_debug_info, GC_store_debug_info_inner): Remove
- outdated comment about disabling signals.
- * mallocx.c (GC_malloc_uncollectable,
- GC_malloc_atomic_uncollectable): Ditto.
- * os_dep.c: Ditto.
- * dbg_mlc.c (GC_debug_change_stubborn, GC_debug_end_stubborn_change):
- Add ARGSUSED tag.
- * pthread_stop_world.c (GC_suspend_handler,
- GC_suspend_handler_inner): Ditto.
- * dbg_mlc.c (GC_debug_free, GC_debug_realloc): Fix printf message.
- * dbg_mlc.c (GC_debug_realloc): Set "result" to NULL in the default
- switch branch to suppress compiler warning.
- * dyn_load.c (GC_init_dyld): Use ABORT() instead of GC_abort().
- * include/private/darwin_semaphore.h (sem_init): Ditto.
- * include/javaxfc.h: Replace "GC_H" with "_GC_H".
- * include/private/dbg_mlc.h (GC_has_other_debug_info,
- GC_store_debug_info): Replace K&R-style function prototypes with ANSI
- C one.
- * include/private/gcconfig.h (GC_FreeBSDGetDataStart, real_malloc,
- GC_win32_get_mem, GC_wince_get_mem, GC_unix_get_mem): Ditto.
- * include/private/pthread_support.h (GC_stop_init): Ditto.
- * include/private/gcconfig.h: Refine comment about setting
- GC_stackbottom.
- * include/private/gcconfig.h (FIXUP_POINTER): Put parentheses in the
- "right" places.
- * include/private/pthread_support.h (GC_Thread_Rep): Refine comment
- for "stack_end" field.
- * mallocx.c (GC_malloc_uncollectable,
- GC_malloc_atomic_uncollectable): Remove cast to undefined "hbklk".
- * os_dep.c (GC_USE_MEM_TOP_DOWN): New macro (for setting
- GC_mem_top_down to MEM_TOP_DOWN for debug purposes).
- * os_dep.c (GC_gww_read_dirty, catch_exception_raise): Fix WARN()
- format specifier (should be word-compliant, "%p" is used w/o "0x").
- * pthread_stop_world.c (GC_suspend_handler_inner): Ditto.
- * os_dep.c (GC_dirty_init): Append new-line to printf messages.
- * os_dep.c (GC_mprotect_thread): Fix GC_err_printf message.
- * os_dep.c (GC_save_callers): Change GC_ASSERT to GC_STATIC_ASSERT.
- * pthread_stop_world.c (GC_retry_signals, GC_suspend_ack_sem): Define
- as STATIC.
- * pthread_stop_world.c (GC_push_all_stacks): Add assertion for that
- "thread_blocked" is not set for the current thread.
- * real_malloc.c: Add "extern GC_quiet" to suppress compiler warning.
- * reclaim.c (GC_reclaim_all): Initialize "start_time" (to 0) to
- suppress compiler warning.
-
+
+ * dbg_mlc.c (GC_store_debug_info, GC_store_debug_info_inner): Remove
+ outdated comment about disabling signals.
+ * mallocx.c (GC_malloc_uncollectable,
+ GC_malloc_atomic_uncollectable): Ditto.
+ * os_dep.c: Ditto.
+ * dbg_mlc.c (GC_debug_change_stubborn, GC_debug_end_stubborn_change):
+ Add ARGSUSED tag.
+ * pthread_stop_world.c (GC_suspend_handler,
+ GC_suspend_handler_inner): Ditto.
+ * dbg_mlc.c (GC_debug_free, GC_debug_realloc): Fix printf message.
+ * dbg_mlc.c (GC_debug_realloc): Set "result" to NULL in the default
+ switch branch to suppress compiler warning.
+ * dyn_load.c (GC_init_dyld): Use ABORT() instead of GC_abort().
+ * include/private/darwin_semaphore.h (sem_init): Ditto.
+ * include/javaxfc.h: Replace "GC_H" with "_GC_H".
+ * include/private/dbg_mlc.h (GC_has_other_debug_info,
+ GC_store_debug_info): Replace K&R-style function prototypes with ANSI
+ C one.
+ * include/private/gcconfig.h (GC_FreeBSDGetDataStart, real_malloc,
+ GC_win32_get_mem, GC_wince_get_mem, GC_unix_get_mem): Ditto.
+ * include/private/pthread_support.h (GC_stop_init): Ditto.
+ * include/private/gcconfig.h: Refine comment about setting
+ GC_stackbottom.
+ * include/private/gcconfig.h (FIXUP_POINTER): Put parentheses in the
+ "right" places.
+ * include/private/pthread_support.h (GC_Thread_Rep): Refine comment
+ for "stack_end" field.
+ * mallocx.c (GC_malloc_uncollectable,
+ GC_malloc_atomic_uncollectable): Remove cast to undefined "hbklk".
+ * os_dep.c (GC_USE_MEM_TOP_DOWN): New macro (for setting
+ GC_mem_top_down to MEM_TOP_DOWN for debug purposes).
+ * os_dep.c (GC_gww_read_dirty, catch_exception_raise): Fix WARN()
+ format specifier (should be word-compliant, "%p" is used w/o "0x").
+ * pthread_stop_world.c (GC_suspend_handler_inner): Ditto.
+ * os_dep.c (GC_dirty_init): Append new-line to printf messages.
+ * os_dep.c (GC_mprotect_thread): Fix GC_err_printf message.
+ * os_dep.c (GC_save_callers): Change GC_ASSERT to GC_STATIC_ASSERT.
+ * pthread_stop_world.c (GC_retry_signals, GC_suspend_ack_sem): Define
+ as STATIC.
+ * pthread_stop_world.c (GC_push_all_stacks): Add assertion for that
+ "thread_blocked" is not set for the current thread.
+ * real_malloc.c: Add "extern GC_quiet" to suppress compiler warning.
+ * reclaim.c (GC_reclaim_all): Initialize "start_time" (to 0) to
+ suppress compiler warning.
+
2009-06-02 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski)
(adding last bit of diff86_cvs)
+
* tests/test.c (check_heap_stats): Avoid unbalanced brackets in ifdef.
2009-05-27 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski)
(diff26)
+
 	* win32_threads.c: restructure parallel marking mutex initialization.
* win32_threads.c, alloc.c, darwin_stop_world.c, mallocx.c, mark.c,
pthread_stop_world.c, pthread_support.c: Add runtime conditions
@@ -1293,44 +1370,51 @@
2009-05-27 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski)
(diff39)
+
* include/private/gcconfig.h: refine MINGW32 test.
* mark.c: Add win64/gcc tests.
2009-05-27 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski)
(diff86_cvs, resembling diff28, diff32, diff33, diff38, diff68 partly)
+
* test.c (fork_a_thread, reverse_test, alloc8bytes, tree_test,
- typed_test, run_one_test, check_heap_stats, main, test): Replace
- all K&R-style function definitions with ANSI C ones.
- * trace_test.c (main): Ditto.
- * test.c (GC_COND_INIT): Define as GC_INIT() also in case of
- THREAD_LOCAL_ALLOC.
- * test.c (reverse_test): Call fork_a_thread() only if GC_PTHREADS
- or GC_WIN32_THREADS; remove fork_a_thread() macros definition.
- * test.c (reverse_test): Use "volatile" when clearing "b" and "c"
- local variables (to suppress "assigned value is never used"
- compiler warning).
- * test.c (tree_test): Use public GC_noop1() instead of private
- GC_noop().
- * test.c (typed_test): Ditto.
- * test.c (check_heap_stats): Define and assign value to
- "late_finalize_count" local variable only if its value is used
- (if FINALIZE_ON_DEMAND defined).
- * test.c (main): Remove DJGPP-specific initialization of
- GC_stackbottom (not needed anymore, handled in gcconfig.h).
- * trace_test.c: Guard #define GC_DEBUG with #ifndef.
- * trace_test.c: Include "gc_backptr.h".
- * trace_test.c (main): Call GC_INIT().
- * trace_test.c (main): Add "return 0" statement.
+ typed_test, run_one_test, check_heap_stats, main, test): Replace
+ all K&R-style function definitions with ANSI C ones.
+ * trace_test.c (main): Ditto.
+ * test.c (GC_COND_INIT): Define as GC_INIT() also in case of
+ THREAD_LOCAL_ALLOC.
+ * test.c (reverse_test): Call fork_a_thread() only if GC_PTHREADS
+ or GC_WIN32_THREADS; remove fork_a_thread() macros definition.
+ * test.c (reverse_test): Use "volatile" when clearing "b" and "c"
+ local variables (to suppress "assigned value is never used"
+ compiler warning).
+ * test.c (tree_test): Use public GC_noop1() instead of private
+ GC_noop().
+ * test.c (typed_test): Ditto.
+ * test.c (check_heap_stats): Define and assign value to
+ "late_finalize_count" local variable only if its value is used
+ (if FINALIZE_ON_DEMAND defined).
+ * test.c (main): Remove DJGPP-specific initialization of
+ GC_stackbottom (not needed anymore, handled in gcconfig.h).
+ * trace_test.c: Guard #define GC_DEBUG with #ifndef.
+ * trace_test.c: Include "gc_backptr.h".
+ * trace_test.c (main): Call GC_INIT().
+ * trace_test.c (main): Add "return 0" statement.
2009-05-25 Hans Boehm <Hans.Boehm@hp.com> (Really Petter Urkedal)
+
* dyn_load.c (GC_register_dynlib_callback): Use new index j
instead of i in the inner loop.
-2009-05-24 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski, diff85)
+2009-05-24 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski)
+ (diff85)
+
* tests/test.c: Increment n_tests with fetch_and_add when possible,
avoiding need to export lock.
-2009-05-22 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski, diff63,diff65)
+2009-05-22 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski)
+ (diff63,diff65)
+
* include/gc_pthread_redirects.h:
- dlfcn.h is included for dlopen() proto before undefining
"dlopen" (so, it's possible now to include dlfcn.h after
@@ -1348,29 +1432,36 @@
 	improve out of memory handling for thread structures, don't
define GC_beginthreadex and GC_endthreadex for winCE.
-2009-05-22 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski, diff71)
+2009-05-22 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski)
+ (diff71)
+
 	* tests/test.c: Change gcj vtable descriptor type from size_t to
GC_word.
2009-05-22 Hans Boehm <Hans.Boehm@hp.com>
+
* gcj_mlc.c: Add comment.
* tests/test.c: Change NTEST to NTHREADS. Fork 5 threads by default.
 	Run reverse_test a second time in each thread. Add comments.
 	Don't rely on AO_fetch_and_add.
2009-05-22 Hans Boehm <Hans.Boehm@hp.com> (Largely from Ludovic Cortes)
+
* dyn_load.c (GC_register_dynlib_callback,
GC_register_dynamic_libraries_dl_iterate_phdr): Add support
for GNU_PT_RELRO relocations.
-2009-05-22 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski, diff61)
+2009-05-22 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski)
+ (diff61)
+
* Makefile, Makefile.direct: GC_SOLARIS_PTHREADS was replaced
by GC_SOLARIS_THREADS.
* include/gc.h: Improve finalizer documentation.
* mips_sgi_mach_dep.s: Replace _MIPS_SIM_ABI32 with _ABIO32.
* pthread_stop_world.c, Makefile.dj: Fix typos.
-2009-05-21 Hans Boehm <Hans.Boehm@hp.com>
+2009-05-21 Hans Boehm <Hans.Boehm@hp.com>
+
* win32_threads.c (GC_new_thread): Make first_thread
visible to the whole file.
(UNPROTECT): New macro.
@@ -1382,46 +1473,53 @@
(GC_remove_protection): Check if already unprotected.
2009-05-20 Hans Boehm <Hans.Boehm@hp.com> (really Ivan Maidanski)
+
* doc/README.win32: Add OpenWatcom warning.
* include/private/gcconfig.h: Really check it in.
2009-05-19 Hans Boehm <Hans.Boehm@hp.com> (Mostly Ivan Maidanski, Dave Korn)
+
* os_dep.c (GC_get_stack_base, windows): Replace with Dave Korn's
code from gcc version.
* os_dep.c: make gc compilable (optionally) for Cygwin with
- GetWriteWatch-based virtual dirty bit implementation ("os_dep.c" file).
+ GetWriteWatch-based virtual dirty bit implementation ("os_dep.c" file).
* os_dep.c: Make non-win32 GC_write_fault_handler STATIC.
* mark.c (GC_noop): fix declaration definition mismatch for DMC.
* include/private/gcconfig.h: Enable MPROTECT_VDB and GWW_VDB for
Watcom (Win32 only). It works.
- and GWW_VDB. It works.
+2009-05-07 Hans Boehm <Hans.Boehm@hp.com> (and Mark Sibly)
-2009-05-07 Hans Boehm <Hans.Boehm@hp.com> and Mark Sibly
* mach_dep.c: Don't use __builtin_unwind_init for register
state on PowerPC/Darwin.
2009-04-24 Hans Boehm <Hans.Boehm@hp.com>
+
* doc/gcdescr.html: Improve description of object freelist
structure.
* include/private/gc_priv.h: Fix comment for _size_map.
2009-03-16 Hans Boehm <Hans.Boehm@hp.com>
+
* os_dep.c (GC_linux_stack_base): Relax sanity test.
-2009-03-11 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski)
+2009-03-11 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski)
+
* include/private/gc_pmark.h (PUSH_CONTENTS_HDR for
MARK_BIT_PER_OBJ): Add missing backslash before eoln.
2009-02-28 Hans Boehm <Hans.Boehm@hp.com>
+
 	* misc.c (GC_set_warn_proc): Implicitly initialize GC on
non-Cygwin win32.
2009-02-28 Hans Boehm <Hans.Boehm@hp.com> (Really Petr Krajca)
+
* configure.ac: Enable thread-local allocation for sparc-linux.
* configure: Regenerate.
2009-02-28 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidansky)
+
* alloc.c (GC_try_to_collect): Remove duplicate initialization
check.
* malloc.c (GC_generic_malloc): Remove lw to eliminate single-
@@ -1429,6 +1527,7 @@
* mallocx.c (GC_generic_malloc_ignore_off_page): Ditto.
2009-02-28 Hans Boehm <Hans.Boehm@hp.com> (Mostly Ivan Maidansky)
+
* allchblk.c, backgraph.c, dbg_mlc.c, dyn_load.c,
finalize.c, include/private/gc_pmark.h, malloc.c, mark.c,
os_dep.c, pthread_stop_world.c, pthread_support.c, reclaim.c,
@@ -1436,10 +1535,12 @@
* misc.c: Refine comment.
2009-02-28 Hans Boehm <Hans.Boehm@hp.com>
+
* os_dep.c: Define GC_GWW_BUF_LEN more intelligently. Add FIXME
comment.
2009-02-28 Hans Boehm <Hans.Boehm@hp.com> (With input from Ivan Maidansky)
+
* win32_threads.c (GC_push_stack_for): Yet another attempt
at the stack_min finding logic. Try to clean up the existing code
while minimizing VirtualQuery calls.
@@ -1448,6 +1549,7 @@
*include/gc.h: Update obsolete comments.
2009-02-24 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidansky)
+
* tests/test.c:
(gcj_class_struct2): Use cast instead of l suffix.
Cast GetLastError to int in various places.
@@ -1456,20 +1558,25 @@
Cast GC_gc_no to unsigned in printf.
2009-02-24 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidansky)
+
* include/gc.h: Fix two typos in comments.
2009-02-24 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidansky)
+
* finalize.c: Fix typo in comment.
2008-12-03 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidansky)
+
* blacklst.c (GC_print_source_pointer): Don't call GC_print_heap_obj
with lock.
2008-12-01 Hans Boehm <Hans.Boehm@hp.com>
+
* reclaim.c: (GC_reclaim_block): Scan even nearly full blocks
if we are checking for leaks.
2008-11-12 Hans Boehm <Hans.Boehm@hp.com> (Really mostly Ivan Maidansky)
+
* win32_threads.c: Remove mark lock spinning.
* win32_threads.c, pthread_support.c: Update GC_unlocked_count,
GC_spin_count, and GC_block_count using atomic operations.
@@ -1477,6 +1584,7 @@
2008-11-11 Hans Boehm <Hans.Boehm@hp.com>
(Really almost entirely Ivan Maidansky)
+
* win32_threads.c: Support PARALLEL_MARK. Make printf arg
types agree with format specifiers. Add missing copyright header.
Add STATIC for GC_threads.
@@ -1487,18 +1595,21 @@
with PARALLEL_MARK or THREAD_LOCAL_ALLOC.
2008-11-10 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidansky)
+
* alloc.c (GC_try_to_collect_inner): Don't print redundant
GC_bytes_allocd and GC_gc_no.
(GC_stopped_mark): Print average world stop time.
* include/private/gc_priv.h (MS_TIME_DIFF): Add cast.
2008-11-10 Hans Boehm <Hans.Boehm@hp.com> (Really mostly Ivan Maidansky)
+
* misc.c, doc/README.environment: Add support for
GC_FREE_SPACE_DIVISOR and GC-disable-incremental.
* include/gc.h: Make GC_set_free_space_divisor correspond to
(somewhat unfortunate) reality.
2008-11-07 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidansky)
+
(Mostly improves LLP64 support.)
* backgraph.c, checksums.c, dbg_mlc.c, finalize.c, mark.c,
misc.c, reclaim.c: Changed some int and long type to word or size_t
@@ -1526,13 +1637,16 @@
* mark.c (GC_check_dirty): Move declaration out of func body.
2008-11-06 Hans Boehm <Hans.Boehm@hp.com>
+
* doc/gcinterface.html: Improve REDIRECT_MALLOC documentation.
* include/gc.h (GC_register_my_thread): Improve comment.
2008-11-04 Hans Boehm <Hans.Boehm@hp.com>
+
* Makefile.direct: Add comment for -DCHECKSUMS.
2008-10-27 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidansky)
+
* thread_local_alloc.c, include/private/thread_local_alloc.h:
Fix typos in comments.
* finalize.c: Declare mark_procs and GC_register_finalizer_inner
@@ -1540,11 +1654,13 @@
* malloc.c (GC_free): Move size calculation below assertion.
2008-10-27 Hans Boehm <Hans.Boehm@hp.com>
+
* win32_threads.c (GC_get_stack_min, GC_may_be_in_stack):
Add one entry VirtualQuery cache, I_HOLD_LOCK assertions.
(GC_push_stack_for, GC_get_next_stack) : Hopefully fix WINCE support.
-2008-10-27 Hans Boehm <Hans.Boehm@hp.com> (Thanks to Klaus Treichel.)
+2008-10-27 Hans Boehm <Hans.Boehm@hp.com> (Thanks to Klaus Treichel)
+
* finalize.c (GC_general_register_disappearing_link): Add
assertion.
* malloc.c (GC_generic_malloc): Round lb to granules, not words.
@@ -1552,20 +1668,24 @@
granules, not words.
2008-10-27 Hans Boehm <Hans.Boehm@hp.com> (Really Rex Dieter and
- Petr Krajca)
+ Petr Krajca)
+
* mach_dep.c (NO_GETCONTEXT): Define for sparc linux.
* configure.ac: Define mach_dep for sparc-linux.
* configure: Regenerate.
2008-10-25 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidansky)
+
* mark_rts.c (GC_approx_sp): Use volatile to avoid common
warning.
2008-10-25 Hans Boehm <Hans.Boehm@hp.com>
+
* dyn_load.c (GC_cond_add_roots): Fix GC_get_next_stack argument
order.
2008-10-24 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski)
+
* alloc.c, dbg_mlc.c, dyn_load.c, finalize.c, gcj_mlc.c,
include/gc.h, include/gc_config_macros.h, include/gc_cpp.h,
include/gc_gcj.h, include/gc_mark.h, include/gc_typed.h,
@@ -1578,6 +1698,7 @@
2008-10-24 Hans Boehm <Hans.Boehm@hp.com>
(Partially based loosely on patch from Ivan Maidanski)
+
* win32_threads.c (GC_may_be_in_stack): New. (GC_Thread_Rep):
Add last_stack_min. (GC_push_stack_for): Use last_stack_min.
(GC_get_next_stack): Add limit argument, use_last_stack_min.
@@ -1593,6 +1714,7 @@
* Makefile.in: Regenerate.
2008-10-21 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski)
+
* include/private/gc_locks.h, include/private/gc_pmark.h,
include/private/gc_priv.h, include/private/gcconfig.h,
mach_dep.c, mark_rts.c, misc.c, os_dep.c, pthread_stop_world.c,
@@ -1600,10 +1722,12 @@
Fix comments.
2008-10-21 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski)
+
* pthread_support.c: Comment out LOCK_STATS.
* include/gc.h: Fix comments.
2008-10-20 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski)
+
* misc.c (GC_init_inner): Enable GC_LOG_FILE on Cygwin.
* include/private/gcconfig.h: Consider USE_MMAP for Cygwin.
* os_dep.c (GC_get_main_stack_base): Use alternate definition
@@ -1611,11 +1735,13 @@
* include/private/gc_priv.h: Sometimes define SETJMP on Cygwin.
2008-10-20 Hans Boehm <Hans.Boehm@hp.com>
+
* doc/README: Make it clearer when Makefile.direct is assumed.
* cord/cord.am: install include/cord.h.
* Makefile.in: Regenerate.
2008-09-24 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski)
+
* win32_threads.c (GC_pthread_join, GC_pthread_start_inner):
Remove unused variables.
* darwin_stop_world.c: Always declare GC_thr_init().
@@ -1625,6 +1751,7 @@
mark, USE_MARK_BITS version): Refer to correct parameter name.
2008-09-24 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski)
+
* finalize.c (GC_general_register_disappearing_link): Remove
redundant code.
* gcj_mlc.c (GC_init_gcj_malloc): Add cast to signed.
@@ -1635,9 +1762,11 @@
defined for X86. (STATIC): Define as "static" with NO_DEBUGGING.
2008-09-24 Hans Boehm <Hans.Boehm@hp.com>
+
* include/private/gc_priv.h: Update MAX_HEAP_SECTS.
2008-09-10 Hans Boehm <Hans.Boehm@hp.com>
+
* dbg_mlc.c (GC_print_smashed_obj): Increase robustness with
smashed string, (GC_debug_free_inner): Mark as free.
* mallocx.c (GC_malloc_many): Always clear new block if
@@ -1650,16 +1779,19 @@
* tests/test.c: Don't define GC_DEBUG if already defined.
2008-08-27 Hans Boehm <Hans.Boehm@hp.com>
+
* doc/simple_example.html: update --enable-full-debug reference,
Make HTML formatting standards compliant.
* doc/debugging.html, doc/leak.html: Fix HTML formatting bugs.
* doc/gcinterface.html: specify encoding.
2008-08-27 Hans Boehm <Hans.Boehm@hp.com> (with help from Marco Maggi)
+
* doc/simple_example.html: Update thread-local allocation
description.
2008-08-26 Hans Boehm <Hans.Boehm@hp.com> (with help from Marco Maggi)
+
* configure.ac: Check for gc-debug earlier; replace remaining
full-debug tests.
* configure: Regenerate.
@@ -1672,6 +1804,7 @@
* tests/test.c: Minimally test GC_..._INCR and friends.
2008-08-21 Hans Boehm <Hans.Boehm@hp.com>
+
* mark.c: (GC_push_next_marked, GC_push_next_marked_dirty,
GC_push_next_marked_uncollectable): Never invoke GC_push_marked
on free hblk.
@@ -1684,13 +1817,16 @@
* include/private/gc_pmark.h: (PUSH_OBJ): Add assertion.
2008-08-21 Hans Boehm <Hans.Boehm@hp.com>
+
* alloc.c, include/gc_mark.h, Makefile.direct: Improve comments.
2008-08-01 Hans Boehm <Hans.Boehm@hp.com> (Really Klaus Treichel)
+
* configure.ac: Set win32_threads on MinGW.
* configure: Regenerate.
2008-07-25 Hans Boehm <Hans.Boehm@hp.com> (Really mostly Ivan Maidanski)
+
Ivan's description of the patch follows. Note that a few pieces like
the GC_malloc(0) patch, were not applied since an alternate had been
previously applied. A few differed stylistically from the rest of
@@ -1700,28 +1836,28 @@
naming bugs (as a few did), where replaced by STATIC, which is
ignored by default.
- - minor bug fixing (for FreeBSD, for THREAD_LOCAL_ALLOC and for
- GC_malloc(0));
- - addition of missing getter/setter functions for public variables
- (may be useful if compiled as Win32 DLL);
- - addition of missing GC_API for some exported functions;
- - addition of missing "static" declarator for internal functions
- and variables (where possible);
- - replacement of all remaining K&R-style definitions with ANSI
- C ones (__STDC__ macro is not used anymore);
- - addition of some Win32 macro definitions (that may be missing in
- the standard headers supplied with a compiler) for GWW_VDB mode;
- - elimination of most compiler warnings (except for
- "uninitialized data" warning);
- - several typos correction;
- - missing parenthesis addition in macros in some header files of
- "libatomic_ops" module.
+ - minor bug fixing (for FreeBSD, for THREAD_LOCAL_ALLOC and for
+ GC_malloc(0));
+ - addition of missing getter/setter functions for public variables
+ (may be useful if compiled as Win32 DLL);
+ - addition of missing GC_API for some exported functions;
+ - addition of missing "static" declarator for internal functions
+ and variables (where possible);
+ - replacement of all remaining K&R-style definitions with ANSI
+ C ones (__STDC__ macro is not used anymore);
+ - addition of some Win32 macro definitions (that may be missing in
+ the standard headers supplied with a compiler) for GWW_VDB mode;
+ - elimination of most compiler warnings (except for
+ "uninitialized data" warning);
+ - several typos correction;
+ - missing parenthesis addition in macros in some header files of
+ "libatomic_ops" module.
My highlights based on reading the patch:
* allchblk.c: Remove GC_freehblk_ptr decl.
Make free_list_index_of() static.
- * include/gc.h: Use __int64 on win64, define GC_oom_func,
+ * include/gc.h: Use __int64 on win64, define GC_oom_func,
GC_finalizer_notifier_proc, GC_finalizer_notifier_proc,
add getter and setters: GC_get_gc_no, GC_get_parallel,
GC_set_oom_fn, GC_set_finalize_on_demand,
@@ -1739,7 +1875,7 @@
blacklist printing.
* misc.c: Fix log file naming based on environment variable
for Windows. Make GC_set_warn_proc and GC_set_free_space_divisor
- just return current value with 0 argument. Add DONT_USER_USER32_DLL.
+ just return current value with 0 argument. Add DONT_USE_USER32_DLL.
Add various getters and setters as in gc.h.
* os_dep.c: Remove no longer used GC_disable/enable_signals
implementations. (GC_get_stack_base): Add pthread_attr_destroy
@@ -1748,6 +1884,7 @@
call GC_init_thread_local.
2008-07-21 Hans Boehm <Hans.Boehm@hp.com>
+
* Makefile.direct, mach_dep.c: Add support for NO_GETCONTEXT.
* mach_dep.c: Include signal.h.
* gc_priv.h: Factor out INLINE declaration.
@@ -1769,7 +1906,7 @@
* include/gc_inline.h: Fix comments.
2008-05-03 Hans Boehm <Hans.Boehm@hp.com>
-
+
* include/gc_version.h, configure.ac, doc/README:
Change to version 7.2alpha1.
* configure: Regenerate.
@@ -1777,7 +1914,7 @@
[7.1]
2008-05-03 Hans Boehm <Hans.Boehm@hp.com>
-
+
* include/gc_version.h, configure.ac, doc/README:
Change to version 7.1.
* configure: Regenerate.
@@ -1898,13 +2035,13 @@
* include/private/gc_priv.h: Comment hb_sz range limit.
2008-01-29 Hans Boehm <Hans.Boehm@hp.com> (with help from Manuel Serrano)
-
+
* mark.c (GC_push_next_marked): correct comment.
* Makefile.direct: document NO_PROC_STAT.
 	* include/private/gcconfig.h: Accommodate NO_PROC_STAT.
2008-01-10 Hans Boehm <Hans.Boehm@hp.com>
-
+
* include/gc_version.h, configure.ac, doc/README:
Change to version 7.1alpha3.
* configure: Regenerate.
@@ -1912,17 +2049,17 @@
[7.1alpha2]
2008-01-10 Hans Boehm <Hans.Boehm@hp.com>
-
+
* include/gc_version.h, configure.ac, doc/README:
Change to version 7.1alpha2.
* configure: Regenerate.
2008-01-10 Hans Boehm <Hans.Boehm@hp.com>
-
+
* Makefile.am: Mention atomic_ops.c and atomic_ops_sysdeps.S
again. Refer to build directory as ".".
* Makefile.in: Regenerate.
-
+
2008-01-10 Hans Boehm <Hans.Boehm@hp.com>
* configure.ac: Ignore --enable-parallel-mark on Darwin for now.
@@ -1953,7 +2090,7 @@
2007-12-23 Hans Boehm <Hans.Boehm@hp.com>
* Makefile.am: Add NT_X64_THREADS_MAKEFILE.
-
+
2007-12-23 Hans Boehm <Hans.Boehm@hp.com> (Really mostly Friedrich Dominicus)
* NT_X64_STATIC_THREADS_MAKEFILE: Clean up obsolete comment.
@@ -1987,8 +2124,8 @@
GC_NO_THREAD_DECLS, don't test explicitly for GC_SOLARIS_THREADS.
2007-12-20 Hans Boehm <Hans.Boehm@hp.com>
-
- * alloc.c: Deal correctly with address wrapping for
+
+ * alloc.c: Deal correctly with address wrapping for
GC_greatest_plausible_heap_addr and GC_least_plausible_heap_addr.
* finalize.c, include/gc.h (GC_register_disappearing_link,
GC_register_finalizer_inner): Improve out-of-memory handling.
@@ -2012,7 +2149,7 @@
* allchblk.c, configure.ac (add --enable-munmap)
* configure: Regenerate.
-2007-12-10 Andreas Tobler <a.tobler@schweiz.org>
+2007-12-10 Andreas Tobler <a.tobler@schweiz.org>
* dyn_load.c (GC_dyld_image_add): Remove ifdef clause and use the macro
GC_GETSECTBYNAME instead.
@@ -2048,7 +2185,7 @@
* Makefile.in: Regenerate.
2007-08-15 Hans Boehm <Hans.Boehm@hp.com> (really Samuel Thibault)
-
+
* pthread_support.c (GC_thr_init): Use sysconf(_SC_NPROCESSORS_ONLN)
for HURD.
@@ -2071,7 +2208,7 @@
No longer test for RS6000.
2007-08-03 Hans Boehm <Hans.Boehm@hp.com>
-
+
* alloc.c, backgraph.c, headers.c, include/private/gc_priv.h:
Maintain GC_our_memory and GC_n_memory.
* dbg_mlc.c (GC_print_smashed_obj): Improve message.
@@ -2121,12 +2258,12 @@
exist.
2007-07-02 Hans Boehm <Hans.Boehm@hp.com>
-
+
* version.h, configure.ac, doc/README: Change to version 7.1alpha1.
* configure: Regenerate.
2007-07-02 Hans Boehm <Hans.Boehm@hp.com>
-
+
* version.h, configure.ac, doc/README: Change to version 7.0.
* configure: Regenerate.
@@ -2147,7 +2284,7 @@
* backgraph.c (per_object_func): Make argument types consistent.
(GC_traverse_back_graph): Mark GC_deepest_obj.
-
+
2007-06-29 Hans Boehm <Hans.Boehm@hp.com>
* finalize.c (GC_finalize): Change dl_size and fo_size to size_t.
@@ -2230,7 +2367,7 @@
* msvc_dbg.c(GetModuleBase): Replace strcat with strcat_s.
2007-06-06 Hans Boehm <Hans.Boehm@hp.com>
-
+
* include/gc.h: (GC_word, GC_signed_word): Fix win64 definitions.
Don't include windows.h in an extern "C" context.
* include/private/gcconfig.h: Fix win64/X86_64 configuration.
@@ -2279,7 +2416,7 @@
* include/private/gc_locks.h: Format to 80 columns.
2007-05-22 Hans Boehm <Hans.Boehm@hp.com>
-
+
* malloc.c(GC_free): Ignore bad frees on MSWIN32 with REDIRECT_MALLOC.
* NT_MAKEFILE: msvc_dbg.h is in include/private. Don't use cvars
rc.
@@ -2297,7 +2434,7 @@
* tests/thread_leak_test.c: Don't unconditionally define
GC_LINUX_THREADS.
-2007-05-21 Andreas Tobler <a.tobler@schweiz.org>
+2007-05-21 Andreas Tobler <a.tobler@schweiz.org>
* Makefile.am: Remove extra_ldflags_libgc definition.
* Makefile.in: Regenerate.
@@ -2370,7 +2507,7 @@
* include/gc.h: Remove more SRC_M3 references.
* include/private/gcconfig.h: Remove still more SRC_M3 references.
GC_SOLARIS_THREADS no longer needs to be checked separately.
-
+
2007-05-08 Hans Boehm <Hans.Boehm@hp.com>
* thread_local_alloc.c, include/private/thread_local_alloc.h:
@@ -2409,7 +2546,7 @@
* win32_threads.c: Include stdint.h for Mingw. Add GC_API for DllMain.
(GC_use_DllMain): Fix assertion.
-2007-02-14 Andreas Tobler <a.tobler@schweiz.org>
+2007-02-14 Andreas Tobler <a.tobler@schweiz.org>
* configure.ac: Introduce extra_ldflags_libgc. Use it for Darwin.
* configure: Regenerate.
@@ -2419,11 +2556,11 @@
targets. Remove comments.
Prepare ppc64 support for Darwin.
-2007-01-29 Andreas Tobler <a.tobler@schweiz.org>
+2007-01-29 Andreas Tobler <a.tobler@schweiz.org>
* darwin_stop_world.c: Clean up and reformat code.
-2007-01-28 Andreas Tobler <a.tobler@schweiz.org>
+2007-01-28 Andreas Tobler <a.tobler@schweiz.org>
* darwin_stop_world.c (GC_push_all_stacks): Fix compiler warnings.
Make i unsigned.
@@ -2444,7 +2581,7 @@
* configure: Regenerate.
* README.changes: Point to ChangeLog.
-2007-01-25 Andreas Tobler <a.tobler@schweiz.org>
+2007-01-25 Andreas Tobler <a.tobler@schweiz.org>
* darwin_stop_world.c: Move THREAD_FLD defines to ...
* include/private/gc_priv.h: ... here.
@@ -2452,13 +2589,13 @@
* os_dep.c (catch_exception_raise): Use THREAD_FLD for exc_state member
access.
-2007-01-18 Andreas Tobler <a.tobler@schweiz.org>
+2007-01-18 Andreas Tobler <a.tobler@schweiz.org>
* os_dep.c (if defined(MPROTECT_VDB) && defined(DARWIN)): Clean up and
reformat code.
Correct email reference.
-2007-01-11 Andreas Tobler <a.tobler@schweiz.org>
+2007-01-11 Andreas Tobler <a.tobler@schweiz.org>
* configure.ac (i?86*-*-darwin*): Replaced HAS_I386_THREAD_STATE_* with
HAS_X86_THREAD_STATE32_*.
@@ -2485,11 +2622,10 @@
X86_64 Darwin. Replaced old i386_EXCEPTION_STATE_* definition with
x86_EXCEPTION_STATE32_*. Add X86_64 for exc_state.faultvaddr.
-2007-01-09 Andreas Tobler <a.tobler@schweiz.org>
+2007-01-09 Andreas Tobler <a.tobler@schweiz.org>
* libtool.m4: Update to version from libtool-1.5.22.
* ltmain.sh: Likewise.
* ChangeLog: Created.
See doc/README.changes for earlier changes.
-
diff --git a/backgraph.c b/backgraph.c
index be33d742..1243c3ce 100644
--- a/backgraph.c
+++ b/backgraph.c
@@ -28,7 +28,7 @@
#ifdef MAKE_BACK_GRAPH
-#define MAX_IN 10 /* Maximum in-degree we handle directly */
+#define MAX_IN 10 /* Maximum in-degree we handle directly */
/* #include <unistd.h> */
@@ -37,59 +37,59 @@
#endif
/* We store single back pointers directly in the object's oh_bg_ptr field. */
-/* If there is more than one ptr to an object, we store q | FLAG_MANY, */
-/* where q is a pointer to a back_edges object. */
-/* Every once in a while we use a back_edges object even for a single */
+/* If there is more than one ptr to an object, we store q | FLAG_MANY, */
+/* where q is a pointer to a back_edges object. */
+/* Every once in a while we use a back_edges object even for a single */
/* pointer, since we need the other fields in the back_edges structure to */
/* be present in some fraction of the objects. Otherwise we get serious */
-/* performance issues. */
+/* performance issues. */
#define FLAG_MANY 2
typedef struct back_edges_struct {
- word n_edges; /* Number of edges, including those in continuation */
- /* structures. */
+ word n_edges; /* Number of edges, including those in continuation */
+ /* structures. */
unsigned short flags;
-# define RETAIN 1 /* Directly points to a reachable object; */
- /* retain for next GC. */
+# define RETAIN 1 /* Directly points to a reachable object; */
+ /* retain for next GC. */
unsigned short height_gc_no;
- /* If height > 0, then the GC_gc_no value when it */
- /* was computed. If it was computed this cycle, then */
- /* it is current. If it was computed during the */
- /* last cycle, then it represents the old height, */
- /* which is only saved for live objects referenced by */
- /* dead ones. This may grow due to refs from newly */
- /* dead objects. */
+ /* If height > 0, then the GC_gc_no value when it */
+ /* was computed. If it was computed this cycle, then */
+ /* it is current. If it was computed during the */
+ /* last cycle, then it represents the old height, */
+ /* which is only saved for live objects referenced by */
+ /* dead ones. This may grow due to refs from newly */
+ /* dead objects. */
signed_word height;
- /* Longest path through unreachable nodes to this node */
- /* that we found using depth first search. */
-
+ /* Longest path through unreachable nodes to this node */
+ /* that we found using depth first search. */
+
# define HEIGHT_UNKNOWN ((signed_word)(-2))
# define HEIGHT_IN_PROGRESS ((signed_word)(-1))
ptr_t edges[MAX_IN];
struct back_edges_struct *cont;
- /* Pointer to continuation structure; we use only the */
- /* edges field in the continuation. */
- /* also used as free list link. */
+ /* Pointer to continuation structure; we use only the */
+ /* edges field in the continuation. */
+ /* also used as free list link. */
} back_edges;
-/* Allocate a new back edge structure. Should be more sophisticated */
-/* if this were production code. */
+/* Allocate a new back edge structure. Should be more sophisticated */
+/* if this were production code. */
#define MAX_BACK_EDGE_STRUCTS 100000
static back_edges *back_edge_space = 0;
STATIC int GC_n_back_edge_structs = 0;
- /* Serves as pointer to never used */
- /* back_edges space. */
+ /* Serves as pointer to never used */
+ /* back_edges space. */
static back_edges *avail_back_edges = 0;
- /* Pointer to free list of deallocated */
- /* back_edges structures. */
+ /* Pointer to free list of deallocated */
+ /* back_edges structures. */
static back_edges * new_back_edges(void)
{
if (0 == back_edge_space) {
back_edge_space = (back_edges *)
- GET_MEM(MAX_BACK_EDGE_STRUCTS*sizeof(back_edges));
+ GET_MEM(MAX_BACK_EDGE_STRUCTS*sizeof(back_edges));
GC_add_to_our_memory((ptr_t)back_edge_space,
- MAX_BACK_EDGE_STRUCTS*sizeof(back_edges));
+ MAX_BACK_EDGE_STRUCTS*sizeof(back_edges));
}
if (0 != avail_back_edges) {
back_edges * result = avail_back_edges;
@@ -99,12 +99,12 @@ static back_edges * new_back_edges(void)
}
if (GC_n_back_edge_structs >= MAX_BACK_EDGE_STRUCTS - 1) {
ABORT("needed too much space for back edges: adjust "
- "MAX_BACK_EDGE_STRUCTS");
+ "MAX_BACK_EDGE_STRUCTS");
}
return back_edge_space + (GC_n_back_edge_structs++);
}
-/* Deallocate p and its associated continuation structures. */
+/* Deallocate p and its associated continuation structures. */
static void deallocate_back_edges(back_edges *p)
{
back_edges *last = p;
@@ -114,10 +114,10 @@ static void deallocate_back_edges(back_edges *p)
avail_back_edges = p;
}
-/* Table of objects that are currently on the depth-first search */
-/* stack. Only objects with in-degree one are in this table. */
-/* Other objects are identified using HEIGHT_IN_PROGRESS. */
-/* FIXME: This data structure NEEDS IMPROVEMENT. */
+/* Table of objects that are currently on the depth-first search */
+/* stack. Only objects with in-degree one are in this table. */
+/* Other objects are identified using HEIGHT_IN_PROGRESS. */
+/* FIXME: This data structure NEEDS IMPROVEMENT. */
#define INITIAL_IN_PROGRESS 10000
static ptr_t * in_progress_space = 0;
static size_t in_progress_size = 0;
@@ -130,23 +130,23 @@ static void push_in_progress(ptr_t p)
in_progress_size = INITIAL_IN_PROGRESS;
in_progress_space = (ptr_t *)GET_MEM(in_progress_size * sizeof(ptr_t));
GC_add_to_our_memory((ptr_t)in_progress_space,
- in_progress_size * sizeof(ptr_t));
+ in_progress_size * sizeof(ptr_t));
} else {
ptr_t * new_in_progress_space;
in_progress_size *= 2;
new_in_progress_space = (ptr_t *)
- GET_MEM(in_progress_size * sizeof(ptr_t));
+ GET_MEM(in_progress_size * sizeof(ptr_t));
GC_add_to_our_memory((ptr_t)new_in_progress_space,
- in_progress_size * sizeof(ptr_t));
+ in_progress_size * sizeof(ptr_t));
BCOPY(in_progress_space, new_in_progress_space,
- n_in_progress * sizeof(ptr_t));
+ n_in_progress * sizeof(ptr_t));
in_progress_space = new_in_progress_space;
- /* FIXME: This just drops the old space. */
+ /* FIXME: This just drops the old space. */
}
}
if (in_progress_space == 0)
ABORT("MAKE_BACK_GRAPH: Out of in-progress space: "
- "Huge linear data structure?");
+ "Huge linear data structure?");
in_progress_space[n_in_progress++] = p;
}
@@ -166,17 +166,17 @@ static void pop_in_progress(ptr_t p)
}
#define GET_OH_BG_PTR(p) \
- (ptr_t)REVEAL_POINTER(((oh *)(p)) -> oh_bg_ptr)
+ (ptr_t)REVEAL_POINTER(((oh *)(p)) -> oh_bg_ptr)
#define SET_OH_BG_PTR(p,q) (((oh *)(p)) -> oh_bg_ptr) = HIDE_POINTER(q)
-/* Execute s once for each predecessor q of p in the points-to graph. */
-/* s should be a bracketed statement. We declare q. */
+/* Execute s once for each predecessor q of p in the points-to graph. */
+/* s should be a bracketed statement. We declare q. */
#define FOR_EACH_PRED(q, p, s) \
{ \
ptr_t q = GET_OH_BG_PTR(p); \
if (!((word)q & FLAG_MANY)) { \
if (q && !((word)q & 1)) s \
- /* !((word)q & 1) checks for a misnterpreted freelist link */ \
+ /* !((word)q & 1) checks for a misnterpreted freelist link */ \
} else { \
back_edges *orig_be_ = (back_edges *)((word)q & ~FLAG_MANY); \
back_edges *be_ = orig_be_; \
@@ -184,16 +184,16 @@ static void pop_in_progress(ptr_t p)
word total_; \
word n_edges_ = be_ -> n_edges; \
for (total_ = 0, local_ = 0; total_ < n_edges_; ++local_, ++total_) { \
- if (local_ == MAX_IN) { \
- be_ = be_ -> cont; \
- local_ = 0; \
- } \
- q = be_ -> edges[local_]; s \
+ if (local_ == MAX_IN) { \
+ be_ = be_ -> cont; \
+ local_ = 0; \
+ } \
+ q = be_ -> edges[local_]; s \
} \
} \
}
-/* Ensure that p has a back_edges structure associated with it. */
+/* Ensure that p has a back_edges structure associated with it. */
static void ensure_struct(ptr_t p)
{
ptr_t old_back_ptr = GET_OH_BG_PTR(p);
@@ -214,8 +214,8 @@ static void ensure_struct(ptr_t p)
}
}
-/* Add the (forward) edge from p to q to the backward graph. Both p */
-/* q are pointers to the object base, i.e. pointers to an oh. */
+/* Add the (forward) edge from p to q to the backward graph. Both p */
+/* q are pointers to the object base, i.e. pointers to an oh. */
static void add_edge(ptr_t p, ptr_t q)
{
ptr_t old_back_ptr = GET_OH_BG_PTR(q);
@@ -223,22 +223,22 @@ static void add_edge(ptr_t p, ptr_t q)
word i;
static unsigned random_number = 13;
# define GOT_LUCKY_NUMBER (((++random_number) & 0x7f) == 0)
- /* A not very random number we use to occasionally allocate a */
- /* back_edges structure even for a single backward edge. This */
- /* prevents us from repeatedly tracing back through very long */
- /* chains, since we will have some place to store height and */
- /* in_progress flags along the way. */
+ /* A not very random number we use to occasionally allocate a */
+ /* back_edges structure even for a single backward edge. This */
+ /* prevents us from repeatedly tracing back through very long */
+ /* chains, since we will have some place to store height and */
+ /* in_progress flags along the way. */
GC_ASSERT(p == GC_base(p) && q == GC_base(q));
if (!GC_HAS_DEBUG_INFO(q) || !GC_HAS_DEBUG_INFO(p)) {
/* This is really a misinterpreted free list link, since we saw */
- /* a pointer to a free list. Dont overwrite it! */
+ /* a pointer to a free list. Dont overwrite it! */
return;
}
if (0 == old_back_ptr) {
- SET_OH_BG_PTR(q, p);
- if (GOT_LUCKY_NUMBER) ensure_struct(q);
- return;
+ SET_OH_BG_PTR(q, p);
+ if (GOT_LUCKY_NUMBER) ensure_struct(q);
+ return;
}
/* Check whether it was already in the list of predecessors. */
FOR_EACH_PRED(pred, q, { if (p == pred) return; });
@@ -246,21 +246,21 @@ static void add_edge(ptr_t p, ptr_t q)
old_back_ptr = GET_OH_BG_PTR(q);
be = (back_edges *)((word)old_back_ptr & ~FLAG_MANY);
for (i = be -> n_edges, be_cont = be; i > MAX_IN;
- be_cont = be_cont -> cont, i -= MAX_IN) {}
+ be_cont = be_cont -> cont, i -= MAX_IN) {}
if (i == MAX_IN) {
- be_cont -> cont = new_back_edges();
- be_cont = be_cont -> cont;
- i = 0;
+ be_cont -> cont = new_back_edges();
+ be_cont = be_cont -> cont;
+ i = 0;
}
be_cont -> edges[i] = p;
be -> n_edges++;
if (be -> n_edges == 100) {
# if 0
- if (GC_print_stats) {
- GC_err_printf("The following object has in-degree >= 100:\n");
- GC_print_heap_obj(q);
- }
-# endif
+ if (GC_print_stats) {
+ GC_err_printf("The following object has in-degree >= 100:\n");
+ GC_print_heap_obj(q);
+ }
+# endif
}
}
@@ -294,25 +294,25 @@ static void reset_back_edge(ptr_t p, size_t n_bytes, word gc_descr)
if ((word)old_back_ptr & FLAG_MANY) {
back_edges *be = (back_edges *)((word)old_back_ptr & ~FLAG_MANY);
if (!(be -> flags & RETAIN)) {
- deallocate_back_edges(be);
- SET_OH_BG_PTR(p, 0);
+ deallocate_back_edges(be);
+ SET_OH_BG_PTR(p, 0);
} else {
- GC_ASSERT(GC_is_marked(p));
+ GC_ASSERT(GC_is_marked(p));
- /* Back edges may point to objects that will not be retained. */
- /* Delete them for now, but remember the height. */
- /* Some will be added back at next GC. */
- be -> n_edges = 0;
- if (0 != be -> cont) {
- deallocate_back_edges(be -> cont);
- be -> cont = 0;
- }
+ /* Back edges may point to objects that will not be retained. */
+ /* Delete them for now, but remember the height. */
+ /* Some will be added back at next GC. */
+ be -> n_edges = 0;
+ if (0 != be -> cont) {
+ deallocate_back_edges(be -> cont);
+ be -> cont = 0;
+ }
- GC_ASSERT(GC_is_marked(p));
+ GC_ASSERT(GC_is_marked(p));
- /* We only retain things for one GC cycle at a time. */
- be -> flags &= ~RETAIN;
+ /* We only retain things for one GC cycle at a time. */
+ be -> flags &= ~RETAIN;
}
} else /* Simple back pointer */ {
/* Clear to avoid dangling pointer. */
@@ -325,33 +325,33 @@ static void add_back_edges(ptr_t p, size_t n_bytes, word gc_descr)
{
word *currentp = (word *)(p + sizeof(oh));
- /* For now, fix up non-length descriptors conservatively. */
+ /* For now, fix up non-length descriptors conservatively. */
if((gc_descr & GC_DS_TAGS) != GC_DS_LENGTH) {
gc_descr = n_bytes;
}
while (currentp < (word *)(p + gc_descr)) {
word current = *currentp++;
FIXUP_POINTER(current);
- if (current >= (word)GC_least_plausible_heap_addr &&
- current <= (word)GC_greatest_plausible_heap_addr) {
+ if (current >= (word)GC_least_plausible_heap_addr &&
+ current <= (word)GC_greatest_plausible_heap_addr) {
ptr_t target = GC_base((void *)current);
if (0 != target) {
- add_edge(p, target);
+ add_edge(p, target);
}
}
}
}
-/* Rebuild the representation of the backward reachability graph. */
-/* Does not examine mark bits. Can be called before GC. */
+/* Rebuild the representation of the backward reachability graph. */
+/* Does not examine mark bits. Can be called before GC. */
void GC_build_back_graph(void)
{
GC_apply_to_each_object(add_back_edges);
}
-/* Return an approximation to the length of the longest simple path */
-/* through unreachable objects to p. We refer to this as the height */
-/* of p. */
+/* Return an approximation to the length of the longest simple path */
+/* through unreachable objects to p. We refer to this as the height */
+/* of p. */
static word backwards_height(ptr_t p)
{
word result;
@@ -361,8 +361,8 @@ static word backwards_height(ptr_t p)
if (0 == back_ptr) return 1;
if (!((word)back_ptr & FLAG_MANY)) {
if (is_in_progress(p)) return 0; /* DFS back edge, i.e. we followed */
- /* an edge to an object already */
- /* on our stack: ignore */
+ /* an edge to an object already */
+ /* on our stack: ignore */
push_in_progress(p);
result = backwards_height(back_ptr)+1;
pop_in_progress(p);
@@ -379,9 +379,9 @@ static word backwards_height(ptr_t p)
word this_height;
if (GC_is_marked(q) && !(FLAG_MANY & (word)GET_OH_BG_PTR(p))) {
if (GC_print_stats)
- GC_log_printf("Found bogus pointer from %p to %p\n", q, p);
- /* Reachable object "points to" unreachable one. */
- /* Could be caused by our lax treatment of GC descriptors. */
+ GC_log_printf("Found bogus pointer from %p to %p\n", q, p);
+ /* Reachable object "points to" unreachable one. */
+ /* Could be caused by our lax treatment of GC descriptors. */
this_height = 1;
} else {
this_height = backwards_height(q);
@@ -396,12 +396,12 @@ static word backwards_height(ptr_t p)
STATIC word GC_max_height;
STATIC ptr_t GC_deepest_obj;
-/* Compute the maximum height of every unreachable predecessor p of a */
-/* reachable object. Arrange to save the heights of all such objects p */
-/* so that they can be used in calculating the height of objects in the */
-/* next GC. */
-/* Set GC_max_height to be the maximum height we encounter, and */
-/* GC_deepest_obj to be the corresponding object. */
+/* Compute the maximum height of every unreachable predecessor p of a */
+/* reachable object. Arrange to save the heights of all such objects p */
+/* so that they can be used in calculating the height of objects in the */
+/* next GC. */
+/* Set GC_max_height to be the maximum height we encounter, and */
+/* GC_deepest_obj to be the corresponding object. */
/*ARGSUSED*/
static void update_max_height(ptr_t p, size_t n_bytes, word gc_descr)
{
@@ -411,9 +411,9 @@ static void update_max_height(ptr_t p, size_t n_bytes, word gc_descr)
ptr_t back_ptr;
back_edges *be = 0;
- /* If we remembered a height last time, use it as a minimum. */
- /* It may have increased due to newly unreachable chains pointing */
- /* to p, but it can't have decreased. */
+ /* If we remembered a height last time, use it as a minimum. */
+ /* It may have increased due to newly unreachable chains pointing */
+ /* to p, but it can't have decreased. */
back_ptr = GET_OH_BG_PTR(p);
if (0 != back_ptr && ((word)back_ptr & FLAG_MANY)) {
be = (back_edges *)((word)back_ptr & ~FLAG_MANY);
@@ -424,26 +424,26 @@ static void update_max_height(ptr_t p, size_t n_bytes, word gc_descr)
word q_height;
q_height = backwards_height(q);
- if (q_height > p_height) {
- p_height = q_height;
- p_deepest_obj = q;
- }
+ if (q_height > p_height) {
+ p_height = q_height;
+ p_deepest_obj = q;
+ }
}
});
if (p_height > 0) {
/* Remember the height for next time. */
- if (be == 0) {
- ensure_struct(p);
- back_ptr = GET_OH_BG_PTR(p);
- be = (back_edges *)((word)back_ptr & ~FLAG_MANY);
- }
- be -> flags |= RETAIN;
- be -> height = p_height;
- be -> height_gc_no = (unsigned short)GC_gc_no;
+ if (be == 0) {
+ ensure_struct(p);
+ back_ptr = GET_OH_BG_PTR(p);
+ be = (back_edges *)((word)back_ptr & ~FLAG_MANY);
+ }
+ be -> flags |= RETAIN;
+ be -> height = p_height;
+ be -> height_gc_no = (unsigned short)GC_gc_no;
}
if (p_height > GC_max_height) {
- GC_max_height = p_height;
- GC_deepest_obj = p_deepest_obj;
+ GC_max_height = p_height;
+ GC_deepest_obj = p_deepest_obj;
}
}
}
@@ -461,16 +461,16 @@ void GC_traverse_back_graph(void)
void GC_print_back_graph_stats(void)
{
GC_printf("Maximum backwards height of reachable objects at GC %lu is %ld\n",
- (unsigned long) GC_gc_no, (unsigned long)GC_max_height);
+ (unsigned long) GC_gc_no, (unsigned long)GC_max_height);
if (GC_max_height > GC_max_max_height) {
GC_max_max_height = GC_max_height;
GC_printf("The following unreachable object is last in a longest chain "
- "of unreachable objects:\n");
+ "of unreachable objects:\n");
GC_print_heap_obj(GC_deepest_obj);
}
if (GC_print_stats) {
GC_log_printf("Needed max total of %d back-edge structs\n",
- GC_n_back_edge_structs);
+ GC_n_back_edge_structs);
}
GC_apply_to_each_object(reset_back_edge);
GC_deepest_obj = 0;
@@ -479,6 +479,6 @@ void GC_print_back_graph_stats(void)
#else /* !MAKE_BACK_GRAPH */
extern int GC_quiet;
- /* ANSI C doesn't allow translation units to be empty. */
+ /* ANSI C doesn't allow translation units to be empty. */
#endif /* !MAKE_BACK_GRAPH */
diff --git a/blacklst.c b/blacklst.c
index f75ed901..db34ee1d 100644
--- a/blacklst.c
+++ b/blacklst.c
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
*
@@ -23,10 +23,10 @@
* from elsewhere, since the former can pin a large object that spans the
* block, eventhough it does not start on the dangerous block.
*/
-
+
/*
* Externally callable routines are:
-
+
* GC_add_to_black_list_normal
* GC_add_to_black_list_stack
* GC_promote_black_lists
@@ -35,14 +35,14 @@
* All require that the allocator lock is held.
*/
-/* Pointers to individual tables. We replace one table by another by */
-/* switching these pointers. */
+/* Pointers to individual tables. We replace one table by another by */
+/* switching these pointers. */
STATIC word * GC_old_normal_bl;
- /* Nonstack false references seen at last full */
- /* collection. */
+ /* Nonstack false references seen at last full */
+ /* collection. */
STATIC word * GC_incomplete_normal_bl;
- /* Nonstack false references seen since last */
- /* full collection. */
+ /* Nonstack false references seen since last */
+ /* full collection. */
STATIC word * GC_old_stack_bl;
STATIC word * GC_incomplete_stack_bl;
@@ -57,7 +57,7 @@ void GC_default_print_heap_obj_proc(ptr_t p)
ptr_t base = GC_base(p);
GC_err_printf("start: %p, appr. length: %ld", base,
- (unsigned long)GC_size(base));
+ (unsigned long)GC_size(base));
}
void (*GC_print_heap_obj) (ptr_t p) = GC_default_print_heap_obj_proc;
@@ -67,17 +67,17 @@ STATIC void GC_print_source_ptr(ptr_t p)
{
ptr_t base = GC_base(p);
if (0 == base) {
- if (0 == p) {
- GC_err_printf("in register");
- } else {
- GC_err_printf("in root set");
- }
+ if (0 == p) {
+ GC_err_printf("in register");
+ } else {
+ GC_err_printf("in root set");
+ }
} else {
- GC_err_printf("in object at ");
- /* FIXME: We can't call the debug version of GC_print_heap_obj */
- /* (with PRINT_CALL_CHAIN) here because the lock is held and */
- /* the world is stopped. */
- GC_default_print_heap_obj_proc(base);
+ GC_err_printf("in object at ");
+ /* FIXME: We can't call the debug version of GC_print_heap_obj */
+ /* (with PRINT_CALL_CHAIN) here because the lock is held and */
+ /* the world is stopped. */
+ GC_default_print_heap_obj_proc(base);
}
}
#endif
@@ -86,9 +86,9 @@ void GC_bl_init(void)
{
if (!GC_all_interior_pointers) {
GC_old_normal_bl = (word *)
- GC_scratch_alloc((word)(sizeof (page_hash_table)));
+ GC_scratch_alloc((word)(sizeof (page_hash_table)));
GC_incomplete_normal_bl = (word *)GC_scratch_alloc
- ((word)(sizeof(page_hash_table)));
+ ((word)(sizeof(page_hash_table)));
if (GC_old_normal_bl == 0 || GC_incomplete_normal_bl == 0) {
GC_err_printf("Insufficient memory for black list\n");
EXIT();
@@ -98,7 +98,7 @@ void GC_bl_init(void)
}
GC_old_stack_bl = (word *)GC_scratch_alloc((word)(sizeof(page_hash_table)));
GC_incomplete_stack_bl = (word *)GC_scratch_alloc
- ((word)(sizeof(page_hash_table)));
+ ((word)(sizeof(page_hash_table)));
if (GC_old_stack_bl == 0 || GC_incomplete_stack_bl == 0) {
GC_err_printf("Insufficient memory for black list\n");
EXIT();
@@ -106,7 +106,7 @@ void GC_bl_init(void)
GC_clear_bl(GC_old_stack_bl);
GC_clear_bl(GC_incomplete_stack_bl);
}
-
+
void GC_clear_bl(word *doomed)
{
BZERO(doomed, sizeof(page_hash_table));
@@ -119,13 +119,13 @@ void GC_copy_bl(word *old, word *new)
static word total_stack_black_listed(void);
-/* Signal the completion of a collection. Turn the incomplete black */
-/* lists into new black lists, etc. */
+/* Signal the completion of a collection. Turn the incomplete black */
+/* lists into new black lists, etc. */
void GC_promote_black_lists(void)
{
word * very_old_normal_bl = GC_old_normal_bl;
word * very_old_stack_bl = GC_old_stack_bl;
-
+
GC_old_normal_bl = GC_incomplete_normal_bl;
GC_old_stack_bl = GC_incomplete_stack_bl;
if (!GC_all_interior_pointers) {
@@ -136,21 +136,21 @@ void GC_promote_black_lists(void)
GC_incomplete_stack_bl = very_old_stack_bl;
GC_total_stack_black_listed = total_stack_black_listed();
if (GC_print_stats == VERBOSE)
- GC_log_printf("%ld bytes in heap blacklisted for interior pointers\n",
- (unsigned long)GC_total_stack_black_listed);
+ GC_log_printf("%ld bytes in heap blacklisted for interior pointers\n",
+ (unsigned long)GC_total_stack_black_listed);
if (GC_total_stack_black_listed != 0) {
GC_black_list_spacing =
- HBLKSIZE*(GC_heapsize/GC_total_stack_black_listed);
+ HBLKSIZE*(GC_heapsize/GC_total_stack_black_listed);
}
if (GC_black_list_spacing < 3 * HBLKSIZE) {
- GC_black_list_spacing = 3 * HBLKSIZE;
+ GC_black_list_spacing = 3 * HBLKSIZE;
}
if (GC_black_list_spacing > MAXHINCR * HBLKSIZE) {
- GC_black_list_spacing = MAXHINCR * HBLKSIZE;
- /* Makes it easier to allocate really huge blocks, which otherwise */
- /* may have problems with nonuniform blacklist distributions. */
- /* This way we should always succeed immediately after growing the */
- /* heap. */
+ GC_black_list_spacing = MAXHINCR * HBLKSIZE;
+ /* Makes it easier to allocate really huge blocks, which otherwise */
+ /* may have problems with nonuniform blacklist distributions. */
+ /* This way we should always succeed immediately after growing the */
+ /* heap. */
}
}
@@ -162,9 +162,9 @@ void GC_unpromote_black_lists(void)
GC_copy_bl(GC_old_stack_bl, GC_incomplete_stack_bl);
}
-/* P is not a valid pointer reference, but it falls inside */
-/* the plausible heap bounds. */
-/* Add it to the normal incomplete black list if appropriate. */
+/* P is not a valid pointer reference, but it falls inside */
+/* the plausible heap bounds. */
+/* Add it to the normal incomplete black list if appropriate. */
#ifdef PRINT_BLACK_LIST
void GC_add_to_black_list_normal(word p, ptr_t source)
#else
@@ -174,20 +174,20 @@ void GC_unpromote_black_lists(void)
if (!(GC_modws_valid_offsets[p & (sizeof(word)-1)])) return;
{
word index = PHT_HASH((word)p);
-
+
if (HDR(p) == 0 || get_pht_entry_from_index(GC_old_normal_bl, index)) {
-# ifdef PRINT_BLACK_LIST
- if (!get_pht_entry_from_index(GC_incomplete_normal_bl, index)) {
- GC_err_printf(
- "Black listing (normal) %p referenced from %p ",
- (ptr_t) p, source);
- GC_print_source_ptr(source);
- GC_err_puts("\n");
- }
+# ifdef PRINT_BLACK_LIST
+ if (!get_pht_entry_from_index(GC_incomplete_normal_bl, index)) {
+ GC_err_printf(
+ "Black listing (normal) %p referenced from %p ",
+ (ptr_t) p, source);
+ GC_print_source_ptr(source);
+ GC_err_puts("\n");
+ }
# endif
set_pht_entry_from_index(GC_incomplete_normal_bl, index);
} /* else this is probably just an interior pointer to an allocated */
- /* object, and isn't worth black listing. */
+ /* object, and isn't worth black listing. */
}
}
@@ -199,18 +199,18 @@ void GC_unpromote_black_lists(void)
#endif
{
word index = PHT_HASH((word)p);
-
+
if (HDR(p) == 0 || get_pht_entry_from_index(GC_old_stack_bl, index)) {
-# ifdef PRINT_BLACK_LIST
- if (!get_pht_entry_from_index(GC_incomplete_stack_bl, index)) {
- GC_err_printf(
- "Black listing (stack) %p referenced from %p ",
- (ptr_t)p, source);
- GC_print_source_ptr(source);
- GC_err_puts("\n");
- }
+# ifdef PRINT_BLACK_LIST
+ if (!get_pht_entry_from_index(GC_incomplete_stack_bl, index)) {
+ GC_err_printf(
+ "Black listing (stack) %p referenced from %p ",
+ (ptr_t)p, source);
+ GC_print_source_ptr(source);
+ GC_err_puts("\n");
+ }
# endif
- set_pht_entry_from_index(GC_incomplete_stack_bl, index);
+ set_pht_entry_from_index(GC_incomplete_stack_bl, index);
}
}
@@ -234,7 +234,7 @@ struct hblk * GC_is_black_listed(struct hblk *h, word len)
return(h+1);
}
}
-
+
for (i = 0; ; ) {
if (GC_old_stack_bl[divWORDSZ(index)] == 0
&& GC_incomplete_stack_bl[divWORDSZ(index)] == 0) {
@@ -254,17 +254,17 @@ struct hblk * GC_is_black_listed(struct hblk *h, word len)
}
-/* Return the number of blacklisted blocks in a given range. */
-/* Used only for statistical purposes. */
-/* Looks only at the GC_incomplete_stack_bl. */
+/* Return the number of blacklisted blocks in a given range. */
+/* Used only for statistical purposes. */
+/* Looks only at the GC_incomplete_stack_bl. */
word GC_number_stack_black_listed(struct hblk *start, struct hblk *endp1)
{
register struct hblk * h;
word result = 0;
-
+
for (h = start; h < endp1; h++) {
word index = PHT_HASH((word)h);
-
+
if (get_pht_entry_from_index(GC_old_stack_bl, index)) result++;
}
return(result);
@@ -276,14 +276,13 @@ static word total_stack_black_listed(void)
{
register unsigned i;
word total = 0;
-
+
for (i = 0; i < GC_n_heap_sects; i++) {
- struct hblk * start = (struct hblk *) GC_heap_sects[i].hs_start;
- size_t len = (word) GC_heap_sects[i].hs_bytes;
- struct hblk * endp1 = start + len/HBLKSIZE;
-
- total += GC_number_stack_black_listed(start, endp1);
+ struct hblk * start = (struct hblk *) GC_heap_sects[i].hs_start;
+ size_t len = (word) GC_heap_sects[i].hs_bytes;
+ struct hblk * endp1 = start + len/HBLKSIZE;
+
+ total += GC_number_stack_black_listed(start, endp1);
}
return(total * HBLKSIZE);
}
-
diff --git a/checksums.c b/checksums.c
index 6e1df9c8..c0a2fa21 100644
--- a/checksums.c
+++ b/checksums.c
@@ -16,27 +16,27 @@
# ifdef CHECKSUMS
-/* This is debugging code intended to verify the results of dirty bit */
-/* computations. Works only in a single threaded environment. */
-/* We assume that stubborn objects are changed only when they are */
-/* enabled for writing. (Certain kinds of writing are actually */
-/* safe under other conditions.) */
+/* This is debugging code intended to verify the results of dirty bit */
+/* computations. Works only in a single threaded environment. */
+/* We assume that stubborn objects are changed only when they are */
+/* enabled for writing. (Certain kinds of writing are actually */
+/* safe under other conditions.) */
# define NSUMS 10000
# define OFFSET 0x10000
typedef struct {
- GC_bool new_valid;
- word old_sum;
- word new_sum;
- struct hblk * block; /* Block to which this refers + OFFSET */
- /* to hide it from collector. */
+ GC_bool new_valid;
+ word old_sum;
+ word new_sum;
+ struct hblk * block; /* Block to which this refers + OFFSET */
+ /* to hide it from collector. */
} page_entry;
page_entry GC_sums [NSUMS];
-STATIC word GC_faulted[NSUMS]; /* Record of pages on which we saw a write */
- /* fault. */
+STATIC word GC_faulted[NSUMS]; /* Record of pages on which we saw a write */
+ /* fault. */
STATIC size_t GC_n_faulted = 0;
void GC_record_fault(struct hblk * h)
@@ -57,7 +57,7 @@ STATIC GC_bool GC_was_faulted(struct hblk *h)
page += GC_page_size - 1;
page &= ~(GC_page_size - 1);
for (i = 0; i < GC_n_faulted; ++i) {
- if (GC_faulted[i] == page) return TRUE;
+ if (GC_faulted[i] == page) return TRUE;
}
return FALSE;
}
@@ -67,7 +67,7 @@ STATIC word GC_checksum(struct hblk *h)
register word *p = (word *)h;
register word *lim = (word *)(h+1);
register word result = 0;
-
+
while (p < lim) {
result += *p++;
}
@@ -75,14 +75,14 @@ STATIC word GC_checksum(struct hblk *h)
}
# ifdef STUBBORN_ALLOC
-/* Check whether a stubborn object from the given block appears on */
-/* the appropriate free list. */
+/* Check whether a stubborn object from the given block appears on */
+/* the appropriate free list. */
STATIC GC_bool GC_on_free_list(struct hblk *h)
{
hdr * hhdr = HDR(h);
size_t sz = BYTES_TO_WORDS(hhdr -> hb_sz);
ptr_t p;
-
+
if (sz > MAXOBJWORDS) return(FALSE);
for (p = GC_sobjfreelist[sz]; p != 0; p = obj_link(p)) {
if (HBLKPTR(p) == h) return(TRUE);
@@ -90,7 +90,7 @@ STATIC GC_bool GC_on_free_list(struct hblk *h)
return(FALSE);
}
# endif
-
+
int GC_n_dirty_errors;
int GC_n_faulted_dirty_errors;
int GC_n_changed_errors;
@@ -102,7 +102,7 @@ STATIC void GC_update_check_page(struct hblk *h, int index)
page_entry *pe = GC_sums + index;
register hdr * hhdr = HDR(h);
struct hblk *b;
-
+
if (pe -> block != 0 && pe -> block != h + OFFSET) ABORT("goofed");
pe -> old_sum = pe -> new_sum;
pe -> new_sum = GC_checksum(h);
@@ -112,33 +112,33 @@ STATIC void GC_update_check_page(struct hblk *h, int index)
}
# endif
if (GC_page_was_dirty(h)) {
- GC_n_dirty++;
+ GC_n_dirty++;
} else {
- GC_n_clean++;
+ GC_n_clean++;
}
b = h;
while (IS_FORWARDING_ADDR_OR_NIL(hhdr) && hhdr != 0) {
- b -= (word)hhdr;
- hhdr = HDR(b);
+ b -= (word)hhdr;
+ hhdr = HDR(b);
}
if (pe -> new_valid
- && hhdr != 0 && hhdr -> hb_descr != 0 /* may contain pointers */
- && pe -> old_sum != pe -> new_sum) {
- if (!GC_page_was_dirty(h) || !GC_page_was_ever_dirty(h)) {
- GC_bool was_faulted = GC_was_faulted(h);
- /* Set breakpoint here */GC_n_dirty_errors++;
- if (was_faulted) GC_n_faulted_dirty_errors++;
- }
-# ifdef STUBBORN_ALLOC
- if (!HBLK_IS_FREE(hhdr)
- && hhdr -> hb_obj_kind == STUBBORN
- && !GC_page_was_changed(h)
- && !GC_on_free_list(h)) {
- /* if GC_on_free_list(h) then reclaim may have touched it */
- /* without any allocations taking place. */
- /* Set breakpoint here */GC_n_changed_errors++;
- }
-# endif
+ && hhdr != 0 && hhdr -> hb_descr != 0 /* may contain pointers */
+ && pe -> old_sum != pe -> new_sum) {
+ if (!GC_page_was_dirty(h) || !GC_page_was_ever_dirty(h)) {
+ GC_bool was_faulted = GC_was_faulted(h);
+ /* Set breakpoint here */GC_n_dirty_errors++;
+ if (was_faulted) GC_n_faulted_dirty_errors++;
+ }
+# ifdef STUBBORN_ALLOC
+ if (!HBLK_IS_FREE(hhdr)
+ && hhdr -> hb_obj_kind == STUBBORN
+ && !GC_page_was_changed(h)
+ && !GC_on_free_list(h)) {
+ /* if GC_on_free_list(h) then reclaim may have touched it */
+ /* without any allocations taking place. */
+ /* Set breakpoint here */GC_n_changed_errors++;
+ }
+# endif
}
pe -> new_valid = TRUE;
pe -> block = h + OFFSET;
@@ -151,7 +151,7 @@ STATIC void GC_add_block(struct hblk *h, word dummy)
{
hdr * hhdr = HDR(h);
size_t bytes = hhdr -> hb_sz;
-
+
bytes += HBLKSIZE-1;
bytes &= ~(HBLKSIZE-1);
GC_bytes_in_used_blocks += bytes;
@@ -160,15 +160,15 @@ STATIC void GC_add_block(struct hblk *h, word dummy)
STATIC void GC_check_blocks(void)
{
word bytes_in_free_blocks = GC_large_free_bytes;
-
+
GC_bytes_in_used_blocks = 0;
GC_apply_to_all_blocks(GC_add_block, (word)0);
GC_printf("GC_bytes_in_used_blocks = %lu, bytes_in_free_blocks = %lu ",
- (unsigned long)GC_bytes_in_used_blocks,
- (unsigned long)bytes_in_free_blocks);
+ (unsigned long)GC_bytes_in_used_blocks,
+ (unsigned long)bytes_in_free_blocks);
GC_printf("GC_heapsize = %lu\n", (unsigned long)GC_heapsize);
if (GC_bytes_in_used_blocks + bytes_in_free_blocks != GC_heapsize) {
- GC_printf("LOST SOME BLOCKS!!\n");
+ GC_printf("LOST SOME BLOCKS!!\n");
}
}
@@ -179,18 +179,18 @@ void GC_check_dirty(void)
unsigned i;
struct hblk *h;
ptr_t start;
-
+
GC_check_blocks();
-
+
GC_n_dirty_errors = 0;
GC_n_faulted_dirty_errors = 0;
GC_n_changed_errors = 0;
GC_n_clean = 0;
GC_n_dirty = 0;
-
+
index = 0;
for (i = 0; i < GC_n_heap_sects; i++) {
- start = GC_heap_sects[i].hs_start;
+ start = GC_heap_sects[i].hs_start;
for (h = (struct hblk *)start;
h < (struct hblk *)(start + GC_heap_sects[i].hs_bytes);
h++) {
@@ -201,19 +201,19 @@ void GC_check_dirty(void)
}
out:
GC_printf("Checked %lu clean and %lu dirty pages\n",
- (unsigned long) GC_n_clean, (unsigned long) GC_n_dirty);
+ (unsigned long) GC_n_clean, (unsigned long) GC_n_dirty);
if (GC_n_dirty_errors > 0) {
GC_printf("Found %d dirty bit errors (%d were faulted)\n",
- GC_n_dirty_errors, GC_n_faulted_dirty_errors);
+ GC_n_dirty_errors, GC_n_faulted_dirty_errors);
}
if (GC_n_changed_errors > 0) {
- GC_printf("Found %lu changed bit errors\n",
- (unsigned long)GC_n_changed_errors);
- GC_printf("These may be benign (provoked by nonpointer changes)\n");
-# ifdef THREADS
- GC_printf(
- "Also expect 1 per thread currently allocating a stubborn obj.\n");
-# endif
+ GC_printf("Found %lu changed bit errors\n",
+ (unsigned long)GC_n_changed_errors);
+ GC_printf("These may be benign (provoked by nonpointer changes)\n");
+# ifdef THREADS
+ GC_printf(
+ "Also expect 1 per thread currently allocating a stubborn obj.\n");
+# endif
}
for (i = 0; i < GC_n_faulted; ++i) {
GC_faulted[i] = 0; /* Don't expose block pointers to GC */
@@ -224,7 +224,7 @@ out:
# else
extern int GC_quiet;
- /* ANSI C doesn't allow translation units to be empty. */
- /* So we guarantee this one is nonempty. */
+ /* ANSI C doesn't allow translation units to be empty. */
+ /* So we guarantee this one is nonempty. */
# endif /* CHECKSUMS */
diff --git a/darwin_stop_world.c b/darwin_stop_world.c
index b935146a..7c8c3ca1 100644
--- a/darwin_stop_world.c
+++ b/darwin_stop_world.c
@@ -1,3 +1,20 @@
+/*
+ * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
+ * Copyright (c) 2000-2009 by Hewlett-Packard Development Company.
+ * All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+
#include "private/pthread_support.h"
/* This probably needs more porting work to ppc64. */
@@ -23,23 +40,23 @@
#endif
typedef struct StackFrame {
- unsigned long savedSP;
- unsigned long savedCR;
- unsigned long savedLR;
- unsigned long reserved[2];
- unsigned long savedRTOC;
+ unsigned long savedSP;
+ unsigned long savedCR;
+ unsigned long savedLR;
+ unsigned long reserved[2];
+ unsigned long savedRTOC;
} StackFrame;
unsigned long FindTopOfStack(unsigned long stack_start)
{
- StackFrame *frame;
+ StackFrame *frame;
if (stack_start == 0) {
# ifdef POWERPC
# if CPP_WORDSZ == 32
- __asm__ volatile("lwz %0,0(r1)" : "=r" (frame));
+ __asm__ volatile("lwz %0,0(r1)" : "=r" (frame));
# else
- __asm__ volatile("ld %0,0(r1)" : "=r" (frame));
+ __asm__ volatile("ld %0,0(r1)" : "=r" (frame));
# endif
# endif
} else {
@@ -84,8 +101,8 @@ void GC_push_all_stacks(void)
pthread_t me;
ptr_t lo, hi;
GC_THREAD_STATE_T state;
- /* MACHINE_THREAD_STATE_COUNT doesn't seem to be defined everywhere. */
- /* Hence we use our own version. */
+ /* MACHINE_THREAD_STATE_COUNT doesn't seem to be defined everywhere. */
+ /* Hence we use our own version. */
mach_msg_type_number_t thread_state_count = GC_MACH_THREAD_STATE_COUNT;
me = pthread_self();
@@ -96,121 +113,121 @@ void GC_push_all_stacks(void)
for(p = GC_threads[i]; p != 0; p = p->next) {
if(p->flags & FINISHED) continue;
if(pthread_equal(p->id, me)) {
- lo = GC_approx_sp();
+ lo = GC_approx_sp();
} else {
- /* Get the thread state (registers, etc) */
- r = thread_get_state(p->stop_info.mach_thread, GC_MACH_THREAD_STATE,
- (natural_t*)&state, &thread_state_count);
+ /* Get the thread state (registers, etc) */
+ r = thread_get_state(p->stop_info.mach_thread, GC_MACH_THREAD_STATE,
+ (natural_t*)&state, &thread_state_count);
# ifdef DEBUG_THREADS
- GC_printf("thread_get_state return value = %d\n", r);
-# endif
+ GC_printf("thread_get_state return value = %d\n", r);
+# endif
- if(r != KERN_SUCCESS)
- ABORT("thread_get_state failed");
+ if(r != KERN_SUCCESS)
+ ABORT("thread_get_state failed");
# if defined(I386)
- lo = (void*)state . THREAD_FLD (esp);
- GC_push_one(state . THREAD_FLD (eax));
- GC_push_one(state . THREAD_FLD (ebx));
- GC_push_one(state . THREAD_FLD (ecx));
- GC_push_one(state . THREAD_FLD (edx));
- GC_push_one(state . THREAD_FLD (edi));
- GC_push_one(state . THREAD_FLD (esi));
- GC_push_one(state . THREAD_FLD (ebp));
+ lo = (void*)state . THREAD_FLD (esp);
+ GC_push_one(state . THREAD_FLD (eax));
+ GC_push_one(state . THREAD_FLD (ebx));
+ GC_push_one(state . THREAD_FLD (ecx));
+ GC_push_one(state . THREAD_FLD (edx));
+ GC_push_one(state . THREAD_FLD (edi));
+ GC_push_one(state . THREAD_FLD (esi));
+ GC_push_one(state . THREAD_FLD (ebp));
# elif defined(X86_64)
- lo = (void*)state . THREAD_FLD (rsp);
- GC_push_one(state . THREAD_FLD (rax));
- GC_push_one(state . THREAD_FLD (rbx));
- GC_push_one(state . THREAD_FLD (rcx));
- GC_push_one(state . THREAD_FLD (rdx));
- GC_push_one(state . THREAD_FLD (rdi));
- GC_push_one(state . THREAD_FLD (rsi));
- GC_push_one(state . THREAD_FLD (rbp));
- GC_push_one(state . THREAD_FLD (rsp));
- GC_push_one(state . THREAD_FLD (r8));
- GC_push_one(state . THREAD_FLD (r9));
- GC_push_one(state . THREAD_FLD (r10));
- GC_push_one(state . THREAD_FLD (r11));
- GC_push_one(state . THREAD_FLD (r12));
- GC_push_one(state . THREAD_FLD (r13));
- GC_push_one(state . THREAD_FLD (r14));
- GC_push_one(state . THREAD_FLD (r15));
- GC_push_one(state . THREAD_FLD (rip));
- GC_push_one(state . THREAD_FLD (rflags));
- GC_push_one(state . THREAD_FLD (cs));
- GC_push_one(state . THREAD_FLD (fs));
- GC_push_one(state . THREAD_FLD (gs));
+ lo = (void*)state . THREAD_FLD (rsp);
+ GC_push_one(state . THREAD_FLD (rax));
+ GC_push_one(state . THREAD_FLD (rbx));
+ GC_push_one(state . THREAD_FLD (rcx));
+ GC_push_one(state . THREAD_FLD (rdx));
+ GC_push_one(state . THREAD_FLD (rdi));
+ GC_push_one(state . THREAD_FLD (rsi));
+ GC_push_one(state . THREAD_FLD (rbp));
+ GC_push_one(state . THREAD_FLD (rsp));
+ GC_push_one(state . THREAD_FLD (r8));
+ GC_push_one(state . THREAD_FLD (r9));
+ GC_push_one(state . THREAD_FLD (r10));
+ GC_push_one(state . THREAD_FLD (r11));
+ GC_push_one(state . THREAD_FLD (r12));
+ GC_push_one(state . THREAD_FLD (r13));
+ GC_push_one(state . THREAD_FLD (r14));
+ GC_push_one(state . THREAD_FLD (r15));
+ GC_push_one(state . THREAD_FLD (rip));
+ GC_push_one(state . THREAD_FLD (rflags));
+ GC_push_one(state . THREAD_FLD (cs));
+ GC_push_one(state . THREAD_FLD (fs));
+ GC_push_one(state . THREAD_FLD (gs));
# elif defined(POWERPC)
- lo = (void*)(state . THREAD_FLD (r1) - PPC_RED_ZONE_SIZE);
-
- GC_push_one(state . THREAD_FLD (r0));
- GC_push_one(state . THREAD_FLD (r2));
- GC_push_one(state . THREAD_FLD (r3));
- GC_push_one(state . THREAD_FLD (r4));
- GC_push_one(state . THREAD_FLD (r5));
- GC_push_one(state . THREAD_FLD (r6));
- GC_push_one(state . THREAD_FLD (r7));
- GC_push_one(state . THREAD_FLD (r8));
- GC_push_one(state . THREAD_FLD (r9));
- GC_push_one(state . THREAD_FLD (r10));
- GC_push_one(state . THREAD_FLD (r11));
- GC_push_one(state . THREAD_FLD (r12));
- GC_push_one(state . THREAD_FLD (r13));
- GC_push_one(state . THREAD_FLD (r14));
- GC_push_one(state . THREAD_FLD (r15));
- GC_push_one(state . THREAD_FLD (r16));
- GC_push_one(state . THREAD_FLD (r17));
- GC_push_one(state . THREAD_FLD (r18));
- GC_push_one(state . THREAD_FLD (r19));
- GC_push_one(state . THREAD_FLD (r20));
- GC_push_one(state . THREAD_FLD (r21));
- GC_push_one(state . THREAD_FLD (r22));
- GC_push_one(state . THREAD_FLD (r23));
- GC_push_one(state . THREAD_FLD (r24));
- GC_push_one(state . THREAD_FLD (r25));
- GC_push_one(state . THREAD_FLD (r26));
- GC_push_one(state . THREAD_FLD (r27));
- GC_push_one(state . THREAD_FLD (r28));
- GC_push_one(state . THREAD_FLD (r29));
- GC_push_one(state . THREAD_FLD (r30));
- GC_push_one(state . THREAD_FLD (r31));
-
-# elif defined(ARM32)
- lo = (void*)state.__sp;
-
- GC_push_one(state.__r[0]);
- GC_push_one(state.__r[1]);
- GC_push_one(state.__r[2]);
- GC_push_one(state.__r[3]);
- GC_push_one(state.__r[4]);
- GC_push_one(state.__r[5]);
- GC_push_one(state.__r[6]);
- GC_push_one(state.__r[7]);
- GC_push_one(state.__r[8]);
- GC_push_one(state.__r[9]);
- GC_push_one(state.__r[10]);
- GC_push_one(state.__r[11]);
- GC_push_one(state.__r[12]);
- /* GC_push_one(state.__sp); */
- GC_push_one(state.__lr);
- GC_push_one(state.__pc);
- GC_push_one(state.__cpsr);
-
-# else
-# error FIXME for non-x86 || ppc || arm architectures
-# endif
+ lo = (void*)(state . THREAD_FLD (r1) - PPC_RED_ZONE_SIZE);
+
+ GC_push_one(state . THREAD_FLD (r0));
+ GC_push_one(state . THREAD_FLD (r2));
+ GC_push_one(state . THREAD_FLD (r3));
+ GC_push_one(state . THREAD_FLD (r4));
+ GC_push_one(state . THREAD_FLD (r5));
+ GC_push_one(state . THREAD_FLD (r6));
+ GC_push_one(state . THREAD_FLD (r7));
+ GC_push_one(state . THREAD_FLD (r8));
+ GC_push_one(state . THREAD_FLD (r9));
+ GC_push_one(state . THREAD_FLD (r10));
+ GC_push_one(state . THREAD_FLD (r11));
+ GC_push_one(state . THREAD_FLD (r12));
+ GC_push_one(state . THREAD_FLD (r13));
+ GC_push_one(state . THREAD_FLD (r14));
+ GC_push_one(state . THREAD_FLD (r15));
+ GC_push_one(state . THREAD_FLD (r16));
+ GC_push_one(state . THREAD_FLD (r17));
+ GC_push_one(state . THREAD_FLD (r18));
+ GC_push_one(state . THREAD_FLD (r19));
+ GC_push_one(state . THREAD_FLD (r20));
+ GC_push_one(state . THREAD_FLD (r21));
+ GC_push_one(state . THREAD_FLD (r22));
+ GC_push_one(state . THREAD_FLD (r23));
+ GC_push_one(state . THREAD_FLD (r24));
+ GC_push_one(state . THREAD_FLD (r25));
+ GC_push_one(state . THREAD_FLD (r26));
+ GC_push_one(state . THREAD_FLD (r27));
+ GC_push_one(state . THREAD_FLD (r28));
+ GC_push_one(state . THREAD_FLD (r29));
+ GC_push_one(state . THREAD_FLD (r30));
+ GC_push_one(state . THREAD_FLD (r31));
+
+# elif defined(ARM32)
+ lo = (void*)state.__sp;
+
+ GC_push_one(state.__r[0]);
+ GC_push_one(state.__r[1]);
+ GC_push_one(state.__r[2]);
+ GC_push_one(state.__r[3]);
+ GC_push_one(state.__r[4]);
+ GC_push_one(state.__r[5]);
+ GC_push_one(state.__r[6]);
+ GC_push_one(state.__r[7]);
+ GC_push_one(state.__r[8]);
+ GC_push_one(state.__r[9]);
+ GC_push_one(state.__r[10]);
+ GC_push_one(state.__r[11]);
+ GC_push_one(state.__r[12]);
+ /* GC_push_one(state.__sp); */
+ GC_push_one(state.__lr);
+ GC_push_one(state.__pc);
+ GC_push_one(state.__cpsr);
+
+# else
+# error FIXME for non-x86 || ppc || arm architectures
+# endif
} /* p != me */
if(p->flags & MAIN_THREAD)
- hi = GC_stackbottom;
+ hi = GC_stackbottom;
else
- hi = p->stack_end;
+ hi = p->stack_end;
# if DEBUG_THREADS
GC_printf("Darwin: Stack for thread 0x%lx = [%lx,%lx)\n",
- (unsigned long) p -> id, (unsigned long) lo,
- (unsigned long) hi);
+ (unsigned long) p -> id, (unsigned long) lo,
+ (unsigned long) hi);
# endif
GC_push_all_stack(lo, hi);
} /* for(p=GC_threads[i]...) */
@@ -245,152 +262,152 @@ void GC_push_all_stacks(void)
} else {
# if defined(POWERPC)
GC_THREAD_STATE_T info;
- mach_msg_type_number_t outCount = THREAD_STATE_MAX;
- r = thread_get_state(thread, GC_MACH_THREAD_STATE, (natural_t *)&info,
- &outCount);
- if(r != KERN_SUCCESS)
- ABORT("task_get_state failed");
-
- lo = (void*)(info . THREAD_FLD (r1) - PPC_RED_ZONE_SIZE);
- hi = (ptr_t)FindTopOfStack(info . THREAD_FLD (r1));
-
- GC_push_one(info . THREAD_FLD (r0));
- GC_push_one(info . THREAD_FLD (r2));
- GC_push_one(info . THREAD_FLD (r3));
- GC_push_one(info . THREAD_FLD (r4));
- GC_push_one(info . THREAD_FLD (r5));
- GC_push_one(info . THREAD_FLD (r6));
- GC_push_one(info . THREAD_FLD (r7));
- GC_push_one(info . THREAD_FLD (r8));
- GC_push_one(info . THREAD_FLD (r9));
- GC_push_one(info . THREAD_FLD (r10));
- GC_push_one(info . THREAD_FLD (r11));
- GC_push_one(info . THREAD_FLD (r12));
- GC_push_one(info . THREAD_FLD (r13));
- GC_push_one(info . THREAD_FLD (r14));
- GC_push_one(info . THREAD_FLD (r15));
- GC_push_one(info . THREAD_FLD (r16));
- GC_push_one(info . THREAD_FLD (r17));
- GC_push_one(info . THREAD_FLD (r18));
- GC_push_one(info . THREAD_FLD (r19));
- GC_push_one(info . THREAD_FLD (r20));
- GC_push_one(info . THREAD_FLD (r21));
- GC_push_one(info . THREAD_FLD (r22));
- GC_push_one(info . THREAD_FLD (r23));
- GC_push_one(info . THREAD_FLD (r24));
- GC_push_one(info . THREAD_FLD (r25));
- GC_push_one(info . THREAD_FLD (r26));
- GC_push_one(info . THREAD_FLD (r27));
- GC_push_one(info . THREAD_FLD (r28));
- GC_push_one(info . THREAD_FLD (r29));
- GC_push_one(info . THREAD_FLD (r30));
- GC_push_one(info . THREAD_FLD (r31));
+ mach_msg_type_number_t outCount = THREAD_STATE_MAX;
+ r = thread_get_state(thread, GC_MACH_THREAD_STATE, (natural_t *)&info,
+ &outCount);
+ if(r != KERN_SUCCESS)
+ ABORT("task_get_state failed");
+
+ lo = (void*)(info . THREAD_FLD (r1) - PPC_RED_ZONE_SIZE);
+ hi = (ptr_t)FindTopOfStack(info . THREAD_FLD (r1));
+
+ GC_push_one(info . THREAD_FLD (r0));
+ GC_push_one(info . THREAD_FLD (r2));
+ GC_push_one(info . THREAD_FLD (r3));
+ GC_push_one(info . THREAD_FLD (r4));
+ GC_push_one(info . THREAD_FLD (r5));
+ GC_push_one(info . THREAD_FLD (r6));
+ GC_push_one(info . THREAD_FLD (r7));
+ GC_push_one(info . THREAD_FLD (r8));
+ GC_push_one(info . THREAD_FLD (r9));
+ GC_push_one(info . THREAD_FLD (r10));
+ GC_push_one(info . THREAD_FLD (r11));
+ GC_push_one(info . THREAD_FLD (r12));
+ GC_push_one(info . THREAD_FLD (r13));
+ GC_push_one(info . THREAD_FLD (r14));
+ GC_push_one(info . THREAD_FLD (r15));
+ GC_push_one(info . THREAD_FLD (r16));
+ GC_push_one(info . THREAD_FLD (r17));
+ GC_push_one(info . THREAD_FLD (r18));
+ GC_push_one(info . THREAD_FLD (r19));
+ GC_push_one(info . THREAD_FLD (r20));
+ GC_push_one(info . THREAD_FLD (r21));
+ GC_push_one(info . THREAD_FLD (r22));
+ GC_push_one(info . THREAD_FLD (r23));
+ GC_push_one(info . THREAD_FLD (r24));
+ GC_push_one(info . THREAD_FLD (r25));
+ GC_push_one(info . THREAD_FLD (r26));
+ GC_push_one(info . THREAD_FLD (r27));
+ GC_push_one(info . THREAD_FLD (r28));
+ GC_push_one(info . THREAD_FLD (r29));
+ GC_push_one(info . THREAD_FLD (r30));
+ GC_push_one(info . THREAD_FLD (r31));
# elif defined(I386)
- /* FIXME: Remove after testing: */
- WARN("This is completely untested and likely will not work\n", 0);
- GC_THREAD_STATE_T info;
- mach_msg_type_number_t outCount = THREAD_STATE_MAX;
- r = thread_get_state(thread, GC_MACH_THREAD_STATE, (natural_t *)&info,
- &outCount);
- if(r != KERN_SUCCESS)
- ABORT("task_get_state failed");
-
- lo = (void*)info . THREAD_FLD (esp);
- hi = (ptr_t)FindTopOfStack(info . THREAD_FLD (esp));
-
- GC_push_one(info . THREAD_FLD (eax));
- GC_push_one(info . THREAD_FLD (ebx));
- GC_push_one(info . THREAD_FLD (ecx));
- GC_push_one(info . THREAD_FLD (edx));
- GC_push_one(info . THREAD_FLD (edi));
- GC_push_one(info . THREAD_FLD (esi));
- /* GC_push_one(info . THREAD_FLD (ebp)); */
- /* GC_push_one(info . THREAD_FLD (esp)); */
- GC_push_one(info . THREAD_FLD (ss));
- GC_push_one(info . THREAD_FLD (eip));
- GC_push_one(info . THREAD_FLD (cs));
- GC_push_one(info . THREAD_FLD (ds));
- GC_push_one(info . THREAD_FLD (es));
- GC_push_one(info . THREAD_FLD (fs));
- GC_push_one(info . THREAD_FLD (gs));
+ /* FIXME: Remove after testing: */
+ WARN("This is completely untested and likely will not work\n", 0);
+ GC_THREAD_STATE_T info;
+ mach_msg_type_number_t outCount = THREAD_STATE_MAX;
+ r = thread_get_state(thread, GC_MACH_THREAD_STATE, (natural_t *)&info,
+ &outCount);
+ if(r != KERN_SUCCESS)
+ ABORT("task_get_state failed");
+
+ lo = (void*)info . THREAD_FLD (esp);
+ hi = (ptr_t)FindTopOfStack(info . THREAD_FLD (esp));
+
+ GC_push_one(info . THREAD_FLD (eax));
+ GC_push_one(info . THREAD_FLD (ebx));
+ GC_push_one(info . THREAD_FLD (ecx));
+ GC_push_one(info . THREAD_FLD (edx));
+ GC_push_one(info . THREAD_FLD (edi));
+ GC_push_one(info . THREAD_FLD (esi));
+ /* GC_push_one(info . THREAD_FLD (ebp)); */
+ /* GC_push_one(info . THREAD_FLD (esp)); */
+ GC_push_one(info . THREAD_FLD (ss));
+ GC_push_one(info . THREAD_FLD (eip));
+ GC_push_one(info . THREAD_FLD (cs));
+ GC_push_one(info . THREAD_FLD (ds));
+ GC_push_one(info . THREAD_FLD (es));
+ GC_push_one(info . THREAD_FLD (fs));
+ GC_push_one(info . THREAD_FLD (gs));
# elif defined(X86_64)
- GC_THREAD_STATE_T info;
- mach_msg_type_number_t outCount = THREAD_STATE_MAX;
- r = thread_get_state(thread, GC_MACH_THREAD_STATE, (natural_t *)&info,
- &outCount);
- if(r != KERN_SUCCESS)
- ABORT("task_get_state failed");
-
- lo = (void*)info . THREAD_FLD (rsp);
- hi = (ptr_t)FindTopOfStack(info . THREAD_FLD (rsp));
-
- GC_push_one(info . THREAD_FLD (rax));
- GC_push_one(info . THREAD_FLD (rbx));
- GC_push_one(info . THREAD_FLD (rcx));
- GC_push_one(info . THREAD_FLD (rdx));
- GC_push_one(info . THREAD_FLD (rdi));
- GC_push_one(info . THREAD_FLD (rsi));
- GC_push_one(info . THREAD_FLD (rbp));
- GC_push_one(info . THREAD_FLD (rsp));
- GC_push_one(info . THREAD_FLD (r8));
- GC_push_one(info . THREAD_FLD (r9));
- GC_push_one(info . THREAD_FLD (r10));
- GC_push_one(info . THREAD_FLD (r11));
- GC_push_one(info . THREAD_FLD (r12));
- GC_push_one(info . THREAD_FLD (r13));
- GC_push_one(info . THREAD_FLD (r14));
- GC_push_one(info . THREAD_FLD (r15));
- GC_push_one(info . THREAD_FLD (rip));
- GC_push_one(info . THREAD_FLD (rflags));
- GC_push_one(info . THREAD_FLD (cs));
- GC_push_one(info . THREAD_FLD (fs));
- GC_push_one(info . THREAD_FLD (gs));
+ GC_THREAD_STATE_T info;
+ mach_msg_type_number_t outCount = THREAD_STATE_MAX;
+ r = thread_get_state(thread, GC_MACH_THREAD_STATE, (natural_t *)&info,
+ &outCount);
+ if(r != KERN_SUCCESS)
+ ABORT("task_get_state failed");
+
+ lo = (void*)info . THREAD_FLD (rsp);
+ hi = (ptr_t)FindTopOfStack(info . THREAD_FLD (rsp));
+
+ GC_push_one(info . THREAD_FLD (rax));
+ GC_push_one(info . THREAD_FLD (rbx));
+ GC_push_one(info . THREAD_FLD (rcx));
+ GC_push_one(info . THREAD_FLD (rdx));
+ GC_push_one(info . THREAD_FLD (rdi));
+ GC_push_one(info . THREAD_FLD (rsi));
+ GC_push_one(info . THREAD_FLD (rbp));
+ GC_push_one(info . THREAD_FLD (rsp));
+ GC_push_one(info . THREAD_FLD (r8));
+ GC_push_one(info . THREAD_FLD (r9));
+ GC_push_one(info . THREAD_FLD (r10));
+ GC_push_one(info . THREAD_FLD (r11));
+ GC_push_one(info . THREAD_FLD (r12));
+ GC_push_one(info . THREAD_FLD (r13));
+ GC_push_one(info . THREAD_FLD (r14));
+ GC_push_one(info . THREAD_FLD (r15));
+ GC_push_one(info . THREAD_FLD (rip));
+ GC_push_one(info . THREAD_FLD (rflags));
+ GC_push_one(info . THREAD_FLD (cs));
+ GC_push_one(info . THREAD_FLD (fs));
+ GC_push_one(info . THREAD_FLD (gs));
# elif defined(ARM32)
- GC_THREAD_STATE_T info;
- mach_msg_type_number_t outCount = THREAD_STATE_MAX;
- r = thread_get_state(thread, GC_MACH_THREAD_STATE, (natural_t *)&info,
- &outCount);
- if(r != KERN_SUCCESS)
- ABORT("task_get_state failed");
-
- hi = (ptr_t)FindTopOfStack(info . __sp);
-
- lo = (void*)info.__sp;
-
- GC_push_one(info.__r[0]);
- GC_push_one(info.__r[1]);
- GC_push_one(info.__r[2]);
- GC_push_one(info.__r[3]);
- GC_push_one(info.__r[4]);
- GC_push_one(info.__r[5]);
- GC_push_one(info.__r[6]);
- GC_push_one(info.__r[7]);
- GC_push_one(info.__r[8]);
- GC_push_one(info.__r[9]);
- GC_push_one(info.__r[10]);
- GC_push_one(info.__r[11]);
- GC_push_one(info.__r[12]);
- /* GC_push_one(info.__sp); */
- GC_push_one(info.__lr);
- GC_push_one(info.__pc);
- GC_push_one(info.__cpsr);
+ GC_THREAD_STATE_T info;
+ mach_msg_type_number_t outCount = THREAD_STATE_MAX;
+ r = thread_get_state(thread, GC_MACH_THREAD_STATE, (natural_t *)&info,
+ &outCount);
+ if(r != KERN_SUCCESS)
+ ABORT("task_get_state failed");
+
+ hi = (ptr_t)FindTopOfStack(info . __sp);
+
+ lo = (void*)info.__sp;
+
+ GC_push_one(info.__r[0]);
+ GC_push_one(info.__r[1]);
+ GC_push_one(info.__r[2]);
+ GC_push_one(info.__r[3]);
+ GC_push_one(info.__r[4]);
+ GC_push_one(info.__r[5]);
+ GC_push_one(info.__r[6]);
+ GC_push_one(info.__r[7]);
+ GC_push_one(info.__r[8]);
+ GC_push_one(info.__r[9]);
+ GC_push_one(info.__r[10]);
+ GC_push_one(info.__r[11]);
+ GC_push_one(info.__r[12]);
+ /* GC_push_one(info.__sp); */
+ GC_push_one(info.__lr);
+ GC_push_one(info.__pc);
+ GC_push_one(info.__cpsr);
# else
-# error FIXME for non-x86 || ppc || arm architectures
+# error FIXME for non-x86 || ppc || arm architectures
# endif
}
# if DEBUG_THREADS
GC_printf("Darwin: Stack for thread 0x%lx = [%p,%p)\n",
- (unsigned long) thread, lo, hi);
+ (unsigned long) thread, lo, hi);
# endif
GC_push_all_stack(lo, hi);
mach_port_deallocate(my_task, thread);
} /* for(p=GC_threads[i]...) */
vm_deallocate(my_task, (vm_address_t)act_list,
- sizeof(thread_t) * listcount);
+ sizeof(thread_t) * listcount);
mach_port_deallocate(my_task, me);
}
#endif /* !DARWIN_DONT_PARSE_STACK */
@@ -414,7 +431,7 @@ void GC_stop_init(void)
/* returns true if there's a thread in act_list that wasn't in old_list */
STATIC int GC_suspend_thread_list(thread_act_array_t act_list, int count,
- thread_act_array_t old_list, int old_count)
+ thread_act_array_t old_list, int old_count)
{
mach_port_t my_thread = mach_thread_self();
int i, j;
@@ -431,8 +448,8 @@ STATIC int GC_suspend_thread_list(thread_act_array_t act_list, int count,
for(j = 0; j < old_count; j++) {
thread_act_t old_thread = old_list[j];
if (old_thread == thread) {
- found = 1;
- break;
+ found = 1;
+ break;
}
}
if (!found) {
@@ -444,33 +461,33 @@ STATIC int GC_suspend_thread_list(thread_act_array_t act_list, int count,
}
if (thread != my_thread
- && (!GC_use_mach_handler_thread
- || (GC_use_mach_handler_thread
- && GC_mach_handler_thread != thread))) {
+ && (!GC_use_mach_handler_thread
+ || (GC_use_mach_handler_thread
+ && GC_mach_handler_thread != thread))) {
struct thread_basic_info info;
mach_msg_type_number_t outCount = THREAD_INFO_MAX;
kern_return_t kern_result = thread_info(thread, THREAD_BASIC_INFO,
- (thread_info_t)&info, &outCount);
+ (thread_info_t)&info, &outCount);
if(kern_result != KERN_SUCCESS) {
- /* the thread may have quit since the thread_threads () call
- * we mark already_suspended so it's not dealt with anymore later
- */
- if (!found) {
- GC_mach_threads[GC_mach_threads_count].already_suspended = TRUE;
- GC_mach_threads_count++;
- }
- continue;
+ /* the thread may have quit since the thread_threads () call
+ * we mark already_suspended so it's not dealt with anymore later
+ */
+ if (!found) {
+ GC_mach_threads[GC_mach_threads_count].already_suspended = TRUE;
+ GC_mach_threads_count++;
+ }
+ continue;
}
# if DEBUG_THREADS
GC_printf("Thread state for 0x%lx = %d\n", (unsigned long)thread,
- info.run_state);
+ info.run_state);
# endif
if (!found) {
- GC_mach_threads[GC_mach_threads_count].already_suspended
- = info.suspend_count;
+ GC_mach_threads[GC_mach_threads_count].already_suspended
+ = info.suspend_count;
}
if (info.suspend_count)
- continue;
+ continue;
# if DEBUG_THREADS
GC_printf("Suspending 0x%lx\n", (unsigned long)thread);
@@ -478,14 +495,14 @@ STATIC int GC_suspend_thread_list(thread_act_array_t act_list, int count,
/* Suspend the thread */
kern_result = thread_suspend(thread);
if(kern_result != KERN_SUCCESS) {
- /* the thread may have quit since the thread_threads () call
- * we mark already_suspended so it's not dealt with anymore later
- */
- if (!found) {
- GC_mach_threads[GC_mach_threads_count].already_suspended = TRUE;
- GC_mach_threads_count++;
- }
- continue;
+ /* the thread may have quit since the thread_threads () call
+ * we mark already_suspended so it's not dealt with anymore later
+ */
+ if (!found) {
+ GC_mach_threads[GC_mach_threads_count].already_suspended = TRUE;
+ GC_mach_threads_count++;
+ }
+ continue;
}
}
if (!found) GC_mach_threads_count++;
@@ -495,7 +512,7 @@ STATIC int GC_suspend_thread_list(thread_act_array_t act_list, int count,
}
-/* Caller holds allocation lock. */
+/* Caller holds allocation lock. */
void GC_stop_world(void)
{
unsigned int i, changes;
@@ -507,21 +524,21 @@ void GC_stop_world(void)
# if DEBUG_THREADS
GC_printf("Stopping the world from 0x%lx\n",
- (unsigned long)mach_thread_self());
+ (unsigned long)mach_thread_self());
# endif
/* clear out the mach threads list table */
GC_stop_init();
/* Make sure all free list construction has stopped before we start. */
- /* No new construction can start, since free list construction is */
- /* required to acquire and release the GC lock before it starts, */
- /* and we have the lock. */
+ /* No new construction can start, since free list construction is */
+ /* required to acquire and release the GC lock before it starts, */
+ /* and we have the lock. */
# ifdef PARALLEL_MARK
if (GC_parallel) {
- GC_acquire_mark_lock();
- GC_ASSERT(GC_fl_builder_count == 0);
- /* We should have previously waited for it to become zero. */
+ GC_acquire_mark_lock();
+ GC_ASSERT(GC_fl_builder_count == 0);
+ /* We should have previously waited for it to become zero. */
}
# endif /* PARALLEL_MARK */
@@ -543,19 +560,19 @@ void GC_stop_world(void)
kern_result = task_threads(my_task, &act_list, &listcount);
if(kern_result == KERN_SUCCESS) {
- result = GC_suspend_thread_list(act_list, listcount, prev_list,
- prevcount);
- changes = result;
-
- if(prev_list != NULL) {
- for(i = 0; i < prevcount; i++)
- mach_port_deallocate(my_task, prev_list[i]);
-
- vm_deallocate(my_task, (vm_address_t)prev_list,
- sizeof(thread_t) * prevcount);
- }
- prev_list = act_list;
- prevcount = listcount;
+ result = GC_suspend_thread_list(act_list, listcount, prev_list,
+ prevcount);
+ changes = result;
+
+ if(prev_list != NULL) {
+ for(i = 0; i < prevcount; i++)
+ mach_port_deallocate(my_task, prev_list[i]);
+
+ vm_deallocate(my_task, (vm_address_t)prev_list,
+ sizeof(thread_t) * prevcount);
+ }
+ prev_list = act_list;
+ prevcount = listcount;
}
} while (changes);
GC_ASSERT(prev_list != 0);
@@ -563,18 +580,18 @@ void GC_stop_world(void)
mach_port_deallocate(my_task, prev_list[i]);
vm_deallocate(my_task, (vm_address_t)act_list,
- sizeof(thread_t) * listcount);
+ sizeof(thread_t) * listcount);
# ifdef MPROTECT_VDB
if(GC_incremental) {
- extern void GC_mprotect_stop();
- GC_mprotect_stop();
+ extern void GC_mprotect_stop();
+ GC_mprotect_stop();
}
# endif
# ifdef PARALLEL_MARK
if (GC_parallel)
- GC_release_mark_lock();
+ GC_release_mark_lock();
# endif
# if DEBUG_THREADS
GC_printf("World stopped from 0x%lx\n", (unsigned long)my_thread);
@@ -583,8 +600,8 @@ void GC_stop_world(void)
mach_port_deallocate(my_task, my_thread);
}
-/* Caller holds allocation lock, and has held it continuously since */
-/* the world stopped. */
+/* Caller holds allocation lock, and has held it continuously since */
+/* the world stopped. */
void GC_start_world(void)
{
task_t my_task = current_task();
@@ -603,8 +620,8 @@ void GC_start_world(void)
# ifdef MPROTECT_VDB
if(GC_incremental) {
- extern void GC_mprotect_resume();
- GC_mprotect_resume();
+ extern void GC_mprotect_resume();
+ GC_mprotect_resume();
}
# endif
@@ -612,37 +629,37 @@ void GC_start_world(void)
for(i = 0; i < listcount; i++) {
thread_act_t thread = act_list[i];
if (thread != my_thread
- && (!GC_use_mach_handler_thread
- || (GC_use_mach_handler_thread
- && GC_mach_handler_thread != thread))) {
- for(j = 0; j < GC_mach_threads_count; j++) {
- if (thread == GC_mach_threads[j].thread) {
- if (GC_mach_threads[j].already_suspended) {
+ && (!GC_use_mach_handler_thread
+ || (GC_use_mach_handler_thread
+ && GC_mach_handler_thread != thread))) {
+ for(j = 0; j < GC_mach_threads_count; j++) {
+ if (thread == GC_mach_threads[j].thread) {
+ if (GC_mach_threads[j].already_suspended) {
# if DEBUG_THREADS
- GC_printf("Not resuming already suspended thread %p\n", thread);
+ GC_printf("Not resuming already suspended thread %p\n", thread);
# endif
- continue;
- }
- kern_result = thread_info(thread, THREAD_BASIC_INFO,
- (thread_info_t)&info, &outCount);
- if(kern_result != KERN_SUCCESS)
- ABORT("thread_info failed");
+ continue;
+ }
+ kern_result = thread_info(thread, THREAD_BASIC_INFO,
+ (thread_info_t)&info, &outCount);
+ if(kern_result != KERN_SUCCESS)
+ ABORT("thread_info failed");
# if DEBUG_THREADS
- GC_printf("Thread state for 0x%lx = %d\n", (unsigned long)thread,
- info.run_state);
- GC_printf("Resuming 0x%lx\n", (unsigned long)thread);
+ GC_printf("Thread state for 0x%lx = %d\n", (unsigned long)thread,
+ info.run_state);
+ GC_printf("Resuming 0x%lx\n", (unsigned long)thread);
# endif
- /* Resume the thread */
- kern_result = thread_resume(thread);
- if(kern_result != KERN_SUCCESS)
- ABORT("thread_resume failed");
- }
- }
+ /* Resume the thread */
+ kern_result = thread_resume(thread);
+ if(kern_result != KERN_SUCCESS)
+ ABORT("thread_resume failed");
+ }
+ }
}
mach_port_deallocate(my_task, thread);
}
vm_deallocate(my_task, (vm_address_t)act_list,
- sizeof(thread_t) * listcount);
+ sizeof(thread_t) * listcount);
mach_port_deallocate(my_task, my_thread);
# if DEBUG_THREADS
diff --git a/dbg_mlc.c b/dbg_mlc.c
index bf3acb75..2211a5dc 100644
--- a/dbg_mlc.c
+++ b/dbg_mlc.c
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
* Copyright (c) 1997 by Silicon Graphics. All rights reserved.
@@ -24,33 +24,33 @@
void GC_default_print_heap_obj_proc(ptr_t p);
GC_API void GC_CALL GC_register_finalizer_no_order
- (void * obj, GC_finalization_proc fn, void * cd,
- GC_finalization_proc *ofn, void * *ocd);
+ (void * obj, GC_finalization_proc fn, void * cd,
+ GC_finalization_proc *ofn, void * *ocd);
#ifndef SHORT_DBG_HDRS
-/* Check whether object with base pointer p has debugging info */
-/* p is assumed to point to a legitimate object in our part */
-/* of the heap. */
-/* This excludes the check as to whether the back pointer is */
-/* odd, which is added by the GC_HAS_DEBUG_INFO macro. */
-/* Note that if DBG_HDRS_ALL is set, uncollectable objects */
-/* on free lists may not have debug information set. Thus it's */
-/* not always safe to return TRUE, even if the client does */
-/* its part. */
+/* Check whether object with base pointer p has debugging info */
+/* p is assumed to point to a legitimate object in our part */
+/* of the heap. */
+/* This excludes the check as to whether the back pointer is */
+/* odd, which is added by the GC_HAS_DEBUG_INFO macro. */
+/* Note that if DBG_HDRS_ALL is set, uncollectable objects */
+/* on free lists may not have debug information set. Thus it's */
+/* not always safe to return TRUE, even if the client does */
+/* its part. */
GC_bool GC_has_other_debug_info(ptr_t p)
{
register oh * ohdr = (oh *)p;
register ptr_t body = (ptr_t)(ohdr + 1);
register word sz = GC_size((ptr_t) ohdr);
-
+
if (HBLKPTR((ptr_t)ohdr) != HBLKPTR((ptr_t)body)
|| sz < DEBUG_BYTES + EXTRA_BYTES) {
return(FALSE);
}
if (ohdr -> oh_sz == sz) {
- /* Object may have had debug info, but has been deallocated */
- return(FALSE);
+ /* Object may have had debug info, but has been deallocated */
+ return(FALSE);
}
if (ohdr -> oh_sf == (START_FLAG ^ (word)body)) return(TRUE);
if (((word *)ohdr)[BYTES_TO_WORDS(sz)-1] == (END_FLAG ^ (word)body)) {
@@ -72,11 +72,11 @@ GC_bool GC_has_other_debug_info(ptr_t p)
# endif
/* Store back pointer to source in dest, if that appears to be possible. */
- /* This is not completely safe, since we may mistakenly conclude that */
- /* dest has a debugging wrapper. But the error probability is very */
- /* small, and this shouldn't be used in production code. */
+ /* This is not completely safe, since we may mistakenly conclude that */
+ /* dest has a debugging wrapper. But the error probability is very */
+ /* small, and this shouldn't be used in production code. */
/* We assume that dest is the real base pointer. Source will usually */
- /* be a pointer to the interior of an object. */
+ /* be a pointer to the interior of an object. */
void GC_store_back_pointer(ptr_t source, ptr_t dest)
{
if (GC_HAS_DEBUG_INFO(dest)) {
@@ -88,14 +88,14 @@ GC_bool GC_has_other_debug_info(ptr_t p)
GC_store_back_pointer(MARKED_FOR_FINALIZATION, dest);
}
- /* Store information about the object referencing dest in *base_p */
- /* and *offset_p. */
- /* source is root ==> *base_p = address, *offset_p = 0 */
- /* source is heap object ==> *base_p != 0, *offset_p = offset */
- /* Returns 1 on success, 0 if source couldn't be determined. */
- /* Dest can be any address within a heap object. */
+ /* Store information about the object referencing dest in *base_p */
+ /* and *offset_p. */
+ /* source is root ==> *base_p = address, *offset_p = 0 */
+ /* source is heap object ==> *base_p != 0, *offset_p = offset */
+ /* Returns 1 on success, 0 if source couldn't be determined. */
+ /* Dest can be any address within a heap object. */
GC_API GC_ref_kind GC_CALL GC_get_back_ptr_info(void *dest, void **base_p,
- size_t *offset_p)
+ size_t *offset_p)
{
oh * hdr = (oh *)GC_base(dest);
ptr_t bp;
@@ -106,19 +106,19 @@ GC_bool GC_has_other_debug_info(ptr_t p)
if (MARKED_FROM_REGISTER == bp) return GC_REFD_FROM_REG;
if (NOT_MARKED == bp) return GC_UNREFERENCED;
# if ALIGNMENT == 1
- /* Heuristically try to fix off by 1 errors we introduced by */
- /* insisting on even addresses. */
+ /* Heuristically try to fix off by 1 errors we introduced by */
+ /* insisting on even addresses. */
{
- ptr_t alternate_ptr = bp + 1;
- ptr_t target = *(ptr_t *)bp;
- ptr_t alternate_target = *(ptr_t *)alternate_ptr;
-
- if (alternate_target >= GC_least_plausible_heap_addr
- && alternate_target <= GC_greatest_plausible_heap_addr
- && (target < GC_least_plausible_heap_addr
- || target > GC_greatest_plausible_heap_addr)) {
- bp = alternate_ptr;
- }
+ ptr_t alternate_ptr = bp + 1;
+ ptr_t target = *(ptr_t *)bp;
+ ptr_t alternate_target = *(ptr_t *)alternate_ptr;
+
+ if (alternate_target >= GC_least_plausible_heap_addr
+ && alternate_target <= GC_greatest_plausible_heap_addr
+ && (target < GC_least_plausible_heap_addr
+ || target > GC_greatest_plausible_heap_addr)) {
+ bp = alternate_ptr;
+ }
}
# endif
bp_base = GC_base(bp);
@@ -134,28 +134,28 @@ GC_bool GC_has_other_debug_info(ptr_t p)
}
}
- /* Generate a random heap address. */
- /* The resulting address is in the heap, but */
- /* not necessarily inside a valid object. */
+ /* Generate a random heap address. */
+ /* The resulting address is in the heap, but */
+ /* not necessarily inside a valid object. */
GC_API void * GC_CALL GC_generate_random_heap_address(void)
{
size_t i;
word heap_offset = RANDOM();
if (GC_heapsize > RAND_MAX) {
- heap_offset *= RAND_MAX;
- heap_offset += RANDOM();
+ heap_offset *= RAND_MAX;
+ heap_offset += RANDOM();
}
heap_offset %= GC_heapsize;
- /* This doesn't yield a uniform distribution, especially if */
- /* e.g. RAND_MAX = 1.5* GC_heapsize. But for typical cases, */
- /* it's not too bad. */
+ /* This doesn't yield a uniform distribution, especially if */
+ /* e.g. RAND_MAX = 1.5* GC_heapsize. But for typical cases, */
+ /* it's not too bad. */
for (i = 0; i < GC_n_heap_sects; ++ i) {
- size_t size = GC_heap_sects[i].hs_bytes;
- if (heap_offset < size) {
- return GC_heap_sects[i].hs_start + heap_offset;
- } else {
- heap_offset -= size;
- }
+ size_t size = GC_heap_sects[i].hs_bytes;
+ if (heap_offset < size) {
+ return GC_heap_sects[i].hs_start + heap_offset;
+ } else {
+ heap_offset -= size;
+ }
}
ABORT("GC_generate_random_heap_address: size inconsistency");
/*NOTREACHED*/
@@ -168,11 +168,11 @@ GC_bool GC_has_other_debug_info(ptr_t p)
ptr_t result;
ptr_t base;
for (;;) {
- result = GC_generate_random_heap_address();
- base = GC_base(result);
- if (0 == base) continue;
- if (!GC_is_marked(base)) continue;
- return result;
+ result = GC_generate_random_heap_address();
+ base = GC_base(result);
+ if (0 == base) continue;
+ if (!GC_is_marked(base)) continue;
+ return result;
}
}
@@ -190,41 +190,41 @@ GC_bool GC_has_other_debug_info(ptr_t p)
for (i = 0; ; ++i) {
source = GC_get_back_ptr_info(current, &base, &offset);
if (GC_UNREFERENCED == source) {
- GC_err_printf("Reference could not be found\n");
- goto out;
+ GC_err_printf("Reference could not be found\n");
+ goto out;
}
if (GC_NO_SPACE == source) {
- GC_err_printf("No debug info in object: Can't find reference\n");
- goto out;
+ GC_err_printf("No debug info in object: Can't find reference\n");
+ goto out;
}
GC_err_printf("Reachable via %d levels of pointers from ", i);
switch(source) {
- case GC_REFD_FROM_ROOT:
- GC_err_printf("root at %p\n\n", base);
- goto out;
- case GC_REFD_FROM_REG:
- GC_err_printf("root in register\n\n");
- goto out;
- case GC_FINALIZER_REFD:
- GC_err_printf("list of finalizable objects\n\n");
- goto out;
- case GC_REFD_FROM_HEAP:
- GC_err_printf("offset %ld in object:\n", (unsigned long)offset);
- /* Take GC_base(base) to get real base, i.e. header. */
- GC_print_heap_obj(GC_base(base));
- GC_err_printf("\n");
- break;
- default:
- GC_err_printf("INTERNAL ERROR: UNEXPECTED SOURCE!!!!\n");
- goto out;
+ case GC_REFD_FROM_ROOT:
+ GC_err_printf("root at %p\n\n", base);
+ goto out;
+ case GC_REFD_FROM_REG:
+ GC_err_printf("root in register\n\n");
+ goto out;
+ case GC_FINALIZER_REFD:
+ GC_err_printf("list of finalizable objects\n\n");
+ goto out;
+ case GC_REFD_FROM_HEAP:
+ GC_err_printf("offset %ld in object:\n", (unsigned long)offset);
+ /* Take GC_base(base) to get real base, i.e. header. */
+ GC_print_heap_obj(GC_base(base));
+ GC_err_printf("\n");
+ break;
+ default:
+ GC_err_printf("INTERNAL ERROR: UNEXPECTED SOURCE!!!!\n");
+ goto out;
}
current = base;
}
out:;
}
- /* Force a garbage collection and generate a backtrace from a */
- /* random heap address. */
+ /* Force a garbage collection and generate a backtrace from a */
+ /* random heap address. */
void GC_generate_random_backtrace_no_gc(void)
{
void * current;
@@ -232,24 +232,24 @@ GC_bool GC_has_other_debug_info(ptr_t p)
GC_printf("\n****Chose address %p in object\n", current);
GC_print_backtrace(current);
}
-
+
GC_API void GC_CALL GC_generate_random_backtrace(void)
{
GC_gcollect();
GC_generate_random_backtrace_no_gc();
}
-
+
#endif /* KEEP_BACK_PTRS */
# define CROSSES_HBLK(p, sz) \
- (((word)(p + sizeof(oh) + sz - 1) ^ (word)p) >= HBLKSIZE)
+ (((word)(p + sizeof(oh) + sz - 1) ^ (word)p) >= HBLKSIZE)
/* Store debugging info into p. Return displaced pointer. */
-/* Assumes we don't hold allocation lock. */
+/* Assumes we don't hold allocation lock. */
ptr_t GC_store_debug_info(ptr_t p, word sz, const char *string, word integer)
{
register word * result = (word *)((oh *)p + 1);
DCL_LOCK_STATE;
-
+
LOCK();
GC_ASSERT(GC_size(p) >= sizeof(oh) + sz);
GC_ASSERT(!(SMALL_OBJ(sz) && CROSSES_HBLK(p, sz)));
@@ -272,13 +272,13 @@ ptr_t GC_store_debug_info(ptr_t p, word sz, const char *string, word integer)
}
#ifdef DBG_HDRS_ALL
-/* Store debugging info into p. Return displaced pointer. */
-/* This version assumes we do hold the allocation lock. */
+/* Store debugging info into p. Return displaced pointer. */
+/* This version assumes we do hold the allocation lock. */
STATIC ptr_t GC_store_debug_info_inner(ptr_t p, word sz, char *string,
- word integer)
+ word integer)
{
register word * result = (word *)((oh *)p + 1);
-
+
GC_ASSERT(GC_size(p) >= sizeof(oh) + sz);
GC_ASSERT(!(SMALL_OBJ(sz) && CROSSES_HBLK(p, sz)));
# ifdef KEEP_BACK_PTRS
@@ -300,9 +300,9 @@ STATIC ptr_t GC_store_debug_info_inner(ptr_t p, word sz, char *string,
#endif
#ifndef SHORT_DBG_HDRS
-/* Check the object with debugging info at ohdr */
-/* return NIL if it's OK. Else return clobbered */
-/* address. */
+/* Check the object with debugging info at ohdr */
+/* return NIL if it's OK. Else return clobbered */
+/* address. */
STATIC ptr_t GC_check_annotated_obj(oh *ohdr)
{
register ptr_t body = (ptr_t)(ohdr + 1);
@@ -327,13 +327,13 @@ STATIC ptr_t GC_check_annotated_obj(oh *ohdr)
static GC_describe_type_fn GC_describe_type_fns[MAXOBJKINDS] = {0};
GC_API void GC_CALL GC_register_describe_type_fn(int kind,
- GC_describe_type_fn fn)
+ GC_describe_type_fn fn)
{
GC_describe_type_fns[kind] = fn;
}
-/* Print a type description for the object whose client-visible address */
-/* is p. */
+/* Print a type description for the object whose client-visible address */
+/* is p. */
STATIC void GC_print_type(ptr_t p)
{
hdr * hhdr = GC_find_header(p);
@@ -341,44 +341,44 @@ STATIC void GC_print_type(ptr_t p)
int kind = hhdr -> hb_obj_kind;
if (0 != GC_describe_type_fns[kind] && GC_is_marked(GC_base(p))) {
- /* This should preclude free list objects except with */
- /* thread-local allocation. */
- buffer[GC_TYPE_DESCR_LEN] = 0;
- (GC_describe_type_fns[kind])(p, buffer);
- GC_ASSERT(buffer[GC_TYPE_DESCR_LEN] == 0);
- GC_err_puts(buffer);
+ /* This should preclude free list objects except with */
+ /* thread-local allocation. */
+ buffer[GC_TYPE_DESCR_LEN] = 0;
+ (GC_describe_type_fns[kind])(p, buffer);
+ GC_ASSERT(buffer[GC_TYPE_DESCR_LEN] == 0);
+ GC_err_puts(buffer);
} else {
- switch(kind) {
- case PTRFREE:
- GC_err_puts("PTRFREE");
- break;
- case NORMAL:
- GC_err_puts("NORMAL");
- break;
- case UNCOLLECTABLE:
- GC_err_puts("UNCOLLECTABLE");
- break;
-# ifdef ATOMIC_UNCOLLECTABLE
- case AUNCOLLECTABLE:
- GC_err_puts("ATOMIC UNCOLLECTABLE");
- break;
-# endif
- case STUBBORN:
- GC_err_puts("STUBBORN");
- break;
- default:
- GC_err_printf("kind %d, descr 0x%lx", kind,
- (unsigned long)(hhdr -> hb_descr));
- }
+ switch(kind) {
+ case PTRFREE:
+ GC_err_puts("PTRFREE");
+ break;
+ case NORMAL:
+ GC_err_puts("NORMAL");
+ break;
+ case UNCOLLECTABLE:
+ GC_err_puts("UNCOLLECTABLE");
+ break;
+# ifdef ATOMIC_UNCOLLECTABLE
+ case AUNCOLLECTABLE:
+ GC_err_puts("ATOMIC UNCOLLECTABLE");
+ break;
+# endif
+ case STUBBORN:
+ GC_err_puts("STUBBORN");
+ break;
+ default:
+ GC_err_printf("kind %d, descr 0x%lx", kind,
+ (unsigned long)(hhdr -> hb_descr));
+ }
}
}
-
+
void GC_print_obj(ptr_t p)
{
register oh * ohdr = (oh *)GC_base(p);
-
+
GC_ASSERT(I_DONT_HOLD_LOCK());
GC_err_printf("%p (", ((ptr_t)ohdr + sizeof(oh)));
GC_err_puts(ohdr -> oh_string);
@@ -386,7 +386,7 @@ void GC_print_obj(ptr_t p)
GC_err_printf(":%ld, ", (unsigned long)(ohdr -> oh_int));
# else
GC_err_printf(":%ld, sz=%ld, ", (unsigned long)(ohdr -> oh_int),
- (unsigned long)(ohdr -> oh_sz));
+ (unsigned long)(ohdr -> oh_sz));
# endif
GC_print_type((ptr_t)(ohdr + 1));
GC_err_puts(")\n");
@@ -397,35 +397,35 @@ STATIC void GC_debug_print_heap_obj_proc(ptr_t p)
{
GC_ASSERT(I_DONT_HOLD_LOCK());
if (GC_HAS_DEBUG_INFO(p)) {
- GC_print_obj(p);
+ GC_print_obj(p);
} else {
- GC_default_print_heap_obj_proc(p);
+ GC_default_print_heap_obj_proc(p);
}
}
#ifndef SHORT_DBG_HDRS
-/* Use GC_err_printf and friends to print a description of the object */
-/* whose client-visible address is p, and which was smashed at */
-/* clobbered_addr. */
+/* Use GC_err_printf and friends to print a description of the object */
+/* whose client-visible address is p, and which was smashed at */
+/* clobbered_addr. */
STATIC void GC_print_smashed_obj(ptr_t p, ptr_t clobbered_addr)
{
register oh * ohdr = (oh *)GC_base(p);
-
+
GC_ASSERT(I_DONT_HOLD_LOCK());
if (clobbered_addr <= (ptr_t)(&(ohdr -> oh_sz))
|| ohdr -> oh_string == 0) {
- GC_err_printf(
- "%p in or near object at %p(<smashed>, appr. sz = %lu)\n",
- clobbered_addr, p,
- (unsigned long)(GC_size((ptr_t)ohdr) - DEBUG_BYTES));
+ GC_err_printf(
+ "%p in or near object at %p(<smashed>, appr. sz = %lu)\n",
+ clobbered_addr, p,
+ (unsigned long)(GC_size((ptr_t)ohdr) - DEBUG_BYTES));
} else {
- GC_err_printf("%p in or near object at %p(%s:%lu, sz=%lu)\n",
- clobbered_addr, p,
- (word)(ohdr -> oh_string) < HBLKSIZE ? "(smashed string)" :
- ohdr -> oh_string[0] == '\0' ? "EMPTY(smashed?)" :
- ohdr -> oh_string,
- (unsigned long)(ohdr -> oh_int),
- (unsigned long)(ohdr -> oh_sz));
+ GC_err_printf("%p in or near object at %p(%s:%lu, sz=%lu)\n",
+ clobbered_addr, p,
+ (word)(ohdr -> oh_string) < HBLKSIZE ? "(smashed string)" :
+ ohdr -> oh_string[0] == '\0' ? "EMPTY(smashed?)" :
+ ohdr -> oh_string,
+ (unsigned long)(ohdr -> oh_int),
+ (unsigned long)(ohdr -> oh_sz));
PRINT_CALL_CHAIN(ohdr);
}
}
@@ -463,61 +463,61 @@ GC_API void GC_CALL GC_debug_register_displacement(size_t offset)
GC_API void * GC_CALL GC_debug_malloc(size_t lb, GC_EXTRA_PARAMS)
{
void * result = GC_malloc(lb + DEBUG_BYTES);
-
+
if (result == 0) {
GC_err_printf("GC_debug_malloc(%lu) returning NIL (",
- (unsigned long) lb);
+ (unsigned long) lb);
GC_err_puts(s);
GC_err_printf(":%ld)\n", (unsigned long)i);
return(0);
}
if (!GC_debugging_started) {
- GC_start_debugging();
+ GC_start_debugging();
}
ADD_CALL_CHAIN(result, ra);
return (GC_store_debug_info(result, (word)lb, s, (word)i));
}
GC_API void * GC_CALL GC_debug_malloc_ignore_off_page(size_t lb,
- GC_EXTRA_PARAMS)
+ GC_EXTRA_PARAMS)
{
void * result = GC_malloc_ignore_off_page(lb + DEBUG_BYTES);
-
+
if (result == 0) {
GC_err_printf("GC_debug_malloc_ignore_off_page(%lu) returning NIL (",
- (unsigned long) lb);
+ (unsigned long) lb);
GC_err_puts(s);
GC_err_printf(":%lu)\n", (unsigned long)i);
return(0);
}
if (!GC_debugging_started) {
- GC_start_debugging();
+ GC_start_debugging();
}
ADD_CALL_CHAIN(result, ra);
return (GC_store_debug_info(result, (word)lb, s, (word)i));
}
GC_API void * GC_CALL GC_debug_malloc_atomic_ignore_off_page(size_t lb,
- GC_EXTRA_PARAMS)
+ GC_EXTRA_PARAMS)
{
void * result = GC_malloc_atomic_ignore_off_page(lb + DEBUG_BYTES);
-
+
if (result == 0) {
GC_err_printf("GC_debug_malloc_atomic_ignore_off_page(%lu)"
- " returning NIL (", (unsigned long) lb);
+ " returning NIL (", (unsigned long) lb);
GC_err_puts(s);
GC_err_printf(":%lu)\n", (unsigned long)i);
return(0);
}
if (!GC_debugging_started) {
- GC_start_debugging();
+ GC_start_debugging();
}
ADD_CALL_CHAIN(result, ra);
return (GC_store_debug_info(result, (word)lb, s, (word)i));
}
# ifdef DBG_HDRS_ALL
-/*
+/*
* An allocation function for internal use.
* Normally internally allocated objects do not have debug information.
* But in this case, we need to make sure that all objects have debug
@@ -528,10 +528,10 @@ GC_API void * GC_CALL GC_debug_malloc_atomic_ignore_off_page(size_t lb,
void * GC_debug_generic_malloc_inner(size_t lb, int k)
{
void * result = GC_generic_malloc_inner(lb + DEBUG_BYTES, k);
-
+
if (result == 0) {
GC_err_printf("GC internal allocation (%lu bytes) returning NIL\n",
- (unsigned long) lb);
+ (unsigned long) lb);
return(0);
}
ADD_CALL_CHAIN(result, GC_RETURN_ADDR);
@@ -541,11 +541,11 @@ GC_API void * GC_CALL GC_debug_malloc_atomic_ignore_off_page(size_t lb,
void * GC_debug_generic_malloc_inner_ignore_off_page(size_t lb, int k)
{
void * result = GC_generic_malloc_inner_ignore_off_page(
- lb + DEBUG_BYTES, k);
-
+ lb + DEBUG_BYTES, k);
+
if (result == 0) {
GC_err_printf("GC internal allocation (%lu bytes) returning NIL\n",
- (unsigned long) lb);
+ (unsigned long) lb);
return(0);
}
ADD_CALL_CHAIN(result, GC_RETURN_ADDR);
@@ -557,16 +557,16 @@ GC_API void * GC_CALL GC_debug_malloc_atomic_ignore_off_page(size_t lb,
GC_API void * GC_CALL GC_debug_malloc_stubborn(size_t lb, GC_EXTRA_PARAMS)
{
void * result = GC_malloc_stubborn(lb + DEBUG_BYTES);
-
+
if (result == 0) {
GC_err_printf("GC_debug_malloc(%lu) returning NIL (",
- (unsigned long) lb);
+ (unsigned long) lb);
GC_err_puts(s);
GC_err_printf(":%lu)\n", (unsigned long)i);
return(0);
}
if (!GC_debugging_started) {
- GC_start_debugging();
+ GC_start_debugging();
}
ADD_CALL_CHAIN(result, ra);
return (GC_store_debug_info(result, (word)lb, s, (word)i));
@@ -576,7 +576,7 @@ GC_API void GC_CALL GC_debug_change_stubborn(void *p)
{
void * q = GC_base(p);
hdr * hhdr;
-
+
if (q == 0) {
GC_err_printf("Bad argument: %p to GC_debug_change_stubborn\n", p);
ABORT("GC_debug_change_stubborn: bad arg");
@@ -593,7 +593,7 @@ GC_API void GC_CALL GC_debug_end_stubborn_change(void *p)
{
register void * q = GC_base(p);
register hdr * hhdr;
-
+
if (q == 0) {
GC_err_printf("Bad argument: %p to GC_debug_end_stubborn_change\n", p);
ABORT("GC_debug_end_stubborn_change: bad arg");
@@ -628,10 +628,10 @@ GC_API void GC_CALL GC_debug_end_stubborn_change(void *p)
GC_API void * GC_CALL GC_debug_malloc_atomic(size_t lb, GC_EXTRA_PARAMS)
{
void * result = GC_malloc_atomic(lb + DEBUG_BYTES);
-
+
if (result == 0) {
GC_err_printf("GC_debug_malloc_atomic(%lu) returning NIL (",
- (unsigned long) lb);
+ (unsigned long) lb);
GC_err_puts(s);
GC_err_printf(":%lu)\n", (unsigned long)i);
return(0);
@@ -652,7 +652,7 @@ GC_API char * GC_CALL GC_debug_strdup(const char *str, GC_EXTRA_PARAMS)
copy = GC_debug_malloc_atomic(lb, OPT_RA s, i);
if (copy == NULL) {
# ifndef MSWINCE
- errno = ENOMEM;
+ errno = ENOMEM;
# endif
return NULL;
}
@@ -668,10 +668,10 @@ GC_API char * GC_CALL GC_debug_strdup(const char *str, GC_EXTRA_PARAMS)
GC_API void * GC_CALL GC_debug_malloc_uncollectable(size_t lb, GC_EXTRA_PARAMS)
{
void * result = GC_malloc_uncollectable(lb + UNCOLLECTABLE_DEBUG_BYTES);
-
+
if (result == 0) {
GC_err_printf("GC_debug_malloc_uncollectable(%lu) returning NIL (",
- (unsigned long) lb);
+ (unsigned long) lb);
GC_err_puts(s);
GC_err_printf(":%lu)\n", (unsigned long)i);
return(0);
@@ -687,11 +687,11 @@ GC_API void * GC_CALL GC_debug_malloc_uncollectable(size_t lb, GC_EXTRA_PARAMS)
void * GC_debug_malloc_atomic_uncollectable(size_t lb, GC_EXTRA_PARAMS)
{
void * result =
- GC_malloc_atomic_uncollectable(lb + UNCOLLECTABLE_DEBUG_BYTES);
-
+ GC_malloc_atomic_uncollectable(lb + UNCOLLECTABLE_DEBUG_BYTES);
+
if (result == 0) {
GC_err_printf(
- "GC_debug_malloc_atomic_uncollectable(%lu) returning NIL (",
+ "GC_debug_malloc_atomic_uncollectable(%lu) returning NIL (",
(unsigned long) lb);
GC_err_puts(s);
GC_err_printf(":%lu)\n", (unsigned long)i);
@@ -711,7 +711,7 @@ GC_API void GC_CALL GC_debug_free(void * p)
# ifndef SHORT_DBG_HDRS
ptr_t clobbered;
# endif
-
+
if (0 == p) return;
base = GC_base(p);
if (base == 0) {
@@ -720,7 +720,7 @@ GC_API void GC_CALL GC_debug_free(void * p)
}
if ((ptr_t)p - (ptr_t)base != sizeof(oh)) {
GC_err_printf(
- "GC_debug_free called on pointer %p w/o debugging info\n", p);
+ "GC_debug_free called on pointer %p w/o debugging info\n", p);
} else {
# ifndef SHORT_DBG_HDRS
clobbered = GC_check_annotated_obj((oh *)base);
@@ -740,26 +740,26 @@ GC_API void GC_CALL GC_debug_free(void * p)
if (GC_find_leak) {
GC_free(base);
} else {
- hdr * hhdr = HDR(p);
- GC_bool uncollectable = FALSE;
+ hdr * hhdr = HDR(p);
+ GC_bool uncollectable = FALSE;
if (hhdr -> hb_obj_kind == UNCOLLECTABLE) {
- uncollectable = TRUE;
- }
-# ifdef ATOMIC_UNCOLLECTABLE
- if (hhdr -> hb_obj_kind == AUNCOLLECTABLE) {
- uncollectable = TRUE;
- }
-# endif
- if (uncollectable) {
- GC_free(base);
- } else {
- size_t i;
- size_t obj_sz = BYTES_TO_WORDS(hhdr -> hb_sz - sizeof(oh));
-
- for (i = 0; i < obj_sz; ++i) ((word *)p)[i] = 0xdeadbeef;
- GC_ASSERT((word *)p + i == (word *)(base + hhdr -> hb_sz));
- }
+ uncollectable = TRUE;
+ }
+# ifdef ATOMIC_UNCOLLECTABLE
+ if (hhdr -> hb_obj_kind == AUNCOLLECTABLE) {
+ uncollectable = TRUE;
+ }
+# endif
+ if (uncollectable) {
+ GC_free(base);
+ } else {
+ size_t i;
+ size_t obj_sz = BYTES_TO_WORDS(hhdr -> hb_sz - sizeof(oh));
+
+ for (i = 0; i < obj_sz; ++i) ((word *)p)[i] = 0xdeadbeef;
+ GC_ASSERT((word *)p + i == (word *)(base + hhdr -> hb_sz));
+ }
} /* !GC_find_leak */
}
@@ -767,7 +767,7 @@ GC_API void GC_CALL GC_debug_free(void * p)
extern void GC_free_inner(void * p);
-/* Used internally; we assume it's called correctly. */
+/* Used internally; we assume it's called correctly. */
void GC_debug_free_inner(void * p)
{
ptr_t base = GC_base(p);
@@ -790,7 +790,7 @@ GC_API void * GC_CALL GC_debug_realloc(void * p, size_t lb, GC_EXTRA_PARAMS)
size_t copy_sz = lb;
size_t old_sz;
hdr * hhdr;
-
+
if (p == 0) return(GC_debug_malloc(lb, OPT_RA s, i));
base = GC_base(p);
if (base == 0) {
@@ -816,12 +816,12 @@ GC_API void * GC_CALL GC_debug_realloc(void * p, size_t lb, GC_EXTRA_PARAMS)
result = GC_debug_malloc_atomic(lb, OPT_RA s, i);
break;
case UNCOLLECTABLE:
- result = GC_debug_malloc_uncollectable(lb, OPT_RA s, i);
- break;
+ result = GC_debug_malloc_uncollectable(lb, OPT_RA s, i);
+ break;
# ifdef ATOMIC_UNCOLLECTABLE
case AUNCOLLECTABLE:
- result = GC_debug_malloc_atomic_uncollectable(lb, OPT_RA s, i);
- break;
+ result = GC_debug_malloc_atomic_uncollectable(lb, OPT_RA s, i);
+ break;
# endif
default:
result = NULL; /* initialized to prevent warning. */
@@ -847,10 +847,10 @@ GC_API void * GC_CALL GC_debug_realloc(void * p, size_t lb, GC_EXTRA_PARAMS)
#ifndef SHORT_DBG_HDRS
-/* List of smashed objects. We defer printing these, since we can't */
-/* always print them nicely with the allocation lock held. */
-/* We put them here instead of in GC_arrays, since it may be useful to */
-/* be able to look at them with the debugger. */
+/* List of smashed objects. We defer printing these, since we can't */
+/* always print them nicely with the allocation lock held. */
+/* We put them here instead of in GC_arrays, since it may be useful to */
+/* be able to look at them with the debugger. */
#define MAX_SMASHED 20
ptr_t GC_smashed[MAX_SMASHED];
unsigned GC_n_smashed = 0;
@@ -860,12 +860,12 @@ STATIC void GC_add_smashed(ptr_t smashed)
GC_ASSERT(GC_is_marked(GC_base(smashed)));
GC_smashed[GC_n_smashed] = smashed;
if (GC_n_smashed < MAX_SMASHED - 1) ++GC_n_smashed;
- /* In case of overflow, we keep the first MAX_SMASHED-1 */
- /* entries plus the last one. */
+ /* In case of overflow, we keep the first MAX_SMASHED-1 */
+ /* entries plus the last one. */
GC_have_errors = TRUE;
}
-/* Print all objects on the list. Clear the list. */
+/* Print all objects on the list. Clear the list. */
STATIC void GC_print_all_smashed_proc(void)
{
unsigned i;
@@ -875,14 +875,14 @@ STATIC void GC_print_all_smashed_proc(void)
GC_err_printf("GC_check_heap_block: found smashed heap objects:\n");
for (i = 0; i < GC_n_smashed; ++i) {
GC_print_smashed_obj((ptr_t)GC_base(GC_smashed[i]) + sizeof(oh),
- GC_smashed[i]);
- GC_smashed[i] = 0;
+ GC_smashed[i]);
+ GC_smashed[i] = 0;
}
GC_n_smashed = 0;
}
-/* Check all marked objects in the given block for validity */
-/* Avoid GC_apply_to_each_object for performance reasons. */
+/* Check all marked objects in the given block for validity */
+/* Avoid GC_apply_to_each_object for performance reasons. */
/*ARGSUSED*/
STATIC void GC_check_heap_block(struct hblk *hbp, word dummy)
{
@@ -890,35 +890,35 @@ STATIC void GC_check_heap_block(struct hblk *hbp, word dummy)
size_t sz = hhdr -> hb_sz;
size_t bit_no;
char *p, *plim;
-
+
p = hbp->hb_body;
bit_no = 0;
if (sz > MAXOBJBYTES) {
- plim = p;
+ plim = p;
} else {
- plim = hbp->hb_body + HBLKSIZE - sz;
+ plim = hbp->hb_body + HBLKSIZE - sz;
}
/* go through all words in block */
- while( p <= plim ) {
- if( mark_bit_from_hdr(hhdr, bit_no)
- && GC_HAS_DEBUG_INFO((ptr_t)p)) {
- ptr_t clobbered = GC_check_annotated_obj((oh *)p);
-
- if (clobbered != 0) GC_add_smashed(clobbered);
- }
- bit_no += MARK_BIT_OFFSET(sz);
- p += sz;
- }
+ while( p <= plim ) {
+ if( mark_bit_from_hdr(hhdr, bit_no)
+ && GC_HAS_DEBUG_INFO((ptr_t)p)) {
+ ptr_t clobbered = GC_check_annotated_obj((oh *)p);
+
+ if (clobbered != 0) GC_add_smashed(clobbered);
+ }
+ bit_no += MARK_BIT_OFFSET(sz);
+ p += sz;
+ }
}
-/* This assumes that all accessible objects are marked, and that */
-/* I hold the allocation lock. Normally called by collector. */
+/* This assumes that all accessible objects are marked, and that */
+/* I hold the allocation lock. Normally called by collector. */
STATIC void GC_check_heap_proc(void)
{
# ifndef SMALL_CONFIG
GC_STATIC_ASSERT((sizeof(oh) & (GRANULE_BYTES - 1)) == 0);
- /* FIXME: Should we check for twice that alignment? */
+ /* FIXME: Should we check for twice that alignment? */
# endif
GC_apply_to_all_blocks(GC_check_heap_block, (word)0);
}
@@ -935,11 +935,11 @@ void * GC_make_closure(GC_finalization_proc fn, void * data)
struct closure * result =
# ifdef DBG_HDRS_ALL
(struct closure *) GC_debug_malloc(sizeof (struct closure),
- GC_EXTRAS);
+ GC_EXTRAS);
# else
(struct closure *) GC_malloc(sizeof (struct closure));
# endif
-
+
result -> cl_fn = fn;
result -> cl_data = data;
return((void *)result);
@@ -948,26 +948,26 @@ void * GC_make_closure(GC_finalization_proc fn, void * data)
void GC_CALLBACK GC_debug_invoke_finalizer(void * obj, void * data)
{
register struct closure * cl = (struct closure *) data;
-
+
(*(cl -> cl_fn))((void *)((char *)obj + sizeof(oh)), cl -> cl_data);
-}
+}
/* Special finalizer_proc value to detect GC_register_finalizer() failure. */
#define OFN_UNSET (GC_finalization_proc)(signed_word)-1
-/* Set ofn and ocd to reflect the values we got back. */
+/* Set ofn and ocd to reflect the values we got back. */
static void store_old (void *obj, GC_finalization_proc my_old_fn,
- struct closure *my_old_cd, GC_finalization_proc *ofn,
- void **ocd)
+ struct closure *my_old_cd, GC_finalization_proc *ofn,
+ void **ocd)
{
if (0 != my_old_fn) {
if (my_old_fn == OFN_UNSET) {
- /* register_finalizer() failed; (*ofn) and (*ocd) are unchanged. */
- return;
- }
+ /* register_finalizer() failed; (*ofn) and (*ocd) are unchanged. */
+ return;
+ }
if (my_old_fn != GC_debug_invoke_finalizer) {
GC_err_printf("Debuggable object at %p had non-debug finalizer.\n",
- obj);
+ obj);
/* This should probably be fatal. */
} else {
if (ofn) *ofn = my_old_cd -> cl_fn;
@@ -980,118 +980,118 @@ static void store_old (void *obj, GC_finalization_proc my_old_fn,
}
GC_API void GC_CALL GC_debug_register_finalizer(void * obj,
- GC_finalization_proc fn,
- void * cd, GC_finalization_proc *ofn,
- void * *ocd)
+ GC_finalization_proc fn,
+ void * cd, GC_finalization_proc *ofn,
+ void * *ocd)
{
GC_finalization_proc my_old_fn = OFN_UNSET;
void * my_old_cd;
ptr_t base = GC_base(obj);
if (0 == base) {
- /* We won't collect it, hence finalizer wouldn't be run. */
- if (ocd) *ocd = 0;
- if (ofn) *ofn = 0;
- return;
+ /* We won't collect it, hence finalizer wouldn't be run. */
+ if (ocd) *ocd = 0;
+ if (ofn) *ofn = 0;
+ return;
}
if ((ptr_t)obj - base != sizeof(oh)) {
GC_err_printf(
- "GC_debug_register_finalizer called with non-base-pointer %p\n",
- obj);
+ "GC_debug_register_finalizer called with non-base-pointer %p\n",
+ obj);
}
if (0 == fn) {
GC_register_finalizer(base, 0, 0, &my_old_fn, &my_old_cd);
} else {
GC_register_finalizer(base, GC_debug_invoke_finalizer,
- GC_make_closure(fn,cd), &my_old_fn, &my_old_cd);
+ GC_make_closure(fn,cd), &my_old_fn, &my_old_cd);
}
store_old(obj, my_old_fn, (struct closure *)my_old_cd, ofn, ocd);
}
GC_API void GC_CALL GC_debug_register_finalizer_no_order
- (void * obj, GC_finalization_proc fn,
- void * cd, GC_finalization_proc *ofn,
- void * *ocd)
+ (void * obj, GC_finalization_proc fn,
+ void * cd, GC_finalization_proc *ofn,
+ void * *ocd)
{
GC_finalization_proc my_old_fn = OFN_UNSET;
void * my_old_cd;
ptr_t base = GC_base(obj);
if (0 == base) {
- /* We won't collect it, hence finalizer wouldn't be run. */
- if (ocd) *ocd = 0;
- if (ofn) *ofn = 0;
- return;
+ /* We won't collect it, hence finalizer wouldn't be run. */
+ if (ocd) *ocd = 0;
+ if (ofn) *ofn = 0;
+ return;
}
if ((ptr_t)obj - base != sizeof(oh)) {
GC_err_printf(
- "GC_debug_register_finalizer_no_order called with "
- "non-base-pointer %p\n",
- obj);
+ "GC_debug_register_finalizer_no_order called with "
+ "non-base-pointer %p\n",
+ obj);
}
if (0 == fn) {
GC_register_finalizer_no_order(base, 0, 0, &my_old_fn, &my_old_cd);
} else {
GC_register_finalizer_no_order(base, GC_debug_invoke_finalizer,
- GC_make_closure(fn,cd), &my_old_fn,
- &my_old_cd);
+ GC_make_closure(fn,cd), &my_old_fn,
+ &my_old_cd);
}
store_old(obj, my_old_fn, (struct closure *)my_old_cd, ofn, ocd);
}
GC_API void GC_CALL GC_debug_register_finalizer_unreachable
- (void * obj, GC_finalization_proc fn,
- void * cd, GC_finalization_proc *ofn,
- void * *ocd)
+ (void * obj, GC_finalization_proc fn,
+ void * cd, GC_finalization_proc *ofn,
+ void * *ocd)
{
GC_finalization_proc my_old_fn = OFN_UNSET;
void * my_old_cd;
ptr_t base = GC_base(obj);
if (0 == base) {
- /* We won't collect it, hence finalizer wouldn't be run. */
- if (ocd) *ocd = 0;
- if (ofn) *ofn = 0;
- return;
+ /* We won't collect it, hence finalizer wouldn't be run. */
+ if (ocd) *ocd = 0;
+ if (ofn) *ofn = 0;
+ return;
}
if ((ptr_t)obj - base != sizeof(oh)) {
GC_err_printf(
- "GC_debug_register_finalizer_unreachable called with "
- "non-base-pointer %p\n",
- obj);
+ "GC_debug_register_finalizer_unreachable called with "
+ "non-base-pointer %p\n",
+ obj);
}
if (0 == fn) {
GC_register_finalizer_unreachable(base, 0, 0, &my_old_fn, &my_old_cd);
} else {
GC_register_finalizer_unreachable(base, GC_debug_invoke_finalizer,
- GC_make_closure(fn,cd), &my_old_fn,
- &my_old_cd);
+ GC_make_closure(fn,cd), &my_old_fn,
+ &my_old_cd);
}
store_old(obj, my_old_fn, (struct closure *)my_old_cd, ofn, ocd);
}
GC_API void GC_CALL GC_debug_register_finalizer_ignore_self
- (void * obj, GC_finalization_proc fn,
- void * cd, GC_finalization_proc *ofn,
- void * *ocd)
+ (void * obj, GC_finalization_proc fn,
+ void * cd, GC_finalization_proc *ofn,
+ void * *ocd)
{
GC_finalization_proc my_old_fn = OFN_UNSET;
void * my_old_cd;
ptr_t base = GC_base(obj);
if (0 == base) {
- /* We won't collect it, hence finalizer wouldn't be run. */
- if (ocd) *ocd = 0;
- if (ofn) *ofn = 0;
- return;
+ /* We won't collect it, hence finalizer wouldn't be run. */
+ if (ocd) *ocd = 0;
+ if (ofn) *ofn = 0;
+ return;
}
if ((ptr_t)obj - base != sizeof(oh)) {
GC_err_printf(
- "GC_debug_register_finalizer_ignore_self called with "
- "non-base-pointer %p\n", obj);
+ "GC_debug_register_finalizer_ignore_self called with "
+ "non-base-pointer %p\n", obj);
}
if (0 == fn) {
GC_register_finalizer_ignore_self(base, 0, 0, &my_old_fn, &my_old_cd);
} else {
GC_register_finalizer_ignore_self(base, GC_debug_invoke_finalizer,
- GC_make_closure(fn,cd), &my_old_fn,
- &my_old_cd);
+ GC_make_closure(fn,cd), &my_old_fn,
+ &my_old_cd);
}
store_old(obj, my_old_fn, (struct closure *)my_old_cd, ofn, ocd);
}
diff --git a/doc/README b/doc/README
index 36a926a4..6b663d97 100644
--- a/doc/README
+++ b/doc/README
@@ -118,7 +118,7 @@ introduced leaks, the amount of unreclaimed memory typically stays
bounded.
In the following, an "object" is defined to be a region of memory allocated
-by the routines described below.
+by the routines described below.
Any objects not intended to be collected must be pointed to either
from other such accessible objects, or from the registers,
@@ -219,7 +219,7 @@ use up to about 30MB of memory. (The multi-threaded version will use more.
to build and test the "cord" string library.)
Makefile.direct will generate a library gc.a which you should link against.
-Typing "make cords" will add the cord library to gc.a.
+Typing "make cords" will add the cord library to gc.a.
The GNU style build process understands the usual targets. "Make check"
runs a number of tests. "Make install" installs at least libgc, and libcord.
@@ -280,9 +280,9 @@ THE C INTERFACE TO THE ALLOCATOR
The following routines are intended to be directly called by the user.
Note that usually only GC_malloc is necessary. GC_clear_roots and GC_add_roots
calls may be required if the collector has to trace from nonstandard places
-(e.g. from dynamic library data areas on a machine on which the
+(e.g. from dynamic library data areas on a machine on which the
collector doesn't already understand them.) On some machines, it may
-be desirable to set GC_stacktop to a good approximation of the stack base.
+be desirable to set GC_stacktop to a good approximation of the stack base.
(This enhances code portability on HP PA machines, since there is no
good way for the collector to compute this value.) Client code may include
"gc.h", which defines all of the following, plus many others.
@@ -332,17 +332,17 @@ good way for the collector to compute this value.) Client code may include
program startup.)
6) GC_malloc_ignore_off_page(bytes)
- - identical to GC_malloc, but the client promises to keep a pointer to
- the somewhere within the first 256 bytes of the object while it is
- live. (This pointer should nortmally be declared volatile to prevent
- interference from compiler optimizations.) This is the recommended
- way to allocate anything that is likely to be larger than 100Kbytes
- or so. (GC_malloc may result in failure to reclaim such objects.)
+ - identical to GC_malloc, but the client promises to keep a pointer to
+ the somewhere within the first 256 bytes of the object while it is
+ live. (This pointer should nortmally be declared volatile to prevent
+ interference from compiler optimizations.) This is the recommended
+ way to allocate anything that is likely to be larger than 100Kbytes
+ or so. (GC_malloc may result in failure to reclaim such objects.)
7) GC_set_warn_proc(proc)
- - Can be used to redirect warnings from the collector. Such warnings
- should be rare, and should not be ignored during code development.
-
+ - Can be used to redirect warnings from the collector. Such warnings
+ should be rare, and should not be ignored during code development.
+
8) GC_enable_incremental()
- Enables generational and incremental collection. Useful for large
heaps on machines that provide access to page dirty information.
@@ -354,7 +354,7 @@ good way for the collector to compute this value.) Client code may include
9) Several routines to allow for registration of finalization code.
User supplied finalization code may be invoked when an object becomes
unreachable. To call (*f)(obj, x) when obj becomes inaccessible, use
- GC_register_finalizer(obj, f, x, 0, 0);
+ GC_register_finalizer(obj, f, x, 0, 0);
For more sophisticated uses, and for finalization ordering issues,
see gc.h.
@@ -370,7 +370,7 @@ in excessive memory consumption.
Some additional tuning is possible through the parameters defined
near the top of gc_priv.h.
-
+
If only GC_malloc is intended to be used, it might be appropriate to define:
#define malloc(n) GC_malloc(n)
@@ -477,7 +477,7 @@ in the header, see the definition of the type oh in debug_malloc.c)
INCREMENTAL/GENERATIONAL COLLECTION:
-The collector normally interrupts client code for the duration of
+The collector normally interrupts client code for the duration of
a garbage collection mark phase. This may be unacceptable if interactive
response is needed for programs with large heaps. The collector
can also run in a "generational" mode, in which it usually attempts to
@@ -519,14 +519,14 @@ objects. Stubborn objects are treated less efficiently than pointerfree
A rough rule of thumb is that, in the absence of VM information, garbage
collection pauses are proportional to the amount of pointerful storage
plus the amount of modified "stubborn" storage that is reachable during
-the collection.
+the collection.
Initial allocation of stubborn objects takes longer than allocation
of other objects, since other data structures need to be maintained.
We recommend against random use of stubborn objects in client
code, since bugs caused by inappropriate writes to stubborn objects
-are likely to be very infrequently observed and hard to trace.
+are likely to be very infrequently observed and hard to trace.
However, their use may be appropriate in a few carefully written
library routines that do not make the objects themselves available
for writing by client code.
@@ -553,4 +553,3 @@ may help in some cases.
Please address bug reports to boehm@acm.org. If you are
contemplating a major addition, you might also send mail to ask whether
it's already been done (or whether we tried and discarded it).
-
diff --git a/doc/README.DGUX386 b/doc/README.DGUX386
index 0b878517..8960b942 100644
--- a/doc/README.DGUX386
+++ b/doc/README.DGUX386
@@ -6,10 +6,10 @@
You need the GCC-3.0.3 rev (DG/UX) compiler to build this tree.
This compiler has the new "dgux386" threads package implemented.
It also supports the switch "-pthread" needed to link correctly
- the DG/UX's -lrte -lthread with -lgcc and the system's -lc.
+ the DG/UX's -lrte -lthread with -lgcc and the system's -lc.
Finally we support parralleli-mark for the SMP DG/UX machines.
To build the garbage collector do:
-
+
./configure --enable-parallel-mark
make
make gctest
@@ -19,19 +19,19 @@
Alternatively you can do a configuration
./configure --enable-parallel-mark --disable-shared
-
+
to build only the static version of libgc.
-
+
To enable debugging messages please do:
- 1) Add the "--enable-gc-debug" flag during configuration.
+ 1) Add the "--enable-gc-debug" flag during configuration.
2) Edit the file linux-threads.c and uncommnect the line:
- /* #define DEBUG_THREADS 1 */ to --->
+ /* #define DEBUG_THREADS 1 */ to --->
#define DEBUG_THREADS 1
Then give "make" as usual.
-
+
In a machine with 4 CPUs (my own machine) the option parallel
mark (aka --enable-parallel-mark) makes a BIG difference.
@@ -51,8 +51,8 @@ Note (HB):
problems with stylistic corrections made by me.
---- ltconfig.ORIG Mon Jan 28 20:22:18 2002
-+++ ltconfig Mon Jan 28 20:44:00 2002
+--- ltconfig.ORIG Mon Jan 28 20:22:18 2002
++++ ltconfig Mon Jan 28 20:44:00 2002
@@ -689,6 +689,11 @@
pic_flag=-Kconform_pic
fi
@@ -68,7 +68,7 @@ Note (HB):
@@ -718,6 +723,12 @@
# We can build DLLs from non-PIC.
;;
-
+
+ dgux*)
+ pic_flag='-KPIC'
+ link_static='-Bstatic'
@@ -81,7 +81,7 @@ Note (HB):
@@ -1154,6 +1165,22 @@
fi
;;
-
+
+ dgux*)
+ ld_shlibs=yes
+ # For both C/C++ ommit the deplibs. This is because we relying on the fact
@@ -103,7 +103,7 @@ Note (HB):
# no search path for DLLs.
@@ -1497,7 +1524,7 @@
;;
-
+
dgux*)
- archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linkopts'
+ archive_cmds='$CC -shared -h $soname -o $lib $libobjs $linkopts'
@@ -112,7 +112,7 @@ Note (HB):
;;
@@ -2092,12 +2119,17 @@
;;
-
+
dgux*)
- version_type=linux
+ version_type=dgux
@@ -129,87 +129,87 @@ Note (HB):
+ hardcode_shlibpath_var=no
+ ac_cv_archive_cmds_needs_lc=no
;;
-
+
sysv4*MP*)
---- ltmain.sh.ORIG Mon Jan 28 20:31:18 2002
-+++ ltmain.sh Tue Jan 29 00:11:29 2002
+--- ltmain.sh.ORIG Mon Jan 28 20:31:18 2002
++++ ltmain.sh Tue Jan 29 00:11:29 2002
@@ -1072,11 +1072,38 @@
- esac
- ;;
-
+ esac
+ ;;
+
+ -thread*)
-+ # DG/UX GCC 2.95.x, 3.x.x rev (DG/UX) links -lthread
-+ # with the switch -threads
-+ if test "$arg" = "-threads"; then
-+ case "$host" in
-+ i[3456]86-*-dgux*)
-+ deplibs="$deplibs $arg"
-+ continue
-+ ;;
-+ esac
-+ fi
-+ ;;
++ # DG/UX GCC 2.95.x, 3.x.x rev (DG/UX) links -lthread
++ # with the switch -threads
++ if test "$arg" = "-threads"; then
++ case "$host" in
++ i[3456]86-*-dgux*)
++ deplibs="$deplibs $arg"
++ continue
++ ;;
++ esac
++ fi
++ ;;
+
+ -pthread*)
-+ # DG/UX GCC 2.95.x, 3.x.x rev (DG/UX) links -lthread
-+ # with the switch -pthread
-+ if test "$arg" = "-pthread"; then
-+ case "$host" in
-+ i[3456]86-*-dgux*)
-+ deplibs="$deplibs $arg"
-+ continue
-+ ;;
-+ esac
-+ fi
-+ ;;
++ # DG/UX GCC 2.95.x, 3.x.x rev (DG/UX) links -lthread
++ # with the switch -pthread
++ if test "$arg" = "-pthread"; then
++ case "$host" in
++ i[3456]86-*-dgux*)
++ deplibs="$deplibs $arg"
++ continue
++ ;;
++ esac
++ fi
++ ;;
+
-l*)
- if test "$arg" = "-lc"; then
- case "$host" in
-- *-*-cygwin* | *-*-mingw* | *-*-os2* | *-*-beos*)
-+ *-*-cygwin* | *-*-mingw* | *-*-os2* | *-*-beos* | i[3456]86-*-dgux*)
- # These systems don't actually have c library (as such)
-+ # It is wrong in DG/UX to add -lc when creating shared/dynamic objs/libs
- continue
- ;;
- esac
+ if test "$arg" = "-lc"; then
+ case "$host" in
+- *-*-cygwin* | *-*-mingw* | *-*-os2* | *-*-beos*)
++ *-*-cygwin* | *-*-mingw* | *-*-os2* | *-*-beos* | i[3456]86-*-dgux*)
+ # These systems don't actually have c library (as such)
++ # It is wrong in DG/UX to add -lc when creating shared/dynamic objs/libs
+ continue
+ ;;
+ esac
@@ -1248,6 +1275,12 @@
- temp_deplibs=
- for deplib in $dependency_libs; do
- case "$deplib" in
-+ -thread*)
-+ temp_deplibs="$temp_deplibs $deplib"
-+ ;;
-+ -pthread)
-+ temp_deplibs="$temp_deplibs $deplib"
-+ ;;
- -R*) temp_xrpath=`$echo "X$deplib" | $Xsed -e 's/^-R//'`
- case " $rpath $xrpath " in
- *" $temp_xrpath "*) ;;
+ temp_deplibs=
+ for deplib in $dependency_libs; do
+ case "$deplib" in
++ -thread*)
++ temp_deplibs="$temp_deplibs $deplib"
++ ;;
++ -pthread)
++ temp_deplibs="$temp_deplibs $deplib"
++ ;;
+ -R*) temp_xrpath=`$echo "X$deplib" | $Xsed -e 's/^-R//'`
+ case " $rpath $xrpath " in
+ *" $temp_xrpath "*) ;;
@@ -1709,6 +1742,13 @@
- done
- ;;
-
-+ dgux)
-+ # Leave mostly blank for DG/UX
-+ major=
-+ versuffix=".$current.$revision";
-+ verstring=
-+ ;;
+ done
+ ;;
+
++ dgux)
++ # Leave mostly blank for DG/UX
++ major=
++ versuffix=".$current.$revision";
++ verstring=
++ ;;
+
- linux)
- major=.`expr $current - $age`
- versuffix="$major.$age.$revision"
+ linux)
+ major=.`expr $current - $age`
+ versuffix="$major.$age.$revision"
@@ -1792,8 +1832,9 @@
-
- dependency_libs="$deplibs"
- case "$host" in
-- *-*-cygwin* | *-*-mingw* | *-*-os2* | *-*-beos*)
-+ *-*-cygwin* | *-*-mingw* | *-*-os2* | *-*-beos* | i[3456]86-*-dgux*)
- # these systems don't actually have a c library (as such)!
-+ # It is wrong in DG/UX to add -lc when creating shared/dynamic objs/libs
- ;;
- *)
- # Add libc to deplibs on all other systems.
+
+ dependency_libs="$deplibs"
+ case "$host" in
+- *-*-cygwin* | *-*-mingw* | *-*-os2* | *-*-beos*)
++ *-*-cygwin* | *-*-mingw* | *-*-os2* | *-*-beos* | i[3456]86-*-dgux*)
+ # these systems don't actually have a c library (as such)!
++ # It is wrong in DG/UX to add -lc when creating shared/dynamic objs/libs
+ ;;
+ *)
+ # Add libc to deplibs on all other systems.
diff --git a/doc/README.autoconf b/doc/README.autoconf
index 6c8ba55a..f8640bec 100644
--- a/doc/README.autoconf
+++ b/doc/README.autoconf
@@ -37,7 +37,7 @@ some, please let me know.)
Note that the distribution comes with a "Makefile" which will be overwritten
by "configure" with one that is not at all equiavelent to the original. The
-distribution contains a copy of the original "Makefile" in "Makefile.direct".
+distribution contains a copy of the original "Makefile" in "Makefile.direct".
Important options to configure:
@@ -48,7 +48,7 @@ Important options to configure:
--enable-threads=TYPE choose threading package
--enable-parallel-mark parallelize marking and free list construction
--enable-gc-debug (--enable-full-debug before about 7.0)
- include full support for pointer backtracing etc.
+ include full support for pointer backtracing etc.
Unless --prefix is set (or --exec-prefix or one of the more obscure options),
@@ -57,5 +57,3 @@ would typically require the "make install" to be run as root.
Most commonly --enable-threads=posix or will be needed. --enable-parallel-mark
is recommended for multiprocessors if it is supported on the platform.
-
-
diff --git a/doc/README.environment b/doc/README.environment
index b6f6399d..ddfa60b9 100644
--- a/doc/README.environment
+++ b/doc/README.environment
@@ -2,18 +2,18 @@ The garbage collector looks at a number of environment variables which are
then used to affect its operation. These are examined only on Un*x-like
platforms and win32.
-GC_INITIAL_HEAP_SIZE=<bytes> - Initial heap size in bytes. May speed up
- process start-up.
+GC_INITIAL_HEAP_SIZE=<bytes> - Initial heap size in bytes. May speed up
+ process start-up.
GC_MAXIMUM_HEAP_SIZE=<bytes> - Maximum collected heap size.
GC_LOOP_ON_ABORT - Causes the collector abort routine to enter a tight loop.
- This may make it easier to debug, such a process, especially
- for multithreaded platforms that don't produce usable core
- files, or if a core file would be too large. On some
- platforms, this also causes SIGSEGV to be caught and
- result in an infinite loop in a handler, allowing
- similar debugging techniques.
+ This may make it easier to debug, such a process, especially
+ for multithreaded platforms that don't produce usable core
+ files, or if a core file would be too large. On some
+ platforms, this also causes SIGSEGV to be caught and
+ result in an infinite loop in a handler, allowing
+ similar debugging techniques.
GC_PRINT_STATS - Turn on GC logging. Not functional with -DSMALL_CONFIG.
@@ -22,137 +22,137 @@ GC_LOG_FILE - The name of the log file. Stderr by default.
GC_PRINT_VERBOSE_STATS - Turn on even more logging.
GC_DUMP_REGULARLY - Generate a GC debugging dump GC_dump() on startup
- and during every collection. Very verbose. Useful
- if you have a bug to report, but please include only the
- last complete dump.
+ and during every collection. Very verbose. Useful
+ if you have a bug to report, but please include only the
+ last complete dump.
GC_BACKTRACES=<n> - Generate n random backtraces (for heap profiling) after
- each GC. Collector must have been built with
- KEEP_BACK_PTRS. This won't generate useful output unless
- most objects in the heap were allocated through debug
- allocators. This is intended to be only a statistical
- sample; individual traces may be erroneous due to
- concurrent heap mutation.
+ each GC. Collector must have been built with
+ KEEP_BACK_PTRS. This won't generate useful output unless
+ most objects in the heap were allocated through debug
+ allocators. This is intended to be only a statistical
+ sample; individual traces may be erroneous due to
+ concurrent heap mutation.
GC_PRINT_ADDRESS_MAP - Linux only. Dump /proc/self/maps, i.e. various address
- maps for the process, to stderr on every GC. Useful for
- mapping root addresses to source for deciphering leak
- reports.
+ maps for the process, to stderr on every GC. Useful for
+ mapping root addresses to source for deciphering leak
+ reports.
GC_NPROCS=<n> - Linux w/threads only. Explicitly sets the number of processors
- that the GC should expect to use. Note that setting this to 1
- when multiple processors are available will preserve
- correctness, but may lead to really horrible performance,
- since the lock implementation will immediately yield without
- first spinning.
+ that the GC should expect to use. Note that setting this to 1
+ when multiple processors are available will preserve
+ correctness, but may lead to really horrible performance,
+ since the lock implementation will immediately yield without
+ first spinning.
GC_MARKERS=<n> - Only if compiled with PARALLEL_MARK. Set the number
- of marker threads. This is normally set to the number of
- processors. It is safer to adjust GC_MARKERS than GC_NPROCS,
- since GC_MARKERS has no impact on the lock implementation.
+ of marker threads. This is normally set to the number of
+ processors. It is safer to adjust GC_MARKERS than GC_NPROCS,
+ since GC_MARKERS has no impact on the lock implementation.
GC_NO_BLACKLIST_WARNING - Prevents the collector from issuing
- warnings about allocations of very large blocks.
- Deprecated. Use GC_LARGE_ALLOC_WARN_INTERVAL instead.
+ warnings about allocations of very large blocks.
+ Deprecated. Use GC_LARGE_ALLOC_WARN_INTERVAL instead.
GC_LARGE_ALLOC_WARN_INTERVAL=<n> - Print every nth warning about very large
- block allocations, starting with the nth one. Small values
- of n are generally benign, in that a bounded number of
- such warnings generally indicate at most a bounded leak.
- For best results it should be set at 1 during testing.
- Default is 5. Very large numbers effectively disable the
- warning.
+ block allocations, starting with the nth one. Small values
+ of n are generally benign, in that a bounded number of
+ such warnings generally indicate at most a bounded leak.
+ For best results it should be set at 1 during testing.
+ Default is 5. Very large numbers effectively disable the
+ warning.
GC_IGNORE_GCJ_INFO - Ignore the type descriptors implicitly supplied by
- GC_gcj_malloc and friends. This is useful for debugging
- descriptor generation problems, and possibly for
- temporarily working around such problems. It forces a
- fully conservative scan of all heap objects except
- those known to be pointerfree, and may thus have other
- adverse effects.
+ GC_gcj_malloc and friends. This is useful for debugging
+ descriptor generation problems, and possibly for
+ temporarily working around such problems. It forces a
+ fully conservative scan of all heap objects except
+ those known to be pointerfree, and may thus have other
+ adverse effects.
GC_PRINT_BACK_HEIGHT - Print max length of chain through unreachable objects
- ending in a reachable one. If this number remains
- bounded, then the program is "GC robust". This ensures
- that a fixed number of misidentified pointers can only
- result in a bounded space leak. This currently only
- works if debugging allocation is used throughout.
- It increases GC space and time requirements appreciably.
- This feature is still somewhat experimental, and requires
- that the collector have been built with MAKE_BACK_GRAPH
- defined. For details, see Boehm, "Bounding Space Usage
- of Conservative Garbage Collectors", POPL 2001, or
- http://lib.hpl.hp.com/techpubs/2001/HPL-2001-251.html .
+ ending in a reachable one. If this number remains
+ bounded, then the program is "GC robust". This ensures
+ that a fixed number of misidentified pointers can only
+ result in a bounded space leak. This currently only
+ works if debugging allocation is used throughout.
+ It increases GC space and time requirements appreciably.
+ This feature is still somewhat experimental, and requires
+ that the collector have been built with MAKE_BACK_GRAPH
+ defined. For details, see Boehm, "Bounding Space Usage
+ of Conservative Garbage Collectors", POPL 2001, or
+ http://lib.hpl.hp.com/techpubs/2001/HPL-2001-251.html .
GC_RETRY_SIGNALS, GC_NO_RETRY_SIGNALS - Try to compensate for lost
- thread suspend signals in linux_threads.c. On by
- default for GC_OSF1_THREADS, off otherwise. Note
- that this does not work around a possible loss of
- thread restart signals. This seems to be necessary for
- some versions of Tru64. Since we've previously seen
- similar issues on some other operating systems, it
- was turned into a runtime flag to enable last-minute
- work-arounds.
+ thread suspend signals in linux_threads.c. On by
+ default for GC_OSF1_THREADS, off otherwise. Note
+ that this does not work around a possible loss of
+ thread restart signals. This seems to be necessary for
+ some versions of Tru64. Since we've previously seen
+ similar issues on some other operating systems, it
+ was turned into a runtime flag to enable last-minute
+ work-arounds.
GC_USE_GETWRITEWATCH=<n> - Only if MPROTECT_VDB and GWW_VDB are both defined
- (Win32 only). Explicitly specify which strategy of
- keeping track of dirtied pages should be used.
- If n=0 then GetWriteWatch() is not used (falling back to
- protecting pages and catching memory faults strategy)
- else the collector tries to use GetWriteWatch-based
- strategy (GWW_VDB) first if available.
+ (Win32 only). Explicitly specify which strategy of
+ keeping track of dirtied pages should be used.
+ If n=0 then GetWriteWatch() is not used (falling back to
+ protecting pages and catching memory faults strategy)
+ else the collector tries to use GetWriteWatch-based
+ strategy (GWW_VDB) first if available.
GC_DISABLE_INCREMENTAL - Ignore runtime requests to enable incremental GC.
- Useful for debugging.
+ Useful for debugging.
The following turn on runtime flags that are also program settable. Checked
only during initialization. We expect that they will usually be set through
other means, but this may help with debugging and testing:
GC_ENABLE_INCREMENTAL - Turn on incremental collection at startup. Note that,
- depending on platform and collector configuration, this
- may involve write protecting pieces of the heap to
- track modifications. These pieces may include pointerfree
- objects or not. Although this is intended to be
- transparent, it may cause unintended system call failures.
- Use with caution.
+ depending on platform and collector configuration, this
+ may involve write protecting pieces of the heap to
+ track modifications. These pieces may include pointerfree
+ objects or not. Although this is intended to be
+ transparent, it may cause unintended system call failures.
+ Use with caution.
GC_PAUSE_TIME_TARGET - Set the desired garbage collector pause time in msecs.
- This only has an effect if incremental collection is
- enabled. If a collection requires appreciably more time
- than this, the client will be restarted, and the collector
- will need to do additional work to compensate. The
- special value "999999" indicates that pause time is
- unlimited, and the incremental collector will behave
- completely like a simple generational collector. If
- the collector is configured for parallel marking, and
- run on a multiprocessor, incremental collection should
- only be used with unlimited pause time.
+ This only has an effect if incremental collection is
+ enabled. If a collection requires appreciably more time
+ than this, the client will be restarted, and the collector
+ will need to do additional work to compensate. The
+ special value "999999" indicates that pause time is
+ unlimited, and the incremental collector will behave
+ completely like a simple generational collector. If
+ the collector is configured for parallel marking, and
+ run on a multiprocessor, incremental collection should
+ only be used with unlimited pause time.
GC_FULL_FREQUENCY - Set the desired number of partial collections between full
- collections. Matters only if GC_incremental is set.
+ collections. Matters only if GC_incremental is set.
GC_FREE_SPACE_DIVISOR - Set GC_free_space_divisor to the indicated value.
Setting it to larger values decreases space consumption
- and increases GC frequency.
+ and increases GC frequency.
GC_UNMAP_THRESHOLD - Set the desired memory blocks unmapping threshold (the
- number of sequential garbage collections for which
- a candidate block for unmapping should remain free). The
- special value "0" completely disables unmapping.
+ number of sequential garbage collections for which
+ a candidate block for unmapping should remain free). The
+ special value "0" completely disables unmapping.
GC_FIND_LEAK - Turns on GC_find_leak and thus leak detection. Forces a
- collection at program termination to detect leaks that would
- otherwise occur after the last GC.
+ collection at program termination to detect leaks that would
+ otherwise occur after the last GC.
GC_ALL_INTERIOR_POINTERS - Turns on GC_all_interior_pointers and thus interior
- pointer recognition.
+ pointer recognition.
GC_DONT_GC - Turns off garbage collection. Use cautiously.
GC_TRACE=addr - Intended for collector debugging. Requires that the collector
- have been built with ENABLE_TRACE defined. Causes the debugger
- to log information about the tracing of address ranges
- containing addr. Typically addr is the address that contains
- a pointer to an object that mysteriously failed to get marked.
- Addr must be specified as a hexadecimal integer.
+ have been built with ENABLE_TRACE defined. Causes the debugger
+ to log information about the tracing of address ranges
+ containing addr. Typically addr is the address that contains
+ a pointer to an object that mysteriously failed to get marked.
+ Addr must be specified as a hexadecimal integer.
diff --git a/doc/README.macros b/doc/README.macros
index 47d526db..b1444c4e 100644
--- a/doc/README.macros
+++ b/doc/README.macros
@@ -1,7 +1,7 @@
The collector uses a large amount of conditional compilation in order to
deal with platform dependencies. This violates a number of known coding
standards. On the other hand, it seems to be the only practical way to
-support this many platforms without excessive code duplication.
+support this many platforms without excessive code duplication.
A few guidelines have mostly been followed in order to keep this manageable:
@@ -24,71 +24,68 @@ either include/private/gcconfig.h or in Makefile.direct. Here is an attempt
at defining some of the remainder: (Thanks to Walter Bright for suggesting
this. This is a work in progress)
-MACRO EXPLANATION
------ -----------
+MACRO EXPLANATION
+----- -----------
-GC_DEBUG Tested by gc.h. Causes all-upper-case macros to
- expand to calls to debug versions of collector routines.
+GC_DEBUG Tested by gc.h. Causes all-upper-case macros to
+ expand to calls to debug versions of collector routines.
GC_NO_THREAD_REDIRECTS Tested by gc.h. Prevents redirection of thread
- creation routines etc. to GC_ versions. Requires the
- programmer to explicitly handle thread registration.
+ creation routines etc. to GC_ versions. Requires the
+ programmer to explicitly handle thread registration.
GC_NO_THREAD_DECLS Tested by gc.h. MS Windows only. Do not declare
- Windows thread creation routines and do not include windows.h.
+ Windows thread creation routines and do not include windows.h.
-__DMC__ Always #define'd by the Digital Mars compiler. Expands
- to the compiler version number in hex, i.e. 0x810 is
- version 8.1b0
+__DMC__ Always #define'd by the Digital Mars compiler. Expands
+ to the compiler version number in hex, i.e. 0x810 is
+ version 8.1b0
_ENABLE_ARRAYNEW
- #define'd by the Digital Mars C++ compiler when
- operator new[] and delete[] are separately
- overloadable. Used in gc_cpp.h.
-
-_MSC_VER Expands to the Visual C++ compiler version. Assumed to
- not be defined for other compilers (at least if they behave
- appreciably differently).
-
-_DLL Defined by Visual C++ (and mingw-w64) if runtime dynamic
- libraries are in use. Used (only if none of GC_DLL,
- GC_NOT_DLL, __GNUC__ are defined) to test whether
- __declspec(dllimport) needs to be added to declarations
- to support the case in which the collector is in a DLL.
-
-GC_DLL Defined by user if dynamic libraries are being built
- or used. Also set by gc.h if _DLL is defined while
- GC_NOT_DLL and __GNUC__ are both undefined.
- This is the macro that is tested internally to determine
- whether the GC is in its own dynamic library. May need
- to be set by clients before including gc.h. Note that
- inside the GC implementation it indicates that the
- collector is in its own dynamic library, should export
- its symbols, etc. But in clients it indicates that the
- GC resides in a different DLL, its entry points should
- be referenced accordingly, and precautions may need to
- be taken to properly deal with statically allocated
- variables in the main program. Used for MS Windows.
- Also used by GCC v4+ (only when the dynamic shared library
- is being built) in conjunction with "-fvisibility=hidden"
- option to hide internally used symbols.
-
-GC_NOT_DLL User-settable macro that overrides _DLL, e.g. if runtime
- dynamic libraries are used, but the collector is in a static
- library.
-
-SUNOS5SIGS Solaris-like signal handling. This is probably misnamed,
- since it really doesn't guarantee much more than Posix.
- Currently set only for Solaris2.X, HPUX, and DRSNX. Should
- probably be set for some other platforms.
-
-PCR Set if the collector is being built as part of the Xerox
- Portable Common Runtime.
+ #define'd by the Digital Mars C++ compiler when
+ operator new[] and delete[] are separately
+ overloadable. Used in gc_cpp.h.
+
+_MSC_VER Expands to the Visual C++ compiler version. Assumed to
+ not be defined for other compilers (at least if they behave
+ appreciably differently).
+
+_DLL Defined by Visual C++ (and mingw-w64) if runtime dynamic
+ libraries are in use. Used (only if none of GC_DLL,
+ GC_NOT_DLL, __GNUC__ are defined) to test whether
+ __declspec(dllimport) needs to be added to declarations
+ to support the case in which the collector is in a DLL.
+
+GC_DLL Defined by user if dynamic libraries are being built
+ or used. Also set by gc.h if _DLL is defined while
+ GC_NOT_DLL and __GNUC__ are both undefined.
+ This is the macro that is tested internally to determine
+ whether the GC is in its own dynamic library. May need
+ to be set by clients before including gc.h. Note that
+ inside the GC implementation it indicates that the
+ collector is in its own dynamic library, should export
+ its symbols, etc. But in clients it indicates that the
+ GC resides in a different DLL, its entry points should
+ be referenced accordingly, and precautions may need to
+ be taken to properly deal with statically allocated
+ variables in the main program. Used for MS Windows.
+ Also used by GCC v4+ (only when the dynamic shared library
+ is being built) in conjunction with "-fvisibility=hidden"
+ option to hide internally used symbols.
+
+GC_NOT_DLL User-settable macro that overrides _DLL, e.g. if runtime
+ dynamic libraries are used, but the collector is in a static
+ library.
+
+SUNOS5SIGS Solaris-like signal handling. This is probably misnamed,
+ since it really doesn't guarantee much more than Posix.
+ Currently set only for Solaris2.X, HPUX, and DRSNX. Should
+ probably be set for some other platforms.
+
+PCR Set if the collector is being built as part of the Xerox
+ Portable Common Runtime.
USE_COMPILER_TLS Assume the existence of __thread-style thread-local
- storage. Set automatically for thread-local allocation with
- the HP/UX vendor compiler. Usable with gcc on sufficiently
- up-to-date ELF platforms.
-
-
-
+ storage. Set automatically for thread-local allocation with
+ the HP/UX vendor compiler. Usable with gcc on sufficiently
+ up-to-date ELF platforms.
diff --git a/doc/README.win32 b/doc/README.win32
index db019f29..d31ebb3b 100644
--- a/doc/README.win32
+++ b/doc/README.win32
@@ -228,4 +228,3 @@ explicitly set GC_WIN32_PTHREADS. Use -DPTW32_STATIC_LIB for the static
threads library. Note that the DEBUG_WIN32_PTHREADS support in
win32_threads.c is currently broken and looking for someone to debug it.
(This information and the port came from Romano Paolo Tenca).
-
diff --git a/dyn_load.c b/dyn_load.c
index fba9ecb2..4056e356 100644
--- a/dyn_load.c
+++ b/dyn_load.c
@@ -50,10 +50,10 @@
# undef GC_must_restore_redefined_dlopen
# endif
-/* A user-supplied routine (custom filter) that might be called to */
-/* determine whether a DSO really needs to be scanned by the GC. */
-/* 0 means no filter installed. May be unused on some platforms. */
-/* FIXME: Add filter support for more platforms. */
+/* A user-supplied routine (custom filter) that might be called to */
+/* determine whether a DSO really needs to be scanned by the GC. */
+/* 0 means no filter installed. May be unused on some platforms. */
+/* FIXME: Add filter support for more platforms. */
STATIC GC_has_static_roots_func GC_has_static_roots = 0;
#if (defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(MSWINCE)) \
@@ -130,19 +130,19 @@ GC_FirstDLOpenedLinkMap(void)
struct r_debug *r;
static struct link_map * cachedResult = 0;
static ElfW(Dyn) *dynStructureAddr = 0;
- /* BTL: added to avoid Solaris 5.3 ld.so _DYNAMIC bug */
+ /* BTL: added to avoid Solaris 5.3 ld.so _DYNAMIC bug */
# ifdef SUNOS53_SHARED_LIB
- /* BTL: Avoid the Solaris 5.3 bug that _DYNAMIC isn't being set */
- /* up properly in dynamically linked .so's. This means we have */
- /* to use its value in the set of original object files loaded */
- /* at program startup. */
- if( dynStructureAddr == 0 ) {
- void* startupSyms = dlopen(0, RTLD_LAZY);
- dynStructureAddr = (ElfW(Dyn)*)dlsym(startupSyms, "_DYNAMIC");
- }
+ /* BTL: Avoid the Solaris 5.3 bug that _DYNAMIC isn't being set */
+ /* up properly in dynamically linked .so's. This means we have */
+ /* to use its value in the set of original object files loaded */
+ /* at program startup. */
+ if( dynStructureAddr == 0 ) {
+ void* startupSyms = dlopen(0, RTLD_LAZY);
+ dynStructureAddr = (ElfW(Dyn)*)dlsym(startupSyms, "_DYNAMIC");
+ }
# else
- dynStructureAddr = &_DYNAMIC;
+ dynStructureAddr = &_DYNAMIC;
# endif
if( dynStructureAddr == 0) {
@@ -170,27 +170,27 @@ GC_FirstDLOpenedLinkMap(void)
# endif
# if defined(SOLARISDL)
-/* Add dynamic library data sections to the root set. */
+/* Add dynamic library data sections to the root set. */
# if !defined(PCR) && !defined(GC_SOLARIS_THREADS) && defined(THREADS)
- --> fix mutual exclusion with dlopen
+ --> fix mutual exclusion with dlopen
# endif
# ifndef USE_PROC_FOR_LIBRARIES
void GC_register_dynamic_libraries(void)
{
struct link_map *lm = GC_FirstDLOpenedLinkMap();
-
+
for (lm = GC_FirstDLOpenedLinkMap();
lm != (struct link_map *) 0; lm = lm->l_next)
{
- ElfW(Ehdr) * e;
+ ElfW(Ehdr) * e;
ElfW(Phdr) * p;
unsigned long offset;
char * start;
register int i;
-
- e = (ElfW(Ehdr) *) lm->l_addr;
+
+ e = (ElfW(Ehdr) *) lm->l_addr;
p = ((ElfW(Phdr) *)(((char *)(e)) + e->e_phoff));
offset = ((unsigned long)(lm->l_addr));
for( i = 0; i < (int)(e->e_phnum); ((i++),(p++)) ) {
@@ -209,7 +209,7 @@ void GC_register_dynamic_libraries(void)
default:
break;
}
- }
+ }
}
}
@@ -232,21 +232,21 @@ void GC_register_dynamic_libraries(void)
#define MAPS_BUF_SIZE (32*1024)
extern ssize_t GC_repeat_read(int fd, char *buf, size_t count);
- /* Repeatedly read until buffer is filled, or EOF is encountered */
- /* Defined in os_dep.c. */
+ /* Repeatedly read until buffer is filled, or EOF is encountered */
+ /* Defined in os_dep.c. */
char *GC_parse_map_entry(char *buf_ptr, ptr_t *start, ptr_t *end,
char **prot, unsigned int *maj_dev,
- char **mapping_name);
+ char **mapping_name);
char *GC_get_maps(void);
- /* From os_dep.c */
+ /* From os_dep.c */
-/* Sort an array of HeapSects by start address. */
-/* Unfortunately at least some versions of */
+/* Sort an array of HeapSects by start address. */
+/* Unfortunately at least some versions of */
/* Linux qsort end up calling malloc by way of sysconf, and hence can't */
-/* be used in the collector. Hence we roll our own. Should be */
-/* reasonably fast if the array is already mostly sorted, as we expect */
-/* it to be. */
+/* be used in the collector. Hence we roll our own. Should be */
+/* reasonably fast if the array is already mostly sorted, as we expect */
+/* it to be. */
static void sort_heap_sects(struct HeapSect *base, size_t number_of_elements)
{
signed_word n = (signed_word)number_of_elements;
@@ -255,16 +255,16 @@ static void sort_heap_sects(struct HeapSect *base, size_t number_of_elements)
while (nsorted < n) {
while (nsorted < n &&
- base[nsorted-1].hs_start < base[nsorted].hs_start)
+ base[nsorted-1].hs_start < base[nsorted].hs_start)
++nsorted;
if (nsorted == n) break;
GC_ASSERT(base[nsorted-1].hs_start > base[nsorted].hs_start);
i = nsorted - 1;
while (i >= 0 && base[i].hs_start > base[i+1].hs_start) {
struct HeapSect tmp = base[i];
- base[i] = base[i+1];
- base[i+1] = tmp;
- --i;
+ base[i] = base[i+1];
+ base[i+1] = tmp;
+ --i;
}
GC_ASSERT(base[nsorted-1].hs_start < base[nsorted].hs_start);
++nsorted;
@@ -286,73 +286,73 @@ STATIC word GC_register_map_entries(char *maps)
sort_heap_sects(GC_our_memory, GC_n_memory);
least_ha = GC_our_memory[0].hs_start;
greatest_ha = GC_our_memory[GC_n_memory-1].hs_start
- + GC_our_memory[GC_n_memory-1].hs_bytes;
+ + GC_our_memory[GC_n_memory-1].hs_bytes;
for (;;) {
buf_ptr = GC_parse_map_entry(buf_ptr, &start, &end, &prot, &maj_dev, 0);
- if (buf_ptr == NULL) return 1;
- if (prot[1] == 'w') {
- /* This is a writable mapping. Add it to */
- /* the root set unless it is already otherwise */
- /* accounted for. */
- if (start <= GC_stackbottom && end >= GC_stackbottom) {
- /* Stack mapping; discard */
- continue;
- }
-# ifdef THREADS
- /* This may fail, since a thread may already be */
- /* unregistered, but its thread stack may still be there. */
- /* That can fail because the stack may disappear while */
- /* we're marking. Thus the marker is, and has to be */
- /* prepared to recover from segmentation faults. */
-
- if (GC_segment_is_thread_stack(start, end)) continue;
-
- /* FIXME: NPTL squirrels */
- /* away pointers in pieces of the stack segment that we */
- /* don't scan. We work around this */
- /* by treating anything allocated by libpthread as */
- /* uncollectable, as we do in some other cases. */
- /* A specifically identified problem is that */
- /* thread stacks contain pointers to dynamic thread */
- /* vectors, which may be reused due to thread caching. */
- /* They may not be marked if the thread is still live. */
- /* This specific instance should be addressed by */
- /* INCLUDE_LINUX_THREAD_DESCR, but that doesn't quite */
- /* seem to suffice. */
- /* We currently trace entire thread stacks, if they are */
- /* are currently cached but unused. This is */
- /* very suboptimal for performance reasons. */
-# endif
- /* We no longer exclude the main data segment. */
- if (end <= least_ha || start >= greatest_ha) {
- /* The easy case; just trace entire segment */
- GC_add_roots_inner((char *)start, (char *)end, TRUE);
- continue;
- }
- /* Add sections that dont belong to us. */
- i = 0;
- while (GC_our_memory[i].hs_start + GC_our_memory[i].hs_bytes
- < start)
- ++i;
- GC_ASSERT(i < GC_n_memory);
- if (GC_our_memory[i].hs_start <= start) {
- start = GC_our_memory[i].hs_start
- + GC_our_memory[i].hs_bytes;
- ++i;
- }
- while (i < GC_n_memory && GC_our_memory[i].hs_start < end
- && start < end) {
- if ((char *)start < GC_our_memory[i].hs_start)
- GC_add_roots_inner((char *)start,
- GC_our_memory[i].hs_start, TRUE);
- start = GC_our_memory[i].hs_start
- + GC_our_memory[i].hs_bytes;
- ++i;
- }
- if (start < end)
- GC_add_roots_inner((char *)start, (char *)end, TRUE);
- }
+ if (buf_ptr == NULL) return 1;
+ if (prot[1] == 'w') {
+ /* This is a writable mapping. Add it to */
+ /* the root set unless it is already otherwise */
+ /* accounted for. */
+ if (start <= GC_stackbottom && end >= GC_stackbottom) {
+ /* Stack mapping; discard */
+ continue;
+ }
+# ifdef THREADS
+ /* This may fail, since a thread may already be */
+ /* unregistered, but its thread stack may still be there. */
+ /* That can fail because the stack may disappear while */
+ /* we're marking. Thus the marker is, and has to be */
+ /* prepared to recover from segmentation faults. */
+
+ if (GC_segment_is_thread_stack(start, end)) continue;
+
+ /* FIXME: NPTL squirrels */
+ /* away pointers in pieces of the stack segment that we */
+ /* don't scan. We work around this */
+ /* by treating anything allocated by libpthread as */
+ /* uncollectable, as we do in some other cases. */
+ /* A specifically identified problem is that */
+ /* thread stacks contain pointers to dynamic thread */
+ /* vectors, which may be reused due to thread caching. */
+ /* They may not be marked if the thread is still live. */
+ /* This specific instance should be addressed by */
+ /* INCLUDE_LINUX_THREAD_DESCR, but that doesn't quite */
+ /* seem to suffice. */
+ /* We currently trace entire thread stacks, if they are */
+ /* are currently cached but unused. This is */
+ /* very suboptimal for performance reasons. */
+# endif
+ /* We no longer exclude the main data segment. */
+ if (end <= least_ha || start >= greatest_ha) {
+ /* The easy case; just trace entire segment */
+ GC_add_roots_inner((char *)start, (char *)end, TRUE);
+ continue;
+ }
+ /* Add sections that dont belong to us. */
+ i = 0;
+ while (GC_our_memory[i].hs_start + GC_our_memory[i].hs_bytes
+ < start)
+ ++i;
+ GC_ASSERT(i < GC_n_memory);
+ if (GC_our_memory[i].hs_start <= start) {
+ start = GC_our_memory[i].hs_start
+ + GC_our_memory[i].hs_bytes;
+ ++i;
+ }
+ while (i < GC_n_memory && GC_our_memory[i].hs_start < end
+ && start < end) {
+ if ((char *)start < GC_our_memory[i].hs_start)
+ GC_add_roots_inner((char *)start,
+ GC_our_memory[i].hs_start, TRUE);
+ start = GC_our_memory[i].hs_start
+ + GC_our_memory[i].hs_bytes;
+ ++i;
+ }
+ if (start < end)
+ GC_add_roots_inner((char *)start, (char *)end, TRUE);
+ }
}
return 1;
}
@@ -368,22 +368,22 @@ GC_bool GC_register_main_static_data(void)
{
return FALSE;
}
-
+
# define HAVE_REGISTER_MAIN_STATIC_DATA
#endif /* USE_PROC_FOR_LIBRARIES */
#if !defined(USE_PROC_FOR_LIBRARIES)
-/* The following is the preferred way to walk dynamic libraries */
-/* For glibc 2.2.4+. Unfortunately, it doesn't work for older */
-/* versions. Thanks to Jakub Jelinek for most of the code. */
+/* The following is the preferred way to walk dynamic libraries */
+/* For glibc 2.2.4+. Unfortunately, it doesn't work for older */
+/* versions. Thanks to Jakub Jelinek for most of the code. */
# if (defined(LINUX) || defined (__GLIBC__)) /* Are others OK here, too? */ \
&& (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ > 2) \
- || (__GLIBC__ == 2 && __GLIBC_MINOR__ == 2 && defined(DT_CONFIG)))
-/* We have the header files for a glibc that includes dl_iterate_phdr. */
+ || (__GLIBC__ == 2 && __GLIBC_MINOR__ == 2 && defined(DT_CONFIG)))
+/* We have the header files for a glibc that includes dl_iterate_phdr. */
/* It may still not be available in the library on the target system. */
-/* Thus we also treat it as a weak symbol. */
+/* Thus we also treat it as a weak symbol. */
#define HAVE_DL_ITERATE_PHDR
#pragma weak dl_iterate_phdr
#endif
@@ -398,12 +398,12 @@ GC_bool GC_register_main_static_data(void)
# ifdef PT_GNU_RELRO
-/* Instead of registering PT_LOAD sections directly, we keep them */
-/* in a temporary list, and filter them by excluding PT_GNU_RELRO */
-/* segments. Processing PT_GNU_RELRO sections with */
-/* GC_exclude_static_roots instead would be superficially cleaner. But */
-/* it runs into trouble if a client registers an overlapping segment, */
-/* which unfortunately seems quite possible. */
+/* Instead of registering PT_LOAD sections directly, we keep them */
+/* in a temporary list, and filter them by excluding PT_GNU_RELRO */
+/* segments. Processing PT_GNU_RELRO sections with */
+/* GC_exclude_static_roots instead would be superficially cleaner. But */
+/* it runs into trouble if a client registers an overlapping segment, */
+/* which unfortunately seems quite possible. */
#define MAX_LOAD_SEGS MAX_ROOT_SETS
@@ -411,7 +411,7 @@ static struct load_segment {
ptr_t start;
ptr_t end;
/* Room for a second segment if we remove a RELRO segment */
- /* from the middle. */
+ /* from the middle. */
ptr_t start2;
ptr_t end2;
} load_segs[MAX_LOAD_SEGS];
@@ -421,7 +421,7 @@ static int n_load_segs;
# endif /* PT_GNU_RELRO */
static int GC_register_dynlib_callback(struct dl_phdr_info * info,
- size_t size, void * ptr)
+ size_t size, void * ptr)
{
const ElfW(Phdr) * p;
ptr_t start, end;
@@ -437,65 +437,65 @@ static int GC_register_dynlib_callback(struct dl_phdr_info * info,
switch( p->p_type ) {
# ifdef PT_GNU_RELRO
case PT_GNU_RELRO:
- /* This entry is known to be constant and will eventually be remapped
- read-only. However, the address range covered by this entry is
- typically a subset of a previously encountered `LOAD' segment, so
- we need to exclude it. */
- {
- int j;
-
- start = ((ptr_t)(p->p_vaddr)) + info->dlpi_addr;
- end = start + p->p_memsz;
- for (j = n_load_segs; --j >= 0; ) {
- if (start >= load_segs[j].start && start < load_segs[j].end) {
- if (load_segs[j].start2 != 0) {
- WARN("More than one GNU_RELRO segment per load seg\n",0);
- } else {
- GC_ASSERT(end <= load_segs[j].end);
- /* Remove from the existing load segment */
- load_segs[j].end2 = load_segs[j].end;
- load_segs[j].end = start;
- load_segs[j].start2 = end;
- }
- break;
- }
- if (j == 0) WARN("Failed to find PT_GNU_RELRO segment"
- " inside PT_LOAD region", 0);
- }
- }
-
- break;
+ /* This entry is known to be constant and will eventually be remapped
+ read-only. However, the address range covered by this entry is
+ typically a subset of a previously encountered `LOAD' segment, so
+ we need to exclude it. */
+ {
+ int j;
+
+ start = ((ptr_t)(p->p_vaddr)) + info->dlpi_addr;
+ end = start + p->p_memsz;
+ for (j = n_load_segs; --j >= 0; ) {
+ if (start >= load_segs[j].start && start < load_segs[j].end) {
+ if (load_segs[j].start2 != 0) {
+ WARN("More than one GNU_RELRO segment per load seg\n",0);
+ } else {
+ GC_ASSERT(end <= load_segs[j].end);
+ /* Remove from the existing load segment */
+ load_segs[j].end2 = load_segs[j].end;
+ load_segs[j].end = start;
+ load_segs[j].start2 = end;
+ }
+ break;
+ }
+ if (j == 0) WARN("Failed to find PT_GNU_RELRO segment"
+ " inside PT_LOAD region", 0);
+ }
+ }
+
+ break;
# endif
case PT_LOAD:
- {
- GC_has_static_roots_func callback = GC_has_static_roots;
- if( !(p->p_flags & PF_W) ) break;
- start = ((char *)(p->p_vaddr)) + info->dlpi_addr;
- end = start + p->p_memsz;
-
- if (callback != 0 && !callback(info->dlpi_name, start, p->p_memsz))
- break;
+ {
+ GC_has_static_roots_func callback = GC_has_static_roots;
+ if( !(p->p_flags & PF_W) ) break;
+ start = ((char *)(p->p_vaddr)) + info->dlpi_addr;
+ end = start + p->p_memsz;
+
+ if (callback != 0 && !callback(info->dlpi_name, start, p->p_memsz))
+ break;
# ifdef PT_GNU_RELRO
- if (n_load_segs >= MAX_LOAD_SEGS) ABORT("Too many PT_LOAD segs");
- load_segs[n_load_segs].start = start;
- load_segs[n_load_segs].end = end;
- load_segs[n_load_segs].start2 = 0;
- load_segs[n_load_segs].end2 = 0;
- ++n_load_segs;
-# else
- GC_add_roots_inner(start, end, TRUE);
+ if (n_load_segs >= MAX_LOAD_SEGS) ABORT("Too many PT_LOAD segs");
+ load_segs[n_load_segs].start = start;
+ load_segs[n_load_segs].end = end;
+ load_segs[n_load_segs].start2 = 0;
+ load_segs[n_load_segs].end2 = 0;
+ ++n_load_segs;
+# else
+ GC_add_roots_inner(start, end, TRUE);
# endif /* PT_GNU_RELRO */
- }
+ }
break;
default:
- break;
+ break;
}
}
- * (int *)ptr = 1; /* Signal that we were called */
+ * (int *)ptr = 1; /* Signal that we were called */
return 0;
-}
+}
/* Return TRUE if we succeed, FALSE if dl_iterate_phdr wasn't there. */
@@ -507,30 +507,30 @@ STATIC GC_bool GC_register_dynamic_libraries_dl_iterate_phdr(void)
# ifdef PT_GNU_RELRO
static GC_bool excluded_segs = FALSE;
n_load_segs = 0;
- if (!excluded_segs) {
+ if (!excluded_segs) {
GC_exclude_static_roots_inner((ptr_t)load_segs,
- (ptr_t)load_segs + sizeof(load_segs));
- excluded_segs = TRUE;
+ (ptr_t)load_segs + sizeof(load_segs));
+ excluded_segs = TRUE;
}
# endif
dl_iterate_phdr(GC_register_dynlib_callback, &did_something);
if (did_something) {
# ifdef PT_GNU_RELRO
- size_t i;
-
- for (i = 0; i < n_load_segs; ++i) {
- if (load_segs[i].end > load_segs[i].start) {
- GC_add_roots_inner(load_segs[i].start, load_segs[i].end, TRUE);
- }
- if (load_segs[i].end2 > load_segs[i].start2) {
- GC_add_roots_inner(load_segs[i].start2, load_segs[i].end2, TRUE);
- }
+ size_t i;
+
+ for (i = 0; i < n_load_segs; ++i) {
+ if (load_segs[i].end > load_segs[i].start) {
+ GC_add_roots_inner(load_segs[i].start, load_segs[i].end, TRUE);
+ }
+ if (load_segs[i].end2 > load_segs[i].start2) {
+ GC_add_roots_inner(load_segs[i].start2, load_segs[i].end2, TRUE);
+ }
}
# endif
} else {
- /* dl_iterate_phdr may forget the static data segment in */
- /* statically linked executables. */
- GC_add_roots_inner(DATASTART, (char *)(DATAEND), TRUE);
+ /* dl_iterate_phdr may forget the static data segment in */
+ /* statically linked executables. */
+ GC_add_roots_inner(DATASTART, (char *)(DATAEND), TRUE);
# if defined(DATASTART2)
GC_add_roots_inner(DATASTART2, (char *)(DATAEND2), TRUE);
# endif
@@ -553,12 +553,12 @@ GC_bool GC_register_main_static_data(void)
# else /* !LINUX || version(glibc) < 2.2.4 */
/* Dynamic loading code for Linux running ELF. Somewhat tested on
- * Linux/x86, untested but hopefully should work on Linux/Alpha.
+ * Linux/x86, untested but hopefully should work on Linux/Alpha.
* This code was derived from the Solaris/ELF support. Thanks to
* whatever kind soul wrote that. - Patrick Bridges */
/* This doesn't necessarily work in all cases, e.g. with preloaded
- * dynamic libraries. */
+ * dynamic libraries. */
#if defined(NETBSD)
# include <sys/exec_elf.h>
@@ -611,24 +611,24 @@ GC_FirstDLOpenedLinkMap(void)
void GC_register_dynamic_libraries(void)
{
struct link_map *lm;
-
+
# ifdef HAVE_DL_ITERATE_PHDR
if (GC_register_dynamic_libraries_dl_iterate_phdr()) {
- return;
+ return;
}
# endif
lm = GC_FirstDLOpenedLinkMap();
for (lm = GC_FirstDLOpenedLinkMap();
lm != (struct link_map *) 0; lm = lm->l_next)
{
- ElfW(Ehdr) * e;
+ ElfW(Ehdr) * e;
ElfW(Phdr) * p;
unsigned long offset;
char * start;
register int i;
-
- e = (ElfW(Ehdr) *) lm->l_addr;
+
+ e = (ElfW(Ehdr) *) lm->l_addr;
p = ((ElfW(Phdr) *)(((char *)(e)) + e->e_phoff));
offset = ((unsigned long)(lm->l_addr));
for( i = 0; i < (int)(e->e_phnum); ((i++),(p++)) ) {
@@ -643,7 +643,7 @@ void GC_register_dynamic_libraries(void)
default:
break;
}
- }
+ }
}
}
@@ -664,20 +664,20 @@ void GC_register_dynamic_libraries(void)
#endif
extern void * GC_roots_present(ptr_t);
- /* The type is a lie, since the real type doesn't make sense here, */
- /* and we only test for NULL. */
+ /* The type is a lie, since the real type doesn't make sense here, */
+ /* and we only test for NULL. */
-/* We use /proc to track down all parts of the address space that are */
-/* mapped by the process, and throw out regions we know we shouldn't */
-/* worry about. This may also work under other SVR4 variants. */
+/* We use /proc to track down all parts of the address space that are */
+/* mapped by the process, and throw out regions we know we shouldn't */
+/* worry about. This may also work under other SVR4 variants. */
void GC_register_dynamic_libraries(void)
{
static int fd = -1;
char buf[30];
static prmap_t * addr_map = 0;
- static int current_sz = 0; /* Number of records currently in addr_map */
- static int needed_sz; /* Required size of addr_map */
+ static int current_sz = 0; /* Number of records currently in addr_map */
+ static int needed_sz; /* Required size of addr_map */
int i;
long flags;
ptr_t start;
@@ -691,93 +691,93 @@ void GC_register_dynamic_libraries(void)
if (fd < 0) {
sprintf(buf, "/proc/%ld", (long)getpid());
- /* The above generates a lint complaint, since pid_t varies. */
- /* It's unclear how to improve this. */
+ /* The above generates a lint complaint, since pid_t varies. */
+ /* It's unclear how to improve this. */
fd = open(buf, O_RDONLY);
if (fd < 0) {
- ABORT("/proc open failed");
+ ABORT("/proc open failed");
}
}
if (ioctl(fd, PIOCNMAP, &needed_sz) < 0) {
- GC_err_printf("fd = %d, errno = %d\n", fd, errno);
- ABORT("/proc PIOCNMAP ioctl failed");
+ GC_err_printf("fd = %d, errno = %d\n", fd, errno);
+ ABORT("/proc PIOCNMAP ioctl failed");
}
if (needed_sz >= current_sz) {
current_sz = needed_sz * 2 + 1;
- /* Expansion, plus room for 0 record */
+ /* Expansion, plus room for 0 record */
addr_map = (prmap_t *)GC_scratch_alloc((word)
- (current_sz * sizeof(prmap_t)));
+ (current_sz * sizeof(prmap_t)));
}
if (ioctl(fd, PIOCMAP, addr_map) < 0) {
GC_err_printf("fd = %d, errno = %d, needed_sz = %d, addr_map = %p\n",
fd, errno, needed_sz, addr_map);
- ABORT("/proc PIOCMAP ioctl failed");
+ ABORT("/proc PIOCMAP ioctl failed");
};
if (GC_n_heap_sects > 0) {
- heap_end = GC_heap_sects[GC_n_heap_sects-1].hs_start
- + GC_heap_sects[GC_n_heap_sects-1].hs_bytes;
- if (heap_end < GC_scratch_last_end_ptr) heap_end = GC_scratch_last_end_ptr;
+ heap_end = GC_heap_sects[GC_n_heap_sects-1].hs_start
+ + GC_heap_sects[GC_n_heap_sects-1].hs_bytes;
+ if (heap_end < GC_scratch_last_end_ptr) heap_end = GC_scratch_last_end_ptr;
}
for (i = 0; i < needed_sz; i++) {
flags = addr_map[i].pr_mflags;
- if ((flags & (MA_BREAK | MA_STACK | MA_PHYS
- | MA_FETCHOP | MA_NOTCACHED)) != 0) goto irrelevant;
+ if ((flags & (MA_BREAK | MA_STACK | MA_PHYS
+ | MA_FETCHOP | MA_NOTCACHED)) != 0) goto irrelevant;
if ((flags & (MA_READ | MA_WRITE)) != (MA_READ | MA_WRITE))
goto irrelevant;
- /* The latter test is empirically useless in very old Irix */
- /* versions. Other than the */
- /* main data and stack segments, everything appears to be */
- /* mapped readable, writable, executable, and shared(!!). */
- /* This makes no sense to me. - HB */
+ /* The latter test is empirically useless in very old Irix */
+ /* versions. Other than the */
+ /* main data and stack segments, everything appears to be */
+ /* mapped readable, writable, executable, and shared(!!). */
+ /* This makes no sense to me. - HB */
start = (ptr_t)(addr_map[i].pr_vaddr);
if (GC_roots_present(start)) goto irrelevant;
if (start < heap_end && start >= heap_start)
- goto irrelevant;
-# ifdef MMAP_STACKS
- if (GC_is_thread_stack(start)) goto irrelevant;
-# endif /* MMAP_STACKS */
+ goto irrelevant;
+# ifdef MMAP_STACKS
+ if (GC_is_thread_stack(start)) goto irrelevant;
+# endif /* MMAP_STACKS */
limit = start + addr_map[i].pr_size;
- /* The following seemed to be necessary for very old versions */
- /* of Irix, but it has been reported to discard relevant */
- /* segments under Irix 6.5. */
-# ifndef IRIX6
- if (addr_map[i].pr_off == 0 && strncmp(start, ELFMAG, 4) == 0) {
- /* Discard text segments, i.e. 0-offset mappings against */
- /* executable files which appear to have ELF headers. */
- caddr_t arg;
- int obj;
-# define MAP_IRR_SZ 10
- static ptr_t map_irr[MAP_IRR_SZ];
- /* Known irrelevant map entries */
- static int n_irr = 0;
- struct stat buf;
- register int i;
-
- for (i = 0; i < n_irr; i++) {
- if (map_irr[i] == start) goto irrelevant;
- }
- arg = (caddr_t)start;
- obj = ioctl(fd, PIOCOPENM, &arg);
- if (obj >= 0) {
- fstat(obj, &buf);
- close(obj);
- if ((buf.st_mode & 0111) != 0) {
- if (n_irr < MAP_IRR_SZ) {
- map_irr[n_irr++] = start;
- }
- goto irrelevant;
- }
- }
- }
-# endif /* !IRIX6 */
+ /* The following seemed to be necessary for very old versions */
+ /* of Irix, but it has been reported to discard relevant */
+ /* segments under Irix 6.5. */
+# ifndef IRIX6
+ if (addr_map[i].pr_off == 0 && strncmp(start, ELFMAG, 4) == 0) {
+ /* Discard text segments, i.e. 0-offset mappings against */
+ /* executable files which appear to have ELF headers. */
+ caddr_t arg;
+ int obj;
+# define MAP_IRR_SZ 10
+ static ptr_t map_irr[MAP_IRR_SZ];
+ /* Known irrelevant map entries */
+ static int n_irr = 0;
+ struct stat buf;
+ register int i;
+
+ for (i = 0; i < n_irr; i++) {
+ if (map_irr[i] == start) goto irrelevant;
+ }
+ arg = (caddr_t)start;
+ obj = ioctl(fd, PIOCOPENM, &arg);
+ if (obj >= 0) {
+ fstat(obj, &buf);
+ close(obj);
+ if ((buf.st_mode & 0111) != 0) {
+ if (n_irr < MAP_IRR_SZ) {
+ map_irr[n_irr++] = start;
+ }
+ goto irrelevant;
+ }
+ }
+ }
+# endif /* !IRIX6 */
GC_add_roots_inner(start, limit, TRUE);
irrelevant: ;
}
/* Dont keep cached descriptor, for now. Some kernels don't like us */
- /* to keep a /proc file descriptor around during kill -9. */
- if (close(fd) < 0) ABORT("Couldnt close /proc file");
- fd = -1;
+ /* to keep a /proc file descriptor around during kill -9. */
+ if (close(fd) < 0) ABORT("Couldnt close /proc file");
+ fd = -1;
}
# endif /* USE_PROC || IRIX5 */
@@ -789,28 +789,28 @@ void GC_register_dynamic_libraries(void)
# include <windows.h>
# include <stdlib.h>
- /* We traverse the entire address space and register all segments */
- /* that could possibly have been written to. */
-
+ /* We traverse the entire address space and register all segments */
+ /* that could possibly have been written to. */
+
extern GC_bool GC_is_heap_base (ptr_t p);
# ifdef GC_WIN32_THREADS
extern void GC_get_next_stack(char *start, char * limit, char **lo,
- char **hi);
+ char **hi);
STATIC void GC_cond_add_roots(char *base, char * limit)
{
char * curr_base = base;
char * next_stack_lo;
char * next_stack_hi;
-
+
if (base == limit) return;
for(;;) {
- GC_get_next_stack(curr_base, limit, &next_stack_lo, &next_stack_hi);
- if (next_stack_lo >= limit) break;
- if (next_stack_lo > curr_base)
- GC_add_roots_inner(curr_base, next_stack_lo, TRUE);
- curr_base = next_stack_hi;
+ GC_get_next_stack(curr_base, limit, &next_stack_lo, &next_stack_hi);
+ if (next_stack_lo >= limit) break;
+ if (next_stack_lo > curr_base)
+ GC_add_roots_inner(curr_base, next_stack_lo, TRUE);
+ curr_base = next_stack_hi;
}
if (curr_base < limit) GC_add_roots_inner(curr_base, limit, TRUE);
}
@@ -819,11 +819,11 @@ void GC_register_dynamic_libraries(void)
{
char dummy;
char * stack_top
- = (char *) ((word)(&dummy) & ~(GC_sysinfo.dwAllocationGranularity-1));
+ = (char *) ((word)(&dummy) & ~(GC_sysinfo.dwAllocationGranularity-1));
if (base == limit) return;
if (limit > stack_top && base < GC_stackbottom) {
- /* Part of the stack; ignore it. */
- return;
+ /* Part of the stack; ignore it. */
+ return;
}
GC_add_roots_inner(base, limit, TRUE);
}
@@ -843,32 +843,32 @@ void GC_register_dynamic_libraries(void)
return GC_no_win32_dlls;
}
# endif /* win32 */
-
+
# define HAVE_REGISTER_MAIN_STATIC_DATA
# ifdef DEBUG_VIRTUALQUERY
void GC_dump_meminfo(MEMORY_BASIC_INFORMATION *buf)
{
GC_printf("BaseAddress = %lx, AllocationBase = %lx, RegionSize = %lx(%lu)\n",
- buf -> BaseAddress, buf -> AllocationBase, buf -> RegionSize,
- buf -> RegionSize);
+ buf -> BaseAddress, buf -> AllocationBase, buf -> RegionSize,
+ buf -> RegionSize);
GC_printf("\tAllocationProtect = %lx, State = %lx, Protect = %lx, "
- "Type = %lx\n",
- buf -> AllocationProtect, buf -> State, buf -> Protect,
- buf -> Type);
+ "Type = %lx\n",
+ buf -> AllocationProtect, buf -> State, buf -> Protect,
+ buf -> Type);
}
# endif /* DEBUG_VIRTUALQUERY */
# ifdef MSWINCE
- /* FIXME: Should we really need to scan MEM_PRIVATE sections? */
- /* For now, we don't add MEM_PRIVATE sections to the data roots for */
- /* WinCE because otherwise SEGV fault sometimes happens to occur in */
- /* GC_mark_from() (and, even if we use WRAP_MARK_SOME, WinCE prints */
- /* a "Data Abort" message to the debugging console). */
+ /* FIXME: Should we really need to scan MEM_PRIVATE sections? */
+ /* For now, we don't add MEM_PRIVATE sections to the data roots for */
+ /* WinCE because otherwise SEGV fault sometimes happens to occur in */
+ /* GC_mark_from() (and, even if we use WRAP_MARK_SOME, WinCE prints */
+ /* a "Data Abort" message to the debugging console). */
# define GC_wnt TRUE
# else
- extern GC_bool GC_wnt; /* Is Windows NT derivative. */
- /* Defined and set in os_dep.c. */
+ extern GC_bool GC_wnt; /* Is Windows NT derivative. */
+ /* Defined and set in os_dep.c. */
# endif
void GC_register_dynamic_libraries(void)
@@ -888,42 +888,42 @@ void GC_register_dynamic_libraries(void)
/* Only the first 32 MB of address space belongs to the current process */
while (p < (LPVOID)0x02000000) {
result = VirtualQuery(p, &buf, sizeof(buf));
- if (result == 0) {
- /* Page is free; advance to the next possible allocation base */
- new_limit = (char *)
- (((DWORD) p + GC_sysinfo.dwAllocationGranularity)
- & ~(GC_sysinfo.dwAllocationGranularity-1));
- } else
+ if (result == 0) {
+ /* Page is free; advance to the next possible allocation base */
+ new_limit = (char *)
+ (((DWORD) p + GC_sysinfo.dwAllocationGranularity)
+ & ~(GC_sysinfo.dwAllocationGranularity-1));
+ } else
# else
while (p < GC_sysinfo.lpMaximumApplicationAddress) {
result = VirtualQuery(p, &buf, sizeof(buf));
# endif
- {
- if (result != sizeof(buf)) {
- ABORT("Weird VirtualQuery result");
- }
- new_limit = (char *)p + buf.RegionSize;
- protect = buf.Protect;
- if (buf.State == MEM_COMMIT
- && (protect == PAGE_EXECUTE_READWRITE
- || protect == PAGE_READWRITE)
- && !GC_is_heap_base(buf.AllocationBase)
- /* There is some evidence that we cannot always
- * ignore MEM_PRIVATE sections under Windows ME
- * and predecessors. Hence we now also check for
- * that case. */
- && (buf.Type == MEM_IMAGE ||
- (!GC_wnt && buf.Type == MEM_PRIVATE))) {
-# ifdef DEBUG_VIRTUALQUERY
- GC_dump_meminfo(&buf);
-# endif
- if ((char *)p != limit) {
- GC_cond_add_roots(base, limit);
- base = p;
- }
- limit = new_limit;
- }
- }
+ {
+ if (result != sizeof(buf)) {
+ ABORT("Weird VirtualQuery result");
+ }
+ new_limit = (char *)p + buf.RegionSize;
+ protect = buf.Protect;
+ if (buf.State == MEM_COMMIT
+ && (protect == PAGE_EXECUTE_READWRITE
+ || protect == PAGE_READWRITE)
+ && !GC_is_heap_base(buf.AllocationBase)
+ /* There is some evidence that we cannot always
+ * ignore MEM_PRIVATE sections under Windows ME
+ * and predecessors. Hence we now also check for
+ * that case. */
+ && (buf.Type == MEM_IMAGE ||
+ (!GC_wnt && buf.Type == MEM_PRIVATE))) {
+# ifdef DEBUG_VIRTUALQUERY
+ GC_dump_meminfo(&buf);
+# endif
+ if ((char *)p != limit) {
+ GC_cond_add_roots(base, limit);
+ base = p;
+ }
+ limit = new_limit;
+ }
+ }
if (p > (LPVOID)new_limit /* overflow */) break;
p = (LPVOID)new_limit;
}
@@ -931,7 +931,7 @@ void GC_register_dynamic_libraries(void)
}
#endif /* MSWIN32 || MSWINCE || CYGWIN32 */
-
+
#if defined(ALPHA) && defined(OSF1)
#include <loader.h>
@@ -945,17 +945,17 @@ void GC_register_dynamic_libraries(void)
ldr_module_t moduleid = LDR_NULL_MODULE;
ldr_module_info_t moduleinfo;
size_t moduleinfosize = sizeof(moduleinfo);
- size_t modulereturnsize;
+ size_t modulereturnsize;
/* region */
- ldr_region_t region;
+ ldr_region_t region;
ldr_region_info_t regioninfo;
size_t regioninfosize = sizeof(regioninfo);
size_t regionreturnsize;
/* Obtain id of this process */
mypid = ldr_my_process();
-
+
/* For each module */
while (TRUE) {
@@ -985,7 +985,7 @@ void GC_register_dynamic_libraries(void)
/* Get the module information */
status = ldr_inq_module(mypid, moduleid, &moduleinfo,
- moduleinfosize, &modulereturnsize);
+ moduleinfosize, &modulereturnsize);
if (status != 0 )
ABORT("ldr_inq_module failed");
@@ -997,7 +997,7 @@ void GC_register_dynamic_libraries(void)
GC_printf("---Module---\n");
GC_printf("Module ID = %16ld\n", moduleinfo.lmi_modid);
GC_printf("Count of regions = %16d\n", moduleinfo.lmi_nregion);
- GC_printf("flags for module = %16lx\n", moduleinfo.lmi_flags);
+ GC_printf("flags for module = %16lx\n", moduleinfo.lmi_flags);
GC_printf("pathname of module = \"%s\"\n", moduleinfo.lmi_name);
# endif
@@ -1017,7 +1017,7 @@ void GC_register_dynamic_libraries(void)
# ifdef DL_VERBOSE
GC_printf("--- Region ---\n");
GC_printf("Region number = %16ld\n",
- regioninfo.lri_region_no);
+ regioninfo.lri_region_no);
GC_printf("Protection flags = %016x\n", regioninfo.lri_prot);
GC_printf("Virtual address = %16p\n", regioninfo.lri_vaddr);
GC_printf("Mapped address = %16p\n", regioninfo.lri_mapaddr);
@@ -1058,11 +1058,11 @@ void GC_register_dynamic_libraries(void)
/* Check if this is the end of the list or if some error occured */
if (status != 0) {
-# ifdef GC_HPUX_THREADS
- /* I've seen errno values of 0. The man page is not clear */
- /* as to whether errno should get set on a -1 return. */
- break;
-# else
+# ifdef GC_HPUX_THREADS
+ /* I've seen errno values of 0. The man page is not clear */
+ /* as to whether errno should get set on a -1 return. */
+ break;
+# else
if (errno == EINVAL) {
break; /* Moved past end of shared library list --> finished */
} else {
@@ -1070,10 +1070,10 @@ void GC_register_dynamic_libraries(void)
GC_printf("dynamic_load: %s\n", sys_errlist[errno]);
} else {
GC_printf("dynamic_load: %d\n", errno);
- }
+ }
ABORT("shl_get failed");
}
-# endif
+# endif
}
# ifdef DL_VERBOSE
@@ -1081,7 +1081,7 @@ void GC_register_dynamic_libraries(void)
GC_printf("\tfilename = \"%s\"\n", shl_desc->filename);
GC_printf("\tindex = %d\n", index);
GC_printf("\thandle = %08x\n",
- (unsigned long) shl_desc->handle);
+ (unsigned long) shl_desc->handle);
GC_printf("\ttext seg. start = %08x\n", shl_desc->tstart);
GC_printf("\ttext seg. end = %08x\n", shl_desc->tend);
GC_printf("\tdata seg. start = %08x\n", shl_desc->dstart);
@@ -1091,7 +1091,7 @@ void GC_register_dynamic_libraries(void)
/* register shared library's data segment as a garbage collection root */
GC_add_roots_inner((char *) shl_desc->dstart,
- (char *) shl_desc->dend, TRUE);
+ (char *) shl_desc->dend, TRUE);
index++;
}
@@ -1104,36 +1104,36 @@ void GC_register_dynamic_libraries(void)
#include <sys/errno.h>
void GC_register_dynamic_libraries(void)
{
- int len;
- char *ldibuf;
- int ldibuflen;
- struct ld_info *ldi;
-
- ldibuf = alloca(ldibuflen = 8192);
-
- while ( (len = loadquery(L_GETINFO,ldibuf,ldibuflen)) < 0) {
- if (errno != ENOMEM) {
- ABORT("loadquery failed");
- }
- ldibuf = alloca(ldibuflen *= 2);
- }
-
- ldi = (struct ld_info *)ldibuf;
- while (ldi) {
- len = ldi->ldinfo_next;
- GC_add_roots_inner(
- ldi->ldinfo_dataorg,
- (ptr_t)(unsigned long)ldi->ldinfo_dataorg
- + ldi->ldinfo_datasize,
- TRUE);
- ldi = len ? (struct ld_info *)((char *)ldi + len) : 0;
- }
+ int len;
+ char *ldibuf;
+ int ldibuflen;
+ struct ld_info *ldi;
+
+ ldibuf = alloca(ldibuflen = 8192);
+
+ while ( (len = loadquery(L_GETINFO,ldibuf,ldibuflen)) < 0) {
+ if (errno != ENOMEM) {
+ ABORT("loadquery failed");
+ }
+ ldibuf = alloca(ldibuflen *= 2);
+ }
+
+ ldi = (struct ld_info *)ldibuf;
+ while (ldi) {
+ len = ldi->ldinfo_next;
+ GC_add_roots_inner(
+ ldi->ldinfo_dataorg,
+ (ptr_t)(unsigned long)ldi->ldinfo_dataorg
+ + ldi->ldinfo_datasize,
+ TRUE);
+ ldi = len ? (struct ld_info *)((char *)ldi + len) : 0;
+ }
}
#endif /* AIX */
#ifdef DARWIN
-/* __private_extern__ hack required for pre-3.4 gcc versions. */
+/* __private_extern__ hack required for pre-3.4 gcc versions. */
#ifndef __private_extern__
# define __private_extern__ extern
# include <mach-o/dyld.h>
@@ -1145,7 +1145,7 @@ void GC_register_dynamic_libraries(void)
/*#define DARWIN_DEBUG*/
-const static struct {
+const static struct {
const char *seg;
const char *sect;
} GC_dyld_sections[] = {
@@ -1153,7 +1153,7 @@ const static struct {
{ SEG_DATA, SECT_BSS },
{ SEG_DATA, SECT_COMMON }
};
-
+
static const char *GC_dyld_name_for_hdr(const struct GC_MACH_HEADER *hdr) {
unsigned long i,c;
c = _dyld_image_count();
@@ -1161,7 +1161,7 @@ static const char *GC_dyld_name_for_hdr(const struct GC_MACH_HEADER *hdr) {
return _dyld_get_image_name(i);
return NULL;
}
-
+
/* This should never be called by a thread holding the lock */
static void GC_dyld_image_add(const struct GC_MACH_HEADER *hdr, intptr_t slide)
{
@@ -1178,12 +1178,12 @@ static void GC_dyld_image_add(const struct GC_MACH_HEADER *hdr, intptr_t slide)
# endif
for(i=0;i<sizeof(GC_dyld_sections)/sizeof(GC_dyld_sections[0]);i++) {
sec = GC_GETSECTBYNAME(hdr, GC_dyld_sections[i].seg,
- GC_dyld_sections[i].sect);
+ GC_dyld_sections[i].sect);
if(sec == NULL || sec->size < sizeof(word)) continue;
start = slide + sec->addr;
end = start + sec->size;
LOCK();
- /* The user callback is called holding the lock */
+ /* The user callback is called holding the lock */
if (callback == 0 || callback(name, (void*)start, (size_t)sec->size)) {
# ifdef DARWIN_DEBUG
GC_printf("Adding section at %p-%p (%lu bytes) from image %s\n",
@@ -1200,29 +1200,29 @@ static void GC_dyld_image_add(const struct GC_MACH_HEADER *hdr, intptr_t slide)
/* This should never be called by a thread holding the lock */
static void GC_dyld_image_remove(const struct GC_MACH_HEADER *hdr,
- intptr_t slide)
+ intptr_t slide)
{
unsigned long start,end,i;
const struct GC_MACH_SECTION *sec;
for(i=0;i<sizeof(GC_dyld_sections)/sizeof(GC_dyld_sections[0]);i++) {
sec = GC_GETSECTBYNAME(hdr, GC_dyld_sections[i].seg,
- GC_dyld_sections[i].sect);
+ GC_dyld_sections[i].sect);
if(sec == NULL || sec->size == 0) continue;
start = slide + sec->addr;
end = start + sec->size;
# ifdef DARWIN_DEBUG
GC_printf("Removing section at %p-%p (%lu bytes) from image %s\n",
- start,end,sec->size,GC_dyld_name_for_hdr(hdr));
+ start,end,sec->size,GC_dyld_name_for_hdr(hdr));
# endif
GC_remove_roots((char*)start,(char*)end);
}
# ifdef DARWIN_DEBUG
- GC_print_static_roots();
+ GC_print_static_roots();
# endif
}
void GC_register_dynamic_libraries(void) {
- /* Currently does nothing. The callbacks are setup by GC_init_dyld()
+ /* Currently does nothing. The callbacks are setup by GC_init_dyld()
The dyld library takes it from there. */
}
@@ -1231,28 +1231,28 @@ void GC_register_dynamic_libraries(void) {
Because of this we MUST setup callbacks BEFORE we ever stop the world.
This should be called BEFORE any thread in created and WITHOUT the
allocation lock held. */
-
+
void GC_init_dyld(void) {
static GC_bool initialized = FALSE;
char *bind_fully_env = NULL;
-
+
if(initialized) return;
-
+
# ifdef DARWIN_DEBUG
GC_printf("Registering dyld callbacks...\n");
# endif
-
+
/* Apple's Documentation:
When you call _dyld_register_func_for_add_image, the dynamic linker runtime
calls the specified callback (func) once for each of the images that is
currently loaded into the program. When a new image is added to the program,
- your callback is called again with the mach_header for the new image, and the
- virtual memory slide amount of the new image.
-
- This WILL properly register already linked libraries and libraries
+ your callback is called again with the mach_header for the new image, and the
+ virtual memory slide amount of the new image.
+
+ This WILL properly register already linked libraries and libraries
linked in the future
*/
-
+
_dyld_register_func_for_add_image(GC_dyld_image_add);
_dyld_register_func_for_remove_image(GC_dyld_image_remove);
@@ -1260,12 +1260,12 @@ void GC_init_dyld(void) {
initialized = TRUE;
bind_fully_env = getenv("DYLD_BIND_AT_LAUNCH");
-
+
if (bind_fully_env == NULL) {
# ifdef DARWIN_DEBUG
GC_printf("Forcing full bind of GC code...\n");
# endif
-
+
if(!_dyld_bind_fully_image_containing_address((unsigned long*)GC_malloc))
ABORT("_dyld_bind_fully_image_containing_address failed");
}
@@ -1291,18 +1291,18 @@ GC_bool GC_register_main_static_data(void)
void GC_register_dynamic_libraries(void)
{
- /* Add new static data areas of dynamically loaded modules. */
+ /* Add new static data areas of dynamically loaded modules. */
{
PCR_IL_LoadedFile * p = PCR_IL_GetLastLoadedFile();
PCR_IL_LoadedSegment * q;
-
+
/* Skip uncommitted files */
while (p != NIL && !(p -> lf_commitPoint)) {
- /* The loading of this file has not yet been committed */
- /* Hence its description could be inconsistent. */
- /* Furthermore, it hasn't yet been run. Hence its data */
- /* segments can't possibly reference heap allocated */
- /* objects. */
+ /* The loading of this file has not yet been committed */
+ /* Hence its description could be inconsistent. */
+ /* Furthermore, it hasn't yet been run. Hence its data */
+ /* segments can't possibly reference heap allocated */
+ /* objects. */
p = p -> lf_prev;
}
for (; p != NIL; p = p -> lf_prev) {
@@ -1310,9 +1310,9 @@ void GC_register_dynamic_libraries(void)
if ((q -> ls_flags & PCR_IL_SegFlags_Traced_MASK)
== PCR_IL_SegFlags_Traced_on) {
GC_add_roots_inner
- ((char *)(q -> ls_addr),
- (char *)(q -> ls_addr) + q -> ls_bytes,
- TRUE);
+ ((char *)(q -> ls_addr),
+ (char *)(q -> ls_addr) + q -> ls_bytes,
+ TRUE);
}
}
}
@@ -1340,7 +1340,7 @@ GC_bool GC_register_main_static_data(void)
/* Register a routine to filter dynamic library registration. */
GC_API void GC_CALL GC_register_has_static_roots_callback(
- GC_has_static_roots_func callback)
+ GC_has_static_roots_func callback)
{
GC_has_static_roots = callback;
}
diff --git a/finalize.c b/finalize.c
index 607e8bdd..c72fcea5 100644
--- a/finalize.c
+++ b/finalize.c
@@ -29,9 +29,9 @@
int GC_java_finalization = 0;
# endif
-/* Type of mark procedure used for marking from finalizable object. */
-/* This procedure normally does not mark the object, only its */
-/* descendents. */
+/* Type of mark procedure used for marking from finalizable object. */
+/* This procedure normally does not mark the object, only its */
+/* descendents. */
typedef void (* finalization_mark_proc)(ptr_t /* finalizable_obj_ptr */);
# define HASH3(addr,size,log_size) \
@@ -47,38 +47,38 @@ struct hash_chain_entry {
static struct disappearing_link {
struct hash_chain_entry prolog;
# define dl_hidden_link prolog.hidden_key
- /* Field to be cleared. */
+ /* Field to be cleared. */
# define dl_next(x) (struct disappearing_link *)((x) -> prolog.next)
# define dl_set_next(x,y) (x) -> prolog.next = (struct hash_chain_entry *)(y)
- word dl_hidden_obj; /* Pointer to object base */
+ word dl_hidden_obj; /* Pointer to object base */
} **dl_head = 0;
static signed_word log_dl_table_size = -1;
- /* Binary log of */
- /* current size of array pointed to by dl_head. */
- /* -1 ==> size is 0. */
+ /* Binary log of */
+ /* current size of array pointed to by dl_head. */
+ /* -1 ==> size is 0. */
STATIC word GC_dl_entries = 0;
- /* Number of entries currently in disappearing */
- /* link table. */
+ /* Number of entries currently in disappearing */
+ /* link table. */
static struct finalizable_object {
struct hash_chain_entry prolog;
# define fo_hidden_base prolog.hidden_key
- /* Pointer to object base. */
- /* No longer hidden once object */
- /* is on finalize_now queue. */
+ /* Pointer to object base. */
+ /* No longer hidden once object */
+ /* is on finalize_now queue. */
# define fo_next(x) (struct finalizable_object *)((x) -> prolog.next)
# define fo_set_next(x,y) (x) -> prolog.next = (struct hash_chain_entry *)(y)
- GC_finalization_proc fo_fn; /* Finalizer. */
+ GC_finalization_proc fo_fn; /* Finalizer. */
ptr_t fo_client_data;
- word fo_object_size; /* In bytes. */
- finalization_mark_proc fo_mark_proc; /* Mark-through procedure */
+ word fo_object_size; /* In bytes. */
+ finalization_mark_proc fo_mark_proc; /* Mark-through procedure */
} **fo_head = 0;
STATIC struct finalizable_object * GC_finalize_now = 0;
- /* List of objects that should be finalized now. */
+ /* List of objects that should be finalized now. */
static signed_word log_fo_table_size = -1;
@@ -89,16 +89,16 @@ void GC_push_finalizer_structures(void)
GC_push_all((ptr_t)(&dl_head), (ptr_t)(&dl_head) + sizeof(word));
GC_push_all((ptr_t)(&fo_head), (ptr_t)(&fo_head) + sizeof(word));
GC_push_all((ptr_t)(&GC_finalize_now),
- (ptr_t)(&GC_finalize_now) + sizeof(word));
+ (ptr_t)(&GC_finalize_now) + sizeof(word));
}
-/* Double the size of a hash table. *size_ptr is the log of its current */
-/* size. May be a no-op. */
-/* *table is a pointer to an array of hash headers. If we succeed, we */
-/* update both *table and *log_size_ptr. */
-/* Lock is held. */
+/* Double the size of a hash table. *size_ptr is the log of its current */
+/* size. May be a no-op. */
+/* *table is a pointer to an array of hash headers. If we succeed, we */
+/* update both *table and *log_size_ptr. */
+/* Lock is held. */
STATIC void GC_grow_table(struct hash_chain_entry ***table,
- signed_word *log_size_ptr)
+ signed_word *log_size_ptr)
{
register word i;
register struct hash_chain_entry *p;
@@ -108,15 +108,15 @@ STATIC void GC_grow_table(struct hash_chain_entry ***table,
word new_size = (word)1 << log_new_size;
/* FIXME: Power of 2 size often gets rounded up to one more page. */
struct hash_chain_entry **new_table = (struct hash_chain_entry **)
- GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
- (size_t)new_size * sizeof(struct hash_chain_entry *), NORMAL);
-
+ GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
+ (size_t)new_size * sizeof(struct hash_chain_entry *), NORMAL);
+
if (new_table == 0) {
- if (*table == 0) {
- ABORT("Insufficient space for initial table allocation");
- } else {
- return;
- }
+ if (*table == 0) {
+ ABORT("Insufficient space for initial table allocation");
+ } else {
+ return;
+ }
}
for (i = 0; i < old_size; i++) {
p = (*table)[i];
@@ -124,7 +124,7 @@ STATIC void GC_grow_table(struct hash_chain_entry ***table,
ptr_t real_key = (ptr_t)REVEAL_POINTER(p -> hidden_key);
struct hash_chain_entry *next = p -> next;
size_t new_hash = HASH3(real_key, new_size, log_new_size);
-
+
p -> next = new_table[new_hash];
new_table[new_hash] = p;
p = next;
@@ -137,33 +137,33 @@ STATIC void GC_grow_table(struct hash_chain_entry ***table,
GC_API int GC_CALL GC_register_disappearing_link(void * * link)
{
ptr_t base;
-
+
base = (ptr_t)GC_base((void *)link);
if (base == 0)
- ABORT("Bad arg to GC_register_disappearing_link");
+ ABORT("Bad arg to GC_register_disappearing_link");
return(GC_general_register_disappearing_link(link, base));
}
GC_API int GC_CALL GC_general_register_disappearing_link(void * * link,
- void * obj)
+ void * obj)
{
struct disappearing_link *curr_dl;
size_t index;
struct disappearing_link * new_dl;
DCL_LOCK_STATE;
-
+
if (((word)link & (ALIGNMENT-1)) || link == NULL)
- ABORT("Bad arg to GC_general_register_disappearing_link");
+ ABORT("Bad arg to GC_general_register_disappearing_link");
LOCK();
GC_ASSERT(obj != NULL && GC_base(obj) == obj);
if (log_dl_table_size == -1
|| GC_dl_entries > ((word)1 << log_dl_table_size)) {
- GC_grow_table((struct hash_chain_entry ***)(&dl_head),
- &log_dl_table_size);
- if (GC_print_stats) {
- GC_log_printf("Grew dl table to %u entries\n",
- (1 << (unsigned)log_dl_table_size));
- }
+ GC_grow_table((struct hash_chain_entry ***)(&dl_head),
+ &log_dl_table_size);
+ if (GC_print_stats) {
+ GC_log_printf("Grew dl table to %u entries\n",
+ (1 << (unsigned)log_dl_table_size));
+ }
}
index = HASH2(link, log_dl_table_size);
for (curr_dl = dl_head[index]; curr_dl != 0; curr_dl = dl_next(curr_dl)) {
@@ -174,30 +174,30 @@ GC_API int GC_CALL GC_general_register_disappearing_link(void * * link,
}
}
new_dl = (struct disappearing_link *)
- GC_INTERNAL_MALLOC(sizeof(struct disappearing_link),NORMAL);
+ GC_INTERNAL_MALLOC(sizeof(struct disappearing_link),NORMAL);
if (0 == new_dl) {
GC_oom_func oom_fn = GC_oom_fn;
UNLOCK();
new_dl = (struct disappearing_link *)
- (*oom_fn)(sizeof(struct disappearing_link));
+ (*oom_fn)(sizeof(struct disappearing_link));
if (0 == new_dl) {
- return GC_NO_MEMORY;
+ return GC_NO_MEMORY;
}
/* It's not likely we'll make it here, but ... */
LOCK();
- /* Recalculate index since the table may grow. */
+ /* Recalculate index since the table may grow. */
index = HASH2(link, log_dl_table_size);
/* Check again that our disappearing link not in the table. */
for (curr_dl = dl_head[index]; curr_dl != 0; curr_dl = dl_next(curr_dl)) {
- if (curr_dl -> dl_hidden_link == HIDE_POINTER(link)) {
- curr_dl -> dl_hidden_obj = HIDE_POINTER(obj);
- UNLOCK();
-# ifndef DBG_HDRS_ALL
- /* Free unused new_dl returned by GC_oom_fn() */
- GC_free((void *)new_dl);
-# endif
- return GC_DUPLICATE;
- }
+ if (curr_dl -> dl_hidden_link == HIDE_POINTER(link)) {
+ curr_dl -> dl_hidden_obj = HIDE_POINTER(obj);
+ UNLOCK();
+# ifndef DBG_HDRS_ALL
+ /* Free unused new_dl returned by GC_oom_fn() */
+ GC_free((void *)new_dl);
+# endif
+ return GC_DUPLICATE;
+ }
}
}
new_dl -> dl_hidden_obj = HIDE_POINTER(obj);
@@ -214,7 +214,7 @@ GC_API int GC_CALL GC_unregister_disappearing_link(void * * link)
struct disappearing_link *curr_dl, *prev_dl;
size_t index;
DCL_LOCK_STATE;
-
+
if (((word)link & (ALIGNMENT-1)) != 0) return(0); /* Nothing to do. */
LOCK();
@@ -229,11 +229,11 @@ GC_API int GC_CALL GC_unregister_disappearing_link(void * * link)
}
GC_dl_entries--;
UNLOCK();
-# ifdef DBG_HDRS_ALL
- dl_set_next(curr_dl, 0);
-# else
+# ifdef DBG_HDRS_ALL
+ dl_set_next(curr_dl, 0);
+# else
GC_free((void *)curr_dl);
-# endif
+# endif
return(1);
}
prev_dl = curr_dl;
@@ -243,19 +243,19 @@ GC_API int GC_CALL GC_unregister_disappearing_link(void * * link)
return(0);
}
-/* Possible finalization_marker procedures. Note that mark stack */
-/* overflow is handled by the caller, and is not a disaster. */
+/* Possible finalization_marker procedures. Note that mark stack */
+/* overflow is handled by the caller, and is not a disaster. */
STATIC void GC_normal_finalize_mark_proc(ptr_t p)
{
hdr * hhdr = HDR(p);
-
+
PUSH_OBJ(p, hhdr, GC_mark_stack_top,
- &(GC_mark_stack[GC_mark_stack_size]));
+ &(GC_mark_stack[GC_mark_stack_size]));
}
-/* This only pays very partial attention to the mark descriptor. */
-/* It does the right thing for normal and atomic objects, and treats */
-/* most others as normal. */
+/* This only pays very partial attention to the mark descriptor. */
+/* It does the right thing for normal and atomic objects, and treats */
+/* most others as normal. */
STATIC void GC_ignore_self_finalize_mark_proc(ptr_t p)
{
hdr * hhdr = HDR(p);
@@ -264,17 +264,17 @@ STATIC void GC_ignore_self_finalize_mark_proc(ptr_t p)
word r;
ptr_t scan_limit;
ptr_t target_limit = p + hhdr -> hb_sz - 1;
-
+
if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) {
scan_limit = p + descr - sizeof(word);
} else {
scan_limit = target_limit + 1 - sizeof(word);
}
for (q = p; q <= scan_limit; q += ALIGNMENT) {
- r = *(word *)q;
- if ((ptr_t)r < p || (ptr_t)r > target_limit) {
- GC_PUSH_ONE_HEAP(r, q);
- }
+ r = *(word *)q;
+ if ((ptr_t)r < p || (ptr_t)r > target_limit) {
+ GC_PUSH_ONE_HEAP(r, q);
+ }
}
}
@@ -283,14 +283,14 @@ STATIC void GC_null_finalize_mark_proc(ptr_t p)
{
}
-/* Possible finalization_marker procedures. Note that mark stack */
-/* overflow is handled by the caller, and is not a disaster. */
+/* Possible finalization_marker procedures. Note that mark stack */
+/* overflow is handled by the caller, and is not a disaster. */
-/* GC_unreachable_finalize_mark_proc is an alias for normal marking, */
-/* but it is explicitly tested for, and triggers different */
-/* behavior. Objects registered in this way are not finalized */
-/* if they are reachable by other finalizable objects, even if those */
-/* other objects specify no ordering. */
+/* GC_unreachable_finalize_mark_proc is an alias for normal marking, */
+/* but it is explicitly tested for, and triggers different */
+/* behavior. Objects registered in this way are not finalized */
+/* if they are reachable by other finalizable objects, even if those */
+/* other objects specify no ordering. */
STATIC void GC_unreachable_finalize_mark_proc(ptr_t p)
{
GC_normal_finalize_mark_proc(p);
@@ -298,15 +298,15 @@ STATIC void GC_unreachable_finalize_mark_proc(ptr_t p)
-/* Register a finalization function. See gc.h for details. */
-/* The last parameter is a procedure that determines */
-/* marking for finalization ordering. Any objects marked */
-/* by that procedure will be guaranteed to not have been */
-/* finalized when this finalizer is invoked. */
+/* Register a finalization function. See gc.h for details. */
+/* The last parameter is a procedure that determines */
+/* marking for finalization ordering. Any objects marked */
+/* by that procedure will be guaranteed to not have been */
+/* finalized when this finalizer is invoked. */
STATIC void GC_register_finalizer_inner(void * obj,
- GC_finalization_proc fn, void *cd,
- GC_finalization_proc *ofn, void **ocd,
- finalization_mark_proc mp)
+ GC_finalization_proc fn, void *cd,
+ GC_finalization_proc *ofn, void **ocd,
+ finalization_mark_proc mp)
{
ptr_t base;
struct finalizable_object * curr_fo, * prev_fo;
@@ -319,98 +319,98 @@ STATIC void GC_register_finalizer_inner(void * obj,
LOCK();
if (log_fo_table_size == -1
|| GC_fo_entries > ((word)1 << log_fo_table_size)) {
- GC_grow_table((struct hash_chain_entry ***)(&fo_head),
- &log_fo_table_size);
- if (GC_print_stats) {
- GC_log_printf("Grew fo table to %u entries\n",
- (1 << (unsigned)log_fo_table_size));
- }
+ GC_grow_table((struct hash_chain_entry ***)(&fo_head),
+ &log_fo_table_size);
+ if (GC_print_stats) {
+ GC_log_printf("Grew fo table to %u entries\n",
+ (1 << (unsigned)log_fo_table_size));
+ }
}
- /* in the THREADS case we hold allocation lock. */
+ /* in the THREADS case we hold allocation lock. */
base = (ptr_t)obj;
for (;;) {
index = HASH2(base, log_fo_table_size);
prev_fo = 0; curr_fo = fo_head[index];
while (curr_fo != 0) {
- GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
- if (curr_fo -> fo_hidden_base == HIDE_POINTER(base)) {
- /* Interruption by a signal in the middle of this */
- /* should be safe. The client may see only *ocd */
- /* updated, but we'll declare that to be his problem. */
- if (ocd) *ocd = (void *) (curr_fo -> fo_client_data);
- if (ofn) *ofn = curr_fo -> fo_fn;
- /* Delete the structure for base. */
- if (prev_fo == 0) {
- fo_head[index] = fo_next(curr_fo);
- } else {
- fo_set_next(prev_fo, fo_next(curr_fo));
- }
- if (fn == 0) {
- GC_fo_entries--;
- /* May not happen if we get a signal. But a high */
- /* estimate will only make the table larger than */
- /* necessary. */
-# if !defined(THREADS) && !defined(DBG_HDRS_ALL)
- GC_free((void *)curr_fo);
-# endif
- } else {
- curr_fo -> fo_fn = fn;
- curr_fo -> fo_client_data = (ptr_t)cd;
- curr_fo -> fo_mark_proc = mp;
- /* Reinsert it. We deleted it first to maintain */
- /* consistency in the event of a signal. */
- if (prev_fo == 0) {
- fo_head[index] = curr_fo;
- } else {
- fo_set_next(prev_fo, curr_fo);
- }
- }
- UNLOCK();
-# ifndef DBG_HDRS_ALL
- if (EXPECT(new_fo != 0, FALSE)) {
- /* Free unused new_fo returned by GC_oom_fn() */
- GC_free((void *)new_fo);
- }
-# endif
- return;
- }
- prev_fo = curr_fo;
- curr_fo = fo_next(curr_fo);
+ GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
+ if (curr_fo -> fo_hidden_base == HIDE_POINTER(base)) {
+ /* Interruption by a signal in the middle of this */
+ /* should be safe. The client may see only *ocd */
+ /* updated, but we'll declare that to be his problem. */
+ if (ocd) *ocd = (void *) (curr_fo -> fo_client_data);
+ if (ofn) *ofn = curr_fo -> fo_fn;
+ /* Delete the structure for base. */
+ if (prev_fo == 0) {
+ fo_head[index] = fo_next(curr_fo);
+ } else {
+ fo_set_next(prev_fo, fo_next(curr_fo));
+ }
+ if (fn == 0) {
+ GC_fo_entries--;
+ /* May not happen if we get a signal. But a high */
+ /* estimate will only make the table larger than */
+ /* necessary. */
+# if !defined(THREADS) && !defined(DBG_HDRS_ALL)
+ GC_free((void *)curr_fo);
+# endif
+ } else {
+ curr_fo -> fo_fn = fn;
+ curr_fo -> fo_client_data = (ptr_t)cd;
+ curr_fo -> fo_mark_proc = mp;
+ /* Reinsert it. We deleted it first to maintain */
+ /* consistency in the event of a signal. */
+ if (prev_fo == 0) {
+ fo_head[index] = curr_fo;
+ } else {
+ fo_set_next(prev_fo, curr_fo);
+ }
+ }
+ UNLOCK();
+# ifndef DBG_HDRS_ALL
+ if (EXPECT(new_fo != 0, FALSE)) {
+ /* Free unused new_fo returned by GC_oom_fn() */
+ GC_free((void *)new_fo);
+ }
+# endif
+ return;
+ }
+ prev_fo = curr_fo;
+ curr_fo = fo_next(curr_fo);
}
if (EXPECT(new_fo != 0, FALSE)) {
- /* new_fo is returned GC_oom_fn(), so fn != 0 and hhdr != 0. */
+ /* new_fo is returned GC_oom_fn(), so fn != 0 and hhdr != 0. */
break;
}
if (fn == 0) {
- if (ocd) *ocd = 0;
- if (ofn) *ofn = 0;
- UNLOCK();
- return;
+ if (ocd) *ocd = 0;
+ if (ofn) *ofn = 0;
+ UNLOCK();
+ return;
}
GET_HDR(base, hhdr);
if (EXPECT(0 == hhdr, FALSE)) {
- /* We won't collect it, hence finalizer wouldn't be run. */
- if (ocd) *ocd = 0;
- if (ofn) *ofn = 0;
- UNLOCK();
- return;
+ /* We won't collect it, hence finalizer wouldn't be run. */
+ if (ocd) *ocd = 0;
+ if (ofn) *ofn = 0;
+ UNLOCK();
+ return;
}
new_fo = (struct finalizable_object *)
- GC_INTERNAL_MALLOC(sizeof(struct finalizable_object),NORMAL);
+ GC_INTERNAL_MALLOC(sizeof(struct finalizable_object),NORMAL);
if (EXPECT(new_fo != 0, TRUE))
- break;
+ break;
oom_fn = GC_oom_fn;
UNLOCK();
new_fo = (struct finalizable_object *)
- (*oom_fn)(sizeof(struct finalizable_object));
+ (*oom_fn)(sizeof(struct finalizable_object));
if (0 == new_fo) {
- /* No enough memory. *ocd and *ofn remains unchanged. */
- return;
+ /* No enough memory. *ocd and *ofn remains unchanged. */
+ return;
}
/* It's not likely we'll make it here, but ... */
LOCK();
- /* Recalculate index since the table may grow and */
- /* check again that our finalizer is not in the table. */
+ /* Recalculate index since the table may grow and */
+ /* check again that our finalizer is not in the table. */
}
GC_ASSERT(GC_size(new_fo) >= sizeof(struct finalizable_object));
if (ocd) *ocd = 0;
@@ -427,40 +427,40 @@ STATIC void GC_register_finalizer_inner(void * obj,
}
GC_API void GC_CALL GC_register_finalizer(void * obj,
- GC_finalization_proc fn, void * cd,
- GC_finalization_proc *ofn, void ** ocd)
+ GC_finalization_proc fn, void * cd,
+ GC_finalization_proc *ofn, void ** ocd)
{
GC_register_finalizer_inner(obj, fn, cd, ofn,
- ocd, GC_normal_finalize_mark_proc);
+ ocd, GC_normal_finalize_mark_proc);
}
GC_API void GC_CALL GC_register_finalizer_ignore_self(void * obj,
- GC_finalization_proc fn, void * cd,
- GC_finalization_proc *ofn, void ** ocd)
+ GC_finalization_proc fn, void * cd,
+ GC_finalization_proc *ofn, void ** ocd)
{
GC_register_finalizer_inner(obj, fn, cd, ofn,
- ocd, GC_ignore_self_finalize_mark_proc);
+ ocd, GC_ignore_self_finalize_mark_proc);
}
GC_API void GC_CALL GC_register_finalizer_no_order(void * obj,
- GC_finalization_proc fn, void * cd,
- GC_finalization_proc *ofn, void ** ocd)
+ GC_finalization_proc fn, void * cd,
+ GC_finalization_proc *ofn, void ** ocd)
{
GC_register_finalizer_inner(obj, fn, cd, ofn,
- ocd, GC_null_finalize_mark_proc);
+ ocd, GC_null_finalize_mark_proc);
}
static GC_bool need_unreachable_finalization = FALSE;
- /* Avoid the work if this isn't used. */
+ /* Avoid the work if this isn't used. */
GC_API void GC_CALL GC_register_finalizer_unreachable(void * obj,
- GC_finalization_proc fn, void * cd,
- GC_finalization_proc *ofn, void ** ocd)
+ GC_finalization_proc fn, void * cd,
+ GC_finalization_proc *ofn, void ** ocd)
{
need_unreachable_finalization = TRUE;
GC_ASSERT(GC_java_finalization);
GC_register_finalizer_inner(obj, fn, cd, ofn,
- ocd, GC_unreachable_finalize_mark_proc);
+ ocd, GC_unreachable_finalize_mark_proc);
}
#ifndef NO_DEBUGGING
@@ -496,31 +496,31 @@ void GC_dump_finalization(void)
#endif
extern unsigned GC_fail_count;
- /* How many consecutive GC/expansion failures? */
- /* Reset by GC_allochblk(); defined in alloc.c. */
+ /* How many consecutive GC/expansion failures? */
+ /* Reset by GC_allochblk(); defined in alloc.c. */
#ifdef THREADS
- /* Defined in pthread_support.c or win32_threads.c. Called with the */
- /* allocation lock held. */
+ /* Defined in pthread_support.c or win32_threads.c. Called with the */
+ /* allocation lock held. */
void GC_reset_finalizer_nested(void);
unsigned *GC_check_finalizer_nested(void);
#else
- /* Global variables to minimize the level of recursion when a client */
- /* finalizer allocates memory. */
+ /* Global variables to minimize the level of recursion when a client */
+ /* finalizer allocates memory. */
STATIC unsigned GC_finalizer_nested = 0;
STATIC unsigned GC_finalizer_skipped = 0;
- /* Checks and updates the level of finalizers recursion. */
- /* Returns NULL if GC_invoke_finalizers() should not be called by the */
- /* collector (to minimize the risk of a deep finalizers recursion), */
- /* otherwise returns a pointer to GC_finalizer_nested. */
+ /* Checks and updates the level of finalizers recursion. */
+ /* Returns NULL if GC_invoke_finalizers() should not be called by the */
+ /* collector (to minimize the risk of a deep finalizers recursion), */
+ /* otherwise returns a pointer to GC_finalizer_nested. */
STATIC unsigned *GC_check_finalizer_nested(void)
{
unsigned nesting_level = GC_finalizer_nested;
if (nesting_level) {
- /* We are inside another GC_invoke_finalizers(). */
- /* Skip some implicitly-called GC_invoke_finalizers() */
- /* depending on the nesting (recursion) level. */
+ /* We are inside another GC_invoke_finalizers(). */
+ /* Skip some implicitly-called GC_invoke_finalizers() */
+ /* depending on the nesting (recursion) level. */
if (++GC_finalizer_skipped < (1U << nesting_level)) return NULL;
GC_finalizer_skipped = 0;
}
@@ -529,9 +529,9 @@ extern unsigned GC_fail_count;
}
#endif /* THREADS */
-/* Called with held lock (but the world is running). */
-/* Cause disappearing links to disappear and unreachable objects to be */
-/* enqueued for finalization. */
+/* Called with held lock (but the world is running). */
+/* Cause disappearing links to disappear and unreachable objects to be */
+/* enqueued for finalization. */
void GC_finalize(void)
{
struct disappearing_link * curr_dl, * prev_dl, * next_dl;
@@ -540,7 +540,7 @@ void GC_finalize(void)
size_t i;
size_t dl_size = (log_dl_table_size == -1 ) ? 0 : (1 << log_dl_table_size);
size_t fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
-
+
# ifndef SMALL_CONFIG
/* Save current GC_dl_entries value for stats printing */
GC_old_dl_entries = GC_dl_entries;
@@ -570,15 +570,15 @@ void GC_finalize(void)
}
}
}
- /* Mark all objects reachable via chains of 1 or more pointers */
- /* from finalizable objects. */
+ /* Mark all objects reachable via chains of 1 or more pointers */
+ /* from finalizable objects. */
GC_ASSERT(GC_mark_state == MS_NONE);
for (i = 0; i < fo_size; i++) {
for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = fo_next(curr_fo)) {
GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
if (!GC_is_marked(real_ptr)) {
- GC_MARKED_FOR_FINALIZATION(real_ptr);
+ GC_MARKED_FOR_FINALIZATION(real_ptr);
GC_MARK_FO(real_ptr, curr_fo -> fo_mark_proc);
if (GC_is_marked(real_ptr)) {
WARN("Finalization cycle involving %p\n", real_ptr);
@@ -586,8 +586,8 @@ void GC_finalize(void)
}
}
}
- /* Enqueue for finalization all objects that are still */
- /* unreachable. */
+ /* Enqueue for finalization all objects that are still */
+ /* unreachable. */
GC_bytes_finalized = 0;
for (i = 0; i < fo_size; i++) {
curr_fo = fo_head[i];
@@ -595,9 +595,9 @@ void GC_finalize(void)
while (curr_fo != 0) {
real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
if (!GC_is_marked(real_ptr)) {
- if (!GC_java_finalization) {
+ if (!GC_java_finalization) {
GC_set_mark_bit(real_ptr);
- }
+ }
/* Delete from hash table */
next_fo = fo_next(curr_fo);
if (prev_fo == 0) {
@@ -606,17 +606,17 @@ void GC_finalize(void)
fo_set_next(prev_fo, next_fo);
}
GC_fo_entries--;
- /* Add to list of objects awaiting finalization. */
+ /* Add to list of objects awaiting finalization. */
fo_set_next(curr_fo, GC_finalize_now);
GC_finalize_now = curr_fo;
- /* unhide object pointer so any future collections will */
- /* see it. */
- curr_fo -> fo_hidden_base =
- (word) REVEAL_POINTER(curr_fo -> fo_hidden_base);
+ /* unhide object pointer so any future collections will */
+ /* see it. */
+ curr_fo -> fo_hidden_base =
+ (word) REVEAL_POINTER(curr_fo -> fo_hidden_base);
GC_bytes_finalized +=
- curr_fo -> fo_object_size
- + sizeof(struct finalizable_object);
- GC_ASSERT(GC_is_marked(GC_base((ptr_t)curr_fo)));
+ curr_fo -> fo_object_size
+ + sizeof(struct finalizable_object);
+ GC_ASSERT(GC_is_marked(GC_base((ptr_t)curr_fo)));
curr_fo = next_fo;
} else {
prev_fo = curr_fo;
@@ -628,17 +628,17 @@ void GC_finalize(void)
if (GC_java_finalization) {
/* make sure we mark everything reachable from objects finalized
using the no_order mark_proc */
- for (curr_fo = GC_finalize_now;
- curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
- real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
- if (!GC_is_marked(real_ptr)) {
- if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
- GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
- }
- if (curr_fo -> fo_mark_proc != GC_unreachable_finalize_mark_proc) {
- GC_set_mark_bit(real_ptr);
- }
- }
+ for (curr_fo = GC_finalize_now;
+ curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
+ real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
+ if (!GC_is_marked(real_ptr)) {
+ if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
+ GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
+ }
+ if (curr_fo -> fo_mark_proc != GC_unreachable_finalize_mark_proc) {
+ GC_set_mark_bit(real_ptr);
+ }
+ }
}
/* now revive finalize-when-unreachable objects reachable from
@@ -647,31 +647,31 @@ void GC_finalize(void)
curr_fo = GC_finalize_now;
prev_fo = 0;
while (curr_fo != 0) {
- next_fo = fo_next(curr_fo);
- if (curr_fo -> fo_mark_proc == GC_unreachable_finalize_mark_proc) {
- real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
- if (!GC_is_marked(real_ptr)) {
- GC_set_mark_bit(real_ptr);
- } else {
- if (prev_fo == 0)
- GC_finalize_now = next_fo;
- else
- fo_set_next(prev_fo, next_fo);
+ next_fo = fo_next(curr_fo);
+ if (curr_fo -> fo_mark_proc == GC_unreachable_finalize_mark_proc) {
+ real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
+ if (!GC_is_marked(real_ptr)) {
+ GC_set_mark_bit(real_ptr);
+ } else {
+ if (prev_fo == 0)
+ GC_finalize_now = next_fo;
+ else
+ fo_set_next(prev_fo, next_fo);
curr_fo -> fo_hidden_base =
- (word) HIDE_POINTER(curr_fo -> fo_hidden_base);
+ (word) HIDE_POINTER(curr_fo -> fo_hidden_base);
GC_bytes_finalized -=
- curr_fo -> fo_object_size + sizeof(struct finalizable_object);
-
- i = HASH2(real_ptr, log_fo_table_size);
- fo_set_next (curr_fo, fo_head[i]);
- GC_fo_entries++;
- fo_head[i] = curr_fo;
- curr_fo = prev_fo;
- }
- }
- prev_fo = curr_fo;
- curr_fo = next_fo;
+ curr_fo -> fo_object_size + sizeof(struct finalizable_object);
+
+ i = HASH2(real_ptr, log_fo_table_size);
+ fo_set_next (curr_fo, fo_head[i]);
+ GC_fo_entries++;
+ fo_head[i] = curr_fo;
+ curr_fo = prev_fo;
+ }
+ }
+ prev_fo = curr_fo;
+ curr_fo = next_fo;
}
}
}
@@ -699,8 +699,8 @@ void GC_finalize(void)
}
}
if (GC_fail_count) {
- /* Don't prevent running finalizers if there has been an allocation */
- /* failure recently. */
+ /* Don't prevent running finalizers if there has been an allocation */
+ /* failure recently. */
# ifdef THREADS
GC_reset_finalizer_nested();
# else
@@ -711,14 +711,14 @@ void GC_finalize(void)
#ifndef JAVA_FINALIZATION_NOT_NEEDED
-/* Enqueue all remaining finalizers to be run - Assumes lock is held. */
+/* Enqueue all remaining finalizers to be run - Assumes lock is held. */
STATIC void GC_enqueue_all_finalizers(void)
{
struct finalizable_object * curr_fo, * prev_fo, * next_fo;
ptr_t real_ptr;
register int i;
int fo_size;
-
+
fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
GC_bytes_finalized = 0;
for (i = 0; i < fo_size; i++) {
@@ -728,7 +728,7 @@ STATIC void GC_enqueue_all_finalizers(void)
real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
GC_set_mark_bit(real_ptr);
-
+
/* Delete from hash table */
next_fo = fo_next(curr_fo);
if (prev_fo == 0) {
@@ -738,17 +738,17 @@ STATIC void GC_enqueue_all_finalizers(void)
}
GC_fo_entries--;
- /* Add to list of objects awaiting finalization. */
+ /* Add to list of objects awaiting finalization. */
fo_set_next(curr_fo, GC_finalize_now);
GC_finalize_now = curr_fo;
- /* unhide object pointer so any future collections will */
- /* see it. */
- curr_fo -> fo_hidden_base =
- (word) REVEAL_POINTER(curr_fo -> fo_hidden_base);
+ /* unhide object pointer so any future collections will */
+ /* see it. */
+ curr_fo -> fo_hidden_base =
+ (word) REVEAL_POINTER(curr_fo -> fo_hidden_base);
GC_bytes_finalized +=
- curr_fo -> fo_object_size + sizeof(struct finalizable_object);
+ curr_fo -> fo_object_size + sizeof(struct finalizable_object);
curr_fo = next_fo;
}
}
@@ -756,8 +756,8 @@ STATIC void GC_enqueue_all_finalizers(void)
return;
}
-/* Invoke all remaining finalizers that haven't yet been run.
- * This is needed for strict compliance with the Java standard,
+/* Invoke all remaining finalizers that haven't yet been run.
+ * This is needed for strict compliance with the Java standard,
* which can make the runtime guarantee that all finalizers are run.
* Unfortunately, the Java standard implies we have to keep running
* finalizers until there are no more left, a potential infinite loop.
@@ -767,8 +767,8 @@ STATIC void GC_enqueue_all_finalizers(void)
* may have been finalized when these finalizers are run.
* Finalizers run at this point must be prepared to deal with a
* mostly broken world.
- * This routine is externally callable, so is called without
- * the allocation lock.
+ * This routine is externally callable, so is called without
+ * the allocation lock.
*/
GC_API void GC_CALL GC_finalize_all(void)
{
@@ -779,73 +779,73 @@ GC_API void GC_CALL GC_finalize_all(void)
GC_enqueue_all_finalizers();
UNLOCK();
GC_invoke_finalizers();
- /* Running the finalizers in this thread is arguably not a good */
- /* idea when we should be notifying another thread to run them. */
- /* But otherwise we don't have a great way to wait for them to */
- /* run. */
+ /* Running the finalizers in this thread is arguably not a good */
+ /* idea when we should be notifying another thread to run them. */
+ /* But otherwise we don't have a great way to wait for them to */
+ /* run. */
LOCK();
}
UNLOCK();
}
#endif
-/* Returns true if it is worth calling GC_invoke_finalizers. (Useful if */
-/* finalizers can only be called from some kind of `safe state' and */
-/* getting into that safe state is expensive.) */
+/* Returns true if it is worth calling GC_invoke_finalizers. (Useful if */
+/* finalizers can only be called from some kind of `safe state' and */
+/* getting into that safe state is expensive.) */
GC_API int GC_CALL GC_should_invoke_finalizers(void)
{
return GC_finalize_now != 0;
}
-/* Invoke finalizers for all objects that are ready to be finalized. */
-/* Should be called without allocation lock. */
+/* Invoke finalizers for all objects that are ready to be finalized. */
+/* Should be called without allocation lock. */
GC_API int GC_CALL GC_invoke_finalizers(void)
{
struct finalizable_object * curr_fo;
int count = 0;
word bytes_freed_before = 0; /* initialized to prevent warning. */
DCL_LOCK_STATE;
-
+
while (GC_finalize_now != 0) {
-# ifdef THREADS
- LOCK();
-# endif
- if (count == 0) {
- bytes_freed_before = GC_bytes_freed;
- /* Don't do this outside, since we need the lock. */
- }
- curr_fo = GC_finalize_now;
-# ifdef THREADS
- if (curr_fo != 0) GC_finalize_now = fo_next(curr_fo);
- UNLOCK();
- if (curr_fo == 0) break;
-# else
- GC_finalize_now = fo_next(curr_fo);
-# endif
- fo_set_next(curr_fo, 0);
- (*(curr_fo -> fo_fn))((ptr_t)(curr_fo -> fo_hidden_base),
- curr_fo -> fo_client_data);
- curr_fo -> fo_client_data = 0;
- ++count;
-# ifdef UNDEFINED
- /* This is probably a bad idea. It throws off accounting if */
- /* nearly all objects are finalizable. O.w. it shouldn't */
- /* matter. */
- GC_free((void *)curr_fo);
-# endif
+# ifdef THREADS
+ LOCK();
+# endif
+ if (count == 0) {
+ bytes_freed_before = GC_bytes_freed;
+ /* Don't do this outside, since we need the lock. */
+ }
+ curr_fo = GC_finalize_now;
+# ifdef THREADS
+ if (curr_fo != 0) GC_finalize_now = fo_next(curr_fo);
+ UNLOCK();
+ if (curr_fo == 0) break;
+# else
+ GC_finalize_now = fo_next(curr_fo);
+# endif
+ fo_set_next(curr_fo, 0);
+ (*(curr_fo -> fo_fn))((ptr_t)(curr_fo -> fo_hidden_base),
+ curr_fo -> fo_client_data);
+ curr_fo -> fo_client_data = 0;
+ ++count;
+# ifdef UNDEFINED
+ /* This is probably a bad idea. It throws off accounting if */
+ /* nearly all objects are finalizable. O.w. it shouldn't */
+ /* matter. */
+ GC_free((void *)curr_fo);
+# endif
}
/* bytes_freed_before is initialized whenever count != 0 */
if (count != 0 && bytes_freed_before != GC_bytes_freed) {
LOCK();
- GC_finalizer_bytes_freed += (GC_bytes_freed - bytes_freed_before);
- UNLOCK();
+ GC_finalizer_bytes_freed += (GC_bytes_freed - bytes_freed_before);
+ UNLOCK();
}
return count;
}
-/* All accesses to it should be synchronized to avoid data races. */
+/* All accesses to it should be synchronized to avoid data races. */
GC_finalizer_notifier_proc GC_finalizer_notifier =
- (GC_finalizer_notifier_proc)0;
+ (GC_finalizer_notifier_proc)0;
static GC_word last_finalizer_notification = 0;
@@ -853,39 +853,39 @@ void GC_notify_or_invoke_finalizers(void)
{
GC_finalizer_notifier_proc notifier_fn = 0;
# if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
- static word last_back_trace_gc_no = 1; /* Skip first one. */
+ static word last_back_trace_gc_no = 1; /* Skip first one. */
# elif defined(THREADS)
- /* Quick check (while unlocked) for an empty finalization queue. */
+ /* Quick check (while unlocked) for an empty finalization queue. */
if (GC_finalize_now == 0) return;
# endif
LOCK();
/* This is a convenient place to generate backtraces if appropriate, */
- /* since that code is not callable with the allocation lock. */
+ /* since that code is not callable with the allocation lock. */
# if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
if (GC_gc_no > last_back_trace_gc_no) {
-# ifdef KEEP_BACK_PTRS
- long i;
- /* Stops when GC_gc_no wraps; that's OK. */
- last_back_trace_gc_no = (word)(-1); /* disable others. */
- for (i = 0; i < GC_backtraces; ++i) {
- /* FIXME: This tolerates concurrent heap mutation, */
- /* which may cause occasional mysterious results. */
- /* We need to release the GC lock, since GC_print_callers */
- /* acquires it. It probably shouldn't. */
- UNLOCK();
- GC_generate_random_backtrace_no_gc();
- LOCK();
- }
- last_back_trace_gc_no = GC_gc_no;
-# endif
+# ifdef KEEP_BACK_PTRS
+ long i;
+ /* Stops when GC_gc_no wraps; that's OK. */
+ last_back_trace_gc_no = (word)(-1); /* disable others. */
+ for (i = 0; i < GC_backtraces; ++i) {
+ /* FIXME: This tolerates concurrent heap mutation, */
+ /* which may cause occasional mysterious results. */
+ /* We need to release the GC lock, since GC_print_callers */
+ /* acquires it. It probably shouldn't. */
+ UNLOCK();
+ GC_generate_random_backtrace_no_gc();
+ LOCK();
+ }
+ last_back_trace_gc_no = GC_gc_no;
+# endif
# ifdef MAKE_BACK_GRAPH
- if (GC_print_back_height) {
- UNLOCK();
+ if (GC_print_back_height) {
+ UNLOCK();
GC_print_back_graph_stats();
- LOCK();
- }
-# endif
+ LOCK();
+ }
+# endif
}
# endif
if (GC_finalize_now == 0) {
@@ -898,31 +898,31 @@ void GC_notify_or_invoke_finalizers(void)
UNLOCK();
/* Skip GC_invoke_finalizers() if nested */
if (pnested != NULL) {
- (void) GC_invoke_finalizers();
- *pnested = 0; /* Reset since no more finalizers. */
-# ifndef THREADS
- GC_ASSERT(GC_finalize_now == 0);
-# endif /* Otherwise GC can run concurrently and add more */
+ (void) GC_invoke_finalizers();
+ *pnested = 0; /* Reset since no more finalizers. */
+# ifndef THREADS
+ GC_ASSERT(GC_finalize_now == 0);
+# endif /* Otherwise GC can run concurrently and add more */
}
return;
}
- /* These variables require synchronization to avoid data races. */
+ /* These variables require synchronization to avoid data races. */
if (last_finalizer_notification != GC_gc_no) {
- last_finalizer_notification = GC_gc_no;
- notifier_fn = GC_finalizer_notifier;
+ last_finalizer_notification = GC_gc_no;
+ notifier_fn = GC_finalizer_notifier;
}
UNLOCK();
if (notifier_fn != 0)
- (*notifier_fn)(); /* Invoke the notifier */
+ (*notifier_fn)(); /* Invoke the notifier */
}
GC_API void * GC_CALL GC_call_with_alloc_lock(GC_fn_type fn,
- void * client_data)
+ void * client_data)
{
void * result;
DCL_LOCK_STATE;
-
+
# ifdef THREADS
LOCK();
/* FIXME - This looks wrong!! */
@@ -946,12 +946,12 @@ void GC_print_finalization_stats(void)
unsigned long ready = 0;
GC_log_printf(
- "%lu finalization table entries; %lu disappearing links alive\n",
- (unsigned long)GC_fo_entries, (unsigned long)GC_dl_entries);
+ "%lu finalization table entries; %lu disappearing links alive\n",
+ (unsigned long)GC_fo_entries, (unsigned long)GC_dl_entries);
for (; 0 != fo; fo = fo_next(fo)) ++ready;
GC_log_printf("%lu objects are eligible for immediate finalization; "
- "%ld links cleared\n",
- ready, (long)GC_old_dl_entries - (long)GC_dl_entries);
+ "%ld links cleared\n",
+ ready, (long)GC_old_dl_entries - (long)GC_dl_entries);
}
#endif /* SMALL_CONFIG */
diff --git a/gc_cpp.cc b/gc_cpp.cc
index 4d001309..381a3144 100644
--- a/gc_cpp.cc
+++ b/gc_cpp.cc
@@ -1,15 +1,16 @@
-/*************************************************************************
-Copyright (c) 1994 by Xerox Corporation. All rights reserved.
-
-THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
-OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
-
- Last modified on Sat Nov 19 19:31:14 PST 1994 by ellis
- on Sat Jun 8 15:10:00 PST 1994 by boehm
-
-Permission is hereby granted to copy this code for any purpose,
-provided the above notices are retained on all copies.
+/*
+ * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Last modified on Sat Nov 19 19:31:14 PST 1994 by ellis
+ *
+ * Permission is hereby granted to copy this code for any purpose,
+ * provided the above notices are retained on all copies.
+ */
+/*************************************************************************
This implementation module for gc_c++.h provides an implementation of
the global operators "new" and "delete" that calls the Boehm
allocator. All objects allocated by this implementation will be
@@ -35,15 +36,15 @@ Authors: John R. Ellis and Jesse Hull
void* operator new( size_t size ) {
return GC_MALLOC_UNCOLLECTABLE( size );}
-
+
void operator delete( void* obj ) {
GC_FREE( obj );}
-
+
#ifdef GC_OPERATOR_NEW_ARRAY
void* operator new[]( size_t size ) {
return GC_MALLOC_UNCOLLECTABLE( size );}
-
+
void operator delete[]( void* obj ) {
GC_FREE( obj );}
@@ -53,14 +54,14 @@ void operator delete[]( void* obj ) {
// This new operator is used by VC++ in case of Debug builds !
void* operator new( size_t size,
- int ,//nBlockUse,
- const char * szFileName,
- int nLine )
+ int ,//nBlockUse,
+ const char * szFileName,
+ int nLine )
{
#ifndef GC_DEBUG
- return GC_malloc_uncollectable( size );
+ return GC_malloc_uncollectable( size );
#else
- return GC_debug_malloc_uncollectable(size, szFileName, nLine);
+ return GC_debug_malloc_uncollectable(size, szFileName, nLine);
#endif
}
@@ -73,4 +74,3 @@ void* operator new[](size_t size, int nBlockUse, const char* szFileName, int nLi
#endif
#endif /* _MSC_VER */
-
diff --git a/gc_dlopen.c b/gc_dlopen.c
index 69abfbe1..c0ca658a 100644
--- a/gc_dlopen.c
+++ b/gc_dlopen.c
@@ -39,28 +39,28 @@
GC_bool GC_collection_in_progress(void);
- /* Make sure we're not in the middle of a collection, and make */
- /* sure we don't start any. Returns previous value of GC_dont_gc. */
- /* This is invoked prior to a dlopen call to avoid synchronization */
- /* issues. We can't just acquire the allocation lock, since startup */
- /* code in dlopen may try to allocate. */
- /* This solution risks heap growth in the presence of many dlopen */
- /* calls in either a multithreaded environment, or if the library */
- /* initialization code allocates substantial amounts of GC'ed memory. */
- /* But I don't know of a better solution. */
+ /* Make sure we're not in the middle of a collection, and make */
+ /* sure we don't start any. Returns previous value of GC_dont_gc. */
+ /* This is invoked prior to a dlopen call to avoid synchronization */
+ /* issues. We can't just acquire the allocation lock, since startup */
+ /* code in dlopen may try to allocate. */
+ /* This solution risks heap growth in the presence of many dlopen */
+ /* calls in either a multithreaded environment, or if the library */
+ /* initialization code allocates substantial amounts of GC'ed memory. */
+ /* But I don't know of a better solution. */
static void disable_gc_for_dlopen(void)
{
LOCK();
while (GC_incremental && GC_collection_in_progress()) {
- GC_collect_a_little_inner(1000);
+ GC_collect_a_little_inner(1000);
}
++GC_dont_gc;
UNLOCK();
}
- /* Redefine dlopen to guarantee mutual exclusion with */
- /* GC_register_dynamic_libraries. */
- /* Should probably happen for other operating systems, too. */
+ /* Redefine dlopen to guarantee mutual exclusion with */
+ /* GC_register_dynamic_libraries. */
+ /* Should probably happen for other operating systems, too. */
#include <dlfcn.h>
@@ -75,7 +75,7 @@
GC_API void * WRAP_FUNC(dlopen)(const char *path, int mode)
{
void * result;
-
+
# ifndef USE_PROC_FOR_LIBRARIES
disable_gc_for_dlopen();
# endif
@@ -86,6 +86,3 @@ GC_API void * WRAP_FUNC(dlopen)(const char *path, int mode)
return(result);
}
# endif /* GC_PTHREADS || GC_SOLARIS_THREADS ... */
-
-
-
diff --git a/gcj_mlc.c b/gcj_mlc.c
index 7a1a0462..4eedfe24 100644
--- a/gcj_mlc.c
+++ b/gcj_mlc.c
@@ -42,19 +42,19 @@
GC_bool GC_gcj_malloc_initialized = FALSE;
-int GC_gcj_kind; /* Object kind for objects with descriptors */
- /* in "vtable". */
-int GC_gcj_debug_kind; /* The kind of objects that is always marked */
- /* with a mark proc call. */
+int GC_gcj_kind; /* Object kind for objects with descriptors */
+ /* in "vtable". */
+int GC_gcj_debug_kind; /* The kind of objects that is always marked */
+ /* with a mark proc call. */
ptr_t * GC_gcjobjfreelist;
ptr_t * GC_gcjdebugobjfreelist;
/*ARGSUSED*/
STATIC struct GC_ms_entry * GC_gcj_fake_mark_proc(word * addr,
- struct GC_ms_entry *mark_stack_ptr,
- struct GC_ms_entry *mark_stack_limit,
- word env)
+ struct GC_ms_entry *mark_stack_ptr,
+ struct GC_ms_entry *mark_stack_limit,
+ word env)
{
ABORT("No client gcj mark proc is specified");
return mark_stack_ptr;
@@ -62,15 +62,15 @@ STATIC struct GC_ms_entry * GC_gcj_fake_mark_proc(word * addr,
/* Caller does not hold allocation lock. */
GC_API void GC_CALL GC_init_gcj_malloc(int mp_index,
- void * /* really GC_mark_proc */mp)
+ void * /* really GC_mark_proc */mp)
{
GC_bool ignore_gcj_info;
DCL_LOCK_STATE;
- if (mp == 0) /* In case GC_DS_PROC is unused. */
+ if (mp == 0) /* In case GC_DS_PROC is unused. */
mp = (void *)(word)GC_gcj_fake_mark_proc;
- GC_init(); /* In case it's not already done. */
+ GC_init(); /* In case it's not already done. */
LOCK();
if (GC_gcj_malloc_initialized) {
UNLOCK();
@@ -89,34 +89,34 @@ GC_API void GC_CALL GC_init_gcj_malloc(int mp_index,
GC_ASSERT(GC_mark_procs[mp_index] == (GC_mark_proc)0); /* unused */
GC_mark_procs[mp_index] = (GC_mark_proc)(word)mp;
if ((unsigned)mp_index >= GC_n_mark_procs)
- ABORT("GC_init_gcj_malloc: bad index");
+ ABORT("GC_init_gcj_malloc: bad index");
/* Set up object kind gcj-style indirect descriptor. */
GC_gcjobjfreelist = (ptr_t *)GC_new_free_list_inner();
if (ignore_gcj_info) {
- /* Use a simple length-based descriptor, thus forcing a fully */
- /* conservative scan. */
- GC_gcj_kind = GC_new_kind_inner((void **)GC_gcjobjfreelist,
- (0 | GC_DS_LENGTH),
- TRUE, TRUE);
+ /* Use a simple length-based descriptor, thus forcing a fully */
+ /* conservative scan. */
+ GC_gcj_kind = GC_new_kind_inner((void **)GC_gcjobjfreelist,
+ (0 | GC_DS_LENGTH),
+ TRUE, TRUE);
} else {
- GC_gcj_kind = GC_new_kind_inner(
- (void **)GC_gcjobjfreelist,
- (((word)(-(signed_word)MARK_DESCR_OFFSET
- - GC_INDIR_PER_OBJ_BIAS))
- | GC_DS_PER_OBJECT),
- FALSE, TRUE);
+ GC_gcj_kind = GC_new_kind_inner(
+ (void **)GC_gcjobjfreelist,
+ (((word)(-(signed_word)MARK_DESCR_OFFSET
+ - GC_INDIR_PER_OBJ_BIAS))
+ | GC_DS_PER_OBJECT),
+ FALSE, TRUE);
}
- /* Set up object kind for objects that require mark proc call. */
+ /* Set up object kind for objects that require mark proc call. */
if (ignore_gcj_info) {
- GC_gcj_debug_kind = GC_gcj_kind;
+ GC_gcj_debug_kind = GC_gcj_kind;
GC_gcjdebugobjfreelist = GC_gcjobjfreelist;
} else {
GC_gcjdebugobjfreelist = (ptr_t *)GC_new_free_list_inner();
- GC_gcj_debug_kind = GC_new_kind_inner(
- (void **)GC_gcjdebugobjfreelist,
- GC_MAKE_PROC(mp_index,
- 1 /* allocated with debug info */),
- FALSE, TRUE);
+ GC_gcj_debug_kind = GC_new_kind_inner(
+ (void **)GC_gcjdebugobjfreelist,
+ GC_MAKE_PROC(mp_index,
+ 1 /* allocated with debug info */),
+ FALSE, TRUE);
}
UNLOCK();
}
@@ -125,17 +125,17 @@ void * GC_clear_stack(void *);
#define GENERAL_MALLOC(lb,k) \
GC_clear_stack(GC_generic_malloc_inner((word)lb, k))
-
+
#define GENERAL_MALLOC_IOP(lb,k) \
GC_clear_stack(GC_generic_malloc_inner_ignore_off_page(lb, k))
-/* We need a mechanism to release the lock and invoke finalizers. */
-/* We don't really have an opportunity to do this on a rarely executed */
-/* path on which the lock is not held. Thus we check at a */
-/* rarely executed point at which it is safe to release the lock. */
-/* We do this even where we could just call GC_INVOKE_FINALIZERS, */
-/* since it's probably cheaper and certainly more uniform. */
-/* FIXME - Consider doing the same elsewhere? */
+/* We need a mechanism to release the lock and invoke finalizers. */
+/* We don't really have an opportunity to do this on a rarely executed */
+/* path on which the lock is not held. Thus we check at a */
+/* rarely executed point at which it is safe to release the lock. */
+/* We do this even where we could just call GC_INVOKE_FINALIZERS, */
+/* since it's probably cheaper and certainly more uniform. */
+/* FIXME - Consider doing the same elsewhere? */
static void maybe_finalize(void)
{
static word last_finalized_no = 0;
@@ -148,14 +148,14 @@ static void maybe_finalize(void)
last_finalized_no = GC_gc_no;
}
-/* Allocate an object, clear it, and store the pointer to the */
-/* type structure (vtable in gcj). */
+/* Allocate an object, clear it, and store the pointer to the */
+/* type structure (vtable in gcj). */
/* This adds a byte at the end of the object if GC_malloc would.*/
#ifdef THREAD_LOCAL_ALLOC
void * GC_core_gcj_malloc(size_t lb, void * ptr_to_struct_containing_descr)
#else
GC_API void * GC_CALL GC_gcj_malloc(size_t lb,
- void * ptr_to_struct_containing_descr)
+ void * ptr_to_struct_containing_descr)
#endif
{
ptr_t op;
@@ -164,59 +164,59 @@ static void maybe_finalize(void)
DCL_LOCK_STATE;
if(SMALL_OBJ(lb)) {
- lg = GC_size_map[lb];
- opp = &(GC_gcjobjfreelist[lg]);
- LOCK();
- op = *opp;
+ lg = GC_size_map[lb];
+ opp = &(GC_gcjobjfreelist[lg]);
+ LOCK();
+ op = *opp;
if(EXPECT(op == 0, 0)) {
- maybe_finalize();
+ maybe_finalize();
op = (ptr_t)GENERAL_MALLOC((word)lb, GC_gcj_kind);
- if (0 == op) {
- GC_oom_func oom_fn = GC_oom_fn;
- UNLOCK();
- return((*oom_fn)(lb));
- }
+ if (0 == op) {
+ GC_oom_func oom_fn = GC_oom_fn;
+ UNLOCK();
+ return((*oom_fn)(lb));
+ }
} else {
*opp = obj_link(op);
GC_bytes_allocd += GRANULES_TO_BYTES(lg);
}
- *(void **)op = ptr_to_struct_containing_descr;
- GC_ASSERT(((void **)op)[1] == 0);
- UNLOCK();
+ *(void **)op = ptr_to_struct_containing_descr;
+ GC_ASSERT(((void **)op)[1] == 0);
+ UNLOCK();
} else {
- LOCK();
- maybe_finalize();
- op = (ptr_t)GENERAL_MALLOC((word)lb, GC_gcj_kind);
- if (0 == op) {
- GC_oom_func oom_fn = GC_oom_fn;
- UNLOCK();
- return((*oom_fn)(lb));
- }
- *(void **)op = ptr_to_struct_containing_descr;
- UNLOCK();
+ LOCK();
+ maybe_finalize();
+ op = (ptr_t)GENERAL_MALLOC((word)lb, GC_gcj_kind);
+ if (0 == op) {
+ GC_oom_func oom_fn = GC_oom_fn;
+ UNLOCK();
+ return((*oom_fn)(lb));
+ }
+ *(void **)op = ptr_to_struct_containing_descr;
+ UNLOCK();
}
return((void *) op);
}
void GC_start_debugging(void);
-/* Similar to GC_gcj_malloc, but add debug info. This is allocated */
-/* with GC_gcj_debug_kind. */
+/* Similar to GC_gcj_malloc, but add debug info. This is allocated */
+/* with GC_gcj_debug_kind. */
GC_API void * GC_CALL GC_debug_gcj_malloc(size_t lb,
- void * ptr_to_struct_containing_descr, GC_EXTRA_PARAMS)
+ void * ptr_to_struct_containing_descr, GC_EXTRA_PARAMS)
{
void * result;
- /* We're careful to avoid extra calls, which could */
- /* confuse the backtrace. */
+ /* We're careful to avoid extra calls, which could */
+ /* confuse the backtrace. */
LOCK();
maybe_finalize();
result = GC_generic_malloc_inner(lb + DEBUG_BYTES, GC_gcj_debug_kind);
if (result == 0) {
- GC_oom_func oom_fn = GC_oom_fn;
- UNLOCK();
+ GC_oom_func oom_fn = GC_oom_fn;
+ UNLOCK();
GC_err_printf("GC_debug_gcj_malloc(%ld, %p) returning NIL (",
- (unsigned long)lb, ptr_to_struct_containing_descr);
+ (unsigned long)lb, ptr_to_struct_containing_descr);
GC_err_puts(s);
GC_err_printf(":%d)\n", i);
return((*oom_fn)(lb));
@@ -224,15 +224,15 @@ GC_API void * GC_CALL GC_debug_gcj_malloc(size_t lb,
*((void **)((ptr_t)result + sizeof(oh))) = ptr_to_struct_containing_descr;
UNLOCK();
if (!GC_debugging_started) {
- GC_start_debugging();
+ GC_start_debugging();
}
ADD_CALL_CHAIN(result, ra);
return (GC_store_debug_info(result, (word)lb, s, (word)i));
}
-/* There is no THREAD_LOCAL_ALLOC for GC_gcj_malloc_ignore_off_page(). */
+/* There is no THREAD_LOCAL_ALLOC for GC_gcj_malloc_ignore_off_page(). */
GC_API void * GC_CALL GC_gcj_malloc_ignore_off_page(size_t lb,
- void * ptr_to_struct_containing_descr)
+ void * ptr_to_struct_containing_descr)
{
ptr_t op;
ptr_t * opp;
@@ -240,30 +240,30 @@ GC_API void * GC_CALL GC_gcj_malloc_ignore_off_page(size_t lb,
DCL_LOCK_STATE;
if(SMALL_OBJ(lb)) {
- lg = GC_size_map[lb];
- opp = &(GC_gcjobjfreelist[lg]);
- LOCK();
+ lg = GC_size_map[lb];
+ opp = &(GC_gcjobjfreelist[lg]);
+ LOCK();
if( (op = *opp) == 0 ) {
- maybe_finalize();
+ maybe_finalize();
op = (ptr_t)GENERAL_MALLOC_IOP(lb, GC_gcj_kind);
- if (0 == op) {
- GC_oom_func oom_fn = GC_oom_fn;
- UNLOCK();
- return((*oom_fn)(lb));
- }
+ if (0 == op) {
+ GC_oom_func oom_fn = GC_oom_fn;
+ UNLOCK();
+ return((*oom_fn)(lb));
+ }
} else {
*opp = obj_link(op);
GC_bytes_allocd += GRANULES_TO_BYTES(lg);
}
} else {
- LOCK();
- maybe_finalize();
+ LOCK();
+ maybe_finalize();
op = (ptr_t)GENERAL_MALLOC_IOP(lb, GC_gcj_kind);
- if (0 == op) {
- GC_oom_func oom_fn = GC_oom_fn;
- UNLOCK();
- return((*oom_fn)(lb));
- }
+ if (0 == op) {
+ GC_oom_func oom_fn = GC_oom_fn;
+ UNLOCK();
+ return((*oom_fn)(lb));
+ }
}
*(void **)op = ptr_to_struct_containing_descr;
UNLOCK();
@@ -273,6 +273,6 @@ GC_API void * GC_CALL GC_gcj_malloc_ignore_off_page(size_t lb,
#else
extern int GC_quiet;
- /* ANSI C doesn't allow translation units to be empty. */
+ /* ANSI C doesn't allow translation units to be empty. */
#endif /* GC_GCJ_SUPPORT */
diff --git a/headers.c b/headers.c
index 774cf8d4..3cf8ad4b 100644
--- a/headers.c
+++ b/headers.c
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996 by Silicon Graphics. All rights reserved.
@@ -12,7 +12,7 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-
+
/*
* This implements:
* 1. allocation of heap block headers
@@ -21,36 +21,36 @@
* Access speed is crucial. We implement an index structure based on a 2
* level tree.
*/
-
+
# include "private/gc_priv.h"
STATIC bottom_index * GC_all_bottom_indices = 0;
- /* Pointer to first (lowest addr) */
- /* bottom_index. */
+ /* Pointer to first (lowest addr) */
+ /* bottom_index. */
STATIC bottom_index * GC_all_bottom_indices_end = 0;
- /* Pointer to last (highest addr) */
- /* bottom_index. */
-
+ /* Pointer to last (highest addr) */
+ /* bottom_index. */
+
/* Non-macro version of header location routine */
hdr * GC_find_header(ptr_t h)
{
# ifdef HASH_TL
- hdr * result;
- GET_HDR(h, result);
- return(result);
+ hdr * result;
+ GET_HDR(h, result);
+ return(result);
# else
- return(HDR_INNER(h));
+ return(HDR_INNER(h));
# endif
}
-/* Handle a header cache miss. Returns a pointer to the */
-/* header corresponding to p, if p can possibly be a valid */
-/* object pointer, and 0 otherwise. */
-/* GUARANTEED to return 0 for a pointer past the first page */
-/* of an object unless both GC_all_interior_pointers is set */
-/* and p is in fact a valid object pointer. */
-/* Never returns a pointer to a free hblk. */
+/* Handle a header cache miss. Returns a pointer to the */
+/* header corresponding to p, if p can possibly be a valid */
+/* object pointer, and 0 otherwise. */
+/* GUARANTEED to return 0 for a pointer past the first page */
+/* of an object unless both GC_all_interior_pointers is set */
+/* and p is in fact a valid object pointer. */
+/* Never returns a pointer to a free hblk. */
#ifdef PRINT_BLACK_LIST
hdr * GC_header_cache_miss(ptr_t p, hdr_cache_entry *hce, ptr_t source)
#else
@@ -63,34 +63,34 @@ hdr * GC_find_header(ptr_t h)
if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
if (GC_all_interior_pointers) {
if (hhdr != 0) {
- ptr_t current = p;
-
- current = (ptr_t)HBLKPTR(current);
- do {
- current = current - HBLKSIZE*(word)hhdr;
- hhdr = HDR(current);
- } while(IS_FORWARDING_ADDR_OR_NIL(hhdr));
- /* current points to near the start of the large object */
- if (hhdr -> hb_flags & IGNORE_OFF_PAGE)
- return 0;
- if (HBLK_IS_FREE(hhdr)
- || p - current >= (ptrdiff_t)(hhdr->hb_sz)) {
- GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
- /* Pointer past the end of the block */
- return 0;
- }
+ ptr_t current = p;
+
+ current = (ptr_t)HBLKPTR(current);
+ do {
+ current = current - HBLKSIZE*(word)hhdr;
+ hhdr = HDR(current);
+ } while(IS_FORWARDING_ADDR_OR_NIL(hhdr));
+ /* current points to near the start of the large object */
+ if (hhdr -> hb_flags & IGNORE_OFF_PAGE)
+ return 0;
+ if (HBLK_IS_FREE(hhdr)
+ || p - current >= (ptrdiff_t)(hhdr->hb_sz)) {
+ GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
+ /* Pointer past the end of the block */
+ return 0;
+ }
} else {
- GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
- /* And return zero: */
+ GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
+ /* And return zero: */
}
GC_ASSERT(hhdr == 0 || !HBLK_IS_FREE(hhdr));
return hhdr;
- /* Pointers past the first page are probably too rare */
- /* to add them to the cache. We don't. */
- /* And correctness relies on the fact that we don't. */
+ /* Pointers past the first page are probably too rare */
+ /* to add them to the cache. We don't. */
+ /* And correctness relies on the fact that we don't. */
} else {
if (hhdr == 0) {
- GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
+ GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
}
return 0;
}
@@ -100,20 +100,20 @@ hdr * GC_find_header(ptr_t h)
return 0;
} else {
hce -> block_addr = (word)(p) >> LOG_HBLKSIZE;
- hce -> hce_hdr = hhdr;
+ hce -> hce_hdr = hhdr;
return hhdr;
}
- }
+ }
}
-
+
/* Routines to dynamically allocate collector data structures that will */
-/* never be freed. */
-
+/* never be freed. */
+
static ptr_t scratch_free_ptr = 0;
-
+
/* GC_scratch_last_end_ptr is end point of last obtained scratch area. */
-/* GC_scratch_end_ptr is end point of current scratch area. */
-
+/* GC_scratch_end_ptr is end point of current scratch area. */
+
ptr_t GC_scratch_alloc(size_t bytes)
{
register ptr_t result = scratch_free_ptr;
@@ -126,34 +126,34 @@ ptr_t GC_scratch_alloc(size_t bytes)
}
{
word bytes_to_get = MINHINCR * HBLKSIZE;
-
+
if (bytes_to_get <= bytes) {
/* Undo the damage, and get memory directly */
- bytes_to_get = bytes;
-# ifdef USE_MMAP
- bytes_to_get += GC_page_size - 1;
- bytes_to_get &= ~(GC_page_size - 1);
-# endif
- result = (ptr_t)GET_MEM(bytes_to_get);
- GC_add_to_our_memory(result, bytes_to_get);
+ bytes_to_get = bytes;
+# ifdef USE_MMAP
+ bytes_to_get += GC_page_size - 1;
+ bytes_to_get &= ~(GC_page_size - 1);
+# endif
+ result = (ptr_t)GET_MEM(bytes_to_get);
+ GC_add_to_our_memory(result, bytes_to_get);
scratch_free_ptr -= bytes;
- GC_scratch_last_end_ptr = result + bytes;
+ GC_scratch_last_end_ptr = result + bytes;
return(result);
}
result = (ptr_t)GET_MEM(bytes_to_get);
GC_add_to_our_memory(result, bytes_to_get);
if (result == 0) {
- if (GC_print_stats)
+ if (GC_print_stats)
GC_printf("Out of memory - trying to allocate less\n");
scratch_free_ptr -= bytes;
- bytes_to_get = bytes;
-# ifdef USE_MMAP
- bytes_to_get += GC_page_size - 1;
- bytes_to_get &= ~(GC_page_size - 1);
-# endif
+ bytes_to_get = bytes;
+# ifdef USE_MMAP
+ bytes_to_get += GC_page_size - 1;
+ bytes_to_get &= ~(GC_page_size - 1);
+# endif
result = (ptr_t)GET_MEM(bytes_to_get);
GC_add_to_our_memory(result, bytes_to_get);
- return result;
+ return result;
}
scratch_free_ptr = result;
GC_scratch_end_ptr = scratch_free_ptr + bytes_to_get;
@@ -168,7 +168,7 @@ static hdr * hdr_free_list = 0;
static hdr * alloc_hdr(void)
{
register hdr * result;
-
+
if (hdr_free_list == 0) {
result = (hdr *) GC_scratch_alloc((word)(sizeof(hdr)));
} else {
@@ -188,11 +188,11 @@ static void free_hdr(hdr * hhdr)
word GC_hdr_cache_hits = 0;
word GC_hdr_cache_misses = 0;
#endif
-
+
void GC_init_headers(void)
{
register unsigned i;
-
+
GC_all_nils = (bottom_index *)GC_scratch_alloc((word)sizeof(bottom_index));
BZERO(GC_all_nils, sizeof(bottom_index));
for (i = 0; i < TOP_SZ; i++) {
@@ -201,7 +201,7 @@ void GC_init_headers(void)
}
/* Make sure that there is a bottom level index block for address addr */
-/* Return FALSE on failure. */
+/* Return FALSE on failure. */
static GC_bool get_index(word addr)
{
word hi = (word)(addr) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
@@ -209,11 +209,11 @@ static GC_bool get_index(word addr)
bottom_index * p;
bottom_index ** prev;
bottom_index *pi;
-
+
# ifdef HASH_TL
word i = TL_HASH(hi);
bottom_index * old;
-
+
old = p = GC_top_index[i];
while(p != GC_all_nils) {
if (p -> key == hi) return(TRUE);
@@ -233,35 +233,35 @@ static GC_bool get_index(word addr)
# endif
r -> key = hi;
/* Add it to the list of bottom indices */
- prev = &GC_all_bottom_indices; /* pointer to p */
- pi = 0; /* bottom_index preceding p */
+ prev = &GC_all_bottom_indices; /* pointer to p */
+ pi = 0; /* bottom_index preceding p */
while ((p = *prev) != 0 && p -> key < hi) {
- pi = p;
- prev = &(p -> asc_link);
+ pi = p;
+ prev = &(p -> asc_link);
}
r -> desc_link = pi;
if (0 == p) {
- GC_all_bottom_indices_end = r;
+ GC_all_bottom_indices_end = r;
} else {
- p -> desc_link = r;
+ p -> desc_link = r;
}
r -> asc_link = p;
*prev = r;
return(TRUE);
}
-/* Install a header for block h. */
-/* The header is uninitialized. */
-/* Returns the header or 0 on failure. */
+/* Install a header for block h. */
+/* The header is uninitialized. */
+/* Returns the header or 0 on failure. */
struct hblkhdr * GC_install_header(struct hblk *h)
{
hdr * result;
-
+
if (!get_index((word) h)) return(0);
result = alloc_hdr();
SET_HDR(h, result);
# ifdef USE_MUNMAP
- result -> hb_last_reclaimed = (unsigned short)GC_gc_no;
+ result -> hb_last_reclaimed = (unsigned short)GC_gc_no;
# endif
return(result);
}
@@ -271,7 +271,7 @@ GC_bool GC_install_counts(struct hblk *h, size_t sz/* bytes */)
{
struct hblk * hbp;
word i;
-
+
for (hbp = h; (char *)hbp < (char *)h + sz; hbp += BOTTOM_SZ) {
if (!get_index((word) hbp)) return(FALSE);
}
@@ -287,7 +287,7 @@ GC_bool GC_install_counts(struct hblk *h, size_t sz/* bytes */)
void GC_remove_header(struct hblk *h)
{
hdr ** ha;
-
+
GET_HDR_ADDR(h, ha);
free_hdr(*ha);
*ha = 0;
@@ -297,7 +297,7 @@ void GC_remove_header(struct hblk *h)
void GC_remove_counts(struct hblk *h, size_t sz/* bytes */)
{
register struct hblk * hbp;
-
+
for (hbp = h+1; (char *)hbp < (char *)h + sz; hbp += 1) {
SET_HDR(hbp, 0);
}
@@ -306,19 +306,19 @@ void GC_remove_counts(struct hblk *h, size_t sz/* bytes */)
/* Apply fn to all allocated blocks */
/*VARARGS1*/
void GC_apply_to_all_blocks(void (*fn)(struct hblk *h, word client_data),
- word client_data)
+ word client_data)
{
signed_word j;
bottom_index * index_p;
-
+
for (index_p = GC_all_bottom_indices; index_p != 0;
index_p = index_p -> asc_link) {
for (j = BOTTOM_SZ-1; j >= 0;) {
if (!IS_FORWARDING_ADDR_OR_NIL(index_p->index[j])) {
if (!HBLK_IS_FREE(index_p->index[j])) {
(*fn)(((struct hblk *)
- (((index_p->key << LOG_BOTTOM_SZ) + (word)j)
- << LOG_HBLKSIZE)),
+ (((index_p->key << LOG_BOTTOM_SZ) + (word)j)
+ << LOG_HBLKSIZE)),
client_data);
}
j--;
@@ -331,13 +331,13 @@ void GC_apply_to_all_blocks(void (*fn)(struct hblk *h, word client_data),
}
}
-/* Get the next valid block whose address is at least h */
-/* Return 0 if there is none. */
+/* Get the next valid block whose address is at least h */
+/* Return 0 if there is none. */
struct hblk * GC_next_used_block(struct hblk *h)
{
register bottom_index * bi;
register word j = ((word)h >> LOG_HBLKSIZE) & (BOTTOM_SZ-1);
-
+
GET_BI(h, bi);
if (bi == GC_all_nils) {
register word hi = (word)h >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
@@ -347,14 +347,14 @@ struct hblk * GC_next_used_block(struct hblk *h)
}
while(bi != 0) {
while (j < BOTTOM_SZ) {
- hdr * hhdr = bi -> index[j];
+ hdr * hhdr = bi -> index[j];
if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
j++;
} else {
if (!HBLK_IS_FREE(hhdr)) {
return((struct hblk *)
- (((bi -> key << LOG_BOTTOM_SZ) + j)
- << LOG_HBLKSIZE));
+ (((bi -> key << LOG_BOTTOM_SZ) + j)
+ << LOG_HBLKSIZE));
} else {
j += divHBLKSZ(hhdr -> hb_sz);
}
@@ -366,14 +366,14 @@ struct hblk * GC_next_used_block(struct hblk *h)
return(0);
}
-/* Get the last (highest address) block whose address is */
-/* at most h. Return 0 if there is none. */
-/* Unlike the above, this may return a free block. */
+/* Get the last (highest address) block whose address is */
+/* at most h. Return 0 if there is none. */
+/* Unlike the above, this may return a free block. */
struct hblk * GC_prev_block(struct hblk *h)
{
register bottom_index * bi;
register signed_word j = ((word)h >> LOG_HBLKSIZE) & (BOTTOM_SZ-1);
-
+
GET_BI(h, bi);
if (bi == GC_all_nils) {
register word hi = (word)h >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
@@ -383,15 +383,15 @@ struct hblk * GC_prev_block(struct hblk *h)
}
while(bi != 0) {
while (j >= 0) {
- hdr * hhdr = bi -> index[j];
- if (0 == hhdr) {
- --j;
+ hdr * hhdr = bi -> index[j];
+ if (0 == hhdr) {
+ --j;
} else if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
j -= (signed_word)hhdr;
} else {
return((struct hblk *)
(((bi -> key << LOG_BOTTOM_SZ) + j)
- << LOG_HBLKSIZE));
+ << LOG_HBLKSIZE));
}
}
j = BOTTOM_SZ - 1;
diff --git a/include/gc_allocator.h b/include/gc_allocator.h
index 686c985e..086fac4b 100644
--- a/include/gc_allocator.h
+++ b/include/gc_allocator.h
@@ -79,7 +79,7 @@ GC_DECLARE_PTRFREE(unsigned long);
GC_DECLARE_PTRFREE(float);
GC_DECLARE_PTRFREE(double);
GC_DECLARE_PTRFREE(long double);
-/* The client may want to add others. */
+/* The client may want to add others. */
// In the following GC_Tp is GC_true_type if we are allocating a
// pointer-free object.
@@ -128,8 +128,8 @@ public:
GC_Tp* allocate(size_type GC_n, const void* = 0) {
GC_type_traits<GC_Tp> traits;
return static_cast<GC_Tp *>
- (GC_selective_alloc(GC_n * sizeof(GC_Tp),
- traits.GC_is_ptr_free, false));
+ (GC_selective_alloc(GC_n * sizeof(GC_Tp),
+ traits.GC_is_ptr_free, false));
}
// __p is not permitted to be a null pointer.
@@ -193,7 +193,7 @@ public:
// MSVC++ 6.0 do not support member templates
template <class GC_Tp1>
gc_allocator_ignore_off_page(const gc_allocator_ignore_off_page<GC_Tp1>&)
- throw() {}
+ throw() {}
# endif
~gc_allocator_ignore_off_page() throw() {}
@@ -205,8 +205,8 @@ public:
GC_Tp* allocate(size_type GC_n, const void* = 0) {
GC_type_traits<GC_Tp> traits;
return static_cast<GC_Tp *>
- (GC_selective_alloc(GC_n * sizeof(GC_Tp),
- traits.GC_is_ptr_free, true));
+ (GC_selective_alloc(GC_n * sizeof(GC_Tp),
+ traits.GC_is_ptr_free, true));
}
// __p is not permitted to be a null pointer.
@@ -272,7 +272,7 @@ public:
# if !(GC_NO_MEMBER_TEMPLATES || 0 < _MSC_VER && _MSC_VER <= 1200)
// MSVC++ 6.0 do not support member templates
template <class GC_Tp1> traceable_allocator
- (const traceable_allocator<GC_Tp1>&) throw() {}
+ (const traceable_allocator<GC_Tp1>&) throw() {}
# endif
~traceable_allocator() throw() {}
diff --git a/include/gc_backptr.h b/include/gc_backptr.h
index 3a63ab57..1b7ad99b 100644
--- a/include/gc_backptr.h
+++ b/include/gc_backptr.h
@@ -1,4 +1,21 @@
/*
+ * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
+ * Copyright (c) 2000-2009 by Hewlett-Packard Development Company.
+ * All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+
+/*
* This is a simple API to implement pointer back tracing, i.e.
* to answer questions such as "who is pointing to this" or
* "why is this object being retained by the collector"
@@ -33,24 +50,24 @@
/* Store information about the object referencing dest in *base_p */
/* and *offset_p. */
-/* If multiple objects or roots point to dest, the one reported */
+/* If multiple objects or roots point to dest, the one reported */
/* will be the last on used by the garbage collector to trace the */
-/* object. */
-/* source is root ==> *base_p = address, *offset_p = 0 */
+/* object. */
+/* source is root ==> *base_p = address, *offset_p = 0 */
/* source is heap object ==> *base_p != 0, *offset_p = offset */
/* Returns 1 on success, 0 if source couldn't be determined. */
/* Dest can be any address within a heap object. */
-typedef enum { GC_UNREFERENCED, /* No reference info available. */
- GC_NO_SPACE, /* Dest not allocated with debug alloc */
- GC_REFD_FROM_ROOT, /* Referenced directly by root *base_p */
- GC_REFD_FROM_REG, /* Referenced from a register, i.e. */
- /* a root without an address. */
- GC_REFD_FROM_HEAP, /* Referenced from another heap obj. */
- GC_FINALIZER_REFD /* Finalizable and hence accessible. */
+typedef enum { GC_UNREFERENCED, /* No reference info available. */
+ GC_NO_SPACE, /* Dest not allocated with debug alloc */
+ GC_REFD_FROM_ROOT, /* Referenced directly by root *base_p */
+ GC_REFD_FROM_REG, /* Referenced from a register, i.e. */
+ /* a root without an address. */
+ GC_REFD_FROM_HEAP, /* Referenced from another heap obj. */
+ GC_FINALIZER_REFD /* Finalizable and hence accessible. */
} GC_ref_kind;
GC_API GC_ref_kind GC_CALL GC_get_back_ptr_info(void *dest, void **base_p,
- size_t *offset_p);
+ size_t *offset_p);
/* Generate a random heap address. */
/* The resulting address is in the heap, but */
@@ -64,12 +81,12 @@ GC_API void * GC_CALL GC_generate_random_valid_address(void);
/* random heap address. */
/* This uses the GC logging mechanism (GC_printf) to produce */
/* output. It can often be called from a debugger. The */
-/* source in dbg_mlc.c also serves as a sample client. */
+/* source in dbg_mlc.c also serves as a sample client. */
GC_API void GC_CALL GC_generate_random_backtrace(void);
-/* Print a backtrace from a specific address. Used by the */
-/* above. The client should call GC_gcollect() immediately */
-/* before invocation. */
+/* Print a backtrace from a specific address. Used by the */
+/* above. The client should call GC_gcollect() immediately */
+/* before invocation. */
GC_API void GC_CALL GC_print_backtrace(void *);
# ifdef __cplusplus
diff --git a/include/gc_config_macros.h b/include/gc_config_macros.h
index 7e0e0355..009f5e71 100644
--- a/include/gc_config_macros.h
+++ b/include/gc_config_macros.h
@@ -1,14 +1,31 @@
/*
+ * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
+ * Copyright (c) 2000-2009 by Hewlett-Packard Development Company.
+ * All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+
+/*
* This should never be included directly. It is included only from gc.h.
* We separate it only to make gc.h more suitable as documentation.
- *
+ *
* Some tests for old macros. These violate our namespace rules and will
* disappear shortly. Use the GC_ names.
*/
#if defined(SOLARIS_THREADS) || defined(_SOLARIS_THREADS) \
|| defined(_SOLARIS_PTHREADS) || defined(GC_SOLARIS_PTHREADS)
- /* We no longer support old style Solaris threads. */
- /* GC_SOLARIS_THREADS now means pthreads. */
+ /* We no longer support old style Solaris threads. */
+ /* GC_SOLARIS_THREADS now means pthreads. */
# ifndef GC_SOLARIS_THREADS
# define GC_SOLARIS_THREADS
# endif
@@ -41,20 +58,20 @@
#endif
#if !defined(GC_THREADS) && \
- (defined(GC_SOLARIS_THREADS) || defined(GC_IRIX_THREADS) || \
- defined(GC_DGUX386_THREADS) || defined(GC_AIX_THREADS) || \
- defined(GC_HPUX_THREADS) || defined(GC_OSF1_THREADS) || \
- defined(GC_LINUX_THREADS) || defined(GC_WIN32_THREADS))
+ (defined(GC_SOLARIS_THREADS) || defined(GC_IRIX_THREADS) || \
+ defined(GC_DGUX386_THREADS) || defined(GC_AIX_THREADS) || \
+ defined(GC_HPUX_THREADS) || defined(GC_OSF1_THREADS) || \
+ defined(GC_LINUX_THREADS) || defined(GC_WIN32_THREADS))
# define GC_THREADS
#endif
# if defined(GC_SOLARIS_THREADS) || defined(GC_FREEBSD_THREADS) || \
- defined(GC_IRIX_THREADS) || defined(GC_LINUX_THREADS) || \
- defined(GC_HPUX_THREADS) || defined(GC_OSF1_THREADS) || \
- defined(GC_DGUX386_THREADS) || defined(GC_DARWIN_THREADS) || \
+ defined(GC_IRIX_THREADS) || defined(GC_LINUX_THREADS) || \
+ defined(GC_HPUX_THREADS) || defined(GC_OSF1_THREADS) || \
+ defined(GC_DGUX386_THREADS) || defined(GC_DARWIN_THREADS) || \
defined(GC_AIX_THREADS) || defined(GC_NETBSD_THREADS) || \
(defined(GC_WIN32_THREADS) && defined(__CYGWIN32__)) || \
- defined(GC_GNU_THREADS)
+ defined(GC_GNU_THREADS)
# define GC_PTHREADS
# endif
@@ -70,7 +87,7 @@
# endif
# if !defined(__linux__) && (defined(_PA_RISC1_1) || defined(_PA_RISC2_0) \
|| defined(hppa) || defined(__HPPA)) \
- || (defined(__ia64) && defined(_HPUX_SOURCE))
+ || (defined(__ia64) && defined(_HPUX_SOURCE))
# define GC_HPUX_THREADS
# define GC_PTHREADS
# endif
@@ -84,7 +101,7 @@
# endif
# if defined(__sparc) && !defined(__linux__) \
|| defined(sun) && (defined(i386) || defined(__i386__) \
- || defined(__amd64__))
+ || defined(__amd64__))
# define GC_SOLARIS_THREADS
# define GC_PTHREADS
# endif
@@ -119,14 +136,14 @@
#endif
#if !defined(_REENTRANT) && (defined(GC_SOLARIS_THREADS) \
- || defined(GC_HPUX_THREADS) \
- || defined(GC_AIX_THREADS) \
- || defined(GC_LINUX_THREADS) \
- || defined(GC_NETBSD_THREADS) \
- || defined(GC_GNU_THREADS))
+ || defined(GC_HPUX_THREADS) \
+ || defined(GC_AIX_THREADS) \
+ || defined(GC_LINUX_THREADS) \
+ || defined(GC_NETBSD_THREADS) \
+ || defined(GC_GNU_THREADS))
# define _REENTRANT
- /* Better late than never. This fails if system headers that */
- /* depend on this were previously included. */
+ /* Better late than never. This fails if system headers that */
+ /* depend on this were previously included. */
#endif
#if defined(GC_THREADS) && !defined(GC_PTHREADS) && !defined(GC_WIN32_THREADS) \
@@ -144,13 +161,13 @@
# include <stddef.h>
# if defined(__MINGW32__) && !defined(_WIN32_WCE)
# include <stdint.h>
- /* We mention uintptr_t. */
+ /* We mention uintptr_t. */
/* Perhaps this should be included in pure msft environments */
- /* as well? */
+ /* as well? */
# endif
# else /* ! _WIN32_WCE */
/* Yet more kludges for WinCE */
-# include <stdlib.h> /* size_t is defined here */
+# include <stdlib.h> /* size_t is defined here */
# ifndef _PTRDIFF_T_DEFINED
/* ptrdiff_t is not defined */
# define _PTRDIFF_T_DEFINED
@@ -159,7 +176,7 @@
# endif
#if defined(_DLL) && !defined(GC_NOT_DLL) && !defined(GC_DLL) \
- && !defined(__GNUC__)
+ && !defined(__GNUC__)
# define GC_DLL
#endif
@@ -173,7 +190,7 @@
# endif
# elif defined(_MSC_VER) || defined(__DMC__) || defined(__BORLANDC__) \
- || defined(__CYGWIN__)
+ || defined(__CYGWIN__)
# ifdef GC_BUILD
# define GC_API extern __declspec(dllexport)
# else
@@ -209,13 +226,13 @@
#endif
#ifndef GC_ATTR_MALLOC
- /* 'malloc' attribute should be used for all malloc-like functions */
- /* (to tell the compiler that a function may be treated as if any */
- /* non-NULL pointer it returns cannot alias any other pointer valid */
- /* when the function returns). If the client code violates this rule */
- /* by using custom GC_oom_func then define GC_OOM_FUNC_RETURNS_ALIAS. */
+ /* 'malloc' attribute should be used for all malloc-like functions */
+ /* (to tell the compiler that a function may be treated as if any */
+ /* non-NULL pointer it returns cannot alias any other pointer valid */
+ /* when the function returns). If the client code violates this rule */
+ /* by using custom GC_oom_func then define GC_OOM_FUNC_RETURNS_ALIAS. */
# if !defined(GC_OOM_FUNC_RETURNS_ALIAS) && defined(__GNUC__) \
- && (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))
+ && (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))
# define GC_ATTR_MALLOC __attribute__((__malloc__))
# else
# define GC_ATTR_MALLOC
@@ -223,10 +240,10 @@
#endif
#ifndef GC_ATTR_ALLOC_SIZE
- /* 'alloc_size' attribute improves __builtin_object_size correctness. */
- /* Only single-argument form of 'alloc_size' attribute is used. */
+ /* 'alloc_size' attribute improves __builtin_object_size correctness. */
+ /* Only single-argument form of 'alloc_size' attribute is used. */
# if defined(__GNUC__) && (__GNUC__ > 4 \
- || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3 && !defined(__ICC)))
+ || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3 && !defined(__ICC)))
# define GC_ATTR_ALLOC_SIZE(argnum) __attribute__((__alloc_size__(argnum)))
# else
# define GC_ATTR_ALLOC_SIZE(argnum)
diff --git a/include/gc_cpp.h b/include/gc_cpp.h
index 4a5bc7cb..a1eabe3d 100644
--- a/include/gc_cpp.h
+++ b/include/gc_cpp.h
@@ -1,21 +1,23 @@
+/*
+ * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program for any
+ * purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is
+ * granted, provided the above notices are retained, and a notice that
+ * the code was modified is included with the above copyright notice.
+ */
+
#ifndef GC_CPP_H
#define GC_CPP_H
-/****************************************************************************
-Copyright (c) 1994 by Xerox Corporation. All rights reserved.
-
-THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
-OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
-
-Permission is hereby granted to use or copy this program for any
-purpose, provided the above notices are retained on all copies.
-Permission to modify the code and to distribute modified code is
-granted, provided the above notices are retained, and a notice that
-the code was modified is included with the above copyright notice.
-****************************************************************************
+/****************************************************************************
C++ Interface to the Boehm Collector
- John R. Ellis and Jesse Hull
+ John R. Ellis and Jesse Hull
This interface provides access to the Boehm collector. It provides
basic facilities similar to those described in "Safe, Efficient
@@ -34,7 +36,7 @@ Objects allocated with the built-in "::operator new" are uncollectable.
Objects derived from class "gc" are collectable. For example:
class A: public gc {...};
- A* a = new A; // a is collectable.
+ A* a = new A; // a is collectable.
Collectable instances of non-class types can be allocated using the GC
(or UseGC) placement:
@@ -149,9 +151,9 @@ by UseGC. GC is an alias for UseGC, unless GC_NAME_CONFLICT is defined.
#if ! defined( GC_NO_OPERATOR_NEW_ARRAY ) \
&& !defined(_ENABLE_ARRAYNEW) /* Digimars */ \
&& (defined(__BORLANDC__) && (__BORLANDC__ < 0x450) \
- || (defined(__GNUC__) && \
- (__GNUC__ < 2 || __GNUC__ == 2 && __GNUC_MINOR__ < 6)) \
- || (defined(__WATCOMC__) && __WATCOMC__ < 1050))
+ || (defined(__GNUC__) && \
+ (__GNUC__ < 2 || __GNUC__ == 2 && __GNUC_MINOR__ < 6)) \
+ || (defined(__WATCOMC__) && __WATCOMC__ < 1050))
# define GC_NO_OPERATOR_NEW_ARRAY
#endif
@@ -167,7 +169,7 @@ by UseGC. GC is an alias for UseGC, unless GC_NAME_CONFLICT is defined.
enum GCPlacement {UseGC,
#ifndef GC_NAME_CONFLICT
- GC=UseGC,
+ GC=UseGC,
#endif
NoGC, PointerFreeGC};
@@ -175,12 +177,12 @@ class gc {public:
inline void* operator new( size_t size );
inline void* operator new( size_t size, GCPlacement gcp );
inline void* operator new( size_t size, void *p );
- /* Must be redefined here, since the other overloadings */
- /* hide the global definition. */
+ /* Must be redefined here, since the other overloadings */
+ /* hide the global definition. */
inline void operator delete( void* obj );
# ifdef GC_PLACEMENT_DELETE
inline void operator delete( void*, GCPlacement );
- /* called if construction fails. */
+ /* called if construction fails. */
inline void operator delete( void*, void* );
# endif
@@ -194,9 +196,9 @@ class gc {public:
inline void operator delete[]( void*, void* );
# endif
#endif /* GC_OPERATOR_NEW_ARRAY */
- };
+ };
/*
- Instances of classes derived from "gc" will be allocated in the
+ Instances of classes derived from "gc" will be allocated in the
collected heap by default, unless an explicit NoGC placement is
specified. */
@@ -222,8 +224,8 @@ extern "C" {
# pragma warning(disable:4291)
#endif
-inline void* operator new(
- size_t size,
+inline void* operator new(
+ size_t size,
GCPlacement gcp,
GCCleanUpFunc cleanup = 0,
void* clientData = 0 );
@@ -250,11 +252,11 @@ inline void* operator new(
* undefined, which is what seems to happen on VC++ 6 for some reason
* if we define a multi-argument operator new[].
* There seems to be no way to redirect new in this environment without
- * including this everywhere.
+ * including this everywhere.
*/
#if _MSC_VER > 1020
void *operator new[]( size_t size );
-
+
void operator delete[](void* obj);
#endif
@@ -264,16 +266,16 @@ inline void* operator new(
// This new operator is used by VC++ in case of Debug builds !
void* operator new( size_t size,
- int ,//nBlockUse,
- const char * szFileName,
- int nLine );
+ int ,//nBlockUse,
+ const char * szFileName,
+ int nLine );
#endif /* _MSC_VER */
#ifdef GC_OPERATOR_NEW_ARRAY
inline void* operator new[](
- size_t size,
+ size_t size,
GCPlacement gcp,
GCCleanUpFunc cleanup = 0,
void* clientData = 0 );
@@ -290,12 +292,12 @@ Inline implementation
inline void* gc::operator new( size_t size ) {
return GC_MALLOC( size );}
-
+
inline void* gc::operator new( size_t size, GCPlacement gcp ) {
- if (gcp == UseGC)
+ if (gcp == UseGC)
return GC_MALLOC( size );
else if (gcp == PointerFreeGC)
- return GC_MALLOC_ATOMIC( size );
+ return GC_MALLOC_ATOMIC( size );
else
return GC_MALLOC_UNCOLLECTABLE( size );}
@@ -304,7 +306,7 @@ inline void* gc::operator new( size_t size, void *p ) {
inline void gc::operator delete( void* obj ) {
GC_FREE( obj );}
-
+
#ifdef GC_PLACEMENT_DELETE
inline void gc::operator delete( void*, void* ) {}
@@ -317,7 +319,7 @@ inline void gc::operator delete( void* obj ) {
inline void* gc::operator new[]( size_t size ) {
return gc::operator new( size );}
-
+
inline void* gc::operator new[]( size_t size, GCPlacement gcp ) {
return gc::operator new( size, gcp );}
@@ -334,7 +336,7 @@ inline void gc::operator delete[]( void* obj ) {
gc::operator delete(p); }
#endif
-
+
#endif /* GC_OPERATOR_NEW_ARRAY */
@@ -350,14 +352,14 @@ inline gc_cleanup::gc_cleanup() {
void* base = GC_base( (void *) this );
if (0 != base) {
// Don't call the debug version, since this is a real base address.
- GC_register_finalizer_ignore_self(
- base, (GC_finalization_proc)cleanup, (void*) ((char*) this - (char*) base),
+ GC_register_finalizer_ignore_self(
+ base, (GC_finalization_proc)cleanup, (void*) ((char*) this - (char*) base),
&oldProc, &oldData );
if (0 != oldProc) {
GC_register_finalizer_ignore_self( base, oldProc, oldData, 0, 0 );}}}
-inline void* operator new(
- size_t size,
+inline void* operator new(
+ size_t size,
GCPlacement gcp,
GCCleanUpFunc cleanup,
void* clientData )
@@ -366,18 +368,18 @@ inline void* operator new(
if (gcp == UseGC) {
obj = GC_MALLOC( size );
- if (cleanup != 0)
- GC_REGISTER_FINALIZER_IGNORE_SELF(
+ if (cleanup != 0)
+ GC_REGISTER_FINALIZER_IGNORE_SELF(
obj, cleanup, clientData, 0, 0 );}
else if (gcp == PointerFreeGC) {
obj = GC_MALLOC_ATOMIC( size );}
else {
obj = GC_MALLOC_UNCOLLECTABLE( size );};
return obj;}
-
+
# ifdef GC_PLACEMENT_DELETE
-inline void operator delete (
- void *p,
+inline void operator delete (
+ void *p,
GCPlacement gcp,
GCCleanUpFunc cleanup,
void* clientData )
@@ -388,8 +390,8 @@ inline void operator delete (
#ifdef GC_OPERATOR_NEW_ARRAY
-inline void* operator new[](
- size_t size,
+inline void* operator new[](
+ size_t size,
GCPlacement gcp,
GCCleanUpFunc cleanup,
void* clientData )
@@ -398,6 +400,4 @@ inline void* operator new[](
#endif /* GC_OPERATOR_NEW_ARRAY */
-
#endif /* GC_CPP_H */
-
diff --git a/include/gc_gcj.h b/include/gc_gcj.h
index 0880e65b..dab2ef72 100644
--- a/include/gc_gcj.h
+++ b/include/gc_gcj.h
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
* Copyright 1996-1999 by Silicon Graphics. All rights reserved.
@@ -27,14 +27,14 @@
#define GC_GCJ_H
- /* Gcj keeps GC descriptor as second word of vtable. This */
- /* probably needs to be adjusted for other clients. */
- /* We currently assume that this offset is such that: */
- /* - all objects of this kind are large enough to have */
- /* a value at that offset, and */
- /* - it is not zero. */
- /* These assumptions allow objects on the free list to be */
- /* marked normally. */
+ /* Gcj keeps GC descriptor as second word of vtable. This */
+ /* probably needs to be adjusted for other clients. */
+ /* We currently assume that this offset is such that: */
+ /* - all objects of this kind are large enough to have */
+ /* a value at that offset, and */
+ /* - it is not zero. */
+ /* These assumptions allow objects on the free list to be */
+ /* marked normally. */
#ifndef _GC_H
# include "gc.h"
@@ -44,53 +44,53 @@
extern "C" {
# endif
-/* The following allocators signal an out of memory condition with */
-/* return GC_oom_fn(bytes); */
+/* The following allocators signal an out of memory condition with */
+/* return GC_oom_fn(bytes); */
-/* The following function must be called before the gcj allocators */
-/* can be invoked. */
-/* mp_index and mp are the index and mark_proc (see gc_mark.h) */
-/* respectively for the allocated objects. Mark_proc will be */
-/* used to build the descriptor for objects allocated through the */
-/* debugging interface. The mark_proc will be invoked on all such */
-/* objects with an "environment" value of 1. The client may choose */
+/* The following function must be called before the gcj allocators */
+/* can be invoked. */
+/* mp_index and mp are the index and mark_proc (see gc_mark.h) */
+/* respectively for the allocated objects. Mark_proc will be */
+/* used to build the descriptor for objects allocated through the */
+/* debugging interface. The mark_proc will be invoked on all such */
+/* objects with an "environment" value of 1. The client may choose */
/* to use the same mark_proc for some of its generated mark descriptors.*/
-/* In that case, it should use a different "environment" value to */
-/* detect the presence or absence of the debug header. */
-/* Mp is really of type mark_proc, as defined in gc_mark.h. We don't */
-/* want to include that here for namespace pollution reasons. */
-/* Passing in mp_index here instead of having GC_init_gcj_malloc() */
-/* internally call GC_new_proc() is quite ugly, but in typical usage */
-/* scenarios a compiler also has to know about mp_index, so */
-/* generating it dynamically is not acceptable. Mp_index will */
-/* typically be an integer < RESERVED_MARK_PROCS, so that it doesn't */
-/* collide with GC_new_proc allocated indices. If the application */
-/* needs no other reserved indices, zero */
-/* (GC_GCJ_RESERVED_MARK_PROC_INDEX in gc_mark.h) is an obvious choice. */
+/* In that case, it should use a different "environment" value to */
+/* detect the presence or absence of the debug header. */
+/* Mp is really of type mark_proc, as defined in gc_mark.h. We don't */
+/* want to include that here for namespace pollution reasons. */
+/* Passing in mp_index here instead of having GC_init_gcj_malloc() */
+/* internally call GC_new_proc() is quite ugly, but in typical usage */
+/* scenarios a compiler also has to know about mp_index, so */
+/* generating it dynamically is not acceptable. Mp_index will */
+/* typically be an integer < RESERVED_MARK_PROCS, so that it doesn't */
+/* collide with GC_new_proc allocated indices. If the application */
+/* needs no other reserved indices, zero */
+/* (GC_GCJ_RESERVED_MARK_PROC_INDEX in gc_mark.h) is an obvious choice. */
GC_API void GC_CALL GC_init_gcj_malloc(int /* mp_index */,
- void * /* really mark_proc */ /* mp */);
+ void * /* really mark_proc */ /* mp */);
-/* Allocate an object, clear it, and store the pointer to the */
-/* type structure (vtable in gcj). */
+/* Allocate an object, clear it, and store the pointer to the */
+/* type structure (vtable in gcj). */
/* This adds a byte at the end of the object if GC_malloc would.*/
GC_API void * GC_CALL GC_gcj_malloc(size_t /* lb */,
- void * /* ptr_to_struct_containing_descr */)
- GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1);
-/* The debug versions allocate such that the specified mark_proc */
-/* is always invoked. */
+ void * /* ptr_to_struct_containing_descr */)
+ GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1);
+/* The debug versions allocate such that the specified mark_proc */
+/* is always invoked. */
GC_API void * GC_CALL GC_debug_gcj_malloc(size_t /* lb */,
- void * /* ptr_to_struct_containing_descr */,
- GC_EXTRA_PARAMS)
- GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1);
+ void * /* ptr_to_struct_containing_descr */,
+ GC_EXTRA_PARAMS)
+ GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1);
-/* Similar to GC_gcj_malloc, but assumes that a pointer to near the */
-/* beginning of the resulting object is always maintained. */
+/* Similar to GC_gcj_malloc, but assumes that a pointer to near the */
+/* beginning of the resulting object is always maintained. */
GC_API void * GC_CALL GC_gcj_malloc_ignore_off_page(size_t /* lb */,
- void * /* ptr_to_struct_containing_descr */)
- GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1);
+ void * /* ptr_to_struct_containing_descr */)
+ GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1);
-/* The kind numbers of normal and debug gcj objects. */
-/* Useful only for debug support, we hope. */
+/* The kind numbers of normal and debug gcj objects. */
+/* Useful only for debug support, we hope. */
GC_API int GC_gcj_kind;
GC_API int GC_gcj_debug_kind;
@@ -101,7 +101,7 @@ GC_API int GC_gcj_debug_kind;
# else
# define GC_GCJ_MALLOC(s,d) GC_gcj_malloc(s,d)
# define GC_GCJ_MALLOC_IGNORE_OFF_PAGE(s,d) \
- GC_gcj_malloc_ignore_off_page(s,d)
+ GC_gcj_malloc_ignore_off_page(s,d)
# endif
# ifdef __cplusplus
diff --git a/include/gc_inline.h b/include/gc_inline.h
index cf2e9f67..c97a5127 100644
--- a/include/gc_inline.h
+++ b/include/gc_inline.h
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
* Copyright (c) 2005 Hewlett-Packard Development Company, L.P.
@@ -12,18 +12,18 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-
-/* WARNING: */
-/* Note that for these routines, it is the clients responsibility to */
+
+/* WARNING: */
+/* Note that for these routines, it is the clients responsibility to */
/* add the extra byte at the end to deal with one-past-the-end pointers.*/
-/* In the standard collector configuration, the collector assumes that */
-/* such a byte has been added, and hence does not trace the last word */
-/* in the resulting object. */
-/* This is not an issue if the collector is compiled with */
-/* -DDONT_ADD_BYTE_AT_END, or if GC_all_interior_pointers is not set. */
-/* This interface is most useful for compilers that generate C. */
-/* It is also used internally for thread-local allocation. */
-/* Manual use is hereby discouraged. */
+/* In the standard collector configuration, the collector assumes that */
+/* such a byte has been added, and hence does not trace the last word */
+/* in the resulting object. */
+/* This is not an issue if the collector is compiled with */
+/* -DDONT_ADD_BYTE_AT_END, or if GC_all_interior_pointers is not set. */
+/* This interface is most useful for compilers that generate C. */
+/* It is also used internally for thread-local allocation. */
+/* Manual use is hereby discouraged. */
#include "gc.h"
#include "gc_tiny_fl.h"
@@ -35,58 +35,58 @@
# define GC_EXPECT(expr, outcome) (expr)
#endif /* __GNUC__ */
-/* The ultimately general inline allocation macro. Allocate an object */
+/* The ultimately general inline allocation macro. Allocate an object */
/* of size granules, putting the resulting pointer in result. Tiny_fl */
-/* is a "tiny" free list array, which will be used first, if the size */
-/* is appropriate. If granules is too large, we allocate with */
-/* default_expr instead. If we need to refill the free list, we use */
-/* GC_generic_malloc_many with the indicated kind. */
-/* Tiny_fl should be an array of GC_TINY_FREELISTS void * pointers. */
-/* If num_direct is nonzero, and the individual free list pointers */
-/* are initialized to (void *)1, then we allocate numdirect granules */
-/* directly using gmalloc before putting multiple objects into the */
-/* tiny_fl entry. If num_direct is zero, then the free lists may also */
-/* be initialized to (void *)0. */
-/* Note that we use the zeroth free list to hold objects 1 granule in */
-/* size that are used to satisfy size 0 allocation requests. */
-/* We rely on much of this hopefully getting optimized away in the */
-/* num_direct = 0 case. */
-/* Particularly if granules is constant, this should generate a small */
-/* amount of code. */
+/* is a "tiny" free list array, which will be used first, if the size */
+/* is appropriate. If granules is too large, we allocate with */
+/* default_expr instead. If we need to refill the free list, we use */
+/* GC_generic_malloc_many with the indicated kind. */
+/* Tiny_fl should be an array of GC_TINY_FREELISTS void * pointers. */
+/* If num_direct is nonzero, and the individual free list pointers */
+/* are initialized to (void *)1, then we allocate numdirect granules */
+/* directly using gmalloc before putting multiple objects into the */
+/* tiny_fl entry. If num_direct is zero, then the free lists may also */
+/* be initialized to (void *)0. */
+/* Note that we use the zeroth free list to hold objects 1 granule in */
+/* size that are used to satisfy size 0 allocation requests. */
+/* We rely on much of this hopefully getting optimized away in the */
+/* num_direct = 0 case. */
+/* Particularly if granules is constant, this should generate a small */
+/* amount of code. */
# define GC_FAST_MALLOC_GRANS(result,granules,tiny_fl,num_direct,\
- kind,default_expr,init) \
+ kind,default_expr,init) \
{ \
if (GC_EXPECT((granules) >= GC_TINY_FREELISTS,0)) { \
result = (default_expr); \
} else { \
- void **my_fl = (tiny_fl) + (granules); \
+ void **my_fl = (tiny_fl) + (granules); \
void *my_entry=*my_fl; \
- void *next; \
+ void *next; \
\
- while (GC_EXPECT((GC_word)my_entry \
- <= (num_direct) + GC_TINY_FREELISTS + 1, 0)) { \
- /* Entry contains counter or NULL */ \
- if ((GC_word)my_entry - 1 < (num_direct)) { \
- /* Small counter value, not NULL */ \
+ while (GC_EXPECT((GC_word)my_entry \
+ <= (num_direct) + GC_TINY_FREELISTS + 1, 0)) { \
+ /* Entry contains counter or NULL */ \
+ if ((GC_word)my_entry - 1 < (num_direct)) { \
+ /* Small counter value, not NULL */ \
*my_fl = (char *)my_entry + (granules) + 1; \
result = (default_expr); \
- goto out; \
+ goto out; \
} else { \
- /* Large counter or NULL */ \
+ /* Large counter or NULL */ \
GC_generic_malloc_many(((granules) == 0? GC_GRANULE_BYTES : \
- GC_RAW_BYTES_FROM_INDEX(granules)), \
- kind, my_fl); \
- my_entry = *my_fl; \
+ GC_RAW_BYTES_FROM_INDEX(granules)), \
+ kind, my_fl); \
+ my_entry = *my_fl; \
if (my_entry == 0) { \
- result = (*GC_get_oom_fn())((granules)*GC_GRANULE_BYTES); \
- goto out; \
- } \
- } \
+ result = (*GC_get_oom_fn())((granules)*GC_GRANULE_BYTES); \
+ goto out; \
+ } \
+ } \
} \
next = *(void **)(my_entry); \
result = (void *)my_entry; \
*my_fl = next; \
- init; \
+ init; \
PREFETCH_FOR_WRITE(next); \
GC_ASSERT(GC_size(result) >= (granules)*GC_GRANULE_BYTES); \
GC_ASSERT((kind) == PTRFREE || ((GC_word *)result)[1] == 0); \
@@ -95,38 +95,38 @@
}
# define GC_WORDS_TO_WHOLE_GRANULES(n) \
- GC_WORDS_TO_GRANULES((n) + GC_GRANULE_WORDS - 1)
+ GC_WORDS_TO_GRANULES((n) + GC_GRANULE_WORDS - 1)
-/* Allocate n words (NOT BYTES). X is made to point to the result. */
-/* This should really only be used if GC_all_interior_pointers is */
-/* not set, or DONT_ADD_BYTE_AT_END is set. See above. */
-/* The semantics changed in version 7.0; we no longer lock, and */
-/* the caller is responsible for supplying a cleared tiny_fl */
-/* free list array. For single-threaded applications, this may be */
-/* a global array. */
+/* Allocate n words (NOT BYTES). X is made to point to the result. */
+/* This should really only be used if GC_all_interior_pointers is */
+/* not set, or DONT_ADD_BYTE_AT_END is set. See above. */
+/* The semantics changed in version 7.0; we no longer lock, and */
+/* the caller is responsible for supplying a cleared tiny_fl */
+/* free list array. For single-threaded applications, this may be */
+/* a global array. */
# define GC_MALLOC_WORDS(result,n,tiny_fl) \
-{ \
+{ \
size_t grans = GC_WORDS_TO_WHOLE_GRANULES(n); \
GC_FAST_MALLOC_GRANS(result, grans, tiny_fl, 0, \
- NORMAL, GC_malloc(grans*GC_GRANULE_BYTES), \
- *(void **)result = 0); \
+ NORMAL, GC_malloc(grans*GC_GRANULE_BYTES), \
+ *(void **)result = 0); \
}
# define GC_MALLOC_ATOMIC_WORDS(result,n,tiny_fl) \
-{ \
+{ \
size_t grans = GC_WORDS_TO_WHOLE_GRANULES(n); \
GC_FAST_MALLOC_GRANS(result, grans, tiny_fl, 0, \
- PTRFREE, GC_malloc_atomic(grans*GC_GRANULE_BYTES), \
- (void)0 /* no initialization */); \
+ PTRFREE, GC_malloc_atomic(grans*GC_GRANULE_BYTES), \
+ (void)0 /* no initialization */); \
}
/* And once more for two word initialized objects: */
# define GC_CONS(result, first, second, tiny_fl) \
-{ \
+{ \
size_t grans = GC_WORDS_TO_WHOLE_GRANULES(2); \
GC_FAST_MALLOC_GRANS(result, grans, tiny_fl, 0, \
- NORMAL, GC_malloc(grans*GC_GRANULE_BYTES), \
- *(void **)result = (void *)(first)); \
- ((void **)(result))[1] = (void *)(second); \
+ NORMAL, GC_malloc(grans*GC_GRANULE_BYTES), \
+ *(void **)result = (void *)(first)); \
+ ((void **)(result))[1] = (void *)(second); \
}
diff --git a/include/gc_mark.h b/include/gc_mark.h
index ca8b85c9..3895e152 100644
--- a/include/gc_mark.h
+++ b/include/gc_mark.h
@@ -18,7 +18,7 @@
* clients that provide detailed heap layout information to the collector.
* This interface should not be used by normal C or C++ clients.
* It will be useful to runtimes for other languages.
- *
+ *
* This is an experts-only interface! There are many ways to break the
* collector in subtle ways by using this functionality.
*/
@@ -33,119 +33,119 @@
extern "C" {
# endif
-/* A client supplied mark procedure. Returns new mark stack pointer. */
-/* Primary effect should be to push new entries on the mark stack. */
-/* Mark stack pointer values are passed and returned explicitly. */
-/* Global variables describing mark stack are not necessarily valid. */
-/* (This usually saves a few cycles by keeping things in registers.) */
-/* Assumed to scan about GC_PROC_BYTES on average. If it needs to do */
-/* much more work than that, it should do it in smaller pieces by */
-/* pushing itself back on the mark stack. */
-/* Note that it should always do some work (defined as marking some */
-/* objects) before pushing more than one entry on the mark stack. */
-/* This is required to ensure termination in the event of mark stack */
-/* overflows. */
+/* A client supplied mark procedure. Returns new mark stack pointer. */
+/* Primary effect should be to push new entries on the mark stack. */
+/* Mark stack pointer values are passed and returned explicitly. */
+/* Global variables describing mark stack are not necessarily valid. */
+/* (This usually saves a few cycles by keeping things in registers.) */
+/* Assumed to scan about GC_PROC_BYTES on average. If it needs to do */
+/* much more work than that, it should do it in smaller pieces by */
+/* pushing itself back on the mark stack. */
+/* Note that it should always do some work (defined as marking some */
+/* objects) before pushing more than one entry on the mark stack. */
+/* This is required to ensure termination in the event of mark stack */
+/* overflows. */
/* This procedure is always called with at least one empty entry on the */
-/* mark stack. */
-/* Currently we require that mark procedures look for pointers in a */
-/* subset of the places the conservative marker would. It must be safe */
-/* to invoke the normal mark procedure instead. */
+/* mark stack. */
+/* Currently we require that mark procedures look for pointers in a */
+/* subset of the places the conservative marker would. It must be safe */
+/* to invoke the normal mark procedure instead. */
/* WARNING: Such a mark procedure may be invoked on an unused object */
-/* residing on a free list. Such objects are cleared, except for a */
-/* free list link field in the first word. Thus mark procedures may */
-/* not count on the presence of a type descriptor, and must handle this */
-/* case correctly somehow. */
+/* residing on a free list. Such objects are cleared, except for a */
+/* free list link field in the first word. Thus mark procedures may */
+/* not count on the presence of a type descriptor, and must handle this */
+/* case correctly somehow. */
# define GC_PROC_BYTES 100
struct GC_ms_entry;
typedef struct GC_ms_entry * (*GC_mark_proc) (
- GC_word * addr, struct GC_ms_entry * mark_stack_ptr,
- struct GC_ms_entry * mark_stack_limit, GC_word env);
+ GC_word * addr, struct GC_ms_entry * mark_stack_ptr,
+ struct GC_ms_entry * mark_stack_limit, GC_word env);
# define GC_LOG_MAX_MARK_PROCS 6
# define GC_MAX_MARK_PROCS (1 << GC_LOG_MAX_MARK_PROCS)
-/* In a few cases it's necessary to assign statically known indices to */
-/* certain mark procs. Thus we reserve a few for well known clients. */
-/* (This is necessary if mark descriptors are compiler generated.) */
+/* In a few cases it's necessary to assign statically known indices to */
+/* certain mark procs. Thus we reserve a few for well known clients. */
+/* (This is necessary if mark descriptors are compiler generated.) */
#define GC_RESERVED_MARK_PROCS 8
# define GC_GCJ_RESERVED_MARK_PROC_INDEX 0
-/* Object descriptors on mark stack or in objects. Low order two */
-/* bits are tags distinguishing among the following 4 possibilities */
-/* for the high order 30 bits. */
+/* Object descriptors on mark stack or in objects. Low order two */
+/* bits are tags distinguishing among the following 4 possibilities */
+/* for the high order 30 bits. */
#define GC_DS_TAG_BITS 2
#define GC_DS_TAGS ((1 << GC_DS_TAG_BITS) - 1)
-#define GC_DS_LENGTH 0 /* The entire word is a length in bytes that */
- /* must be a multiple of 4. */
-#define GC_DS_BITMAP 1 /* 30 (62) bits are a bitmap describing pointer */
- /* fields. The msb is 1 if the first word */
- /* is a pointer. */
- /* (This unconventional ordering sometimes */
- /* makes the marker slightly faster.) */
- /* Zeroes indicate definite nonpointers. Ones */
- /* indicate possible pointers. */
- /* Only usable if pointers are word aligned. */
+#define GC_DS_LENGTH 0 /* The entire word is a length in bytes that */
+ /* must be a multiple of 4. */
+#define GC_DS_BITMAP 1 /* 30 (62) bits are a bitmap describing pointer */
+ /* fields. The msb is 1 if the first word */
+ /* is a pointer. */
+ /* (This unconventional ordering sometimes */
+ /* makes the marker slightly faster.) */
+ /* Zeroes indicate definite nonpointers. Ones */
+ /* indicate possible pointers. */
+ /* Only usable if pointers are word aligned. */
#define GC_DS_PROC 2
- /* The objects referenced by this object can be */
- /* pushed on the mark stack by invoking */
- /* PROC(descr). ENV(descr) is passed as the */
- /* last argument. */
+ /* The objects referenced by this object can be */
+ /* pushed on the mark stack by invoking */
+ /* PROC(descr). ENV(descr) is passed as the */
+ /* last argument. */
# define GC_MAKE_PROC(proc_index, env) \
- (((((env) << GC_LOG_MAX_MARK_PROCS) \
- | (proc_index)) << GC_DS_TAG_BITS) | GC_DS_PROC)
-#define GC_DS_PER_OBJECT 3 /* The real descriptor is at the */
- /* byte displacement from the beginning of the */
- /* object given by descr & ~DS_TAGS */
- /* If the descriptor is negative, the real */
- /* descriptor is at (*<object_start>) - */
- /* (descr & ~DS_TAGS) - GC_INDIR_PER_OBJ_BIAS */
- /* The latter alternative can be used if each */
- /* object contains a type descriptor in the */
- /* first word. */
- /* Note that in multithreaded environments */
- /* per object descriptors must be located in */
- /* either the first two or last two words of */
- /* the object, since only those are guaranteed */
- /* to be cleared while the allocation lock is */
- /* held. */
+ (((((env) << GC_LOG_MAX_MARK_PROCS) \
+ | (proc_index)) << GC_DS_TAG_BITS) | GC_DS_PROC)
+#define GC_DS_PER_OBJECT 3 /* The real descriptor is at the */
+ /* byte displacement from the beginning of the */
+ /* object given by descr & ~DS_TAGS */
+ /* If the descriptor is negative, the real */
+ /* descriptor is at (*<object_start>) - */
+ /* (descr & ~DS_TAGS) - GC_INDIR_PER_OBJ_BIAS */
+ /* The latter alternative can be used if each */
+ /* object contains a type descriptor in the */
+ /* first word. */
+ /* Note that in multithreaded environments */
+ /* per object descriptors must be located in */
+ /* either the first two or last two words of */
+ /* the object, since only those are guaranteed */
+ /* to be cleared while the allocation lock is */
+ /* held. */
#define GC_INDIR_PER_OBJ_BIAS 0x10
-
+
GC_API void * GC_least_plausible_heap_addr;
GC_API void * GC_greatest_plausible_heap_addr;
- /* Bounds on the heap. Guaranteed valid */
- /* Likely to include future heap expansion. */
- /* Hence usually includes not-yet-mapped */
- /* memory. */
-
-/* Handle nested references in a custom mark procedure. */
-/* Check if obj is a valid object. If so, ensure that it is marked. */
-/* If it was not previously marked, push its contents onto the mark */
-/* stack for future scanning. The object will then be scanned using */
-/* its mark descriptor. */
-/* Returns the new mark stack pointer. */
-/* Handles mark stack overflows correctly. */
-/* Since this marks first, it makes progress even if there are mark */
-/* stack overflows. */
-/* Src is the address of the pointer to obj, which is used only */
-/* for back pointer-based heap debugging. */
-/* It is strongly recommended that most objects be handled without mark */
-/* procedures, e.g. with bitmap descriptors, and that mark procedures */
-/* be reserved for exceptional cases. That will ensure that */
-/* performance of this call is not extremely performance critical. */
-/* (Otherwise we would need to inline GC_mark_and_push completely, */
-/* which would tie the client code to a fixed collector version.) */
-/* Note that mark procedures should explicitly call FIXUP_POINTER() */
-/* if required. */
+ /* Bounds on the heap. Guaranteed valid */
+ /* Likely to include future heap expansion. */
+ /* Hence usually includes not-yet-mapped */
+ /* memory. */
+
+/* Handle nested references in a custom mark procedure. */
+/* Check if obj is a valid object. If so, ensure that it is marked. */
+/* If it was not previously marked, push its contents onto the mark */
+/* stack for future scanning. The object will then be scanned using */
+/* its mark descriptor. */
+/* Returns the new mark stack pointer. */
+/* Handles mark stack overflows correctly. */
+/* Since this marks first, it makes progress even if there are mark */
+/* stack overflows. */
+/* Src is the address of the pointer to obj, which is used only */
+/* for back pointer-based heap debugging. */
+/* It is strongly recommended that most objects be handled without mark */
+/* procedures, e.g. with bitmap descriptors, and that mark procedures */
+/* be reserved for exceptional cases. That will ensure that */
+/* performance of this call is not extremely performance critical. */
+/* (Otherwise we would need to inline GC_mark_and_push completely, */
+/* which would tie the client code to a fixed collector version.) */
+/* Note that mark procedures should explicitly call FIXUP_POINTER() */
+/* if required. */
GC_API struct GC_ms_entry * GC_CALL GC_mark_and_push(void * obj,
- struct GC_ms_entry * mark_stack_ptr,
- struct GC_ms_entry * mark_stack_limit,
- void * *src);
+ struct GC_ms_entry * mark_stack_ptr,
+ struct GC_ms_entry * mark_stack_limit,
+ void * *src);
#define GC_MARK_AND_PUSH(obj, msp, lim, src) \
- (((GC_word)obj >= (GC_word)GC_least_plausible_heap_addr && \
- (GC_word)obj <= (GC_word)GC_greatest_plausible_heap_addr)? \
- GC_mark_and_push(obj, msp, lim, src) : \
- msp)
+ (((GC_word)obj >= (GC_word)GC_least_plausible_heap_addr && \
+ (GC_word)obj <= (GC_word)GC_greatest_plausible_heap_addr)? \
+ GC_mark_and_push(obj, msp, lim, src) : \
+ msp)
GC_API size_t GC_debug_header_size;
/* The size of the header added to objects allocated through */
@@ -154,59 +154,59 @@ GC_API size_t GC_debug_header_size;
/* need to be recompiled for collector version changes. */
#define GC_USR_PTR_FROM_BASE(p) ((void *)((char *)(p) + GC_debug_header_size))
-/* And some routines to support creation of new "kinds", e.g. with */
-/* custom mark procedures, by language runtimes. */
-/* The _inner versions assume the caller holds the allocation lock. */
+/* And some routines to support creation of new "kinds", e.g. with */
+/* custom mark procedures, by language runtimes. */
+/* The _inner versions assume the caller holds the allocation lock. */
-/* Return a new free list array. */
+/* Return a new free list array. */
GC_API void ** GC_CALL GC_new_free_list(void);
GC_API void ** GC_CALL GC_new_free_list_inner(void);
/* Return a new kind, as specified. */
GC_API unsigned GC_CALL GC_new_kind(void **free_list,
- GC_word mark_descriptor_template,
- int add_size_to_descriptor,
- int clear_new_objects);
- /* The last two parameters must be zero or one. */
+ GC_word mark_descriptor_template,
+ int add_size_to_descriptor,
+ int clear_new_objects);
+ /* The last two parameters must be zero or one. */
GC_API unsigned GC_CALL GC_new_kind_inner(void **free_list,
- GC_word mark_descriptor_template,
- int add_size_to_descriptor,
- int clear_new_objects);
+ GC_word mark_descriptor_template,
+ int add_size_to_descriptor,
+ int clear_new_objects);
-/* Return a new mark procedure identifier, suitable for use as */
-/* the first argument in GC_MAKE_PROC. */
+/* Return a new mark procedure identifier, suitable for use as */
+/* the first argument in GC_MAKE_PROC. */
GC_API unsigned GC_CALL GC_new_proc(GC_mark_proc);
GC_API unsigned GC_CALL GC_new_proc_inner(GC_mark_proc);
-/* Allocate an object of a given kind. Note that in multithreaded */
-/* contexts, this is usually unsafe for kinds that have the descriptor */
-/* in the object itself, since there is otherwise a window in which */
-/* the descriptor is not correct. Even in the single-threaded case, */
-/* we need to be sure that cleared objects on a free list don't */
-/* cause a GC crash if they are accidentally traced. */
+/* Allocate an object of a given kind. Note that in multithreaded */
+/* contexts, this is usually unsafe for kinds that have the descriptor */
+/* in the object itself, since there is otherwise a window in which */
+/* the descriptor is not correct. Even in the single-threaded case, */
+/* we need to be sure that cleared objects on a free list don't */
+/* cause a GC crash if they are accidentally traced. */
GC_API void * GC_CALL GC_generic_malloc(size_t lb, int k);
typedef void (GC_CALLBACK * GC_describe_type_fn) (void *p, char *out_buf);
- /* A procedure which */
- /* produces a human-readable */
- /* description of the "type" of object */
- /* p into the buffer out_buf of length */
- /* GC_TYPE_DESCR_LEN. This is used by */
- /* the debug support when printing */
- /* objects. */
- /* These functions should be as robust */
- /* as possible, though we do avoid */
- /* invoking them on objects on the */
- /* global free list. */
-# define GC_TYPE_DESCR_LEN 40
+ /* A procedure which */
+ /* produces a human-readable */
+ /* description of the "type" of object */
+ /* p into the buffer out_buf of length */
+ /* GC_TYPE_DESCR_LEN. This is used by */
+ /* the debug support when printing */
+ /* objects. */
+ /* These functions should be as robust */
+ /* as possible, though we do avoid */
+ /* invoking them on objects on the */
+ /* global free list. */
+# define GC_TYPE_DESCR_LEN 40
GC_API void GC_CALL GC_register_describe_type_fn(int kind,
- GC_describe_type_fn knd);
- /* Register a describe_type function */
- /* to be used when printing objects */
- /* of a particular kind. */
+ GC_describe_type_fn knd);
+ /* Register a describe_type function */
+ /* to be used when printing objects */
+ /* of a particular kind. */
-/* See gc.h for the description of these "inner" functions. */
+/* See gc.h for the description of these "inner" functions. */
GC_API size_t GC_CALL GC_get_heap_size_inner(void);
GC_API size_t GC_CALL GC_get_free_bytes_inner(void);
@@ -215,4 +215,3 @@ GC_API size_t GC_CALL GC_get_free_bytes_inner(void);
# endif
#endif /* GC_MARK_H */
-
diff --git a/include/gc_pthread_redirects.h b/include/gc_pthread_redirects.h
index a61f108e..270236bb 100644
--- a/include/gc_pthread_redirects.h
+++ b/include/gc_pthread_redirects.h
@@ -1,19 +1,36 @@
-/* Our pthread support normally needs to intercept a number of thread */
-/* calls. We arrange to do that here, if appropriate. */
+/*
+ * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
+ * Copyright (c) 2000-2009 by Hewlett-Packard Development Company.
+ * All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+
+/* Our pthread support normally needs to intercept a number of thread */
+/* calls. We arrange to do that here, if appropriate. */
#ifndef GC_PTHREAD_REDIRECTS_H
#define GC_PTHREAD_REDIRECTS_H
#if !defined(GC_USE_LD_WRAP) && defined(GC_PTHREADS)
-/* We need to intercept calls to many of the threads primitives, so */
+/* We need to intercept calls to many of the threads primitives, so */
/* that we can locate thread stacks and stop the world. */
/* Note also that the collector cannot always see thread specific data. */
/* Thread specific data should generally consist of pointers to */
/* uncollectable objects (allocated with GC_malloc_uncollectable, */
/* not the system malloc), which are deallocated using the destructor */
/* facility in thr_keycreate. Alternatively, keep a redundant pointer */
-/* to thread specific data on the thread stack. */
+/* to thread specific data on the thread stack. */
# include <pthread.h>
# include <signal.h>
@@ -22,7 +39,7 @@
# include <dlfcn.h>
GC_API int GC_pthread_sigmask(int /* how */, const sigset_t *,
- sigset_t * /* oset */);
+ sigset_t * /* oset */);
GC_API void *GC_dlopen(const char * /* path */, int /* mode */);
# undef pthread_sigmask
@@ -32,13 +49,13 @@
# endif
GC_API int GC_pthread_create(pthread_t *, const pthread_attr_t *,
- void *(*)(void *), void * /* arg */);
+ void *(*)(void *), void * /* arg */);
GC_API int GC_pthread_join(pthread_t, void ** /* retval */);
GC_API int GC_pthread_detach(pthread_t);
-/* Unless the compiler supports #pragma extern_prefix, the Tru64 UNIX */
-/* <pthread.h> redefines some POSIX thread functions to use mangled */
-/* names. Anyway, it's safe to undef them before redefining. */
+/* Unless the compiler supports #pragma extern_prefix, the Tru64 UNIX */
+/* <pthread.h> redefines some POSIX thread functions to use mangled */
+/* names. Anyway, it's safe to undef them before redefining. */
#undef pthread_create
#undef pthread_join
#undef pthread_detach
diff --git a/include/gc_typed.h b/include/gc_typed.h
index 323d9a99..3c75a9f0 100644
--- a/include/gc_typed.h
+++ b/include/gc_typed.h
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright 1996 Silicon Graphics. All rights reserved.
@@ -33,14 +33,14 @@
extern "C" {
#endif
typedef GC_word * GC_bitmap;
- /* The least significant bit of the first word is one if */
- /* the first word in the object may be a pointer. */
-
+ /* The least significant bit of the first word is one if */
+ /* the first word in the object may be a pointer. */
+
# define GC_WORDSZ (8*sizeof(GC_word))
# define GC_get_bit(bm, index) \
- (((bm)[index/GC_WORDSZ] >> (index%GC_WORDSZ)) & 1)
+ (((bm)[index/GC_WORDSZ] >> (index%GC_WORDSZ)) & 1)
# define GC_set_bit(bm, index) \
- (bm)[index/GC_WORDSZ] |= ((GC_word)1 << (index%GC_WORDSZ))
+ (bm)[index/GC_WORDSZ] |= ((GC_word)1 << (index%GC_WORDSZ))
# define GC_WORD_OFFSET(t, f) (offsetof(t,f)/sizeof(GC_word))
# define GC_WORD_LEN(t) (sizeof(t)/ sizeof(GC_word))
# define GC_BITMAP_SIZE(t) ((GC_WORD_LEN(t) + GC_WORDSZ-1)/GC_WORDSZ)
@@ -48,60 +48,60 @@ typedef GC_word * GC_bitmap;
typedef GC_word GC_descr;
GC_API GC_descr GC_CALL GC_make_descriptor(GC_bitmap bm, size_t len);
- /* Return a type descriptor for the object whose layout */
- /* is described by the argument. */
- /* The least significant bit of the first word is one */
- /* if the first word in the object may be a pointer. */
- /* The second argument specifies the number of */
- /* meaningful bits in the bitmap. The actual object */
- /* may be larger (but not smaller). Any additional */
- /* words in the object are assumed not to contain */
- /* pointers. */
- /* Returns a conservative approximation in the */
- /* (unlikely) case of insufficient memory to build */
- /* the descriptor. Calls to GC_make_descriptor */
- /* may consume some amount of a finite resource. This */
- /* is intended to be called once per type, not once */
- /* per allocation. */
+ /* Return a type descriptor for the object whose layout */
+ /* is described by the argument. */
+ /* The least significant bit of the first word is one */
+ /* if the first word in the object may be a pointer. */
+ /* The second argument specifies the number of */
+ /* meaningful bits in the bitmap. The actual object */
+ /* may be larger (but not smaller). Any additional */
+ /* words in the object are assumed not to contain */
+ /* pointers. */
+ /* Returns a conservative approximation in the */
+ /* (unlikely) case of insufficient memory to build */
+ /* the descriptor. Calls to GC_make_descriptor */
+ /* may consume some amount of a finite resource. This */
+ /* is intended to be called once per type, not once */
+ /* per allocation. */
-/* It is possible to generate a descriptor for a C type T with */
-/* word aligned pointer fields f1, f2, ... as follows: */
-/* */
+/* It is possible to generate a descriptor for a C type T with */
+/* word aligned pointer fields f1, f2, ... as follows: */
+/* */
/* GC_descr T_descr; */
-/* GC_word T_bitmap[GC_BITMAP_SIZE(T)] = {0}; */
-/* GC_set_bit(T_bitmap, GC_WORD_OFFSET(T,f1)); */
-/* GC_set_bit(T_bitmap, GC_WORD_OFFSET(T,f2)); */
-/* ... */
-/* T_descr = GC_make_descriptor(T_bitmap, GC_WORD_LEN(T)); */
+/* GC_word T_bitmap[GC_BITMAP_SIZE(T)] = {0}; */
+/* GC_set_bit(T_bitmap, GC_WORD_OFFSET(T,f1)); */
+/* GC_set_bit(T_bitmap, GC_WORD_OFFSET(T,f2)); */
+/* ... */
+/* T_descr = GC_make_descriptor(T_bitmap, GC_WORD_LEN(T)); */
GC_API void * GC_CALL GC_malloc_explicitly_typed(size_t size_in_bytes,
- GC_descr d);
- /* Allocate an object whose layout is described by d. */
- /* The resulting object MAY NOT BE PASSED TO REALLOC. */
- /* The returned object is cleared. */
+ GC_descr d);
+ /* Allocate an object whose layout is described by d. */
+ /* The resulting object MAY NOT BE PASSED TO REALLOC. */
+ /* The returned object is cleared. */
GC_API void * GC_CALL GC_malloc_explicitly_typed_ignore_off_page
(size_t size_in_bytes, GC_descr d);
-
+
GC_API void * GC_CALL GC_calloc_explicitly_typed(size_t nelements,
- size_t element_size_in_bytes,
- GC_descr d);
- /* Allocate an array of nelements elements, each of the */
- /* given size, and with the given descriptor. */
- /* The element size must be a multiple of the byte */
- /* alignment required for pointers. E.g. on a 32-bit */
- /* machine with 16-bit aligned pointers, size_in_bytes */
- /* must be a multiple of 2. */
- /* Returned object is cleared. */
+ size_t element_size_in_bytes,
+ GC_descr d);
+ /* Allocate an array of nelements elements, each of the */
+ /* given size, and with the given descriptor. */
+ /* The element size must be a multiple of the byte */
+ /* alignment required for pointers. E.g. on a 32-bit */
+ /* machine with 16-bit aligned pointers, size_in_bytes */
+ /* must be a multiple of 2. */
+ /* Returned object is cleared. */
#ifdef GC_DEBUG
# define GC_MALLOC_EXPLICITLY_TYPED(bytes, d) GC_MALLOC(bytes)
# define GC_CALLOC_EXPLICITLY_TYPED(n, bytes, d) GC_MALLOC(n*bytes)
#else
# define GC_MALLOC_EXPLICITLY_TYPED(bytes, d) \
- GC_malloc_explicitly_typed(bytes, d)
+ GC_malloc_explicitly_typed(bytes, d)
# define GC_CALLOC_EXPLICITLY_TYPED(n, bytes, d) \
- GC_calloc_explicitly_typed(n, bytes, d)
+ GC_calloc_explicitly_typed(n, bytes, d)
#endif /* !GC_DEBUG */
#ifdef __cplusplus
@@ -109,4 +109,3 @@ GC_API void * GC_CALL GC_calloc_explicitly_typed(size_t nelements,
#endif
#endif /* _GC_TYPED_H */
-
diff --git a/include/gc_version.h b/include/gc_version.h
index cc6c7d1d..40a1a4c9 100644
--- a/include/gc_version.h
+++ b/include/gc_version.h
@@ -1,6 +1,23 @@
-/* The version here should match that in configure/configure.ac */
-/* Eventually this one may become unnecessary. For now we need */
-/* it to keep the old-style build process working. */
+/*
+ * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
+ * Copyright (c) 2000-2009 by Hewlett-Packard Development Company.
+ * All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+
+/* The version here should match that in configure/configure.ac */
+/* Eventually this one may become unnecessary. For now we need */
+/* it to keep the old-style build process working. */
#define GC_TMP_VERSION_MAJOR 7
#define GC_TMP_VERSION_MINOR 2
#define GC_TMP_ALPHA_VERSION 3
diff --git a/include/javaxfc.h b/include/javaxfc.h
index 3878727b..00ed385d 100644
--- a/include/javaxfc.h
+++ b/include/javaxfc.h
@@ -1,3 +1,20 @@
+/*
+ * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
+ * Copyright (c) 2000-2009 by Hewlett-Packard Development Company.
+ * All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+
# ifndef _GC_H
# include "gc.h"
# endif
diff --git a/include/new_gc_alloc.h b/include/new_gc_alloc.h
index 6b6902b7..67da7938 100644
--- a/include/new_gc_alloc.h
+++ b/include/new_gc_alloc.h
@@ -32,10 +32,10 @@
// with g++ 2.7.2 and earlier.
//
// Unlike its predecessor, this one simply defines
-// gc_alloc
-// single_client_gc_alloc
-// traceable_alloc
-// single_client_traceable_alloc
+// gc_alloc
+// single_client_gc_alloc
+// traceable_alloc
+// single_client_traceable_alloc
//
// It does not redefine alloc. Nor does it change the default allocator,
// though the user may wish to do so. (The argument against changing
@@ -64,8 +64,8 @@
#endif
#endif
-/* A hack to deal with gcc 3.1. If you are using gcc3.1 and later, */
-/* you should probably really use gc_allocator.h instead. */
+/* A hack to deal with gcc 3.1. If you are using gcc3.1 and later, */
+/* you should probably really use gc_allocator.h instead. */
#if defined (__GNUC__) && \
(__GNUC__ > 3 || (__GNUC__ == 3 && (__GNUC_MINOR__ >= 1)))
# define simple_alloc __simple_alloc
@@ -92,7 +92,7 @@ extern "C" {
GC_API void GC_CALL GC_incr_bytes_freed(size_t bytes);
GC_API char * GC_CALL GC_generic_malloc_words_small(size_t word, int kind);
- /* FIXME: Doesn't exist anymore. */
+ /* FIXME: Doesn't exist anymore. */
}
// Object kinds; must match PTRFREE, NORMAL, UNCOLLECTABLE, and
@@ -188,66 +188,66 @@ typedef GC_aux_template<0> GC_aux;
template <int dummy>
class single_client_gc_alloc_template {
public:
- static void * allocate(size_t n)
+ static void * allocate(size_t n)
{
- size_t nwords = GC_round_up(n);
- void ** flh;
- void * op;
-
- if (n > GC_max_fast_bytes) return GC_malloc(n);
- flh = GC_objfreelist_ptr + nwords;
- if (0 == (op = *flh)) {
- return GC_aux::GC_out_of_line_malloc(nwords, GC_NORMAL);
- }
- *flh = GC_obj_link(op);
- GC_aux::GC_bytes_recently_allocd += nwords * GC_bytes_per_word;
- return op;
+ size_t nwords = GC_round_up(n);
+ void ** flh;
+ void * op;
+
+ if (n > GC_max_fast_bytes) return GC_malloc(n);
+ flh = GC_objfreelist_ptr + nwords;
+ if (0 == (op = *flh)) {
+ return GC_aux::GC_out_of_line_malloc(nwords, GC_NORMAL);
+ }
+ *flh = GC_obj_link(op);
+ GC_aux::GC_bytes_recently_allocd += nwords * GC_bytes_per_word;
+ return op;
}
- static void * ptr_free_allocate(size_t n)
+ static void * ptr_free_allocate(size_t n)
{
- size_t nwords = GC_round_up(n);
- void ** flh;
- void * op;
-
- if (n > GC_max_fast_bytes) return GC_malloc_atomic(n);
- flh = GC_aobjfreelist_ptr + nwords;
- if (0 == (op = *flh)) {
- return GC_aux::GC_out_of_line_malloc(nwords, GC_PTRFREE);
- }
- *flh = GC_obj_link(op);
- GC_aux::GC_bytes_recently_allocd += nwords * GC_bytes_per_word;
- return op;
+ size_t nwords = GC_round_up(n);
+ void ** flh;
+ void * op;
+
+ if (n > GC_max_fast_bytes) return GC_malloc_atomic(n);
+ flh = GC_aobjfreelist_ptr + nwords;
+ if (0 == (op = *flh)) {
+ return GC_aux::GC_out_of_line_malloc(nwords, GC_PTRFREE);
+ }
+ *flh = GC_obj_link(op);
+ GC_aux::GC_bytes_recently_allocd += nwords * GC_bytes_per_word;
+ return op;
}
- static void deallocate(void *p, size_t n)
- {
+ static void deallocate(void *p, size_t n)
+ {
size_t nwords = GC_round_up(n);
void ** flh;
-
- if (n > GC_max_fast_bytes) {
- GC_free(p);
- } else {
- flh = GC_objfreelist_ptr + nwords;
- GC_obj_link(p) = *flh;
- memset(reinterpret_cast<char *>(p) + GC_bytes_per_word, 0,
- GC_bytes_per_word * (nwords - 1));
- *flh = p;
- GC_aux::GC_bytes_recently_freed += nwords * GC_bytes_per_word;
- }
- }
- static void ptr_free_deallocate(void *p, size_t n)
- {
+
+ if (n > GC_max_fast_bytes) {
+ GC_free(p);
+ } else {
+ flh = GC_objfreelist_ptr + nwords;
+ GC_obj_link(p) = *flh;
+ memset(reinterpret_cast<char *>(p) + GC_bytes_per_word, 0,
+ GC_bytes_per_word * (nwords - 1));
+ *flh = p;
+ GC_aux::GC_bytes_recently_freed += nwords * GC_bytes_per_word;
+ }
+ }
+ static void ptr_free_deallocate(void *p, size_t n)
+ {
size_t nwords = GC_round_up(n);
void ** flh;
-
- if (n > GC_max_fast_bytes) {
- GC_free(p);
- } else {
- flh = GC_aobjfreelist_ptr + nwords;
- GC_obj_link(p) = *flh;
- *flh = p;
- GC_aux::GC_bytes_recently_freed += nwords * GC_bytes_per_word;
- }
- }
+
+ if (n > GC_max_fast_bytes) {
+ GC_free(p);
+ } else {
+ flh = GC_aobjfreelist_ptr + nwords;
+ GC_obj_link(p) = *flh;
+ *flh = p;
+ GC_aux::GC_bytes_recently_freed += nwords * GC_bytes_per_word;
+ }
+ }
};
typedef single_client_gc_alloc_template<0> single_client_gc_alloc;
@@ -256,68 +256,68 @@ typedef single_client_gc_alloc_template<0> single_client_gc_alloc;
template <int dummy>
class single_client_traceable_alloc_template {
public:
- static void * allocate(size_t n)
+ static void * allocate(size_t n)
{
- size_t nwords = GC_round_up_uncollectable(n);
- void ** flh;
- void * op;
-
- if (n > GC_max_fast_bytes) return GC_malloc_uncollectable(n);
- flh = GC_uobjfreelist_ptr + nwords;
- if (0 == (op = *flh)) {
- return GC_aux::GC_out_of_line_malloc(nwords, GC_UNCOLLECTABLE);
- }
- *flh = GC_obj_link(op);
- GC_aux::GC_uncollectable_bytes_recently_allocd +=
- nwords * GC_bytes_per_word;
- return op;
+ size_t nwords = GC_round_up_uncollectable(n);
+ void ** flh;
+ void * op;
+
+ if (n > GC_max_fast_bytes) return GC_malloc_uncollectable(n);
+ flh = GC_uobjfreelist_ptr + nwords;
+ if (0 == (op = *flh)) {
+ return GC_aux::GC_out_of_line_malloc(nwords, GC_UNCOLLECTABLE);
+ }
+ *flh = GC_obj_link(op);
+ GC_aux::GC_uncollectable_bytes_recently_allocd +=
+ nwords * GC_bytes_per_word;
+ return op;
}
- static void * ptr_free_allocate(size_t n)
+ static void * ptr_free_allocate(size_t n)
{
- size_t nwords = GC_round_up_uncollectable(n);
- void ** flh;
- void * op;
-
- if (n > GC_max_fast_bytes) return GC_malloc_atomic_uncollectable(n);
- flh = GC_auobjfreelist_ptr + nwords;
- if (0 == (op = *flh)) {
- return GC_aux::GC_out_of_line_malloc(nwords, GC_AUNCOLLECTABLE);
- }
- *flh = GC_obj_link(op);
- GC_aux::GC_uncollectable_bytes_recently_allocd +=
- nwords * GC_bytes_per_word;
- return op;
+ size_t nwords = GC_round_up_uncollectable(n);
+ void ** flh;
+ void * op;
+
+ if (n > GC_max_fast_bytes) return GC_malloc_atomic_uncollectable(n);
+ flh = GC_auobjfreelist_ptr + nwords;
+ if (0 == (op = *flh)) {
+ return GC_aux::GC_out_of_line_malloc(nwords, GC_AUNCOLLECTABLE);
+ }
+ *flh = GC_obj_link(op);
+ GC_aux::GC_uncollectable_bytes_recently_allocd +=
+ nwords * GC_bytes_per_word;
+ return op;
}
- static void deallocate(void *p, size_t n)
- {
+ static void deallocate(void *p, size_t n)
+ {
size_t nwords = GC_round_up_uncollectable(n);
void ** flh;
-
- if (n > GC_max_fast_bytes) {
- GC_free(p);
- } else {
- flh = GC_uobjfreelist_ptr + nwords;
- GC_obj_link(p) = *flh;
- *flh = p;
- GC_aux::GC_uncollectable_bytes_recently_freed +=
- nwords * GC_bytes_per_word;
- }
- }
- static void ptr_free_deallocate(void *p, size_t n)
- {
+
+ if (n > GC_max_fast_bytes) {
+ GC_free(p);
+ } else {
+ flh = GC_uobjfreelist_ptr + nwords;
+ GC_obj_link(p) = *flh;
+ *flh = p;
+ GC_aux::GC_uncollectable_bytes_recently_freed +=
+ nwords * GC_bytes_per_word;
+ }
+ }
+ static void ptr_free_deallocate(void *p, size_t n)
+ {
size_t nwords = GC_round_up_uncollectable(n);
void ** flh;
-
- if (n > GC_max_fast_bytes) {
- GC_free(p);
- } else {
- flh = GC_auobjfreelist_ptr + nwords;
- GC_obj_link(p) = *flh;
- *flh = p;
- GC_aux::GC_uncollectable_bytes_recently_freed +=
- nwords * GC_bytes_per_word;
- }
- }
+
+ if (n > GC_max_fast_bytes) {
+ GC_free(p);
+ } else {
+ flh = GC_auobjfreelist_ptr + nwords;
+ GC_obj_link(p) = *flh;
+ *flh = p;
+ GC_aux::GC_uncollectable_bytes_recently_freed +=
+ nwords * GC_bytes_per_word;
+ }
+ }
};
typedef single_client_traceable_alloc_template<0> single_client_traceable_alloc;
@@ -325,11 +325,11 @@ typedef single_client_traceable_alloc_template<0> single_client_traceable_alloc;
template < int dummy >
class gc_alloc_template {
public:
- static void * allocate(size_t n) { return GC_malloc(n); }
- static void * ptr_free_allocate(size_t n)
- { return GC_malloc_atomic(n); }
- static void deallocate(void *, size_t) { }
- static void ptr_free_deallocate(void *, size_t) { }
+ static void * allocate(size_t n) { return GC_malloc(n); }
+ static void * ptr_free_allocate(size_t n)
+ { return GC_malloc_atomic(n); }
+ static void deallocate(void *, size_t) { }
+ static void ptr_free_deallocate(void *, size_t) { }
};
typedef gc_alloc_template < 0 > gc_alloc;
@@ -337,11 +337,11 @@ typedef gc_alloc_template < 0 > gc_alloc;
template < int dummy >
class traceable_alloc_template {
public:
- static void * allocate(size_t n) { return GC_malloc_uncollectable(n); }
- static void * ptr_free_allocate(size_t n)
- { return GC_malloc_atomic_uncollectable(n); }
- static void deallocate(void *p, size_t) { GC_free(p); }
- static void ptr_free_deallocate(void *p, size_t) { GC_free(p); }
+ static void * allocate(size_t n) { return GC_malloc_uncollectable(n); }
+ static void * ptr_free_allocate(size_t n)
+ { return GC_malloc_atomic_uncollectable(n); }
+ static void deallocate(void *p, size_t) { GC_free(p); }
+ static void ptr_free_deallocate(void *p, size_t) { GC_free(p); }
};
typedef traceable_alloc_template < 0 > traceable_alloc;
@@ -355,14 +355,14 @@ typedef traceable_alloc_template < 0 > traceable_alloc;
class simple_alloc<T, alloc> { \
public: \
static T *allocate(size_t n) \
- { return 0 == n? 0 : \
- reinterpret_cast<T*>(alloc::ptr_free_allocate(n * sizeof (T))); } \
+ { return 0 == n? 0 : \
+ reinterpret_cast<T*>(alloc::ptr_free_allocate(n * sizeof (T))); } \
static T *allocate(void) \
- { return reinterpret_cast<T*>(alloc::ptr_free_allocate(sizeof (T))); } \
+ { return reinterpret_cast<T*>(alloc::ptr_free_allocate(sizeof (T))); } \
static void deallocate(T *p, size_t n) \
- { if (0 != n) alloc::ptr_free_deallocate(p, n * sizeof (T)); } \
+ { if (0 != n) alloc::ptr_free_deallocate(p, n * sizeof (T)); } \
static void deallocate(T *p) \
- { alloc::ptr_free_deallocate(p, sizeof (T)); } \
+ { alloc::ptr_free_deallocate(p, sizeof (T)); } \
};
__STL_BEGIN_NAMESPACE
diff --git a/include/private/darwin_semaphore.h b/include/private/darwin_semaphore.h
index 9baceebe..379a7f71 100644
--- a/include/private/darwin_semaphore.h
+++ b/include/private/darwin_semaphore.h
@@ -1,3 +1,20 @@
+/*
+ * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
+ * Copyright (c) 2000-2009 by Hewlett-Packard Development Company.
+ * All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+
#ifndef GC_DARWIN_SEMAPHORE_H
#define GC_DARWIN_SEMAPHORE_H
@@ -11,7 +28,7 @@
safe. This isn't a problem because signals aren't used to
suspend threads on darwin.
*/
-
+
typedef struct {
pthread_mutex_t mutex;
pthread_cond_t cond;
@@ -23,7 +40,7 @@ static int sem_init(sem_t *sem, int pshared, int value) {
if(pshared)
ABORT("sem_init with pshared set");
sem->value = value;
-
+
ret = pthread_mutex_init(&sem->mutex,NULL);
if(ret < 0) return -1;
ret = pthread_cond_init(&sem->cond,NULL);
@@ -52,7 +69,7 @@ static int sem_wait(sem_t *sem) {
}
sem->value--;
if(pthread_mutex_unlock(&sem->mutex) < 0)
- return -1;
+ return -1;
return 0;
}
diff --git a/include/private/dbg_mlc.h b/include/private/dbg_mlc.h
index 34443270..aa652ded 100644
--- a/include/private/dbg_mlc.h
+++ b/include/private/dbg_mlc.h
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
* Copyright (c) 1997 by Silicon Graphics. All rights reserved.
@@ -33,9 +33,9 @@
# endif
#ifndef HIDE_POINTER
- /* Gc.h was previously included, and hence the I_HIDE_POINTERS */
- /* definition had no effect. Repeat the gc.h definitions here to */
- /* get them anyway. */
+ /* Gc.h was previously included, and hence the I_HIDE_POINTERS */
+ /* definition had no effect. Repeat the gc.h definitions here to */
+ /* get them anyway. */
typedef GC_word GC_hidden_pointer;
# define HIDE_POINTER(p) (~(GC_hidden_pointer)(p))
# define REVEAL_POINTER(p) ((void *)(HIDE_POINTER(p)))
@@ -43,95 +43,95 @@
# define START_FLAG ((word)0xfedcedcb)
# define END_FLAG ((word)0xbcdecdef)
- /* Stored both one past the end of user object, and one before */
- /* the end of the object as seen by the allocator. */
+ /* Stored both one past the end of user object, and one before */
+ /* the end of the object as seen by the allocator. */
# if defined(KEEP_BACK_PTRS) || defined(PRINT_BLACK_LIST) \
|| defined(MAKE_BACK_GRAPH)
- /* Pointer "source"s that aren't real locations. */
- /* Used in oh_back_ptr fields and as "source" */
- /* argument to some marking functions. */
-# define NOT_MARKED (ptr_t)(0)
-# define MARKED_FOR_FINALIZATION ((ptr_t)(word)2)
- /* Object was marked because it is finalizable. */
-# define MARKED_FROM_REGISTER ((ptr_t)(word)4)
- /* Object was marked from a register. Hence the */
- /* source of the reference doesn't have an address. */
+ /* Pointer "source"s that aren't real locations. */
+ /* Used in oh_back_ptr fields and as "source" */
+ /* argument to some marking functions. */
+# define NOT_MARKED (ptr_t)(0)
+# define MARKED_FOR_FINALIZATION ((ptr_t)(word)2)
+ /* Object was marked because it is finalizable. */
+# define MARKED_FROM_REGISTER ((ptr_t)(word)4)
+ /* Object was marked from a register. Hence the */
+ /* source of the reference doesn't have an address. */
# endif /* KEEP_BACK_PTRS || PRINT_BLACK_LIST */
/* Object header */
typedef struct {
# if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
- /* We potentially keep two different kinds of back */
- /* pointers. KEEP_BACK_PTRS stores a single back */
- /* pointer in each reachable object to allow reporting */
- /* of why an object was retained. MAKE_BACK_GRAPH */
- /* builds a graph containing the inverse of all */
- /* "points-to" edges including those involving */
- /* objects that have just become unreachable. This */
- /* allows detection of growing chains of unreachable */
- /* objects. It may be possible to eventually combine */
- /* both, but for now we keep them separate. Both */
- /* kinds of back pointers are hidden using the */
- /* following macros. In both cases, the plain version */
- /* is constrained to have an least significant bit of 1,*/
- /* to allow it to be distinguished from a free list */
- /* link. This means the plain version must have an */
- /* lsb of 0. */
- /* Note that blocks dropped by black-listing will */
- /* also have the lsb clear once debugging has */
- /* started. */
- /* We're careful never to overwrite a value with lsb 0. */
+ /* We potentially keep two different kinds of back */
+ /* pointers. KEEP_BACK_PTRS stores a single back */
+ /* pointer in each reachable object to allow reporting */
+ /* of why an object was retained. MAKE_BACK_GRAPH */
+ /* builds a graph containing the inverse of all */
+ /* "points-to" edges including those involving */
+ /* objects that have just become unreachable. This */
+ /* allows detection of growing chains of unreachable */
+ /* objects. It may be possible to eventually combine */
+ /* both, but for now we keep them separate. Both */
+ /* kinds of back pointers are hidden using the */
+ /* following macros. In both cases, the plain version */
+ /* is constrained to have an least significant bit of 1,*/
+ /* to allow it to be distinguished from a free list */
+ /* link. This means the plain version must have an */
+ /* lsb of 0. */
+ /* Note that blocks dropped by black-listing will */
+ /* also have the lsb clear once debugging has */
+ /* started. */
+ /* We're careful never to overwrite a value with lsb 0. */
# if ALIGNMENT == 1
- /* Fudge back pointer to be even. */
-# define HIDE_BACK_PTR(p) HIDE_POINTER(~1 & (GC_word)(p))
-# else
-# define HIDE_BACK_PTR(p) HIDE_POINTER(p)
-# endif
-
+ /* Fudge back pointer to be even. */
+# define HIDE_BACK_PTR(p) HIDE_POINTER(~1 & (GC_word)(p))
+# else
+# define HIDE_BACK_PTR(p) HIDE_POINTER(p)
+# endif
+
# ifdef KEEP_BACK_PTRS
- GC_hidden_pointer oh_back_ptr;
-# endif
-# ifdef MAKE_BACK_GRAPH
- GC_hidden_pointer oh_bg_ptr;
-# endif
-# if defined(KEEP_BACK_PTRS) != defined(MAKE_BACK_GRAPH)
- /* Keep double-pointer-sized alignment. */
- word oh_dummy;
-# endif
+ GC_hidden_pointer oh_back_ptr;
+# endif
+# ifdef MAKE_BACK_GRAPH
+ GC_hidden_pointer oh_bg_ptr;
+# endif
+# if defined(KEEP_BACK_PTRS) != defined(MAKE_BACK_GRAPH)
+ /* Keep double-pointer-sized alignment. */
+ word oh_dummy;
+# endif
# endif
- const char * oh_string; /* object descriptor string */
- word oh_int; /* object descriptor integers */
+ const char * oh_string; /* object descriptor string */
+ word oh_int; /* object descriptor integers */
# ifdef NEED_CALLINFO
struct callinfo oh_ci[NFRAMES];
# endif
# ifndef SHORT_DBG_HDRS
- word oh_sz; /* Original malloc arg. */
- word oh_sf; /* start flag */
+ word oh_sz; /* Original malloc arg. */
+ word oh_sf; /* start flag */
# endif /* SHORT_DBG_HDRS */
} oh;
-/* The size of the above structure is assumed not to de-align things, */
-/* and to be a multiple of the word length. */
+/* The size of the above structure is assumed not to de-align things, */
+/* and to be a multiple of the word length. */
#ifdef SHORT_DBG_HDRS
# define DEBUG_BYTES (sizeof (oh))
# define UNCOLLECTABLE_DEBUG_BYTES DEBUG_BYTES
#else
- /* Add space for END_FLAG, but use any extra space that was already */
- /* added to catch off-the-end pointers. */
- /* For uncollectable objects, the extra byte is not added. */
+ /* Add space for END_FLAG, but use any extra space that was already */
+ /* added to catch off-the-end pointers. */
+ /* For uncollectable objects, the extra byte is not added. */
# define UNCOLLECTABLE_DEBUG_BYTES (sizeof (oh) + sizeof (word))
# define DEBUG_BYTES (UNCOLLECTABLE_DEBUG_BYTES - EXTRA_BYTES)
#endif
-/* Round bytes to words without adding extra byte at end. */
+/* Round bytes to words without adding extra byte at end. */
#define SIMPLE_ROUNDED_UP_WORDS(n) BYTES_TO_WORDS((n) + WORDS_TO_BYTES(1) - 1)
-/* ADD_CALL_CHAIN stores a (partial) call chain into an object */
-/* header. It may be called with or without the allocation */
-/* lock. */
-/* PRINT_CALL_CHAIN prints the call chain stored in an object */
-/* to stderr. It requires that we do not hold the lock. */
+/* ADD_CALL_CHAIN stores a (partial) call chain into an object */
+/* header. It may be called with or without the allocation */
+/* lock. */
+/* PRINT_CALL_CHAIN prints the call chain stored in an object */
+/* to stderr. It requires that we do not hold the lock. */
#if defined(SAVE_CALL_CHAIN)
struct callinfo;
void GC_save_callers(struct callinfo info[NFRAMES]);
@@ -155,9 +155,9 @@ typedef struct {
# endif
-/* Check whether object with base pointer p has debugging info */
-/* p is assumed to point to a legitimate object in our part */
-/* of the heap. */
+/* Check whether object with base pointer p has debugging info */
+/* p is assumed to point to a legitimate object in our part */
+/* of the heap. */
#ifdef SHORT_DBG_HDRS
# define GC_has_other_debug_info(p) TRUE
#else
@@ -166,13 +166,13 @@ typedef struct {
#if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
# define GC_HAS_DEBUG_INFO(p) \
- ((*((word *)p) & 1) && GC_has_other_debug_info(p))
+ ((*((word *)p) & 1) && GC_has_other_debug_info(p))
#else
# define GC_HAS_DEBUG_INFO(p) GC_has_other_debug_info(p)
#endif
/* Store debugging info into p. Return displaced pointer. */
-/* Assumes we don't hold allocation lock. */
+/* Assumes we don't hold allocation lock. */
ptr_t GC_store_debug_info(ptr_t p, word sz, const char *str, word integer);
#endif /* _DBG_MLC_H */
diff --git a/include/private/gc_hdrs.h b/include/private/gc_hdrs.h
index 0e4c927d..40dbe5b8 100644
--- a/include/private/gc_hdrs.h
+++ b/include/private/gc_hdrs.h
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
*
@@ -17,7 +17,7 @@
typedef struct hblkhdr hdr;
# if CPP_WORDSZ != 32 && CPP_WORDSZ < 36
- --> Get a real machine.
+ --> Get a real machine.
# endif
/*
@@ -39,10 +39,10 @@ typedef struct hblkhdr hdr;
# define HASH_TL
# endif
-/* Define appropriate out-degrees for each of the two tree levels */
+/* Define appropriate out-degrees for each of the two tree levels */
# ifdef SMALL_CONFIG
# define LOG_BOTTOM_SZ 11
- /* Keep top index size reasonable with smaller blocks. */
+ /* Keep top index size reasonable with smaller blocks. */
# else
# define LOG_BOTTOM_SZ 10
# endif
@@ -67,98 +67,98 @@ typedef struct hblkhdr hdr;
# endif
typedef struct hce {
- word block_addr; /* right shifted by LOG_HBLKSIZE */
+ word block_addr; /* right shifted by LOG_HBLKSIZE */
hdr * hce_hdr;
} hdr_cache_entry;
# define HDR_CACHE_SIZE 8 /* power of 2 */
# define DECLARE_HDR_CACHE \
- hdr_cache_entry hdr_cache[HDR_CACHE_SIZE]
+ hdr_cache_entry hdr_cache[HDR_CACHE_SIZE]
# define INIT_HDR_CACHE BZERO(hdr_cache, sizeof(hdr_cache))
# define HCE(h) hdr_cache + (((word)(h) >> LOG_HBLKSIZE) & (HDR_CACHE_SIZE-1))
# define HCE_VALID_FOR(hce,h) ((hce) -> block_addr == \
- ((word)(h) >> LOG_HBLKSIZE))
+ ((word)(h) >> LOG_HBLKSIZE))
# define HCE_HDR(h) ((hce) -> hce_hdr)
#ifdef PRINT_BLACK_LIST
hdr * GC_header_cache_miss(ptr_t p, hdr_cache_entry *hce, ptr_t source);
# define HEADER_CACHE_MISS(p, hce, source) \
- GC_header_cache_miss(p, hce, source)
+ GC_header_cache_miss(p, hce, source)
#else
hdr * GC_header_cache_miss(ptr_t p, hdr_cache_entry *hce);
# define HEADER_CACHE_MISS(p, hce, source) GC_header_cache_miss(p, hce)
#endif
-/* Set hhdr to the header for p. Analogous to GET_HDR below, */
-/* except that in the case of large objects, it */
-/* gets the header for the object beginning, if GC_all_interior_ptrs */
-/* is set. */
-/* Returns zero if p points to somewhere other than the first page */
-/* of an object, and it is not a valid pointer to the object. */
+/* Set hhdr to the header for p. Analogous to GET_HDR below, */
+/* except that in the case of large objects, it */
+/* gets the header for the object beginning, if GC_all_interior_ptrs */
+/* is set. */
+/* Returns zero if p points to somewhere other than the first page */
+/* of an object, and it is not a valid pointer to the object. */
# define HC_GET_HDR(p, hhdr, source, exit_label) \
- { \
- hdr_cache_entry * hce = HCE(p); \
- if (EXPECT(HCE_VALID_FOR(hce, p), 1)) { \
- HC_HIT(); \
- hhdr = hce -> hce_hdr; \
- } else { \
- hhdr = HEADER_CACHE_MISS(p, hce, source); \
- if (0 == hhdr) goto exit_label; \
- } \
- }
+ { \
+ hdr_cache_entry * hce = HCE(p); \
+ if (EXPECT(HCE_VALID_FOR(hce, p), 1)) { \
+ HC_HIT(); \
+ hhdr = hce -> hce_hdr; \
+ } else { \
+ hhdr = HEADER_CACHE_MISS(p, hce, source); \
+ if (0 == hhdr) goto exit_label; \
+ } \
+ }
typedef struct bi {
hdr * index[BOTTOM_SZ];
- /*
- * The bottom level index contains one of three kinds of values:
- * 0 means we're not responsible for this block,
- * or this is a block other than the first one in a free block.
- * 1 < (long)X <= MAX_JUMP means the block starts at least
- * X * HBLKSIZE bytes before the current address.
- * A valid pointer points to a hdr structure. (The above can't be
- * valid pointers due to the GET_MEM return convention.)
- */
- struct bi * asc_link; /* All indices are linked in */
- /* ascending order... */
- struct bi * desc_link; /* ... and in descending order. */
- word key; /* high order address bits. */
+ /*
+ * The bottom level index contains one of three kinds of values:
+ * 0 means we're not responsible for this block,
+ * or this is a block other than the first one in a free block.
+ * 1 < (long)X <= MAX_JUMP means the block starts at least
+ * X * HBLKSIZE bytes before the current address.
+ * A valid pointer points to a hdr structure. (The above can't be
+ * valid pointers due to the GET_MEM return convention.)
+ */
+ struct bi * asc_link; /* All indices are linked in */
+ /* ascending order... */
+ struct bi * desc_link; /* ... and in descending order. */
+ word key; /* high order address bits. */
# ifdef HASH_TL
- struct bi * hash_link; /* Hash chain link. */
+ struct bi * hash_link; /* Hash chain link. */
# endif
} bottom_index;
/* extern bottom_index GC_all_nils; - really part of GC_arrays */
/* extern bottom_index * GC_top_index []; - really part of GC_arrays */
- /* Each entry points to a bottom_index. */
- /* On a 32 bit machine, it points to */
- /* the index for a set of high order */
- /* bits equal to the index. For longer */
- /* addresses, we hash the high order */
- /* bits to compute the index in */
- /* GC_top_index, and each entry points */
- /* to a hash chain. */
- /* The last entry in each chain is */
- /* GC_all_nils. */
+ /* Each entry points to a bottom_index. */
+ /* On a 32 bit machine, it points to */
+ /* the index for a set of high order */
+ /* bits equal to the index. For longer */
+ /* addresses, we hash the high order */
+ /* bits to compute the index in */
+ /* GC_top_index, and each entry points */
+ /* to a hash chain. */
+ /* The last entry in each chain is */
+ /* GC_all_nils. */
# define MAX_JUMP (HBLKSIZE - 1)
# define HDR_FROM_BI(bi, p) \
- ((bi)->index[((word)(p) >> LOG_HBLKSIZE) & (BOTTOM_SZ - 1)])
+ ((bi)->index[((word)(p) >> LOG_HBLKSIZE) & (BOTTOM_SZ - 1)])
# ifndef HASH_TL
# define BI(p) (GC_top_index \
- [(word)(p) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE)])
+ [(word)(p) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE)])
# define HDR_INNER(p) HDR_FROM_BI(BI(p),p)
# ifdef SMALL_CONFIG
-# define HDR(p) GC_find_header((ptr_t)(p))
+# define HDR(p) GC_find_header((ptr_t)(p))
# else
-# define HDR(p) HDR_INNER(p)
+# define HDR(p) HDR_INNER(p)
# endif
# define GET_BI(p, bottom_indx) (bottom_indx) = BI(p)
# define GET_HDR(p, hhdr) (hhdr) = HDR(p)
@@ -169,34 +169,34 @@ typedef struct bi {
# define TL_HASH(hi) ((hi) & (TOP_SZ - 1))
/* Set bottom_indx to point to the bottom index for address p */
# define GET_BI(p, bottom_indx) \
- { \
- register word hi = \
- (word)(p) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE); \
- register bottom_index * _bi = GC_top_index[TL_HASH(hi)]; \
- \
- while (_bi -> key != hi && _bi != GC_all_nils) \
- _bi = _bi -> hash_link; \
- (bottom_indx) = _bi; \
- }
+ { \
+ register word hi = \
+ (word)(p) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE); \
+ register bottom_index * _bi = GC_top_index[TL_HASH(hi)]; \
+ \
+ while (_bi -> key != hi && _bi != GC_all_nils) \
+ _bi = _bi -> hash_link; \
+ (bottom_indx) = _bi; \
+ }
# define GET_HDR_ADDR(p, ha) \
- { \
- register bottom_index * bi; \
- \
- GET_BI(p, bi); \
- (ha) = &(HDR_FROM_BI(bi, p)); \
- }
+ { \
+ register bottom_index * bi; \
+ \
+ GET_BI(p, bi); \
+ (ha) = &(HDR_FROM_BI(bi, p)); \
+ }
# define GET_HDR(p, hhdr) { register hdr ** _ha; GET_HDR_ADDR(p, _ha); \
- (hhdr) = *_ha; }
+ (hhdr) = *_ha; }
# define SET_HDR(p, hhdr) { register hdr ** _ha; GET_HDR_ADDR(p, _ha); \
- *_ha = (hhdr); }
+ *_ha = (hhdr); }
# define HDR(p) GC_find_header((ptr_t)(p))
# endif
-
-/* Is the result a forwarding address to someplace closer to the */
-/* beginning of the block or NIL? */
+
+/* Is the result a forwarding address to someplace closer to the */
+/* beginning of the block or NIL? */
# define IS_FORWARDING_ADDR_OR_NIL(hhdr) ((size_t) (hhdr) <= MAX_JUMP)
/* Get an HBLKSIZE aligned address closer to the beginning of the block */
-/* h. Assumes hhdr == HDR(h) and IS_FORWARDING_ADDR(hhdr). */
+/* h. Assumes hhdr == HDR(h) and IS_FORWARDING_ADDR(hhdr). */
# define FORWARDED_ADDR(h, hhdr) ((struct hblk *)(h) - (size_t)(hhdr))
# endif /* GC_HEADERS_H */
diff --git a/include/private/gc_locks.h b/include/private/gc_locks.h
index 7a9a6689..f7038f8f 100644
--- a/include/private/gc_locks.h
+++ b/include/private/gc_locks.h
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
@@ -25,7 +25,7 @@
*
* Note that I_HOLD_LOCK and I_DONT_HOLD_LOCK are used only positively
* in assertions, and may return TRUE in the "dont know" case.
- */
+ */
# ifdef THREADS
# if defined(GC_PTHREADS) && !defined(GC_WIN32_THREADS)
@@ -38,7 +38,7 @@
# include <th/PCR_Th.h>
extern PCR_Th_ML GC_allocate_ml;
# define DCL_LOCK_STATE \
- PCR_ERes GC_fastLockRes; PCR_sigset_t GC_old_sig_mask
+ PCR_ERes GC_fastLockRes; PCR_sigset_t GC_old_sig_mask
# define UNCOND_LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
# define UNCOND_UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
# endif
@@ -58,11 +58,11 @@
extern CRITICAL_SECTION GC_allocate_ml;
# ifdef GC_ASSERTIONS
# define UNCOND_LOCK() \
- { EnterCriticalSection(&GC_allocate_ml); \
- SET_LOCK_HOLDER(); }
+ { EnterCriticalSection(&GC_allocate_ml); \
+ SET_LOCK_HOLDER(); }
# define UNCOND_UNLOCK() \
- { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
- LeaveCriticalSection(&GC_allocate_ml); }
+ { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
+ LeaveCriticalSection(&GC_allocate_ml); }
# else
# define UNCOND_LOCK() EnterCriticalSection(&GC_allocate_ml);
# define UNCOND_UNLOCK() LeaveCriticalSection(&GC_allocate_ml);
@@ -70,20 +70,20 @@
# define SET_LOCK_HOLDER() GC_lock_holder = GetCurrentThreadId()
# define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
# define I_HOLD_LOCK() (!GC_need_to_lock \
- || GC_lock_holder == GetCurrentThreadId())
+ || GC_lock_holder == GetCurrentThreadId())
# define I_DONT_HOLD_LOCK() (!GC_need_to_lock \
- || GC_lock_holder != GetCurrentThreadId())
+ || GC_lock_holder != GetCurrentThreadId())
# elif defined(GC_PTHREADS)
# include <pthread.h>
-
- /* Posix allows pthread_t to be a struct, though it rarely is. */
- /* Unfortunately, we need to use a pthread_t to index a data */
- /* structure. It also helps if comparisons don't involve a */
- /* function call. Hence we introduce platform-dependent macros */
- /* to compare pthread_t ids and to map them to integers. */
- /* the mapping to integers does not need to result in different */
- /* integers for each thread, though that should be true as much */
- /* as possible. */
+
+ /* Posix allows pthread_t to be a struct, though it rarely is. */
+ /* Unfortunately, we need to use a pthread_t to index a data */
+ /* structure. It also helps if comparisons don't involve a */
+ /* function call. Hence we introduce platform-dependent macros */
+ /* to compare pthread_t ids and to map them to integers. */
+ /* the mapping to integers does not need to result in different */
+ /* integers for each thread, though that should be true as much */
+ /* as possible. */
/* Refine to exclude platforms on which pthread_t is struct */
# if !defined(GC_WIN32_PTHREADS)
# define NUMERIC_THREAD_ID(id) ((unsigned long)(id))
@@ -91,47 +91,47 @@
# define NUMERIC_THREAD_ID_UNIQUE
# else
# if defined(GC_WIN32_PTHREADS)
-# define NUMERIC_THREAD_ID(id) ((unsigned long)(id.p))
- /* Using documented internal details of win32_pthread library. */
- /* Faster than pthread_equal(). Should not change with */
- /* future versions of win32_pthread library. */
-# define THREAD_EQUAL(id1, id2) ((id1.p == id2.p) && (id1.x == id2.x))
+# define NUMERIC_THREAD_ID(id) ((unsigned long)(id.p))
+ /* Using documented internal details of win32_pthread library. */
+ /* Faster than pthread_equal(). Should not change with */
+ /* future versions of win32_pthread library. */
+# define THREAD_EQUAL(id1, id2) ((id1.p == id2.p) && (id1.x == id2.x))
# undef NUMERIC_THREAD_ID_UNIQUE
# else
- /* Generic definitions that always work, but will result in */
- /* poor performance and weak assertion checking. */
-# define NUMERIC_THREAD_ID(id) 1l
-# define THREAD_EQUAL(id1, id2) pthread_equal(id1, id2)
+ /* Generic definitions that always work, but will result in */
+ /* poor performance and weak assertion checking. */
+# define NUMERIC_THREAD_ID(id) 1l
+# define THREAD_EQUAL(id1, id2) pthread_equal(id1, id2)
# undef NUMERIC_THREAD_ID_UNIQUE
# endif
# endif
# define NO_THREAD ((unsigned long)(-1l))
- /* != NUMERIC_THREAD_ID(pthread_self()) for any thread */
+ /* != NUMERIC_THREAD_ID(pthread_self()) for any thread */
# if !defined(THREAD_LOCAL_ALLOC) && !defined(USE_PTHREAD_LOCKS)
- /* In the THREAD_LOCAL_ALLOC case, the allocation lock tends to */
- /* be held for long periods, if it is held at all. Thus spinning */
- /* and sleeping for fixed periods are likely to result in */
+ /* In the THREAD_LOCAL_ALLOC case, the allocation lock tends to */
+ /* be held for long periods, if it is held at all. Thus spinning */
+ /* and sleeping for fixed periods are likely to result in */
/* significant wasted time. We thus rely mostly on queued locks. */
# define USE_SPIN_LOCK
extern volatile AO_TS_t GC_allocate_lock;
extern void GC_lock(void);
- /* Allocation lock holder. Only set if acquired by client through */
- /* GC_call_with_alloc_lock. */
+ /* Allocation lock holder. Only set if acquired by client through */
+ /* GC_call_with_alloc_lock. */
# ifdef GC_ASSERTIONS
# define UNCOND_LOCK() \
- { if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_SET) \
- GC_lock(); \
- SET_LOCK_HOLDER(); }
+ { if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_SET) \
+ GC_lock(); \
+ SET_LOCK_HOLDER(); }
# define UNCOND_UNLOCK() \
- { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
- AO_CLEAR(&GC_allocate_lock); }
+ { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
+ AO_CLEAR(&GC_allocate_lock); }
# else
# define UNCOND_LOCK() \
- { if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_SET) \
- GC_lock(); }
+ { if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_SET) \
+ GC_lock(); }
# define UNCOND_UNLOCK() \
- AO_CLEAR(&GC_allocate_lock)
+ AO_CLEAR(&GC_allocate_lock)
# endif /* !GC_ASSERTIONS */
# else /* THREAD_LOCAL_ALLOC || USE_PTHREAD_LOCKS */
# ifndef USE_PTHREAD_LOCKS
@@ -143,33 +143,33 @@
extern pthread_mutex_t GC_allocate_ml;
# ifdef GC_ASSERTIONS
# define UNCOND_LOCK() \
- { GC_lock(); \
- SET_LOCK_HOLDER(); }
+ { GC_lock(); \
+ SET_LOCK_HOLDER(); }
# define UNCOND_UNLOCK() \
- { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
- pthread_mutex_unlock(&GC_allocate_ml); }
+ { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
+ pthread_mutex_unlock(&GC_allocate_ml); }
# else /* !GC_ASSERTIONS */
# if defined(NO_PTHREAD_TRYLOCK)
# define UNCOND_LOCK() GC_lock();
# else /* !defined(NO_PTHREAD_TRYLOCK) */
# define UNCOND_LOCK() \
- { if (0 != pthread_mutex_trylock(&GC_allocate_ml)) GC_lock(); }
+ { if (0 != pthread_mutex_trylock(&GC_allocate_ml)) GC_lock(); }
# endif
# define UNCOND_UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
# endif /* !GC_ASSERTIONS */
# endif /* USE_PTHREAD_LOCKS */
# define SET_LOCK_HOLDER() \
- GC_lock_holder = NUMERIC_THREAD_ID(pthread_self())
+ GC_lock_holder = NUMERIC_THREAD_ID(pthread_self())
# define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
# define I_HOLD_LOCK() \
- (!GC_need_to_lock || \
- GC_lock_holder == NUMERIC_THREAD_ID(pthread_self()))
+ (!GC_need_to_lock || \
+ GC_lock_holder == NUMERIC_THREAD_ID(pthread_self()))
# ifndef NUMERIC_THREAD_ID_UNIQUE
# define I_DONT_HOLD_LOCK() 1 /* Conservatively say yes */
# else
# define I_DONT_HOLD_LOCK() \
- (!GC_need_to_lock \
- || GC_lock_holder != NUMERIC_THREAD_ID(pthread_self()))
+ (!GC_need_to_lock \
+ || GC_lock_holder != NUMERIC_THREAD_ID(pthread_self()))
# endif
extern volatile GC_bool GC_collecting;
# define ENTER_GC() GC_collecting = 1;
@@ -189,14 +189,14 @@
# define UNSET_LOCK_HOLDER()
# define I_HOLD_LOCK() TRUE
# define I_DONT_HOLD_LOCK() TRUE
- /* Used only in positive assertions or to test whether */
- /* we still need to acquire the lock. TRUE works in */
- /* either case. */
+ /* Used only in positive assertions or to test whether */
+ /* we still need to acquire the lock. TRUE works in */
+ /* either case. */
# endif /* !THREADS */
-#if defined(UNCOND_LOCK) && !defined(LOCK)
+#if defined(UNCOND_LOCK) && !defined(LOCK)
extern GC_bool GC_need_to_lock;
- /* At least two thread running; need to lock. */
+ /* At least two thread running; need to lock. */
# define LOCK() if (GC_need_to_lock) { UNCOND_LOCK(); }
# define UNLOCK() if (GC_need_to_lock) { UNCOND_UNLOCK(); }
#endif
diff --git a/include/private/gc_pmark.h b/include/private/gc_pmark.h
index 3dc46a85..dd371b1b 100644
--- a/include/private/gc_pmark.h
+++ b/include/private/gc_pmark.h
@@ -43,14 +43,14 @@
# include "gc_priv.h"
# endif
-/* The real declarations of the following is in gc_priv.h, so that */
-/* we can avoid scanning the following table. */
+/* The real declarations of the following is in gc_priv.h, so that */
+/* we can avoid scanning the following table. */
/*
extern mark_proc GC_mark_procs[MAX_MARK_PROCS];
*/
#ifndef MARK_DESCR_OFFSET
-# define MARK_DESCR_OFFSET sizeof(word)
+# define MARK_DESCR_OFFSET sizeof(word)
#endif
/*
@@ -59,22 +59,22 @@ extern mark_proc GC_mark_procs[MAX_MARK_PROCS];
*/
# define BITMAP_BITS (WORDSZ - GC_DS_TAG_BITS)
# define PROC(descr) \
- (GC_mark_procs[((descr) >> GC_DS_TAG_BITS) & (GC_MAX_MARK_PROCS-1)])
+ (GC_mark_procs[((descr) >> GC_DS_TAG_BITS) & (GC_MAX_MARK_PROCS-1)])
# define ENV(descr) \
- ((descr) >> (GC_DS_TAG_BITS + GC_LOG_MAX_MARK_PROCS))
+ ((descr) >> (GC_DS_TAG_BITS + GC_LOG_MAX_MARK_PROCS))
# define MAX_ENV \
- (((word)1 << (WORDSZ - GC_DS_TAG_BITS - GC_LOG_MAX_MARK_PROCS)) - 1)
+ (((word)1 << (WORDSZ - GC_DS_TAG_BITS - GC_LOG_MAX_MARK_PROCS)) - 1)
extern unsigned GC_n_mark_procs;
-/* Number of mark stack entries to discard on overflow. */
+/* Number of mark stack entries to discard on overflow. */
#define GC_MARK_STACK_DISCARDS (INITIAL_MARK_STACK_SIZE/8)
typedef struct GC_ms_entry {
ptr_t mse_start; /* First word of object, word aligned */
- GC_word mse_descr; /* Descriptor; low order two bits are tags, */
- /* as described in gc_mark.h. */
+ GC_word mse_descr; /* Descriptor; low order two bits are tags, */
+ /* as described in gc_mark.h. */
} mse;
extern size_t GC_mark_stack_size;
@@ -95,19 +95,19 @@ extern mse * GC_mark_stack;
* This works roughly as follows:
* The main mark stack never shrinks, but it can grow.
*
- * The initiating threads holds the GC lock, and sets GC_help_wanted.
- *
+ * The initiating threads holds the GC lock, and sets GC_help_wanted.
+ *
* Other threads:
* 1) update helper_count (while holding mark_lock.)
- * 2) allocate a local mark stack
+ * 2) allocate a local mark stack
* repeatedly:
- * 3) Steal a global mark stack entry by atomically replacing
- * its descriptor with 0.
- * 4) Copy it to the local stack.
- * 5) Mark on the local stack until it is empty, or
- * it may be profitable to copy it back.
- * 6) If necessary, copy local stack to global one,
- * holding mark lock.
+ * 3) Steal a global mark stack entry by atomically replacing
+ * its descriptor with 0.
+ * 4) Copy it to the local stack.
+ * 5) Mark on the local stack until it is empty, or
+ * it may be profitable to copy it back.
+ * 6) If necessary, copy local stack to global one,
+ * holding mark lock.
* 7) Stop when the global mark stack is empty.
* 8) decrement helper_count (holding mark_lock).
*
@@ -116,7 +116,7 @@ extern mse * GC_mark_stack;
* also less performant, way.
*/
- /* GC_mark_stack_top is protected by mark lock. */
+ /* GC_mark_stack_top is protected by mark lock. */
/*
* GC_notify_all_marker() is used when GC_help_wanted is first set,
@@ -128,14 +128,14 @@ extern mse * GC_mark_stack;
*/
#endif /* PARALLEL_MARK */
-/* Return a pointer to within 1st page of object. */
-/* Set *new_hdr_p to corr. hdr. */
+/* Return a pointer to within 1st page of object. */
+/* Set *new_hdr_p to corr. hdr. */
ptr_t GC_find_start(ptr_t current, hdr *hhdr, hdr **new_hdr_p);
mse * GC_signal_mark_stack_overflow(mse *msp);
-/* Push the object obj with corresponding heap block header hhdr onto */
-/* the mark stack. */
+/* Push the object obj with corresponding heap block header hhdr onto */
+/* the mark stack. */
# define PUSH_OBJ(obj, hhdr, mark_stack_top, mark_stack_limit) \
{ \
register word _descr = (hhdr) -> hb_descr; \
@@ -151,22 +151,22 @@ mse * GC_signal_mark_stack_overflow(mse *msp);
} \
}
-/* Push the contents of current onto the mark stack if it is a valid */
-/* ptr to a currently unmarked object. Mark it. */
-/* If we assumed a standard-conforming compiler, we could probably */
-/* generate the exit_label transparently. */
+/* Push the contents of current onto the mark stack if it is a valid */
+/* ptr to a currently unmarked object. Mark it. */
+/* If we assumed a standard-conforming compiler, we could probably */
+/* generate the exit_label transparently. */
# define PUSH_CONTENTS(current, mark_stack_top, mark_stack_limit, \
- source, exit_label) \
+ source, exit_label) \
{ \
hdr * my_hhdr; \
\
HC_GET_HDR(current, my_hhdr, source, exit_label); \
PUSH_CONTENTS_HDR(current, mark_stack_top, mark_stack_limit, \
- source, exit_label, my_hhdr, TRUE); \
+ source, exit_label, my_hhdr, TRUE); \
exit_label: ; \
}
-/* Set mark bit, exit if it was already set. */
+/* Set mark bit, exit if it was already set. */
# ifdef USE_MARK_BITS
# ifdef PARALLEL_MARK
@@ -201,111 +201,111 @@ exit_label: ; \
# if defined(I386) && defined(__GNUC__)
# define LONG_MULT(hprod, lprod, x, y) { \
- asm("mull %2" : "=a"(lprod), "=d"(hprod) : "g"(y), "0"(x)); \
+ asm("mull %2" : "=a"(lprod), "=d"(hprod) : "g"(y), "0"(x)); \
}
# else /* No in-line X86 assembly code */
# define LONG_MULT(hprod, lprod, x, y) { \
- unsigned long long prod = (unsigned long long)x \
- * (unsigned long long)y; \
- hprod = prod >> 32; \
- lprod = (unsigned32)prod; \
+ unsigned long long prod = (unsigned long long)x \
+ * (unsigned long long)y; \
+ hprod = prod >> 32; \
+ lprod = (unsigned32)prod; \
}
# endif
#ifdef USE_MARK_BYTES
- /* There is a race here, and we may set */
- /* the bit twice in the concurrent case. This can result in the */
- /* object being pushed twice. But that's only a performance issue. */
+ /* There is a race here, and we may set */
+ /* the bit twice in the concurrent case. This can result in the */
+ /* object being pushed twice. But that's only a performance issue. */
# define SET_MARK_BIT_EXIT_IF_SET(hhdr,bit_no,exit_label) \
{ \
char * mark_byte_addr = (char *)hhdr -> hb_marks + (bit_no); \
char mark_byte = *mark_byte_addr; \
\
- if (mark_byte) goto exit_label; \
- *mark_byte_addr = 1; \
- }
+ if (mark_byte) goto exit_label; \
+ *mark_byte_addr = 1; \
+ }
#endif /* USE_MARK_BYTES */
#ifdef PARALLEL_MARK
# define INCR_MARKS(hhdr) \
- AO_store(&(hhdr -> hb_n_marks), AO_load(&(hhdr -> hb_n_marks))+1);
+ AO_store(&(hhdr -> hb_n_marks), AO_load(&(hhdr -> hb_n_marks))+1);
#else
# define INCR_MARKS(hhdr) ++(hhdr -> hb_n_marks)
#endif
#ifdef ENABLE_TRACE
# define TRACE(source, cmd) \
- if (GC_trace_addr != 0 && (ptr_t)(source) == GC_trace_addr) cmd
+ if (GC_trace_addr != 0 && (ptr_t)(source) == GC_trace_addr) cmd
# define TRACE_TARGET(target, cmd) \
- if (GC_trace_addr != 0 && (target) == *(ptr_t *)GC_trace_addr) cmd
+ if (GC_trace_addr != 0 && (target) == *(ptr_t *)GC_trace_addr) cmd
#else
# define TRACE(source, cmd)
# define TRACE_TARGET(source, cmd)
#endif
-/* If the mark bit corresponding to current is not set, set it, and */
-/* push the contents of the object on the mark stack. Current points */
-/* to the beginning of the object. We rely on the fact that the */
-/* preceding header calculation will succeed for a pointer past the */
-/* first page of an object, only if it is in fact a valid pointer */
-/* to the object. Thus we can omit the otherwise necessary tests */
-/* here. Note in particular that the "displ" value is the displacement */
-/* from the beginning of the heap block, which may itself be in the */
-/* interior of a large object. */
+/* If the mark bit corresponding to current is not set, set it, and */
+/* push the contents of the object on the mark stack. Current points */
+/* to the beginning of the object. We rely on the fact that the */
+/* preceding header calculation will succeed for a pointer past the */
+/* first page of an object, only if it is in fact a valid pointer */
+/* to the object. Thus we can omit the otherwise necessary tests */
+/* here. Note in particular that the "displ" value is the displacement */
+/* from the beginning of the heap block, which may itself be in the */
+/* interior of a large object. */
#ifdef MARK_BIT_PER_GRANULE
# define PUSH_CONTENTS_HDR(current, mark_stack_top, mark_stack_limit, \
- source, exit_label, hhdr, do_offset_check) \
+ source, exit_label, hhdr, do_offset_check) \
{ \
size_t displ = HBLKDISPL(current); /* Displacement in block; in bytes. */\
- /* displ is always within range. If current doesn't point to */ \
- /* first block, then we are in the all_interior_pointers case, and */ \
- /* it is safe to use any displacement value. */ \
+ /* displ is always within range. If current doesn't point to */ \
+ /* first block, then we are in the all_interior_pointers case, and */ \
+ /* it is safe to use any displacement value. */ \
size_t gran_displ = BYTES_TO_GRANULES(displ); \
- size_t gran_offset = hhdr -> hb_map[gran_displ]; \
+ size_t gran_offset = hhdr -> hb_map[gran_displ]; \
size_t byte_offset = displ & (GRANULE_BYTES - 1); \
ptr_t base = current; \
/* The following always fails for large block references. */ \
if (EXPECT((gran_offset | byte_offset) != 0, FALSE)) { \
- if (hhdr -> hb_large_block) { \
- /* gran_offset is bogus. */ \
- size_t obj_displ; \
- base = (ptr_t)(hhdr -> hb_block); \
- obj_displ = (ptr_t)(current) - base; \
- if (obj_displ != displ) { \
- GC_ASSERT(obj_displ < hhdr -> hb_sz); \
- /* Must be in all_interior_pointer case, not first block */ \
- /* already did validity check on cache miss. */ \
- ; \
- } else { \
- if (do_offset_check && !GC_valid_offsets[obj_displ]) { \
- GC_ADD_TO_BLACK_LIST_NORMAL(current, source); \
- goto exit_label; \
- } \
- } \
- gran_displ = 0; \
- GC_ASSERT(hhdr -> hb_sz > HBLKSIZE || \
- hhdr -> hb_block == HBLKPTR(current)); \
- GC_ASSERT((ptr_t)(hhdr -> hb_block) <= (ptr_t) current); \
- } else { \
- size_t obj_displ = GRANULES_TO_BYTES(gran_offset) \
- + byte_offset; \
- if (do_offset_check && !GC_valid_offsets[obj_displ]) { \
- GC_ADD_TO_BLACK_LIST_NORMAL(current, source); \
- goto exit_label; \
- } \
- gran_displ -= gran_offset; \
- base -= obj_displ; \
- } \
+ if (hhdr -> hb_large_block) { \
+ /* gran_offset is bogus. */ \
+ size_t obj_displ; \
+ base = (ptr_t)(hhdr -> hb_block); \
+ obj_displ = (ptr_t)(current) - base; \
+ if (obj_displ != displ) { \
+ GC_ASSERT(obj_displ < hhdr -> hb_sz); \
+ /* Must be in all_interior_pointer case, not first block */ \
+ /* already did validity check on cache miss. */ \
+ ; \
+ } else { \
+ if (do_offset_check && !GC_valid_offsets[obj_displ]) { \
+ GC_ADD_TO_BLACK_LIST_NORMAL(current, source); \
+ goto exit_label; \
+ } \
+ } \
+ gran_displ = 0; \
+ GC_ASSERT(hhdr -> hb_sz > HBLKSIZE || \
+ hhdr -> hb_block == HBLKPTR(current)); \
+ GC_ASSERT((ptr_t)(hhdr -> hb_block) <= (ptr_t) current); \
+ } else { \
+ size_t obj_displ = GRANULES_TO_BYTES(gran_offset) \
+ + byte_offset; \
+ if (do_offset_check && !GC_valid_offsets[obj_displ]) { \
+ GC_ADD_TO_BLACK_LIST_NORMAL(current, source); \
+ goto exit_label; \
+ } \
+ gran_displ -= gran_offset; \
+ base -= obj_displ; \
+ } \
} \
GC_ASSERT(hhdr == GC_find_header(base)); \
GC_ASSERT(gran_displ % BYTES_TO_GRANULES(hhdr -> hb_sz) == 0); \
TRACE(source, GC_log_printf("GC:%u: passed validity tests\n", \
- (unsigned)GC_gc_no)); \
+ (unsigned)GC_gc_no)); \
SET_MARK_BIT_EXIT_IF_SET(hhdr, gran_displ, exit_label); \
TRACE(source, GC_log_printf("GC:%u: previously unmarked\n", \
- (unsigned)GC_gc_no)); \
+ (unsigned)GC_gc_no)); \
TRACE_TARGET(base, \
- GC_log_printf("GC:%u: marking %p from %p instead\n", (unsigned)GC_gc_no, \
- base, source)); \
+ GC_log_printf("GC:%u: marking %p from %p instead\n", (unsigned)GC_gc_no, \
+ base, source)); \
INCR_MARKS(hhdr); \
GC_STORE_BACK_PTR((ptr_t)source, base); \
PUSH_OBJ(base, hhdr, mark_stack_top, mark_stack_limit); \
@@ -314,7 +314,7 @@ exit_label: ; \
#ifdef MARK_BIT_PER_OBJ
# define PUSH_CONTENTS_HDR(current, mark_stack_top, mark_stack_limit, \
- source, exit_label, hhdr, do_offset_check) \
+ source, exit_label, hhdr, do_offset_check) \
{ \
size_t displ = HBLKDISPL(current); /* Displacement in block; in bytes. */\
unsigned32 low_prod, high_prod; \
@@ -323,48 +323,48 @@ exit_label: ; \
LONG_MULT(high_prod, low_prod, displ, inv_sz); \
/* product is > and within sz_in_bytes of displ * sz_in_bytes * 2**32 */ \
if (EXPECT(low_prod >> 16 != 0, FALSE)) { \
- FIXME: fails if offset is a multiple of HBLKSIZE which becomes 0 \
- if (inv_sz == LARGE_INV_SZ) { \
- size_t obj_displ; \
- base = (ptr_t)(hhdr -> hb_block); \
- obj_displ = (ptr_t)(current) - base; \
- if (obj_displ != displ) { \
- GC_ASSERT(obj_displ < hhdr -> hb_sz); \
- /* Must be in all_interior_pointer case, not first block */ \
- /* already did validity check on cache miss. */ \
- ; \
- } else { \
- if (do_offset_check && !GC_valid_offsets[obj_displ]) { \
- GC_ADD_TO_BLACK_LIST_NORMAL(current, source); \
- goto exit_label; \
- } \
- } \
- GC_ASSERT(hhdr -> hb_sz > HBLKSIZE || \
- hhdr -> hb_block == HBLKPTR(current)); \
- GC_ASSERT((ptr_t)(hhdr -> hb_block) < (ptr_t) current); \
- } else { \
- /* Accurate enough if HBLKSIZE <= 2**15. */ \
- GC_STATIC_ASSERT(HBLKSIZE <= (1 << 15)); \
- size_t obj_displ = (((low_prod >> 16) + 1) * (hhdr -> hb_sz)) >> 16; \
- if (do_offset_check && !GC_valid_offsets[obj_displ]) { \
- GC_ADD_TO_BLACK_LIST_NORMAL(current, source); \
- goto exit_label; \
- } \
- base -= obj_displ; \
- } \
+ FIXME: fails if offset is a multiple of HBLKSIZE which becomes 0 \
+ if (inv_sz == LARGE_INV_SZ) { \
+ size_t obj_displ; \
+ base = (ptr_t)(hhdr -> hb_block); \
+ obj_displ = (ptr_t)(current) - base; \
+ if (obj_displ != displ) { \
+ GC_ASSERT(obj_displ < hhdr -> hb_sz); \
+ /* Must be in all_interior_pointer case, not first block */ \
+ /* already did validity check on cache miss. */ \
+ ; \
+ } else { \
+ if (do_offset_check && !GC_valid_offsets[obj_displ]) { \
+ GC_ADD_TO_BLACK_LIST_NORMAL(current, source); \
+ goto exit_label; \
+ } \
+ } \
+ GC_ASSERT(hhdr -> hb_sz > HBLKSIZE || \
+ hhdr -> hb_block == HBLKPTR(current)); \
+ GC_ASSERT((ptr_t)(hhdr -> hb_block) < (ptr_t) current); \
+ } else { \
+ /* Accurate enough if HBLKSIZE <= 2**15. */ \
+ GC_STATIC_ASSERT(HBLKSIZE <= (1 << 15)); \
+ size_t obj_displ = (((low_prod >> 16) + 1) * (hhdr -> hb_sz)) >> 16; \
+ if (do_offset_check && !GC_valid_offsets[obj_displ]) { \
+ GC_ADD_TO_BLACK_LIST_NORMAL(current, source); \
+ goto exit_label; \
+ } \
+ base -= obj_displ; \
+ } \
} \
- /* May get here for pointer to start of block not at */ \
+ /* May get here for pointer to start of block not at */ \
/* beginning of object. If so, it's valid, and we're fine. */ \
GC_ASSERT(high_prod >= 0 && high_prod <= HBLK_OBJS(hhdr -> hb_sz)); \
TRACE(source, GC_log_printf("GC:%u: passed validity tests\n", \
- (unsigned)GC_gc_no)); \
+ (unsigned)GC_gc_no)); \
SET_MARK_BIT_EXIT_IF_SET(hhdr, high_prod, exit_label); \
TRACE(source, GC_log_printf("GC:%u: previously unmarked\n", \
- (unsigned)GC_gc_no)); \
+ (unsigned)GC_gc_no)); \
TRACE_TARGET(base, \
- GC_log_printf("GC:%u: marking %p from %p instead\n", \
- (unsigned)GC_gc_no, \
- base, source)); \
+ GC_log_printf("GC:%u: marking %p from %p instead\n", \
+ (unsigned)GC_gc_no, \
+ base, source)); \
INCR_MARKS(hhdr); \
GC_STORE_BACK_PTR((ptr_t)source, base); \
PUSH_OBJ(base, hhdr, mark_stack_top, mark_stack_limit); \
@@ -373,10 +373,10 @@ exit_label: ; \
#if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
# define PUSH_ONE_CHECKED_STACK(p, source) \
- GC_mark_and_push_stack((ptr_t)(p), (ptr_t)(source))
+ GC_mark_and_push_stack((ptr_t)(p), (ptr_t)(source))
#else
# define PUSH_ONE_CHECKED_STACK(p, source) \
- GC_mark_and_push_stack((ptr_t)(p))
+ GC_mark_and_push_stack((ptr_t)(p))
#endif
/*
@@ -388,22 +388,22 @@ exit_label: ; \
*/
# if NEED_FIXUP_POINTER
- /* Try both the raw version and the fixed up one. */
+ /* Try both the raw version and the fixed up one. */
# define GC_PUSH_ONE_STACK(p, source) \
- if ((ptr_t)(p) >= (ptr_t)GC_least_plausible_heap_addr \
- && (ptr_t)(p) < (ptr_t)GC_greatest_plausible_heap_addr) { \
- PUSH_ONE_CHECKED_STACK(p, source); \
+ if ((ptr_t)(p) >= (ptr_t)GC_least_plausible_heap_addr \
+ && (ptr_t)(p) < (ptr_t)GC_greatest_plausible_heap_addr) { \
+ PUSH_ONE_CHECKED_STACK(p, source); \
} \
FIXUP_POINTER(p); \
- if ((ptr_t)(p) >= (ptr_t)GC_least_plausible_heap_addr \
- && (ptr_t)(p) < (ptr_t)GC_greatest_plausible_heap_addr) { \
- PUSH_ONE_CHECKED_STACK(p, source); \
+ if ((ptr_t)(p) >= (ptr_t)GC_least_plausible_heap_addr \
+ && (ptr_t)(p) < (ptr_t)GC_greatest_plausible_heap_addr) { \
+ PUSH_ONE_CHECKED_STACK(p, source); \
}
# else /* !NEED_FIXUP_POINTER */
# define GC_PUSH_ONE_STACK(p, source) \
- if ((ptr_t)(p) >= (ptr_t)GC_least_plausible_heap_addr \
- && (ptr_t)(p) < (ptr_t)GC_greatest_plausible_heap_addr) { \
- PUSH_ONE_CHECKED_STACK(p, source); \
+ if ((ptr_t)(p) >= (ptr_t)GC_least_plausible_heap_addr \
+ && (ptr_t)(p) < (ptr_t)GC_greatest_plausible_heap_addr) { \
+ PUSH_ONE_CHECKED_STACK(p, source); \
}
# endif
@@ -414,27 +414,27 @@ exit_label: ; \
*/
# define GC_PUSH_ONE_HEAP(p,source) \
FIXUP_POINTER(p); \
- if ((ptr_t)(p) >= (ptr_t)GC_least_plausible_heap_addr \
- && (ptr_t)(p) < (ptr_t)GC_greatest_plausible_heap_addr) { \
- GC_mark_stack_top = GC_mark_and_push( \
- (void *)(p), GC_mark_stack_top, \
- GC_mark_stack_limit, (void * *)(source)); \
+ if ((ptr_t)(p) >= (ptr_t)GC_least_plausible_heap_addr \
+ && (ptr_t)(p) < (ptr_t)GC_greatest_plausible_heap_addr) { \
+ GC_mark_stack_top = GC_mark_and_push( \
+ (void *)(p), GC_mark_stack_top, \
+ GC_mark_stack_limit, (void * *)(source)); \
}
-/* Mark starting at mark stack entry top (incl.) down to */
-/* mark stack entry bottom (incl.). Stop after performing */
-/* about one page worth of work. Return the new mark stack */
-/* top entry. */
+/* Mark starting at mark stack entry top (incl.) down to */
+/* mark stack entry bottom (incl.). Stop after performing */
+/* about one page worth of work. Return the new mark stack */
+/* top entry. */
mse * GC_mark_from(mse * top, mse * bottom, mse *limit);
#define MARK_FROM_MARK_STACK() \
- GC_mark_stack_top = GC_mark_from(GC_mark_stack_top, \
- GC_mark_stack, \
- GC_mark_stack + GC_mark_stack_size);
+ GC_mark_stack_top = GC_mark_from(GC_mark_stack_top, \
+ GC_mark_stack, \
+ GC_mark_stack + GC_mark_stack_size);
/*
* Mark from one finalizable object using the specified
- * mark proc. May not mark the object pointed to by
+ * mark proc. May not mark the object pointed to by
* real_ptr. That is the job of the caller, if appropriate.
* Note that this is called with the mutator running, but
* with us holding the allocation lock. This is safe only if the
@@ -452,50 +452,49 @@ mse * GC_mark_from(mse * top, mse * bottom, mse *limit);
}
extern GC_bool GC_mark_stack_too_small;
- /* We need a larger mark stack. May be */
- /* set by client supplied mark routines.*/
-
-typedef int mark_state_t; /* Current state of marking, as follows:*/
- /* Used to remember where we are during */
- /* concurrent marking. */
-
- /* We say something is dirty if it was */
- /* written since the last time we */
- /* retrieved dirty bits. We say it's */
- /* grungy if it was marked dirty in the */
- /* last set of bits we retrieved. */
-
- /* Invariant I: all roots and marked */
- /* objects p are either dirty, or point */
- /* to objects q that are either marked */
- /* or a pointer to q appears in a range */
- /* on the mark stack. */
-
-# define MS_NONE 0 /* No marking in progress. I holds. */
- /* Mark stack is empty. */
-
-# define MS_PUSH_RESCUERS 1 /* Rescuing objects are currently */
- /* being pushed. I holds, except */
- /* that grungy roots may point to */
- /* unmarked objects, as may marked */
- /* grungy objects above scan_ptr. */
+ /* We need a larger mark stack. May be */
+ /* set by client supplied mark routines.*/
+
+typedef int mark_state_t; /* Current state of marking, as follows:*/
+ /* Used to remember where we are during */
+ /* concurrent marking. */
+
+ /* We say something is dirty if it was */
+ /* written since the last time we */
+ /* retrieved dirty bits. We say it's */
+ /* grungy if it was marked dirty in the */
+ /* last set of bits we retrieved. */
+
+ /* Invariant I: all roots and marked */
+ /* objects p are either dirty, or point */
+ /* to objects q that are either marked */
+ /* or a pointer to q appears in a range */
+ /* on the mark stack. */
+
+# define MS_NONE 0 /* No marking in progress. I holds. */
+ /* Mark stack is empty. */
+
+# define MS_PUSH_RESCUERS 1 /* Rescuing objects are currently */
+ /* being pushed. I holds, except */
+ /* that grungy roots may point to */
+ /* unmarked objects, as may marked */
+ /* grungy objects above scan_ptr. */
# define MS_PUSH_UNCOLLECTABLE 2
- /* I holds, except that marked */
- /* uncollectable objects above scan_ptr */
- /* may point to unmarked objects. */
- /* Roots may point to unmarked objects */
+ /* I holds, except that marked */
+ /* uncollectable objects above scan_ptr */
+ /* may point to unmarked objects. */
+ /* Roots may point to unmarked objects */
-# define MS_ROOTS_PUSHED 3 /* I holds, mark stack may be nonempty */
+# define MS_ROOTS_PUSHED 3 /* I holds, mark stack may be nonempty */
-# define MS_PARTIALLY_INVALID 4 /* I may not hold, e.g. because of M.S. */
- /* overflow. However marked heap */
- /* objects below scan_ptr point to */
- /* marked or stacked objects. */
+# define MS_PARTIALLY_INVALID 4 /* I may not hold, e.g. because of M.S. */
+ /* overflow. However marked heap */
+ /* objects below scan_ptr point to */
+ /* marked or stacked objects. */
-# define MS_INVALID 5 /* I may not hold. */
+# define MS_INVALID 5 /* I may not hold. */
extern mark_state_t GC_mark_state;
#endif /* GC_PMARK_H */
-
diff --git a/include/private/gcconfig.h b/include/private/gcconfig.h
index e5b18e15..79d5e81c 100644
--- a/include/private/gcconfig.h
+++ b/include/private/gcconfig.h
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996 by Silicon Graphics. All rights reserved.
@@ -21,20 +21,20 @@
* case, a few declarations relying on types declared in gc_priv.h will be
* omitted.
*/
-
+
#ifndef GCCONFIG_H
# define GCCONFIG_H
# ifndef GC_PRIVATE_H
- /* Fake ptr_t declaration, just to avoid compilation errors. */
- /* This avoids many instances if "ifndef GC_PRIVATE_H" below. */
+ /* Fake ptr_t declaration, just to avoid compilation errors. */
+ /* This avoids many instances if "ifndef GC_PRIVATE_H" below. */
typedef struct GC_undefined_struct * ptr_t;
-# include <stddef.h> /* For size_t etc. */
+# include <stddef.h> /* For size_t etc. */
# endif
-/* Machine dependent parameters. Some tuning parameters can be found */
-/* near the top of gc_private.h. */
+/* Machine dependent parameters. Some tuning parameters can be found */
+/* near the top of gc_private.h. */
/* Machine specific parts contributed by various people. See README file. */
@@ -70,7 +70,7 @@
# if defined(__arm__) || defined(__thumb__)
# define ARM32
# if !defined(LINUX) && !defined(NETBSD) && !defined(DARWIN) \
- && !defined(_WIN32) && !defined(__CEGCC__)
+ && !defined(_WIN32) && !defined(__CEGCC__)
# define NOSYS
# define mach_type_known
# endif
@@ -108,9 +108,9 @@
# if defined(vax)
# define VAX
# ifdef ultrix
-# define ULTRIX
+# define ULTRIX
# else
-# define BSD
+# define BSD
# endif
# define mach_type_known
# endif
@@ -125,9 +125,9 @@
# endif
# if !defined(LINUX) && !defined(EWS4800) && !defined(NETBSD)
# if defined(ultrix) || defined(__ultrix)
-# define ULTRIX
+# define ULTRIX
# else
-# define IRIX5 /* or IRIX 6.X */
+# define IRIX5 /* or IRIX 6.X */
# endif
# endif /* !LINUX */
# if defined(__NetBSD__) && defined(__MIPSEL__)
@@ -189,7 +189,7 @@
# define mach_type_known
# endif
# if defined(_M_XENIX) && defined(_M_SYSV) && defined(_M_I386)
- /* The above test may need refinement */
+ /* The above test may need refinement */
# define I386
# if defined(_SCO_ELF)
# define SCO_ELF
@@ -239,12 +239,12 @@
# endif
# if defined(LINUX) && defined(__cris__)
# ifndef CRIS
-# define CRIS
+# define CRIS
# endif
# define mach_type_known
# endif
# if defined(LINUX) && (defined(powerpc) || defined(__powerpc__) || \
- defined(powerpc64) || defined(__powerpc64__))
+ defined(powerpc64) || defined(__powerpc64__))
# define POWERPC
# define mach_type_known
# endif
@@ -275,14 +275,14 @@
# if defined(__alpha) || defined(__alpha__)
# define ALPHA
# if !defined(LINUX) && !defined(NETBSD) && !defined(OPENBSD) && !defined(FREEBSD)
-# define OSF1 /* a.k.a Digital Unix */
+# define OSF1 /* a.k.a Digital Unix */
# endif
# define mach_type_known
# endif
# if defined(_AMIGA) && !defined(AMIGA)
# define AMIGA
# endif
-# ifdef AMIGA
+# ifdef AMIGA
# define M68K
# define mach_type_known
# endif
@@ -384,17 +384,17 @@
# if (defined(_MSDOS) || defined(_MSC_VER)) && (_M_IX86 >= 300) \
|| defined(_WIN32) && !defined(__CYGWIN32__) && !defined(__CYGWIN__)
# if defined(__LP64__) || defined(_WIN64)
-# define X86_64
+# define X86_64
# else
# define I386
# endif
-# define MSWIN32 /* or Win64 */
+# define MSWIN32 /* or Win64 */
# define mach_type_known
# endif
# if defined(_MSC_VER) && defined(_M_IA64)
# define IA64
-# define MSWIN32 /* Really win64, but we don't treat 64-bit */
- /* variants as a different platform. */
+# define MSWIN32 /* Really win64, but we don't treat 64-bit */
+ /* variants as a different platform. */
# endif
# endif
# if defined(__DJGPP__)
@@ -426,8 +426,8 @@
# endif
# if defined(__pj__)
# error PicoJava no longer supported
- /* The implementation had problems, and I haven't heard of users */
- /* in ages. If you want it resurrected, let me know. */
+ /* The implementation had problems, and I haven't heard of users */
+ /* in ages. If you want it resurrected, let me know. */
# endif
# if defined(__embedded__) && defined(PPC)
# define POWERPC
@@ -456,11 +456,11 @@
# endif
# if defined(__GNU__)
# if defined(__i386__)
-/* The Debian Hurd running on generic PC */
+/* The Debian Hurd running on generic PC */
# define HURD
# define I386
# define mach_type_known
-# endif
+# endif
# endif
# if defined(__TANDEM)
/* Nonstop S-series */
@@ -472,53 +472,53 @@
/* Feel free to add more clauses here */
-/* Or manually define the machine type here. A machine type is */
-/* characterized by the architecture. Some */
-/* machine types are further subdivided by OS. */
-/* Macros such as LINUX, FREEBSD, etc. distinguish them. */
-/* SYSV on an M68K actually means A/UX. */
+/* Or manually define the machine type here. A machine type is */
+/* characterized by the architecture. Some */
+/* machine types are further subdivided by OS. */
+/* Macros such as LINUX, FREEBSD, etc. distinguish them. */
+/* SYSV on an M68K actually means A/UX. */
/* The distinction in these cases is usually the stack starting address */
# ifndef mach_type_known
# error "The collector has not been ported to this machine/OS combination."
# endif
- /* Mapping is: M68K ==> Motorola 680X0 */
- /* (NEXT, and SYSV (A/UX), */
- /* MACOS and AMIGA variants) */
- /* I386 ==> Intel 386 */
- /* (SEQUENT, OS2, SCO, LINUX, NETBSD, */
- /* FREEBSD, THREE86BSD, MSWIN32, */
- /* BSDI,SOLARIS, NEXT, other variants) */
- /* NS32K ==> Encore Multimax */
- /* MIPS ==> R2000 through R14K */
- /* (many variants) */
- /* VAX ==> DEC VAX */
- /* (BSD, ULTRIX variants) */
- /* HP_PA ==> HP9000/700 & /800 */
- /* HP/UX, LINUX */
- /* SPARC ==> SPARC v7/v8/v9 */
- /* (SOLARIS, LINUX, DRSNX variants) */
- /* ALPHA ==> DEC Alpha */
- /* (OSF1 and LINUX variants) */
- /* M88K ==> Motorola 88XX0 */
- /* (CX_UX and DGUX) */
- /* S370 ==> 370-like machine */
- /* running Amdahl UTS4 */
+ /* Mapping is: M68K ==> Motorola 680X0 */
+ /* (NEXT, and SYSV (A/UX), */
+ /* MACOS and AMIGA variants) */
+ /* I386 ==> Intel 386 */
+ /* (SEQUENT, OS2, SCO, LINUX, NETBSD, */
+ /* FREEBSD, THREE86BSD, MSWIN32, */
+ /* BSDI,SOLARIS, NEXT, other variants) */
+ /* NS32K ==> Encore Multimax */
+ /* MIPS ==> R2000 through R14K */
+ /* (many variants) */
+ /* VAX ==> DEC VAX */
+ /* (BSD, ULTRIX variants) */
+ /* HP_PA ==> HP9000/700 & /800 */
+ /* HP/UX, LINUX */
+ /* SPARC ==> SPARC v7/v8/v9 */
+ /* (SOLARIS, LINUX, DRSNX variants) */
+ /* ALPHA ==> DEC Alpha */
+ /* (OSF1 and LINUX variants) */
+ /* M88K ==> Motorola 88XX0 */
+ /* (CX_UX and DGUX) */
+ /* S370 ==> 370-like machine */
+ /* running Amdahl UTS4 */
/* S390 ==> 390-like machine */
- /* running LINUX */
- /* ARM32 ==> Intel StrongARM */
- /* IA64 ==> Intel IPF */
- /* (e.g. Itanium) */
- /* (LINUX and HPUX) */
- /* SH ==> Hitachi SuperH */
- /* (LINUX & MSWINCE) */
- /* X86_64 ==> AMD x86-64 */
- /* POWERPC ==> IBM/Apple PowerPC */
- /* (MACOS(<=9),DARWIN(incl.MACOSX),*/
- /* LINUX, NETBSD, AIX, NOSYS */
- /* variants) */
- /* Handles 32 and 64-bit variants. */
- /* CRIS ==> Axis Etrax */
- /* M32R ==> Renesas M32R */
+ /* running LINUX */
+ /* ARM32 ==> Intel StrongARM */
+ /* IA64 ==> Intel IPF */
+ /* (e.g. Itanium) */
+ /* (LINUX and HPUX) */
+ /* SH ==> Hitachi SuperH */
+ /* (LINUX & MSWINCE) */
+ /* X86_64 ==> AMD x86-64 */
+ /* POWERPC ==> IBM/Apple PowerPC */
+ /* (MACOS(<=9),DARWIN(incl.MACOSX),*/
+ /* LINUX, NETBSD, AIX, NOSYS */
+ /* variants) */
+ /* Handles 32 and 64-bit variants. */
+ /* CRIS ==> Axis Etrax */
+ /* M32R ==> Renesas M32R */
/*
@@ -560,10 +560,10 @@
* For each machine, the following should:
* 1) define STACK_GROWS_UP if the stack grows toward higher addresses, and
* 2) define exactly one of
- * STACKBOTTOM (should be defined to be an expression)
- * LINUX_STACKBOTTOM
- * HEURISTIC1
- * HEURISTIC2
+ * STACKBOTTOM (should be defined to be an expression)
+ * LINUX_STACKBOTTOM
+ * HEURISTIC1
+ * HEURISTIC2
* If STACKBOTTOM is defined, then it's value will be used directly as the
* stack base. If LINUX_STACKBOTTOM is defined, then it will be determined
* with a method appropriate for most Linux systems. Currently we look
@@ -573,13 +573,13 @@
* If either of the last two macros are defined, then STACKBOTTOM is computed
* during collector startup using one of the following two heuristics:
* HEURISTIC1: Take an address inside GC_init's frame, and round it up to
- * the next multiple of STACK_GRAN.
+ * the next multiple of STACK_GRAN.
* HEURISTIC2: Take an address inside GC_init's frame, increment it repeatedly
- * in small steps (decrement if STACK_GROWS_UP), and read the value
- * at each location. Remember the value when the first
- * Segmentation violation or Bus error is signaled. Round that
- * to the nearest plausible page boundary, and use that instead
- * of STACKBOTTOM.
+ * in small steps (decrement if STACK_GROWS_UP), and read the value
+ * at each location. Remember the value when the first
+ * Segmentation violation or Bus error is signaled. Round that
+ * to the nearest plausible page boundary, and use that instead
+ * of STACKBOTTOM.
*
* Gustavo Rodriguez-Rivera points out that on most (all?) Unix machines,
* the value of environ is a pointer that can serve as STACKBOTTOM.
@@ -605,17 +605,17 @@
* the original main program. The new main program would read something
* like (provided real_main() is not inlined by the compiler):
*
- * # include "gc_private.h"
+ * # include "gc_private.h"
*
- * main(argc, argv, envp)
- * int argc;
- * char **argv, **envp;
- * {
- * int dummy;
+ * main(argc, argv, envp)
+ * int argc;
+ * char **argv, **envp;
+ * {
+ * int dummy;
*
- * GC_stackbottom = (ptr_t)(&dummy);
- * return(real_main(argc, argv, envp));
- * }
+ * GC_stackbottom = (ptr_t)(&dummy);
+ * return(real_main(argc, argv, envp));
+ * }
*
*
* Each architecture may also define the style of virtual dirty bit
@@ -647,8 +647,8 @@
* to push the relevant registers onto the stack.
*/
# if defined(__GNUC__) && ((__GNUC__ >= 3) || \
- (__GNUC__ == 2 && __GNUC_MINOR__ >= 8)) \
- && !defined(__INTEL_COMPILER) && !defined(__PATHCC__)
+ (__GNUC__ == 2 && __GNUC_MINOR__ >= 8)) \
+ && !defined(__INTEL_COMPILER) && !defined(__PATHCC__)
# define HAVE_BUILTIN_UNWIND_INIT
# endif
@@ -657,25 +657,25 @@
# define MACH_TYPE "M68K"
# define ALIGNMENT 2
# ifdef OPENBSD
-# define OS_TYPE "OPENBSD"
-# define HEURISTIC2
-# ifdef __ELF__
-# define DATASTART GC_data_start
-# define DYNAMIC_LOADING
-# else
- extern char etext[];
-# define DATASTART ((ptr_t)(etext))
+# define OS_TYPE "OPENBSD"
+# define HEURISTIC2
+# ifdef __ELF__
+# define DATASTART GC_data_start
+# define DYNAMIC_LOADING
+# else
+ extern char etext[];
+# define DATASTART ((ptr_t)(etext))
# endif
# endif
# ifdef NETBSD
-# define OS_TYPE "NETBSD"
-# define HEURISTIC2
-# ifdef __ELF__
-# define DATASTART GC_data_start
-# define DYNAMIC_LOADING
-# else
- extern char etext[];
-# define DATASTART ((ptr_t)(etext))
+# define OS_TYPE "NETBSD"
+# define HEURISTIC2
+# ifdef __ELF__
+# define DATASTART GC_data_start
+# define DYNAMIC_LOADING
+# else
+ extern char etext[];
+# define DATASTART ((ptr_t)(etext))
# endif
# endif
# ifdef LINUX
@@ -684,10 +684,10 @@
# define MPROTECT_VDB
# ifdef __ELF__
# define DYNAMIC_LOADING
-# include <features.h>
-# if defined(__GLIBC__)&& __GLIBC__>=2
+# include <features.h>
+# if defined(__GLIBC__)&& __GLIBC__>=2
# define SEARCH_FOR_DATA_START
-# else /* !GLIBC2 */
+# else /* !GLIBC2 */
extern char **__environ;
# define DATASTART ((ptr_t)(&__environ))
/* hideous kludge: __environ is the first */
@@ -698,7 +698,7 @@
/* would include .rodata, which may */
/* contain large read-only data tables */
/* that we'd rather not scan. */
-# endif /* !GLIBC2 */
+# endif /* !GLIBC2 */
extern int _end[];
# define DATAEND (ptr_t)(_end)
# else
@@ -707,39 +707,39 @@
# endif
# endif
# ifdef AMIGA
-# define OS_TYPE "AMIGA"
- /* STACKBOTTOM and DATASTART handled specially */
- /* in os_dep.c */
-# define DATAEND /* not needed */
-# define GETPAGESIZE() 4096
+# define OS_TYPE "AMIGA"
+ /* STACKBOTTOM and DATASTART handled specially */
+ /* in os_dep.c */
+# define DATAEND /* not needed */
+# define GETPAGESIZE() 4096
# endif
# ifdef MACOS
# ifndef __LOWMEM__
# include <LowMem.h>
# endif
# define OS_TYPE "MACOS"
- /* see os_dep.c for details of global data segments. */
+ /* see os_dep.c for details of global data segments. */
# define STACKBOTTOM ((ptr_t) LMGetCurStackBase())
-# define DATAEND /* not needed */
+# define DATAEND /* not needed */
# define GETPAGESIZE() 4096
# endif
# ifdef NEXT
-# define OS_TYPE "NEXT"
-# define DATASTART ((ptr_t) get_etext())
-# define STACKBOTTOM ((ptr_t) 0x4000000)
-# define DATAEND /* not needed */
+# define OS_TYPE "NEXT"
+# define DATASTART ((ptr_t) get_etext())
+# define STACKBOTTOM ((ptr_t) 0x4000000)
+# define DATAEND /* not needed */
# endif
# endif
# if defined(POWERPC)
# define MACH_TYPE "POWERPC"
# ifdef MACOS
-# define ALIGNMENT 2 /* Still necessary? Could it be 4? */
+# define ALIGNMENT 2 /* Still necessary? Could it be 4? */
# ifndef __LOWMEM__
# include <LowMem.h>
# endif
# define OS_TYPE "MACOS"
- /* see os_dep.c for details of global data segments. */
+ /* see os_dep.c for details of global data segments. */
# define STACKBOTTOM ((ptr_t) LMGetCurStackBase())
# define DATAEND /* not needed */
# endif
@@ -754,8 +754,8 @@
# define ALIGNMENT 4
# endif
# define OS_TYPE "LINUX"
- /* HEURISTIC1 has been reliably reported to fail for a 32-bit */
- /* executable on a 64 bit kernel. */
+ /* HEURISTIC1 has been reliably reported to fail for a 32-bit */
+ /* executable on a 64 bit kernel. */
# define LINUX_STACKBOTTOM
# define DYNAMIC_LOADING
# define SEARCH_FOR_DATA_START
@@ -778,9 +778,9 @@
# define STACKBOTTOM ((ptr_t) 0xc0000000)
# endif
/* XXX: see get_end(3), get_etext() and get_end() should not be used.
- These aren't used when dyld support is enabled (it is by default) */
+ These aren't used when dyld support is enabled (it is by default) */
# define DATASTART ((ptr_t) get_etext())
-# define DATAEND ((ptr_t) get_end())
+# define DATAEND ((ptr_t) get_end())
# define USE_MMAP
# define USE_MMAP_ANON
# ifdef GC_DARWIN_THREADS
@@ -789,14 +789,14 @@
# include <unistd.h>
# define GETPAGESIZE() getpagesize()
# if defined(USE_PPC_PREFETCH) && defined(__GNUC__)
- /* The performance impact of prefetches is untested */
-# define PREFETCH(x) \
- __asm__ __volatile__ ("dcbt 0,%0" : : "r" ((const void *) (x)))
-# define PREFETCH_FOR_WRITE(x) \
- __asm__ __volatile__ ("dcbtst 0,%0" : : "r" ((const void *) (x)))
+ /* The performance impact of prefetches is untested */
+# define PREFETCH(x) \
+ __asm__ __volatile__ ("dcbt 0,%0" : : "r" ((const void *) (x)))
+# define PREFETCH_FOR_WRITE(x) \
+ __asm__ __volatile__ ("dcbtst 0,%0" : : "r" ((const void *) (x)))
# endif
/* There seems to be some issues with trylock hanging on darwin. This
- should be looked into some more */
+ should be looked into some more */
# define NO_PTHREAD_TRYLOCK
# endif
# ifdef FREEBSD
@@ -825,11 +825,11 @@
# endif
# ifdef AIX
# define OS_TYPE "AIX"
-# undef ALIGNMENT /* in case it's defined */
+# undef ALIGNMENT /* in case it's defined */
# ifdef IA64
# undef IA64
/* DOB: some AIX installs stupidly define IA64 in */
- /* /usr/include/sys/systemcfg.h */
+ /* /usr/include/sys/systemcfg.h */
# endif
# ifdef __64BIT__
# define ALIGNMENT 8
@@ -842,19 +842,19 @@
# endif
# define USE_MMAP
# define USE_MMAP_ANON
- /* From AIX linker man page:
- _text Specifies the first location of the program.
- _etext Specifies the first location after the program.
- _data Specifies the first location of the data.
- _edata Specifies the first location after the initialized data
- _end or end Specifies the first location after all data.
- */
+ /* From AIX linker man page:
+ _text Specifies the first location of the program.
+ _etext Specifies the first location after the program.
+ _data Specifies the first location of the data.
+ _edata Specifies the first location after the initialized data
+ _end or end Specifies the first location after all data.
+ */
extern int _data[], _end[];
# define DATASTART ((ptr_t)((ulong)_data))
# define DATAEND ((ptr_t)((ulong)_end))
extern int errno;
# define DYNAMIC_LOADING
- /* For really old versions of AIX, this may have to be removed. */
+ /* For really old versions of AIX, this may have to be removed. */
# endif
# ifdef NOSYS
@@ -863,7 +863,7 @@
extern void __end[], __dso_handle[];
# define DATASTART (__dso_handle) /* OK, that's ugly. */
# define DATAEND (ptr_t)(__end)
- /* Stack starts at 0xE0000000 for the simulator. */
+ /* Stack starts at 0xE0000000 for the simulator. */
# undef STACK_GRAN
# define STACK_GRAN 0x10000000
# define HEURISTIC1
@@ -872,17 +872,17 @@
# ifdef VAX
# define MACH_TYPE "VAX"
-# define ALIGNMENT 4 /* Pointers are longword aligned by 4.2 C compiler */
+# define ALIGNMENT 4 /* Pointers are longword aligned by 4.2 C compiler */
extern char etext[];
# define DATASTART ((ptr_t)(etext))
# ifdef BSD
-# define OS_TYPE "BSD"
-# define HEURISTIC1
- /* HEURISTIC2 may be OK, but it's hard to test. */
+# define OS_TYPE "BSD"
+# define HEURISTIC1
+ /* HEURISTIC2 may be OK, but it's hard to test. */
# endif
# ifdef ULTRIX
-# define OS_TYPE "ULTRIX"
-# define STACKBOTTOM ((ptr_t) 0x7fffc800)
+# define OS_TYPE "ULTRIX"
+# define STACKBOTTOM ((ptr_t) 0x7fffc800)
# endif
# endif
@@ -893,57 +893,57 @@
# define CPP_WORDSZ 64
# define ELF_CLASS ELFCLASS64
# else
-# define ALIGNMENT 4 /* Required by hardware */
+# define ALIGNMENT 4 /* Required by hardware */
# define CPP_WORDSZ 32
# endif
- /* Don't define USE_ASM_PUSH_REGS. We do use an asm helper, but */
- /* not to push the registers on the mark stack. */
+ /* Don't define USE_ASM_PUSH_REGS. We do use an asm helper, but */
+ /* not to push the registers on the mark stack. */
# ifdef SOLARIS
-# define OS_TYPE "SOLARIS"
- extern int _etext[];
- extern int _end[];
- extern ptr_t GC_SysVGetDataStart(size_t, ptr_t);
+# define OS_TYPE "SOLARIS"
+ extern int _etext[];
+ extern int _end[];
+ extern ptr_t GC_SysVGetDataStart(size_t, ptr_t);
# define DATASTART GC_SysVGetDataStart(0x10000, (ptr_t)_etext)
-# define DATAEND (ptr_t)(_end)
-# if !defined(USE_MMAP) && defined(REDIRECT_MALLOC)
-# define USE_MMAP
- /* Otherwise we now use calloc. Mmap may result in the */
- /* heap interleaved with thread stacks, which can result in */
- /* excessive blacklisting. Sbrk is unusable since it */
- /* doesn't interact correctly with the system malloc. */
-# endif
+# define DATAEND (ptr_t)(_end)
+# if !defined(USE_MMAP) && defined(REDIRECT_MALLOC)
+# define USE_MMAP
+ /* Otherwise we now use calloc. Mmap may result in the */
+ /* heap interleaved with thread stacks, which can result in */
+ /* excessive blacklisting. Sbrk is unusable since it */
+ /* doesn't interact correctly with the system malloc. */
+# endif
# ifdef USE_MMAP
# define HEAP_START (ptr_t)0x40000000
# else
-# define HEAP_START DATAEND
+# define HEAP_START DATAEND
# endif
-# define PROC_VDB
-/* HEURISTIC1 reportedly no longer works under 2.7. */
-/* HEURISTIC2 probably works, but this appears to be preferable. */
-/* Apparently USRSTACK is defined to be USERLIMIT, but in some */
-/* installations that's undefined. We work around this with a */
-/* gross hack: */
+# define PROC_VDB
+/* HEURISTIC1 reportedly no longer works under 2.7. */
+/* HEURISTIC2 probably works, but this appears to be preferable. */
+/* Apparently USRSTACK is defined to be USERLIMIT, but in some */
+/* installations that's undefined. We work around this with a */
+/* gross hack: */
# include <sys/vmparam.h>
-# ifdef USERLIMIT
- /* This should work everywhere, but doesn't. */
-# define STACKBOTTOM ((ptr_t) USRSTACK)
+# ifdef USERLIMIT
+ /* This should work everywhere, but doesn't. */
+# define STACKBOTTOM ((ptr_t) USRSTACK)
# else
-# define HEURISTIC2
+# define HEURISTIC2
# endif
-# include <unistd.h>
+# include <unistd.h>
# define GETPAGESIZE() sysconf(_SC_PAGESIZE)
- /* getpagesize() appeared to be missing from at least one */
- /* Solaris 5.4 installation. Weird. */
-# define DYNAMIC_LOADING
+ /* getpagesize() appeared to be missing from at least one */
+ /* Solaris 5.4 installation. Weird. */
+# define DYNAMIC_LOADING
# endif
# ifdef DRSNX
-# define OS_TYPE "DRSNX"
- extern ptr_t GC_SysVGetDataStart(size_t, ptr_t);
- extern int etext[];
+# define OS_TYPE "DRSNX"
+ extern ptr_t GC_SysVGetDataStart(size_t, ptr_t);
+ extern int etext[];
# define DATASTART GC_SysVGetDataStart(0x10000, (ptr_t)etext)
-# define MPROTECT_VDB
+# define MPROTECT_VDB
# define STACKBOTTOM ((ptr_t) 0xdfff0000)
-# define DYNAMIC_LOADING
+# define DYNAMIC_LOADING
# endif
# ifdef LINUX
# define OS_TYPE "LINUX"
@@ -958,7 +958,7 @@
# define SVR4
extern ptr_t GC_SysVGetDataStart(size_t, ptr_t);
# ifdef __arch64__
-# define DATASTART GC_SysVGetDataStart(0x100000, (ptr_t)_etext)
+# define DATASTART GC_SysVGetDataStart(0x100000, (ptr_t)_etext)
# else
# define DATASTART GC_SysVGetDataStart(0x10000, (ptr_t)_etext)
# endif
@@ -974,29 +974,29 @@
# define OS_TYPE "NETBSD"
# define HEURISTIC2
# ifdef __ELF__
-# define DATASTART GC_data_start
-# define DYNAMIC_LOADING
+# define DATASTART GC_data_start
+# define DYNAMIC_LOADING
# else
- extern char etext[];
-# define DATASTART ((ptr_t)(etext))
+ extern char etext[];
+# define DATASTART ((ptr_t)(etext))
# endif
# endif
# ifdef FREEBSD
-# define OS_TYPE "FREEBSD"
-# define SIG_SUSPEND SIGUSR1
-# define SIG_THR_RESTART SIGUSR2
-# define FREEBSD_STACKBOTTOM
-# ifdef __ELF__
-# define DYNAMIC_LOADING
-# endif
- extern char etext[];
- extern char edata[];
- extern char end[];
-# define NEED_FIND_LIMIT
-# define DATASTART ((ptr_t)(&etext))
-# define DATAEND (GC_find_limit (DATASTART, TRUE))
-# define DATASTART2 ((ptr_t)(&edata))
-# define DATAEND2 ((ptr_t)(&end))
+# define OS_TYPE "FREEBSD"
+# define SIG_SUSPEND SIGUSR1
+# define SIG_THR_RESTART SIGUSR2
+# define FREEBSD_STACKBOTTOM
+# ifdef __ELF__
+# define DYNAMIC_LOADING
+# endif
+ extern char etext[];
+ extern char edata[];
+ extern char end[];
+# define NEED_FIND_LIMIT
+# define DATASTART ((ptr_t)(&etext))
+# define DATAEND (GC_find_limit (DATASTART, TRUE))
+# define DATASTART2 ((ptr_t)(&edata))
+# define DATAEND2 ((ptr_t)(&end))
# endif
# endif
@@ -1007,16 +1007,16 @@
# else
# define CPP_WORDSZ 32
# define ALIGNMENT 4
- /* Appears to hold for all "32 bit" compilers */
- /* except Borland. The -a4 option fixes */
- /* Borland. */
+ /* Appears to hold for all "32 bit" compilers */
+ /* except Borland. The -a4 option fixes */
+ /* Borland. */
/* Ivan Demakov: For Watcom the option is -zp4. */
# endif
# ifdef SEQUENT
-# define OS_TYPE "SEQUENT"
- extern int etext[];
+# define OS_TYPE "SEQUENT"
+ extern int etext[];
# define DATASTART ((ptr_t)((((word) (etext)) + 0xfff) & ~0xfff))
-# define STACKBOTTOM ((ptr_t) 0x3ffff000)
+# define STACKBOTTOM ((ptr_t) 0x3ffff000)
# endif
# ifdef BEOS
# define OS_TYPE "BEOS"
@@ -1026,44 +1026,44 @@
# define DATASTART ((ptr_t)((((word) (etext)) + 0xfff) & ~0xfff))
# endif
# ifdef SOLARIS
-# define OS_TYPE "SOLARIS"
+# define OS_TYPE "SOLARIS"
extern int _etext[], _end[];
- extern ptr_t GC_SysVGetDataStart(size_t, ptr_t);
+ extern ptr_t GC_SysVGetDataStart(size_t, ptr_t);
# define DATASTART GC_SysVGetDataStart(0x1000, (ptr_t)_etext)
-# define DATAEND (ptr_t)(_end)
-/* # define STACKBOTTOM ((ptr_t)(_start)) worked through 2.7, */
-/* but reportedly breaks under 2.8. It appears that the stack */
-/* base is a property of the executable, so this should not break */
-/* old executables. */
-/* HEURISTIC2 probably works, but this appears to be preferable. */
+# define DATAEND (ptr_t)(_end)
+/* # define STACKBOTTOM ((ptr_t)(_start)) worked through 2.7, */
+/* but reportedly breaks under 2.8. It appears that the stack */
+/* base is a property of the executable, so this should not break */
+/* old executables. */
+/* HEURISTIC2 probably works, but this appears to be preferable. */
# include <sys/vm.h>
-# define STACKBOTTOM ((ptr_t) USRSTACK)
+# define STACKBOTTOM ((ptr_t) USRSTACK)
/* At least in Solaris 2.5, PROC_VDB gives wrong values for dirty bits. */
-/* It appears to be fixed in 2.8 and 2.9. */
-# ifdef SOLARIS25_PROC_VDB_BUG_FIXED
-# define PROC_VDB
-# endif
-# define DYNAMIC_LOADING
-# if !defined(USE_MMAP) && defined(REDIRECT_MALLOC)
-# define USE_MMAP
- /* Otherwise we now use calloc. Mmap may result in the */
- /* heap interleaved with thread stacks, which can result in */
- /* excessive blacklisting. Sbrk is unusable since it */
- /* doesn't interact correctly with the system malloc. */
-# endif
+/* It appears to be fixed in 2.8 and 2.9. */
+# ifdef SOLARIS25_PROC_VDB_BUG_FIXED
+# define PROC_VDB
+# endif
+# define DYNAMIC_LOADING
+# if !defined(USE_MMAP) && defined(REDIRECT_MALLOC)
+# define USE_MMAP
+ /* Otherwise we now use calloc. Mmap may result in the */
+ /* heap interleaved with thread stacks, which can result in */
+ /* excessive blacklisting. Sbrk is unusable since it */
+ /* doesn't interact correctly with the system malloc. */
+# endif
# ifdef USE_MMAP
# define HEAP_START (ptr_t)0x40000000
# else
-# define HEAP_START DATAEND
+# define HEAP_START DATAEND
# endif
# endif
# ifdef SCO
-# define OS_TYPE "SCO"
- extern int etext[];
-# define DATASTART ((ptr_t)((((word) (etext)) + 0x3fffff) \
- & ~0x3fffff) \
- +((word)etext & 0xfff))
-# define STACKBOTTOM ((ptr_t) 0x7ffffffc)
+# define OS_TYPE "SCO"
+ extern int etext[];
+# define DATASTART ((ptr_t)((((word) (etext)) + 0x3fffff) \
+ & ~0x3fffff) \
+ +((word)etext & 0xfff))
+# define STACKBOTTOM ((ptr_t) 0x7ffffffc)
# endif
# ifdef SCO_ELF
# define OS_TYPE "SCO_ELF"
@@ -1071,107 +1071,107 @@
# define DATASTART ((ptr_t)(etext))
# define STACKBOTTOM ((ptr_t) 0x08048000)
# define DYNAMIC_LOADING
-# define ELF_CLASS ELFCLASS32
+# define ELF_CLASS ELFCLASS32
# endif
# ifdef DGUX
-# define OS_TYPE "DGUX"
- extern int _etext, _end;
- extern ptr_t GC_SysVGetDataStart(size_t, ptr_t);
-# define DATASTART GC_SysVGetDataStart(0x1000, (ptr_t)(&_etext))
-# define DATAEND (ptr_t)(&_end)
-# define STACK_GROWS_DOWN
-# define HEURISTIC2
-# include <unistd.h>
-# define GETPAGESIZE() sysconf(_SC_PAGESIZE)
-# define DYNAMIC_LOADING
-# ifndef USE_MMAP
-# define USE_MMAP
-# endif /* USE_MMAP */
-# define MAP_FAILED (void *) ((word)-1)
-# ifdef USE_MMAP
-# define HEAP_START (ptr_t)0x40000000
-# else /* USE_MMAP */
-# define HEAP_START DATAEND
-# endif /* USE_MMAP */
+# define OS_TYPE "DGUX"
+ extern int _etext, _end;
+ extern ptr_t GC_SysVGetDataStart(size_t, ptr_t);
+# define DATASTART GC_SysVGetDataStart(0x1000, (ptr_t)(&_etext))
+# define DATAEND (ptr_t)(&_end)
+# define STACK_GROWS_DOWN
+# define HEURISTIC2
+# include <unistd.h>
+# define GETPAGESIZE() sysconf(_SC_PAGESIZE)
+# define DYNAMIC_LOADING
+# ifndef USE_MMAP
+# define USE_MMAP
+# endif /* USE_MMAP */
+# define MAP_FAILED (void *) ((word)-1)
+# ifdef USE_MMAP
+# define HEAP_START (ptr_t)0x40000000
+# else /* USE_MMAP */
+# define HEAP_START DATAEND
+# endif /* USE_MMAP */
# endif /* DGUX */
# ifdef LINUX
-# define OS_TYPE "LINUX"
+# define OS_TYPE "LINUX"
# define LINUX_STACKBOTTOM
-# if 0
-# define HEURISTIC1
+# if 0
+# define HEURISTIC1
# undef STACK_GRAN
# define STACK_GRAN 0x10000000
- /* STACKBOTTOM is usually 0xc0000000, but this changes with */
- /* different kernel configurations. In particular, systems */
- /* with 2GB physical memory will usually move the user */
- /* address space limit, and hence initial SP to 0x80000000. */
+ /* STACKBOTTOM is usually 0xc0000000, but this changes with */
+ /* different kernel configurations. In particular, systems */
+ /* with 2GB physical memory will usually move the user */
+ /* address space limit, and hence initial SP to 0x80000000. */
# endif
# if !defined(GC_LINUX_THREADS) || !defined(REDIRECT_MALLOC)
-# define MPROTECT_VDB
-# else
- /* We seem to get random errors in incremental mode, */
- /* possibly because Linux threads is itself a malloc client */
- /* and can't deal with the signals. */
-# endif
-# define HEAP_START (ptr_t)0x1000
- /* This encourages mmap to give us low addresses, */
- /* thus allowing the heap to grow to ~3GB */
+# define MPROTECT_VDB
+# else
+ /* We seem to get random errors in incremental mode, */
+ /* possibly because Linux threads is itself a malloc client */
+ /* and can't deal with the signals. */
+# endif
+# define HEAP_START (ptr_t)0x1000
+ /* This encourages mmap to give us low addresses, */
+ /* thus allowing the heap to grow to ~3GB */
# ifdef __ELF__
# define DYNAMIC_LOADING
-# ifdef UNDEFINED /* includes ro data */
- extern int _etext[];
+# ifdef UNDEFINED /* includes ro data */
+ extern int _etext[];
# define DATASTART ((ptr_t)((((word) (_etext)) + 0xfff) & ~0xfff))
-# endif
-# include <features.h>
-# if defined(__GLIBC__) && __GLIBC__ >= 2
-# define SEARCH_FOR_DATA_START
-# else
- extern char **__environ;
+# endif
+# include <features.h>
+# if defined(__GLIBC__) && __GLIBC__ >= 2
+# define SEARCH_FOR_DATA_START
+# else
+ extern char **__environ;
# define DATASTART ((ptr_t)(&__environ))
- /* hideous kludge: __environ is the first */
- /* word in crt0.o, and delimits the start */
- /* of the data segment, no matter which */
- /* ld options were passed through. */
- /* We could use _etext instead, but that */
- /* would include .rodata, which may */
- /* contain large read-only data tables */
- /* that we'd rather not scan. */
-# endif
- extern int _end[];
-# define DATAEND (ptr_t)(_end)
-# else
- extern int etext[];
+ /* hideous kludge: __environ is the first */
+ /* word in crt0.o, and delimits the start */
+ /* of the data segment, no matter which */
+ /* ld options were passed through. */
+ /* We could use _etext instead, but that */
+ /* would include .rodata, which may */
+ /* contain large read-only data tables */
+ /* that we'd rather not scan. */
+# endif
+ extern int _end[];
+# define DATAEND (ptr_t)(_end)
+# else
+ extern int etext[];
# define DATASTART ((ptr_t)((((word) (etext)) + 0xfff) & ~0xfff))
# endif
-# ifdef USE_I686_PREFETCH
- /* FIXME: Thus should use __builtin_prefetch, but we'll leave that */
- /* for the next rtelease. */
-# define PREFETCH(x) \
- __asm__ __volatile__ (" prefetchnta %0": : "m"(*(char *)(x)))
- /* Empirically prefetcht0 is much more effective at reducing */
- /* cache miss stalls for the targeted load instructions. But it */
- /* seems to interfere enough with other cache traffic that the net */
- /* result is worse than prefetchnta. */
-# if 0
- /* Using prefetches for write seems to have a slight negative */
- /* impact on performance, at least for a PIII/500. */
-# define PREFETCH_FOR_WRITE(x) \
- __asm__ __volatile__ (" prefetcht0 %0": : "m"(*(char *)(x)))
-# endif
-# endif
-# ifdef USE_3DNOW_PREFETCH
-# define PREFETCH(x) \
- __asm__ __volatile__ (" prefetch %0": : "m"(*(char *)(x)))
-# define PREFETCH_FOR_WRITE(x) \
- __asm__ __volatile__ (" prefetchw %0": : "m"(*(char *)(x)))
-# endif
+# ifdef USE_I686_PREFETCH
+ /* FIXME: Thus should use __builtin_prefetch, but we'll leave that */
+ /* for the next rtelease. */
+# define PREFETCH(x) \
+ __asm__ __volatile__ (" prefetchnta %0": : "m"(*(char *)(x)))
+ /* Empirically prefetcht0 is much more effective at reducing */
+ /* cache miss stalls for the targeted load instructions. But it */
+ /* seems to interfere enough with other cache traffic that the net */
+ /* result is worse than prefetchnta. */
+# if 0
+ /* Using prefetches for write seems to have a slight negative */
+ /* impact on performance, at least for a PIII/500. */
+# define PREFETCH_FOR_WRITE(x) \
+ __asm__ __volatile__ (" prefetcht0 %0": : "m"(*(char *)(x)))
+# endif
+# endif
+# ifdef USE_3DNOW_PREFETCH
+# define PREFETCH(x) \
+ __asm__ __volatile__ (" prefetch %0": : "m"(*(char *)(x)))
+# define PREFETCH_FOR_WRITE(x) \
+ __asm__ __volatile__ (" prefetchw %0": : "m"(*(char *)(x)))
+# endif
# endif
# ifdef CYGWIN32
# define OS_TYPE "CYGWIN32"
# define DATASTART ((ptr_t)GC_DATASTART) /* From gc.h */
-# define DATAEND ((ptr_t)GC_DATAEND)
-# undef STACK_GRAN
+# define DATAEND ((ptr_t)GC_DATAEND)
+# undef STACK_GRAN
# define STACK_GRAN 0x10000
# define HEURISTIC1
# ifdef USE_MMAP
@@ -1180,22 +1180,22 @@
# endif
# endif
# ifdef OS2
-# define OS_TYPE "OS2"
- /* STACKBOTTOM and DATASTART are handled specially in */
- /* os_dep.c. OS2 actually has the right */
- /* system call! */
-# define DATAEND /* not needed */
+# define OS_TYPE "OS2"
+ /* STACKBOTTOM and DATASTART are handled specially in */
+ /* os_dep.c. OS2 actually has the right */
+ /* system call! */
+# define DATAEND /* not needed */
# endif
# ifdef MSWIN32
-# define OS_TYPE "MSWIN32"
- /* STACKBOTTOM and DATASTART are handled specially in */
- /* os_dep.c. */
-# define MPROTECT_VDB
+# define OS_TYPE "MSWIN32"
+ /* STACKBOTTOM and DATASTART are handled specially in */
+ /* os_dep.c. */
+# define MPROTECT_VDB
# define GWW_VDB
# define DATAEND /* not needed */
# endif
# ifdef MSWINCE
-# define OS_TYPE "MSWINCE"
+# define OS_TYPE "MSWINCE"
# define DATAEND /* not needed */
# endif
# ifdef DJGPP
@@ -1208,56 +1208,56 @@
/* # define STACKBOTTOM ((ptr_t)((word) _stubinfo + _stubinfo->size \
+ _stklen)) */
# define STACKBOTTOM ((ptr_t)((word) __djgpp_stack_limit + _stklen))
- /* This may not be right. */
+ /* This may not be right. */
# endif
# ifdef OPENBSD
-# define OS_TYPE "OPENBSD"
+# define OS_TYPE "OPENBSD"
# endif
# ifdef FREEBSD
-# define OS_TYPE "FREEBSD"
-# ifndef GC_FREEBSD_THREADS
-# define MPROTECT_VDB
-# endif
-# ifdef __GLIBC__
-# define SIG_SUSPEND (32+6)
-# define SIG_THR_RESTART (32+5)
- extern int _end[];
-# define DATAEND (ptr_t)(_end)
-# else
-# define SIG_SUSPEND SIGUSR1
-# define SIG_THR_RESTART SIGUSR2
-# endif
-# define FREEBSD_STACKBOTTOM
-# ifdef __ELF__
-# define DYNAMIC_LOADING
-# endif
- extern char etext[];
- extern char * GC_FreeBSDGetDataStart(size_t, ptr_t);
-# define DATASTART GC_FreeBSDGetDataStart(0x1000, (ptr_t)etext)
+# define OS_TYPE "FREEBSD"
+# ifndef GC_FREEBSD_THREADS
+# define MPROTECT_VDB
+# endif
+# ifdef __GLIBC__
+# define SIG_SUSPEND (32+6)
+# define SIG_THR_RESTART (32+5)
+ extern int _end[];
+# define DATAEND (ptr_t)(_end)
+# else
+# define SIG_SUSPEND SIGUSR1
+# define SIG_THR_RESTART SIGUSR2
+# endif
+# define FREEBSD_STACKBOTTOM
+# ifdef __ELF__
+# define DYNAMIC_LOADING
+# endif
+ extern char etext[];
+ extern char * GC_FreeBSDGetDataStart(size_t, ptr_t);
+# define DATASTART GC_FreeBSDGetDataStart(0x1000, (ptr_t)etext)
# endif
# ifdef NETBSD
-# define OS_TYPE "NETBSD"
-# ifdef __ELF__
-# define DYNAMIC_LOADING
-# endif
+# define OS_TYPE "NETBSD"
+# ifdef __ELF__
+# define DYNAMIC_LOADING
+# endif
# endif
# ifdef THREE86BSD
-# define OS_TYPE "THREE86BSD"
+# define OS_TYPE "THREE86BSD"
# endif
# ifdef BSDI
-# define OS_TYPE "BSDI"
+# define OS_TYPE "BSDI"
# endif
# if defined(OPENBSD) || defined(NETBSD) \
|| defined(THREE86BSD) || defined(BSDI)
-# define HEURISTIC2
- extern char etext[];
-# define DATASTART ((ptr_t)(etext))
+# define HEURISTIC2
+ extern char etext[];
+# define DATASTART ((ptr_t)(etext))
# endif
# ifdef NEXT
-# define OS_TYPE "NEXT"
-# define DATASTART ((ptr_t) get_etext())
-# define STACKBOTTOM ((ptr_t)0xc0000000)
-# define DATAEND /* not needed */
+# define OS_TYPE "NEXT"
+# define DATASTART ((ptr_t) get_etext())
+# define STACKBOTTOM ((ptr_t)0xc0000000)
+# define DATAEND /* not needed */
# endif
# ifdef DOS4GW
# define OS_TYPE "DOS4GW"
@@ -1291,9 +1291,9 @@
# define DARWIN_DONT_PARSE_STACK
# define DYNAMIC_LOADING
/* XXX: see get_end(3), get_etext() and get_end() should not be used.
- These aren't used when dyld support is enabled (it is by default) */
+ These aren't used when dyld support is enabled (it is by default) */
# define DATASTART ((ptr_t) get_etext())
-# define DATAEND ((ptr_t) get_end())
+# define DATAEND ((ptr_t) get_end())
# define STACKBOTTOM ((ptr_t) 0xc0000000)
# define USE_MMAP
# define USE_MMAP_ANON
@@ -1303,7 +1303,7 @@
# include <unistd.h>
# define GETPAGESIZE() getpagesize()
/* There seems to be some issues with trylock hanging on darwin. This
- should be looked into some more */
+ should be looked into some more */
# define NO_PTHREAD_TRYLOCK
# endif /* DARWIN */
# endif
@@ -1313,10 +1313,10 @@
# define ALIGNMENT 4
extern char **environ;
# define DATASTART ((ptr_t)(&environ))
- /* hideous kludge: environ is the first */
- /* word in crt0.o, and delimits the start */
- /* of the data segment, no matter which */
- /* ld options were passed through. */
+ /* hideous kludge: environ is the first */
+ /* word in crt0.o, and delimits the start */
+ /* of the data segment, no matter which */
+ /* ld options were passed through. */
# define STACKBOTTOM ((ptr_t) 0xfffff000) /* for Encore */
# endif
@@ -1360,37 +1360,37 @@
# define OS_TYPE "EWS4800"
# endif
# ifdef ULTRIX
-# define HEURISTIC2
+# define HEURISTIC2
# define DATASTART (ptr_t)0x10000000
- /* Could probably be slightly higher since */
- /* startup code allocates lots of stuff. */
-# define OS_TYPE "ULTRIX"
+ /* Could probably be slightly higher since */
+ /* startup code allocates lots of stuff. */
+# define OS_TYPE "ULTRIX"
# define ALIGNMENT 4
# endif
# ifdef IRIX5
-# define HEURISTIC2
+# define HEURISTIC2
extern int _fdata[];
# define DATASTART ((ptr_t)(_fdata))
# ifdef USE_MMAP
# define HEAP_START (ptr_t)0x30000000
# else
-# define HEAP_START DATASTART
+# define HEAP_START DATASTART
# endif
- /* Lowest plausible heap address. */
- /* In the MMAP case, we map there. */
- /* In either case it is used to identify */
- /* heap sections so they're not */
- /* considered as roots. */
-# define OS_TYPE "IRIX5"
+ /* Lowest plausible heap address. */
+ /* In the MMAP case, we map there. */
+ /* In either case it is used to identify */
+ /* heap sections so they're not */
+ /* considered as roots. */
+# define OS_TYPE "IRIX5"
/*# define MPROTECT_VDB DOB: this should work, but there is evidence */
-/* of recent breakage. */
+/* of recent breakage. */
# ifdef _MIPS_SZPTR
-# define CPP_WORDSZ _MIPS_SZPTR
-# define ALIGNMENT (_MIPS_SZPTR/8)
-# else
+# define CPP_WORDSZ _MIPS_SZPTR
+# define ALIGNMENT (_MIPS_SZPTR/8)
+# else
# define ALIGNMENT 4
-# endif
-# define DYNAMIC_LOADING
+# endif
+# define DYNAMIC_LOADING
# endif
# ifdef MSWINCE
# define OS_TYPE "MSWINCE"
@@ -1437,8 +1437,8 @@
# endif
# else
# ifdef PARALLEL_MARK
-# define USE_MARK_BYTES
- /* Minimize compare-and-swap usage. */
+# define USE_MARK_BYTES
+ /* Minimize compare-and-swap usage. */
# endif
# endif
# define STACK_GROWS_UP
@@ -1447,19 +1447,19 @@
extern int __data_start[];
# define DATASTART ((ptr_t)(__data_start))
# if 0
- /* The following appears to work for 7xx systems running HP/UX */
- /* 9.xx Furthermore, it might result in much faster */
- /* collections than HEURISTIC2, which may involve scanning */
- /* segments that directly precede the stack. It is not the */
- /* default, since it may not work on older machine/OS */
- /* combinations. (Thanks to Raymond X.T. Nijssen for uncovering */
- /* this.) */
+ /* The following appears to work for 7xx systems running HP/UX */
+ /* 9.xx Furthermore, it might result in much faster */
+ /* collections than HEURISTIC2, which may involve scanning */
+ /* segments that directly precede the stack. It is not the */
+ /* default, since it may not work on older machine/OS */
+ /* combinations. (Thanks to Raymond X.T. Nijssen for uncovering */
+ /* this.) */
# define STACKBOTTOM ((ptr_t) 0x7b033000) /* from /etc/conf/h/param.h */
# else
- /* Gustavo Rodriguez-Rivera suggested changing HEURISTIC2 */
- /* to this. Note that the GC must be initialized before the */
- /* first putenv call. */
- extern char ** environ;
+ /* Gustavo Rodriguez-Rivera suggested changing HEURISTIC2 */
+ /* to this. Note that the GC must be initialized before the */
+ /* first putenv call. */
+ extern char ** environ;
# define STACKBOTTOM ((ptr_t)environ)
# endif
# define DYNAMIC_LOADING
@@ -1487,179 +1487,179 @@
# define ALIGNMENT 8
# define CPP_WORDSZ 64
# ifdef NETBSD
-# define OS_TYPE "NETBSD"
-# define HEURISTIC2
-# define DATASTART GC_data_start
-# define ELFCLASS32 32
-# define ELFCLASS64 64
-# define ELF_CLASS ELFCLASS64
+# define OS_TYPE "NETBSD"
+# define HEURISTIC2
+# define DATASTART GC_data_start
+# define ELFCLASS32 32
+# define ELFCLASS64 64
+# define ELF_CLASS ELFCLASS64
# define DYNAMIC_LOADING
# endif
# ifdef OPENBSD
-# define OS_TYPE "OPENBSD"
-# define HEURISTIC2
-# ifdef __ELF__ /* since OpenBSD/Alpha 2.9 */
-# define DATASTART GC_data_start
-# define ELFCLASS32 32
-# define ELFCLASS64 64
-# define ELF_CLASS ELFCLASS64
-# else /* ECOFF, until OpenBSD/Alpha 2.7 */
-# define DATASTART ((ptr_t) 0x140000000)
-# endif
+# define OS_TYPE "OPENBSD"
+# define HEURISTIC2
+# ifdef __ELF__ /* since OpenBSD/Alpha 2.9 */
+# define DATASTART GC_data_start
+# define ELFCLASS32 32
+# define ELFCLASS64 64
+# define ELF_CLASS ELFCLASS64
+# else /* ECOFF, until OpenBSD/Alpha 2.7 */
+# define DATASTART ((ptr_t) 0x140000000)
+# endif
# endif
# ifdef FREEBSD
-# define OS_TYPE "FREEBSD"
+# define OS_TYPE "FREEBSD"
/* MPROTECT_VDB is not yet supported at all on FreeBSD/alpha. */
-# define SIG_SUSPEND SIGUSR1
-# define SIG_THR_RESTART SIGUSR2
-# define FREEBSD_STACKBOTTOM
-# ifdef __ELF__
-# define DYNAMIC_LOADING
-# endif
+# define SIG_SUSPEND SIGUSR1
+# define SIG_THR_RESTART SIGUSR2
+# define FREEBSD_STACKBOTTOM
+# ifdef __ELF__
+# define DYNAMIC_LOADING
+# endif
/* Handle unmapped hole alpha*-*-freebsd[45]* puts between etext and edata. */
- extern char etext[];
- extern char edata[];
- extern char end[];
-# define NEED_FIND_LIMIT
-# define DATASTART ((ptr_t)(&etext))
-# define DATAEND (GC_find_limit (DATASTART, TRUE))
-# define DATASTART2 ((ptr_t)(&edata))
-# define DATAEND2 ((ptr_t)(&end))
+ extern char etext[];
+ extern char edata[];
+ extern char end[];
+# define NEED_FIND_LIMIT
+# define DATASTART ((ptr_t)(&etext))
+# define DATAEND (GC_find_limit (DATASTART, TRUE))
+# define DATASTART2 ((ptr_t)(&edata))
+# define DATAEND2 ((ptr_t)(&end))
# endif
# ifdef OSF1
-# define OS_TYPE "OSF1"
-# define DATASTART ((ptr_t) 0x140000000)
- extern int _end[];
-# define DATAEND ((ptr_t) &_end)
- extern char ** environ;
- /* round up from the value of environ to the nearest page boundary */
- /* Probably breaks if putenv is called before collector */
- /* initialization. */
-# define STACKBOTTOM ((ptr_t)(((word)(environ) | (getpagesize()-1))+1))
-/* # define HEURISTIC2 */
- /* Normally HEURISTIC2 is too conservative, since */
- /* the text segment immediately follows the stack. */
- /* Hence we give an upper pound. */
- /* This is currently unused, since we disabled HEURISTIC2 */
- extern int __start[];
-# define HEURISTIC2_LIMIT ((ptr_t)((word)(__start) & ~(getpagesize()-1)))
-# ifndef GC_OSF1_THREADS
- /* Unresolved signal issues with threads. */
-# define MPROTECT_VDB
+# define OS_TYPE "OSF1"
+# define DATASTART ((ptr_t) 0x140000000)
+ extern int _end[];
+# define DATAEND ((ptr_t) &_end)
+ extern char ** environ;
+ /* round up from the value of environ to the nearest page boundary */
+ /* Probably breaks if putenv is called before collector */
+ /* initialization. */
+# define STACKBOTTOM ((ptr_t)(((word)(environ) | (getpagesize()-1))+1))
+/* # define HEURISTIC2 */
+ /* Normally HEURISTIC2 is too conservative, since */
+ /* the text segment immediately follows the stack. */
+ /* Hence we give an upper pound. */
+ /* This is currently unused, since we disabled HEURISTIC2 */
+ extern int __start[];
+# define HEURISTIC2_LIMIT ((ptr_t)((word)(__start) & ~(getpagesize()-1)))
+# ifndef GC_OSF1_THREADS
+ /* Unresolved signal issues with threads. */
+# define MPROTECT_VDB
# endif
-# define DYNAMIC_LOADING
+# define DYNAMIC_LOADING
# endif
# ifdef LINUX
# define OS_TYPE "LINUX"
# define LINUX_STACKBOTTOM
# ifdef __ELF__
-# define SEARCH_FOR_DATA_START
+# define SEARCH_FOR_DATA_START
# define DYNAMIC_LOADING
# else
# define DATASTART ((ptr_t) 0x140000000)
# endif
- extern int _end[];
-# define DATAEND (ptr_t)(_end)
-# define MPROTECT_VDB
- /* Has only been superficially tested. May not */
- /* work on all versions. */
+ extern int _end[];
+# define DATAEND (ptr_t)(_end)
+# define MPROTECT_VDB
+ /* Has only been superficially tested. May not */
+ /* work on all versions. */
# endif
# endif
# ifdef IA64
# define MACH_TYPE "IA64"
# ifdef HPUX
-# ifdef _ILP32
-# define CPP_WORDSZ 32
- /* Requires 8 byte alignment for malloc */
-# define ALIGNMENT 4
+# ifdef _ILP32
+# define CPP_WORDSZ 32
+ /* Requires 8 byte alignment for malloc */
+# define ALIGNMENT 4
# else
-# ifndef _LP64
- ---> unknown ABI
+# ifndef _LP64
+ ---> unknown ABI
# endif
-# define CPP_WORDSZ 64
- /* Requires 16 byte alignment for malloc */
+# define CPP_WORDSZ 64
+ /* Requires 16 byte alignment for malloc */
# define ALIGNMENT 8
# endif
-# define OS_TYPE "HPUX"
+# define OS_TYPE "HPUX"
extern int __data_start[];
# define DATASTART ((ptr_t)(__data_start))
- /* Gustavo Rodriguez-Rivera suggested changing HEURISTIC2 */
- /* to this. Note that the GC must be initialized before the */
- /* first putenv call. */
- extern char ** environ;
+ /* Gustavo Rodriguez-Rivera suggested changing HEURISTIC2 */
+ /* to this. Note that the GC must be initialized before the */
+ /* first putenv call. */
+ extern char ** environ;
# define STACKBOTTOM ((ptr_t)environ)
# define HPUX_STACKBOTTOM
# define DYNAMIC_LOADING
# include <unistd.h>
# define GETPAGESIZE() sysconf(_SC_PAGE_SIZE)
- /* The following was empirically determined, and is probably */
- /* not very robust. */
- /* Note that the backing store base seems to be at a nice */
- /* address minus one page. */
-# define BACKING_STORE_DISPLACEMENT 0x1000000
-# define BACKING_STORE_ALIGNMENT 0x1000
- extern ptr_t GC_register_stackbottom;
-# define BACKING_STORE_BASE GC_register_stackbottom
- /* Known to be wrong for recent HP/UX versions!!! */
+ /* The following was empirically determined, and is probably */
+ /* not very robust. */
+ /* Note that the backing store base seems to be at a nice */
+ /* address minus one page. */
+# define BACKING_STORE_DISPLACEMENT 0x1000000
+# define BACKING_STORE_ALIGNMENT 0x1000
+ extern ptr_t GC_register_stackbottom;
+# define BACKING_STORE_BASE GC_register_stackbottom
+ /* Known to be wrong for recent HP/UX versions!!! */
# endif
# ifdef LINUX
-# define CPP_WORDSZ 64
-# define ALIGNMENT 8
+# define CPP_WORDSZ 64
+# define ALIGNMENT 8
# define OS_TYPE "LINUX"
- /* The following works on NUE and older kernels: */
-/* # define STACKBOTTOM ((ptr_t) 0xa000000000000000l) */
- /* This does not work on NUE: */
+ /* The following works on NUE and older kernels: */
+/* # define STACKBOTTOM ((ptr_t) 0xa000000000000000l) */
+ /* This does not work on NUE: */
# define LINUX_STACKBOTTOM
- /* We also need the base address of the register stack */
- /* backing store. This is computed in */
- /* GC_linux_register_stack_base based on the following */
- /* constants: */
+ /* We also need the base address of the register stack */
+ /* backing store. This is computed in */
+ /* GC_linux_register_stack_base based on the following */
+ /* constants: */
# define BACKING_STORE_ALIGNMENT 0x100000
# define BACKING_STORE_DISPLACEMENT 0x80000000
- extern ptr_t GC_register_stackbottom;
-# define BACKING_STORE_BASE GC_register_stackbottom
-# define SEARCH_FOR_DATA_START
-# ifdef __GNUC__
+ extern ptr_t GC_register_stackbottom;
+# define BACKING_STORE_BASE GC_register_stackbottom
+# define SEARCH_FOR_DATA_START
+# ifdef __GNUC__
# define DYNAMIC_LOADING
-# else
- /* In the Intel compiler environment, we seem to end up with */
- /* statically linked executables and an undefined reference */
- /* to _DYNAMIC */
-# endif
-# define MPROTECT_VDB
- /* Requires Linux 2.3.47 or later. */
- extern int _end[];
-# define DATAEND (ptr_t)(_end)
+# else
+ /* In the Intel compiler environment, we seem to end up with */
+ /* statically linked executables and an undefined reference */
+ /* to _DYNAMIC */
+# endif
+# define MPROTECT_VDB
+ /* Requires Linux 2.3.47 or later. */
+ extern int _end[];
+# define DATAEND (ptr_t)(_end)
# ifdef __GNUC__
-# ifndef __INTEL_COMPILER
-# define PREFETCH(x) \
- __asm__ (" lfetch [%0]": : "r"(x))
-# define PREFETCH_FOR_WRITE(x) \
- __asm__ (" lfetch.excl [%0]": : "r"(x))
-# define CLEAR_DOUBLE(x) \
- __asm__ (" stf.spill [%0]=f0": : "r"((void *)(x)))
-# else
+# ifndef __INTEL_COMPILER
+# define PREFETCH(x) \
+ __asm__ (" lfetch [%0]": : "r"(x))
+# define PREFETCH_FOR_WRITE(x) \
+ __asm__ (" lfetch.excl [%0]": : "r"(x))
+# define CLEAR_DOUBLE(x) \
+ __asm__ (" stf.spill [%0]=f0": : "r"((void *)(x)))
+# else
# include <ia64intrin.h>
-# define PREFETCH(x) \
- __lfetch(__lfhint_none, (x))
-# define PREFETCH_FOR_WRITE(x) \
- __lfetch(__lfhint_nta, (x))
-# define CLEAR_DOUBLE(x) \
- __stf_spill((void *)(x), 0)
-# endif // __INTEL_COMPILER
+# define PREFETCH(x) \
+ __lfetch(__lfhint_none, (x))
+# define PREFETCH_FOR_WRITE(x) \
+ __lfetch(__lfhint_nta, (x))
+# define CLEAR_DOUBLE(x) \
+ __stf_spill((void *)(x), 0)
+# endif // __INTEL_COMPILER
# endif
# endif
# ifdef MSWIN32
- /* FIXME: This is a very partial guess. There is no port, yet. */
+ /* FIXME: This is a very partial guess. There is no port, yet. */
# define OS_TYPE "MSWIN32"
- /* STACKBOTTOM and DATASTART are handled specially in */
- /* os_dep.c. */
+ /* STACKBOTTOM and DATASTART are handled specially in */
+ /* os_dep.c. */
# define DATAEND /* not needed */
# if defined(_WIN64)
# define CPP_WORDSZ 64
# else
-# define CPP_WORDSZ 32 /* Is this possible? */
+# define CPP_WORDSZ 32 /* Is this possible? */
# endif
# define ALIGNMENT 8
# endif
@@ -1670,31 +1670,31 @@
# define ALIGNMENT 4
extern int etext[];
# ifdef CX_UX
-# define OS_TYPE "CX_UX"
+# define OS_TYPE "CX_UX"
# define DATASTART ((((word)etext + 0x3fffff) & ~0x3fffff) + 0x10000)
# endif
# ifdef DGUX
-# define OS_TYPE "DGUX"
- extern ptr_t GC_SysVGetDataStart(size_t, ptr_t);
+# define OS_TYPE "DGUX"
+ extern ptr_t GC_SysVGetDataStart(size_t, ptr_t);
# define DATASTART GC_SysVGetDataStart(0x10000, (ptr_t)etext)
# endif
# define STACKBOTTOM ((char*)0xf0000000) /* determined empirically */
# endif
# ifdef S370
- /* If this still works, and if anyone cares, this should probably */
- /* be moved to the S390 category. */
+ /* If this still works, and if anyone cares, this should probably */
+ /* be moved to the S390 category. */
# define MACH_TYPE "S370"
-# define ALIGNMENT 4 /* Required by hardware */
+# define ALIGNMENT 4 /* Required by hardware */
# ifdef UTS4
# define OS_TYPE "UTS4"
- extern int etext[];
- extern int _etext[];
- extern int _end[];
- extern ptr_t GC_SysVGetDataStart(size_t, ptr_t);
+ extern int etext[];
+ extern int _etext[];
+ extern int _end[];
+ extern ptr_t GC_SysVGetDataStart(size_t, ptr_t);
# define DATASTART GC_SysVGetDataStart(0x10000, (ptr_t)_etext)
-# define DATAEND (ptr_t)(_end)
-# define HEURISTIC2
+# define DATAEND (ptr_t)(_end)
+# define HEURISTIC2
# endif
# endif
@@ -1714,7 +1714,7 @@
# define OS_TYPE "LINUX"
# define LINUX_STACKBOTTOM
# define DYNAMIC_LOADING
- extern int __data_start[];
+ extern int __data_start[];
# define DATASTART ((ptr_t)(__data_start))
extern int _end[];
# define DATAEND (ptr_t)(_end)
@@ -1730,13 +1730,13 @@
# ifdef NETBSD
# define OS_TYPE "NETBSD"
# define HEURISTIC2
-# ifdef __ELF__
+# ifdef __ELF__
# define DATASTART GC_data_start
-# define DYNAMIC_LOADING
-# else
+# define DYNAMIC_LOADING
+# else
extern char etext[];
# define DATASTART ((ptr_t)(etext))
-# endif
+# endif
# endif
# ifdef LINUX
# define OS_TYPE "LINUX"
@@ -1745,26 +1745,26 @@
# define STACK_GRAN 0x10000000
# ifdef __ELF__
# define DYNAMIC_LOADING
-# include <features.h>
-# if defined(__GLIBC__) && __GLIBC__ >= 2 \
- || defined(PLATFORM_ANDROID)
-# define SEARCH_FOR_DATA_START
-# else
- extern char **__environ;
+# include <features.h>
+# if defined(__GLIBC__) && __GLIBC__ >= 2 \
+ || defined(PLATFORM_ANDROID)
+# define SEARCH_FOR_DATA_START
+# else
+ extern char **__environ;
# define DATASTART ((ptr_t)(&__environ))
- /* hideous kludge: __environ is the first */
- /* word in crt0.o, and delimits the start */
- /* of the data segment, no matter which */
- /* ld options were passed through. */
- /* We could use _etext instead, but that */
- /* would include .rodata, which may */
- /* contain large read-only data tables */
- /* that we'd rather not scan. */
-# endif
- extern int _end[];
-# define DATAEND (ptr_t)(_end)
-# else
- extern int etext[];
+ /* hideous kludge: __environ is the first */
+ /* word in crt0.o, and delimits the start */
+ /* of the data segment, no matter which */
+ /* ld options were passed through. */
+ /* We could use _etext instead, but that */
+ /* would include .rodata, which may */
+ /* contain large read-only data tables */
+ /* that we'd rather not scan. */
+# endif
+ extern int _end[];
+# define DATAEND (ptr_t)(_end)
+# else
+ extern int etext[];
# define DATASTART ((ptr_t)((((word) (etext)) + 0xfff) & ~0xfff))
# endif
# endif
@@ -1776,7 +1776,7 @@
/* iPhone */
# define OS_TYPE "DARWIN"
# define DATASTART ((ptr_t) get_etext())
-# define DATAEND ((ptr_t) get_end())
+# define DATAEND ((ptr_t) get_end())
/* #define STACKBOTTOM ((ptr_t) 0x30000000) */ /* FIXME: Is this needed? */
# define HEURISTIC1
# define USE_MMAP
@@ -1826,7 +1826,7 @@
# define DYNAMIC_LOADING
# endif
# endif
-
+
# ifdef SH4
# define MACH_TYPE "SH4"
# define OS_TYPE "MSWINCE"
@@ -1859,42 +1859,42 @@
# endif
# define CACHE_LINE_SIZE 64
# ifdef LINUX
-# define OS_TYPE "LINUX"
+# define OS_TYPE "LINUX"
# define LINUX_STACKBOTTOM
# if !defined(GC_LINUX_THREADS) || !defined(REDIRECT_MALLOC)
-# define MPROTECT_VDB
-# else
- /* We seem to get random errors in incremental mode, */
- /* possibly because Linux threads is itself a malloc client */
- /* and can't deal with the signals. */
-# endif
+# define MPROTECT_VDB
+# else
+ /* We seem to get random errors in incremental mode, */
+ /* possibly because Linux threads is itself a malloc client */
+ /* and can't deal with the signals. */
+# endif
# ifdef __ELF__
# define DYNAMIC_LOADING
-# ifdef UNDEFINED /* includes ro data */
- extern int _etext[];
+# ifdef UNDEFINED /* includes ro data */
+ extern int _etext[];
# define DATASTART ((ptr_t)((((word) (_etext)) + 0xfff) & ~0xfff))
-# endif
-# include <features.h>
-# define SEARCH_FOR_DATA_START
- extern int _end[];
-# define DATAEND (ptr_t)(_end)
-# else
- extern int etext[];
+# endif
+# include <features.h>
+# define SEARCH_FOR_DATA_START
+ extern int _end[];
+# define DATAEND (ptr_t)(_end)
+# else
+ extern int etext[];
# define DATASTART ((ptr_t)((((word) (etext)) + 0xfff) & ~0xfff))
# endif
# if defined(__GNUC__) && __GNUC__ >= 3
-# define PREFETCH(x) __builtin_prefetch((x), 0, 0)
-# define PREFETCH_FOR_WRITE(x) __builtin_prefetch((x), 1)
-# endif
+# define PREFETCH(x) __builtin_prefetch((x), 0, 0)
+# define PREFETCH_FOR_WRITE(x) __builtin_prefetch((x), 1)
+# endif
# endif
# ifdef DARWIN
# define OS_TYPE "DARWIN"
# define DARWIN_DONT_PARSE_STACK
# define DYNAMIC_LOADING
/* XXX: see get_end(3), get_etext() and get_end() should not be used.
- These aren't used when dyld support is enabled (it is by default) */
+ These aren't used when dyld support is enabled (it is by default) */
# define DATASTART ((ptr_t) get_etext())
-# define DATAEND ((ptr_t) get_end())
+# define DATAEND ((ptr_t) get_end())
# define STACKBOTTOM ((ptr_t) 0x7fff5fc00000)
# define USE_MMAP
# define USE_MMAP_ANON
@@ -1904,86 +1904,86 @@
# include <unistd.h>
# define GETPAGESIZE() getpagesize()
/* There seems to be some issues with trylock hanging on darwin. This
- should be looked into some more */
+ should be looked into some more */
# define NO_PTHREAD_TRYLOCK
# endif
# ifdef FREEBSD
-# define OS_TYPE "FREEBSD"
-# ifndef GC_FREEBSD_THREADS
-# define MPROTECT_VDB
-# endif
-# ifdef __GLIBC__
-# define SIG_SUSPEND (32+6)
-# define SIG_THR_RESTART (32+5)
- extern int _end[];
-# define DATAEND (ptr_t)(_end)
-# else
-# define SIG_SUSPEND SIGUSR1
-# define SIG_THR_RESTART SIGUSR2
-# endif
-# define FREEBSD_STACKBOTTOM
-# ifdef __ELF__
-# define DYNAMIC_LOADING
-# endif
- extern char etext[];
+# define OS_TYPE "FREEBSD"
+# ifndef GC_FREEBSD_THREADS
+# define MPROTECT_VDB
+# endif
+# ifdef __GLIBC__
+# define SIG_SUSPEND (32+6)
+# define SIG_THR_RESTART (32+5)
+ extern int _end[];
+# define DATAEND (ptr_t)(_end)
+# else
+# define SIG_SUSPEND SIGUSR1
+# define SIG_THR_RESTART SIGUSR2
+# endif
+# define FREEBSD_STACKBOTTOM
+# ifdef __ELF__
+# define DYNAMIC_LOADING
+# endif
+ extern char etext[];
ptr_t GC_FreeBSDGetDataStart(size_t max_page_size, ptr_t etext_addr);
-# define DATASTART GC_FreeBSDGetDataStart(0x1000, &etext)
+# define DATASTART GC_FreeBSDGetDataStart(0x1000, &etext)
# endif
# ifdef NETBSD
-# define OS_TYPE "NETBSD"
-# ifdef __ELF__
-# define DYNAMIC_LOADING
-# endif
-# define HEURISTIC2
- extern char etext[];
-# define SEARCH_FOR_DATA_START
+# define OS_TYPE "NETBSD"
+# ifdef __ELF__
+# define DYNAMIC_LOADING
+# endif
+# define HEURISTIC2
+ extern char etext[];
+# define SEARCH_FOR_DATA_START
# endif
# ifdef SOLARIS
-# define OS_TYPE "SOLARIS"
-# define ELF_CLASS ELFCLASS64
+# define OS_TYPE "SOLARIS"
+# define ELF_CLASS ELFCLASS64
extern int _etext[], _end[];
- extern ptr_t GC_SysVGetDataStart(size_t, ptr_t);
+ extern ptr_t GC_SysVGetDataStart(size_t, ptr_t);
# define DATASTART GC_SysVGetDataStart(0x1000, (ptr_t)_etext)
-# define DATAEND (ptr_t)(_end)
-/* # define STACKBOTTOM ((ptr_t)(_start)) worked through 2.7, */
-/* but reportedly breaks under 2.8. It appears that the stack */
-/* base is a property of the executable, so this should not break */
-/* old executables. */
-/* HEURISTIC2 probably works, but this appears to be preferable. */
-/* Apparently USRSTACK is defined to be USERLIMIT, but in some */
-/* installations that's undefined. We work around this with a */
-/* gross hack: */
+# define DATAEND (ptr_t)(_end)
+/* # define STACKBOTTOM ((ptr_t)(_start)) worked through 2.7, */
+/* but reportedly breaks under 2.8. It appears that the stack */
+/* base is a property of the executable, so this should not break */
+/* old executables. */
+/* HEURISTIC2 probably works, but this appears to be preferable. */
+/* Apparently USRSTACK is defined to be USERLIMIT, but in some */
+/* installations that's undefined. We work around this with a */
+/* gross hack: */
# include <sys/vmparam.h>
-# ifdef USERLIMIT
- /* This should work everywhere, but doesn't. */
-# define STACKBOTTOM ((ptr_t) USRSTACK)
+# ifdef USERLIMIT
+ /* This should work everywhere, but doesn't. */
+# define STACKBOTTOM ((ptr_t) USRSTACK)
# else
-# define HEURISTIC2
+# define HEURISTIC2
# endif
/* At least in Solaris 2.5, PROC_VDB gives wrong values for dirty bits. */
-/* It appears to be fixed in 2.8 and 2.9. */
-# ifdef SOLARIS25_PROC_VDB_BUG_FIXED
-# define PROC_VDB
-# endif
-# define DYNAMIC_LOADING
-# if !defined(USE_MMAP) && defined(REDIRECT_MALLOC)
-# define USE_MMAP
- /* Otherwise we now use calloc. Mmap may result in the */
- /* heap interleaved with thread stacks, which can result in */
- /* excessive blacklisting. Sbrk is unusable since it */
- /* doesn't interact correctly with the system malloc. */
-# endif
+/* It appears to be fixed in 2.8 and 2.9. */
+# ifdef SOLARIS25_PROC_VDB_BUG_FIXED
+# define PROC_VDB
+# endif
+# define DYNAMIC_LOADING
+# if !defined(USE_MMAP) && defined(REDIRECT_MALLOC)
+# define USE_MMAP
+ /* Otherwise we now use calloc. Mmap may result in the */
+ /* heap interleaved with thread stacks, which can result in */
+ /* excessive blacklisting. Sbrk is unusable since it */
+ /* doesn't interact correctly with the system malloc. */
+# endif
# ifdef USE_MMAP
# define HEAP_START (ptr_t)0x40000000
# else
-# define HEAP_START DATAEND
+# define HEAP_START DATAEND
# endif
# endif
# ifdef MSWIN32
-# define OS_TYPE "MSWIN32"
- /* STACKBOTTOM and DATASTART are handled specially in */
- /* os_dep.c. */
-# define MPROTECT_VDB
+# define OS_TYPE "MSWIN32"
+ /* STACKBOTTOM and DATASTART are handled specially in */
+ /* os_dep.c. */
+# define MPROTECT_VDB
# define GWW_VDB
# define DATAEND /* not needed */
# endif
@@ -1991,42 +1991,42 @@
#if defined(LINUX_STACKBOTTOM) && defined(NO_PROC_STAT) \
&& !defined(USE_LIBC_PRIVATES)
- /* This combination will fail, since we have no way to get */
- /* the stack base. Use HEURISTIC2 instead. */
+ /* This combination will fail, since we have no way to get */
+ /* the stack base. Use HEURISTIC2 instead. */
# undef LINUX_STACKBOTTOM
# define HEURISTIC2
- /* This may still fail on some architectures like IA64. */
- /* We tried ... */
+ /* This may still fail on some architectures like IA64. */
+ /* We tried ... */
#endif
#if defined(LINUX) && defined(USE_MMAP)
- /* The kernel may do a somewhat better job merging mappings etc. */
- /* with anonymous mappings. */
+ /* The kernel may do a somewhat better job merging mappings etc. */
+ /* with anonymous mappings. */
# define USE_MMAP_ANON
#endif
#if defined(GC_LINUX_THREADS) && defined(REDIRECT_MALLOC)
- /* Nptl allocates thread stacks with mmap, which is fine. But it */
- /* keeps a cache of thread stacks. Thread stacks contain the */
- /* thread control blocks. These in turn contain a pointer to */
- /* (sizeof (void *) from the beginning of) the dtv for thread-local */
- /* storage, which is calloc allocated. If we don't scan the cached */
- /* thread stacks, we appear to lose the dtv. This tends to */
- /* result in something that looks like a bogus dtv count, which */
- /* tends to result in a memset call on a block that is way too */
- /* large. Sometimes we're lucky and the process just dies ... */
- /* There seems to be a similar issue with some other memory */
- /* allocated by the dynamic loader. */
- /* This should be avoidable by either: */
- /* - Defining USE_PROC_FOR_LIBRARIES here. */
- /* That performs very poorly, precisely because we end up */
- /* scanning cached stacks. */
- /* - Have calloc look at its callers. */
- /* In spite of the fact that it is gross and disgusting. */
- /* In fact neither seems to suffice, probably in part because */
- /* even with USE_PROC_FOR_LIBRARIES, we don't scan parts of stack */
- /* segments that appear to be out of bounds. Thus we actually */
- /* do both, which seems to yield the best results. */
+ /* Nptl allocates thread stacks with mmap, which is fine. But it */
+ /* keeps a cache of thread stacks. Thread stacks contain the */
+ /* thread control blocks. These in turn contain a pointer to */
+ /* (sizeof (void *) from the beginning of) the dtv for thread-local */
+ /* storage, which is calloc allocated. If we don't scan the cached */
+ /* thread stacks, we appear to lose the dtv. This tends to */
+ /* result in something that looks like a bogus dtv count, which */
+ /* tends to result in a memset call on a block that is way too */
+ /* large. Sometimes we're lucky and the process just dies ... */
+ /* There seems to be a similar issue with some other memory */
+ /* allocated by the dynamic loader. */
+ /* This should be avoidable by either: */
+ /* - Defining USE_PROC_FOR_LIBRARIES here. */
+ /* That performs very poorly, precisely because we end up */
+ /* scanning cached stacks. */
+ /* - Have calloc look at its callers. */
+ /* In spite of the fact that it is gross and disgusting. */
+ /* In fact neither seems to suffice, probably in part because */
+ /* even with USE_PROC_FOR_LIBRARIES, we don't scan parts of stack */
+ /* segments that appear to be out of bounds. Thus we actually */
+ /* do both, which seems to yield the best results. */
# define USE_PROC_FOR_LIBRARIES
#endif
@@ -2056,22 +2056,22 @@
# ifndef GETPAGESIZE
# if defined(SOLARIS) || defined(IRIX5) || defined(LINUX) \
|| defined(NETBSD) || defined(FREEBSD) || defined(HPUX)
-# include <unistd.h>
+# include <unistd.h>
# endif
# define GETPAGESIZE() getpagesize()
# endif
# if defined(SOLARIS) || defined(DRSNX) || defined(UTS4)
- /* OS has SVR4 generic features. */
- /* Probably others also qualify. */
+ /* OS has SVR4 generic features. */
+ /* Probably others also qualify. */
# define SVR4
# endif
# if defined(SOLARIS) || defined(DRSNX)
- /* OS has SOLARIS style semi-undocumented interface */
- /* to dynamic loader. */
+ /* OS has SOLARIS style semi-undocumented interface */
+ /* to dynamic loader. */
# define SOLARISDL
- /* OS has SOLARIS style signal handlers. */
+ /* OS has SOLARIS style signal handlers. */
# define SUNOS5SIGS
# endif
@@ -2090,15 +2090,15 @@
# endif
# if defined(SVR4) || defined(LINUX) || defined(IRIX5) || defined(HPUX) \
- || defined(OPENBSD) || defined(NETBSD) || defined(FREEBSD) \
- || defined(DGUX) || defined(BSD) \
- || defined(AIX) || defined(DARWIN) || defined(OSF1) \
- || defined(HURD)
-# define UNIX_LIKE /* Basic Unix-like system calls work. */
+ || defined(OPENBSD) || defined(NETBSD) || defined(FREEBSD) \
+ || defined(DGUX) || defined(BSD) \
+ || defined(AIX) || defined(DARWIN) || defined(OSF1) \
+ || defined(HURD)
+# define UNIX_LIKE /* Basic Unix-like system calls work. */
# endif
# if CPP_WORDSZ != 32 && CPP_WORDSZ != 64
- -> bad word size
+ -> bad word size
# endif
# ifndef ALIGNMENT
@@ -2116,19 +2116,19 @@
# endif
# ifdef SMALL_CONFIG
- /* Presumably not worth the space it takes. */
+ /* Presumably not worth the space it takes. */
# undef PROC_VDB
# undef MPROTECT_VDB
# endif
# ifdef USE_MUNMAP
- /* FIXME: Remove this undef if possible. */
+ /* FIXME: Remove this undef if possible. */
# undef MPROTECT_VDB /* Can't deal with address space holes. */
# endif
# if defined(PARALLEL_MARK)
- /* FIXME: Remove this undef if possible. */
-# undef MPROTECT_VDB /* For now. */
+ /* FIXME: Remove this undef if possible. */
+# undef MPROTECT_VDB /* For now. */
# endif
# if defined(MPROTECT_VDB) && defined(GC_PREFER_MPROTECT_VDB)
@@ -2154,7 +2154,7 @@
# endif
# ifndef CACHE_LINE_SIZE
-# define CACHE_LINE_SIZE 32 /* Wild guess */
+# define CACHE_LINE_SIZE 32 /* Wild guess */
# endif
# ifndef STATIC
@@ -2167,11 +2167,11 @@
# if defined(LINUX) || defined(HURD) || defined(__GLIBC__)
# define REGISTER_LIBRARIES_EARLY
- /* We sometimes use dl_iterate_phdr, which may acquire an internal */
- /* lock. This isn't safe after the world has stopped. So we must */
- /* call GC_register_dynamic_libraries before stopping the world. */
- /* For performance reasons, this may be beneficial on other */
- /* platforms as well, though it should be avoided in win32. */
+ /* We sometimes use dl_iterate_phdr, which may acquire an internal */
+ /* lock. This isn't safe after the world has stopped. So we must */
+ /* call GC_register_dynamic_libraries before stopping the world. */
+ /* For performance reasons, this may be beneficial on other */
+ /* platforms as well, though it should be avoided in win32. */
# endif /* LINUX */
# if defined(SEARCH_FOR_DATA_START)
@@ -2181,44 +2181,44 @@
# ifndef CLEAR_DOUBLE
# define CLEAR_DOUBLE(x) \
- ((word*)x)[0] = 0; \
- ((word*)x)[1] = 0;
+ ((word*)x)[0] = 0; \
+ ((word*)x)[1] = 0;
# endif /* CLEAR_DOUBLE */
# if defined(GC_LINUX_THREADS) && defined(REDIRECT_MALLOC) \
&& !defined(INCLUDE_LINUX_THREAD_DESCR)
- /* Will not work, since libc and the dynamic loader use thread */
- /* locals, sometimes as the only reference. */
+ /* Will not work, since libc and the dynamic loader use thread */
+ /* locals, sometimes as the only reference. */
# define INCLUDE_LINUX_THREAD_DESCR
# endif
# if defined(GC_IRIX_THREADS) && !defined(IRIX5)
- --> inconsistent configuration
+ --> inconsistent configuration
# endif
# if defined(GC_LINUX_THREADS) && !defined(LINUX)
- --> inconsistent configuration
+ --> inconsistent configuration
# endif
# if defined(GC_NETBSD_THREADS) && !defined(NETBSD)
- --> inconsistent configuration
+ --> inconsistent configuration
# endif
# if defined(GC_FREEBSD_THREADS) && !defined(FREEBSD)
- --> inconsistent configuration
+ --> inconsistent configuration
# endif
# if defined(GC_SOLARIS_THREADS) && !defined(SOLARIS)
- --> inconsistent configuration
+ --> inconsistent configuration
# endif
# if defined(GC_HPUX_THREADS) && !defined(HPUX)
- --> inconsistent configuration
+ --> inconsistent configuration
# endif
# if defined(GC_AIX_THREADS) && !defined(_AIX)
- --> inconsistent configuration
+ --> inconsistent configuration
# endif
# if defined(GC_GNU_THREADS) && !defined(HURD)
- --> inconsistent configuration
+ --> inconsistent configuration
# endif
# if defined(GC_WIN32_THREADS) && !defined(MSWIN32) && !defined(CYGWIN32) \
- && !defined(MSWINCE)
- --> inconsistent configuration
+ && !defined(MSWINCE)
+ --> inconsistent configuration
# endif
# if defined(PCR) || defined(GC_WIN32_THREADS) || defined(GC_PTHREADS)
@@ -2243,49 +2243,49 @@
# elif defined(_LLP64) || defined(__LLP64__) || defined(_WIN64)
# define STRTOULL strtoull
# else
- /* strtoul() fits since sizeof(long) >= sizeof(word). */
+ /* strtoul() fits since sizeof(long) >= sizeof(word). */
# define STRTOULL strtoul
# endif
# endif
# if defined(SPARC)
-# define ASM_CLEAR_CODE /* Stack clearing is crucial, and we */
- /* include assembly code to do it well. */
+# define ASM_CLEAR_CODE /* Stack clearing is crucial, and we */
+ /* include assembly code to do it well. */
# endif
- /* Can we save call chain in objects for debugging? */
- /* SET NFRAMES (# of saved frames) and NARGS (#of args for each */
- /* frame) to reasonable values for the platform. */
- /* Set SAVE_CALL_CHAIN if we can. SAVE_CALL_COUNT can be specified */
- /* at build time, though we feel free to adjust it slightly. */
- /* Define NEED_CALLINFO if we either save the call stack or */
- /* GC_ADD_CALLER is defined. */
- /* GC_CAN_SAVE_CALL_STACKS is set in gc.h. */
+ /* Can we save call chain in objects for debugging? */
+ /* SET NFRAMES (# of saved frames) and NARGS (#of args for each */
+ /* frame) to reasonable values for the platform. */
+ /* Set SAVE_CALL_CHAIN if we can. SAVE_CALL_COUNT can be specified */
+ /* at build time, though we feel free to adjust it slightly. */
+ /* Define NEED_CALLINFO if we either save the call stack or */
+ /* GC_ADD_CALLER is defined. */
+ /* GC_CAN_SAVE_CALL_STACKS is set in gc.h. */
#if defined(SPARC)
# define CAN_SAVE_CALL_ARGS
#endif
#if (defined(I386) || defined(X86_64)) && (defined(LINUX) || defined(__GLIBC__))
- /* SAVE_CALL_CHAIN is supported if the code is compiled to save */
- /* frame pointers by default, i.e. no -fomit-frame-pointer flag. */
+ /* SAVE_CALL_CHAIN is supported if the code is compiled to save */
+ /* frame pointers by default, i.e. no -fomit-frame-pointer flag. */
# define CAN_SAVE_CALL_ARGS
#endif
# if defined(SAVE_CALL_COUNT) && !defined(GC_ADD_CALLER) \
- && defined(GC_CAN_SAVE_CALL_STACKS)
-# define SAVE_CALL_CHAIN
+ && defined(GC_CAN_SAVE_CALL_STACKS)
+# define SAVE_CALL_CHAIN
# endif
# ifdef SAVE_CALL_CHAIN
# if defined(SAVE_CALL_NARGS) && defined(CAN_SAVE_CALL_ARGS)
# define NARGS SAVE_CALL_NARGS
# else
-# define NARGS 0 /* Number of arguments to save for each call. */
+# define NARGS 0 /* Number of arguments to save for each call. */
# endif
# endif
# ifdef SAVE_CALL_CHAIN
# ifndef SAVE_CALL_COUNT
-# define NFRAMES 6 /* Number of frames to save. Even for */
- /* alignment reasons. */
+# define NFRAMES 6 /* Number of frames to save. Even for */
+ /* alignment reasons. */
# else
# define NFRAMES ((SAVE_CALL_COUNT + 1) & ~1)
# endif
@@ -2321,10 +2321,10 @@
# endif
# if !defined(MARK_BIT_PER_GRANULE) && !defined(MARK_BIT_PER_OBJ)
-# define MARK_BIT_PER_GRANULE /* Usually faster */
+# define MARK_BIT_PER_GRANULE /* Usually faster */
# endif
-/* Some static sanity tests. */
+/* Some static sanity tests. */
# if defined(MARK_BIT_PER_GRANULE) && defined(MARK_BIT_PER_OBJ)
# error Define only one of MARK_BIT_PER_GRANULE and MARK_BIT_PER_OBJ.
# endif
@@ -2341,36 +2341,36 @@
# endif
#ifdef GC_PRIVATE_H
- /* This relies on some type definitions from gc_priv.h, from */
- /* where it's normally included. */
- /* */
- /* How to get heap memory from the OS: */
- /* Note that sbrk()-like allocation is preferred, since it */
- /* usually makes it possible to merge consecutively allocated */
- /* chunks. It also avoids unintended recursion with */
- /* -DREDIRECT_MALLOC. */
- /* GET_MEM() returns a HLKSIZE aligned chunk. */
- /* 0 is taken to mean failure. */
- /* In the case os USE_MMAP, the argument must also be a */
- /* physical page size. */
- /* GET_MEM is currently not assumed to retrieve 0 filled space, */
- /* though we should perhaps take advantage of the case in which */
- /* does. */
- struct hblk; /* See gc_priv.h. */
+ /* This relies on some type definitions from gc_priv.h, from */
+ /* where it's normally included. */
+ /* */
+ /* How to get heap memory from the OS: */
+ /* Note that sbrk()-like allocation is preferred, since it */
+ /* usually makes it possible to merge consecutively allocated */
+ /* chunks. It also avoids unintended recursion with */
+ /* -DREDIRECT_MALLOC. */
+ /* GET_MEM() returns a HLKSIZE aligned chunk. */
+ /* 0 is taken to mean failure. */
+ /* In the case os USE_MMAP, the argument must also be a */
+ /* physical page size. */
+ /* GET_MEM is currently not assumed to retrieve 0 filled space, */
+ /* though we should perhaps take advantage of the case in which */
+ /* does. */
+ struct hblk; /* See gc_priv.h. */
# if defined(PCR)
char * real_malloc(size_t bytes);
# define GET_MEM(bytes) HBLKPTR(real_malloc((size_t)bytes + GC_page_size) \
- + GC_page_size-1)
+ + GC_page_size-1)
# elif defined(OS2)
void * os2_alloc(size_t bytes);
# define GET_MEM(bytes) HBLKPTR((ptr_t)os2_alloc((size_t)bytes \
- + GC_page_size) \
- + GC_page_size-1)
+ + GC_page_size) \
+ + GC_page_size-1)
# elif defined(NEXT) || defined(DOS4GW) || defined(NONSTOP) || \
- (defined(AMIGA) && !defined(GC_AMIGA_FASTALLOC)) || \
- (defined(SOLARIS) && !defined(USE_MMAP))
+ (defined(AMIGA) && !defined(GC_AMIGA_FASTALLOC)) || \
+ (defined(SOLARIS) && !defined(USE_MMAP))
# define GET_MEM(bytes) HBLKPTR((size_t) calloc(1, (size_t)bytes + GC_page_size) \
- + GC_page_size-1)
+ + GC_page_size-1)
# elif defined(MSWIN32)
ptr_t GC_win32_get_mem(GC_word bytes);
# define GET_MEM(bytes) (struct hblk *)GC_win32_get_mem(bytes)
@@ -2378,11 +2378,11 @@
# if defined(USE_TEMPORARY_MEMORY)
extern Ptr GC_MacTemporaryNewPtr(size_t size, Boolean clearMemory);
# define GET_MEM(bytes) HBLKPTR( \
- GC_MacTemporaryNewPtr(bytes + GC_page_size, true) \
- + GC_page_size-1)
+ GC_MacTemporaryNewPtr(bytes + GC_page_size, true) \
+ + GC_page_size-1)
# else
# define GET_MEM(bytes) HBLKPTR( \
- NewPtrClear(bytes + GC_page_size) + GC_page_size-1)
+ NewPtrClear(bytes + GC_page_size) + GC_page_size-1)
# endif
# elif defined(MSWINCE)
ptr_t GC_wince_get_mem(GC_word bytes);
@@ -2390,8 +2390,8 @@
# elif defined(AMIGA) && defined(GC_AMIGA_FASTALLOC)
extern void *GC_amiga_get_mem(size_t size);
# define GET_MEM(bytes) HBLKPTR((size_t) \
- GC_amiga_get_mem((size_t)bytes + GC_page_size) \
- + GC_page_size-1)
+ GC_amiga_get_mem((size_t)bytes + GC_page_size) \
+ + GC_page_size-1)
# else
ptr_t GC_unix_get_mem(GC_word bytes);
# define GET_MEM(bytes) (struct hblk *)GC_unix_get_mem(bytes)
diff --git a/include/private/pthread_support.h b/include/private/pthread_support.h
index f3476056..f00fa681 100644
--- a/include/private/pthread_support.h
+++ b/include/private/pthread_support.h
@@ -1,10 +1,27 @@
+/*
+ * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
+ * Copyright (c) 2000-2009 by Hewlett-Packard Development Company.
+ * All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+
#ifndef GC_PTHREAD_SUPPORT_H
#define GC_PTHREAD_SUPPORT_H
# include "private/gc_priv.h"
# if defined(GC_PTHREADS) && !defined(GC_WIN32_THREADS)
-
+
#if defined(GC_DARWIN_THREADS)
# include "private/darwin_stop_world.h"
#else
@@ -17,40 +34,40 @@
/* We use the allocation lock to protect thread-related data structures. */
-/* The set of all known threads. We intercept thread creation and */
-/* joins. */
-/* Protected by allocation/GC lock. */
-/* Some of this should be declared volatile, but that's inconsistent */
-/* with some library routine declarations. */
+/* The set of all known threads. We intercept thread creation and */
+/* joins. */
+/* Protected by allocation/GC lock. */
+/* Some of this should be declared volatile, but that's inconsistent */
+/* with some library routine declarations. */
typedef struct GC_Thread_Rep {
- struct GC_Thread_Rep * next; /* More recently allocated threads */
- /* with a given pthread id come */
- /* first. (All but the first are */
- /* guaranteed to be dead, but we may */
- /* not yet have registered the join.) */
+ struct GC_Thread_Rep * next; /* More recently allocated threads */
+ /* with a given pthread id come */
+ /* first. (All but the first are */
+ /* guaranteed to be dead, but we may */
+ /* not yet have registered the join.) */
pthread_t id;
/* Extra bookkeeping information the stopping code uses */
struct thread_stop_info stop_info;
-
+
short flags;
-# define FINISHED 1 /* Thread has exited. */
-# define DETACHED 2 /* Thread is treated as detached. */
- /* Thread may really be detached, or */
- /* it may have have been explicitly */
- /* registered, in which case we can */
- /* deallocate its GC_Thread_Rep once */
- /* it unregisters itself, since it */
- /* may not return a GC pointer. */
-# define MAIN_THREAD 4 /* True for the original thread only. */
- short thread_blocked; /* Protected by GC lock. */
- /* Treated as a boolean value. If set, */
- /* thread will acquire GC lock before */
- /* doing any pointer manipulations, and */
- /* has set its sp value. Thus it does */
- /* not need to be sent a signal to stop */
- /* it. */
- ptr_t stack_end; /* Cold end of the stack (except for */
- /* main thread). */
+# define FINISHED 1 /* Thread has exited. */
+# define DETACHED 2 /* Thread is treated as detached. */
+ /* Thread may really be detached, or */
+ /* it may have have been explicitly */
+ /* registered, in which case we can */
+ /* deallocate its GC_Thread_Rep once */
+ /* it unregisters itself, since it */
+ /* may not return a GC pointer. */
+# define MAIN_THREAD 4 /* True for the original thread only. */
+ short thread_blocked; /* Protected by GC lock. */
+ /* Treated as a boolean value. If set, */
+ /* thread will acquire GC lock before */
+ /* doing any pointer manipulations, and */
+ /* has set its sp value. Thus it does */
+ /* not need to be sent a signal to stop */
+ /* it. */
+ ptr_t stack_end; /* Cold end of the stack (except for */
+ /* main thread). */
# ifdef IA64
ptr_t backing_store_end;
ptr_t backing_store_ptr;
@@ -58,23 +75,23 @@ typedef struct GC_Thread_Rep {
void * status; /* The value returned from the thread. */
/* Used only to avoid premature */
/* reclamation of any data it might */
- /* reference. */
- /* This is unfortunately also the */
- /* reason we need to intercept join */
- /* and detach. */
+ /* reference. */
+ /* This is unfortunately also the */
+ /* reason we need to intercept join */
+ /* and detach. */
unsigned finalizer_nested;
- unsigned finalizer_skipped; /* Used by GC_check_finalizer_nested() */
- /* to minimize the level of recursion */
- /* when a client finalizer allocates */
- /* memory (initially both are 0). */
+ unsigned finalizer_skipped; /* Used by GC_check_finalizer_nested() */
+ /* to minimize the level of recursion */
+ /* when a client finalizer allocates */
+ /* memory (initially both are 0). */
# ifdef THREAD_LOCAL_ALLOC
struct thread_local_freelists tlfs;
# endif
} * GC_thread;
-# define THREAD_TABLE_SZ 256 /* Must be power of 2 */
+# define THREAD_TABLE_SZ 256 /* Must be power of 2 */
extern volatile GC_thread GC_threads[THREAD_TABLE_SZ];
extern GC_bool GC_thr_initialized;
@@ -84,9 +101,9 @@ GC_thread GC_lookup_thread(pthread_t id);
void GC_stop_init(void);
extern GC_bool GC_in_thread_creation;
- /* We may currently be in thread creation or destruction. */
- /* Only set to TRUE while allocation lock is held. */
- /* When set, it is OK to run GC from unknown thread. */
+ /* We may currently be in thread creation or destruction. */
+ /* Only set to TRUE while allocation lock is held. */
+ /* When set, it is OK to run GC from unknown thread. */
#endif /* GC_PTHREADS && !GC_SOLARIS_THREADS.... etc */
#endif /* GC_PTHREAD_SUPPORT_H */
diff --git a/include/private/thread_local_alloc.h b/include/private/thread_local_alloc.h
index eb30487b..15cf37e5 100644
--- a/include/private/thread_local_alloc.h
+++ b/include/private/thread_local_alloc.h
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright (c) 2000-2005 by Hewlett-Packard Company. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
@@ -11,13 +11,13 @@
* modified is included with the above copyright notice.
*/
-/* Included indirectly from a thread-library-specific file. */
-/* This is the interface for thread-local allocation, whose */
-/* implementation is mostly thread-library-independent. */
-/* Here we describe only the interface that needs to be known */
-/* and invoked from the thread support layer; the actual */
-/* implementation also exports GC_malloc and friends, which */
-/* are declared in gc.h. */
+/* Included indirectly from a thread-library-specific file. */
+/* This is the interface for thread-local allocation, whose */
+/* implementation is mostly thread-library-independent. */
+/* Here we describe only the interface that needs to be known */
+/* and invoked from the thread support layer; the actual */
+/* implementation also exports GC_malloc and friends, which */
+/* are declared in gc.h. */
#include "private/gc_priv.h"
@@ -35,58 +35,58 @@
!defined(USE_CUSTOM_SPECIFIC)
# if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
# if defined(__GNUC__) /* Fixed for versions past 2.95? */ \
- || defined(MSWINCE)
+ || defined(MSWINCE)
# define USE_WIN32_SPECIFIC
# else
# define USE_WIN32_COMPILER_TLS
# endif /* !GNU */
# elif defined(LINUX) && !defined(ARM32) && \
- (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >=3))
+ (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >=3))
# define USE_COMPILER_TLS
# elif (defined(GC_DGUX386_THREADS) || defined(GC_OSF1_THREADS) || \
defined(GC_DARWIN_THREADS) || defined(GC_AIX_THREADS)) || \
- defined(GC_NETBSD_THREADS)
+ defined(GC_NETBSD_THREADS)
# define USE_PTHREAD_SPECIFIC
# elif defined(GC_HPUX_THREADS)
# ifdef __GNUC__
# define USE_PTHREAD_SPECIFIC
- /* Empirically, as of gcc 3.3, USE_COMPILER_TLS doesn't work. */
+ /* Empirically, as of gcc 3.3, USE_COMPILER_TLS doesn't work. */
# else
# define USE_COMPILER_TLS
# endif
# else
-# define USE_CUSTOM_SPECIFIC /* Use our own. */
+# define USE_CUSTOM_SPECIFIC /* Use our own. */
# endif
# endif
# include <stdlib.h>
-/* One of these should be declared as the tlfs field in the */
-/* structure pointed to by a GC_thread. */
+/* One of these should be declared as the tlfs field in the */
+/* structure pointed to by a GC_thread. */
typedef struct thread_local_freelists {
# ifdef THREAD_LOCAL_ALLOC
- void * ptrfree_freelists[TINY_FREELISTS];
- void * normal_freelists[TINY_FREELISTS];
-# ifdef GC_GCJ_SUPPORT
- void * gcj_freelists[TINY_FREELISTS];
-# define ERROR_FL ((void *)(word)-1)
- /* Value used for gcj_freelist[-1]; allocation is */
- /* erroneous. */
-# endif
- /* Free lists contain either a pointer or a small count */
- /* reflecting the number of granules allocated at that */
- /* size. */
- /* 0 ==> thread-local allocation in use, free list */
- /* empty. */
- /* > 0, <= DIRECT_GRANULES ==> Using global allocation, */
- /* too few objects of this size have been */
- /* allocated by this thread. */
- /* >= HBLKSIZE => pointer to nonempty free list. */
- /* > DIRECT_GRANULES, < HBLKSIZE ==> transition to */
- /* local alloc, equivalent to 0. */
-# define DIRECT_GRANULES (HBLKSIZE/GRANULE_BYTES)
- /* Don't use local free lists for up to this much */
- /* allocation. */
+ void * ptrfree_freelists[TINY_FREELISTS];
+ void * normal_freelists[TINY_FREELISTS];
+# ifdef GC_GCJ_SUPPORT
+ void * gcj_freelists[TINY_FREELISTS];
+# define ERROR_FL ((void *)(word)-1)
+ /* Value used for gcj_freelist[-1]; allocation is */
+ /* erroneous. */
+# endif
+ /* Free lists contain either a pointer or a small count */
+ /* reflecting the number of granules allocated at that */
+ /* size. */
+ /* 0 ==> thread-local allocation in use, free list */
+ /* empty. */
+ /* > 0, <= DIRECT_GRANULES ==> Using global allocation, */
+ /* too few objects of this size have been */
+ /* allocated by this thread. */
+ /* >= HBLKSIZE => pointer to nonempty free list. */
+ /* > DIRECT_GRANULES, < HBLKSIZE ==> transition to */
+ /* local alloc, equivalent to 0. */
+# define DIRECT_GRANULES (HBLKSIZE/GRANULE_BYTES)
+ /* Don't use local free lists for up to this much */
+ /* allocation. */
# endif
} *GC_tlfs;
@@ -107,15 +107,15 @@ typedef struct thread_local_freelists {
# include <windows.h>
# define GC_getspecific TlsGetValue
# define GC_setspecific(key, v) !TlsSetValue(key, v)
- /* We assume 0 == success, msft does the opposite. */
+ /* We assume 0 == success, msft does the opposite. */
# ifndef TLS_OUT_OF_INDEXES
/* this is currently missing in WinCE */
# define TLS_OUT_OF_INDEXES (DWORD)0xFFFFFFFF
# endif
# define GC_key_create(key, d) \
- ((d) != 0 || (*(key) = TlsAlloc()) == TLS_OUT_OF_INDEXES ? -1 : 0)
+ ((d) != 0 || (*(key) = TlsAlloc()) == TLS_OUT_OF_INDEXES ? -1 : 0)
# define GC_remove_specific(key) /* No need for cleanup on thread exit. */
- /* Need TlsFree on process exit/detach ? */
+ /* Need TlsFree on process exit/detach ? */
typedef DWORD GC_key_t;
# elif defined(USE_CUSTOM_SPECIFIC)
# include "private/specific.h"
@@ -124,19 +124,19 @@ typedef struct thread_local_freelists {
# endif
-/* Each thread structure must be initialized. */
-/* This call must be made from the new thread. */
-/* Caller holds allocation lock. */
+/* Each thread structure must be initialized. */
+/* This call must be made from the new thread. */
+/* Caller holds allocation lock. */
void GC_init_thread_local(GC_tlfs p);
-/* Called when a thread is unregistered, or exits. */
-/* We hold the allocator lock. */
+/* Called when a thread is unregistered, or exits. */
+/* We hold the allocator lock. */
void GC_destroy_thread_local(GC_tlfs p);
-/* The thread support layer must arrange to mark thread-local */
-/* free lists explicitly, since the link field is often */
-/* invisible to the marker. It knows how to find all threads; */
-/* we take care of an individual thread freelist structure. */
+/* The thread support layer must arrange to mark thread-local */
+/* free lists explicitly, since the link field is often */
+/* invisible to the marker. It knows how to find all threads; */
+/* we take care of an individual thread freelist structure. */
void GC_mark_thread_local_fls_for(GC_tlfs p);
extern
@@ -147,10 +147,10 @@ extern
#endif
GC_key_t GC_thread_key;
-/* This is set up by the thread_local_alloc implementation. But the */
-/* thread support layer calls GC_remove_specific(GC_thread_key) */
-/* before a thread exits. */
+/* This is set up by the thread_local_alloc implementation. But the */
+/* thread support layer calls GC_remove_specific(GC_thread_key) */
+/* before a thread exits. */
/* And the thread support layer makes sure that GC_thread_key is traced,*/
-/* if necessary. */
+/* if necessary. */
#endif /* THREAD_LOCAL_ALLOC */
diff --git a/mach_dep.c b/mach_dep.c
index b3f69616..990da351 100644
--- a/mach_dep.c
+++ b/mach_dep.c
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
*
@@ -33,29 +33,29 @@ asm static void PushMacRegisters()
{
sub.w #4,sp // reserve space for one parameter.
move.l a2,(sp)
- jsr GC_push_one
+ jsr GC_push_one
move.l a3,(sp)
- jsr GC_push_one
+ jsr GC_push_one
move.l a4,(sp)
- jsr GC_push_one
+ jsr GC_push_one
# if !__option(a6frames)
- // <pcb> perhaps a6 should be pushed if stack frames are not being used.
- move.l a6,(sp)
- jsr GC_push_one
+ // <pcb> perhaps a6 should be pushed if stack frames are not being used.
+ move.l a6,(sp)
+ jsr GC_push_one
# endif
- // skip a5 (globals), a6 (frame pointer), and a7 (stack pointer)
+ // skip a5 (globals), a6 (frame pointer), and a7 (stack pointer)
move.l d2,(sp)
- jsr GC_push_one
+ jsr GC_push_one
move.l d3,(sp)
- jsr GC_push_one
+ jsr GC_push_one
move.l d4,(sp)
- jsr GC_push_one
+ jsr GC_push_one
move.l d5,(sp)
- jsr GC_push_one
+ jsr GC_push_one
move.l d6,(sp)
- jsr GC_push_one
+ jsr GC_push_one
move.l d7,(sp)
- jsr GC_push_one
+ jsr GC_push_one
add.w #4,sp // fix stack.
rts
}
@@ -64,14 +64,14 @@ asm static void PushMacRegisters()
# if defined(SPARC) || defined(IA64)
/* Value returned from register flushing routine; either sp (SPARC) */
- /* or ar.bsp (IA64) */
+ /* or ar.bsp (IA64) */
ptr_t GC_save_regs_ret_val;
# endif
/* Routine to mark from registers that are preserved by the C compiler. */
-/* This must be ported to every new architecture. It is not optional, */
-/* and should not be used on platforms that are either UNIX-like, or */
-/* require thread support. */
+/* This must be ported to every new architecture. It is not optional, */
+/* and should not be used on platforms that are either UNIX-like, or */
+/* require thread support. */
#undef HAVE_PUSH_REGS
@@ -80,54 +80,54 @@ asm static void PushMacRegisters()
#else /* No asm implementation */
void GC_push_regs()
{
-# if defined(M68K) && defined(AMIGA)
- /* AMIGA - could be replaced by generic code */
- /* a0, a1, d0 and d1 are caller save */
+# if defined(M68K) && defined(AMIGA)
+ /* AMIGA - could be replaced by generic code */
+ /* a0, a1, d0 and d1 are caller save */
# ifdef __GNUC__
- asm("subq.w &0x4,%sp"); /* allocate word on top of stack */
-
- asm("mov.l %a2,(%sp)"); asm("jsr _GC_push_one");
- asm("mov.l %a3,(%sp)"); asm("jsr _GC_push_one");
- asm("mov.l %a4,(%sp)"); asm("jsr _GC_push_one");
- asm("mov.l %a5,(%sp)"); asm("jsr _GC_push_one");
- asm("mov.l %a6,(%sp)"); asm("jsr _GC_push_one");
- /* Skip frame pointer and stack pointer */
- asm("mov.l %d2,(%sp)"); asm("jsr _GC_push_one");
- asm("mov.l %d3,(%sp)"); asm("jsr _GC_push_one");
- asm("mov.l %d4,(%sp)"); asm("jsr _GC_push_one");
- asm("mov.l %d5,(%sp)"); asm("jsr _GC_push_one");
- asm("mov.l %d6,(%sp)"); asm("jsr _GC_push_one");
- asm("mov.l %d7,(%sp)"); asm("jsr _GC_push_one");
-
- asm("addq.w &0x4,%sp"); /* put stack back where it was */
-# define HAVE_PUSH_REGS
+ asm("subq.w &0x4,%sp"); /* allocate word on top of stack */
+
+ asm("mov.l %a2,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %a3,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %a4,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %a5,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %a6,(%sp)"); asm("jsr _GC_push_one");
+ /* Skip frame pointer and stack pointer */
+ asm("mov.l %d2,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d3,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d4,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d5,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d6,(%sp)"); asm("jsr _GC_push_one");
+ asm("mov.l %d7,(%sp)"); asm("jsr _GC_push_one");
+
+ asm("addq.w &0x4,%sp"); /* put stack back where it was */
+# define HAVE_PUSH_REGS
# else /* !__GNUC__ */
- GC_push_one(getreg(REG_A2));
- GC_push_one(getreg(REG_A3));
+ GC_push_one(getreg(REG_A2));
+ GC_push_one(getreg(REG_A3));
# ifndef __SASC
- /* Can probably be changed to #if 0 -Kjetil M. (a4=globals)*/
- GC_push_one(getreg(REG_A4));
-# endif
- GC_push_one(getreg(REG_A5));
- GC_push_one(getreg(REG_A6));
- /* Skip stack pointer */
- GC_push_one(getreg(REG_D2));
- GC_push_one(getreg(REG_D3));
- GC_push_one(getreg(REG_D4));
- GC_push_one(getreg(REG_D5));
- GC_push_one(getreg(REG_D6));
- GC_push_one(getreg(REG_D7));
-# define HAVE_PUSH_REGS
-# endif /* !__GNUC__ */
+ /* Can probably be changed to #if 0 -Kjetil M. (a4=globals)*/
+ GC_push_one(getreg(REG_A4));
+# endif
+ GC_push_one(getreg(REG_A5));
+ GC_push_one(getreg(REG_A6));
+ /* Skip stack pointer */
+ GC_push_one(getreg(REG_D2));
+ GC_push_one(getreg(REG_D3));
+ GC_push_one(getreg(REG_D4));
+ GC_push_one(getreg(REG_D5));
+ GC_push_one(getreg(REG_D6));
+ GC_push_one(getreg(REG_D7));
+# define HAVE_PUSH_REGS
+# endif /* !__GNUC__ */
# endif /* AMIGA */
-# if defined(M68K) && defined(MACOS)
-# if defined(THINK_C)
+# if defined(M68K) && defined(MACOS)
+# if defined(THINK_C)
# define PushMacReg(reg) \
move.l reg,(sp) \
jsr GC_push_one
- asm {
+ asm {
sub.w #4,sp ; reserve space for one parameter.
PushMacReg(a2);
PushMacReg(a3);
@@ -140,27 +140,27 @@ void GC_push_regs()
PushMacReg(d6);
PushMacReg(d7);
add.w #4,sp ; fix stack.
- }
-# define HAVE_PUSH_REGS
-# undef PushMacReg
-# endif /* THINK_C */
-# if defined(__MWERKS__)
- PushMacRegisters();
-# define HAVE_PUSH_REGS
-# endif /* __MWERKS__ */
-# endif /* MACOS */
+ }
+# define HAVE_PUSH_REGS
+# undef PushMacReg
+# endif /* THINK_C */
+# if defined(__MWERKS__)
+ PushMacRegisters();
+# define HAVE_PUSH_REGS
+# endif /* __MWERKS__ */
+# endif /* MACOS */
}
#endif /* !USE_ASM_PUSH_REGS */
#if defined(HAVE_PUSH_REGS) && defined(THREADS)
# error GC_push_regs cannot be used with threads
- /* Would fail for GC_do_blocking. There are probably other safety */
- /* issues. */
+ /* Would fail for GC_do_blocking. There are probably other safety */
+ /* issues. */
# undef HAVE_PUSH_REGS
#endif
#if defined(UNIX_LIKE) && !defined(NO_GETCONTEXT) && \
- (defined(DARWIN) || defined(HURD) || defined(ARM32) || defined(MIPS))
+ (defined(DARWIN) || defined(HURD) || defined(ARM32) || defined(MIPS))
# define NO_GETCONTEXT
#endif
@@ -175,11 +175,11 @@ void GC_push_regs()
# endif
#endif
-/* Ensure that either registers are pushed, or callee-save registers */
-/* are somewhere on the stack, and then call fn(arg, ctxt). */
-/* ctxt is either a pointer to a ucontext_t we generated, or NULL. */
+/* Ensure that either registers are pushed, or callee-save registers */
+/* are somewhere on the stack, and then call fn(arg, ctxt). */
+/* ctxt is either a pointer to a ucontext_t we generated, or NULL. */
void GC_with_callee_saves_pushed(void (*fn)(ptr_t, void *),
- ptr_t arg)
+ ptr_t arg)
{
word dummy;
void * context = 0;
@@ -189,29 +189,29 @@ void GC_with_callee_saves_pushed(void (*fn)(ptr_t, void *),
# elif defined(UNIX_LIKE) && !defined(NO_GETCONTEXT)
/* Older versions of Darwin seem to lack getcontext(). */
/* ARM and MIPS Linux often doesn't support a real */
- /* getcontext(). */
+ /* getcontext(). */
ucontext_t ctxt;
if (getcontext(&ctxt) < 0)
- ABORT ("Getcontext failed: Use another register retrieval method?");
+ ABORT ("Getcontext failed: Use another register retrieval method?");
context = &ctxt;
# if defined(SPARC) || defined(IA64)
- /* On a register window machine, we need to save register */
- /* contents on the stack for this to work. This may already be */
- /* subsumed by the getcontext() call. */
+ /* On a register window machine, we need to save register */
+ /* contents on the stack for this to work. This may already be */
+ /* subsumed by the getcontext() call. */
{
GC_save_regs_ret_val = GC_save_regs_in_stack();
}
# endif /* register windows. */
# elif defined(HAVE_BUILTIN_UNWIND_INIT) && \
- !(defined(POWERPC) && defined(DARWIN))
- /* This was suggested by Richard Henderson as the way to */
- /* force callee-save registers and register windows onto */
- /* the stack. */
- /* Mark Sibly points out that this doesn't seem to work */
- /* on MacOS 10.3.9/PowerPC. */
+ !(defined(POWERPC) && defined(DARWIN))
+ /* This was suggested by Richard Henderson as the way to */
+ /* force callee-save registers and register windows onto */
+ /* the stack. */
+ /* Mark Sibly points out that this doesn't seem to work */
+ /* on MacOS 10.3.9/PowerPC. */
__builtin_unwind_init();
# else /* !HAVE_BUILTIN_UNWIND_INIT && !UNIX_LIKE */
- /* && !HAVE_PUSH_REGS */
+ /* && !HAVE_PUSH_REGS */
/* Generic code */
/* The idea is due to Parag Patel at HP. */
/* We're not sure whether he would like */
@@ -219,28 +219,28 @@ void GC_with_callee_saves_pushed(void (*fn)(ptr_t, void *),
jmp_buf regs;
register word * i = (word *) regs;
register ptr_t lim = (ptr_t)(regs) + (sizeof regs);
-
- /* Setjmp doesn't always clear all of the buffer. */
- /* That tends to preserve garbage. Clear it. */
- for (; (char *)i < lim; i++) {
- *i = 0;
- }
+
+ /* Setjmp doesn't always clear all of the buffer. */
+ /* That tends to preserve garbage. Clear it. */
+ for (; (char *)i < lim; i++) {
+ *i = 0;
+ }
# if defined(MSWIN32) || defined(MSWINCE) \
|| defined(UTS4) || defined(LINUX) || defined(EWS4800)
- (void) setjmp(regs);
+ (void) setjmp(regs);
# else
(void) _setjmp(regs);
- /* We don't want to mess with signals. According to */
- /* SUSV3, setjmp() may or may not save signal mask. */
- /* _setjmp won't, but is less portable. */
+ /* We don't want to mess with signals. According to */
+ /* SUSV3, setjmp() may or may not save signal mask. */
+ /* _setjmp won't, but is less portable. */
# endif
# endif /* !HAVE_PUSH_REGS ... */
- /* FIXME: context here is sometimes just zero. At the moment the callees */
- /* don't really need it. */
+ /* FIXME: context here is sometimes just zero. At the moment the callees */
+ /* don't really need it. */
fn(arg, context);
- /* Strongly discourage the compiler from treating the above */
- /* as a tail-call, since that would pop the register */
- /* contents before we get a chance to look at them. */
+ /* Strongly discourage the compiler from treating the above */
+ /* as a tail-call, since that would pop the register */
+ /* contents before we get a chance to look at them. */
GC_noop1((word)(&dummy));
}
@@ -252,4 +252,4 @@ void GC_with_callee_saves_pushed(void (*fn)(ptr_t, void *),
{ return(arg); }
/* The real version is in a .S file */
# endif
-#endif /* ASM_CLEAR_CODE */
+#endif /* ASM_CLEAR_CODE */
diff --git a/malloc.c b/malloc.c
index dff23e12..e41802e8 100644
--- a/malloc.c
+++ b/malloc.c
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
@@ -12,7 +12,7 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-
+
#include "private/gc_priv.h"
#include <stdio.h>
@@ -21,67 +21,67 @@
# include <errno.h>
#endif
-extern void * GC_clear_stack(void *); /* in misc.c, behaves like identity */
-void GC_extend_size_map(size_t); /* in misc.c. */
+extern void * GC_clear_stack(void *); /* in misc.c, behaves like identity */
+void GC_extend_size_map(size_t); /* in misc.c. */
-/* Allocate reclaim list for kind: */
-/* Return TRUE on success */
+/* Allocate reclaim list for kind: */
+/* Return TRUE on success */
STATIC GC_bool GC_alloc_reclaim_list(struct obj_kind *kind)
{
struct hblk ** result = (struct hblk **)
- GC_scratch_alloc((MAXOBJGRANULES+1) * sizeof(struct hblk *));
+ GC_scratch_alloc((MAXOBJGRANULES+1) * sizeof(struct hblk *));
if (result == 0) return(FALSE);
BZERO(result, (MAXOBJGRANULES+1)*sizeof(struct hblk *));
kind -> ok_reclaim_list = result;
return(TRUE);
}
-/* Allocate a large block of size lb bytes. */
-/* The block is not cleared. */
-/* Flags is 0 or IGNORE_OFF_PAGE. */
-/* We hold the allocation lock. */
-/* EXTRA_BYTES were already added to lb. */
+/* Allocate a large block of size lb bytes. */
+/* The block is not cleared. */
+/* Flags is 0 or IGNORE_OFF_PAGE. */
+/* We hold the allocation lock. */
+/* EXTRA_BYTES were already added to lb. */
ptr_t GC_alloc_large(size_t lb, int k, unsigned flags)
{
struct hblk * h;
word n_blocks;
ptr_t result;
-
+
/* Round up to a multiple of a granule. */
lb = (lb + GRANULE_BYTES - 1) & ~(GRANULE_BYTES - 1);
n_blocks = OBJ_SZ_TO_BLOCKS(lb);
if (!GC_is_initialized) GC_init();
/* Do our share of marking work */
if(GC_incremental && !GC_dont_gc)
- GC_collect_a_little_inner((int)n_blocks);
+ GC_collect_a_little_inner((int)n_blocks);
h = GC_allochblk(lb, k, flags);
# ifdef USE_MUNMAP
- if (0 == h) {
- GC_merge_unmapped();
- h = GC_allochblk(lb, k, flags);
- }
+ if (0 == h) {
+ GC_merge_unmapped();
+ h = GC_allochblk(lb, k, flags);
+ }
# endif
while (0 == h && GC_collect_or_expand(n_blocks, (flags != 0))) {
- h = GC_allochblk(lb, k, flags);
+ h = GC_allochblk(lb, k, flags);
}
if (h == 0) {
- result = 0;
+ result = 0;
} else {
- size_t total_bytes = n_blocks * HBLKSIZE;
- if (n_blocks > 1) {
- GC_large_allocd_bytes += total_bytes;
- if (GC_large_allocd_bytes > GC_max_large_allocd_bytes)
- GC_max_large_allocd_bytes = GC_large_allocd_bytes;
- }
- result = h -> hb_body;
+ size_t total_bytes = n_blocks * HBLKSIZE;
+ if (n_blocks > 1) {
+ GC_large_allocd_bytes += total_bytes;
+ if (GC_large_allocd_bytes > GC_max_large_allocd_bytes)
+ GC_max_large_allocd_bytes = GC_large_allocd_bytes;
+ }
+ result = h -> hb_body;
}
return result;
}
-/* Allocate a large block of size lb bytes. Clear if appropriate. */
-/* We hold the allocation lock. */
-/* EXTRA_BYTES were already added to lb. */
+/* Allocate a large block of size lb bytes. Clear if appropriate. */
+/* We hold the allocation lock. */
+/* EXTRA_BYTES were already added to lb. */
ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags)
{
ptr_t result = GC_alloc_large(lb, k, flags);
@@ -89,47 +89,47 @@ ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags)
if (0 == result) return 0;
if (GC_debugging_started || GC_obj_kinds[k].ok_init) {
- /* Clear the whole block, in case of GC_realloc call. */
- BZERO(result, n_blocks * HBLKSIZE);
+ /* Clear the whole block, in case of GC_realloc call. */
+ BZERO(result, n_blocks * HBLKSIZE);
}
return result;
}
-/* allocate lb bytes for an object of kind k. */
-/* Should not be used to directly to allocate */
-/* objects such as STUBBORN objects that */
-/* require special handling on allocation. */
-/* First a version that assumes we already */
-/* hold lock: */
+/* allocate lb bytes for an object of kind k. */
+/* Should not be used to directly to allocate */
+/* objects such as STUBBORN objects that */
+/* require special handling on allocation. */
+/* First a version that assumes we already */
+/* hold lock: */
void * GC_generic_malloc_inner(size_t lb, int k)
{
void *op;
if(SMALL_OBJ(lb)) {
struct obj_kind * kind = GC_obj_kinds + k;
- size_t lg = GC_size_map[lb];
- void ** opp = &(kind -> ok_freelist[lg]);
+ size_t lg = GC_size_map[lb];
+ void ** opp = &(kind -> ok_freelist[lg]);
if( (op = *opp) == 0 ) {
- if (GC_size_map[lb] == 0) {
- if (!GC_is_initialized) GC_init();
- if (GC_size_map[lb] == 0) GC_extend_size_map(lb);
- return(GC_generic_malloc_inner(lb, k));
- }
- if (kind -> ok_reclaim_list == 0) {
- if (!GC_alloc_reclaim_list(kind)) goto out;
- }
- op = GC_allocobj(lg, k);
- if (op == 0) goto out;
+ if (GC_size_map[lb] == 0) {
+ if (!GC_is_initialized) GC_init();
+ if (GC_size_map[lb] == 0) GC_extend_size_map(lb);
+ return(GC_generic_malloc_inner(lb, k));
+ }
+ if (kind -> ok_reclaim_list == 0) {
+ if (!GC_alloc_reclaim_list(kind)) goto out;
+ }
+ op = GC_allocobj(lg, k);
+ if (op == 0) goto out;
}
*opp = obj_link(op);
obj_link(op) = 0;
GC_bytes_allocd += GRANULES_TO_BYTES(lg);
} else {
- op = (ptr_t)GC_alloc_large_and_clear(ADD_SLOP(lb), k, 0);
+ op = (ptr_t)GC_alloc_large_and_clear(ADD_SLOP(lb), k, 0);
GC_bytes_allocd += lb;
}
-
+
out:
return op;
}
@@ -158,38 +158,38 @@ GC_API void * GC_CALL GC_generic_malloc(size_t lb, int k)
if (GC_have_errors) GC_print_all_errors();
GC_INVOKE_FINALIZERS();
if (SMALL_OBJ(lb)) {
- LOCK();
+ LOCK();
result = GC_generic_malloc_inner((word)lb, k);
- UNLOCK();
+ UNLOCK();
} else {
- size_t lg;
- size_t lb_rounded;
- word n_blocks;
- GC_bool init;
- lg = ROUNDED_UP_GRANULES(lb);
- lb_rounded = GRANULES_TO_BYTES(lg);
- n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
- init = GC_obj_kinds[k].ok_init;
- LOCK();
- result = (ptr_t)GC_alloc_large(lb_rounded, k, 0);
- if (0 != result) {
- if (GC_debugging_started) {
- BZERO(result, n_blocks * HBLKSIZE);
- } else {
+ size_t lg;
+ size_t lb_rounded;
+ word n_blocks;
+ GC_bool init;
+ lg = ROUNDED_UP_GRANULES(lb);
+ lb_rounded = GRANULES_TO_BYTES(lg);
+ n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
+ init = GC_obj_kinds[k].ok_init;
+ LOCK();
+ result = (ptr_t)GC_alloc_large(lb_rounded, k, 0);
+ if (0 != result) {
+ if (GC_debugging_started) {
+ BZERO(result, n_blocks * HBLKSIZE);
+ } else {
# ifdef THREADS
- /* Clear any memory that might be used for GC descriptors */
- /* before we release the lock. */
- ((word *)result)[0] = 0;
- ((word *)result)[1] = 0;
- ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
- ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
-# endif
- }
- }
- GC_bytes_allocd += lb_rounded;
- UNLOCK();
- if (init && !GC_debugging_started && 0 != result) {
- BZERO(result, n_blocks * HBLKSIZE);
+ /* Clear any memory that might be used for GC descriptors */
+ /* before we release the lock. */
+ ((word *)result)[0] = 0;
+ ((word *)result)[1] = 0;
+ ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
+ ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
+# endif
+ }
+ }
+ GC_bytes_allocd += lb_rounded;
+ UNLOCK();
+ if (init && !GC_debugging_started && 0 != result) {
+ BZERO(result, n_blocks * HBLKSIZE);
}
}
if (0 == result) {
@@ -197,13 +197,13 @@ GC_API void * GC_CALL GC_generic_malloc(size_t lb, int k)
} else {
return(result);
}
-}
+}
#define GENERAL_MALLOC(lb,k) \
GC_clear_stack(GC_generic_malloc(lb, k))
-/* We make the GC_clear_stack_call a tail call, hoping to get more of */
-/* the stack. */
+/* We make the GC_clear_stack_call a tail call, hoping to get more of */
+/* the stack. */
/* Allocate lb bytes of atomic (pointerfree) data */
#ifdef THREAD_LOCAL_ALLOC
@@ -218,9 +218,9 @@ GC_API void * GC_CALL GC_generic_malloc(size_t lb, int k)
DCL_LOCK_STATE;
if(SMALL_OBJ(lb)) {
- lg = GC_size_map[lb];
- opp = &(GC_aobjfreelist[lg]);
- LOCK();
+ lg = GC_size_map[lb];
+ opp = &(GC_aobjfreelist[lg]);
+ LOCK();
if( EXPECT((op = *opp) == 0, 0) ) {
UNLOCK();
return(GENERAL_MALLOC((word)lb, PTRFREE));
@@ -270,18 +270,18 @@ GC_API char * GC_CALL GC_strdup(const char *s)
DCL_LOCK_STATE;
if(SMALL_OBJ(lb)) {
- lg = GC_size_map[lb];
- opp = (void **)&(GC_objfreelist[lg]);
- LOCK();
+ lg = GC_size_map[lb];
+ opp = (void **)&(GC_objfreelist[lg]);
+ LOCK();
if( EXPECT((op = *opp) == 0, 0) ) {
UNLOCK();
return(GENERAL_MALLOC((word)lb, NORMAL));
}
- GC_ASSERT(0 == obj_link(op)
- || ((word)obj_link(op)
- <= (word)GC_greatest_plausible_heap_addr
- && (word)obj_link(op)
- >= (word)GC_least_plausible_heap_addr));
+ GC_ASSERT(0 == obj_link(op)
+ || ((word)obj_link(op)
+ <= (word)GC_greatest_plausible_heap_addr
+ && (word)obj_link(op)
+ >= (word)GC_least_plausible_heap_addr));
*opp = obj_link(op);
obj_link(op) = 0;
GC_bytes_allocd += GRANULES_TO_BYTES(lg);
@@ -294,23 +294,23 @@ GC_API char * GC_CALL GC_strdup(const char *s)
# ifdef REDIRECT_MALLOC
-/* Avoid unnecessary nested procedure calls here, by #defining some */
-/* malloc replacements. Otherwise we end up saving a */
-/* meaningless return address in the object. It also speeds things up, */
-/* but it is admittedly quite ugly. */
+/* Avoid unnecessary nested procedure calls here, by #defining some */
+/* malloc replacements. Otherwise we end up saving a */
+/* meaningless return address in the object. It also speeds things up, */
+/* but it is admittedly quite ugly. */
# ifdef GC_ADD_CALLER
# define RA GC_RETURN_ADDR,
# else
# define RA
# endif
# define GC_debug_malloc_replacement(lb) \
- GC_debug_malloc(lb, RA "unknown", 0)
+ GC_debug_malloc(lb, RA "unknown", 0)
void * malloc(size_t lb)
{
- /* It might help to manually inline the GC_malloc call here. */
- /* But any decent compiler should reduce the extra procedure call */
- /* to at most a jump instruction in this case. */
+ /* It might help to manually inline the GC_malloc call here. */
+ /* But any decent compiler should reduce the extra procedure call */
+ /* to at most a jump instruction in this case. */
# if defined(I386) && defined(GC_SOLARIS_THREADS)
/*
* Thread initialisation can call malloc before
@@ -330,21 +330,21 @@ void * malloc(size_t lb)
static ptr_t GC_libld_start = 0;
static ptr_t GC_libld_end = 0;
extern GC_bool GC_text_mapping(char *nm, ptr_t *startp, ptr_t *endp);
- /* From os_dep.c */
+ /* From os_dep.c */
STATIC void GC_init_lib_bounds(void)
{
if (GC_libpthread_start != 0) return;
if (!GC_text_mapping("libpthread-",
- &GC_libpthread_start, &GC_libpthread_end)) {
- WARN("Failed to find libpthread.so text mapping: Expect crash\n", 0);
- /* This might still work with some versions of libpthread, */
- /* so we don't abort. Perhaps we should. */
- /* Generate message only once: */
+ &GC_libpthread_start, &GC_libpthread_end)) {
+ WARN("Failed to find libpthread.so text mapping: Expect crash\n", 0);
+ /* This might still work with some versions of libpthread, */
+ /* so we don't abort. Perhaps we should. */
+ /* Generate message only once: */
GC_libpthread_start = (ptr_t)1;
}
if (!GC_text_mapping("ld-", &GC_libld_start, &GC_libld_end)) {
- WARN("Failed to find ld.so text mapping: Expect crash\n", 0);
+ WARN("Failed to find ld.so text mapping: Expect crash\n", 0);
}
}
#endif
@@ -352,23 +352,23 @@ void * malloc(size_t lb)
void * calloc(size_t n, size_t lb)
{
# if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */
- /* libpthread allocated some memory that is only pointed to by */
- /* mmapped thread stacks. Make sure it's not collectable. */
- {
- static GC_bool lib_bounds_set = FALSE;
- ptr_t caller = (ptr_t)__builtin_return_address(0);
- /* This test does not need to ensure memory visibility, since */
- /* the bounds will be set when/if we create another thread. */
- if (!lib_bounds_set) {
- GC_init_lib_bounds();
- lib_bounds_set = TRUE;
- }
- if (caller >= GC_libpthread_start && caller < GC_libpthread_end
- || (caller >= GC_libld_start && caller < GC_libld_end))
- return GC_malloc_uncollectable(n*lb);
- /* The two ranges are actually usually adjacent, so there may */
- /* be a way to speed this up. */
- }
+ /* libpthread allocated some memory that is only pointed to by */
+ /* mmapped thread stacks. Make sure it's not collectable. */
+ {
+ static GC_bool lib_bounds_set = FALSE;
+ ptr_t caller = (ptr_t)__builtin_return_address(0);
+ /* This test does not need to ensure memory visibility, since */
+ /* the bounds will be set when/if we create another thread. */
+ if (!lib_bounds_set) {
+ GC_init_lib_bounds();
+ lib_bounds_set = TRUE;
+ }
+ if (caller >= GC_libpthread_start && caller < GC_libpthread_end
+ || (caller >= GC_libld_start && caller < GC_libld_end))
+ return GC_malloc_uncollectable(n*lb);
+ /* The two ranges are actually usually adjacent, so there may */
+ /* be a way to speed this up. */
+ }
# endif
return((void *)REDIRECT_MALLOC(n*lb));
}
@@ -388,40 +388,40 @@ void * calloc(size_t n, size_t lb)
}
#endif /* !defined(strdup) */
/* If strdup is macro defined, we assume that it actually calls malloc, */
- /* and thus the right thing will happen even without overriding it. */
- /* This seems to be true on most Linux systems. */
+ /* and thus the right thing will happen even without overriding it. */
+ /* This seems to be true on most Linux systems. */
#undef GC_debug_malloc_replacement
# endif /* REDIRECT_MALLOC */
-/* Explicitly deallocate an object p. */
+/* Explicitly deallocate an object p. */
GC_API void GC_CALL GC_free(void * p)
{
struct hblk *h;
hdr *hhdr;
size_t sz; /* In bytes */
- size_t ngranules; /* sz in granules */
+ size_t ngranules; /* sz in granules */
void **flh;
int knd;
struct obj_kind * ok;
DCL_LOCK_STATE;
if (p == 0) return;
- /* Required by ANSI. It's not my fault ... */
+ /* Required by ANSI. It's not my fault ... */
# ifdef LOG_ALLOCS
GC_err_printf("GC_free(%p): %lu\n", p, (unsigned long)GC_gc_no);
# endif
h = HBLKPTR(p);
hhdr = HDR(h);
# if defined(REDIRECT_MALLOC) && \
- (defined(GC_SOLARIS_THREADS) || defined(GC_LINUX_THREADS) \
- || defined(MSWIN32))
- /* For Solaris, we have to redirect malloc calls during */
- /* initialization. For the others, this seems to happen */
- /* implicitly. */
- /* Don't try to deallocate that memory. */
- if (0 == hhdr) return;
+ (defined(GC_SOLARIS_THREADS) || defined(GC_LINUX_THREADS) \
+ || defined(MSWIN32))
+ /* For Solaris, we have to redirect malloc calls during */
+ /* initialization. For the others, this seems to happen */
+ /* implicitly. */
+ /* Don't try to deallocate that memory. */
+ if (0 == hhdr) return;
# endif
GC_ASSERT(GC_base(p) == p);
sz = hhdr -> hb_sz;
@@ -429,35 +429,35 @@ GC_API void GC_CALL GC_free(void * p)
knd = hhdr -> hb_obj_kind;
ok = &GC_obj_kinds[knd];
if (EXPECT((ngranules <= MAXOBJGRANULES), 1)) {
- LOCK();
- GC_bytes_freed += sz;
- if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
- /* Its unnecessary to clear the mark bit. If the */
- /* object is reallocated, it doesn't matter. O.w. the */
- /* collector will do it, since it's on a free list. */
- if (ok -> ok_init) {
- BZERO((word *)p + 1, sz-sizeof(word));
- }
- flh = &(ok -> ok_freelist[ngranules]);
- obj_link(p) = *flh;
- *flh = (ptr_t)p;
- UNLOCK();
+ LOCK();
+ GC_bytes_freed += sz;
+ if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
+ /* Its unnecessary to clear the mark bit. If the */
+ /* object is reallocated, it doesn't matter. O.w. the */
+ /* collector will do it, since it's on a free list. */
+ if (ok -> ok_init) {
+ BZERO((word *)p + 1, sz-sizeof(word));
+ }
+ flh = &(ok -> ok_freelist[ngranules]);
+ obj_link(p) = *flh;
+ *flh = (ptr_t)p;
+ UNLOCK();
} else {
size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
LOCK();
GC_bytes_freed += sz;
- if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
- if (nblocks > 1) {
- GC_large_allocd_bytes -= nblocks * HBLKSIZE;
- }
+ if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
+ if (nblocks > 1) {
+ GC_large_allocd_bytes -= nblocks * HBLKSIZE;
+ }
GC_freehblk(h);
UNLOCK();
}
}
-/* Explicitly deallocate an object p when we already hold lock. */
-/* Only used for internally allocated objects, so we can take some */
-/* shortcuts. */
+/* Explicitly deallocate an object p when we already hold lock. */
+/* Only used for internally allocated objects, so we can take some */
+/* shortcuts. */
#ifdef THREADS
void GC_free_inner(void * p)
{
@@ -477,21 +477,21 @@ void GC_free_inner(void * p)
ngranules = BYTES_TO_GRANULES(sz);
ok = &GC_obj_kinds[knd];
if (ngranules <= MAXOBJGRANULES) {
- GC_bytes_freed += sz;
- if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
- if (ok -> ok_init) {
- BZERO((word *)p + 1, sz-sizeof(word));
- }
- flh = &(ok -> ok_freelist[ngranules]);
- obj_link(p) = *flh;
- *flh = (ptr_t)p;
+ GC_bytes_freed += sz;
+ if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
+ if (ok -> ok_init) {
+ BZERO((word *)p + 1, sz-sizeof(word));
+ }
+ flh = &(ok -> ok_freelist[ngranules]);
+ obj_link(p) = *flh;
+ *flh = (ptr_t)p;
} else {
size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
GC_bytes_freed += sz;
- if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
- if (nblocks > 1) {
- GC_large_allocd_bytes -= nblocks * HBLKSIZE;
- }
+ if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
+ if (nblocks > 1) {
+ GC_large_allocd_bytes -= nblocks * HBLKSIZE;
+ }
GC_freehblk(h);
}
}
@@ -504,19 +504,19 @@ void GC_free_inner(void * p)
void free(void * p)
{
# if defined(GC_LINUX_THREADS) && !defined(USE_PROC_FOR_LIBRARIES)
- {
- /* Don't bother with initialization checks. If nothing */
- /* has been initialized, the check fails, and that's safe, */
- /* since we haven't allocated uncollectable objects either. */
- ptr_t caller = (ptr_t)__builtin_return_address(0);
- /* This test does not need to ensure memory visibility, since */
- /* the bounds will be set when/if we create another thread. */
- if (caller >= GC_libpthread_start && caller < GC_libpthread_end
- || (caller >= GC_libld_start && caller < GC_libld_end)) {
- GC_free(p);
- return;
- }
- }
+ {
+ /* Don't bother with initialization checks. If nothing */
+ /* has been initialized, the check fails, and that's safe, */
+ /* since we haven't allocated uncollectable objects either. */
+ ptr_t caller = (ptr_t)__builtin_return_address(0);
+ /* This test does not need to ensure memory visibility, since */
+ /* the bounds will be set when/if we create another thread. */
+ if (caller >= GC_libpthread_start && caller < GC_libpthread_end
+ || (caller >= GC_libld_start && caller < GC_libld_end)) {
+ GC_free(p);
+ return;
+ }
+ }
# endif
# ifndef IGNORE_FREE
REDIRECT_FREE(p);
diff --git a/mallocx.c b/mallocx.c
index e379dcff..841a0e20 100644
--- a/mallocx.c
+++ b/mallocx.c
@@ -24,11 +24,11 @@
#include <stdio.h>
#include "private/gc_priv.h"
-void * GC_clear_stack(void *); /* in misc.c, behaves like identity */
+void * GC_clear_stack(void *); /* in misc.c, behaves like identity */
/* Some externally visible but unadvertised variables to allow access to */
-/* free lists from inlined allocators without including gc_priv.h */
-/* or introducing dependencies on internal data structure layouts. */
+/* free lists from inlined allocators without including gc_priv.h */
+/* or introducing dependencies on internal data structure layouts. */
void ** const GC_objfreelist_ptr = GC_objfreelist;
void ** const GC_aobjfreelist_ptr = GC_aobjfreelist;
void ** const GC_uobjfreelist_ptr = GC_uobjfreelist;
@@ -41,38 +41,38 @@ STATIC void * GC_generic_or_special_malloc(size_t lb, int knd)
{
switch(knd) {
# ifdef STUBBORN_ALLOC
- case STUBBORN:
- return(GC_malloc_stubborn((size_t)lb));
+ case STUBBORN:
+ return(GC_malloc_stubborn((size_t)lb));
# endif
- case PTRFREE:
- return(GC_malloc_atomic((size_t)lb));
- case NORMAL:
- return(GC_malloc((size_t)lb));
- case UNCOLLECTABLE:
- return(GC_malloc_uncollectable((size_t)lb));
+ case PTRFREE:
+ return(GC_malloc_atomic((size_t)lb));
+ case NORMAL:
+ return(GC_malloc((size_t)lb));
+ case UNCOLLECTABLE:
+ return(GC_malloc_uncollectable((size_t)lb));
# ifdef ATOMIC_UNCOLLECTABLE
- case AUNCOLLECTABLE:
- return(GC_malloc_atomic_uncollectable((size_t)lb));
-# endif /* ATOMIC_UNCOLLECTABLE */
- default:
- return(GC_generic_malloc(lb,knd));
+ case AUNCOLLECTABLE:
+ return(GC_malloc_atomic_uncollectable((size_t)lb));
+# endif /* ATOMIC_UNCOLLECTABLE */
+ default:
+ return(GC_generic_malloc(lb,knd));
}
}
/* Change the size of the block pointed to by p to contain at least */
/* lb bytes. The object may be (and quite likely will be) moved. */
-/* The kind (e.g. atomic) is the same as that of the old. */
+/* The kind (e.g. atomic) is the same as that of the old. */
/* Shrinking of large blocks is not implemented well. */
GC_API void * GC_CALL GC_realloc(void * p, size_t lb)
{
struct hblk * h;
hdr * hhdr;
- size_t sz; /* Current size in bytes */
- size_t orig_sz; /* Original sz in bytes */
+ size_t sz; /* Current size in bytes */
+ size_t orig_sz; /* Original sz in bytes */
int obj_kind;
- if (p == 0) return(GC_malloc(lb)); /* Required by ANSI */
+ if (p == 0) return(GC_malloc(lb)); /* Required by ANSI */
h = HBLKPTR(p);
hhdr = HDR(h);
sz = hhdr -> hb_sz;
@@ -80,60 +80,60 @@ GC_API void * GC_CALL GC_realloc(void * p, size_t lb)
orig_sz = sz;
if (sz > MAXOBJBYTES) {
- /* Round it up to the next whole heap block */
- register word descr;
-
- sz = (sz+HBLKSIZE-1) & (~HBLKMASK);
- hhdr -> hb_sz = sz;
- descr = GC_obj_kinds[obj_kind].ok_descriptor;
+ /* Round it up to the next whole heap block */
+ register word descr;
+
+ sz = (sz+HBLKSIZE-1) & (~HBLKMASK);
+ hhdr -> hb_sz = sz;
+ descr = GC_obj_kinds[obj_kind].ok_descriptor;
if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
hhdr -> hb_descr = descr;
-# ifdef MARK_BIT_PER_OBJ
- GC_ASSERT(hhdr -> hb_inv_sz == LARGE_INV_SZ);
-# else
- GC_ASSERT(hhdr -> hb_large_block &&
- hhdr -> hb_map[ANY_INDEX] == 1);
-# endif
- if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
- /* Extra area is already cleared by GC_alloc_large_and_clear. */
+# ifdef MARK_BIT_PER_OBJ
+ GC_ASSERT(hhdr -> hb_inv_sz == LARGE_INV_SZ);
+# else
+ GC_ASSERT(hhdr -> hb_large_block &&
+ hhdr -> hb_map[ANY_INDEX] == 1);
+# endif
+ if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
+ /* Extra area is already cleared by GC_alloc_large_and_clear. */
}
if (ADD_SLOP(lb) <= sz) {
- if (lb >= (sz >> 1)) {
-# ifdef STUBBORN_ALLOC
- if (obj_kind == STUBBORN) GC_change_stubborn(p);
-# endif
- if (orig_sz > lb) {
- /* Clear unneeded part of object to avoid bogus pointer */
- /* tracing. */
- /* Safe for stubborn objects. */
- BZERO(((ptr_t)p) + lb, orig_sz - lb);
- }
- return(p);
- } else {
- /* shrink */
- void * result =
- GC_generic_or_special_malloc((word)lb, obj_kind);
-
- if (result == 0) return(0);
- /* Could also return original object. But this */
- /* gives the client warning of imminent disaster. */
- BCOPY(p, result, lb);
-# ifndef IGNORE_FREE
- GC_free(p);
-# endif
- return(result);
- }
+ if (lb >= (sz >> 1)) {
+# ifdef STUBBORN_ALLOC
+ if (obj_kind == STUBBORN) GC_change_stubborn(p);
+# endif
+ if (orig_sz > lb) {
+ /* Clear unneeded part of object to avoid bogus pointer */
+ /* tracing. */
+ /* Safe for stubborn objects. */
+ BZERO(((ptr_t)p) + lb, orig_sz - lb);
+ }
+ return(p);
+ } else {
+ /* shrink */
+ void * result =
+ GC_generic_or_special_malloc((word)lb, obj_kind);
+
+ if (result == 0) return(0);
+ /* Could also return original object. But this */
+ /* gives the client warning of imminent disaster. */
+ BCOPY(p, result, lb);
+# ifndef IGNORE_FREE
+ GC_free(p);
+# endif
+ return(result);
+ }
} else {
- /* grow */
- void * result =
- GC_generic_or_special_malloc((word)lb, obj_kind);
-
- if (result == 0) return(0);
- BCOPY(p, result, sz);
-# ifndef IGNORE_FREE
- GC_free(p);
-# endif
- return(result);
+ /* grow */
+ void * result =
+ GC_generic_or_special_malloc((word)lb, obj_kind);
+
+ if (result == 0) return(0);
+ BCOPY(p, result, sz);
+# ifndef IGNORE_FREE
+ GC_free(p);
+# endif
+ return(result);
}
}
@@ -143,14 +143,14 @@ GC_API void * GC_CALL GC_realloc(void * p, size_t lb)
# ifdef REDIRECT_REALLOC
-/* As with malloc, avoid two levels of extra calls here. */
+/* As with malloc, avoid two levels of extra calls here. */
# ifdef GC_ADD_CALLER
# define RA GC_RETURN_ADDR,
# else
# define RA
# endif
# define GC_debug_realloc_replacement(p, lb) \
- GC_debug_realloc(p, lb, RA "unknown", 0)
+ GC_debug_realloc(p, lb, RA "unknown", 0)
void * realloc(void * p, size_t lb)
{
@@ -163,7 +163,7 @@ void * realloc(void * p, size_t lb)
/* Allocate memory such that only pointers to near the */
/* beginning of the object are considered. */
-/* We avoid holding allocation lock while we clear memory. */
+/* We avoid holding allocation lock while we clear memory. */
void * GC_generic_malloc_ignore_off_page(size_t lb, int k)
{
void *result;
@@ -172,7 +172,7 @@ void * GC_generic_malloc_ignore_off_page(size_t lb, int k)
word n_blocks;
GC_bool init;
DCL_LOCK_STATE;
-
+
if (SMALL_OBJ(lb))
return(GC_generic_malloc((word)lb, k));
lg = ROUNDED_UP_GRANULES(lb);
@@ -185,27 +185,27 @@ void * GC_generic_malloc_ignore_off_page(size_t lb, int k)
result = (ptr_t)GC_alloc_large(ADD_SLOP(lb), k, IGNORE_OFF_PAGE);
if (0 != result) {
if (GC_debugging_started) {
- BZERO(result, n_blocks * HBLKSIZE);
+ BZERO(result, n_blocks * HBLKSIZE);
} else {
# ifdef THREADS
- /* Clear any memory that might be used for GC descriptors */
- /* before we release the lock. */
- ((word *)result)[0] = 0;
- ((word *)result)[1] = 0;
- ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
- ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
-# endif
+ /* Clear any memory that might be used for GC descriptors */
+ /* before we release the lock. */
+ ((word *)result)[0] = 0;
+ ((word *)result)[1] = 0;
+ ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
+ ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
+# endif
}
}
GC_bytes_allocd += lb_rounded;
if (0 == result) {
- GC_oom_func oom_fn = GC_oom_fn;
- UNLOCK();
- return((*oom_fn)(lb));
+ GC_oom_func oom_fn = GC_oom_fn;
+ UNLOCK();
+ return((*oom_fn)(lb));
} else {
- UNLOCK();
- if (init && !GC_debugging_started) {
- BZERO(result, n_blocks * HBLKSIZE);
+ UNLOCK();
+ if (init && !GC_debugging_started) {
+ BZERO(result, n_blocks * HBLKSIZE);
}
return(result);
}
@@ -221,14 +221,14 @@ GC_API void * GC_CALL GC_malloc_atomic_ignore_off_page(size_t lb)
return((void *)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
}
-/* Increment GC_bytes_allocd from code that doesn't have direct access */
-/* to GC_arrays. */
+/* Increment GC_bytes_allocd from code that doesn't have direct access */
+/* to GC_arrays. */
GC_API void GC_CALL GC_incr_bytes_allocd(size_t n)
{
GC_bytes_allocd += n;
}
-/* The same for GC_bytes_freed. */
+/* The same for GC_bytes_freed. */
GC_API void GC_CALL GC_incr_bytes_freed(size_t n)
{
GC_bytes_freed += n;
@@ -250,28 +250,28 @@ volatile signed_word GC_bytes_allocd_tmp = 0;
/* expensive.) */
#endif /* PARALLEL_MARK */
-/* Return a list of 1 or more objects of the indicated size, linked */
-/* through the first word in the object. This has the advantage that */
-/* it acquires the allocation lock only once, and may greatly reduce */
+/* Return a list of 1 or more objects of the indicated size, linked */
+/* through the first word in the object. This has the advantage that */
+/* it acquires the allocation lock only once, and may greatly reduce */
/* time wasted contending for the allocation lock. Typical usage would */
-/* be in a thread that requires many items of the same size. It would */
-/* keep its own free list in thread-local storage, and call */
-/* GC_malloc_many or friends to replenish it. (We do not round up */
-/* object sizes, since a call indicates the intention to consume many */
-/* objects of exactly this size.) */
-/* We assume that the size is a multiple of GRANULE_BYTES. */
-/* We return the free-list by assigning it to *result, since it is */
-/* not safe to return, e.g. a linked list of pointer-free objects, */
-/* since the collector would not retain the entire list if it were */
-/* invoked just as we were returning. */
-/* Note that the client should usually clear the link field. */
+/* be in a thread that requires many items of the same size. It would */
+/* keep its own free list in thread-local storage, and call */
+/* GC_malloc_many or friends to replenish it. (We do not round up */
+/* object sizes, since a call indicates the intention to consume many */
+/* objects of exactly this size.) */
+/* We assume that the size is a multiple of GRANULE_BYTES. */
+/* We return the free-list by assigning it to *result, since it is */
+/* not safe to return, e.g. a linked list of pointer-free objects, */
+/* since the collector would not retain the entire list if it were */
+/* invoked just as we were returning. */
+/* Note that the client should usually clear the link field. */
void GC_generic_malloc_many(size_t lb, int k, void **result)
{
void *op;
void *p;
void **opp;
-size_t lw; /* Length in words. */
-size_t lg; /* Length in granules. */
+size_t lw; /* Length in words. */
+size_t lg; /* Length in granules. */
signed_word my_bytes_allocd = 0;
struct obj_kind * ok = &(GC_obj_kinds[k]);
DCL_LOCK_STATE;
@@ -280,7 +280,7 @@ DCL_LOCK_STATE;
if (!SMALL_OBJ(lb)) {
op = GC_generic_malloc(lb, k);
if(0 != op) obj_link(op) = 0;
- *result = op;
+ *result = op;
return;
}
lw = BYTES_TO_WORDS(lb);
@@ -292,87 +292,87 @@ DCL_LOCK_STATE;
/* Do our share of marking work */
if (GC_incremental && !GC_dont_gc) {
ENTER_GC();
- GC_collect_a_little_inner(1);
+ GC_collect_a_little_inner(1);
EXIT_GC();
}
/* First see if we can reclaim a page of objects waiting to be */
- /* reclaimed. */
+ /* reclaimed. */
{
- struct hblk ** rlh = ok -> ok_reclaim_list;
- struct hblk * hbp;
- hdr * hhdr;
+ struct hblk ** rlh = ok -> ok_reclaim_list;
+ struct hblk * hbp;
+ hdr * hhdr;
- rlh += lg;
- while ((hbp = *rlh) != 0) {
+ rlh += lg;
+ while ((hbp = *rlh) != 0) {
hhdr = HDR(hbp);
*rlh = hhdr -> hb_next;
- GC_ASSERT(hhdr -> hb_sz == lb);
- hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
-# ifdef PARALLEL_MARK
- if (GC_parallel) {
- signed_word my_bytes_allocd_tmp = GC_bytes_allocd_tmp;
-
- GC_ASSERT(my_bytes_allocd_tmp >= 0);
- /* We only decrement it while holding the GC lock. */
- /* Thus we can't accidentally adjust it down in more */
- /* than one thread simultaneously. */
- if (my_bytes_allocd_tmp != 0) {
- (void)AO_fetch_and_add(
- (volatile void *)(&GC_bytes_allocd_tmp),
- (AO_t)(-my_bytes_allocd_tmp));
- GC_bytes_allocd += my_bytes_allocd_tmp;
- }
- GC_acquire_mark_lock();
- ++ GC_fl_builder_count;
- UNLOCK();
- GC_release_mark_lock();
- }
-# endif
- op = GC_reclaim_generic(hbp, hhdr, lb,
- ok -> ok_init, 0, &my_bytes_allocd);
+ GC_ASSERT(hhdr -> hb_sz == lb);
+ hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
+# ifdef PARALLEL_MARK
+ if (GC_parallel) {
+ signed_word my_bytes_allocd_tmp = GC_bytes_allocd_tmp;
+
+ GC_ASSERT(my_bytes_allocd_tmp >= 0);
+ /* We only decrement it while holding the GC lock. */
+ /* Thus we can't accidentally adjust it down in more */
+ /* than one thread simultaneously. */
+ if (my_bytes_allocd_tmp != 0) {
+ (void)AO_fetch_and_add(
+ (volatile void *)(&GC_bytes_allocd_tmp),
+ (AO_t)(-my_bytes_allocd_tmp));
+ GC_bytes_allocd += my_bytes_allocd_tmp;
+ }
+ GC_acquire_mark_lock();
+ ++ GC_fl_builder_count;
+ UNLOCK();
+ GC_release_mark_lock();
+ }
+# endif
+ op = GC_reclaim_generic(hbp, hhdr, lb,
+ ok -> ok_init, 0, &my_bytes_allocd);
if (op != 0) {
- /* We also reclaimed memory, so we need to adjust */
- /* that count. */
- /* This should be atomic, so the results may be */
- /* inaccurate. */
- GC_bytes_found += my_bytes_allocd;
-# ifdef PARALLEL_MARK
- if (GC_parallel) {
- *result = op;
- (void)AO_fetch_and_add(
- (volatile AO_t *)(&GC_bytes_allocd_tmp),
- (AO_t)(my_bytes_allocd));
- GC_acquire_mark_lock();
- -- GC_fl_builder_count;
- if (GC_fl_builder_count == 0) GC_notify_all_builder();
- GC_release_mark_lock();
- (void) GC_clear_stack(0);
- return;
- }
-# endif
- GC_bytes_allocd += my_bytes_allocd;
- goto out;
- }
-# ifdef PARALLEL_MARK
- if (GC_parallel) {
- GC_acquire_mark_lock();
- -- GC_fl_builder_count;
- if (GC_fl_builder_count == 0) GC_notify_all_builder();
- GC_release_mark_lock();
- LOCK();
- /* GC lock is needed for reclaim list access. We */
- /* must decrement fl_builder_count before reaquiring GC */
- /* lock. Hopefully this path is rare. */
- }
-# endif
- }
+ /* We also reclaimed memory, so we need to adjust */
+ /* that count. */
+ /* This should be atomic, so the results may be */
+ /* inaccurate. */
+ GC_bytes_found += my_bytes_allocd;
+# ifdef PARALLEL_MARK
+ if (GC_parallel) {
+ *result = op;
+ (void)AO_fetch_and_add(
+ (volatile AO_t *)(&GC_bytes_allocd_tmp),
+ (AO_t)(my_bytes_allocd));
+ GC_acquire_mark_lock();
+ -- GC_fl_builder_count;
+ if (GC_fl_builder_count == 0) GC_notify_all_builder();
+ GC_release_mark_lock();
+ (void) GC_clear_stack(0);
+ return;
+ }
+# endif
+ GC_bytes_allocd += my_bytes_allocd;
+ goto out;
+ }
+# ifdef PARALLEL_MARK
+ if (GC_parallel) {
+ GC_acquire_mark_lock();
+ -- GC_fl_builder_count;
+ if (GC_fl_builder_count == 0) GC_notify_all_builder();
+ GC_release_mark_lock();
+ LOCK();
+ /* GC lock is needed for reclaim list access. We */
+ /* must decrement fl_builder_count before reaquiring GC */
+ /* lock. Hopefully this path is rare. */
+ }
+# endif
+ }
}
- /* Next try to use prefix of global free list if there is one. */
- /* We don't refill it, but we need to use it up before allocating */
- /* a new block ourselves. */
+ /* Next try to use prefix of global free list if there is one. */
+ /* We don't refill it, but we need to use it up before allocating */
+ /* a new block ourselves. */
opp = &(GC_obj_kinds[k].ok_freelist[lg]);
if ( (op = *opp) != 0 ) {
- *opp = 0;
+ *opp = 0;
my_bytes_allocd = 0;
for (p = op; p != 0; p = obj_link(p)) {
my_bytes_allocd += lb;
@@ -380,46 +380,46 @@ DCL_LOCK_STATE;
*opp = obj_link(p);
obj_link(p) = 0;
break;
- }
+ }
}
- GC_bytes_allocd += my_bytes_allocd;
- goto out;
+ GC_bytes_allocd += my_bytes_allocd;
+ goto out;
}
- /* Next try to allocate a new block worth of objects of this size. */
+ /* Next try to allocate a new block worth of objects of this size. */
{
- struct hblk *h = GC_allochblk(lb, k, 0);
- if (h != 0) {
- if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
- GC_bytes_allocd += HBLKSIZE - HBLKSIZE % lb;
-# ifdef PARALLEL_MARK
- if (GC_parallel) {
- GC_acquire_mark_lock();
- ++ GC_fl_builder_count;
- UNLOCK();
- GC_release_mark_lock();
-
- op = GC_build_fl(h, lw,
- (ok -> ok_init || GC_debugging_started), 0);
-
- *result = op;
- GC_acquire_mark_lock();
- -- GC_fl_builder_count;
- if (GC_fl_builder_count == 0) GC_notify_all_builder();
- GC_release_mark_lock();
- (void) GC_clear_stack(0);
- return;
- }
-# endif
- op = GC_build_fl(h, lw, (ok -> ok_init || GC_debugging_started), 0);
- goto out;
- }
+ struct hblk *h = GC_allochblk(lb, k, 0);
+ if (h != 0) {
+ if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
+ GC_bytes_allocd += HBLKSIZE - HBLKSIZE % lb;
+# ifdef PARALLEL_MARK
+ if (GC_parallel) {
+ GC_acquire_mark_lock();
+ ++ GC_fl_builder_count;
+ UNLOCK();
+ GC_release_mark_lock();
+
+ op = GC_build_fl(h, lw,
+ (ok -> ok_init || GC_debugging_started), 0);
+
+ *result = op;
+ GC_acquire_mark_lock();
+ -- GC_fl_builder_count;
+ if (GC_fl_builder_count == 0) GC_notify_all_builder();
+ GC_release_mark_lock();
+ (void) GC_clear_stack(0);
+ return;
+ }
+# endif
+ op = GC_build_fl(h, lw, (ok -> ok_init || GC_debugging_started), 0);
+ goto out;
+ }
}
-
- /* As a last attempt, try allocating a single object. Note that */
- /* this may trigger a collection or expand the heap. */
+
+ /* As a last attempt, try allocating a single object. Note that */
+ /* this may trigger a collection or expand the heap. */
op = GC_generic_malloc_inner(lb, k);
if (0 != op) obj_link(op) = 0;
-
+
out:
*result = op;
UNLOCK();
@@ -430,13 +430,13 @@ GC_API void * GC_CALL GC_malloc_many(size_t lb)
{
void *result;
GC_generic_malloc_many(((lb + EXTRA_BYTES + GRANULE_BYTES-1)
- & ~(GRANULE_BYTES-1)),
- NORMAL, &result);
+ & ~(GRANULE_BYTES-1)),
+ NORMAL, &result);
return result;
}
-/* Note that the "atomic" version of this would be unsafe, since the */
-/* links would not be seen by the collector. */
+/* Note that the "atomic" version of this would be unsafe, since the */
+/* links would not be seen by the collector. */
# endif
/* Allocate lb bytes of pointerful, traced, but not collectable data */
@@ -448,54 +448,54 @@ GC_API void * GC_CALL GC_malloc_uncollectable(size_t lb)
DCL_LOCK_STATE;
if( SMALL_OBJ(lb) ) {
- if (EXTRA_BYTES != 0 && lb != 0) lb--;
- /* We don't need the extra byte, since this won't be */
- /* collected anyway. */
- lg = GC_size_map[lb];
- opp = &(GC_uobjfreelist[lg]);
- LOCK();
+ if (EXTRA_BYTES != 0 && lb != 0) lb--;
+ /* We don't need the extra byte, since this won't be */
+ /* collected anyway. */
+ lg = GC_size_map[lb];
+ opp = &(GC_uobjfreelist[lg]);
+ LOCK();
if( (op = *opp) != 0 ) {
*opp = obj_link(op);
obj_link(op) = 0;
GC_bytes_allocd += GRANULES_TO_BYTES(lg);
- /* Mark bit ws already set on free list. It will be */
- /* cleared only temporarily during a collection, as a */
- /* result of the normal free list mark bit clearing. */
+ /* Mark bit ws already set on free list. It will be */
+ /* cleared only temporarily during a collection, as a */
+ /* result of the normal free list mark bit clearing. */
GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
UNLOCK();
} else {
UNLOCK();
op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
- /* For small objects, the free lists are completely marked. */
- }
- GC_ASSERT(0 == op || GC_is_marked(op));
+ /* For small objects, the free lists are completely marked. */
+ }
+ GC_ASSERT(0 == op || GC_is_marked(op));
return((void *) op);
} else {
- hdr * hhdr;
-
- op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
+ hdr * hhdr;
+
+ op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
if (0 == op) return(0);
-
- GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0); /* large block */
- hhdr = HDR(op);
- /* We don't need the lock here, since we have an undisguised */
- /* pointer. We do need to hold the lock while we adjust */
- /* mark bits. */
- LOCK();
- set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
- GC_ASSERT(hhdr -> hb_n_marks == 0);
- hhdr -> hb_n_marks = 1;
- UNLOCK();
- return((void *) op);
+
+ GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0); /* large block */
+ hhdr = HDR(op);
+ /* We don't need the lock here, since we have an undisguised */
+ /* pointer. We do need to hold the lock while we adjust */
+ /* mark bits. */
+ LOCK();
+ set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
+ GC_ASSERT(hhdr -> hb_n_marks == 0);
+ hhdr -> hb_n_marks = 1;
+ UNLOCK();
+ return((void *) op);
}
}
-/* Not well tested nor integrated. */
-/* Debug version is tricky and currently missing. */
+/* Not well tested nor integrated. */
+/* Debug version is tricky and currently missing. */
#include <limits.h>
-GC_API void * GC_CALL GC_memalign(size_t align, size_t lb)
-{
+GC_API void * GC_CALL GC_memalign(size_t align, size_t lb)
+{
size_t new_lb;
size_t offset;
ptr_t result;
@@ -503,22 +503,22 @@ GC_API void * GC_CALL GC_memalign(size_t align, size_t lb)
if (align <= GRANULE_BYTES) return GC_malloc(lb);
if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) {
if (align > HBLKSIZE) {
- return (*GC_get_oom_fn())(LONG_MAX-1024); /* Fail */
- }
- return GC_malloc(lb <= HBLKSIZE? HBLKSIZE : lb);
- /* Will be HBLKSIZE aligned. */
+ return (*GC_get_oom_fn())(LONG_MAX-1024); /* Fail */
+ }
+ return GC_malloc(lb <= HBLKSIZE? HBLKSIZE : lb);
+ /* Will be HBLKSIZE aligned. */
}
/* We could also try to make sure that the real rounded-up object size */
- /* is a multiple of align. That would be correct up to HBLKSIZE. */
+ /* is a multiple of align. That would be correct up to HBLKSIZE. */
new_lb = lb + align - 1;
result = GC_malloc(new_lb);
offset = (word)result % align;
if (offset != 0) {
- offset = align - offset;
+ offset = align - offset;
if (!GC_all_interior_pointers) {
- if (offset >= VALID_OFFSET_SZ) return GC_malloc(HBLKSIZE);
- GC_register_displacement(offset);
- }
+ if (offset >= VALID_OFFSET_SZ) return GC_malloc(HBLKSIZE);
+ GC_register_displacement(offset);
+ }
}
result = (void *) ((ptr_t)result + offset);
GC_ASSERT((word)result % align == 0);
@@ -526,9 +526,9 @@ GC_API void * GC_CALL GC_memalign(size_t align, size_t lb)
}
# ifdef ATOMIC_UNCOLLECTABLE
-/* Allocate lb bytes of pointerfree, untraced, uncollectable data */
-/* This is normally roughly equivalent to the system malloc. */
-/* But it may be useful if malloc is redefined. */
+/* Allocate lb bytes of pointerfree, untraced, uncollectable data */
+/* This is normally roughly equivalent to the system malloc. */
+/* But it may be useful if malloc is redefined. */
GC_API void * GC_CALL GC_malloc_atomic_uncollectable(size_t lb)
{
void *op;
@@ -537,40 +537,40 @@ GC_API void * GC_CALL GC_malloc_atomic_uncollectable(size_t lb)
DCL_LOCK_STATE;
if( SMALL_OBJ(lb) ) {
- if (EXTRA_BYTES != 0 && lb != 0) lb--;
- /* We don't need the extra byte, since this won't be */
- /* collected anyway. */
- lg = GC_size_map[lb];
- opp = &(GC_auobjfreelist[lg]);
- LOCK();
+ if (EXTRA_BYTES != 0 && lb != 0) lb--;
+ /* We don't need the extra byte, since this won't be */
+ /* collected anyway. */
+ lg = GC_size_map[lb];
+ opp = &(GC_auobjfreelist[lg]);
+ LOCK();
if( (op = *opp) != 0 ) {
*opp = obj_link(op);
obj_link(op) = 0;
GC_bytes_allocd += GRANULES_TO_BYTES(lg);
- /* Mark bit was already set while object was on free list. */
+ /* Mark bit was already set while object was on free list. */
GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
UNLOCK();
} else {
UNLOCK();
op = (ptr_t)GC_generic_malloc(lb, AUNCOLLECTABLE);
- }
- GC_ASSERT(0 == op || GC_is_marked(op));
+ }
+ GC_ASSERT(0 == op || GC_is_marked(op));
return((void *) op);
} else {
- hdr * hhdr;
-
- op = (ptr_t)GC_generic_malloc(lb, AUNCOLLECTABLE);
+ hdr * hhdr;
+
+ op = (ptr_t)GC_generic_malloc(lb, AUNCOLLECTABLE);
if (0 == op) return(0);
- GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0);
- hhdr = HDR(op);
-
- LOCK();
- set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
- GC_ASSERT(hhdr -> hb_n_marks == 0);
- hhdr -> hb_n_marks = 1;
- UNLOCK();
- return((void *) op);
+ GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0);
+ hhdr = HDR(op);
+
+ LOCK();
+ set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
+ GC_ASSERT(hhdr -> hb_n_marks == 0);
+ hhdr -> hb_n_marks = 1;
+ UNLOCK();
+ return((void *) op);
}
}
diff --git a/new_hblk.c b/new_hblk.c
index 6c5c0cae..33de6453 100644
--- a/new_hblk.c
+++ b/new_hblk.c
@@ -13,8 +13,8 @@
* modified is included with the above copyright notice.
*
* This file contains the functions:
- * ptr_t GC_build_flXXX(h, old_fl)
- * void GC_new_hblk(size)
+ * ptr_t GC_build_flXXX(h, old_fl)
+ * void GC_new_hblk(size)
*/
/* Boehm, May 19, 1994 2:09 pm PDT */
@@ -32,7 +32,7 @@ STATIC ptr_t GC_build_fl_clear2(struct hblk *h, ptr_t ofl)
{
word * p = (word *)(h -> hb_body);
word * lim = (word *)(h + 1);
-
+
p[0] = (word)ofl;
p[1] = 0;
p[2] = (word)p;
@@ -52,17 +52,17 @@ STATIC ptr_t GC_build_fl_clear4(struct hblk *h, ptr_t ofl)
{
word * p = (word *)(h -> hb_body);
word * lim = (word *)(h + 1);
-
+
p[0] = (word)ofl;
p[1] = 0;
p[2] = 0;
p[3] = 0;
p += 4;
for (; p < lim; p += 4) {
- PREFETCH_FOR_WRITE((ptr_t)(p+64));
+ PREFETCH_FOR_WRITE((ptr_t)(p+64));
p[0] = (word)(p-4);
p[1] = 0;
- CLEAR_DOUBLE(p+2);
+ CLEAR_DOUBLE(p+2);
};
return((ptr_t)(p-4));
}
@@ -72,7 +72,7 @@ STATIC ptr_t GC_build_fl2(struct hblk *h, ptr_t ofl)
{
word * p = (word *)(h -> hb_body);
word * lim = (word *)(h + 1);
-
+
p[0] = (word)ofl;
p[2] = (word)p;
p += 4;
@@ -88,12 +88,12 @@ STATIC ptr_t GC_build_fl4(struct hblk *h, ptr_t ofl)
{
word * p = (word *)(h -> hb_body);
word * lim = (word *)(h + 1);
-
+
p[0] = (word)ofl;
p[4] = (word)p;
p += 8;
for (; p < lim; p += 8) {
- PREFETCH_FOR_WRITE((ptr_t)(p+64));
+ PREFETCH_FOR_WRITE((ptr_t)(p+64));
p[0] = (word)(p-4);
p[4] = (word)p;
};
@@ -103,62 +103,62 @@ STATIC ptr_t GC_build_fl4(struct hblk *h, ptr_t ofl)
#endif /* !SMALL_CONFIG */
-/* Build a free list for objects of size sz inside heap block h. */
-/* Clear objects inside h if clear is set. Add list to the end of */
-/* the free list we build. Return the new free list. */
-/* This could be called without the main GC lock, if we ensure that */
-/* there is no concurrent collection which might reclaim objects that */
-/* we have not yet allocated. */
+/* Build a free list for objects of size sz inside heap block h. */
+/* Clear objects inside h if clear is set. Add list to the end of */
+/* the free list we build. Return the new free list. */
+/* This could be called without the main GC lock, if we ensure that */
+/* there is no concurrent collection which might reclaim objects that */
+/* we have not yet allocated. */
ptr_t GC_build_fl(struct hblk *h, size_t sz, GC_bool clear, ptr_t list)
{
word *p, *prev;
- word *last_object; /* points to last object in new hblk */
+ word *last_object; /* points to last object in new hblk */
- /* Do a few prefetches here, just because its cheap. */
- /* If we were more serious about it, these should go inside */
- /* the loops. But write prefetches usually don't seem to */
- /* matter much. */
+ /* Do a few prefetches here, just because its cheap. */
+ /* If we were more serious about it, these should go inside */
+ /* the loops. But write prefetches usually don't seem to */
+ /* matter much. */
PREFETCH_FOR_WRITE((ptr_t)h);
PREFETCH_FOR_WRITE((ptr_t)h + 128);
PREFETCH_FOR_WRITE((ptr_t)h + 256);
PREFETCH_FOR_WRITE((ptr_t)h + 378);
- /* Handle small objects sizes more efficiently. For larger objects */
- /* the difference is less significant. */
+ /* Handle small objects sizes more efficiently. For larger objects */
+ /* the difference is less significant. */
# ifndef SMALL_CONFIG
switch (sz) {
case 2: if (clear) {
- return GC_build_fl_clear2(h, list);
- } else {
- return GC_build_fl2(h, list);
- }
+ return GC_build_fl_clear2(h, list);
+ } else {
+ return GC_build_fl2(h, list);
+ }
case 4: if (clear) {
- return GC_build_fl_clear4(h, list);
- } else {
- return GC_build_fl4(h, list);
- }
+ return GC_build_fl_clear4(h, list);
+ } else {
+ return GC_build_fl4(h, list);
+ }
default:
- break;
+ break;
}
# endif /* !SMALL_CONFIG */
-
+
/* Clear the page if necessary. */
if (clear) BZERO(h, HBLKSIZE);
-
+
/* Add objects to free list */
- p = (word *)(h -> hb_body) + sz; /* second object in *h */
- prev = (word *)(h -> hb_body); /* One object behind p */
+ p = (word *)(h -> hb_body) + sz; /* second object in *h */
+ prev = (word *)(h -> hb_body); /* One object behind p */
last_object = (word *)((char *)h + HBLKSIZE);
last_object -= sz;
- /* Last place for last object to start */
+ /* Last place for last object to start */
/* make a list of all objects in *h with head as last object */
while (p <= last_object) {
/* current object's link points to last object */
obj_link(p) = (ptr_t)prev;
- prev = p;
- p += sz;
+ prev = p;
+ p += sz;
}
- p -= sz; /* p now points to last object */
+ p -= sz; /* p now points to last object */
/*
* put p (which is now head of list of objects in *h) as first
@@ -178,11 +178,11 @@ ptr_t GC_build_fl(struct hblk *h, size_t sz, GC_bool clear, ptr_t list)
*/
void GC_new_hblk(size_t gran, int kind)
{
- struct hblk *h; /* the new heap block */
+ struct hblk *h; /* the new heap block */
GC_bool clear = GC_obj_kinds[kind].ok_init;
GC_STATIC_ASSERT((sizeof (struct hblk)) == HBLKSIZE);
-
+
if (GC_debugging_started) clear = TRUE;
/* Allocate a new heap block */
@@ -194,7 +194,6 @@ void GC_new_hblk(size_t gran, int kind)
/* Build the free list */
GC_obj_kinds[kind].ok_freelist[gran] =
- GC_build_fl(h, GRANULES_TO_WORDS(gran), clear,
- GC_obj_kinds[kind].ok_freelist[gran]);
+ GC_build_fl(h, GRANULES_TO_WORDS(gran), clear,
+ GC_obj_kinds[kind].ok_freelist[gran]);
}
-
diff --git a/obj_map.c b/obj_map.c
index 8198a7e5..96a2174f 100644
--- a/obj_map.c
+++ b/obj_map.c
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991, 1992 by Xerox Corporation. All rights reserved.
* Copyright (c) 1999-2001 by Hewlett-Packard Company. All rights reserved.
@@ -12,13 +12,13 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-
+
/* Routines for maintaining maps describing heap block
* layouts for various object sizes. Allows fast pointer validity checks
* and fast location of object start locations on machines (such as SPARC)
* with slow division.
*/
-
+
# include "private/gc_priv.h"
/* Consider pointers that are offset bytes displaced from the beginning */
@@ -27,13 +27,13 @@
GC_API void GC_CALL GC_register_displacement(size_t offset)
{
DCL_LOCK_STATE;
-
+
LOCK();
GC_register_displacement_inner(offset);
UNLOCK();
}
-void GC_register_displacement_inner(size_t offset)
+void GC_register_displacement_inner(size_t offset)
{
if (offset >= VALID_OFFSET_SZ) {
ABORT("Bad argument to GC_register_displacement");
@@ -45,14 +45,14 @@ void GC_register_displacement_inner(size_t offset)
}
#ifdef MARK_BIT_PER_GRANULE
-/* Add a heap block map for objects of size granules to obj_map. */
-/* Return FALSE on failure. */
-/* A size of 0 granules is used for large objects. */
+/* Add a heap block map for objects of size granules to obj_map. */
+/* Return FALSE on failure. */
+/* A size of 0 granules is used for large objects. */
GC_bool GC_add_map_entry(size_t granules)
{
unsigned displ;
short * new_map;
-
+
if (granules > BYTES_TO_GRANULES(MAXOBJBYTES)) granules = 0;
if (GC_obj_map[granules] != 0) {
return(TRUE);
@@ -61,14 +61,14 @@ GC_bool GC_add_map_entry(size_t granules)
if (new_map == 0) return(FALSE);
if (GC_print_stats)
GC_log_printf("Adding block map for size of %u granules (%u bytes)\n",
- (unsigned)granules, (unsigned)(GRANULES_TO_BYTES(granules)));
+ (unsigned)granules, (unsigned)(GRANULES_TO_BYTES(granules)));
if (granules == 0) {
for (displ = 0; displ < BYTES_TO_GRANULES(HBLKSIZE); displ++) {
new_map[displ] = 1; /* Nonzero to get us out of marker fast path. */
}
} else {
for (displ = 0; displ < BYTES_TO_GRANULES(HBLKSIZE); displ++) {
- new_map[displ] = (short)(displ % granules);
+ new_map[displ] = (short)(displ % granules);
}
}
GC_obj_map[granules] = new_map;
@@ -83,7 +83,7 @@ void GC_initialize_offsets(void)
if (!offsets_initialized) {
int i;
if (GC_all_interior_pointers) {
- for (i = 0; i < VALID_OFFSET_SZ; ++i) GC_valid_offsets[i] = TRUE;
+ for (i = 0; i < VALID_OFFSET_SZ; ++i) GC_valid_offsets[i] = TRUE;
}
offsets_initialized = TRUE;
}
diff --git a/ptr_chck.c b/ptr_chck.c
index 6b72ee2f..c168ff9f 100644
--- a/ptr_chck.c
+++ b/ptr_chck.c
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
@@ -25,47 +25,47 @@ STATIC void GC_CALLBACK GC_default_same_obj_print_proc(void * p, void * q)
}
void (GC_CALLBACK *GC_same_obj_print_proc) (void *, void *)
- = GC_default_same_obj_print_proc;
+ = GC_default_same_obj_print_proc;
-/* Check that p and q point to the same object. Call */
-/* *GC_same_obj_print_proc if they don't. */
-/* Returns the first argument. (Return value may be hard */
-/* to use,due to typing issues. But if we had a suitable */
-/* preprocessor ...) */
-/* Succeeds if neither p nor q points to the heap. */
-/* We assume this is performance critical. (It shouldn't */
-/* be called by production code, but this can easily make */
-/* debugging intolerably slow.) */
+/* Check that p and q point to the same object. Call */
+/* *GC_same_obj_print_proc if they don't. */
+/* Returns the first argument. (Return value may be hard */
+/* to use,due to typing issues. But if we had a suitable */
+/* preprocessor ...) */
+/* Succeeds if neither p nor q points to the heap. */
+/* We assume this is performance critical. (It shouldn't */
+/* be called by production code, but this can easily make */
+/* debugging intolerably slow.) */
GC_API void * GC_CALL GC_same_obj(void *p, void *q)
{
struct hblk *h;
hdr *hhdr;
ptr_t base, limit;
word sz;
-
+
if (!GC_is_initialized) GC_init();
hhdr = HDR((word)p);
if (hhdr == 0) {
- if (divHBLKSZ((word)p) != divHBLKSZ((word)q)
- && HDR((word)q) != 0) {
- goto fail;
- }
- return(p);
+ if (divHBLKSZ((word)p) != divHBLKSZ((word)q)
+ && HDR((word)q) != 0) {
+ goto fail;
+ }
+ return(p);
}
- /* If it's a pointer to the middle of a large object, move it */
- /* to the beginning. */
+ /* If it's a pointer to the middle of a large object, move it */
+ /* to the beginning. */
if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
- h = HBLKPTR(p) - (word)hhdr;
- hhdr = HDR(h);
- while (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
- h = FORWARDED_ADDR(h, hhdr);
- hhdr = HDR(h);
- }
- limit = (ptr_t)h + hhdr -> hb_sz;
- if ((ptr_t)p >= limit || (ptr_t)q >= limit || (ptr_t)q < (ptr_t)h ) {
- goto fail;
- }
- return(p);
+ h = HBLKPTR(p) - (word)hhdr;
+ hhdr = HDR(h);
+ while (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
+ h = FORWARDED_ADDR(h, hhdr);
+ hhdr = HDR(h);
+ }
+ limit = (ptr_t)h + hhdr -> hb_sz;
+ if ((ptr_t)p >= limit || (ptr_t)q >= limit || (ptr_t)q < (ptr_t)h ) {
+ goto fail;
+ }
+ return(p);
}
sz = hhdr -> hb_sz;
if (sz > MAXOBJBYTES) {
@@ -77,21 +77,21 @@ GC_API void * GC_CALL GC_same_obj(void *p, void *q)
} else {
size_t offset;
size_t pdispl = HBLKDISPL(p);
-
+
offset = pdispl % sz;
if (HBLKPTR(p) != HBLKPTR(q)) goto fail;
- /* W/o this check, we might miss an error if */
- /* q points to the first object on a page, and */
- /* points just before the page. */
+ /* W/o this check, we might miss an error if */
+ /* q points to the first object on a page, and */
+ /* points just before the page. */
base = (ptr_t)p - offset;
limit = base + sz;
}
- /* [base, limit) delimits the object containing p, if any. */
- /* If p is not inside a valid object, then either q is */
- /* also outside any valid object, or it is outside */
- /* [base, limit). */
+ /* [base, limit) delimits the object containing p, if any. */
+ /* If p is not inside a valid object, then either q is */
+ /* also outside any valid object, or it is outside */
+ /* [base, limit). */
if ((ptr_t)q >= limit || (ptr_t)q < base) {
- goto fail;
+ goto fail;
}
return(p);
fail:
@@ -105,15 +105,15 @@ STATIC void GC_CALLBACK GC_default_is_valid_displacement_print_proc (void *p)
ABORT("GC_is_valid_displacement test failed");
}
-void (GC_CALLBACK *GC_is_valid_displacement_print_proc)(void *) =
- GC_default_is_valid_displacement_print_proc;
+void (GC_CALLBACK *GC_is_valid_displacement_print_proc)(void *) =
+ GC_default_is_valid_displacement_print_proc;
-/* Check that if p is a pointer to a heap page, then it points to */
-/* a valid displacement within a heap object. */
-/* Uninteresting with GC_all_interior_pointers. */
-/* Always returns its argument. */
-/* Note that we don't lock, since nothing relevant about the header */
-/* should change while we have a valid object pointer to the block. */
+/* Check that if p is a pointer to a heap page, then it points to */
+/* a valid displacement within a heap object. */
+/* Uninteresting with GC_all_interior_pointers. */
+/* Always returns its argument. */
+/* Note that we don't lock, since nothing relevant about the header */
+/* should change while we have a valid object pointer to the block. */
GC_API void * GC_CALL GC_is_valid_displacement(void *p)
{
hdr *hhdr;
@@ -121,27 +121,27 @@ GC_API void * GC_CALL GC_is_valid_displacement(void *p)
word offset;
struct hblk *h;
word sz;
-
+
if (!GC_is_initialized) GC_init();
hhdr = HDR((word)p);
if (hhdr == 0) return(p);
h = HBLKPTR(p);
if (GC_all_interior_pointers) {
- while (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
- h = FORWARDED_ADDR(h, hhdr);
- hhdr = HDR(h);
- }
+ while (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
+ h = FORWARDED_ADDR(h, hhdr);
+ hhdr = HDR(h);
+ }
}
if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
- goto fail;
+ goto fail;
}
sz = hhdr -> hb_sz;
pdispl = HBLKDISPL(p);
offset = pdispl % sz;
if ((sz > MAXOBJBYTES && (ptr_t)p >= (ptr_t)h + sz)
- || !GC_valid_offsets[offset]
- || (ptr_t)p - offset + sz > (ptr_t)(h + 1)) {
- goto fail;
+ || !GC_valid_offsets[offset]
+ || (ptr_t)p - offset + sz > (ptr_t)(h + 1)) {
+ goto fail;
}
return(p);
fail:
@@ -156,42 +156,42 @@ STATIC void GC_CALLBACK GC_default_is_visible_print_proc(void * p)
}
void (GC_CALLBACK *GC_is_visible_print_proc)(void * p) =
- GC_default_is_visible_print_proc;
+ GC_default_is_visible_print_proc;
#ifndef THREADS
/* Could p be a stack address? */
STATIC GC_bool GC_on_stack(ptr_t p)
{
- int dummy;
-# ifdef STACK_GROWS_DOWN
- if ((ptr_t)p >= (ptr_t)(&dummy) && (ptr_t)p < GC_stackbottom ) {
- return(TRUE);
- }
-# else
- if ((ptr_t)p <= (ptr_t)(&dummy) && (ptr_t)p > GC_stackbottom ) {
- return(TRUE);
- }
-# endif
- return(FALSE);
+ int dummy;
+# ifdef STACK_GROWS_DOWN
+ if ((ptr_t)p >= (ptr_t)(&dummy) && (ptr_t)p < GC_stackbottom ) {
+ return(TRUE);
+ }
+# else
+ if ((ptr_t)p <= (ptr_t)(&dummy) && (ptr_t)p > GC_stackbottom ) {
+ return(TRUE);
+ }
+# endif
+ return(FALSE);
}
#endif
-/* Check that p is visible */
-/* to the collector as a possibly pointer containing location. */
-/* If it isn't invoke *GC_is_visible_print_proc. */
-/* Returns the argument in all cases. May erroneously succeed */
-/* in hard cases. (This is intended for debugging use with */
-/* untyped allocations. The idea is that it should be possible, though */
-/* slow, to add such a call to all indirect pointer stores.) */
-/* Currently useless for multithreaded worlds. */
+/* Check that p is visible */
+/* to the collector as a possibly pointer containing location. */
+/* If it isn't invoke *GC_is_visible_print_proc. */
+/* Returns the argument in all cases. May erroneously succeed */
+/* in hard cases. (This is intended for debugging use with */
+/* untyped allocations. The idea is that it should be possible, though */
+/* slow, to add such a call to all indirect pointer stores.) */
+/* Currently useless for multithreaded worlds. */
GC_API void * GC_CALL GC_is_visible(void *p)
{
hdr *hhdr;
-
+
if ((word)p & (ALIGNMENT - 1)) goto fail;
if (!GC_is_initialized) GC_init();
# ifdef THREADS
- hhdr = HDR((word)p);
+ hhdr = HDR((word)p);
if (hhdr != 0 && GC_base(p) == 0) {
goto fail;
} else {
@@ -199,56 +199,56 @@ GC_API void * GC_CALL GC_is_visible(void *p)
return(p);
}
# else
- /* Check stack first: */
- if (GC_on_stack(p)) return(p);
- hhdr = HDR((word)p);
- if (hhdr == 0) {
- if (GC_is_static_root(p)) return(p);
- /* Else do it again correctly: */
+ /* Check stack first: */
+ if (GC_on_stack(p)) return(p);
+ hhdr = HDR((word)p);
+ if (hhdr == 0) {
+ if (GC_is_static_root(p)) return(p);
+ /* Else do it again correctly: */
# if (defined(DYNAMIC_LOADING) || defined(MSWIN32) || \
- defined(MSWINCE) || defined(PCR))
- GC_register_dynamic_libraries();
- if (GC_is_static_root(p))
- return(p);
-# endif
- goto fail;
- } else {
- /* p points to the heap. */
- word descr;
- ptr_t base = GC_base(p); /* Should be manually inlined? */
-
- if (base == 0) goto fail;
- if (HBLKPTR(base) != HBLKPTR(p)) hhdr = HDR((word)p);
- descr = hhdr -> hb_descr;
+ defined(MSWINCE) || defined(PCR))
+ GC_register_dynamic_libraries();
+ if (GC_is_static_root(p))
+ return(p);
+# endif
+ goto fail;
+ } else {
+ /* p points to the heap. */
+ word descr;
+ ptr_t base = GC_base(p); /* Should be manually inlined? */
+
+ if (base == 0) goto fail;
+ if (HBLKPTR(base) != HBLKPTR(p)) hhdr = HDR((word)p);
+ descr = hhdr -> hb_descr;
retry:
- switch(descr & GC_DS_TAGS) {
- case GC_DS_LENGTH:
- if ((word)((ptr_t)p - (ptr_t)base) > (word)descr) goto fail;
- break;
- case GC_DS_BITMAP:
- if ((ptr_t)p - (ptr_t)base
- >= WORDS_TO_BYTES(BITMAP_BITS)
- || ((word)p & (sizeof(word) - 1))) goto fail;
- if (!(((word)1 << (WORDSZ - ((ptr_t)p - (ptr_t)base) - 1))
- & descr)) goto fail;
- break;
- case GC_DS_PROC:
- /* We could try to decipher this partially. */
- /* For now we just punt. */
- break;
- case GC_DS_PER_OBJECT:
- if ((signed_word)descr >= 0) {
- descr = *(word *)((ptr_t)base + (descr & ~GC_DS_TAGS));
- } else {
- ptr_t type_descr = *(ptr_t *)base;
- descr = *(word *)(type_descr
- - (descr - (GC_DS_PER_OBJECT
- - GC_INDIR_PER_OBJ_BIAS)));
- }
- goto retry;
- }
- return(p);
- }
+ switch(descr & GC_DS_TAGS) {
+ case GC_DS_LENGTH:
+ if ((word)((ptr_t)p - (ptr_t)base) > (word)descr) goto fail;
+ break;
+ case GC_DS_BITMAP:
+ if ((ptr_t)p - (ptr_t)base
+ >= WORDS_TO_BYTES(BITMAP_BITS)
+ || ((word)p & (sizeof(word) - 1))) goto fail;
+ if (!(((word)1 << (WORDSZ - ((ptr_t)p - (ptr_t)base) - 1))
+ & descr)) goto fail;
+ break;
+ case GC_DS_PROC:
+ /* We could try to decipher this partially. */
+ /* For now we just punt. */
+ break;
+ case GC_DS_PER_OBJECT:
+ if ((signed_word)descr >= 0) {
+ descr = *(word *)((ptr_t)base + (descr & ~GC_DS_TAGS));
+ } else {
+ ptr_t type_descr = *(ptr_t *)base;
+ descr = *(word *)(type_descr
+ - (descr - (GC_DS_PER_OBJECT
+ - GC_INDIR_PER_OBJ_BIAS)));
+ }
+ goto retry;
+ }
+ return(p);
+ }
# endif
fail:
(*GC_is_visible_print_proc)((ptr_t)p);
@@ -260,9 +260,9 @@ GC_API void * GC_CALL GC_pre_incr (void **p, ptrdiff_t how_much)
{
void * initial = *p;
void * result = GC_same_obj((void *)((ptr_t)initial + how_much), initial);
-
+
if (!GC_all_interior_pointers) {
- (void) GC_is_valid_displacement(result);
+ (void) GC_is_valid_displacement(result);
}
return (*p = result);
}
@@ -271,9 +271,9 @@ GC_API void * GC_CALL GC_post_incr (void **p, ptrdiff_t how_much)
{
void * initial = *p;
void * result = GC_same_obj((void *)((ptr_t)initial + how_much), initial);
-
+
if (!GC_all_interior_pointers) {
- (void) GC_is_valid_displacement(result);
+ (void) GC_is_valid_displacement(result);
}
*p = result;
return(initial);
diff --git a/real_malloc.c b/real_malloc.c
index 01be5f0f..cb14edcc 100644
--- a/real_malloc.c
+++ b/real_malloc.c
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
*
@@ -31,8 +31,7 @@ void * real_malloc(size_t size)
# else
extern int GC_quiet;
- /* ANSI C doesn't allow translation units to be empty. */
- /* So we guarantee this one is nonempty. */
+ /* ANSI C doesn't allow translation units to be empty. */
+ /* So we guarantee this one is nonempty. */
#endif /* PCR */
-
diff --git a/reclaim.c b/reclaim.c
index 12afa918..159315f8 100644
--- a/reclaim.c
+++ b/reclaim.c
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1996 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
@@ -18,20 +18,20 @@
#include "private/gc_priv.h"
signed_word GC_bytes_found = 0;
- /* Number of bytes of memory reclaimed */
- /* minus the number of bytes originally */
- /* on free lists which we had to drop. */
+ /* Number of bytes of memory reclaimed */
+ /* minus the number of bytes originally */
+ /* on free lists which we had to drop. */
#if defined(PARALLEL_MARK)
word GC_fl_builder_count = 0;
- /* Number of threads currently building free lists without */
- /* holding GC lock. It is not safe to collect if this is */
- /* nonzero. */
+ /* Number of threads currently building free lists without */
+ /* holding GC lock. It is not safe to collect if this is */
+ /* nonzero. */
#endif /* PARALLEL_MARK */
-/* We defer printing of leaked objects until we're done with the GC */
-/* cycle, since the routine for printing objects needs to run outside */
-/* the collector, e.g. without the allocation lock. */
+/* We defer printing of leaked objects until we're done with the GC */
+/* cycle, since the routine for printing objects needs to run outside */
+/* the collector, e.g. without the allocation lock. */
#define MAX_LEAKED 40
ptr_t GC_leaked[MAX_LEAKED];
STATIC unsigned GC_n_leaked = 0;
@@ -49,31 +49,31 @@ STATIC void GC_add_leaked(ptr_t leaked)
}
static GC_bool printing_errors = FALSE;
-/* Print all objects on the list after printing any smashed objs. */
-/* Clear both lists. */
+/* Print all objects on the list after printing any smashed objs. */
+/* Clear both lists. */
void GC_print_all_errors (void)
{
unsigned i;
LOCK();
if (printing_errors) {
- UNLOCK();
- return;
+ UNLOCK();
+ return;
}
printing_errors = TRUE;
UNLOCK();
if (GC_debugging_started) GC_print_all_smashed();
for (i = 0; i < GC_n_leaked; ++i) {
- ptr_t p = GC_leaked[i];
- if (HDR(p) -> hb_obj_kind == PTRFREE) {
- GC_err_printf("Leaked atomic object at ");
- } else {
- GC_err_printf("Leaked composite object at ");
- }
- GC_print_heap_obj(p);
- GC_err_printf("\n");
- GC_free(p);
- GC_leaked[i] = 0;
+ ptr_t p = GC_leaked[i];
+ if (HDR(p) -> hb_obj_kind == PTRFREE) {
+ GC_err_printf("Leaked atomic object at ");
+ } else {
+ GC_err_printf("Leaked composite object at ");
+ }
+ GC_print_heap_obj(p);
+ GC_err_printf("\n");
+ GC_free(p);
+ GC_leaked[i] = 0;
}
GC_n_leaked = 0;
printing_errors = FALSE;
@@ -91,7 +91,7 @@ void GC_print_all_errors (void)
* objects. This does not require the block to be in physical
* memory.
*/
-
+
GC_bool GC_block_empty(hdr *hhdr)
{
return (hhdr -> hb_n_marks == 0);
@@ -102,8 +102,8 @@ STATIC GC_bool GC_block_nearly_full(hdr *hhdr)
return (hhdr -> hb_n_marks > 7 * HBLK_OBJS(hhdr -> hb_sz)/8);
}
-/* FIXME: This should perhaps again be specialized for USE_MARK_BYTES */
-/* and USE_MARK_BITS cases. */
+/* FIXME: This should perhaps again be specialized for USE_MARK_BYTES */
+/* and USE_MARK_BITS cases. */
/*
* Restore unmarked small objects in h of size sz to the object
@@ -111,12 +111,12 @@ STATIC GC_bool GC_block_nearly_full(hdr *hhdr)
* Clears unmarked objects. Sz is in bytes.
*/
STATIC ptr_t GC_reclaim_clear(struct hblk *hbp, hdr *hhdr, size_t sz,
- ptr_t list, signed_word *count)
+ ptr_t list, signed_word *count)
{
word bit_no = 0;
word *p, *q, *plim;
signed_word n_bytes_found = 0;
-
+
GC_ASSERT(hhdr == GC_find_header((ptr_t)hbp));
GC_ASSERT(sz == hhdr -> hb_sz);
GC_ASSERT((sz & (BYTES_PER_WORD-1)) == 0);
@@ -124,61 +124,61 @@ STATIC ptr_t GC_reclaim_clear(struct hblk *hbp, hdr *hhdr, size_t sz,
plim = (word *)(hbp->hb_body + HBLKSIZE - sz);
/* go through all words in block */
- while( p <= plim ) {
- if( mark_bit_from_hdr(hhdr, bit_no) ) {
- p = (word *)((ptr_t)p + sz);
- } else {
- n_bytes_found += sz;
- /* object is available - put on list */
- obj_link(p) = list;
- list = ((ptr_t)p);
- /* Clear object, advance p to next object in the process */
- q = (word *)((ptr_t)p + sz);
-# ifdef USE_MARK_BYTES
- GC_ASSERT(!(sz & 1)
- && !((word)p & (2 * sizeof(word) - 1)));
- p[1] = 0;
+ while( p <= plim ) {
+ if( mark_bit_from_hdr(hhdr, bit_no) ) {
+ p = (word *)((ptr_t)p + sz);
+ } else {
+ n_bytes_found += sz;
+ /* object is available - put on list */
+ obj_link(p) = list;
+ list = ((ptr_t)p);
+ /* Clear object, advance p to next object in the process */
+ q = (word *)((ptr_t)p + sz);
+# ifdef USE_MARK_BYTES
+ GC_ASSERT(!(sz & 1)
+ && !((word)p & (2 * sizeof(word) - 1)));
+ p[1] = 0;
p += 2;
while (p < q) {
- CLEAR_DOUBLE(p);
- p += 2;
- }
-# else
+ CLEAR_DOUBLE(p);
+ p += 2;
+ }
+# else
p++; /* Skip link field */
while (p < q) {
- *p++ = 0;
- }
-# endif
- }
- bit_no += MARK_BIT_OFFSET(sz);
- }
+ *p++ = 0;
+ }
+# endif
+ }
+ bit_no += MARK_BIT_OFFSET(sz);
+ }
*count += n_bytes_found;
return(list);
}
/* The same thing, but don't clear objects: */
STATIC ptr_t GC_reclaim_uninit(struct hblk *hbp, hdr *hhdr, size_t sz,
- ptr_t list, signed_word *count)
+ ptr_t list, signed_word *count)
{
word bit_no = 0;
word *p, *plim;
signed_word n_bytes_found = 0;
-
+
GC_ASSERT(sz == hhdr -> hb_sz);
p = (word *)(hbp->hb_body);
plim = (word *)((ptr_t)hbp + HBLKSIZE - sz);
/* go through all words in block */
- while( p <= plim ) {
- if( !mark_bit_from_hdr(hhdr, bit_no) ) {
- n_bytes_found += sz;
- /* object is available - put on list */
- obj_link(p) = list;
- list = ((ptr_t)p);
- }
- p = (word *)((ptr_t)p + sz);
- bit_no += MARK_BIT_OFFSET(sz);
- }
+ while( p <= plim ) {
+ if( !mark_bit_from_hdr(hhdr, bit_no) ) {
+ n_bytes_found += sz;
+ /* object is available - put on list */
+ obj_link(p) = list;
+ list = ((ptr_t)p);
+ }
+ p = (word *)((ptr_t)p + sz);
+ bit_no += MARK_BIT_OFFSET(sz);
+ }
*count += n_bytes_found;
return(list);
}
@@ -188,19 +188,19 @@ STATIC void GC_reclaim_check(struct hblk *hbp, hdr *hhdr, word sz)
{
word bit_no = 0;
ptr_t p, plim;
-
+
GC_ASSERT(sz == hhdr -> hb_sz);
p = hbp->hb_body;
plim = p + HBLKSIZE - sz;
/* go through all words in block */
- while( p <= plim ) {
- if( !mark_bit_from_hdr(hhdr, bit_no) ) {
- GC_add_leaked(p);
- }
- p += sz;
- bit_no += MARK_BIT_OFFSET(sz);
- }
+ while( p <= plim ) {
+ if( !mark_bit_from_hdr(hhdr, bit_no) ) {
+ GC_add_leaked(p);
+ }
+ p += sz;
+ bit_no += MARK_BIT_OFFSET(sz);
+ }
}
@@ -210,7 +210,7 @@ STATIC void GC_reclaim_check(struct hblk *hbp, hdr *hhdr, word sz)
* Sz is now in bytes.
*/
ptr_t GC_reclaim_generic(struct hblk * hbp, hdr *hhdr, size_t sz,
- GC_bool init, ptr_t list, signed_word *count)
+ GC_bool init, ptr_t list, signed_word *count)
{
ptr_t result;
@@ -221,7 +221,7 @@ ptr_t GC_reclaim_generic(struct hblk * hbp, hdr *hhdr, size_t sz,
} else {
GC_ASSERT((hhdr)->hb_descr == 0 /* Pointer-free block */);
result = GC_reclaim_uninit(hbp, hhdr, sz, list, count);
- }
+ }
if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) GC_set_hdr_marks(hhdr);
return result;
}
@@ -233,22 +233,22 @@ ptr_t GC_reclaim_generic(struct hblk * hbp, hdr *hhdr, size_t sz,
* caller should perform that check.
*/
STATIC void GC_reclaim_small_nonempty_block(struct hblk *hbp,
- int report_if_found)
+ int report_if_found)
{
hdr *hhdr = HDR(hbp);
size_t sz = hhdr -> hb_sz;
int kind = hhdr -> hb_obj_kind;
struct obj_kind * ok = &GC_obj_kinds[kind];
void **flh = &(ok -> ok_freelist[BYTES_TO_GRANULES(sz)]);
-
+
hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
if (report_if_found) {
- GC_reclaim_check(hbp, hhdr, sz);
+ GC_reclaim_check(hbp, hhdr, sz);
} else {
*flh = GC_reclaim_generic(hbp, hhdr, sz,
- ok -> ok_init,
- *flh, &GC_bytes_found);
+ ok -> ok_init,
+ *flh, &GC_bytes_found);
}
}
@@ -263,49 +263,49 @@ STATIC void GC_reclaim_small_nonempty_block(struct hblk *hbp,
STATIC void GC_reclaim_block(struct hblk *hbp, word report_if_found)
{
hdr * hhdr = HDR(hbp);
- size_t sz = hhdr -> hb_sz; /* size of objects in current block */
+ size_t sz = hhdr -> hb_sz; /* size of objects in current block */
struct obj_kind * ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
struct hblk ** rlh;
if( sz > MAXOBJBYTES ) { /* 1 big object */
if( !mark_bit_from_hdr(hhdr, 0) ) {
- if (report_if_found) {
- GC_add_leaked((ptr_t)hbp);
- } else {
- size_t blocks = OBJ_SZ_TO_BLOCKS(sz);
- if (blocks > 1) {
- GC_large_allocd_bytes -= blocks * HBLKSIZE;
- }
- GC_bytes_found += sz;
- GC_freehblk(hbp);
- }
- } else {
- if (hhdr -> hb_descr != 0) {
- GC_composite_in_use += sz;
- } else {
- GC_atomic_in_use += sz;
- }
- }
+ if (report_if_found) {
+ GC_add_leaked((ptr_t)hbp);
+ } else {
+ size_t blocks = OBJ_SZ_TO_BLOCKS(sz);
+ if (blocks > 1) {
+ GC_large_allocd_bytes -= blocks * HBLKSIZE;
+ }
+ GC_bytes_found += sz;
+ GC_freehblk(hbp);
+ }
+ } else {
+ if (hhdr -> hb_descr != 0) {
+ GC_composite_in_use += sz;
+ } else {
+ GC_atomic_in_use += sz;
+ }
+ }
} else {
GC_bool empty = GC_block_empty(hhdr);
-# ifdef PARALLEL_MARK
- /* Count can be low or one too high because we sometimes */
- /* have to ignore decrements. Objects can also potentially */
- /* be repeatedly marked by each marker. */
- /* Here we assume two markers, but this is extremely */
- /* unlikely to fail spuriously with more. And if it does, it */
- /* should be looked at. */
- GC_ASSERT(hhdr -> hb_n_marks <= 2 * (HBLKSIZE/sz + 1) + 16);
-# else
- GC_ASSERT(sz * hhdr -> hb_n_marks <= HBLKSIZE);
-# endif
- if (hhdr -> hb_descr != 0) {
- GC_composite_in_use += sz * hhdr -> hb_n_marks;
- } else {
- GC_atomic_in_use += sz * hhdr -> hb_n_marks;
- }
+# ifdef PARALLEL_MARK
+ /* Count can be low or one too high because we sometimes */
+ /* have to ignore decrements. Objects can also potentially */
+ /* be repeatedly marked by each marker. */
+ /* Here we assume two markers, but this is extremely */
+ /* unlikely to fail spuriously with more. And if it does, it */
+ /* should be looked at. */
+ GC_ASSERT(hhdr -> hb_n_marks <= 2 * (HBLKSIZE/sz + 1) + 16);
+# else
+ GC_ASSERT(sz * hhdr -> hb_n_marks <= HBLKSIZE);
+# endif
+ if (hhdr -> hb_descr != 0) {
+ GC_composite_in_use += sz * hhdr -> hb_n_marks;
+ } else {
+ GC_atomic_in_use += sz * hhdr -> hb_n_marks;
+ }
if (report_if_found) {
- GC_reclaim_small_nonempty_block(hbp, (int)report_if_found);
+ GC_reclaim_small_nonempty_block(hbp, (int)report_if_found);
} else if (empty) {
GC_bytes_found += HBLKSIZE;
GC_freehblk(hbp);
@@ -315,27 +315,27 @@ STATIC void GC_reclaim_block(struct hblk *hbp, word report_if_found)
hhdr -> hb_next = *rlh;
*rlh = hbp;
} /* else not worth salvaging. */
- /* We used to do the nearly_full check later, but we */
- /* already have the right cache context here. Also */
- /* doing it here avoids some silly lock contention in */
- /* GC_malloc_many. */
+ /* We used to do the nearly_full check later, but we */
+ /* already have the right cache context here. Also */
+ /* doing it here avoids some silly lock contention in */
+ /* GC_malloc_many. */
}
}
#if !defined(NO_DEBUGGING)
-/* Routines to gather and print heap block info */
-/* intended for debugging. Otherwise should be called */
-/* with lock. */
+/* Routines to gather and print heap block info */
+/* intended for debugging. Otherwise should be called */
+/* with lock. */
struct Print_stats
{
- size_t number_of_blocks;
- size_t total_bytes;
+ size_t number_of_blocks;
+ size_t total_bytes;
};
#ifdef USE_MARK_BYTES
-/* Return the number of set mark bits in the given header */
+/* Return the number of set mark bits in the given header */
STATIC int GC_n_set_marks(hdr *hhdr)
{
int result = 0;
@@ -353,20 +353,20 @@ STATIC int GC_n_set_marks(hdr *hhdr)
#else
-/* Number of set bits in a word. Not performance critical. */
+/* Number of set bits in a word. Not performance critical. */
static int set_bits(word n)
{
word m = n;
int result = 0;
-
+
while (m > 0) {
- if (m & 1) result++;
- m >>= 1;
+ if (m & 1) result++;
+ m >>= 1;
}
return(result);
}
-/* Return the number of set mark bits in the given header */
+/* Return the number of set mark bits in the given header */
STATIC int GC_n_set_marks(hdr *hhdr)
{
int result = 0;
@@ -374,7 +374,7 @@ STATIC int GC_n_set_marks(hdr *hhdr)
int n_mark_words;
# ifdef MARK_BIT_PER_OBJ
int n_objs = (int)HBLK_OBJS(hhdr -> hb_sz);
-
+
if (0 == n_objs) n_objs = 1;
n_mark_words = divWORDSZ(n_objs + WORDSZ - 1);
# else /* MARK_BIT_PER_GRANULE */
@@ -385,7 +385,7 @@ STATIC int GC_n_set_marks(hdr *hhdr)
}
# ifdef MARK_BIT_PER_OBJ
result += set_bits((hhdr -> hb_marks[n_mark_words - 1])
- << (n_mark_words * WORDSZ - n_objs));
+ << (n_mark_words * WORDSZ - n_objs));
# else
result += set_bits(hhdr -> hb_marks[n_mark_words - 1]);
# endif
@@ -395,19 +395,19 @@ STATIC int GC_n_set_marks(hdr *hhdr)
#endif /* !USE_MARK_BYTES */
STATIC void GC_print_block_descr(struct hblk *h,
- word /* struct PrintStats */ raw_ps)
+ word /* struct PrintStats */ raw_ps)
{
hdr * hhdr = HDR(h);
size_t bytes = hhdr -> hb_sz;
struct Print_stats *ps;
unsigned n_marks = GC_n_set_marks(hhdr);
-
+
if (hhdr -> hb_n_marks != n_marks) {
GC_printf("(%u:%u,%u!=%u)", hhdr -> hb_obj_kind, (unsigned)bytes,
- (unsigned)hhdr -> hb_n_marks, n_marks);
+ (unsigned)hhdr -> hb_n_marks, n_marks);
} else {
GC_printf("(%u:%u,%u)", hhdr -> hb_obj_kind,
- (unsigned)bytes, n_marks);
+ (unsigned)bytes, n_marks);
}
bytes += HBLKSIZE-1;
bytes &= ~(HBLKSIZE-1);
@@ -426,8 +426,8 @@ void GC_print_block_list(void)
pstats.total_bytes = 0;
GC_apply_to_all_blocks(GC_print_block_descr, (word)&pstats);
GC_printf("\nblocks = %lu, bytes = %lu\n",
- (unsigned long)pstats.number_of_blocks,
- (unsigned long)pstats.total_bytes);
+ (unsigned long)pstats.number_of_blocks,
+ (unsigned long)pstats.total_bytes);
}
/* Currently for debugger use only: */
@@ -476,7 +476,7 @@ STATIC void GC_clear_fl_links(void **flp)
void GC_start_reclaim(GC_bool report_if_found)
{
unsigned kind;
-
+
# if defined(PARALLEL_MARK)
GC_ASSERT(0 == GC_fl_builder_count);
# endif
@@ -490,42 +490,42 @@ void GC_start_reclaim(GC_bool report_if_found)
struct hblk ** rlp;
struct hblk ** rlim;
struct hblk ** rlist = GC_obj_kinds[kind].ok_reclaim_list;
- GC_bool should_clobber = (GC_obj_kinds[kind].ok_descriptor != 0);
-
- if (rlist == 0) continue; /* This kind not used. */
+ GC_bool should_clobber = (GC_obj_kinds[kind].ok_descriptor != 0);
+
+ if (rlist == 0) continue; /* This kind not used. */
if (!report_if_found) {
lim = &(GC_obj_kinds[kind].ok_freelist[MAXOBJGRANULES+1]);
- for( fop = GC_obj_kinds[kind].ok_freelist; fop < lim; fop++ ) {
- if (*fop != 0) {
- if (should_clobber) {
- GC_clear_fl_links(fop);
- } else {
- *fop = 0;
- }
- }
- }
- } /* otherwise free list objects are marked, */
- /* and its safe to leave them */
- rlim = rlist + MAXOBJGRANULES+1;
- for( rlp = rlist; rlp < rlim; rlp++ ) {
- *rlp = 0;
- }
+ for( fop = GC_obj_kinds[kind].ok_freelist; fop < lim; fop++ ) {
+ if (*fop != 0) {
+ if (should_clobber) {
+ GC_clear_fl_links(fop);
+ } else {
+ *fop = 0;
+ }
+ }
+ }
+ } /* otherwise free list objects are marked, */
+ /* and its safe to leave them */
+ rlim = rlist + MAXOBJGRANULES+1;
+ for( rlp = rlist; rlp < rlim; rlp++ ) {
+ *rlp = 0;
+ }
}
-
+
/* Go through all heap blocks (in hblklist) and reclaim unmarked objects */
- /* or enqueue the block for later processing. */
+ /* or enqueue the block for later processing. */
GC_apply_to_all_blocks(GC_reclaim_block, (word)report_if_found);
# ifdef EAGER_SWEEP
- /* This is a very stupid thing to do. We make it possible anyway, */
- /* so that you can convince yourself that it really is very stupid. */
+ /* This is a very stupid thing to do. We make it possible anyway, */
+ /* so that you can convince yourself that it really is very stupid. */
GC_reclaim_all((GC_stop_func)0, FALSE);
# endif
# if defined(PARALLEL_MARK)
GC_ASSERT(0 == GC_fl_builder_count);
# endif
-
+
}
/*
@@ -540,8 +540,8 @@ void GC_continue_reclaim(size_t sz /* granules */, int kind)
struct obj_kind * ok = &(GC_obj_kinds[kind]);
struct hblk ** rlh = ok -> ok_reclaim_list;
void **flh = &(ok -> ok_freelist[sz]);
-
- if (rlh == 0) return; /* No blocks of this kind. */
+
+ if (rlh == 0) return; /* No blocks of this kind. */
rlh += sz;
while ((hbp = *rlh) != 0) {
hhdr = HDR(hbp);
@@ -556,7 +556,7 @@ void GC_continue_reclaim(size_t sz /* granules */, int kind)
* Abort and return FALSE when/if (*stop_func)() returns TRUE.
* If this returns TRUE, then it's safe to restart the world
* with incorrectly cleared mark bits.
- * If ignore_old is TRUE, then reclaim only blocks that have been
+ * If ignore_old is TRUE, then reclaim only blocks that have been
* recently reclaimed, and discard the rest.
* Stop_func may be 0.
*/
@@ -572,37 +572,37 @@ GC_bool GC_reclaim_all(GC_stop_func stop_func, GC_bool ignore_old)
# ifndef SMALL_CONFIG
CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
CLOCK_TYPE done_time;
-
+
if (GC_print_stats == VERBOSE)
- GET_TIME(start_time);
+ GET_TIME(start_time);
# endif
-
+
for (kind = 0; kind < GC_n_kinds; kind++) {
- ok = &(GC_obj_kinds[kind]);
- rlp = ok -> ok_reclaim_list;
- if (rlp == 0) continue;
- for (sz = 1; sz <= MAXOBJGRANULES; sz++) {
- rlh = rlp + sz;
- while ((hbp = *rlh) != 0) {
- if (stop_func != (GC_stop_func)0 && (*stop_func)()) {
- return(FALSE);
- }
- hhdr = HDR(hbp);
- *rlh = hhdr -> hb_next;
- if (!ignore_old || hhdr -> hb_last_reclaimed == GC_gc_no - 1) {
- /* It's likely we'll need it this time, too */
- /* It's been touched recently, so this */
- /* shouldn't trigger paging. */
- GC_reclaim_small_nonempty_block(hbp, FALSE);
- }
+ ok = &(GC_obj_kinds[kind]);
+ rlp = ok -> ok_reclaim_list;
+ if (rlp == 0) continue;
+ for (sz = 1; sz <= MAXOBJGRANULES; sz++) {
+ rlh = rlp + sz;
+ while ((hbp = *rlh) != 0) {
+ if (stop_func != (GC_stop_func)0 && (*stop_func)()) {
+ return(FALSE);
+ }
+ hhdr = HDR(hbp);
+ *rlh = hhdr -> hb_next;
+ if (!ignore_old || hhdr -> hb_last_reclaimed == GC_gc_no - 1) {
+ /* It's likely we'll need it this time, too */
+ /* It's been touched recently, so this */
+ /* shouldn't trigger paging. */
+ GC_reclaim_small_nonempty_block(hbp, FALSE);
+ }
}
}
}
# ifndef SMALL_CONFIG
if (GC_print_stats == VERBOSE) {
- GET_TIME(done_time);
- GC_log_printf("Disposing of reclaim lists took %lu msecs\n",
- MS_TIME_DIFF(done_time,start_time));
+ GET_TIME(done_time);
+ GC_log_printf("Disposing of reclaim lists took %lu msecs\n",
+ MS_TIME_DIFF(done_time,start_time));
}
# endif
return(TRUE);
diff --git a/stubborn.c b/stubborn.c
index e5d08e20..9e8419ed 100644
--- a/stubborn.c
+++ b/stubborn.c
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
*
@@ -18,10 +18,10 @@
#if defined(MANUAL_VDB)
/* Stubborn object (hard to change, nearly immutable) allocation. */
-/* This interface is deprecated. We mostly emulate it using */
-/* MANUAL_VDB. But that imposes the additional constraint that */
-/* written, but not yet GC_dirty()ed objects must be referenced */
-/* by a stack. */
+/* This interface is deprecated. We mostly emulate it using */
+/* MANUAL_VDB. But that imposes the additional constraint that */
+/* written, but not yet GC_dirty()ed objects must be referenced */
+/* by a stack. */
GC_API void * GC_CALL GC_malloc_stubborn(size_t lb)
{
return(GC_malloc(lb));
diff --git a/tests/test.c b/tests/test.c
index 01037dde..a14024b8 100644
--- a/tests/test.c
+++ b/tests/test.c
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996 by Silicon Graphics. All rights reserved.
@@ -12,11 +12,11 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* An incomplete test for the garbage collector. */
-/* Some more obscure entry points are not tested at all. */
-/* This must be compiled with the same flags used to build the */
-/* GC. It uses GC internals to allow more precise results */
-/* checking for some of the tests. */
+/* An incomplete test for the garbage collector. */
+/* Some more obscure entry points are not tested at all. */
+/* This must be compiled with the same flags used to build the */
+/* GC. It uses GC internals to allow more precise results */
+/* checking for some of the tests. */
# ifdef HAVE_CONFIG_H
# include "private/config.h"
@@ -26,7 +26,7 @@
#ifndef NTHREADS /* Number of additional threads to fork. */
# define NTHREADS 5 /* excludes main thread, which also runs a test. */
- /* Not respected by PCR test. */
+ /* Not respected by PCR test. */
#endif
#if (defined(DBG_HDRS_ALL) || defined(MAKE_BACK_GRAPH)) && !defined(GC_DEBUG)
@@ -47,8 +47,8 @@
# endif
# include "gc.h"
# include "gc_typed.h"
-# include "private/gc_priv.h" /* For output, locking, MIN_WORDS, */
- /* and some statistics, and gcconfig.h. */
+# include "private/gc_priv.h" /* For output, locking, MIN_WORDS, */
+ /* and some statistics, and gcconfig.h. */
# if defined(MSWIN32) || defined(MSWINCE)
# include <windows.h>
@@ -58,8 +58,8 @@
# ifdef GC_PRINT_VERBOSE_STATS
# define GC_print_stats VERBOSE
# else
-# define GC_print_stats 0 /* Not exported from DLL */
- /* Redefine to 1 to generate output. */
+# define GC_print_stats 0 /* Not exported from DLL */
+ /* Redefine to 1 to generate output. */
# endif
# endif
@@ -79,17 +79,17 @@
# include <stdarg.h>
-/* Call GC_INIT only on platforms on which we think we really need it, */
-/* so that we can test automatic initialization on the rest. */
+/* Call GC_INIT only on platforms on which we think we really need it, */
+/* so that we can test automatic initialization on the rest. */
#if defined(CYGWIN32) || defined (AIX) || defined(DARWIN) \
- || defined(THREAD_LOCAL_ALLOC)
+ || defined(THREAD_LOCAL_ALLOC)
# define GC_COND_INIT() GC_INIT()
#else
# define GC_COND_INIT()
#endif
/* Allocation Statistics. Incremented without synchronization. */
-/* FIXME: We should be using synchronization. */
+/* FIXME: We should be using synchronization. */
int stubborn_count = 0;
int uncollectable_count = 0;
int collectable_count = 0;
@@ -104,13 +104,13 @@ int realloc_count = 0;
void *GC_amiga_gctest_malloc_explicitly_typed(size_t lb, GC_descr d){
void *ret=GC_malloc_explicitly_typed(lb,d);
if(ret==NULL){
- if(!GC_dont_gc){
- GC_gcollect();
- ret=GC_malloc_explicitly_typed(lb,d);
- }
+ if(!GC_dont_gc){
+ GC_gcollect();
+ ret=GC_malloc_explicitly_typed(lb,d);
+ }
if(ret==NULL){
GC_printf("Out of memory, (typed allocations are not directly "
- "supported with the GC_AMIGA_FASTALLOC option.)\n");
+ "supported with the GC_AMIGA_FASTALLOC option.)\n");
FAIL;
}
}
@@ -119,20 +119,20 @@ int realloc_count = 0;
void *GC_amiga_gctest_calloc_explicitly_typed(size_t a,size_t lb, GC_descr d){
void *ret=GC_calloc_explicitly_typed(a,lb,d);
if(ret==NULL){
- if(!GC_dont_gc){
- GC_gcollect();
- ret=GC_calloc_explicitly_typed(a,lb,d);
- }
+ if(!GC_dont_gc){
+ GC_gcollect();
+ ret=GC_calloc_explicitly_typed(a,lb,d);
+ }
if(ret==NULL){
GC_printf("Out of memory, (typed allocations are not directly "
- "supported with the GC_AMIGA_FASTALLOC option.)\n");
+ "supported with the GC_AMIGA_FASTALLOC option.)\n");
FAIL;
}
}
return ret;
}
-# define GC_malloc_explicitly_typed(a,b) GC_amiga_gctest_malloc_explicitly_typed(a,b)
-# define GC_calloc_explicitly_typed(a,b,c) GC_amiga_gctest_calloc_explicitly_typed(a,b,c)
+# define GC_malloc_explicitly_typed(a,b) GC_amiga_gctest_malloc_explicitly_typed(a,b)
+# define GC_calloc_explicitly_typed(a,b,c) GC_amiga_gctest_calloc_explicitly_typed(a,b,c)
#else /* !AMIGA_FASTALLOC */
@@ -144,11 +144,11 @@ int realloc_count = 0;
#endif /* !AMIGA_FASTALLOC */
-/* AT_END may be defined to exercise the interior pointer test */
+/* AT_END may be defined to exercise the interior pointer test */
/* if the collector is configured with ALL_INTERIOR_POINTERS. */
-/* As it stands, this test should succeed with either */
-/* configuration. In the FIND_LEAK configuration, it should */
-/* find lots of leaks, since we free almost nothing. */
+/* As it stands, this test should succeed with either */
+/* configuration. In the FIND_LEAK configuration, it should */
+/* find lots of leaks, since we free almost nothing. */
struct SEXPR {
struct SEXPR * sexpr_car;
@@ -180,7 +180,7 @@ sexpr cons (sexpr x, sexpr y)
sexpr r;
int *p;
int my_extra = extra_count;
-
+
stubborn_count++;
r = (sexpr) GC_MALLOC_STUBBORN(sizeof(struct SEXPR) + my_extra);
if (r == 0) {
@@ -189,14 +189,14 @@ sexpr cons (sexpr x, sexpr y)
}
for (p = (int *)r;
((char *)p) < ((char *)r) + my_extra + sizeof(struct SEXPR); p++) {
- if (*p) {
- (void)GC_printf("Found nonzero at %p - allocator is broken\n", p);
- FAIL;
+ if (*p) {
+ (void)GC_printf("Found nonzero at %p - allocator is broken\n", p);
+ FAIL;
}
*p = (int)((13 << 12) + ((p - (int *)r) & 0xfff));
}
# ifdef AT_END
- r = (sexpr)((char *)r + (my_extra & ~7));
+ r = (sexpr)((char *)r + (my_extra & ~7));
# endif
r -> sexpr_car = x;
r -> sexpr_cdr = y;
@@ -216,37 +216,37 @@ sexpr cons (sexpr x, sexpr y)
#include "gc_mark.h"
#include "gc_gcj.h"
-/* The following struct emulates the vtable in gcj. */
+/* The following struct emulates the vtable in gcj. */
/* This assumes the default value of MARK_DESCR_OFFSET. */
struct fake_vtable {
- void * dummy; /* class pointer in real gcj. */
+ void * dummy; /* class pointer in real gcj. */
GC_word descr;
};
struct fake_vtable gcj_class_struct1 = { 0, sizeof(struct SEXPR)
- + sizeof(struct fake_vtable *) };
- /* length based descriptor. */
+ + sizeof(struct fake_vtable *) };
+ /* length based descriptor. */
struct fake_vtable gcj_class_struct2 =
- { 0, ((GC_word)3 << (CPP_WORDSZ - 3)) | GC_DS_BITMAP};
- /* Bitmap based descriptor. */
+ { 0, ((GC_word)3 << (CPP_WORDSZ - 3)) | GC_DS_BITMAP};
+ /* Bitmap based descriptor. */
struct GC_ms_entry * fake_gcj_mark_proc(word * addr,
- struct GC_ms_entry *mark_stack_ptr,
- struct GC_ms_entry *mark_stack_limit,
- word env )
+ struct GC_ms_entry *mark_stack_ptr,
+ struct GC_ms_entry *mark_stack_limit,
+ word env )
{
sexpr x;
if (1 == env) {
- /* Object allocated with debug allocator. */
- addr = (word *)GC_USR_PTR_FROM_BASE(addr);
+ /* Object allocated with debug allocator. */
+ addr = (word *)GC_USR_PTR_FROM_BASE(addr);
}
x = (sexpr)(addr + 1); /* Skip the vtable pointer. */
mark_stack_ptr = GC_MARK_AND_PUSH(
- (void *)(x -> sexpr_cdr), mark_stack_ptr,
- mark_stack_limit, (void * *)&(x -> sexpr_cdr));
+ (void *)(x -> sexpr_cdr), mark_stack_ptr,
+ mark_stack_limit, (void * *)&(x -> sexpr_cdr));
mark_stack_ptr = GC_MARK_AND_PUSH(
- (void *)(x -> sexpr_car), mark_stack_ptr,
- mark_stack_limit, (void * *)&(x -> sexpr_car));
+ (void *)(x -> sexpr_car), mark_stack_ptr,
+ mark_stack_limit, (void * *)&(x -> sexpr_car));
return(mark_stack_ptr);
}
@@ -256,7 +256,7 @@ struct GC_ms_entry * fake_gcj_mark_proc(word * addr,
sexpr small_cons (sexpr x, sexpr y)
{
sexpr r;
-
+
collectable_count++;
r = (sexpr) GC_MALLOC(sizeof(struct SEXPR));
if (r == 0) {
@@ -271,7 +271,7 @@ sexpr small_cons (sexpr x, sexpr y)
sexpr small_cons_uncollectable (sexpr x, sexpr y)
{
sexpr r;
-
+
uncollectable_count++;
r = (sexpr) GC_MALLOC_UNCOLLECTABLE(sizeof(struct SEXPR));
if (r == 0) {
@@ -290,10 +290,10 @@ sexpr gcj_cons(sexpr x, sexpr y)
{
GC_word * r;
sexpr result;
-
+
r = (GC_word *) GC_GCJ_MALLOC(sizeof(struct SEXPR)
- + sizeof(struct fake_vtable*),
- &gcj_class_struct2);
+ + sizeof(struct fake_vtable*),
+ &gcj_class_struct2);
if (r == 0) {
(void)GC_printf("Out of memory\n");
exit(1);
@@ -326,7 +326,7 @@ sexpr reverse(sexpr x)
sexpr ints(int low, int up)
{
if (low > up) {
- return(nil);
+ return(nil);
} else {
return(small_cons(small_cons(INT_TO_SEXPR(low), nil), ints(low+1, up)));
}
@@ -351,19 +351,19 @@ sexpr gcj_reverse(sexpr x)
sexpr gcj_ints(int low, int up)
{
if (low > up) {
- return(nil);
+ return(nil);
} else {
return(gcj_cons(gcj_cons(INT_TO_SEXPR(low), nil), gcj_ints(low+1, up)));
}
}
#endif /* GC_GCJ_SUPPORT */
-/* To check uncollectable allocation we build lists with disguised cdr */
-/* pointers, and make sure they don't go away. */
+/* To check uncollectable allocation we build lists with disguised cdr */
+/* pointers, and make sure they don't go away. */
sexpr uncollectable_ints(int low, int up)
{
if (low > up) {
- return(nil);
+ return(nil);
} else {
return(small_cons_uncollectable(small_cons(INT_TO_SEXPR(low), nil),
uncollectable_ints(low+1, up)));
@@ -426,9 +426,9 @@ void print_int_list(sexpr x)
void check_marks_int_list(sexpr x)
{
if (!GC_is_marked((ptr_t)x))
- GC_printf("[unm:%p]", x);
+ GC_printf("[unm:%p]", x);
else
- GC_printf("[mkd:%p]", x);
+ GC_printf("[mkd:%p]", x);
if (is_nil(x)) {
(void)GC_printf("NIL\n");
} else {
@@ -463,7 +463,7 @@ void check_marks_int_list(sexpr x)
int i;
for (i = 0; i < 5; ++i) {
check_ints(reverse(reverse(ints(1, TINY_REVERSE_UPPER_VALUE))),
- 1, TINY_REVERSE_UPPER_VALUE);
+ 1, TINY_REVERSE_UPPER_VALUE);
}
return 0;
}
@@ -474,8 +474,8 @@ void check_marks_int_list(sexpr x)
pthread_t t;
int code;
if ((code = pthread_create(&t, 0, tiny_reverse_test, 0)) != 0) {
- (void)GC_printf("Small thread creation failed %d\n", code);
- FAIL;
+ (void)GC_printf("Small thread creation failed %d\n", code);
+ FAIL;
}
if ((code = pthread_join(t, 0)) != 0) {
(void)GC_printf("Small thread join failed %d\n", code);
@@ -486,24 +486,24 @@ void check_marks_int_list(sexpr x)
# elif defined(GC_WIN32_THREADS)
void fork_a_thread(void)
{
- DWORD thread_id;
- HANDLE h;
- h = GC_CreateThread(NULL, 0, tiny_reverse_test, 0, 0, &thread_id);
+ DWORD thread_id;
+ HANDLE h;
+ h = GC_CreateThread(NULL, 0, tiny_reverse_test, 0, 0, &thread_id);
if (h == (HANDLE)NULL) {
(void)GC_printf("Small thread creation failed %d\n",
- (int)GetLastError());
- FAIL;
+ (int)GetLastError());
+ FAIL;
+ }
+ if (WaitForSingleObject(h, INFINITE) != WAIT_OBJECT_0) {
+ (void)GC_printf("Small thread wait failed %d\n",
+ (int)GetLastError());
+ FAIL;
}
- if (WaitForSingleObject(h, INFINITE) != WAIT_OBJECT_0) {
- (void)GC_printf("Small thread wait failed %d\n",
- (int)GetLastError());
- FAIL;
- }
}
# endif
-#endif
+#endif
/* Try to force a to be strangely aligned */
struct {
@@ -529,20 +529,20 @@ void reverse_test(void)
# define BIG 1000
# else
# if defined(PCR)
- /* PCR default stack is 100K. Stack frames are up to 120 bytes. */
-# define BIG 700
+ /* PCR default stack is 100K. Stack frames are up to 120 bytes. */
+# define BIG 700
# else
-# if defined(MSWINCE)
- /* WinCE only allows 64K stacks */
-# define BIG 500
-# else
-# if defined(OSF1)
- /* OSF has limited stack space by default, and large frames. */
+# if defined(MSWINCE)
+ /* WinCE only allows 64K stacks */
+# define BIG 500
+# else
+# if defined(OSF1)
+ /* OSF has limited stack space by default, and large frames. */
# define BIG 200
-# else
+# else
# define BIG 4500
-# endif
-# endif
+# endif
+# endif
# endif
# endif
@@ -569,7 +569,7 @@ void reverse_test(void)
h = (sexpr *)GC_REALLOC((void *)h, 2000 * sizeof(sexpr));
# ifdef GC_GCJ_SUPPORT
h[1999] = gcj_ints(1,200);
- for (i = 0; i < 51; ++i)
+ for (i = 0; i < 51; ++i)
h[1999] = gcj_reverse(h[1999]);
/* Leave it as the reveresed list for now. */
# else
@@ -594,21 +594,21 @@ void reverse_test(void)
check_ints(b,1,50);
check_ints(a,1,49);
for (i = 0; i < 60; i++) {
-# if defined(GC_PTHREADS) || defined(GC_WIN32_THREADS)
- if (i % 10 == 0) fork_a_thread();
-# endif
- /* This maintains the invariant that a always points to a list of */
- /* 49 integers. Thus this is thread safe without locks, */
- /* assuming atomic pointer assignments. */
+# if defined(GC_PTHREADS) || defined(GC_WIN32_THREADS)
+ if (i % 10 == 0) fork_a_thread();
+# endif
+ /* This maintains the invariant that a always points to a list of */
+ /* 49 integers. Thus this is thread safe without locks, */
+ /* assuming atomic pointer assignments. */
a = reverse(reverse(a));
-# if !defined(AT_END) && !defined(THREADS)
- /* This is not thread safe, since realloc explicitly deallocates */
+# if !defined(AT_END) && !defined(THREADS)
+ /* This is not thread safe, since realloc explicitly deallocates */
if (i & 1) {
a = (sexpr)GC_REALLOC((void *)a, 500);
} else {
a = (sexpr)GC_REALLOC((void *)a, 8200);
}
-# endif
+# endif
}
check_ints(a,1,49);
check_ints(b,1,50);
@@ -623,8 +623,8 @@ void reverse_test(void)
# endif
check_ints(h[1999], 1,200);
# ifndef THREADS
- a = 0;
-# endif
+ a = 0;
+# endif
*(volatile void **)&b = 0;
*(volatile void **)&c = 0;
}
@@ -663,7 +663,7 @@ void GC_CALLBACK finalizer(void * obj, void * client_data)
FAIL;
}
finalized_count++;
- t -> level = -1; /* detect duplicate finalization immediately */
+ t -> level = -1; /* detect duplicate finalization immediately */
# ifdef PCR
PCR_ThCrSec_ExitSys();
# endif
@@ -690,14 +690,14 @@ int live_indicators_count = 0;
tn * mktree(int n)
{
tn * result = (tn *)GC_MALLOC(sizeof(tn));
-
+
collectable_count++;
# if defined(MACOS)
- /* get around static data limitations. */
- if (!live_indicators)
- live_indicators =
- (GC_word*)NewPtrClear(MAX_FINALIZED * sizeof(GC_word));
- if (!live_indicators) {
+ /* get around static data limitations. */
+ if (!live_indicators)
+ live_indicators =
+ (GC_word*)NewPtrClear(MAX_FINALIZED * sizeof(GC_word));
+ if (!live_indicators) {
(void)GC_printf("Out of memory\n");
exit(1);
}
@@ -712,62 +712,62 @@ tn * mktree(int n)
result -> rchild = mktree(n-1);
if (counter++ % 17 == 0 && n >= 2) {
tn * tmp = result -> lchild -> rchild;
-
+
result -> lchild -> rchild = result -> rchild -> lchild;
result -> rchild -> lchild = tmp;
}
if (counter++ % 119 == 0) {
int my_index;
-
+
{
-# ifdef PCR
- PCR_ThCrSec_EnterSys();
-# endif
+# ifdef PCR
+ PCR_ThCrSec_EnterSys();
+# endif
# if defined(GC_PTHREADS)
static pthread_mutex_t incr_lock = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_lock(&incr_lock);
# elif defined(GC_WIN32_THREADS)
EnterCriticalSection(&incr_cs);
# endif
- /* Losing a count here causes erroneous report of failure. */
+ /* Losing a count here causes erroneous report of failure. */
finalizable_count++;
my_index = live_indicators_count++;
-# ifdef PCR
- PCR_ThCrSec_ExitSys();
-# endif
-# if defined(GC_PTHREADS)
- pthread_mutex_unlock(&incr_lock);
-# elif defined(GC_WIN32_THREADS)
+# ifdef PCR
+ PCR_ThCrSec_ExitSys();
+# endif
+# if defined(GC_PTHREADS)
+ pthread_mutex_unlock(&incr_lock);
+# elif defined(GC_WIN32_THREADS)
LeaveCriticalSection(&incr_cs);
# endif
- }
+ }
GC_REGISTER_FINALIZER((void *)result, finalizer, (void *)(GC_word)n,
- (GC_finalization_proc *)0, (void * *)0);
+ (GC_finalization_proc *)0, (void * *)0);
if (my_index >= MAX_FINALIZED) {
- GC_printf("live_indicators overflowed\n");
- FAIL;
- }
+ GC_printf("live_indicators overflowed\n");
+ FAIL;
+ }
live_indicators[my_index] = 13;
if (GC_GENERAL_REGISTER_DISAPPEARING_LINK(
- (void * *)(&(live_indicators[my_index])),
- (void *)result) != 0) {
- GC_printf("GC_general_register_disappearing_link failed\n");
- FAIL;
+ (void * *)(&(live_indicators[my_index])),
+ (void *)result) != 0) {
+ GC_printf("GC_general_register_disappearing_link failed\n");
+ FAIL;
}
if (GC_unregister_disappearing_link(
- (void * *)
- (&(live_indicators[my_index]))) == 0) {
- GC_printf("GC_unregister_disappearing_link failed\n");
- FAIL;
+ (void * *)
+ (&(live_indicators[my_index]))) == 0) {
+ GC_printf("GC_unregister_disappearing_link failed\n");
+ FAIL;
}
if (GC_GENERAL_REGISTER_DISAPPEARING_LINK(
- (void * *)(&(live_indicators[my_index])),
- (void *)result) != 0) {
- GC_printf("GC_general_register_disappearing_link failed 2\n");
- FAIL;
+ (void * *)(&(live_indicators[my_index])),
+ (void *)result) != 0) {
+ GC_printf("GC_general_register_disappearing_link failed 2\n");
+ FAIL;
}
- GC_reachable_here(result);
+ GC_reachable_here(result);
}
return(result);
}
@@ -784,13 +784,13 @@ void chktree(tn *t, int n)
FAIL;
}
if (counter++ % 373 == 0) {
- collectable_count++;
- (void) GC_MALLOC(counter%5001);
+ collectable_count++;
+ (void) GC_MALLOC(counter%5001);
}
chktree(t -> lchild, n-1);
if (counter++ % 73 == 0) {
- collectable_count++;
- (void) GC_MALLOC(counter%373);
+ collectable_count++;
+ (void) GC_MALLOC(counter%373);
}
chktree(t -> rchild, n-1);
}
@@ -807,14 +807,14 @@ void * alloc8bytes(void)
# else
void ** my_free_list_ptr;
void * my_free_list;
-
+
my_free_list_ptr = (void **)pthread_getspecific(fl_key);
if (my_free_list_ptr == 0) {
uncollectable_count++;
my_free_list_ptr = GC_NEW_UNCOLLECTABLE(void *);
if (pthread_setspecific(fl_key, my_free_list_ptr) != 0) {
- (void)GC_printf("pthread_setspecific failed\n");
- FAIL;
+ (void)GC_printf("pthread_setspecific failed\n");
+ FAIL;
}
}
my_free_list = *my_free_list_ptr;
@@ -822,7 +822,7 @@ void * alloc8bytes(void)
my_free_list = GC_malloc_many(8);
if (my_free_list == 0) {
(void)GC_printf("alloc8bytes out of memory\n");
- FAIL;
+ FAIL;
}
}
*my_free_list_ptr = GC_NEXT(my_free_list);
@@ -839,7 +839,7 @@ void * alloc8bytes(void)
void alloc_small(int n)
{
int i;
-
+
for (i = 0; i < n; i += 8) {
atomic_count++;
if (alloc8bytes() == 0) {
@@ -866,7 +866,7 @@ void tree_test(void)
{
tn * root;
int i;
-
+
root = mktree(TREE_HEIGHT);
# ifndef VERY_SMALL_CONFIG
alloc_small(5000000);
@@ -877,8 +877,8 @@ void tree_test(void)
FAIL;
}
dropped_something = 1;
- GC_noop1((word)root); /* Root needs to remain live until */
- /* dropped_something is set. */
+ GC_noop1((word)root); /* Root needs to remain live until */
+ /* dropped_something is set. */
root = mktree(TREE_HEIGHT);
chktree(root, TREE_HEIGHT);
for (i = TREE_HEIGHT; i >= 0; i--) {
@@ -905,7 +905,7 @@ GC_word bm_huge[10] = {
0x00ffffff,
};
-/* A very simple test of explicitly typed allocation */
+/* A very simple test of explicitly typed allocation */
void typed_test(void)
{
GC_word * old, * new;
@@ -918,50 +918,50 @@ void typed_test(void)
GC_descr d4 = GC_make_descriptor(bm_huge, 320);
GC_word * x = (GC_word *)GC_malloc_explicitly_typed(2000, d4);
int i;
-
+
# ifndef LINT
(void)GC_make_descriptor(&bm_large, 32);
# endif
collectable_count++;
old = 0;
for (i = 0; i < 4000; i++) {
- collectable_count++;
+ collectable_count++;
new = (GC_word *) GC_malloc_explicitly_typed(4 * sizeof(GC_word), d1);
if (0 != new[0] || 0 != new[1]) {
- GC_printf("Bad initialization by GC_malloc_explicitly_typed\n");
- FAIL;
- }
+ GC_printf("Bad initialization by GC_malloc_explicitly_typed\n");
+ FAIL;
+ }
new[0] = 17;
new[1] = (GC_word)old;
old = new;
- collectable_count++;
+ collectable_count++;
new = (GC_word *) GC_malloc_explicitly_typed(4 * sizeof(GC_word), d2);
new[0] = 17;
new[1] = (GC_word)old;
old = new;
- collectable_count++;
+ collectable_count++;
new = (GC_word *) GC_malloc_explicitly_typed(33 * sizeof(GC_word), d3);
new[0] = 17;
new[1] = (GC_word)old;
old = new;
- collectable_count++;
+ collectable_count++;
new = (GC_word *) GC_calloc_explicitly_typed(4, 2 * sizeof(GC_word),
- d1);
+ d1);
new[0] = 17;
new[1] = (GC_word)old;
old = new;
- collectable_count++;
+ collectable_count++;
if (i & 0xff) {
new = (GC_word *) GC_calloc_explicitly_typed(7, 3 * sizeof(GC_word),
- d2);
+ d2);
} else {
new = (GC_word *) GC_calloc_explicitly_typed(1001,
- 3 * sizeof(GC_word),
- d2);
+ 3 * sizeof(GC_word),
+ d2);
if (0 != new[0] || 0 != new[1]) {
- GC_printf("Bad initialization by GC_malloc_explicitly_typed\n");
- FAIL;
- }
+ GC_printf("Bad initialization by GC_malloc_explicitly_typed\n");
+ FAIL;
+ }
}
new[0] = 17;
new[1] = (GC_word)old;
@@ -970,7 +970,7 @@ void typed_test(void)
for (i = 0; i < 20000; i++) {
if (new[0] != 17) {
(void)GC_printf("typed alloc failed at %lu\n",
- (unsigned long)i);
+ (unsigned long)i);
FAIL;
}
new[0] = 0;
@@ -987,7 +987,7 @@ int fail_count = 0;
void GC_CALLBACK fail_proc1(void * x)
{
fail_count++;
-}
+}
static void uniq(void *p, ...) {
va_list a;
@@ -1004,13 +1004,13 @@ static void uniq(void *p, ...) {
"Apparently failed to mark from some function arguments.\n"
"Perhaps GC_push_regs was configured incorrectly?\n"
);
- FAIL;
+ FAIL;
}
}
#ifdef THREADS
# define TEST_FAIL_COUNT(n) 1
-#else
+#else
# define TEST_FAIL_COUNT(n) (fail_count >= (n))
#endif
@@ -1023,73 +1023,73 @@ void * GC_CALLBACK inc_int_counter(void *pcounter)
void run_one_test(void)
{
# ifndef DBG_HDRS_ALL
- char *x;
- char **z;
-# ifdef LINT
- char *y = 0;
-# else
- char *y = (char *)(GC_word)fail_proc1;
-# endif
- CLOCK_TYPE typed_time;
+ char *x;
+ char **z;
+# ifdef LINT
+ char *y = 0;
+# else
+ char *y = (char *)(GC_word)fail_proc1;
+# endif
+ CLOCK_TYPE typed_time;
# endif
CLOCK_TYPE start_time;
CLOCK_TYPE reverse_time;
CLOCK_TYPE tree_time;
unsigned long time_diff;
-
+
# ifdef FIND_LEAK
- GC_printf(
- "This test program is not designed for leak detection mode\n");
- GC_printf("Expect lots of problems.\n");
+ GC_printf(
+ "This test program is not designed for leak detection mode\n");
+ GC_printf("Expect lots of problems.\n");
# endif
GC_FREE(0);
# ifndef DBG_HDRS_ALL
collectable_count += 3;
if ((GC_size(GC_malloc(7)) != 8 &&
- GC_size(GC_malloc(7)) != MIN_WORDS * sizeof(GC_word))
- || GC_size(GC_malloc(15)) != 16) {
- GC_printf("GC_size produced unexpected results\n");
- FAIL;
+ GC_size(GC_malloc(7)) != MIN_WORDS * sizeof(GC_word))
+ || GC_size(GC_malloc(15)) != 16) {
+ GC_printf("GC_size produced unexpected results\n");
+ FAIL;
}
collectable_count += 1;
if (GC_size(GC_malloc(0)) != MIN_WORDS * sizeof(GC_word)) {
- GC_printf("GC_malloc(0) failed: GC_size returns %ld\n",
- (unsigned long)GC_size(GC_malloc(0)));
- FAIL;
+ GC_printf("GC_malloc(0) failed: GC_size returns %ld\n",
+ (unsigned long)GC_size(GC_malloc(0)));
+ FAIL;
}
collectable_count += 1;
if (GC_size(GC_malloc_uncollectable(0)) != MIN_WORDS * sizeof(GC_word)) {
- GC_printf("GC_malloc_uncollectable(0) failed\n");
- FAIL;
+ GC_printf("GC_malloc_uncollectable(0) failed\n");
+ FAIL;
}
GC_is_valid_displacement_print_proc = fail_proc1;
GC_is_visible_print_proc = fail_proc1;
collectable_count += 1;
x = GC_malloc(16);
if (GC_base(GC_PTR_ADD(x, 13)) != x) {
- GC_printf("GC_base(heap ptr) produced incorrect result\n");
- FAIL;
+ GC_printf("GC_base(heap ptr) produced incorrect result\n");
+ FAIL;
}
(void)GC_PRE_INCR(x, 0);
(void)GC_POST_INCR(x);
(void)GC_POST_DECR(x);
if (GC_base(x) != x) {
- GC_printf("Bad INCR/DECR result\n");
- FAIL;
+ GC_printf("Bad INCR/DECR result\n");
+ FAIL;
}
# ifndef PCR
if (GC_base(y) != 0) {
- GC_printf("GC_base(fn_ptr) produced incorrect result\n");
- FAIL;
+ GC_printf("GC_base(fn_ptr) produced incorrect result\n");
+ FAIL;
}
# endif
if (GC_same_obj(x+5, x) != x + 5) {
- GC_printf("GC_same_obj produced incorrect result\n");
- FAIL;
+ GC_printf("GC_same_obj produced incorrect result\n");
+ FAIL;
}
if (GC_is_visible(y) != y || GC_is_visible(x) != x) {
- GC_printf("GC_is_visible produced incorrect result\n");
- FAIL;
+ GC_printf("GC_is_visible produced incorrect result\n");
+ FAIL;
}
z = GC_malloc(8);
GC_PTR_STORE(z, x);
@@ -1098,74 +1098,74 @@ void run_one_test(void)
FAIL;
}
if (!TEST_FAIL_COUNT(1)) {
-# if!(defined(POWERPC) || defined(IA64)) || defined(M68K)
- /* On POWERPCs function pointers point to a descriptor in the */
- /* data segment, so there should have been no failures. */
- /* The same applies to IA64. Something similar seems to */
- /* be going on with NetBSD/M68K. */
- GC_printf("GC_is_visible produced wrong failure indication\n");
- FAIL;
-# endif
+# if!(defined(POWERPC) || defined(IA64)) || defined(M68K)
+ /* On POWERPCs function pointers point to a descriptor in the */
+ /* data segment, so there should have been no failures. */
+ /* The same applies to IA64. Something similar seems to */
+ /* be going on with NetBSD/M68K. */
+ GC_printf("GC_is_visible produced wrong failure indication\n");
+ FAIL;
+# endif
}
if (GC_is_valid_displacement(y) != y
|| GC_is_valid_displacement(x) != x
|| GC_is_valid_displacement(x + 3) != x + 3) {
- GC_printf(
- "GC_is_valid_displacement produced incorrect result\n");
- FAIL;
+ GC_printf(
+ "GC_is_valid_displacement produced incorrect result\n");
+ FAIL;
}
{
- size_t i;
-
- GC_malloc(17);
- for (i = sizeof(GC_word); i < 512; i *= 2) {
- GC_word result = (GC_word) GC_memalign(i, 17);
- if (result % i != 0 || result == 0 || *(int *)result != 0) FAIL;
- }
- }
+ size_t i;
+
+ GC_malloc(17);
+ for (i = sizeof(GC_word); i < 512; i *= 2) {
+ GC_word result = (GC_word) GC_memalign(i, 17);
+ if (result % i != 0 || result == 0 || *(int *)result != 0) FAIL;
+ }
+ }
# ifndef ALL_INTERIOR_POINTERS
# if defined(RS6000) || defined(POWERPC)
if (!TEST_FAIL_COUNT(1))
# else
if ((GC_all_interior_pointers && !TEST_FAIL_COUNT(1))
- || (!GC_all_interior_pointers && !TEST_FAIL_COUNT(2)))
+ || (!GC_all_interior_pointers && !TEST_FAIL_COUNT(2)))
# endif
- {
- GC_printf("GC_is_valid_displacement produced wrong failure indication\n");
- FAIL;
+ {
+ GC_printf("GC_is_valid_displacement produced wrong failure indication\n");
+ FAIL;
}
# endif
# endif /* DBG_HDRS_ALL */
/* Test floating point alignment */
collectable_count += 2;
- *(double *)GC_MALLOC(sizeof(double)) = 1.0;
- *(double *)GC_MALLOC(sizeof(double)) = 1.0;
+ *(double *)GC_MALLOC(sizeof(double)) = 1.0;
+ *(double *)GC_MALLOC(sizeof(double)) = 1.0;
/* Test size 0 allocation a bit more */
- {
- size_t i;
- for (i = 0; i < 10000; ++i) {
- GC_MALLOC(0);
- GC_FREE(GC_MALLOC(0));
- GC_MALLOC_ATOMIC(0);
- GC_FREE(GC_MALLOC_ATOMIC(0));
- }
- }
+ {
+ size_t i;
+ for (i = 0; i < 10000; ++i) {
+ GC_MALLOC(0);
+ GC_FREE(GC_MALLOC(0));
+ GC_MALLOC_ATOMIC(0);
+ GC_FREE(GC_MALLOC_ATOMIC(0));
+ }
+ }
# ifdef GC_GCJ_SUPPORT
GC_REGISTER_DISPLACEMENT(sizeof(struct fake_vtable *));
GC_init_gcj_malloc(0, (void *)(GC_word)fake_gcj_mark_proc);
# endif
- /* Make sure that fn arguments are visible to the collector. */
+ /* Make sure that fn arguments are visible to the collector. */
uniq(
GC_malloc(12), GC_malloc(12), GC_malloc(12),
(GC_gcollect(),GC_malloc(12)),
GC_malloc(12), GC_malloc(12), GC_malloc(12),
- (GC_gcollect(),GC_malloc(12)),
+ (GC_gcollect(),GC_malloc(12)),
GC_malloc(12), GC_malloc(12), GC_malloc(12),
- (GC_gcollect(),GC_malloc(12)),
+ (GC_gcollect(),GC_malloc(12)),
GC_malloc(12), GC_malloc(12), GC_malloc(12),
- (GC_gcollect(),GC_malloc(12)),
+ (GC_gcollect(),GC_malloc(12)),
GC_malloc(12), GC_malloc(12), GC_malloc(12),
- (GC_gcollect(),GC_malloc(12)),
+ (GC_gcollect(),GC_malloc(12)),
(void *)0);
/* GC_malloc(0) must return NULL or something we can deallocate. */
GC_free(GC_malloc(0));
@@ -1174,20 +1174,20 @@ void run_one_test(void)
GC_free(GC_malloc_atomic(0));
/* Repeated list reversal test. */
GET_TIME(start_time);
- reverse_test();
- if (GC_print_stats) {
+ reverse_test();
+ if (GC_print_stats) {
GET_TIME(reverse_time);
time_diff = MS_TIME_DIFF(reverse_time, start_time);
- GC_log_printf("-------------Finished reverse_test at time %u (%p)\n",
- (unsigned) time_diff, &start_time);
- }
+ GC_log_printf("-------------Finished reverse_test at time %u (%p)\n",
+ (unsigned) time_diff, &start_time);
+ }
# ifndef DBG_HDRS_ALL
typed_test();
if (GC_print_stats) {
GET_TIME(typed_time);
time_diff = MS_TIME_DIFF(typed_time, start_time);
- GC_log_printf("-------------Finished typed_test at time %u (%p)\n",
- (unsigned) time_diff, &start_time);
+ GC_log_printf("-------------Finished typed_test at time %u (%p)\n",
+ (unsigned) time_diff, &start_time);
}
# endif /* DBG_HDRS_ALL */
tree_test();
@@ -1195,27 +1195,27 @@ void run_one_test(void)
GET_TIME(tree_time);
time_diff = MS_TIME_DIFF(tree_time, start_time);
GC_log_printf("-------------Finished tree_test at time %u (%p)\n",
- (unsigned) time_diff, &start_time);
+ (unsigned) time_diff, &start_time);
}
/* Run reverse_test a second time, so we hopefully notice corruption. */
reverse_test();
if (GC_print_stats) {
GET_TIME(reverse_time);
time_diff = MS_TIME_DIFF(reverse_time, start_time);
- GC_log_printf("-------------Finished second reverse_test at time %u (%p)\n",
- (unsigned) time_diff, &start_time);
+ GC_log_printf("-------------Finished second reverse_test at time %u (%p)\n",
+ (unsigned) time_diff, &start_time);
}
- /* GC_allocate_ml and GC_need_to_lock are no longer exported, and */
- /* AO_fetch_and_add1() may be unavailable to update a counter. */
+ /* GC_allocate_ml and GC_need_to_lock are no longer exported, and */
+ /* AO_fetch_and_add1() may be unavailable to update a counter. */
(void)GC_call_with_alloc_lock(inc_int_counter, &n_tests);
# if defined(THREADS) && defined(HANDLE_FORK)
if (fork() == 0) {
- GC_gcollect();
- tiny_reverse_test(0);
- GC_gcollect();
+ GC_gcollect();
+ tiny_reverse_test(0);
+ GC_gcollect();
if (GC_print_stats)
- GC_log_printf("Finished a child process\n");
- exit(0);
+ GC_log_printf("Finished a child process\n");
+ exit(0);
}
# endif
if (GC_print_stats)
@@ -1228,59 +1228,59 @@ void check_heap_stats(void)
int i;
int still_live;
# ifdef FINALIZE_ON_DEMAND
- int late_finalize_count = 0;
+ int late_finalize_count = 0;
# endif
-
+
# ifdef VERY_SMALL_CONFIG
- /* The upper bounds are a guess, which has been empirically */
- /* adjusted. On low end uniprocessors with incremental GC */
+ /* The upper bounds are a guess, which has been empirically */
+ /* adjusted. On low end uniprocessors with incremental GC */
/* these may be particularly dubious, since empirically the */
- /* heap tends to grow largely as a result of the GC not */
- /* getting enough cycles. */
+ /* heap tends to grow largely as a result of the GC not */
+ /* getting enough cycles. */
# if CPP_WORDSZ == 64
max_heap_sz = 4500000;
# else
- max_heap_sz = 2800000;
+ max_heap_sz = 2800000;
# endif
# else
# if CPP_WORDSZ == 64
max_heap_sz = 19000000;
# else
- max_heap_sz = 12000000;
+ max_heap_sz = 12000000;
# endif
# endif
# ifdef GC_DEBUG
- max_heap_sz *= 2;
+ max_heap_sz *= 2;
# ifdef SAVE_CALL_CHAIN
- max_heap_sz *= 3;
+ max_heap_sz *= 3;
# ifdef SAVE_CALL_COUNT
- max_heap_sz += max_heap_sz * SAVE_CALL_COUNT/4;
-# endif
+ max_heap_sz += max_heap_sz * SAVE_CALL_COUNT/4;
+# endif
# endif
# endif
- /* Garbage collect repeatedly so that all inaccessible objects */
- /* can be finalized. */
+ /* Garbage collect repeatedly so that all inaccessible objects */
+ /* can be finalized. */
while (GC_collect_a_little()) { }
for (i = 0; i < 16; i++) {
GC_gcollect();
# ifdef FINALIZE_ON_DEMAND
- late_finalize_count +=
+ late_finalize_count +=
# endif
- GC_invoke_finalizers();
+ GC_invoke_finalizers();
}
(void)GC_printf("Completed %u tests\n", n_tests);
(void)GC_printf("Allocated %d collectable objects\n", collectable_count);
(void)GC_printf("Allocated %d uncollectable objects\n",
- uncollectable_count);
+ uncollectable_count);
(void)GC_printf("Allocated %d atomic objects\n", atomic_count);
(void)GC_printf("Allocated %d stubborn objects\n", stubborn_count);
(void)GC_printf("Finalized %d/%d objects - ",
- finalized_count, finalizable_count);
+ finalized_count, finalizable_count);
# ifdef FINALIZE_ON_DEMAND
- if (finalized_count != late_finalize_count) {
+ if (finalized_count != late_finalize_count) {
(void)GC_printf("Demand finalization error\n");
- FAIL;
- }
+ FAIL;
+ }
# endif
if (finalized_count > finalizable_count
|| finalized_count < finalizable_count/2) {
@@ -1291,25 +1291,25 @@ void check_heap_stats(void)
}
still_live = 0;
for (i = 0; i < MAX_FINALIZED; i++) {
- if (live_indicators[i] != 0) {
- still_live++;
- }
+ if (live_indicators[i] != 0) {
+ still_live++;
+ }
}
i = finalizable_count - finalized_count - still_live;
if (0 != i) {
GC_printf("%d disappearing links remain and %d more objects "
- "were not finalized\n", still_live, i);
+ "were not finalized\n", still_live, i);
if (i > 10) {
- GC_printf("\tVery suspicious!\n");
- } else {
- GC_printf("\tSlightly suspicious, but probably OK.\n");
- }
+ GC_printf("\tVery suspicious!\n");
+ } else {
+ GC_printf("\tSlightly suspicious, but probably OK.\n");
+ }
}
(void)GC_printf("Total number of bytes allocated is %lu\n",
- (unsigned long)
- (GC_bytes_allocd + GC_bytes_allocd_before_gc));
+ (unsigned long)
+ (GC_bytes_allocd + GC_bytes_allocd_before_gc));
(void)GC_printf("Final heap size is %lu bytes\n",
- (unsigned long)GC_get_heap_size());
+ (unsigned long)GC_get_heap_size());
if (GC_bytes_allocd + GC_bytes_allocd_before_gc < n_tests *
# ifdef VERY_SMALL_CONFIG
2700000
@@ -1321,7 +1321,7 @@ void check_heap_stats(void)
FAIL;
}
if (GC_get_heap_size() > max_heap_sz*n_tests) {
- /* FIXME: is the condition correct? */
+ /* FIXME: is the condition correct? */
(void)GC_printf("Unexpected heap growth - collector may be broken\n");
FAIL;
}
@@ -1331,15 +1331,15 @@ void check_heap_stats(void)
#if defined(MACOS)
void SetMinimumStack(long minSize)
{
- long newApplLimit;
-
- if (minSize > LMGetDefltStack())
- {
- newApplLimit = (long) GetApplLimit()
- - (minSize - LMGetDefltStack());
- SetApplLimit((Ptr) newApplLimit);
- MaxApplZone();
- }
+ long newApplLimit;
+
+ if (minSize > LMGetDefltStack())
+ {
+ newApplLimit = (long) GetApplLimit()
+ - (minSize - LMGetDefltStack());
+ SetApplLimit((Ptr) newApplLimit);
+ MaxApplZone();
+ }
}
#define cMinStackSpace (512L * 1024L)
@@ -1366,10 +1366,10 @@ void GC_CALLBACK warn_proc(char *msg, GC_word p)
{
n_tests = 0;
# if defined(MACOS)
- /* Make sure we have lots and lots of stack space. */
- SetMinimumStack(cMinStackSpace);
- /* Cheat and let stdio initialize toolbox for us. */
- printf("Testing GC Macintosh port.\n");
+ /* Make sure we have lots and lots of stack space. */
+ SetMinimumStack(cMinStackSpace);
+ /* Cheat and let stdio initialize toolbox for us. */
+ printf("Testing GC Macintosh port.\n");
# endif
GC_COND_INIT();
GC_set_warn_proc(warn_proc);
@@ -1378,12 +1378,12 @@ void GC_CALLBACK warn_proc(char *msg, GC_word p)
GC_enable_incremental();
GC_printf("Switched to incremental mode\n");
# if defined(MPROTECT_VDB)
- GC_printf("Emulating dirty bits with mprotect/signals\n");
+ GC_printf("Emulating dirty bits with mprotect/signals\n");
# else
# ifdef PROC_VDB
- GC_printf("Reading dirty bits from /proc\n");
+ GC_printf("Reading dirty bits from /proc\n");
# else
- GC_printf("Using DEFAULT_VDB dirty bit implementation\n");
+ GC_printf("Using DEFAULT_VDB dirty bit implementation\n");
# endif
# endif
# endif
@@ -1393,21 +1393,21 @@ void GC_CALLBACK warn_proc(char *msg, GC_word p)
fflush(stdout);
# endif
# ifdef LINT
- /* Entry points we should be testing, but aren't. */
- /* Some can be tested by defining GC_DEBUG at the top of this file */
- /* This is a bit SunOS4 specific. */
- GC_noop(GC_expand_hp, GC_add_roots, GC_clear_roots,
- GC_register_disappearing_link,
- GC_register_finalizer_ignore_self,
- GC_debug_register_displacement,
- GC_print_obj, GC_debug_change_stubborn,
- GC_debug_end_stubborn_change, GC_debug_malloc_uncollectable,
- GC_debug_free, GC_debug_realloc, GC_generic_malloc_words_small,
- GC_init, GC_make_closure, GC_debug_invoke_finalizer,
- GC_page_was_ever_dirty, GC_is_fresh,
- GC_malloc_ignore_off_page, GC_malloc_atomic_ignore_off_page,
- GC_set_max_heap_size, GC_get_bytes_since_gc,
- GC_get_total_bytes, GC_pre_incr, GC_post_incr);
+ /* Entry points we should be testing, but aren't. */
+ /* Some can be tested by defining GC_DEBUG at the top of this file */
+ /* This is a bit SunOS4 specific. */
+ GC_noop(GC_expand_hp, GC_add_roots, GC_clear_roots,
+ GC_register_disappearing_link,
+ GC_register_finalizer_ignore_self,
+ GC_debug_register_displacement,
+ GC_print_obj, GC_debug_change_stubborn,
+ GC_debug_end_stubborn_change, GC_debug_malloc_uncollectable,
+ GC_debug_free, GC_debug_realloc, GC_generic_malloc_words_small,
+ GC_init, GC_make_closure, GC_debug_invoke_finalizer,
+ GC_page_was_ever_dirty, GC_is_fresh,
+ GC_malloc_ignore_off_page, GC_malloc_atomic_ignore_off_page,
+ GC_set_max_heap_size, GC_get_bytes_since_gc,
+ GC_get_total_bytes, GC_pre_incr, GC_post_incr);
# endif
# ifdef MSWIN32
GC_win32_free_heap();
@@ -1499,7 +1499,7 @@ DWORD __stdcall thr_window(void *arg)
#ifdef MSWINCE
int APIENTRY GC_WinMain(HINSTANCE instance, HINSTANCE prev,
- GC_WINMAIN_WINCE_LPTSTR cmd, int n)
+ GC_WINMAIN_WINCE_LPTSTR cmd, int n)
#else
int APIENTRY WinMain(HINSTANCE instance, HINSTANCE prev, LPSTR cmd, int n)
#endif
@@ -1513,7 +1513,7 @@ DWORD __stdcall thr_window(void *arg)
# endif
DWORD thread_id;
# if defined(GC_DLL) && !defined(GC_NO_DLLMAIN) && !defined(MSWINCE) \
- && !defined(THREAD_LOCAL_ALLOC) && !defined(PARALLEL_MARK)
+ && !defined(THREAD_LOCAL_ALLOC) && !defined(PARALLEL_MARK)
GC_use_DllMain(); /* Test with implicit thread registration if possible. */
GC_printf("Using DllMain to track threads\n");
# endif
@@ -1612,34 +1612,34 @@ int main(void)
int code;
int i;
# ifdef GC_IRIX_THREADS
- /* Force a larger stack to be preallocated */
- /* Since the initial cant always grow later. */
- *((volatile char *)&code - 1024*1024) = 0; /* Require 1 Mb */
+ /* Force a larger stack to be preallocated */
+ /* Since the initial cant always grow later. */
+ *((volatile char *)&code - 1024*1024) = 0; /* Require 1 Mb */
# endif /* GC_IRIX_THREADS */
# if defined(GC_HPUX_THREADS)
- /* Default stack size is too small, especially with the 64 bit ABI */
- /* Increase it. */
- if (pthread_default_stacksize_np(1024*1024, 0) != 0) {
+ /* Default stack size is too small, especially with the 64 bit ABI */
+ /* Increase it. */
+ if (pthread_default_stacksize_np(1024*1024, 0) != 0) {
(void)GC_printf("pthread_default_stacksize_np failed.\n");
- }
-# endif /* GC_HPUX_THREADS */
+ }
+# endif /* GC_HPUX_THREADS */
# ifdef PTW32_STATIC_LIB
- pthread_win32_process_attach_np ();
- pthread_win32_thread_attach_np ();
+ pthread_win32_process_attach_np ();
+ pthread_win32_thread_attach_np ();
# endif
GC_COND_INIT();
pthread_attr_init(&attr);
# if defined(GC_IRIX_THREADS) || defined(GC_FREEBSD_THREADS) \
- || defined(GC_DARWIN_THREADS) || defined(GC_AIX_THREADS)
- pthread_attr_setstacksize(&attr, 1000000);
+ || defined(GC_DARWIN_THREADS) || defined(GC_AIX_THREADS)
+ pthread_attr_setstacksize(&attr, 1000000);
# endif
n_tests = 0;
# if (defined(MPROTECT_VDB)) \
&& !defined(PARALLEL_MARK) &&!defined(REDIRECT_MALLOC) \
&& !defined(MAKE_BACK_GRAPH) && !defined(USE_PROC_FOR_LIBRARIES) \
- && !defined(NO_INCREMENTAL)
- GC_enable_incremental();
+ && !defined(NO_INCREMENTAL)
+ GC_enable_incremental();
(void) GC_printf("Switched to incremental mode\n");
# if defined(MPROTECT_VDB)
(void)GC_printf("Emulating dirty bits with mprotect/signals\n");
@@ -1654,12 +1654,12 @@ int main(void)
GC_set_warn_proc(warn_proc);
if ((code = pthread_key_create(&fl_key, 0)) != 0) {
(void)GC_printf("Key creation failed %d\n", code);
- FAIL;
+ FAIL;
}
for (i = 0; i < NTHREADS; ++i) {
if ((code = pthread_create(th+i, &attr, thr_run_one_test, 0)) != 0) {
- (void)GC_printf("Thread %d creation failed %d\n", i, code);
- FAIL;
+ (void)GC_printf("Thread %d creation failed %d\n", i, code);
+ FAIL;
}
}
run_one_test();
@@ -1674,8 +1674,8 @@ int main(void)
pthread_attr_destroy(&attr);
GC_printf("Completed %u collections\n", (unsigned)GC_gc_no);
# ifdef PTW32_STATIC_LIB
- pthread_win32_thread_detach_np ();
- pthread_win32_process_detach_np ();
+ pthread_win32_thread_detach_np ();
+ pthread_win32_process_detach_np ();
# endif
return(0);
}
diff --git a/tests/test_cpp.cc b/tests/test_cpp.cc
index c5b04451..94a8ca37 100644
--- a/tests/test_cpp.cc
+++ b/tests/test_cpp.cc
@@ -1,9 +1,9 @@
/****************************************************************************
Copyright (c) 1994 by Xerox Corporation. All rights reserved.
-
+
THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
-
+
Permission is hereby granted to use or copy this program for any
purpose, provided the above notices are retained on all copies.
Permission to modify the code and to distribute modified code is
@@ -44,8 +44,8 @@ extern "C" {
# include "private/gcconfig.h"
GC_API void GC_printf(const char *format, ...);
/* Use GC private output to reach the same log file. */
- /* Don't include gc_priv.h, since that may include Windows system */
- /* header files that don't take kindly to this context. */
+ /* Don't include gc_priv.h, since that may include Windows system */
+ /* header files that don't take kindly to this context. */
}
#ifdef MSWIN32
# include <windows.h>
@@ -70,7 +70,7 @@ class A {public:
A( int iArg ): i( iArg ) {}
void Test( int iArg ) {
- my_assert( i == iArg );}
+ my_assert( i == iArg );}
int i;};
@@ -100,7 +100,7 @@ class C: public gc_cleanup, public A {public:
~C() {
this->A::Test( level );
nFreed++;
- my_assert( level == 0 ?
+ my_assert( level == 0 ?
left == 0 && right == 0 :
level == left->level + 1 && level == right->level + 1 );
left = right = 0;
@@ -130,7 +130,7 @@ class D: public gc {public:
my_assert( self->i == (int) (GC_word) data );}
static void Test() {
my_assert( nFreed >= .8 * nAllocated );}
-
+
int i;
static int nFreed;
static int nAllocated;};
@@ -149,10 +149,10 @@ class E: public gc_cleanup {public:
static int nFreed;
static int nAllocated;};
-
+
int E::nFreed = 0;
int E::nAllocated = 0;
-
+
class F: public E {public:
/* A collectable class with clean-up, a base with clean-up, and a
@@ -165,14 +165,14 @@ class F: public E {public:
static void Test() {
my_assert( nFreed >= .8 * nAllocated );
my_assert( 2 * nFreed == E::nFreed );}
-
+
E e;
static int nFreed;
static int nAllocated;};
-
+
int F::nFreed = 0;
int F::nAllocated = 0;
-
+
GC_word Disguise( void* p ) {
return ~ (GC_word) p;}
@@ -183,7 +183,7 @@ void* Undisguise( GC_word i ) {
#ifdef MSWIN32
int APIENTRY WinMain(
- HINSTANCE instance, HINSTANCE prev, LPSTR cmd, int cmdShow )
+ HINSTANCE instance, HINSTANCE prev, LPSTR cmd, int cmdShow )
{
int argc;
char* argv[ 3 ];
@@ -206,13 +206,13 @@ int APIENTRY WinMain(
char* argv_[] = {"test_cpp", "10"}; // doesn't
argv = argv_; // have a
argc = sizeof(argv_)/sizeof(argv_[0]); // commandline
-# endif
+# endif
int i, iters, n;
# ifdef USE_STD_ALLOCATOR
int *x = gc_allocator<int>().allocate(1);
int *xio = gc_allocator_ignore_off_page<int>().allocate(1);
int **xptr = traceable_allocator<int *>().allocate(1);
-# else
+# else
# ifdef __GNUC__
int *x = (int *)gc_alloc::allocate(sizeof(int));
# else
@@ -227,7 +227,7 @@ int APIENTRY WinMain(
if (argc != 2 || (0 >= (n = atoi( argv[ 1 ] )))) {
GC_printf( "usage: test_cpp number-of-iterations\nAssuming 10 iters\n" );
n = 10;}
-
+
for (iters = 1; iters <= n; iters++) {
GC_printf( "Starting iteration %d\n", iters );
@@ -260,10 +260,10 @@ int APIENTRY WinMain(
B::Deleting( 1 );
delete b;
B::Deleting( 0 );}
-# ifdef FINALIZE_ON_DEMAND
- GC_invoke_finalizers();
-# endif
- }
+# ifdef FINALIZE_ON_DEMAND
+ GC_invoke_finalizers();
+# endif
+ }
/* Make sure the uncollectable As and Bs are still there. */
for (i = 0; i < 1000; i++) {
@@ -275,11 +275,11 @@ int APIENTRY WinMain(
B::Deleting( 1 );
delete b;
B::Deleting( 0 );
-# ifdef FINALIZE_ON_DEMAND
- GC_invoke_finalizers();
-# endif
+# ifdef FINALIZE_ON_DEMAND
+ GC_invoke_finalizers();
+# endif
- }
+ }
/* Make sure most of the finalizable Cs, Ds, and Fs have
gone away. */
@@ -293,5 +293,3 @@ int APIENTRY WinMain(
my_assert (29 == x[0]);
GC_printf( "The test appears to have succeeded.\n" );
return( 0 );}
-
-
diff --git a/thread_local_alloc.c b/thread_local_alloc.c
index db217c26..f45b08da 100644
--- a/thread_local_alloc.c
+++ b/thread_local_alloc.c
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright (c) 2000-2005 by Hewlett-Packard Company. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
@@ -32,8 +32,8 @@ GC_key_t GC_thread_key;
static GC_bool keys_initialized;
-/* Return a single nonempty freelist fl to the global one pointed to */
-/* by gfl. */
+/* Return a single nonempty freelist fl to the global one pointed to */
+/* by gfl. */
static void return_single_freelist(void *fl, void **gfl)
{
@@ -44,30 +44,30 @@ static void return_single_freelist(void *fl, void **gfl)
} else {
GC_ASSERT(GC_size(fl) == GC_size(*gfl));
/* Concatenate: */
- qptr = &(obj_link(fl));
- while ((word)(q = *qptr) >= HBLKSIZE)
- qptr = &(obj_link(q));
- GC_ASSERT(0 == q);
- *qptr = *gfl;
- *gfl = fl;
+ qptr = &(obj_link(fl));
+ while ((word)(q = *qptr) >= HBLKSIZE)
+ qptr = &(obj_link(q));
+ GC_ASSERT(0 == q);
+ *qptr = *gfl;
+ *gfl = fl;
}
}
/* Recover the contents of the freelist array fl into the global one gfl.*/
-/* We hold the allocator lock. */
+/* We hold the allocator lock. */
static void return_freelists(void **fl, void **gfl)
{
int i;
for (i = 1; i < TINY_FREELISTS; ++i) {
- if ((word)(fl[i]) >= HBLKSIZE) {
- return_single_freelist(fl[i], gfl+i);
- }
- /* Clear fl[i], since the thread structure may hang around. */
- /* Do it in a way that is likely to trap if we access it. */
- fl[i] = (ptr_t)HBLKSIZE;
+ if ((word)(fl[i]) >= HBLKSIZE) {
+ return_single_freelist(fl[i], gfl+i);
+ }
+ /* Clear fl[i], since the thread structure may hang around. */
+ /* Do it in a way that is likely to trap if we access it. */
+ fl[i] = (ptr_t)HBLKSIZE;
}
- /* The 0 granule freelist really contains 1 granule objects. */
+ /* The 0 granule freelist really contains 1 granule objects. */
# ifdef GC_GCJ_SUPPORT
if (fl[0] == ERROR_FL) return;
# endif
@@ -76,33 +76,33 @@ static void return_freelists(void **fl, void **gfl)
}
}
-/* Each thread structure must be initialized. */
-/* This call must be made from the new thread. */
+/* Each thread structure must be initialized. */
+/* This call must be made from the new thread. */
void GC_init_thread_local(GC_tlfs p)
{
int i;
GC_ASSERT(I_HOLD_LOCK());
if (!keys_initialized) {
- if (0 != GC_key_create(&GC_thread_key, 0)) {
- ABORT("Failed to create key for local allocator");
+ if (0 != GC_key_create(&GC_thread_key, 0)) {
+ ABORT("Failed to create key for local allocator");
}
- keys_initialized = TRUE;
+ keys_initialized = TRUE;
}
if (0 != GC_setspecific(GC_thread_key, p)) {
- ABORT("Failed to set thread specific allocation pointers");
+ ABORT("Failed to set thread specific allocation pointers");
}
for (i = 1; i < TINY_FREELISTS; ++i) {
- p -> ptrfree_freelists[i] = (void *)(word)1;
- p -> normal_freelists[i] = (void *)(word)1;
-# ifdef GC_GCJ_SUPPORT
- p -> gcj_freelists[i] = (void *)(word)1;
-# endif
- }
- /* Set up the size 0 free lists. */
- /* We now handle most of them like regular free lists, to ensure */
- /* That explicit deallocation works. However, allocation of a */
- /* size 0 "gcj" object is always an error. */
+ p -> ptrfree_freelists[i] = (void *)(word)1;
+ p -> normal_freelists[i] = (void *)(word)1;
+# ifdef GC_GCJ_SUPPORT
+ p -> gcj_freelists[i] = (void *)(word)1;
+# endif
+ }
+ /* Set up the size 0 free lists. */
+ /* We now handle most of them like regular free lists, to ensure */
+ /* That explicit deallocation works. However, allocation of a */
+ /* size 0 "gcj" object is always an error. */
p -> ptrfree_freelists[0] = (void *)(word)1;
p -> normal_freelists[0] = (void *)(word)1;
# ifdef GC_GCJ_SUPPORT
@@ -114,18 +114,18 @@ void GC_init_thread_local(GC_tlfs p)
extern void ** GC_gcjobjfreelist;
#endif
-/* We hold the allocator lock. */
+/* We hold the allocator lock. */
void GC_destroy_thread_local(GC_tlfs p)
{
- /* We currently only do this from the thread itself or from */
- /* the fork handler for a child process. */
+ /* We currently only do this from the thread itself or from */
+ /* the fork handler for a child process. */
# ifndef HANDLE_FORK
GC_ASSERT(GC_getspecific(GC_thread_key) == (void *)p);
# endif
return_freelists(p -> ptrfree_freelists, GC_aobjfreelist);
return_freelists(p -> normal_freelists, GC_objfreelist);
# ifdef GC_GCJ_SUPPORT
- return_freelists(p -> gcj_freelists, GC_gcjobjfreelist);
+ return_freelists(p -> gcj_freelists, GC_gcjobjfreelist);
# endif
}
@@ -149,9 +149,9 @@ GC_API void * GC_CALL GC_malloc(size_t bytes)
# if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_WIN32_SPECIFIC)
GC_key_t k = GC_thread_key;
if (EXPECT(0 == k, 0)) {
- /* We haven't yet run GC_init_parallel. That means */
- /* we also aren't locking, so this is fairly cheap. */
- return GC_core_malloc(bytes);
+ /* We haven't yet run GC_init_parallel. That means */
+ /* we also aren't locking, so this is fairly cheap. */
+ return GC_core_malloc(bytes);
}
tsd = GC_getspecific(k);
# else
@@ -159,30 +159,30 @@ GC_API void * GC_CALL GC_malloc(size_t bytes)
# endif
# if defined(USE_PTHREAD_SPECIFIC) || defined(USE_WIN32_SPECIFIC)
if (EXPECT(0 == tsd, 0)) {
- return GC_core_malloc(bytes);
+ return GC_core_malloc(bytes);
}
# endif
GC_ASSERT(GC_is_initialized);
# ifdef GC_ASSERTIONS
- /* We can't check tsd correctly, since we don't have access to */
- /* the right declarations. But we can check that it's close. */
+ /* We can't check tsd correctly, since we don't have access to */
+ /* the right declarations. But we can check that it's close. */
LOCK();
{
-# if defined(GC_WIN32_THREADS)
- char * me = (char *)GC_lookup_thread_inner(GetCurrentThreadId());
+# if defined(GC_WIN32_THREADS)
+ char * me = (char *)GC_lookup_thread_inner(GetCurrentThreadId());
# else
- char * me = GC_lookup_thread(pthread_self());
-# endif
+ char * me = GC_lookup_thread(pthread_self());
+# endif
GC_ASSERT((char *)tsd > me && (char *)tsd < me + 1000);
}
UNLOCK();
# endif
tiny_fl = ((GC_tlfs)tsd) -> normal_freelists;
GC_FAST_MALLOC_GRANS(result, granules, tiny_fl, DIRECT_GRANULES,
- NORMAL, GC_core_malloc(bytes), obj_link(result)=0);
+ NORMAL, GC_core_malloc(bytes), obj_link(result)=0);
# ifdef LOG_ALLOCS
GC_err_printf("GC_malloc(%u) = %p : %u\n",
- (unsigned)bytes, result, (unsigned)GC_gc_no);
+ (unsigned)bytes, result, (unsigned)GC_gc_no);
# endif
return result;
}
@@ -197,9 +197,9 @@ GC_API void * GC_CALL GC_malloc_atomic(size_t bytes)
# if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_WIN32_SPECIFIC)
GC_key_t k = GC_thread_key;
if (EXPECT(0 == k, 0)) {
- /* We haven't yet run GC_init_parallel. That means */
- /* we also aren't locking, so this is fairly cheap. */
- return GC_core_malloc(bytes);
+ /* We haven't yet run GC_init_parallel. That means */
+ /* we also aren't locking, so this is fairly cheap. */
+ return GC_core_malloc(bytes);
}
tsd = GC_getspecific(k);
# else
@@ -207,13 +207,13 @@ GC_API void * GC_CALL GC_malloc_atomic(size_t bytes)
# endif
# if defined(USE_PTHREAD_SPECIFIC) || defined(USE_WIN32_SPECIFIC)
if (EXPECT(0 == tsd, 0)) {
- return GC_core_malloc(bytes);
+ return GC_core_malloc(bytes);
}
# endif
GC_ASSERT(GC_is_initialized);
tiny_fl = ((GC_tlfs)tsd) -> ptrfree_freelists;
GC_FAST_MALLOC_GRANS(result, granules, tiny_fl, DIRECT_GRANULES, PTRFREE,
- GC_core_malloc_atomic(bytes), (void)0 /* no init */);
+ GC_core_malloc_atomic(bytes), (void)0 /* no init */);
return result;
}
@@ -229,28 +229,28 @@ GC_API void * GC_CALL GC_malloc_atomic(size_t bytes)
extern int GC_gcj_kind;
-/* Gcj-style allocation without locks is extremely tricky. The */
-/* fundamental issue is that we may end up marking a free list, which */
-/* has freelist links instead of "vtable" pointers. That is usually */
-/* OK, since the next object on the free list will be cleared, and */
-/* will thus be interpreted as containing a zero descriptor. That's */
-/* fine if the object has not yet been initialized. But there are */
-/* interesting potential races. */
-/* In the case of incremental collection, this seems hopeless, since */
-/* the marker may run asynchronously, and may pick up the pointer to */
-/* the next freelist entry (which it thinks is a vtable pointer), get */
-/* suspended for a while, and then see an allocated object instead */
-/* of the vtable. This may be avoidable with either a handshake with */
-/* the collector or, probably more easily, by moving the free list */
-/* links to the second word of each object. The latter isn't a */
-/* universal win, since on architecture like Itanium, nonzero offsets */
-/* are not necessarily free. And there may be cache fill order issues. */
-/* For now, we punt with incremental GC. This probably means that */
-/* incremental GC should be enabled before we fork a second thread. */
-/* Unlike the other thread local allocation calls, we assume that the */
-/* collector has been explicitly initialized. */
+/* Gcj-style allocation without locks is extremely tricky. The */
+/* fundamental issue is that we may end up marking a free list, which */
+/* has freelist links instead of "vtable" pointers. That is usually */
+/* OK, since the next object on the free list will be cleared, and */
+/* will thus be interpreted as containing a zero descriptor. That's */
+/* fine if the object has not yet been initialized. But there are */
+/* interesting potential races. */
+/* In the case of incremental collection, this seems hopeless, since */
+/* the marker may run asynchronously, and may pick up the pointer to */
+/* the next freelist entry (which it thinks is a vtable pointer), get */
+/* suspended for a while, and then see an allocated object instead */
+/* of the vtable. This may be avoidable with either a handshake with */
+/* the collector or, probably more easily, by moving the free list */
+/* links to the second word of each object. The latter isn't a */
+/* universal win, since on architecture like Itanium, nonzero offsets */
+/* are not necessarily free. And there may be cache fill order issues. */
+/* For now, we punt with incremental GC. This probably means that */
+/* incremental GC should be enabled before we fork a second thread. */
+/* Unlike the other thread local allocation calls, we assume that the */
+/* collector has been explicitly initialized. */
GC_API void * GC_CALL GC_gcj_malloc(size_t bytes,
- void * ptr_to_struct_containing_descr)
+ void * ptr_to_struct_containing_descr)
{
if (GC_EXPECT(GC_incremental, 0)) {
return GC_core_gcj_malloc(bytes, ptr_to_struct_containing_descr);
@@ -258,80 +258,79 @@ GC_API void * GC_CALL GC_gcj_malloc(size_t bytes,
size_t granules = ROUNDED_UP_GRANULES(bytes);
void *result;
void **tiny_fl = ((GC_tlfs)GC_getspecific(GC_thread_key))
- -> gcj_freelists;
+ -> gcj_freelists;
GC_ASSERT(GC_gcj_malloc_initialized);
GC_FAST_MALLOC_GRANS(result, granules, tiny_fl, DIRECT_GRANULES,
- GC_gcj_kind,
- GC_core_gcj_malloc(bytes,
- ptr_to_struct_containing_descr),
- {AO_compiler_barrier();
- *(void **)result = ptr_to_struct_containing_descr;});
- /* This forces the initialization of the "method ptr". */
- /* This is necessary to ensure some very subtle properties */
- /* required if a GC is run in the middle of such an allocation. */
- /* Here we implicitly also assume atomicity for the free list. */
- /* and method pointer assignments. */
- /* We must update the freelist before we store the pointer. */
- /* Otherwise a GC at this point would see a corrupted */
- /* free list. */
- /* A real memory barrier is not needed, since the */
- /* action of stopping this thread will cause prior writes */
- /* to complete. */
- /* We assert that any concurrent marker will stop us. */
- /* Thus it is impossible for a mark procedure to see the */
- /* allocation of the next object, but to see this object */
- /* still containing a free list pointer. Otherwise the */
- /* marker, by misinterpreting the freelist link as a vtable */
- /* pointer, might find a random "mark descriptor" in the next */
- /* object. */
+ GC_gcj_kind,
+ GC_core_gcj_malloc(bytes,
+ ptr_to_struct_containing_descr),
+ {AO_compiler_barrier();
+ *(void **)result = ptr_to_struct_containing_descr;});
+ /* This forces the initialization of the "method ptr". */
+ /* This is necessary to ensure some very subtle properties */
+ /* required if a GC is run in the middle of such an allocation. */
+ /* Here we implicitly also assume atomicity for the free list. */
+ /* and method pointer assignments. */
+ /* We must update the freelist before we store the pointer. */
+ /* Otherwise a GC at this point would see a corrupted */
+ /* free list. */
+ /* A real memory barrier is not needed, since the */
+ /* action of stopping this thread will cause prior writes */
+ /* to complete. */
+ /* We assert that any concurrent marker will stop us. */
+ /* Thus it is impossible for a mark procedure to see the */
+ /* allocation of the next object, but to see this object */
+ /* still containing a free list pointer. Otherwise the */
+ /* marker, by misinterpreting the freelist link as a vtable */
+ /* pointer, might find a random "mark descriptor" in the next */
+ /* object. */
return result;
}
}
#endif /* GC_GCJ_SUPPORT */
-/* The thread support layer must arrange to mark thread-local */
-/* free lists explicitly, since the link field is often */
-/* invisible to the marker. It knows how to find all threads; */
-/* we take care of an individual thread freelist structure. */
+/* The thread support layer must arrange to mark thread-local */
+/* free lists explicitly, since the link field is often */
+/* invisible to the marker. It knows how to find all threads; */
+/* we take care of an individual thread freelist structure. */
void GC_mark_thread_local_fls_for(GC_tlfs p)
{
ptr_t q;
int j;
-
+
for (j = 0; j < TINY_FREELISTS; ++j) {
q = p -> ptrfree_freelists[j];
if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
q = p -> normal_freelists[j];
if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
# ifdef GC_GCJ_SUPPORT
- if (j > 0) {
+ if (j > 0) {
q = p -> gcj_freelists[j];
if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
- }
+ }
# endif /* GC_GCJ_SUPPORT */
}
}
#if defined(GC_ASSERTIONS)
- /* Check that all thread-local free-lists in p are completely marked. */
+ /* Check that all thread-local free-lists in p are completely marked. */
void GC_check_tls_for(GC_tlfs p)
{
- ptr_t q;
- int j;
-
- for (j = 1; j < TINY_FREELISTS; ++j) {
- q = p -> ptrfree_freelists[j];
- if ((word)q > HBLKSIZE) GC_check_fl_marks(q);
- q = p -> normal_freelists[j];
- if ((word)q > HBLKSIZE) GC_check_fl_marks(q);
-# ifdef GC_GCJ_SUPPORT
- q = p -> gcj_freelists[j];
- if ((word)q > HBLKSIZE) GC_check_fl_marks(q);
-# endif /* GC_GCJ_SUPPORT */
- }
+ ptr_t q;
+ int j;
+
+ for (j = 1; j < TINY_FREELISTS; ++j) {
+ q = p -> ptrfree_freelists[j];
+ if ((word)q > HBLKSIZE) GC_check_fl_marks(q);
+ q = p -> normal_freelists[j];
+ if ((word)q > HBLKSIZE) GC_check_fl_marks(q);
+# ifdef GC_GCJ_SUPPORT
+ q = p -> gcj_freelists[j];
+ if ((word)q > HBLKSIZE) GC_check_fl_marks(q);
+# endif /* GC_GCJ_SUPPORT */
+ }
}
#endif /* GC_ASSERTIONS */
# endif /* THREAD_LOCAL_ALLOC */
-
diff --git a/typd_mlc.c b/typd_mlc.c
index 048fd726..6e29975a 100644
--- a/typd_mlc.c
+++ b/typd_mlc.c
@@ -44,70 +44,70 @@
STATIC GC_bool GC_explicit_typing_initialized = FALSE;
STATIC int GC_explicit_kind;
- /* Object kind for objects with indirect */
- /* (possibly extended) descriptors. */
+ /* Object kind for objects with indirect */
+ /* (possibly extended) descriptors. */
STATIC int GC_array_kind;
- /* Object kind for objects with complex */
- /* descriptors and GC_array_mark_proc. */
+ /* Object kind for objects with complex */
+ /* descriptors and GC_array_mark_proc. */
-/* Extended descriptors. GC_typed_mark_proc understands these. */
-/* These are used for simple objects that are larger than what */
-/* can be described by a BITMAP_BITS sized bitmap. */
+/* Extended descriptors. GC_typed_mark_proc understands these. */
+/* These are used for simple objects that are larger than what */
+/* can be described by a BITMAP_BITS sized bitmap. */
typedef struct {
- word ed_bitmap; /* lsb corresponds to first word. */
- GC_bool ed_continued; /* next entry is continuation. */
+ word ed_bitmap; /* lsb corresponds to first word. */
+ GC_bool ed_continued; /* next entry is continuation. */
} ext_descr;
-/* Array descriptors. GC_array_mark_proc understands these. */
-/* We may eventually need to add provisions for headers and */
+/* Array descriptors. GC_array_mark_proc understands these. */
+/* We may eventually need to add provisions for headers and */
/* trailers. Hence we provide for tree structured descriptors, */
-/* though we don't really use them currently. */
+/* though we don't really use them currently. */
typedef union ComplexDescriptor {
- struct LeafDescriptor { /* Describes simple array */
+ struct LeafDescriptor { /* Describes simple array */
word ld_tag;
-# define LEAF_TAG 1
- size_t ld_size; /* bytes per element */
- /* multiple of ALIGNMENT */
- size_t ld_nelements; /* Number of elements. */
- GC_descr ld_descriptor; /* A simple length, bitmap, */
- /* or procedure descriptor. */
+# define LEAF_TAG 1
+ size_t ld_size; /* bytes per element */
+ /* multiple of ALIGNMENT */
+ size_t ld_nelements; /* Number of elements. */
+ GC_descr ld_descriptor; /* A simple length, bitmap, */
+ /* or procedure descriptor. */
} ld;
struct ComplexArrayDescriptor {
word ad_tag;
-# define ARRAY_TAG 2
- size_t ad_nelements;
- union ComplexDescriptor * ad_element_descr;
+# define ARRAY_TAG 2
+ size_t ad_nelements;
+ union ComplexDescriptor * ad_element_descr;
} ad;
struct SequenceDescriptor {
word sd_tag;
-# define SEQUENCE_TAG 3
- union ComplexDescriptor * sd_first;
- union ComplexDescriptor * sd_second;
+# define SEQUENCE_TAG 3
+ union ComplexDescriptor * sd_first;
+ union ComplexDescriptor * sd_second;
} sd;
} complex_descriptor;
#define TAG ld.ld_tag
-STATIC ext_descr * GC_ext_descriptors; /* Points to array of extended */
- /* descriptors. */
+STATIC ext_descr * GC_ext_descriptors; /* Points to array of extended */
+ /* descriptors. */
-STATIC size_t GC_ed_size = 0; /* Current size of above arrays. */
+STATIC size_t GC_ed_size = 0; /* Current size of above arrays. */
# define ED_INITIAL_SIZE 100;
-STATIC size_t GC_avail_descr = 0; /* Next available slot. */
+STATIC size_t GC_avail_descr = 0; /* Next available slot. */
-STATIC int GC_typed_mark_proc_index; /* Indices of my mark */
-STATIC int GC_array_mark_proc_index; /* procedures. */
+STATIC int GC_typed_mark_proc_index; /* Indices of my mark */
+STATIC int GC_array_mark_proc_index; /* procedures. */
static void GC_push_typed_structures_proc (void)
{
GC_push_all((ptr_t)&GC_ext_descriptors, (ptr_t)&GC_ext_descriptors + sizeof(word));
}
-/* Add a multiword bitmap to GC_ext_descriptors arrays. Return */
-/* starting index. */
-/* Returns -1 on failure. */
-/* Caller does not hold allocation lock. */
+/* Add a multiword bitmap to GC_ext_descriptors arrays. Return */
+/* starting index. */
+/* Returns -1 on failure. */
+/* Caller does not hold allocation lock. */
STATIC signed_word GC_add_ext_descriptor(GC_bitmap bm, word nbits)
{
size_t nwords = divWORDSZ(nbits + WORDSZ-1);
@@ -119,30 +119,30 @@ STATIC signed_word GC_add_ext_descriptor(GC_bitmap bm, word nbits)
LOCK();
while (GC_avail_descr + nwords >= GC_ed_size) {
- ext_descr * new;
- size_t new_size;
- word ed_size = GC_ed_size;
-
- if (ed_size == 0) {
- GC_push_typed_structures = GC_push_typed_structures_proc;
- UNLOCK();
- new_size = ED_INITIAL_SIZE;
- } else {
- UNLOCK();
- new_size = 2 * ed_size;
- if (new_size > MAX_ENV) return(-1);
- }
- new = (ext_descr *) GC_malloc_atomic(new_size * sizeof(ext_descr));
- if (new == 0) return(-1);
+ ext_descr * new;
+ size_t new_size;
+ word ed_size = GC_ed_size;
+
+ if (ed_size == 0) {
+ GC_push_typed_structures = GC_push_typed_structures_proc;
+ UNLOCK();
+ new_size = ED_INITIAL_SIZE;
+ } else {
+ UNLOCK();
+ new_size = 2 * ed_size;
+ if (new_size > MAX_ENV) return(-1);
+ }
+ new = (ext_descr *) GC_malloc_atomic(new_size * sizeof(ext_descr));
+ if (new == 0) return(-1);
LOCK();
if (ed_size == GC_ed_size) {
if (GC_avail_descr != 0) {
- BCOPY(GC_ext_descriptors, new,
- GC_avail_descr * sizeof(ext_descr));
- }
- GC_ed_size = new_size;
- GC_ext_descriptors = new;
- } /* else another thread already resized it in the meantime */
+ BCOPY(GC_ext_descriptors, new,
+ GC_avail_descr * sizeof(ext_descr));
+ }
+ GC_ed_size = new_size;
+ GC_ext_descriptors = new;
+ } /* else another thread already resized it in the meantime */
}
result = GC_avail_descr;
for (i = 0; i < nwords-1; i++) {
@@ -161,14 +161,14 @@ STATIC signed_word GC_add_ext_descriptor(GC_bitmap bm, word nbits)
return(result);
}
-/* Table of bitmap descriptors for n word long all pointer objects. */
+/* Table of bitmap descriptors for n word long all pointer objects. */
GC_descr GC_bm_table[WORDSZ/2];
-
-/* Return a descriptor for the concatenation of 2 nwords long objects, */
-/* each of which is described by descriptor. */
-/* The result is known to be short enough to fit into a bitmap */
-/* descriptor. */
-/* Descriptor is a GC_DS_LENGTH or GC_DS_BITMAP descriptor. */
+
+/* Return a descriptor for the concatenation of 2 nwords long objects, */
+/* each of which is described by descriptor. */
+/* The result is known to be short enough to fit into a bitmap */
+/* descriptor. */
+/* Descriptor is a GC_DS_LENGTH or GC_DS_BITMAP descriptor. */
STATIC GC_descr GC_double_descr(GC_descr descriptor, word nwords)
{
if ((descriptor & GC_DS_TAGS) == GC_DS_LENGTH) {
@@ -180,40 +180,40 @@ STATIC GC_descr GC_double_descr(GC_descr descriptor, word nwords)
STATIC complex_descriptor *
GC_make_sequence_descriptor(complex_descriptor *first,
- complex_descriptor *second);
+ complex_descriptor *second);
-/* Build a descriptor for an array with nelements elements, */
-/* each of which can be described by a simple descriptor. */
-/* We try to optimize some common cases. */
+/* Build a descriptor for an array with nelements elements, */
+/* each of which can be described by a simple descriptor. */
+/* We try to optimize some common cases. */
/* If the result is COMPLEX, then a complex_descr* is returned */
-/* in *complex_d. */
-/* If the result is LEAF, then we built a LeafDescriptor in */
-/* the structure pointed to by leaf. */
-/* The tag in the leaf structure is not set. */
-/* If the result is SIMPLE, then a GC_descr */
-/* is returned in *simple_d. */
-/* If the result is NO_MEM, then */
-/* we failed to allocate the descriptor. */
-/* The implementation knows that GC_DS_LENGTH is 0. */
-/* *leaf, *complex_d, and *simple_d may be used as temporaries */
-/* during the construction. */
+/* in *complex_d. */
+/* If the result is LEAF, then we built a LeafDescriptor in */
+/* the structure pointed to by leaf. */
+/* The tag in the leaf structure is not set. */
+/* If the result is SIMPLE, then a GC_descr */
+/* is returned in *simple_d. */
+/* If the result is NO_MEM, then */
+/* we failed to allocate the descriptor. */
+/* The implementation knows that GC_DS_LENGTH is 0. */
+/* *leaf, *complex_d, and *simple_d may be used as temporaries */
+/* during the construction. */
# define COMPLEX 2
# define LEAF 1
# define SIMPLE 0
# define NO_MEM (-1)
STATIC int GC_make_array_descriptor(size_t nelements, size_t size,
- GC_descr descriptor, GC_descr *simple_d,
- complex_descriptor **complex_d,
- struct LeafDescriptor * leaf)
+ GC_descr descriptor, GC_descr *simple_d,
+ complex_descriptor **complex_d,
+ struct LeafDescriptor * leaf)
{
# define OPT_THRESHOLD 50
- /* For larger arrays, we try to combine descriptors of adjacent */
- /* descriptors to speed up marking, and to reduce the amount */
- /* of space needed on the mark stack. */
+ /* For larger arrays, we try to combine descriptors of adjacent */
+ /* descriptors to speed up marking, and to reduce the amount */
+ /* of space needed on the mark stack. */
if ((descriptor & GC_DS_TAGS) == GC_DS_LENGTH) {
if (descriptor == (GC_descr)size) {
- *simple_d = nelements * descriptor;
- return(SIMPLE);
+ *simple_d = nelements * descriptor;
+ return(SIMPLE);
} else if ((word)descriptor == 0) {
*simple_d = (GC_descr)0;
return(SIMPLE);
@@ -230,20 +230,20 @@ STATIC int GC_make_array_descriptor(size_t nelements, size_t size,
}
}
} else if (size <= BITMAP_BITS/2
- && (descriptor & GC_DS_TAGS) != GC_DS_PROC
- && (size & (sizeof(word)-1)) == 0) {
- int result =
+ && (descriptor & GC_DS_TAGS) != GC_DS_PROC
+ && (size & (sizeof(word)-1)) == 0) {
+ int result =
GC_make_array_descriptor(nelements/2, 2*size,
- GC_double_descr(descriptor,
- BYTES_TO_WORDS(size)),
- simple_d, complex_d, leaf);
+ GC_double_descr(descriptor,
+ BYTES_TO_WORDS(size)),
+ simple_d, complex_d, leaf);
if ((nelements & 1) == 0) {
return(result);
} else {
struct LeafDescriptor * one_element =
(struct LeafDescriptor *)
- GC_malloc_atomic(sizeof(struct LeafDescriptor));
-
+ GC_malloc_atomic(sizeof(struct LeafDescriptor));
+
if (result == NO_MEM || one_element == 0) return(NO_MEM);
one_element -> ld_tag = LEAF_TAG;
one_element -> ld_size = size;
@@ -254,36 +254,36 @@ STATIC int GC_make_array_descriptor(size_t nelements, size_t size,
{
struct LeafDescriptor * beginning =
(struct LeafDescriptor *)
- GC_malloc_atomic(sizeof(struct LeafDescriptor));
+ GC_malloc_atomic(sizeof(struct LeafDescriptor));
if (beginning == 0) return(NO_MEM);
beginning -> ld_tag = LEAF_TAG;
beginning -> ld_size = size;
beginning -> ld_nelements = 1;
beginning -> ld_descriptor = *simple_d;
*complex_d = GC_make_sequence_descriptor(
- (complex_descriptor *)beginning,
- (complex_descriptor *)one_element);
+ (complex_descriptor *)beginning,
+ (complex_descriptor *)one_element);
break;
}
case LEAF:
{
struct LeafDescriptor * beginning =
(struct LeafDescriptor *)
- GC_malloc_atomic(sizeof(struct LeafDescriptor));
+ GC_malloc_atomic(sizeof(struct LeafDescriptor));
if (beginning == 0) return(NO_MEM);
beginning -> ld_tag = LEAF_TAG;
beginning -> ld_size = leaf -> ld_size;
beginning -> ld_nelements = leaf -> ld_nelements;
beginning -> ld_descriptor = leaf -> ld_descriptor;
*complex_d = GC_make_sequence_descriptor(
- (complex_descriptor *)beginning,
- (complex_descriptor *)one_element);
+ (complex_descriptor *)beginning,
+ (complex_descriptor *)one_element);
break;
}
case COMPLEX:
*complex_d = GC_make_sequence_descriptor(
- *complex_d,
- (complex_descriptor *)one_element);
+ *complex_d,
+ (complex_descriptor *)one_element);
break;
}
return(COMPLEX);
@@ -299,16 +299,16 @@ STATIC int GC_make_array_descriptor(size_t nelements, size_t size,
STATIC complex_descriptor *
GC_make_sequence_descriptor(complex_descriptor *first,
- complex_descriptor *second)
+ complex_descriptor *second)
{
struct SequenceDescriptor * result =
(struct SequenceDescriptor *)
- GC_malloc(sizeof(struct SequenceDescriptor));
- /* Can't result in overly conservative marking, since tags are */
- /* very small integers. Probably faster than maintaining type */
- /* info. */
+ GC_malloc(sizeof(struct SequenceDescriptor));
+ /* Can't result in overly conservative marking, since tags are */
+ /* very small integers. Probably faster than maintaining type */
+ /* info. */
if (result != 0) {
- result -> sd_tag = SEQUENCE_TAG;
+ result -> sd_tag = SEQUENCE_TAG;
result -> sd_first = first;
result -> sd_second = second;
}
@@ -317,14 +317,14 @@ GC_make_sequence_descriptor(complex_descriptor *first,
#ifdef UNDEFINED
complex_descriptor * GC_make_complex_array_descriptor(word nelements,
- complex_descriptor *descr)
+ complex_descriptor *descr)
{
struct ComplexArrayDescriptor * result =
(struct ComplexArrayDescriptor *)
- GC_malloc(sizeof(struct ComplexArrayDescriptor));
-
+ GC_malloc(sizeof(struct ComplexArrayDescriptor));
+
if (result != 0) {
- result -> ad_tag = ARRAY_TAG;
+ result -> ad_tag = ARRAY_TAG;
result -> ad_nelements = nelements;
result -> ad_element_descr = descr;
}
@@ -337,10 +337,10 @@ STATIC ptr_t * GC_eobjfreelist;
STATIC ptr_t * GC_arobjfreelist;
STATIC mse * GC_typed_mark_proc(word * addr, mse * mark_stack_ptr,
- mse * mark_stack_limit, word env);
+ mse * mark_stack_limit, word env);
STATIC mse * GC_array_mark_proc(word * addr, mse * mark_stack_ptr,
- mse * mark_stack_limit, word env);
+ mse * mark_stack_limit, word env);
/* Caller does not hold allocation lock. */
STATIC void GC_init_explicit_typing(void)
@@ -358,18 +358,18 @@ STATIC void GC_init_explicit_typing(void)
/* Set up object kind with simple indirect descriptor. */
GC_eobjfreelist = (ptr_t *)GC_new_free_list_inner();
GC_explicit_kind = GC_new_kind_inner(
- (void **)GC_eobjfreelist,
- (((word)WORDS_TO_BYTES(-1)) | GC_DS_PER_OBJECT),
- TRUE, TRUE);
- /* Descriptors are in the last word of the object. */
+ (void **)GC_eobjfreelist,
+ (((word)WORDS_TO_BYTES(-1)) | GC_DS_PER_OBJECT),
+ TRUE, TRUE);
+ /* Descriptors are in the last word of the object. */
GC_typed_mark_proc_index = GC_new_proc_inner(GC_typed_mark_proc);
/* Set up object kind with array descriptor. */
GC_arobjfreelist = (ptr_t *)GC_new_free_list_inner();
GC_array_mark_proc_index = GC_new_proc_inner(GC_array_mark_proc);
GC_array_kind = GC_new_kind_inner(
- (void **)GC_arobjfreelist,
- GC_MAKE_PROC(GC_array_mark_proc_index, 0),
- FALSE, TRUE);
+ (void **)GC_arobjfreelist,
+ GC_MAKE_PROC(GC_array_mark_proc_index, 0),
+ FALSE, TRUE);
for (i = 0; i < WORDSZ/2; i++) {
GC_descr d = (((word)(-1)) >> (WORDSZ - i)) << (WORDSZ - i);
d |= GC_DS_BITMAP;
@@ -379,7 +379,7 @@ STATIC void GC_init_explicit_typing(void)
}
STATIC mse * GC_typed_mark_proc(word * addr, mse * mark_stack_ptr,
- mse * mark_stack_limit, word env)
+ mse * mark_stack_limit, word env)
{
word bm = GC_ext_descriptors[env].ed_bitmap;
word * current_p = addr;
@@ -390,39 +390,39 @@ STATIC mse * GC_typed_mark_proc(word * addr, mse * mark_stack_ptr,
INIT_HDR_CACHE;
for (; bm != 0; bm >>= 1, current_p++) {
- if (bm & 1) {
- current = *current_p;
- FIXUP_POINTER(current);
- if ((ptr_t)current >= least_ha && (ptr_t)current <= greatest_ha) {
- PUSH_CONTENTS((ptr_t)current, mark_stack_ptr,
- mark_stack_limit, (ptr_t)current_p, exit1);
- }
- }
+ if (bm & 1) {
+ current = *current_p;
+ FIXUP_POINTER(current);
+ if ((ptr_t)current >= least_ha && (ptr_t)current <= greatest_ha) {
+ PUSH_CONTENTS((ptr_t)current, mark_stack_ptr,
+ mark_stack_limit, (ptr_t)current_p, exit1);
+ }
+ }
}
if (GC_ext_descriptors[env].ed_continued) {
- /* Push an entry with the rest of the descriptor back onto the */
- /* stack. Thus we never do too much work at once. Note that */
- /* we also can't overflow the mark stack unless we actually */
- /* mark something. */
+ /* Push an entry with the rest of the descriptor back onto the */
+ /* stack. Thus we never do too much work at once. Note that */
+ /* we also can't overflow the mark stack unless we actually */
+ /* mark something. */
mark_stack_ptr++;
if (mark_stack_ptr >= mark_stack_limit) {
mark_stack_ptr = GC_signal_mark_stack_overflow(mark_stack_ptr);
}
mark_stack_ptr -> mse_start = (ptr_t)(addr + WORDSZ);
mark_stack_ptr -> mse_descr =
- GC_MAKE_PROC(GC_typed_mark_proc_index, env+1);
+ GC_MAKE_PROC(GC_typed_mark_proc_index, env+1);
}
return(mark_stack_ptr);
}
-/* Return the size of the object described by d. It would be faster to */
-/* store this directly, or to compute it as part of */
-/* GC_push_complex_descriptor, but hopefully it doesn't matter. */
+/* Return the size of the object described by d. It would be faster to */
+/* store this directly, or to compute it as part of */
+/* GC_push_complex_descriptor, but hopefully it doesn't matter. */
STATIC word GC_descr_obj_size(complex_descriptor *d)
{
switch(d -> TAG) {
case LEAF_TAG:
- return(d -> ld.ld_nelements * d -> ld.ld_size);
+ return(d -> ld.ld_nelements * d -> ld.ld_size);
case ARRAY_TAG:
return(d -> ad.ad_nelements
* GC_descr_obj_size(d -> ad.ad_element_descr));
@@ -435,21 +435,21 @@ STATIC word GC_descr_obj_size(complex_descriptor *d)
}
}
-/* Push descriptors for the object at addr with complex descriptor d */
-/* onto the mark stack. Return 0 if the mark stack overflowed. */
+/* Push descriptors for the object at addr with complex descriptor d */
+/* onto the mark stack. Return 0 if the mark stack overflowed. */
STATIC mse * GC_push_complex_descriptor(word *addr, complex_descriptor *d,
- mse *msp, mse *msl)
+ mse *msp, mse *msl)
{
register ptr_t current = (ptr_t) addr;
register word nelements;
register word sz;
register word i;
-
+
switch(d -> TAG) {
case LEAF_TAG:
{
register GC_descr descr = d -> ld.ld_descriptor;
-
+
nelements = d -> ld.ld_nelements;
if (msl - msp <= (ptrdiff_t)nelements) return(0);
sz = d -> ld.ld_size;
@@ -464,12 +464,12 @@ STATIC mse * GC_push_complex_descriptor(word *addr, complex_descriptor *d,
case ARRAY_TAG:
{
register complex_descriptor *descr = d -> ad.ad_element_descr;
-
+
nelements = d -> ad.ad_nelements;
sz = GC_descr_obj_size(descr);
for (i = 0; i < nelements; i++) {
msp = GC_push_complex_descriptor((word *)current, descr,
- msp, msl);
+ msp, msl);
if (msp == 0) return(0);
current += sz;
}
@@ -479,11 +479,11 @@ STATIC mse * GC_push_complex_descriptor(word *addr, complex_descriptor *d,
{
sz = GC_descr_obj_size(d -> sd.sd_first);
msp = GC_push_complex_descriptor((word *)current, d -> sd.sd_first,
- msp, msl);
+ msp, msl);
if (msp == 0) return(0);
current += sz;
msp = GC_push_complex_descriptor((word *)current, d -> sd.sd_second,
- msp, msl);
+ msp, msl);
return(msp);
}
default:
@@ -494,7 +494,7 @@ STATIC mse * GC_push_complex_descriptor(word *addr, complex_descriptor *d,
/*ARGSUSED*/
STATIC mse * GC_array_mark_proc(word * addr, mse * mark_stack_ptr,
- mse * mark_stack_limit, word env)
+ mse * mark_stack_limit, word env)
{
hdr * hhdr = HDR(addr);
size_t sz = hhdr -> hb_sz;
@@ -502,26 +502,26 @@ STATIC mse * GC_array_mark_proc(word * addr, mse * mark_stack_ptr,
complex_descriptor * descr = (complex_descriptor *)(addr[nwords-1]);
mse * orig_mark_stack_ptr = mark_stack_ptr;
mse * new_mark_stack_ptr;
-
+
if (descr == 0) {
- /* Found a reference to a free list entry. Ignore it. */
- return(orig_mark_stack_ptr);
+ /* Found a reference to a free list entry. Ignore it. */
+ return(orig_mark_stack_ptr);
}
- /* In use counts were already updated when array descriptor was */
- /* pushed. Here we only replace it by subobject descriptors, so */
- /* no update is necessary. */
+ /* In use counts were already updated when array descriptor was */
+ /* pushed. Here we only replace it by subobject descriptors, so */
+ /* no update is necessary. */
new_mark_stack_ptr = GC_push_complex_descriptor(addr, descr,
- mark_stack_ptr,
- mark_stack_limit-1);
+ mark_stack_ptr,
+ mark_stack_limit-1);
if (new_mark_stack_ptr == 0) {
- /* Doesn't fit. Conservatively push the whole array as a unit */
- /* and request a mark stack expansion. */
- /* This cannot cause a mark stack overflow, since it replaces */
- /* the original array entry. */
- GC_mark_stack_too_small = TRUE;
- new_mark_stack_ptr = orig_mark_stack_ptr + 1;
- new_mark_stack_ptr -> mse_start = (ptr_t)addr;
- new_mark_stack_ptr -> mse_descr = sz | GC_DS_LENGTH;
+ /* Doesn't fit. Conservatively push the whole array as a unit */
+ /* and request a mark stack expansion. */
+ /* This cannot cause a mark stack overflow, since it replaces */
+ /* the original array entry. */
+ GC_mark_stack_too_small = TRUE;
+ new_mark_stack_ptr = orig_mark_stack_ptr + 1;
+ new_mark_stack_ptr -> mse_start = (ptr_t)addr;
+ new_mark_stack_ptr -> mse_descr = sz | GC_DS_LENGTH;
} else {
/* Push descriptor itself */
new_mark_stack_ptr++;
@@ -537,7 +537,7 @@ GC_API GC_descr GC_CALL GC_make_descriptor(GC_bitmap bm, size_t len)
GC_descr result;
signed_word i;
# define HIGH_BIT (((word)1) << (WORDSZ - 1))
-
+
if (!GC_explicit_typing_initialized) GC_init_explicit_typing();
while (last_set_bit >= 0 && !GC_get_bit(bm, last_set_bit)) last_set_bit --;
if (last_set_bit < 0) return(0 /* no pointers */);
@@ -545,36 +545,36 @@ GC_API GC_descr GC_CALL GC_make_descriptor(GC_bitmap bm, size_t len)
{
register GC_bool all_bits_set = TRUE;
for (i = 0; i < last_set_bit; i++) {
- if (!GC_get_bit(bm, i)) {
- all_bits_set = FALSE;
- break;
- }
+ if (!GC_get_bit(bm, i)) {
+ all_bits_set = FALSE;
+ break;
+ }
}
if (all_bits_set) {
- /* An initial section contains all pointers. Use length descriptor. */
+ /* An initial section contains all pointers. Use length descriptor. */
return (WORDS_TO_BYTES(last_set_bit+1) | GC_DS_LENGTH);
}
}
# endif
if (last_set_bit < BITMAP_BITS) {
- /* Hopefully the common case. */
- /* Build bitmap descriptor (with bits reversed) */
- result = HIGH_BIT;
- for (i = last_set_bit - 1; i >= 0; i--) {
- result >>= 1;
- if (GC_get_bit(bm, i)) result |= HIGH_BIT;
- }
- result |= GC_DS_BITMAP;
- return(result);
+ /* Hopefully the common case. */
+ /* Build bitmap descriptor (with bits reversed) */
+ result = HIGH_BIT;
+ for (i = last_set_bit - 1; i >= 0; i--) {
+ result >>= 1;
+ if (GC_get_bit(bm, i)) result |= HIGH_BIT;
+ }
+ result |= GC_DS_BITMAP;
+ return(result);
} else {
- signed_word index;
-
- index = GC_add_ext_descriptor(bm, (word)last_set_bit+1);
- if (index == -1) return(WORDS_TO_BYTES(last_set_bit+1) | GC_DS_LENGTH);
- /* Out of memory: use conservative */
- /* approximation. */
- result = GC_MAKE_PROC(GC_typed_mark_proc_index, (word)index);
- return result;
+ signed_word index;
+
+ index = GC_add_ext_descriptor(bm, (word)last_set_bit+1);
+ if (index == -1) return(WORDS_TO_BYTES(last_set_bit+1) | GC_DS_LENGTH);
+ /* Out of memory: use conservative */
+ /* approximation. */
+ result = GC_MAKE_PROC(GC_typed_mark_proc_index, (word)index);
+ return result;
}
}
@@ -582,7 +582,7 @@ void * GC_clear_stack(void *);
#define GENERAL_MALLOC(lb,k) \
(void *)GC_clear_stack(GC_generic_malloc((word)lb, k))
-
+
#define GENERAL_MALLOC_IOP(lb,k) \
(void *)GC_clear_stack(GC_generic_malloc_ignore_off_page(lb, k))
@@ -595,33 +595,33 @@ GC_API void * GC_CALL GC_malloc_explicitly_typed(size_t lb, GC_descr d)
lb += TYPD_EXTRA_BYTES;
if(SMALL_OBJ(lb)) {
- lg = GC_size_map[lb];
- opp = &(GC_eobjfreelist[lg]);
- LOCK();
+ lg = GC_size_map[lb];
+ opp = &(GC_eobjfreelist[lg]);
+ LOCK();
if( (op = *opp) == 0 ) {
UNLOCK();
op = (ptr_t)GENERAL_MALLOC((word)lb, GC_explicit_kind);
- if (0 == op) return 0;
- lg = GC_size_map[lb]; /* May have been uninitialized. */
+ if (0 == op) return 0;
+ lg = GC_size_map[lb]; /* May have been uninitialized. */
} else {
*opp = obj_link(op);
- obj_link(op) = 0;
+ obj_link(op) = 0;
GC_bytes_allocd += GRANULES_TO_BYTES(lg);
UNLOCK();
}
- ((word *)op)[GRANULES_TO_WORDS(lg) - 1] = d;
+ ((word *)op)[GRANULES_TO_WORDS(lg) - 1] = d;
} else {
op = (ptr_t)GENERAL_MALLOC((word)lb, GC_explicit_kind);
if (op != NULL) {
- lg = BYTES_TO_GRANULES(GC_size(op));
- ((word *)op)[GRANULES_TO_WORDS(lg) - 1] = d;
+ lg = BYTES_TO_GRANULES(GC_size(op));
+ ((word *)op)[GRANULES_TO_WORDS(lg) - 1] = d;
}
}
return((void *) op);
}
GC_API void * GC_CALL GC_malloc_explicitly_typed_ignore_off_page(size_t lb,
- GC_descr d)
+ GC_descr d)
{
ptr_t op;
ptr_t * opp;
@@ -630,33 +630,33 @@ DCL_LOCK_STATE;
lb += TYPD_EXTRA_BYTES;
if( SMALL_OBJ(lb) ) {
- lg = GC_size_map[lb];
- opp = &(GC_eobjfreelist[lg]);
- LOCK();
+ lg = GC_size_map[lb];
+ opp = &(GC_eobjfreelist[lg]);
+ LOCK();
if( (op = *opp) == 0 ) {
UNLOCK();
op = (ptr_t)GENERAL_MALLOC_IOP(lb, GC_explicit_kind);
- if (0 == op) return 0;
- lg = GC_size_map[lb]; /* May have been uninitialized. */
+ if (0 == op) return 0;
+ lg = GC_size_map[lb]; /* May have been uninitialized. */
} else {
*opp = obj_link(op);
- obj_link(op) = 0;
+ obj_link(op) = 0;
GC_bytes_allocd += GRANULES_TO_BYTES(lg);
UNLOCK();
}
- ((word *)op)[GRANULES_TO_WORDS(lg) - 1] = d;
+ ((word *)op)[GRANULES_TO_WORDS(lg) - 1] = d;
} else {
op = (ptr_t)GENERAL_MALLOC_IOP(lb, GC_explicit_kind);
if (op != NULL) {
lg = BYTES_TO_WORDS(GC_size(op));
- ((word *)op)[GRANULES_TO_WORDS(lg) - 1] = d;
+ ((word *)op)[GRANULES_TO_WORDS(lg) - 1] = d;
}
}
return((void *) op);
}
GC_API void * GC_CALL GC_calloc_explicitly_typed(size_t n, size_t lb,
- GC_descr d)
+ GC_descr d)
{
ptr_t op;
ptr_t * opp;
@@ -668,31 +668,31 @@ struct LeafDescriptor leaf;
DCL_LOCK_STATE;
descr_type = GC_make_array_descriptor((word)n, (word)lb, d,
- &simple_descr, &complex_descr, &leaf);
+ &simple_descr, &complex_descr, &leaf);
switch(descr_type) {
- case NO_MEM: return(0);
- case SIMPLE: return(GC_malloc_explicitly_typed(n*lb, simple_descr));
- case LEAF:
- lb *= n;
- lb += sizeof(struct LeafDescriptor) + TYPD_EXTRA_BYTES;
- break;
- case COMPLEX:
- lb *= n;
- lb += TYPD_EXTRA_BYTES;
- break;
+ case NO_MEM: return(0);
+ case SIMPLE: return(GC_malloc_explicitly_typed(n*lb, simple_descr));
+ case LEAF:
+ lb *= n;
+ lb += sizeof(struct LeafDescriptor) + TYPD_EXTRA_BYTES;
+ break;
+ case COMPLEX:
+ lb *= n;
+ lb += TYPD_EXTRA_BYTES;
+ break;
}
if( SMALL_OBJ(lb) ) {
- lg = GC_size_map[lb];
- opp = &(GC_arobjfreelist[lg]);
- LOCK();
+ lg = GC_size_map[lb];
+ opp = &(GC_arobjfreelist[lg]);
+ LOCK();
if( (op = *opp) == 0 ) {
UNLOCK();
op = (ptr_t)GENERAL_MALLOC((word)lb, GC_array_kind);
- if (0 == op) return(0);
- lg = GC_size_map[lb]; /* May have been uninitialized. */
+ if (0 == op) return(0);
+ lg = GC_size_map[lb]; /* May have been uninitialized. */
} else {
*opp = obj_link(op);
- obj_link(op) = 0;
+ obj_link(op) = 0;
GC_bytes_allocd += GRANULES_TO_BYTES(lg);
UNLOCK();
}
@@ -707,8 +707,8 @@ DCL_LOCK_STATE;
(struct LeafDescriptor *)
((word *)op
+ GRANULES_TO_WORDS(lg)
- - (BYTES_TO_WORDS(sizeof(struct LeafDescriptor)) + 1));
-
+ - (BYTES_TO_WORDS(sizeof(struct LeafDescriptor)) + 1));
+
lp -> ld_tag = LEAF_TAG;
lp -> ld_size = leaf.ld_size;
lp -> ld_nelements = leaf.ld_nelements;
@@ -716,17 +716,17 @@ DCL_LOCK_STATE;
((volatile word *)op)[GRANULES_TO_WORDS(lg) - 1] = (word)lp;
} else {
size_t lw = GRANULES_TO_WORDS(lg);
-
+
((word *)op)[lw - 1] = (word)complex_descr;
- /* Make sure the descriptor is cleared once there is any danger */
- /* it may have been collected. */
+ /* Make sure the descriptor is cleared once there is any danger */
+ /* it may have been collected. */
if (GC_general_register_disappearing_link((void * *)((word *)op+lw-1),
- op) == GC_NO_MEMORY) {
- /* Couldn't register it due to lack of memory. Punt. */
- /* This will probably fail too, but gives the recovery code */
- /* a chance. */
- return(GC_malloc(n*lb));
- }
+ op) == GC_NO_MEMORY) {
+ /* Couldn't register it due to lack of memory. Punt. */
+ /* This will probably fail too, but gives the recovery code */
+ /* a chance. */
+ return(GC_malloc(n*lb));
+ }
}
return((void *) op);
}